From 3e84d6a2ae7df5f6b9073a91ccc6acef50b45aab Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 27 Apr 2023 11:49:00 +0200 Subject: afl++ -> AFL++ --- docs/Changelog.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 20b915fa..cd5ed9fc 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -229,7 +229,7 @@ afl-showmap and other tools. - afl-cc: - detect overflow reads on initial input buffer for asan - - new cmplog mode (incompatible with older afl++ versions) + - new cmplog mode (incompatible with older AFL++ versions) - support llvm IR select instrumentation for default PCGUARD and LTO - fix for shared linking on MacOS - better selective instrumentation AFL_LLVM_{ALLOW|DENY}LIST -- cgit 1.4.1 From 3a98d7af18e6ebf12e7cce2eb78bdb9b9927be3e Mon Sep 17 00:00:00 2001 From: Dominik Maier Date: Thu, 11 May 2023 21:02:46 +0200 Subject: qemuafl: Persistent mode for PPC32 targets --- docs/Changelog.md | 2 ++ qemu_mode/QEMUAFL_VERSION | 2 +- qemu_mode/qemuafl | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index cd5ed9fc..1fe714e8 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -18,6 +18,8 @@ - add `-I filelist` option, an alternative to `-i in_dir` - afl-cmin + afl-cmin.bash: - `-T threads` parallel task support, can be a huge speedup! + - qemuafl: + - Persistent mode support for ppc32 tragets by @worksbutnottested - a new grammar custom mutator atnwalk was submitted by @voidptr127 ! diff --git a/qemu_mode/QEMUAFL_VERSION b/qemu_mode/QEMUAFL_VERSION index fa44d173..043a9f82 100644 --- a/qemu_mode/QEMUAFL_VERSION +++ b/qemu_mode/QEMUAFL_VERSION @@ -1 +1 @@ -0569eff8a1 +b0abbe2 diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl index 0569eff8..b0abbe2e 160000 --- a/qemu_mode/qemuafl +++ b/qemu_mode/qemuafl @@ -1 +1 @@ -Subproject commit 0569eff8a12dec73642b96757f6b5b51a618a03a +Subproject commit b0abbe2e74ed74ff6ff25b5ea3110d27ba978001 -- cgit 1.4.1 From a752b159212db458d77cd13c46fdfbde01045d91 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 12 May 2023 08:29:31 +0200 Subject: update qemu_mode --- docs/Changelog.md | 4 ++-- qemu_mode/QEMUAFL_VERSION | 2 +- qemu_mode/qemuafl | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 1fe714e8..e85de763 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -18,8 +18,8 @@ - add `-I filelist` option, an alternative to `-i in_dir` - afl-cmin + afl-cmin.bash: - `-T threads` parallel task support, can be a huge speedup! - - qemuafl: - - Persistent mode support for ppc32 tragets by @worksbutnottested + - qemu_mode: + - Persistent mode +QASAN support for ppc32 tragets by @worksbutnottested - a new grammar custom mutator atnwalk was submitted by @voidptr127 ! 
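The ppc32 persistent mode added above is the binary-only counterpart of AFL++'s compile-time persistent loop: qemuafl snapshots the target at a user-chosen address (`AFL_QEMU_PERSISTENT_ADDR`, optionally paired with `AFL_QEMU_PERSISTENT_RET`) and rolls the state back instead of re-executing the whole binary for every input. For comparison, a minimal source-level persistent harness built from the documented AFL++ macros looks like the sketch below — the macro names are the real API, the harness body is illustrative, and it must be built with afl-cc:

```c
#include <unistd.h>

__AFL_FUZZ_INIT();

int main(void) {

  /* one-time setup goes here, before the forkserver starts */
  __AFL_INIT();

  unsigned char *buf = __AFL_FUZZ_TESTCASE_BUF; /* shared-memory test case */

  while (__AFL_LOOP(10000)) { /* 10000 in-process iterations, then re-fork */

    int len = __AFL_FUZZ_TESTCASE_LEN;
    if (len < 1) continue;

    /* ... feed buf[0..len) to the code under test ... */

  }

  return 0;

}
```

Either variant lets afl-fuzz skip one fork+execve per test case, which is where most of the persistent-mode speedup comes from; QEMU persistent mode delivers the same loop without source access.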
diff --git a/qemu_mode/QEMUAFL_VERSION b/qemu_mode/QEMUAFL_VERSION index 043a9f82..44ea5345 100644 --- a/qemu_mode/QEMUAFL_VERSION +++ b/qemu_mode/QEMUAFL_VERSION @@ -1 +1 @@ -b0abbe2 +a1321713c7 diff --git a/qemu_mode/qemuafl b/qemu_mode/qemuafl index b0abbe2e..a1321713 160000 --- a/qemu_mode/qemuafl +++ b/qemu_mode/qemuafl @@ -1 +1 @@ -Subproject commit b0abbe2e74ed74ff6ff25b5ea3110d27ba978001 +Subproject commit a1321713c7502c152dd7527555e0f8a800d55225 -- cgit 1.4.1 From 93c821aaa3df0cf20f892ce72447ff022161c8ab Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 12 May 2023 08:39:11 +0200 Subject: afl-clang-lto incomptable with -flto=thin --- docs/Changelog.md | 1 + src/afl-cc.c | 9 +++++++++ 2 files changed, 10 insertions(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index e85de763..799c13af 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -13,6 +13,7 @@ - afl-cc: - new env `AFL_LLVM_LTO_SKIPINIT` to support the AFL++ based WASM (https://github.com/fgsect/WAFL) project + - error and print help if afl-clan-lto is used with lto=thin - afl-showmap: - added custom mutator post_process and send support - add `-I filelist` option, an alternative to `-i in_dir` diff --git a/src/afl-cc.c b/src/afl-cc.c index 19314555..13ca751e 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -853,6 +853,15 @@ static void edit_params(u32 argc, char **argv, char **envp) { if (cur[0] != '-') { non_dash = 1; } if (!strncmp(cur, "--afl", 5)) continue; + + if (lto_mode && !strncmp(cur, "-flto=thin", 10)) { + + FATAL( + "afl-clang-lto cannot work with -flto=thin. Switch to -flto=full or " + "use afl-clang-fast!"); + + } + if (lto_mode && !strncmp(cur, "-fuse-ld=", 9)) continue; if (lto_mode && !strncmp(cur, "--ld-path=", 10)) continue; if (!strncmp(cur, "-fno-unroll", 11)) continue; -- cgit 1.4.1 From 7f636dbfc247fbe75910fa8fb681ea55d230ba79 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 12 May 2023 15:58:20 +0200 Subject: add @responsefile support for afl-cc --- docs/Changelog.md | 1 + src/afl-cc.c | 460 +++++++++++++++++++++++++++++++++++------------------- 2 files changed, 300 insertions(+), 161 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 799c13af..3602af50 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -11,6 +11,7 @@ - new env `AFL_IGNORE_PROBLEMS_COVERAGE` to ignore coverage from loaded libs after forkserver initialization (required by Mozilla) - afl-cc: + - added @responsefile support - new env `AFL_LLVM_LTO_SKIPINIT` to support the AFL++ based WASM (https://github.com/fgsect/WAFL) project - error and print help if afl-clan-lto is used with lto=thin diff --git a/src/afl-cc.c b/src/afl-cc.c index 13ca751e..972ac8cd 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -31,6 +31,8 @@ #include #include #include +#include +#include #if (LLVM_MAJOR - 0 == 0) #undef LLVM_MAJOR @@ -376,15 +378,304 @@ void parse_fsanitize(char *string) { } +static u8 fortify_set = 0, asan_set = 0, x_set = 0, bit_mode = 0, + shared_linking = 0, preprocessor_only = 0, have_unroll = 0, + have_o = 0, have_pic = 0, have_c = 0, partial_linking = 0, + non_dash = 0; + +static void process_params(u32 argc, char **argv) { + + if (cc_par_cnt + argc >= 1024) { FATAL("Too many command line parameters"); } + + if (lto_mode && argc > 1) { + + u32 idx; + for (idx = 1; idx < argc; idx++) { + + if (!strncasecmp(argv[idx], "-fpic", 5)) have_pic = 1; + + } + + } + + // for (u32 x = 0; x < argc; ++x) fprintf(stderr, "[%u] %s\n", x, 
argv[x]); + + /* Process the argument list. */ + + u8 skip_next = 0; + while (--argc) { + + u8 *cur = *(++argv); + + if (skip_next) { + + skip_next = 0; + continue; + + } + + if (cur[0] != '-') { non_dash = 1; } + if (!strncmp(cur, "--afl", 5)) continue; + + if (lto_mode && !strncmp(cur, "-flto=thin", 10)) { + + FATAL( + "afl-clang-lto cannot work with -flto=thin. Switch to -flto=full or " + "use afl-clang-fast!"); + + } + + if (lto_mode && !strncmp(cur, "-fuse-ld=", 9)) continue; + if (lto_mode && !strncmp(cur, "--ld-path=", 10)) continue; + if (!strncmp(cur, "-fno-unroll", 11)) continue; + if (strstr(cur, "afl-compiler-rt") || strstr(cur, "afl-llvm-rt")) continue; + if (!strcmp(cur, "-Wl,-z,defs") || !strcmp(cur, "-Wl,--no-undefined") || + !strcmp(cur, "--no-undefined")) { + + continue; + + } + + if (compiler_mode == GCC_PLUGIN && !strcmp(cur, "-pipe")) { continue; } + + if (!strcmp(cur, "-z") || !strcmp(cur, "-Wl,-z")) { + + u8 *param = *(argv + 1); + if (!strcmp(param, "defs") || !strcmp(param, "-Wl,defs")) { + + skip_next = 1; + continue; + + } + + } + + if ((compiler_mode == GCC || compiler_mode == GCC_PLUGIN) && + !strncmp(cur, "-stdlib=", 8)) { + + if (!be_quiet) { WARNF("Found '%s' - stripping!", cur); } + continue; + + } + + if (!strncmp(cur, "-fsanitize-coverage-", 20) && strstr(cur, "list=")) { + + have_instr_list = 1; + + } + + if (!strncmp(cur, "-fsanitize=", strlen("-fsanitize=")) && + strchr(cur, ',')) { + + parse_fsanitize(cur); + if (!cur || strlen(cur) <= strlen("-fsanitize=")) { continue; } + + } else if ((!strncmp(cur, "-fsanitize=fuzzer-", + + strlen("-fsanitize=fuzzer-")) || + !strncmp(cur, "-fsanitize-coverage", + strlen("-fsanitize-coverage"))) && + (strncmp(cur, "sanitize-coverage-allow", + strlen("sanitize-coverage-allow")) && + strncmp(cur, "sanitize-coverage-deny", + strlen("sanitize-coverage-deny")) && + instrument_mode != INSTRUMENT_LLVMNATIVE)) { + + if (!be_quiet) { WARNF("Found '%s' - stripping!", cur); } + continue; + + } + + if (need_aflpplib || !strcmp(cur, "-fsanitize=fuzzer")) { + + u8 *afllib = find_object("libAFLDriver.a", argv[0]); + + if (!be_quiet) { + + OKF("Found '-fsanitize=fuzzer', replacing with libAFLDriver.a"); + + } + + if (!afllib) { + + if (!be_quiet) { + + WARNF( + "Cannot find 'libAFLDriver.a' to replace '-fsanitize=fuzzer' in " + "the flags - this will fail!"); + + } + + } else { + + cc_params[cc_par_cnt++] = afllib; + +#ifdef __APPLE__ + cc_params[cc_par_cnt++] = "-undefined"; + cc_params[cc_par_cnt++] = "dynamic_lookup"; +#endif + + } + + if (need_aflpplib) { + + need_aflpplib = 0; + + } else { + + continue; + + } + + } + + if (!strcmp(cur, "-m32")) bit_mode = 32; + if (!strcmp(cur, "armv7a-linux-androideabi")) bit_mode = 32; + if (!strcmp(cur, "-m64")) bit_mode = 64; + + if (!strcmp(cur, "-fsanitize=address") || !strcmp(cur, "-fsanitize=memory")) + asan_set = 1; + + if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1; + + if (!strcmp(cur, "-x")) x_set = 1; + if (!strcmp(cur, "-E")) preprocessor_only = 1; + if (!strcmp(cur, "-shared")) shared_linking = 1; + if (!strcmp(cur, "-dynamiclib")) shared_linking = 1; + if (!strcmp(cur, "--target=wasm32-wasi")) passthrough = 1; + if (!strcmp(cur, "-Wl,-r")) partial_linking = 1; + if (!strcmp(cur, "-Wl,-i")) partial_linking = 1; + if (!strcmp(cur, "-Wl,--relocatable")) partial_linking = 1; + if (!strcmp(cur, "-r")) partial_linking = 1; + if (!strcmp(cur, "--relocatable")) partial_linking = 1; + if (!strcmp(cur, "-c")) have_c = 1; + + if (!strncmp(cur, "-O", 2)) have_o = 1; + if (!strncmp(cur, 
"-funroll-loop", 13)) have_unroll = 1; + + if (*cur == '@') { + + // response file support. + // we have two choices - move everything to the command line or + // rewrite the response files to temporary files and delete them + // afterwards. We choose the first for easiness. + // We do *not* support quotes in the rsp files to cope with spaces in + // filenames etc! If you need that then send a patch! + u8 *filename = cur + 1; + if (debug) { DEBUGF("response file=%s\n", filename); } + FILE *f = fopen(filename, "r"); + struct stat st; + + // Check not found or empty? let the compiler complain if so. + if (!f || fstat(fileno(f), &st) < 0 || st.st_size < 1) { + + cc_params[cc_par_cnt++] = cur; + continue; + + } + + u8 *tmpbuf = malloc(st.st_size + 1), *ptr; + char **args = malloc(sizeof(char *) * (st.st_size >> 1)); + int count = 1, cont = 0, cont_act = 0; + + while (fgets(tmpbuf, st.st_size, f)) { + + ptr = tmpbuf; + // no leading whitespace + while (isspace(*ptr)) { + + ++ptr; + cont_act = 0; + + } + + // no comments, no empty lines + if (*ptr == '#' || *ptr == '\n' || !*ptr) { continue; } + // remove LF + if (ptr[strlen(ptr) - 1] == '\n') { ptr[strlen(ptr) - 1] = 0; } + // remove CR + if (*ptr && ptr[strlen(ptr) - 1] == '\r') { ptr[strlen(ptr) - 1] = 0; } + // handle \ at end of line + if (*ptr && ptr[strlen(ptr) - 1] == '\\') { + + cont = 1; + ptr[strlen(ptr) - 1] = 0; + + } + + // remove whitespace at end + while (*ptr && isspace(ptr[strlen(ptr) - 1])) { + + ptr[strlen(ptr) - 1] = 0; + cont = 0; + + } + + if (*ptr) { + + do { + + u8 *value = ptr; + while (*ptr && !isspace(*ptr)) { + + ++ptr; + + } + + while (*ptr && isspace(*ptr)) { + + *ptr++ = 0; + + } + + if (cont_act) { + + u32 len = strlen(args[count - 1]) + strlen(value) + 1; + u8 *tmp = malloc(len); + snprintf(tmp, len, "%s%s", args[count - 1], value); + free(args[count - 1]); + args[count - 1] = tmp; + cont_act = 0; + + } else { + + args[count++] = strdup(value); + + } + + } while (*ptr); + + } + + if (cont) { + + cont_act = 1; + cont = 0; + + } + + } + + if (count) { process_params(count, args); } + + // we cannot free args[] + free(tmpbuf); + + continue; + + } + + cc_params[cc_par_cnt++] = cur; + + } + +} + /* Copy argv to cc_params, making the necessary edits. */ static void edit_params(u32 argc, char **argv, char **envp) { - u8 fortify_set = 0, asan_set = 0, x_set = 0, bit_mode = 0, shared_linking = 0, - preprocessor_only = 0, have_unroll = 0, have_o = 0, have_pic = 0, - have_c = 0, partial_linking = 0; - - cc_params = ck_alloc((argc + 128) * sizeof(u8 *)); + cc_params = ck_alloc(1024 * sizeof(u8 *)); if (lto_mode) { @@ -831,168 +1122,15 @@ static void edit_params(u32 argc, char **argv, char **envp) { } - if (!have_pic) cc_params[cc_par_cnt++] = "-fPIC"; - } } - /* Detect stray -v calls from ./configure scripts. */ - - u8 skip_next = 0, non_dash = 0; - while (--argc) { - - u8 *cur = *(++argv); - - if (skip_next) { - - skip_next = 0; - continue; - - } - - if (cur[0] != '-') { non_dash = 1; } - if (!strncmp(cur, "--afl", 5)) continue; - - if (lto_mode && !strncmp(cur, "-flto=thin", 10)) { - - FATAL( - "afl-clang-lto cannot work with -flto=thin. 
Switch to -flto=full or " - "use afl-clang-fast!"); - - } - - if (lto_mode && !strncmp(cur, "-fuse-ld=", 9)) continue; - if (lto_mode && !strncmp(cur, "--ld-path=", 10)) continue; - if (!strncmp(cur, "-fno-unroll", 11)) continue; - if (strstr(cur, "afl-compiler-rt") || strstr(cur, "afl-llvm-rt")) continue; - if (!strcmp(cur, "-Wl,-z,defs") || !strcmp(cur, "-Wl,--no-undefined") || - !strcmp(cur, "--no-undefined")) { - - continue; - - } + /* Inspect the command line parameters. */ - if (compiler_mode == GCC_PLUGIN && !strcmp(cur, "-pipe")) { continue; } - - if (!strcmp(cur, "-z") || !strcmp(cur, "-Wl,-z")) { + process_params(argc, argv); - u8 *param = *(argv + 1); - if (!strcmp(param, "defs") || !strcmp(param, "-Wl,defs")) { - - skip_next = 1; - continue; - - } - - } - - if ((compiler_mode == GCC || compiler_mode == GCC_PLUGIN) && - !strncmp(cur, "-stdlib=", 8)) { - - if (!be_quiet) { WARNF("Found '%s' - stripping!", cur); } - continue; - - } - - if (!strncmp(cur, "-fsanitize-coverage-", 20) && strstr(cur, "list=")) { - - have_instr_list = 1; - - } - - if (!strncmp(cur, "-fsanitize=", strlen("-fsanitize=")) && - strchr(cur, ',')) { - - parse_fsanitize(cur); - if (!cur || strlen(cur) <= strlen("-fsanitize=")) { continue; } - - } else if ((!strncmp(cur, "-fsanitize=fuzzer-", - - strlen("-fsanitize=fuzzer-")) || - !strncmp(cur, "-fsanitize-coverage", - strlen("-fsanitize-coverage"))) && - (strncmp(cur, "sanitize-coverage-allow", - strlen("sanitize-coverage-allow")) && - strncmp(cur, "sanitize-coverage-deny", - strlen("sanitize-coverage-deny")) && - instrument_mode != INSTRUMENT_LLVMNATIVE)) { - - if (!be_quiet) { WARNF("Found '%s' - stripping!", cur); } - continue; - - } - - if (need_aflpplib || !strcmp(cur, "-fsanitize=fuzzer")) { - - u8 *afllib = find_object("libAFLDriver.a", argv[0]); - - if (!be_quiet) { - - OKF("Found '-fsanitize=fuzzer', replacing with libAFLDriver.a"); - - } - - if (!afllib) { - - if (!be_quiet) { - - WARNF( - "Cannot find 'libAFLDriver.a' to replace '-fsanitize=fuzzer' in " - "the flags - this will fail!"); - - } - - } else { - - cc_params[cc_par_cnt++] = afllib; - -#ifdef __APPLE__ - cc_params[cc_par_cnt++] = "-undefined"; - cc_params[cc_par_cnt++] = "dynamic_lookup"; -#endif - - } - - if (need_aflpplib) { - - need_aflpplib = 0; - - } else { - - continue; - - } - - } - - if (!strcmp(cur, "-m32")) bit_mode = 32; - if (!strcmp(cur, "armv7a-linux-androideabi")) bit_mode = 32; - if (!strcmp(cur, "-m64")) bit_mode = 64; - - if (!strcmp(cur, "-fsanitize=address") || !strcmp(cur, "-fsanitize=memory")) - asan_set = 1; - - if (strstr(cur, "FORTIFY_SOURCE")) fortify_set = 1; - - if (!strcmp(cur, "-x")) x_set = 1; - if (!strcmp(cur, "-E")) preprocessor_only = 1; - if (!strcmp(cur, "-shared")) shared_linking = 1; - if (!strcmp(cur, "-dynamiclib")) shared_linking = 1; - if (!strcmp(cur, "--target=wasm32-wasi")) passthrough = 1; - if (!strcmp(cur, "-Wl,-r")) partial_linking = 1; - if (!strcmp(cur, "-Wl,-i")) partial_linking = 1; - if (!strcmp(cur, "-Wl,--relocatable")) partial_linking = 1; - if (!strcmp(cur, "-r")) partial_linking = 1; - if (!strcmp(cur, "--relocatable")) partial_linking = 1; - if (!strcmp(cur, "-c")) have_c = 1; - - if (!strncmp(cur, "-O", 2)) have_o = 1; - if (!strncmp(cur, "-funroll-loop", 13)) have_unroll = 1; - - cc_params[cc_par_cnt++] = cur; - - } + if (!have_pic) { cc_params[cc_par_cnt++] = "-fPIC"; } // in case LLVM is installed not via a package manager or "make install" // e.g. 
compiled download or compiled from github then its ./lib directory -- cgit 1.4.1 From 1d0694df86a3ce70ffac2846f36605eac9300abe Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 17 May 2023 15:25:26 +0200 Subject: add symqemu custom mutator --- custom_mutators/symcc/README.md | 2 + custom_mutators/symqemu/Makefile | 14 ++ custom_mutators/symqemu/README.md | 11 + custom_mutators/symqemu/symqemu.c | 446 ++++++++++++++++++++++++++++++++++++++ docs/Changelog.md | 3 + instrumentation/afl-llvm-common.h | 2 +- test-instr.c | 9 +- 7 files changed, 483 insertions(+), 4 deletions(-) create mode 100644 custom_mutators/symqemu/Makefile create mode 100644 custom_mutators/symqemu/README.md create mode 100644 custom_mutators/symqemu/symqemu.c (limited to 'docs/Changelog.md') diff --git a/custom_mutators/symcc/README.md b/custom_mutators/symcc/README.md index 364a348e..a6839a37 100644 --- a/custom_mutators/symcc/README.md +++ b/custom_mutators/symcc/README.md @@ -5,6 +5,8 @@ This uses the symcc to find new paths into the target. Note that this is a just a proof of concept example! It is better to use the fuzzing helpers of symcc, symqemu, Fuzzolic, etc. rather than this. +Also the symqemu custom mutator is better than this. + To use this custom mutator follow the steps in the symcc repository [https://github.com/eurecom-s3/symcc/](https://github.com/eurecom-s3/symcc/) on how to build symcc and how to instrument a target binary (the same target diff --git a/custom_mutators/symqemu/Makefile b/custom_mutators/symqemu/Makefile new file mode 100644 index 00000000..3361ab0f --- /dev/null +++ b/custom_mutators/symqemu/Makefile @@ -0,0 +1,14 @@ + +ifdef DEBUG + CFLAGS += -DDEBUG +endif + +all: symqemu-mutator.so + +CFLAGS += -O3 -funroll-loops + +symqemu-mutator.so: symqemu.c + $(CC) $(CFLAGS) $(CPPFLAGS) -g -I../../include -shared -fPIC -o symqemu-mutator.so symqemu.c + +clean: + rm -f symqemu-mutator.so *.o *~ core diff --git a/custom_mutators/symqemu/README.md b/custom_mutators/symqemu/README.md new file mode 100644 index 00000000..0d5cd4d7 --- /dev/null +++ b/custom_mutators/symqemu/README.md @@ -0,0 +1,11 @@ +# custum mutator: symqemu + +This uses the symcc to find new paths into the target. + +To use this custom mutator follow the steps in the symqemu repository +[https://github.com/eurecom-s3/symqemu/](https://github.com/eurecom-s3/symqemu/) +on how to build symqemu-x86_x64 and put it in your `PATH`. + +just type `make` to build this custom mutator. + +```AFL_CUSTOM_MUTATOR_LIBRARY=custom_mutators/symqemu/symqemu-mutator.so AFL_SYNC_TIME=1 afl-fuzz ...``` diff --git a/custom_mutators/symqemu/symqemu.c b/custom_mutators/symqemu/symqemu.c new file mode 100644 index 00000000..9030397b --- /dev/null +++ b/custom_mutators/symqemu/symqemu.c @@ -0,0 +1,446 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include "config.h" +#include "debug.h" +#include "afl-fuzz.h" +#include "common.h" + +afl_state_t *afl_struct; +static u32 debug = 0; + +#define DBG(x...) 
\ + if (debug) { fprintf(stderr, x); } + +typedef struct my_mutator { + + afl_state_t *afl; + u8 *mutator_buf; + u8 *out_dir; + u8 *queue_dir; + u8 *target; + u8 *symqemu; + u8 *input_file; + u32 counter; + u32 seed; + u32 argc; + u8 **argv; + +} my_mutator_t; + +my_mutator_t *afl_custom_init(afl_state_t *afl, unsigned int seed) { + + if (getenv("AFL_DEBUG")) debug = 1; + + my_mutator_t *data = calloc(1, sizeof(my_mutator_t)); + if (!data) { + + perror("afl_custom_init alloc"); + return NULL; + + } + + char *path = getenv("PATH"); + char *exec_name = "symqemu-x86_64"; + char *token = strtok(path, ":"); + char exec_path[4096]; + + while (token != NULL && data->symqemu == NULL) { + + snprintf(exec_path, sizeof(exec_path), "%s/%s", token, exec_name); + if (access(exec_path, X_OK) == 0) { + + data->symqemu = (u8 *)strdup(exec_path); + break; + + } + + token = strtok(NULL, ":"); + + } + + if (!data->symqemu) FATAL("symqemu binary %s not found", exec_name); + DBG("Found %s\n", data->symqemu); + + if (getenv("AFL_CUSTOM_MUTATOR_ONLY")) + FATAL("the symqemu module cannot be used with AFL_CUSTOM_MUTATOR_ONLY."); + + if ((data->mutator_buf = malloc(MAX_FILE)) == NULL) { + + free(data); + perror("mutator_buf alloc"); + return NULL; + + } + + data->target = getenv("AFL_CUSTOM_INFO_PROGRAM"); + + u8 *path_tmp = getenv("AFL_CUSTOM_INFO_OUT"); + u32 len = strlen(path_tmp) + 32; + u8 *symqemu_path = malloc(len); + data->out_dir = malloc(len); + data->queue_dir = malloc(len); + snprintf(symqemu_path, len, "%s/../symqemu", path_tmp); + snprintf(data->out_dir, len, "%s/../symqemu/out", path_tmp); + snprintf(data->queue_dir, len, "%s/../symqemu/queue", path_tmp); + + mkdir(symqemu_path, 0755); + mkdir(data->out_dir, 0755); + mkdir(data->queue_dir, 0755); + + setenv("SYMCC_OUTPUT_DIR", data->out_dir, 1); + + data->input_file = getenv("AFL_CUSTOM_INFO_PROGRAM_INPUT"); + + u8 *tmp = NULL; + if ((tmp = getenv("AFL_CUSTOM_INFO_PROGRAM_ARGV")) && *tmp) { + + int argc = 0, index = 2; + for (u32 i = 0; i < strlen(tmp); ++i) + if (isspace(tmp[i])) ++argc; + + data->argv = (u8 **)malloc((argc + 4) * sizeof(u8 **)); + u8 *p = strdup(tmp); + + do { + + data->argv[index] = p; + while (*p && !isspace(*p)) + ++p; + if (*p) { + + *p++ = 0; + while (isspace(*p)) + ++p; + + } + + if (strcmp(data->argv[index], "@@") == 0) { + + if (!data->input_file) { + + u32 ilen = strlen(symqemu_path) + 32; + data->input_file = malloc(ilen); + snprintf(data->input_file, ilen, "%s/.input", symqemu_path); + + } + + data->argv[index] = data->input_file; + + } + + DBG("%d: %s\n", index, data->argv[index]); + index++; + + } while (*p); + + data->argv[index] = NULL; + data->argc = index; + + } else { + + data->argv = (u8 **)malloc(8 * sizeof(u8 **)); + data->argc = 2; + data->argv[2] = NULL; + + } + + data->argv[0] = data->symqemu; + data->argv[1] = data->target; + + DBG("out_dir=%s, queue_dir=%s, target=%s, input_file=%s, argc=%u\n", + data->out_dir, data->queue_dir, data->target, + data->input_file ? (char *)data->input_file : (char *)"", + data->argc); + + if (data->input_file) { setenv("SYMCC_INPUT_FILE", data->input_file, 1); } + + data->afl = afl; + data->seed = seed; + afl_struct = afl; + + if (debug) { + + fprintf(stderr, "["); + for (u32 i = 0; i <= data->argc; ++i) + fprintf(stderr, " \"%s\"", + data->argv[i] ? (char *)data->argv[i] : ""); + fprintf(stderr, " ]\n"); + + } + + OKF("Custom mutator symqemu loaded - note that the initial startup of " + "afl-fuzz will be delayed the more starting seeds are present. 
This is " + "fine, do not worry!"); + + return data; + +} + +/* When a new queue entry is added we run this input with the symqemu + instrumented binary */ +uint8_t afl_custom_queue_new_entry(my_mutator_t *data, + const uint8_t *filename_new_queue, + const uint8_t *filename_orig_queue) { + + int pipefd[2]; + struct stat st; + if (data->afl->afl_env.afl_no_ui) + ACTF("Sending to symqemu: %s", filename_new_queue); + u8 *fn = alloc_printf("%s", filename_new_queue); + if (!(stat(fn, &st) == 0 && S_ISREG(st.st_mode) && st.st_size)) { + + ck_free(fn); + PFATAL("Couldn't find enqueued file: %s", fn); + + } + + if (afl_struct->fsrv.use_stdin) { + + if (pipe(pipefd) == -1) { + + ck_free(fn); + PFATAL( + "Couldn't create a pipe for interacting with symqemu child process"); + + } + + } + + int fd = open(fn, O_RDONLY); + if (fd < 0) return 0; + ssize_t r = read(fd, data->mutator_buf, MAX_FILE); + DBG("fn=%s, fd=%d, size=%ld\n", fn, fd, r); + ck_free(fn); + close(fd); + + if (data->input_file) { + + fd = open(data->input_file, O_WRONLY | O_CREAT | O_TRUNC, 0644); + ssize_t s = write(fd, data->mutator_buf, r); + close(fd); + DBG("wrote %zd/%zd to %s\n", s, r, data->input_file); + + } + + int pid = fork(); + + if (pid == -1) return 0; + + if (pid) { + + if (!data->input_file || afl_struct->fsrv.use_stdin) { + + close(pipefd[0]); + + if (fd >= 0) { + + if (r <= 0) { + + close(pipefd[1]); + return 0; + + } + + if (r > fcntl(pipefd[1], F_GETPIPE_SZ)) + fcntl(pipefd[1], F_SETPIPE_SZ, MAX_FILE); + ck_write(pipefd[1], data->mutator_buf, r, filename_new_queue); + + } else { + + ck_free(fn); + close(pipefd[1]); + PFATAL( + "Something happened to the enqueued file before sending its " + "contents to symqemu binary"); + + } + + close(pipefd[1]); + + } + + pid = waitpid(pid, NULL, 0); + DBG("symqemu finished executing!\n"); + + // At this point we need to transfer files to output dir, since their names + // collide and symqemu will just overwrite them + + struct dirent **nl; + int32_t items = scandir(data->out_dir, &nl, NULL, NULL); + u8 *origin_name = basename(filename_new_queue); + u8 source_name[4096], destination_name[4096]; + int32_t i; + + if (items > 0) { + + for (i = 0; i < (u32)items; ++i) { + + // symqemu output files start with a digit + if (!isdigit(nl[i]->d_name[0])) continue; + + struct stat st; + snprintf(source_name, sizeof(source_name), "%s/%s", data->out_dir, + nl[i]->d_name); + DBG("file=%s\n", source_name); + + if (stat(source_name, &st) == 0 && S_ISREG(st.st_mode) && st.st_size) { + + snprintf(destination_name, sizeof(destination_name), "%s/id:%06u,%s", + data->queue_dir, data->counter++, nl[i]->d_name); + DBG("src=%s dst=%s\n", source_name, destination_name); + rename(source_name, destination_name); + + } + + free(nl[i]); + + } + + free(nl); + + } + + DBG("Done!\n"); + + } else /* (pid == 0) */ { // child + + if (afl_struct->fsrv.use_stdin) { + + close(pipefd[1]); + dup2(pipefd[0], 0); + + } + + DBG("exec=%s\n", data->target); + if (!debug) { + + close(1); + close(2); + dup2(afl_struct->fsrv.dev_null_fd, 1); + dup2(afl_struct->fsrv.dev_null_fd, 2); + + } + + execvp((char *)data->argv[0], (char **)data->argv); + fprintf(stderr, "Executing: ["); + for (u32 i = 0; i <= data->argc; ++i) + fprintf(stderr, " \"%s\"", + data->argv[i] ? 
(char *)data->argv[i] : ""); + fprintf(stderr, " ]\n"); + FATAL("Failed to execute %s %s\n", data->argv[0], data->argv[1]); + exit(-1); + + } + + return 0; + +} + +/* +uint32_t afl_custom_fuzz_count(my_mutator_t *data, const u8 *buf, + size_t buf_size) { + + uint32_t count = 0, i; + struct dirent **nl; + int32_t items = scandir(data->out_dir, &nl, NULL, NULL); + + if (items > 0) { + + for (i = 0; i < (u32)items; ++i) { + + struct stat st; + u8 * fn = alloc_printf("%s/%s", data->out_dir, nl[i]->d_name); + DBG("test=%s\n", fn); + if (stat(fn, &st) == 0 && S_ISREG(st.st_mode) && st.st_size) { + + DBG("found=%s\n", fn); + count++; + + } + + ck_free(fn); + free(nl[i]); + + } + + free(nl); + + } + + DBG("dir=%s, count=%u\n", data->out_dir, count); + return count; + +} + +*/ + +// here we actually just read the files generated from symqemu +/* +size_t afl_custom_fuzz(my_mutator_t *data, uint8_t *buf, size_t buf_size, + u8 **out_buf, uint8_t *add_buf, size_t add_buf_size, + size_t max_size) { + + struct dirent **nl; + int32_t i, done = 0, items = scandir(data->out_dir, &nl, NULL, NULL); + ssize_t size = 0; + + if (items <= 0) return 0; + + for (i = 0; i < (u32)items; ++i) { + + struct stat st; + u8 * fn = alloc_printf("%s/%s", data->out_dir, nl[i]->d_name); + + if (done == 0) { + + if (stat(fn, &st) == 0 && S_ISREG(st.st_mode) && st.st_size) { + + int fd = open(fn, O_RDONLY); + + if (fd >= 0) { + + size = read(fd, data->mutator_buf, max_size); + *out_buf = data->mutator_buf; + + close(fd); + done = 1; + + } + + } + + unlink(fn); + + } + + ck_free(fn); + free(nl[i]); + + } + + free(nl); + DBG("FUZZ size=%lu\n", size); + return (uint32_t)size; + +} + +*/ + +/** + * Deinitialize everything + * + * @param data The data ptr from afl_custom_init + */ +void afl_custom_deinit(my_mutator_t *data) { + + free(data->mutator_buf); + free(data); + +} + diff --git a/docs/Changelog.md b/docs/Changelog.md index 3602af50..e99747f6 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -23,6 +23,9 @@ - qemu_mode: - Persistent mode +QASAN support for ppc32 tragets by @worksbutnottested - a new grammar custom mutator atnwalk was submitted by @voidptr127 ! 
+ - two new custom mutators are now available: + - TritonDSE in custom_mutators/aflpp_tritondse + - SymQEMU in custom_mutators/symqemu ### Version ++4.06c (release) diff --git a/instrumentation/afl-llvm-common.h b/instrumentation/afl-llvm-common.h index c9324460..23f67179 100644 --- a/instrumentation/afl-llvm-common.h +++ b/instrumentation/afl-llvm-common.h @@ -23,7 +23,7 @@ typedef long double max_align_t; #include "llvm/Support/Debug.h" #include "llvm/Support/MathExtras.h" #if LLVM_VERSION_MAJOR < 17 -#include "llvm/Transforms/IPO/PassManagerBuilder.h" + #include "llvm/Transforms/IPO/PassManagerBuilder.h" #endif #if LLVM_VERSION_MAJOR > 3 || \ diff --git a/test-instr.c b/test-instr.c index 1d9f2e6e..eda5189c 100644 --- a/test-instr.c +++ b/test-instr.c @@ -24,7 +24,7 @@ int main(int argc, char **argv) { - int fd = 0; + int fd = 0, cnt; char buff[8]; char *buf = buff; @@ -32,7 +32,6 @@ int main(int argc, char **argv) { if (argc == 2) { buf = argv[1]; - printf("Input %s - ", buf); } else { @@ -47,15 +46,19 @@ int main(int argc, char **argv) { } - if (read(fd, buf, sizeof(buf)) < 1) { + if ((cnt = read(fd, buf, sizeof(buf) - 1)) < 1) { printf("Hum?\n"); return 1; } + buf[cnt] = 0; + } + if (getenv("AFL_DEBUG")) fprintf(stderr, "test-instr: %s\n", buf); + // we support three input cases (plus a 4th if stdin is used but there is no // input) switch (buf[0]) { -- cgit 1.4.1 From 9324f3f6289c62451e2add1f7553a7eda0d7d642 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 1 Jun 2023 12:19:45 +0200 Subject: rewrote PCGUARD --- GNUmakefile.llvm | 4 +- TODO.md | 2 - afl-cmin.bash | 4 +- docs/Changelog.md | 5 +- instrumentation/SanitizerCoveragePCGUARD.so.cc | 603 +++++++++---------------- src/afl-cc.c | 31 +- 6 files changed, 249 insertions(+), 400 deletions(-) (limited to 'docs/Changelog.md') diff --git a/GNUmakefile.llvm b/GNUmakefile.llvm index 2bb4e7f8..6c68f1f3 100644 --- a/GNUmakefile.llvm +++ b/GNUmakefile.llvm @@ -49,7 +49,7 @@ LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^[ LLVM_TOO_NEW = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^1[5-9]' && echo 1 || echo 0 ) LLVM_NEW_API = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^1[0-9]' && echo 1 || echo 0 ) LLVM_NEWER_API = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^1[6-9]' && echo 1 || echo 0 ) -LLVM_10_OK = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^1[1-9]|^10\.[1-9]|^10\.0.[1-9]' && echo 1 || echo 0 ) +LLVM_13_OK = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^1[3-9]' && echo 1 || echo 0 ) LLVM_HAVE_LTO = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^1[1-9]' && echo 1 || echo 0 ) LLVM_BINDIR = $(shell $(LLVM_CONFIG) --bindir 2>/dev/null) LLVM_LIBDIR = $(shell $(LLVM_CONFIG) --libdir 2>/dev/null) @@ -422,7 +422,7 @@ endif $(CXX) $(CLANG_CPPFL) -Wdeprecated -fno-rtti -fPIC -std=$(LLVM_STDCXX) -shared $< -o $@ $(CLANG_LFL) instrumentation/afl-llvm-common.o ./SanitizerCoveragePCGUARD.so: instrumentation/SanitizerCoveragePCGUARD.so.cc instrumentation/afl-llvm-common.o | test_deps -ifeq "$(LLVM_10_OK)" "1" +ifeq "$(LLVM_13_OK)" "1" -$(CXX) $(CLANG_CPPFL) -fno-rtti -fPIC -std=$(LLVM_STDCXX) -shared $< -o $@ $(CLANG_LFL) -Wno-deprecated-copy-dtor -Wdeprecated instrumentation/afl-llvm-common.o endif diff --git a/TODO.md b/TODO.md index dc02a914..2b7e8fcf 100644 --- a/TODO.md +++ b/TODO.md @@ -2,9 +2,7 @@ ## Should - - redo PCGUARD + LTO for llvm 15+ - test cmplog for less than 16bit - - splicing selection weighted? 
- support persistent and deferred fork server in afl-showmap? - better autodetection of shifting runtime timeout values - Update afl->pending_not_fuzzed for MOpt diff --git a/afl-cmin.bash b/afl-cmin.bash index d390ff65..dc6d5342 100755 --- a/afl-cmin.bash +++ b/afl-cmin.bash @@ -206,7 +206,7 @@ fi # Check for obvious errors. -if [ ! "$T_ARG" = "" -a ! "$F_ARG" = "" -a ! "$NYX_MODE" == 1 ]; then +if [ ! "$T_ARG" = "" -a -n "$F_ARG" -a ! "$NYX_MODE" == 1 ]; then echo "[-] Error: -T and -f can not be used together." 1>&2 exit 1 fi @@ -323,7 +323,7 @@ if [ ! "$T_ARG" = "" ]; then fi fi else - if [ "$F_ARG" = ""]; then + if [ -z "$F_ARG" ]; then echo "[*] Are you aware of the '-T all' parallelize option that massively improves the speed?" fi fi diff --git a/docs/Changelog.md b/docs/Changelog.md index e99747f6..facf2196 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -15,13 +15,16 @@ - new env `AFL_LLVM_LTO_SKIPINIT` to support the AFL++ based WASM (https://github.com/fgsect/WAFL) project - error and print help if afl-clan-lto is used with lto=thin + - rewrote our PCGUARD pass to be compatible with LLVM 15+ shenanigans, + requires LLVM 13+ now instead of 10.0.1+ + - fallback to native LLVM PCGUARD if our PCGUARD is unavailable - afl-showmap: - added custom mutator post_process and send support - add `-I filelist` option, an alternative to `-i in_dir` - afl-cmin + afl-cmin.bash: - `-T threads` parallel task support, can be a huge speedup! - qemu_mode: - - Persistent mode +QASAN support for ppc32 tragets by @worksbutnottested + - Persistent mode + QASAN support for ppc32 targets by @worksbutnottested - a new grammar custom mutator atnwalk was submitted by @voidptr127 ! - two new custom mutators are now available: - TritonDSE in custom_mutators/aflpp_tritondse diff --git a/instrumentation/SanitizerCoveragePCGUARD.so.cc b/instrumentation/SanitizerCoveragePCGUARD.so.cc index 8fed2042..2abc58ec 100644 --- a/instrumentation/SanitizerCoveragePCGUARD.so.cc +++ b/instrumentation/SanitizerCoveragePCGUARD.so.cc @@ -13,42 +13,64 @@ #include "llvm/Transforms/Instrumentation/SanitizerCoverage.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" +#if LLVM_VERSION_MAJOR >= 15 + #if LLVM_VERSION_MAJOR < 17 + #include "llvm/ADT/Triple.h" + #endif +#endif #if LLVM_VERSION_MAJOR < 17 - #include "llvm/ADT/Triple.h" #include "llvm/Analysis/EHPersonalities.h" -#else - #include "llvm/IR/EHPersonalities.h" #endif #include "llvm/Analysis/PostDominators.h" -#include "llvm/IR/CFG.h" +#if LLVM_VERSION_MAJOR < 15 + #include "llvm/IR/CFG.h" +#endif #include "llvm/IR/Constant.h" #include "llvm/IR/DataLayout.h" -#include "llvm/IR/DebugInfo.h" +#if LLVM_VERSION_MAJOR < 15 + #include "llvm/IR/DebugInfo.h" +#endif #include "llvm/IR/Dominators.h" +#if LLVM_VERSION_MAJOR >= 17 + #include "llvm/Analysis/EHPersonalities.h" +#endif #include "llvm/IR/Function.h" -#include "llvm/IR/GlobalVariable.h" +#if LLVM_VERSION_MAJOR >= 16 + #include "llvm/IR/GlobalVariable.h" +#endif #include "llvm/IR/IRBuilder.h" -#include "llvm/IR/InlineAsm.h" +#if LLVM_VERSION_MAJOR < 15 + #include "llvm/IR/InlineAsm.h" +#endif #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/LLVMContext.h" -#include "llvm/IR/MDBuilder.h" -#include "llvm/IR/Mangler.h" +#if LLVM_VERSION_MAJOR < 15 + #include "llvm/IR/MDBuilder.h" + #include "llvm/IR/Mangler.h" +#endif #include "llvm/IR/Module.h" #include "llvm/IR/PassManager.h" +#include "llvm/Passes/PassBuilder.h" +#include "llvm/Passes/PassPlugin.h" #include 
"llvm/IR/Type.h" -#include "llvm/InitializePasses.h" +#if LLVM_VERSION_MAJOR < 17 + #include "llvm/InitializePasses.h" +#endif #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/SpecialCaseList.h" #include "llvm/Support/VirtualFileSystem.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Transforms/Instrumentation.h" +#if LLVM_VERSION_MAJOR < 15 + #include "llvm/Support/raw_ostream.h" +#endif +#if LLVM_VERSION_MAJOR < 17 + #include "llvm/Transforms/Instrumentation.h" +#else + #include "llvm/TargetParser/Triple.h" +#endif #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/ModuleUtils.h" -#include "llvm/Passes/PassPlugin.h" -#include "llvm/Passes/PassBuilder.h" -#include "llvm/IR/PassManager.h" #include "config.h" #include "debug.h" @@ -58,7 +80,8 @@ using namespace llvm; #define DEBUG_TYPE "sancov" -const char SanCovTracePCIndirName[] = "__sanitizer_cov_trace_pc_indir"; +static const uint64_t SanCtorAndDtorPriority = 2; + const char SanCovTracePCName[] = "__sanitizer_cov_trace_pc"; const char SanCovTraceCmp1[] = "__sanitizer_cov_trace_cmp1"; const char SanCovTraceCmp2[] = "__sanitizer_cov_trace_cmp2"; @@ -68,22 +91,13 @@ const char SanCovTraceConstCmp1[] = "__sanitizer_cov_trace_const_cmp1"; const char SanCovTraceConstCmp2[] = "__sanitizer_cov_trace_const_cmp2"; const char SanCovTraceConstCmp4[] = "__sanitizer_cov_trace_const_cmp4"; const char SanCovTraceConstCmp8[] = "__sanitizer_cov_trace_const_cmp8"; -const char SanCovTraceDiv4[] = "__sanitizer_cov_trace_div4"; -const char SanCovTraceDiv8[] = "__sanitizer_cov_trace_div8"; -const char SanCovTraceGep[] = "__sanitizer_cov_trace_gep"; const char SanCovTraceSwitchName[] = "__sanitizer_cov_trace_switch"; + const char SanCovModuleCtorTracePcGuardName[] = "sancov.module_ctor_trace_pc_guard"; -const char SanCovModuleCtor8bitCountersName[] = - "sancov.module_ctor_8bit_counters"; -const char SanCovModuleCtorBoolFlagName[] = "sancov.module_ctor_bool_flag"; -static const uint64_t SanCtorAndDtorPriority = 2; +const char SanCovTracePCGuardInitName[] = "__sanitizer_cov_trace_pc_guard_init"; const char SanCovTracePCGuardName[] = "__sanitizer_cov_trace_pc_guard"; -const char SanCovTracePCGuardInitName[] = "__sanitizer_cov_trace_pc_guard_init"; -const char SanCov8bitCountersInitName[] = "__sanitizer_cov_8bit_counters_init"; -const char SanCovBoolFlagInitName[] = "__sanitizer_cov_bool_flag_init"; -const char SanCovPCsInitName[] = "__sanitizer_cov_pcs_init"; const char SanCovGuardsSectionName[] = "sancov_guards"; const char SanCovCountersSectionName[] = "sancov_cntrs"; @@ -99,27 +113,9 @@ namespace { SanitizerCoverageOptions OverrideFromCL(SanitizerCoverageOptions Options) { - // Sets CoverageType and IndirectCalls. 
- // SanitizerCoverageOptions CLOpts = getOptions(ClCoverageLevel); - Options.CoverageType = - SanitizerCoverageOptions::SCK_Edge; // std::max(Options.CoverageType, - // CLOpts.CoverageType); - Options.IndirectCalls = false; // CLOpts.IndirectCalls; - Options.TraceCmp = false; //|= ClCMPTracing; - Options.TraceDiv = false; //|= ClDIVTracing; - Options.TraceGep = false; //|= ClGEPTracing; - Options.TracePC = false; //|= ClTracePC; - Options.TracePCGuard = true; // |= ClTracePCGuard; - Options.Inline8bitCounters = 0; //|= ClInline8bitCounters; - // Options.InlineBoolFlag = 0; //|= ClInlineBoolFlag; - Options.PCTable = false; //|= ClCreatePCTable; - Options.NoPrune = false; //|= !ClPruneBlocks; - Options.StackDepth = false; //|= ClStackDepth; - if (!Options.TracePCGuard && !Options.TracePC && - !Options.Inline8bitCounters && !Options.StackDepth /*&& - !Options.InlineBoolFlag*/) - Options.TracePCGuard = true; // TracePCGuard is default. - + Options.CoverageType = SanitizerCoverageOptions::SCK_Edge; + // Options.NoPrune = true; + Options.TracePCGuard = true; // TracePCGuard is default. return Options; } @@ -139,20 +135,13 @@ class ModuleSanitizerCoverageAFL } PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM); - - bool instrumentModule(Module &M, DomTreeCallback DTCallback, - PostDomTreeCallback PDTCallback); + bool instrumentModule(Module &M, DomTreeCallback DTCallback, + PostDomTreeCallback PDTCallback); private: void instrumentFunction(Function &F, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback); - void InjectCoverageForIndirectCalls(Function &F, - ArrayRef IndirCalls); void InjectTraceForCmp(Function &F, ArrayRef CmpTraceTargets); - void InjectTraceForDiv(Function &F, - ArrayRef DivTraceTargets); - void InjectTraceForGep(Function &F, - ArrayRef GepTraceTargets); void InjectTraceForSwitch(Function &F, ArrayRef SwitchTraceTargets); bool InjectCoverage(Function &F, ArrayRef AllBlocks, @@ -173,20 +162,23 @@ class ModuleSanitizerCoverageAFL void SetNoSanitizeMetadata(Instruction *I) { +#if LLVM_VERSION_MAJOR == 15 + I->setMetadata(LLVMContext::MD_nosanitize, MDNode::get(*C, None)); +#elif LLVM_VERSION_MAJOR >= 16 + I->setMetadata(LLVMContext::MD_nosanitize, MDNode::get(*C, std::nullopt)); +#else I->setMetadata(I->getModule()->getMDKindID("nosanitize"), MDNode::get(*C, None)); +#endif } std::string getSectionName(const std::string &Section) const; std::string getSectionStart(const std::string &Section) const; std::string getSectionEnd(const std::string &Section) const; - FunctionCallee SanCovTracePCIndir; FunctionCallee SanCovTracePC, SanCovTracePCGuard; FunctionCallee SanCovTraceCmpFunction[4]; FunctionCallee SanCovTraceConstCmpFunction[4]; - FunctionCallee SanCovTraceDivFunction[2]; - FunctionCallee SanCovTraceGepFunction; FunctionCallee SanCovTraceSwitchFunction; GlobalVariable *SanCovLowestStack; Type *IntptrTy, *IntptrPtrTy, *Int64Ty, *Int64PtrTy, *Int32Ty, *Int32PtrTy, @@ -215,18 +207,16 @@ class ModuleSanitizerCoverageAFL } // namespace -#if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */ - extern "C" ::llvm::PassPluginLibraryInfo LLVM_ATTRIBUTE_WEAK llvmGetPassPluginInfo() { - return {LLVM_PLUGIN_API_VERSION, "SanitizerCoveragePCGUARD", "v0.1", + return {LLVM_PLUGIN_API_VERSION, "SanitizerCoveragePCGUARD", "v0.2", /* lambda to insert our pass into the pass pipeline. 
*/ [](PassBuilder &PB) { - #if LLVM_VERSION_MAJOR <= 13 +#if LLVM_VERSION_MAJOR == 13 using OptimizationLevel = typename PassBuilder::OptimizationLevel; - #endif +#endif PB.registerOptimizerLastEPCallback( [](ModulePassManager &MPM, OptimizationLevel OL) { @@ -238,8 +228,7 @@ llvmGetPassPluginInfo() { } -#endif - +#if LLVM_VERSION_MAJOR == 1 PreservedAnalyses ModuleSanitizerCoverageAFL::run(Module &M, ModuleAnalysisManager &MAM) { @@ -257,34 +246,65 @@ PreservedAnalyses ModuleSanitizerCoverageAFL::run(Module &M, }; + if (!ModuleSancov.instrumentModule(M, DTCallback, PDTCallback)) + return PreservedAnalyses::all(); + + PreservedAnalyses PA = PreservedAnalyses::none(); + // GlobalsAA is considered stateless and does not get invalidated unless + // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers + // make changes that require GlobalsAA to be invalidated. + PA.abandon(); + return PA; + +} + +#else + #if LLVM_VERSION_MAJOR >= 16 +PreservedAnalyses ModuleSanitizerCoverageAFL::run(Module &M, + ModuleAnalysisManager &MAM) { + + #else +PreservedAnalyses ModuleSanitizerCoverageAFL::run(Module &M, + ModuleAnalysisManager &MAM) { + + #endif + ModuleSanitizerCoverageAFL ModuleSancov(Options); + auto &FAM = MAM.getResult(M).getManager(); + auto DTCallback = [&FAM](Function &F) -> const DominatorTree * { + + return &FAM.getResult(F); + + }; + + auto PDTCallback = [&FAM](Function &F) -> const PostDominatorTree * { + + return &FAM.getResult(F); + + }; + if (ModuleSancov.instrumentModule(M, DTCallback, PDTCallback)) return PreservedAnalyses::none(); return PreservedAnalyses::all(); } +#endif + std::pair ModuleSanitizerCoverageAFL::CreateSecStartEnd( Module &M, const char *Section, Type *Ty) { - GlobalVariable *SecStart = - new GlobalVariable(M, -#if LLVM_VERSION_MAJOR >= 15 - Ty, -#else - Ty->getPointerElementType(), -#endif - false, GlobalVariable::ExternalWeakLinkage, nullptr, - getSectionStart(Section)); + // Use ExternalWeak so that if all sections are discarded due to section + // garbage collection, the linker will not report undefined symbol errors. + // Windows defines the start/stop symbols in compiler-rt so no need for + // ExternalWeak. + GlobalValue::LinkageTypes Linkage = TargetTriple.isOSBinFormatCOFF() + ? 
GlobalVariable::ExternalLinkage + : GlobalVariable::ExternalWeakLinkage; + GlobalVariable *SecStart = new GlobalVariable(M, Ty, false, Linkage, nullptr, + getSectionStart(Section)); SecStart->setVisibility(GlobalValue::HiddenVisibility); - GlobalVariable *SecEnd = - new GlobalVariable(M, -#if LLVM_VERSION_MAJOR >= 15 - Ty, -#else - Ty->getPointerElementType(), -#endif - false, GlobalVariable::ExternalWeakLinkage, nullptr, - getSectionEnd(Section)); + GlobalVariable *SecEnd = new GlobalVariable(M, Ty, false, Linkage, nullptr, + getSectionEnd(Section)); SecEnd->setVisibility(GlobalValue::HiddenVisibility); IRBuilder<> IRB(M.getContext()); if (!TargetTriple.isOSBinFormatCOFF()) @@ -295,7 +315,8 @@ std::pair ModuleSanitizerCoverageAFL::CreateSecStartEnd( auto SecStartI8Ptr = IRB.CreatePointerCast(SecStart, Int8PtrTy); auto GEP = IRB.CreateGEP(Int8Ty, SecStartI8Ptr, ConstantInt::get(IntptrTy, sizeof(uint64_t))); - return std::make_pair(IRB.CreatePointerCast(GEP, Ty), SecEnd); + return std::make_pair(IRB.CreatePointerCast(GEP, PointerType::getUnqual(Ty)), + SecEnd); } @@ -307,8 +328,9 @@ Function *ModuleSanitizerCoverageAFL::CreateInitCallsForSections( auto SecStart = SecStartEnd.first; auto SecEnd = SecStartEnd.second; Function *CtorFunc; + Type *PtrTy = PointerType::getUnqual(Ty); std::tie(CtorFunc, std::ignore) = createSanitizerCtorAndInitFunctions( - M, CtorName, InitFunctionName, {Ty, Ty}, {SecStart, SecEnd}); + M, CtorName, InitFunctionName, {PtrTy, PtrTy}, {SecStart, SecEnd}); assert(CtorFunc->getName() == CtorName); if (TargetTriple.supportsCOMDAT()) { @@ -332,7 +354,6 @@ Function *ModuleSanitizerCoverageAFL::CreateInitCallsForSections( // to include the sancov constructor. This way the linker can deduplicate // the constructors but always leave one copy. CtorFunc->setLinkage(GlobalValue::WeakODRLinkage); - appendToUsed(M, CtorFunc); } @@ -344,37 +365,25 @@ bool ModuleSanitizerCoverageAFL::instrumentModule( Module &M, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback) { setvbuf(stdout, NULL, _IONBF, 0); - if (getenv("AFL_DEBUG")) debug = 1; + + if (getenv("AFL_DEBUG")) { debug = 1; } if ((isatty(2) && !getenv("AFL_QUIET")) || debug) { SAYF(cCYA "SanitizerCoveragePCGUARD" VERSION cRST "\n"); - } else + } else { be_quiet = 1; + } + skip_nozero = getenv("AFL_LLVM_SKIP_NEVERZERO"); use_threadsafe_counters = getenv("AFL_LLVM_THREADSAFE_INST"); initInstrumentList(); scanForDangerousFunctions(&M); - if (debug) { - - fprintf(stderr, - "SANCOV: covtype:%u indirect:%d stack:%d noprune:%d " - "createtable:%d tracepcguard:%d tracepc:%d\n", - Options.CoverageType, Options.IndirectCalls == true ? 1 : 0, - Options.StackDepth == true ? 1 : 0, Options.NoPrune == true ? 1 : 0, - // Options.InlineBoolFlag == true ? 1 : 0, - Options.PCTable == true ? 1 : 0, - Options.TracePCGuard == true ? 1 : 0, - Options.TracePC == true ? 
1 : 0); - - } - - if (Options.CoverageType == SanitizerCoverageOptions::SCK_None) return false; C = &(M.getContext()); DL = &M.getDataLayout(); CurModule = &M; @@ -397,16 +406,14 @@ bool ModuleSanitizerCoverageAFL::instrumentModule( Int16Ty = IRB.getInt16Ty(); Int8Ty = IRB.getInt8Ty(); Int1Ty = IRB.getInt1Ty(); - LLVMContext &Ctx = M.getContext(); + LLVMContext &Ctx = M.getContext(); AFLMapPtr = new GlobalVariable(M, PointerType::get(Int8Ty, 0), false, GlobalValue::ExternalLinkage, 0, "__afl_area_ptr"); One = ConstantInt::get(IntegerType::getInt8Ty(Ctx), 1); Zero = ConstantInt::get(IntegerType::getInt8Ty(Ctx), 0); - SanCovTracePCIndir = - M.getOrInsertFunction(SanCovTracePCIndirName, VoidTy, IntptrTy); // Make sure smaller parameters are zero-extended to i64 if required by the // target ABI. AttributeList SanCovTraceCmpZeroExtAL; @@ -436,26 +443,13 @@ bool ModuleSanitizerCoverageAFL::instrumentModule( SanCovTraceConstCmpFunction[3] = M.getOrInsertFunction(SanCovTraceConstCmp8, VoidTy, Int64Ty, Int64Ty); - { - - AttributeList AL; - AL = AL.addParamAttribute(*C, 0, Attribute::ZExt); - SanCovTraceDivFunction[0] = - M.getOrInsertFunction(SanCovTraceDiv4, AL, VoidTy, IRB.getInt32Ty()); - - } - - SanCovTraceDivFunction[1] = - M.getOrInsertFunction(SanCovTraceDiv8, VoidTy, Int64Ty); - SanCovTraceGepFunction = - M.getOrInsertFunction(SanCovTraceGep, VoidTy, IntptrTy); SanCovTraceSwitchFunction = M.getOrInsertFunction(SanCovTraceSwitchName, VoidTy, Int64Ty, Int64PtrTy); Constant *SanCovLowestStackConstant = M.getOrInsertGlobal(SanCovLowestStackName, IntptrTy); SanCovLowestStack = dyn_cast(SanCovLowestStackConstant); - if (!SanCovLowestStack) { + if (!SanCovLowestStack || SanCovLowestStack->getValueType() != IntptrTy) { C->emitError(StringRef("'") + SanCovLowestStackName + "' should not be declared by the user"); @@ -465,8 +459,6 @@ bool ModuleSanitizerCoverageAFL::instrumentModule( SanCovLowestStack->setThreadLocalMode( GlobalValue::ThreadLocalMode::InitialExecTLSModel); - if (Options.StackDepth && !SanCovLowestStack->isDeclaration()) - SanCovLowestStack->setInitializer(Constant::getAllOnesValue(IntptrTy)); SanCovTracePC = M.getOrInsertFunction(SanCovTracePCName, VoidTy); SanCovTracePCGuard = @@ -481,40 +473,25 @@ bool ModuleSanitizerCoverageAFL::instrumentModule( Ctor = CreateInitCallsForSections(M, SanCovModuleCtorTracePcGuardName, SanCovTracePCGuardInitName, Int32PtrTy, SanCovGuardsSectionName); - if (Function8bitCounterArray) - Ctor = CreateInitCallsForSections(M, SanCovModuleCtor8bitCountersName, - SanCov8bitCountersInitName, Int8PtrTy, - SanCovCountersSectionName); - if (FunctionBoolArray) { - - Ctor = CreateInitCallsForSections(M, SanCovModuleCtorBoolFlagName, - SanCovBoolFlagInitName, Int1PtrTy, - SanCovBoolFlagSectionName); - } - - if (Ctor && Options.PCTable) { + if (Ctor && debug) { - auto SecStartEnd = CreateSecStartEnd(M, SanCovPCsSectionName, IntptrPtrTy); - FunctionCallee InitFunction = declareSanitizerInitFunction( - M, SanCovPCsInitName, {IntptrPtrTy, IntptrPtrTy}); - IRBuilder<> IRBCtor(Ctor->getEntryBlock().getTerminator()); - IRBCtor.CreateCall(InitFunction, {SecStartEnd.first, SecStartEnd.second}); + fprintf(stderr, "SANCOV: installed pcguard_init in ctor\n"); } - // We don't reference these arrays directly in any of our runtime functions, - // so we need to prevent them from being dead stripped. 
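// Retention note: the old code only added the coverage arrays to llvm.used
// on Mach-O; the rewrite always adds non-comdat arrays to llvm.used and
// comdat arrays to llvm.compiler.used (see CreateFunctionLocalArrayInSection
// below), so the linker cannot garbage collect the guard sections on any
// platform.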
- if (TargetTriple.isOSBinFormatMachO()) appendToUsed(M, GlobalsToAppendToUsed); + appendToUsed(M, GlobalsToAppendToUsed); appendToCompilerUsed(M, GlobalsToAppendToCompilerUsed); if (!be_quiet) { - if (!instr) + if (!instr) { + WARNF("No instrumentation targets found."); - else { - char modeline[100]; + } else { + + char modeline[128]; snprintf(modeline, sizeof(modeline), "%s%s%s%s%s%s", getenv("AFL_HARDEN") ? "hardened" : "non-hardened", getenv("AFL_USE_ASAN") ? ", ASAN" : "", @@ -535,39 +512,36 @@ bool ModuleSanitizerCoverageAFL::instrumentModule( } // True if block has successors and it dominates all of them. -bool isFullDominator(const BasicBlock *BB, const DominatorTree *DT) { +static bool isFullDominator(const BasicBlock *BB, const DominatorTree *DT) { - if (succ_begin(BB) == succ_end(BB)) return false; + if (succ_empty(BB)) return false; - for (const BasicBlock *SUCC : make_range(succ_begin(BB), succ_end(BB))) { + return llvm::all_of(successors(BB), [&](const BasicBlock *SUCC) { - if (!DT->dominates(BB, SUCC)) return false; + return DT->dominates(BB, SUCC); - } - - return true; + }); } // True if block has predecessors and it postdominates all of them. -bool isFullPostDominator(const BasicBlock *BB, const PostDominatorTree *PDT) { +static bool isFullPostDominator(const BasicBlock *BB, + const PostDominatorTree *PDT) { - if (pred_begin(BB) == pred_end(BB)) return false; + if (pred_empty(BB)) return false; - for (const BasicBlock *PRED : make_range(pred_begin(BB), pred_end(BB))) { + return llvm::all_of(predecessors(BB), [&](const BasicBlock *PRED) { - if (!PDT->dominates(BB, PRED)) return false; + return PDT->dominates(BB, PRED); - } - - return true; + }); } -bool shouldInstrumentBlock(const Function &F, const BasicBlock *BB, - const DominatorTree *DT, - const PostDominatorTree *PDT, - const SanitizerCoverageOptions &Options) { +static bool shouldInstrumentBlock(const Function &F, const BasicBlock *BB, + const DominatorTree *DT, + const PostDominatorTree *PDT, + const SanitizerCoverageOptions &Options) { // Don't insert coverage for blocks containing nothing but unreachable: we // will never call __sanitizer_cov() for them, so counting them in @@ -582,10 +556,6 @@ bool shouldInstrumentBlock(const Function &F, const BasicBlock *BB, if (Options.NoPrune || &F.getEntryBlock() == BB) return true; - if (Options.CoverageType == SanitizerCoverageOptions::SCK_Function && - &F.getEntryBlock() != BB) - return false; - // Do not instrument full dominators, or full post-dominators with multiple // predecessors. return !isFullDominator(BB, DT) && @@ -597,38 +567,47 @@ bool shouldInstrumentBlock(const Function &F, const BasicBlock *BB, // A twist here is that we treat From->To as a backedge if // * To dominates From or // * To->UniqueSuccessor dominates From -bool IsBackEdge(BasicBlock *From, BasicBlock *To, const DominatorTree *DT) { +#if 0 +static bool IsBackEdge(BasicBlock *From, BasicBlock *To, + const DominatorTree *DT) { - if (DT->dominates(To, From)) return true; + if (DT->dominates(To, From)) + return true; if (auto Next = To->getUniqueSuccessor()) - if (DT->dominates(Next, From)) return true; + if (DT->dominates(Next, From)) + return true; return false; } +#endif + // Prunes uninteresting Cmp instrumentation: // * CMP instructions that feed into loop backedge branch. // // Note that Cmp pruning is controlled by the same flag as the // BB pruning. 
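// In the rewrite this cmp pruning helper is parked under #if 0 together with
// IsBackEdge: the pass no longer injects cmp/div/gep or indirect-call tracing
// at all (OverrideFromCL now hard-wires SCK_Edge + TracePCGuard), so the only
// pruning still in effect is the dominator check in shouldInstrumentBlock
// above, which skips blocks whose execution is already implied by their
// neighbours.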
-bool IsInterestingCmp(ICmpInst *CMP, const DominatorTree *DT, - const SanitizerCoverageOptions &Options) { +#if 0 +static bool IsInterestingCmp(ICmpInst *CMP, const DominatorTree *DT, + const SanitizerCoverageOptions &Options) { if (!Options.NoPrune) if (CMP->hasOneUse()) if (auto BR = dyn_cast(CMP->user_back())) for (BasicBlock *B : BR->successors()) - if (IsBackEdge(BR->getParent(), B, DT)) return false; + if (IsBackEdge(BR->getParent(), B, DT)) + return false; return true; } +#endif + void ModuleSanitizerCoverageAFL::instrumentFunction( Function &F, DomTreeCallback DTCallback, PostDomTreeCallback PDTCallback) { if (F.empty()) return; if (!isInInstrumentList(&F, FMNAME)) return; - if (F.getName().find(".module_ctor") != std::string::npos) return; // Should not instrument sanitizer init functions. if (F.getName().startswith("__sanitizer_")) @@ -647,15 +626,13 @@ void ModuleSanitizerCoverageAFL::instrumentFunction( if (F.hasPersonalityFn() && isAsynchronousEHPersonality(classifyEHPersonality(F.getPersonalityFn()))) return; + if (F.hasFnAttribute(Attribute::NoSanitizeCoverage)) return; if (Options.CoverageType >= SanitizerCoverageOptions::SCK_Edge) SplitAllCriticalEdges( F, CriticalEdgeSplittingOptions().setIgnoreUnreachableDests()); - SmallVector IndirCalls; - SmallVector BlocksToInstrument; - SmallVector CmpTraceTargets; - SmallVector SwitchTraceTargets; - SmallVector DivTraceTargets; - SmallVector GepTraceTargets; + SmallVector BlocksToInstrument; + SmallVector CmpTraceTargets; + SmallVector SwitchTraceTargets; const DominatorTree *DT = DTCallback(F); const PostDominatorTree *PDT = PDTCallback(F); @@ -665,47 +642,28 @@ void ModuleSanitizerCoverageAFL::instrumentFunction( if (shouldInstrumentBlock(F, &BB, DT, PDT, Options)) BlocksToInstrument.push_back(&BB); - for (auto &Inst : BB) { - - if (Options.IndirectCalls) { - - CallBase *CB = dyn_cast(&Inst); - if (CB && !CB->getCalledFunction()) IndirCalls.push_back(&Inst); - - } + /* + for (auto &Inst : BB) { - if (Options.TraceCmp) { + if (Options.TraceCmp) { - if (ICmpInst *CMP = dyn_cast(&Inst)) - if (IsInterestingCmp(CMP, DT, Options)) - CmpTraceTargets.push_back(&Inst); - if (isa(&Inst)) SwitchTraceTargets.push_back(&Inst); + if (ICmpInst *CMP = dyn_cast(&Inst)) + if (IsInterestingCmp(CMP, DT, Options)) + CmpTraceTargets.push_back(&Inst); + if (isa(&Inst)) + SwitchTraceTargets.push_back(&Inst); - } + } - if (Options.TraceDiv) - if (BinaryOperator *BO = dyn_cast(&Inst)) - if (BO->getOpcode() == Instruction::SDiv || - BO->getOpcode() == Instruction::UDiv) - DivTraceTargets.push_back(BO); - if (Options.TraceGep) - if (GetElementPtrInst *GEP = dyn_cast(&Inst)) - GepTraceTargets.push_back(GEP); - if (Options.StackDepth) - if (isa(Inst) || - (isa(Inst) && !isa(Inst))) - IsLeafFunc = false; + } - } + */ } InjectCoverage(F, BlocksToInstrument, IsLeafFunc); - InjectCoverageForIndirectCalls(F, IndirCalls); - InjectTraceForCmp(F, CmpTraceTargets); - InjectTraceForSwitch(F, SwitchTraceTargets); - InjectTraceForDiv(F, DivTraceTargets); - InjectTraceForGep(F, GepTraceTargets); + // InjectTraceForCmp(F, CmpTraceTargets); + // InjectTraceForSwitch(F, SwitchTraceTargets); } @@ -717,33 +675,30 @@ GlobalVariable *ModuleSanitizerCoverageAFL::CreateFunctionLocalArrayInSection( *CurModule, ArrayTy, false, GlobalVariable::PrivateLinkage, Constant::getNullValue(ArrayTy), "__sancov_gen_"); -#if LLVM_VERSION_MAJOR >= 13 if (TargetTriple.supportsCOMDAT() && (TargetTriple.isOSBinFormatELF() || !F.isInterposable())) if (auto Comdat = getOrCreateFunctionComdat(F, 
TargetTriple)) Array->setComdat(Comdat); -#else - if (TargetTriple.supportsCOMDAT() && !F.isInterposable()) - if (auto Comdat = - GetOrCreateFunctionComdat(F, TargetTriple, CurModuleUniqueId)) - Array->setComdat(Comdat); -#endif - Array->setSection(getSectionName(Section)); -#if (LLVM_VERSION_MAJOR >= 11) || \ - (LLVM_VERSION_MAJOR == 10 && LLVM_VERSION_MINOR >= 1) - #if LLVM_VERSION_MAJOR >= 16 +#if LLVM_VERSION_MAJOR >= 16 Array->setAlignment(Align(DL->getTypeStoreSize(Ty).getFixedValue())); - #else - Array->setAlignment(Align(DL->getTypeStoreSize(Ty).getFixedSize())); - #endif #else - Array->setAlignment(Align(4)); // cheating + Array->setAlignment(Align(DL->getTypeStoreSize(Ty).getFixedSize())); #endif - GlobalsToAppendToUsed.push_back(Array); - GlobalsToAppendToCompilerUsed.push_back(Array); - MDNode *MD = MDNode::get(F.getContext(), ValueAsMetadata::get(&F)); - Array->addMetadata(LLVMContext::MD_associated, *MD); + + // sancov_pcs parallels the other metadata section(s). Optimizers (e.g. + // GlobalOpt/ConstantMerge) may not discard sancov_pcs and the other + // section(s) as a unit, so we conservatively retain all unconditionally in + // the compiler. + // + // With comdat (COFF/ELF), the linker can guarantee the associated sections + // will be retained or discarded as a unit, so llvm.compiler.used is + // sufficient. Otherwise, conservatively make all of them retained by the + // linker. + if (Array->hasComdat()) + GlobalsToAppendToCompilerUsed.push_back(Array); + else + GlobalsToAppendToUsed.push_back(Array); return Array; @@ -768,8 +723,12 @@ GlobalVariable *ModuleSanitizerCoverageAFL::CreatePCArray( PCs.push_back((Constant *)IRB.CreatePointerCast( BlockAddress::get(AllBlocks[i]), IntptrPtrTy)); +#if LLVM_VERSION_MAJOR >= 16 + PCs.push_back(Constant::getNullValue(IntptrPtrTy)); +#else PCs.push_back((Constant *)IRB.CreateIntToPtr( ConstantInt::get(IntptrTy, 0), IntptrPtrTy)); +#endif } @@ -792,21 +751,13 @@ void ModuleSanitizerCoverageAFL::CreateFunctionLocalArrays( FunctionGuardArray = CreateFunctionLocalArrayInSection( AllBlocks.size() + special, F, Int32Ty, SanCovGuardsSectionName); - if (Options.Inline8bitCounters) - Function8bitCounterArray = CreateFunctionLocalArrayInSection( - AllBlocks.size(), F, Int8Ty, SanCovCountersSectionName); - /* - if (Options.InlineBoolFlag) - FunctionBoolArray = CreateFunctionLocalArrayInSection( - AllBlocks.size(), F, Int1Ty, SanCovBoolFlagSectionName); - */ - if (Options.PCTable) FunctionPCsArray = CreatePCArray(F, AllBlocks); - } bool ModuleSanitizerCoverageAFL::InjectCoverage( Function &F, ArrayRef AllBlocks, bool IsLeafFunc) { + if (AllBlocks.empty()) return false; + uint32_t cnt_cov = 0, cnt_sel = 0, cnt_sel_inc = 0; static uint32_t first = 1; @@ -855,7 +806,6 @@ bool ModuleSanitizerCoverageAFL::InjectCoverage( } -#if (LLVM_VERSION_MAJOR >= 12) else if (t->getTypeID() == llvm::Type::FixedVectorTyID) { FixedVectorType *tt = dyn_cast(t); @@ -868,16 +818,14 @@ bool ModuleSanitizerCoverageAFL::InjectCoverage( } -#endif - } } } - /* Create PCGUARD array */ CreateFunctionLocalArrays(F, AllBlocks, first + cnt_cov + cnt_sel_inc); + if (first) { first = 0; } selects += cnt_sel; @@ -889,12 +837,6 @@ bool ModuleSanitizerCoverageAFL::InjectCoverage( CallInst *callInst = nullptr; - /* - std::string errMsg; - raw_string_ostream os(errMsg); - IN.print(os); - fprintf(stderr, "X: %s\n", os.str().c_str()); - */ if ((callInst = dyn_cast(&IN))) { Function *Callee = callInst->getCalledFunction(); @@ -1033,12 +975,6 @@ bool 
ModuleSanitizerCoverageAFL::InjectCoverage( } - /* - std::string errMsg; - raw_string_ostream os(errMsg); - x->print(os); - fprintf(stderr, "X: %s\n", os.str().c_str()); - */ result = IRB.CreateSelect(condition, x, y); } @@ -1063,13 +999,6 @@ bool ModuleSanitizerCoverageAFL::InjectCoverage( IRB.CreateLoad(PointerType::get(Int8Ty, 0), AFLMapPtr); ModuleSanitizerCoverageAFL::SetNoSanitizeMetadata(MapPtr); - /* - std::string errMsg; - raw_string_ostream os(errMsg); - result->print(os); - fprintf(stderr, "X: %s\n", os.str().c_str()); - */ - while (1) { /* Get CurLoc */ @@ -1159,29 +1088,6 @@ bool ModuleSanitizerCoverageAFL::InjectCoverage( } -// On every indirect call we call a run-time function -// __sanitizer_cov_indir_call* with two parameters: -// - callee address, -// - global cache array that contains CacheSize pointers (zero-initialized). -// The cache is used to speed up recording the caller-callee pairs. -// The address of the caller is passed implicitly via caller PC. -// CacheSize is encoded in the name of the run-time function. -void ModuleSanitizerCoverageAFL::InjectCoverageForIndirectCalls( - Function &F, ArrayRef IndirCalls) { - - if (IndirCalls.empty()) return; - for (auto I : IndirCalls) { - - IRBuilder<> IRB(I); - CallBase &CB = cast(*I); - Value *Callee = CB.getCalledOperand(); - if (isa(Callee)) continue; - IRB.CreateCall(SanCovTracePCIndir, IRB.CreatePointerCast(Callee, IntptrTy)); - - } - -} - // For every switch statement we insert a call: // __sanitizer_cov_trace_switch(CondValue, // {NumCases, ValueSizeInBits, Case0Value, Case1Value, Case2Value, ... }) @@ -1237,41 +1143,6 @@ void ModuleSanitizerCoverageAFL::InjectTraceForSwitch( } -void ModuleSanitizerCoverageAFL::InjectTraceForDiv( - Function &, ArrayRef DivTraceTargets) { - - for (auto BO : DivTraceTargets) { - - IRBuilder<> IRB(BO); - Value *A1 = BO->getOperand(1); - if (isa(A1)) continue; - if (!A1->getType()->isIntegerTy()) continue; - uint64_t TypeSize = DL->getTypeStoreSizeInBits(A1->getType()); - int CallbackIdx = TypeSize == 32 ? 0 : TypeSize == 64 ? 1 : -1; - if (CallbackIdx < 0) continue; - auto Ty = Type::getIntNTy(*C, TypeSize); - IRB.CreateCall(SanCovTraceDivFunction[CallbackIdx], - {IRB.CreateIntCast(A1, Ty, true)}); - - } - -} - -void ModuleSanitizerCoverageAFL::InjectTraceForGep( - Function &, ArrayRef GepTraceTargets) { - - for (auto GEP : GepTraceTargets) { - - IRBuilder<> IRB(GEP); - for (Use &Idx : GEP->indices()) - if (!isa(Idx) && Idx->getType()->isIntegerTy()) - IRB.CreateCall(SanCovTraceGepFunction, - {IRB.CreateIntCast(Idx, IntptrTy, true)}); - - } - -} - void ModuleSanitizerCoverageAFL::InjectTraceForCmp( Function &, ArrayRef CmpTraceTargets) { @@ -1321,27 +1192,44 @@ void ModuleSanitizerCoverageAFL::InjectCoverageAtBlock(Function &F, BasicBlock::iterator IP = BB.getFirstInsertionPt(); bool IsEntryBB = &BB == &F.getEntryBlock(); + DebugLoc EntryLoc; if (IsEntryBB) { - // Keep allocas and llvm.localescape calls in the entry block. Even + if (auto SP = F.getSubprogram()) + EntryLoc = DILocation::get(SP->getContext(), SP->getScopeLine(), 0, SP); + // Keep static allocas and llvm.localescape calls in the entry block. Even // if we aren't splitting the block, it's nice for allocas to be before // calls. IP = PrepareToSplitEntryBlock(BB, IP); +#if LLVM_VERSION_MAJOR < 15 - } - - IRBuilder<> IRB(&*IP); - - if (Options.TracePC) { + } else { - IRB.CreateCall(SanCovTracePC); - // ->setCannotMerge(); // gets the PC using GET_CALLER_PC. 
+ EntryLoc = IP->getDebugLoc(); + if (!EntryLoc) + if (auto *SP = F.getSubprogram()) + EntryLoc = DILocation::get(SP->getContext(), 0, 0, SP); +#endif } +#if LLVM_VERSION_MAJOR >= 15 + InstrumentationIRBuilder IRB(&*IP); +#else + IRBuilder<> IRB(&*IP); +#endif + if (EntryLoc) IRB.SetCurrentDebugLocation(EntryLoc); if (Options.TracePCGuard) { + /* + auto GuardPtr = IRB.CreateIntToPtr( + IRB.CreateAdd(IRB.CreatePointerCast(FunctionGuardArray, IntptrTy), + ConstantInt::get(IntptrTy, Idx * 4)), + Int32PtrTy); + IRB.CreateCall(SanCovTracePCGuard, GuardPtr)->setCannotMerge(); + */ + /* Get CurLoc */ Value *GuardPtr = IRB.CreateIntToPtr( @@ -1399,57 +1287,6 @@ void ModuleSanitizerCoverageAFL::InjectCoverageAtBlock(Function &F, } - if (Options.Inline8bitCounters) { - - auto CounterPtr = IRB.CreateGEP( - Function8bitCounterArray->getValueType(), Function8bitCounterArray, - {ConstantInt::get(IntptrTy, 0), ConstantInt::get(IntptrTy, Idx)}); - auto Load = IRB.CreateLoad(Int8Ty, CounterPtr); - auto Inc = IRB.CreateAdd(Load, ConstantInt::get(Int8Ty, 1)); - auto Store = IRB.CreateStore(Inc, CounterPtr); - SetNoSanitizeMetadata(Load); - SetNoSanitizeMetadata(Store); - - } - - /* - if (Options.InlineBoolFlag) { - - auto FlagPtr = IRB.CreateGEP( - FunctionBoolArray->getValueType(), FunctionBoolArray, - {ConstantInt::get(IntptrTy, 0), ConstantInt::get(IntptrTy, Idx)}); - auto Load = IRB.CreateLoad(Int1Ty, FlagPtr); - auto ThenTerm = - SplitBlockAndInsertIfThen(IRB.CreateIsNull(Load), &*IP, false); - IRBuilder<> ThenIRB(ThenTerm); - auto Store = ThenIRB.CreateStore(ConstantInt::getTrue(Int1Ty), FlagPtr); - SetNoSanitizeMetadata(Load); - SetNoSanitizeMetadata(Store); - - } - - */ - - if (Options.StackDepth && IsEntryBB && !IsLeafFunc) { - - // Check stack depth. If it's the deepest so far, record it. 
- Module *M = F.getParent(); - Function *GetFrameAddr = Intrinsic::getDeclaration( - M, Intrinsic::frameaddress, - IRB.getInt8PtrTy(M->getDataLayout().getAllocaAddrSpace())); - auto FrameAddrPtr = - IRB.CreateCall(GetFrameAddr, {Constant::getNullValue(Int32Ty)}); - auto FrameAddrInt = IRB.CreatePtrToInt(FrameAddrPtr, IntptrTy); - auto LowestStack = IRB.CreateLoad(IntptrTy, SanCovLowestStack); - auto IsStackLower = IRB.CreateICmpULT(FrameAddrInt, LowestStack); - auto ThenTerm = SplitBlockAndInsertIfThen(IsStackLower, &*IP, false); - IRBuilder<> ThenIRB(ThenTerm); - auto Store = ThenIRB.CreateStore(FrameAddrInt, SanCovLowestStack); - SetNoSanitizeMetadata(LowestStack); - SetNoSanitizeMetadata(Store); - - } - } std::string ModuleSanitizerCoverageAFL::getSectionName( diff --git a/src/afl-cc.c b/src/afl-cc.c index 84fe70ec..9e56828c 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -997,7 +997,7 @@ static void edit_params(u32 argc, char **argv, char **envp) { if (instrument_mode == INSTRUMENT_PCGUARD) { -#if LLVM_MAJOR >= 11 +#if LLVM_MAJOR >= 13 #if defined __ANDROID__ || ANDROID cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard"; instrument_mode = INSTRUMENT_LLVMNATIVE; @@ -1014,7 +1014,7 @@ static void edit_params(u32 argc, char **argv, char **envp) { } else { - #if LLVM_MAJOR >= 11 /* use new pass manager */ + #if LLVM_MAJOR >= 13 /* use new pass manager */ #if LLVM_MAJOR < 16 cc_params[cc_par_cnt++] = "-fexperimental-new-pass-manager"; #endif @@ -1035,12 +1035,12 @@ static void edit_params(u32 argc, char **argv, char **envp) { #if LLVM_MAJOR >= 4 if (!be_quiet) SAYF( - "Using unoptimized trace-pc-guard, upgrade to llvm 10.0.1+ for " + "Using unoptimized trace-pc-guard, upgrade to LLVM 13+ for " "enhanced version.\n"); cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard"; instrument_mode = INSTRUMENT_LLVMNATIVE; #else - FATAL("pcguard instrumentation requires llvm 4.0.1+"); + FATAL("pcguard instrumentation requires LLVM 4.0.1+"); #endif #endif @@ -1053,7 +1053,7 @@ static void edit_params(u32 argc, char **argv, char **envp) { cc_params[cc_par_cnt++] = "-fsanitize-coverage=trace-pc-guard,bb,no-prune,pc-table"; #else - FATAL("pcguard instrumentation with pc-table requires llvm 6.0.1+"); + FATAL("pcguard instrumentation with pc-table requires LLVM 6.0.1+"); #endif } else { @@ -1063,7 +1063,7 @@ static void edit_params(u32 argc, char **argv, char **envp) { } #else - FATAL("pcguard instrumentation requires llvm 4.0.1+"); + FATAL("pcguard instrumentation requires LLVM 4.0.1+"); #endif } else { @@ -2031,7 +2031,7 @@ int main(int argc, char **argv, char **envp) { if (!compiler_mode) { // lto is not a default because outside of afl-cc RANLIB and AR have to - // be set to llvm versions so this would work + // be set to LLVM versions so this would work if (have_llvm) compiler_mode = LLVM; else if (have_gcc_plugin) @@ -2050,6 +2050,17 @@ int main(int argc, char **argv, char **envp) { } + /* if our PCGUARD implementation is not available then silently switch to + native LLVM PCGUARD */ + if (compiler_mode == CLANG && + (instrument_mode == INSTRUMENT_DEFAULT || + instrument_mode == INSTRUMENT_PCGUARD) && + find_object("SanitizerCoveragePCGUARD.so", argv[0]) == NULL) { + + instrument_mode = INSTRUMENT_LLVMNATIVE; + + } + if (compiler_mode == GCC) { if (clang_mode) { @@ -2096,12 +2107,12 @@ int main(int argc, char **argv, char **envp) { "-------------|\n" "MODES: NCC PERSIST DICT LAF " "CMPLOG SELECT\n" - " [LTO] llvm LTO: %s%s\n" + " [LTO] LLVM LTO: %s%s\n" " PCGUARD DEFAULT yes yes 
yes yes yes " " yes\n" " CLASSIC yes yes yes yes yes " " yes\n" - " [LLVM] llvm: %s%s\n" + " [LLVM] LLVM: %s%s\n" " PCGUARD %s yes yes module yes yes " "yes\n" " CLASSIC %s no yes module yes yes " @@ -2171,7 +2182,7 @@ int main(int argc, char **argv, char **envp) { " (instrumentation/README.lto.md)\n" " PERSIST: persistent mode support [code] (huge speed increase!)\n" " (instrumentation/README.persistent_mode.md)\n" - " DICT: dictionary in the target [yes=automatic or llvm module " + " DICT: dictionary in the target [yes=automatic or LLVM module " "pass]\n" " (instrumentation/README.lto.md + " "instrumentation/README.llvm.md)\n" -- cgit 1.4.1 From a4b927241651c645cc1a2f5b185681830e7000f9 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 7 Jun 2023 10:58:10 +0200 Subject: fix gcc cmplog crash --- docs/Changelog.md | 1 + 1 file changed, 1 insertion(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index facf2196..9ed930b3 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -18,6 +18,7 @@ - rewrote our PCGUARD pass to be compatible with LLVM 15+ shenanigans, requires LLVM 13+ now instead of 10.0.1+ - fallback to native LLVM PCGUARD if our PCGUARD is unavailable + - fixed a crash in GCC CMPLOG - afl-showmap: - added custom mutator post_process and send support - add `-I filelist` option, an alternative to `-i in_dir` -- cgit 1.4.1 From bf2727b76366ce4c9cdc723c3f3ccffae3cc3619 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Mon, 12 Jun 2023 08:28:47 +0200 Subject: v4.07c release --- README.md | 4 ++-- TODO.md | 2 ++ docs/Changelog.md | 2 +- include/config.h | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) (limited to 'docs/Changelog.md') diff --git a/README.md b/README.md index 0208a9fe..97fd3997 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,9 @@ AFL++ logo -Release version: [4.06c](https://github.com/AFLplusplus/AFLplusplus/releases) +Release version: [4.07c](https://github.com/AFLplusplus/AFLplusplus/releases) -GitHub version: 4.07a +GitHub version: 4.07c Repository: [https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus) diff --git a/TODO.md b/TODO.md index 2b7e8fcf..26e12cee 100644 --- a/TODO.md +++ b/TODO.md @@ -2,6 +2,8 @@ ## Should + - afl-crash-analysis + - show in the UI when fuzzing is "done" - test cmplog for less than 16bit - support persistent and deferred fork server in afl-showmap? - better autodetection of shifting runtime timeout values diff --git a/docs/Changelog.md b/docs/Changelog.md index 9ed930b3..c52ddd56 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -3,7 +3,7 @@ This is the list of all noteworthy changes made in every public release of the tool. See README.md for the general instruction manual. 
-### Version ++4.07a (dev) +### Version ++4.07c (release) - afl-fuzz: - reverse reading the seeds only on restarts (increases performance) - new env `AFL_POST_PROCESS_KEEP_ORIGINAL` to keep the orignal diff --git a/include/config.h b/include/config.h index 194786f7..53be8549 100644 --- a/include/config.h +++ b/include/config.h @@ -26,7 +26,7 @@ /* Version string: */ // c = release, a = volatile github dev, e = experimental branch -#define VERSION "++4.07a" +#define VERSION "++4.07c" /****************************************************** * * -- cgit 1.4.1 From 61b6f4ed9e4dce15c39e4350278a95a41ea2522c Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Mon, 12 Jun 2023 09:16:15 +0200 Subject: 4.08a init --- README.md | 2 +- docs/Changelog.md | 7 +++++++ include/config.h | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'docs/Changelog.md') diff --git a/README.md b/README.md index 97fd3997..05c662c1 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ Release version: [4.07c](https://github.com/AFLplusplus/AFLplusplus/releases) -GitHub version: 4.07c +GitHub version: 4.08a Repository: [https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus) diff --git a/docs/Changelog.md b/docs/Changelog.md index c52ddd56..98d59527 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -3,6 +3,13 @@ This is the list of all noteworthy changes made in every public release of the tool. See README.md for the general instruction manual. +### Version ++4.08a (dev) + - new mutation engine: mutations that favor discovery more paths are prefered + until no new finds for 10 minutes then switching to mutations that favor + triggering crashes. Modes and switch time can be configured wie `-P`. + - display the state of the fuzzing run in the UI :-) + + ### Version ++4.07c (release) - afl-fuzz: - reverse reading the seeds only on restarts (increases performance) diff --git a/include/config.h b/include/config.h index 53be8549..d8153a2c 100644 --- a/include/config.h +++ b/include/config.h @@ -26,7 +26,7 @@ /* Version string: */ // c = release, a = volatile github dev, e = experimental branch -#define VERSION "++4.07c" +#define VERSION "++4.08a" /****************************************************** * * -- cgit 1.4.1 From 3ad8e9856cc48a6f69aa701dafd0623f91f31c5c Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Mon, 12 Jun 2023 09:23:57 +0200 Subject: update changelog --- docs/Changelog.md | 3 +++ 1 file changed, 3 insertions(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 98d59527..70f38d05 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -7,6 +7,9 @@ - new mutation engine: mutations that favor discovery more paths are prefered until no new finds for 10 minutes then switching to mutations that favor triggering crashes. Modes and switch time can be configured wie `-P`. + - new custom mutator that has the new afl++ engine (so it can easily + incorporated into new custom mutators), and also comes with a standalone + command line tool! 
See custom_mutators/aflpp/standalone/ - display the state of the fuzzing run in the UI :-) -- cgit 1.4.1 From 7b29f2cd244424c5385605d1302b68be44e432bc Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 20 Jun 2023 19:58:08 +0200 Subject: fix timeout for sessions restart and + usage --- docs/Changelog.md | 17 ++++++++++------- src/afl-fuzz-stats.c | 14 ++++++-------- src/afl-fuzz.c | 1 + 3 files changed, 17 insertions(+), 15 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 70f38d05..4454456e 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -4,13 +4,16 @@ release of the tool. See README.md for the general instruction manual. ### Version ++4.08a (dev) - - new mutation engine: mutations that favor discovery more paths are prefered - until no new finds for 10 minutes then switching to mutations that favor - triggering crashes. Modes and switch time can be configured wie `-P`. - - new custom mutator that has the new afl++ engine (so it can easily - incorporated into new custom mutators), and also comes with a standalone - command line tool! See custom_mutators/aflpp/standalone/ - - display the state of the fuzzing run in the UI :-) + - afl-fuzz: + - new mutation engine: mutations that favor discovery more paths are + prefered until no new finds for 10 minutes then switching to mutations + that favor triggering crashes. Modes and switch time can be configured + with `-P`. + - new custom mutator that has the new afl++ engine (so it can easily + incorporated into new custom mutators), and also comes with a standalone + command line tool! See custom_mutators/aflpp/standalone/ + - display the state of the fuzzing run in the UI :-) + - fix timeout setting if '+' is used or a session is restarted ### Version ++4.07c (release) diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 1499a7e4..389b82fc 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -2303,7 +2303,12 @@ void show_init_stats(afl_state_t *afl) { stringify_int(IB(0), min_us), stringify_int(IB(1), max_us), stringify_int(IB(2), avg_us)); - if (afl->timeout_given != 1) { + if (afl->timeout_given == 3) { + + ACTF("Applying timeout settings from resumed session (%u ms).", + afl->fsrv.exec_tmout); + + } else if (afl->timeout_given != 1) { /* Figure out the appropriate timeout. The basic idea is: 5x average or 1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second. @@ -2345,13 +2350,6 @@ void show_init_stats(afl_state_t *afl) { afl->timeout_given = 1; - } else if (afl->timeout_given == 3) { - - ACTF("Applying timeout settings from resumed session (%u ms).", - afl->fsrv.exec_tmout); - - } else { - ACTF("-t option specified. 
We'll use an exec timeout of %u ms.", afl->fsrv.exec_tmout); diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index d727fff5..9eabfae1 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -2362,6 +2362,7 @@ int main(int argc, char **argv_orig, char **envp) { max_ms = afl->queue_buf[entry]->exec_us; afl->fsrv.exec_tmout = max_ms; + afl->timeout_given = 1; } -- cgit 1.4.1 From 64b15a00f270f0ac9c00cf13e569481672227635 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 21 Jun 2023 12:20:10 +0200 Subject: fix afl-cmin* for old afl vanilla issue --- afl-cmin | 35 +++++++++++++++++++++++++++++------ afl-cmin.bash | 4 +++- docs/Changelog.md | 4 ++++ src/afl-showmap.c | 5 +++-- 4 files changed, 39 insertions(+), 9 deletions(-) (limited to 'docs/Changelog.md') diff --git a/afl-cmin b/afl-cmin index 3e37dbdb..d0bbed2b 100755 --- a/afl-cmin +++ b/afl-cmin @@ -318,7 +318,9 @@ BEGIN { if (!nyx_mode && target_bin && !exists_and_is_executable(target_bin)) { - "command -v "target_bin" 2>/dev/null" | getline tnew + cmd = "command -v "target_bin" 2>/dev/null" + cmd | getline tnew + close(cmd) if (!tnew || !exists_and_is_executable(tnew)) { print "[-] Error: binary '"target_bin"' not found or not executable." > "/dev/stderr" exit 1 @@ -330,6 +332,7 @@ BEGIN { echo "[!] Trying to obtain the map size of the target ..." get_map_size = "AFL_DUMP_MAP_SIZE=1 " target_bin get_map_size | getline mapsize + close(get_map_size) if (mapsize && mapsize > 65535 && mapsize < 100000000) { AFL_MAP_SIZE = "AFL_MAP_SIZE="mapsize" " print "[+] Setting "AFL_MAP_SIZE @@ -359,14 +362,18 @@ BEGIN { system("rm -rf "trace_dir" 2>/dev/null"); system("rm "out_dir"/id[:_]* 2>/dev/null") - "ls "out_dir"/* 2>/dev/null | wc -l" | getline noofentries + cmd = "ls "out_dir"/* 2>/dev/null | wc -l" + cmd | getline noofentries + close(cmd) if (0 == system( "test -d "out_dir" -a "noofentries" -gt 0" )) { print "[-] Error: directory '"out_dir"' exists and is not empty - delete it first." > "/dev/stderr" exit 1 } if (threads) { - "nproc" | getline nproc + cmd = "nproc" + cmd | getline nproc + close(cmd) if (threads == "all") { threads = nproc } else { @@ -386,12 +393,14 @@ BEGIN { if (stdin_file) { # truncate input file printf "" > stdin_file - close( stdin_file ) + close(stdin_file) } # First we look in PATH if (0 == system("command -v afl-showmap >/dev/null 2>&1")) { - "command -v afl-showmap 2>/dev/null" | getline showmap + cmd = "command -v afl-showmap 2>/dev/null" + cmd | getline showmap + close(cmd) } else { # then we look in the current directory if (0 == system("test -x ./afl-showmap")) { @@ -413,7 +422,9 @@ BEGIN { # yuck, gnu stat is option incompatible to bsd stat # we use a heuristic to differentiate between # GNU stat and other stats - "stat --version 2>/dev/null" | getline statversion + cmd = "stat --version 2>/dev/null" + cmd | getline statversion + close(cmd) if (statversion ~ /GNU coreutils/) { stat_format = "-c '%s %n'" # GNU } else { @@ -432,6 +443,7 @@ BEGIN { infilesSmallToBigFullMap[infilesSmallToBigFull[i]] = infilesSmallToBig[i] i++ } + close(cmdline) in_count = i first_file = infilesSmallToBigFull[0] @@ -468,6 +480,7 @@ BEGIN { while ((getline < runtest) > 0) { ++first_count } + close(runtest) if (first_count) { print "[+] OK, "first_count" tuples recorded." 
@@ -582,6 +595,15 @@ BEGIN { else { print " Processing file "cur"/"in_count } # create path for the trace file from afl-showmap tracefile_path = trace_dir"/"fn + # ensure the file size is not zero + cmd = "du -b "tracefile_path + "ls -l "tracefile_path + cmd | getline output + close(cmd) + split(output, result, "\t") + if (result[1] == 0) { + print "[!] WARNING: file "fn" is crashing the target, ignoring..." + } # gather all keys, and count them while ((getline line < tracefile_path) > 0) { key = line @@ -643,6 +665,7 @@ BEGIN { } } close(sortedKeys) + print "" print "[+] Found "tuple_count" unique tuples across "in_count" files." if (out_count == 1) { diff --git a/afl-cmin.bash b/afl-cmin.bash index dc6d5342..1d080491 100755 --- a/afl-cmin.bash +++ b/afl-cmin.bash @@ -479,7 +479,7 @@ else echo "[+] all $THREADS running tasks completed." rm -f ${TMPFILE}* - echo trace dir files: $(ls $TRACE_DIR/*|wc -l) + #echo trace dir files: $(ls $TRACE_DIR/*|wc -l) fi @@ -523,6 +523,8 @@ ls -rS "$IN_DIR" | while read -r fn; do sed "s#\$# $fn#" "$TRACE_DIR/$fn" >>"$TRACE_DIR/.candidate_list" + test -s "$TRACE_DIR/$fn" || echo Warning: $fn is ignored because of crashing the target + done echo diff --git a/docs/Changelog.md b/docs/Changelog.md index 4454456e..246c3cac 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -14,6 +14,10 @@ command line tool! See custom_mutators/aflpp/standalone/ - display the state of the fuzzing run in the UI :-) - fix timeout setting if '+' is used or a session is restarted + - afl-cmin/afl-cmin.bash: + - fixed a bug inherited from vanilla AFL where a coverage of + map[123] = 11 would be the same as map[1123] = 1 + - warn on crashing inputs ### Version ++4.07c (release) diff --git a/src/afl-showmap.c b/src/afl-showmap.c index 9c029035..13867fda 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -243,7 +243,8 @@ static void analyze_results(afl_forkserver_t *fsrv) { total += fsrv->trace_bits[i]; if (fsrv->trace_bits[i] > highest) highest = fsrv->trace_bits[i]; - if (!coverage_map[i]) { coverage_map[i] = 1; } + // if (!coverage_map[i]) { coverage_map[i] = 1; } + coverage_map[i] |= fsrv->trace_bits[i]; } @@ -328,7 +329,7 @@ static u32 write_results_to_file(afl_forkserver_t *fsrv, u8 *outfile) { if (cmin_mode) { - fprintf(f, "%u%u\n", fsrv->trace_bits[i], i); + fprintf(f, "%u%03u\n", i, fsrv->trace_bits[i]); } else { -- cgit 1.4.1 From 90f83c13d08f44fbf50036076a1772909c4d2c86 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 22 Jun 2023 09:24:00 +0200 Subject: remove dead code, code format --- .custom-format.py | 2 +- docs/Changelog.md | 3 ++ include/alloc-inl.h | 8 +++--- instrumentation/SanitizerCoveragePCGUARD.so.cc | 39 ++------------------------ qemu_mode/libqasan/dlmalloc.c | 2 +- src/afl-fuzz-init.c | 8 +++--- src/afl-fuzz.c | 3 +- utils/afl_network_proxy/afl-network-server.c | 2 +- 8 files changed, 19 insertions(+), 48 deletions(-) (limited to 'docs/Changelog.md') diff --git a/.custom-format.py b/.custom-format.py index 1d5c8839..3521c05d 100755 --- a/.custom-format.py +++ b/.custom-format.py @@ -24,7 +24,7 @@ import importlib.metadata # string_re = re.compile('(\\"(\\\\.|[^"\\\\])*\\")') # TODO: for future use -CURRENT_LLVM = os.getenv('LLVM_VERSION', 15) +CURRENT_LLVM = os.getenv('LLVM_VERSION', 16) CLANG_FORMAT_BIN = os.getenv("CLANG_FORMAT_BIN", "") diff --git a/docs/Changelog.md b/docs/Changelog.md index 246c3cac..c850c43e 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -18,6 +18,9 @@ - fixed a bug inherited from vanilla AFL where a 
coverage of map[123] = 11 would be the same as map[1123] = 1 - warn on crashing inputs + - afl-cc + - fixed an off-by-one instrumentation of iselect, hurting coverage a bit. + Thanks to @amykweon for spotting and fixing! ### Version ++4.07c (release) diff --git a/include/alloc-inl.h b/include/alloc-inl.h index 1e9a192b..cff808b2 100644 --- a/include/alloc-inl.h +++ b/include/alloc-inl.h @@ -322,7 +322,7 @@ static inline void DFL_ck_free(void *mem) { static inline void *DFL_ck_realloc(void *orig, u32 size) { void *ret; - u32 old_size = 0; + u32 old_size = 0; if (!size) { @@ -392,7 +392,7 @@ static inline void *DFL_ck_realloc(void *orig, u32 size) { static inline u8 *DFL_ck_strdup(u8 *str) { void *ret; - u32 size; + u32 size; if (!str) return NULL; @@ -438,14 +438,14 @@ struct TRK_obj { void *ptr; char *file, *func; - u32 line; + u32 line; }; #ifdef AFL_MAIN struct TRK_obj *TRK[ALLOC_BUCKETS]; -u32 TRK_cnt[ALLOC_BUCKETS]; +u32 TRK_cnt[ALLOC_BUCKETS]; #define alloc_report() TRK_report() diff --git a/instrumentation/SanitizerCoveragePCGUARD.so.cc b/instrumentation/SanitizerCoveragePCGUARD.so.cc index d87af775..57b5d128 100644 --- a/instrumentation/SanitizerCoveragePCGUARD.so.cc +++ b/instrumentation/SanitizerCoveragePCGUARD.so.cc @@ -225,49 +225,18 @@ llvmGetPassPluginInfo() { } -#if LLVM_VERSION_MAJOR == 1 +#if LLVM_VERSION_MAJOR >= 16 PreservedAnalyses ModuleSanitizerCoverageAFL::run(Module &M, ModuleAnalysisManager &MAM) { - ModuleSanitizerCoverageAFL ModuleSancov(Options); - auto &FAM = MAM.getResult(M).getManager(); - auto DTCallback = [&FAM](Function &F) -> const DominatorTree *{ - - return &FAM.getResult(F); - - }; - - auto PDTCallback = [&FAM](Function &F) -> const PostDominatorTree * { - - return &FAM.getResult(F); - - }; - - if (!ModuleSancov.instrumentModule(M, DTCallback, PDTCallback)) - return PreservedAnalyses::all(); - - PreservedAnalyses PA = PreservedAnalyses::none(); - // GlobalsAA is considered stateless and does not get invalidated unless - // explicitly invalidated; PreservedAnalyses::none() is not enough. Sanitizers - // make changes that require GlobalsAA to be invalidated. - PA.abandon(); - return PA; - -} - #else - #if LLVM_VERSION_MAJOR >= 16 -PreservedAnalyses ModuleSanitizerCoverageAFL::run(Module &M, - ModuleAnalysisManager &MAM) { - - #else PreservedAnalyses ModuleSanitizerCoverageAFL::run(Module &M, ModuleAnalysisManager &MAM) { - #endif +#endif ModuleSanitizerCoverageAFL ModuleSancov(Options); auto &FAM = MAM.getResult(M).getManager(); - auto DTCallback = [&FAM](Function &F) -> const DominatorTree * { + auto DTCallback = [&FAM](Function &F) -> const DominatorTree *{ return &FAM.getResult(F); @@ -285,8 +254,6 @@ PreservedAnalyses ModuleSanitizerCoverageAFL::run(Module &M, } -#endif - std::pair ModuleSanitizerCoverageAFL::CreateSecStartEnd( Module &M, const char *Section, Type *Ty) { diff --git a/qemu_mode/libqasan/dlmalloc.c b/qemu_mode/libqasan/dlmalloc.c index 5d0b65ce..b459eb7b 100644 --- a/qemu_mode/libqasan/dlmalloc.c +++ b/qemu_mode/libqasan/dlmalloc.c @@ -1762,7 +1762,7 @@ static FORCEINLINE void *win32direct_mmap(size_t size) { static FORCEINLINE int win32munmap(void *ptr, size_t size) { MEMORY_BASIC_INFORMATION minfo; - char *cptr = (char *)ptr; + char *cptr = (char *)ptr; while (size) { diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 13802f40..24fd7077 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -1542,8 +1542,8 @@ double get_runnable_processes(void) { processes well. 
*/ FILE *f = fopen("/proc/stat", "r"); - u8 tmp[1024]; - u32 val = 0; + u8 tmp[1024]; + u32 val = 0; if (!f) { return 0; } @@ -2226,7 +2226,7 @@ void check_crash_handling(void) { *BSD, so we can just let it slide for now. */ s32 fd = open("/proc/sys/kernel/core_pattern", O_RDONLY); - u8 fchar; + u8 fchar; if (fd < 0) { return; } @@ -2365,7 +2365,7 @@ void check_cpu_governor(afl_state_t *afl) { FATAL("Suboptimal CPU scaling governor"); #elif defined __APPLE__ - u64 min = 0, max = 0; + u64 min = 0, max = 0; size_t mlen = sizeof(min); if (afl->afl_env.afl_skip_cpufreq) return; diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 8cf786af..79b05da7 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -164,7 +164,8 @@ static void usage(u8 *argv0, int more_help) { "\n" "Mutator settings:\n" - " -a - target expects ascii text input (prefer text mutators)\n" + " -a - target expects ascii text input (prefer text " + "mutators)\n" " -g minlength - set min length of generated fuzz input (default: 1)\n" " -G maxlength - set max length of generated fuzz input (default: " "%lu)\n" diff --git a/utils/afl_network_proxy/afl-network-server.c b/utils/afl_network_proxy/afl-network-server.c index 7eb3d18e..95b0a551 100644 --- a/utils/afl_network_proxy/afl-network-server.c +++ b/utils/afl_network_proxy/afl-network-server.c @@ -173,7 +173,7 @@ static void set_up_environment(afl_forkserver_t *fsrv) { } out_file = alloc_printf("%s/.afl-input-temp-%u", use_dir, getpid()); - fsrv->out_file = out_file; + fsrv->out_file = out_file; } -- cgit 1.4.1 From edd352612da1f58832cbe84d909a8998ce4fa690 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sat, 24 Jun 2023 09:30:09 +0200 Subject: code format --- docs/Changelog.md | 6 +++++- instrumentation/split-compares-pass.so.cc | 12 +++++++++--- 2 files changed, 14 insertions(+), 4 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index c850c43e..e6b90d3d 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -18,9 +18,13 @@ - fixed a bug inherited from vanilla AFL where a coverage of map[123] = 11 would be the same as map[1123] = 1 - warn on crashing inputs - - afl-cc + - afl-cc: - fixed an off-by-one instrumentation of iselect, hurting coverage a bit. Thanks to @amykweon for spotting and fixing! + - @toka fixed a bug in laf-intel signed integer comparison splitting, + thanks a lot!! 
+ - frida_mode: + - support for long form instrumentation on x86_x64 and arm64 ### Version ++4.07c (release) diff --git a/instrumentation/split-compares-pass.so.cc b/instrumentation/split-compares-pass.so.cc index 3cfd1964..6eafb332 100644 --- a/instrumentation/split-compares-pass.so.cc +++ b/instrumentation/split-compares-pass.so.cc @@ -464,8 +464,11 @@ bool SplitComparesTransform::simplifyOrEqualsCompare(CmpInst *IcmpInst, ReplaceInstWithInst(IcmpInst->getParent()->getInstList(), ii, PN); #endif if (new_pred == CmpInst::ICMP_SGT || new_pred == CmpInst::ICMP_SLT) { + simplifySignedCompare(icmp_np, M, worklist); + } + worklist.push_back(icmp_eq); return true; @@ -751,11 +754,14 @@ bool SplitComparesTransform::splitCompare(CmpInst *cmp_inst, Module &M, icmp_inv_cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_UGT, op0_high, op1_high); - } - else { + } else { + // Never gonna appen if (!be_quiet) - fprintf(stderr, "Error: split-compare: Equals or signed not removed: %d\n", pred); + fprintf(stderr, + "Error: split-compare: Equals or signed not removed: %d\n", + pred); + } #if LLVM_MAJOR >= 16 -- cgit 1.4.1 From 3e1d7941077b1457f702988063d6b9fdd9b80740 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 29 Jun 2023 16:57:20 +0200 Subject: update mutation strategy --- docs/Changelog.md | 4 +++- include/afl-fuzz.h | 59 +++++++++++++++++++++++++------------------------ include/afl-mutations.h | 6 ++--- src/afl-fuzz-one.c | 56 +++++++++++++++++++++++++++------------------- src/afl-fuzz.c | 26 +++++++++++++++++----- 5 files changed, 90 insertions(+), 61 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index e6b90d3d..ad58e99e 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -8,7 +8,8 @@ - new mutation engine: mutations that favor discovery more paths are prefered until no new finds for 10 minutes then switching to mutations that favor triggering crashes. Modes and switch time can be configured - with `-P`. + with `-P`. Also input mode for the target can be defined with `-a` to + be `text` or `binary` (defaults to `generic`) - new custom mutator that has the new afl++ engine (so it can easily incorporated into new custom mutators), and also comes with a standalone command line tool! See custom_mutators/aflpp/standalone/ @@ -23,6 +24,7 @@ Thanks to @amykweon for spotting and fixing! - @toka fixed a bug in laf-intel signed integer comparison splitting, thanks a lot!! + - more LLVM compatability - frida_mode: - support for long form instrumentation on x86_x64 and arm64 diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index c6c45fbd..9da5cc03 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -505,36 +505,37 @@ typedef struct afl_state { is_main_node, /* if this is the main node */ is_secondary_node, /* if this is a secondary instance */ pizza_is_served, /* pizza mode */ - text_input, /* target wants text inputs */ - fuzz_mode, /* current mode: coverage/exploration or crash/exploitation */ + input_mode, /* target wants text inputs */ + fuzz_mode, /* coverage/exploration or crash/exploitation mode */ schedule, /* Power schedule (default: EXPLORE)*/ - havoc_max_mult, skip_deterministic, /* Skip deterministic stages? */ - use_splicing, /* Recombine input files? */ - non_instrumented_mode, /* Run in non-instrumented mode? */ - score_changed, /* Scoring for favorites changed? */ - resuming_fuzz, /* Resuming an older fuzzing job? */ - timeout_given, /* Specific timeout given? 
*/ - not_on_tty, /* stdout is not a tty */ - term_too_small, /* terminal dimensions too small */ - no_forkserver, /* Disable forkserver? */ - crash_mode, /* Crash mode! Yeah! */ - in_place_resume, /* Attempt in-place resume? */ - autoresume, /* Resume if afl->out_dir exists? */ - auto_changed, /* Auto-generated tokens changed? */ - no_cpu_meter_red, /* Feng shui on the status screen */ - no_arith, /* Skip most arithmetic ops */ - shuffle_queue, /* Shuffle input queue? */ - bitmap_changed, /* Time to update bitmap? */ - unicorn_mode, /* Running in Unicorn mode? */ - use_wine, /* Use WINE with QEMU mode */ - skip_requested, /* Skip request, via SIGUSR1 */ - run_over10m, /* Run time over 10 minutes? */ - persistent_mode, /* Running in persistent mode? */ - deferred_mode, /* Deferred forkserver mode? */ - fixed_seed, /* do not reseed */ - fast_cal, /* Try to calibrate faster? */ - disable_trim, /* Never trim in fuzz_one */ - shmem_testcase_mode, /* If sharedmem testcases are used */ + havoc_max_mult, /* havoc multiplier */ + skip_deterministic, /* Skip deterministic stages? */ + use_splicing, /* Recombine input files? */ + non_instrumented_mode, /* Run in non-instrumented mode? */ + score_changed, /* Scoring for favorites changed? */ + resuming_fuzz, /* Resuming an older fuzzing job? */ + timeout_given, /* Specific timeout given? */ + not_on_tty, /* stdout is not a tty */ + term_too_small, /* terminal dimensions too small */ + no_forkserver, /* Disable forkserver? */ + crash_mode, /* Crash mode! Yeah! */ + in_place_resume, /* Attempt in-place resume? */ + autoresume, /* Resume if afl->out_dir exists? */ + auto_changed, /* Auto-generated tokens changed? */ + no_cpu_meter_red, /* Feng shui on the status screen */ + no_arith, /* Skip most arithmetic ops */ + shuffle_queue, /* Shuffle input queue? */ + bitmap_changed, /* Time to update bitmap? */ + unicorn_mode, /* Running in Unicorn mode? */ + use_wine, /* Use WINE with QEMU mode */ + skip_requested, /* Skip request, via SIGUSR1 */ + run_over10m, /* Run time over 10 minutes? */ + persistent_mode, /* Running in persistent mode? */ + deferred_mode, /* Deferred forkserver mode? */ + fixed_seed, /* do not reseed */ + fast_cal, /* Try to calibrate faster? */ + disable_trim, /* Never trim in fuzz_one */ + shmem_testcase_mode, /* If sharedmem testcases are used */ expand_havoc, /* perform expensive havoc after no find */ cycle_schedules, /* cycle power schedules? */ old_seed_selection, /* use vanilla afl seed selection */ diff --git a/include/afl-mutations.h b/include/afl-mutations.h index cc4840c8..0a9bbbf4 100644 --- a/include/afl-mutations.h +++ b/include/afl-mutations.h @@ -14,14 +14,14 @@ Parameters: afl_state_t *afl - the *afl state pointer u8 *buf - the input buffer to mutate which will be mutated into. - NOTE: must be able to contain a size of at least max_len (see below)! + NOTE: must be able to contain a size of at least max_len!! (see below) u32 len - the length of the input u32 steps - how many mutations to perform on the input bool is_text - is the target expecting text inputs bool is_exploration - mutate for exploration mode (instead of exploitation) splice_buf - a buffer from another corpus item to splice with. - If NULL then no splicing - splice_len - the length of the splice buffer. If 0 then no splicing + If NULL then no splicing is done (obviously). + splice_len - the length of the splice buffer. If 0 then no splicing. 
u32 max_len - the maximum size the mutated buffer may grow to */ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index c6e49653..0d3c29f2 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -2085,47 +2085,57 @@ havoc_stage: u32 *mutation_array; u32 stack_max, rand_max; // stack_max_pow = afl->havoc_stack_pow2; - /* + switch (afl->input_mode) { - if (unlikely(afl->expand_havoc && afl->ready_for_splicing_count > 1)) { + case 1: { // TEXT - mutation_array = full_splice_array; - rand_max = MUT_SPLICE_ARRAY_SIZE; + if (likely(afl->fuzz_mode == 0)) { // is exploration? + mutation_array = (unsigned int *)&binary_array; + rand_max = MUT_BIN_ARRAY_SIZE; - } else { + } else { // exploitation mode - mutation_array = normal_splice_array; - rand_max = MUT_NORMAL_ARRAY_SIZE; + mutation_array = (unsigned int *)&mutation_strategy_exploitation_text; + rand_max = MUT_STRATEGY_ARRAY_SIZE; - } + } - */ + break; - if (unlikely(afl->text_input)) { // is text? + } - if (likely(afl->fuzz_mode == 0)) { // is exploration? + case 2: { // BINARY - mutation_array = (unsigned int *)&text_array; - rand_max = MUT_TXT_ARRAY_SIZE; + if (likely(afl->fuzz_mode == 0)) { // is exploration? + mutation_array = (unsigned int *)&mutation_strategy_exploration_binary; + rand_max = MUT_STRATEGY_ARRAY_SIZE; - } else { // is exploitation! + } else { // exploitation mode - mutation_array = (unsigned int *)&mutation_strategy_exploitation_text; - rand_max = MUT_STRATEGY_ARRAY_SIZE; + mutation_array = (unsigned int *)&mutation_strategy_exploitation_binary; + rand_max = MUT_STRATEGY_ARRAY_SIZE; + + } + + break; } - } else { // is binary! + default: { // DEFAULT/GENERIC - if (likely(afl->fuzz_mode == 0)) { // is exploration? + if (likely(afl->fuzz_mode == 0)) { // is exploration? + mutation_array = (unsigned int *)&binary_array; + rand_max = MUT_BIN_ARRAY_SIZE; - mutation_array = (unsigned int *)&binary_array; - rand_max = MUT_BIN_ARRAY_SIZE; + } else { // exploitation mode - } else { // is exploitation! 
+ // this will need to be changed I guess + mutation_array = (unsigned int *)&mutation_strategy_exploration_text; + rand_max = MUT_STRATEGY_ARRAY_SIZE; + + } - mutation_array = (unsigned int *)&mutation_strategy_exploitation_binary; - rand_max = MUT_STRATEGY_ARRAY_SIZE; + break; } diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 79b05da7..ab7d6534 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -125,7 +125,8 @@ static void usage(u8 *argv0, int more_help) { "Required parameters:\n" " -i dir - input directory with test cases (or '-' to resume, " - "also see AFL_AUTORESUME)\n" + "also see \n" + " AFL_AUTORESUME)\n" " -o dir - output directory for fuzzer findings\n\n" "Execution control settings:\n" @@ -164,8 +165,8 @@ static void usage(u8 *argv0, int more_help) { "\n" "Mutator settings:\n" - " -a - target expects ascii text input (prefer text " - "mutators)\n" + " -a - target input format, \"text\" or \"binary\" (default: " + "generic)\n" " -g minlength - set min length of generated fuzz input (default: 1)\n" " -G maxlength - set max length of generated fuzz input (default: " "%lu)\n" @@ -506,13 +507,28 @@ int main(int argc, char **argv_orig, char **envp) { // still available: HjJkKqruvwz while ((opt = getopt(argc, argv, - "+aAb:B:c:CdDe:E:f:F:g:G:hi:I:l:L:m:M:nNo:Op:P:QRs:S:t:" + "+a:Ab:B:c:CdDe:E:f:F:g:G:hi:I:l:L:m:M:nNo:Op:P:QRs:S:t:" "T:UV:WXx:YZ")) > 0) { switch (opt) { case 'a': - afl->text_input = 1; + + if (!stricmp(optarg, "text") || !stricmp(optarg, "ascii") || + !stricmp(optarg, "txt") || !stricmp(optarg, "asc")) { + + afl->input_mode = 1; + + } else if (!stricmp(optarg, "bin") || !stricmp(optarg, "binary")) { + + afl->input_mode = 2; + + } else { + + FATAL("-a input mode needs to be \"text\" or \"binary\"."); + + } + break; case 'P': -- cgit 1.4.1 From 20dcb40c53811e36a3ace91a66a70cfddc4b3f1c Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sat, 8 Jul 2023 13:31:06 +0200 Subject: fix cmin -T --- afl-cmin | 5 +++++ afl-cmin.bash | 7 +++++++ docs/Changelog.md | 1 + 3 files changed, 13 insertions(+) (limited to 'docs/Changelog.md') diff --git a/afl-cmin b/afl-cmin index d0bbed2b..f3ae4304 100755 --- a/afl-cmin +++ b/afl-cmin @@ -493,6 +493,11 @@ BEGIN { } } + if (in_count < threads) { + threads = in_count + print "[!] WARNING: less inputs than threads, reducing threads to "threads" and likely the overhead of threading makes things slower..." + } + # Let's roll! ############################# diff --git a/afl-cmin.bash b/afl-cmin.bash index 1d080491..b326bee8 100755 --- a/afl-cmin.bash +++ b/afl-cmin.bash @@ -339,6 +339,13 @@ fi echo "[*] Are you aware that afl-cmin is faster than this afl-cmin.bash script?" echo "[+] Found $IN_COUNT files for minimizing." +if [ -n "$THREADS" ]; then + if [ "$IN_COUNT" -lt "$THREADS" ]; then + THREADS=$IN_COUNT + echo "[!] WARNING: less inputs than threads, reducing threads to $THREADS and likely the overhead of threading makes things slower..." + fi +fi + FIRST_FILE=`ls "$IN_DIR" | head -1` # Make sure that we're not dealing with a directory. diff --git a/docs/Changelog.md b/docs/Changelog.md index ad58e99e..032bb774 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -19,6 +19,7 @@ - fixed a bug inherited from vanilla AFL where a coverage of map[123] = 11 would be the same as map[1123] = 1 - warn on crashing inputs + - adjust threads if less inputs than threads specified - afl-cc: - fixed an off-by-one instrumentation of iselect, hurting coverage a bit. Thanks to @amykweon for spotting and fixing! 
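[Illustrative aside; not part of any commit in this log.] The "update mutation strategy" change to afl-fuzz-one.c above reduces to a two-axis table lookup: the `-a` input mode (generic/text/binary) and the `-P` fuzz mode (explore/exploit) jointly select a table of mutation IDs, and the havoc stage then samples uniformly from that table on every step. The self-contained C sketch below mirrors only that selection shape; the table names, their dummy contents, and the pick_table()/draw_mutation() helpers are invented for illustration and are not AFL++ identifiers.

    #include <stdint.h>
    #include <stdlib.h>

    enum input_mode { GENERIC = 0, TEXT = 1, BINARY = 2 };  /* -a */
    enum fuzz_mode  { EXPLORE = 0, EXPLOIT = 1 };           /* -P */

    /* Dummy strategy tables: arrays of mutation IDs. Repeating an ID is a
       cheap way to give that mutation a higher selection weight. */
    static const uint32_t explore_text_tbl[] = {0, 1, 1, 2};
    static const uint32_t exploit_text_tbl[] = {2, 3, 3, 4};
    static const uint32_t explore_bin_tbl[]  = {0, 2, 4, 4};
    static const uint32_t exploit_bin_tbl[]  = {1, 3, 5, 5};

    #define PICK(t) \
      do { *tbl = (t); *len = sizeof(t) / sizeof((t)[0]); } while (0)

    /* Two-axis selection; same shape as the switch() in the hunk above. */
    static void pick_table(int in_mode, int f_mode, const uint32_t **tbl,
                           size_t *len) {

      switch (in_mode) {

        case TEXT:
          if (f_mode == EXPLORE) PICK(explore_text_tbl);
          else                   PICK(exploit_text_tbl);
          break;

        case BINARY:
          if (f_mode == EXPLORE) PICK(explore_bin_tbl);
          else                   PICK(exploit_bin_tbl);
          break;

        default: /* GENERIC fallback, cf. the "will need to be changed" note */
          if (f_mode == EXPLORE) PICK(explore_bin_tbl);
          else                   PICK(explore_text_tbl);
          break;

      }

    }

    /* Havoc-style consumption: one uniform draw of a mutation ID per step. */
    static uint32_t draw_mutation(const uint32_t *tbl, size_t len) {

      return tbl[(size_t)rand() % len];

    }

Per the changelog entries above, the real engine additionally switches tables at runtime: exploration-flavored tables are used until no new paths have been found for 10 minutes, after which it moves to the exploitation-flavored ones.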
-- cgit 1.4.1 From 534b3eba143c0532e600eb6da08ac2195fa24570 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 13 Jul 2023 10:10:30 +0200 Subject: qemu_get_symbol_addr.sh --- Dockerfile | 2 +- docs/Changelog.md | 3 ++ frida_mode/test/bloaty/GNUmakefile | 2 +- frida_mode/test/cache/GNUmakefile | 2 +- frida_mode/test/cmov/GNUmakefile | 2 +- frida_mode/test/deferred/GNUmakefile | 2 +- frida_mode/test/dynamic/GNUmakefile | 2 +- frida_mode/test/entry_point/GNUmakefile | 2 +- frida_mode/test/freetype2/GNUmakefile | 2 +- frida_mode/test/jpeg/GNUmakefile | 2 +- frida_mode/test/libpcap/GNUmakefile | 2 +- frida_mode/test/libxml/GNUmakefile | 2 +- frida_mode/test/libxslt/GNUmakefile | 2 +- frida_mode/test/osx-lib/GNUmakefile | 2 +- frida_mode/test/perf/GNUmakefile | 2 +- frida_mode/test/persistent_ret/GNUmakefile | 2 +- frida_mode/test/png/persistent/GNUmakefile | 2 +- frida_mode/test/png/persistent/hook/GNUmakefile | 2 +- frida_mode/test/proj4/GNUmakefile | 2 +- frida_mode/test/re2/GNUmakefile | 2 +- frida_mode/test/sqlite/GNUmakefile | 2 +- frida_mode/test/unstable/GNUmakefile | 2 +- frida_mode/test/vorbis/GNUmakefile | 2 +- frida_mode/util/frida_get_symbol_addr.sh | 55 +++++++++++++++++++++++++ frida_mode/util/get_symbol_addr.sh | 32 -------------- qemu_mode/util/qemu_get_symbol_addr.sh | 53 ++++++++++++++++++++++++ 26 files changed, 133 insertions(+), 54 deletions(-) create mode 100755 frida_mode/util/frida_get_symbol_addr.sh delete mode 100755 frida_mode/util/get_symbol_addr.sh create mode 100755 qemu_mode/util/qemu_get_symbol_addr.sh (limited to 'docs/Changelog.md') diff --git a/Dockerfile b/Dockerfile index 1b5ffd28..e1616198 100644 --- a/Dockerfile +++ b/Dockerfile @@ -42,7 +42,7 @@ RUN apt-get update && \ python3 python3-dev python3-pip python-is-python3 \ libtool libtool-bin libglib2.0-dev \ apt-transport-https gnupg dialog \ - gnuplot-nox libpixman-1-dev \ + gnuplot-nox libpixman-1-dev bc \ gcc-${GCC_VERSION} g++-${GCC_VERSION} gcc-${GCC_VERSION}-plugin-dev gdb lcov \ clang-${LLVM_VERSION} clang-tools-${LLVM_VERSION} libc++1-${LLVM_VERSION} \ libc++-${LLVM_VERSION}-dev libc++abi1-${LLVM_VERSION} libc++abi-${LLVM_VERSION}-dev \ diff --git a/docs/Changelog.md b/docs/Changelog.md index 032bb774..d61ce8ec 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -28,6 +28,9 @@ - more LLVM compatability - frida_mode: - support for long form instrumentation on x86_x64 and arm64 + - renamed utils/get_symbol_addr.sh to utils/frida_get_symbol_addr.sh + - qemu_mode: + - added qemu_mode/utils/qemu_get_symbol_addr.sh ### Version ++4.07c (release) diff --git a/frida_mode/test/bloaty/GNUmakefile b/frida_mode/test/bloaty/GNUmakefile index 8e767fae..02a0a1e2 100644 --- a/frida_mode/test/bloaty/GNUmakefile +++ b/frida_mode/test/bloaty/GNUmakefile @@ -35,7 +35,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/cache/GNUmakefile b/frida_mode/test/cache/GNUmakefile index 12736a3f..98776193 100644 --- a/frida_mode/test/cache/GNUmakefile +++ b/frida_mode/test/cache/GNUmakefile @@ -11,7 +11,7 @@ QEMU_OUT:=$(BUILD_DIR)qemu-out FRIDA_OUT:=$(BUILD_DIR)frida-out ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh 
+GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFLPP_FRIDA_DRIVER_HOOK_OBJ=$(ROOT)frida_mode/build/frida_hook.so diff --git a/frida_mode/test/cmov/GNUmakefile b/frida_mode/test/cmov/GNUmakefile index 96f1ae5b..0712e33b 100644 --- a/frida_mode/test/cmov/GNUmakefile +++ b/frida_mode/test/cmov/GNUmakefile @@ -11,7 +11,7 @@ QEMU_OUT:=$(BUILD_DIR)qemu-out FRIDA_OUT:=$(BUILD_DIR)frida-out ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFLPP_FRIDA_DRIVER_HOOK_OBJ=$(ROOT)frida_mode/build/frida_hook.so diff --git a/frida_mode/test/deferred/GNUmakefile b/frida_mode/test/deferred/GNUmakefile index 22aeb2bf..e0b48797 100644 --- a/frida_mode/test/deferred/GNUmakefile +++ b/frida_mode/test/deferred/GNUmakefile @@ -10,7 +10,7 @@ TESTINSTSRC:=$(PWD)testinstr.c QEMU_OUT:=$(BUILD_DIR)qemu-out FRIDA_OUT:=$(BUILD_DIR)frida-out -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh ifndef ARCH diff --git a/frida_mode/test/dynamic/GNUmakefile b/frida_mode/test/dynamic/GNUmakefile index f43416f7..6c577dff 100644 --- a/frida_mode/test/dynamic/GNUmakefile +++ b/frida_mode/test/dynamic/GNUmakefile @@ -17,7 +17,7 @@ FRIDA_OUT:=$(BUILD_DIR)frida-out AFLPP_FRIDA_DRIVER_HOOK_OBJ=$(ROOT)frida_mode/build/frida_hook.so ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TESTINSTBIN) testinstr $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/entry_point/GNUmakefile b/frida_mode/test/entry_point/GNUmakefile index 08c660f7..b8c0ecb5 100644 --- a/frida_mode/test/entry_point/GNUmakefile +++ b/frida_mode/test/entry_point/GNUmakefile @@ -10,7 +10,7 @@ TESTINSTSRC:=$(PWD)testinstr.c QEMU_OUT:=$(BUILD_DIR)qemu-out FRIDA_OUT:=$(BUILD_DIR)frida-out -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh ifndef ARCH diff --git a/frida_mode/test/freetype2/GNUmakefile b/frida_mode/test/freetype2/GNUmakefile index 8c35d5de..23318d52 100644 --- a/frida_mode/test/freetype2/GNUmakefile +++ b/frida_mode/test/freetype2/GNUmakefile @@ -64,7 +64,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/jpeg/GNUmakefile b/frida_mode/test/jpeg/GNUmakefile index a8242081..a4967039 100644 --- a/frida_mode/test/jpeg/GNUmakefile +++ b/frida_mode/test/jpeg/GNUmakefile @@ -47,7 +47,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/libpcap/GNUmakefile b/frida_mode/test/libpcap/GNUmakefile index 1bf9cd7f..745d7057 100644 --- a/frida_mode/test/libpcap/GNUmakefile +++ b/frida_mode/test/libpcap/GNUmakefile @@ -56,7 +56,7 @@ endif endif 
ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/libxml/GNUmakefile b/frida_mode/test/libxml/GNUmakefile index 6fc87585..f1f4a738 100644 --- a/frida_mode/test/libxml/GNUmakefile +++ b/frida_mode/test/libxml/GNUmakefile @@ -43,7 +43,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/libxslt/GNUmakefile b/frida_mode/test/libxslt/GNUmakefile index 655e652b..48bb0b40 100644 --- a/frida_mode/test/libxslt/GNUmakefile +++ b/frida_mode/test/libxslt/GNUmakefile @@ -42,7 +42,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/osx-lib/GNUmakefile b/frida_mode/test/osx-lib/GNUmakefile index 96dbb5ad..fdc9ec04 100644 --- a/frida_mode/test/osx-lib/GNUmakefile +++ b/frida_mode/test/osx-lib/GNUmakefile @@ -26,7 +26,7 @@ FRIDA_OUT:=$(BUILD_DIR)frida-out HARNESS_LDFLAGS:=-Wl,-no_pie LIB_CFLAGS:=-dynamiclib -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_MAIN_ADDR=$(shell $(GET_SYMBOL_ADDR) $(HARNESS_BIN) main 0x0) AFL_FRIDA_MAIN_ADDR2=$(shell $(GET_SYMBOL_ADDR) $(HARNESS2_BIN) main 0x0) AFL_FRIDA_FUZZ_ADDR=$(shell $(GET_SYMBOL_ADDR) $(HARNESS_BIN) LLVMFuzzerTestOneInput 0x0) diff --git a/frida_mode/test/perf/GNUmakefile b/frida_mode/test/perf/GNUmakefile index 2d7c0239..6b49c2ba 100644 --- a/frida_mode/test/perf/GNUmakefile +++ b/frida_mode/test/perf/GNUmakefile @@ -31,7 +31,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/persistent_ret/GNUmakefile b/frida_mode/test/persistent_ret/GNUmakefile index 71f6a124..73d710a1 100644 --- a/frida_mode/test/persistent_ret/GNUmakefile +++ b/frida_mode/test/persistent_ret/GNUmakefile @@ -23,7 +23,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh ifeq "$(shell uname)" "Darwin" TEST_BIN_LDFLAGS:=-Wl,-no_pie diff --git a/frida_mode/test/png/persistent/GNUmakefile b/frida_mode/test/png/persistent/GNUmakefile index 94e2be38..3dab713e 100644 --- a/frida_mode/test/png/persistent/GNUmakefile +++ b/frida_mode/test/png/persistent/GNUmakefile @@ -22,7 +22,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh 
AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/png/persistent/hook/GNUmakefile b/frida_mode/test/png/persistent/hook/GNUmakefile index b6a1ca1a..f3d06c87 100644 --- a/frida_mode/test/png/persistent/hook/GNUmakefile +++ b/frida_mode/test/png/persistent/hook/GNUmakefile @@ -33,7 +33,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/proj4/GNUmakefile b/frida_mode/test/proj4/GNUmakefile index debc8a88..17850fa8 100644 --- a/frida_mode/test/proj4/GNUmakefile +++ b/frida_mode/test/proj4/GNUmakefile @@ -47,7 +47,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/re2/GNUmakefile b/frida_mode/test/re2/GNUmakefile index 220e7616..0b79210b 100644 --- a/frida_mode/test/re2/GNUmakefile +++ b/frida_mode/test/re2/GNUmakefile @@ -48,7 +48,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/sqlite/GNUmakefile b/frida_mode/test/sqlite/GNUmakefile index df470af8..6d3c7496 100644 --- a/frida_mode/test/sqlite/GNUmakefile +++ b/frida_mode/test/sqlite/GNUmakefile @@ -43,7 +43,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/unstable/GNUmakefile b/frida_mode/test/unstable/GNUmakefile index 59b49449..3b7b6ddb 100644 --- a/frida_mode/test/unstable/GNUmakefile +++ b/frida_mode/test/unstable/GNUmakefile @@ -23,7 +23,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/test/vorbis/GNUmakefile b/frida_mode/test/vorbis/GNUmakefile index 4cb5d417..b10d059e 100644 --- a/frida_mode/test/vorbis/GNUmakefile +++ b/frida_mode/test/vorbis/GNUmakefile @@ -54,7 +54,7 @@ endif endif ADDR_BIN:=$(ROOT)frida_mode/build/addr -GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/get_symbol_addr.sh +GET_SYMBOL_ADDR:=$(ROOT)frida_mode/util/frida_get_symbol_addr.sh AFL_FRIDA_BASE_ADDR:=$(shell $(ADDR_BIN)) AFL_FRIDA_PERSISTENT_ADDR=$(shell $(GET_SYMBOL_ADDR) $(TEST_BIN) LLVMFuzzerTestOneInput $(AFL_FRIDA_BASE_ADDR)) diff --git a/frida_mode/util/frida_get_symbol_addr.sh 
b/frida_mode/util/frida_get_symbol_addr.sh new file mode 100755 index 00000000..fb0002b7 --- /dev/null +++ b/frida_mode/util/frida_get_symbol_addr.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# Copyright 2023 AFLplusplus +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +target="$1" +symbol="$2" +base="$3" + +test -z "$target" -o -z "$symbol" -o '!' -x "$target" && { + echo "Syntax: $0 executable function [baseaddress]" + echo + echo Help script to calculate the function address of a binary QEMU will load it to. + echo function is e.g. LLVMFuzzerTestOneInput, afl_qemu_driver_stdin, etc. + echo "baseaddress is tried to be auto-detected, you can use 'AFL_QEMU_DEBUG_MAPS=1 afl-qemu-trace ./executable' to see the maps." + exit 1 +} + +file=$(file $target|sed 's/.*: //') + +arch=$(echo $file|awk -F, '{print$2}'|tr -d ' ') +bits=$(echo $file|sed 's/-bit .*//'|sed 's/.* //') +pie=$(echo $file|grep -wqi pie && echo pie) + +test $(uname -s) = "Darwin" && symbol=_"$symbol" +tmp_addr=$(nm "$target" | grep -i "T $symbol" | awk '{print$1}' | tr a-f A-F) + +test -z "$tmp_addr" && { echo Error: function $symbol not found 1>&2; exit 1; } +test -z "$pie" && { echo 0x$tmp_addr; exit 0; } + +test -z "$base" && { + test "$bits" = 32 -o "$bits" = 64 || { echo "Error: could not identify arch (bits=$bits)" 1>&2 ; exit 1; } + # is this true for arm/aarch64/i386 too? + base=0x555555554000 + #test "$arch" = Intel80386 && base=0x5555554000 + #test "$arch" = x86-64 && base=0x555555554000 + #test "$arch" = ARMaarch64 && base=0x5500000000 + # add more here, e.g. "$arch" = ARM +} + +test -z "$base" && { echo "Error: could not identify base address! bits=$bits arch=$arch" 1>&2 ; exit 1; } + +hex_base=$(echo "$base" | awk '{sub("^0x","");print $0}' | tr a-f A-F ) +echo $tmp_addr | echo "ibase=16;obase=10;$hex_base + $tmp_addr" | bc | tr A-F a-f | awk '{print "0x"$0}' +exit 0 diff --git a/frida_mode/util/get_symbol_addr.sh b/frida_mode/util/get_symbol_addr.sh deleted file mode 100755 index f5d8df91..00000000 --- a/frida_mode/util/get_symbol_addr.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# set -x -target="$1" -symbol="$2" -base="$3" - -test -z "$target" -o -z "$symbol" -o '!' 
-e "$target" && exit 0 - -test $(uname -s) = "Darwin" && symbol=_"$symbol" - -file "$target" | grep -q executable && { - nm "$target" | grep -i "T $symbol" | awk '{print"0x"$1}' - exit 0 -} - -hex_base=$(echo "$3" | awk '{sub("^0x","");print $0}' | tr a-f A-F ) -nm "$target" | grep -i "T $symbol" | awk '{print$1}' | tr a-f A-F | \ - xargs echo "ibase=16;obase=10;$hex_base + " | bc | tr A-F a-f | awk '{print "0x"$0}' -exit 0 diff --git a/qemu_mode/util/qemu_get_symbol_addr.sh b/qemu_mode/util/qemu_get_symbol_addr.sh new file mode 100755 index 00000000..e0a7ae80 --- /dev/null +++ b/qemu_mode/util/qemu_get_symbol_addr.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright 2023 AFLplusplus +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +target="$1" +symbol="$2" +base="$3" + +test -z "$target" -o -z "$symbol" -o '!' -x "$target" && { + echo "Syntax: $0 executable function [baseaddress]" + echo + echo Help script to calculate the function address of a binary QEMU will load it to. + echo function is e.g. LLVMFuzzerTestOneInput, afl_qemu_driver_stdin, etc. + echo "baseaddress is tried to be auto-detected, you can use 'AFL_QEMU_DEBUG_MAPS=1 afl-qemu-trace ./executable' to see the maps." + exit 1 +} + +file=$(file $target|sed 's/.*: //') + +arch=$(echo $file|awk -F, '{print$2}'|tr -d ' ') +bits=$(echo $file|sed 's/-bit .*//'|sed 's/.* //') +pie=$(echo $file|grep -wqi pie && echo pie) + +test $(uname -s) = "Darwin" && symbol=_"$symbol" +tmp_addr=$(nm "$target" | grep -i "T $symbol" | awk '{print$1}' | tr a-f A-F) + +test -z "$tmp_addr" && { echo Error: function $symbol not found 1>&2; exit 1; } +test -z "$pie" && { echo 0x$tmp_addr; exit 0; } + +test -z "$base" && { + test "$bits" = 32 -o "$bits" = 64 || { echo "Error: could not identify arch (bits=$bits)" 1>&2 ; exit 1; } + test "$arch" = Intel80386 && base=0x40000000 + test "$arch" = x86-64 && base=0x4000000000 + test "$arch" = ARMaarch64 && base=0x5500000000 + # add more here, e.g. "$arch" = ARM +} + +test -z "$base" && { echo "Error: could not identify base address! bits=$bits arch=$arch" 1>&2 ; exit 1; } + +hex_base=$(echo "$base" | awk '{sub("^0x","");print $0}' | tr a-f A-F ) +echo $tmp_addr | echo "ibase=16;obase=10;$hex_base + $tmp_addr" | bc | tr A-F a-f | awk '{print "0x"$0}' +exit 0 -- cgit 1.4.1 From 5f813bbb86e1c9e2480669c44501e9780043728c Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 21 Jul 2023 18:02:30 +0200 Subject: improve cmplog level 3 --- docs/Changelog.md | 1 + include/afl-fuzz.h | 3 +- include/config.h | 8 +-- src/afl-fuzz-redqueen.c | 171 +++++++++++++++++++++++++----------------------- src/afl-fuzz.c | 7 +- 5 files changed, 100 insertions(+), 90 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index d61ce8ec..75167172 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -15,6 +15,7 @@ command line tool! 
See custom_mutators/aflpp/standalone/ - display the state of the fuzzing run in the UI :-) - fix timeout setting if '+' is used or a session is restarted + - -c X option to enable base64 transformation solving - afl-cmin/afl-cmin.bash: - fixed a bug inherited from vanilla AFL where a coverage of map[123] = 11 would be the same as map[1123] = 1 diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 27668da0..e114b0fc 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -674,7 +674,8 @@ typedef struct afl_state { u32 cmplog_max_filesize; u32 cmplog_lvl; u32 colorize_success; - u8 cmplog_enable_arith, cmplog_enable_transform, cmplog_random_colorization; + u8 cmplog_enable_arith, cmplog_enable_transform, + cmplog_enable_xtreme_transform, cmplog_random_colorization; struct afl_pass_stat *pass_stats; struct cmp_map *orig_cmp_map; diff --git a/include/config.h b/include/config.h index 7c29a674..df545583 100644 --- a/include/config.h +++ b/include/config.h @@ -60,10 +60,6 @@ * */ -/* if TRANSFORM is enabled with '-l T', this additionally enables base64 - encoding/decoding */ -// #define CMPLOG_SOLVE_TRANSFORM_BASE64 - /* If a redqueen pass finds more than one solution, try to combine them? */ #define CMPLOG_COMBINE @@ -71,10 +67,10 @@ #define CMPLOG_CORPUS_PERCENT 5U /* Number of potential positions from which we decide if cmplog becomes - useless, default 8096 */ + useless, default 12288 */ #define CMPLOG_POSITIONS_MAX (12 * 1024) -/* Maximum allowed fails per CMP value. Default: 128 */ +/* Maximum allowed fails per CMP value. Default: 96 */ #define CMPLOG_FAIL_MAX 96 /* -------------------------------------*/ diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index 73e188e7..5a1f512d 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -571,7 +571,6 @@ static u8 its_fuzz(afl_state_t *afl, u8 *buf, u32 len, u8 *status) { } -// #ifdef CMPLOG_SOLVE_TRANSFORM static int strntoll(const char *str, size_t sz, char **end, int base, long long *out) { @@ -656,7 +655,6 @@ static int is_hex(const char *str) { } -#ifdef CMPLOG_SOLVE_TRANSFORM_BASE64 // tests 4 bytes at location static int is_base64(const char *str) { @@ -769,10 +767,6 @@ static void to_base64(u8 *src, u8 *dst, u32 dst_len) { } -#endif - -// #endif - static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, u64 pattern, u64 repl, u64 o_pattern, u64 changed_val, u8 attr, u32 idx, u32 taint_len, @@ -797,42 +791,54 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, } - // fprintf(stderr, - // "Encode: %llx->%llx into %llx(<-%llx) at idx=%u " - // "taint_len=%u shape=%u attr=%u\n", - // o_pattern, pattern, repl, changed_val, idx, taint_len, - // hshape, attr); + /* + fprintf(stderr, + "Encode: %llx->%llx into %llx(<-%llx) at idx=%u " + "taint_len=%u shape=%u attr=%u\n", + o_pattern, pattern, repl, changed_val, idx, taint_len, + hshape, attr); + */ - // #ifdef CMPLOG_SOLVE_TRANSFORM // reverse atoi()/strnu?toll() is expensive, so we only to it in lvl 3 if (afl->cmplog_enable_transform && (lvl & LVL3)) { u8 *endptr; u8 use_num = 0, use_unum = 0; - unsigned long long unum; - long long num; + unsigned long long unum = 0; + long long num = 0; + + // if (afl->queue_cur->is_ascii) { + + // we first check if our input are ascii numbers that are transformed to + // an integer and used for comparison: - if (afl->queue_cur->is_ascii) { + endptr = buf_8; + if (strntoll(buf_8, len - idx, (char **)&endptr, 0, &num)) { - endptr = buf_8; - if (strntoll(buf_8, len - idx, (char **)&endptr, 
0, &num)) { + if (!strntoull(buf_8, len - idx, (char **)&endptr, 0, &unum)) { - if (!strntoull(buf_8, len - idx, (char **)&endptr, 0, &unum)) - use_unum = 1; + use_unum = 1; - } else + } + + } else { - use_num = 1; + use_num = 1; } + //} + #ifdef _DEBUG if (idx == 0) - fprintf(stderr, "ASCII is=%u use_num=%u use_unum=%u idx=%u %llx==%llx\n", - afl->queue_cur->is_ascii, use_num, use_unum, idx, num, pattern); + fprintf(stderr, + "ASCII is=%u use_num=%u>%lld use_unum=%u>%llu idx=%u " + "pattern=0x%llx\n", + afl->queue_cur->is_ascii, use_num, num, use_unum, unum, idx, + pattern); #endif - // num is likely not pattern as atoi("AAA") will be zero... + // atoi("AAA") == 0 so !num means we have to investigate if (use_num && ((u64)num == pattern || !num)) { u8 tmp_buf[32]; @@ -961,10 +967,12 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, // test for arithmetic, eg. "if ((user_val - 0x1111) == 0x1234) ..." s64 diff = pattern - b_val; s64 o_diff = o_pattern - o_b_val; - /* fprintf(stderr, "DIFF1 idx=%03u shape=%02u %llx-%llx=%lx\n", idx, - hshape, o_pattern, o_b_val, o_diff); - fprintf(stderr, "DIFF1 %016llx %llx-%llx=%lx\n", repl, pattern, - b_val, diff); */ + /* + fprintf(stderr, "DIFF1 idx=%03u shape=%02u %llx-%llx=%lx\n", idx, + hshape, o_pattern, o_b_val, o_diff); + fprintf(stderr, "DIFF1 %016llx %llx-%llx=%lx\n", repl, pattern, + b_val, diff); + */ if (diff == o_diff && diff) { // this could be an arithmetic transformation @@ -1275,7 +1283,6 @@ static u8 cmp_extend_encoding(afl_state_t *afl, struct cmp_header *h, // 16 = modified float, 32 = modified integer (modified = wont match // in original buffer) - // #ifdef CMPLOG_SOLVE_ARITHMETIC if (!afl->cmplog_enable_arith || lvl < LVL3 || attr == IS_TRANSFORM) { return 0; @@ -2009,8 +2016,12 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 entry, its_len = MIN(its_len, taint_len); u32 saved_its_len = its_len; + if (its_len <= 1) { return 0; } + if (lvl & LVL3) { + if (memcmp(changed_val, repl, its_len) != 0) { return 0; } + u32 max_to = MIN(4U, idx); if (!(lvl & LVL1) && max_to) { from = 1; } to = max_to; @@ -2089,9 +2100,7 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 entry, if (afl->cmplog_enable_transform && (lvl & LVL3)) { u32 toupper = 0, tolower = 0, xor = 0, arith = 0, tohex = 0, fromhex = 0; -#ifdef CMPLOG_SOLVE_TRANSFORM_BASE64 u32 tob64 = 0, fromb64 = 0; -#endif u32 from_0 = 0, from_x = 0, from_X = 0, from_slash = 0, from_up = 0; u32 to_0 = 0, to_x = 0, to_slash = 0, to_up = 0; u8 xor_val[32], arith_val[32], tmp[48]; @@ -2144,7 +2153,8 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 entry, } - if (i < 16 && is_hex(repl + (i << 1))) { + if (afl->cmplog_enable_xtreme_transform && i < 16 && + is_hex(repl + (i << 1))) { ++tohex; @@ -2163,7 +2173,7 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 entry, } - if ((i % 2)) { + if (afl->cmplog_enable_xtreme_transform && (i % 2)) { if (len > idx + i + 1 && is_hex(orig_buf + idx + i)) { @@ -2187,20 +2197,21 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 entry, } -#ifdef CMPLOG_SOLVE_TRANSFORM_BASE64 - if (i % 3 == 2 && i < 24) { + if (afl->cmplog_enable_xtreme_transform) { - if (is_base64(repl + ((i / 3) << 2))) tob64 += 3; + if (i % 3 == 2 && i < 24) { - } + if (is_base64(repl + ((i / 3) << 2))) tob64 += 3; + + } - if (i % 4 == 3 && i < 24) { + if (i % 4 == 3 && i < 24) { - if (is_base64(orig_buf + idx + i - 3)) fromb64 += 4; + if (is_base64(orig_buf + idx + i - 3)) fromb64 += 4; - } + } -#endif + } if ((o_pattern[i] ^ orig_buf[idx + i]) == 
xor_val[i] && xor_val[i]) { @@ -2229,45 +2240,50 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 entry, } #ifdef _DEBUG + fprintf(stderr, "RTN %s %s %s %s\n", buf, pattern, orig_buf, o_pattern); fprintf(stderr, - "RTN idx=%u loop=%u xor=%u arith=%u tolower=%u toupper=%u " + "RTN idx=%u len=%u loop=%u xor=%u arith=%u tolower=%u toupper=%u " "tohex=%u fromhex=%u to_0=%u to_slash=%u to_x=%u " "from_0=%u from_slash=%u from_x=%u\n", - idx, i, xor, arith, tolower, toupper, tohex, fromhex, to_0, - to_slash, to_x, from_0, from_slash, from_x); - #ifdef CMPLOG_SOLVE_TRANSFORM_BASE64 - fprintf(stderr, "RTN idx=%u loop=%u tob64=%u from64=%u\n", tob64, - fromb64); - #endif + idx, its_len, i, xor, arith, tolower, toupper, tohex, fromhex, + to_0, to_slash, to_x, from_0, from_slash, from_x); + if (afl->cmplog_enable_xtreme_transform) { + + fprintf(stderr, "RTN idx=%u loop=%u tob64=%u from64=%u\n", idx, i, + tob64, fromb64); + + } + #endif -#ifdef CMPLOG_SOLVE_TRANSFORM_BASE64 - // input is base64 and converted to binary? convert repl to base64! - if ((i % 4) == 3 && i < 24 && fromb64 > i) { + if (afl->cmplog_enable_xtreme_transform) { - to_base64(repl, tmp, i + 1); - memcpy(buf + idx, tmp, i + 1); - if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } - // fprintf(stderr, "RTN ATTEMPT fromb64 %u result %u\n", fromb64, - // *status); + // input is base64 and converted to binary? convert repl to base64! + if ((i % 4) == 3 && i < 24 && fromb64 > i) { - } + to_base64(repl, tmp, i + 1); + memcpy(buf + idx, tmp, i + 1); + if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } + // fprintf(stderr, "RTN ATTEMPT fromb64 %u result %u\n", fromb64, + // *status); + + } - // input is converted to base64? decode repl with base64! - if ((i % 3) == 2 && i < 24 && tob64 > i) { + // input is converted to base64? decode repl with base64! + if ((i % 3) == 2 && i < 24 && tob64 > i) { - u32 olen = from_base64(repl, tmp, i + 1); - memcpy(buf + idx, tmp, olen); - if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } - // fprintf(stderr, "RTN ATTEMPT tob64 %u idx=%u result %u\n", tob64, - // idx, *status); + u32 olen = from_base64(repl, tmp, i + 1); + memcpy(buf + idx, tmp, olen); + if (unlikely(its_fuzz(afl, buf, len, status))) { return 1; } + // fprintf(stderr, "RTN ATTEMPT tob64 %u idx=%u result %u\n", tob64, + // idx, *status); - } + } -#endif + } // input is converted to hex? convert repl to binary! - if (i < 16 && tohex > i) { + if (afl->cmplog_enable_xtreme_transform && i < 16 && tohex > i) { u32 off; if (to_slash + to_x + to_0 == 2) { @@ -2292,8 +2308,8 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 entry, } // input is hex and converted to binary? convert repl to hex! 
- if (i && (i % 2) && i < 16 && fromhex && - fromhex + from_slash + from_x + from_0 > i) { + if (afl->cmplog_enable_xtreme_transform && i && (i % 2) && i < 16 && + fromhex && fromhex + from_slash + from_x + from_0 > i) { u8 off = 0; if (from_slash && from_x) { @@ -2401,11 +2417,9 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 entry, if ((i >= 7 && (i >= xor&&i >= arith &&i >= tolower &&i >= toupper &&i > tohex &&i > - (fromhex + from_0 + from_x + from_slash + 1) -#ifdef CMPLOG_SOLVE_TRANSFORM_BASE64 - && i > tob64 + 3 && i > fromb64 + 4 -#endif - )) || + (fromhex + from_0 + from_x + from_slash + 1) && + (afl->cmplog_enable_xtreme_transform && i > tob64 + 3 && + i > fromb64 + 4))) || repl[i] != changed_val[i] || *status == 1) { break; @@ -2418,8 +2432,6 @@ static u8 rtn_extend_encoding(afl_state_t *afl, u8 entry, } - // #endif - return 0; } @@ -2818,12 +2830,7 @@ u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len) { } - } else if ((lvl & LVL1) - - // #ifdef CMPLOG_SOLVE_TRANSFORM - || ((lvl & LVL3) && afl->cmplog_enable_transform) - // #endif - ) { + } else if ((lvl & LVL1) || ((lvl & LVL3) && afl->cmplog_enable_transform)) { if (unlikely(rtn_fuzz(afl, k, orig_buf, buf, cbuf, len, lvl, taint))) { diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index d8a88f00..21a8915c 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -185,7 +185,8 @@ static void usage(u8 *argv0, int more_help) { " 1=small files, 2=larger files (default), 3=all " "files,\n" " A=arithmetic solving, T=transformational solving,\n" - " R=random colorization bytes.\n\n" + " X=extreme transform solving, R=random colorization " + "bytes.\n\n" "Fuzzing behavior settings:\n" " -Z - sequential queue selection instead of weighted " "random\n" @@ -1120,6 +1121,10 @@ int main(int argc, char **argv_orig, char **envp) { case 'T': afl->cmplog_enable_transform = 1; break; + case 'x': + case 'X': + afl->cmplog_enable_xtreme_transform = 1; + break; case 'r': case 'R': afl->cmplog_random_colorization = 1; -- cgit 1.4.1 From d9cadb2e7db1d1c208cd40299f0e5c4f6364aa2c Mon Sep 17 00:00:00 2001 From: marc Date: Wed, 9 Aug 2023 16:31:30 +0200 Subject: -c - support --- docs/Changelog.md | 5 ++++- src/afl-fuzz.c | 25 ++++++++++++++++++++----- 2 files changed, 24 insertions(+), 6 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 75167172..76f98547 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -15,7 +15,10 @@ command line tool! See custom_mutators/aflpp/standalone/ - display the state of the fuzzing run in the UI :-) - fix timeout setting if '+' is used or a session is restarted - - -c X option to enable base64 transformation solving + - -l X option to enable base64 transformation solving + - allow to disable CMPLOG with '-c -' (e.g. afl.rs enforces '-c 0' on + every instance which is counterproductive). + - afl-cmin/afl-cmin.bash: - fixed a bug inherited from vanilla AFL where a coverage of map[123] = 11 would be the same as map[1123] = 1 diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index e1f93f0d..cdb3f996 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -180,7 +180,8 @@ static void usage(u8 *argv0, int more_help) { "it.\n" " if using QEMU/FRIDA or the fuzzing target is " "compiled\n" - " for CmpLog then just use -c 0.\n" + " for CmpLog then use '-c 0'. To disable Cmplog use '-c " + "-'.\n" " -l cmplog_opts - CmpLog configuration values (e.g. 
\"2ATR\"):\n" " 1=small files, 2=larger files (default), 3=all " "files,\n" @@ -600,8 +601,23 @@ int main(int argc, char **argv_orig, char **envp) { case 'c': { - afl->shm.cmplog_mode = 1; - afl->cmplog_binary = ck_strdup(optarg); + if (strcmp(optarg, "-") == 0) { + + if (afl->shm.cmplog_mode) { + + ACTF("Disabling cmplog again because of '-c -'."); + afl->shm.cmplog_mode = 0; + afl->cmplog_binary = NULL; + + } + + } else { + + afl->shm.cmplog_mode = 1; + afl->cmplog_binary = ck_strdup(optarg); + + } + break; } @@ -1510,8 +1526,7 @@ int main(int argc, char **argv_orig, char **envp) { if (!afl->use_banner) { afl->use_banner = argv[optind]; } - if (afl->shm.cmplog_mode && - (!strcmp("-", afl->cmplog_binary) || !strcmp("0", afl->cmplog_binary))) { + if (afl->shm.cmplog_mode && strcmp("0", afl->cmplog_binary) == 0) { afl->cmplog_binary = strdup(argv[optind]); -- cgit 1.4.1 From 3721c65a0b7fdf2b24713f8009030c6c241e200b Mon Sep 17 00:00:00 2001 From: marc Date: Thu, 10 Aug 2023 10:41:55 +0200 Subject: v4.08c release --- README.md | 10 +++++----- docs/Changelog.md | 2 +- include/config.h | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'docs/Changelog.md') diff --git a/README.md b/README.md index b73cf2c1..951efe59 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,9 @@ AFL++ logo -Release version: [4.07c](https://github.com/AFLplusplus/AFLplusplus/releases) +Release version: [4.08c](https://github.com/AFLplusplus/AFLplusplus/releases) -GitHub version: 4.08a +GitHub version: 4.08c Repository: [https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus) @@ -12,13 +12,13 @@ Repository: AFL++ is maintained by: * Marc "van Hauser" Heuse -* Andrea Fioraldi * Dominik Maier -* Heiko "hexcoder-" Eißfeldt +* Andrea Fioraldi +* Heiko "hexcoder-" Eissfeldt * frida_mode is maintained by @Worksbutnottested * Documentation: Jana Aydinbas -Originally developed by Michał "lcamtuf" Zalewski. +Originally developed by Michal "lcamtuf" Zalewski. AFL++ is a superior fork to Google's AFL - more speed, more and better mutations, more and better instrumentation, custom module support, etc. diff --git a/docs/Changelog.md b/docs/Changelog.md index 76f98547..2c747e42 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -3,7 +3,7 @@ This is the list of all noteworthy changes made in every public release of the tool. See README.md for the general instruction manual. -### Version ++4.08a (dev) +### Version ++4.08c (release) - afl-fuzz: - new mutation engine: mutations that favor discovery more paths are prefered until no new finds for 10 minutes then switching to mutations diff --git a/include/config.h b/include/config.h index df545583..5a81c4e2 100644 --- a/include/config.h +++ b/include/config.h @@ -5,9 +5,9 @@ Originally written by Michal Zalewski Now maintained by Marc Heuse , - Heiko Eißfeldt , - Andrea Fioraldi , Dominik Maier + Andrea Fioraldi , + Heiko Eissfeldt , Copyright 2016, 2017 Google Inc. All rights reserved. Copyright 2019-2023 AFLplusplus Project. All rights reserved. 
@@ -26,7 +26,7 @@ /* Version string: */ // c = release, a = volatile github dev, e = experimental branch -#define VERSION "++4.08a" +#define VERSION "++4.08c" /****************************************************** * * -- cgit 1.4.1 From 9607d1db06ebfc2fe1ba565a0ef0123ab3f3e76c Mon Sep 17 00:00:00 2001 From: marc Date: Thu, 10 Aug 2023 10:56:20 +0200 Subject: v4.09a init --- README.md | 2 +- docs/Changelog.md | 4 ++++ include/config.h | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) (limited to 'docs/Changelog.md') diff --git a/README.md b/README.md index 951efe59..322ebcf2 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ Release version: [4.08c](https://github.com/AFLplusplus/AFLplusplus/releases) -GitHub version: 4.08c +GitHub version: 4.09a Repository: [https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus) diff --git a/docs/Changelog.md b/docs/Changelog.md index 2c747e42..94b4c502 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -3,6 +3,10 @@ This is the list of all noteworthy changes made in every public release of the tool. See README.md for the general instruction manual. +### Version ++4.09a (dev) + - something cool :-) + + ### Version ++4.08c (release) - afl-fuzz: - new mutation engine: mutations that favor discovery more paths are diff --git a/include/config.h b/include/config.h index 5a81c4e2..6a75737f 100644 --- a/include/config.h +++ b/include/config.h @@ -26,7 +26,7 @@ /* Version string: */ // c = release, a = volatile github dev, e = experimental branch -#define VERSION "++4.08c" +#define VERSION "++4.09a" /****************************************************** * * -- cgit 1.4.1 From 8823f22a9c87123c1bfcc5bff10044de4c7a4a1f Mon Sep 17 00:00:00 2001 From: marc Date: Fri, 11 Aug 2023 11:22:18 +0200 Subject: add AFL_FINAL_SYNC --- docs/Changelog.md | 7 +++---- docs/env_variables.md | 13 +++++++++---- include/afl-fuzz.h | 3 ++- include/envs.h | 1 + src/afl-fuzz-state.c | 7 +++++++ src/afl-fuzz.c | 9 +++++++++ 6 files changed, 31 insertions(+), 9 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 94b4c502..8f2b2545 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -4,7 +4,9 @@ release of the tool. See README.md for the general instruction manual. ### Version ++4.09a (dev) - - something cool :-) + - afl-fuzz: + - added `AFL_FINAL_SYNC` which forces a final fuzzer sync (also for `-F`) + before terminating. ### Version ++4.08c (release) @@ -22,7 +24,6 @@ - -l X option to enable base64 transformation solving - allow to disable CMPLOG with '-c -' (e.g. afl.rs enforces '-c 0' on every instance which is counterproductive). - - afl-cmin/afl-cmin.bash: - fixed a bug inherited from vanilla AFL where a coverage of map[123] = 11 would be the same as map[1123] = 1 @@ -40,7 +41,6 @@ - qemu_mode: - added qemu_mode/utils/qemu_get_symbol_addr.sh - ### Version ++4.07c (release) - afl-fuzz: - reverse reading the seeds only on restarts (increases performance) @@ -69,7 +69,6 @@ - TritonDSE in custom_mutators/aflpp_tritondse - SymQEMU in custom_mutators/symqemu - ### Version ++4.06c (release) - afl-fuzz: - ensure temporary file descriptor is closed when not used diff --git a/docs/env_variables.md b/docs/env_variables.md index affc9e3c..2ce274d3 100644 --- a/docs/env_variables.md +++ b/docs/env_variables.md @@ -412,10 +412,15 @@ checks or alter some of the more exotic semantics of the tool: set `AFL_IGNORE_PROBLEMS`. 
If you additionally want to also ignore coverage from late loaded libraries,
    you can set `AFL_IGNORE_PROBLEMS_COVERAGE`.
- - When running in the `-M` or `-S` mode, setting `AFL_IMPORT_FIRST` causes the
-   fuzzer to import test cases from other instances before doing anything else.
-   This makes the "own finds" counter in the UI more accurate. Beyond counter
-   aesthetics, not much else should change.
+ - When running with multiple afl-fuzz or with `-F`, setting `AFL_IMPORT_FIRST`
+   causes the fuzzer to import test cases from other instances before doing
+   anything else. This makes the "own finds" counter in the UI more accurate.
+
+ - When running with multiple afl-fuzz or with `-F`, setting `AFL_FINAL_SYNC`
+   will cause the fuzzer to perform a final import of test cases when
+   terminating. This is beneficial for `-M` main fuzzers to ensure they have
+   all unique test cases and hence you only need to `afl-cmin` this single
+   queue.
 
  - Setting `AFL_INPUT_LEN_MIN` and `AFL_INPUT_LEN_MAX` are an alternative to
    the afl-fuzz -g/-G command line option to control the minimum/maximum
diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h
index ef84a18c..1f89bbd8 100644
--- a/include/afl-fuzz.h
+++ b/include/afl-fuzz.h
@@ -402,7 +402,8 @@ typedef struct afl_env_vars {
       afl_exit_on_seed_issues, afl_try_affinity, afl_ignore_problems,
       afl_keep_timeouts, afl_no_crash_readme, afl_ignore_timeouts,
       afl_no_startup_calibration, afl_no_warn_instability,
-      afl_post_process_keep_original, afl_crashing_seeds_as_new_crash;
+      afl_post_process_keep_original, afl_crashing_seeds_as_new_crash,
+      afl_final_sync;
 
   u8 *afl_tmpdir, *afl_custom_mutator_library, *afl_python_module, *afl_path,
       *afl_hang_tmout, *afl_forksrv_init_tmout, *afl_preload,
diff --git a/include/envs.h b/include/envs.h
index 0007d5a8..3f5a9e1c 100644
--- a/include/envs.h
+++ b/include/envs.h
@@ -59,6 +59,7 @@ static char *afl_environment_variables[] = {
     "AFL_EXIT_ON_TIME",
     "AFL_EXIT_ON_SEED_ISSUES",
     "AFL_FAST_CAL",
+    "AFL_FINAL_SYNC",
     "AFL_FORCE_UI",
     "AFL_FRIDA_DEBUG_MAPS",
     "AFL_FRIDA_DRIVER_NO_HOOK",
diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c
index 5a6b95cf..97e00415 100644
--- a/src/afl-fuzz-state.c
+++ b/src/afl-fuzz-state.c
@@ -269,6 +269,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) {
           afl->afl_env.afl_import_first =
               get_afl_env(afl_environment_variables[i]) ? 1 : 0;
 
+        } else if (!strncmp(env, "AFL_FINAL_SYNC",
+
+                            afl_environment_variable_len)) {
+
+          afl->afl_env.afl_final_sync =
+              get_afl_env(afl_environment_variables[i]) ? 1 : 0;
+
         } else if (!strncmp(env, "AFL_CUSTOM_MUTATOR_ONLY",
                             afl_environment_variable_len)) {
 
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index cdb3f996..c2ec4a1d 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -2899,6 +2899,15 @@ stop_fuzzing:
             time_spent_working / afl->fsrv.total_execs);
 #endif
 
+  if (afl->afl_env.afl_final_sync) {
+
+    SAYF(cYEL "[!] " cRST "\nPerforming final sync, this may take some time ...\n");
+    sync_fuzzers(afl);
+    write_bitmap(afl);
+    SAYF(cYEL "[!] 
" cRST "Done!\n\n"); + + } + if (afl->is_main_node) { u8 path[PATH_MAX]; -- cgit 1.4.1 From c2c8e780a5d10fe7500ec9add0aa5b2cb081fe71 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 16 Aug 2023 10:50:07 +0200 Subject: add benchmark --- benchmark/COMPARISON | 4 ++++ benchmark/benchmark.sh | 42 ++++++++++++++++++++++++++++++++++++++++++ docs/Changelog.md | 2 ++ 3 files changed, 48 insertions(+) create mode 100644 benchmark/COMPARISON create mode 100755 benchmark/benchmark.sh (limited to 'docs/Changelog.md') diff --git a/benchmark/COMPARISON b/benchmark/COMPARISON new file mode 100644 index 00000000..55ab94b4 --- /dev/null +++ b/benchmark/COMPARISON @@ -0,0 +1,4 @@ +CPU | Mz | exec/s | afl-*-config | +========================================|======|========|==============| +CPU 12th Gen Intel(R) Core(TM) i7-1270P | 4200 | 12750 | both | +AMD EPYC 7282 16-Core Processor | 3190 | 10060 | both | diff --git a/benchmark/benchmark.sh b/benchmark/benchmark.sh new file mode 100755 index 00000000..3318adce --- /dev/null +++ b/benchmark/benchmark.sh @@ -0,0 +1,42 @@ +#!/bin/sh +test -x ../afl-fuzz -a -x ../afl-cc -a -e ../SanitizerCoveragePCGUARD.so || { + echo Error: you need to compile AFL++ first, we need afl-fuzz, afl-clang-fast and SanitizerCoveragePCGUARD.so built. + exit 1 +} + +echo Preparing environment + +env | grep AFL_ | sed 's/=.*//' | while read e; do + unset $e +done + +AFL_PATH=`pwd`/.. +export PATH=$AFL_PATH:$PATH + +AFL_LLVM_INSTRUMENT=PCGUARD afl-cc -o test-instr ../test-instr.c > afl.log 2>&1 || { + echo Error: afl-cc is unable to compile + exit 1 +} + +{ +mkdir in +dd if=/dev/zero of=in/in.txt bs=10K count=1 +} > /dev/null 2>&1 + +echo Ready, starting benchmark - this will take approx 20-30 seconds ... + +AFL_DISABLE_TRIM=1 AFL_NO_UI=1 AFL_TRY_AFFINITY=1 AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES=1 AFL_BENCH_JUST_ONE=1 time afl-fuzz -i in -o out -s 123 -D ./test-instr >> afl.log 2>&1 + +echo Analysis: + +CPUID=$(grep 'try binding to' afl.log | tail -n 1 | sed 's/.*#//' | sed 's/\..*//') +grep 'model name' /proc/cpuinfo | head -n 1 | sed 's/.*:/ CPU:/' +test -n "$CPUID" && grep -E '^processor|^cpu MHz' /proc/cpuinfo | grep -A1 -w "$CPUID" | grep 'cpu MHz' | head -n 1 | sed 's/.*:/ Mhz:/' +test -z "$CPUID" && grep 'cpu MHz' /proc/cpuinfo | head -n 1 | sed 's/.*:/ Mhz:/' +grep execs_per_sec out/default/fuzzer_stats | sed 's/.*:/ execs\/s:/' + +echo +echo "Comparison: (note that values can change by 10-15% per run)" +cat COMPARISON + +rm -rf in out test-instr afl.log diff --git a/docs/Changelog.md b/docs/Changelog.md index 8f2b2545..b809559e 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -7,6 +7,8 @@ - afl-fuzz: - added `AFL_FINAL_SYNC` which forces a final fuzzer sync (also for `-F`) before terminating. + - added benchmark/benchmark.sh if you want to see how good your fuzzing + speed is in comparison to other setups. 
### Version ++4.08c (release) -- cgit 1.4.1 From 213298fe5939df730d2341e2d2f75cd6daf77df7 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Mon, 21 Aug 2023 16:38:48 +0200 Subject: afl-whatsup add coverage output --- afl-whatsup | 12 ++++++++++++ docs/Changelog.md | 1 + 2 files changed, 13 insertions(+) (limited to 'docs/Changelog.md') diff --git a/afl-whatsup b/afl-whatsup index 6f29ab24..c1f0abaa 100755 --- a/afl-whatsup +++ b/afl-whatsup @@ -70,6 +70,8 @@ if [ -d queue ]; then fi +BC=`which bc 2>/dev/null` + RED=`tput setaf 9 1 1 2>/dev/null` GREEN=`tput setaf 2 1 1 2>/dev/null` BLUE=`tput setaf 4 1 1 2>/dev/null` @@ -91,6 +93,7 @@ TOTAL_CRASHES=0 TOTAL_HANGS=0 TOTAL_PFAV=0 TOTAL_PENDING=0 +TOTAL_COVERAGE= # Time since last find / crash / hang, formatted as string FMT_TIME="0 days 0 hours" @@ -147,6 +150,13 @@ for i in `find . -maxdepth 2 -iname fuzzer_stats | sort`; do RUN_UNIX=$run_time RUN_DAYS=$((RUN_UNIX / 60 / 60 / 24)) RUN_HRS=$(((RUN_UNIX / 60 / 60) % 24)) + COVERAGE=$(echo $bitmap_cvg|tr -d %) + if [ -n "$TOTAL_COVERAGE" -a -n "$B" ]; then + if [ "$(echo "$TOTAL_COVERAGE < $COVERAGE" | bc)" -eq 1 ]; then + TOTAL_COVERAGE=$COVERAGE + fi + fi + if [ -z "$TOTAL_COVERAGE" ]; then TOTAL_COVERAGE=$COVERAGE ; fi test -n "$cycles_wo_finds" && { test -z "$FIRST" && TOTAL_WCOP="${TOTAL_WCOP}/" @@ -227,6 +237,7 @@ for i in `find . -maxdepth 2 -iname fuzzer_stats | sort`; do echo " last_crash : $FMT_CRASH" echo " last_hang : $FMT_HANG" echo " cycles_wo_finds : $FMT_CWOP" + echo " coverage : $COVERAGE%" CPU_USAGE=$(ps aux | grep $fuzzer_pid | grep -v grep | awk '{print $3}') MEM_USAGE=$(ps aux | grep $fuzzer_pid | grep -v grep | awk '{print $4}') @@ -302,6 +313,7 @@ if [ "$ALIVE_CNT" -gt "1" ]; then echo " Pending per fuzzer : $((TOTAL_PFAV/ALIVE_CNT)) faves, $((TOTAL_PENDING/ALIVE_CNT)) total (on average)" fi +echo " Coverage reached : ${TOTAL_COVERAGE}%" echo " Crashes saved : $TOTAL_CRASHES" echo " Hangs saved : $TOTAL_HANGS" echo "Cycles without finds : $TOTAL_WCOP" diff --git a/docs/Changelog.md b/docs/Changelog.md index b809559e..dfb5afa1 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -7,6 +7,7 @@ - afl-fuzz: - added `AFL_FINAL_SYNC` which forces a final fuzzer sync (also for `-F`) before terminating. + - afl-whatsup: now also shows coverage reached - added benchmark/benchmark.sh if you want to see how good your fuzzing speed is in comparison to other setups. 
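
A quick usage sketch for the new benchmark script (assuming AFL++ was already
built in the checkout root): it is meant to be started from the benchmark/
directory, since it checks for ../afl-fuzz, ../afl-cc and
../SanitizerCoveragePCGUARD.so, and the execs/s figure it reports will of
course differ per machine.

    cd benchmark
    ./benchmark.sh   # instruments test-instr.c with PCGUARD, runs one short
                     # afl-fuzz session (approx. 20-30 seconds), then prints
                     # CPU model, MHz, execs/s and the COMPARISON table of
                     # reference machines
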
-- cgit 1.4.1 From f41d121f0767d929e34bbac7cb8d09ba4731730c Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 22 Aug 2023 10:03:03 +0200 Subject: afl-whatsup -m -n --- afl-whatsup | 89 +++++++++++++++++++++++++++++++++++++++---------------- docs/Changelog.md | 5 +++- 2 files changed, 67 insertions(+), 27 deletions(-) (limited to 'docs/Changelog.md') diff --git a/afl-whatsup b/afl-whatsup index c1f0abaa..47133a13 100755 --- a/afl-whatsup +++ b/afl-whatsup @@ -24,38 +24,60 @@ test "$1" = "-h" -o "$1" = "-hh" && { echo "Usage: $0 [-s] [-d] afl_output_directory" echo echo Options: - echo " -s - skip details and output summary results only" echo " -d - include dead fuzzer stats" + echo " -m - just show minimal stats" + echo " -n - no color output" + echo " -s - skip details and output summary results only" echo exit 1 } -unset SUMMARY_ONLY +unset MINIMAL_ONLY +unset NO_COLOR unset PROCESS_DEAD +unset SUMMARY_ONLY +unset RED +unset GREEN +unset YELLOW +unset BLUE +unset NC +unset RESET -while [ "$1" = "-s" -o "$1" = "-d" ]; do +if [ -z "$TERM" ]; then export TERM=vt220; fi - if [ "$1" = "-s" ]; then - SUMMARY_ONLY=1 - fi +while [ "$1" = "-d" -o "$1" = "-m" -o "$1" = "-n" -o "$1" = "-s" ]; do if [ "$1" = "-d" ]; then PROCESS_DEAD=1 fi + if [ "$1" = "-m" ]; then + MINIMAL_ONLY=1 + fi + + if [ "$1" = "-n" ]; then + NO_COLOR=1 + fi + + if [ "$1" = "-s" ]; then + SUMMARY_ONLY=1 + fi + shift done DIR="$1" -if [ "$DIR" = "" ]; then +if [ "$DIR" = "" -o "$DIR" = "-h" -o "$DIR" = "--help" ]; then - echo "Usage: $0 [-s] [-d] afl_output_directory" 1>&2 + echo "Usage: $0 [-d] [-m] [-n] [-s] afl_output_directory" 1>&2 echo 1>&2 echo Options: 1>&2 - echo " -s - skip details and output summary results only" 1>&2 echo " -d - include dead fuzzer stats" 1>&2 + echo " -m - just show minimal stats" 1>&2 + echo " -n - no color output" 1>&2 + echo " -s - skip details and output summary results only" 1>&2 echo 1>&2 exit 1 @@ -72,12 +94,14 @@ fi BC=`which bc 2>/dev/null` -RED=`tput setaf 9 1 1 2>/dev/null` -GREEN=`tput setaf 2 1 1 2>/dev/null` -BLUE=`tput setaf 4 1 1 2>/dev/null` -YELLOW=`tput setaf 11 1 1 2>/dev/null` -NC=`tput sgr0` -RESET="$NC" +if [ -z "$NO_COLOR" ]; then + RED=`tput setaf 9 1 1 2>/dev/null` + GREEN=`tput setaf 2 1 1 2>/dev/null` + BLUE=`tput setaf 4 1 1 2>/dev/null` + YELLOW=`tput setaf 11 1 1 2>/dev/null` + NC=`tput sgr0` + RESET="$NC" +fi CUR_TIME=`date +%s` @@ -235,14 +259,21 @@ for i in `find . -maxdepth 2 -iname fuzzer_stats | sort`; do echo " last_find : $FMT_FIND" echo " last_crash : $FMT_CRASH" - echo " last_hang : $FMT_HANG" - echo " cycles_wo_finds : $FMT_CWOP" + if [ -z "$MINIMAL_ONLY" ]; then + echo " last_hang : $FMT_HANG" + echo " cycles_wo_finds : $FMT_CWOP" + fi echo " coverage : $COVERAGE%" - CPU_USAGE=$(ps aux | grep $fuzzer_pid | grep -v grep | awk '{print $3}') - MEM_USAGE=$(ps aux | grep $fuzzer_pid | grep -v grep | awk '{print $4}') + if [ -z "$MINIMAL_ONLY" ]; then + + CPU_USAGE=$(ps aux | grep $fuzzer_pid | grep -v grep | awk '{print $3}') + MEM_USAGE=$(ps aux | grep $fuzzer_pid | grep -v grep | awk '{print $4}') + + echo " cpu usage $CPU_USAGE%, memory usage $MEM_USAGE%" + + fi - echo " cpu usage $CPU_USAGE%, memory usage $MEM_USAGE%" echo " cycles $((cycles_done + 1)), lifetime speed $EXEC_SEC execs/sec, items $cur_item/$corpus_count (${PATH_PERC}%)" if [ "$saved_crashes" = "0" ]; then @@ -302,21 +333,27 @@ if [ ! 
"$DEAD_CNT" = "0" ]; then fi echo " Total run time : $FMT_TIME" -echo " Total execs : $FMT_EXECS" -echo " Cumulative speed : $TOTAL_EPS execs/sec" +if [ -z "$MINIMAL_ONLY" ]; then + echo " Total execs : $FMT_EXECS" + echo " Cumulative speed : $TOTAL_EPS execs/sec" +fi if [ "$ALIVE_CNT" -gt "0" ]; then echo " Average speed : $((TOTAL_EPS / ALIVE_CNT)) execs/sec" fi -echo " Pending items : $TOTAL_PFAV faves, $TOTAL_PENDING total" +if [ -z "$MINIMAL_ONLY" ]; then + echo " Pending items : $TOTAL_PFAV faves, $TOTAL_PENDING total" +fi -if [ "$ALIVE_CNT" -gt "1" ]; then +if [ "$ALIVE_CNT" -gt "1" -o -n "$MINIMAL_ONLY" ]; then echo " Pending per fuzzer : $((TOTAL_PFAV/ALIVE_CNT)) faves, $((TOTAL_PENDING/ALIVE_CNT)) total (on average)" fi echo " Coverage reached : ${TOTAL_COVERAGE}%" echo " Crashes saved : $TOTAL_CRASHES" -echo " Hangs saved : $TOTAL_HANGS" -echo "Cycles without finds : $TOTAL_WCOP" +if [ -z "$MINIMAL_ONLY" ]; then + echo " Hangs saved : $TOTAL_HANGS" + echo "Cycles without finds : $TOTAL_WCOP" +fi echo " Time without finds : $TOTAL_LAST_FIND" echo diff --git a/docs/Changelog.md b/docs/Changelog.md index dfb5afa1..fa9099c0 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -7,7 +7,10 @@ - afl-fuzz: - added `AFL_FINAL_SYNC` which forces a final fuzzer sync (also for `-F`) before terminating. - - afl-whatsup: now also shows coverage reached + - afl-whatsup: + - now also shows coverage reached + - option -m shows only very relevant stats + - option -n will not use color in the output - added benchmark/benchmark.sh if you want to see how good your fuzzing speed is in comparison to other setups. -- cgit 1.4.1 From 19d0c6a4c5015368ff610418994b2dc8f3f66117 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 23 Aug 2023 17:35:24 +0200 Subject: afl-whatsup startup detection --- afl-whatsup | 63 ++++++++++++++++++++++++++++++++++++++++++++++++------- docs/Changelog.md | 1 + 2 files changed, 56 insertions(+), 8 deletions(-) (limited to 'docs/Changelog.md') diff --git a/afl-whatsup b/afl-whatsup index bbb73e47..ebd1ce61 100755 --- a/afl-whatsup +++ b/afl-whatsup @@ -100,6 +100,7 @@ if [ -d queue ]; then fi BC=`which bc 2>/dev/null` +FUSER=`which fuser 2>/dev/null` if [ -z "$NO_COLOR" ]; then RED=`tput setaf 9 1 1 2>/dev/null` @@ -116,6 +117,7 @@ TMP=`mktemp -t .afl-whatsup-XXXXXXXX` || TMP=`mktemp -p /data/local/tmp .afl-wha ALIVE_CNT=0 DEAD_CNT=0 +START_CNT=0 TOTAL_TIME=0 TOTAL_EXECS=0 @@ -177,6 +179,7 @@ for i in `find . -maxdepth 2 -iname fuzzer_stats | sort`; do sed 's/^command_line.*$/_skip:1/;s/[ ]*:[ ]*/="/;s/$/"/' "$i" >"$TMP" . "$TMP" DIR=$(dirname "$i") + DIRECTORY=$DIR DIR=${DIR##*/} RUN_UNIX=$run_time RUN_DAYS=$((RUN_UNIX / 60 / 60 / 24)) @@ -204,19 +207,59 @@ for i in `find . -maxdepth 2 -iname fuzzer_stats | sort`; do if ! kill -0 "$fuzzer_pid" 2>/dev/null; then - if [ "$SUMMARY_ONLY" = "" ]; then + IS_STARTING= + IS_DEAD= - echo " Instance is dead or running remotely, skipping." - echo + if [ -e "$i" ] && [ -e "$DIRECTORY/fuzzer_setup" ] && [ -n "$FUSER" ]; then + + if [ "$i" -ot "$DIRECTORY/fuzzer_setup" ]; then + + # fuzzer_setup is newer than fuzzer_stats, maybe the instance is starting? + TMP_PID=`fuser -v "$DIRECTORY" 2>&1 | grep afl-fuzz` + + if [ -n "$TMP_PID" ]; then + + if [ "$SUMMARY_ONLY" = "" ]; then + + echo " Instance is still starting up, skipping." 
+            echo
+
+          fi
+
+          START_CNT=$((START_CNT + 1))
+          last_find=0
+          IS_STARTING=1
+
+          if [ "$PROCESS_DEAD" = "" ]; then
+
+            continue
+
+          fi
+
+        fi
+
+      fi
 
   fi
 
-  DEAD_CNT=$((DEAD_CNT + 1))
-  last_find=0
+  if [ -z "$IS_STARTING" ]; then
+
+    if [ "$SUMMARY_ONLY" = "" ]; then
+
+      echo "  Instance is dead or running remotely, skipping."
+      echo
+
+    fi
 
-  if [ "$PROCESS_DEAD" = "" ]; then
+    DEAD_CNT=$((DEAD_CNT + 1))
+    IS_DEAD=1
+    last_find=0
 
-    continue
+    if [ "$PROCESS_DEAD" = "" ]; then
+
+      continue
+
+    fi
 
   fi
 
@@ -326,7 +369,7 @@ if [ "$PROCESS_DEAD" = "" ]; then
 else
 
   TXT="included in stats"
-  ALIVE_CNT=$(($ALIVE_CNT - $DEAD_CNT))
+  ALIVE_CNT=$(($ALIVE_CNT - $DEAD_CNT - $START_CNT))
 
 fi
 
@@ -338,6 +381,10 @@ fi
 
 echo "       Fuzzers alive : $ALIVE_CNT"
 
+if [ ! "$START_CNT" = "0" ]; then
+  echo "         Starting up : $START_CNT ($TXT)"
+fi
+
 if [ ! "$DEAD_CNT" = "0" ]; then
   echo "      Dead or remote : $DEAD_CNT ($TXT)"
 fi
diff --git a/docs/Changelog.md b/docs/Changelog.md
index fa9099c0..961b2940 100644
--- a/docs/Changelog.md
+++ b/docs/Changelog.md
@@ -8,6 +8,7 @@
     - added `AFL_FINAL_SYNC` which forces a final fuzzer sync (also for `-F`)
       before terminating.
   - afl-whatsup:
+    - detect instances that are starting up and show them as such, not dead
     - now also shows coverage reached
     - option -m shows only very relevant stats
     - option -n will not use color in the output
-- cgit 1.4.1


From 549e5dd9269238ac43ff482d439f7f671946185c Mon Sep 17 00:00:00 2001
From: vanhauser-thc
Date: Wed, 23 Aug 2023 18:02:33 +0200
Subject: AFL_IGNORE_SEED_PROBLEMS

---
 docs/Changelog.md     |  2 ++
 docs/env_variables.md |  3 +++
 include/afl-fuzz.h    |  7 +++----
 include/envs.h        |  1 +
 src/afl-fuzz-init.c   | 53 +++++++++++++++++++++++++++++++++++++++------------
 src/afl-fuzz-state.c  |  7 +++++++
 src/afl-fuzz.c        |  2 ++
 7 files changed, 59 insertions(+), 16 deletions(-)

(limited to 'docs/Changelog.md')

diff --git a/docs/Changelog.md b/docs/Changelog.md
index 961b2940..87c01f21 100644
--- a/docs/Changelog.md
+++ b/docs/Changelog.md
@@ -7,6 +7,8 @@
   - afl-fuzz:
     - added `AFL_FINAL_SYNC` which forces a final fuzzer sync (also for `-F`)
       before terminating.
+    - added AFL_IGNORE_SEED_PROBLEMS to skip over seeds that time out instead
+      of exiting with an error message
   - afl-whatsup:
     - detect instances that are starting up and show them as such, not dead
     - now also shows coverage reached
     - option -m shows only very relevant stats
diff --git a/docs/env_variables.md b/docs/env_variables.md
index 2ce274d3..3bb4e844 100644
--- a/docs/env_variables.md
+++ b/docs/env_variables.md
@@ -327,6 +327,9 @@ checks or alter some of the more exotic semantics of the tool:
     (`-i in`). This is an important feature to set when resuming a fuzzing
     session.
 
+  - `AFL_IGNORE_SEED_PROBLEMS` will skip over crashes and timeouts in the seeds
+    instead of exiting.
+
   - Setting `AFL_CRASH_EXITCODE` sets the exit code AFL++ treats as crash. For
     example, if `AFL_CRASH_EXITCODE='-1'` is set, each input resulting in a
     `-1` return code (i.e. 
`exit(-1)` got called), will be treated as if a crash had diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 3dfd2b2c..d02e852e 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -1,4 +1,3 @@ - /* american fuzzy lop++ - fuzzer header ------------------------------------ @@ -175,10 +174,10 @@ struct queue_entry { stats_skipped, /* stats: how often skipped */ stats_finds, /* stats: # of saved finds */ stats_crashes, /* stats: # of saved crashes */ - stats_tmouts, /* stats: # of saved timeouts */ + stats_tmouts, /* stats: # of saved timeouts */ #endif fuzz_level, /* Number of fuzzing iterations */ - n_fuzz_entry; /* offset in n_fuzz */ + n_fuzz_entry; /* offset in n_fuzz */ u64 exec_us, /* Execution time (us) */ handicap, /* Number of queue cycles behind */ @@ -402,7 +401,7 @@ typedef struct afl_env_vars { afl_keep_timeouts, afl_no_crash_readme, afl_ignore_timeouts, afl_no_startup_calibration, afl_no_warn_instability, afl_post_process_keep_original, afl_crashing_seeds_as_new_crash, - afl_final_sync; + afl_final_sync, afl_ignore_seed_problems; u8 *afl_tmpdir, *afl_custom_mutator_library, *afl_python_module, *afl_path, *afl_hang_tmout, *afl_forksrv_init_tmout, *afl_preload, diff --git a/include/envs.h b/include/envs.h index 3f5a9e1c..4259d6dd 100644 --- a/include/envs.h +++ b/include/envs.h @@ -113,6 +113,7 @@ static char *afl_environment_variables[] = { "AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES", "AFL_IGNORE_PROBLEMS", "AFL_IGNORE_PROBLEMS_COVERAGE", + "AFL_IGNORE_SEED_PROBLEMS", "AFL_IGNORE_TIMEOUTS", "AFL_IGNORE_UNKNOWN_ENVS", "AFL_IMPORT_FIRST", diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 4c09fab7..9fc0cc57 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -951,19 +951,47 @@ void perform_dry_run(afl_state_t *afl) { } else { - SAYF("\n" cLRD "[-] " cRST - "The program took more than %u ms to process one of the initial " - "test cases.\n" - " This is bad news; raising the limit with the -t option is " - "possible, but\n" - " will probably make the fuzzing process extremely slow.\n\n" + static int say_once = 0; + + if (!say_once) { + + SAYF( + "\n" cLRD "[-] " cRST + "The program took more than %u ms to process one of the " + "initial " + "test cases.\n" + " This is bad news; raising the limit with the -t option is " + "possible, but\n" + " will probably make the fuzzing process extremely slow.\n\n" + + " If this test case is just a fluke, the other option is to " + "just avoid it\n" + " altogether, and find one that is less of a CPU hog.\n", + afl->fsrv.exec_tmout); + + if (!afl->afl_env.afl_ignore_seed_problems) { + + FATAL("Test case '%s' results in a timeout", fn); + + } + + say_once = 1; + + } + + if (!q->was_fuzzed) { - " If this test case is just a fluke, the other option is to " - "just avoid it\n" - " altogether, and find one that is less of a CPU hog.\n", - afl->fsrv.exec_tmout); + q->was_fuzzed = 1; + --afl->pending_not_fuzzed; + --afl->active_items; - FATAL("Test case '%s' results in a timeout", fn); + } + + q->disabled = 1; + q->perf_score = 0; + + WARNF("Test case '%s' results in a timeout, skipping", fn); + break; } @@ -2270,7 +2298,8 @@ void check_crash_handling(void) { reporting the awful way. 
*/ #if !TARGET_OS_IPHONE - if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash\\>'")) return; + if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash\\>'")) + return; SAYF( "\n" cLRD "[-] " cRST diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index 97e00415..db82536d 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -316,6 +316,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) { afl->afl_env.afl_ignore_problems = get_afl_env(afl_environment_variables[i]) ? 1 : 0; + } else if (!strncmp(env, "AFL_IGNORE_SEED_PROBLEMS", + + afl_environment_variable_len)) { + + afl->afl_env.afl_ignore_seed_problems = + get_afl_env(afl_environment_variables[i]) ? 1 : 0; + } else if (!strncmp(env, "AFL_IGNORE_TIMEOUTS", afl_environment_variable_len)) { diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 43834172..08960ac6 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -275,6 +275,8 @@ static void usage(u8 *argv0, int more_help) { "AFL_IGNORE_PROBLEMS: do not abort fuzzing if an incorrect setup is detected\n" "AFL_IGNORE_PROBLEMS_COVERAGE: if set in addition to AFL_IGNORE_PROBLEMS - also\n" " ignore those libs for coverage\n" + "AFL_IGNORE_SEED_PROBLEMS: skip over crashes and timeouts in the seeds instead of\n" + " exiting\n" "AFL_IGNORE_TIMEOUTS: do not process or save any timeouts\n" "AFL_IGNORE_UNKNOWN_ENVS: don't warn on unknown env vars\n" "AFL_IMPORT_FIRST: sync and import test cases from other fuzzer instances first\n" -- cgit 1.4.1 From 1604351368c26a1dd91c43c054fb466b8093e86e Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 31 Aug 2023 14:45:03 +0200 Subject: changelog --- docs/Changelog.md | 2 ++ 1 file changed, 2 insertions(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 87c01f21..8d9a0aa8 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -14,6 +14,8 @@ - now also shows coverage reached - option -m shows only very relevant stats - option -n will not use color in the output + - frida_mode: + - fixes support for large map offsets - added benchmark/benchmark.sh if you want to see how good your fuzzing speed is in comparison to other setups. 
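
A short sketch of the intended AFL_IGNORE_SEED_PROBLEMS workflow; here
./target and the in/ seed directory are placeholders for any instrumented
fuzzing target and its corpus:

    # default behavior: the dry run aborts with a fatal "Test case '...'
    # results in a timeout" error on the first seed exceeding the -t limit
    afl-fuzz -i in -o out -- ./target @@

    # with the variable set, such seeds are only warned about ("results in
    # a timeout, skipping"), disabled with a zero perf_score, and the run
    # continues with the remaining seeds
    AFL_IGNORE_SEED_PROBLEMS=1 afl-fuzz -i in -o out -- ./target @@
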
-- cgit 1.4.1 From 9307ef4b7caa96754d0449361d48b5a98ef73d8f Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Mon, 4 Sep 2023 09:11:47 +0200 Subject: fix string transform laf --- docs/Changelog.md | 2 ++ instrumentation/compare-transform-pass.so.cc | 4 ---- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 8d9a0aa8..bccc6748 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -14,6 +14,8 @@ - now also shows coverage reached - option -m shows only very relevant stats - option -n will not use color in the output + - instrumentation: + - fix for a few string compare transform functions for LAF - frida_mode: - fixes support for large map offsets - added benchmark/benchmark.sh if you want to see how good your fuzzing diff --git a/instrumentation/compare-transform-pass.so.cc b/instrumentation/compare-transform-pass.so.cc index b0bbd39a..5a5415d7 100644 --- a/instrumentation/compare-transform-pass.so.cc +++ b/instrumentation/compare-transform-pass.so.cc @@ -228,7 +228,6 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, isStrcmp &= (!FuncName.compare("strcmp") || !FuncName.compare("xmlStrcmp") || !FuncName.compare("xmlStrEqual") || - !FuncName.compare("g_strcmp0") || !FuncName.compare("curl_strequal") || !FuncName.compare("strcsequal") || !FuncName.compare("g_strcmp0")); @@ -239,7 +238,6 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, !FuncName.compare("memcmp_const_time") || !FuncName.compare("memcmpct")); isStrncmp &= (!FuncName.compare("strncmp") || - !FuncName.compare("xmlStrncmp") || !FuncName.compare("curl_strnequal") || !FuncName.compare("xmlStrncmp")); isStrcasecmp &= (!FuncName.compare("strcasecmp") || @@ -508,10 +506,8 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, isCaseInsensitive = true; if (!Callee->getName().compare("xmlStrEqual") || - !Callee->getName().compare("g_strcmp0") || !Callee->getName().compare("curl_strequal") || !Callee->getName().compare("strcsequal") || - !Callee->getName().compare("xmlStrncmp") || !Callee->getName().compare("curl_strnequal")) success_is_one = true; -- cgit 1.4.1 From 3b835b7c8b2f73be6d5972951d049cef66c24abd Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 12 Sep 2023 16:05:56 +0200 Subject: increase sync length --- docs/Changelog.md | 1 + src/afl-fuzz.c | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index bccc6748..dfbadea3 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -9,6 +9,7 @@ before terminating. 
- added AFL_IGNORE_SEED_PROBLEMS to skip over seeds that time out instead
       of exiting with an error message
+    - allow -S/-M naming up to 50 characters (from 24)
   - afl-whatsup:
     - detect instances that are starting up and show them as such, not dead
     - now also shows coverage reached
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index a3d5e300..f659395e 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -1492,9 +1492,9 @@ int main(int argc, char **argv_orig, char **envp) {
 
   if (afl->sync_id) {
 
-    if (strlen(afl->sync_id) > 24) {
+    if (strlen(afl->sync_id) > 50) {
 
-      FATAL("sync_id max length is 24 characters");
+      FATAL("sync_id max length is 50 characters");
 
     }
 
-- cgit 1.4.1


From 6b73dee7da4e4e8bd227a9cb156c7a683d124682 Mon Sep 17 00:00:00 2001
From: vanhauser-thc
Date: Sat, 30 Sep 2023 12:42:40 +0200
Subject: add afl-addseeds tool

---
 GNUmakefile       |  2 +-
 afl-addseeds      | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 docs/Changelog.md |  1 +
 src/afl-fuzz.c    |  6 ++++++
 4 files changed, 62 insertions(+), 1 deletion(-)
 create mode 100755 afl-addseeds

(limited to 'docs/Changelog.md')

diff --git a/GNUmakefile b/GNUmakefile
index fadf20bd..5fd37147 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -32,7 +32,7 @@ VERSION = $(shell grep '^$(HASH)define VERSION ' ../config.h | cut -d '"' -f
 
 # PROGS intentionally omit afl-as, which gets installed elsewhere.
 
 PROGS = afl-fuzz afl-showmap afl-tmin afl-gotcpu afl-analyze
-SH_PROGS = afl-plot afl-cmin afl-cmin.bash afl-whatsup afl-system-config afl-persistent-config afl-cc
+SH_PROGS = afl-plot afl-cmin afl-cmin.bash afl-whatsup afl-addseeds afl-system-config afl-persistent-config afl-cc
 MANPAGES=$(foreach p, $(PROGS) $(SH_PROGS), $(p).8) afl-as.8
 ASAN_OPTIONS=detect_leaks=0
diff --git a/afl-addseeds b/afl-addseeds
new file mode 100755
index 00000000..bb2843a8
--- /dev/null
+++ b/afl-addseeds
@@ -0,0 +1,54 @@
+#!/bin/sh
+
+test -z "$1" -o "$1" = "-h" -o "$1" = "--help" && {
+  echo Syntax: afl-addseeds -o afl-out-dir [-i seed_file_or_dir] seed_file_or_seed_dir seed_file_or_seed_dir ...
+  echo
+  echo Options:
+  echo "  -o afl-out-dir  the output directory being used in the fuzzing campaign"
+  echo "  -i seed_file_or_dir  file or directory of files to add"
+  echo
+  echo Adds new seeds to an existing AFL++ fuzzing campaign.
+  exit 0
+}
+
+for TOOL in find ls; do
+  X=`which $TOOL`
+  test -n "$X" || { echo "Error: required tool '$TOOL' not found."; exit 1; }
+done
+
+TEST=`printf %06d 123 2>/dev/null`
+test "$TEST" = "000123" || { echo "Error: required tool 'printf' not found."; exit 1; }
+
+OUT=
+NEXT=
+for i in $*; do
+  test -n "$NEXT" && { OUT=$i ; NEXT=""; }
+  test "$i" = "-o" && { NEXT=1; }
+done
+
+test -d "$OUT" || { echo Error: $OUT is not an existing directory; exit 1; }
+OK=`ls $OUT/*/fuzzer_stats 2>/dev/null`
+test -n "$OK" || { echo "Error: $OUT is not an 'afl-fuzz -o ... ' output directory" ; exit 1; }
+
+OUTDIR=$OUT/addseeds/queue
+mkdir -p "$OUTDIR" 2>/dev/null
+test -d "$OUTDIR" || { echo Error: could not create $OUTDIR ; exit 1 ; }
+
+echo Adding seeds ...
+NEXTID=0
+for i in $*; do
+  test -z "$i" -o "$i" = "$OUT" -o "$i" = "-i" -o "$i" = "-o" || {
+    find "$i" -type f | while read FILE; do
+      N=xxx
+      while [ -n "$N" ]; do
+        ID=$NEXTID
+        N=`ls "$OUTDIR/id:$(printf %06d $ID),"* 2>/dev/null`
+        NEXTID=$(($NEXTID + 1))
+      done
+      FN=`echo "$FILE" | sed 's/.*\///'`
+      cp -v "$FILE" "$OUTDIR/id:$(printf %06d $ID),time:0,execs:0,orig:$FN"
+    done
+  }
+done
+
+echo Done.
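
For illustration, the new helper would be invoked like this, with out/ being
an existing `afl-fuzz -o` output directory of a running campaign and
newseeds/ a hypothetical directory holding the extra inputs:

    afl-addseeds -o out -i newseeds/
    ls out/addseeds/queue/
    # id:000000,time:0,execs:0,orig:<filename> ...

The files land in out/addseeds/queue, i.e. under a reserved pseudo-instance
name (see the afl-fuzz change below), so running instances can import them
through the normal sync mechanism.
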
diff --git a/docs/Changelog.md b/docs/Changelog.md index dfbadea3..101d380b 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -19,6 +19,7 @@ - fix for a few string compare transform functions for LAF - frida_mode: - fixes support for large map offsets + - added new tool afl-addseeds that adds new seeds to a running campaign - added benchmark/benchmark.sh if you want to see how good your fuzzing speed is in comparison to other setups. diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 8574b9b3..0a6755d7 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -1346,6 +1346,12 @@ int main(int argc, char **argv_orig, char **envp) { } + if (strcmp(afl->sync_id, "addseeds") == 0) { + + FATAL("-M/-S name 'addseeds' is a reserved name, choose something else"); + + } + if (afl->is_main_node == 1 && afl->schedule != FAST && afl->schedule != EXPLORE) { -- cgit 1.4.1 From 2230f88887e3e8d1793fdb98f9cd12d3449ba791 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 3 Nov 2023 11:19:14 +0100 Subject: add --help/--version/... --- afl-persistent-config | 7 ++++++- afl-system-config | 6 +++++- docs/Changelog.md | 2 ++ instrumentation/afl-compiler-rt.o.c | 24 ++++++++++++++---------- src/afl-fuzz.c | 18 ++++++++++++++++-- 5 files changed, 43 insertions(+), 14 deletions(-) (limited to 'docs/Changelog.md') diff --git a/afl-persistent-config b/afl-persistent-config index 3abcb866..d78db286 100755 --- a/afl-persistent-config +++ b/afl-persistent-config @@ -2,7 +2,7 @@ # written by jhertz # -test "$1" = "-h" -o "$1" = "-hh" && { +test "$1" = "-h" -o "$1" = "-hh" -o "$1" = "--help" && { echo 'afl-persistent-config' echo echo $0 @@ -17,6 +17,11 @@ test "$1" = "-h" -o "$1" = "-hh" && { exit 0 } +if [ $# -ne 0 ]; then + echo "ERROR: Unknown option(s): $@" + exit 1 +fi + echo echo "WARNING: This scripts makes permanent configuration changes to the system to" echo " increase the performance for fuzzing. As a result, the system also" diff --git a/afl-system-config b/afl-system-config index e64857eb..c633e4e8 100755 --- a/afl-system-config +++ b/afl-system-config @@ -1,5 +1,5 @@ #!/bin/sh -test "$1" = "-h" -o "$1" = "-hh" && { +test "$1" = "-h" -o "$1" = "-hh" -o "$1" = "--help" && { echo 'afl-system-config by Marc Heuse ' echo echo $0 @@ -13,6 +13,10 @@ test "$1" = "-h" -o "$1" = "-hh" && { echo configuration options. 
exit 0 } +if [ $# -ne 0 ]; then + echo "ERROR: Unknown option(s): $@" + exit 1 +fi DONE= PLATFORM=`uname -s` diff --git a/docs/Changelog.md b/docs/Changelog.md index 101d380b..bf1a7d87 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -10,6 +10,8 @@ - added AFL_IGNORE_SEED_PROBLEMS to skip over seeds that time out instead of exiting with an error message - allow -S/-M naming up to 50 characters (from 24) + - added scale support to CMPLOG (-l S) + - added --version and --help command line parameters - afl-whatsup: - detect instanced that are starting up and show them as such as not dead - now also shows coverage reached diff --git a/instrumentation/afl-compiler-rt.o.c b/instrumentation/afl-compiler-rt.o.c index c3197c8a..d6b4d6b4 100644 --- a/instrumentation/afl-compiler-rt.o.c +++ b/instrumentation/afl-compiler-rt.o.c @@ -872,7 +872,7 @@ static void __afl_start_snapshots(void) { if (__afl_debug) { - fprintf(stderr, "target forkserver recv: %08x\n", was_killed); + fprintf(stderr, "DEBUG: target forkserver recv: %08x\n", was_killed); } @@ -1139,7 +1139,7 @@ static void __afl_start_forkserver(void) { if (__afl_debug) { - fprintf(stderr, "target forkserver recv: %08x\n", was_killed); + fprintf(stderr, "DEBUG: target forkserver recv: %08x\n", was_killed); } @@ -1472,6 +1472,7 @@ __attribute__((constructor(1))) void __afl_auto_second(void) { __afl_debug = 1; fprintf(stderr, "DEBUG: debug enabled\n"); + fprintf(stderr, "DEBUG: AFL++ afl-compiler-rt" VERSION "\n"); } @@ -1700,11 +1701,12 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) { if (__afl_debug) { - fprintf(stderr, - "Running __sanitizer_cov_trace_pc_guard_init: %p-%p (%lu edges) " - "after_fs=%u\n", - start, stop, (unsigned long)(stop - start), - __afl_already_initialized_forkserver); + fprintf( + stderr, + "DEBUG: Running __sanitizer_cov_trace_pc_guard_init: %p-%p (%lu edges) " + "after_fs=%u\n", + start, stop, (unsigned long)(stop - start), + __afl_already_initialized_forkserver); } @@ -1802,7 +1804,8 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) { u8 ignore_dso_after_fs = !!getenv("AFL_IGNORE_PROBLEMS_COVERAGE"); if (__afl_debug && ignore_dso_after_fs) { - fprintf(stderr, "Ignoring coverage from dynamically loaded code\n"); + fprintf(stderr, + "DEBUG: Ignoring coverage from dynamically loaded code\n"); } @@ -1872,7 +1875,8 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) { if (__afl_debug) { fprintf(stderr, - "Done __sanitizer_cov_trace_pc_guard_init: __afl_final_loc = %u\n", + "DEBUG: Done __sanitizer_cov_trace_pc_guard_init: __afl_final_loc " + "= %u\n", __afl_final_loc); } @@ -1883,7 +1887,7 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) { if (__afl_debug) { - fprintf(stderr, "Reinit shm necessary (+%u)\n", + fprintf(stderr, "DEBUG: Reinit shm necessary (+%u)\n", __afl_final_loc - __afl_map_size); } diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 2538f4a4..6a8a6aae 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -486,6 +486,22 @@ int main(int argc, char **argv_orig, char **envp) { struct timeval tv; struct timezone tz; + doc_path = access(DOC_PATH, F_OK) != 0 ? 
(u8 *)"docs" : (u8 *)DOC_PATH; + + if (argc > 1 && strcmp(argv_orig[1], "--version") == 0) { + + printf("afl-fuzz" VERSION "\n"); + exit(0); + + } + + if (argc > 1 && strcmp(argv_orig[1], "--help") == 0) { + + usage(argv_orig[0], 1); + exit(0); + + } + #if defined USE_COLOR && defined ALWAYS_COLORED if (getenv("AFL_NO_COLOR") || getenv("AFL_NO_COLOUR")) { @@ -515,8 +531,6 @@ int main(int argc, char **argv_orig, char **envp) { SAYF(cCYA "afl-fuzz" VERSION cRST " based on afl by Michal Zalewski and a large online community\n"); - doc_path = access(DOC_PATH, F_OK) != 0 ? (u8 *)"docs" : (u8 *)DOC_PATH; - gettimeofday(&tv, &tz); rand_set_seed(afl, tv.tv_sec ^ tv.tv_usec ^ getpid()); -- cgit 1.4.1 From ac0ad563480e3bf1fb69349e960b7957fffe75df Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 7 Nov 2023 10:31:09 +0100 Subject: fix dictionary and cmin --- afl-cmin | 32 +++++++++---------- afl-cmin.bash | 86 ++++++++++++++++++++++++++++++--------------------- docs/Changelog.md | 2 ++ src/afl-fuzz-extras.c | 5 +-- 4 files changed, 70 insertions(+), 55 deletions(-) (limited to 'docs/Changelog.md') diff --git a/afl-cmin b/afl-cmin index 23532b63..566f157d 100755 --- a/afl-cmin +++ b/afl-cmin @@ -259,22 +259,20 @@ BEGIN { # Do a sanity check to discourage the use of /tmp, since we can't really # handle this safely from an awk script. - #if (!ENVIRON["AFL_ALLOW_TMP"]) { - # dirlist[0] = in_dir - # dirlist[1] = target_bin - # dirlist[2] = out_dir - # dirlist[3] = stdin_file - # "pwd" | getline dirlist[4] # current directory - # for (dirind in dirlist) { - # dir = dirlist[dirind] - # - # if (dir ~ /^(\/var)?\/tmp/) { - # print "[-] Error: do not use this script in /tmp or /var/tmp." > "/dev/stderr" - # exit 1 - # } - # } - # delete dirlist - #} + if (!ENVIRON["AFL_ALLOW_TMP"]) { + dirlist[0] = in_dir + dirlist[1] = target_bin + dirlist[2] = out_dir + dirlist[3] = stdin_file + "pwd" | getline dirlist[4] # current directory + for (dirind in dirlist) { + dir = dirlist[dirind] + if (dir ~ /^(\/var)?\/tmp/) { + print "[-] Warning: do not use this script in /tmp or /var/tmp for security reasons." > "/dev/stderr" + } + } + delete dirlist + } if (threads && stdin_file) { print "[-] Error: -T and -f cannot be used together." > "/dev/stderr" @@ -430,7 +428,7 @@ BEGIN { } else { stat_format = "-f '%z %N'" # *BSD, MacOS } - cmdline = "(cd "in_dir" && find . \\( ! -name \".*\" -a -type d \\) -o -type f -exec stat "stat_format" \\{\\} + | sort -k1n -k2r)" + cmdline = "(cd "in_dir" && find . \\( ! -name \".*\" -a -type d \\) -o -type f -exec stat "stat_format" \\{\\} + | sort -k1n -k2r) | grep -Ev '^0'" #cmdline = "ls "in_dir" | (cd "in_dir" && xargs stat "stat_format" 2>/dev/null) | sort -k1n -k2r" #cmdline = "(cd "in_dir" && stat "stat_format" *) | sort -k1n -k2r" #cmdline = "(cd "in_dir" && ls | xargs stat "stat_format" ) | sort -k1n -k2r" diff --git a/afl-cmin.bash b/afl-cmin.bash index b326bee8..fda48fb4 100755 --- a/afl-cmin.bash +++ b/afl-cmin.bash @@ -167,29 +167,28 @@ fi # Do a sanity check to discourage the use of /tmp, since we can't really # handle this safely from a shell script. -#if [ "$AFL_ALLOW_TMP" = "" ]; then -# -# echo "$IN_DIR" | grep -qE '^(/var)?/tmp/' -# T1="$?" -# -# echo "$TARGET_BIN" | grep -qE '^(/var)?/tmp/' -# T2="$?" -# -# echo "$OUT_DIR" | grep -qE '^(/var)?/tmp/' -# T3="$?" -# -# echo "$STDIN_FILE" | grep -qE '^(/var)?/tmp/' -# T4="$?" -# -# echo "$PWD" | grep -qE '^(/var)?/tmp/' -# T5="$?" 
-# -# if [ "$T1" = "0" -o "$T2" = "0" -o "$T3" = "0" -o "$T4" = "0" -o "$T5" = "0" ]; then -# echo "[-] Error: do not use this script in /tmp or /var/tmp." 1>&2 -# exit 1 -# fi -# -#fi +if [ "$AFL_ALLOW_TMP" = "" ]; then + + echo "$IN_DIR" | grep -qE '^(/var)?/tmp/' + T1="$?" + + echo "$TARGET_BIN" | grep -qE '^(/var)?/tmp/' + T2="$?" + + echo "$OUT_DIR" | grep -qE '^(/var)?/tmp/' + T3="$?" + + echo "$STDIN_FILE" | grep -qE '^(/var)?/tmp/' + T4="$?" + + echo "$PWD" | grep -qE '^(/var)?/tmp/' + T5="$?" + + if [ "$T1" = "0" -o "$T2" = "0" -o "$T3" = "0" -o "$T4" = "0" -o "$T5" = "0" ]; then + echo "[-] Warning: do not use this script in /tmp or /var/tmp for security reasons." 1>&2 + fi + +fi # If @@ is specified, but there's no -f, let's come up with a temporary input # file name. @@ -423,10 +422,14 @@ if [ "$THREADS" = "" ]; then ls "$IN_DIR" | while read -r fn; do - CUR=$((CUR+1)) - printf "\\r Processing file $CUR/$IN_COUNT... " + if [ -s "$IN_DIR/$fn" ]; then - "$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/$fn" -Z $EXTRA_PAR -- "$@" <"$IN_DIR/$fn" + CUR=$((CUR+1)) + printf "\\r Processing file $CUR/$IN_COUNT... " + + "$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/$fn" -Z $EXTRA_PAR -- "$@" <"$IN_DIR/$fn" + + fi done @@ -434,11 +437,15 @@ if [ "$THREADS" = "" ]; then ls "$IN_DIR" | while read -r fn; do - CUR=$((CUR+1)) - printf "\\r Processing file $CUR/$IN_COUNT... " + if [ -s "$IN_DIR/$fn" ]; then + + CUR=$((CUR+1)) + printf "\\r Processing file $CUR/$IN_COUNT... " + + cp "$IN_DIR/$fn" "$STDIN_FILE" + "$SHOWMAP" -m "$MEM_LIMIT" -t "$TIMEOUT" -o "$TRACE_DIR/$fn" -Z $EXTRA_PAR -H "$STDIN_FILE" -- "$@" extras = afl_realloc((void **)&afl->extras, (afl->extras_cnt + 1) * sizeof(struct extra_data)); + char *hexdigits = "0123456789abcdef"; + if (unlikely(!afl->extras)) { PFATAL("alloc"); } wptr = afl->extras[afl->extras_cnt].data = ck_alloc(rptr - lptr); @@ -184,13 +186,12 @@ void load_extras_file(afl_state_t *afl, u8 *fname, u32 *min_len, u32 *max_len, while (*lptr) { - char *hexdigits = "0123456789abcdef"; - switch (*lptr) { case 1 ... 31: case 128 ... 255: WARNF("Non-printable characters in line %u.", cur_line); + ++lptr; continue; break; -- cgit 1.4.1 From 5681267bbc901941e6056390abfe6aad43b45775 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Mon, 20 Nov 2023 09:32:00 +0100 Subject: nits --- docs/Changelog.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index c74a9ad7..1e2a4765 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -24,7 +24,7 @@ - fixes support for large map offsets - afl-cmin/afl-cmin.bash: prevent unneeded file errors - added new tool afl-addseeds that adds new seeds to a running campaign - - added benchmark/benchmark.sh if you want to see how good your fuzzing + - added benchmark/benchmark.py if you want to see how good your fuzzing speed is in comparison to other setups. 
-- cgit 1.4.1 From dd9a04c901c79fe2f3f078de6cc0777e3a5d96df Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 28 Nov 2023 09:14:29 +0100 Subject: code format --- docs/Changelog.md | 1 + src/afl-fuzz-run.c | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 1e2a4765..f7842d59 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -13,6 +13,7 @@ - added scale support to CMPLOG (-l S) - added --version and --help command line parameters - fixed endless loop when reading malformed dictionaries + - new custom mutator function: post_run - thanks to yangzao! - afl-whatsup: - detect instanced that are starting up and show them as such as not dead - now also shows coverage reached diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index b6d5df95..34a5ff81 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -62,12 +62,16 @@ fuzz_run_target(afl_state_t *afl, afl_forkserver_t *fsrv, u32 timeout) { /* If post_run() function is defined in custom mutator, the function will be called each time after AFL++ executes the target program. */ - + if (unlikely(afl->custom_mutators_count)) { LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, { - if (el->afl_custom_post_run) { el->afl_custom_post_run(el->data); } + if (unlikely(el->afl_custom_post_run)) { + + el->afl_custom_post_run(el->data); + + } }); @@ -1123,3 +1127,4 @@ common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) { return 0; } + -- cgit 1.4.1 From 74f8ca6b468b6d89e8d588e3835486be48184893 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 28 Nov 2023 10:26:37 +0100 Subject: improve cmplog --- docs/Changelog.md | 4 ++- instrumentation/afl-compiler-rt.o.c | 10 ++++++-- instrumentation/cmplog-instructions-pass.cc | 38 +++++++++++++++-------------- src/afl-fuzz-redqueen.c | 4 +++ 4 files changed, 35 insertions(+), 21 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index f7842d59..b2e9fbf6 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -10,7 +10,9 @@ - added AFL_IGNORE_SEED_PROBLEMS to skip over seeds that time out instead of exiting with an error message - allow -S/-M naming up to 50 characters (from 24) - - added scale support to CMPLOG (-l S) + - CMPLOG: + - added scale support (-l S) + - skip unhelpful insertions (u8) - added --version and --help command line parameters - fixed endless loop when reading malformed dictionaries - new custom mutator function: post_run - thanks to yangzao! 
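The Changelog hunk above introduces the CMPLOG "scale" mode, and the remaining hunks of this commit (below) disable the 1-byte __cmplog_ins_hook1 path, which appears to be what the "skip unhelpful insertions (u8)" entry refers to. A hedged sketch of how the new -l S flag would be used, with placeholder file names and assuming the usual two-binary CMPLOG setup:

    # build a separate CMPLOG-instrumented copy of the target (placeholder names)
    AFL_LLVM_CMPLOG=1 afl-clang-fast -o target.cmplog target.c
    afl-clang-fast -o target target.c
    # pass the cmplog binary with -c and add scale mode on top of the level, e.g.:
    afl-fuzz -i in -o out -c ./target.cmplog -l 2S -- ./target @@

The level digit and other -l option letters are from the existing documentation; "S" itself is taken verbatim from the changelog entry above.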
diff --git a/instrumentation/afl-compiler-rt.o.c b/instrumentation/afl-compiler-rt.o.c index 8ce8bca1..106892e2 100644 --- a/instrumentation/afl-compiler-rt.o.c +++ b/instrumentation/afl-compiler-rt.o.c @@ -1910,6 +1910,10 @@ void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2, uint8_t attr) { // fprintf(stderr, "hook1 arg0=%02x arg1=%02x attr=%u\n", // (u8) arg1, (u8) arg2, attr); + return; + + /* + if (unlikely(!__afl_cmp_map || arg1 == arg2)) return; uintptr_t k = (uintptr_t)__builtin_return_address(0); @@ -1936,6 +1940,8 @@ void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2, uint8_t attr) { __afl_cmp_map->log[k][hits].v0 = arg1; __afl_cmp_map->log[k][hits].v1 = arg2; + */ + } void __cmplog_ins_hook2(uint16_t arg1, uint16_t arg2, uint8_t attr) { @@ -2142,13 +2148,13 @@ void __cmplog_ins_hook16(uint128_t arg1, uint128_t arg2, uint8_t attr) { void __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2) { - __cmplog_ins_hook1(arg1, arg2, 0); + //__cmplog_ins_hook1(arg1, arg2, 0); } void __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2) { - __cmplog_ins_hook1(arg1, arg2, 0); + //__cmplog_ins_hook1(arg1, arg2, 0); } diff --git a/instrumentation/cmplog-instructions-pass.cc b/instrumentation/cmplog-instructions-pass.cc index 9cd1dc59..8be8c294 100644 --- a/instrumentation/cmplog-instructions-pass.cc +++ b/instrumentation/cmplog-instructions-pass.cc @@ -165,23 +165,25 @@ bool CmpLogInstructions::hookInstrs(Module &M) { IntegerType *Int64Ty = IntegerType::getInt64Ty(C); IntegerType *Int128Ty = IntegerType::getInt128Ty(C); -#if LLVM_VERSION_MAJOR >= 9 - FunctionCallee -#else - Constant * -#endif - c1 = M.getOrInsertFunction("__cmplog_ins_hook1", VoidTy, Int8Ty, Int8Ty, - Int8Ty -#if LLVM_VERSION_MAJOR < 5 - , - NULL -#endif - ); -#if LLVM_VERSION_MAJOR >= 9 - FunctionCallee cmplogHookIns1 = c1; -#else - Function *cmplogHookIns1 = cast(c1); -#endif + /* + #if LLVM_VERSION_MAJOR >= 9 + FunctionCallee + #else + Constant * + #endif + c1 = M.getOrInsertFunction("__cmplog_ins_hook1", VoidTy, Int8Ty, Int8Ty, + Int8Ty + #if LLVM_VERSION_MAJOR < 5 + , + NULL + #endif + ); + #if LLVM_VERSION_MAJOR >= 9 + FunctionCallee cmplogHookIns1 = c1; + #else + Function *cmplogHookIns1 = cast(c1); + #endif + */ #if LLVM_VERSION_MAJOR >= 9 FunctionCallee @@ -619,7 +621,7 @@ bool CmpLogInstructions::hookInstrs(Module &M) { switch (cast_size) { case 8: - IRB.CreateCall(cmplogHookIns1, args); + // IRB.CreateCall(cmplogHookIns1, args); break; case 16: IRB.CreateCall(cmplogHookIns2, args); diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index 13f164f5..c0ea5005 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -1906,6 +1906,8 @@ static u8 cmp_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf, #endif + if (hshape < 2) { return 0; } + for (i = 0; i < loggeds; ++i) { struct cmp_operands *o = &afl->shm.cmp_map->log[key][i]; @@ -2698,6 +2700,8 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf, hshape = SHAPE_BYTES(h->shape); + if (hshape < 2) { return 0; } + if (h->hits > CMP_MAP_RTN_H) { loggeds = CMP_MAP_RTN_H; -- cgit 1.4.1 From ae9cdb34e4fdc10c7c2d1c775238a7501fda288a Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 14 Dec 2023 16:04:00 +0100 Subject: AFL_FUZZER_LOOPCOUNT --- docs/Changelog.md | 1 + utils/aflpp_driver/aflpp_driver.c | 7 +++++++ 2 files changed, 8 insertions(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index b2e9fbf6..7faa0ab3 100644 --- a/docs/Changelog.md +++ 
b/docs/Changelog.md @@ -25,6 +25,7 @@ - fix for a few string compare transform functions for LAF - frida_mode: - fixes support for large map offsets + - support for AFL_FUZZER_LOOPCOUNT for afl.rs and LLVMFuzzerTestOneInput - afl-cmin/afl-cmin.bash: prevent unneeded file errors - added new tool afl-addseeds that adds new seeds to a running campaign - added benchmark/benchmark.py if you want to see how good your fuzzing diff --git a/utils/aflpp_driver/aflpp_driver.c b/utils/aflpp_driver/aflpp_driver.c index dab7fd95..9ffb2383 100644 --- a/utils/aflpp_driver/aflpp_driver.c +++ b/utils/aflpp_driver/aflpp_driver.c @@ -292,6 +292,7 @@ __attribute__((weak)) int main(int argc, char **argv) { "afl-fuzz will run N iterations before re-spawning the process " "(default: " "INT_MAX)\n" + "You can also use AFL_FUZZER_LOOPCOUNT to set N\n" "For stdin input processing, pass '-' as single command line option.\n" "For file input processing, pass '@@' as single command line option.\n" "To use with afl-cmin or afl-cmin.bash pass '-' as single command line " @@ -379,6 +380,12 @@ __attribute__((weak)) int LLVMFuzzerRunDriver( } + if (getenv("AFL_FUZZER_LOOPCOUNT")) { + + N = atoi(getenv("AFL_FUZZER_LOOPCOUNT")); + + } + assert(N > 0); __afl_manual_init(); -- cgit 1.4.1 From 37505928bcec63a08fe50cdebdbf7b9b28b952d0 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 15 Dec 2023 09:23:30 +0100 Subject: fix 2 mutation bugs --- docs/Changelog.md | 3 +++ include/afl-mutations.h | 16 ++++++++-------- 2 files changed, 11 insertions(+), 8 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 7faa0ab3..0d75782d 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -5,6 +5,7 @@ ### Version ++4.09a (dev) - afl-fuzz: + - fixed the new mutation implementation for two bugs - added `AFL_FINAL_SYNC` which forces a final fuzzer sync (also for `-F`) before terminating. - added AFL_IGNORE_SEED_PROBLEMS to skip over seeds that time out instead @@ -23,6 +24,8 @@ - option -n will not use color in the output - instrumentation: - fix for a few string compare transform functions for LAF + - we are instrumenting __cxx internal functions again. this might break + a few targets, please report if so. 
- frida_mode: - fixes support for large map offsets - support for AFL_FUZZER_LOOPCOUNT for afl.rs and LLVMFuzzerTestOneInput diff --git a/include/afl-mutations.h b/include/afl-mutations.h index d709b90d..6338c93c 100644 --- a/include/afl-mutations.h +++ b/include/afl-mutations.h @@ -2456,14 +2456,14 @@ inline u32 afl_mutate(afl_state_t *afl, u8 *buf, u32 len, u32 steps, } - char buf[20]; - snprintf(buf, sizeof(buf), "%" PRId64, val); + char numbuf[32]; + snprintf(numbuf, sizeof(buf), "%" PRId64, val); u32 old_len = off2 - off; - u32 new_len = strlen(buf); + u32 new_len = strlen(numbuf); if (old_len == new_len) { - memcpy(buf + off, buf, new_len); + memcpy(buf + off, numbuf, new_len); } else { @@ -2473,7 +2473,7 @@ inline u32 afl_mutate(afl_state_t *afl, u8 *buf, u32 len, u32 steps, /* Inserted part */ - memcpy(tmp_buf + off, buf, new_len); + memcpy(tmp_buf + off, numbuf, new_len); /* Tail */ memcpy(tmp_buf + off + new_len, buf + off2, len - off2); @@ -2509,9 +2509,9 @@ inline u32 afl_mutate(afl_state_t *afl, u8 *buf, u32 len, u32 steps, } u64 val = rand_next(afl); - char buf[20]; - snprintf(buf, sizeof(buf), "%llu", val); - memcpy(buf + pos, buf, len); + char numbuf[32]; + snprintf(numbuf, sizeof(numbuf), "%llu", val); + memcpy(buf + pos, numbuf, len); break; -- cgit 1.4.1 From 8a7705aedbb759dd8ff331d47a99cc6bbc17902b Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 15 Dec 2023 09:28:39 +0100 Subject: v4.09c release --- README.md | 4 ++-- docs/Changelog.md | 2 +- include/config.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'docs/Changelog.md') diff --git a/README.md b/README.md index 322ebcf2..a09147c5 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,9 @@ AFL++ logo -Release version: [4.08c](https://github.com/AFLplusplus/AFLplusplus/releases) +Release version: [4.09c](https://github.com/AFLplusplus/AFLplusplus/releases) -GitHub version: 4.09a +GitHub version: 4.09c Repository: [https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus) diff --git a/docs/Changelog.md b/docs/Changelog.md index 0d75782d..2dfcb482 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -3,7 +3,7 @@ This is the list of all noteworthy changes made in every public release of the tool. See README.md for the general instruction manual. 
-### Version ++4.09a (dev) +### Version ++4.09c (release) - afl-fuzz: - fixed the new mutation implementation for two bugs - added `AFL_FINAL_SYNC` which forces a final fuzzer sync (also for `-F`) diff --git a/include/config.h b/include/config.h index 988e536e..b346d7b4 100644 --- a/include/config.h +++ b/include/config.h @@ -26,7 +26,7 @@ /* Version string: */ // c = release, a = volatile github dev, e = experimental branch -#define VERSION "++4.09a" +#define VERSION "++4.09c" /****************************************************** * * -- cgit 1.4.1 From ca0c9f6d1797bac121996c3b2ac50423f6e67b8f Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 15 Dec 2023 09:44:02 +0100 Subject: v4.10a init --- README.md | 2 +- docs/Changelog.md | 5 ++++- include/config.h | 2 +- 3 files changed, 6 insertions(+), 3 deletions(-) (limited to 'docs/Changelog.md') diff --git a/README.md b/README.md index a09147c5..fd48cb14 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ Release version: [4.09c](https://github.com/AFLplusplus/AFLplusplus/releases) -GitHub version: 4.09c +GitHub version: 4.10a Repository: [https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus) diff --git a/docs/Changelog.md b/docs/Changelog.md index 2dfcb482..2ac87f47 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -3,6 +3,10 @@ This is the list of all noteworthy changes made in every public release of the tool. See README.md for the general instruction manual. +### Version ++4.10a (dev) + - ... + + ### Version ++4.09c (release) - afl-fuzz: - fixed the new mutation implementation for two bugs @@ -34,7 +38,6 @@ - added benchmark/benchmark.py if you want to see how good your fuzzing speed is in comparison to other setups. - ### Version ++4.08c (release) - afl-fuzz: - new mutation engine: mutations that favor discovery more paths are diff --git a/include/config.h b/include/config.h index b346d7b4..63340650 100644 --- a/include/config.h +++ b/include/config.h @@ -26,7 +26,7 @@ /* Version string: */ // c = release, a = volatile github dev, e = experimental branch -#define VERSION "++4.09c" +#define VERSION "++4.10a" /****************************************************** * * -- cgit 1.4.1 From 353ae3682a02634abae0b6590dfb47b762cf6bfa Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 15 Dec 2023 10:24:12 +0100 Subject: switch to explore powerschedule as default --- docs/Changelog.md | 3 ++- src/afl-fuzz-state.c | 3 +-- src/afl-fuzz.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 2ac87f47..150ce6c7 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -4,7 +4,8 @@ release of the tool. See README.md for the general instruction manual. ### Version ++4.10a (dev) - - ... + - default power schedule is now EXPLORE, due a fix in fast schedules + explore is slightly better now. ### Version ++4.09c (release) diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index db82536d..7d6fdfb9 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -89,9 +89,8 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) { afl->w_end = 0.3; afl->g_max = 5000; afl->period_pilot_tmp = 5000.0; - afl->schedule = FAST; /* Power schedule (default: FAST) */ + afl->schedule = EXPLORE; /* Power schedule (default: EXPLORE)*/ afl->havoc_max_mult = HAVOC_MAX_MULT; - afl->clear_screen = 1; /* Window resized? 
*/ afl->havoc_div = 1; /* Cycle count divisor for havoc */ afl->stage_name = "init"; /* Name of the current fuzz stage */ diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index becad351..dd990e71 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -138,7 +138,7 @@ static void usage(u8 *argv0, int more_help) { "to\n" " exploit mode, and back on new coverage (default: %u)\n" " -p schedule - power schedules compute a seed's performance score:\n" - " fast(default), explore, exploit, seek, rare, mmopt, " + " explore(default), fast, exploit, seek, rare, mmopt, " "coe, lin\n" " quad -- see docs/FAQ.md for more information\n" " -f file - location read by the fuzzed program (default: stdin " -- cgit 1.4.1 From 806a76afaeb1e1c99847df95af4181b3d1b48a91 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 19 Dec 2023 11:15:33 +0100 Subject: fix bad fix for MUT_STRATEGY_ARRAY_SIZE --- docs/Changelog.md | 7 +++++-- include/afl-mutations.h | 3 ++- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 150ce6c7..133e460b 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -4,8 +4,11 @@ release of the tool. See README.md for the general instruction manual. ### Version ++4.10a (dev) - - default power schedule is now EXPLORE, due a fix in fast schedules - explore is slightly better now. + - afl-fuzz: + - default power schedule is now EXPLORE, due a fix in fast schedules + explore is slightly better now. + - fixed minor issues in the mutation engine, thanks to @futhewo for + reporting! ### Version ++4.09c (release) diff --git a/include/afl-mutations.h b/include/afl-mutations.h index dcc62d0b..75e66484 100644 --- a/include/afl-mutations.h +++ b/include/afl-mutations.h @@ -32,7 +32,7 @@ #include #include "afl-fuzz.h" -#define MUT_STRATEGY_ARRAY_SIZE 255 +#define MUT_STRATEGY_ARRAY_SIZE 256 enum { @@ -1082,6 +1082,7 @@ u32 mutation_strategy_exploration_binary[MUT_STRATEGY_ARRAY_SIZE] = { MUT_CLONE_COPY, MUT_CLONE_COPY, MUT_CLONE_COPY, + MUT_CLONE_COPY, MUT_CLONE_FIXED, MUT_CLONE_FIXED, MUT_CLONE_FIXED, -- cgit 1.4.1 From daaefcddc063b356018c29027494a00bcfc3e240 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sun, 24 Dec 2023 10:35:02 +0100 Subject: code format --- docs/Changelog.md | 2 ++ instrumentation/SanitizerCoverageLTO.so.cc | 48 +++++++++++++++------------- instrumentation/afl-llvm-dict2file.so.cc | 33 ++++++++++--------- instrumentation/cmplog-routines-pass.cc | 15 +++++---- instrumentation/compare-transform-pass.so.cc | 30 +++++++++-------- 5 files changed, 70 insertions(+), 58 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 133e460b..c8f04217 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -9,6 +9,8 @@ explore is slightly better now. - fixed minor issues in the mutation engine, thanks to @futhewo for reporting! + - instrumentation: + - LLVM 18 support, thanks to @devnexen! 
### Version ++4.09c (release) diff --git a/instrumentation/SanitizerCoverageLTO.so.cc b/instrumentation/SanitizerCoverageLTO.so.cc index 8365cf65..68423029 100644 --- a/instrumentation/SanitizerCoverageLTO.so.cc +++ b/instrumentation/SanitizerCoverageLTO.so.cc @@ -692,33 +692,37 @@ bool ModuleSanitizerCoverageLTO::instrumentModule( * prototype */ FunctionType *FT = Callee->getFunctionType(); - isStrcmp &= FT->getNumParams() == 2 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == - IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); - isStrcasecmp &= FT->getNumParams() == 2 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == - IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); + isStrcmp &= + FT->getNumParams() == 2 && + FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); + isStrcasecmp &= + FT->getNumParams() == 2 && + FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); isMemcmp &= FT->getNumParams() == 3 && FT->getReturnType()->isIntegerTy(32) && FT->getParamType(0)->isPointerTy() && FT->getParamType(1)->isPointerTy() && FT->getParamType(2)->isIntegerTy(); - isStrncmp &= FT->getNumParams() == 3 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == - IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && - FT->getParamType(2)->isIntegerTy(); - isStrncasecmp &= FT->getNumParams() == 3 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == - IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && - FT->getParamType(2)->isIntegerTy(); + isStrncmp &= + FT->getNumParams() == 3 && + FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && + FT->getParamType(2)->isIntegerTy(); + isStrncasecmp &= + FT->getNumParams() == 3 && + FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && + FT->getParamType(2)->isIntegerTy(); isStdString &= FT->getNumParams() >= 2 && FT->getParamType(0)->isPointerTy() && FT->getParamType(1)->isPointerTy(); diff --git a/instrumentation/afl-llvm-dict2file.so.cc b/instrumentation/afl-llvm-dict2file.so.cc index 3a5b99f8..c60f3e06 100644 --- a/instrumentation/afl-llvm-dict2file.so.cc +++ b/instrumentation/afl-llvm-dict2file.so.cc @@ -433,32 +433,35 @@ bool AFLdict2filePass::runOnModule(Module &M) { isStrstr &= FT->getNumParams() == 2 && FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); isStrcmp &= FT->getNumParams() == 2 && FT->getReturnType()->isIntegerTy(32) && FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); isStrcasecmp &= FT->getNumParams() == 2 && FT->getReturnType()->isIntegerTy(32) && FT->getParamType(0) == 
FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); isMemcmp &= FT->getNumParams() == 3 && FT->getReturnType()->isIntegerTy(32) && FT->getParamType(0)->isPointerTy() && FT->getParamType(1)->isPointerTy() && FT->getParamType(2)->isIntegerTy(); - isStrncmp &= FT->getNumParams() == 3 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == - IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && - FT->getParamType(2)->isIntegerTy(); - isStrncasecmp &= FT->getNumParams() == 3 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == - IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && - FT->getParamType(2)->isIntegerTy(); + isStrncmp &= + FT->getNumParams() == 3 && FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && + FT->getParamType(2)->isIntegerTy(); + isStrncasecmp &= + FT->getNumParams() == 3 && FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && + FT->getParamType(2)->isIntegerTy(); isStdString &= FT->getNumParams() >= 2 && FT->getParamType(0)->isPointerTy() && FT->getParamType(1)->isPointerTy(); diff --git a/instrumentation/cmplog-routines-pass.cc b/instrumentation/cmplog-routines-pass.cc index 9272be62..b27e06e0 100644 --- a/instrumentation/cmplog-routines-pass.cc +++ b/instrumentation/cmplog-routines-pass.cc @@ -385,7 +385,8 @@ bool CmpLogRoutines::hookRtns(Module &M) { isStrcmp &= FT->getNumParams() == 2 && FT->getReturnType()->isIntegerTy(32) && FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); bool isStrncmp = (!FuncName.compare("strncmp") || !FuncName.compare("xmlStrncmp") || @@ -398,12 +399,12 @@ bool CmpLogRoutines::hookRtns(Module &M) { !FuncName.compare("g_ascii_strncasecmp") || !FuncName.compare("Curl_strncasecompare") || !FuncName.compare("g_strncasecmp")); - isStrncmp &= FT->getNumParams() == 3 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == - IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && - FT->getParamType(2)->isIntegerTy(); + isStrncmp &= + FT->getNumParams() == 3 && FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && + FT->getParamType(2)->isIntegerTy(); bool isGccStdStringStdString = Callee->getName().find("__is_charIT_EE7__value") != diff --git a/instrumentation/compare-transform-pass.so.cc b/instrumentation/compare-transform-pass.so.cc index 5c707914..b0d6355a 100644 --- a/instrumentation/compare-transform-pass.so.cc +++ b/instrumentation/compare-transform-pass.so.cc @@ -271,28 +271,30 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, isStrcmp &= FT->getNumParams() == 2 && FT->getReturnType()->isIntegerTy(32) && FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); + FT->getParamType(0) == + 
IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); isStrcasecmp &= FT->getNumParams() == 2 && FT->getReturnType()->isIntegerTy(32) && FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0); isMemcmp &= FT->getNumParams() == 3 && FT->getReturnType()->isIntegerTy(32) && FT->getParamType(0)->isPointerTy() && FT->getParamType(1)->isPointerTy() && FT->getParamType(2)->isIntegerTy(); - isStrncmp &= FT->getNumParams() == 3 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == - IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && - FT->getParamType(2)->isIntegerTy(); - isStrncasecmp &= FT->getNumParams() == 3 && - FT->getReturnType()->isIntegerTy(32) && - FT->getParamType(0) == FT->getParamType(1) && - FT->getParamType(0) == - IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && - FT->getParamType(2)->isIntegerTy(); + isStrncmp &= + FT->getNumParams() == 3 && FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && + FT->getParamType(2)->isIntegerTy(); + isStrncasecmp &= + FT->getNumParams() == 3 && FT->getReturnType()->isIntegerTy(32) && + FT->getParamType(0) == FT->getParamType(1) && + FT->getParamType(0) == + IntegerType::getInt8Ty(M.getContext())->getPointerTo(0) && + FT->getParamType(2)->isIntegerTy(); if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && !isStrncasecmp && !isIntMemcpy) -- cgit 1.4.1 From 88cbaeb3e14de3ee5960ca78564e41741e7bd85b Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 29 Dec 2023 10:03:02 +0100 Subject: LLVM 17 bug workaround --- TODO.md | 9 +++ docs/Changelog.md | 5 ++ instrumentation/SanitizerCoveragePCGUARD.so.cc | 1 + instrumentation/split-compares-pass.so.cc | 77 ++++++++++++++++---------- 4 files changed, 63 insertions(+), 29 deletions(-) (limited to 'docs/Changelog.md') diff --git a/TODO.md b/TODO.md index 9e9a2366..8d746d50 100644 --- a/TODO.md +++ b/TODO.md @@ -10,6 +10,15 @@ - when trimming then perform crash detection - either -L0 and/or -p mmopt results in zero new coverage +afl-clang-fast -Iapps -I. -Iinclude -Iapps/include -pthread -m64 -fsanitize=address -fno-omit-frame-pointer -g -Wa,--noexecstack -Qunused-arguments -fno-inline-functions -g -pthread -Wno-unused-command-line-argument -O3 -fno-sanitize=alignment -DOPENSSL_BUILDING_OPENSSL -DPEDANTIC -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -MMD -MF apps/openssl-bin-speed.d.tmp -MT apps/openssl-bin-speed.o -c -o apps/openssl-bin-speed.o apps/speed.c +afl-cc++4.10a by Michal Zalewski, Laszlo Szekeres, Marc Heuse - mode: LLVM-PCGUARD +Split-compare-newpass by laf.intel@gmail.com, extended by heiko@hexco.de (splitting icmp to 8 bit) +Split-floatingpoint-compare-pass: 2 FP comparisons split +724 comparisons found +SanitizerCoveragePCGUARD++4.10a +[+] Instrumented 7356 locations with no collisions (non-hardened mode) of which are 99 handled and 7 unhandled selects. + + ## Should <<<<<<< Updated upstream diff --git a/docs/Changelog.md b/docs/Changelog.md index c8f04217..178d0f8a 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -11,6 +11,11 @@ reporting! - instrumentation: - LLVM 18 support, thanks to @devnexen! 
+ - compcov/LAF-intel: + - floating point splitting bug fix by @hexcoder + - due a bug in LLVM 17 integer splitting is disabled! + - when splitting floats was selected, integers were always split as well, + fixed to require AFL_LLVM_LAF_SPLIT_COMPARES as it should ### Version ++4.09c (release) diff --git a/instrumentation/SanitizerCoveragePCGUARD.so.cc b/instrumentation/SanitizerCoveragePCGUARD.so.cc index 1c019d26..aae04bb1 100644 --- a/instrumentation/SanitizerCoveragePCGUARD.so.cc +++ b/instrumentation/SanitizerCoveragePCGUARD.so.cc @@ -952,6 +952,7 @@ bool ModuleSanitizerCoverageAFL::InjectCoverage( #endif { + // fprintf(stderr, "UNHANDLED: %u\n", t->getTypeID()); unhandled++; continue; diff --git a/instrumentation/split-compares-pass.so.cc b/instrumentation/split-compares-pass.so.cc index cbe8b743..144025fb 100644 --- a/instrumentation/split-compares-pass.so.cc +++ b/instrumentation/split-compares-pass.so.cc @@ -1707,12 +1707,6 @@ bool SplitComparesTransform::runOnModule(Module &M) { #endif - char *bitw_env = getenv("AFL_LLVM_LAF_SPLIT_COMPARES_BITW"); - if (!bitw_env) bitw_env = getenv("LAF_SPLIT_COMPARES_BITW"); - if (bitw_env) { target_bitwidth = atoi(bitw_env); } - - enableFPSplit = getenv("AFL_LLVM_LAF_SPLIT_FLOATS") != NULL; - if ((isatty(2) && getenv("AFL_QUIET") == NULL) || getenv("AFL_DEBUG") != NULL) { @@ -1728,6 +1722,27 @@ bool SplitComparesTransform::runOnModule(Module &M) { } + char *bitw_env = getenv("AFL_LLVM_LAF_SPLIT_COMPARES_BITW"); + if (!bitw_env) bitw_env = getenv("LAF_SPLIT_COMPARES_BITW"); + if (bitw_env) { target_bitwidth = atoi(bitw_env); } + + if (getenv("AFL_LLVM_LAF_SPLIT_FLOATS")) { enableFPSplit = true; } + + bool split_comp = false; + + if (getenv("AFL_LLVM_LAF_SPLIT_COMPARES")) { + +#if LLVM_MAJOR == 17 + if (!be_quiet) + fprintf(stderr, + "WARNING: AFL++ splitting integer comparisons is disabled in " + "LLVM 17 due bugs, switch to 16 or 18!\n"); +#else + split_comp = true; +#endif + + } + #if LLVM_MAJOR >= 11 auto PA = PreservedAnalyses::all(); #endif @@ -1746,36 +1761,40 @@ bool SplitComparesTransform::runOnModule(Module &M) { } - std::vector worklist; - /* iterate over all functions, bbs and instruction search for all integer - * compare instructions. Save them into the worklist for later. */ - for (auto &F : M) { + if (split_comp) { - if (!isInInstrumentList(&F, MNAME)) continue; + std::vector worklist; + /* iterate over all functions, bbs and instruction search for all integer + * compare instructions. Save them into the worklist for later. 
*/ + for (auto &F : M) { - for (auto &BB : F) { + if (!isInInstrumentList(&F, MNAME)) continue; - for (auto &IN : BB) { + for (auto &BB : F) { - if (auto CI = dyn_cast(&IN)) { + for (auto &IN : BB) { - auto op0 = CI->getOperand(0); - auto op1 = CI->getOperand(1); - if (!op0 || !op1) { + if (auto CI = dyn_cast(&IN)) { + + auto op0 = CI->getOperand(0); + auto op1 = CI->getOperand(1); + if (!op0 || !op1) { #if LLVM_MAJOR >= 11 - return PA; + return PA; #else - return false; + return false; #endif - } + } - auto iTy1 = dyn_cast(op0->getType()); - if (iTy1 && isa(op1->getType())) { + auto iTy1 = dyn_cast(op0->getType()); + if (iTy1 && isa(op1->getType())) { - unsigned bitw = iTy1->getBitWidth(); - if (isSupportedBitWidth(bitw)) { worklist.push_back(CI); } + unsigned bitw = iTy1->getBitWidth(); + if (isSupportedBitWidth(bitw)) { worklist.push_back(CI); } + + } } @@ -1785,13 +1804,13 @@ bool SplitComparesTransform::runOnModule(Module &M) { } - } + // now that we have a list of all integer comparisons we can start replacing + // them with the splitted alternatives. + for (auto CI : worklist) { - // now that we have a list of all integer comparisons we can start replacing - // them with the splitted alternatives. - for (auto CI : worklist) { + simplifyAndSplit(CI, M); - simplifyAndSplit(CI, M); + } } -- cgit 1.4.1 From 5f492da71793e75e145e13d4c0dd9506bac2d60c Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sat, 30 Dec 2023 11:00:28 +0100 Subject: update changelog --- docs/Changelog.md | 2 ++ 1 file changed, 2 insertions(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 178d0f8a..adc81d64 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -11,6 +11,8 @@ reporting! - instrumentation: - LLVM 18 support, thanks to @devnexen! + - Injection (SQL, LDAP, XSS) feature now available, see + `instrumentation/README.injections.md` how to activate/use/expand. - compcov/LAF-intel: - floating point splitting bug fix by @hexcoder - due a bug in LLVM 17 integer splitting is disabled! -- cgit 1.4.1 From ee7d69b8175f31f2efb2b3bf15f2bbb29f61aa14 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 4 Jan 2024 15:44:28 +0100 Subject: changelog --- docs/Changelog.md | 6 +- include/envs.h | 319 ++++++++++++++---------------------------------------- 2 files changed, 88 insertions(+), 237 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index adc81d64..69a369e3 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -9,15 +9,17 @@ explore is slightly better now. - fixed minor issues in the mutation engine, thanks to @futhewo for reporting! + - afl-cc: + - large rewrite by @SonicStark which fixes a few corner cases, thanks! - instrumentation: - LLVM 18 support, thanks to @devnexen! - Injection (SQL, LDAP, XSS) feature now available, see `instrumentation/README.injections.md` how to activate/use/expand. - compcov/LAF-intel: - floating point splitting bug fix by @hexcoder - - due a bug in LLVM 17 integer splitting is disabled! + - due a bug in LLVM 17 integer splitting is disabled there! 
- when splitting floats was selected, integers were always split as well, - fixed to require AFL_LLVM_LAF_SPLIT_COMPARES as it should + fixed to require AFL_LLVM_LAF_SPLIT_COMPARES or _ALL as it should ### Version ++4.09c (release) diff --git a/include/envs.h b/include/envs.h index aa5c658e..0f645d23 100644 --- a/include/envs.h +++ b/include/envs.h @@ -16,255 +16,104 @@ static char *afl_environment_deprecated[] = { static char *afl_environment_variables[] = { - "AFL_ALIGNED_ALLOC", - "AFL_ALLOW_TMP", - "AFL_ANALYZE_HEX", - "AFL_AS", - "AFL_AUTORESUME", - "AFL_AS_FORCE_INSTRUMENT", - "AFL_BENCH_JUST_ONE", - "AFL_BENCH_UNTIL_CRASH", - "AFL_CAL_FAST", - "AFL_CC", - "AFL_CC_COMPILER", - "AFL_CMIN_ALLOW_ANY", - "AFL_CMIN_CRASHES_ONLY", - "AFL_CMPLOG_ONLY_NEW", - "AFL_CODE_END", - "AFL_CODE_START", - "AFL_COMPCOV_BINNAME", - "AFL_COMPCOV_LEVEL", - "AFL_CRASH_EXITCODE", - "AFL_CRASHING_SEEDS_AS_NEW_CRASH", - "AFL_CUSTOM_MUTATOR_LIBRARY", - "AFL_CUSTOM_MUTATOR_ONLY", - "AFL_CUSTOM_INFO_PROGRAM", - "AFL_CUSTOM_INFO_PROGRAM_ARGV", - "AFL_CUSTOM_INFO_PROGRAM_INPUT", - "AFL_CUSTOM_INFO_OUT", - "AFL_CXX", - "AFL_CYCLE_SCHEDULES", - "AFL_DEBUG", - "AFL_DEBUG_CHILD", - "AFL_DEBUG_GDB", - "AFL_DEBUG_UNICORN", - "AFL_DISABLE_TRIM", - "AFL_DISABLE_LLVM_INSTRUMENTATION", - "AFL_DONT_OPTIMIZE", - "AFL_DRIVER_STDERR_DUPLICATE_FILENAME", - "AFL_DUMB_FORKSRV", - "AFL_EARLY_FORKSERVER", - "AFL_ENTRYPOINT", - "AFL_EXIT_WHEN_DONE", - "AFL_EXIT_ON_TIME", - "AFL_EXIT_ON_SEED_ISSUES", - "AFL_FAST_CAL", - "AFL_FINAL_SYNC", - "AFL_FORCE_UI", - "AFL_FRIDA_DEBUG_MAPS", - "AFL_FRIDA_DRIVER_NO_HOOK", - "AFL_FRIDA_EXCLUDE_RANGES", - "AFL_FRIDA_INST_CACHE_SIZE", - "AFL_FRIDA_INST_COVERAGE_ABSOLUTE", - "AFL_FRIDA_INST_COVERAGE_FILE", - "AFL_FRIDA_INST_DEBUG_FILE", - "AFL_FRIDA_INST_INSN", - "AFL_FRIDA_INST_JIT", - "AFL_FRIDA_INST_NO_CACHE", - "AFL_FRIDA_INST_NO_DYNAMIC_LOAD", - "AFL_FRIDA_INST_NO_OPTIMIZE", - "AFL_FRIDA_INST_NO_PREFETCH", - "AFL_FRIDA_INST_NO_PREFETCH_BACKPATCH", + "AFL_ALIGNED_ALLOC", "AFL_ALLOW_TMP", "AFL_ANALYZE_HEX", "AFL_AS", + "AFL_AUTORESUME", "AFL_AS_FORCE_INSTRUMENT", "AFL_BENCH_JUST_ONE", + "AFL_BENCH_UNTIL_CRASH", "AFL_CAL_FAST", "AFL_CC", "AFL_CC_COMPILER", + "AFL_CMIN_ALLOW_ANY", "AFL_CMIN_CRASHES_ONLY", "AFL_CMPLOG_ONLY_NEW", + "AFL_CODE_END", "AFL_CODE_START", "AFL_COMPCOV_BINNAME", + "AFL_COMPCOV_LEVEL", "AFL_CRASH_EXITCODE", + "AFL_CRASHING_SEEDS_AS_NEW_CRASH", "AFL_CUSTOM_MUTATOR_LIBRARY", + "AFL_CUSTOM_MUTATOR_ONLY", "AFL_CUSTOM_INFO_PROGRAM", + "AFL_CUSTOM_INFO_PROGRAM_ARGV", "AFL_CUSTOM_INFO_PROGRAM_INPUT", + "AFL_CUSTOM_INFO_OUT", "AFL_CXX", "AFL_CYCLE_SCHEDULES", "AFL_DEBUG", + "AFL_DEBUG_CHILD", "AFL_DEBUG_GDB", "AFL_DEBUG_UNICORN", "AFL_DISABLE_TRIM", + "AFL_DISABLE_LLVM_INSTRUMENTATION", "AFL_DONT_OPTIMIZE", + "AFL_DRIVER_STDERR_DUPLICATE_FILENAME", "AFL_DUMB_FORKSRV", + "AFL_EARLY_FORKSERVER", "AFL_ENTRYPOINT", "AFL_EXIT_WHEN_DONE", + "AFL_EXIT_ON_TIME", "AFL_EXIT_ON_SEED_ISSUES", "AFL_FAST_CAL", + "AFL_FINAL_SYNC", "AFL_FORCE_UI", "AFL_FRIDA_DEBUG_MAPS", + "AFL_FRIDA_DRIVER_NO_HOOK", "AFL_FRIDA_EXCLUDE_RANGES", + "AFL_FRIDA_INST_CACHE_SIZE", "AFL_FRIDA_INST_COVERAGE_ABSOLUTE", + "AFL_FRIDA_INST_COVERAGE_FILE", "AFL_FRIDA_INST_DEBUG_FILE", + "AFL_FRIDA_INST_INSN", "AFL_FRIDA_INST_JIT", "AFL_FRIDA_INST_NO_CACHE", + "AFL_FRIDA_INST_NO_DYNAMIC_LOAD", "AFL_FRIDA_INST_NO_OPTIMIZE", + "AFL_FRIDA_INST_NO_PREFETCH", "AFL_FRIDA_INST_NO_PREFETCH_BACKPATCH", "AFL_FRIDA_INST_NO_SUPPRESS" "AFL_FRIDA_INST_RANGES", - "AFL_FRIDA_INST_REGS_FILE", - "AFL_FRIDA_INST_SEED", - "AFL_FRIDA_INST_TRACE", - 
"AFL_FRIDA_INST_TRACE_UNIQUE", - "AFL_FRIDA_INST_UNSTABLE_COVERAGE_FILE", - "AFL_FRIDA_JS_SCRIPT", - "AFL_FRIDA_OUTPUT_STDOUT", - "AFL_FRIDA_OUTPUT_STDERR", - "AFL_FRIDA_PERSISTENT_ADDR", - "AFL_FRIDA_PERSISTENT_CNT", - "AFL_FRIDA_PERSISTENT_DEBUG", - "AFL_FRIDA_PERSISTENT_HOOK", - "AFL_FRIDA_PERSISTENT_RET", - "AFL_FRIDA_STALKER_ADJACENT_BLOCKS", - "AFL_FRIDA_STALKER_IC_ENTRIES", - "AFL_FRIDA_STALKER_NO_BACKPATCH", - "AFL_FRIDA_STATS_FILE", - "AFL_FRIDA_STATS_INTERVAL", - "AFL_FRIDA_TRACEABLE", + "AFL_FRIDA_INST_REGS_FILE", "AFL_FRIDA_INST_SEED", "AFL_FRIDA_INST_TRACE", + "AFL_FRIDA_INST_TRACE_UNIQUE", "AFL_FRIDA_INST_UNSTABLE_COVERAGE_FILE", + "AFL_FRIDA_JS_SCRIPT", "AFL_FRIDA_OUTPUT_STDOUT", "AFL_FRIDA_OUTPUT_STDERR", + "AFL_FRIDA_PERSISTENT_ADDR", "AFL_FRIDA_PERSISTENT_CNT", + "AFL_FRIDA_PERSISTENT_DEBUG", "AFL_FRIDA_PERSISTENT_HOOK", + "AFL_FRIDA_PERSISTENT_RET", "AFL_FRIDA_STALKER_ADJACENT_BLOCKS", + "AFL_FRIDA_STALKER_IC_ENTRIES", "AFL_FRIDA_STALKER_NO_BACKPATCH", + "AFL_FRIDA_STATS_FILE", "AFL_FRIDA_STATS_INTERVAL", "AFL_FRIDA_TRACEABLE", "AFL_FRIDA_VERBOSE", "AFL_FUZZER_ARGS", // oss-fuzz - "AFL_FUZZER_STATS_UPDATE_INTERVAL", - "AFL_GDB", - "AFL_GCC_ALLOWLIST", - "AFL_GCC_DENYLIST", - "AFL_GCC_BLOCKLIST", - "AFL_GCC_INSTRUMENT_FILE", - "AFL_GCC_OUT_OF_LINE", - "AFL_GCC_SKIP_NEVERZERO", - "AFL_GCJ", - "AFL_HANG_TMOUT", - "AFL_FORKSRV_INIT_TMOUT", - "AFL_HARDEN", - "AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES", - "AFL_IGNORE_PROBLEMS", - "AFL_IGNORE_PROBLEMS_COVERAGE", - "AFL_IGNORE_SEED_PROBLEMS", - "AFL_IGNORE_TIMEOUTS", - "AFL_IGNORE_UNKNOWN_ENVS", - "AFL_IMPORT_FIRST", - "AFL_INPUT_LEN_MIN", - "AFL_INPUT_LEN_MAX", - "AFL_INST_LIBS", - "AFL_INST_RATIO", - "AFL_KEEP_TIMEOUTS", - "AFL_KILL_SIGNAL", - "AFL_FORK_SERVER_KILL_SIGNAL", - "AFL_KEEP_TRACES", - "AFL_KEEP_ASSEMBLY", - "AFL_LD_HARD_FAIL", - "AFL_LD_LIMIT_MB", - "AFL_LD_NO_CALLOC_OVER", - "AFL_LD_PASSTHROUGH", - "AFL_REAL_LD", - "AFL_LD_PRELOAD", - "AFL_LD_VERBOSE", - "AFL_LLVM_ALLOWLIST", - "AFL_LLVM_DENYLIST", - "AFL_LLVM_BLOCKLIST", - "AFL_CMPLOG", - "AFL_LLVM_CMPLOG", - "AFL_GCC_CMPLOG", - "AFL_LLVM_INSTRIM", - "AFL_LLVM_CALLER", - "AFL_LLVM_CTX", - "AFL_LLVM_CTX_K", - "AFL_LLVM_DICT2FILE", - "AFL_LLVM_DICT2FILE_NO_MAIN", - "AFL_LLVM_DOCUMENT_IDS", - "AFL_LLVM_INSTRIM_LOOPHEAD", - "AFL_LLVM_INSTRUMENT", - "AFL_LLVM_LTO_AUTODICTIONARY", - "AFL_LLVM_AUTODICTIONARY", + "AFL_FUZZER_STATS_UPDATE_INTERVAL", "AFL_GDB", "AFL_GCC_ALLOWLIST", + "AFL_GCC_DENYLIST", "AFL_GCC_BLOCKLIST", "AFL_GCC_INSTRUMENT_FILE", + "AFL_GCC_OUT_OF_LINE", "AFL_GCC_SKIP_NEVERZERO", "AFL_GCJ", + "AFL_HANG_TMOUT", "AFL_FORKSRV_INIT_TMOUT", "AFL_HARDEN", + "AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES", "AFL_IGNORE_PROBLEMS", + "AFL_IGNORE_PROBLEMS_COVERAGE", "AFL_IGNORE_SEED_PROBLEMS", + "AFL_IGNORE_TIMEOUTS", "AFL_IGNORE_UNKNOWN_ENVS", "AFL_IMPORT_FIRST", + "AFL_INPUT_LEN_MIN", "AFL_INPUT_LEN_MAX", "AFL_INST_LIBS", "AFL_INST_RATIO", + "AFL_KEEP_TIMEOUTS", "AFL_KILL_SIGNAL", "AFL_FORK_SERVER_KILL_SIGNAL", + "AFL_KEEP_TRACES", "AFL_KEEP_ASSEMBLY", "AFL_LD_HARD_FAIL", + "AFL_LD_LIMIT_MB", "AFL_LD_NO_CALLOC_OVER", "AFL_LD_PASSTHROUGH", + "AFL_REAL_LD", "AFL_LD_PRELOAD", "AFL_LD_VERBOSE", "AFL_LLVM_ALLOWLIST", + "AFL_LLVM_DENYLIST", "AFL_LLVM_BLOCKLIST", "AFL_CMPLOG", "AFL_LLVM_CMPLOG", + "AFL_GCC_CMPLOG", "AFL_LLVM_INSTRIM", "AFL_LLVM_CALLER", "AFL_LLVM_CTX", + "AFL_LLVM_CTX_K", "AFL_LLVM_DICT2FILE", "AFL_LLVM_DICT2FILE_NO_MAIN", + "AFL_LLVM_DOCUMENT_IDS", "AFL_LLVM_INSTRIM_LOOPHEAD", "AFL_LLVM_INSTRUMENT", + "AFL_LLVM_LTO_AUTODICTIONARY", "AFL_LLVM_AUTODICTIONARY", 
"AFL_LLVM_SKIPSINGLEBLOCK", // Marker: ADD_TO_INJECTIONS - "AFL_LLVM_INJECTIONS_ALL", - "AFL_LLVM_INJECTIONS_SQL", - "AFL_LLVM_INJECTIONS_LDAP", - "AFL_LLVM_INJECTIONS_XSS", - "AFL_LLVM_INSTRIM_SKIPSINGLEBLOCK", - "AFL_LLVM_LAF_SPLIT_COMPARES", - "AFL_LLVM_LAF_SPLIT_COMPARES_BITW", - "AFL_LLVM_LAF_SPLIT_FLOATS", - "AFL_LLVM_LAF_SPLIT_SWITCHES", - "AFL_LLVM_LAF_ALL", - "AFL_LLVM_LAF_TRANSFORM_COMPARES", - "AFL_LLVM_MAP_ADDR", - "AFL_LLVM_MAP_DYNAMIC", - "AFL_LLVM_NGRAM_SIZE", - "AFL_NGRAM_SIZE", - "AFL_LLVM_NO_RPATH", - "AFL_LLVM_NOT_ZERO", - "AFL_LLVM_INSTRUMENT_FILE", - "AFL_LLVM_THREADSAFE_INST", - "AFL_LLVM_SKIP_NEVERZERO", - "AFL_NO_AFFINITY", - "AFL_TRY_AFFINITY", - "AFL_LLVM_LTO_DONTWRITEID", + "AFL_LLVM_INJECTIONS_ALL", "AFL_LLVM_INJECTIONS_SQL", + "AFL_LLVM_INJECTIONS_LDAP", "AFL_LLVM_INJECTIONS_XSS", + "AFL_LLVM_INSTRIM_SKIPSINGLEBLOCK", "AFL_LLVM_LAF_SPLIT_COMPARES", + "AFL_LLVM_LAF_SPLIT_COMPARES_BITW", "AFL_LLVM_LAF_SPLIT_FLOATS", + "AFL_LLVM_LAF_SPLIT_SWITCHES", "AFL_LLVM_LAF_ALL", + "AFL_LLVM_LAF_TRANSFORM_COMPARES", "AFL_LLVM_MAP_ADDR", + "AFL_LLVM_MAP_DYNAMIC", "AFL_LLVM_NGRAM_SIZE", "AFL_NGRAM_SIZE", + "AFL_LLVM_NO_RPATH", "AFL_LLVM_NOT_ZERO", "AFL_LLVM_INSTRUMENT_FILE", + "AFL_LLVM_THREADSAFE_INST", "AFL_LLVM_SKIP_NEVERZERO", "AFL_NO_AFFINITY", + "AFL_TRY_AFFINITY", "AFL_LLVM_LTO_DONTWRITEID", "AFL_LLVM_LTO_SKIPINIT" "AFL_LLVM_LTO_STARTID", - "AFL_FUZZER_LOOPCOUNT", - "AFL_NO_ARITH", - "AFL_NO_AUTODICT", - "AFL_NO_BUILTIN", + "AFL_FUZZER_LOOPCOUNT", "AFL_NO_ARITH", "AFL_NO_AUTODICT", "AFL_NO_BUILTIN", #if defined USE_COLOR && !defined ALWAYS_COLORED - "AFL_NO_COLOR", - "AFL_NO_COLOUR", + "AFL_NO_COLOR", "AFL_NO_COLOUR", #endif "AFL_NO_CPU_RED", "AFL_NO_CFG_FUZZING", // afl.rs rust crate option - "AFL_NO_CRASH_README", - "AFL_NO_FORKSRV", - "AFL_NO_UI", - "AFL_NO_PYTHON", - "AFL_NO_STARTUP_CALIBRATION", - "AFL_NO_WARN_INSTABILITY", - "AFL_UNTRACER_FILE", - "AFL_LLVM_USE_TRACE_PC", - "AFL_MAP_SIZE", - "AFL_MAPSIZE", + "AFL_NO_CRASH_README", "AFL_NO_FORKSRV", "AFL_NO_UI", "AFL_NO_PYTHON", + "AFL_NO_STARTUP_CALIBRATION", "AFL_NO_WARN_INSTABILITY", + "AFL_UNTRACER_FILE", "AFL_LLVM_USE_TRACE_PC", "AFL_MAP_SIZE", "AFL_MAPSIZE", "AFL_MAX_DET_EXTRAS", "AFL_NO_X86", // not really an env but we dont want to warn on it - "AFL_NOOPT", - "AFL_NYX_AUX_SIZE", - "AFL_NYX_DISABLE_SNAPSHOT_MODE", - "AFL_NYX_LOG", - "AFL_NYX_REUSE_SNAPSHOT", - "AFL_PASSTHROUGH", - "AFL_PATH", - "AFL_PERFORMANCE_FILE", - "AFL_PERSISTENT_RECORD", - "AFL_POST_PROCESS_KEEP_ORIGINAL", - "AFL_PRELOAD", - "AFL_TARGET_ENV", - "AFL_PYTHON_MODULE", - "AFL_QEMU_CUSTOM_BIN", - "AFL_QEMU_COMPCOV", - "AFL_QEMU_COMPCOV_DEBUG", - "AFL_QEMU_DEBUG_MAPS", - "AFL_QEMU_DISABLE_CACHE", - "AFL_QEMU_DRIVER_NO_HOOK", - "AFL_QEMU_FORCE_DFL", - "AFL_QEMU_PERSISTENT_ADDR", - "AFL_QEMU_PERSISTENT_CNT", - "AFL_QEMU_PERSISTENT_GPR", - "AFL_QEMU_PERSISTENT_HOOK", - "AFL_QEMU_PERSISTENT_MEM", - "AFL_QEMU_PERSISTENT_RET", - "AFL_QEMU_PERSISTENT_RETADDR_OFFSET", - "AFL_QEMU_PERSISTENT_EXITS", - "AFL_QEMU_INST_RANGES", - "AFL_QEMU_EXCLUDE_RANGES", - "AFL_QEMU_SNAPSHOT", - "AFL_QEMU_TRACK_UNSTABLE", - "AFL_QUIET", - "AFL_RANDOM_ALLOC_CANARY", - "AFL_REAL_PATH", - "AFL_SHUFFLE_QUEUE", - "AFL_SKIP_BIN_CHECK", - "AFL_SKIP_CPUFREQ", - "AFL_SKIP_CRASHES", - "AFL_SKIP_OSSFUZZ", - "AFL_STATSD", - "AFL_STATSD_HOST", - "AFL_STATSD_PORT", - "AFL_STATSD_TAGS_FLAVOR", - "AFL_SYNC_TIME", - "AFL_TESTCACHE_SIZE", - "AFL_TESTCACHE_ENTRIES", - "AFL_TMIN_EXACT", - "AFL_TMPDIR", - "AFL_TOKEN_FILE", - "AFL_TRACE_PC", - "AFL_USE_ASAN", - "AFL_USE_MSAN", - 
"AFL_USE_TRACE_PC", - "AFL_USE_UBSAN", - "AFL_USE_TSAN", - "AFL_USE_CFISAN", - "AFL_USE_LSAN", - "AFL_WINE_PATH", - "AFL_NO_SNAPSHOT", - "AFL_EXPAND_HAVOC_NOW", - "AFL_USE_FASAN", - "AFL_USE_QASAN", - "AFL_PRINT_FILENAMES", - "AFL_PIZZA_MODE", - NULL + "AFL_NOOPT", "AFL_NYX_AUX_SIZE", "AFL_NYX_DISABLE_SNAPSHOT_MODE", + "AFL_NYX_LOG", "AFL_NYX_REUSE_SNAPSHOT", "AFL_PASSTHROUGH", "AFL_PATH", + "AFL_PERFORMANCE_FILE", "AFL_PERSISTENT_RECORD", + "AFL_POST_PROCESS_KEEP_ORIGINAL", "AFL_PRELOAD", "AFL_TARGET_ENV", + "AFL_PYTHON_MODULE", "AFL_QEMU_CUSTOM_BIN", "AFL_QEMU_COMPCOV", + "AFL_QEMU_COMPCOV_DEBUG", "AFL_QEMU_DEBUG_MAPS", "AFL_QEMU_DISABLE_CACHE", + "AFL_QEMU_DRIVER_NO_HOOK", "AFL_QEMU_FORCE_DFL", "AFL_QEMU_PERSISTENT_ADDR", + "AFL_QEMU_PERSISTENT_CNT", "AFL_QEMU_PERSISTENT_GPR", + "AFL_QEMU_PERSISTENT_HOOK", "AFL_QEMU_PERSISTENT_MEM", + "AFL_QEMU_PERSISTENT_RET", "AFL_QEMU_PERSISTENT_RETADDR_OFFSET", + "AFL_QEMU_PERSISTENT_EXITS", "AFL_QEMU_INST_RANGES", + "AFL_QEMU_EXCLUDE_RANGES", "AFL_QEMU_SNAPSHOT", "AFL_QEMU_TRACK_UNSTABLE", + "AFL_QUIET", "AFL_RANDOM_ALLOC_CANARY", "AFL_REAL_PATH", + "AFL_SHUFFLE_QUEUE", "AFL_SKIP_BIN_CHECK", "AFL_SKIP_CPUFREQ", + "AFL_SKIP_CRASHES", "AFL_SKIP_OSSFUZZ", "AFL_STATSD", "AFL_STATSD_HOST", + "AFL_STATSD_PORT", "AFL_STATSD_TAGS_FLAVOR", "AFL_SYNC_TIME", + "AFL_TESTCACHE_SIZE", "AFL_TESTCACHE_ENTRIES", "AFL_TMIN_EXACT", + "AFL_TMPDIR", "AFL_TOKEN_FILE", "AFL_TRACE_PC", "AFL_USE_ASAN", + "AFL_USE_MSAN", "AFL_USE_TRACE_PC", "AFL_USE_UBSAN", "AFL_USE_TSAN", + "AFL_USE_CFISAN", "AFL_USE_LSAN", "AFL_WINE_PATH", "AFL_NO_SNAPSHOT", + "AFL_EXPAND_HAVOC_NOW", "AFL_USE_FASAN", "AFL_USE_QASAN", + "AFL_PRINT_FILENAMES", "AFL_PIZZA_MODE", NULL }; -- cgit 1.4.1 From db65dc5a0b5174f6eb22befee6dbc703092ff168 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 9 Jan 2024 16:50:57 +0100 Subject: lto llvm 12+ --- GNUmakefile.llvm | 6 +++--- docs/Changelog.md | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'docs/Changelog.md') diff --git a/GNUmakefile.llvm b/GNUmakefile.llvm index c704d772..7437130d 100644 --- a/GNUmakefile.llvm +++ b/GNUmakefile.llvm @@ -51,7 +51,7 @@ LLVM_TOO_OLD = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^[1-9] LLVM_NEW_API = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^1[0-9]' && echo 1 || echo 0 ) LLVM_NEWER_API = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^1[6-9]' && echo 1 || echo 0 ) LLVM_13_OK = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^1[3-9]' && echo 1 || echo 0 ) -LLVM_HAVE_LTO = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^1[1-9]' && echo 1 || echo 0 ) +LLVM_HAVE_LTO = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^1[2-9]' && echo 1 || echo 0 ) LLVM_BINDIR = $(shell $(LLVM_CONFIG) --bindir 2>/dev/null) LLVM_LIBDIR = $(shell $(LLVM_CONFIG) --libdir 2>/dev/null) LLVM_STDCXX = gnu++11 @@ -95,12 +95,12 @@ ifeq "$(LLVM_NEWER_API)" "1" endif ifeq "$(LLVM_HAVE_LTO)" "1" - $(info [+] llvm_mode detected llvm 11+, enabling afl-lto LTO implementation) + $(info [+] llvm_mode detected llvm 12+, enabling afl-lto LTO implementation) LLVM_LTO = 1 endif ifeq "$(LLVM_LTO)" "0" - $(info [+] llvm_mode detected llvm < 11, afl-lto LTO will not be build.) + $(info [+] llvm_mode detected llvm < 12, afl-lto LTO will not be build.) endif ifeq "$(LLVM_APPLE_XCODE)" "1" diff --git a/docs/Changelog.md b/docs/Changelog.md index 69a369e3..7d388134 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -11,6 +11,7 @@ reporting! 
- afl-cc: - large rewrite by @SonicStark which fixes a few corner cases, thanks! + - LTO mode now requires llvm 12+ - instrumentation: - LLVM 18 support, thanks to @devnexen! - Injection (SQL, LDAP, XSS) feature now available, see -- cgit 1.4.1 From 68d883d4285b7adedb2cd87a034c28a76ce2aa8c Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 12 Jan 2024 15:44:45 +0100 Subject: changelog --- docs/Changelog.md | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 7d388134..76470bde 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -21,6 +21,10 @@ - due a bug in LLVM 17 integer splitting is disabled there! - when splitting floats was selected, integers were always split as well, fixed to require AFL_LLVM_LAF_SPLIT_COMPARES or _ALL as it should + - qemu_mode: + - plugins are now activated by default and a new module is included that + produces drcov compatible traces for lighthouse/lightkeeper/... + thanks to @JRomainG to submitting! ### Version ++4.09c (release) -- cgit 1.4.1 From 04219f98575847e9846b4ecf0de40dd92cd9d56c Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 18 Jan 2024 09:28:56 +0100 Subject: update grammar mutator --- custom_mutators/grammar_mutator/GRAMMAR_VERSION | 2 +- custom_mutators/grammar_mutator/grammar_mutator | 2 +- docs/Changelog.md | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'docs/Changelog.md') diff --git a/custom_mutators/grammar_mutator/GRAMMAR_VERSION b/custom_mutators/grammar_mutator/GRAMMAR_VERSION index 4c291bf8..2568c6a5 100644 --- a/custom_mutators/grammar_mutator/GRAMMAR_VERSION +++ b/custom_mutators/grammar_mutator/GRAMMAR_VERSION @@ -1 +1 @@ -9716f1092737be0f18e59a449070687a31c5f41e +ff4e5a2 diff --git a/custom_mutators/grammar_mutator/grammar_mutator b/custom_mutators/grammar_mutator/grammar_mutator index 9716f109..ff4e5a26 160000 --- a/custom_mutators/grammar_mutator/grammar_mutator +++ b/custom_mutators/grammar_mutator/grammar_mutator @@ -1 +1 @@ -Subproject commit 9716f1092737be0f18e59a449070687a31c5f41e +Subproject commit ff4e5a265daf5d88c4a636fb6a2c22b1d733db09 diff --git a/docs/Changelog.md b/docs/Changelog.md index 76470bde..c681c4e1 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -25,6 +25,7 @@ - plugins are now activated by default and a new module is included that produces drcov compatible traces for lighthouse/lightkeeper/... thanks to @JRomainG to submitting! + - updated the custom grammar mutator ### Version ++4.09c (release) -- cgit 1.4.1 From a518c4d75c15e2e0cb8633361a0162eb70c7965b Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 19 Jan 2024 11:53:44 +0100 Subject: macos --- afl-cmin | 6 ++++- docs/Changelog.md | 1 + test/test-basic.sh | 69 ++++++++++++++++++++++++++++++------------------------ test/test-llvm.sh | 28 +++++++++++++--------- 4 files changed, 62 insertions(+), 42 deletions(-) (limited to 'docs/Changelog.md') diff --git a/afl-cmin b/afl-cmin index 566f157d..4aaf3953 100755 --- a/afl-cmin +++ b/afl-cmin @@ -1,11 +1,15 @@ #!/usr/bin/env sh +SYS=$(uname -s) +test "$SYS" = "Darwin" && { + echo Error: afl-cmin does not work on Apple currently. please use afl-cmin.bash instead. 
+ exit 1 +} export AFL_QUIET=1 export ASAN_OPTIONS=detect_leaks=0 THISPATH=`dirname ${0}` export PATH="${THISPATH}:$PATH" awk -f - -- ${@+"$@"} <<'EOF' #!/usr/bin/awk -f - # awk script to minimize a test corpus of input files # # based on afl-cmin bash script written by Michal Zalewski diff --git a/docs/Changelog.md b/docs/Changelog.md index c681c4e1..ad0f7a5a 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -26,6 +26,7 @@ produces drcov compatible traces for lighthouse/lightkeeper/... thanks to @JRomainG to submitting! - updated the custom grammar mutator + - document afl-cmin does not work on macOS ### Version ++4.09c (release) diff --git a/test/test-basic.sh b/test/test-basic.sh index 61ad4b7c..7005d3ce 100755 --- a/test/test-basic.sh +++ b/test/test-basic.sh @@ -2,6 +2,7 @@ . ./test-pre.sh +OS=$(uname -s) AFL_GCC=afl-gcc $ECHO "$BLUE[*] Testing: ${AFL_GCC}, afl-showmap, afl-fuzz, afl-cmin and afl-tmin" @@ -61,7 +62,7 @@ test "$SYS" = "i686" -o "$SYS" = "x86_64" -o "$SYS" = "amd64" -o "$SYS" = "i86pc } # now we want to be sure that afl-fuzz is working # make sure crash reporter is disabled on Mac OS X - (test "$(uname -s)" = "Darwin" && test $(launchctl list 2>/dev/null | grep -q '\.ReportCrash$') && { + (test "$OS" = "Darwin" && test $(launchctl list 2>/dev/null | grep -q '\.ReportCrash$') && { $ECHO "$RED[!] we cannot run afl-fuzz with enabled crash reporter. Run 'sudo sh afl-system-config'.$RESET" true }) || { @@ -84,16 +85,20 @@ test "$SYS" = "i686" -o "$SYS" = "x86_64" -o "$SYS" = "amd64" -o "$SYS" = "i86pc } echo 000000000000000000000000 > in/in2 echo 111 > in/in3 - mkdir -p in2 - ../afl-cmin -m ${MEM_LIMIT} -i in -o in2 -- ./test-instr.plain >/dev/null 2>&1 # why is afl-forkserver writing to stderr? - CNT=`ls in2/* 2>/dev/null | wc -l` - case "$CNT" in - *2) $ECHO "$GREEN[+] afl-cmin correctly minimized the number of testcases" ;; - *) $ECHO "$RED[!] afl-cmin did not correctly minimize the number of testcases ($CNT)" - CODE=1 - ;; - esac - rm -f in2/in* + test "$OS" = "Darwin" && { + $ECHO "$GREY[*] afl-cmin not available on macOS, cannot test afl-cmin" + } || { + mkdir -p in2 + ../afl-cmin -m ${MEM_LIMIT} -i in -o in2 -- ./test-instr.plain >/dev/null 2>&1 # why is afl-forkserver writing to stderr? + CNT=`ls in2/* 2>/dev/null | wc -l` + case "$CNT" in + *2) $ECHO "$GREEN[+] afl-cmin correctly minimized the number of testcases" ;; + *) $ECHO "$RED[!] afl-cmin did not correctly minimize the number of testcases ($CNT)" + CODE=1 + ;; + esac + rm -f in2/in* + } export AFL_QUIET=1 if command -v bash >/dev/null ; then { ../afl-cmin.bash -m ${MEM_LIMIT} -i in -o in2 -- ./test-instr.plain >/dev/null @@ -182,7 +187,7 @@ test "$SYS" = "i686" -o "$SYS" = "x86_64" -o "$SYS" = "amd64" -o "$SYS" = "i86pc } # now we want to be sure that afl-fuzz is working # make sure crash reporter is disabled on Mac OS X - (test "$(uname -s)" = "Darwin" && test $(launchctl list 2>/dev/null | grep -q '\.ReportCrash$') && { + (test "$OS" = "Darwin" && test $(launchctl list 2>/dev/null | grep -q '\.ReportCrash$') && { $ECHO "$RED[!] we cannot run afl-fuzz with enabled crash reporter. Run 'sudo sh afl-system-config'.$RESET" true }) || { @@ -204,25 +209,29 @@ test "$SYS" = "i686" -o "$SYS" = "x86_64" -o "$SYS" = "amd64" -o "$SYS" = "i86pc } } echo 000000000000000000000000 > in/in2 - echo AAA > in/in3 - mkdir -p in2 - ../afl-cmin -m ${MEM_LIMIT} -i in -o in2 -- ./test-instr.plain >/dev/null 2>&1 # why is afl-forkserver writing to stderr? 
- CNT=`ls in2/* 2>/dev/null | wc -l` - case "$CNT" in - *2) $ECHO "$GREEN[+] afl-cmin correctly minimized the number of testcases" ;; - \ *1|1) { # allow leading whitecase for portability - test -s in2/* && $ECHO "$YELLOW[?] afl-cmin did minimize to one testcase. This can be a bug or due compiler optimization." - test -s in2/* || { - $ECHO "$RED[!] afl-cmin did not correctly minimize the number of testcases ($CNT)" - CODE=1 + echo AAA > in/in2 + test "$OS" = "Darwin" && { + $ECHO "$GREY[*] afl-cmin not available on macOS, cannot test afl-cmin" + } || { + mkdir -p in2 + ../afl-cmin -m ${MEM_LIMIT} -i in -o in2 -- ./test-instr.plain >/dev/null 2>&1 # why is afl-forkserver writing to stderr? + CNT=`ls in2/* 2>/dev/null | wc -l` + case "$CNT" in + *2) $ECHO "$GREEN[+] afl-cmin correctly minimized the number of testcases" ;; + \ *1|1) { # allow leading whitecase for portability + test -s in2/* && $ECHO "$YELLOW[?] afl-cmin did minimize to one testcase. This can be a bug or due compiler optimization." + test -s in2/* || { + $ECHO "$RED[!] afl-cmin did not correctly minimize the number of testcases ($CNT)" + CODE=1 + } } - } - ;; - *) $ECHO "$RED[!] afl-cmin did not correctly minimize the number of testcases ($CNT)" - CODE=1 - ;; - esac - rm -f in2/in* + ;; + *) $ECHO "$RED[!] afl-cmin did not correctly minimize the number of testcases ($CNT)" + CODE=1 + ;; + esac + rm -f in2/in* + } export AFL_QUIET=1 if command -v bash >/dev/null ; then { ../afl-cmin.bash -m ${MEM_LIMIT} -i in -o in2 -- ./test-instr.plain >/dev/null diff --git a/test/test-llvm.sh b/test/test-llvm.sh index 95e43b1c..53bbd7b4 100755 --- a/test/test-llvm.sh +++ b/test/test-llvm.sh @@ -2,6 +2,8 @@ . ./test-pre.sh +OS=$(uname -s) + $ECHO "$BLUE[*] Testing: llvm_mode, afl-showmap, afl-fuzz, afl-cmin and afl-tmin" test -e ../afl-clang-fast -a -e ../split-switches-pass.so && { ../afl-clang-fast -o test-instr.plain ../test-instr.c > /dev/null 2>&1 @@ -123,7 +125,7 @@ test -e ../afl-clang-fast -a -e ../split-switches-pass.so && { } # now we want to be sure that afl-fuzz is working # make sure crash reporter is disabled on Mac OS X - (test "$(uname -s)" = "Darwin" && test $(launchctl list 2>/dev/null | grep -q '\.ReportCrash$') && { + (test "$OS" = "Darwin" && test $(launchctl list 2>/dev/null | grep -q '\.ReportCrash$') && { $ECHO "$RED[!] we cannot run afl-fuzz with enabled crash reporter. Run 'sudo sh afl-system-config'.$RESET" CODE=1 true @@ -146,18 +148,22 @@ test -e ../afl-clang-fast -a -e ../split-switches-pass.so && { } } test "$SYS" = "i686" -o "$SYS" = "x86_64" -o "$SYS" = "amd64" -o "$SYS" = "i86pc" || { + mkdir -p in2 echo 000000000000000000000000 > in/in2 echo 111 > in/in3 - mkdir -p in2 - ../afl-cmin -m ${MEM_LIMIT} -i in -o in2 -- ./test-instr.plain >/dev/null 2>&1 # why is afl-forkserver writing to stderr? - CNT=`ls in2/* 2>/dev/null | wc -l` - case "$CNT" in - *2) $ECHO "$GREEN[+] afl-cmin correctly minimized the number of testcases" ;; - *) $ECHO "$RED[!] afl-cmin did not correctly minimize the number of testcases ($CNT)" - CODE=1 - ;; - esac - rm -f in2/in* + test "$OS" = "Darwin" && { + $ECHO "$GREY[*] afl-cmin not available on macOS, cannot test afl-cmin" + } || { + ../afl-cmin -m ${MEM_LIMIT} -i in -o in2 -- ./test-instr.plain >/dev/null 2>&1 # why is afl-forkserver writing to stderr? + CNT=`ls in2/* 2>/dev/null | wc -l` + case "$CNT" in + *2) $ECHO "$GREEN[+] afl-cmin correctly minimized the number of testcases" ;; + *) $ECHO "$RED[!] 
afl-cmin did not correctly minimize the number of testcases ($CNT)" + CODE=1 + ;; + esac + rm -f in2/in* + } export AFL_QUIET=1 if type bash >/dev/null ; then { ../afl-cmin.bash -m ${MEM_LIMIT} -i in -o in2 -- ./test-instr.plain >/dev/null -- cgit 1.4.1 From 33a129e00cbfcc0f979880270e01cbf58400c75a Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Mon, 22 Jan 2024 11:01:30 +0100 Subject: update changelog --- docs/Changelog.md | 1 + 1 file changed, 1 insertion(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index ad0f7a5a..9accb9da 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -25,6 +25,7 @@ - plugins are now activated by default and a new module is included that produces drcov compatible traces for lighthouse/lightkeeper/... thanks to @JRomainG to submitting! + - updated Nyx checkout (fixes a bug) - updated the custom grammar mutator - document afl-cmin does not work on macOS -- cgit 1.4.1 From 1ffb1b6b2adaedbee7febc9f344c042fd25eae1a Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 26 Jan 2024 16:58:17 +0100 Subject: changelog --- docs/Changelog.md | 3 +++ 1 file changed, 3 insertions(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 9accb9da..38dbba82 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -12,6 +12,7 @@ - afl-cc: - large rewrite by @SonicStark which fixes a few corner cases, thanks! - LTO mode now requires llvm 12+ + - workaround for ASAN with gcc_plugin mode - instrumentation: - LLVM 18 support, thanks to @devnexen! - Injection (SQL, LDAP, XSS) feature now available, see @@ -21,6 +22,8 @@ - due a bug in LLVM 17 integer splitting is disabled there! - when splitting floats was selected, integers were always split as well, fixed to require AFL_LLVM_LAF_SPLIT_COMPARES or _ALL as it should + - dynamic instrumentation filtering for LLVM NATIVE, thanks @Mozilla! + see utils/dynamic_covfilter/README.md - qemu_mode: - plugins are now activated by default and a new module is included that produces drcov compatible traces for lighthouse/lightkeeper/... -- cgit 1.4.1 From ccad11f7eb04e8c0de76fec6fd4b6eab1e940319 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Wed, 31 Jan 2024 14:03:25 +0100 Subject: nyx build script updates --- docs/Changelog.md | 6 ++--- nyx_mode/build_nyx_support.sh | 57 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 51 insertions(+), 12 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 38dbba82..720a0689 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -15,7 +15,7 @@ - workaround for ASAN with gcc_plugin mode - instrumentation: - LLVM 18 support, thanks to @devnexen! - - Injection (SQL, LDAP, XSS) feature now available, see + - Injection (SQL, LDAP, XSS) fuzzing feature now available, see `instrumentation/README.injections.md` how to activate/use/expand. - compcov/LAF-intel: - floating point splitting bug fix by @hexcoder @@ -28,9 +28,9 @@ - plugins are now activated by default and a new module is included that produces drcov compatible traces for lighthouse/lightkeeper/... thanks to @JRomainG to submitting! 
- - updated Nyx checkout (fixes a bug) + - updated Nyx checkout (fixes a bug) and some QOL - updated the custom grammar mutator - - document afl-cmin does not work on macOS + - document afl-cmin does not work on macOS (but afl-cmin.bash does) ### Version ++4.09c (release) diff --git a/nyx_mode/build_nyx_support.sh b/nyx_mode/build_nyx_support.sh index 581a8292..454d1e7b 100755 --- a/nyx_mode/build_nyx_support.sh +++ b/nyx_mode/build_nyx_support.sh @@ -28,6 +28,7 @@ echo "[*] Making sure all Nyx is checked out" if git status 1>/dev/null 2>&1; then + set +e git submodule init echo "[*] initializing QEMU-Nyx submodule" git submodule update ./QEMU-Nyx 2>/dev/null # ignore errors @@ -35,6 +36,7 @@ if git status 1>/dev/null 2>&1; then git submodule update ./packer 2>/dev/null # ignore errors echo "[*] initializing libnyx submodule" git submodule update ./libnyx 2>/dev/null # ignore errors + set -e else @@ -48,20 +50,57 @@ test -e packer/.git || { echo "[-] packer not checked out, please install git or test -e libnyx/.git || { echo "[-] libnyx not checked out, please install git or check your internet connection." ; exit 1 ; } test -e QEMU-Nyx/.git || { echo "[-] QEMU-Nyx not checked out, please install git or check your internet connection." ; exit 1 ; } -echo "[*] checking packer init.cpio.gz ..." -if [ ! -f "packer/linux_initramfs/init.cpio.gz" ]; then - (cd packer/linux_initramfs/ && sh pack.sh) + +QEMU_NYX_VERSION="$(cat ./QEMU_NYX_VERSION)" +cd "./QEMU-Nyx" || exit 1 +if [ -n "$NO_CHECKOUT" ]; then + echo "[*] Skipping checkout to $QEMU_NYX_VERSION" +else + echo "[*] Checking out $QEMU_NYX_VERSION" + set +e + sh -c 'git stash' 1>/dev/null 2>/dev/null + git pull 1>/dev/null 2>/dev/null + git checkout "$QEMU_NYX_VERSION" || echo Warning: could not check out to commit $QEMU_NYX_VERSION + set -e fi +cd - > /dev/null -echo "[*] Checking libnyx ..." -if [ ! -f "libnyx/libnyx/target/release/liblibnyx.a" ]; then - (cd libnyx/libnyx && cargo build --release) +PACKER_VERSION="$(cat ./PACKER_VERSION)" +cd "./packer" || exit 1 +if [ -n "$NO_CHECKOUT" ]; then + echo "[*] Skipping checkout to $PACKER_VERSION" +else + echo "[*] Checking out $PACKER_VERSION" + set +e + sh -c 'git stash' 1>/dev/null 2>/dev/null + git pull 1>/dev/null 2>/dev/null + git checkout "$PACKER_VERSION" || echo Warning: could not check out to commit $PACKER_VERSION + set -e fi +cd - > /dev/null -echo "[*] Checking QEMU-Nyx ..." -if [ ! -f "QEMU-Nyx/x86_64-softmmu/qemu-system-x86_64" ]; then - (cd QEMU-Nyx && ./compile_qemu_nyx.sh static) +LIBNYX_VERSION="$(cat ./LIBNYX_VERSION)" +cd "./libnyx/" || exit 1 +if [ -n "$NO_CHECKOUT" ]; then + echo "[*] Skipping checkout to $LIBNYX_VERSION" +else + echo "[*] Checking out $LIBNYX_VERSION" + set +e + sh -c 'git stash' 1>/dev/null 2>/dev/null + git pull 1>/dev/null 2>/dev/null + git checkout "$LIBNYX_VERSION" || echo Warning: could not check out to commit $LIBNYX_VERSION + set -e fi +cd - > /dev/null + +echo "[*] checking packer init.cpio.gz ..." +(cd packer/linux_initramfs/ && sh pack.sh) + +echo "[*] Checking libnyx ..." +(cd libnyx/libnyx && cargo build --release) + +echo "[*] Checking QEMU-Nyx ..." +(cd QEMU-Nyx && ./compile_qemu_nyx.sh static ) echo "[*] Checking libnyx.so ..." 
cp libnyx/libnyx/target/release/liblibnyx.so ../libnyx.so -- cgit 1.4.1 From 5ba66a8860657b21c45480f1d565634cfe38a7dc Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 1 Feb 2024 15:22:51 +0100 Subject: final touches for skipdet --- GNUmakefile.llvm | 2 +- docs/Changelog.md | 2 ++ src/afl-forkserver.c | 4 +++- src/afl-fuzz-state.c | 2 +- src/afl-fuzz.c | 22 ++++++++++++++-------- test/test-custom-mutators.sh | 4 ++-- 6 files changed, 23 insertions(+), 13 deletions(-) (limited to 'docs/Changelog.md') diff --git a/GNUmakefile.llvm b/GNUmakefile.llvm index 7437130d..ec8fefe4 100644 --- a/GNUmakefile.llvm +++ b/GNUmakefile.llvm @@ -45,7 +45,7 @@ endif LLVMVER = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/git//' | sed 's/svn//' ) LLVM_MAJOR = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/\..*//' ) LLVM_MINOR = $(shell $(LLVM_CONFIG) --version 2>/dev/null | sed 's/.*\.//' | sed 's/git//' | sed 's/svn//' | sed 's/ .*//' ) -LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^[0-2]\.|^3.[0-7]\.' && echo 1 || echo 0 ) +LLVM_UNSUPPORTED = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^[0-2]\.|^3.[0-8]\.' && echo 1 || echo 0 ) LLVM_TOO_NEW = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^1[8-9]|^2[0-9]' && echo 1 || echo 0 ) LLVM_TOO_OLD = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^[1-9]\.|^1[012]\.' && echo 1 || echo 0 ) LLVM_NEW_API = $(shell $(LLVM_CONFIG) --version 2>/dev/null | grep -E -q '^1[0-9]' && echo 1 || echo 0 ) diff --git a/docs/Changelog.md b/docs/Changelog.md index 720a0689..29081549 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -9,6 +9,8 @@ explore is slightly better now. - fixed minor issues in the mutation engine, thanks to @futhewo for reporting! + - better deterministic fuzzing is now available, benchmarks have shown + to improve fuzzing. Enable with -D. Thanks to @kdsjZh for the PR! - afl-cc: - large rewrite by @SonicStark which fixes a few corner cases, thanks! 
- LTO mode now requires llvm 12+ diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index 214b4fe9..ded0c21d 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -1019,7 +1019,9 @@ void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv, if (status >= 0x41464c00 && status <= 0x41464cff) { - FATAL("Target uses the new forkserver model, you need to switch to a newer afl-fuzz too!"); + FATAL( + "Target uses the new forkserver model, you need to switch to a newer " + "afl-fuzz too!"); } diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index 6cf580ce..b647ac84 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -102,7 +102,7 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) { afl->stats_update_freq = 1; afl->stats_file_update_freq_msecs = STATS_UPDATE_SEC * 1000; afl->stats_avg_exec = 0; - afl->skip_deterministic = 0; + afl->skip_deterministic = 1; afl->sync_time = SYNC_TIME; afl->cmplog_lvl = 2; afl->min_length = 1; diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 7db1aeb3..69064d51 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -170,7 +170,7 @@ static void usage(u8 *argv0, int more_help) { " -g minlength - set min length of generated fuzz input (default: 1)\n" " -G maxlength - set max length of generated fuzz input (default: " "%lu)\n" - " -D - enable deterministic fuzzing (once per queue entry)\n" + " -D - enable (a new) effective deterministic fuzzing\n" " -L minutes - use MOpt(imize) mode and set the time limit for " "entering the\n" " pacemaker mode (minutes of no new finds). 0 = " @@ -955,14 +955,20 @@ int main(int argc, char **argv_orig, char **envp) { break; - case 'D': /* no deterministic */ + case 'D': /* partial deterministic */ - afl->skip_deterministic = 1; + afl->skip_deterministic = 0; break; - case 'd': /* partial deterministic */ + case 'd': /* no deterministic */ - afl->skip_deterministic = 0; + // this is the default and currently a lot of infrastructure enforces + // it (e.g. clusterfuzz, fuzzbench) based on that this feature + // originally was bad performance wise. We now have a better + // implementation, hence if it is activated, we do not want to + // deactivate it by such setups. 
+ + // afl->skip_deterministic = 1; break; case 'B': /* load bitmap */ @@ -1424,11 +1430,11 @@ int main(int argc, char **argv_orig, char **envp) { } #endif + + // silently disable deterministic mutation if custom mutators are used if (!afl->skip_deterministic && afl->afl_env.afl_custom_mutator_only) { - FATAL( - "Using -D determinstic fuzzing is incompatible with " - "AFL_CUSTOM_MUTATOR_ONLY!"); + afl->skip_deterministic = 1; } diff --git a/test/test-custom-mutators.sh b/test/test-custom-mutators.sh index 49feedc0..8c8b0ad3 100755 --- a/test/test-custom-mutators.sh +++ b/test/test-custom-mutators.sh @@ -38,7 +38,7 @@ test -e test-custom-mutator.c -a -e ${CUSTOM_MUTATOR_PATH}/example.c -a -e ${CUS # Run afl-fuzz w/ the C mutator $ECHO "$GREY[*] running afl-fuzz for the C mutator, this will take approx 10 seconds" { - AFL_CUSTOM_MUTATOR_LIBRARY=./libexamplemutator.so AFL_CUSTOM_MUTATOR_ONLY=1 ../afl-fuzz -V07 -m ${MEM_LIMIT} -i in -o out -- ./test-custom-mutator >>errors 2>&1 + AFL_CUSTOM_MUTATOR_LIBRARY=./libexamplemutator.so AFL_CUSTOM_MUTATOR_ONLY=1 ../afl-fuzz -V07 -m ${MEM_LIMIT} -i in -o out -d -- ./test-custom-mutator >>errors 2>&1 } >>errors 2>&1 # Check results @@ -58,7 +58,7 @@ test -e test-custom-mutator.c -a -e ${CUSTOM_MUTATOR_PATH}/example.c -a -e ${CUS # Run afl-fuzz w/ multiple C mutators $ECHO "$GREY[*] running afl-fuzz with multiple custom C mutators, this will take approx 10 seconds" { - AFL_CUSTOM_MUTATOR_LIBRARY="./libexamplemutator.so;./libexamplemutator2.so" AFL_CUSTOM_MUTATOR_ONLY=1 ../afl-fuzz -V07 -m ${MEM_LIMIT} -i in -o out -- ./test-multiple-mutators >>errors 2>&1 + AFL_CUSTOM_MUTATOR_LIBRARY="./libexamplemutator.so;./libexamplemutator2.so" AFL_CUSTOM_MUTATOR_ONLY=1 ../afl-fuzz -V07 -m ${MEM_LIMIT} -i in -o out -d -- ./test-multiple-mutators >>errors 2>&1 } >>errors 2>&1 test -n "$( ls out/default/crashes/id:000000* 2>/dev/null )" && { # TODO: update here -- cgit 1.4.1 From ed1a6f8a570c6fcabee962f402d8d58f6cea77b7 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sat, 3 Feb 2024 11:01:31 +0100 Subject: 2024 v4.10c release --- GNUmakefile.gcc_plugin | 2 +- README.md | 4 ++-- afl-cmin.bash | 2 +- afl-whatsup | 2 +- docs/Changelog.md | 3 +-- frida_mode/Scripting.md | 2 +- frida_mode/test/cmplog/cmplog.c | 2 +- frida_mode/test/deferred/testinstr.c | 2 +- frida_mode/test/dynamic/testinstr.c | 2 +- frida_mode/test/entry_point/testinstr.c | 2 +- frida_mode/test/exe/testinstr.c | 2 +- frida_mode/test/js/test.c | 2 +- frida_mode/test/js/test2.c | 2 +- frida_mode/test/output/testinstr.c | 2 +- frida_mode/test/perf/perf.c | 2 +- frida_mode/test/persistent_ret/testinstr.c | 2 +- frida_mode/test/testinstr/testinstr.c | 2 +- frida_mode/test/unstable/unstable.c | 2 +- frida_mode/util/frida_get_symbol_addr.sh | 2 +- include/afl-as.h | 2 +- include/afl-fuzz.h | 2 +- include/afl-prealloc.h | 2 +- include/alloc-inl.h | 2 +- include/cmplog.h | 2 +- include/common.h | 2 +- include/config.h | 4 ++-- include/debug.h | 2 +- include/forkserver.h | 2 +- include/hash.h | 2 +- include/list.h | 2 +- include/sharedmem.h | 2 +- include/snapshot-inl.h | 2 +- include/types.h | 2 +- include/xxhash.h | 2 +- instrumentation/afl-compiler-rt.o.c | 2 +- instrumentation/afl-gcc-cmplog-pass.so.cc | 2 +- instrumentation/afl-gcc-cmptrs-pass.so.cc | 2 +- instrumentation/afl-gcc-common.h | 2 +- instrumentation/afl-gcc-pass.so.cc | 2 +- instrumentation/afl-llvm-dict2file.so.cc | 2 +- instrumentation/afl-llvm-lto-instrumentlist.so.cc | 2 +- instrumentation/afl-llvm-pass.so.cc | 2 +- 
instrumentation/cmplog-instructions-pass.cc | 2 +- instrumentation/cmplog-routines-pass.cc | 2 +- instrumentation/cmplog-switches-pass.cc | 2 +- instrumentation/injection-pass.cc | 2 +- qemu_mode/build_qemu_support.sh | 2 +- qemu_mode/fastexit/Makefile | 2 +- qemu_mode/libcompcov/Makefile | 2 +- qemu_mode/libcompcov/compcovtest.cc | 2 +- qemu_mode/libcompcov/libcompcov.so.c | 2 +- qemu_mode/libqasan/Makefile | 2 +- qemu_mode/libqasan/hooks.c | 2 +- qemu_mode/libqasan/libqasan.c | 2 +- qemu_mode/libqasan/libqasan.h | 2 +- qemu_mode/libqasan/malloc.c | 2 +- qemu_mode/libqasan/patch.c | 2 +- qemu_mode/libqasan/string.c | 2 +- qemu_mode/libqasan/uninstrument.c | 2 +- qemu_mode/unsigaction/Makefile | 2 +- qemu_mode/util/qemu_get_symbol_addr.sh | 2 +- src/afl-analyze.c | 2 +- src/afl-as.c | 2 +- src/afl-cc.c | 2 +- src/afl-common.c | 2 +- src/afl-forkserver.c | 2 +- src/afl-fuzz-bitmap.c | 2 +- src/afl-fuzz-cmplog.c | 2 +- src/afl-fuzz-extras.c | 2 +- src/afl-fuzz-init.c | 2 +- src/afl-fuzz-mutators.c | 2 +- src/afl-fuzz-one.c | 2 +- src/afl-fuzz-python.c | 2 +- src/afl-fuzz-queue.c | 2 +- src/afl-fuzz-redqueen.c | 2 +- src/afl-fuzz-run.c | 2 +- src/afl-fuzz-state.c | 2 +- src/afl-fuzz-stats.c | 2 +- src/afl-fuzz.c | 2 +- src/afl-gotcpu.c | 2 +- src/afl-ld-lto.c | 2 +- src/afl-sharedmem.c | 2 +- src/afl-showmap.c | 2 +- src/afl-tmin.c | 2 +- test-instr.c | 2 +- unicorn_mode/build_unicorn_support.sh | 2 +- utils/afl_network_proxy/afl-network-client.c | 2 +- utils/afl_network_proxy/afl-network-server.c | 2 +- utils/afl_proxy/afl-proxy.c | 2 +- utils/afl_untracer/afl-untracer.c | 2 +- utils/afl_untracer/libtestinstr.c | 2 +- utils/argv_fuzzing/Makefile | 2 +- utils/argv_fuzzing/argvfuzz.c | 2 +- utils/distributed_fuzzing/sync_script.sh | 2 +- utils/libdislocator/libdislocator.so.c | 2 +- utils/libtokencap/libtokencap.so.c | 2 +- utils/persistent_mode/test-instr.c | 2 +- 97 files changed, 99 insertions(+), 100 deletions(-) (limited to 'docs/Changelog.md') diff --git a/GNUmakefile.gcc_plugin b/GNUmakefile.gcc_plugin index 16c98399..8f06792d 100644 --- a/GNUmakefile.gcc_plugin +++ b/GNUmakefile.gcc_plugin @@ -11,7 +11,7 @@ # from Laszlo Szekeres. # # Copyright 2015 Google Inc. All rights reserved. -# Copyright 2019-2023 AFLplusplus Project. All rights reserved. +# Copyright 2019-2024 AFLplusplus Project. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index fd48cb14..f713e971 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,9 @@ AFL++ logo -Release version: [4.09c](https://github.com/AFLplusplus/AFLplusplus/releases) +Release version: [4.10c](https://github.com/AFLplusplus/AFLplusplus/releases) -GitHub version: 4.10a +GitHub version: 4.10c Repository: [https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus) diff --git a/afl-cmin.bash b/afl-cmin.bash index fda48fb4..6c271220 100755 --- a/afl-cmin.bash +++ b/afl-cmin.bash @@ -7,7 +7,7 @@ # # Copyright 2014, 2015 Google Inc. All rights reserved. # -# Copyright 2019-2023 AFLplusplus +# Copyright 2019-2024 AFLplusplus # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/afl-whatsup b/afl-whatsup index 5b7cbcd6..aa081e41 100755 --- a/afl-whatsup +++ b/afl-whatsup @@ -6,7 +6,7 @@ # Originally written by Michal Zalewski # # Copyright 2015 Google Inc. All rights reserved. 
-# Copyright 2019-2023 AFLplusplus Project. All rights reserved. +# Copyright 2019-2024 AFLplusplus Project. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/docs/Changelog.md b/docs/Changelog.md index 29081549..48003f4b 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -3,7 +3,7 @@ This is the list of all noteworthy changes made in every public release of the tool. See README.md for the general instruction manual. -### Version ++4.10a (dev) +### Version ++4.10c (release) - afl-fuzz: - default power schedule is now EXPLORE, due a fix in fast schedules explore is slightly better now. @@ -34,7 +34,6 @@ - updated the custom grammar mutator - document afl-cmin does not work on macOS (but afl-cmin.bash does) - ### Version ++4.09c (release) - afl-fuzz: - fixed the new mutation implementation for two bugs diff --git a/frida_mode/Scripting.md b/frida_mode/Scripting.md index dfd09e7b..653687f0 100644 --- a/frida_mode/Scripting.md +++ b/frida_mode/Scripting.md @@ -390,7 +390,7 @@ Consider the [following](test/js/test2.c) test code... -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/frida_mode/test/cmplog/cmplog.c b/frida_mode/test/cmplog/cmplog.c index 2565b35c..d397f36e 100644 --- a/frida_mode/test/cmplog/cmplog.c +++ b/frida_mode/test/cmplog/cmplog.c @@ -2,7 +2,7 @@ // // Author: Mateusz Jurczyk (mjurczyk@google.com) // -// Copyright 2019-2023 Google LLC +// Copyright 2019-2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/frida_mode/test/deferred/testinstr.c b/frida_mode/test/deferred/testinstr.c index 0ab44582..4e5124ed 100644 --- a/frida_mode/test/deferred/testinstr.c +++ b/frida_mode/test/deferred/testinstr.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/frida_mode/test/dynamic/testinstr.c b/frida_mode/test/dynamic/testinstr.c index 8b285f6d..0abc61fd 100644 --- a/frida_mode/test/dynamic/testinstr.c +++ b/frida_mode/test/dynamic/testinstr.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at: diff --git a/frida_mode/test/entry_point/testinstr.c b/frida_mode/test/entry_point/testinstr.c index 24d9a615..75e71bda 100644 --- a/frida_mode/test/entry_point/testinstr.c +++ b/frida_mode/test/entry_point/testinstr.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/frida_mode/test/exe/testinstr.c b/frida_mode/test/exe/testinstr.c index d965502e..7b603659 100644 --- a/frida_mode/test/exe/testinstr.c +++ b/frida_mode/test/exe/testinstr.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/frida_mode/test/js/test.c b/frida_mode/test/js/test.c index 87c9cdf6..9799bf3b 100644 --- a/frida_mode/test/js/test.c +++ b/frida_mode/test/js/test.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/frida_mode/test/js/test2.c b/frida_mode/test/js/test2.c index 6b680a24..60b30eb5 100644 --- a/frida_mode/test/js/test2.c +++ b/frida_mode/test/js/test2.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/frida_mode/test/output/testinstr.c b/frida_mode/test/output/testinstr.c index d965502e..7b603659 100644 --- a/frida_mode/test/output/testinstr.c +++ b/frida_mode/test/output/testinstr.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at: diff --git a/frida_mode/test/perf/perf.c b/frida_mode/test/perf/perf.c index d9626974..55efba26 100644 --- a/frida_mode/test/perf/perf.c +++ b/frida_mode/test/perf/perf.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/frida_mode/test/persistent_ret/testinstr.c b/frida_mode/test/persistent_ret/testinstr.c index 12365ceb..85aa2b80 100644 --- a/frida_mode/test/persistent_ret/testinstr.c +++ b/frida_mode/test/persistent_ret/testinstr.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/frida_mode/test/testinstr/testinstr.c b/frida_mode/test/testinstr/testinstr.c index d965502e..7b603659 100644 --- a/frida_mode/test/testinstr/testinstr.c +++ b/frida_mode/test/testinstr/testinstr.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/frida_mode/test/unstable/unstable.c b/frida_mode/test/unstable/unstable.c index a87b6c74..16978e7e 100644 --- a/frida_mode/test/unstable/unstable.c +++ b/frida_mode/test/unstable/unstable.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/frida_mode/util/frida_get_symbol_addr.sh b/frida_mode/util/frida_get_symbol_addr.sh index 2e682255..53d5b802 100755 --- a/frida_mode/util/frida_get_symbol_addr.sh +++ b/frida_mode/util/frida_get_symbol_addr.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 AFLplusplus +# Copyright 2024 AFLplusplus # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/include/afl-as.h b/include/afl-as.h index 486314e2..612f34f4 100644 --- a/include/afl-as.h +++ b/include/afl-as.h @@ -10,7 +10,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index c2b09b2e..c24f39e2 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -10,7 +10,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/afl-prealloc.h b/include/afl-prealloc.h index d19a7b52..3c621d79 100644 --- a/include/afl-prealloc.h +++ b/include/afl-prealloc.h @@ -10,7 +10,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/alloc-inl.h b/include/alloc-inl.h index cff808b2..0aa417be 100644 --- a/include/alloc-inl.h +++ b/include/alloc-inl.h @@ -10,7 +10,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/cmplog.h b/include/cmplog.h index e4821444..6bfc146b 100644 --- a/include/cmplog.h +++ b/include/cmplog.h @@ -12,7 +12,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/common.h b/include/common.h index a9739a7d..0df07dee 100644 --- a/include/common.h +++ b/include/common.h @@ -10,7 +10,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/config.h b/include/config.h index 7ad73c2f..9349828f 100644 --- a/include/config.h +++ b/include/config.h @@ -10,7 +10,7 @@ Heiko Eissfeldt , Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ /* Version string: */ // c = release, a = volatile github dev, e = experimental branch -#define VERSION "++4.10a" +#define VERSION "++4.10c" /****************************************************** * * diff --git a/include/debug.h b/include/debug.h index 234d8fc4..4b812f8e 100644 --- a/include/debug.h +++ b/include/debug.h @@ -10,7 +10,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/forkserver.h b/include/forkserver.h index f1d3b5b1..be7f9e8d 100644 --- a/include/forkserver.h +++ b/include/forkserver.h @@ -12,7 +12,7 @@ Dominik Maier > Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/hash.h b/include/hash.h index 0243c5b7..5d56a108 100644 --- a/include/hash.h +++ b/include/hash.h @@ -15,7 +15,7 @@ Other code written by Michal Zalewski Copyright 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/list.h b/include/list.h index 283bf035..441eccd3 100644 --- a/include/list.h +++ b/include/list.h @@ -10,7 +10,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/sharedmem.h b/include/sharedmem.h index d32bd845..4484066e 100644 --- a/include/sharedmem.h +++ b/include/sharedmem.h @@ -12,7 +12,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/snapshot-inl.h b/include/snapshot-inl.h index 3864e473..b2c81402 100644 --- a/include/snapshot-inl.h +++ b/include/snapshot-inl.h @@ -12,7 +12,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/types.h b/include/types.h index d6476d82..22332135 100644 --- a/include/types.h +++ b/include/types.h @@ -10,7 +10,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/include/xxhash.h b/include/xxhash.h index a8bd6f27..9a880470 100644 --- a/include/xxhash.h +++ b/include/xxhash.h @@ -1,7 +1,7 @@ /* * xxHash - Extremely Fast Hash algorithm * Header File - * Copyright (C) 2012-2023 Yann Collet + * Copyright (C) 2012-2024 Yann Collet * * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php) * diff --git a/instrumentation/afl-compiler-rt.o.c b/instrumentation/afl-compiler-rt.o.c index 8e55d6a0..caa3c3a8 100644 --- a/instrumentation/afl-compiler-rt.o.c +++ b/instrumentation/afl-compiler-rt.o.c @@ -3,7 +3,7 @@ ------------------------------------------------ Copyright 2015, 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/instrumentation/afl-gcc-cmplog-pass.so.cc b/instrumentation/afl-gcc-cmplog-pass.so.cc index b4e6fda9..774dd5fd 100644 --- a/instrumentation/afl-gcc-cmplog-pass.so.cc +++ b/instrumentation/afl-gcc-cmplog-pass.so.cc @@ -3,7 +3,7 @@ Copyright 2014-2019 Free Software Foundation, Inc Copyright 2015, 2016 Google Inc. All rights reserved. Copyright 2019-2020 AFLplusplus Project. All rights reserved. - Copyright 2019-2023 AdaCore + Copyright 2019-2024 AdaCore Written by Alexandre Oliva , based on the AFL++ LLVM CmpLog pass by Andrea Fioraldi , and diff --git a/instrumentation/afl-gcc-cmptrs-pass.so.cc b/instrumentation/afl-gcc-cmptrs-pass.so.cc index c56263dd..929a9d7a 100644 --- a/instrumentation/afl-gcc-cmptrs-pass.so.cc +++ b/instrumentation/afl-gcc-cmptrs-pass.so.cc @@ -3,7 +3,7 @@ Copyright 2014-2019 Free Software Foundation, Inc Copyright 2015, 2016 Google Inc. All rights reserved. Copyright 2019-2020 AFLplusplus Project. All rights reserved. - Copyright 2019-2023 AdaCore + Copyright 2019-2024 AdaCore Written by Alexandre Oliva , based on the AFL++ LLVM CmpLog Routines pass by Andrea Fioraldi diff --git a/instrumentation/afl-gcc-common.h b/instrumentation/afl-gcc-common.h index 1d5eb466..80ded57d 100644 --- a/instrumentation/afl-gcc-common.h +++ b/instrumentation/afl-gcc-common.h @@ -2,7 +2,7 @@ Copyright 2014-2019 Free Software Foundation, Inc Copyright 2015, 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AdaCore + Copyright 2019-2024 AdaCore Written by Alexandre Oliva , based on the AFL++ GCC plugin. diff --git a/instrumentation/afl-gcc-pass.so.cc b/instrumentation/afl-gcc-pass.so.cc index 4d7fd0ef..41b1e5af 100644 --- a/instrumentation/afl-gcc-pass.so.cc +++ b/instrumentation/afl-gcc-pass.so.cc @@ -2,7 +2,7 @@ Copyright 2014-2019 Free Software Foundation, Inc Copyright 2015, 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AdaCore + Copyright 2019-2024 AdaCore Written by Alexandre Oliva , based on the AFL LLVM pass by Laszlo Szekeres and Michal diff --git a/instrumentation/afl-llvm-dict2file.so.cc b/instrumentation/afl-llvm-dict2file.so.cc index c60f3e06..ac497b5b 100644 --- a/instrumentation/afl-llvm-dict2file.so.cc +++ b/instrumentation/afl-llvm-dict2file.so.cc @@ -4,7 +4,7 @@ Written by Marc Heuse - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/instrumentation/afl-llvm-lto-instrumentlist.so.cc b/instrumentation/afl-llvm-lto-instrumentlist.so.cc index 61f97d77..e0899cd3 100644 --- a/instrumentation/afl-llvm-lto-instrumentlist.so.cc +++ b/instrumentation/afl-llvm-lto-instrumentlist.so.cc @@ -9,7 +9,7 @@ from afl-as.c are Michal's fault. Copyright 2015, 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/instrumentation/afl-llvm-pass.so.cc b/instrumentation/afl-llvm-pass.so.cc index 052488a9..62f5023d 100644 --- a/instrumentation/afl-llvm-pass.so.cc +++ b/instrumentation/afl-llvm-pass.so.cc @@ -12,7 +12,7 @@ NGRAM previous location coverage comes from Adrian Herrera. Copyright 2015, 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/instrumentation/cmplog-instructions-pass.cc b/instrumentation/cmplog-instructions-pass.cc index 8be8c294..dc60221e 100644 --- a/instrumentation/cmplog-instructions-pass.cc +++ b/instrumentation/cmplog-instructions-pass.cc @@ -5,7 +5,7 @@ Written by Andrea Fioraldi Copyright 2015, 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/instrumentation/cmplog-routines-pass.cc b/instrumentation/cmplog-routines-pass.cc index b27e06e0..78317d5d 100644 --- a/instrumentation/cmplog-routines-pass.cc +++ b/instrumentation/cmplog-routines-pass.cc @@ -5,7 +5,7 @@ Written by Andrea Fioraldi Copyright 2015, 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/instrumentation/cmplog-switches-pass.cc b/instrumentation/cmplog-switches-pass.cc index 01da6da7..3e05c13d 100644 --- a/instrumentation/cmplog-switches-pass.cc +++ b/instrumentation/cmplog-switches-pass.cc @@ -5,7 +5,7 @@ Written by Andrea Fioraldi Copyright 2015, 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/instrumentation/injection-pass.cc b/instrumentation/injection-pass.cc index 971b103b..2280208b 100644 --- a/instrumentation/injection-pass.cc +++ b/instrumentation/injection-pass.cc @@ -5,7 +5,7 @@ Written by Marc Heuse Copyright 2015, 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/qemu_mode/build_qemu_support.sh b/qemu_mode/build_qemu_support.sh index 3f8a88f2..45019cc8 100755 --- a/qemu_mode/build_qemu_support.sh +++ b/qemu_mode/build_qemu_support.sh @@ -13,7 +13,7 @@ # counters by Andrea Fioraldi # # Copyright 2015, 2016, 2017 Google Inc. All rights reserved. -# Copyright 2019-2023 AFLplusplus Project. All rights reserved. +# Copyright 2019-2024 AFLplusplus Project. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/qemu_mode/fastexit/Makefile b/qemu_mode/fastexit/Makefile index c7b79277..be80207d 100644 --- a/qemu_mode/fastexit/Makefile +++ b/qemu_mode/fastexit/Makefile @@ -4,7 +4,7 @@ # # Written by Andrea Fioraldi # -# Copyright 2019-2023 Andrea Fioraldi. All rights reserved. +# Copyright 2019-2024 Andrea Fioraldi. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/qemu_mode/libcompcov/Makefile b/qemu_mode/libcompcov/Makefile index 7260df87..4761ac02 100644 --- a/qemu_mode/libcompcov/Makefile +++ b/qemu_mode/libcompcov/Makefile @@ -4,7 +4,7 @@ # # Written by Andrea Fioraldi # -# Copyright 2019-2023 Andrea Fioraldi. All rights reserved. +# Copyright 2019-2024 Andrea Fioraldi. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/qemu_mode/libcompcov/compcovtest.cc b/qemu_mode/libcompcov/compcovtest.cc index 23215013..11797091 100644 --- a/qemu_mode/libcompcov/compcovtest.cc +++ b/qemu_mode/libcompcov/compcovtest.cc @@ -2,7 +2,7 @@ // // Author: Mateusz Jurczyk (mjurczyk@google.com) // -// Copyright 2019-2023 Google LLC +// Copyright 2019-2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/qemu_mode/libcompcov/libcompcov.so.c b/qemu_mode/libcompcov/libcompcov.so.c index b57e9701..36f7b2e2 100644 --- a/qemu_mode/libcompcov/libcompcov.so.c +++ b/qemu_mode/libcompcov/libcompcov.so.c @@ -5,7 +5,7 @@ Written and maintained by Andrea Fioraldi - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/qemu_mode/libqasan/Makefile b/qemu_mode/libqasan/Makefile index 61782894..7366d6f6 100644 --- a/qemu_mode/libqasan/Makefile +++ b/qemu_mode/libqasan/Makefile @@ -4,7 +4,7 @@ # # Written by Andrea Fioraldi # -# Copyright 2019-2023 Andrea Fioraldi. All rights reserved. +# Copyright 2019-2024 Andrea Fioraldi. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/qemu_mode/libqasan/hooks.c b/qemu_mode/libqasan/hooks.c index a9fd0ce9..cf1b0820 100644 --- a/qemu_mode/libqasan/hooks.c +++ b/qemu_mode/libqasan/hooks.c @@ -1,5 +1,5 @@ /******************************************************************************* -Copyright (c) 2019-2023, Andrea Fioraldi +Copyright (c) 2019-2024, Andrea Fioraldi Redistribution and use in source and binary forms, with or without diff --git a/qemu_mode/libqasan/libqasan.c b/qemu_mode/libqasan/libqasan.c index 12be7778..45f47d5a 100644 --- a/qemu_mode/libqasan/libqasan.c +++ b/qemu_mode/libqasan/libqasan.c @@ -1,5 +1,5 @@ /******************************************************************************* -Copyright (c) 2019-2023, Andrea Fioraldi +Copyright (c) 2019-2024, Andrea Fioraldi Redistribution and use in source and binary forms, with or without diff --git a/qemu_mode/libqasan/libqasan.h b/qemu_mode/libqasan/libqasan.h index a430c868..f0844e23 100644 --- a/qemu_mode/libqasan/libqasan.h +++ b/qemu_mode/libqasan/libqasan.h @@ -1,5 +1,5 @@ /******************************************************************************* -Copyright (c) 2019-2023, Andrea Fioraldi +Copyright (c) 2019-2024, Andrea Fioraldi Redistribution and use in source and binary forms, with or without diff --git a/qemu_mode/libqasan/malloc.c b/qemu_mode/libqasan/malloc.c index 4448f480..ae470b56 100644 --- a/qemu_mode/libqasan/malloc.c +++ b/qemu_mode/libqasan/malloc.c @@ -1,5 +1,5 @@ /******************************************************************************* -Copyright (c) 2019-2023, Andrea Fioraldi +Copyright (c) 2019-2024, Andrea Fioraldi Redistribution and use in source and binary forms, with or without diff --git a/qemu_mode/libqasan/patch.c b/qemu_mode/libqasan/patch.c index 38e0903b..4ce8c3d8 100644 --- a/qemu_mode/libqasan/patch.c +++ b/qemu_mode/libqasan/patch.c @@ -1,5 +1,5 @@ /******************************************************************************* -Copyright (c) 2019-2023, Andrea Fioraldi +Copyright (c) 2019-2024, Andrea Fioraldi Redistribution and use in source and binary forms, with or without diff --git a/qemu_mode/libqasan/string.c b/qemu_mode/libqasan/string.c index e17cff4b..cd14d57b 100644 --- a/qemu_mode/libqasan/string.c +++ b/qemu_mode/libqasan/string.c @@ -1,5 +1,5 @@ /******************************************************************************* -Copyright (c) 2019-2023, Andrea Fioraldi +Copyright (c) 2019-2024, Andrea Fioraldi Redistribution and use in source and binary forms, with or without diff --git a/qemu_mode/libqasan/uninstrument.c b/qemu_mode/libqasan/uninstrument.c index e37a9b46..996f2a74 100644 --- a/qemu_mode/libqasan/uninstrument.c +++ b/qemu_mode/libqasan/uninstrument.c @@ -7,7 +7,7 @@ for some strange reason. */ /******************************************************************************* -Copyright (c) 2019-2023, Andrea Fioraldi +Copyright (c) 2019-2024, Andrea Fioraldi Redistribution and use in source and binary forms, with or without diff --git a/qemu_mode/unsigaction/Makefile b/qemu_mode/unsigaction/Makefile index c1a7397f..d5e807d8 100644 --- a/qemu_mode/unsigaction/Makefile +++ b/qemu_mode/unsigaction/Makefile @@ -4,7 +4,7 @@ # # Written by Andrea Fioraldi # -# Copyright 2019-2023 Andrea Fioraldi. All rights reserved. +# Copyright 2019-2024 Andrea Fioraldi. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/qemu_mode/util/qemu_get_symbol_addr.sh b/qemu_mode/util/qemu_get_symbol_addr.sh index e0a7ae80..5e00f1b2 100755 --- a/qemu_mode/util/qemu_get_symbol_addr.sh +++ b/qemu_mode/util/qemu_get_symbol_addr.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 AFLplusplus +# Copyright 2024 AFLplusplus # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/src/afl-analyze.c b/src/afl-analyze.c index 5b122741..95f32fee 100644 --- a/src/afl-analyze.c +++ b/src/afl-analyze.c @@ -9,7 +9,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-as.c b/src/afl-as.c index 772e31b3..09ba75bf 100644 --- a/src/afl-as.c +++ b/src/afl-as.c @@ -9,7 +9,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-cc.c b/src/afl-cc.c index 98310545..e9564277 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -5,7 +5,7 @@ Written by Michal Zalewski, Laszlo Szekeres and Marc Heuse Copyright 2015, 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-common.c b/src/afl-common.c index ba498b3b..87003b03 100644 --- a/src/afl-common.c +++ b/src/afl-common.c @@ -9,7 +9,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index ded0c21d..0a77d61c 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -13,7 +13,7 @@ Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-bitmap.c b/src/afl-fuzz-bitmap.c index 5f67347c..d056ac9f 100644 --- a/src/afl-fuzz-bitmap.c +++ b/src/afl-fuzz-bitmap.c @@ -9,7 +9,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-cmplog.c b/src/afl-fuzz-cmplog.c index 3e6432ca..21f34e12 100644 --- a/src/afl-fuzz-cmplog.c +++ b/src/afl-fuzz-cmplog.c @@ -11,7 +11,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. 
- Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-extras.c b/src/afl-fuzz-extras.c index 905431d1..3b1d13f1 100644 --- a/src/afl-fuzz-extras.c +++ b/src/afl-fuzz-extras.c @@ -9,7 +9,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 057d8cf5..76291cc4 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -9,7 +9,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-mutators.c b/src/afl-fuzz-mutators.c index 17fb9368..ae4d6668 100644 --- a/src/afl-fuzz-mutators.c +++ b/src/afl-fuzz-mutators.c @@ -10,7 +10,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index c163a420..d9c074ec 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -9,7 +9,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-python.c b/src/afl-fuzz-python.c index 4c7da774..16a398fd 100644 --- a/src/afl-fuzz-python.c +++ b/src/afl-fuzz-python.c @@ -9,7 +9,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 67931bba..1ea50418 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -9,7 +9,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index 9e9b3822..eead7a8b 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -11,7 +11,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. 
+ Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 1ee8ebe7..d764952c 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -10,7 +10,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index b647ac84..4467cae8 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -9,7 +9,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index 4b83ad29..76577081 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -9,7 +9,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 69064d51..12d67fe7 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -9,7 +9,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-gotcpu.c b/src/afl-gotcpu.c index 4f851099..7aee2985 100644 --- a/src/afl-gotcpu.c +++ b/src/afl-gotcpu.c @@ -9,7 +9,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-ld-lto.c b/src/afl-ld-lto.c index 7ce5de41..513c1ae9 100644 --- a/src/afl-ld-lto.c +++ b/src/afl-ld-lto.c @@ -9,7 +9,7 @@ Andrea Fioraldi Dominik Maier - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-sharedmem.c b/src/afl-sharedmem.c index a2c81586..daea8f46 100644 --- a/src/afl-sharedmem.c +++ b/src/afl-sharedmem.c @@ -11,7 +11,7 @@ Andrea Fioraldi Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/src/afl-showmap.c b/src/afl-showmap.c index 7a639cf6..20ba5a5e 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -12,7 +12,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/src/afl-tmin.c b/src/afl-tmin.c index e7442d1d..4e5dab41 100644 --- a/src/afl-tmin.c +++ b/src/afl-tmin.c @@ -12,7 +12,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/test-instr.c b/test-instr.c index eda5189c..28552893 100644 --- a/test-instr.c +++ b/test-instr.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/unicorn_mode/build_unicorn_support.sh b/unicorn_mode/build_unicorn_support.sh index d3d16ad5..baca2171 100755 --- a/unicorn_mode/build_unicorn_support.sh +++ b/unicorn_mode/build_unicorn_support.sh @@ -14,7 +14,7 @@ # # # Copyright 2017 Battelle Memorial Institute. All rights reserved. -# Copyright 2019-2023 AFLplusplus Project. All rights reserved. +# Copyright 2019-2024 AFLplusplus Project. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/utils/afl_network_proxy/afl-network-client.c b/utils/afl_network_proxy/afl-network-client.c index 0416f0f9..1f04dd87 100644 --- a/utils/afl_network_proxy/afl-network-client.c +++ b/utils/afl_network_proxy/afl-network-client.c @@ -4,7 +4,7 @@ Written by Marc Heuse - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/utils/afl_network_proxy/afl-network-server.c b/utils/afl_network_proxy/afl-network-server.c index 95b0a551..c4a700f4 100644 --- a/utils/afl_network_proxy/afl-network-server.c +++ b/utils/afl_network_proxy/afl-network-server.c @@ -12,7 +12,7 @@ Dominik Maier Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/utils/afl_proxy/afl-proxy.c b/utils/afl_proxy/afl-proxy.c index 531a97a2..6cf47636 100644 --- a/utils/afl_proxy/afl-proxy.c +++ b/utils/afl_proxy/afl-proxy.c @@ -4,7 +4,7 @@ Written by Marc Heuse - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/utils/afl_untracer/afl-untracer.c b/utils/afl_untracer/afl-untracer.c index 0e3f8a45..e6a74518 100644 --- a/utils/afl_untracer/afl-untracer.c +++ b/utils/afl_untracer/afl-untracer.c @@ -4,7 +4,7 @@ Written by Marc Heuse - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/utils/afl_untracer/libtestinstr.c b/utils/afl_untracer/libtestinstr.c index b7afc325..0a98778a 100644 --- a/utils/afl_untracer/libtestinstr.c +++ b/utils/afl_untracer/libtestinstr.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: diff --git a/utils/argv_fuzzing/Makefile b/utils/argv_fuzzing/Makefile index 6786467a..ba977e5f 100644 --- a/utils/argv_fuzzing/Makefile +++ b/utils/argv_fuzzing/Makefile @@ -2,7 +2,7 @@ # american fuzzy lop++ - argvfuzz # -------------------------------- # -# Copyright 2019-2023 Kjell Braden +# Copyright 2019-2024 Kjell Braden # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/utils/argv_fuzzing/argvfuzz.c b/utils/argv_fuzzing/argvfuzz.c index 41eead0c..47383138 100644 --- a/utils/argv_fuzzing/argvfuzz.c +++ b/utils/argv_fuzzing/argvfuzz.c @@ -2,7 +2,7 @@ american fuzzy lop++ - LD_PRELOAD for fuzzing argv in binaries ------------------------------------------------------------ - Copyright 2019-2023 Kjell Braden + Copyright 2019-2024 Kjell Braden Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/utils/distributed_fuzzing/sync_script.sh b/utils/distributed_fuzzing/sync_script.sh index b22816f1..861b65c8 100755 --- a/utils/distributed_fuzzing/sync_script.sh +++ b/utils/distributed_fuzzing/sync_script.sh @@ -6,7 +6,7 @@ # Originally written by Michal Zalewski # # Copyright 2014 Google Inc. All rights reserved. -# Copyright 2019-2023 AFLplusplus Project. All rights reserved. +# Copyright 2019-2024 AFLplusplus Project. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/utils/libdislocator/libdislocator.so.c b/utils/libdislocator/libdislocator.so.c index 1cd7abc6..b80be1a1 100644 --- a/utils/libdislocator/libdislocator.so.c +++ b/utils/libdislocator/libdislocator.so.c @@ -6,7 +6,7 @@ Originally written by Michal Zalewski Copyright 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/utils/libtokencap/libtokencap.so.c b/utils/libtokencap/libtokencap.so.c index f4024799..cc499150 100644 --- a/utils/libtokencap/libtokencap.so.c +++ b/utils/libtokencap/libtokencap.so.c @@ -6,7 +6,7 @@ Originally written by Michal Zalewski Copyright 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/utils/persistent_mode/test-instr.c b/utils/persistent_mode/test-instr.c index 4ead6577..72e26e93 100644 --- a/utils/persistent_mode/test-instr.c +++ b/utils/persistent_mode/test-instr.c @@ -3,7 +3,7 @@ -------------------------------------------------------- Originally written by Michal Zalewski Copyright 2014 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: -- cgit 1.4.1 From d85722a4f6329940545dd66bf16718d591fca681 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sat, 3 Feb 2024 13:31:31 +0100 Subject: deterministic fuzzing and -z --- docs/Changelog.md | 5 +++++ src/afl-fuzz-state.c | 2 +- src/afl-fuzz.c | 19 +++++++------------ 3 files changed, 13 insertions(+), 13 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 48003f4b..2f0fba33 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -3,6 +3,11 @@ This is the list of all noteworthy changes made in every public release of the tool. See README.md for the general instruction manual. +### Version ++4.20a (dev) + - afl-fuzz: + - the new deterministic fuzzing feature is now activated by default, + deactivate with -z. Parameters -d and -D are ignored. + ### Version ++4.10c (release) - afl-fuzz: - default power schedule is now EXPLORE, due a fix in fast schedules diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index 4467cae8..ae327117 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -102,7 +102,7 @@ void afl_state_init(afl_state_t *afl, uint32_t map_size) { afl->stats_update_freq = 1; afl->stats_file_update_freq_msecs = STATS_UPDATE_SEC * 1000; afl->stats_avg_exec = 0; - afl->skip_deterministic = 1; + afl->skip_deterministic = 0; afl->sync_time = SYNC_TIME; afl->cmplog_lvl = 2; afl->min_length = 1; diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 12d67fe7..b556b4b6 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -170,7 +170,6 @@ static void usage(u8 *argv0, int more_help) { " -g minlength - set min length of generated fuzz input (default: 1)\n" " -G maxlength - set max length of generated fuzz input (default: " "%lu)\n" - " -D - enable (a new) effective deterministic fuzzing\n" " -L minutes - use MOpt(imize) mode and set the time limit for " "entering the\n" " pacemaker mode (minutes of no new finds). 
0 = " @@ -213,7 +212,8 @@ static void usage(u8 *argv0, int more_help) { " -F path - sync to a foreign fuzzer queue directory (requires " "-M, can\n" " be specified up to %u times)\n" - // " -d - skip deterministic fuzzing in -M mode\n" + " -z - skip the enhanced deterministic fuzzing\n" + " (note that the old -d and -D flags are ignored.)\n" " -T text - text banner to show on the screen\n" " -I command - execute this command/script when a new crash is " "found\n" @@ -955,20 +955,15 @@ int main(int argc, char **argv_orig, char **envp) { break; - case 'D': /* partial deterministic */ + case 'd': + case 'D': /* old deterministic */ - afl->skip_deterministic = 0; + WARNF("Parameters -d and -D are deprecated, a new enhanced deterministic fuzzing is active by default, to disable it use -z"); break; - case 'd': /* no deterministic */ + case 'z': /* no deterministic */ - // this is the default and currently a lot of infrastructure enforces - // it (e.g. clusterfuzz, fuzzbench) based on that this feature - // originally was bad performance wise. We now have a better - // implementation, hence if it is activated, we do not want to - // deactivate it by such setups. - - // afl->skip_deterministic = 1; + afl->skip_deterministic = 1; break; case 'B': /* load bitmap */ -- cgit 1.4.1 From dc151caa1839162e470e003837e630db6d5d543e Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sat, 3 Feb 2024 15:53:54 +0100 Subject: add lto caller instrumentation --- docs/Changelog.md | 5 +++++ docs/env_variables.md | 3 +++ instrumentation/SanitizerCoverageLTO.so.cc | 27 ++++++++++++++++++++++++--- src/afl-cc.c | 5 +++-- 4 files changed, 35 insertions(+), 5 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 2f0fba33..e5169daf 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -7,6 +7,11 @@ - afl-fuzz: - the new deterministic fuzzing feature is now activated by default, deactivate with -z. Parameters -d and -D are ignored. + - afl-cc: + - added collision free caller instrumentation to LTO mode. activate with + `AFL_LLVM_LTO_CALLER=1`. You can set a max depth to go through single + block functions with `AFL_LLVM_LTO_CALLER_DEPTH` (default 0) + ### Version ++4.10c (release) - afl-fuzz: diff --git a/docs/env_variables.md b/docs/env_variables.md index a972b6da..1e4fc7ba 100644 --- a/docs/env_variables.md +++ b/docs/env_variables.md @@ -248,6 +248,9 @@ use (which only ever the author of this LTO implementation will use). These are used if several separated instrumentations are performed which are then later combined. + - `AFL_LLVM_LTO_CALLER` activates collision free CALLER instrumentation + - `AFL_LLVM_LTO_CALLER` sets the maximum mumber of single block functions + to dig deeper into a real function. Default 0. - `AFL_LLVM_DOCUMENT_IDS=file` will document to a file which edge ID was given to which function. This helps to identify functions with variable bytes or which functions were touched by an input. 
diff --git a/instrumentation/SanitizerCoverageLTO.so.cc b/instrumentation/SanitizerCoverageLTO.so.cc index 65602109..b93b72bf 100644 --- a/instrumentation/SanitizerCoverageLTO.so.cc +++ b/instrumentation/SanitizerCoverageLTO.so.cc @@ -251,6 +251,7 @@ class ModuleSanitizerCoverageLTO uint32_t unhandled = 0; uint32_t select_cnt = 0; uint32_t instrument_ctx = 0; + uint32_t instrument_ctx_max_depth = 0; uint32_t extra_ctx_inst = 0; uint64_t map_addr = 0; const char *skip_nozero = NULL; @@ -428,12 +429,31 @@ bool ModuleSanitizerCoverageLTO::instrumentModule( setvbuf(stdout, NULL, _IONBF, 0); if (getenv("AFL_DEBUG")) { debug = 1; } if (getenv("AFL_LLVM_DICT2FILE_NO_MAIN")) { autodictionary_no_main = 1; } - if (getenv("AFL_LLVM_CALLER") || getenv("AFL_LLVM_CTX")) { + if (getenv("AFL_LLVM_CALLER") || getenv("AFL_LLVM_CTX") || + getenv("AFL_LLVM_LTO_CALLER") || getenv("AFL_LLVM_LTO_CTX")) { instrument_ctx = 1; } + if (getenv("AFL_LLVM_LTO_CALLER_DEPTH")) { + + instrument_ctx_max_depth = atoi(getenv("AFL_LLVM_LTO_CALLER_DEPTH")); + + } else if (getenv("AFL_LLVM_LTO_CTX_DEPTH")) { + + instrument_ctx_max_depth = atoi(getenv("AFL_LLVM_LTO_CTX_DEPTH")); + + } else if (getenv("AFL_LLVM_CALLER_DEPTH")) { + + instrument_ctx_max_depth = atoi(getenv("AFL_LLVM_CALLER_DEPTH")); + + } else if (getenv("AFL_LLVM_CTX_DEPTH")) { + + instrument_ctx_max_depth = atoi(getenv("AFL_LLVM_CTX_DEPTH")); + + } + if ((isatty(2) && !getenv("AFL_QUIET")) || debug) { SAYF(cCYA "afl-llvm-lto" VERSION cRST @@ -1406,11 +1426,12 @@ void ModuleSanitizerCoverageLTO::instrumentFunction( call_counter = countCallers(caller); Function *callee = caller; - if (call_counter == 1) { + if (call_counter == 1 && instrument_ctx_max_depth) { ++call_depth; - while (((caller = returnOnlyCaller(callee)) || 1 == 1) && + while (instrument_ctx_max_depth >= call_depth && + ((caller = returnOnlyCaller(callee)) || 1 == 1) && (call_counter = countCallers(callee)) == 1) { if (debug && caller && callee) diff --git a/src/afl-cc.c b/src/afl-cc.c index 7d33b9f5..4d586ce8 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -2921,11 +2921,12 @@ static void maybe_usage(aflcc_state_t *aflcc, int argc, char **argv) { " AFL_LLVM_DOCUMENT_IDS: write all edge IDs and the corresponding " "functions\n" " into this file (LTO mode)\n" + " AFL_LLVM_LTO_CALLER: activate CALLER/CTX instrumentation\n" + " AFL_LLVM_LTO_CALLER_DEPTH: skip how many empty functions\n" " AFL_LLVM_LTO_DONTWRITEID: don't write the highest ID used to a " "global var\n" " AFL_LLVM_LTO_STARTID: from which ID to start counting from for " - "a " - "bb\n" + "a bb\n" " AFL_REAL_LD: use this lld linker instead of the compiled in " "path\n" " AFL_LLVM_LTO_SKIPINIT: don't inject initialization code " -- cgit 1.4.1 From 369fce9c85bf3b850a7109e4604fee71f694d2cb Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 8 Feb 2024 15:13:46 +0100 Subject: code format --- TODO.md | 4 +- docs/Changelog.md | 18 + include/cmplog.h | 7 +- include/envs.h | 2 + include/t1ha.h | 527 +-- include/t1ha0_ia32aes_b.h | 116 +- include/t1ha_bits.h | 1466 ++++--- include/t1ha_selfcheck.h | 15 +- include/xxhash.h | 10283 ++++++++++++++++++++++++-------------------- src/afl-fuzz-redqueen.c | 2 + src/afl-fuzz.c | 4 +- src/afl-performance.c | 17 +- utils/bench/hash.c | 31 +- 13 files changed, 6853 insertions(+), 5639 deletions(-) (limited to 'docs/Changelog.md') diff --git a/TODO.md b/TODO.md index f2e3963f..d47372b8 100644 --- a/TODO.md +++ b/TODO.md @@ -2,17 +2,15 @@ ## Must - - UI revamp - hardened_usercopy=0 page_alloc.shuffle=0 - add 
value_profile but only enable after 15 minutes without finds - - cmplog max len, cmplog max items envs? + - cmplog max items env? - adapt MOpt to new mutation engine - Update afl->pending_not_fuzzed for MOpt - cmplog rtn sanity check on fixed length? currently we ignore the length - afl-showmap -f support - afl-fuzz multicore wrapper script - when trimming then perform crash detection - - problem: either -L0 and/or -p mmopt results in zero new coverage ## Should diff --git a/docs/Changelog.md b/docs/Changelog.md index e5169daf..3415150a 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -4,13 +4,31 @@ release of the tool. See README.md for the general instruction manual. ### Version ++4.20a (dev) + ! A new forkserver communication model is now introduced. afl-fuzz is + backward compatible with old compiled targets if they are not built + for CMPLOG/Redqueen, but new compiled targets will not work with + old afl-fuzz versions! + ! Recompile all targets that are instrumented for CMPLOG/Redqueen! + - AFL++ now supports up to 4 billion coverage edges, up from 6 million. + - New compile option: `make PERFORMANCE=1` - this will enable special + CPU dependent optimizations that make everything more performant - but + the binaries likely won't work on different platforms. Also + enables a faster hasher if the CPU requirements are met. + - The persistent record feature (see config.h) was expanded to also + support replay, thanks to @quarta-qti ! - afl-fuzz: - the new deterministic fuzzing feature is now activated by default, deactivate with -z. Parameters -d and -D are ignored. + - small improvements to CMPLOG/redqueen + - workaround for a bug with MOpt -L when used with -M - in the future + we will either remove or rewrite MOpt. - afl-cc: - added collision free caller instrumentation to LTO mode. activate with `AFL_LLVM_LTO_CALLER=1`. 
You can set a max depth to go through single block functions with `AFL_LLVM_LTO_CALLER_DEPTH` (default 0) + - Minor edits to afl-persistent-config + - Prevent temporary files being left behind on aborted afl-whatsup + - More CPU benchmarks added to benchmark/ ### Version ++4.10c (release) diff --git a/include/cmplog.h b/include/cmplog.h index 589570fe..a6162b59 100644 --- a/include/cmplog.h +++ b/include/cmplog.h @@ -41,13 +41,12 @@ #define CMP_TYPE_INS 0 #define CMP_TYPE_RTN 1 -struct cmp_header { +struct cmp_header { // 16 bit = 2 bytes unsigned hits : 6; // up to 63 entries, we have CMP_MAP_H = 32 - unsigned shape : 5; // 31+1 bytes - unsigned type : 1; // 4, we use 3: none, rtn, cmp + unsigned shape : 5; // 31+1 bytes max + unsigned type : 1; // 2: cmp, rtn unsigned attribute : 4; // 16 for arithmetic comparison types - //unsigned reserved : 6; } __attribute__((packed)); diff --git a/include/envs.h b/include/envs.h index 8f342553..d32e2f92 100644 --- a/include/envs.h +++ b/include/envs.h @@ -64,6 +64,8 @@ static char *afl_environment_variables[] = { "AFL_REAL_LD", "AFL_LD_PRELOAD", "AFL_LD_VERBOSE", "AFL_LLVM_ALLOWLIST", "AFL_LLVM_DENYLIST", "AFL_LLVM_BLOCKLIST", "AFL_CMPLOG", "AFL_LLVM_CMPLOG", "AFL_GCC_CMPLOG", "AFL_LLVM_INSTRIM", "AFL_LLVM_CALLER", "AFL_LLVM_CTX", + "AFL_LLVM_LTO_CALLER", "AFL_LLVM_LTO_CTX", "AFL_LLVM_LTO_CALLER_DEPTH", + "AFL_LLVM_LTO_CTX_DEPTH", "AFL_LLVM_CALLER_DEPTH", "AFL_LLVM_CTX_DEPTH", "AFL_LLVM_CTX_K", "AFL_LLVM_DICT2FILE", "AFL_LLVM_DICT2FILE_NO_MAIN", "AFL_LLVM_DOCUMENT_IDS", "AFL_LLVM_INSTRIM_LOOPHEAD", "AFL_LLVM_INSTRUMENT", "AFL_LLVM_LTO_AUTODICTIONARY", "AFL_LLVM_AUTODICTIONARY", diff --git a/include/t1ha.h b/include/t1ha.h index 498f0dd6..1af29395 100644 --- a/include/t1ha.h +++ b/include/t1ha.h @@ -172,56 +172,56 @@ #define T1HA_VERSION_RELEASE 1 #ifndef __has_attribute -#define __has_attribute(x) (0) + #define __has_attribute(x) (0) #endif #ifndef __has_include -#define __has_include(x) (0) + #define __has_include(x) (0) #endif #ifndef __GNUC_PREREQ -#if defined(__GNUC__) && defined(__GNUC_MINOR__) -#define __GNUC_PREREQ(maj, min) \ - ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) -#else -#define __GNUC_PREREQ(maj, min) 0 -#endif -#endif /* __GNUC_PREREQ */ + #if defined(__GNUC__) && defined(__GNUC_MINOR__) + #define __GNUC_PREREQ(maj, min) \ + ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) + #else + #define __GNUC_PREREQ(maj, min) 0 + #endif +#endif /* __GNUC_PREREQ */ #ifndef __CLANG_PREREQ -#ifdef __clang__ -#define __CLANG_PREREQ(maj, min) \ - ((__clang_major__ << 16) + __clang_minor__ >= ((maj) << 16) + (min)) -#else -#define __CLANG_PREREQ(maj, min) (0) -#endif -#endif /* __CLANG_PREREQ */ + #ifdef __clang__ + #define __CLANG_PREREQ(maj, min) \ + ((__clang_major__ << 16) + __clang_minor__ >= ((maj) << 16) + (min)) + #else + #define __CLANG_PREREQ(maj, min) (0) + #endif +#endif /* __CLANG_PREREQ */ #ifndef __LCC_PREREQ -#ifdef __LCC__ -#define __LCC_PREREQ(maj, min) \ - ((__LCC__ << 16) + __LCC_MINOR__ >= ((maj) << 16) + (min)) -#else -#define __LCC_PREREQ(maj, min) (0) -#endif -#endif /* __LCC_PREREQ */ + #ifdef __LCC__ + #define __LCC_PREREQ(maj, min) \ + ((__LCC__ << 16) + __LCC_MINOR__ >= ((maj) << 16) + (min)) + #else + #define __LCC_PREREQ(maj, min) (0) + #endif +#endif /* __LCC_PREREQ */ /*****************************************************************************/ #ifdef _MSC_VER -/* Avoid '16' bytes padding added after data member 't1ha_context::total' - * and other warnings from std-headers if 
warning-level > 3. */ -#pragma warning(push, 3) + /* Avoid '16' bytes padding added after data member 't1ha_context::total' + * and other warnings from std-headers if warning-level > 3. */ + #pragma warning(push, 3) #endif #if defined(__cplusplus) && __cplusplus >= 201103L -#include -#include -#include + #include + #include + #include #else -#include -#include -#include + #include + #include + #include #endif /*****************************************************************************/ @@ -234,18 +234,18 @@ defined(__INTEL__) || defined(__x86_64) || defined(__x86_64__) || \ defined(__amd64__) || defined(__amd64) || defined(_M_X64) || \ defined(_M_AMD64) || defined(__IA32__) || defined(__INTEL__) -#ifndef __ia32__ -/* LY: define neutral __ia32__ for x86 and x86-64 archs */ -#define __ia32__ 1 -#endif /* __ia32__ */ -#if !defined(__amd64__) && (defined(__x86_64) || defined(__x86_64__) || \ - defined(__amd64) || defined(_M_X64)) -/* LY: define trusty __amd64__ for all AMD64/x86-64 arch */ -#define __amd64__ 1 -#endif /* __amd64__ */ -#endif /* all x86 */ - -#if !defined(__BYTE_ORDER__) || !defined(__ORDER_LITTLE_ENDIAN__) || \ + #ifndef __ia32__ + /* LY: define neutral __ia32__ for x86 and x86-64 archs */ + #define __ia32__ 1 + #endif /* __ia32__ */ + #if !defined(__amd64__) && (defined(__x86_64) || defined(__x86_64__) || \ + defined(__amd64) || defined(_M_X64)) + /* LY: define trusty __amd64__ for all AMD64/x86-64 arch */ + #define __amd64__ 1 + #endif /* __amd64__ */ +#endif /* all x86 */ + +#if !defined(__BYTE_ORDER__) || !defined(__ORDER_LITTLE_ENDIAN__) || \ !defined(__ORDER_BIG_ENDIAN__) /* *INDENT-OFF* */ @@ -267,160 +267,168 @@ defined(__NETBSD__) || defined(__NetBSD__) || \ defined(HAVE_SYS_PARAM_H) || __has_include() #include -#endif /* OS */ +#endif /* OS */ /* *INDENT-ON* */ /* clang-format on */ -#if defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) -#define __ORDER_LITTLE_ENDIAN__ __LITTLE_ENDIAN -#define __ORDER_BIG_ENDIAN__ __BIG_ENDIAN -#define __BYTE_ORDER__ __BYTE_ORDER -#elif defined(_BYTE_ORDER) && defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN) -#define __ORDER_LITTLE_ENDIAN__ _LITTLE_ENDIAN -#define __ORDER_BIG_ENDIAN__ _BIG_ENDIAN -#define __BYTE_ORDER__ _BYTE_ORDER -#else -#define __ORDER_LITTLE_ENDIAN__ 1234 -#define __ORDER_BIG_ENDIAN__ 4321 - -#if defined(__LITTLE_ENDIAN__) || \ - (defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)) || \ - defined(__ARMEL__) || defined(__THUMBEL__) || defined(__AARCH64EL__) || \ - defined(__MIPSEL__) || defined(_MIPSEL) || defined(__MIPSEL) || \ - defined(_M_ARM) || defined(_M_ARM64) || defined(__e2k__) || \ - defined(__elbrus_4c__) || defined(__elbrus_8c__) || defined(__bfin__) || \ - defined(__BFIN__) || defined(__ia64__) || defined(_IA64) || \ - defined(__IA64__) || defined(__ia64) || defined(_M_IA64) || \ - defined(__itanium__) || defined(__ia32__) || defined(__CYGWIN__) || \ - defined(_WIN64) || defined(_WIN32) || defined(__TOS_WIN__) || \ - defined(__WINDOWS__) -#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__ - -#elif defined(__BIG_ENDIAN__) || \ - (defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)) || \ - defined(__ARMEB__) || defined(__THUMBEB__) || defined(__AARCH64EB__) || \ - defined(__MIPSEB__) || defined(_MIPSEB) || defined(__MIPSEB) || \ - defined(__m68k__) || defined(M68000) || defined(__hppa__) || \ - defined(__hppa) || defined(__HPPA__) || defined(__sparc__) || \ - defined(__sparc) || defined(__370__) || defined(__THW_370__) || \ - defined(__s390__) || defined(__s390x__) || 
defined(__SYSC_ZARCH__) -#define __BYTE_ORDER__ __ORDER_BIG_ENDIAN__ - -#else -#error __BYTE_ORDER__ should be defined. -#endif /* Arch */ - -#endif + #if defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) + #define __ORDER_LITTLE_ENDIAN__ __LITTLE_ENDIAN + #define __ORDER_BIG_ENDIAN__ __BIG_ENDIAN + #define __BYTE_ORDER__ __BYTE_ORDER + #elif defined(_BYTE_ORDER) && defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN) + #define __ORDER_LITTLE_ENDIAN__ _LITTLE_ENDIAN + #define __ORDER_BIG_ENDIAN__ _BIG_ENDIAN + #define __BYTE_ORDER__ _BYTE_ORDER + #else + #define __ORDER_LITTLE_ENDIAN__ 1234 + #define __ORDER_BIG_ENDIAN__ 4321 + + #if defined(__LITTLE_ENDIAN__) || \ + (defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)) || \ + defined(__ARMEL__) || defined(__THUMBEL__) || \ + defined(__AARCH64EL__) || defined(__MIPSEL__) || defined(_MIPSEL) || \ + defined(__MIPSEL) || defined(_M_ARM) || defined(_M_ARM64) || \ + defined(__e2k__) || defined(__elbrus_4c__) || \ + defined(__elbrus_8c__) || defined(__bfin__) || defined(__BFIN__) || \ + defined(__ia64__) || defined(_IA64) || defined(__IA64__) || \ + defined(__ia64) || defined(_M_IA64) || defined(__itanium__) || \ + defined(__ia32__) || defined(__CYGWIN__) || defined(_WIN64) || \ + defined(_WIN32) || defined(__TOS_WIN__) || defined(__WINDOWS__) + #define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__ + + #elif defined(__BIG_ENDIAN__) || \ + (defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)) || \ + defined(__ARMEB__) || defined(__THUMBEB__) || \ + defined(__AARCH64EB__) || defined(__MIPSEB__) || defined(_MIPSEB) || \ + defined(__MIPSEB) || defined(__m68k__) || defined(M68000) || \ + defined(__hppa__) || defined(__hppa) || defined(__HPPA__) || \ + defined(__sparc__) || defined(__sparc) || defined(__370__) || \ + defined(__THW_370__) || defined(__s390__) || defined(__s390x__) || \ + defined(__SYSC_ZARCH__) + #define __BYTE_ORDER__ __ORDER_BIG_ENDIAN__ + + #else + #error __BYTE_ORDER__ should be defined. 
+ #endif /* Arch */ + + #endif #endif /* __BYTE_ORDER__ || __ORDER_LITTLE_ENDIAN__ || __ORDER_BIG_ENDIAN__ */ /*****************************************************************************/ #ifndef __dll_export -#if defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__) -#if defined(__GNUC__) || __has_attribute(dllexport) -#define __dll_export __attribute__((dllexport)) -#else -#define __dll_export __declspec(dllexport) -#endif -#elif defined(__GNUC__) || __has_attribute(__visibility__) -#define __dll_export __attribute__((__visibility__("default"))) -#else -#define __dll_export -#endif -#endif /* __dll_export */ + #if defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__) + #if defined(__GNUC__) || __has_attribute(dllexport) + #define __dll_export __attribute__((dllexport)) + #else + #define __dll_export __declspec(dllexport) + #endif + #elif defined(__GNUC__) || __has_attribute(__visibility__) + #define __dll_export __attribute__((__visibility__("default"))) + #else + #define __dll_export + #endif +#endif /* __dll_export */ #ifndef __dll_import -#if defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__) -#if defined(__GNUC__) || __has_attribute(dllimport) -#define __dll_import __attribute__((dllimport)) -#else -#define __dll_import __declspec(dllimport) -#endif -#elif defined(__GNUC__) || __has_attribute(__visibility__) -#define __dll_import __attribute__((__visibility__("default"))) -#else -#define __dll_import -#endif -#endif /* __dll_import */ + #if defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__) + #if defined(__GNUC__) || __has_attribute(dllimport) + #define __dll_import __attribute__((dllimport)) + #else + #define __dll_import __declspec(dllimport) + #endif + #elif defined(__GNUC__) || __has_attribute(__visibility__) + #define __dll_import __attribute__((__visibility__("default"))) + #else + #define __dll_import + #endif +#endif /* __dll_import */ #ifndef __force_inline -#ifdef _MSC_VER -#define __force_inline __forceinline -#elif __GNUC_PREREQ(3, 2) || __has_attribute(__always_inline__) -#define __force_inline __inline __attribute__((__always_inline__)) -#else -#define __force_inline __inline -#endif -#endif /* __force_inline */ + #ifdef _MSC_VER + #define __force_inline __forceinline + #elif __GNUC_PREREQ(3, 2) || __has_attribute(__always_inline__) + #define __force_inline __inline __attribute__((__always_inline__)) + #else + #define __force_inline __inline + #endif +#endif /* __force_inline */ #ifndef T1HA_API -#if defined(t1ha_EXPORTS) -#define T1HA_API __dll_export -#elif defined(t1ha_IMPORTS) -#define T1HA_API __dll_import -#else -#define T1HA_API -#endif -#endif /* T1HA_API */ + #if defined(t1ha_EXPORTS) + #define T1HA_API __dll_export + #elif defined(t1ha_IMPORTS) + #define T1HA_API __dll_import + #else + #define T1HA_API + #endif +#endif /* T1HA_API */ #if defined(_MSC_VER) && defined(__ia32__) -#define T1HA_ALIGN_PREFIX __declspec(align(32)) /* required only for SIMD */ + #define T1HA_ALIGN_PREFIX __declspec(align(32)) /* required only for SIMD */ #else -#define T1HA_ALIGN_PREFIX -#endif /* _MSC_VER */ + #define T1HA_ALIGN_PREFIX +#endif /* _MSC_VER */ #if defined(__GNUC__) && defined(__ia32__) -#define T1HA_ALIGN_SUFFIX \ - __attribute__((__aligned__(32))) /* required only for SIMD */ + #define T1HA_ALIGN_SUFFIX \ + __attribute__((__aligned__(32))) /* required only for SIMD */ #else -#define T1HA_ALIGN_SUFFIX -#endif /* GCC x86 */ + #define T1HA_ALIGN_SUFFIX +#endif /* GCC x86 */ #ifndef T1HA_USE_INDIRECT_FUNCTIONS -/* GNU ELF indirect functions usage 
control. For more info please see - * https://en.wikipedia.org/wiki/Executable_and_Linkable_Format - * and https://sourceware.org/glibc/wiki/GNU_IFUNC */ -#if defined(__ELF__) && defined(__amd64__) && \ - (__has_attribute(__ifunc__) || \ - (!defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && \ - !defined(__SANITIZE_ADDRESS__) && !defined(__SSP_ALL__))) -/* Enable gnu_indirect_function by default if : - * - ELF AND x86_64 - * - attribute(__ifunc__) is available OR - * GCC >= 4 WITHOUT -fsanitize=address NOR -fstack-protector-all */ -#define T1HA_USE_INDIRECT_FUNCTIONS 1 -#else -#define T1HA_USE_INDIRECT_FUNCTIONS 0 -#endif -#endif /* T1HA_USE_INDIRECT_FUNCTIONS */ + /* GNU ELF indirect functions usage control. For more info please see + * https://en.wikipedia.org/wiki/Executable_and_Linkable_Format + * and https://sourceware.org/glibc/wiki/GNU_IFUNC */ + #if defined(__ELF__) && defined(__amd64__) && \ + (__has_attribute(__ifunc__) || \ + (!defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && \ + !defined(__SANITIZE_ADDRESS__) && !defined(__SSP_ALL__))) + /* Enable gnu_indirect_function by default if : + * - ELF AND x86_64 + * - attribute(__ifunc__) is available OR + * GCC >= 4 WITHOUT -fsanitize=address NOR -fstack-protector-all */ + #define T1HA_USE_INDIRECT_FUNCTIONS 1 + #else + #define T1HA_USE_INDIRECT_FUNCTIONS 0 + #endif +#endif /* T1HA_USE_INDIRECT_FUNCTIONS */ #if __GNUC_PREREQ(4, 0) -#pragma GCC visibility push(hidden) -#endif /* __GNUC_PREREQ(4,0) */ + #pragma GCC visibility push(hidden) +#endif /* __GNUC_PREREQ(4,0) */ #ifdef __cplusplus extern "C" { + #endif typedef union T1HA_ALIGN_PREFIX t1ha_state256 { - uint8_t bytes[32]; + + uint8_t bytes[32]; uint32_t u32[8]; uint64_t u64[4]; struct { + uint64_t a, b, c, d; + } n; + } t1ha_state256_t T1HA_ALIGN_SUFFIX; typedef struct t1ha_context { + t1ha_state256_t state; t1ha_state256_t buffer; - size_t partial; - uint64_t total; + size_t partial; + uint64_t total; + } t1ha_context_t; #ifdef _MSC_VER -#pragma warning(pop) + #pragma warning(pop) #endif /****************************************************************************** @@ -443,37 +451,37 @@ T1HA_API int t1ha_selfcheck__t1ha2_atonce(void); T1HA_API int t1ha_selfcheck__t1ha2_atonce128(void); T1HA_API int t1ha_selfcheck__t1ha2_stream(void); T1HA_API int t1ha_selfcheck__t1ha2(void); -#endif /* T1HA2_DISABLED */ +#endif /* T1HA2_DISABLED */ #ifndef T1HA1_DISABLED T1HA_API int t1ha_selfcheck__t1ha1_le(void); T1HA_API int t1ha_selfcheck__t1ha1_be(void); T1HA_API int t1ha_selfcheck__t1ha1(void); -#endif /* T1HA1_DISABLED */ +#endif /* T1HA1_DISABLED */ #ifndef T1HA0_DISABLED T1HA_API int t1ha_selfcheck__t1ha0_32le(void); T1HA_API int t1ha_selfcheck__t1ha0_32be(void); T1HA_API int t1ha_selfcheck__t1ha0(void); -/* Define T1HA0_AESNI_AVAILABLE to 0 for disable AES-NI support. */ -#ifndef T1HA0_AESNI_AVAILABLE -#if defined(__e2k__) || \ - (defined(__ia32__) && (!defined(_M_IX86) || _MSC_VER > 1800)) -#define T1HA0_AESNI_AVAILABLE 1 -#else -#define T1HA0_AESNI_AVAILABLE 0 -#endif -#endif /* ifndef T1HA0_AESNI_AVAILABLE */ - -#if T1HA0_AESNI_AVAILABLE + /* Define T1HA0_AESNI_AVAILABLE to 0 for disable AES-NI support. 
*/ + #ifndef T1HA0_AESNI_AVAILABLE + #if defined(__e2k__) || \ + (defined(__ia32__) && (!defined(_M_IX86) || _MSC_VER > 1800)) + #define T1HA0_AESNI_AVAILABLE 1 + #else + #define T1HA0_AESNI_AVAILABLE 0 + #endif + #endif /* ifndef T1HA0_AESNI_AVAILABLE */ + + #if T1HA0_AESNI_AVAILABLE T1HA_API int t1ha_selfcheck__t1ha0_ia32aes_noavx(void); T1HA_API int t1ha_selfcheck__t1ha0_ia32aes_avx(void); -#ifndef __e2k__ + #ifndef __e2k__ T1HA_API int t1ha_selfcheck__t1ha0_ia32aes_avx2(void); -#endif -#endif /* if T1HA0_AESNI_AVAILABLE */ -#endif /* T1HA0_DISABLED */ + #endif + #endif /* if T1HA0_AESNI_AVAILABLE */ +#endif /* T1HA0_DISABLED */ /****************************************************************************** * @@ -521,7 +529,7 @@ T1HA_API void t1ha2_update(t1ha_context_t *__restrict ctx, T1HA_API uint64_t t1ha2_final(t1ha_context_t *__restrict ctx, uint64_t *__restrict extra_result /* optional */); -#endif /* T1HA2_DISABLED */ +#endif /* T1HA2_DISABLED */ /****************************************************************************** * @@ -546,7 +554,7 @@ T1HA_API uint64_t t1ha1_le(const void *data, size_t length, uint64_t seed); /* The big-endian variant. */ T1HA_API uint64_t t1ha1_be(const void *data, size_t length, uint64_t seed); -#endif /* T1HA1_DISABLED */ +#endif /* T1HA1_DISABLED */ /****************************************************************************** * @@ -589,131 +597,142 @@ uint64_t t1ha0_32le(const void *data, size_t length, uint64_t seed); /* The big-endian variant for 32-bit CPU. */ uint64_t t1ha0_32be(const void *data, size_t length, uint64_t seed); -/* Define T1HA0_AESNI_AVAILABLE to 0 for disable AES-NI support. */ -#ifndef T1HA0_AESNI_AVAILABLE -#if defined(__e2k__) || \ - (defined(__ia32__) && (!defined(_M_IX86) || _MSC_VER > 1800)) -#define T1HA0_AESNI_AVAILABLE 1 -#else -#define T1HA0_AESNI_AVAILABLE 0 -#endif -#endif /* T1HA0_AESNI_AVAILABLE */ - -/* Define T1HA0_RUNTIME_SELECT to 0 for disable dispatching t1ha0 at runtime. */ -#ifndef T1HA0_RUNTIME_SELECT -#if T1HA0_AESNI_AVAILABLE && !defined(__e2k__) -#define T1HA0_RUNTIME_SELECT 1 -#else -#define T1HA0_RUNTIME_SELECT 0 -#endif -#endif /* T1HA0_RUNTIME_SELECT */ - -#if !T1HA0_RUNTIME_SELECT && !defined(T1HA0_USE_DEFINE) -#if defined(__LCC__) -#define T1HA0_USE_DEFINE 1 -#else -#define T1HA0_USE_DEFINE 0 -#endif -#endif /* T1HA0_USE_DEFINE */ - -#if T1HA0_AESNI_AVAILABLE + /* Define T1HA0_AESNI_AVAILABLE to 0 for disable AES-NI support. */ + #ifndef T1HA0_AESNI_AVAILABLE + #if defined(__e2k__) || \ + (defined(__ia32__) && (!defined(_M_IX86) || _MSC_VER > 1800)) + #define T1HA0_AESNI_AVAILABLE 1 + #else + #define T1HA0_AESNI_AVAILABLE 0 + #endif + #endif /* T1HA0_AESNI_AVAILABLE */ + + /* Define T1HA0_RUNTIME_SELECT to 0 for disable dispatching t1ha0 at runtime. 
+ */ + #ifndef T1HA0_RUNTIME_SELECT + #if T1HA0_AESNI_AVAILABLE && !defined(__e2k__) + #define T1HA0_RUNTIME_SELECT 1 + #else + #define T1HA0_RUNTIME_SELECT 0 + #endif + #endif /* T1HA0_RUNTIME_SELECT */ + + #if !T1HA0_RUNTIME_SELECT && !defined(T1HA0_USE_DEFINE) + #if defined(__LCC__) + #define T1HA0_USE_DEFINE 1 + #else + #define T1HA0_USE_DEFINE 0 + #endif + #endif /* T1HA0_USE_DEFINE */ + + #if T1HA0_AESNI_AVAILABLE uint64_t t1ha0_ia32aes_noavx(const void *data, size_t length, uint64_t seed); uint64_t t1ha0_ia32aes_avx(const void *data, size_t length, uint64_t seed); -#ifndef __e2k__ + #ifndef __e2k__ uint64_t t1ha0_ia32aes_avx2(const void *data, size_t length, uint64_t seed); -#endif -#endif /* T1HA0_AESNI_AVAILABLE */ + #endif + #endif /* T1HA0_AESNI_AVAILABLE */ -#if T1HA0_RUNTIME_SELECT + #if T1HA0_RUNTIME_SELECT typedef uint64_t (*t1ha0_function_t)(const void *, size_t, uint64_t); T1HA_API t1ha0_function_t t1ha0_resolve(void); -#if T1HA_USE_INDIRECT_FUNCTIONS + #if T1HA_USE_INDIRECT_FUNCTIONS T1HA_API uint64_t t1ha0(const void *data, size_t length, uint64_t seed); -#else + #else /* Otherwise function pointer will be used. * Unfortunately this may cause some overhead calling. */ T1HA_API extern uint64_t (*t1ha0_funcptr)(const void *data, size_t length, uint64_t seed); static __force_inline uint64_t t1ha0(const void *data, size_t length, uint64_t seed) { + return t1ha0_funcptr(data, length, seed); + } -#endif /* T1HA_USE_INDIRECT_FUNCTIONS */ -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + #endif /* T1HA_USE_INDIRECT_FUNCTIONS */ -#if T1HA0_USE_DEFINE + #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \ - (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED)) -#if defined(T1HA1_DISABLED) -#define t1ha0 t1ha2_atonce -#else -#define t1ha0 t1ha1_be -#endif /* T1HA1_DISABLED */ -#else /* 32/64 */ -#define t1ha0 t1ha0_32be -#endif /* 32/64 */ + #if T1HA0_USE_DEFINE + + #if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \ + (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED)) + #if defined(T1HA1_DISABLED) + #define t1ha0 t1ha2_atonce + #else + #define t1ha0 t1ha1_be + #endif /* T1HA1_DISABLED */ + #else /* 32/64 */ + #define t1ha0 t1ha0_32be + #endif /* 32/64 */ -#else /* T1HA0_USE_DEFINE */ + #else /* T1HA0_USE_DEFINE */ static __force_inline uint64_t t1ha0(const void *data, size_t length, uint64_t seed) { -#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \ - (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED)) -#if defined(T1HA1_DISABLED) + + #if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \ + (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED)) + #if defined(T1HA1_DISABLED) return t1ha2_atonce(data, length, seed); -#else + #else return t1ha1_be(data, length, seed); -#endif /* T1HA1_DISABLED */ -#else /* 32/64 */ + #endif /* T1HA1_DISABLED */ + #else /* 32/64 */ return t1ha0_32be(data, length, seed); -#endif /* 32/64 */ + #endif /* 32/64 */ + } -#endif /* !T1HA0_USE_DEFINE */ + #endif /* !T1HA0_USE_DEFINE */ -#else /* !T1HA0_RUNTIME_SELECT && __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ */ + #else /* !T1HA0_RUNTIME_SELECT && __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ */ -#if T1HA0_USE_DEFINE + #if T1HA0_USE_DEFINE -#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \ - (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED)) -#if defined(T1HA1_DISABLED) -#define t1ha0 t1ha2_atonce -#else -#define t1ha0 t1ha1_le -#endif /* T1HA1_DISABLED */ -#else /* 32/64 */ -#define t1ha0 
t1ha0_32le -#endif /* 32/64 */ + #if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \ + (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED)) + #if defined(T1HA1_DISABLED) + #define t1ha0 t1ha2_atonce + #else + #define t1ha0 t1ha1_le + #endif /* T1HA1_DISABLED */ + #else /* 32/64 */ + #define t1ha0 t1ha0_32le + #endif /* 32/64 */ -#else + #else static __force_inline uint64_t t1ha0(const void *data, size_t length, uint64_t seed) { -#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \ - (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED)) -#if defined(T1HA1_DISABLED) + + #if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \ + (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED)) + #if defined(T1HA1_DISABLED) return t1ha2_atonce(data, length, seed); -#else + #else return t1ha1_le(data, length, seed); -#endif /* T1HA1_DISABLED */ -#else /* 32/64 */ + #endif /* T1HA1_DISABLED */ + #else /* 32/64 */ return t1ha0_32le(data, length, seed); -#endif /* 32/64 */ + #endif /* 32/64 */ + } -#endif /* !T1HA0_USE_DEFINE */ + #endif /* !T1HA0_USE_DEFINE */ -#endif /* !T1HA0_RUNTIME_SELECT */ + #endif /* !T1HA0_RUNTIME_SELECT */ -#endif /* T1HA0_DISABLED */ +#endif /* T1HA0_DISABLED */ #ifdef __cplusplus + } + #endif #if __GNUC_PREREQ(4, 0) -#pragma GCC visibility pop -#endif /* __GNUC_PREREQ(4,0) */ + #pragma GCC visibility pop +#endif /* __GNUC_PREREQ(4,0) */ + diff --git a/include/t1ha0_ia32aes_b.h b/include/t1ha0_ia32aes_b.h index e8e52638..93b16771 100644 --- a/include/t1ha0_ia32aes_b.h +++ b/include/t1ha0_ia32aes_b.h @@ -47,27 +47,34 @@ #if T1HA0_AESNI_AVAILABLE uint64_t T1HA_IA32AES_NAME(const void *data, uint32_t len) { + uint64_t a = 0; uint64_t b = len; if (likely(len > 32)) { + __m128i x = _mm_set_epi64x(a, b); __m128i y = _mm_aesenc_si128(x, _mm_set_epi64x(prime_0, prime_1)); - const __m128i *v = (const __m128i *)data; + const __m128i *v = (const __m128i *)data; const __m128i *const detent = (const __m128i *)((const uint8_t *)data + (len & ~15ul)); data = detent; if (len & 16) { + x = _mm_add_epi64(x, _mm_loadu_si128(v++)); y = _mm_aesenc_si128(x, y); + } + len &= 15; if (v + 7 < detent) { + __m128i salt = y; do { + __m128i t = _mm_aesenc_si128(_mm_loadu_si128(v++), salt); t = _mm_aesdec_si128(t, _mm_loadu_si128(v++)); t = _mm_aesdec_si128(t, _mm_loadu_si128(v++)); @@ -82,86 +89,95 @@ uint64_t T1HA_IA32AES_NAME(const void *data, uint32_t len) { t = _mm_aesenc_si128(x, t); x = _mm_add_epi64(y, x); y = t; + } while (v + 7 < detent); + } while (v < detent) { + __m128i v0y = _mm_add_epi64(y, _mm_loadu_si128(v++)); __m128i v1x = _mm_sub_epi64(x, _mm_loadu_si128(v++)); x = _mm_aesdec_si128(x, v0y); y = _mm_aesdec_si128(y, v1x); + } x = _mm_add_epi64(_mm_aesdec_si128(x, _mm_aesenc_si128(y, x)), y); -#if defined(__x86_64__) || defined(_M_X64) -#if defined(__SSE4_1__) || defined(__AVX__) + #if defined(__x86_64__) || defined(_M_X64) + #if defined(__SSE4_1__) || defined(__AVX__) a = _mm_extract_epi64(x, 0); b = _mm_extract_epi64(x, 1); -#else + #else a = _mm_cvtsi128_si64(x); b = _mm_cvtsi128_si64(_mm_unpackhi_epi64(x, x)); -#endif -#else -#if defined(__SSE4_1__) || defined(__AVX__) + #endif + #else + #if defined(__SSE4_1__) || defined(__AVX__) a = (uint32_t)_mm_extract_epi32(x, 0) | (uint64_t)_mm_extract_epi32(x, 1) << 32; b = (uint32_t)_mm_extract_epi32(x, 2) | (uint64_t)_mm_extract_epi32(x, 3) << 32; -#else + #else a = (uint32_t)_mm_cvtsi128_si32(x); a |= (uint64_t)_mm_cvtsi128_si32(_mm_shuffle_epi32(x, 1)) << 32; x = _mm_unpackhi_epi64(x, x); b = 
(uint32_t)_mm_cvtsi128_si32(x); b |= (uint64_t)_mm_cvtsi128_si32(_mm_shuffle_epi32(x, 1)) << 32; -#endif -#endif -#ifdef __AVX__ + #endif + #endif + #ifdef __AVX__ _mm256_zeroupper(); -#elif !(defined(_X86_64_) || defined(__x86_64__) || defined(_M_X64) || \ - defined(__e2k__)) + #elif !(defined(_X86_64_) || defined(__x86_64__) || defined(_M_X64) || \ + defined(__e2k__)) _mm_empty(); -#endif + #endif + } const uint64_t *v = (const uint64_t *)data; switch (len) { - default: - mixup64(&a, &b, fetch64_le_unaligned(v++), prime_4); - /* fall through */ - case 24: - case 23: - case 22: - case 21: - case 20: - case 19: - case 18: - case 17: - mixup64(&b, &a, fetch64_le_unaligned(v++), prime_3); - /* fall through */ - case 16: - case 15: - case 14: - case 13: - case 12: - case 11: - case 10: - case 9: - mixup64(&a, &b, fetch64_le_unaligned(v++), prime_2); - /* fall through */ - case 8: - case 7: - case 6: - case 5: - case 4: - case 3: - case 2: - case 1: - mixup64(&b, &a, tail64_le_unaligned(v, len), prime_1); - /* fall through */ - case 0: - return final64(a, b); + + default: + mixup64(&a, &b, fetch64_le_unaligned(v++), prime_4); + /* fall through */ + case 24: + case 23: + case 22: + case 21: + case 20: + case 19: + case 18: + case 17: + mixup64(&b, &a, fetch64_le_unaligned(v++), prime_3); + /* fall through */ + case 16: + case 15: + case 14: + case 13: + case 12: + case 11: + case 10: + case 9: + mixup64(&a, &b, fetch64_le_unaligned(v++), prime_2); + /* fall through */ + case 8: + case 7: + case 6: + case 5: + case 4: + case 3: + case 2: + case 1: + mixup64(&b, &a, tail64_le_unaligned(v, len), prime_1); + /* fall through */ + case 0: + return final64(a, b); + } + } -#endif /* T1HA0_AESNI_AVAILABLE */ +#endif /* T1HA0_AESNI_AVAILABLE */ #undef T1HA_IA32AES_NAME + diff --git a/include/t1ha_bits.h b/include/t1ha_bits.h index 539369aa..e7a8d53c 100644 --- a/include/t1ha_bits.h +++ b/include/t1ha_bits.h @@ -44,30 +44,30 @@ #pragma once #if defined(_MSC_VER) -#pragma warning(disable : 4201) /* nameless struct/union */ -#if _MSC_VER > 1800 -#pragma warning(disable : 4464) /* relative include path contains '..' */ -#endif /* 1800 */ -#endif /* MSVC */ + #pragma warning(disable : 4201) /* nameless struct/union */ + #if _MSC_VER > 1800 + #pragma warning(disable : 4464) /* relative include path contains '..' */ + #endif /* 1800 */ +#endif /* MSVC */ #include "t1ha.h" #ifndef T1HA_USE_FAST_ONESHOT_READ -/* Define it to 1 for little bit faster code. - * Unfortunately this may triggering a false-positive alarms from Valgrind, - * AddressSanitizer and other similar tool. - * So, define it to 0 for calmness if doubt. */ -#define T1HA_USE_FAST_ONESHOT_READ 1 -#endif /* T1HA_USE_FAST_ONESHOT_READ */ + /* Define it to 1 for little bit faster code. + * Unfortunately this may triggering a false-positive alarms from Valgrind, + * AddressSanitizer and other similar tool. + * So, define it to 0 for calmness if doubt. */ + #define T1HA_USE_FAST_ONESHOT_READ 1 +#endif /* T1HA_USE_FAST_ONESHOT_READ */ /*****************************************************************************/ -#include /* for assert() */ -#include /* for bool */ -#include /* for memcpy() */ +#include /* for assert() */ +#include /* for bool */ +#include /* for memcpy() */ -#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ && \ +#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ && \ __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ -#error Unsupported byte order. + #error Unsupported byte order. 
#endif #define T1HA_UNALIGNED_ACCESS__UNABLE 0 @@ -75,534 +75,600 @@ #define T1HA_UNALIGNED_ACCESS__EFFICIENT 2 #ifndef T1HA_SYS_UNALIGNED_ACCESS -#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) -#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT -#elif defined(__ia32__) -#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT -#elif defined(__e2k__) -#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__SLOW -#elif defined(__ARM_FEATURE_UNALIGNED) -#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT -#else -#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__UNABLE -#endif -#endif /* T1HA_SYS_UNALIGNED_ACCESS */ + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT + #elif defined(__ia32__) + #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT + #elif defined(__e2k__) + #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__SLOW + #elif defined(__ARM_FEATURE_UNALIGNED) + #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT + #else + #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__UNABLE + #endif +#endif /* T1HA_SYS_UNALIGNED_ACCESS */ #define ALIGNMENT_16 2 #define ALIGNMENT_32 4 #if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul -#define ALIGNMENT_64 8 + #define ALIGNMENT_64 8 #else -#define ALIGNMENT_64 4 + #define ALIGNMENT_64 4 #endif #ifndef PAGESIZE -#define PAGESIZE 4096 -#endif /* PAGESIZE */ + #define PAGESIZE 4096 +#endif /* PAGESIZE */ /***************************************************************************/ #ifndef __has_builtin -#define __has_builtin(x) (0) + #define __has_builtin(x) (0) #endif #ifndef __has_warning -#define __has_warning(x) (0) + #define __has_warning(x) (0) #endif #ifndef __has_feature -#define __has_feature(x) (0) + #define __has_feature(x) (0) #endif #ifndef __has_extension -#define __has_extension(x) (0) + #define __has_extension(x) (0) #endif #if __has_feature(address_sanitizer) -#define __SANITIZE_ADDRESS__ 1 + #define __SANITIZE_ADDRESS__ 1 #endif #ifndef __optimize -#if defined(__clang__) && !__has_attribute(__optimize__) -#define __optimize(ops) -#elif defined(__GNUC__) || __has_attribute(__optimize__) -#define __optimize(ops) __attribute__((__optimize__(ops))) -#else -#define __optimize(ops) -#endif -#endif /* __optimize */ + #if defined(__clang__) && !__has_attribute(__optimize__) + #define __optimize(ops) + #elif defined(__GNUC__) || __has_attribute(__optimize__) + #define __optimize(ops) __attribute__((__optimize__(ops))) + #else + #define __optimize(ops) + #endif +#endif /* __optimize */ #ifndef __cold -#if defined(__OPTIMIZE__) -#if defined(__e2k__) -#define __cold __optimize(1) __attribute__((__cold__)) -#elif defined(__clang__) && !__has_attribute(__cold__) && \ - __has_attribute(__section__) -/* just put infrequently used functions in separate section */ -#define __cold __attribute__((__section__("text.unlikely"))) __optimize("Os") -#elif defined(__GNUC__) || __has_attribute(__cold__) -#define __cold __attribute__((__cold__)) __optimize("Os") -#else -#define __cold __optimize("Os") -#endif -#else -#define __cold -#endif -#endif /* __cold */ + #if defined(__OPTIMIZE__) + #if defined(__e2k__) + #define __cold __optimize(1) __attribute__((__cold__)) + #elif defined(__clang__) && !__has_attribute(__cold__) && \ + __has_attribute(__section__) + /* just put infrequently used functions in separate section */ + #define __cold \ + __attribute__((__section__("text.unlikely"))) 
__optimize("Os") + #elif defined(__GNUC__) || __has_attribute(__cold__) + #define __cold __attribute__((__cold__)) __optimize("Os") + #else + #define __cold __optimize("Os") + #endif + #else + #define __cold + #endif +#endif /* __cold */ #if __GNUC_PREREQ(4, 4) || defined(__clang__) -#if defined(__ia32__) || defined(__e2k__) -#include -#endif + #if defined(__ia32__) || defined(__e2k__) + #include + #endif -#if defined(__ia32__) && !defined(__cpuid_count) -#include -#endif + #if defined(__ia32__) && !defined(__cpuid_count) + #include + #endif -#if defined(__e2k__) -#include -#endif + #if defined(__e2k__) + #include + #endif -#ifndef likely -#define likely(cond) __builtin_expect(!!(cond), 1) -#endif + #ifndef likely + #define likely(cond) __builtin_expect(!!(cond), 1) + #endif -#ifndef unlikely -#define unlikely(cond) __builtin_expect(!!(cond), 0) -#endif + #ifndef unlikely + #define unlikely(cond) __builtin_expect(!!(cond), 0) + #endif -#if __GNUC_PREREQ(4, 5) || __has_builtin(__builtin_unreachable) -#define unreachable() __builtin_unreachable() -#endif + #if __GNUC_PREREQ(4, 5) || __has_builtin(__builtin_unreachable) + #define unreachable() __builtin_unreachable() + #endif -#define bswap64(v) __builtin_bswap64(v) -#define bswap32(v) __builtin_bswap32(v) -#if __GNUC_PREREQ(4, 8) || __has_builtin(__builtin_bswap16) -#define bswap16(v) __builtin_bswap16(v) -#endif + #define bswap64(v) __builtin_bswap64(v) + #define bswap32(v) __builtin_bswap32(v) + #if __GNUC_PREREQ(4, 8) || __has_builtin(__builtin_bswap16) + #define bswap16(v) __builtin_bswap16(v) + #endif -#if !defined(__maybe_unused) && \ - (__GNUC_PREREQ(4, 3) || __has_attribute(__unused__)) -#define __maybe_unused __attribute__((__unused__)) -#endif + #if !defined(__maybe_unused) && \ + (__GNUC_PREREQ(4, 3) || __has_attribute(__unused__)) + #define __maybe_unused __attribute__((__unused__)) + #endif -#if !defined(__always_inline) && \ - (__GNUC_PREREQ(3, 2) || __has_attribute(__always_inline__)) -#define __always_inline __inline __attribute__((__always_inline__)) -#endif + #if !defined(__always_inline) && \ + (__GNUC_PREREQ(3, 2) || __has_attribute(__always_inline__)) + #define __always_inline __inline __attribute__((__always_inline__)) + #endif -#if defined(__e2k__) + #if defined(__e2k__) -#if __iset__ >= 3 -#define mul_64x64_high(a, b) __builtin_e2k_umulhd(a, b) -#endif /* __iset__ >= 3 */ + #if __iset__ >= 3 + #define mul_64x64_high(a, b) __builtin_e2k_umulhd(a, b) + #endif /* __iset__ >= 3 */ + + #if __iset__ >= 5 +static __maybe_unused __always_inline unsigned e2k_add64carry_first( + uint64_t base, uint64_t addend, uint64_t *sum) { -#if __iset__ >= 5 -static __maybe_unused __always_inline unsigned -e2k_add64carry_first(uint64_t base, uint64_t addend, uint64_t *sum) { *sum = base + addend; return (unsigned)__builtin_e2k_addcd_c(base, addend, 0); + } -#define add64carry_first(base, addend, sum) \ - e2k_add64carry_first(base, addend, sum) +\ + #define add64carry_first(base, addend, sum) \ + e2k_add64carry_first(base, addend, sum) + +static __maybe_unused __always_inline unsigned e2k_add64carry_next( + unsigned carry, uint64_t base, uint64_t addend, uint64_t *sum) { -static __maybe_unused __always_inline unsigned -e2k_add64carry_next(unsigned carry, uint64_t base, uint64_t addend, - uint64_t *sum) { *sum = __builtin_e2k_addcd(base, addend, carry); return (unsigned)__builtin_e2k_addcd_c(base, addend, carry); + } -#define add64carry_next(carry, base, addend, sum) \ - e2k_add64carry_next(carry, base, addend, sum) +\ + #define 
add64carry_next(carry, base, addend, sum) \ + e2k_add64carry_next(carry, base, addend, sum) -static __maybe_unused __always_inline void e2k_add64carry_last(unsigned carry, - uint64_t base, - uint64_t addend, +static __maybe_unused __always_inline void e2k_add64carry_last(unsigned carry, + uint64_t base, + uint64_t addend, uint64_t *sum) { + *sum = __builtin_e2k_addcd(base, addend, carry); + } -#define add64carry_last(carry, base, addend, sum) \ - e2k_add64carry_last(carry, base, addend, sum) -#endif /* __iset__ >= 5 */ +\ + #define add64carry_last(carry, base, addend, sum) \ + e2k_add64carry_last(carry, base, addend, sum) + #endif /* __iset__ >= 5 */ -#define fetch64_be_aligned(ptr) ((uint64_t)__builtin_e2k_ld_64s_be(ptr)) -#define fetch32_be_aligned(ptr) ((uint32_t)__builtin_e2k_ld_32u_be(ptr)) + #define fetch64_be_aligned(ptr) ((uint64_t)__builtin_e2k_ld_64s_be(ptr)) + #define fetch32_be_aligned(ptr) ((uint32_t)__builtin_e2k_ld_32u_be(ptr)) -#endif /* __e2k__ Elbrus */ + #endif /* __e2k__ Elbrus */ #elif defined(_MSC_VER) -#if _MSC_FULL_VER < 190024234 && defined(_M_IX86) -#pragma message( \ - "For AES-NI at least \"Microsoft C/C++ Compiler\" version 19.00.24234 (Visual Studio 2015 Update 3) is required.") -#endif -#if _MSC_FULL_VER < 191526730 -#pragma message( \ - "It is recommended to use \"Microsoft C/C++ Compiler\" version 19.15.26730 (Visual Studio 2017 15.8) or newer.") -#endif -#if _MSC_FULL_VER < 180040629 -#error At least "Microsoft C/C++ Compiler" version 18.00.40629 (Visual Studio 2013 Update 5) is required. -#endif - -#pragma warning(push, 1) - -#include -#include -#define likely(cond) (cond) -#define unlikely(cond) (cond) -#define unreachable() __assume(0) -#define bswap64(v) _byteswap_uint64(v) -#define bswap32(v) _byteswap_ulong(v) -#define bswap16(v) _byteswap_ushort(v) -#define rot64(v, s) _rotr64(v, s) -#define rot32(v, s) _rotr(v, s) -#define __always_inline __forceinline - -#if defined(_M_X64) || defined(_M_IA64) -#pragma intrinsic(_umul128) -#define mul_64x64_128(a, b, ph) _umul128(a, b, ph) -#pragma intrinsic(_addcarry_u64) -#define add64carry_first(base, addend, sum) _addcarry_u64(0, base, addend, sum) -#define add64carry_next(carry, base, addend, sum) \ - _addcarry_u64(carry, base, addend, sum) -#define add64carry_last(carry, base, addend, sum) \ - (void)_addcarry_u64(carry, base, addend, sum) -#endif - -#if defined(_M_ARM64) || defined(_M_X64) || defined(_M_IA64) -#pragma intrinsic(__umulh) -#define mul_64x64_high(a, b) __umulh(a, b) -#endif - -#if defined(_M_IX86) -#pragma intrinsic(__emulu) -#define mul_32x32_64(a, b) __emulu(a, b) + #if _MSC_FULL_VER < 190024234 && defined(_M_IX86) + #pragma message( \ + "For AES-NI at least \"Microsoft C/C++ Compiler\" version 19.00.24234 (Visual Studio 2015 Update 3) is required.") + #endif + #if _MSC_FULL_VER < 191526730 + #pragma message( \ + "It is recommended to use \"Microsoft C/C++ Compiler\" version 19.15.26730 (Visual Studio 2017 15.8) or newer.") + #endif + #if _MSC_FULL_VER < 180040629 + #error At least "Microsoft C/C++ Compiler" version 18.00.40629 (Visual Studio 2013 Update 5) is required. 
+ #endif + + #pragma warning(push, 1) + + #include + #include + #define likely(cond) (cond) + #define unlikely(cond) (cond) + #define unreachable() __assume(0) + #define bswap64(v) _byteswap_uint64(v) + #define bswap32(v) _byteswap_ulong(v) + #define bswap16(v) _byteswap_ushort(v) + #define rot64(v, s) _rotr64(v, s) + #define rot32(v, s) _rotr(v, s) + #define __always_inline __forceinline + + #if defined(_M_X64) || defined(_M_IA64) + #pragma intrinsic(_umul128) + #define mul_64x64_128(a, b, ph) _umul128(a, b, ph) + #pragma intrinsic(_addcarry_u64) + #define add64carry_first(base, addend, sum) \ + _addcarry_u64(0, base, addend, sum) + #define add64carry_next(carry, base, addend, sum) \ + _addcarry_u64(carry, base, addend, sum) + #define add64carry_last(carry, base, addend, sum) \ + (void)_addcarry_u64(carry, base, addend, sum) + #endif + + #if defined(_M_ARM64) || defined(_M_X64) || defined(_M_IA64) + #pragma intrinsic(__umulh) + #define mul_64x64_high(a, b) __umulh(a, b) + #endif + + #if defined(_M_IX86) + #pragma intrinsic(__emulu) + #define mul_32x32_64(a, b) __emulu(a, b) + + #if _MSC_VER >= 1915 /* LY: workaround for SSA-optimizer bug */ + #pragma intrinsic(_addcarry_u32) + #define add32carry_first(base, addend, sum) \ + _addcarry_u32(0, base, addend, sum) + #define add32carry_next(carry, base, addend, sum) \ + _addcarry_u32(carry, base, addend, sum) + #define add32carry_last(carry, base, addend, sum) \ + (void)_addcarry_u32(carry, base, addend, sum) + +static __forceinline char msvc32_add64carry_first(uint64_t base, + uint64_t addend, + uint64_t *sum) { -#if _MSC_VER >= 1915 /* LY: workaround for SSA-optimizer bug */ -#pragma intrinsic(_addcarry_u32) -#define add32carry_first(base, addend, sum) _addcarry_u32(0, base, addend, sum) -#define add32carry_next(carry, base, addend, sum) \ - _addcarry_u32(carry, base, addend, sum) -#define add32carry_last(carry, base, addend, sum) \ - (void)_addcarry_u32(carry, base, addend, sum) - -static __forceinline char -msvc32_add64carry_first(uint64_t base, uint64_t addend, uint64_t *sum) { uint32_t *const sum32 = (uint32_t *)sum; - const uint32_t base_32l = (uint32_t)base; - const uint32_t base_32h = (uint32_t)(base >> 32); - const uint32_t addend_32l = (uint32_t)addend; - const uint32_t addend_32h = (uint32_t)(addend >> 32); + const uint32_t base_32l = (uint32_t)base; + const uint32_t base_32h = (uint32_t)(base >> 32); + const uint32_t addend_32l = (uint32_t)addend; + const uint32_t addend_32h = (uint32_t)(addend >> 32); return add32carry_next(add32carry_first(base_32l, addend_32l, sum32), base_32h, addend_32h, sum32 + 1); + } -#define add64carry_first(base, addend, sum) \ - msvc32_add64carry_first(base, addend, sum) +\ + #define add64carry_first(base, addend, sum) \ + msvc32_add64carry_first(base, addend, sum) static __forceinline char msvc32_add64carry_next(char carry, uint64_t base, - uint64_t addend, + uint64_t addend, uint64_t *sum) { + uint32_t *const sum32 = (uint32_t *)sum; - const uint32_t base_32l = (uint32_t)base; - const uint32_t base_32h = (uint32_t)(base >> 32); - const uint32_t addend_32l = (uint32_t)addend; - const uint32_t addend_32h = (uint32_t)(addend >> 32); + const uint32_t base_32l = (uint32_t)base; + const uint32_t base_32h = (uint32_t)(base >> 32); + const uint32_t addend_32l = (uint32_t)addend; + const uint32_t addend_32h = (uint32_t)(addend >> 32); return add32carry_next(add32carry_next(carry, base_32l, addend_32l, sum32), base_32h, addend_32h, sum32 + 1); + } -#define add64carry_next(carry, base, addend, sum) \ - 
msvc32_add64carry_next(carry, base, addend, sum) +\ + #define add64carry_next(carry, base, addend, sum) \ + msvc32_add64carry_next(carry, base, addend, sum) static __forceinline void msvc32_add64carry_last(char carry, uint64_t base, - uint64_t addend, + uint64_t addend, uint64_t *sum) { + uint32_t *const sum32 = (uint32_t *)sum; - const uint32_t base_32l = (uint32_t)base; - const uint32_t base_32h = (uint32_t)(base >> 32); - const uint32_t addend_32l = (uint32_t)addend; - const uint32_t addend_32h = (uint32_t)(addend >> 32); + const uint32_t base_32l = (uint32_t)base; + const uint32_t base_32h = (uint32_t)(base >> 32); + const uint32_t addend_32l = (uint32_t)addend; + const uint32_t addend_32h = (uint32_t)(addend >> 32); add32carry_last(add32carry_next(carry, base_32l, addend_32l, sum32), base_32h, addend_32h, sum32 + 1); -} -#define add64carry_last(carry, base, addend, sum) \ - msvc32_add64carry_last(carry, base, addend, sum) -#endif /* _MSC_FULL_VER >= 190024231 */ - -#elif defined(_M_ARM) -#define mul_32x32_64(a, b) _arm_umull(a, b) -#endif -#pragma warning(pop) -#pragma warning(disable : 4514) /* 'xyz': unreferenced inline function \ - has been removed */ -#pragma warning(disable : 4710) /* 'xyz': function not inlined */ -#pragma warning(disable : 4711) /* function 'xyz' selected for \ - automatic inline expansion */ -#pragma warning(disable : 4127) /* conditional expression is constant */ -#pragma warning(disable : 4702) /* unreachable code */ -#endif /* Compiler */ +} +\ + #define add64carry_last(carry, base, addend, sum) \ + msvc32_add64carry_last(carry, base, addend, sum) + #endif /* _MSC_FULL_VER >= 190024231 */ + + #elif defined(_M_ARM) + #define mul_32x32_64(a, b) _arm_umull(a, b) + #endif + + #pragma warning(pop) + #pragma warning(disable : 4514) /* 'xyz': unreferenced inline function \ + has been removed */ + #pragma warning(disable : 4710) /* 'xyz': function not inlined */ + #pragma warning(disable : 4711) /* function 'xyz' selected for \ + automatic inline expansion */ + #pragma warning(disable : 4127) /* conditional expression is constant */ + #pragma warning(disable : 4702) /* unreachable code */ +#endif /* Compiler */ #ifndef likely -#define likely(cond) (cond) + #define likely(cond) (cond) #endif #ifndef unlikely -#define unlikely(cond) (cond) + #define unlikely(cond) (cond) #endif #ifndef __maybe_unused -#define __maybe_unused + #define __maybe_unused #endif #ifndef __always_inline -#define __always_inline __inline + #define __always_inline __inline #endif #ifndef unreachable -#define unreachable() \ - do { \ - } while (1) + #define unreachable() \ + do { \ + \ + } while (1) #endif #ifndef bswap64 -#if defined(bswap_64) -#define bswap64 bswap_64 -#elif defined(__bswap_64) -#define bswap64 __bswap_64 -#else + #if defined(bswap_64) + #define bswap64 bswap_64 + #elif defined(__bswap_64) + #define bswap64 __bswap_64 + #else static __always_inline uint64_t bswap64(uint64_t v) { + return v << 56 | v >> 56 | ((v << 40) & UINT64_C(0x00ff000000000000)) | ((v << 24) & UINT64_C(0x0000ff0000000000)) | ((v << 8) & UINT64_C(0x000000ff00000000)) | ((v >> 8) & UINT64_C(0x00000000ff000000)) | ((v >> 24) & UINT64_C(0x0000000000ff0000)) | ((v >> 40) & UINT64_C(0x000000000000ff00)); + } -#endif -#endif /* bswap64 */ + + #endif +#endif /* bswap64 */ #ifndef bswap32 -#if defined(bswap_32) -#define bswap32 bswap_32 -#elif defined(__bswap_32) -#define bswap32 __bswap_32 -#else + #if defined(bswap_32) + #define bswap32 bswap_32 + #elif defined(__bswap_32) + #define bswap32 __bswap_32 + #else 
static __always_inline uint32_t bswap32(uint32_t v) { + return v << 24 | v >> 24 | ((v << 8) & UINT32_C(0x00ff0000)) | ((v >> 8) & UINT32_C(0x0000ff00)); + } -#endif -#endif /* bswap32 */ + + #endif +#endif /* bswap32 */ #ifndef bswap16 -#if defined(bswap_16) -#define bswap16 bswap_16 -#elif defined(__bswap_16) -#define bswap16 __bswap_16 -#else -static __always_inline uint16_t bswap16(uint16_t v) { return v << 8 | v >> 8; } -#endif -#endif /* bswap16 */ + #if defined(bswap_16) + #define bswap16 bswap_16 + #elif defined(__bswap_16) + #define bswap16 __bswap_16 + #else +static __always_inline uint16_t bswap16(uint16_t v) { + + return v << 8 | v >> 8; + +} + + #endif +#endif /* bswap16 */ -#if defined(__ia32__) || \ +#if defined(__ia32__) || \ T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT -/* The __builtin_assume_aligned() leads gcc/clang to load values into the - * registers, even when it is possible to directly use an operand from memory. - * This can lead to a shortage of registers and a significant slowdown. - * Therefore avoid unnecessary use of __builtin_assume_aligned() for x86. */ -#define read_unaligned(ptr, bits) (*(const uint##bits##_t *__restrict)(ptr)) -#define read_aligned(ptr, bits) (*(const uint##bits##_t *__restrict)(ptr)) -#endif /* __ia32__ */ + /* The __builtin_assume_aligned() leads gcc/clang to load values into the + * registers, even when it is possible to directly use an operand from memory. + * This can lead to a shortage of registers and a significant slowdown. + * Therefore avoid unnecessary use of __builtin_assume_aligned() for x86. */ + #define read_unaligned(ptr, bits) (*(const uint##bits##_t *__restrict)(ptr)) + #define read_aligned(ptr, bits) (*(const uint##bits##_t *__restrict)(ptr)) +#endif /* __ia32__ */ #ifndef read_unaligned -#if defined(__GNUC__) || __has_attribute(__packed__) + #if defined(__GNUC__) || __has_attribute(__packed__) typedef struct { - uint8_t unaligned_8; + + uint8_t unaligned_8; uint16_t unaligned_16; uint32_t unaligned_32; uint64_t unaligned_64; + } __attribute__((__packed__)) t1ha_unaligned_proxy; -#define read_unaligned(ptr, bits) \ - (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \ - t1ha_unaligned_proxy, unaligned_##bits))) \ - ->unaligned_##bits) -#elif defined(_MSC_VER) -#pragma warning( \ - disable : 4235) /* nonstandard extension used: '__unaligned' \ - * keyword not supported on this architecture */ -#define read_unaligned(ptr, bits) (*(const __unaligned uint##bits##_t *)(ptr)) -#else -#pragma pack(push, 1) +\ + #define read_unaligned(ptr, bits) \ + (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \ + t1ha_unaligned_proxy, unaligned_##bits))) \ + ->unaligned_##bits) + #elif defined(_MSC_VER) + #pragma warning( \ + disable : 4235) /* nonstandard extension used: '__unaligned' \ + * keyword not supported on this architecture */ + #define read_unaligned(ptr, bits) \ + (*(const __unaligned uint##bits##_t *)(ptr)) + #else + #pragma pack(push, 1) typedef struct { - uint8_t unaligned_8; + + uint8_t unaligned_8; uint16_t unaligned_16; uint32_t unaligned_32; uint64_t unaligned_64; + } t1ha_unaligned_proxy; -#pragma pack(pop) -#define read_unaligned(ptr, bits) \ - (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \ - t1ha_unaligned_proxy, unaligned_##bits))) \ - ->unaligned_##bits) -#endif -#endif /* read_unaligned */ + + #pragma pack(pop) + #define read_unaligned(ptr, bits) \ + (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \ + t1ha_unaligned_proxy, 
unaligned_##bits))) \ + ->unaligned_##bits) + #endif +#endif /* read_unaligned */ #ifndef read_aligned -#if __GNUC_PREREQ(4, 8) || __has_builtin(__builtin_assume_aligned) -#define read_aligned(ptr, bits) \ - (*(const uint##bits##_t *)__builtin_assume_aligned(ptr, ALIGNMENT_##bits)) -#elif (__GNUC_PREREQ(3, 3) || __has_attribute(__aligned__)) && \ - !defined(__clang__) -#define read_aligned(ptr, bits) \ - (*(const uint##bits##_t \ - __attribute__((__aligned__(ALIGNMENT_##bits))) *)(ptr)) -#elif __has_attribute(__assume_aligned__) - -static __always_inline const - uint16_t *__attribute__((__assume_aligned__(ALIGNMENT_16))) - cast_aligned_16(const void *ptr) { + #if __GNUC_PREREQ(4, 8) || __has_builtin(__builtin_assume_aligned) + #define read_aligned(ptr, bits) \ + (*(const uint##bits##_t *)__builtin_assume_aligned(ptr, ALIGNMENT_##bits)) + #elif (__GNUC_PREREQ(3, 3) || __has_attribute(__aligned__)) && \ + !defined(__clang__) + #define read_aligned(ptr, bits) \ + (*(const uint##bits##_t \ + __attribute__((__aligned__(ALIGNMENT_##bits))) *)(ptr)) + #elif __has_attribute(__assume_aligned__) + +static __always_inline const uint16_t *__attribute__(( + __assume_aligned__(ALIGNMENT_16))) cast_aligned_16(const void *ptr) { + return (const uint16_t *)ptr; + } -static __always_inline const - uint32_t *__attribute__((__assume_aligned__(ALIGNMENT_32))) - cast_aligned_32(const void *ptr) { + +static __always_inline const uint32_t *__attribute__(( + __assume_aligned__(ALIGNMENT_32))) cast_aligned_32(const void *ptr) { + return (const uint32_t *)ptr; + } -static __always_inline const - uint64_t *__attribute__((__assume_aligned__(ALIGNMENT_64))) - cast_aligned_64(const void *ptr) { + +static __always_inline const uint64_t *__attribute__(( + __assume_aligned__(ALIGNMENT_64))) cast_aligned_64(const void *ptr) { + return (const uint64_t *)ptr; + } -#define read_aligned(ptr, bits) (*cast_aligned_##bits(ptr)) + #define read_aligned(ptr, bits) (*cast_aligned_##bits(ptr)) -#elif defined(_MSC_VER) -#define read_aligned(ptr, bits) \ - (*(const __declspec(align(ALIGNMENT_##bits)) uint##bits##_t *)(ptr)) -#else -#define read_aligned(ptr, bits) (*(const uint##bits##_t *)(ptr)) -#endif -#endif /* read_aligned */ + #elif defined(_MSC_VER) + #define read_aligned(ptr, bits) \ + (*(const __declspec(align(ALIGNMENT_##bits)) uint##bits##_t *)(ptr)) + #else + #define read_aligned(ptr, bits) (*(const uint##bits##_t *)(ptr)) + #endif +#endif /* read_aligned */ #ifndef prefetch -#if (__GNUC_PREREQ(4, 0) || __has_builtin(__builtin_prefetch)) && \ - !defined(__ia32__) -#define prefetch(ptr) __builtin_prefetch(ptr) -#elif defined(_M_ARM64) || defined(_M_ARM) -#define prefetch(ptr) __prefetch(ptr) -#else -#define prefetch(ptr) \ - do { \ - (void)(ptr); \ - } while (0) -#endif -#endif /* prefetch */ + #if (__GNUC_PREREQ(4, 0) || __has_builtin(__builtin_prefetch)) && \ + !defined(__ia32__) + #define prefetch(ptr) __builtin_prefetch(ptr) + #elif defined(_M_ARM64) || defined(_M_ARM) + #define prefetch(ptr) __prefetch(ptr) + #else + #define prefetch(ptr) \ + do { \ + \ + (void)(ptr); \ + \ + } while (0) + #endif +#endif /* prefetch */ #if __has_warning("-Wconstant-logical-operand") -#if defined(__clang__) -#pragma clang diagnostic ignored "-Wconstant-logical-operand" -#elif defined(__GNUC__) -#pragma GCC diagnostic ignored "-Wconstant-logical-operand" -#else -#pragma warning disable "constant-logical-operand" -#endif -#endif /* -Wconstant-logical-operand */ + #if defined(__clang__) + #pragma clang diagnostic ignored 
"-Wconstant-logical-operand" + #elif defined(__GNUC__) + #pragma GCC diagnostic ignored "-Wconstant-logical-operand" + #else + #pragma warning disable "constant-logical-operand" + #endif +#endif /* -Wconstant-logical-operand */ #if __has_warning("-Wtautological-pointer-compare") -#if defined(__clang__) -#pragma clang diagnostic ignored "-Wtautological-pointer-compare" -#elif defined(__GNUC__) -#pragma GCC diagnostic ignored "-Wtautological-pointer-compare" -#else -#pragma warning disable "tautological-pointer-compare" -#endif -#endif /* -Wtautological-pointer-compare */ + #if defined(__clang__) + #pragma clang diagnostic ignored "-Wtautological-pointer-compare" + #elif defined(__GNUC__) + #pragma GCC diagnostic ignored "-Wtautological-pointer-compare" + #else + #pragma warning disable "tautological-pointer-compare" + #endif +#endif /* -Wtautological-pointer-compare */ /***************************************************************************/ #if __GNUC_PREREQ(4, 0) -#pragma GCC visibility push(hidden) -#endif /* __GNUC_PREREQ(4,0) */ + #pragma GCC visibility push(hidden) +#endif /* __GNUC_PREREQ(4,0) */ /*---------------------------------------------------------- Little Endian */ #ifndef fetch16_le_aligned static __maybe_unused __always_inline uint16_t fetch16_le_aligned(const void *v) { + assert(((uintptr_t)v) % ALIGNMENT_16 == 0); -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ return read_aligned(v, 16); -#else + #else return bswap16(read_aligned(v, 16)); -#endif + #endif + } -#endif /* fetch16_le_aligned */ + +#endif /* fetch16_le_aligned */ #ifndef fetch16_le_unaligned static __maybe_unused __always_inline uint16_t fetch16_le_unaligned(const void *v) { -#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE + + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE const uint8_t *p = (const uint8_t *)v; return p[0] | (uint16_t)p[1] << 8; -#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + #elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ return read_unaligned(v, 16); -#else + #else return bswap16(read_unaligned(v, 16)); -#endif + #endif + } -#endif /* fetch16_le_unaligned */ + +#endif /* fetch16_le_unaligned */ #ifndef fetch32_le_aligned static __maybe_unused __always_inline uint32_t fetch32_le_aligned(const void *v) { + assert(((uintptr_t)v) % ALIGNMENT_32 == 0); -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ return read_aligned(v, 32); -#else + #else return bswap32(read_aligned(v, 32)); -#endif + #endif + } -#endif /* fetch32_le_aligned */ + +#endif /* fetch32_le_aligned */ #ifndef fetch32_le_unaligned static __maybe_unused __always_inline uint32_t fetch32_le_unaligned(const void *v) { -#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE + + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE return fetch16_le_unaligned(v) | (uint32_t)fetch16_le_unaligned((const uint8_t *)v + 2) << 16; -#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + #elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ return read_unaligned(v, 32); -#else + #else return bswap32(read_unaligned(v, 32)); -#endif + #endif + } -#endif /* fetch32_le_unaligned */ + +#endif /* fetch32_le_unaligned */ #ifndef fetch64_le_aligned static __maybe_unused __always_inline uint64_t fetch64_le_aligned(const void *v) { + assert(((uintptr_t)v) % ALIGNMENT_64 == 0); -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ return read_aligned(v, 64); -#else + #else return 
bswap64(read_aligned(v, 64)); -#endif + #endif + } -#endif /* fetch64_le_aligned */ + +#endif /* fetch64_le_aligned */ #ifndef fetch64_le_unaligned static __maybe_unused __always_inline uint64_t fetch64_le_unaligned(const void *v) { -#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE + + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE return fetch32_le_unaligned(v) | (uint64_t)fetch32_le_unaligned((const uint8_t *)v + 4) << 32; -#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + #elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ return read_unaligned(v, 64); -#else + #else return bswap64(read_unaligned(v, 64)); -#endif + #endif + } -#endif /* fetch64_le_unaligned */ + +#endif /* fetch64_le_unaligned */ static __maybe_unused __always_inline uint64_t tail64_le_aligned(const void *v, size_t tail) { + const uint8_t *const p = (const uint8_t *)v; #if T1HA_USE_FAST_ONESHOT_READ && !defined(__SANITIZE_ADDRESS__) /* We can perform a 'oneshot' read, which is little bit faster. */ @@ -611,79 +677,84 @@ static __maybe_unused __always_inline uint64_t tail64_le_aligned(const void *v, #else uint64_t r = 0; switch (tail & 7) { - default: - unreachable(); -/* fall through */ -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - /* For most CPUs this code is better when not needed byte reordering. */ - case 0: - return fetch64_le_aligned(p); - case 7: - r = (uint64_t)p[6] << 8; - /* fall through */ - case 6: - r += p[5]; - r <<= 8; - /* fall through */ - case 5: - r += p[4]; - r <<= 32; - /* fall through */ - case 4: - return r + fetch32_le_aligned(p); - case 3: - r = (uint64_t)p[2] << 16; - /* fall through */ - case 2: - return r + fetch16_le_aligned(p); - case 1: - return p[0]; -#else - case 0: - r = p[7] << 8; - /* fall through */ - case 7: - r += p[6]; - r <<= 8; - /* fall through */ - case 6: - r += p[5]; - r <<= 8; - /* fall through */ - case 5: - r += p[4]; - r <<= 8; - /* fall through */ - case 4: - r += p[3]; - r <<= 8; - /* fall through */ - case 3: - r += p[2]; - r <<= 8; - /* fall through */ - case 2: - r += p[1]; - r <<= 8; + + default: + unreachable(); /* fall through */ - case 1: - return r + p[0]; -#endif + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + /* For most CPUs this code is better when not needed byte reordering. 
*/ + case 0: + return fetch64_le_aligned(p); + case 7: + r = (uint64_t)p[6] << 8; + /* fall through */ + case 6: + r += p[5]; + r <<= 8; + /* fall through */ + case 5: + r += p[4]; + r <<= 32; + /* fall through */ + case 4: + return r + fetch32_le_aligned(p); + case 3: + r = (uint64_t)p[2] << 16; + /* fall through */ + case 2: + return r + fetch16_le_aligned(p); + case 1: + return p[0]; + #else + case 0: + r = p[7] << 8; + /* fall through */ + case 7: + r += p[6]; + r <<= 8; + /* fall through */ + case 6: + r += p[5]; + r <<= 8; + /* fall through */ + case 5: + r += p[4]; + r <<= 8; + /* fall through */ + case 4: + r += p[3]; + r <<= 8; + /* fall through */ + case 3: + r += p[2]; + r <<= 8; + /* fall through */ + case 2: + r += p[1]; + r <<= 8; + /* fall through */ + case 1: + return r + p[0]; + #endif + } -#endif /* T1HA_USE_FAST_ONESHOT_READ */ + +#endif /* T1HA_USE_FAST_ONESHOT_READ */ + } -#if T1HA_USE_FAST_ONESHOT_READ && \ - T1HA_SYS_UNALIGNED_ACCESS != T1HA_UNALIGNED_ACCESS__UNABLE && \ +#if T1HA_USE_FAST_ONESHOT_READ && \ + T1HA_SYS_UNALIGNED_ACCESS != T1HA_UNALIGNED_ACCESS__UNABLE && \ defined(PAGESIZE) && PAGESIZE > 42 && !defined(__SANITIZE_ADDRESS__) -#define can_read_underside(ptr, size) \ - (((PAGESIZE - (size)) & (uintptr_t)(ptr)) != 0) -#endif /* T1HA_USE_FAST_ONESHOT_READ */ + #define can_read_underside(ptr, size) \ + (((PAGESIZE - (size)) & (uintptr_t)(ptr)) != 0) +#endif /* T1HA_USE_FAST_ONESHOT_READ */ static __maybe_unused __always_inline uint64_t tail64_le_unaligned(const void *v, size_t tail) { + const uint8_t *p = (const uint8_t *)v; -#if defined(can_read_underside) && \ +#if defined(can_read_underside) && \ (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) /* On some systems (e.g. x86_64) we can perform a 'oneshot' read, which * is little bit faster. Thanks Marcin Żukowski @@ -691,77 +762,84 @@ tail64_le_unaligned(const void *v, size_t tail) { const unsigned offset = (8 - tail) & 7; const unsigned shift = offset << 3; if (likely(can_read_underside(p, 8))) { + p -= offset; return fetch64_le_unaligned(p) >> shift; + } + return fetch64_le_unaligned(p) & ((~UINT64_C(0)) >> shift); #else uint64_t r = 0; switch (tail & 7) { - default: - unreachable(); -/* fall through */ -#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT && \ - __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - /* For most CPUs this code is better when not needed - * copying for alignment or byte reordering. */ - case 0: - return fetch64_le_unaligned(p); - case 7: - r = (uint64_t)p[6] << 8; - /* fall through */ - case 6: - r += p[5]; - r <<= 8; - /* fall through */ - case 5: - r += p[4]; - r <<= 32; - /* fall through */ - case 4: - return r + fetch32_le_unaligned(p); - case 3: - r = (uint64_t)p[2] << 16; - /* fall through */ - case 2: - return r + fetch16_le_unaligned(p); - case 1: - return p[0]; -#else - /* For most CPUs this code is better than a - * copying for alignment and/or byte reordering. 
*/ - case 0: - r = p[7] << 8; - /* fall through */ - case 7: - r += p[6]; - r <<= 8; - /* fall through */ - case 6: - r += p[5]; - r <<= 8; - /* fall through */ - case 5: - r += p[4]; - r <<= 8; - /* fall through */ - case 4: - r += p[3]; - r <<= 8; - /* fall through */ - case 3: - r += p[2]; - r <<= 8; - /* fall through */ - case 2: - r += p[1]; - r <<= 8; + + default: + unreachable(); /* fall through */ - case 1: - return r + p[0]; -#endif + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT && \ + __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + /* For most CPUs this code is better when not needed + * copying for alignment or byte reordering. */ + case 0: + return fetch64_le_unaligned(p); + case 7: + r = (uint64_t)p[6] << 8; + /* fall through */ + case 6: + r += p[5]; + r <<= 8; + /* fall through */ + case 5: + r += p[4]; + r <<= 32; + /* fall through */ + case 4: + return r + fetch32_le_unaligned(p); + case 3: + r = (uint64_t)p[2] << 16; + /* fall through */ + case 2: + return r + fetch16_le_unaligned(p); + case 1: + return p[0]; + #else + /* For most CPUs this code is better than a + * copying for alignment and/or byte reordering. */ + case 0: + r = p[7] << 8; + /* fall through */ + case 7: + r += p[6]; + r <<= 8; + /* fall through */ + case 6: + r += p[5]; + r <<= 8; + /* fall through */ + case 5: + r += p[4]; + r <<= 8; + /* fall through */ + case 4: + r += p[3]; + r <<= 8; + /* fall through */ + case 3: + r += p[2]; + r <<= 8; + /* fall through */ + case 2: + r += p[1]; + r <<= 8; + /* fall through */ + case 1: + return r + p[0]; + #endif + } -#endif /* can_read_underside */ + +#endif /* can_read_underside */ + } /*------------------------------------------------------------- Big Endian */ @@ -769,83 +847,102 @@ tail64_le_unaligned(const void *v, size_t tail) { #ifndef fetch16_be_aligned static __maybe_unused __always_inline uint16_t fetch16_be_aligned(const void *v) { + assert(((uintptr_t)v) % ALIGNMENT_16 == 0); -#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ return read_aligned(v, 16); -#else + #else return bswap16(read_aligned(v, 16)); -#endif + #endif + } -#endif /* fetch16_be_aligned */ + +#endif /* fetch16_be_aligned */ #ifndef fetch16_be_unaligned static __maybe_unused __always_inline uint16_t fetch16_be_unaligned(const void *v) { -#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE + + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE const uint8_t *p = (const uint8_t *)v; return (uint16_t)p[0] << 8 | p[1]; -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ return read_unaligned(v, 16); -#else + #else return bswap16(read_unaligned(v, 16)); -#endif + #endif + } -#endif /* fetch16_be_unaligned */ + +#endif /* fetch16_be_unaligned */ #ifndef fetch32_be_aligned static __maybe_unused __always_inline uint32_t fetch32_be_aligned(const void *v) { + assert(((uintptr_t)v) % ALIGNMENT_32 == 0); -#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ return read_aligned(v, 32); -#else + #else return bswap32(read_aligned(v, 32)); -#endif + #endif + } -#endif /* fetch32_be_aligned */ + +#endif /* fetch32_be_aligned */ #ifndef fetch32_be_unaligned static __maybe_unused __always_inline uint32_t fetch32_be_unaligned(const void *v) { -#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE + + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE return (uint32_t)fetch16_be_unaligned(v) << 16 | fetch16_be_unaligned((const 
uint8_t *)v + 2); -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ return read_unaligned(v, 32); -#else + #else return bswap32(read_unaligned(v, 32)); -#endif + #endif + } -#endif /* fetch32_be_unaligned */ + +#endif /* fetch32_be_unaligned */ #ifndef fetch64_be_aligned static __maybe_unused __always_inline uint64_t fetch64_be_aligned(const void *v) { + assert(((uintptr_t)v) % ALIGNMENT_64 == 0); -#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ return read_aligned(v, 64); -#else + #else return bswap64(read_aligned(v, 64)); -#endif + #endif + } -#endif /* fetch64_be_aligned */ + +#endif /* fetch64_be_aligned */ #ifndef fetch64_be_unaligned static __maybe_unused __always_inline uint64_t fetch64_be_unaligned(const void *v) { -#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE + + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE return (uint64_t)fetch32_be_unaligned(v) << 32 | fetch32_be_unaligned((const uint8_t *)v + 4); -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ return read_unaligned(v, 64); -#else + #else return bswap64(read_unaligned(v, 64)); -#endif + #endif + } -#endif /* fetch64_be_unaligned */ + +#endif /* fetch64_be_unaligned */ static __maybe_unused __always_inline uint64_t tail64_be_aligned(const void *v, size_t tail) { + const uint8_t *const p = (const uint8_t *)v; #if T1HA_USE_FAST_ONESHOT_READ && !defined(__SANITIZE_ADDRESS__) /* We can perform a 'oneshot' read, which is little bit faster. */ @@ -853,61 +950,66 @@ static __maybe_unused __always_inline uint64_t tail64_be_aligned(const void *v, return fetch64_be_aligned(p) >> shift; #else switch (tail & 7) { - default: - unreachable(); -/* fall through */ -#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - /* For most CPUs this code is better when not byte reordering. */ - case 1: - return p[0]; - case 2: - return fetch16_be_aligned(p); - case 3: - return (uint32_t)fetch16_be_aligned(p) << 8 | p[2]; - case 4: - return fetch32_be_aligned(p); - case 5: - return (uint64_t)fetch32_be_aligned(p) << 8 | p[4]; - case 6: - return (uint64_t)fetch32_be_aligned(p) << 16 | fetch16_be_aligned(p + 4); - case 7: - return (uint64_t)fetch32_be_aligned(p) << 24 | - (uint32_t)fetch16_be_aligned(p + 4) << 8 | p[6]; - case 0: - return fetch64_be_aligned(p); -#else - case 1: - return p[0]; - case 2: - return p[1] | (uint32_t)p[0] << 8; - case 3: - return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16; - case 4: - return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 | - (uint32_t)p[0] << 24; - case 5: - return p[4] | (uint32_t)p[3] << 8 | (uint32_t)p[2] << 16 | - (uint32_t)p[1] << 24 | (uint64_t)p[0] << 32; - case 6: - return p[5] | (uint32_t)p[4] << 8 | (uint32_t)p[3] << 16 | - (uint32_t)p[2] << 24 | (uint64_t)p[1] << 32 | (uint64_t)p[0] << 40; - case 7: - return p[6] | (uint32_t)p[5] << 8 | (uint32_t)p[4] << 16 | - (uint32_t)p[3] << 24 | (uint64_t)p[2] << 32 | (uint64_t)p[1] << 40 | - (uint64_t)p[0] << 48; - case 0: - return p[7] | (uint32_t)p[6] << 8 | (uint32_t)p[5] << 16 | - (uint32_t)p[4] << 24 | (uint64_t)p[3] << 32 | (uint64_t)p[2] << 40 | - (uint64_t)p[1] << 48 | (uint64_t)p[0] << 56; -#endif + + default: + unreachable(); + /* fall through */ + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + /* For most CPUs this code is better when not byte reordering. 
*/ + case 1: + return p[0]; + case 2: + return fetch16_be_aligned(p); + case 3: + return (uint32_t)fetch16_be_aligned(p) << 8 | p[2]; + case 4: + return fetch32_be_aligned(p); + case 5: + return (uint64_t)fetch32_be_aligned(p) << 8 | p[4]; + case 6: + return (uint64_t)fetch32_be_aligned(p) << 16 | fetch16_be_aligned(p + 4); + case 7: + return (uint64_t)fetch32_be_aligned(p) << 24 | + (uint32_t)fetch16_be_aligned(p + 4) << 8 | p[6]; + case 0: + return fetch64_be_aligned(p); + #else + case 1: + return p[0]; + case 2: + return p[1] | (uint32_t)p[0] << 8; + case 3: + return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16; + case 4: + return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 | + (uint32_t)p[0] << 24; + case 5: + return p[4] | (uint32_t)p[3] << 8 | (uint32_t)p[2] << 16 | + (uint32_t)p[1] << 24 | (uint64_t)p[0] << 32; + case 6: + return p[5] | (uint32_t)p[4] << 8 | (uint32_t)p[3] << 16 | + (uint32_t)p[2] << 24 | (uint64_t)p[1] << 32 | (uint64_t)p[0] << 40; + case 7: + return p[6] | (uint32_t)p[5] << 8 | (uint32_t)p[4] << 16 | + (uint32_t)p[3] << 24 | (uint64_t)p[2] << 32 | + (uint64_t)p[1] << 40 | (uint64_t)p[0] << 48; + case 0: + return p[7] | (uint32_t)p[6] << 8 | (uint32_t)p[5] << 16 | + (uint32_t)p[4] << 24 | (uint64_t)p[3] << 32 | + (uint64_t)p[2] << 40 | (uint64_t)p[1] << 48 | (uint64_t)p[0] << 56; + #endif + } -#endif /* T1HA_USE_FAST_ONESHOT_READ */ + +#endif /* T1HA_USE_FAST_ONESHOT_READ */ + } static __maybe_unused __always_inline uint64_t tail64_be_unaligned(const void *v, size_t tail) { + const uint8_t *p = (const uint8_t *)v; -#if defined(can_read_underside) && \ +#if defined(can_read_underside) && \ (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) /* On some systems (e.g. x86_64) we can perform a 'oneshot' read, which * is little bit faster. Thanks Marcin Żukowski @@ -915,139 +1017,167 @@ tail64_be_unaligned(const void *v, size_t tail) { const unsigned offset = (8 - tail) & 7; const unsigned shift = offset << 3; if (likely(can_read_underside(p, 8))) { + p -= offset; return fetch64_be_unaligned(p) & ((~UINT64_C(0)) >> shift); + } + return fetch64_be_unaligned(p) >> shift; #else switch (tail & 7) { - default: - unreachable(); -/* fall through */ -#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT && \ - __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - /* For most CPUs this code is better when not needed - * copying for alignment or byte reordering. */ - case 1: - return p[0]; - case 2: - return fetch16_be_unaligned(p); - case 3: - return (uint32_t)fetch16_be_unaligned(p) << 8 | p[2]; - case 4: - return fetch32_be(p); - case 5: - return (uint64_t)fetch32_be_unaligned(p) << 8 | p[4]; - case 6: - return (uint64_t)fetch32_be_unaligned(p) << 16 | - fetch16_be_unaligned(p + 4); - case 7: - return (uint64_t)fetch32_be_unaligned(p) << 24 | - (uint32_t)fetch16_be_unaligned(p + 4) << 8 | p[6]; - case 0: - return fetch64_be_unaligned(p); -#else - /* For most CPUs this code is better than a - * copying for alignment and/or byte reordering. 
*/ - case 1: - return p[0]; - case 2: - return p[1] | (uint32_t)p[0] << 8; - case 3: - return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16; - case 4: - return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 | - (uint32_t)p[0] << 24; - case 5: - return p[4] | (uint32_t)p[3] << 8 | (uint32_t)p[2] << 16 | - (uint32_t)p[1] << 24 | (uint64_t)p[0] << 32; - case 6: - return p[5] | (uint32_t)p[4] << 8 | (uint32_t)p[3] << 16 | - (uint32_t)p[2] << 24 | (uint64_t)p[1] << 32 | (uint64_t)p[0] << 40; - case 7: - return p[6] | (uint32_t)p[5] << 8 | (uint32_t)p[4] << 16 | - (uint32_t)p[3] << 24 | (uint64_t)p[2] << 32 | (uint64_t)p[1] << 40 | - (uint64_t)p[0] << 48; - case 0: - return p[7] | (uint32_t)p[6] << 8 | (uint32_t)p[5] << 16 | - (uint32_t)p[4] << 24 | (uint64_t)p[3] << 32 | (uint64_t)p[2] << 40 | - (uint64_t)p[1] << 48 | (uint64_t)p[0] << 56; -#endif + + default: + unreachable(); + /* fall through */ + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT && \ + __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + /* For most CPUs this code is better when not needed + * copying for alignment or byte reordering. */ + case 1: + return p[0]; + case 2: + return fetch16_be_unaligned(p); + case 3: + return (uint32_t)fetch16_be_unaligned(p) << 8 | p[2]; + case 4: + return fetch32_be(p); + case 5: + return (uint64_t)fetch32_be_unaligned(p) << 8 | p[4]; + case 6: + return (uint64_t)fetch32_be_unaligned(p) << 16 | + fetch16_be_unaligned(p + 4); + case 7: + return (uint64_t)fetch32_be_unaligned(p) << 24 | + (uint32_t)fetch16_be_unaligned(p + 4) << 8 | p[6]; + case 0: + return fetch64_be_unaligned(p); + #else + /* For most CPUs this code is better than a + * copying for alignment and/or byte reordering. */ + case 1: + return p[0]; + case 2: + return p[1] | (uint32_t)p[0] << 8; + case 3: + return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16; + case 4: + return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 | + (uint32_t)p[0] << 24; + case 5: + return p[4] | (uint32_t)p[3] << 8 | (uint32_t)p[2] << 16 | + (uint32_t)p[1] << 24 | (uint64_t)p[0] << 32; + case 6: + return p[5] | (uint32_t)p[4] << 8 | (uint32_t)p[3] << 16 | + (uint32_t)p[2] << 24 | (uint64_t)p[1] << 32 | (uint64_t)p[0] << 40; + case 7: + return p[6] | (uint32_t)p[5] << 8 | (uint32_t)p[4] << 16 | + (uint32_t)p[3] << 24 | (uint64_t)p[2] << 32 | + (uint64_t)p[1] << 40 | (uint64_t)p[0] << 48; + case 0: + return p[7] | (uint32_t)p[6] << 8 | (uint32_t)p[5] << 16 | + (uint32_t)p[4] << 24 | (uint64_t)p[3] << 32 | + (uint64_t)p[2] << 40 | (uint64_t)p[1] << 48 | (uint64_t)p[0] << 56; + #endif + } -#endif /* can_read_underside */ + +#endif /* can_read_underside */ + } /***************************************************************************/ #ifndef rot64 static __maybe_unused __always_inline uint64_t rot64(uint64_t v, unsigned s) { + return (v >> s) | (v << (64 - s)); + } -#endif /* rot64 */ + +#endif /* rot64 */ #ifndef mul_32x32_64 static __maybe_unused __always_inline uint64_t mul_32x32_64(uint32_t a, uint32_t b) { + return a * (uint64_t)b; + } -#endif /* mul_32x32_64 */ + +#endif /* mul_32x32_64 */ #ifndef add64carry_first -static __maybe_unused __always_inline unsigned -add64carry_first(uint64_t base, uint64_t addend, uint64_t *sum) { -#if __has_builtin(__builtin_addcll) +static __maybe_unused __always_inline unsigned add64carry_first(uint64_t base, + uint64_t addend, + uint64_t *sum) { + + #if __has_builtin(__builtin_addcll) unsigned long long carryout; *sum = __builtin_addcll(base, addend, 0, &carryout); return (unsigned)carryout; 
-#else + #else *sum = base + addend; return *sum < addend; -#endif /* __has_builtin(__builtin_addcll) */ + #endif /* __has_builtin(__builtin_addcll) */ + } -#endif /* add64carry_fist */ + +#endif /* add64carry_fist */ #ifndef add64carry_next -static __maybe_unused __always_inline unsigned -add64carry_next(unsigned carry, uint64_t base, uint64_t addend, uint64_t *sum) { -#if __has_builtin(__builtin_addcll) +static __maybe_unused __always_inline unsigned add64carry_next(unsigned carry, + uint64_t base, + uint64_t addend, + uint64_t *sum) { + + #if __has_builtin(__builtin_addcll) unsigned long long carryout; *sum = __builtin_addcll(base, addend, carry, &carryout); return (unsigned)carryout; -#else + #else *sum = base + addend + carry; return *sum < addend || (carry && *sum == addend); -#endif /* __has_builtin(__builtin_addcll) */ + #endif /* __has_builtin(__builtin_addcll) */ + } -#endif /* add64carry_next */ + +#endif /* add64carry_next */ #ifndef add64carry_last -static __maybe_unused __always_inline void -add64carry_last(unsigned carry, uint64_t base, uint64_t addend, uint64_t *sum) { -#if __has_builtin(__builtin_addcll) +static __maybe_unused __always_inline void add64carry_last(unsigned carry, + uint64_t base, + uint64_t addend, + uint64_t *sum) { + + #if __has_builtin(__builtin_addcll) unsigned long long carryout; *sum = __builtin_addcll(base, addend, carry, &carryout); (void)carryout; -#else + #else *sum = base + addend + carry; -#endif /* __has_builtin(__builtin_addcll) */ + #endif /* __has_builtin(__builtin_addcll) */ + } -#endif /* add64carry_last */ + +#endif /* add64carry_last */ #ifndef mul_64x64_128 -static __maybe_unused __always_inline uint64_t mul_64x64_128(uint64_t a, - uint64_t b, +static __maybe_unused __always_inline uint64_t mul_64x64_128(uint64_t a, + uint64_t b, uint64_t *h) { -#if (defined(__SIZEOF_INT128__) || \ - (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)) && \ - (!defined(__LCC__) || __LCC__ != 124) + + #if (defined(__SIZEOF_INT128__) || \ + (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)) && \ + (!defined(__LCC__) || __LCC__ != 124) __uint128_t r = (__uint128_t)a * (__uint128_t)b; /* modern GCC could nicely optimize this */ *h = (uint64_t)(r >> 64); return (uint64_t)r; -#elif defined(mul_64x64_high) + #elif defined(mul_64x64_high) *h = mul_64x64_high(a, b); return a * b; -#else + #else /* performs 64x64 to 128 bit multiplication */ const uint64_t ll = mul_32x32_64((uint32_t)a, (uint32_t)b); const uint64_t lh = mul_32x32_64(a >> 32, (uint32_t)b); @@ -1062,18 +1192,23 @@ static __maybe_unused __always_inline uint64_t mul_64x64_128(uint64_t a, add64carry_last(add64carry_first(ll, lh << 32, &l), hh, lh >> 32, h); add64carry_last(add64carry_first(l, hl << 32, &l), *h, hl >> 32, h); return l; -#endif + #endif + } -#endif /* mul_64x64_128() */ + +#endif /* mul_64x64_128() */ #ifndef mul_64x64_high static __maybe_unused __always_inline uint64_t mul_64x64_high(uint64_t a, uint64_t b) { + uint64_t h; mul_64x64_128(a, b, &h); return h; + } -#endif /* mul_64x64_high */ + +#endif /* mul_64x64_high */ /***************************************************************************/ @@ -1089,45 +1224,56 @@ static const uint64_t prime_6 = UINT64_C(0xCB5AF53AE3AAAC31); /* xor high and low parts of full 128-bit product */ static __maybe_unused __always_inline uint64_t mux64(uint64_t v, uint64_t prime) { + uint64_t l, h; l = mul_64x64_128(v, prime, &h); return l ^ h; + } static __maybe_unused __always_inline uint64_t final64(uint64_t a, uint64_t b) { + 
uint64_t x = (a + rot64(b, 41)) * prime_0; uint64_t y = (rot64(a, 23) + b) * prime_6; return mux64(x ^ y, prime_5); + } static __maybe_unused __always_inline void mixup64(uint64_t *__restrict a, uint64_t *__restrict b, uint64_t v, uint64_t prime) { + uint64_t h; *a ^= mul_64x64_128(*b + v, prime, &h); *b += h; + } /***************************************************************************/ typedef union t1ha_uint128 { -#if defined(__SIZEOF_INT128__) || \ + +#if defined(__SIZEOF_INT128__) || \ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) __uint128_t v; #endif struct { + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ uint64_t l, h; #else uint64_t h, l; #endif + }; + } t1ha_uint128_t; static __maybe_unused __always_inline t1ha_uint128_t not128(const t1ha_uint128_t v) { + t1ha_uint128_t r; -#if defined(__SIZEOF_INT128__) || \ +#if defined(__SIZEOF_INT128__) || \ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) r.v = ~v.v; #else @@ -1135,13 +1281,15 @@ not128(const t1ha_uint128_t v) { r.h = ~v.h; #endif return r; + } static __maybe_unused __always_inline t1ha_uint128_t left128(const t1ha_uint128_t v, unsigned s) { + t1ha_uint128_t r; assert(s < 128); -#if defined(__SIZEOF_INT128__) || \ +#if defined(__SIZEOF_INT128__) || \ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) r.v = v.v << s; #else @@ -1149,13 +1297,15 @@ left128(const t1ha_uint128_t v, unsigned s) { r.h = (s < 64) ? (v.h << s) | (s ? v.l >> (64 - s) : 0) : v.l << (s - 64); #endif return r; + } static __maybe_unused __always_inline t1ha_uint128_t right128(const t1ha_uint128_t v, unsigned s) { + t1ha_uint128_t r; assert(s < 128); -#if defined(__SIZEOF_INT128__) || \ +#if defined(__SIZEOF_INT128__) || \ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) r.v = v.v >> s; #else @@ -1163,12 +1313,14 @@ right128(const t1ha_uint128_t v, unsigned s) { r.h = (s < 64) ? v.h >> s : 0; #endif return r; + } static __maybe_unused __always_inline t1ha_uint128_t or128(t1ha_uint128_t x, t1ha_uint128_t y) { + t1ha_uint128_t r; -#if defined(__SIZEOF_INT128__) || \ +#if defined(__SIZEOF_INT128__) || \ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) r.v = x.v | y.v; #else @@ -1176,12 +1328,14 @@ static __maybe_unused __always_inline t1ha_uint128_t or128(t1ha_uint128_t x, r.h = x.h | y.h; #endif return r; + } static __maybe_unused __always_inline t1ha_uint128_t xor128(t1ha_uint128_t x, t1ha_uint128_t y) { + t1ha_uint128_t r; -#if defined(__SIZEOF_INT128__) || \ +#if defined(__SIZEOF_INT128__) || \ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) r.v = x.v ^ y.v; #else @@ -1189,36 +1343,42 @@ static __maybe_unused __always_inline t1ha_uint128_t xor128(t1ha_uint128_t x, r.h = x.h ^ y.h; #endif return r; + } static __maybe_unused __always_inline t1ha_uint128_t rot128(t1ha_uint128_t v, - unsigned s) { + unsigned s) { + s &= 127; -#if defined(__SIZEOF_INT128__) || \ +#if defined(__SIZEOF_INT128__) || \ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) v.v = (v.v << (128 - s)) | (v.v >> s); return v; #else return s ? 
or128(left128(v, 128 - s), right128(v, s)) : v; #endif + } static __maybe_unused __always_inline t1ha_uint128_t add128(t1ha_uint128_t x, t1ha_uint128_t y) { + t1ha_uint128_t r; -#if defined(__SIZEOF_INT128__) || \ +#if defined(__SIZEOF_INT128__) || \ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) r.v = x.v + y.v; #else add64carry_last(add64carry_first(x.l, y.l, &r.l), x.h, y.h, &r.h); #endif return r; + } static __maybe_unused __always_inline t1ha_uint128_t mul128(t1ha_uint128_t x, t1ha_uint128_t y) { + t1ha_uint128_t r; -#if defined(__SIZEOF_INT128__) || \ +#if defined(__SIZEOF_INT128__) || \ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) r.v = x.v * y.v; #else @@ -1226,6 +1386,7 @@ static __maybe_unused __always_inline t1ha_uint128_t mul128(t1ha_uint128_t x, r.h += x.l * y.h + y.l * x.h; #endif return r; + } /***************************************************************************/ @@ -1233,22 +1394,29 @@ static __maybe_unused __always_inline t1ha_uint128_t mul128(t1ha_uint128_t x, #if T1HA0_AESNI_AVAILABLE && defined(__ia32__) uint64_t t1ha_ia32cpu_features(void); -static __maybe_unused __always_inline bool -t1ha_ia32_AESNI_avail(uint64_t ia32cpu_features) { +static __maybe_unused __always_inline bool t1ha_ia32_AESNI_avail( + uint64_t ia32cpu_features) { + /* check for AES-NI */ return (ia32cpu_features & UINT32_C(0x02000000)) != 0; + } -static __maybe_unused __always_inline bool -t1ha_ia32_AVX_avail(uint64_t ia32cpu_features) { +static __maybe_unused __always_inline bool t1ha_ia32_AVX_avail( + uint64_t ia32cpu_features) { + /* check for any AVX */ return (ia32cpu_features & UINT32_C(0x1A000000)) == UINT32_C(0x1A000000); + } -static __maybe_unused __always_inline bool -t1ha_ia32_AVX2_avail(uint64_t ia32cpu_features) { +static __maybe_unused __always_inline bool t1ha_ia32_AVX2_avail( + uint64_t ia32cpu_features) { + /* check for 'Advanced Vector Extensions 2' */ return ((ia32cpu_features >> 32) & 32) != 0; + } -#endif /* T1HA0_AESNI_AVAILABLE && __ia32__ */ +#endif /* T1HA0_AESNI_AVAILABLE && __ia32__ */ + diff --git a/include/t1ha_selfcheck.h b/include/t1ha_selfcheck.h index ff7c589c..65343bfe 100644 --- a/include/t1ha_selfcheck.h +++ b/include/t1ha_selfcheck.h @@ -43,8 +43,8 @@ #pragma once #if defined(_MSC_VER) && _MSC_VER > 1800 -#pragma warning(disable : 4464) /* relative include path contains '..' */ -#endif /* MSVC */ + #pragma warning(disable : 4464) /* relative include path contains '..' 
*/ +#endif /* MSVC */ #include "t1ha.h" /***************************************************************************/ @@ -59,18 +59,19 @@ extern const uint64_t t1ha_refval_2atonce[81]; extern const uint64_t t1ha_refval_2atonce128[81]; extern const uint64_t t1ha_refval_2stream[81]; extern const uint64_t t1ha_refval_2stream128[81]; -#endif /* T1HA2_DISABLED */ +#endif /* T1HA2_DISABLED */ #ifndef T1HA1_DISABLED extern const uint64_t t1ha_refval_64le[81]; extern const uint64_t t1ha_refval_64be[81]; -#endif /* T1HA1_DISABLED */ +#endif /* T1HA1_DISABLED */ #ifndef T1HA0_DISABLED extern const uint64_t t1ha_refval_32le[81]; extern const uint64_t t1ha_refval_32be[81]; -#if T1HA0_AESNI_AVAILABLE + #if T1HA0_AESNI_AVAILABLE extern const uint64_t t1ha_refval_ia32aes_a[81]; extern const uint64_t t1ha_refval_ia32aes_b[81]; -#endif /* T1HA0_AESNI_AVAILABLE */ -#endif /* T1HA0_DISABLED */ + #endif /* T1HA0_AESNI_AVAILABLE */ +#endif /* T1HA0_DISABLED */ + diff --git a/include/xxhash.h b/include/xxhash.h index d11f0f63..7697d0f2 100644 --- a/include/xxhash.h +++ b/include/xxhash.h @@ -36,8 +36,8 @@ /*! * @mainpage xxHash * - * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed - * limits. + * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM + * speed limits. * * It is proposed in four flavors, in three families: * 1. @ref XXH32_family @@ -54,44 +54,46 @@ * Benchmarks * --- * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04. - * The open source benchmark program is compiled with clang v10.0 using -O3 flag. - * - * | Hash Name | ISA ext | Width | Large Data Speed | Small Data Velocity | - * | -------------------- | ------- | ----: | ---------------: | ------------------: | - * | XXH3_64bits() | @b AVX2 | 64 | 59.4 GB/s | 133.1 | - * | MeowHash | AES-NI | 128 | 58.2 GB/s | 52.5 | - * | XXH3_128bits() | @b AVX2 | 128 | 57.9 GB/s | 118.1 | - * | CLHash | PCLMUL | 64 | 37.1 GB/s | 58.1 | - * | XXH3_64bits() | @b SSE2 | 64 | 31.5 GB/s | 133.1 | - * | XXH3_128bits() | @b SSE2 | 128 | 29.6 GB/s | 118.1 | - * | RAM sequential read | | N/A | 28.0 GB/s | N/A | - * | ahash | AES-NI | 64 | 22.5 GB/s | 107.2 | - * | City64 | | 64 | 22.0 GB/s | 76.6 | - * | T1ha2 | | 64 | 22.0 GB/s | 99.0 | - * | City128 | | 128 | 21.7 GB/s | 57.7 | - * | FarmHash | AES-NI | 64 | 21.3 GB/s | 71.9 | - * | XXH64() | | 64 | 19.4 GB/s | 71.0 | - * | SpookyHash | | 64 | 19.3 GB/s | 53.2 | - * | Mum | | 64 | 18.0 GB/s | 67.0 | - * | CRC32C | SSE4.2 | 32 | 13.0 GB/s | 57.9 | - * | XXH32() | | 32 | 9.7 GB/s | 71.9 | - * | City32 | | 32 | 9.1 GB/s | 66.0 | - * | Blake3* | @b AVX2 | 256 | 4.4 GB/s | 8.1 | - * | Murmur3 | | 32 | 3.9 GB/s | 56.1 | - * | SipHash* | | 64 | 3.0 GB/s | 43.2 | - * | Blake3* | @b SSE2 | 256 | 2.4 GB/s | 8.1 | - * | HighwayHash | | 64 | 1.4 GB/s | 6.0 | - * | FNV64 | | 64 | 1.2 GB/s | 62.7 | - * | Blake2* | | 256 | 1.1 GB/s | 5.1 | - * | SHA1* | | 160 | 0.8 GB/s | 5.6 | - * | MD5* | | 128 | 0.6 GB/s | 7.8 | + * The open source benchmark program is compiled with clang v10.0 using -O3 + * flag. 
+ * + * | Hash Name | ISA ext | Width | Large Data Speed | Small Data + * Velocity | | -------------------- | ------- | ----: | ---------------: | + * ------------------: | | XXH3_64bits() | @b AVX2 | 64 | 59.4 + * GB/s | 133.1 | | MeowHash | AES-NI | 128 | 58.2 + * GB/s | 52.5 | | XXH3_128bits() | @b AVX2 | 128 | 57.9 + * GB/s | 118.1 | | CLHash | PCLMUL | 64 | 37.1 + * GB/s | 58.1 | | XXH3_64bits() | @b SSE2 | 64 | 31.5 + * GB/s | 133.1 | | XXH3_128bits() | @b SSE2 | 128 | 29.6 + * GB/s | 118.1 | | RAM sequential read | | N/A | 28.0 + * GB/s | N/A | | ahash | AES-NI | 64 | 22.5 + * GB/s | 107.2 | | City64 | | 64 | 22.0 + * GB/s | 76.6 | | T1ha2 | | 64 | 22.0 + * GB/s | 99.0 | | City128 | | 128 | 21.7 + * GB/s | 57.7 | | FarmHash | AES-NI | 64 | 21.3 + * GB/s | 71.9 | | XXH64() | | 64 | 19.4 + * GB/s | 71.0 | | SpookyHash | | 64 | 19.3 + * GB/s | 53.2 | | Mum | | 64 | 18.0 + * GB/s | 67.0 | | CRC32C | SSE4.2 | 32 | 13.0 + * GB/s | 57.9 | | XXH32() | | 32 | 9.7 + * GB/s | 71.9 | | City32 | | 32 | 9.1 + * GB/s | 66.0 | | Blake3* | @b AVX2 | 256 | 4.4 + * GB/s | 8.1 | | Murmur3 | | 32 | 3.9 + * GB/s | 56.1 | | SipHash* | | 64 | 3.0 + * GB/s | 43.2 | | Blake3* | @b SSE2 | 256 | 2.4 + * GB/s | 8.1 | | HighwayHash | | 64 | 1.4 + * GB/s | 6.0 | | FNV64 | | 64 | 1.2 + * GB/s | 62.7 | | Blake2* | | 256 | 1.1 + * GB/s | 5.1 | | SHA1* | | 160 | 0.8 + * GB/s | 5.6 | | MD5* | | 128 | 0.6 + * GB/s | 7.8 | * @note - * - Hashes which require a specific ISA extension are noted. SSE2 is also noted, - * even though it is mandatory on x64. - * - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic - * by modern standards. - * - Small data velocity is a rough average of algorithm's efficiency for small - * data. For more accurate information, see the wiki. + * - Hashes which require a specific ISA extension are noted. SSE2 is also + * noted, even though it is mandatory on x64. + * - Hashes with an asterisk are cryptographic. Note that MD5 is + * non-cryptographic by modern standards. + * - Small data velocity is a rough average of algorithm's efficiency for + * small data. For more accurate information, see the wiki. * - More benchmarks and strength tests are found on the wiki: * https://github.com/Cyan4973/xxHash/wiki * @@ -106,14 +108,15 @@ * - The range from [`input`, `input + length`) is valid, readable memory. * - The only exception is if the `length` is `0`, `input` may be `NULL`. * - For C++, the objects must have the *TriviallyCopyable* property, as the - * functions access bytes directly as if it was an array of `unsigned char`. + * functions access bytes directly as if it was an array of `unsigned + * char`. * * @anchor single_shot_example * **Single Shot** * - * These functions are stateless functions which hash a contiguous block of memory, - * immediately returning the result. They are the easiest and usually the fastest - * option. + * These functions are stateless functions which hash a contiguous block of + * memory, immediately returning the result. They are the easiest and usually + * the fastest option. * * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits() * @@ -121,9 +124,10 @@ * #include * #include "xxhash.h" * - * // Example for a function which hashes a null terminated string with XXH32(). - * XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed) + * // Example for a function which hashes a null terminated string with + * XXH32(). 
XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed) * { + * // NULL pointers are only valid if the length is zero * size_t length = (string == NULL) ? 0 : strlen(string); * return XXH32(string, length, seed); @@ -143,9 +147,10 @@ * #include * #include * #include "xxhash.h" - * // Example for a function which hashes a FILE incrementally with XXH3_64bits(). - * XXH64_hash_t hashFile(FILE* f) + * // Example for a function which hashes a FILE incrementally with + * XXH3_64bits(). XXH64_hash_t hashFile(FILE* f) * { + * // Allocate a state struct. Do not just use malloc() or new. * XXH3_state_t* state = XXH3_createState(); * assert(state != NULL && "Out of memory!"); @@ -155,6 +160,7 @@ * size_t count; * // Read the file in chunks * while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) { + * // Run update() as many times as necessary to process the data * XXH3_64bits_update(state, buffer, count); * } @@ -174,7 +180,8 @@ * * Start a new hash by initializing the state with a seed using `XXH*_reset()`. * - * Then, feed the hash state by calling `XXH*_update()` as many times as necessary. + * Then, feed the hash state by calling `XXH*_update()` as many times as + * necessary. * * The function returns an error code, with 0 meaning OK, and any other value * meaning there is an error. @@ -195,11 +202,13 @@ * integers. * This the simplest and fastest format for further post-processing. * - * However, this leaves open the question of what is the order on the byte level, - * since little and big endian conventions will store the same number differently. + * However, this leaves open the question of what is the order on the byte + * level, since little and big endian conventions will store the same number + * differently. * * The canonical representation settles this issue by mandating big-endian - * convention, the same convention as human-readable numbers (large digits first). + * convention, the same convention as human-readable numbers (large digits + * first). * * When writing hash values to storage, sending them over a network, or printing * them, it's highly recommended to use the canonical representation to ensure @@ -216,13 +225,15 @@ * #include * #include "xxhash.h" * - * // Example for a function which prints XXH32_hash_t in human readable format - * void printXxh32(XXH32_hash_t hash) + * // Example for a function which prints XXH32_hash_t in human readable + * format void printXxh32(XXH32_hash_t hash) * { + * XXH32_canonical_t cano; * XXH32_canonicalFromHash(&cano, hash); * size_t i; * for(i = 0; i < sizeof(cano.digest); ++i) { + * printf("%02x", cano.digest[i]); * } * printf("\n"); @@ -231,6 +242,7 @@ * // Example for a function which converts XXH32_canonical_t to XXH32_hash_t * XXH32_hash_t convertCanonicalToXxh32(XXH32_canonical_t cano) * { + * XXH32_hash_t hash = XXH32_hashFromCanonical(&cano); * return hash; * } @@ -241,8 +253,9 @@ * xxHash prototypes and implementation */ -#if defined (__cplusplus) +#if defined(__cplusplus) extern "C" { + #endif /* **************************** @@ -252,304 +265,328 @@ extern "C" { * @defgroup public Public API * Contains details on the public xxHash functions. * @{ - */ -#ifdef XXH_DOXYGEN -/*! - * @brief Gives access to internal state declaration, required for static allocation. - * - * Incompatible with dynamic linking, due to risks of ABI changes. 
- * - * Usage: - * @code{.c} - * #define XXH_STATIC_LINKING_ONLY - * #include "xxhash.h" - * @endcode - */ -# define XXH_STATIC_LINKING_ONLY -/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */ - -/*! - * @brief Gives access to internal definitions. - * - * Usage: - * @code{.c} - * #define XXH_STATIC_LINKING_ONLY - * #define XXH_IMPLEMENTATION - * #include "xxhash.h" - * @endcode - */ -# define XXH_IMPLEMENTATION -/* Do not undef XXH_IMPLEMENTATION for Doxygen */ -/*! - * @brief Exposes the implementation and marks all functions as `inline`. - * - * Use these build macros to inline xxhash into the target unit. - * Inlining improves performance on small inputs, especially when the length is - * expressed as a compile-time constant: - * - * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html - * - * It also keeps xxHash symbols private to the unit, so they are not exported. - * - * Usage: - * @code{.c} - * #define XXH_INLINE_ALL - * #include "xxhash.h" - * @endcode - * Do not compile and link xxhash.o as a separate object, as it is not useful. - */ -# define XXH_INLINE_ALL -# undef XXH_INLINE_ALL -/*! - * @brief Exposes the implementation without marking functions as inline. - */ -# define XXH_PRIVATE_API -# undef XXH_PRIVATE_API -/*! - * @brief Emulate a namespace by transparently prefixing all symbols. - * - * If you want to include _and expose_ xxHash functions from within your own - * library, but also want to avoid symbol collisions with other libraries which - * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix - * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE - * (therefore, avoid empty or numeric values). - * - * Note that no change is required within the calling program as long as it - * includes `xxhash.h`: Regular symbol names will be automatically translated - * by this header. */ -# define XXH_NAMESPACE /* YOUR NAME HERE */ -# undef XXH_NAMESPACE +#ifdef XXH_DOXYGEN + /*! + * @brief Gives access to internal state declaration, required for static + * allocation. + * + * Incompatible with dynamic linking, due to risks of ABI changes. + * + * Usage: + * @code{.c} + * #define XXH_STATIC_LINKING_ONLY + * #include "xxhash.h" + * @endcode + */ + #define XXH_STATIC_LINKING_ONLY + /* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */ + + /*! + * @brief Gives access to internal definitions. + * + * Usage: + * @code{.c} + * #define XXH_STATIC_LINKING_ONLY + * #define XXH_IMPLEMENTATION + * #include "xxhash.h" + * @endcode + */ + #define XXH_IMPLEMENTATION + /* Do not undef XXH_IMPLEMENTATION for Doxygen */ + + /*! + * @brief Exposes the implementation and marks all functions as `inline`. + * + * Use these build macros to inline xxhash into the target unit. + * Inlining improves performance on small inputs, especially when the length + * is expressed as a compile-time constant: + * + * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html + * + * It also keeps xxHash symbols private to the unit, so they are not exported. + * + * Usage: + * @code{.c} + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * @endcode + * Do not compile and link xxhash.o as a separate object, as it is not useful. + */ + #define XXH_INLINE_ALL + #undef XXH_INLINE_ALL + /*! + * @brief Exposes the implementation without marking functions as inline. + */ + #define XXH_PRIVATE_API + #undef XXH_PRIVATE_API + /*! + * @brief Emulate a namespace by transparently prefixing all symbols. 
+ * + * If you want to include _and expose_ xxHash functions from within your own + * library, but also want to avoid symbol collisions with other libraries + * which may also include xxHash, you can use @ref XXH_NAMESPACE to + * automatically prefix any public symbol from xxhash library with the value + * of @ref XXH_NAMESPACE (therefore, avoid empty or numeric values). + * + * Note that no change is required within the calling program as long as it + * includes `xxhash.h`: Regular symbol names will be automatically translated + * by this header. + */ + #define XXH_NAMESPACE /* YOUR NAME HERE */ + #undef XXH_NAMESPACE #endif -#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \ - && !defined(XXH_INLINE_ALL_31684351384) - /* this section should be traversed only once */ -# define XXH_INLINE_ALL_31684351384 - /* give access to the advanced API, required to compile implementations */ -# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */ -# define XXH_STATIC_LINKING_ONLY - /* make all functions private */ -# undef XXH_PUBLIC_API -# if defined(__GNUC__) -# define XXH_PUBLIC_API static __inline __attribute__((unused)) -# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) -# define XXH_PUBLIC_API static inline -# elif defined(_MSC_VER) -# define XXH_PUBLIC_API static __inline -# else - /* note: this version may generate warnings for unused static functions */ -# define XXH_PUBLIC_API static -# endif - - /* - * This part deals with the special case where a unit wants to inline xxHash, - * but "xxhash.h" has previously been included without XXH_INLINE_ALL, - * such as part of some previously included *.h header file. - * Without further action, the new include would just be ignored, - * and functions would effectively _not_ be inlined (silent failure). - * The following macros solve this situation by prefixing all inlined names, - * avoiding naming collision with previous inclusions. - */ - /* Before that, we unconditionally #undef all symbols, - * in case they were already defined with XXH_NAMESPACE. 
- * They will then be redefined for XXH_INLINE_ALL - */ -# undef XXH_versionNumber - /* XXH32 */ -# undef XXH32 -# undef XXH32_createState -# undef XXH32_freeState -# undef XXH32_reset -# undef XXH32_update -# undef XXH32_digest -# undef XXH32_copyState -# undef XXH32_canonicalFromHash -# undef XXH32_hashFromCanonical - /* XXH64 */ -# undef XXH64 -# undef XXH64_createState -# undef XXH64_freeState -# undef XXH64_reset -# undef XXH64_update -# undef XXH64_digest -# undef XXH64_copyState -# undef XXH64_canonicalFromHash -# undef XXH64_hashFromCanonical - /* XXH3_64bits */ -# undef XXH3_64bits -# undef XXH3_64bits_withSecret -# undef XXH3_64bits_withSeed -# undef XXH3_64bits_withSecretandSeed -# undef XXH3_createState -# undef XXH3_freeState -# undef XXH3_copyState -# undef XXH3_64bits_reset -# undef XXH3_64bits_reset_withSeed -# undef XXH3_64bits_reset_withSecret -# undef XXH3_64bits_update -# undef XXH3_64bits_digest -# undef XXH3_generateSecret - /* XXH3_128bits */ -# undef XXH128 -# undef XXH3_128bits -# undef XXH3_128bits_withSeed -# undef XXH3_128bits_withSecret -# undef XXH3_128bits_reset -# undef XXH3_128bits_reset_withSeed -# undef XXH3_128bits_reset_withSecret -# undef XXH3_128bits_reset_withSecretandSeed -# undef XXH3_128bits_update -# undef XXH3_128bits_digest -# undef XXH128_isEqual -# undef XXH128_cmp -# undef XXH128_canonicalFromHash -# undef XXH128_hashFromCanonical - /* Finally, free the namespace itself */ -# undef XXH_NAMESPACE - - /* employ the namespace for XXH_INLINE_ALL */ -# define XXH_NAMESPACE XXH_INLINE_ - /* - * Some identifiers (enums, type names) are not symbols, - * but they must nonetheless be renamed to avoid redeclaration. - * Alternative solution: do not redeclare them. - * However, this requires some #ifdefs, and has a more dispersed impact. - * Meanwhile, renaming can be achieved in a single place. - */ -# define XXH_IPREF(Id) XXH_NAMESPACE ## Id -# define XXH_OK XXH_IPREF(XXH_OK) -# define XXH_ERROR XXH_IPREF(XXH_ERROR) -# define XXH_errorcode XXH_IPREF(XXH_errorcode) -# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t) -# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t) -# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t) -# define XXH32_state_s XXH_IPREF(XXH32_state_s) -# define XXH32_state_t XXH_IPREF(XXH32_state_t) -# define XXH64_state_s XXH_IPREF(XXH64_state_s) -# define XXH64_state_t XXH_IPREF(XXH64_state_t) -# define XXH3_state_s XXH_IPREF(XXH3_state_s) -# define XXH3_state_t XXH_IPREF(XXH3_state_t) -# define XXH128_hash_t XXH_IPREF(XXH128_hash_t) - /* Ensure the header is parsed again, even if it was previously included */ -# undef XXHASH_H_5627135585666179 -# undef XXHASH_H_STATIC_13879238742 -#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ - -/* **************************************************************** - * Stable API - *****************************************************************/ -#ifndef XXHASH_H_5627135585666179 -#define XXHASH_H_5627135585666179 1 - -/*! @brief Marks a global symbol. 
*/ -#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) -# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) -# ifdef XXH_EXPORT -# define XXH_PUBLIC_API __declspec(dllexport) -# elif XXH_IMPORT -# define XXH_PUBLIC_API __declspec(dllimport) -# endif -# else -# define XXH_PUBLIC_API /* do nothing */ -# endif -#endif +#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) && \ + !defined(XXH_INLINE_ALL_31684351384) +/* this section should be traversed only once */ + #define XXH_INLINE_ALL_31684351384 +/* give access to the advanced API, required to compile implementations */ + #undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */ + #define XXH_STATIC_LINKING_ONLY +/* make all functions private */ + #undef XXH_PUBLIC_API + #if defined(__GNUC__) + #define XXH_PUBLIC_API static __inline __attribute__((unused)) + #elif defined(__cplusplus) || \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) + #define XXH_PUBLIC_API static inline + #elif defined(_MSC_VER) + #define XXH_PUBLIC_API static __inline + #else + /* note: this version may generate warnings for unused static functions */ + #define XXH_PUBLIC_API static + #endif -#ifdef XXH_NAMESPACE -# define XXH_CAT(A,B) A##B -# define XXH_NAME2(A,B) XXH_CAT(A,B) -# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +/* + * This part deals with the special case where a unit wants to inline xxHash, + * but "xxhash.h" has previously been included without XXH_INLINE_ALL, + * such as part of some previously included *.h header file. + * Without further action, the new include would just be ignored, + * and functions would effectively _not_ be inlined (silent failure). + * The following macros solve this situation by prefixing all inlined names, + * avoiding naming collision with previous inclusions. + */ +/* Before that, we unconditionally #undef all symbols, + * in case they were already defined with XXH_NAMESPACE. 
+ * They will then be redefined for XXH_INLINE_ALL + */ + #undef XXH_versionNumber /* XXH32 */ -# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) -# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) -# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) -# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) -# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) -# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) -# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) -# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) -# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) + #undef XXH32 + #undef XXH32_createState + #undef XXH32_freeState + #undef XXH32_reset + #undef XXH32_update + #undef XXH32_digest + #undef XXH32_copyState + #undef XXH32_canonicalFromHash + #undef XXH32_hashFromCanonical /* XXH64 */ -# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) -# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) -# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) -# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) -# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) -# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) -# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) -# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) -# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) + #undef XXH64 + #undef XXH64_createState + #undef XXH64_freeState + #undef XXH64_reset + #undef XXH64_update + #undef XXH64_digest + #undef XXH64_copyState + #undef XXH64_canonicalFromHash + #undef XXH64_hashFromCanonical /* XXH3_64bits */ -# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) -# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) -# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) -# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed) -# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) -# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) -# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) -# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) -# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) -# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) -# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed) -# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) -# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) -# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret) -# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed) + #undef XXH3_64bits + #undef XXH3_64bits_withSecret + #undef XXH3_64bits_withSeed + #undef XXH3_64bits_withSecretandSeed + #undef XXH3_createState + #undef XXH3_freeState + #undef XXH3_copyState + #undef XXH3_64bits_reset + #undef XXH3_64bits_reset_withSeed + #undef XXH3_64bits_reset_withSecret + #undef XXH3_64bits_update + #undef XXH3_64bits_digest + #undef XXH3_generateSecret /* XXH3_128bits */ -# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) -# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) -# define XXH3_128bits_withSeed 
XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) -# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret) -# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed) -# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) -# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) -# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret) -# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed) -# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update) -# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest) -# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual) -# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp) -# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash) -# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical) -#endif - - -/* ************************************* -* Compiler specifics -***************************************/ - -/* specific declaration modes for Windows */ -#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) -# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) -# ifdef XXH_EXPORT -# define XXH_PUBLIC_API __declspec(dllexport) -# elif XXH_IMPORT -# define XXH_PUBLIC_API __declspec(dllimport) -# endif -# else -# define XXH_PUBLIC_API /* do nothing */ -# endif -#endif - -#if defined (__GNUC__) -# define XXH_CONSTF __attribute__((const)) -# define XXH_PUREF __attribute__((pure)) -# define XXH_MALLOCF __attribute__((malloc)) -#else -# define XXH_CONSTF /* disable */ -# define XXH_PUREF -# define XXH_MALLOCF -#endif + #undef XXH128 + #undef XXH3_128bits + #undef XXH3_128bits_withSeed + #undef XXH3_128bits_withSecret + #undef XXH3_128bits_reset + #undef XXH3_128bits_reset_withSeed + #undef XXH3_128bits_reset_withSecret + #undef XXH3_128bits_reset_withSecretandSeed + #undef XXH3_128bits_update + #undef XXH3_128bits_digest + #undef XXH128_isEqual + #undef XXH128_cmp + #undef XXH128_canonicalFromHash + #undef XXH128_hashFromCanonical +/* Finally, free the namespace itself */ + #undef XXH_NAMESPACE + +/* employ the namespace for XXH_INLINE_ALL */ + #define XXH_NAMESPACE XXH_INLINE_ +/* + * Some identifiers (enums, type names) are not symbols, + * but they must nonetheless be renamed to avoid redeclaration. + * Alternative solution: do not redeclare them. + * However, this requires some #ifdefs, and has a more dispersed impact. + * Meanwhile, renaming can be achieved in a single place. 
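As a rough usage sketch (not part of the upstream header; the mylib_ prefix is made up for illustration), the XXH_NAMESPACE renaming described in the hunk above is driven entirely from the including side, and the calling code keeps using the plain names:

/* Hedged sketch: build/consume xxhash with a symbol prefix to avoid collisions.
 * The library itself must have been compiled with the same XXH_NAMESPACE. */
#define XXH_NAMESPACE mylib_
#include "xxhash.h"

#include <stdio.h>
#include <string.h>

int main(void) {
    const char msg[] = "hello";
    /* The call site still spells XXH32(); the header translates it to
     * mylib_XXH32 behind the scenes, as the comment above explains. */
    XXH32_hash_t h = XXH32(msg, strlen(msg), 0);
    printf("%08x\n", (unsigned)h);
    return 0;
}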
+ */ + #define XXH_IPREF(Id) XXH_NAMESPACE##Id + #define XXH_OK XXH_IPREF(XXH_OK) + #define XXH_ERROR XXH_IPREF(XXH_ERROR) + #define XXH_errorcode XXH_IPREF(XXH_errorcode) + #define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t) + #define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t) + #define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t) + #define XXH32_state_s XXH_IPREF(XXH32_state_s) + #define XXH32_state_t XXH_IPREF(XXH32_state_t) + #define XXH64_state_s XXH_IPREF(XXH64_state_s) + #define XXH64_state_t XXH_IPREF(XXH64_state_t) + #define XXH3_state_s XXH_IPREF(XXH3_state_s) + #define XXH3_state_t XXH_IPREF(XXH3_state_t) + #define XXH128_hash_t XXH_IPREF(XXH128_hash_t) +/* Ensure the header is parsed again, even if it was previously included */ + #undef XXHASH_H_5627135585666179 + #undef XXHASH_H_STATIC_13879238742 +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ -/* ************************************* -* Version -***************************************/ -#define XXH_VERSION_MAJOR 0 -#define XXH_VERSION_MINOR 8 -#define XXH_VERSION_RELEASE 2 -/*! @brief Version number, encoded as two digits each */ -#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) +/* **************************************************************** + * Stable API + *****************************************************************/ +#ifndef XXHASH_H_5627135585666179 + #define XXHASH_H_5627135585666179 1 + + /*! @brief Marks a global symbol. */ + #if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) + #if defined(WIN32) && defined(_MSC_VER) && \ + (defined(XXH_IMPORT) || defined(XXH_EXPORT)) + #ifdef XXH_EXPORT + #define XXH_PUBLIC_API __declspec(dllexport) + #elif XXH_IMPORT + #define XXH_PUBLIC_API __declspec(dllimport) + #endif + #else + #define XXH_PUBLIC_API /* do nothing */ + #endif + #endif + + #ifdef XXH_NAMESPACE + #define XXH_CAT(A, B) A##B + #define XXH_NAME2(A, B) XXH_CAT(A, B) + #define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) + /* XXH32 */ + #define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) + #define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) + #define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) + #define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) + #define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) + #define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) + #define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) + #define XXH32_canonicalFromHash \ + XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) + #define XXH32_hashFromCanonical \ + XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) + /* XXH64 */ + #define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) + #define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) + #define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) + #define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) + #define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) + #define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) + #define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) + #define XXH64_canonicalFromHash \ + XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) + #define XXH64_hashFromCanonical \ + XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) + /* XXH3_64bits */ + #define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) + #define XXH3_64bits_withSecret \ + XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) + #define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) + #define 
XXH3_64bits_withSecretandSeed \ + XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed) + #define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) + #define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) + #define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) + #define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) + #define XXH3_64bits_reset_withSeed \ + XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) + #define XXH3_64bits_reset_withSecret \ + XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) + #define XXH3_64bits_reset_withSecretandSeed \ + XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed) + #define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) + #define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) + #define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret) + #define XXH3_generateSecret_fromSeed \ + XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed) + /* XXH3_128bits */ + #define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) + #define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) + #define XXH3_128bits_withSeed \ + XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) + #define XXH3_128bits_withSecret \ + XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret) + #define XXH3_128bits_withSecretandSeed \ + XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed) + #define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) + #define XXH3_128bits_reset_withSeed \ + XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) + #define XXH3_128bits_reset_withSecret \ + XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret) + #define XXH3_128bits_reset_withSecretandSeed \ + XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed) + #define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update) + #define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest) + #define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual) + #define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp) + #define XXH128_canonicalFromHash \ + XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash) + #define XXH128_hashFromCanonical \ + XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical) + #endif + + /* ************************************* + * Compiler specifics + ***************************************/ + + /* specific declaration modes for Windows */ + #if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) + #if defined(WIN32) && defined(_MSC_VER) && \ + (defined(XXH_IMPORT) || defined(XXH_EXPORT)) + #ifdef XXH_EXPORT + #define XXH_PUBLIC_API __declspec(dllexport) + #elif XXH_IMPORT + #define XXH_PUBLIC_API __declspec(dllimport) + #endif + #else + #define XXH_PUBLIC_API /* do nothing */ + #endif + #endif + + #if defined(__GNUC__) + #define XXH_CONSTF __attribute__((const)) + #define XXH_PUREF __attribute__((pure)) + #define XXH_MALLOCF __attribute__((malloc)) + #else + #define XXH_CONSTF /* disable */ + #define XXH_PUREF + #define XXH_MALLOCF + #endif + + /* ************************************* + * Version + ***************************************/ + #define XXH_VERSION_MAJOR 0 + #define XXH_VERSION_MINOR 8 + #define XXH_VERSION_RELEASE 2 + /*! @brief Version number, encoded as two digits each */ + #define XXH_VERSION_NUMBER \ + (XXH_VERSION_MAJOR * 100 * 100 + XXH_VERSION_MINOR * 100 + \ + XXH_VERSION_RELEASE) /*! * @brief Obtains the xxHash version. @@ -559,26 +596,26 @@ extern "C" { * * @return @ref XXH_VERSION_NUMBER of the invoked library. 
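A small sketch, assuming only the version macros and the XXH_versionNumber() declaration shown in this hunk, of how the packed "two decimal digits per component" encoding can be decoded at runtime:

#include <stdio.h>
#include "xxhash.h"

int main(void) {
    /* XXH_VERSION_NUMBER packs two decimal digits per component,
     * e.g. 0.8.2 -> 0*10000 + 8*100 + 2 = 802. */
    unsigned v = XXH_versionNumber();
    printf("xxHash %u.%u.%u (runtime) vs %u.%u.%u (headers)\n",
           v / 10000, (v / 100) % 100, v % 100,
           XXH_VERSION_MAJOR, XXH_VERSION_MINOR, XXH_VERSION_RELEASE);
    return 0;
}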
*/ -XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void); - +XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber(void); -/* **************************** -* Common basic types -******************************/ -#include /* size_t */ + /* **************************** + * Common basic types + ******************************/ + #include /* size_t */ /*! * @brief Exit code for the streaming API. */ typedef enum { - XXH_OK = 0, /*!< OK */ - XXH_ERROR /*!< Error */ -} XXH_errorcode; + XXH_OK = 0, /*!< OK */ + XXH_ERROR /*!< Error */ -/*-********************************************************************** -* 32-bit hash -************************************************************************/ -#if defined(XXH_DOXYGEN) /* Don't show include */ +} XXH_errorcode; + + /*-********************************************************************** + * 32-bit hash + ************************************************************************/ + #if defined(XXH_DOXYGEN) /* Don't show include */ /*! * @brief An unsigned 32-bit integer. * @@ -586,22 +623,22 @@ typedef enum { */ typedef uint32_t XXH32_hash_t; -#elif !defined (__VMS) \ - && (defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint32_t XXH32_hash_t; - -#else -# include -# if UINT_MAX == 0xFFFFFFFFUL - typedef unsigned int XXH32_hash_t; -# elif ULONG_MAX == 0xFFFFFFFFUL - typedef unsigned long XXH32_hash_t; -# else -# error "unsupported platform: need a 32-bit type" -# endif -#endif + #elif !defined(__VMS) && \ + (defined(__cplusplus) || \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)) + #include +typedef uint32_t XXH32_hash_t; + + #else + #include + #if UINT_MAX == 0xFFFFFFFFUL +typedef unsigned int XXH32_hash_t; + #elif ULONG_MAX == 0xFFFFFFFFUL +typedef unsigned long XXH32_hash_t; + #else + #error "unsupported platform: need a 32-bit type" + #endif + #endif /*! * @} @@ -618,12 +655,14 @@ typedef uint32_t XXH32_hash_t; * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families * @see @ref XXH32_impl for implementation details * @{ + */ /*! * @brief Calculates the 32-bit hash of @p input using xxHash32. * - * @param input The block of data to be hashed, at least @p length bytes in size. + * @param input The block of data to be hashed, at least @p length bytes in + * size. * @param length The length of @p input, in bytes. * @param seed The 32-bit seed to alter the hash's output predictably. * @@ -636,9 +675,10 @@ typedef uint32_t XXH32_hash_t; * * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32(const void *input, size_t length, + XXH32_hash_t seed); -#ifndef XXH_NO_STREAM + #ifndef XXH_NO_STREAM /*! * @typedef struct XXH32_state_s XXH32_state_t * @brief The opaque state struct for the XXH32 streaming API. @@ -658,11 +698,12 @@ typedef struct XXH32_state_s XXH32_state_t; * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void); +XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t *XXH32_createState(void); /*! * @brief Frees an @ref XXH32_state_t. * - * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState(). + * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref + * XXH32_createState(). * * @return @ref XXH_OK. 
* @@ -671,7 +712,7 @@ XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void); * @see @ref streaming_example "Streaming Example" * */ -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr); /*! * @brief Copies one @ref XXH32_state_t to another. * @@ -680,7 +721,8 @@ XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); * @pre * @p dst_state and @p src_state must not be `NULL` and must not overlap. */ -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dst_state, + const XXH32_state_t *src_state); /*! * @brief Resets an @ref XXH32_state_t to begin a new hash. @@ -694,17 +736,20 @@ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_ * @return @ref XXH_OK on success. * @return @ref XXH_ERROR on failure. * - * @note This function resets and seeds a state. Call it before @ref XXH32_update(). + * @note This function resets and seeds a state. Call it before @ref + * XXH32_update(). * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed); +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr, + XXH32_hash_t seed); /*! * @brief Consumes a block of @p input to an @ref XXH32_state_t. * * @param statePtr The state struct to update. - * @param input The block of data to be hashed, at least @p length bytes in size. + * @param input The block of data to be hashed, at least @p length bytes in + * size. * @param length The length of @p input, in bytes. * * @pre @@ -721,7 +766,8 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *statePtr, + const void *input, size_t length); /*! * @brief Returns the calculated hash value from an @ref XXH32_state_t. @@ -739,8 +785,9 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); -#endif /* !XXH_NO_STREAM */ +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t +XXH32_digest(const XXH32_state_t *statePtr); + #endif /* !XXH_NO_STREAM */ /******* Canonical representation *******/ @@ -748,7 +795,9 @@ XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePt * @brief Canonical (big endian) representation of @ref XXH32_hash_t. */ typedef struct { - unsigned char digest[4]; /*!< Hash bytes, big endian */ + + unsigned char digest[4]; /*!< Hash bytes, big endian */ + } XXH32_canonical_t; /*! @@ -762,7 +811,8 @@ typedef struct { * * @see @ref canonical_representation_example "Canonical Representation Example" */ -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst, + XXH32_hash_t hash); /*! * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t. 
@@ -776,105 +826,106 @@ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t * * @see @ref canonical_representation_example "Canonical Representation Example" */ -XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); - - -/*! @cond Doxygen ignores this part */ -#ifdef __has_attribute -# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x) -#else -# define XXH_HAS_ATTRIBUTE(x) 0 -#endif -/*! @endcond */ - -/*! @cond Doxygen ignores this part */ -/* - * C23 __STDC_VERSION__ number hasn't been specified yet. For now - * leave as `201711L` (C17 + 1). - * TODO: Update to correct value when its been specified. - */ -#define XXH_C23_VN 201711L -/*! @endcond */ - -/*! @cond Doxygen ignores this part */ -/* C-language Attributes are added in C23. */ -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute) -# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x) -#else -# define XXH_HAS_C_ATTRIBUTE(x) 0 -#endif -/*! @endcond */ - -/*! @cond Doxygen ignores this part */ -#if defined(__cplusplus) && defined(__has_cpp_attribute) -# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) -#else -# define XXH_HAS_CPP_ATTRIBUTE(x) 0 -#endif -/*! @endcond */ - -/*! @cond Doxygen ignores this part */ -/* - * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute - * introduced in CPP17 and C23. - * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough - * C23 : https://en.cppreference.com/w/c/language/attributes/fallthrough - */ -#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough) -# define XXH_FALLTHROUGH [[fallthrough]] -#elif XXH_HAS_ATTRIBUTE(__fallthrough__) -# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__)) -#else -# define XXH_FALLTHROUGH /* fallthrough */ -#endif -/*! @endcond */ - -/*! @cond Doxygen ignores this part */ -/* - * Define XXH_NOESCAPE for annotated pointers in public API. - * https://clang.llvm.org/docs/AttributeReference.html#noescape - * As of writing this, only supported by clang. - */ -#if XXH_HAS_ATTRIBUTE(noescape) -# define XXH_NOESCAPE __attribute__((noescape)) -#else -# define XXH_NOESCAPE -#endif +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t +XXH32_hashFromCanonical(const XXH32_canonical_t *src); + + /*! @cond Doxygen ignores this part */ + #ifdef __has_attribute + #define XXH_HAS_ATTRIBUTE(x) __has_attribute(x) + #else + #define XXH_HAS_ATTRIBUTE(x) 0 + #endif + /*! @endcond */ + + /*! @cond Doxygen ignores this part */ + /* + * C23 __STDC_VERSION__ number hasn't been specified yet. For now + * leave as `201711L` (C17 + 1). + * TODO: Update to correct value when its been specified. + */ + #define XXH_C23_VN 201711L + /*! @endcond */ + + /*! @cond Doxygen ignores this part */ + /* C-language Attributes are added in C23. */ + #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && \ + defined(__has_c_attribute) + #define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x) + #else + #define XXH_HAS_C_ATTRIBUTE(x) 0 + #endif + /*! @endcond */ + + /*! @cond Doxygen ignores this part */ + #if defined(__cplusplus) && defined(__has_cpp_attribute) + #define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) + #else + #define XXH_HAS_CPP_ATTRIBUTE(x) 0 + #endif + /*! @endcond */ + + /*! @cond Doxygen ignores this part */ + /* + * Define XXH_FALLTHROUGH macro for annotating switch case with the + * 'fallthrough' attribute introduced in CPP17 and C23. 
CPP17 : + * https://en.cppreference.com/w/cpp/language/attributes/fallthrough C23 : + * https://en.cppreference.com/w/c/language/attributes/fallthrough + */ + #if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough) + #define XXH_FALLTHROUGH [[fallthrough]] + #elif XXH_HAS_ATTRIBUTE(__fallthrough__) + #define XXH_FALLTHROUGH __attribute__((__fallthrough__)) + #else + #define XXH_FALLTHROUGH /* fallthrough */ + #endif + /*! @endcond */ + + /*! @cond Doxygen ignores this part */ + /* + * Define XXH_NOESCAPE for annotated pointers in public API. + * https://clang.llvm.org/docs/AttributeReference.html#noescape + * As of writing this, only supported by clang. + */ + #if XXH_HAS_ATTRIBUTE(noescape) + #define XXH_NOESCAPE __attribute__((noescape)) + #else + #define XXH_NOESCAPE + #endif /*! @endcond */ - /*! * @} * @ingroup public * @{ + */ -#ifndef XXH_NO_LONG_LONG -/*-********************************************************************** -* 64-bit hash -************************************************************************/ -#if defined(XXH_DOXYGEN) /* don't include */ + #ifndef XXH_NO_LONG_LONG + /*-********************************************************************** + * 64-bit hash + ************************************************************************/ + #if defined(XXH_DOXYGEN) /* don't include */ /*! * @brief An unsigned 64-bit integer. * * Not necessarily defined to `uint64_t` but functionally equivalent. */ typedef uint64_t XXH64_hash_t; -#elif !defined (__VMS) \ - && (defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint64_t XXH64_hash_t; -#else -# include -# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL - /* LP64 ABI says uint64_t is unsigned long */ - typedef unsigned long XXH64_hash_t; -# else - /* the following type must have a width of 64-bit */ - typedef unsigned long long XXH64_hash_t; -# endif -#endif + #elif !defined(__VMS) && \ + (defined(__cplusplus) || (defined(__STDC_VERSION__) && \ + (__STDC_VERSION__ >= 199901L) /* C99 */)) + #include +typedef uint64_t XXH64_hash_t; + #else + #include + #if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL +/* LP64 ABI says uint64_t is unsigned long */ +typedef unsigned long XXH64_hash_t; + #else +/* the following type must have a width of 64-bit */ +typedef unsigned long long XXH64_hash_t; + #endif + #endif /*! * @} @@ -882,6 +933,7 @@ typedef uint64_t XXH64_hash_t; * @defgroup XXH64_family XXH64 family * @ingroup public * @{ + * Contains functions used in the classic 64-bit xxHash algorithm. * * @note @@ -893,7 +945,8 @@ typedef uint64_t XXH64_hash_t; /*! * @brief Calculates the 64-bit hash of @p input using xxHash64. * - * @param input The block of data to be hashed, at least @p length bytes in size. + * @param input The block of data to be hashed, at least @p length bytes in + * size. * @param length The length of @p input, in bytes. * @param seed The 64-bit seed to alter the hash's output predictably. * @@ -906,17 +959,18 @@ typedef uint64_t XXH64_hash_t; * * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed); +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void *input, + size_t length, XXH64_hash_t seed); -/******* Streaming *******/ -#ifndef XXH_NO_STREAM + /******* Streaming *******/ + #ifndef XXH_NO_STREAM /*! 
* @brief The opaque state struct for the XXH64 streaming API. * * @see XXH64_state_s for details. * @see @ref streaming_example "Streaming Example" */ -typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ /*! * @brief Allocates an @ref XXH64_state_t. @@ -928,12 +982,13 @@ typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void); +XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t *XXH64_createState(void); /*! * @brief Frees an @ref XXH64_state_t. * - * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState(). + * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref + * XXH64_createState(). * * @return @ref XXH_OK. * @@ -941,7 +996,7 @@ XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void); * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr); /*! * @brief Copies one @ref XXH64_state_t to another. @@ -951,7 +1006,8 @@ XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); * @pre * @p dst_state and @p src_state must not be `NULL` and must not overlap. */ -XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state); +XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t *dst_state, + const XXH64_state_t *src_state); /*! * @brief Resets an @ref XXH64_state_t to begin a new hash. @@ -965,17 +1021,20 @@ XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const * @return @ref XXH_OK on success. * @return @ref XXH_ERROR on failure. * - * @note This function resets and seeds a state. Call it before @ref XXH64_update(). + * @note This function resets and seeds a state. Call it before @ref + * XXH64_update(). * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed); +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t *statePtr, + XXH64_hash_t seed); /*! * @brief Consumes a block of @p input to an @ref XXH64_state_t. * * @param statePtr The state struct to update. - * @param input The block of data to be hashed, at least @p length bytes in size. + * @param input The block of data to be hashed, at least @p length bytes in + * size. * @param length The length of @p input, in bytes. * * @pre @@ -992,7 +1051,9 @@ XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); +XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH_NOESCAPE XXH64_state_t *statePtr, + XXH_NOESCAPE const void *input, + size_t length); /*! * @brief Returns the calculated hash value from an @ref XXH64_state_t. 
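A hedged sketch of the XXH64 streaming calls declared in this hunk, hashing one logical message supplied in two pieces; the helper name is invented for illustration:

#include <assert.h>
#include <stddef.h>
#include "xxhash.h"

/* Hash two buffers as one logical message with the XXH64 streaming API. */
static XXH64_hash_t hash_two_parts(const void *a, size_t alen,
                                   const void *b, size_t blen,
                                   XXH64_hash_t seed) {
    XXH64_state_t *st = XXH64_createState();
    assert(st != NULL && "Out of memory!");
    XXH64_reset(st, seed);              /* seed the fresh state */
    XXH64_update(st, a, alen);          /* feed data in as many chunks as needed */
    XXH64_update(st, b, blen);
    XXH64_hash_t h = XXH64_digest(st);  /* digest() leaves the state usable */
    XXH64_freeState(st);
    return h;
}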
@@ -1010,14 +1071,19 @@ XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr); -#endif /* !XXH_NO_STREAM */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t +XXH64_digest(XXH_NOESCAPE const XXH64_state_t *statePtr); + #endif /* !XXH_NO_STREAM */ /******* Canonical representation *******/ /*! * @brief Canonical (big endian) representation of @ref XXH64_hash_t. */ -typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t; +typedef struct { + + unsigned char digest[sizeof(XXH64_hash_t)]; + +} XXH64_canonical_t; /*! * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t. @@ -1030,7 +1096,8 @@ typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t * * @see @ref canonical_representation_example "Canonical Representation Example" */ -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash); +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t *dst, + XXH64_hash_t hash); /*! * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t. @@ -1044,9 +1111,10 @@ XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, * * @see @ref canonical_representation_example "Canonical Representation Example" */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src); +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t +XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t *src); -#ifndef XXH_NO_XXH3 + #ifndef XXH_NO_XXH3 /*! * @} @@ -1054,6 +1122,7 @@ XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const * @defgroup XXH3_family XXH3 family * @ingroup public * @{ + * * XXH3 is a more recent hash algorithm featuring: * - Improved speed for both small and large inputs @@ -1085,8 +1154,9 @@ XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const * - POWER8 VSX * - s390x ZVector * This can be controlled via the @ref XXH_VECTOR macro, but it automatically - * selects the best version according to predefined macros. For the x86 family, an - * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c. + * selects the best version according to predefined macros. For the x86 family, + * an automatic runtime dispatcher is included separately in @ref + * xxh_x86dispatch.c. * * XXH3 implementation is portable: * it has a generic C90 formulation that can be compiled on any platform, @@ -1103,13 +1173,14 @@ XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const * The API supports one-shot hashing, streaming mode, and custom secrets. */ /*-********************************************************************** -* XXH3 64-bit variant -************************************************************************/ + * XXH3 64-bit variant + ************************************************************************/ /*! * @brief Calculates 64-bit unseeded variant of XXH3 hash of @p input. * - * @param input The block of data to be hashed, at least @p length bytes in size. + * @param input The block of data to be hashed, at least @p length bytes in + * size. * @param length The length of @p input, in bytes. 
* * @pre @@ -1120,20 +1191,22 @@ XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const * @return The calculated 64-bit XXH3 hash value. * * @note - * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of `0`, however - * it may have slightly better performance due to constant propagation of the - * defaults. + * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of `0`, + * however it may have slightly better performance due to constant propagation + * of the defaults. * * @see * XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length); +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t +XXH3_64bits(XXH_NOESCAPE const void *input, size_t length); /*! * @brief Calculates 64-bit seeded variant of XXH3 hash of @p input. * - * @param input The block of data to be hashed, at least @p length bytes in size. + * @param input The block of data to be hashed, at least @p length bytes in + * size. * @param length The length of @p input, in bytes. * @param seed The 64-bit seed to alter the hash result predictably. * @@ -1154,21 +1227,23 @@ XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input * * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed); +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed( + XXH_NOESCAPE const void *input, size_t length, XXH64_hash_t seed); -/*! - * The bare minimum size for a custom secret. - * - * @see - * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(), - * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret(). - */ -#define XXH3_SECRET_SIZE_MIN 136 + /*! + * The bare minimum size for a custom secret. + * + * @see + * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(), + * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret(). + */ + #define XXH3_SECRET_SIZE_MIN 136 /*! * @brief Calculates 64-bit variant of XXH3 with a custom "secret". * - * @param data The block of data to be hashed, at least @p len bytes in size. + * @param data The block of data to be hashed, at least @p len bytes in + * size. * @param len The length of @p data, in bytes. * @param secret The secret data. * @param secretSize The length of @p secret, in bytes. @@ -1180,28 +1255,29 @@ XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const vo * readable, contiguous memory. However, if @p length is `0`, @p data may be * `NULL`. In C++, this also must be *TriviallyCopyable*. * - * It's possible to provide any blob of bytes as a "secret" to generate the hash. - * This makes it more difficult for an external actor to prepare an intentional collision. - * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN). - * However, the quality of the secret impacts the dispersion of the hash algorithm. - * Therefore, the secret _must_ look like a bunch of random bytes. - * Avoid "trivial" or structured data such as repeated sequences or a text document. - * Whenever in doubt about the "randomness" of the blob of bytes, - * consider employing @ref XXH3_generateSecret() instead (see below). - * It will generate a proper high entropy secret derived from the blob of bytes. 
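A minimal sketch of calling XXH3_64bits_withSecret() under the constraints described above; the secret buffer here is only a placeholder and would need to be filled with high-entropy bytes (for example via XXH3_generateSecret(), mentioned above) before use:

#include <stddef.h>
#include "xxhash.h"

/* Must be at least XXH3_SECRET_SIZE_MIN (136) bytes and look like random
 * data; fill it from a CSPRNG or derive it from a seed blob before hashing. */
static unsigned char g_secret[XXH3_SECRET_SIZE_MIN];

static XXH64_hash_t hash_with_secret(const void *data, size_t len) {
    return XXH3_64bits_withSecret(data, len, g_secret, sizeof(g_secret));
}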
- * Another advantage of using XXH3_generateSecret() is that - * it guarantees that all bits within the initial blob of bytes - * will impact every bit of the output. - * This is not necessarily the case when using the blob of bytes directly - * because, when hashing _small_ inputs, only a portion of the secret is employed. + * It's possible to provide any blob of bytes as a "secret" to generate the + * hash. This makes it more difficult for an external actor to prepare an + * intentional collision. The main condition is that @p secretSize *must* be + * large enough (>= @ref XXH3_SECRET_SIZE_MIN). However, the quality of the + * secret impacts the dispersion of the hash algorithm. Therefore, the secret + * _must_ look like a bunch of random bytes. Avoid "trivial" or structured data + * such as repeated sequences or a text document. Whenever in doubt about the + * "randomness" of the blob of bytes, consider employing @ref + * XXH3_generateSecret() instead (see below). It will generate a proper high + * entropy secret derived from the blob of bytes. Another advantage of using + * XXH3_generateSecret() is that it guarantees that all bits within the initial + * blob of bytes will impact every bit of the output. This is not necessarily + * the case when using the blob of bytes directly because, when hashing _small_ + * inputs, only a portion of the secret is employed. * * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize); - +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t +XXH3_64bits_withSecret(XXH_NOESCAPE const void *data, size_t len, + XXH_NOESCAPE const void *secret, size_t secretSize); -/******* Streaming *******/ -#ifndef XXH_NO_STREAM + /******* Streaming *******/ + #ifndef XXH_NO_STREAM /* * Streaming requires state maintenance. * This operation costs memory and CPU. @@ -1215,9 +1291,9 @@ XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const * @see XXH3_state_s for details. * @see @ref streaming_example "Streaming Example" */ -typedef struct XXH3_state_s XXH3_state_t; -XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr); +typedef struct XXH3_state_s XXH3_state_t; +XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t *XXH3_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t *statePtr); /*! * @brief Copies one @ref XXH3_state_t to another. @@ -1227,7 +1303,8 @@ XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr); * @pre * @p dst_state and @p src_state must not be `NULL` and must not overlap. */ -XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state); +XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t *dst_state, + XXH_NOESCAPE const XXH3_state_t *src_state); /*! * @brief Resets an @ref XXH3_state_t to begin a new hash. @@ -1241,14 +1318,16 @@ XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOE * @return @ref XXH_ERROR on failure. * * @note - * - This function resets `statePtr` and generate a secret with default parameters. + * - This function resets `statePtr` and generate a secret with default + * parameters. * - Call this function before @ref XXH3_64bits_update(). * - Digest will be equivalent to `XXH3_64bits()`. 
* * @see @ref streaming_example "Streaming Example" * */ -XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr); +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t *statePtr); /*! * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash. @@ -1270,7 +1349,8 @@ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* stateP * @see @ref streaming_example "Streaming Example" * */ -XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed); +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH64_hash_t seed); /*! * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash. @@ -1296,13 +1376,16 @@ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_ * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize); +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret, + size_t secretSize); /*! * @brief Consumes a block of @p input to an @ref XXH3_state_t. * * @param statePtr The state struct to update. - * @param input The block of data to be hashed, at least @p length bytes in size. + * @param input The block of data to be hashed, at least @p length bytes in + * size. * @param length The length of @p input, in bytes. * * @pre @@ -1319,10 +1402,13 @@ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_stat * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t *statePtr, + XXH_NOESCAPE const void *input, size_t length); /*! - * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t. + * @brief Returns the calculated XXH3 64-bit hash value from an @ref + * XXH3_state_t. * * @param statePtr The state struct to calculate the hash from. * @@ -1332,21 +1418,21 @@ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* stat * @return The calculated XXH3 64-bit hash value from that state. * * @note - * Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update, - * digest, and update again. + * Calling XXH3_64bits_digest() will not affect @p statePtr, so you can + * update, digest, and update again. * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr); -#endif /* !XXH_NO_STREAM */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t +XXH3_64bits_digest(XXH_NOESCAPE const XXH3_state_t *statePtr); + #endif /* !XXH_NO_STREAM */ /* note : canonical representation of XXH3 is the same as XXH64 * since they both produce XXH64_hash_t values */ - /*-********************************************************************** -* XXH3 128-bit variant -************************************************************************/ + * XXH3 128-bit variant + ************************************************************************/ /*! * @brief The return value from 128-bit hashes. @@ -1355,8 +1441,10 @@ XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XX * endianness. 
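The 64-bit streaming functions above combine as follows; a minimal sketch assuming an already opened FILE*:

#include <stdio.h>
#include "xxhash.h"

/* Hashes a file in 4 KiB chunks via create/reset/update/digest/free. */
static XXH64_hash_t hash_file(FILE *f) {
    char          buf[4096];
    size_t        n;
    XXH64_hash_t  h  = 0;
    XXH3_state_t *st = XXH3_createState();
    if (!st) return 0;
    if (XXH3_64bits_reset(st) == XXH_OK) {
        while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
            XXH3_64bits_update(st, buf, n);
        h = XXH3_64bits_digest(st);
    }
    XXH3_freeState(st);
    return h;
}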
*/ typedef struct { - XXH64_hash_t low64; /*!< `value & 0xFFFFFFFFFFFFFFFF` */ - XXH64_hash_t high64; /*!< `value >> 64` */ + + XXH64_hash_t low64; /*!< `value & 0xFFFFFFFFFFFFFFFF` */ + XXH64_hash_t high64; /*!< `value >> 64` */ + } XXH128_hash_t; /*! @@ -1370,14 +1458,16 @@ typedef struct { * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead * for shorter inputs. * - * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of `0`, however - * it may have slightly better performance due to constant propagation of the - * defaults. + * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of `0`, + * however it may have slightly better performance due to constant propagation + * of the defaults. * - * @see XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants + * @see XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding + * variants * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len); +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t +XXH3_128bits(XXH_NOESCAPE const void *data, size_t len); /*! @brief Calculates 128-bit seeded variant of XXH3 hash of @p data. * * @param data The block of data to be hashed, at least @p length bytes in size. @@ -1397,38 +1487,42 @@ XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* dat * @see XXH3_128bits(), XXH3_128bits_withSecret(): other seeding variants * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed); +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed( + XXH_NOESCAPE const void *data, size_t len, XXH64_hash_t seed); /*! * @brief Calculates 128-bit variant of XXH3 with a custom "secret". * - * @param data The block of data to be hashed, at least @p len bytes in size. + * @param data The block of data to be hashed, at least @p len bytes in + * size. * @param len The length of @p data, in bytes. * @param secret The secret data. * @param secretSize The length of @p secret, in bytes. * * @return The calculated 128-bit variant of XXH3 value. * - * It's possible to provide any blob of bytes as a "secret" to generate the hash. - * This makes it more difficult for an external actor to prepare an intentional collision. - * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN). - * However, the quality of the secret impacts the dispersion of the hash algorithm. - * Therefore, the secret _must_ look like a bunch of random bytes. - * Avoid "trivial" or structured data such as repeated sequences or a text document. - * Whenever in doubt about the "randomness" of the blob of bytes, - * consider employing @ref XXH3_generateSecret() instead (see below). - * It will generate a proper high entropy secret derived from the blob of bytes. - * Another advantage of using XXH3_generateSecret() is that - * it guarantees that all bits within the initial blob of bytes - * will impact every bit of the output. - * This is not necessarily the case when using the blob of bytes directly - * because, when hashing _small_ inputs, only a portion of the secret is employed. + * It's possible to provide any blob of bytes as a "secret" to generate the + * hash. This makes it more difficult for an external actor to prepare an + * intentional collision. 
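A one-shot 128-bit sketch showing how the two halves of XXH128_hash_t are read back (values and formatting are illustrative):

#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void) {
    const char    *msg = "hello world";
    XXH128_hash_t  h   = XXH3_128bits(msg, strlen(msg));   /* seed 0 */
    /* The result comes back as a small struct of two 64-bit halves. */
    printf("%016llx%016llx\n", (unsigned long long)h.high64,
                               (unsigned long long)h.low64);
    return 0;
}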
The main condition is that @p secretSize *must* be + * large enough (>= @ref XXH3_SECRET_SIZE_MIN). However, the quality of the + * secret impacts the dispersion of the hash algorithm. Therefore, the secret + * _must_ look like a bunch of random bytes. Avoid "trivial" or structured data + * such as repeated sequences or a text document. Whenever in doubt about the + * "randomness" of the blob of bytes, consider employing @ref + * XXH3_generateSecret() instead (see below). It will generate a proper high + * entropy secret derived from the blob of bytes. Another advantage of using + * XXH3_generateSecret() is that it guarantees that all bits within the initial + * blob of bytes will impact every bit of the output. This is not necessarily + * the case when using the blob of bytes directly because, when hashing _small_ + * inputs, only a portion of the secret is employed. * * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize); +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t +XXH3_128bits_withSecret(XXH_NOESCAPE const void *data, size_t len, + XXH_NOESCAPE const void *secret, size_t secretSize); -/******* Streaming *******/ -#ifndef XXH_NO_STREAM + /******* Streaming *******/ + #ifndef XXH_NO_STREAM /* * Streaming requires state maintenance. * This operation costs memory and CPU. @@ -1438,7 +1532,8 @@ XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE cons * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits(). * Use already declared XXH3_createState() and XXH3_freeState(). * - * All reset and streaming functions have same meaning as their 64-bit counterpart. + * All reset and streaming functions have same meaning as their 64-bit + * counterpart. */ /*! @@ -1453,13 +1548,15 @@ XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE cons * @return @ref XXH_ERROR on failure. * * @note - * - This function resets `statePtr` and generate a secret with default parameters. + * - This function resets `statePtr` and generate a secret with default + * parameters. * - Call it before @ref XXH3_128bits_update(). * - Digest will be equivalent to `XXH3_128bits()`. * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr); +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t *statePtr); /*! * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash. @@ -1480,7 +1577,8 @@ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* state * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed); +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH64_hash_t seed); /*! * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash. * @@ -1503,7 +1601,9 @@ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize); +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret, + size_t secretSize); /*! 
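The 128-bit streaming flow mirrors the 64-bit one; a sketch with an assumed helper that hashes two buffers under one seed:

#include <stddef.h>
#include "xxhash.h"

/* Streams two buffers into one seeded 128-bit hash. */
static XXH128_hash_t hash128_two_parts(const void *a, size_t la,
                                       const void *b, size_t lb,
                                       XXH64_hash_t seed) {
    XXH128_hash_t  h  = {0, 0};
    XXH3_state_t  *st = XXH3_createState();
    if (!st) return h;
    if (XXH3_128bits_reset_withSeed(st, seed) == XXH_OK) {
        XXH3_128bits_update(st, a, la);
        XXH3_128bits_update(st, b, lb);
        h = XXH3_128bits_digest(st);
    }
    XXH3_freeState(st);
    return h;
}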
* @brief Consumes a block of @p input to an @ref XXH3_state_t. @@ -1511,7 +1611,8 @@ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_sta * Call this to incrementally consume blocks of data. * * @param statePtr The state struct to update. - * @param input The block of data to be hashed, at least @p length bytes in size. + * @param input The block of data to be hashed, at least @p length bytes in + * size. * @param length The length of @p input, in bytes. * * @pre @@ -1526,10 +1627,13 @@ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_sta * `NULL`. In C++, this also must be *TriviallyCopyable*. * */ -XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length); +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t *statePtr, + XXH_NOESCAPE const void *input, size_t length); /*! - * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t. + * @brief Returns the calculated XXH3 128-bit hash value from an @ref + * XXH3_state_t. * * @param statePtr The state struct to calculate the hash from. * @@ -1539,16 +1643,18 @@ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* sta * @return The calculated XXH3 128-bit hash value from that state. * * @note - * Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update, - * digest, and update again. + * Calling XXH3_128bits_digest() will not affect @p statePtr, so you can + * update, digest, and update again. * */ -XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr); -#endif /* !XXH_NO_STREAM */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t +XXH3_128bits_digest(XXH_NOESCAPE const XXH3_state_t *statePtr); + #endif /* !XXH_NO_STREAM */ /* Following helper functions make it possible to compare XXH128_hast_t values. - * Since XXH128_hash_t is a structure, this capability is not offered by the language. - * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */ + * Since XXH128_hash_t is a structure, this capability is not offered by the + * language. Note: For better performance, these functions can be inlined using + * XXH_INLINE_ALL */ /*! * @brief Check equality of two XXH128_hash_t values @@ -1573,15 +1679,19 @@ XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2); * @return =0 if @p h128_1 == @p h128_2 * @return <0 if @p h128_1 < @p h128_2 */ -XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2); - +XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void *h128_1, + XXH_NOESCAPE const void *h128_2); /******* Canonical representation *******/ -typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t; +typedef struct { + unsigned char digest[sizeof(XXH128_hash_t)]; + +} XXH128_canonical_t; /*! - * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t. + * @brief Converts an @ref XXH128_hash_t to a big endian @ref + * XXH128_canonical_t. * * @param dst The @ref XXH128_canonical_t pointer to be stored to. * @param hash The @ref XXH128_hash_t to be converted. @@ -1590,7 +1700,8 @@ typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical * @p dst must not be `NULL`. 
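A sketch of the comparison helpers mentioned above; XXH128_cmp() takes pointers and orders like memcmp(), so it can be handed to qsort() directly (helper names are illustrative):

#include <stdlib.h>
#include "xxhash.h"

static int same_hash(XXH128_hash_t a, XXH128_hash_t b) {
    return XXH128_isEqual(a, b);               /* 1 if equal, 0 otherwise */
}

static void sort_hashes(XXH128_hash_t *hashes, size_t count) {
    qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
}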
* @see @ref canonical_representation_example "Canonical Representation Example" */ -XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash); +XXH_PUBLIC_API void XXH128_canonicalFromHash( + XXH_NOESCAPE XXH128_canonical_t *dst, XXH128_hash_t hash); /*! * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t. @@ -1603,28 +1714,27 @@ XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* ds * @return The converted hash. * @see @ref canonical_representation_example "Canonical Representation Example" */ -XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src); - +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t +XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t *src); -#endif /* !XXH_NO_XXH3 */ -#endif /* XXH_NO_LONG_LONG */ + #endif /* !XXH_NO_XXH3 */ + #endif /* XXH_NO_LONG_LONG */ /*! * @} */ -#endif /* XXHASH_H_5627135585666179 */ - - +#endif /* XXHASH_H_5627135585666179 */ #if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) -#define XXHASH_H_STATIC_13879238742 + #define XXHASH_H_STATIC_13879238742 /* **************************************************************************** * This section contains declarations which are not guaranteed to remain stable. * They may change in future versions, becoming incompatible with a different * version of the library. * These declarations should only be used with static linking. * Never use them in association with dynamic linking! - ***************************************************************************** */ + ***************************************************************************** +*/ /* * These definitions are only present to allow static allocation @@ -1645,16 +1755,19 @@ XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE con * @see XXH64_state_s, XXH3_state_s */ struct XXH32_state_s { - XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */ - XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */ - XXH32_hash_t v[4]; /*!< Accumulator lanes */ - XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */ - XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */ - XXH32_hash_t reserved; /*!< Reserved field. Do not read nor write to it. */ -}; /* typedef'd to XXH32_state_t */ + XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */ + XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref + total_len_32 overflow) */ + XXH32_hash_t v[4]; /*!< Accumulator lanes */ + XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as + unsigned char[16]. */ + XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */ + XXH32_hash_t reserved; /*!< Reserved field. Do not read nor write to it. */ + +}; /* typedef'd to XXH32_state_t */ -#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */ + #ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */ /*! * @internal @@ -1669,57 +1782,64 @@ struct XXH32_state_s { * @see XXH32_state_s, XXH3_state_s */ struct XXH64_state_s { - XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */ - XXH64_hash_t v[4]; /*!< Accumulator lanes */ - XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[32]. 
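A round-trip sketch for the canonical (big-endian) representation, the form to use when storing or transmitting hashes:

#include <stdio.h>
#include "xxhash.h"

int main(void) {
    XXH128_hash_t      h = XXH3_128bits("abc", 3);
    XXH128_canonical_t canon;
    XXH128_hash_t      back;
    XXH128_canonicalFromHash(&canon, h);        /* stable byte order */
    back = XXH128_hashFromCanonical(&canon);
    printf("round-trip equal: %d\n", XXH128_isEqual(h, back));
    return 0;
}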
*/ - XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */ - XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyways*/ - XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. */ -}; /* typedef'd to XXH64_state_t */ - -#ifndef XXH_NO_XXH3 - -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */ -# include -# define XXH_ALIGN(n) alignas(n) -#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */ -/* In C++ alignas() is a keyword */ -# define XXH_ALIGN(n) alignas(n) -#elif defined(__GNUC__) -# define XXH_ALIGN(n) __attribute__ ((aligned(n))) -#elif defined(_MSC_VER) -# define XXH_ALIGN(n) __declspec(align(n)) -#else -# define XXH_ALIGN(n) /* disabled */ -#endif - -/* Old GCC versions only accept the attribute after the type in structures. */ -#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \ - && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \ - && defined(__GNUC__) -# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align) -#else -# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type -#endif - -/*! - * @brief The size of the internal XXH3 buffer. - * - * This is the optimal update size for incremental hashing. - * - * @see XXH3_64b_update(), XXH3_128b_update(). - */ -#define XXH3_INTERNALBUFFER_SIZE 256 -/*! - * @internal - * @brief Default size of the secret buffer (and @ref XXH3_kSecret). - * - * This is the size used in @ref XXH3_kSecret and the seeded functions. - * - * Not to be confused with @ref XXH3_SECRET_SIZE_MIN. - */ -#define XXH3_SECRET_DEFAULT_SIZE 192 + XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */ + XXH64_hash_t v[4]; /*!< Accumulator lanes */ + XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as + unsigned char[32]. */ + XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */ + XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyways*/ + XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. */ + +}; /* typedef'd to XXH64_state_t */ + + #ifndef XXH_NO_XXH3 + + #if defined(__STDC_VERSION__) && \ + (__STDC_VERSION__ >= 201112L) /* >= C11 */ + #include + #define XXH_ALIGN(n) alignas(n) + #elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */ + /* In C++ alignas() is a keyword */ + #define XXH_ALIGN(n) alignas(n) + #elif defined(__GNUC__) + #define XXH_ALIGN(n) __attribute__((aligned(n))) + #elif defined(_MSC_VER) + #define XXH_ALIGN(n) __declspec(align(n)) + #else + #define XXH_ALIGN(n) /* disabled */ + #endif + + /* Old GCC versions only accept the attribute after the type in + * structures. */ + #if !(defined(__STDC_VERSION__) && \ + (__STDC_VERSION__ >= 201112L)) /* C11+ */ \ + && \ + !(defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \ + && defined(__GNUC__) + #define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align) + #else + #define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type + #endif + + /*! + * @brief The size of the internal XXH3 buffer. + * + * This is the optimal update size for incremental hashing. + * + * @see XXH3_64b_update(), XXH3_128b_update(). + */ + #define XXH3_INTERNALBUFFER_SIZE 256 + + /*! + * @internal + * @brief Default size of the secret buffer (and @ref XXH3_kSecret). + * + * This is the size used in @ref XXH3_kSecret and the seeded functions. + * + * Not to be confused with @ref XXH3_SECRET_SIZE_MIN. + */ + #define XXH3_SECRET_DEFAULT_SIZE 192 /*! 
* @internal @@ -1744,54 +1864,60 @@ struct XXH64_state_s { * @see XXH32_state_s, XXH64_state_s */ struct XXH3_state_s { - XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]); - /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */ - XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]); - /*!< Used to store a custom secret generated from a seed. */ - XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]); - /*!< The internal buffer. @see XXH32_state_s::mem32 */ - XXH32_hash_t bufferedSize; - /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */ - XXH32_hash_t useSeed; - /*!< Reserved field. Needed for padding on 64-bit. */ - size_t nbStripesSoFar; - /*!< Number or stripes processed. */ - XXH64_hash_t totalLen; - /*!< Total length hashed. 64-bit even on 32-bit targets. */ - size_t nbStripesPerBlock; - /*!< Number of stripes per block. */ - size_t secretLimit; - /*!< Size of @ref customSecret or @ref extSecret */ - XXH64_hash_t seed; - /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */ - XXH64_hash_t reserved64; - /*!< Reserved field. */ - const unsigned char* extSecret; - /*!< Reference to an external secret for the _withSecret variants, NULL - * for other variants. */ - /* note: there may be some padding at the end due to alignment on 64 bytes */ -}; /* typedef'd to XXH3_state_t */ - -#undef XXH_ALIGN_MEMBER - -/*! - * @brief Initializes a stack-allocated `XXH3_state_s`. - * - * When the @ref XXH3_state_t structure is merely emplaced on stack, - * it should be initialized with XXH3_INITSTATE() or a memset() - * in case its first reset uses XXH3_NNbits_reset_withSeed(). - * This init can be omitted if the first reset uses default or _withSecret mode. - * This operation isn't necessary when the state is created with XXH3_createState(). - * Note that this doesn't prepare the state for a streaming operation, - * it's still necessary to use XXH3_NNbits_reset*() afterwards. - */ -#define XXH3_INITSTATE(XXH3_state_ptr) \ - do { \ - XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \ - tmp_xxh3_state_ptr->seed = 0; \ - tmp_xxh3_state_ptr->extSecret = NULL; \ - } while(0) + XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]); + /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v + */ + XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]); + /*!< Used to store a custom secret generated from a seed. */ + XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]); + /*!< The internal buffer. @see XXH32_state_s::mem32 */ + XXH32_hash_t bufferedSize; + /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */ + XXH32_hash_t useSeed; + /*!< Reserved field. Needed for padding on 64-bit. */ + size_t nbStripesSoFar; + /*!< Number or stripes processed. */ + XXH64_hash_t totalLen; + /*!< Total length hashed. 64-bit even on 32-bit targets. */ + size_t nbStripesPerBlock; + /*!< Number of stripes per block. */ + size_t secretLimit; + /*!< Size of @ref customSecret or @ref extSecret */ + XXH64_hash_t seed; + /*!< Seed for _withSeed variants. Must be zero otherwise, @see + * XXH3_INITSTATE() */ + XXH64_hash_t reserved64; + /*!< Reserved field. */ + const unsigned char *extSecret; + /*!< Reference to an external secret for the _withSecret variants, NULL + * for other variants. */ + /* note: there may be some padding at the end due to alignment on 64 bytes */ + +}; /* typedef'd to XXH3_state_t */ + + #undef XXH_ALIGN_MEMBER + + /*! 
+ * @brief Initializes a stack-allocated `XXH3_state_s`. + * + * When the @ref XXH3_state_t structure is merely emplaced on stack, + * it should be initialized with XXH3_INITSTATE() or a memset() + * in case its first reset uses XXH3_NNbits_reset_withSeed(). + * This init can be omitted if the first reset uses default or _withSecret + * mode. This operation isn't necessary when the state is created with + * XXH3_createState(). Note that this doesn't prepare the state for a + * streaming operation, it's still necessary to use XXH3_NNbits_reset*() + * afterwards. + */ + #define XXH3_INITSTATE(XXH3_state_ptr) \ + do { \ + \ + XXH3_state_t *tmp_xxh3_state_ptr = (XXH3_state_ptr); \ + tmp_xxh3_state_ptr->seed = 0; \ + tmp_xxh3_state_ptr->extSecret = NULL; \ + \ + } while (0) /*! * @brief Calculates the 128-bit hash of @p data using XXH3. @@ -1809,27 +1935,31 @@ struct XXH3_state_s { * * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed); - +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void *data, + size_t len, XXH64_hash_t seed); /* === Experimental API === */ -/* Symbols defined below must be considered tied to a specific library version. */ +/* Symbols defined below must be considered tied to a specific library version. + */ /*! - * @brief Derive a high-entropy secret from any user-defined content, named customSeed. + * @brief Derive a high-entropy secret from any user-defined content, named + * customSeed. * - * @param secretBuffer A writable buffer for derived high-entropy secret data. - * @param secretSize Size of secretBuffer, in bytes. Must be >= XXH3_SECRET_DEFAULT_SIZE. + * @param secretBuffer A writable buffer for derived high-entropy secret + * data. + * @param secretSize Size of secretBuffer, in bytes. Must be >= + * XXH3_SECRET_DEFAULT_SIZE. * @param customSeed A user-defined content. * @param customSeedSize Size of customSeed, in bytes. * * @return @ref XXH_OK on success. * @return @ref XXH_ERROR on failure. * - * The generated secret can be used in combination with `*_withSecret()` functions. - * The `_withSecret()` variants are useful to provide a higher level of protection - * than 64-bit seed, as it becomes much more difficult for an external actor to - * guess how to impact the calculation logic. + * The generated secret can be used in combination with `*_withSecret()` + * functions. The `_withSecret()` variants are useful to provide a higher level + * of protection than 64-bit seed, as it becomes much more difficult for an + * external actor to guess how to impact the calculation logic. * * The function accepts as input a custom seed of any length and any content, * and derives from it a high-entropy secret of length @p secretSize into an @@ -1839,18 +1969,20 @@ XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, siz * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(), * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret() * are part of this list. They all accept a `secret` parameter - * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN) - * _and_ feature very high entropy (consist of random-looking bytes). - * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can - * be employed to ensure proper quality. 
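A sketch of the stack-allocation pattern XXH3_INITSTATE() exists for, assuming XXH_STATIC_LINKING_ONLY is defined so the full state struct is visible:

#define XXH_STATIC_LINKING_ONLY  /* exposes struct XXH3_state_s for stack use */
#include "xxhash.h"

/* A stack-allocated state must be initialized with XXH3_INITSTATE() (or a
   memset) before its first _withSeed reset. */
static XXH64_hash_t hash_on_stack(const void *data, size_t len,
                                  XXH64_hash_t seed) {
    XXH3_state_t st;
    XXH3_INITSTATE(&st);
    if (XXH3_64bits_reset_withSeed(&st, seed) != XXH_OK) return 0;
    XXH3_64bits_update(&st, data, len);
    return XXH3_64bits_digest(&st);
}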
+ * which must be large enough for implementation reasons (>= @ref + * XXH3_SECRET_SIZE_MIN) _and_ feature very high entropy (consist of + * random-looking bytes). These conditions can be a high bar to meet, so @ref + * XXH3_generateSecret() can be employed to ensure proper quality. * * @p customSeed can be anything. It can have any size, even small ones, * and its content can be anything, even "poor entropy" sources such as a bunch - * of zeroes. The resulting `secret` will nonetheless provide all required qualities. + * of zeroes. The resulting `secret` will nonetheless provide all required + * qualities. * * @pre * - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN - * - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior. + * - When @p customSeedSize > 0, supplying NULL as customSeed is undefined + * behavior. * * Example code: * @code{.c} @@ -1862,6 +1994,7 @@ XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, siz * // Hashes argv[2] using the entropy from argv[1]. * int main(int argc, char* argv[]) * { + * char secret[XXH3_SECRET_SIZE_MIN]; * if (argv != 3) { return 1; } * XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1])); @@ -1873,7 +2006,9 @@ XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, siz * } * @endcode */ -XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize); +XXH_PUBLIC_API XXH_errorcode +XXH3_generateSecret(XXH_NOESCAPE void *secretBuffer, size_t secretSize, + XXH_NOESCAPE const void *customSeed, size_t customSeedSize); /*! * @brief Generate the same secret as the _withSeed() variants. @@ -1891,34 +2026,43 @@ XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer * #include "xxhash.h" * // Slow, seeds each time * class HashSlow { + * XXH64_hash_t seed; * public: * HashSlow(XXH64_hash_t s) : seed{s} {} * size_t operator()(const std::string& x) const { + * return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)}; * } * }; * // Fast, caches the seeded secret for future uses. * class HashFast { + * unsigned char secret[XXH3_SECRET_SIZE_MIN]; * public: * HashFast(XXH64_hash_t s) { + * XXH3_generateSecret_fromSeed(secret, seed); * } * size_t operator()(const std::string& x) const { + * return size_t{ - * XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret)) + + * XXH3_64bits_withSecret(x.c_str(), x.length(), secret, + *sizeof(secret)) * }; * } * }; * @endcode */ -XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed); +XXH_PUBLIC_API void XXH3_generateSecret_fromSeed( + XXH_NOESCAPE void *secretBuffer, XXH64_hash_t seed); /*! * @brief Calculates 64/128-bit seeded variant of XXH3 hash of @p data. * - * @param data The block of data to be hashed, at least @p len bytes in size. + * @param data The block of data to be hashed, at least @p len bytes in + * size. * @param len The length of @p data, in bytes. * @param secret The secret data. * @param secretSize The length of @p secret, in bytes. @@ -1946,17 +2090,17 @@ XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer * On top of speed, an added benefit is that each bit in the secret * has a 50% chance to swap each bit in the output, via its impact to the seed. 
* - * This is not guaranteed when using the secret directly in "small data" scenarios, - * because only portions of the secret are employed for small data. + * This is not guaranteed when using the secret directly in "small data" + * scenarios, because only portions of the secret are employed for small data. */ -XXH_PUBLIC_API XXH_PUREF XXH64_hash_t -XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len, - XXH_NOESCAPE const void* secret, size_t secretSize, - XXH64_hash_t seed); +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecretandSeed( + XXH_NOESCAPE const void *data, size_t len, XXH_NOESCAPE const void *secret, + size_t secretSize, XXH64_hash_t seed); /*! * @brief Calculates 128-bit seeded variant of XXH3 hash of @p data. * - * @param input The block of data to be hashed, at least @p len bytes in size. + * @param input The block of data to be hashed, at least @p len bytes in + * size. * @param length The length of @p data, in bytes. * @param secret The secret data. * @param secretSize The length of @p secret, in bytes. @@ -1967,15 +2111,15 @@ XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len, * * @see XXH3_64bits_withSecretandSeed() */ -XXH_PUBLIC_API XXH_PUREF XXH128_hash_t -XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, - XXH_NOESCAPE const void* secret, size_t secretSize, - XXH64_hash_t seed64); -#ifndef XXH_NO_STREAM +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecretandSeed( + XXH_NOESCAPE const void *input, size_t length, + XXH_NOESCAPE const void *secret, size_t secretSize, XXH64_hash_t seed64); + #ifndef XXH_NO_STREAM /*! * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash. * - * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). + * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref + * XXH3_createState(). * @param secret The secret data. * @param secretSize The length of @p secret, in bytes. * @param seed64 The 64-bit seed to alter the hash result predictably. @@ -1985,14 +2129,14 @@ XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, * * @see XXH3_64bits_withSecretandSeed() */ -XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, - XXH_NOESCAPE const void* secret, size_t secretSize, - XXH64_hash_t seed64); +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecretandSeed( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret, + size_t secretSize, XXH64_hash_t seed64); /*! * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash. * - * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). + * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref + * XXH3_createState(). * @param secret The secret data. * @param secretSize The length of @p secret, in bytes. * @param seed64 The 64-bit seed to alter the hash result predictably. 
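A sketch of the combined secret-and-seed variant; the helper and the assumption that secret_buf was filled beforehand (e.g. by XXH3_generateSecret_fromSeed()) are illustrative:

#define XXH_STATIC_LINKING_ONLY  /* the *_withSecretandSeed API lives here */
#include <stddef.h>
#include "xxhash.h"

/* Uses a precomputed secret for long inputs and a seed for short ones. */
static XXH64_hash_t hash_mixed(const void *data, size_t len,
                               const unsigned char *secret_buf,
                               XXH64_hash_t seed) {
    return XXH3_64bits_withSecretandSeed(data, len, secret_buf,
                                         XXH3_SECRET_DEFAULT_SIZE, seed);
}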
@@ -2002,26 +2146,24 @@ XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, * * @see XXH3_64bits_withSecretandSeed() */ -XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, - XXH_NOESCAPE const void* secret, size_t secretSize, - XXH64_hash_t seed64); -#endif /* !XXH_NO_STREAM */ - -#endif /* !XXH_NO_XXH3 */ -#endif /* XXH_NO_LONG_LONG */ -#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) -# define XXH_IMPLEMENTATION -#endif +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecretandSeed( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret, + size_t secretSize, XXH64_hash_t seed64); + #endif /* !XXH_NO_STREAM */ -#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */ + #endif /* !XXH_NO_XXH3 */ + #endif /* XXH_NO_LONG_LONG */ + #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) + #define XXH_IMPLEMENTATION + #endif +#endif /* defined(XXH_STATIC_LINKING_ONLY) && \ + !defined(XXHASH_H_STATIC_13879238742) */ /* ======================================================================== */ /* ======================================================================== */ /* ======================================================================== */ - /*-********************************************************************** * xxHash implementation *-********************************************************************** @@ -2044,277 +2186,290 @@ XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, * which can then be linked into the final binary. ************************************************************************/ -#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \ - || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387) -# define XXH_IMPLEM_13a8737387 +#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) || \ + defined(XXH_IMPLEMENTATION)) && \ + !defined(XXH_IMPLEM_13a8737387) + #define XXH_IMPLEM_13a8737387 + + /* ************************************* + * Tuning parameters + ***************************************/ + + /*! + * @defgroup tuning Tuning parameters + * @{ + + * + * Various macros to control xxHash's behavior. + */ + #ifdef XXH_DOXYGEN + /*! + * @brief Define this to disable 64-bit code. + * + * Useful if only using the @ref XXH32_family and you have a strict C90 + * compiler. + */ + #define XXH_NO_LONG_LONG + #undef XXH_NO_LONG_LONG /* don't actually */ + /*! + * @brief Controls how unaligned memory is accessed. + * + * By default, access to unaligned memory is controlled by `memcpy()`, which + * is safe and portable. + * + * Unfortunately, on some target/compiler combinations, the generated + * assembly is sub-optimal. + * + * The below switch allow selection of a different access method + * in the search for improved performance. + * + * @par Possible options: + * + * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy` + * @par + * Use `memcpy()`. Safe and portable. Note that most modern compilers + * will eliminate the function call and treat it as an unaligned access. + * + * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))` + * @par + * Depends on compiler extensions and is therefore not portable. + * This method is safe _if_ your compiler supports it, + * and *generally* as fast or faster than `memcpy`. + * + * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast + * @par + * Casts directly and dereferences. 
This method doesn't depend on the + * compiler, but it violates the C standard as it directly dereferences + * an unaligned pointer. It can generate buggy code on targets which do not + * support unaligned memory accesses, but in some circumstances, it's + * the only known way to get the most performance. + * + * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift + * @par + * Also portable. This can generate the best code on old compilers which + * don't inline small `memcpy()` calls, and it might also be faster on + * big-endian systems which lack a native byteswap instruction. However, + * some compilers will emit literal byteshifts even if the target supports + * unaligned access. + * + * + * @warning + * Methods 1 and 2 rely on implementation-defined behavior. Use these with + * care, as what works on one compiler/platform/optimization level may + * cause another to read garbage data or even crash. + * + * See + * https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + * for details. + * + * Prefer these methods in priority order (0 > 3 > 1 > 2) + */ + #define XXH_FORCE_MEMORY_ACCESS 0 -/* ************************************* -* Tuning parameters -***************************************/ + /*! + * @def XXH_SIZE_OPT + * @brief Controls how much xxHash optimizes for size. + * + * xxHash, when compiled, tends to result in a rather large binary size. + * This is mostly due to heavy usage to forced inlining and constant folding + * of the + * @ref XXH3_family to increase performance. + * + * However, some developers prefer size over speed. This option can + * significantly reduce the size of the generated code. When using the `-Os` + * or `-Oz` options on GCC or Clang, this is defined to 1 by default, + * otherwise it is defined to 0. + * + * Most of these size optimizations can be controlled manually. + * + * This is a number from 0-2. + * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. + * Speed comes first. + * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more + * conservative and disables hacks that increase code size. It implies + * the options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == + * 0, and @ref XXH3_NEON_LANES == 8 if they are not already defined. + * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible. + * Performance may cry. For example, the single shot functions just use + * the streaming API. + */ + #define XXH_SIZE_OPT 0 -/*! - * @defgroup tuning Tuning parameters - * @{ - * - * Various macros to control xxHash's behavior. - */ -#ifdef XXH_DOXYGEN -/*! - * @brief Define this to disable 64-bit code. - * - * Useful if only using the @ref XXH32_family and you have a strict C90 compiler. - */ -# define XXH_NO_LONG_LONG -# undef XXH_NO_LONG_LONG /* don't actually */ -/*! - * @brief Controls how unaligned memory is accessed. - * - * By default, access to unaligned memory is controlled by `memcpy()`, which is - * safe and portable. - * - * Unfortunately, on some target/compiler combinations, the generated assembly - * is sub-optimal. - * - * The below switch allow selection of a different access method - * in the search for improved performance. - * - * @par Possible options: - * - * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy` - * @par - * Use `memcpy()`. Safe and portable. Note that most modern compilers will - * eliminate the function call and treat it as an unaligned access. 
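These tuning macros are meant to be set before the header is included, typically on the compiler command line; an illustrative (not prescriptive) sketch:

/* e.g. cc -O2 -DXXH_FORCE_MEMORY_ACCESS=3 -DXXH_SIZE_OPT=1 ..., or: */
#define XXH_FORCE_MEMORY_ACCESS 3   /* portable byteshift access method */
#define XXH_SIZE_OPT 1              /* favour smaller generated code */
#define XXH_INLINE_ALL              /* inline the whole implementation */
#include "xxhash.h"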
- * - * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))` - * @par - * Depends on compiler extensions and is therefore not portable. - * This method is safe _if_ your compiler supports it, - * and *generally* as fast or faster than `memcpy`. - * - * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast - * @par - * Casts directly and dereferences. This method doesn't depend on the - * compiler, but it violates the C standard as it directly dereferences an - * unaligned pointer. It can generate buggy code on targets which do not - * support unaligned memory accesses, but in some circumstances, it's the - * only known way to get the most performance. - * - * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift - * @par - * Also portable. This can generate the best code on old compilers which don't - * inline small `memcpy()` calls, and it might also be faster on big-endian - * systems which lack a native byteswap instruction. However, some compilers - * will emit literal byteshifts even if the target supports unaligned access. - * - * - * @warning - * Methods 1 and 2 rely on implementation-defined behavior. Use these with - * care, as what works on one compiler/platform/optimization level may cause - * another to read garbage data or even crash. - * - * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details. - * - * Prefer these methods in priority order (0 > 3 > 1 > 2) - */ -# define XXH_FORCE_MEMORY_ACCESS 0 + /*! + * @def XXH_FORCE_ALIGN_CHECK + * @brief If defined to non-zero, adds a special path for aligned inputs + * (XXH32() and XXH64() only). + * + * This is an important performance trick for architectures without decent + * unaligned memory access performance. + * + * It checks for input alignment, and when conditions are met, uses a "fast + * path" employing direct 32-bit/64-bit reads, resulting in _dramatically + * faster_ read speed. + * + * The check costs one initial branch per hash, which is generally + * negligible, but not zero. + * + * Moreover, it's not useful to generate an additional code path if memory + * access uses the same instruction for both aligned and unaligned + * addresses (e.g. x86 and aarch64). + * + * In these cases, the alignment check can be removed by setting this macro + * to 0. Then the code will always use unaligned memory access. Align check + * is automatically disabled on x86, x64, ARM64, and some ARM chips which + * are platforms known to offer good unaligned memory accesses performance. + * + * It is also disabled by default when @ref XXH_SIZE_OPT >= 1. + * + * This option does not affect XXH3 (only XXH32 and XXH64). + */ + #define XXH_FORCE_ALIGN_CHECK 0 -/*! - * @def XXH_SIZE_OPT - * @brief Controls how much xxHash optimizes for size. - * - * xxHash, when compiled, tends to result in a rather large binary size. This - * is mostly due to heavy usage to forced inlining and constant folding of the - * @ref XXH3_family to increase performance. - * - * However, some developers prefer size over speed. This option can - * significantly reduce the size of the generated code. When using the `-Os` - * or `-Oz` options on GCC or Clang, this is defined to 1 by default, - * otherwise it is defined to 0. - * - * Most of these size optimizations can be controlled manually. - * - * This is a number from 0-2. - * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed - * comes first. - * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more - * conservative and disables hacks that increase code size. 
It implies the - * options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0, - * and @ref XXH3_NEON_LANES == 8 if they are not already defined. - * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible. - * Performance may cry. For example, the single shot functions just use the - * streaming API. - */ -# define XXH_SIZE_OPT 0 + /*! + * @def XXH_NO_INLINE_HINTS + * @brief When non-zero, sets all functions to `static`. + * + * By default, xxHash tries to force the compiler to inline almost all + * internal functions. + * + * This can usually improve performance due to reduced jumping and improved + * constant folding, but significantly increases the size of the binary + * which might not be favorable. + * + * Additionally, sometimes the forced inlining can be detrimental to + * performance, depending on the architecture. + * + * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the + * compiler full control on whether to inline or not. + * + * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if + * @ref XXH_SIZE_OPT >= 1, this will automatically be defined. + */ + #define XXH_NO_INLINE_HINTS 0 + + /*! + * @def XXH3_INLINE_SECRET + * @brief Determines whether to inline the XXH3 withSecret code. + * + * When the secret size is known, the compiler can improve the performance + * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret(). + * + * However, if the secret size is not known, it doesn't have any benefit. + * This happens when xxHash is compiled into a global symbol. Therefore, if + * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0. + * + * Additionally, this defaults to 0 on GCC 12+, which has an issue with + * function pointers that are *sometimes* force inline on -Og, and it is + * impossible to automatically detect this optimization level. + */ + #define XXH3_INLINE_SECRET 0 + + /*! + * @def XXH32_ENDJMP + * @brief Whether to use a jump for `XXH32_finalize`. + * + * For performance, `XXH32_finalize` uses multiple branches in the + * finalizer. This is generally preferable for performance, but depending on + * exact architecture, a jmp may be preferable. + * + * This setting is only possibly making a difference for very small inputs. + */ + #define XXH32_ENDJMP 0 + + /*! + * @internal + * @brief Redefines old internal names. + * + * For compatibility with code that uses xxHash's internals before the names + * were changed to improve namespacing. There is no other reason to use + * this. + */ + #define XXH_OLD_NAMES + #undef XXH_OLD_NAMES /* don't actually use, it is ugly. */ + /*! + * @def XXH_NO_STREAM + * @brief Disables the streaming API. + * + * When xxHash is not inlined and the streaming functions are not used, + * disabling the streaming functions can improve code size significantly, + * especially with the @ref XXH3_family which tends to make constant folded + * copies of itself. + */ + #define XXH_NO_STREAM + #undef XXH_NO_STREAM /* don't actually */ + #endif /* XXH_DOXYGEN */ /*! - * @def XXH_FORCE_ALIGN_CHECK - * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32() - * and XXH64() only). - * - * This is an important performance trick for architectures without decent - * unaligned memory access performance. - * - * It checks for input alignment, and when conditions are met, uses a "fast - * path" employing direct 32-bit/64-bit reads, resulting in _dramatically - * faster_ read speed. 
- * - * The check costs one initial branch per hash, which is generally negligible, - * but not zero. - * - * Moreover, it's not useful to generate an additional code path if memory - * access uses the same instruction for both aligned and unaligned - * addresses (e.g. x86 and aarch64). - * - * In these cases, the alignment check can be removed by setting this macro to 0. - * Then the code will always use unaligned memory access. - * Align check is automatically disabled on x86, x64, ARM64, and some ARM chips - * which are platforms known to offer good unaligned memory accesses performance. - * - * It is also disabled by default when @ref XXH_SIZE_OPT >= 1. - * - * This option does not affect XXH3 (only XXH32 and XXH64). + * @} */ -# define XXH_FORCE_ALIGN_CHECK 0 -/*! - * @def XXH_NO_INLINE_HINTS - * @brief When non-zero, sets all functions to `static`. - * - * By default, xxHash tries to force the compiler to inline almost all internal - * functions. - * - * This can usually improve performance due to reduced jumping and improved - * constant folding, but significantly increases the size of the binary which - * might not be favorable. - * - * Additionally, sometimes the forced inlining can be detrimental to performance, - * depending on the architecture. - * - * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the - * compiler full control on whether to inline or not. - * - * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if - * @ref XXH_SIZE_OPT >= 1, this will automatically be defined. - */ -# define XXH_NO_INLINE_HINTS 0 - -/*! - * @def XXH3_INLINE_SECRET - * @brief Determines whether to inline the XXH3 withSecret code. - * - * When the secret size is known, the compiler can improve the performance - * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret(). - * - * However, if the secret size is not known, it doesn't have any benefit. This - * happens when xxHash is compiled into a global symbol. Therefore, if - * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0. - * - * Additionally, this defaults to 0 on GCC 12+, which has an issue with function pointers - * that are *sometimes* force inline on -Og, and it is impossible to automatically - * detect this optimization level. - */ -# define XXH3_INLINE_SECRET 0 - -/*! - * @def XXH32_ENDJMP - * @brief Whether to use a jump for `XXH32_finalize`. - * - * For performance, `XXH32_finalize` uses multiple branches in the finalizer. - * This is generally preferable for performance, - * but depending on exact architecture, a jmp may be preferable. - * - * This setting is only possibly making a difference for very small inputs. - */ -# define XXH32_ENDJMP 0 - -/*! - * @internal - * @brief Redefines old internal names. - * - * For compatibility with code that uses xxHash's internals before the names - * were changed to improve namespacing. There is no other reason to use this. - */ -# define XXH_OLD_NAMES -# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */ - -/*! - * @def XXH_NO_STREAM - * @brief Disables the streaming API. - * - * When xxHash is not inlined and the streaming functions are not used, disabling - * the streaming functions can improve code size significantly, especially with - * the @ref XXH3_family which tends to make constant folded copies of itself. - */ -# define XXH_NO_STREAM -# undef XXH_NO_STREAM /* don't actually */ -#endif /* XXH_DOXYGEN */ -/*! 
- * @} - */ - -#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ - /* prefer __packed__ structures (method 1) for GCC - * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte shifting, so we use memcpy - * which for some reason does unaligned loads. */ -# if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED)) -# define XXH_FORCE_MEMORY_ACCESS 1 -# endif -#endif - -#ifndef XXH_SIZE_OPT - /* default to 1 for -Os or -Oz */ -# if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__) -# define XXH_SIZE_OPT 1 -# else -# define XXH_SIZE_OPT 0 -# endif -#endif - -#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ - /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */ -# if XXH_SIZE_OPT >= 1 || \ - defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \ - || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM) /* visual */ -# define XXH_FORCE_ALIGN_CHECK 0 -# else -# define XXH_FORCE_ALIGN_CHECK 1 -# endif -#endif - -#ifndef XXH_NO_INLINE_HINTS -# if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */ -# define XXH_NO_INLINE_HINTS 1 -# else -# define XXH_NO_INLINE_HINTS 0 -# endif -#endif - -#ifndef XXH3_INLINE_SECRET -# if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \ - || !defined(XXH_INLINE_ALL) -# define XXH3_INLINE_SECRET 0 -# else -# define XXH3_INLINE_SECRET 1 -# endif -#endif - -#ifndef XXH32_ENDJMP -/* generally preferable for performance */ -# define XXH32_ENDJMP 0 -#endif - -/*! - * @defgroup impl Implementation - * @{ - */ - - -/* ************************************* -* Includes & Memory related functions -***************************************/ -#if defined(XXH_NO_STREAM) -/* nothing */ -#elif defined(XXH_NO_STDLIB) + #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command \ + line for example */ + /* prefer __packed__ structures (method 1) for GCC + * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte + * shifting, so we use memcpy which for some reason does unaligned loads. 
*/ + #if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && \ + defined(__ARM_FEATURE_UNALIGNED)) + #define XXH_FORCE_MEMORY_ACCESS 1 + #endif + #endif + + #ifndef XXH_SIZE_OPT + /* default to 1 for -Os or -Oz */ + #if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__) + #define XXH_SIZE_OPT 1 + #else + #define XXH_SIZE_OPT 0 + #endif + #endif + + #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ + /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is + * available */ + #if XXH_SIZE_OPT >= 1 || defined(__i386) || defined(__x86_64__) || \ + defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) || \ + defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || \ + defined(_M_ARM) /* visual */ + #define XXH_FORCE_ALIGN_CHECK 0 + #else + #define XXH_FORCE_ALIGN_CHECK 1 + #endif + #endif + + #ifndef XXH_NO_INLINE_HINTS + #if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */ + #define XXH_NO_INLINE_HINTS 1 + #else + #define XXH_NO_INLINE_HINTS 0 + #endif + #endif + + #ifndef XXH3_INLINE_SECRET + #if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) || \ + !defined(XXH_INLINE_ALL) + #define XXH3_INLINE_SECRET 0 + #else + #define XXH3_INLINE_SECRET 1 + #endif + #endif + + #ifndef XXH32_ENDJMP + /* generally preferable for performance */ + #define XXH32_ENDJMP 0 + #endif + + /*! + * @defgroup impl Implementation + * @{ + + */ + + /* ************************************* + * Includes & Memory related functions + ***************************************/ + #if defined(XXH_NO_STREAM) + /* nothing */ + #elif defined(XXH_NO_STDLIB) /* When requesting to disable any mention of stdlib, * the library loses the ability to invoked malloc / free. @@ -2325,173 +2480,212 @@ XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, * without access to dynamic allocation. */ -static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; } -static void XXH_free(void* p) { (void)p; } +static XXH_CONSTF void *XXH_malloc(size_t s) { -#else + (void)s; + return NULL; -/* - * Modify the local functions below should you wish to use - * different memory routines for malloc() and free() - */ -#include +} -/*! - * @internal - * @brief Modify this function to use a different routine than malloc(). - */ -static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); } +static void XXH_free(void *p) { -/*! - * @internal - * @brief Modify this function to use a different routine than free(). - */ -static void XXH_free(void* p) { free(p); } + (void)p; -#endif /* XXH_NO_STDLIB */ +} + + #else -#include + /* + * Modify the local functions below should you wish to use + * different memory routines for malloc() and free() + */ + #include /*! * @internal - * @brief Modify this function to use a different routine than memcpy(). + * @brief Modify this function to use a different routine than malloc(). 
*/ -static void* XXH_memcpy(void* dest, const void* src, size_t size) -{ - return memcpy(dest,src,size); -} - -#include /* ULLONG_MAX */ - +static XXH_MALLOCF void *XXH_malloc(size_t s) { -/* ************************************* -* Compiler Specific Options -***************************************/ -#ifdef _MSC_VER /* Visual Studio warning fix */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -#endif + return malloc(s); -#if XXH_NO_INLINE_HINTS /* disable inlining hints */ -# if defined(__GNUC__) || defined(__clang__) -# define XXH_FORCE_INLINE static __attribute__((unused)) -# else -# define XXH_FORCE_INLINE static -# endif -# define XXH_NO_INLINE static -/* enable inlining hints */ -#elif defined(__GNUC__) || defined(__clang__) -# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused)) -# define XXH_NO_INLINE static __attribute__((noinline)) -#elif defined(_MSC_VER) /* Visual Studio */ -# define XXH_FORCE_INLINE static __forceinline -# define XXH_NO_INLINE static __declspec(noinline) -#elif defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */ -# define XXH_FORCE_INLINE static inline -# define XXH_NO_INLINE static -#else -# define XXH_FORCE_INLINE static -# define XXH_NO_INLINE static -#endif +} -#if XXH3_INLINE_SECRET -# define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE -#else -# define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE -#endif +/*! + * @internal + * @brief Modify this function to use a different routine than free(). + */ +static void XXH_free(void *p) { + free(p); -/* ************************************* -* Debug -***************************************/ -/*! - * @ingroup tuning - * @def XXH_DEBUGLEVEL - * @brief Sets the debugging level. - * - * XXH_DEBUGLEVEL is expected to be defined externally, typically via the - * compiler's command line options. The value must be a number. - */ -#ifndef XXH_DEBUGLEVEL -# ifdef DEBUGLEVEL /* backwards compat */ -# define XXH_DEBUGLEVEL DEBUGLEVEL -# else -# define XXH_DEBUGLEVEL 0 -# endif -#endif +} -#if (XXH_DEBUGLEVEL>=1) -# include /* note: can still be disabled with NDEBUG */ -# define XXH_ASSERT(c) assert(c) -#else -# if defined(__INTEL_COMPILER) -# define XXH_ASSERT(c) XXH_ASSUME((unsigned char) (c)) -# else -# define XXH_ASSERT(c) XXH_ASSUME(c) -# endif -#endif + #endif /* XXH_NO_STDLIB */ -/* note: use after variable declarations */ -#ifndef XXH_STATIC_ASSERT -# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */ -# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0) -# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */ -# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0) -# else -# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0) -# endif -# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c) -#endif + #include /*! * @internal - * @def XXH_COMPILER_GUARD(var) - * @brief Used to prevent unwanted optimizations for @p var. - * - * It uses an empty GCC inline assembly statement with a register constraint - * which forces @p var into a general purpose register (eg eax, ebx, ecx - * on x86) and marks it as modified. - * - * This is used in a few places to avoid unwanted autovectorization (e.g. - * XXH32_round()). All vectorization we want is explicit via intrinsics, - * and _usually_ isn't wanted elsewhere. 
- * - * We also use it to prevent unwanted constant folding for AArch64 in - * XXH3_initCustomSecret_scalar(). + * @brief Modify this function to use a different routine than memcpy(). */ -#if defined(__GNUC__) || defined(__clang__) -# define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var)) -#else -# define XXH_COMPILER_GUARD(var) ((void)0) -#endif - -/* Specifically for NEON vectors which use the "w" constraint, on - * Clang. */ -#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__) -# define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var)) -#else -# define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0) -#endif - -/* ************************************* -* Basic Types -***************************************/ -#if !defined (__VMS) \ - && (defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint8_t xxh_u8; -#else - typedef unsigned char xxh_u8; -#endif +static void *XXH_memcpy(void *dest, const void *src, size_t size) { + + return memcpy(dest, src, size); + +} + + #include /* ULLONG_MAX */ + + /* ************************************* + * Compiler Specific Options + ***************************************/ + #ifdef _MSC_VER /* Visual Studio warning fix */ + #pragma warning(disable : 4127) /* disable: C4127: conditional expression \ + is constant */ + #endif + + #if XXH_NO_INLINE_HINTS /* disable inlining hints */ + #if defined(__GNUC__) || defined(__clang__) + #define XXH_FORCE_INLINE static __attribute__((unused)) + #else + #define XXH_FORCE_INLINE static + #endif + #define XXH_NO_INLINE static + /* enable inlining hints */ + #elif defined(__GNUC__) || defined(__clang__) + #define XXH_FORCE_INLINE \ + static __inline__ __attribute__((always_inline, unused)) + #define XXH_NO_INLINE static __attribute__((noinline)) + #elif defined(_MSC_VER) /* Visual Studio */ + #define XXH_FORCE_INLINE static __forceinline + #define XXH_NO_INLINE static __declspec(noinline) + #elif defined(__cplusplus) || \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */ + #define XXH_FORCE_INLINE static inline + #define XXH_NO_INLINE static + #else + #define XXH_FORCE_INLINE static + #define XXH_NO_INLINE static + #endif + + #if XXH3_INLINE_SECRET + #define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE + #else + #define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE + #endif + + /* ************************************* + * Debug + ***************************************/ + /*! + * @ingroup tuning + * @def XXH_DEBUGLEVEL + * @brief Sets the debugging level. + * + * XXH_DEBUGLEVEL is expected to be defined externally, typically via the + * compiler's command line options. The value must be a number. 
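   *
   * For instance (editor's illustration, not upstream documentation), a debug
   * build could enable the internal assertions with:
   * @code{.c}
   *   // either  cc -DXXH_DEBUGLEVEL=1 ...  on the command line, or in the
   *   // translation unit before the header is included:
   *   #define XXH_DEBUGLEVEL 1
   *   #include "xxhash.h"
   * @endcode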
+ */ + #ifndef XXH_DEBUGLEVEL + #ifdef DEBUGLEVEL /* backwards compat */ + #define XXH_DEBUGLEVEL DEBUGLEVEL + #else + #define XXH_DEBUGLEVEL 0 + #endif + #endif + + #if (XXH_DEBUGLEVEL >= 1) + #include /* note: can still be disabled with NDEBUG */ + #define XXH_ASSERT(c) assert(c) + #else + #if defined(__INTEL_COMPILER) + #define XXH_ASSERT(c) XXH_ASSUME((unsigned char)(c)) + #else + #define XXH_ASSERT(c) XXH_ASSUME(c) + #endif + #endif + + /* note: use after variable declarations */ + #ifndef XXH_STATIC_ASSERT + #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */ + #define XXH_STATIC_ASSERT_WITH_MESSAGE(c, m) \ + do { \ + \ + _Static_assert((c), m); \ + \ + } while (0) + #elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */ + #define XXH_STATIC_ASSERT_WITH_MESSAGE(c, m) \ + do { \ + \ + static_assert((c), m); \ + \ + } while (0) + #else + #define XXH_STATIC_ASSERT_WITH_MESSAGE(c, m) \ + do { \ + \ + struct xxh_sa { \ + \ + char x[(c) ? 1 : -1]; \ + \ + }; \ + \ + } while (0) + #endif + #define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c), #c) + #endif + + /*! + * @internal + * @def XXH_COMPILER_GUARD(var) + * @brief Used to prevent unwanted optimizations for @p var. + * + * It uses an empty GCC inline assembly statement with a register constraint + * which forces @p var into a general purpose register (eg eax, ebx, ecx + * on x86) and marks it as modified. + * + * This is used in a few places to avoid unwanted autovectorization (e.g. + * XXH32_round()). All vectorization we want is explicit via intrinsics, + * and _usually_ isn't wanted elsewhere. + * + * We also use it to prevent unwanted constant folding for AArch64 in + * XXH3_initCustomSecret_scalar(). + */ + #if defined(__GNUC__) || defined(__clang__) + #define XXH_COMPILER_GUARD(var) __asm__("" : "+r"(var)) + #else + #define XXH_COMPILER_GUARD(var) ((void)0) + #endif + + /* Specifically for NEON vectors which use the "w" constraint, on + * Clang. */ + #if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__) + #define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w"(var)) + #else + #define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0) + #endif + + /* ************************************* + * Basic Types + ***************************************/ + #if !defined(__VMS) && \ + (defined(__cplusplus) || \ + (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)) + #include +typedef uint8_t xxh_u8; + #else +typedef unsigned char xxh_u8; + #endif typedef XXH32_hash_t xxh_u32; -#ifdef XXH_OLD_NAMES -# warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly" -# define BYTE xxh_u8 -# define U8 xxh_u8 -# define U32 xxh_u32 -#endif + #ifdef XXH_OLD_NAMES + #warning \ + "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly" + #define BYTE xxh_u8 + #define U8 xxh_u8 + #define U32 xxh_u32 + #endif /* *** Memory access *** */ @@ -2545,118 +2739,132 @@ typedef XXH32_hash_t xxh_u32; * @return The 32-bit little endian integer from the bytes at @p ptr. */ -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) -/* - * Manual byteshift. Best for old compilers which don't inline memcpy. - * We actually directly use XXH_readLE32 and XXH_readBE32. 
- */ -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 3)) + /* + * Manual byteshift. Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE32 and XXH_readBE32. + */ + #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 2)) /* * Force direct memory access. Only works on CPU which support unaligned memory * access in hardware. */ -static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; } +static xxh_u32 XXH_read32(const void *memPtr) { -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + return *(const xxh_u32 *)memPtr; -/* - * __attribute__((aligned(1))) is supported by gcc and clang. Originally the - * documentation claimed that it only increased the alignment, but actually it - * can decrease it on gcc, clang, and icc: - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, - * https://gcc.godbolt.org/z/xYez1j67Y. - */ -#ifdef XXH_OLD_NAMES -typedef union { xxh_u32 u32; } __attribute__((packed)) unalign; -#endif -static xxh_u32 XXH_read32(const void* ptr) -{ - typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32; - return *((const xxh_unalign32*)ptr); } -#else + #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1)) -/* - * Portable and safe solution. Generally efficient. - * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html - */ -static xxh_u32 XXH_read32(const void* memPtr) -{ - xxh_u32 val; - XXH_memcpy(&val, memPtr, sizeof(val)); - return val; -} + /* + * __attribute__((aligned(1))) is supported by gcc and clang. Originally the + * documentation claimed that it only increased the alignment, but actually + * it can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. + */ + #ifdef XXH_OLD_NAMES +typedef union { -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + xxh_u32 u32; +} __attribute__((packed)) unalign; -/* *** Endianness *** */ + #endif +static xxh_u32 XXH_read32(const void *ptr) { + + typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32; + return *((const xxh_unalign32 *)ptr); + +} + + #else -/*! - * @ingroup tuning - * @def XXH_CPU_LITTLE_ENDIAN - * @brief Whether the target is little endian. - * - * Defined to 1 if the target is little endian, or 0 if it is big endian. - * It can be defined externally, for example on the compiler command line. - * - * If it is not defined, - * a runtime check (which is usually constant folded) is used instead. - * - * @note - * This is not necessarily defined to an integer constant. - * - * @see XXH_isLittleEndian() for the runtime check. - */ -#ifndef XXH_CPU_LITTLE_ENDIAN /* - * Try to detect endianness automatically, to avoid the nonstandard behavior - * in `XXH_isLittleEndian()` - */ -# if defined(_WIN32) /* Windows is always little endian */ \ - || defined(__LITTLE_ENDIAN__) \ - || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) -# define XXH_CPU_LITTLE_ENDIAN 1 -# elif defined(__BIG_ENDIAN__) \ - || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) -# define XXH_CPU_LITTLE_ENDIAN 0 -# else + * Portable and safe solution. Generally efficient. 
+ * see: + * https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + */ +static xxh_u32 XXH_read32(const void *memPtr) { + + xxh_u32 val; + XXH_memcpy(&val, memPtr, sizeof(val)); + return val; + +} + + #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + /* *** Endianness *** */ + + /*! + * @ingroup tuning + * @def XXH_CPU_LITTLE_ENDIAN + * @brief Whether the target is little endian. + * + * Defined to 1 if the target is little endian, or 0 if it is big endian. + * It can be defined externally, for example on the compiler command line. + * + * If it is not defined, + * a runtime check (which is usually constant folded) is used instead. + * + * @note + * This is not necessarily defined to an integer constant. + * + * @see XXH_isLittleEndian() for the runtime check. + */ + #ifndef XXH_CPU_LITTLE_ENDIAN + /* + * Try to detect endianness automatically, to avoid the nonstandard behavior + * in `XXH_isLittleEndian()` + */ + #if defined(_WIN32) /* Windows is always little endian */ \ + || defined(__LITTLE_ENDIAN__) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + #define XXH_CPU_LITTLE_ENDIAN 1 + #elif defined(__BIG_ENDIAN__) || \ + (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) + #define XXH_CPU_LITTLE_ENDIAN 0 + #else /*! * @internal * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN. * * Most compilers will constant fold this. */ -static int XXH_isLittleEndian(void) -{ - /* - * Portable and well-defined behavior. - * Don't use static: it is detrimental to performance. - */ - const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; - return one.c[0]; -} -# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() -# endif -#endif +static int XXH_isLittleEndian(void) { + /* + * Portable and well-defined behavior. + * Don't use static: it is detrimental to performance. + */ + const union { + xxh_u32 u; + xxh_u8 c[4]; + } one = {1}; -/* **************************************** -* Compiler-specific Functions and Macros -******************************************/ -#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + return one.c[0]; -#ifdef __has_builtin -# define XXH_HAS_BUILTIN(x) __has_builtin(x) -#else -# define XXH_HAS_BUILTIN(x) 0 -#endif +} +\ + #define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() + #endif + #endif + /* **************************************** + * Compiler-specific Functions and Macros + ******************************************/ + #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + #ifdef __has_builtin + #define XXH_HAS_BUILTIN(x) __has_builtin(x) + #else + #define XXH_HAS_BUILTIN(x) 0 + #endif /* * C23 and future versions have standard "unreachable()". @@ -2685,142 +2893,154 @@ static int XXH_isLittleEndian(void) * doesn't work on GCC12 */ -#if XXH_HAS_BUILTIN(__builtin_unreachable) -# define XXH_UNREACHABLE() __builtin_unreachable() - -#elif defined(_MSC_VER) -# define XXH_UNREACHABLE() __assume(0) - -#else -# define XXH_UNREACHABLE() -#endif - -#if XXH_HAS_BUILTIN(__builtin_assume) -# define XXH_ASSUME(c) __builtin_assume(c) -#else -# define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); } -#endif - -/*! - * @internal - * @def XXH_rotl32(x,r) - * @brief 32-bit rotate left. - * - * @param x The 32-bit integer to be rotated. - * @param r The number of bits to rotate. - * @pre - * @p r > 0 && @p r < 32 - * @note - * @p x and @p r may be evaluated multiple times. - * @return The rotated result. 
- */ -#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \ - && XXH_HAS_BUILTIN(__builtin_rotateleft64) -# define XXH_rotl32 __builtin_rotateleft32 -# define XXH_rotl64 __builtin_rotateleft64 -/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */ -#elif defined(_MSC_VER) -# define XXH_rotl32(x,r) _rotl(x,r) -# define XXH_rotl64(x,r) _rotl64(x,r) -#else -# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r)))) -# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r)))) -#endif - -/*! - * @internal - * @fn xxh_u32 XXH_swap32(xxh_u32 x) - * @brief A 32-bit byteswap. - * - * @param x The 32-bit integer to byteswap. - * @return @p x, byteswapped. - */ -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap32 _byteswap_ulong -#elif XXH_GCC_VERSION >= 403 -# define XXH_swap32 __builtin_bswap32 -#else -static xxh_u32 XXH_swap32 (xxh_u32 x) -{ - return ((x << 24) & 0xff000000 ) | - ((x << 8) & 0x00ff0000 ) | - ((x >> 8) & 0x0000ff00 ) | - ((x >> 24) & 0x000000ff ); -} -#endif - + #if XXH_HAS_BUILTIN(__builtin_unreachable) + #define XXH_UNREACHABLE() __builtin_unreachable() + + #elif defined(_MSC_VER) + #define XXH_UNREACHABLE() __assume(0) + + #else + #define XXH_UNREACHABLE() + #endif + + #if XXH_HAS_BUILTIN(__builtin_assume) + #define XXH_ASSUME(c) __builtin_assume(c) + #else + #define XXH_ASSUME(c) \ + if (!(c)) { XXH_UNREACHABLE(); } + #endif + + /*! + * @internal + * @def XXH_rotl32(x,r) + * @brief 32-bit rotate left. + * + * @param x The 32-bit integer to be rotated. + * @param r The number of bits to rotate. + * @pre + * @p r > 0 && @p r < 32 + * @note + * @p x and @p r may be evaluated multiple times. + * @return The rotated result. + */ + #if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) && \ + XXH_HAS_BUILTIN(__builtin_rotateleft64) + #define XXH_rotl32 __builtin_rotateleft32 + #define XXH_rotl64 __builtin_rotateleft64 + /* Note: although _rotl exists for minGW (GCC under windows), performance + * seems poor */ + #elif defined(_MSC_VER) + #define XXH_rotl32(x, r) _rotl(x, r) + #define XXH_rotl64(x, r) _rotl64(x, r) + #else + #define XXH_rotl32(x, r) (((x) << (r)) | ((x) >> (32 - (r)))) + #define XXH_rotl64(x, r) (((x) << (r)) | ((x) >> (64 - (r)))) + #endif + + /*! + * @internal + * @fn xxh_u32 XXH_swap32(xxh_u32 x) + * @brief A 32-bit byteswap. + * + * @param x The 32-bit integer to byteswap. + * @return @p x, byteswapped. + */ + #if defined(_MSC_VER) /* Visual Studio */ + #define XXH_swap32 _byteswap_ulong + #elif XXH_GCC_VERSION >= 403 + #define XXH_swap32 __builtin_bswap32 + #else +static xxh_u32 XXH_swap32(xxh_u32 x) { + + return ((x << 24) & 0xff000000) | ((x << 8) & 0x00ff0000) | + ((x >> 8) & 0x0000ff00) | ((x >> 24) & 0x000000ff); + +} + + #endif /* *************************** -* Memory reads -*****************************/ + * Memory reads + *****************************/ /*! * @internal * @brief Enum to indicate whether a pointer is aligned. */ typedef enum { - XXH_aligned, /*!< Aligned */ - XXH_unaligned /*!< Possibly unaligned */ + + XXH_aligned, /*!< Aligned */ + XXH_unaligned /*!< Possibly unaligned */ + } XXH_alignment; -/* - * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. - * - * This is ideal for older compilers which don't inline memcpy. - */ -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + /* + * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. 
+ * + * This is ideal for older compilers which don't inline memcpy. + */ + #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 3)) -XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr) -{ - const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; - return bytePtr[0] - | ((xxh_u32)bytePtr[1] << 8) - | ((xxh_u32)bytePtr[2] << 16) - | ((xxh_u32)bytePtr[3] << 24); -} +XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void *memPtr) { + + const xxh_u8 *bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] | ((xxh_u32)bytePtr[1] << 8) | ((xxh_u32)bytePtr[2] << 16) | + ((xxh_u32)bytePtr[3] << 24); -XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr) -{ - const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; - return bytePtr[3] - | ((xxh_u32)bytePtr[2] << 8) - | ((xxh_u32)bytePtr[1] << 16) - | ((xxh_u32)bytePtr[0] << 24); } -#else -XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); +XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void *memPtr) { + + const xxh_u8 *bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[3] | ((xxh_u32)bytePtr[2] << 8) | ((xxh_u32)bytePtr[1] << 16) | + ((xxh_u32)bytePtr[0] << 24); + } -static xxh_u32 XXH_readBE32(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); + #else +XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void *ptr) { + + return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); + } -#endif -XXH_FORCE_INLINE xxh_u32 -XXH_readLE32_align(const void* ptr, XXH_alignment align) -{ - if (align==XXH_unaligned) { - return XXH_readLE32(ptr); - } else { - return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr); - } +static xxh_u32 XXH_readBE32(const void *ptr) { + + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); + } + #endif + +XXH_FORCE_INLINE xxh_u32 XXH_readLE32_align(const void *ptr, + XXH_alignment align) { + + if (align == XXH_unaligned) { + + return XXH_readLE32(ptr); + + } else { + + return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32 *)ptr + : XXH_swap32(*(const xxh_u32 *)ptr); + + } + +} /* ************************************* -* Misc -***************************************/ + * Misc + ***************************************/ /*! @ingroup public */ -XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } +XXH_PUBLIC_API unsigned XXH_versionNumber(void) { + + return XXH_VERSION_NUMBER; +} /* ******************************************************************* -* 32-bit hash functions -*********************************************************************/ + * 32-bit hash functions + *********************************************************************/ /*! * @} * @defgroup XXH32_impl XXH32 implementation @@ -2828,21 +3048,22 @@ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } * * Details on the XXH32 implementation. 
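 *
 * As a quick orientation (editor's example, relying only on the public API
 * declared earlier in this header), the one-shot form hashes a buffer in a
 * single call:
 * @code{.c}
 *   #include "xxhash.h"
 *   XXH32_hash_t hash_bytes(const void *p, size_t n) {
 *     return XXH32(p, n, 0);  // seed = 0
 *   }
 * @endcode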
* @{ + */ - /* #define instead of static const, to be used as initializers */ -#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */ -#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */ -#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */ -#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */ -#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */ - -#ifdef XXH_OLD_NAMES -# define PRIME32_1 XXH_PRIME32_1 -# define PRIME32_2 XXH_PRIME32_2 -# define PRIME32_3 XXH_PRIME32_3 -# define PRIME32_4 XXH_PRIME32_4 -# define PRIME32_5 XXH_PRIME32_5 -#endif +/* #define instead of static const, to be used as initializers */ + #define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */ + #define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */ + #define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */ + #define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */ + #define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */ + + #ifdef XXH_OLD_NAMES + #define PRIME32_1 XXH_PRIME32_1 + #define PRIME32_2 XXH_PRIME32_2 + #define PRIME32_3 XXH_PRIME32_3 + #define PRIME32_4 XXH_PRIME32_4 + #define PRIME32_5 XXH_PRIME32_5 + #endif /*! * @internal @@ -2855,51 +3076,54 @@ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } * @param input The stripe of input to mix. * @return The mixed accumulator lane. */ -static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) -{ - acc += input * XXH_PRIME32_2; - acc = XXH_rotl32(acc, 13); - acc *= XXH_PRIME32_1; -#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE) - /* - * UGLY HACK: - * A compiler fence is the only thing that prevents GCC and Clang from - * autovectorizing the XXH32 loop (pragmas and attributes don't work for some - * reason) without globally disabling SSE4.1. - * - * The reason we want to avoid vectorization is because despite working on - * 4 integers at a time, there are multiple factors slowing XXH32 down on - * SSE4: - * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on - * newer chips!) making it slightly slower to multiply four integers at - * once compared to four integers independently. Even when pmulld was - * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE - * just to multiply unless doing a long operation. - * - * - Four instructions are required to rotate, - * movqda tmp, v // not required with VEX encoding - * pslld tmp, 13 // tmp <<= 13 - * psrld v, 19 // x >>= 19 - * por v, tmp // x |= tmp - * compared to one for scalar: - * roll v, 13 // reliably fast across the board - * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason - * - * - Instruction level parallelism is actually more beneficial here because - * the SIMD actually serializes this operation: While v1 is rotating, v2 - * can load data, while v3 can multiply. SSE forces them to operate - * together. - * - * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing - * the loop. NEON is only faster on the A53, and with the newer cores, it is less - * than half the speed. - * - * Additionally, this is used on WASM SIMD128 because it JITs to the same - * SIMD instructions and has the same issue. 
- */ - XXH_COMPILER_GUARD(acc); -#endif - return acc; +static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) { + + acc += input * XXH_PRIME32_2; + acc = XXH_rotl32(acc, 13); + acc *= XXH_PRIME32_1; + #if (defined(__SSE4_1__) || defined(__aarch64__) || \ + defined(__wasm_simd128__)) && \ + !defined(XXH_ENABLE_AUTOVECTORIZE) + /* + * UGLY HACK: + * A compiler fence is the only thing that prevents GCC and Clang from + * autovectorizing the XXH32 loop (pragmas and attributes don't work for some + * reason) without globally disabling SSE4.1. + * + * The reason we want to avoid vectorization is because despite working on + * 4 integers at a time, there are multiple factors slowing XXH32 down on + * SSE4: + * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on + * newer chips!) making it slightly slower to multiply four integers at + * once compared to four integers independently. Even when pmulld was + * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE + * just to multiply unless doing a long operation. + * + * - Four instructions are required to rotate, + * movqda tmp, v // not required with VEX encoding + * pslld tmp, 13 // tmp <<= 13 + * psrld v, 19 // x >>= 19 + * por v, tmp // x |= tmp + * compared to one for scalar: + * roll v, 13 // reliably fast across the board + * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason + * + * - Instruction level parallelism is actually more beneficial here because + * the SIMD actually serializes this operation: While v1 is rotating, v2 + * can load data, while v3 can multiply. SSE forces them to operate + * together. + * + * This is also enabled on AArch64, as Clang is *very aggressive* in + * vectorizing the loop. NEON is only faster on the A53, and with the newer + * cores, it is less than half the speed. + * + * Additionally, this is used on WASM SIMD128 because it JITs to the same + * SIMD instructions and has the same issue. + */ + XXH_COMPILER_GUARD(acc); + #endif + return acc; + } /*! @@ -2912,17 +3136,18 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) * @param hash The hash to avalanche. * @return The avalanched hash. */ -static xxh_u32 XXH32_avalanche(xxh_u32 hash) -{ - hash ^= hash >> 15; - hash *= XXH_PRIME32_2; - hash ^= hash >> 13; - hash *= XXH_PRIME32_3; - hash ^= hash >> 16; - return hash; +static xxh_u32 XXH32_avalanche(xxh_u32 hash) { + + hash ^= hash >> 15; + hash *= XXH_PRIME32_2; + hash ^= hash >> 13; + hash *= XXH_PRIME32_3; + hash ^= hash >> 16; + return hash; + } -#define XXH_get32bits(p) XXH_readLE32_align(p, align) + #define XXH_get32bits(p) XXH_readLE32_align(p, align) /*! * @internal @@ -2939,86 +3164,122 @@ static xxh_u32 XXH32_avalanche(xxh_u32 hash) * @return The finalized hash. * @see XXH64_finalize(). 
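 *
 * Worked example (editor's note): for a 23-byte input the main loop has
 * already consumed 16 bytes, so this routine mixes the trailing 7 bytes as
 * one 4-byte word followed by three single bytes, then applies
 * XXH32_avalanche().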
*/ -static XXH_PUREF xxh_u32 -XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) -{ -#define XXH_PROCESS1 do { \ - hash += (*ptr++) * XXH_PRIME32_5; \ - hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \ -} while (0) - -#define XXH_PROCESS4 do { \ - hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \ - ptr += 4; \ - hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \ -} while (0) - - if (ptr==NULL) XXH_ASSERT(len == 0); - - /* Compact rerolled version; generally faster */ - if (!XXH32_ENDJMP) { - len &= 15; - while (len >= 4) { - XXH_PROCESS4; - len -= 4; - } - while (len > 0) { - XXH_PROCESS1; - --len; - } +static XXH_PUREF xxh_u32 XXH32_finalize(xxh_u32 hash, const xxh_u8 *ptr, + size_t len, XXH_alignment align) { +\ + #define XXH_PROCESS1 \ + do { \ + \ + hash += (*ptr++) * XXH_PRIME32_5; \ + hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \ + \ + } while (0) + + #define XXH_PROCESS4 \ + do { \ + \ + hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \ + ptr += 4; \ + hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \ + \ + } while (0) + + if (ptr == NULL) XXH_ASSERT(len == 0); + + /* Compact rerolled version; generally faster */ + if (!XXH32_ENDJMP) { + + len &= 15; + while (len >= 4) { + + XXH_PROCESS4; + len -= 4; + + } + + while (len > 0) { + + XXH_PROCESS1; + --len; + + } + + return XXH32_avalanche(hash); + + } else { + + switch (len & 15) /* or switch(bEnd - p) */ { + + case 12: + XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 8: + XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 4: + XXH_PROCESS4; return XXH32_avalanche(hash); - } else { - switch(len&15) /* or switch(bEnd - p) */ { - case 12: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 8: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 4: XXH_PROCESS4; - return XXH32_avalanche(hash); - - case 13: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 9: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 5: XXH_PROCESS4; - XXH_PROCESS1; - return XXH32_avalanche(hash); - - case 14: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 10: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 6: XXH_PROCESS4; - XXH_PROCESS1; - XXH_PROCESS1; - return XXH32_avalanche(hash); - - case 15: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 11: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 7: XXH_PROCESS4; - XXH_FALLTHROUGH; /* fallthrough */ - case 3: XXH_PROCESS1; - XXH_FALLTHROUGH; /* fallthrough */ - case 2: XXH_PROCESS1; - XXH_FALLTHROUGH; /* fallthrough */ - case 1: XXH_PROCESS1; - XXH_FALLTHROUGH; /* fallthrough */ - case 0: return XXH32_avalanche(hash); - } - XXH_ASSERT(0); - return hash; /* reaching this point is deemed impossible */ + + case 13: + XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 9: + XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 5: + XXH_PROCESS4; + XXH_PROCESS1; + return XXH32_avalanche(hash); + + case 14: + XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 10: + XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 6: + XXH_PROCESS4; + XXH_PROCESS1; + XXH_PROCESS1; + return XXH32_avalanche(hash); + + case 15: + XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 11: + XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 7: + XXH_PROCESS4; + XXH_FALLTHROUGH; /* fallthrough */ + case 3: + XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 2: + XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 1: + XXH_PROCESS1; + XXH_FALLTHROUGH; /* fallthrough */ + case 0: + return 
XXH32_avalanche(hash); + } + + XXH_ASSERT(0); + return hash; /* reaching this point is deemed impossible */ + + } + } -#ifdef XXH_OLD_NAMES -# define PROCESS1 XXH_PROCESS1 -# define PROCESS4 XXH_PROCESS4 -#else -# undef XXH_PROCESS1 -# undef XXH_PROCESS4 -#endif + #ifdef XXH_OLD_NAMES + #define PROCESS1 XXH_PROCESS1 + #define PROCESS4 XXH_PROCESS4 + #else + #undef XXH_PROCESS1 + #undef XXH_PROCESS4 + #endif /*! * @internal @@ -3028,372 +3289,459 @@ XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) * @param align Whether @p input is aligned. * @return The calculated hash. */ -XXH_FORCE_INLINE XXH_PUREF xxh_u32 -XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) -{ - xxh_u32 h32; - - if (input==NULL) XXH_ASSERT(len == 0); - - if (len>=16) { - const xxh_u8* const bEnd = input + len; - const xxh_u8* const limit = bEnd - 15; - xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; - xxh_u32 v2 = seed + XXH_PRIME32_2; - xxh_u32 v3 = seed + 0; - xxh_u32 v4 = seed - XXH_PRIME32_1; - - do { - v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4; - v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4; - v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4; - v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4; - } while (input < limit); - - h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) - + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); - } else { - h32 = seed + XXH_PRIME32_5; - } +XXH_FORCE_INLINE XXH_PUREF xxh_u32 XXH32_endian_align(const xxh_u8 *input, + size_t len, xxh_u32 seed, + XXH_alignment align) { - h32 += (xxh_u32)len; + xxh_u32 h32; - return XXH32_finalize(h32, input, len&15, align); -} + if (input == NULL) XXH_ASSERT(len == 0); -/*! @ingroup XXH32_family */ -XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed) -{ -#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH32_state_t state; - XXH32_reset(&state, seed); - XXH32_update(&state, (const xxh_u8*)input, len); - return XXH32_digest(&state); -#else - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ - return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); - } } - - return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); -#endif -} + if (len >= 16) { + const xxh_u8 *const bEnd = input + len; + const xxh_u8 *const limit = bEnd - 15; + xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + xxh_u32 v2 = seed + XXH_PRIME32_2; + xxh_u32 v3 = seed + 0; + xxh_u32 v4 = seed - XXH_PRIME32_1; + do { -/******* Hash streaming *******/ -#ifndef XXH_NO_STREAM -/*! @ingroup XXH32_family */ -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) -{ - return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); -} -/*! @ingroup XXH32_family */ -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} + v1 = XXH32_round(v1, XXH_get32bits(input)); + input += 4; + v2 = XXH32_round(v2, XXH_get32bits(input)); + input += 4; + v3 = XXH32_round(v3, XXH_get32bits(input)); + input += 4; + v4 = XXH32_round(v4, XXH_get32bits(input)); + input += 4; -/*! @ingroup XXH32_family */ -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) -{ - XXH_memcpy(dstState, srcState, sizeof(*dstState)); -} + } while (input < limit); -/*! 
@ingroup XXH32_family */ -XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed) -{ - XXH_ASSERT(statePtr != NULL); - memset(statePtr, 0, sizeof(*statePtr)); - statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2; - statePtr->v[1] = seed + XXH_PRIME32_2; - statePtr->v[2] = seed + 0; - statePtr->v[3] = seed - XXH_PRIME32_1; - return XXH_OK; -} + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + + XXH_rotl32(v4, 18); + } else { -/*! @ingroup XXH32_family */ -XXH_PUBLIC_API XXH_errorcode -XXH32_update(XXH32_state_t* state, const void* input, size_t len) -{ - if (input==NULL) { - XXH_ASSERT(len == 0); - return XXH_OK; - } + h32 = seed + XXH_PRIME32_5; - { const xxh_u8* p = (const xxh_u8*)input; - const xxh_u8* const bEnd = p + len; + } - state->total_len_32 += (XXH32_hash_t)len; - state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16)); + h32 += (xxh_u32)len; - if (state->memsize + len < 16) { /* fill in tmp buffer */ - XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len); - state->memsize += (XXH32_hash_t)len; - return XXH_OK; - } + return XXH32_finalize(h32, input, len & 15, align); - if (state->memsize) { /* some data left from previous update */ - XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize); - { const xxh_u32* p32 = state->mem32; - state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++; - state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++; - state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++; - state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32)); - } - p += 16-state->memsize; - state->memsize = 0; - } +} - if (p <= bEnd-16) { - const xxh_u8* const limit = bEnd - 16; +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32(const void *input, size_t len, + XXH32_hash_t seed) { + + #if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 + /* Simple version, good for code maintenance, but unfortunately slow for small + * inputs */ + XXH32_state_t state; + XXH32_reset(&state, seed); + XXH32_update(&state, (const xxh_u8 *)input, len); + return XXH32_digest(&state); + #else + if (XXH_FORCE_ALIGN_CHECK) { + + if ((((size_t)input) & 3) == + 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + return XXH32_endian_align((const xxh_u8 *)input, len, seed, XXH_aligned); - do { - state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4; - state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4; - state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4; - state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4; - } while (p<=limit); + } - } + } - if (p < bEnd) { - XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - } + return XXH32_endian_align((const xxh_u8 *)input, len, seed, XXH_unaligned); + #endif - return XXH_OK; } + /******* Hash streaming *******/ + #ifndef XXH_NO_STREAM +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void) { + + return (XXH32_state_t *)XXH_malloc(sizeof(XXH32_state_t)); + +} /*! @ingroup XXH32_family */ -XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state) -{ - xxh_u32 h32; +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr) { + + XXH_free(statePtr); + return XXH_OK; + +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dstState, + const XXH32_state_t *srcState) { + + XXH_memcpy(dstState, srcState, sizeof(*dstState)); + +} + +/*! 
@ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr, + XXH32_hash_t seed) { + + XXH_ASSERT(statePtr != NULL); + memset(statePtr, 0, sizeof(*statePtr)); + statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + statePtr->v[1] = seed + XXH_PRIME32_2; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - XXH_PRIME32_1; + return XXH_OK; + +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *state, + const void *input, size_t len) { + + if (input == NULL) { + + XXH_ASSERT(len == 0); + return XXH_OK; + + } + + { + + const xxh_u8 *p = (const xxh_u8 *)input; + const xxh_u8 *const bEnd = p + len; + + state->total_len_32 += (XXH32_hash_t)len; + state->large_len |= + (XXH32_hash_t)((len >= 16) | (state->total_len_32 >= 16)); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((xxh_u8 *)(state->mem32) + state->memsize, input, len); + state->memsize += (XXH32_hash_t)len; + return XXH_OK; + + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((xxh_u8 *)(state->mem32) + state->memsize, input, + 16 - state->memsize); + { + + const xxh_u32 *p32 = state->mem32; + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); + p32++; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); + p32++; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); + p32++; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32)); + + } + + p += 16 - state->memsize; + state->memsize = 0; - if (state->large_len) { - h32 = XXH_rotl32(state->v[0], 1) - + XXH_rotl32(state->v[1], 7) - + XXH_rotl32(state->v[2], 12) - + XXH_rotl32(state->v[3], 18); - } else { - h32 = state->v[2] /* == seed */ + XXH_PRIME32_5; } - h32 += state->total_len_32; + if (p <= bEnd - 16) { + + const xxh_u8 *const limit = bEnd - 16; + + do { + + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); + p += 4; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); + p += 4; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); + p += 4; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); + p += 4; + + } while (p <= limit); + + } + + if (p < bEnd) { + + XXH_memcpy(state->mem32, p, (size_t)(bEnd - p)); + state->memsize = (unsigned)(bEnd - p); + + } + + } + + return XXH_OK; + +} + +/*! @ingroup XXH32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t *state) { + + xxh_u32 h32; + + if (state->large_len) { + + h32 = XXH_rotl32(state->v[0], 1) + XXH_rotl32(state->v[1], 7) + + XXH_rotl32(state->v[2], 12) + XXH_rotl32(state->v[3], 18); + + } else { + + h32 = state->v[2] /* == seed */ + XXH_PRIME32_5; + + } + + h32 += state->total_len_32; + + return XXH32_finalize(h32, (const xxh_u8 *)state->mem32, state->memsize, + XXH_aligned); - return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned); } -#endif /* !XXH_NO_STREAM */ + + #endif /* !XXH_NO_STREAM */ /******* Canonical representation *******/ /*! @ingroup XXH32_family */ -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); - XXH_memcpy(dst, &hash, sizeof(*dst)); +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst, + XXH32_hash_t hash) { + + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + XXH_memcpy(dst, &hash, sizeof(*dst)); + } + /*! 
@ingroup XXH32_family */ -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) -{ - return XXH_readBE32(src); -} +XXH_PUBLIC_API XXH32_hash_t +XXH32_hashFromCanonical(const XXH32_canonical_t *src) { + + return XXH_readBE32(src); +} -#ifndef XXH_NO_LONG_LONG + #ifndef XXH_NO_LONG_LONG /* ******************************************************************* -* 64-bit hash functions -*********************************************************************/ + * 64-bit hash functions + *********************************************************************/ /*! * @} * @ingroup impl * @{ + */ /******* Memory access *******/ typedef XXH64_hash_t xxh_u64; -#ifdef XXH_OLD_NAMES -# define U64 xxh_u64 -#endif + #ifdef XXH_OLD_NAMES + #define U64 xxh_u64 + #endif -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) -/* - * Manual byteshift. Best for old compilers which don't inline memcpy. - * We actually directly use XXH_readLE64 and XXH_readBE64. - */ -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 3)) + /* + * Manual byteshift. Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE64 and XXH_readBE64. + */ + #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory + * access in hardware */ +static xxh_u64 XXH_read64(const void *memPtr) { + + return *(const xxh_u64 *)memPtr; -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static xxh_u64 XXH_read64(const void* memPtr) -{ - return *(const xxh_u64*) memPtr; } -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1)) + + /* + * __attribute__((aligned(1))) is supported by gcc and clang. Originally + * the documentation claimed that it only increased the alignment, but + * actually it can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. + */ + #ifdef XXH_OLD_NAMES +typedef union { + + xxh_u32 u32; + xxh_u64 u64; + +} __attribute__((packed)) unalign64; + + #endif +static xxh_u64 XXH_read64(const void *ptr) { + + typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64; + return *((const xxh_unalign64 *)ptr); -/* - * __attribute__((aligned(1))) is supported by gcc and clang. Originally the - * documentation claimed that it only increased the alignment, but actually it - * can decrease it on gcc, clang, and icc: - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, - * https://gcc.godbolt.org/z/xYez1j67Y. - */ -#ifdef XXH_OLD_NAMES -typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64; -#endif -static xxh_u64 XXH_read64(const void* ptr) -{ - typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64; - return *((const xxh_unalign64*)ptr); } -#else + #else /* * Portable and safe solution. Generally efficient. 
- * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + * see: + * https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html */ -static xxh_u64 XXH_read64(const void* memPtr) -{ - xxh_u64 val; - XXH_memcpy(&val, memPtr, sizeof(val)); - return val; +static xxh_u64 XXH_read64(const void *memPtr) { + + xxh_u64 val; + XXH_memcpy(&val, memPtr, sizeof(val)); + return val; + } -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + #if defined(_MSC_VER) /* Visual Studio */ + #define XXH_swap64 _byteswap_uint64 + #elif XXH_GCC_VERSION >= 403 + #define XXH_swap64 __builtin_bswap64 + #else +static xxh_u64 XXH_swap64(xxh_u64 x) { + + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap64 _byteswap_uint64 -#elif XXH_GCC_VERSION >= 403 -# define XXH_swap64 __builtin_bswap64 -#else -static xxh_u64 XXH_swap64(xxh_u64 x) -{ - return ((x << 56) & 0xff00000000000000ULL) | - ((x << 40) & 0x00ff000000000000ULL) | - ((x << 24) & 0x0000ff0000000000ULL) | - ((x << 8) & 0x000000ff00000000ULL) | - ((x >> 8) & 0x00000000ff000000ULL) | - ((x >> 24) & 0x0000000000ff0000ULL) | - ((x >> 40) & 0x000000000000ff00ULL) | - ((x >> 56) & 0x00000000000000ffULL); } -#endif + #endif -/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */ -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + /* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */ + #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 3)) + +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void *memPtr) { + + const xxh_u8 *bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] | ((xxh_u64)bytePtr[1] << 8) | ((xxh_u64)bytePtr[2] << 16) | + ((xxh_u64)bytePtr[3] << 24) | ((xxh_u64)bytePtr[4] << 32) | + ((xxh_u64)bytePtr[5] << 40) | ((xxh_u64)bytePtr[6] << 48) | + ((xxh_u64)bytePtr[7] << 56); -XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr) -{ - const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; - return bytePtr[0] - | ((xxh_u64)bytePtr[1] << 8) - | ((xxh_u64)bytePtr[2] << 16) - | ((xxh_u64)bytePtr[3] << 24) - | ((xxh_u64)bytePtr[4] << 32) - | ((xxh_u64)bytePtr[5] << 40) - | ((xxh_u64)bytePtr[6] << 48) - | ((xxh_u64)bytePtr[7] << 56); -} - -XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr) -{ - const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; - return bytePtr[7] - | ((xxh_u64)bytePtr[6] << 8) - | ((xxh_u64)bytePtr[5] << 16) - | ((xxh_u64)bytePtr[4] << 24) - | ((xxh_u64)bytePtr[3] << 32) - | ((xxh_u64)bytePtr[2] << 40) - | ((xxh_u64)bytePtr[1] << 48) - | ((xxh_u64)bytePtr[0] << 56); -} - -#else -XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); } -static xxh_u64 XXH_readBE64(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void *memPtr) { + + const xxh_u8 *bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[7] | ((xxh_u64)bytePtr[6] << 8) | ((xxh_u64)bytePtr[5] << 16) | + ((xxh_u64)bytePtr[4] << 24) | ((xxh_u64)bytePtr[3] << 32) | + ((xxh_u64)bytePtr[2] << 40) | ((xxh_u64)bytePtr[1] << 48) | + ((xxh_u64)bytePtr[0] << 56); + } -#endif -XXH_FORCE_INLINE xxh_u64 -XXH_readLE64_align(const void* ptr, XXH_alignment align) -{ - if (align==XXH_unaligned) - return XXH_readLE64(ptr); - else - return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr); + #else +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void *ptr) { + + return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); + } +static xxh_u64 XXH_readBE64(const void *ptr) { -/******* xxh64 *******/ -/*! - * @} - * @defgroup XXH64_impl XXH64 implementation - * @ingroup impl - * - * Details on the XXH64 implementation. - * @{ - */ -/* #define rather that static const, to be used as initializers */ -#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */ -#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */ -#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */ -#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */ -#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */ - -#ifdef XXH_OLD_NAMES -# define PRIME64_1 XXH_PRIME64_1 -# define PRIME64_2 XXH_PRIME64_2 -# define PRIME64_3 XXH_PRIME64_3 -# define PRIME64_4 XXH_PRIME64_4 -# define PRIME64_5 XXH_PRIME64_5 -#endif + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); + +} + + #endif + +XXH_FORCE_INLINE xxh_u64 XXH_readLE64_align(const void *ptr, + XXH_alignment align) { + + if (align == XXH_unaligned) + return XXH_readLE64(ptr); + else + return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64 *)ptr + : XXH_swap64(*(const xxh_u64 *)ptr); + +} + + /******* xxh64 *******/ + /*! + * @} + * @defgroup XXH64_impl XXH64 implementation + * @ingroup impl + * + * Details on the XXH64 implementation. + * @{ + + */ + /* #define rather that static const, to be used as initializers */ + #define XXH_PRIME64_1 \ + 0x9E3779B185EBCA87ULL /*!< \ + 0b1001111000110111011110011011000110000101111010111100101010000111 \ + */ + #define XXH_PRIME64_2 \ + 0xC2B2AE3D27D4EB4FULL /*!< \ + 0b1100001010110010101011100011110100100111110101001110101101001111 \ + */ + #define XXH_PRIME64_3 \ + 0x165667B19E3779F9ULL /*!< \ + 0b0001011001010110011001111011000110011110001101110111100111111001 \ + */ + #define XXH_PRIME64_4 \ + 0x85EBCA77C2B2AE63ULL /*!< \ + 0b1000010111101011110010100111011111000010101100101010111001100011 \ + */ + #define XXH_PRIME64_5 \ + 0x27D4EB2F165667C5ULL /*!< \ + 0b0010011111010100111010110010111100010110010101100110011111000101 \ + */ + + #ifdef XXH_OLD_NAMES + #define PRIME64_1 XXH_PRIME64_1 + #define PRIME64_2 XXH_PRIME64_2 + #define PRIME64_3 XXH_PRIME64_3 + #define PRIME64_4 XXH_PRIME64_4 + #define PRIME64_5 XXH_PRIME64_5 + #endif /*! 
@copydoc XXH32_round */ -static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) -{ - acc += input * XXH_PRIME64_2; - acc = XXH_rotl64(acc, 31); - acc *= XXH_PRIME64_1; - return acc; +static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) { + + acc += input * XXH_PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= XXH_PRIME64_1; + return acc; + } -static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) -{ - val = XXH64_round(0, val); - acc ^= val; - acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4; - return acc; +static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) { + + val = XXH64_round(0, val); + acc ^= val; + acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4; + return acc; + } /*! @copydoc XXH32_avalanche */ -static xxh_u64 XXH64_avalanche(xxh_u64 hash) -{ - hash ^= hash >> 33; - hash *= XXH_PRIME64_2; - hash ^= hash >> 29; - hash *= XXH_PRIME64_3; - hash ^= hash >> 32; - return hash; -} +static xxh_u64 XXH64_avalanche(xxh_u64 hash) { + + hash ^= hash >> 33; + hash *= XXH_PRIME64_2; + hash ^= hash >> 29; + hash *= XXH_PRIME64_3; + hash ^= hash >> 32; + return hash; +} -#define XXH_get64bits(p) XXH_readLE64_align(p, align) + #define XXH_get64bits(p) XXH_readLE64_align(p, align) /*! * @internal @@ -3410,41 +3758,51 @@ static xxh_u64 XXH64_avalanche(xxh_u64 hash) * @return The finalized hash * @see XXH32_finalize(). */ -static XXH_PUREF xxh_u64 -XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) -{ - if (ptr==NULL) XXH_ASSERT(len == 0); - len &= 31; - while (len >= 8) { - xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); - ptr += 8; - hash ^= k1; - hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4; - len -= 8; - } - if (len >= 4) { - hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; - ptr += 4; - hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; - len -= 4; - } - while (len > 0) { - hash ^= (*ptr++) * XXH_PRIME64_5; - hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1; - --len; - } - return XXH64_avalanche(hash); +static XXH_PUREF xxh_u64 XXH64_finalize(xxh_u64 hash, const xxh_u8 *ptr, + size_t len, XXH_alignment align) { + + if (ptr == NULL) XXH_ASSERT(len == 0); + len &= 31; + while (len >= 8) { + + xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); + ptr += 8; + hash ^= k1; + hash = XXH_rotl64(hash, 27) * XXH_PRIME64_1 + XXH_PRIME64_4; + len -= 8; + + } + + if (len >= 4) { + + hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; + ptr += 4; + hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; + len -= 4; + + } + + while (len > 0) { + + hash ^= (*ptr++) * XXH_PRIME64_5; + hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1; + --len; + + } + + return XXH64_avalanche(hash); + } -#ifdef XXH_OLD_NAMES -# define PROCESS1_64 XXH_PROCESS1_64 -# define PROCESS4_64 XXH_PROCESS4_64 -# define PROCESS8_64 XXH_PROCESS8_64 -#else -# undef XXH_PROCESS1_64 -# undef XXH_PROCESS4_64 -# undef XXH_PROCESS8_64 -#endif + #ifdef XXH_OLD_NAMES + #define PROCESS1_64 XXH_PROCESS1_64 + #define PROCESS4_64 XXH_PROCESS4_64 + #define PROCESS8_64 XXH_PROCESS8_64 + #else + #undef XXH_PROCESS1_64 + #undef XXH_PROCESS4_64 + #undef XXH_PROCESS8_64 + #endif /*! * @internal @@ -3454,349 +3812,416 @@ XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align) * @param align Whether @p input is aligned. * @return The calculated hash. 
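 *
 * Editor's note (illustration, not upstream text): inputs of 32 bytes or more
 * are consumed as four independent 8-byte lanes (v1..v4) via XXH64_round()
 * and XXH64_mergeRound(); callers never see this detail and simply use the
 * public entry point, e.g.
 * @code{.c}
 *   #include "xxhash.h"
 *   XXH64_hash_t hash64_bytes(const void *p, size_t n) {
 *     return XXH64(p, n, 0);  // seed = 0; alignment is handled internally
 *   }
 * @endcode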
*/ -XXH_FORCE_INLINE XXH_PUREF xxh_u64 -XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align) -{ - xxh_u64 h64; - if (input==NULL) XXH_ASSERT(len == 0); - - if (len>=32) { - const xxh_u8* const bEnd = input + len; - const xxh_u8* const limit = bEnd - 31; - xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; - xxh_u64 v2 = seed + XXH_PRIME64_2; - xxh_u64 v3 = seed + 0; - xxh_u64 v4 = seed - XXH_PRIME64_1; - - do { - v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8; - v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8; - v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8; - v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8; - } while (input= 32) { - return XXH64_finalize(h64, input, len, align); -} + const xxh_u8 *const bEnd = input + len; + const xxh_u8 *const limit = bEnd - 31; + xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; + xxh_u64 v2 = seed + XXH_PRIME64_2; + xxh_u64 v3 = seed + 0; + xxh_u64 v4 = seed - XXH_PRIME64_1; + + do { + + v1 = XXH64_round(v1, XXH_get64bits(input)); + input += 8; + v2 = XXH64_round(v2, XXH_get64bits(input)); + input += 8; + v3 = XXH64_round(v3, XXH_get64bits(input)); + input += 8; + v4 = XXH64_round(v4, XXH_get64bits(input)); + input += 8; + + } while (input < limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + + h64 = seed + XXH_PRIME64_5; + + } + + h64 += (xxh_u64)len; + return XXH64_finalize(h64, input, len, align); + +} /*! @ingroup XXH64_family */ -XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) -{ -#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH64_state_t state; - XXH64_reset(&state, seed); - XXH64_update(&state, (const xxh_u8*)input, len); - return XXH64_digest(&state); -#else - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ - return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); - } } - - return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); +XXH_PUBLIC_API XXH64_hash_t XXH64(XXH_NOESCAPE const void *input, size_t len, + XXH64_hash_t seed) { + + #if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 + /* Simple version, good for code maintenance, but unfortunately slow for small + * inputs */ + XXH64_state_t state; + XXH64_reset(&state, seed); + XXH64_update(&state, (const xxh_u8 *)input, len); + return XXH64_digest(&state); + #else + if (XXH_FORCE_ALIGN_CHECK) { + + if ((((size_t)input) & 7) == + 0) { /* Input is aligned, let's leverage the speed advantage */ + return XXH64_endian_align((const xxh_u8 *)input, len, seed, XXH_aligned); + + } + + } + + return XXH64_endian_align((const xxh_u8 *)input, len, seed, XXH_unaligned); + + #endif -#endif } -/******* Hash Streaming *******/ -#ifndef XXH_NO_STREAM + /******* Hash Streaming *******/ + #ifndef XXH_NO_STREAM /*! @ingroup XXH64_family*/ -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) -{ - return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void) { + + return (XXH64_state_t *)XXH_malloc(sizeof(XXH64_state_t)); + } + /*! 
@ingroup XXH64_family */ -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr) { + + XXH_free(statePtr); + return XXH_OK; + } /*! @ingroup XXH64_family */ -XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState) -{ - XXH_memcpy(dstState, srcState, sizeof(*dstState)); +XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t *dstState, + const XXH64_state_t *srcState) { + + XXH_memcpy(dstState, srcState, sizeof(*dstState)); + } /*! @ingroup XXH64_family */ -XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed) -{ - XXH_ASSERT(statePtr != NULL); - memset(statePtr, 0, sizeof(*statePtr)); - statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2; - statePtr->v[1] = seed + XXH_PRIME64_2; - statePtr->v[2] = seed + 0; - statePtr->v[3] = seed - XXH_PRIME64_1; - return XXH_OK; +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t *statePtr, + XXH64_hash_t seed) { + + XXH_ASSERT(statePtr != NULL); + memset(statePtr, 0, sizeof(*statePtr)); + statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2; + statePtr->v[1] = seed + XXH_PRIME64_2; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - XXH_PRIME64_1; + return XXH_OK; + } /*! @ingroup XXH64_family */ -XXH_PUBLIC_API XXH_errorcode -XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len) -{ - if (input==NULL) { - XXH_ASSERT(len == 0); - return XXH_OK; - } +XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH_NOESCAPE XXH64_state_t *state, + XXH_NOESCAPE const void *input, + size_t len) { - { const xxh_u8* p = (const xxh_u8*)input; - const xxh_u8* const bEnd = p + len; + if (input == NULL) { - state->total_len += len; + XXH_ASSERT(len == 0); + return XXH_OK; - if (state->memsize + len < 32) { /* fill in tmp buffer */ - XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len); - state->memsize += (xxh_u32)len; - return XXH_OK; - } + } - if (state->memsize) { /* tmp buffer is full */ - XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize); - state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0)); - state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1)); - state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2)); - state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3)); - p += 32 - state->memsize; - state->memsize = 0; - } + { - if (p+32 <= bEnd) { - const xxh_u8* const limit = bEnd - 32; + const xxh_u8 *p = (const xxh_u8 *)input; + const xxh_u8 *const bEnd = p + len; - do { - state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8; - state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8; - state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8; - state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8; - } while (p<=limit); + state->total_len += len; - } + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((xxh_u8 *)state->mem64) + state->memsize, input, len); + state->memsize += (xxh_u32)len; + return XXH_OK; - if (p < bEnd) { - XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } } - return XXH_OK; -} + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((xxh_u8 *)state->mem64) + state->memsize, input, + 32 - state->memsize); + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64 + 0)); + 
state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64 + 1)); + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64 + 2)); + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64 + 3)); + p += 32 - state->memsize; + state->memsize = 0; + } + + if (p + 32 <= bEnd) { + + const xxh_u8 *const limit = bEnd - 32; + + do { + + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); + p += 8; + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); + p += 8; + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); + p += 8; + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); + p += 8; + + } while (p <= limit); -/*! @ingroup XXH64_family */ -XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state) -{ - xxh_u64 h64; - - if (state->total_len >= 32) { - h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18); - h64 = XXH64_mergeRound(h64, state->v[0]); - h64 = XXH64_mergeRound(h64, state->v[1]); - h64 = XXH64_mergeRound(h64, state->v[2]); - h64 = XXH64_mergeRound(h64, state->v[3]); - } else { - h64 = state->v[2] /*seed*/ + XXH_PRIME64_5; } - h64 += (xxh_u64) state->total_len; + if (p < bEnd) { - return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned); -} -#endif /* !XXH_NO_STREAM */ + XXH_memcpy(state->mem64, p, (size_t)(bEnd - p)); + state->memsize = (unsigned)(bEnd - p); -/******* Canonical representation *******/ + } + + } + + return XXH_OK; -/*! @ingroup XXH64_family */ -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); - XXH_memcpy(dst, &hash, sizeof(*dst)); } /*! @ingroup XXH64_family */ -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src) -{ - return XXH_readBE64(src); +XXH_PUBLIC_API XXH64_hash_t +XXH64_digest(XXH_NOESCAPE const XXH64_state_t *state) { + + xxh_u64 h64; + + if (state->total_len >= 32) { + + h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18); + h64 = XXH64_mergeRound(h64, state->v[0]); + h64 = XXH64_mergeRound(h64, state->v[1]); + h64 = XXH64_mergeRound(h64, state->v[2]); + h64 = XXH64_mergeRound(h64, state->v[3]); + + } else { + + h64 = state->v[2] /*seed*/ + XXH_PRIME64_5; + + } + + h64 += (xxh_u64)state->total_len; + + return XXH64_finalize(h64, (const xxh_u8 *)state->mem64, + (size_t)state->total_len, XXH_aligned); + } -#ifndef XXH_NO_XXH3 + #endif /* !XXH_NO_STREAM */ -/* ********************************************************************* -* XXH3 -* New generation hash designed for speed on small keys and vectorization -************************************************************************ */ -/*! - * @} - * @defgroup XXH3_impl XXH3 implementation - * @ingroup impl - * @{ - */ +/******* Canonical representation *******/ -/* === Compiler specifics === */ +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t *dst, + XXH64_hash_t hash) { -#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. 
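Taken together, the one-shot XXH64() and the streaming XXH64_createState()/XXH64_reset()/XXH64_update()/XXH64_digest() entry points in the hunks above compute the same value for the same input and seed. A minimal standalone usage sketch (the split point is arbitrary and the helper name is made up; this is not code from xxhash.h itself):

#include <assert.h>
#include <stddef.h>

#include "xxhash.h"

/* Illustrative only: hash one buffer twice, once with the one-shot entry
   point and once through the streaming state, split at an arbitrary point.
   Both paths should agree for any len and seed. */
static XXH64_hash_t hash_both_ways(const void *buf, size_t len,
                                   XXH64_hash_t seed) {

  XXH64_hash_t one_shot = XXH64(buf, len, seed);

  XXH64_state_t *state = XXH64_createState();
  assert(state != NULL);
  XXH64_reset(state, seed);
  XXH64_update(state, buf, len / 2);                      /* first chunk */
  XXH64_update(state, (const char *)buf + len / 2,
               len - len / 2);                            /* remainder   */
  XXH64_hash_t streamed = XXH64_digest(state);
  XXH64_freeState(state);

  assert(one_shot == streamed);
  return one_shot;

}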
Tested with GCC 5.5 */ -# define XXH_RESTRICT /* disable */ -#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */ -# define XXH_RESTRICT restrict -#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \ - || (defined (__clang__)) \ - || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \ - || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300)) -/* - * There are a LOT more compilers that recognize __restrict but this - * covers the major ones. - */ -# define XXH_RESTRICT __restrict -#else -# define XXH_RESTRICT /* disable */ -#endif + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + XXH_memcpy(dst, &hash, sizeof(*dst)); -#if (defined(__GNUC__) && (__GNUC__ >= 3)) \ - || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \ - || defined(__clang__) -# define XXH_likely(x) __builtin_expect(x, 1) -# define XXH_unlikely(x) __builtin_expect(x, 0) -#else -# define XXH_likely(x) (x) -# define XXH_unlikely(x) (x) -#endif +} -#ifndef XXH_HAS_INCLUDE -# ifdef __has_include -/* - * Not defined as XXH_HAS_INCLUDE(x) (function-like) because - * this causes segfaults in Apple Clang 4.2 (on Mac OS X 10.7 Lion) - */ -# define XXH_HAS_INCLUDE __has_include -# else -# define XXH_HAS_INCLUDE(x) 0 -# endif -#endif +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t +XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t *src) { -#if defined(__GNUC__) || defined(__clang__) -# if defined(__ARM_FEATURE_SVE) -# include -# endif -# if defined(__ARM_NEON__) || defined(__ARM_NEON) \ - || (defined(_M_ARM) && _M_ARM >= 7) \ - || defined(_M_ARM64) || defined(_M_ARM64EC) \ - || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE()) /* WASM SIMD128 via SIMDe */ -# define inline __inline__ /* circumvent a clang bug */ -# include -# undef inline -# elif defined(__AVX2__) -# include -# elif defined(__SSE2__) -# include -# endif -#endif + return XXH_readBE64(src); -#if defined(_MSC_VER) -# include -#endif +} -/* - * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while - * remaining a true 64-bit/128-bit hash function. - * - * This is done by prioritizing a subset of 64-bit operations that can be - * emulated without too many steps on the average 32-bit machine. - * - * For example, these two lines seem similar, and run equally fast on 64-bit: - * - * xxh_u64 x; - * x ^= (x >> 47); // good - * x ^= (x >> 13); // bad - * - * However, to a 32-bit machine, there is a major difference. - * - * x ^= (x >> 47) looks like this: - * - * x.lo ^= (x.hi >> (47 - 32)); - * - * while x ^= (x >> 13) looks like this: - * - * // note: funnel shifts are not usually cheap. - * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13)); - * x.hi ^= (x.hi >> 13); - * - * The first one is significantly faster than the second, simply because the - * shift is larger than 32. This means: - * - All the bits we need are in the upper 32 bits, so we can ignore the lower - * 32 bits in the shift. - * - The shift result will always fit in the lower 32 bits, and therefore, - * we can ignore the upper 32 bits in the xor. - * - * Thanks to this optimization, XXH3 only requires these features to be efficient: - * - * - Usable unaligned access - * - A 32-bit or 64-bit ALU - * - If 32-bit, a decent ADC instruction - * - A 32 or 64-bit multiply with a 64-bit result - * - For the 128-bit variant, a decent byteswap helps short inputs. 
- * - * The first two are already required by XXH32, and almost all 32-bit and 64-bit - * platforms which can run XXH32 can run XXH3 efficiently. - * - * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one - * notable exception. - * - * First of all, Thumb-1 lacks support for the UMULL instruction which - * performs the important long multiply. This means numerous __aeabi_lmul - * calls. - * - * Second of all, the 8 functional registers are just not enough. - * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need - * Lo registers, and this shuffling results in thousands more MOVs than A32. - * - * A32 and T32 don't have this limitation. They can access all 14 registers, - * do a 32->64 multiply with UMULL, and the flexible operand allowing free - * shifts is helpful, too. - * - * Therefore, we do a quick sanity check. - * - * If compiling Thumb-1 for a target which supports ARM instructions, we will - * emit a warning, as it is not a "sane" platform to compile for. - * - * Usually, if this happens, it is because of an accident and you probably need - * to specify -march, as you likely meant to compile for a newer architecture. - * - * Credit: large sections of the vectorial and asm source code paths - * have been contributed by @easyaspi314 - */ -#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM) -# warning "XXH3 is highly inefficient without ARM or Thumb-2." -#endif + #ifndef XXH_NO_XXH3 -/* ========================================== - * Vectorization detection - * ========================================== */ + /* ********************************************************************* + * XXH3 + * New generation hash designed for speed on small keys and vectorization + ************************************************************************ */ + /*! + * @} + * @defgroup XXH3_impl XXH3 implementation + * @ingroup impl + * @{ -#ifdef XXH_DOXYGEN -/*! - * @ingroup tuning - * @brief Overrides the vectorization implementation chosen for XXH3. - * - * Can be defined to 0 to disable SIMD or any of the values mentioned in - * @ref XXH_VECTOR_TYPE. - * - * If this is not defined, it uses predefined macros to determine the best - * implementation. - */ -# define XXH_VECTOR XXH_SCALAR + */ + + /* === Compiler specifics === */ + + #if ((defined(sun) || defined(__sun)) && \ + __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested \ + with GCC 5.5 */ + #define XXH_RESTRICT /* disable */ + #elif defined(__STDC_VERSION__) && \ + __STDC_VERSION__ >= 199901L /* >= C99 */ + #define XXH_RESTRICT restrict + #elif (defined(__GNUC__) && \ + ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) || \ + (defined(__clang__)) || (defined(_MSC_VER) && (_MSC_VER >= 1400)) || \ + (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300)) + /* + * There are a LOT more compilers that recognize __restrict but this + * covers the major ones. 
+ */ + #define XXH_RESTRICT __restrict + #else + #define XXH_RESTRICT /* disable */ + #endif + + #if (defined(__GNUC__) && (__GNUC__ >= 3)) || \ + (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || \ + defined(__clang__) + #define XXH_likely(x) __builtin_expect(x, 1) + #define XXH_unlikely(x) __builtin_expect(x, 0) + #else + #define XXH_likely(x) (x) + #define XXH_unlikely(x) (x) + #endif + + #ifndef XXH_HAS_INCLUDE + #ifdef __has_include + /* + * Not defined as XXH_HAS_INCLUDE(x) (function-like) because + * this causes segfaults in Apple Clang 4.2 (on Mac OS X 10.7 Lion) + */ + #define XXH_HAS_INCLUDE __has_include + #else + #define XXH_HAS_INCLUDE(x) 0 + #endif + #endif + + #if defined(__GNUC__) || defined(__clang__) + #if defined(__ARM_FEATURE_SVE) + #include + #endif + #if defined(__ARM_NEON__) || defined(__ARM_NEON) || \ + (defined(_M_ARM) && _M_ARM >= 7) || defined(_M_ARM64) || \ + defined(_M_ARM64EC) || \ + (defined(__wasm_simd128__) && \ + XXH_HAS_INCLUDE()) /* WASM SIMD128 via SIMDe */ + #define inline __inline__ /* circumvent a clang bug */ + #include + #undef inline + #elif defined(__AVX2__) + #include + #elif defined(__SSE2__) + #include + #endif + #endif + + #if defined(_MSC_VER) + #include + #endif + + /* + * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while + * remaining a true 64-bit/128-bit hash function. + * + * This is done by prioritizing a subset of 64-bit operations that can be + * emulated without too many steps on the average 32-bit machine. + * + * For example, these two lines seem similar, and run equally fast on + * 64-bit: + * + * xxh_u64 x; + * x ^= (x >> 47); // good + * x ^= (x >> 13); // bad + * + * However, to a 32-bit machine, there is a major difference. + * + * x ^= (x >> 47) looks like this: + * + * x.lo ^= (x.hi >> (47 - 32)); + * + * while x ^= (x >> 13) looks like this: + * + * // note: funnel shifts are not usually cheap. + * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13)); + * x.hi ^= (x.hi >> 13); + * + * The first one is significantly faster than the second, simply because + * the shift is larger than 32. This means: + * - All the bits we need are in the upper 32 bits, so we can ignore the + * lower 32 bits in the shift. + * - The shift result will always fit in the lower 32 bits, and + * therefore, we can ignore the upper 32 bits in the xor. + * + * Thanks to this optimization, XXH3 only requires these features to be + * efficient: + * + * - Usable unaligned access + * - A 32-bit or 64-bit ALU + * - If 32-bit, a decent ADC instruction + * - A 32 or 64-bit multiply with a 64-bit result + * - For the 128-bit variant, a decent byteswap helps short inputs. + * + * The first two are already required by XXH32, and almost all 32-bit and + * 64-bit platforms which can run XXH32 can run XXH3 efficiently. + * + * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is + * one notable exception. + * + * First of all, Thumb-1 lacks support for the UMULL instruction which + * performs the important long multiply. This means numerous __aeabi_lmul + * calls. + * + * Second of all, the 8 functional registers are just not enough. + * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic + * need Lo registers, and this shuffling results in thousands more MOVs + * than A32. + * + * A32 and T32 don't have this limitation. They can access all 14 + * registers, do a 32->64 multiply with UMULL, and the flexible operand + * allowing free shifts is helpful, too. + * + * Therefore, we do a quick sanity check. 
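The 32-bit argument made in that comment (a shift of 47 lets a 32-bit target work on a single half, while a shift of 13 forces a funnel shift) can be made concrete. A small standalone check, assuming nothing beyond a hypothetical split of a 64-bit value into two 32-bit words:

#include <assert.h>
#include <stdint.h>

/* Hypothetical two-word view of a 64-bit value, as a 32-bit ALU sees it. */
typedef struct {
  uint32_t lo, hi;
} halves_t;

/* x ^= (x >> 47): the shifted value lives entirely in the high word and the
   xor only ever lands in the low word, so one 32-bit shift and one 32-bit
   xor are enough, which is exactly the point the comment above makes. */
static halves_t xorshift47_32bit(halves_t x) {

  x.lo ^= x.hi >> (47 - 32);
  return x;

}

int main(void) {

  uint64_t v = 0x0123456789ABCDEFULL;            /* arbitrary test value */
  halves_t h = {(uint32_t)v, (uint32_t)(v >> 32)};
  h = xorshift47_32bit(h);

  uint64_t ref = v ^ (v >> 47);                  /* the 64-bit original */
  assert((((uint64_t)h.hi << 32) | h.lo) == ref);
  return 0;

}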
+ * + * If compiling Thumb-1 for a target which supports ARM instructions, we + * will emit a warning, as it is not a "sane" platform to compile for. + * + * Usually, if this happens, it is because of an accident and you probably + * need to specify -march, as you likely meant to compile for a newer + * architecture. + * + * Credit: large sections of the vectorial and asm source code paths + * have been contributed by @easyaspi314 + */ + #if defined(__thumb__) && !defined(__thumb2__) && \ + defined(__ARM_ARCH_ISA_ARM) + #warning "XXH3 is highly inefficient without ARM or Thumb-2." + #endif + + /* ========================================== + * Vectorization detection + * ========================================== */ + + #ifdef XXH_DOXYGEN + /*! + * @ingroup tuning + * @brief Overrides the vectorization implementation chosen for XXH3. + * + * Can be defined to 0 to disable SIMD or any of the values mentioned in + * @ref XXH_VECTOR_TYPE. + * + * If this is not defined, it uses predefined macros to determine the + * best implementation. + */ + #define XXH_VECTOR XXH_SCALAR /*! * @ingroup tuning * @brief Possible values for @ref XXH_VECTOR. @@ -3807,491 +4232,560 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_can * internal macro XXH_X86DISPATCH overrides this. */ enum XXH_VECTOR_TYPE /* fake enum */ { - XXH_SCALAR = 0, /*!< Portable scalar version */ - XXH_SSE2 = 1, /*!< - * SSE2 for Pentium 4, Opteron, all x86_64. - * - * @note SSE2 is also guaranteed on Windows 10, macOS, and - * Android x86. - */ - XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */ - XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */ - XXH_NEON = 4, /*!< - * NEON for most ARMv7-A, all AArch64, and WASM SIMD128 - * via the SIMDeverywhere polyfill provided with the - * Emscripten SDK. - */ - XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */ - XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */ -}; -/*! - * @ingroup tuning - * @brief Selects the minimum alignment for XXH3's accumulators. - * - * When using SIMD, this should match the alignment required for said vector - * type, so, for example, 32 for AVX2. - * - * Default: Auto detected. - */ -# define XXH_ACC_ALIGN 8 -#endif - -/* Actual definition */ -#ifndef XXH_DOXYGEN -# define XXH_SCALAR 0 -# define XXH_SSE2 1 -# define XXH_AVX2 2 -# define XXH_AVX512 3 -# define XXH_NEON 4 -# define XXH_VSX 5 -# define XXH_SVE 6 -#endif - -#ifndef XXH_VECTOR /* can be defined on command line */ -# if defined(__ARM_FEATURE_SVE) -# define XXH_VECTOR XXH_SVE -# elif ( \ - defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \ - || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \ - || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE()) /* wasm simd128 via SIMDe */ \ - ) && ( \ - defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \ - || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \ - ) -# define XXH_VECTOR XXH_NEON -# elif defined(__AVX512F__) -# define XXH_VECTOR XXH_AVX512 -# elif defined(__AVX2__) -# define XXH_VECTOR XXH_AVX2 -# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) -# define XXH_VECTOR XXH_SSE2 -# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \ - || (defined(__s390x__) && defined(__VEC__)) \ - && defined(__GNUC__) /* TODO: IBM XL */ -# define XXH_VECTOR XXH_VSX -# else -# define XXH_VECTOR XXH_SCALAR -# endif -#endif - -/* __ARM_FEATURE_SVE is only supported by GCC & Clang. 
*/ -#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE) -# ifdef _MSC_VER -# pragma warning(once : 4606) -# else -# warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead." -# endif -# undef XXH_VECTOR -# define XXH_VECTOR XXH_SCALAR -#endif - -/* - * Controls the alignment of the accumulator, - * for compatibility with aligned vector loads, which are usually faster. - */ -#ifndef XXH_ACC_ALIGN -# if defined(XXH_X86DISPATCH) -# define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */ -# elif XXH_VECTOR == XXH_SCALAR /* scalar */ -# define XXH_ACC_ALIGN 8 -# elif XXH_VECTOR == XXH_SSE2 /* sse2 */ -# define XXH_ACC_ALIGN 16 -# elif XXH_VECTOR == XXH_AVX2 /* avx2 */ -# define XXH_ACC_ALIGN 32 -# elif XXH_VECTOR == XXH_NEON /* neon */ -# define XXH_ACC_ALIGN 16 -# elif XXH_VECTOR == XXH_VSX /* vsx */ -# define XXH_ACC_ALIGN 16 -# elif XXH_VECTOR == XXH_AVX512 /* avx512 */ -# define XXH_ACC_ALIGN 64 -# elif XXH_VECTOR == XXH_SVE /* sve */ -# define XXH_ACC_ALIGN 64 -# endif -#endif -#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \ - || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512 -# define XXH_SEC_ALIGN XXH_ACC_ALIGN -#elif XXH_VECTOR == XXH_SVE -# define XXH_SEC_ALIGN XXH_ACC_ALIGN -#else -# define XXH_SEC_ALIGN 8 -#endif - -#if defined(__GNUC__) || defined(__clang__) -# define XXH_ALIASING __attribute__((may_alias)) -#else -# define XXH_ALIASING /* nothing */ -#endif + XXH_SCALAR = 0, /*!< Portable scalar version */ + XXH_SSE2 = 1, /*!< + * SSE2 for Pentium 4, Opteron, all x86_64. + * + * @note SSE2 is also guaranteed on Windows 10, macOS, and + * Android x86. + */ + XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */ + XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */ + XXH_NEON = 4, /*!< + * NEON for most ARMv7-A, all AArch64, and WASM SIMD128 + * via the SIMDeverywhere polyfill provided with the + * Emscripten SDK. + */ + XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */ + XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */ -/* - * UGLY HACK: - * GCC usually generates the best code with -O3 for xxHash. - * - * However, when targeting AVX2, it is overzealous in its unrolling resulting - * in code roughly 3/4 the speed of Clang. - * - * There are other issues, such as GCC splitting _mm256_loadu_si256 into - * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which - * only applies to Sandy and Ivy Bridge... which don't even support AVX2. - * - * That is why when compiling the AVX2 version, it is recommended to use either - * -O2 -mavx2 -march=haswell - * or - * -O2 -mavx2 -mno-avx256-split-unaligned-load - * for decent performance, or to use Clang instead. - * - * Fortunately, we can control the first one with a pragma that forces GCC into - * -O2, but the other one we can't control without "failed to inline always - * inline function due to target mismatch" warnings. - */ -#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ - && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ - && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */ -# pragma GCC push_options -# pragma GCC optimize("-O2") -#endif +}; -#if XXH_VECTOR == XXH_NEON + /*! + * @ingroup tuning + * @brief Selects the minimum alignment for XXH3's accumulators. + * + * When using SIMD, this should match the alignment required for said + * vector type, so, for example, 32 for AVX2. + * + * Default: Auto detected. 
+ */ + #define XXH_ACC_ALIGN 8 + #endif + + /* Actual definition */ + #ifndef XXH_DOXYGEN + #define XXH_SCALAR 0 + #define XXH_SSE2 1 + #define XXH_AVX2 2 + #define XXH_AVX512 3 + #define XXH_NEON 4 + #define XXH_VSX 5 + #define XXH_SVE 6 + #endif + + #ifndef XXH_VECTOR /* can be defined on command line */ + #if defined(__ARM_FEATURE_SVE) + #define XXH_VECTOR XXH_SVE + #elif (defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \ + || defined(_M_ARM) || defined(_M_ARM64) || \ + defined(_M_ARM64EC) /* msvc */ \ + || (defined(__wasm_simd128__) && \ + XXH_HAS_INCLUDE()) /* wasm simd128 via SIMDe */ \ + ) && \ + (defined(_WIN32) || \ + defined(__LITTLE_ENDIAN__) /* little endian only */ \ + || (defined(__BYTE_ORDER__) && \ + __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) + #define XXH_VECTOR XXH_NEON + #elif defined(__AVX512F__) + #define XXH_VECTOR XXH_AVX512 + #elif defined(__AVX2__) + #define XXH_VECTOR XXH_AVX2 + #elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || \ + (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) + #define XXH_VECTOR XXH_SSE2 + #elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) || \ + (defined(__s390x__) && defined(__VEC__)) && \ + defined(__GNUC__) /* TODO: IBM XL */ + #define XXH_VECTOR XXH_VSX + #else + #define XXH_VECTOR XXH_SCALAR + #endif + #endif + + /* __ARM_FEATURE_SVE is only supported by GCC & Clang. */ + #if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE) + #ifdef _MSC_VER + #pragma warning(once : 4606) + #else + #warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead." + #endif + #undef XXH_VECTOR + #define XXH_VECTOR XXH_SCALAR + #endif + + /* + * Controls the alignment of the accumulator, + * for compatibility with aligned vector loads, which are usually faster. + */ + #ifndef XXH_ACC_ALIGN + #if defined(XXH_X86DISPATCH) + #define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */ + #elif XXH_VECTOR == XXH_SCALAR /* scalar */ + #define XXH_ACC_ALIGN 8 + #elif XXH_VECTOR == XXH_SSE2 /* sse2 */ + #define XXH_ACC_ALIGN 16 + #elif XXH_VECTOR == XXH_AVX2 /* avx2 */ + #define XXH_ACC_ALIGN 32 + #elif XXH_VECTOR == XXH_NEON /* neon */ + #define XXH_ACC_ALIGN 16 + #elif XXH_VECTOR == XXH_VSX /* vsx */ + #define XXH_ACC_ALIGN 16 + #elif XXH_VECTOR == XXH_AVX512 /* avx512 */ + #define XXH_ACC_ALIGN 64 + #elif XXH_VECTOR == XXH_SVE /* sve */ + #define XXH_ACC_ALIGN 64 + #endif + #endif + + #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 || \ + XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512 + #define XXH_SEC_ALIGN XXH_ACC_ALIGN + #elif XXH_VECTOR == XXH_SVE + #define XXH_SEC_ALIGN XXH_ACC_ALIGN + #else + #define XXH_SEC_ALIGN 8 + #endif + + #if defined(__GNUC__) || defined(__clang__) + #define XXH_ALIASING __attribute__((may_alias)) + #else + #define XXH_ALIASING /* nothing */ + #endif + + /* + * UGLY HACK: + * GCC usually generates the best code with -O3 for xxHash. + * + * However, when targeting AVX2, it is overzealous in its unrolling + * resulting in code roughly 3/4 the speed of Clang. + * + * There are other issues, such as GCC splitting _mm256_loadu_si256 into + * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization + * which only applies to Sandy and Ivy Bridge... which don't even support + * AVX2. + * + * That is why when compiling the AVX2 version, it is recommended to use + * either -O2 -mavx2 -march=haswell or -O2 -mavx2 + * -mno-avx256-split-unaligned-load for decent performance, or to use + * Clang instead. 
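Because the whole detection ladder sits under #ifndef XXH_VECTOR ("can be defined on command line"), a build can pin the implementation instead of relying on detection. A hedged sketch of how a consumer might force the scalar paths, for example when debugging a SIMD-specific difference:

/* Force the portable scalar code paths before pulling in the header.  The
   numeric value 0 matches the XXH_SCALAR definition above; passing
   -DXXH_VECTOR=0 on the compiler command line is equivalent. */
#define XXH_VECTOR 0
#include "xxhash.h"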
+ * + * Fortunately, we can control the first one with a pragma that forces GCC + * into -O2, but the other one we can't control without "failed to inline + * always inline function due to target mismatch" warnings. + */ + #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ + && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__OPTIMIZE__) && \ + XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */ + #pragma GCC push_options + #pragma GCC optimize("-O2") + #endif + + #if XXH_VECTOR == XXH_NEON /* - * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3 - * optimizes out the entire hashLong loop because of the aliasing violation. + * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC + * -O3 optimizes out the entire hashLong loop because of the aliasing violation. * * However, GCC is also inefficient at load-store optimization with vld1q/vst1q, * so the only option is to mark it as aliasing. */ typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING; -/*! - * @internal - * @brief `vld1q_u64` but faster and alignment-safe. - * - * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only - * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86). - * - * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it - * prohibits load-store optimizations. Therefore, a direct dereference is used. - * - * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe - * unaligned load. - */ -#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) -XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */ + /*! + * @internal + * @brief `vld1q_u64` but faster and alignment-safe. + * + * On AArch64, unaligned access is always safe, but on ARMv7-a, it is + * only *conditionally* safe (`vld1` has an alignment bit like + * `movdq[ua]` in x86). + * + * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so + * it prohibits load-store optimizations. Therefore, a direct + * dereference is used. + * + * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a + * safe unaligned load. + */ + #if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) +XXH_FORCE_INLINE uint64x2_t +XXH_vld1q_u64(void const *ptr) /* silence -Wcast-align */ { - return *(xxh_aliasing_uint64x2_t const *)ptr; + + return *(xxh_aliasing_uint64x2_t const *)ptr; + } -#else -XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) -{ - return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr)); + + #else +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const *ptr) { + + return vreinterpretq_u64_u8(vld1q_u8((uint8_t const *)ptr)); + } -#endif -/*! - * @internal - * @brief `vmlal_u32` on low and high halves of a vector. - * - * This is a workaround for AArch64 GCC < 11 which implemented arm_neon.h with - * inline assembly and were therefore incapable of merging the `vget_{low, high}_u32` - * with `vmlal_u32`. - */ -#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11 -XXH_FORCE_INLINE uint64x2_t -XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) -{ - /* Inline assembly is the only way */ - __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs)); - return acc; + #endif + + /*! + * @internal + * @brief `vmlal_u32` on low and high halves of a vector. 
+ * + * This is a workaround for AArch64 GCC < 11 which implemented + * arm_neon.h with inline assembly and were therefore incapable of + * merging the `vget_{low, high}_u32` with `vmlal_u32`. + */ + #if defined(__aarch64__) && defined(__GNUC__) && \ + !defined(__clang__) && __GNUC__ < 11 +XXH_FORCE_INLINE uint64x2_t XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, + uint32x4_t rhs) { + + /* Inline assembly is the only way */ + __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w"(acc) : "w"(lhs), "w"(rhs)); + return acc; + } -XXH_FORCE_INLINE uint64x2_t -XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) -{ - /* This intrinsic works as expected */ - return vmlal_high_u32(acc, lhs, rhs); + +XXH_FORCE_INLINE uint64x2_t XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, + uint32x4_t rhs) { + + /* This intrinsic works as expected */ + return vmlal_high_u32(acc, lhs, rhs); + } -#else + + #else /* Portable intrinsic versions */ -XXH_FORCE_INLINE uint64x2_t -XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) -{ - return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs)); +XXH_FORCE_INLINE uint64x2_t XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, + uint32x4_t rhs) { + + return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs)); + } + /*! @copydoc XXH_vmlal_low_u32 * Assume the compiler converts this to vmlal_high_u32 on aarch64 */ -XXH_FORCE_INLINE uint64x2_t -XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs) -{ - return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs)); -} -#endif +XXH_FORCE_INLINE uint64x2_t XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, + uint32x4_t rhs) { -/*! - * @ingroup tuning - * @brief Controls the NEON to scalar ratio for XXH3 - * - * This can be set to 2, 4, 6, or 8. - * - * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used. - * - * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those - * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU - * bandwidth. - * - * This is even more noticeable on the more advanced cores like the Cortex-A76 which - * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once. - * - * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes - * and 2 scalar lanes, which is chosen by default. - * - * This does not apply to Apple processors or 32-bit processors, which run better with - * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes. - * - * This change benefits CPUs with large micro-op buffers without negatively affecting - * most other CPUs: - * - * | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. | - * |:----------------------|:--------------------|----------:|-----------:|------:| - * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% | - * | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% | - * | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% | - * | Apple M1 | 4 NEON/8 micro-ops | 37.3 GB/s | 36.1 GB/s | ~-3% | - * - * It also seems to fix some bad codegen on GCC, making it almost as fast as clang. - * - * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the lanes meaning - * it effectively becomes worse 4. 
- * - * @see XXH3_accumulate_512_neon() - */ -# ifndef XXH3_NEON_LANES -# if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \ - && !defined(__APPLE__) && XXH_SIZE_OPT <= 0 -# define XXH3_NEON_LANES 6 -# else -# define XXH3_NEON_LANES XXH_ACC_NB -# endif -# endif -#endif /* XXH_VECTOR == XXH_NEON */ + return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs)); -/* - * VSX and Z Vector helpers. - * - * This is very messy, and any pull requests to clean this up are welcome. - * - * There are a lot of problems with supporting VSX and s390x, due to - * inconsistent intrinsics, spotty coverage, and multiple endiannesses. - */ -#if XXH_VECTOR == XXH_VSX -/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`, - * and `pixel`. This is a problem for obvious reasons. - * - * These keywords are unnecessary; the spec literally says they are - * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd - * after including the header. - * - * We use pragma push_macro/pop_macro to keep the namespace clean. */ -# pragma push_macro("bool") -# pragma push_macro("vector") -# pragma push_macro("pixel") -/* silence potential macro redefined warnings */ -# undef bool -# undef vector -# undef pixel +} -# if defined(__s390x__) -# include -# else -# include -# endif + #endif -/* Restore the original macro values, if applicable. */ -# pragma pop_macro("pixel") -# pragma pop_macro("vector") -# pragma pop_macro("bool") + /*! + * @ingroup tuning + * @brief Controls the NEON to scalar ratio for XXH3 + * + * This can be set to 2, 4, 6, or 8. + * + * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used. + * + * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but + * only 2 of those can be NEON. If you are only using NEON instructions, + * you are only using 2/3 of the CPU bandwidth. + * + * This is even more noticeable on the more advanced cores like the + * Cortex-A76 which can dispatch 8 micro-ops per cycle, but still only 2 + * NEON micro-ops at once. + * + * Therefore, to make the most out of the pipeline, it is beneficial to + * run 6 NEON lanes and 2 scalar lanes, which is chosen by default. + * + * This does not apply to Apple processors or 32-bit processors, which + * run better with full NEON. These will default to 8. Additionally, + * size-optimized builds run 8 lanes. + * + * This change benefits CPUs with large micro-op buffers without + * negatively affecting most other CPUs: + * + * | Chipset | Dispatch type | NEON only | 6:2 + * hybrid | Diff. | + * |:----------------------|:--------------------|----------:|-----------:|------:| + * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 + * GB/s | ~16% | | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 + * GB/s | 5.3 GB/s | ~5% | | Marvell PXA1928 (A53) | In-order + * dual-issue | 1.9 GB/s | 1.9 GB/s | 0% | | Apple M1 | 4 NEON/8 + * micro-ops | 37.3 GB/s | 36.1 GB/s | ~-3% | + * + * It also seems to fix some bad codegen on GCC, making it almost as + * fast as clang. + * + * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of + * the lanes meaning it effectively becomes worse 4. 
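XXH3_NEON_LANES itself is wrapped in #ifndef a little further down, so the 6:2 split described here is only the default. A short sketch, assuming a consumer that wants to benchmark the full-NEON variant on a core where the hybrid normally wins:

/* Experiment only: run all lanes on NEON instead of the default 6:2 hybrid
   (valid values per the comment above are 2, 4, 6 or 8). */
#define XXH3_NEON_LANES 8
#include "xxhash.h"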
+ * + * @see XXH3_accumulate_512_neon() + */ + #ifndef XXH3_NEON_LANES + #if (defined(__aarch64__) || defined(__arm64__) || \ + defined(_M_ARM64) || defined(_M_ARM64EC)) && \ + !defined(__APPLE__) && XXH_SIZE_OPT <= 0 + #define XXH3_NEON_LANES 6 + #else + #define XXH3_NEON_LANES XXH_ACC_NB + #endif + #endif + #endif /* XXH_VECTOR == XXH_NEON */ + + /* + * VSX and Z Vector helpers. + * + * This is very messy, and any pull requests to clean this up are welcome. + * + * There are a lot of problems with supporting VSX and s390x, due to + * inconsistent intrinsics, spotty coverage, and multiple endiannesses. + */ + #if XXH_VECTOR == XXH_VSX + /* Annoyingly, these headers _may_ define three macros: `bool`, + * `vector`, and `pixel`. This is a problem for obvious reasons. + * + * These keywords are unnecessary; the spec literally says they are + * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd + * after including the header. + * + * We use pragma push_macro/pop_macro to keep the namespace clean. */ + #pragma push_macro("bool") + #pragma push_macro("vector") + #pragma push_macro("pixel") + /* silence potential macro redefined warnings */ + #undef bool + #undef vector + #undef pixel + + #if defined(__s390x__) + #include + #else + #include + #endif + + /* Restore the original macro values, if applicable. */ + #pragma pop_macro("pixel") + #pragma pop_macro("vector") + #pragma pop_macro("bool") typedef __vector unsigned long long xxh_u64x2; -typedef __vector unsigned char xxh_u8x16; -typedef __vector unsigned xxh_u32x4; +typedef __vector unsigned char xxh_u8x16; +typedef __vector unsigned xxh_u32x4; /* - * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue. + * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing + * issue. */ typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING; -# ifndef XXH_VSX_BE -# if defined(__BIG_ENDIAN__) \ - || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) -# define XXH_VSX_BE 1 -# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__ -# warning "-maltivec=be is not recommended. Please use native endianness." -# define XXH_VSX_BE 1 -# else -# define XXH_VSX_BE 0 -# endif -# endif /* !defined(XXH_VSX_BE) */ - -# if XXH_VSX_BE -# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__)) -# define XXH_vec_revb vec_revb -# else + #ifndef XXH_VSX_BE + #if defined(__BIG_ENDIAN__) || \ + (defined(__BYTE_ORDER__) && \ + __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) + #define XXH_VSX_BE 1 + #elif defined(__VEC_ELEMENT_REG_ORDER__) && \ + __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__ + #warning \ + "-maltivec=be is not recommended. Please use native endianness." + #define XXH_VSX_BE 1 + #else + #define XXH_VSX_BE 0 + #endif + #endif /* !defined(XXH_VSX_BE) */ + + #if XXH_VSX_BE + #if defined(__POWER9_VECTOR__) || \ + (defined(__clang__) && defined(__s390x__)) + #define XXH_vec_revb vec_revb + #else /*! * A polyfill for POWER9's vec_revb(). 
*/ -XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) -{ - xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, - 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 }; - return vec_perm(val, val, vByteSwap); +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) { + + xxh_u8x16 const vByteSwap = {0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, + 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08}; + return vec_perm(val, val, vByteSwap); + } -# endif -# endif /* XXH_VSX_BE */ + + #endif + #endif /* XXH_VSX_BE */ /*! * Performs an unaligned vector load and byte swaps it on big endian. */ -XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) -{ - xxh_u64x2 ret; - XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2)); -# if XXH_VSX_BE - ret = XXH_vec_revb(ret); -# endif - return ret; -} +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) { -/* - * vec_mulo and vec_mule are very problematic intrinsics on PowerPC - * - * These intrinsics weren't added until GCC 8, despite existing for a while, - * and they are endian dependent. Also, their meaning swap depending on version. - * */ -# if defined(__s390x__) - /* s390x is always big endian, no issue on this platform */ -# define XXH_vec_mulo vec_mulo -# define XXH_vec_mule vec_mule -# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__) -/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */ - /* The IBM XL Compiler (which defined __clang__) only implements the vec_* operations */ -# define XXH_vec_mulo __builtin_altivec_vmulouw -# define XXH_vec_mule __builtin_altivec_vmuleuw -# else -/* gcc needs inline assembly */ -/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */ -XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b) -{ - xxh_u64x2 result; - __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); - return result; -} -XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) -{ - xxh_u64x2 result; - __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); - return result; -} -# endif /* XXH_vec_mulo, XXH_vec_mule */ -#endif /* XXH_VECTOR == XXH_VSX */ - -#if XXH_VECTOR == XXH_SVE -#define ACCRND(acc, offset) \ -do { \ - svuint64_t input_vec = svld1_u64(mask, xinput + offset); \ - svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \ - svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \ - svuint64_t swapped = svtbl_u64(input_vec, kSwap); \ - svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \ - svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \ - svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \ - acc = svadd_u64_x(mask, acc, mul); \ -} while (0) -#endif /* XXH_VECTOR == XXH_SVE */ - -/* prefetch - * can be disabled, by declaring XXH_NO_PREFETCH build macro */ -#if defined(XXH_NO_PREFETCH) -# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ -#else -# if XXH_SIZE_OPT >= 1 -# define XXH_PREFETCH(ptr) (void)(ptr) -# elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */ -# include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ -# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) -# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) -# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) -# else -# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ -# 
endif -#endif /* XXH_NO_PREFETCH */ + xxh_u64x2 ret; + XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2)); + #if XXH_VSX_BE + ret = XXH_vec_revb(ret); + #endif + return ret; +} -/* ========================================== - * XXH3 default settings - * ========================================== */ - -#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */ - -#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN) -# error "default keyset is not large enough" -#endif + /* + * vec_mulo and vec_mule are very problematic intrinsics on PowerPC + * + * These intrinsics weren't added until GCC 8, despite existing for a + * while, and they are endian dependent. Also, their meaning swap + * depending on version. + * */ + #if defined(__s390x__) + /* s390x is always big endian, no issue on this platform */ + #define XXH_vec_mulo vec_mulo + #define XXH_vec_mule vec_mule + #elif defined(__clang__) && \ + XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__) + /* Clang has a better way to control this, we can just use the builtin + * which doesn't swap. */ + /* The IBM XL Compiler (which defined __clang__) only implements the + * vec_* operations */ + #define XXH_vec_mulo __builtin_altivec_vmulouw + #define XXH_vec_mule __builtin_altivec_vmuleuw + #else +/* gcc needs inline assembly */ +/* Adapted from + * https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */ +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b) { + + xxh_u64x2 result; + __asm__("vmulouw %0, %1, %2" : "=v"(result) : "v"(a), "v"(b)); + return result; + +} + +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) { + + xxh_u64x2 result; + __asm__("vmuleuw %0, %1, %2" : "=v"(result) : "v"(a), "v"(b)); + return result; + +} + + #endif /* XXH_vec_mulo, XXH_vec_mule */ + #endif /* XXH_VECTOR == XXH_VSX */ + + #if XXH_VECTOR == XXH_SVE + #define ACCRND(acc, offset) \ + do { \ + \ + svuint64_t input_vec = svld1_u64(mask, xinput + offset); \ + svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \ + svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \ + svuint64_t swapped = svtbl_u64(input_vec, kSwap); \ + svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \ + svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \ + svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \ + acc = svadd_u64_x(mask, acc, mul); \ + \ + } while (0) + #endif /* XXH_VECTOR == XXH_SVE */ + + /* prefetch + * can be disabled, by declaring XXH_NO_PREFETCH build macro */ + #if defined(XXH_NO_PREFETCH) + #define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ + #else + #if XXH_SIZE_OPT >= 1 + #define XXH_PREFETCH(ptr) (void)(ptr) + #elif defined(_MSC_VER) && \ + (defined(_M_X64) || \ + defined( \ + _M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */ + #include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ + #define XXH_PREFETCH(ptr) \ + _mm_prefetch((const char *)(ptr), _MM_HINT_T0) + #elif defined(__GNUC__) && \ + ((__GNUC__ >= 4) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1))) + #define XXH_PREFETCH(ptr) \ + __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) + #else + #define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ + #endif + #endif /* XXH_NO_PREFETCH */ + + /* ========================================== + * XXH3 default settings + * ========================================== */ + + #define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */ + + #if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN) + #error "default keyset is not 
large enough" + #endif /*! Pseudorandom secret taken directly from FARSH. */ -XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { - 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, - 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, - 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, - 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, - 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, - 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, - 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, - 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, - 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, - 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, - 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, - 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, +XXH_ALIGN(64) +static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { + + 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, + 0xf7, 0x21, 0xad, 0x1c, 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, + 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, 0xcb, 0x79, 0xe6, 0x4e, + 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, + 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, + 0x81, 0x3a, 0x26, 0x4c, 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, + 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, 0x71, 0x64, 0x48, 0x97, + 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, + 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, + 0xc7, 0x0b, 0x4f, 0x1d, 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, + 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, 0xea, 0xc5, 0xac, 0x83, + 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, + 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, + 0x29, 0xd4, 0x68, 0x9e, 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, + 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, 0x45, 0xcb, 0x3a, 0x8f, + 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, + }; -static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */ -static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */ +static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< + 0b0001011001010110011001111001000110011110001101110111100111111001 + */ +static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< + 0b1001111110110010000111000110010100011110100110001101111100100101 + */ -#ifdef XXH_OLD_NAMES -# define kSecret XXH3_kSecret -#endif + #ifdef XXH_OLD_NAMES + #define kSecret XXH3_kSecret + #endif -#ifdef XXH_DOXYGEN + #ifdef XXH_DOXYGEN /*! * @brief Calculates a 32-bit to 64-bit long multiply. * * Implemented as a macro. 
* - * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't - * need to (but it shouldn't need to anyways, it is about 7 instructions to do - * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we - * use that instead of the normal method. + * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it + * doesn't need to (but it shouldn't need to anyways, it is about 7 instructions + * to do a 64x64 multiply...). Since we know that this will _always_ emit + * `MULL`, we use that instead of the normal method. * - * If you are compiling for platforms like Thumb-1 and don't have a better option, - * you may also want to write your own long multiply routine here. + * If you are compiling for platforms like Thumb-1 and don't have a better + * option, you may also want to write your own long multiply routine here. * * @param x, y Numbers to be multiplied * @return 64-bit product of the low 32 bits of @p x and @p y. */ -XXH_FORCE_INLINE xxh_u64 -XXH_mult32to64(xxh_u64 x, xxh_u64 y) -{ - return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); +XXH_FORCE_INLINE xxh_u64 XXH_mult32to64(xxh_u64 x, xxh_u64 y) { + + return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); + } -#elif defined(_MSC_VER) && defined(_M_IX86) -# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) -#else -/* - * Downcast + upcast is usually better than masking on older compilers like - * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers. - * - * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands - * and perform a full 64x64 multiply -- entirely redundant on 32-bit. - */ -# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) -#endif + + #elif defined(_MSC_VER) && defined(_M_IX86) + #define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) + #else + /* + * Downcast + upcast is usually better than masking on older compilers + * like GCC 4.2 (especially 32-bit ones), all without affecting newer + * compilers. + * + * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both + * operands and perform a full 64x64 multiply -- entirely redundant on + * 32-bit. + */ + #define XXH_mult32to64(x, y) \ + ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) + #endif /*! * @brief Calculates a 64->128-bit long multiply. @@ -4302,164 +4796,170 @@ XXH_mult32to64(xxh_u64 x, xxh_u64 y) * @param lhs , rhs The 64-bit integers to be multiplied * @return The 128-bit result represented in an @ref XXH128_hash_t. */ -static XXH128_hash_t -XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) -{ - /* - * GCC/Clang __uint128_t method. - * - * On most 64-bit targets, GCC and Clang define a __uint128_t type. - * This is usually the best way as it usually uses a native long 64-bit - * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. - * - * Usually. - * - * Despite being a 32-bit platform, Clang (and emscripten) define this type - * despite not having the arithmetic for it. This results in a laggy - * compiler builtin call which calculates a full 128-bit multiply. - * In that case it is best to use the portable one. - * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 - */ -#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \ - && defined(__SIZEOF_INT128__) \ - || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) +static XXH128_hash_t XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) { + + /* + * GCC/Clang __uint128_t method. 
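Whichever of the three variants is selected, the contract of XXH_mult32to64 is the same: keep only the low 32 bits of each operand and return their full 64-bit product. A standalone restatement with a couple of spot checks (it mirrors the portable fallback rather than using the header's macro):

#include <assert.h>
#include <stdint.h>

/* Mirrors the portable fallback: downcast both operands, multiply in 64-bit. */
static uint64_t mult32to64(uint64_t x, uint64_t y) {

  return (uint64_t)(uint32_t)x * (uint64_t)(uint32_t)y;

}

int main(void) {

  /* High halves are ignored: only 0xFFFFFFFF * 2 survives. */
  assert(mult32to64(0x12345678FFFFFFFFULL, 2) == 0x1FFFFFFFEULL);

  /* Largest case still fits: (2^32 - 1)^2 = 0xFFFFFFFE00000001. */
  assert(mult32to64(0xFFFFFFFFULL, 0xFFFFFFFFULL) == 0xFFFFFFFE00000001ULL);
  return 0;

}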
+ * + * On most 64-bit targets, GCC and Clang define a __uint128_t type. + * This is usually the best way as it usually uses a native long 64-bit + * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. + * + * Usually. + * + * Despite being a 32-bit platform, Clang (and emscripten) define this + * type despite not having the arithmetic for it. This results in a laggy + * compiler builtin call which calculates a full 128-bit multiply. + * In that case it is best to use the portable one. + * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 + */ + #if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) && \ + defined(__SIZEOF_INT128__) || \ + (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + + __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs; + XXH128_hash_t r128; + r128.low64 = (xxh_u64)(product); + r128.high64 = (xxh_u64)(product >> 64); + return r128; - __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs; - XXH128_hash_t r128; - r128.low64 = (xxh_u64)(product); - r128.high64 = (xxh_u64)(product >> 64); - return r128; - - /* - * MSVC for x64's _umul128 method. - * - * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct); - * - * This compiles to single operand MUL on x64. - */ -#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC) - -#ifndef _MSC_VER -# pragma intrinsic(_umul128) -#endif - xxh_u64 product_high; - xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); - XXH128_hash_t r128; - r128.low64 = product_low; - r128.high64 = product_high; - return r128; - - /* - * MSVC for ARM64's __umulh method. - * - * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method. - */ -#elif defined(_M_ARM64) || defined(_M_ARM64EC) - -#ifndef _MSC_VER -# pragma intrinsic(__umulh) -#endif - XXH128_hash_t r128; - r128.low64 = lhs * rhs; - r128.high64 = __umulh(lhs, rhs); - return r128; + /* + * MSVC for x64's _umul128 method. + * + * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 + * *HighProduct); + * + * This compiles to single operand MUL on x64. + */ + #elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC) + + #ifndef _MSC_VER + #pragma intrinsic(_umul128) + #endif + xxh_u64 product_high; + xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); + XXH128_hash_t r128; + r128.low64 = product_low; + r128.high64 = product_high; + return r128; -#else - /* - * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. - * - * This is a fast and simple grade school multiply, which is shown below - * with base 10 arithmetic instead of base 0x100000000. - * - * 9 3 // D2 lhs = 93 - * x 7 5 // D2 rhs = 75 - * ---------- - * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15 - * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45 - * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21 - * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63 - * --------- - * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27 - * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67 - * --------- - * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975 - * - * The reasons for adding the products like this are: - * 1. It avoids manual carry tracking. Just like how - * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX. - * This avoids a lot of complexity. - * - * 2. 
It hints for, and on Clang, compiles to, the powerful UMAAL - * instruction available in ARM's Digital Signal Processing extension - * in 32-bit ARMv6 and later, which is shown below: - * - * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) - * { - * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; - * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); - * *RdHi = (xxh_u32)(product >> 32); - * } - * - * This instruction was designed for efficient long multiplication, and - * allows this to be calculated in only 4 instructions at speeds - * comparable to some 64-bit ALUs. - * - * 3. It isn't terrible on other platforms. Usually this will be a couple - * of 32-bit ADD/ADCs. - */ + /* + * MSVC for ARM64's __umulh method. + * + * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t + * method. + */ + #elif defined(_M_ARM64) || defined(_M_ARM64EC) + + #ifndef _MSC_VER + #pragma intrinsic(__umulh) + #endif + XXH128_hash_t r128; + r128.low64 = lhs * rhs; + r128.high64 = __umulh(lhs, rhs); + return r128; + + #else + /* + * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. + * + * This is a fast and simple grade school multiply, which is shown below + * with base 10 arithmetic instead of base 0x100000000. + * + * 9 3 // D2 lhs = 93 + * x 7 5 // D2 rhs = 75 + * ---------- + * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15 + * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45 + * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21 + * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63 + * --------- + * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27 + * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67 + * --------- + * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975 + * + * The reasons for adding the products like this are: + * 1. It avoids manual carry tracking. Just like how + * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX. + * This avoids a lot of complexity. + * + * 2. It hints for, and on Clang, compiles to, the powerful UMAAL + * instruction available in ARM's Digital Signal Processing extension + * in 32-bit ARMv6 and later, which is shown below: + * + * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) + * { + + * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; + * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); + * *RdHi = (xxh_u32)(product >> 32); + * } + * + * This instruction was designed for efficient long multiplication, and + * allows this to be calculated in only 4 instructions at speeds + * comparable to some 64-bit ALUs. + * + * 3. It isn't terrible on other platforms. Usually this will be a couple + * of 32-bit ADD/ADCs. + */ + + /* First calculate all of the cross products. */ + xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); + xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); + xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); + xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); + + /* Now add the products together. These will never overflow. */ + xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; + xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; + xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); + + XXH128_hash_t r128; + r128.low64 = lower; + r128.high64 = upper; + return r128; + #endif - /* First calculate all of the cross products. 
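The scalar fallback above is exactly the grade-school multiply from the base-10 walkthrough, carried out in base 2^32. A self-contained sketch of the same arithmetic (helper and type names are mine, not xxHash's), checked against __uint128_t where that type exists:

    #include <assert.h>
    #include <stdint.h>

    typedef struct { uint64_t low64, high64; } u128_sketch;

    /* Grade-school 64x64->128 multiply in base 2^32, mirroring the scalar
     * path above; the 93 x 75 table in the comment is this same arithmetic
     * with base 10 in place of base 2^32. */
    static u128_sketch mult64to128_portable(uint64_t lhs, uint64_t rhs) {
        uint64_t const lo_lo = (lhs & 0xFFFFFFFF) * (rhs & 0xFFFFFFFF);
        uint64_t const hi_lo = (lhs >> 32)        * (rhs & 0xFFFFFFFF);
        uint64_t const lo_hi = (lhs & 0xFFFFFFFF) * (rhs >> 32);
        uint64_t const hi_hi = (lhs >> 32)        * (rhs >> 32);

        /* These sums cannot overflow, for the same reason (9*9) + 9 + 9 = 99
         * still fits in two digits. */
        uint64_t const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
        uint64_t const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
        uint64_t const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);

        u128_sketch r;
        r.low64  = lower;
        r.high64 = upper;
        return r;
    }

    int main(void) {
        uint64_t const a = 0x9E3779B185EBCA87ULL, b = 0xC2B2AE3D27D4EB4FULL;
        u128_sketch const r = mult64to128_portable(a, b);
    #if defined(__SIZEOF_INT128__)
        __uint128_t const ref = (__uint128_t)a * b;
        assert(r.low64 == (uint64_t)ref && r.high64 == (uint64_t)(ref >> 64));
    #endif
        (void)r;
        return 0;
    }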
*/ - xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); - xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); - xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); - xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); - - /* Now add the products together. These will never overflow. */ - xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; - xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; - xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); - - XXH128_hash_t r128; - r128.low64 = lower; - r128.high64 = upper; - return r128; -#endif } /*! * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it. * * The reason for the separate function is to prevent passing too many structs - * around by value. This will hopefully inline the multiply, but we don't force it. + * around by value. This will hopefully inline the multiply, but we don't force + * it. * * @param lhs , rhs The 64-bit integers to multiply * @return The low 64 bits of the product XOR'd by the high 64 bits. * @see XXH_mult64to128() */ -static xxh_u64 -XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) -{ - XXH128_hash_t product = XXH_mult64to128(lhs, rhs); - return product.low64 ^ product.high64; +static xxh_u64 XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) { + + XXH128_hash_t product = XXH_mult64to128(lhs, rhs); + return product.low64 ^ product.high64; + } /*! Seems to produce slightly better code on GCC for some reason. */ -XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) -{ - XXH_ASSERT(0 <= shift && shift < 64); - return v64 ^ (v64 >> shift); +XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) { + + XXH_ASSERT(0 <= shift && shift < 64); + return v64 ^ (v64 >> shift); + } /* * This is a fast avalanche stage, * suitable when input bits are already partially mixed */ -static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) -{ - h64 = XXH_xorshift64(h64, 37); - h64 *= PRIME_MX1; - h64 = XXH_xorshift64(h64, 32); - return h64; +static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) { + + h64 = XXH_xorshift64(h64, 37); + h64 *= PRIME_MX1; + h64 = XXH_xorshift64(h64, 32); + return h64; + } /* @@ -4467,16 +4967,16 @@ static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) * inspired by Pelle Evensen's rrmxmx * preferable when input has not been previously mixed */ -static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) -{ - /* this mix is inspired by Pelle Evensen's rrmxmx */ - h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); - h64 *= PRIME_MX2; - h64 ^= (h64 >> 35) + len ; - h64 *= PRIME_MX2; - return XXH_xorshift64(h64, 28); -} +static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) { + /* this mix is inspired by Pelle Evensen's rrmxmx */ + h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); + h64 *= PRIME_MX2; + h64 ^= (h64 >> 35) + len; + h64 *= PRIME_MX2; + return XXH_xorshift64(h64, 28); + +} /* ========================================== * Short keys @@ -4486,7 +4986,8 @@ static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) * favored lengths that were a multiple of 4 or 8. * * Instead of iterating over individual inputs, we use a set of single shot - * functions which piece together a range of lengths and operate in constant time. + * functions which piece together a range of lengths and operate in constant + * time. * * Additionally, the number of multiplies has been significantly reduced. This * reduces latency, especially when emulating 64-bit multiplies on 32-bit. 
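XXH3_avalanche and XXH3_rrmxmx in this hunk are plain xorshift/multiply finalizers. A sketch of the XXH3_avalanche shape, with the multiplier taken as a parameter because the PRIME_MX1 constant is defined elsewhere in this header:

    #include <stdint.h>

    /* Xorshift -> multiply -> xorshift, the pattern of XXH3_avalanche above.
     * `mult` stands in for the header's PRIME_MX1 constant. */
    static uint64_t avalanche_sketch(uint64_t h64, uint64_t mult) {
        h64 ^= h64 >> 37;   /* XXH_xorshift64(h64, 37) */
        h64 *= mult;
        h64 ^= h64 >> 32;   /* XXH_xorshift64(h64, 32) */
        return h64;
    }

The multiply spreads low bits upward and the shifts fold high bits back down, which is why the hunk reserves this lighter mixer for values whose bits are already partially mixed and uses the heavier rrmxmx variant for raw input.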
@@ -4511,70 +5012,100 @@ static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) * * This adds an extra layer of strength for custom secrets. */ -XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t -XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(1 <= len && len <= 3); - XXH_ASSERT(secret != NULL); - /* - * len = 1: combined = { input[0], 0x01, input[0], input[0] } - * len = 2: combined = { input[1], 0x02, input[0], input[1] } - * len = 3: combined = { input[2], 0x03, input[0], input[1] } - */ - { xxh_u8 const c1 = input[0]; - xxh_u8 const c2 = input[len >> 1]; - xxh_u8 const c3 = input[len - 1]; - xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) - | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); - xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; - xxh_u64 const keyed = (xxh_u64)combined ^ bitflip; - return XXH64_avalanche(keyed); - } +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_1to3_64b(const xxh_u8 *input, + size_t len, + const xxh_u8 *secret, + XXH64_hash_t seed) { + + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combined = { input[0], 0x01, input[0], input[0] } + * len = 2: combined = { input[1], 0x02, input[0], input[1] } + * len = 3: combined = { input[2], 0x03, input[0], input[1] } + */ + { + + xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) | + ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u64 const bitflip = + (XXH_readLE32(secret) ^ XXH_readLE32(secret + 4)) + seed; + xxh_u64 const keyed = (xxh_u64)combined ^ bitflip; + return XXH64_avalanche(keyed); + + } + } -XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t -XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(secret != NULL); - XXH_ASSERT(4 <= len && len <= 8); - seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; - { xxh_u32 const input1 = XXH_readLE32(input); - xxh_u32 const input2 = XXH_readLE32(input + len - 4); - xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed; - xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32); - xxh_u64 const keyed = input64 ^ bitflip; - return XXH3_rrmxmx(keyed, len); - } +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_4to8_64b(const xxh_u8 *input, + size_t len, + const xxh_u8 *secret, + XXH64_hash_t seed) { + + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { + + xxh_u32 const input1 = XXH_readLE32(input); + xxh_u32 const input2 = XXH_readLE32(input + len - 4); + xxh_u64 const bitflip = + (XXH_readLE64(secret + 8) ^ XXH_readLE64(secret + 16)) - seed; + xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32); + xxh_u64 const keyed = input64 ^ bitflip; + return XXH3_rrmxmx(keyed, len); + + } + } -XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t -XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(secret != NULL); - XXH_ASSERT(9 <= len && len <= 16); - { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed; - xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed; - xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1; - xxh_u64 const 
input_hi = XXH_readLE64(input + len - 8) ^ bitflip2; - xxh_u64 const acc = len - + XXH_swap64(input_lo) + input_hi - + XXH3_mul128_fold64(input_lo, input_hi); - return XXH3_avalanche(acc); - } +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_9to16_64b(const xxh_u8 *input, + size_t len, + const xxh_u8 *secret, + XXH64_hash_t seed) { + + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { + + xxh_u64 const bitflip1 = + (XXH_readLE64(secret + 24) ^ XXH_readLE64(secret + 32)) + seed; + xxh_u64 const bitflip2 = + (XXH_readLE64(secret + 40) ^ XXH_readLE64(secret + 48)) - seed; + xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1; + xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2; + xxh_u64 const acc = len + XXH_swap64(input_lo) + input_hi + + XXH3_mul128_fold64(input_lo, input_hi); + return XXH3_avalanche(acc); + + } + } -XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t -XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(len <= 16); - { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed); - if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed); - if (len) return XXH3_len_1to3_64b(input, len, secret, seed); - return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64))); - } +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_0to16_64b(const xxh_u8 *input, + size_t len, + const xxh_u8 *secret, + XXH64_hash_t seed) { + + XXH_ASSERT(len <= 16); + { + + if (XXH_likely(len > 8)) + return XXH3_len_9to16_64b(input, len, secret, seed); + if (XXH_likely(len >= 4)) + return XXH3_len_4to8_64b(input, len, secret, seed); + if (len) return XXH3_len_1to3_64b(input, len, secret, seed); + return XXH64_avalanche( + seed ^ (XXH_readLE64(secret + 56) ^ XXH_readLE64(secret + 64))); + + } + } /* @@ -4603,106 +5134,134 @@ XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_ * by this, although it is always a good idea to use a proper seed if you care * about strength. */ -XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input, - const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64) -{ -#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ - && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \ - && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */ - /* - * UGLY HACK: - * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in - * slower code. - * - * By forcing seed64 into a register, we disrupt the cost model and - * cause it to scalarize. See `XXH32_round()` - * - * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600, - * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on - * GCC 9.2, despite both emitting scalar code. - * - * GCC generates much better scalar code than Clang for the rest of XXH3, - * which is why finding a more optimal codepath is an interest. 
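For orientation, the 1-3 byte path a few lines up folds the whole input plus its length into one 32-bit word before keying and avalanching. A standalone sketch of just that packing step, restated from the layout comment in the hunk (the function name is mine):

    #include <stddef.h>
    #include <stdint.h>

    /* Pack a 1..3 byte input into one 32-bit word, per the layout comment:
     *   len = 1: { input[0], 0x01, input[0], input[0] }
     *   len = 2: { input[1], 0x02, input[0], input[1] }
     *   len = 3: { input[2], 0x03, input[0], input[1] }
     * (bytes listed from least to most significant) */
    static uint32_t combine_1to3(const uint8_t *input, size_t len) {
        uint8_t const c1 = input[0];
        uint8_t const c2 = input[len >> 1];
        uint8_t const c3 = input[len - 1];
        return ((uint32_t)c1 << 16) | ((uint32_t)c2 << 24) |
               ((uint32_t)c3 <<  0) | ((uint32_t)len <<  8);
    }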
- */ - XXH_COMPILER_GUARD(seed64); -#endif - { xxh_u64 const input_lo = XXH_readLE64(input); - xxh_u64 const input_hi = XXH_readLE64(input+8); - return XXH3_mul128_fold64( - input_lo ^ (XXH_readLE64(secret) + seed64), - input_hi ^ (XXH_readLE64(secret+8) - seed64) - ); - } +XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8 *XXH_RESTRICT input, + const xxh_u8 *XXH_RESTRICT secret, + xxh_u64 seed64) { + + #if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like \ + XXH32 hack */ + /* + * UGLY HACK: + * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in + * slower code. + * + * By forcing seed64 into a register, we disrupt the cost model and + * cause it to scalarize. See `XXH32_round()` + * + * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600, + * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on + * GCC 9.2, despite both emitting scalar code. + * + * GCC generates much better scalar code than Clang for the rest of XXH3, + * which is why finding a more optimal codepath is an interest. + */ + XXH_COMPILER_GUARD(seed64); + #endif + { + + xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 const input_hi = XXH_readLE64(input + 8); + return XXH3_mul128_fold64(input_lo ^ (XXH_readLE64(secret) + seed64), + input_hi ^ (XXH_readLE64(secret + 8) - seed64)); + + } + } /* For mid range keys, XXH3 uses a Mum-hash variant. */ -XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t -XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH64_hash_t seed) -{ - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; - XXH_ASSERT(16 < len && len <= 128); - - { xxh_u64 acc = len * XXH_PRIME64_1; -#if XXH_SIZE_OPT >= 1 - /* Smaller and cleaner, but slightly slower. */ - unsigned int i = (unsigned int)(len - 1) / 32; - do { - acc += XXH3_mix16B(input+16 * i, secret+32*i, seed); - acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed); - } while (i-- != 0); -#else - if (len > 32) { - if (len > 64) { - if (len > 96) { - acc += XXH3_mix16B(input+48, secret+96, seed); - acc += XXH3_mix16B(input+len-64, secret+112, seed); - } - acc += XXH3_mix16B(input+32, secret+64, seed); - acc += XXH3_mix16B(input+len-48, secret+80, seed); - } - acc += XXH3_mix16B(input+16, secret+32, seed); - acc += XXH3_mix16B(input+len-32, secret+48, seed); +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_17to128_64b( + const xxh_u8 *XXH_RESTRICT input, size_t len, + const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) { + + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { + + xxh_u64 acc = len * XXH_PRIME64_1; + #if XXH_SIZE_OPT >= 1 + /* Smaller and cleaner, but slightly slower. 
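XXH3_mix16B above is the workhorse of the mid-size paths: two 64-bit words, each XORed against a seeded secret word, multiplied into 128 bits and folded back to 64. A self-contained restatement (helper names are mine; the load assumes a little-endian host for brevity):

    #include <stdint.h>
    #include <string.h>

    static uint64_t read_le64_(const uint8_t *p) {
        uint64_t v; memcpy(&v, p, 8); return v;   /* little-endian host assumed */
    }

    /* 64x64->128 multiply folded to 64 bits (XXH3_mul128_fold64), using the
     * same grade-school arithmetic as the scalar path earlier in this patch. */
    static uint64_t mul128_fold64_(uint64_t lhs, uint64_t rhs) {
        uint64_t const lo_lo = (lhs & 0xFFFFFFFF) * (rhs & 0xFFFFFFFF);
        uint64_t const hi_lo = (lhs >> 32)        * (rhs & 0xFFFFFFFF);
        uint64_t const lo_hi = (lhs & 0xFFFFFFFF) * (rhs >> 32);
        uint64_t const hi_hi = (lhs >> 32)        * (rhs >> 32);
        uint64_t const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
        uint64_t const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
        uint64_t const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
        return lower ^ upper;
    }

    /* XXH3_mix16B restated: two 64-bit words, each XORed with a seeded secret
     * word, multiplied together and folded to 64 bits. */
    static uint64_t mix16B_sketch(const uint8_t *input, const uint8_t *secret,
                                  uint64_t seed64) {
        uint64_t const input_lo = read_le64_(input);
        uint64_t const input_hi = read_le64_(input + 8);
        return mul128_fold64_(input_lo ^ (read_le64_(secret)     + seed64),
                              input_hi ^ (read_le64_(secret + 8) - seed64));
    }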
*/ + unsigned int i = (unsigned int)(len - 1) / 32; + do { + + acc += XXH3_mix16B(input + 16 * i, secret + 32 * i, seed); + acc += + XXH3_mix16B(input + len - 16 * (i + 1), secret + 32 * i + 16, seed); + + } while (i-- != 0); + + #else + if (len > 32) { + + if (len > 64) { + + if (len > 96) { + + acc += XXH3_mix16B(input + 48, secret + 96, seed); + acc += XXH3_mix16B(input + len - 64, secret + 112, seed); + } - acc += XXH3_mix16B(input+0, secret+0, seed); - acc += XXH3_mix16B(input+len-16, secret+16, seed); -#endif - return XXH3_avalanche(acc); + + acc += XXH3_mix16B(input + 32, secret + 64, seed); + acc += XXH3_mix16B(input + len - 48, secret + 80, seed); + + } + + acc += XXH3_mix16B(input + 16, secret + 32, seed); + acc += XXH3_mix16B(input + len - 32, secret + 48, seed); + } + + acc += XXH3_mix16B(input + 0, secret + 0, seed); + acc += XXH3_mix16B(input + len - 16, secret + 16, seed); + #endif + return XXH3_avalanche(acc); + + } + } -/*! - * @brief Maximum size of "short" key in bytes. - */ -#define XXH3_MIDSIZE_MAX 240 + /*! + * @brief Maximum size of "short" key in bytes. + */ + #define XXH3_MIDSIZE_MAX 240 -XXH_NO_INLINE XXH_PUREF XXH64_hash_t -XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH64_hash_t seed) -{ - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; +XXH_NO_INLINE XXH_PUREF XXH64_hash_t XXH3_len_129to240_64b( + const xxh_u8 *XXH_RESTRICT input, size_t len, + const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) { + + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + (void)secretSize; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + + #define XXH3_MIDSIZE_STARTOFFSET 3 + #define XXH3_MIDSIZE_LASTOFFSET 17 + + { + + xxh_u64 acc = len * XXH_PRIME64_1; + xxh_u64 acc_end; + unsigned int const nbRounds = (unsigned int)len / 16; + unsigned int i; XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + for (i = 0; i < 8; i++) { - #define XXH3_MIDSIZE_STARTOFFSET 3 - #define XXH3_MIDSIZE_LASTOFFSET 17 + acc += XXH3_mix16B(input + (16 * i), secret + (16 * i), seed); - { xxh_u64 acc = len * XXH_PRIME64_1; - xxh_u64 acc_end; - unsigned int const nbRounds = (unsigned int)len / 16; - unsigned int i; - XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); - for (i=0; i<8; i++) { - acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed); - } - /* last bytes */ - acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed); - XXH_ASSERT(nbRounds >= 8); - acc = XXH3_avalanche(acc); -#if defined(__clang__) /* Clang */ \ - && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ - && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ + } + + /* last bytes */ + acc_end = XXH3_mix16B( + input + len - 16, + secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed); + XXH_ASSERT(nbRounds >= 8); + acc = XXH3_avalanche(acc); + #if defined(__clang__) /* Clang */ \ + && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ /* * UGLY HACK: * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86. @@ -4724,441 +5283,522 @@ XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, * SLP vectorization. */ #pragma clang loop vectorize(disable) -#endif - for (i=8 ; i < nbRounds; i++) { - /* - * Prevents clang for unrolling the acc loop and interleaving with this one. 
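The nested ifs in XXH3_len_17to128_64b above mix symmetric 16-byte blocks working inward from both ends of the input. This throwaway printer (illustrative only, not xxHash code) lists the (input offset, secret offset) pairs fed to XXH3_mix16B for a given length; it yields the same set of pairs as the unrolled branches:

    #include <stddef.h>
    #include <stdio.h>

    /* List the mix16B calls made by the 17..128 byte path for a given length:
     * matching 16-byte blocks from the front and from the back, consuming
     * 32 bytes of secret per pair. */
    static void print_mix_schedule(size_t len) {
        size_t i = (len - 1) / 32;   /* same round count as the XXH_SIZE_OPT loop */
        printf("len=%zu\n", len);
        do {
            printf("  mix16B(input+%zu, secret+%zu)\n", 16 * i, 32 * i);
            printf("  mix16B(input+%zu, secret+%zu)\n",
                   len - 16 * (i + 1), 32 * i + 16);
        } while (i-- != 0);
    }

    int main(void) {
        print_mix_schedule(100);   /* four front/back pairs, eight mixes */
        print_mix_schedule(17);    /* shortest length on this path: one pair */
        return 0;
    }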
- */ - XXH_COMPILER_GUARD(acc); - acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed); - } - return XXH3_avalanche(acc + acc_end); + #endif + for (i = 8; i < nbRounds; i++) { + + /* + * Prevents clang for unrolling the acc loop and interleaving with this + * one. + */ + XXH_COMPILER_GUARD(acc); + acc_end += + XXH3_mix16B(input + (16 * i), + secret + (16 * (i - 8)) + XXH3_MIDSIZE_STARTOFFSET, seed); + } -} + return XXH3_avalanche(acc + acc_end); + + } + +} + + /* ======= Long Keys ======= */ + + #define XXH_STRIPE_LEN 64 + #define XXH_SECRET_CONSUME_RATE \ + 8 /* nb of secret bytes consumed at each accumulation */ + #define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64)) + + #ifdef XXH_OLD_NAMES + #define STRIPE_LEN XXH_STRIPE_LEN + #define ACC_NB XXH_ACC_NB + #endif + + #ifndef XXH_PREFETCH_DIST + #ifdef __clang__ + #define XXH_PREFETCH_DIST 320 + #else + #if (XXH_VECTOR == XXH_AVX512) + #define XXH_PREFETCH_DIST 512 + #else + #define XXH_PREFETCH_DIST 384 + #endif + #endif /* __clang__ */ + #endif /* XXH_PREFETCH_DIST */ + + /* + * These macros are to generate an XXH3_accumulate() function. + * The two arguments select the name suffix and target attribute. + * + * The name of this symbol is XXH3_accumulate_() and it calls + * XXH3_accumulate_512_(). + * + * It may be useful to hand implement this function if the compiler fails + * to optimize the inline function. + */ + #define XXH3_ACCUMULATE_TEMPLATE(name) \ + void XXH3_accumulate_##name( \ + xxh_u64 *XXH_RESTRICT acc, const xxh_u8 *XXH_RESTRICT input, \ + const xxh_u8 *XXH_RESTRICT secret, size_t nbStripes) { \ + \ + size_t n; \ + for (n = 0; n < nbStripes; n++) { \ + \ + const xxh_u8 *const in = input + n * XXH_STRIPE_LEN; \ + XXH_PREFETCH(in + XXH_PREFETCH_DIST); \ + XXH3_accumulate_512_##name(acc, in, \ + secret + n * XXH_SECRET_CONSUME_RATE); \ + \ + } \ + \ + } -/* ======= Long Keys ======= */ +XXH_FORCE_INLINE void XXH_writeLE64(void *dst, xxh_u64 v64) { -#define XXH_STRIPE_LEN 64 -#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */ -#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64)) + if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64); + XXH_memcpy(dst, &v64, sizeof(v64)); -#ifdef XXH_OLD_NAMES -# define STRIPE_LEN XXH_STRIPE_LEN -# define ACC_NB XXH_ACC_NB -#endif +} -#ifndef XXH_PREFETCH_DIST -# ifdef __clang__ -# define XXH_PREFETCH_DIST 320 -# else -# if (XXH_VECTOR == XXH_AVX512) -# define XXH_PREFETCH_DIST 512 -# else -# define XXH_PREFETCH_DIST 384 -# endif -# endif /* __clang__ */ -#endif /* XXH_PREFETCH_DIST */ + /* Several intrinsic functions below are supposed to accept __int64 as + * argument, as documented in + * https://software.intel.com/sites/landingpage/IntrinsicsGuide/ . + * However, several environments do not define __int64 type, + * requiring a workaround. + */ + #if !defined(__VMS) && \ + (defined(__cplusplus) || (defined(__STDC_VERSION__) && \ + (__STDC_VERSION__ >= 199901L) /* C99 */)) +typedef int64_t xxh_i64; + #else +/* the following type must have a width of 64-bit */ +typedef long long xxh_i64; + #endif -/* - * These macros are to generate an XXH3_accumulate() function. - * The two arguments select the name suffix and target attribute. - * - * The name of this symbol is XXH3_accumulate_() and it calls - * XXH3_accumulate_512_(). - * - * It may be useful to hand implement this function if the compiler fails to - * optimize the inline function. 
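With XXH_STRIPE_LEN = 64 and XXH_SECRET_CONSUME_RATE = 8, each stripe is eight 64-bit lanes and the secret pointer slides by 8 bytes per stripe. The SIMD kernels below all perform the same per-lane update; here it is distilled into scalar form from their line comments (an illustration, not the header's own scalar path; the load assumes a little-endian host):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define STRIPE_LEN 64
    #define ACC_NB     (STRIPE_LEN / sizeof(uint64_t))   /* 8 lanes */
    #define SECRET_CONSUME_RATE 8                        /* bytes per stripe */

    static uint64_t read_le64(const uint8_t *p) {
        uint64_t v;
        memcpy(&v, p, sizeof v);   /* little-endian host assumed for brevity */
        return v;
    }

    /* One stripe, per the comments in the SIMD kernels below:
     *   data_key = data ^ key
     *   acc      += swap(data)      (pairwise lane swap)
     *   acc      += low32(data_key) * high32(data_key)
     */
    static void accumulate_stripe_sketch(uint64_t acc[ACC_NB],
                                         const uint8_t *input,
                                         const uint8_t *secret) {
        for (size_t lane = 0; lane < ACC_NB; lane++) {
            uint64_t const data     = read_le64(input  + 8 * lane);
            uint64_t const key      = read_le64(secret + 8 * lane);
            uint64_t const data_key = data ^ key;
            acc[lane ^ 1] += data;   /* "xacc[i] += swap(data_vec)", lane-wise */
            acc[lane]     += (data_key & 0xFFFFFFFF) * (data_key >> 32);
        }
    }

    /* Over a long block, stripe n reads input + n*STRIPE_LEN and
     * secret + n*SECRET_CONSUME_RATE, exactly as XXH3_accumulate_<name>()
     * generated by the template above does. */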
- */ -#define XXH3_ACCUMULATE_TEMPLATE(name) \ -void \ -XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc, \ - const xxh_u8* XXH_RESTRICT input, \ - const xxh_u8* XXH_RESTRICT secret, \ - size_t nbStripes) \ -{ \ - size_t n; \ - for (n = 0; n < nbStripes; n++ ) { \ - const xxh_u8* const in = input + n*XXH_STRIPE_LEN; \ - XXH_PREFETCH(in + XXH_PREFETCH_DIST); \ - XXH3_accumulate_512_##name( \ - acc, \ - in, \ - secret + n*XXH_SECRET_CONSUME_RATE); \ - } \ -} - - -XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64) -{ - if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64); - XXH_memcpy(dst, &v64, sizeof(v64)); -} - -/* Several intrinsic functions below are supposed to accept __int64 as argument, - * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ . - * However, several environments do not define __int64 type, - * requiring a workaround. - */ -#if !defined (__VMS) \ - && (defined (__cplusplus) \ - || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) - typedef int64_t xxh_i64; -#else - /* the following type must have a width of 64-bit */ - typedef long long xxh_i64; -#endif + /* + * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the + * most optimized. + * + * It is a hardened version of UMAC, based off of FARSH's implementation. + * + * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD + * implementations, and it is ridiculously fast. + * + * We harden it by mixing the original input to the accumulators as well as + * the product. + * + * This means that in the (relatively likely) case of a multiply by zero, + * the original input is preserved. + * + * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve + * cross-pollination, as otherwise the upper and lower halves would be + * essentially independent. + * + * This doesn't matter on 64-bit hashes since they all get merged together + * in the end, so we skip the extra step. + * + * Both XXH3_64bits and XXH3_128bits use this subroutine. + */ + #if (XXH_VECTOR == XXH_AVX512) || \ + (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0) -/* - * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized. - * - * It is a hardened version of UMAC, based off of FARSH's implementation. - * - * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD - * implementations, and it is ridiculously fast. - * - * We harden it by mixing the original input to the accumulators as well as the product. - * - * This means that in the (relatively likely) case of a multiply by zero, the - * original input is preserved. - * - * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve - * cross-pollination, as otherwise the upper and lower halves would be - * essentially independent. - * - * This doesn't matter on 64-bit hashes since they all get merged together in - * the end, so we skip the extra step. - * - * Both XXH3_64bits and XXH3_128bits use this subroutine. 
- */ + #ifndef XXH_TARGET_AVX512 + #define XXH_TARGET_AVX512 /* disable attribute target */ + #endif -#if (XXH_VECTOR == XXH_AVX512) \ - || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0) +XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_accumulate_512_avx512( + void *XXH_RESTRICT acc, const void *XXH_RESTRICT input, + const void *XXH_RESTRICT secret) { -#ifndef XXH_TARGET_AVX512 -# define XXH_TARGET_AVX512 /* disable attribute target */ -#endif + __m512i *const xacc = (__m512i *)acc; + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); -XXH_FORCE_INLINE XXH_TARGET_AVX512 void -XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - __m512i* const xacc = (__m512i *) acc; - XXH_ASSERT((((size_t)acc) & 63) == 0); - XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + { + + /* data_vec = input[0]; */ + __m512i const data_vec = _mm512_loadu_si512(input); + /* key_vec = secret[0]; */ + __m512i const key_vec = _mm512_loadu_si512(secret); + /* data_key = data_vec ^ key_vec; */ + __m512i const data_key = _mm512_xor_si512(data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m512i const data_key_lo = _mm512_srli_epi64(data_key, 32); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m512i const product = _mm512_mul_epu32(data_key, data_key_lo); + /* xacc[0] += swap(data_vec); */ + __m512i const data_swap = + _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2)); + __m512i const sum = _mm512_add_epi64(*xacc, data_swap); + /* xacc[0] += product; */ + *xacc = _mm512_add_epi64(product, sum); + + } - { - /* data_vec = input[0]; */ - __m512i const data_vec = _mm512_loadu_si512 (input); - /* key_vec = secret[0]; */ - __m512i const key_vec = _mm512_loadu_si512 (secret); - /* data_key = data_vec ^ key_vec; */ - __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); - /* data_key_lo = data_key >> 32; */ - __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32); - /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ - __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo); - /* xacc[0] += swap(data_vec); */ - __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2)); - __m512i const sum = _mm512_add_epi64(*xacc, data_swap); - /* xacc[0] += product; */ - *xacc = _mm512_add_epi64(product, sum); - } } + XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512) -/* - * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. - * - * Multiplication isn't perfect, as explained by Google in HighwayHash: - * - * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to - * // varying degrees. In descending order of goodness, bytes - * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. - * // As expected, the upper and lower bytes are much worse. - * - * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 - * - * Since our algorithm uses a pseudorandom secret to add some variance into the - * mix, we don't need to (or want to) mix as often or as much as HighwayHash does. - * - * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid - * extraction. - * - * Both XXH3_64bits and XXH3_128bits use this subroutine. - */ + /* + * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. 
+ * + * Multiplication isn't perfect, as explained by Google in HighwayHash: + * + * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to + * // varying degrees. In descending order of goodness, bytes + * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. + * // As expected, the upper and lower bytes are much worse. + * + * Source: + * https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 + * + * Since our algorithm uses a pseudorandom secret to add some variance into + * the mix, we don't need to (or want to) mix as often or as much as + * HighwayHash does. + * + * This isn't as tight as XXH3_accumulate, but still written in SIMD to + * avoid extraction. + * + * Both XXH3_64bits and XXH3_128bits use this subroutine. + */ + + XXH_FORCE_INLINE XXH_TARGET_AVX512 + void XXH3_scrambleAcc_avx512(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT secret) { + + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + { + + __m512i *const xacc = (__m512i *)acc; + const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1); + + /* xacc[0] ^= (xacc[0] >> 47) */ + __m512i const acc_vec = *xacc; + __m512i const shifted = _mm512_srli_epi64(acc_vec, 47); + /* xacc[0] ^= secret; */ + __m512i const key_vec = _mm512_loadu_si512(secret); + __m512i const data_key = _mm512_ternarylogic_epi32( + key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */); + + /* xacc[0] *= XXH_PRIME32_1; */ + __m512i const data_key_hi = _mm512_srli_epi64(data_key, 32); + __m512i const prod_lo = _mm512_mul_epu32(data_key, prime32); + __m512i const prod_hi = _mm512_mul_epu32(data_key_hi, prime32); + *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); + + } -XXH_FORCE_INLINE XXH_TARGET_AVX512 void -XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 63) == 0); - XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); - { __m512i* const xacc = (__m512i*) acc; - const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1); - - /* xacc[0] ^= (xacc[0] >> 47) */ - __m512i const acc_vec = *xacc; - __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47); - /* xacc[0] ^= secret; */ - __m512i const key_vec = _mm512_loadu_si512 (secret); - __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */); - - /* xacc[0] *= XXH_PRIME32_1; */ - __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32); - __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32); - __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32); - *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); - } } -XXH_FORCE_INLINE XXH_TARGET_AVX512 void -XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) -{ - XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0); - XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64); - XXH_ASSERT(((size_t)customSecret & 63) == 0); - (void)(&XXH_writeLE64); - { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); - __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64); - __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos); - - const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret); - __m512i* const dest = ( __m512i*) customSecret; - int i; - XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */ - XXH_ASSERT(((size_t)dest & 63) == 0); - for (i=0; i < nbRounds; ++i) { - dest[i] = 
_mm512_add_epi64(_mm512_load_si512(src + i), seed); - } } +XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_initCustomSecret_avx512( + void *XXH_RESTRICT customSecret, xxh_u64 seed64) { + + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64); + XXH_ASSERT(((size_t)customSecret & 63) == 0); + (void)(&XXH_writeLE64); + { + + int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); + __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64); + __m512i const seed = + _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos); + + const __m512i *const src = (const __m512i *)((const void *)XXH3_kSecret); + __m512i *const dest = (__m512i *)customSecret; + int i; + XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 63) == 0); + for (i = 0; i < nbRounds; ++i) { + + dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed); + + } + + } + } -#endif + #endif -#if (XXH_VECTOR == XXH_AVX2) \ - || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0) + #if (XXH_VECTOR == XXH_AVX2) || \ + (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0) -#ifndef XXH_TARGET_AVX2 -# define XXH_TARGET_AVX2 /* disable attribute target */ -#endif + #ifndef XXH_TARGET_AVX2 + #define XXH_TARGET_AVX2 /* disable attribute target */ + #endif + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_accumulate_512_avx2( + void *XXH_RESTRICT acc, const void *XXH_RESTRICT input, + const void *XXH_RESTRICT secret) { + + XXH_ASSERT((((size_t)acc) & 31) == 0); + { + + __m256i *const xacc = (__m256i *)acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. + */ + const __m256i *const xinput = (const __m256i *)input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ + const __m256i *const xsecret = (const __m256i *)secret; + + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m256i); i++) { + + /* data_vec = xinput[i]; */ + __m256i const data_vec = _mm256_loadu_si256(xinput + i); + /* key_vec = xsecret[i]; */ + __m256i const key_vec = _mm256_loadu_si256(xsecret + i); + /* data_key = data_vec ^ key_vec; */ + __m256i const data_key = _mm256_xor_si256(data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m256i const data_key_lo = _mm256_srli_epi64(data_key, 32); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m256i const product = _mm256_mul_epu32(data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m256i const data_swap = + _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2)); + __m256i const sum = _mm256_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm256_add_epi64(product, sum); + + } + + } -XXH_FORCE_INLINE XXH_TARGET_AVX2 void -XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 31) == 0); - { __m256i* const xacc = (__m256i *) acc; - /* Unaligned. This is mainly for pointer arithmetic, and because - * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ - const __m256i* const xinput = (const __m256i *) input; - /* Unaligned. This is mainly for pointer arithmetic, and because - * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. 
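The seeded secret built just above is simply the default secret with the seed added to even 64-bit words and subtracted from odd ones; that is what the 0xAA masked subtract (AVX512) and the (+seed, -seed, ...) vectors in the other variants encode. A scalar sketch (names are mine; byte order is glossed over, the real code reads and writes little-endian):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Seed the default secret: even 64-bit words get +seed, odd words -seed.
     * kSecret/secretSize stand in for XXH3_kSecret and its size. */
    static void init_custom_secret_sketch(uint8_t *customSecret,
                                          const uint8_t *kSecret,
                                          size_t secretSize,
                                          uint64_t seed64) {
        for (size_t i = 0; i < secretSize / 8; i++) {
            uint64_t w;
            memcpy(&w, kSecret + 8 * i, 8);
            w += (i & 1) ? (0ULL - seed64) : seed64;
            memcpy(customSecret + 8 * i, &w, 8);
        }
    }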
*/ - const __m256i* const xsecret = (const __m256i *) secret; - - size_t i; - for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { - /* data_vec = xinput[i]; */ - __m256i const data_vec = _mm256_loadu_si256 (xinput+i); - /* key_vec = xsecret[i]; */ - __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); - /* data_key = data_vec ^ key_vec; */ - __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); - /* data_key_lo = data_key >> 32; */ - __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32); - /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ - __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo); - /* xacc[i] += swap(data_vec); */ - __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2)); - __m256i const sum = _mm256_add_epi64(xacc[i], data_swap); - /* xacc[i] += product; */ - xacc[i] = _mm256_add_epi64(product, sum); - } } } + XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2) -XXH_FORCE_INLINE XXH_TARGET_AVX2 void -XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 31) == 0); - { __m256i* const xacc = (__m256i*) acc; - /* Unaligned. This is mainly for pointer arithmetic, and because - * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ - const __m256i* const xsecret = (const __m256i *) secret; - const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1); - - size_t i; - for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { - /* xacc[i] ^= (xacc[i] >> 47) */ - __m256i const acc_vec = xacc[i]; - __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47); - __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted); - /* xacc[i] ^= xsecret; */ - __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); - __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); - - /* xacc[i] *= XXH_PRIME32_1; */ - __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32); - __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32); - __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32); - xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32)); - } + XXH_FORCE_INLINE XXH_TARGET_AVX2 + void XXH3_scrambleAcc_avx2(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT secret) { + + XXH_ASSERT((((size_t)acc) & 31) == 0); + { + + __m256i *const xacc = (__m256i *)acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. 
*/ + const __m256i *const xsecret = (const __m256i *)secret; + const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1); + + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m256i); i++) { + + /* xacc[i] ^= (xacc[i] >> 47) */ + __m256i const acc_vec = xacc[i]; + __m256i const shifted = _mm256_srli_epi64(acc_vec, 47); + __m256i const data_vec = _mm256_xor_si256(acc_vec, shifted); + /* xacc[i] ^= xsecret; */ + __m256i const key_vec = _mm256_loadu_si256(xsecret + i); + __m256i const data_key = _mm256_xor_si256(data_vec, key_vec); + + /* xacc[i] *= XXH_PRIME32_1; */ + __m256i const data_key_hi = _mm256_srli_epi64(data_key, 32); + __m256i const prod_lo = _mm256_mul_epu32(data_key, prime32); + __m256i const prod_hi = _mm256_mul_epu32(data_key_hi, prime32); + xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32)); + } + + } + } -XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) -{ - XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0); - XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6); - XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64); - (void)(&XXH_writeLE64); - XXH_PREFETCH(customSecret); - { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64); +XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2( + void *XXH_RESTRICT customSecret, xxh_u64 seed64) { - const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret); - __m256i* dest = ( __m256i*) customSecret; + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0); + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64); + (void)(&XXH_writeLE64); + XXH_PREFETCH(customSecret); + { + + __m256i const seed = + _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, + (xxh_i64)(0U - seed64), (xxh_i64)seed64); + + const __m256i *const src = (const __m256i *)((const void *)XXH3_kSecret); + __m256i *dest = (__m256i *)customSecret; + + #if defined(__GNUC__) || defined(__clang__) + /* + * On GCC & Clang, marking 'dest' as modified will cause the compiler: + * - do not extract the secret from sse registers in the internal loop + * - use less common registers, and avoid pushing these reg into stack + */ + XXH_COMPILER_GUARD(dest); + #endif + XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 31) == 0); + + /* GCC -O2 need unroll loop manually */ + dest[0] = _mm256_add_epi64(_mm256_load_si256(src + 0), seed); + dest[1] = _mm256_add_epi64(_mm256_load_si256(src + 1), seed); + dest[2] = _mm256_add_epi64(_mm256_load_si256(src + 2), seed); + dest[3] = _mm256_add_epi64(_mm256_load_si256(src + 3), seed); + dest[4] = _mm256_add_epi64(_mm256_load_si256(src + 4), seed); + dest[5] = _mm256_add_epi64(_mm256_load_si256(src + 5), seed); + + } -# if defined(__GNUC__) || defined(__clang__) - /* - * On GCC & Clang, marking 'dest' as modified will cause the compiler: - * - do not extract the secret from sse registers in the internal loop - * - use less common registers, and avoid pushing these reg into stack - */ - XXH_COMPILER_GUARD(dest); -# endif - XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */ - XXH_ASSERT(((size_t)dest & 31) == 0); - - /* GCC -O2 need unroll loop manually */ - dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed); - dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed); - dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed); - dest[3] = 
_mm256_add_epi64(_mm256_load_si256(src+3), seed); - dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed); - dest[5] = _mm256_add_epi64(_mm256_load_si256(src+5), seed); - } } -#endif + #endif -/* x86dispatch always generates SSE2 */ -#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH) + /* x86dispatch always generates SSE2 */ + #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH) -#ifndef XXH_TARGET_SSE2 -# define XXH_TARGET_SSE2 /* disable attribute target */ -#endif + #ifndef XXH_TARGET_SSE2 + #define XXH_TARGET_SSE2 /* disable attribute target */ + #endif + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_accumulate_512_sse2( + void *XXH_RESTRICT acc, const void *XXH_RESTRICT input, + const void *XXH_RESTRICT secret) { + + /* SSE2 is just a half-scale version of the AVX2 version. */ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { + + __m128i *const xacc = (__m128i *)acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i *const xinput = (const __m128i *)input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i *const xsecret = (const __m128i *)secret; + + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m128i); i++) { + + /* data_vec = xinput[i]; */ + __m128i const data_vec = _mm_loadu_si128(xinput + i); + /* key_vec = xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128(xsecret + i); + /* data_key = data_vec ^ key_vec; */ + __m128i const data_key = _mm_xor_si128(data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m128i const data_key_lo = + _mm_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1)); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m128i const product = _mm_mul_epu32(data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m128i const data_swap = + _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2)); + __m128i const sum = _mm_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm_add_epi64(product, sum); + + } + + } -XXH_FORCE_INLINE XXH_TARGET_SSE2 void -XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - /* SSE2 is just a half-scale version of the AVX2 version. */ - XXH_ASSERT((((size_t)acc) & 15) == 0); - { __m128i* const xacc = (__m128i *) acc; - /* Unaligned. This is mainly for pointer arithmetic, and because - * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ - const __m128i* const xinput = (const __m128i *) input; - /* Unaligned. This is mainly for pointer arithmetic, and because - * _mm_loadu_si128 requires a const __m128i * pointer for some reason. 
*/ - const __m128i* const xsecret = (const __m128i *) secret; - - size_t i; - for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { - /* data_vec = xinput[i]; */ - __m128i const data_vec = _mm_loadu_si128 (xinput+i); - /* key_vec = xsecret[i]; */ - __m128i const key_vec = _mm_loadu_si128 (xsecret+i); - /* data_key = data_vec ^ key_vec; */ - __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); - /* data_key_lo = data_key >> 32; */ - __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); - /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ - __m128i const product = _mm_mul_epu32 (data_key, data_key_lo); - /* xacc[i] += swap(data_vec); */ - __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); - __m128i const sum = _mm_add_epi64(xacc[i], data_swap); - /* xacc[i] += product; */ - xacc[i] = _mm_add_epi64(product, sum); - } } } + XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2) -XXH_FORCE_INLINE XXH_TARGET_SSE2 void -XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 15) == 0); - { __m128i* const xacc = (__m128i*) acc; - /* Unaligned. This is mainly for pointer arithmetic, and because - * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ - const __m128i* const xsecret = (const __m128i *) secret; - const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1); - - size_t i; - for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { - /* xacc[i] ^= (xacc[i] >> 47) */ - __m128i const acc_vec = xacc[i]; - __m128i const shifted = _mm_srli_epi64 (acc_vec, 47); - __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted); - /* xacc[i] ^= xsecret[i]; */ - __m128i const key_vec = _mm_loadu_si128 (xsecret+i); - __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); - - /* xacc[i] *= XXH_PRIME32_1; */ - __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); - __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32); - __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32); - xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32)); - } + XXH_FORCE_INLINE XXH_TARGET_SSE2 + void XXH3_scrambleAcc_sse2(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT secret) { + + XXH_ASSERT((((size_t)acc) & 15) == 0); + { + + __m128i *const xacc = (__m128i *)acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. 
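Every XXH3_scrambleAcc_* variant in these hunks applies the same per-lane update spelled out in their comments: xorshift by 47, XOR with the secret, then a 64-bit multiply by the 32-bit prime. A scalar sketch (illustrative; 0x9E3779B1 is the usual XXH_PRIME32_1 value, worth checking against the header's definition):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define PRIME32_1 0x9E3779B1U   /* stands in for XXH_PRIME32_1 */

    /* Per-lane scramble, as the AVX512/AVX2/SSE2 comments describe:
     *   acc ^= acc >> 47;  acc ^= secret;  acc *= PRIME32_1;
     * The split low/high 32-bit multiplies in the SIMD code add up to this
     * one full 64-bit multiply. */
    static void scramble_acc_sketch(uint64_t acc[8], const uint8_t *secret) {
        for (size_t lane = 0; lane < 8; lane++) {
            uint64_t key;
            memcpy(&key, secret + 8 * lane, 8);  /* little-endian host assumed */
            uint64_t v = acc[lane];
            v ^= v >> 47;
            v ^= key;
            v *= (uint64_t)PRIME32_1;
            acc[lane] = v;
        }
    }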
*/ + const __m128i *const xsecret = (const __m128i *)secret; + const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1); + + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m128i); i++) { + + /* xacc[i] ^= (xacc[i] >> 47) */ + __m128i const acc_vec = xacc[i]; + __m128i const shifted = _mm_srli_epi64(acc_vec, 47); + __m128i const data_vec = _mm_xor_si128(acc_vec, shifted); + /* xacc[i] ^= xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128(xsecret + i); + __m128i const data_key = _mm_xor_si128(data_vec, key_vec); + + /* xacc[i] *= XXH_PRIME32_1; */ + __m128i const data_key_hi = + _mm_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1)); + __m128i const prod_lo = _mm_mul_epu32(data_key, prime32); + __m128i const prod_hi = _mm_mul_epu32(data_key_hi, prime32); + xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32)); + } + + } + } -XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) -{ - XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); - (void)(&XXH_writeLE64); - { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i); - -# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900 - /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */ - XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) }; - __m128i const seed = _mm_load_si128((__m128i const*)seed64x2); -# else - __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64); -# endif - int i; - - const void* const src16 = XXH3_kSecret; - __m128i* dst16 = (__m128i*) customSecret; -# if defined(__GNUC__) || defined(__clang__) - /* - * On GCC & Clang, marking 'dest' as modified will cause the compiler: - * - do not extract the secret from sse registers in the internal loop - * - use less common registers, and avoid pushing these reg into stack - */ - XXH_COMPILER_GUARD(dst16); -# endif - XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */ - XXH_ASSERT(((size_t)dst16 & 15) == 0); +XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2( + void *XXH_RESTRICT customSecret, xxh_u64 seed64) { + + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); + (void)(&XXH_writeLE64); + { + + int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i); + + #if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900 + /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */ + XXH_ALIGN(16) + const xxh_i64 seed64x2[2] = {(xxh_i64)seed64, (xxh_i64)(0U - seed64)}; + __m128i const seed = _mm_load_si128((__m128i const *)seed64x2); + #else + __m128i const seed = + _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64); + #endif + int i; + + const void *const src16 = XXH3_kSecret; + __m128i *dst16 = (__m128i *)customSecret; + #if defined(__GNUC__) || defined(__clang__) + /* + * On GCC & Clang, marking 'dest' as modified will cause the compiler: + * - do not extract the secret from sse registers in the internal loop + * - use less common registers, and avoid pushing these reg into stack + */ + XXH_COMPILER_GUARD(dst16); + #endif + XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dst16 & 15) == 0); + + for (i = 0; i < nbRounds; ++i) { + + dst16[i] = + _mm_add_epi64(_mm_load_si128((const __m128i *)src16 + i), seed); + + } + + } - for (i=0; i < nbRounds; ++i) { - dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed); - } } } -#endif + #endif -#if (XXH_VECTOR == XXH_NEON) + #if (XXH_VECTOR == XXH_NEON) /* forward declarations for 
the scalar routines */ -XXH_FORCE_INLINE void -XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input, - void const* XXH_RESTRICT secret, size_t lane); +XXH_FORCE_INLINE void XXH3_scalarRound(void *XXH_RESTRICT acc, + void const *XXH_RESTRICT input, + void const *XXH_RESTRICT secret, + size_t lane); -XXH_FORCE_INLINE void -XXH3_scalarScrambleRound(void* XXH_RESTRICT acc, - void const* XXH_RESTRICT secret, size_t lane); +XXH_FORCE_INLINE void XXH3_scalarScrambleRound(void *XXH_RESTRICT acc, + void const *XXH_RESTRICT secret, + size_t lane); /*! * @internal @@ -5168,7 +5808,8 @@ XXH3_scalarScrambleRound(void* XXH_RESTRICT acc, * is to optimize the pipelining and can have up to 15% speedup depending on the * CPU, and it also mitigates some GCC codegen issues. * - * @see XXH3_NEON_LANES for configuring this and details about this optimization. + * @see XXH3_NEON_LANES for configuring this and details about this + * optimization. * * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit * integers instead of the other platforms which mask full 64-bit vectors, @@ -5180,740 +5821,866 @@ XXH3_scalarScrambleRound(void* XXH_RESTRICT acc, * there needs to be *three* versions of the accumulate operation used * for the remaining 2 lanes. * - * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap - * nearly perfectly. + * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics + * overlap nearly perfectly. */ -XXH_FORCE_INLINE void -XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 15) == 0); - XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0); - { /* GCC for darwin arm64 does not like aliasing here */ - xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc; - /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */ - uint8_t const* xinput = (const uint8_t *) input; - uint8_t const* xsecret = (const uint8_t *) secret; - - size_t i; -#ifdef __wasm_simd128__ - /* - * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret - * is constant propagated, which results in it converting it to this - * inside the loop: - * - * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0) - * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0) - * ... - * - * This requires a full 32-bit address immediate (and therefore a 6 byte - * instruction) as well as an add for each offset. - * - * Putting an asm guard prevents it from folding (at the cost of losing - * the alignment hint), and uses the free offset in `v128.load` instead - * of adding secret_offset each time which overall reduces code size by - * about a kilobyte and improves performance. - */ - XXH_COMPILER_GUARD(xsecret); -#endif - /* Scalar lanes use the normal scalarRound routine */ - for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { - XXH3_scalarRound(acc, input, secret, i); - } - i = 0; - /* 4 NEON lanes at a time. 
*/ - for (; i+1 < XXH3_NEON_LANES / 2; i+=2) { - /* data_vec = xinput[i]; */ - uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16)); - uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16)); - /* key_vec = xsecret[i]; */ - uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16)); - uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16)); - /* data_swap = swap(data_vec) */ - uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1); - uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1); - /* data_key = data_vec ^ key_vec; */ - uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1); - uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2); - - /* - * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a - * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to - * get one vector with the low 32 bits of each lane, and one vector - * with the high 32 bits of each lane. - * - * The intrinsic returns a double vector because the original ARMv7-a - * instruction modified both arguments in place. AArch64 and SIMD128 emit - * two instructions from this intrinsic. - * - * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ] - * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ] - */ - uint32x4x2_t unzipped = vuzpq_u32( - vreinterpretq_u32_u64(data_key_1), - vreinterpretq_u32_u64(data_key_2) - ); - /* data_key_lo = data_key & 0xFFFFFFFF */ - uint32x4_t data_key_lo = unzipped.val[0]; - /* data_key_hi = data_key >> 32 */ - uint32x4_t data_key_hi = unzipped.val[1]; - /* - * Then, we can split the vectors horizontally and multiply which, as for most - * widening intrinsics, have a variant that works on both high half vectors - * for free on AArch64. A similar instruction is available on SIMD128. - * - * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi - */ - uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi); - uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi); - /* - * Clang reorders - * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s - * c += a; // add acc.2d, acc.2d, swap.2d - * to - * c += a; // add acc.2d, acc.2d, swap.2d - * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s - * - * While it would make sense in theory since the addition is faster, - * for reasons likely related to umlal being limited to certain NEON - * pipelines, this is worse. A compiler guard fixes this. - */ - XXH_COMPILER_GUARD_CLANG_NEON(sum_1); - XXH_COMPILER_GUARD_CLANG_NEON(sum_2); - /* xacc[i] = acc_vec + sum; */ - xacc[i] = vaddq_u64(xacc[i], sum_1); - xacc[i+1] = vaddq_u64(xacc[i+1], sum_2); - } - /* Operate on the remaining NEON lanes 2 at a time. */ - for (; i < XXH3_NEON_LANES / 2; i++) { - /* data_vec = xinput[i]; */ - uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16)); - /* key_vec = xsecret[i]; */ - uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); - /* acc_vec_2 = swap(data_vec) */ - uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1); - /* data_key = data_vec ^ key_vec; */ - uint64x2_t data_key = veorq_u64(data_vec, key_vec); - /* For two lanes, just use VMOVN and VSHRN. 
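The vuzpq_u32 trick above de-interleaves two 64x2 data_key vectors into one vector of low 32-bit halves and one of high halves, so the widening multiply-accumulate can run on whole vectors. The same bookkeeping in scalar form (illustrative only, names are mine):

    #include <stdint.h>

    /* Scalar picture of the de-interleave: from four 64-bit data_key words,
     * build one array of their low 32-bit halves (unzipped.val[0]) and one
     * of their high halves (unzipped.val[1]). */
    static void unzip_data_key(const uint64_t data_key[4],
                               uint32_t lo[4], uint32_t hi[4]) {
        for (int i = 0; i < 4; i++) {
            lo[i] = (uint32_t)(data_key[i] & 0xFFFFFFFF);
            hi[i] = (uint32_t)(data_key[i] >> 32);
        }
    }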
*/ - /* data_key_lo = data_key & 0xFFFFFFFF; */ - uint32x2_t data_key_lo = vmovn_u64(data_key); - /* data_key_hi = data_key >> 32; */ - uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32); - /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */ - uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi); - /* Same Clang workaround as before */ - XXH_COMPILER_GUARD_CLANG_NEON(sum); - /* xacc[i] = acc_vec + sum; */ - xacc[i] = vaddq_u64 (xacc[i], sum); - } +XXH_FORCE_INLINE void XXH3_accumulate_512_neon( + void *XXH_RESTRICT acc, const void *XXH_RESTRICT input, + const void *XXH_RESTRICT secret) { + + XXH_ASSERT((((size_t)acc) & 15) == 0); + XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && + XXH3_NEON_LANES % 2 == 0); + { /* GCC for darwin arm64 does not like aliasing here */ + xxh_aliasing_uint64x2_t *const xacc = (xxh_aliasing_uint64x2_t *)acc; + /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. + */ + uint8_t const *xinput = (const uint8_t *)input; + uint8_t const *xsecret = (const uint8_t *)secret; + + size_t i; + #ifdef __wasm_simd128__ + /* + * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret + * is constant propagated, which results in it converting it to this + * inside the loop: + * + * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0) + * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0) + * ... + * + * This requires a full 32-bit address immediate (and therefore a 6 byte + * instruction) as well as an add for each offset. + * + * Putting an asm guard prevents it from folding (at the cost of losing + * the alignment hint), and uses the free offset in `v128.load` instead + * of adding secret_offset each time which overall reduces code size by + * about a kilobyte and improves performance. + */ + XXH_COMPILER_GUARD(xsecret); + #endif + /* Scalar lanes use the normal scalarRound routine */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + + XXH3_scalarRound(acc, input, secret, i); + + } + + i = 0; + /* 4 NEON lanes at a time. */ + for (; i + 1 < XXH3_NEON_LANES / 2; i += 2) { + + /* data_vec = xinput[i]; */ + uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16)); + uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i + 1) * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i + 1) * 16)); + /* data_swap = swap(data_vec) */ + uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1); + uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1); + uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2); + + /* + * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a + * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to + * get one vector with the low 32 bits of each lane, and one vector + * with the high 32 bits of each lane. + * + * The intrinsic returns a double vector because the original ARMv7-a + * instruction modified both arguments in place. AArch64 and SIMD128 emit + * two instructions from this intrinsic. 
+ * + * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ] + * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ] + */ + uint32x4x2_t unzipped = vuzpq_u32(vreinterpretq_u32_u64(data_key_1), + vreinterpretq_u32_u64(data_key_2)); + /* data_key_lo = data_key & 0xFFFFFFFF */ + uint32x4_t data_key_lo = unzipped.val[0]; + /* data_key_hi = data_key >> 32 */ + uint32x4_t data_key_hi = unzipped.val[1]; + /* + * Then, we can split the vectors horizontally and multiply which, as for + * most widening intrinsics, have a variant that works on both high half + * vectors for free on AArch64. A similar instruction is available on + * SIMD128. + * + * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi + */ + uint64x2_t sum_1 = + XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi); + uint64x2_t sum_2 = + XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi); + /* + * Clang reorders + * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s + * c += a; // add acc.2d, acc.2d, swap.2d + * to + * c += a; // add acc.2d, acc.2d, swap.2d + * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s + * + * While it would make sense in theory since the addition is faster, + * for reasons likely related to umlal being limited to certain NEON + * pipelines, this is worse. A compiler guard fixes this. + */ + XXH_COMPILER_GUARD_CLANG_NEON(sum_1); + XXH_COMPILER_GUARD_CLANG_NEON(sum_2); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64(xacc[i], sum_1); + xacc[i + 1] = vaddq_u64(xacc[i + 1], sum_2); + + } + + /* Operate on the remaining NEON lanes 2 at a time. */ + for (; i < XXH3_NEON_LANES / 2; i++) { + + /* data_vec = xinput[i]; */ + uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + /* acc_vec_2 = swap(data_vec) */ + uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* For two lanes, just use VMOVN and VSHRN. */ + /* data_key_lo = data_key & 0xFFFFFFFF; */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* data_key_hi = data_key >> 32; */ + uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32); + /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */ + uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi); + /* Same Clang workaround as before */ + XXH_COMPILER_GUARD_CLANG_NEON(sum); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64(xacc[i], sum); + } + + } + } + XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon) -XXH_FORCE_INLINE void -XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 15) == 0); - - { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc; - uint8_t const* xsecret = (uint8_t const*) secret; - - size_t i; - /* WASM uses operator overloads and doesn't need these. 
*/ -#ifndef __wasm_simd128__ - /* { prime32_1, prime32_1 } */ - uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1); - /* { 0, prime32_1, 0, prime32_1 } */ - uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32)); -#endif + XXH_FORCE_INLINE + void XXH3_scrambleAcc_neon(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT secret) { - /* AArch64 uses both scalar and neon at the same time */ - for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { - XXH3_scalarScrambleRound(acc, secret, i); - } - for (i=0; i < XXH3_NEON_LANES / 2; i++) { - /* xacc[i] ^= (xacc[i] >> 47); */ - uint64x2_t acc_vec = xacc[i]; - uint64x2_t shifted = vshrq_n_u64(acc_vec, 47); - uint64x2_t data_vec = veorq_u64(acc_vec, shifted); - - /* xacc[i] ^= xsecret[i]; */ - uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); - uint64x2_t data_key = veorq_u64(data_vec, key_vec); + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { + + xxh_aliasing_uint64x2_t *xacc = (xxh_aliasing_uint64x2_t *)acc; + uint8_t const *xsecret = (uint8_t const *)secret; + + size_t i; + /* WASM uses operator overloads and doesn't need these. */ + #ifndef __wasm_simd128__ + /* { prime32_1, prime32_1 } */ + uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1); + /* { 0, prime32_1, 0, prime32_1 } */ + uint32x4_t const kPrimeHi = + vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32)); + #endif + + /* AArch64 uses both scalar and neon at the same time */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + + XXH3_scalarScrambleRound(acc, secret, i); + + } + + for (i = 0; i < XXH3_NEON_LANES / 2; i++) { + + /* xacc[i] ^= (xacc[i] >> 47); */ + uint64x2_t acc_vec = xacc[i]; + uint64x2_t shifted = vshrq_n_u64(acc_vec, 47); + uint64x2_t data_vec = veorq_u64(acc_vec, shifted); + + /* xacc[i] ^= xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t data_key = veorq_u64(data_vec, key_vec); /* xacc[i] *= XXH_PRIME32_1 */ -#ifdef __wasm_simd128__ - /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */ - xacc[i] = data_key * XXH_PRIME32_1; -#else - /* - * Expanded version with portable NEON intrinsics - * - * lo(x) * lo(y) + (hi(x) * lo(y) << 32) - * - * prod_hi = hi(data_key) * lo(prime) << 32 - * - * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector - * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits - * and avoid the shift. - */ - uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi); - /* Extract low bits for vmlal_u32 */ - uint32x2_t data_key_lo = vmovn_u64(data_key); - /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */ - xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo); -#endif - } + #ifdef __wasm_simd128__ + /* SIMD128 has multiply by u64x2, use it instead of expanding and + * scalarizing */ + xacc[i] = data_key * XXH_PRIME32_1; + #else + /* + * Expanded version with portable NEON intrinsics + * + * lo(x) * lo(y) + (hi(x) * lo(y) << 32) + * + * prod_hi = hi(data_key) * lo(prime) << 32 + * + * Since we only need 32 bits of this multiply a trick can be used, + * reinterpreting the vector as a uint32x4_t and multiplying by { 0, + * prime, 0, prime } to cancel out the unwanted bits and avoid the shift. 
+ */ + uint32x4_t prod_hi = vmulq_u32(vreinterpretq_u32_u64(data_key), kPrimeHi); + /* Extract low bits for vmlal_u32 */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */ + xacc[i] = + vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo); + #endif + } + + } + } -#endif -#if (XXH_VECTOR == XXH_VSX) + #endif + + #if (XXH_VECTOR == XXH_VSX) + +XXH_FORCE_INLINE void XXH3_accumulate_512_vsx(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT input, + const void *XXH_RESTRICT secret) { + + /* presumed aligned */ + xxh_aliasing_u64x2 *const xacc = (xxh_aliasing_u64x2 *)acc; + xxh_u8 const *const xinput = + (xxh_u8 const *)input; /* no alignment restriction */ + xxh_u8 const *const xsecret = + (xxh_u8 const *)secret; /* no alignment restriction */ + xxh_u64x2 const v32 = {32, 32}; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { + + /* data_vec = xinput[i]; */ + xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16 * i); + /* key_vec = xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16 * i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + /* shuffled = (data_key << 32) | (data_key >> 32); */ + xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); + /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & + * 0xFFFFFFFF); */ + xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); + /* acc_vec = xacc[i]; */ + xxh_u64x2 acc_vec = xacc[i]; + acc_vec += product; + + /* swap high and low halves */ + #ifdef __s390x__ + acc_vec += vec_permi(data_vec, data_vec, 2); + #else + acc_vec += vec_xxpermdi(data_vec, data_vec, 2); + #endif + xacc[i] = acc_vec; + + } + +} + +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx) + + XXH_FORCE_INLINE + void XXH3_scrambleAcc_vsx(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT secret) { + + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { -XXH_FORCE_INLINE void -XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - /* presumed aligned */ - xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; - xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */ - xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */ - xxh_u64x2 const v32 = { 32, 32 }; - size_t i; + xxh_aliasing_u64x2 *const xacc = (xxh_aliasing_u64x2 *)acc; + const xxh_u8 *const xsecret = (const xxh_u8 *)secret; + /* constants */ + xxh_u64x2 const v32 = {32, 32}; + xxh_u64x2 const v47 = {47, 47}; + xxh_u32x4 const prime = {XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, + XXH_PRIME32_1}; + size_t i; for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { - /* data_vec = xinput[i]; */ - xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i); - /* key_vec = xsecret[i]; */ - xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); - xxh_u64x2 const data_key = data_vec ^ key_vec; - /* shuffled = (data_key << 32) | (data_key >> 32); */ - xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); - /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */ - xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); - /* acc_vec = xacc[i]; */ - xxh_u64x2 acc_vec = xacc[i]; - acc_vec += product; - - /* swap high and low halves */ -#ifdef __s390x__ - acc_vec += vec_permi(data_vec, data_vec, 2); -#else - acc_vec += vec_xxpermdi(data_vec, data_vec, 2); -#endif - xacc[i] = acc_vec; + + /* xacc[i] ^= (xacc[i] 
>> 47); */ + xxh_u64x2 const acc_vec = xacc[i]; + xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); + + /* xacc[i] ^= xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16 * i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + + /* xacc[i] *= XXH_PRIME32_1 */ + /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & + * 0xFFFFFFFF); */ + xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime); + /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */ + xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime); + xacc[i] = prod_odd + (prod_even << v32); + } + + } + } -XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx) -XXH_FORCE_INLINE void -XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) -{ - XXH_ASSERT((((size_t)acc) & 15) == 0); - - { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc; - const xxh_u8* const xsecret = (const xxh_u8*) secret; - /* constants */ - xxh_u64x2 const v32 = { 32, 32 }; - xxh_u64x2 const v47 = { 47, 47 }; - xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 }; - size_t i; - for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { - /* xacc[i] ^= (xacc[i] >> 47); */ - xxh_u64x2 const acc_vec = xacc[i]; - xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); - - /* xacc[i] ^= xsecret[i]; */ - xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i); - xxh_u64x2 const data_key = data_vec ^ key_vec; + #endif + + #if (XXH_VECTOR == XXH_SVE) + +XXH_FORCE_INLINE void XXH3_accumulate_512_sve(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT input, + const void *XXH_RESTRICT secret) { + + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc); + ACCRND(vacc, 0); + svst1_u64(mask, xacc, vacc); + + } else if (element_count == 2) { /* sve128 */ + + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + + } else { + + svbool_t mask = svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + + } - /* xacc[i] *= XXH_PRIME32_1 */ - /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */ - xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime); - /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */ - xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime); - xacc[i] = prod_odd + (prod_even << v32); - } } } -#endif +XXH_FORCE_INLINE void XXH3_accumulate_sve(xxh_u64 *XXH_RESTRICT acc, + const xxh_u8 *XXH_RESTRICT input, + const xxh_u8 *XXH_RESTRICT secret, + size_t nbStripes) { -#if (XXH_VECTOR == XXH_SVE) + if (nbStripes != 0) { -XXH_FORCE_INLINE void -XXH3_accumulate_512_sve( 
void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - uint64_t *xacc = (uint64_t *)acc; + uint64_t *xacc = (uint64_t *)acc; const uint64_t *xinput = (const uint64_t *)(const void *)input; const uint64_t *xsecret = (const uint64_t *)(const void *)secret; - svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); - uint64_t element_count = svcntd(); + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); if (element_count >= 8) { - svbool_t mask = svptrue_pat_b64(SV_VL8); - svuint64_t vacc = svld1_u64(mask, xacc); + + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc + 0); + do { + + /* svprfd(svbool_t, void *, enum svfprop); */ + svprfd(mask, xinput + 128, SV_PLDL1STRM); ACCRND(vacc, 0); - svst1_u64(mask, xacc, vacc); - } else if (element_count == 2) { /* sve128 */ - svbool_t mask = svptrue_pat_b64(SV_VL2); - svuint64_t acc0 = svld1_u64(mask, xacc + 0); - svuint64_t acc1 = svld1_u64(mask, xacc + 2); - svuint64_t acc2 = svld1_u64(mask, xacc + 4); - svuint64_t acc3 = svld1_u64(mask, xacc + 6); + xinput += 8; + xsecret += 1; + nbStripes--; + + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, vacc); + + } else if (element_count == 2) { /* sve128 */ + + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + do { + + svprfd(mask, xinput + 128, SV_PLDL1STRM); ACCRND(acc0, 0); ACCRND(acc1, 2); ACCRND(acc2, 4); ACCRND(acc3, 6); - svst1_u64(mask, xacc + 0, acc0); - svst1_u64(mask, xacc + 2, acc1); - svst1_u64(mask, xacc + 4, acc2); - svst1_u64(mask, xacc + 6, acc3); + xinput += 8; + xsecret += 1; + nbStripes--; + + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + } else { - svbool_t mask = svptrue_pat_b64(SV_VL4); - svuint64_t acc0 = svld1_u64(mask, xacc + 0); - svuint64_t acc1 = svld1_u64(mask, xacc + 4); + + svbool_t mask = svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + do { + + svprfd(mask, xinput + 128, SV_PLDL1STRM); ACCRND(acc0, 0); ACCRND(acc1, 4); - svst1_u64(mask, xacc + 0, acc0); - svst1_u64(mask, xacc + 4, acc1); - } -} + xinput += 8; + xsecret += 1; + nbStripes--; + + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); -XXH_FORCE_INLINE void -XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc, - const xxh_u8* XXH_RESTRICT input, - const xxh_u8* XXH_RESTRICT secret, - size_t nbStripes) -{ - if (nbStripes != 0) { - uint64_t *xacc = (uint64_t *)acc; - const uint64_t *xinput = (const uint64_t *)(const void *)input; - const uint64_t *xsecret = (const uint64_t *)(const void *)secret; - svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); - uint64_t element_count = svcntd(); - if (element_count >= 8) { - svbool_t mask = svptrue_pat_b64(SV_VL8); - svuint64_t vacc = svld1_u64(mask, xacc + 0); - do { - /* svprfd(svbool_t, void *, enum svfprop); */ - svprfd(mask, xinput + 128, SV_PLDL1STRM); - ACCRND(vacc, 0); - xinput += 8; - xsecret += 1; - nbStripes--; - } while (nbStripes != 0); - - svst1_u64(mask, xacc + 0, vacc); - } else if (element_count == 2) { /* sve128 */ - svbool_t mask = svptrue_pat_b64(SV_VL2); - svuint64_t acc0 = 
svld1_u64(mask, xacc + 0); - svuint64_t acc1 = svld1_u64(mask, xacc + 2); - svuint64_t acc2 = svld1_u64(mask, xacc + 4); - svuint64_t acc3 = svld1_u64(mask, xacc + 6); - do { - svprfd(mask, xinput + 128, SV_PLDL1STRM); - ACCRND(acc0, 0); - ACCRND(acc1, 2); - ACCRND(acc2, 4); - ACCRND(acc3, 6); - xinput += 8; - xsecret += 1; - nbStripes--; - } while (nbStripes != 0); - - svst1_u64(mask, xacc + 0, acc0); - svst1_u64(mask, xacc + 2, acc1); - svst1_u64(mask, xacc + 4, acc2); - svst1_u64(mask, xacc + 6, acc3); - } else { - svbool_t mask = svptrue_pat_b64(SV_VL4); - svuint64_t acc0 = svld1_u64(mask, xacc + 0); - svuint64_t acc1 = svld1_u64(mask, xacc + 4); - do { - svprfd(mask, xinput + 128, SV_PLDL1STRM); - ACCRND(acc0, 0); - ACCRND(acc1, 4); - xinput += 8; - xsecret += 1; - nbStripes--; - } while (nbStripes != 0); - - svst1_u64(mask, xacc + 0, acc0); - svst1_u64(mask, xacc + 4, acc1); - } } + + } + } -#endif + #endif -/* scalar variants - universal */ + /* scalar variants - universal */ -#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__)) + #if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__)) /* * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they * emit an excess mask and a full 64-bit multiply-add (MADD X-form). * - * While this might not seem like much, as AArch64 is a 64-bit architecture, only - * big Cortex designs have a full 64-bit multiplier. + * While this might not seem like much, as AArch64 is a 64-bit architecture, + * only big Cortex designs have a full 64-bit multiplier. * * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit * multiplies expand to 2-3 multiplies in microcode. This has a major penalty * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline. * - * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does - * not have this penalty and does the mask automatically. + * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) + * which does not have this penalty and does the mask automatically. */ -XXH_FORCE_INLINE xxh_u64 -XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) -{ - xxh_u64 ret; - /* note: %x = 64-bit register, %w = 32-bit register */ - __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc)); - return ret; -} -#else -XXH_FORCE_INLINE xxh_u64 -XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc) -{ - return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc; +XXH_FORCE_INLINE xxh_u64 XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, + xxh_u64 acc) { + + xxh_u64 ret; + /* note: %x = 64-bit register, %w = 32-bit register */ + __asm__("umaddl %x0, %w1, %w2, %x3" + : "=r"(ret) + : "r"(lhs), "r"(rhs), "r"(acc)); + return ret; + } -#endif + + #else +XXH_FORCE_INLINE xxh_u64 XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, + xxh_u64 acc) { + + return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc; + +} + + #endif /*! * @internal * @brief Scalar round for @ref XXH3_accumulate_512_scalar(). * - * This is extracted to its own function because the NEON path uses a combination - * of NEON and scalar. + * This is extracted to its own function because the NEON path uses a + * combination of NEON and scalar. 
*/ -XXH_FORCE_INLINE void -XXH3_scalarRound(void* XXH_RESTRICT acc, - void const* XXH_RESTRICT input, - void const* XXH_RESTRICT secret, - size_t lane) -{ - xxh_u64* xacc = (xxh_u64*) acc; - xxh_u8 const* xinput = (xxh_u8 const*) input; - xxh_u8 const* xsecret = (xxh_u8 const*) secret; - XXH_ASSERT(lane < XXH_ACC_NB); - XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0); - { - xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8); - xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8); - xacc[lane ^ 1] += data_val; /* swap adjacent lanes */ - xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]); - } +XXH_FORCE_INLINE void XXH3_scalarRound(void *XXH_RESTRICT acc, + void const *XXH_RESTRICT input, + void const *XXH_RESTRICT secret, + size_t lane) { + + xxh_u64 *xacc = (xxh_u64 *)acc; + xxh_u8 const *xinput = (xxh_u8 const *)input; + xxh_u8 const *xsecret = (xxh_u8 const *)secret; + XXH_ASSERT(lane < XXH_ACC_NB); + XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN - 1)) == 0); + { + + xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8); + xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8); + xacc[lane ^ 1] += data_val; /* swap adjacent lanes */ + xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, + data_key >> 32, xacc[lane]); + + } + } /*! * @internal * @brief Processes a 64 byte block of data using the scalar path. */ -XXH_FORCE_INLINE void -XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, - const void* XXH_RESTRICT input, - const void* XXH_RESTRICT secret) -{ - size_t i; - /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */ -#if defined(__GNUC__) && !defined(__clang__) \ - && (defined(__arm__) || defined(__thumb2__)) \ - && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \ - && XXH_SIZE_OPT <= 0 -# pragma GCC unroll 8 -#endif - for (i=0; i < XXH_ACC_NB; i++) { - XXH3_scalarRound(acc, input, secret, i); - } +XXH_FORCE_INLINE void XXH3_accumulate_512_scalar( + void *XXH_RESTRICT acc, const void *XXH_RESTRICT input, + const void *XXH_RESTRICT secret) { + + size_t i; + /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on + * ARMv6. */ + #if defined(__GNUC__) && !defined(__clang__) && \ + (defined(__arm__) || defined(__thumb2__)) && \ + defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes \ + bytes */ \ + && XXH_SIZE_OPT <= 0 + #pragma GCC unroll 8 + #endif + for (i = 0; i < XXH_ACC_NB; i++) { + + XXH3_scalarRound(acc, input, secret, i); + + } + } + XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar) -/*! - * @internal - * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar(). - * - * This is extracted to its own function because the NEON path uses a combination - * of NEON and scalar. - */ -XXH_FORCE_INLINE void -XXH3_scalarScrambleRound(void* XXH_RESTRICT acc, - void const* XXH_RESTRICT secret, - size_t lane) -{ - xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */ - const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */ - XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0); - XXH_ASSERT(lane < XXH_ACC_NB); - { - xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8); - xxh_u64 acc64 = xacc[lane]; - acc64 = XXH_xorshift64(acc64, 47); - acc64 ^= key64; - acc64 *= XXH_PRIME32_1; - xacc[lane] = acc64; - } + /*! + * @internal + * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar(). 
+ * + * This is extracted to its own function because the NEON path uses a + * combination of NEON and scalar. + */ + XXH_FORCE_INLINE + void XXH3_scalarScrambleRound(void *XXH_RESTRICT acc, + void const *XXH_RESTRICT secret, + size_t lane) { + + xxh_u64 *const xacc = (xxh_u64 *)acc; /* presumed aligned */ + const xxh_u8 *const xsecret = + (const xxh_u8 *)secret; /* no alignment restriction */ + XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN - 1)) == 0); + XXH_ASSERT(lane < XXH_ACC_NB); + { + + xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8); + xxh_u64 acc64 = xacc[lane]; + acc64 = XXH_xorshift64(acc64, 47); + acc64 ^= key64; + acc64 *= XXH_PRIME32_1; + xacc[lane] = acc64; + + } + } /*! * @internal * @brief Scrambles the accumulators after a large chunk has been read */ -XXH_FORCE_INLINE void -XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) -{ - size_t i; - for (i=0; i < XXH_ACC_NB; i++) { - XXH3_scalarScrambleRound(acc, secret, i); +XXH_FORCE_INLINE void XXH3_scrambleAcc_scalar(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT secret) { + + size_t i; + for (i = 0; i < XXH_ACC_NB; i++) { + + XXH3_scalarScrambleRound(acc, secret, i); + + } + +} + +XXH_FORCE_INLINE void XXH3_initCustomSecret_scalar( + void *XXH_RESTRICT customSecret, xxh_u64 seed64) { + + /* + * We need a separate pointer for the hack below, + * which requires a non-const pointer. + * Any decent compiler will optimize this out otherwise. + */ + const xxh_u8 *kSecretPtr = XXH3_kSecret; + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); + + #if defined(__GNUC__) && defined(__aarch64__) + /* + * UGLY HACK: + * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are + * placed sequentially, in order, at the top of the unrolled loop. + * + * While MOVK is great for generating constants (2 cycles for a 64-bit + * constant compared to 4 cycles for LDR), it fights for bandwidth with + * the arithmetic instructions. + * + * I L S + * MOVK + * MOVK + * MOVK + * MOVK + * ADD + * SUB STR + * STR + * By forcing loads from memory (as the asm line causes the compiler to assume + * that XXH3_kSecretPtr has been changed), the pipelines are used more + * efficiently: + * I L S + * LDR + * ADD LDR + * SUB STR + * STR + * + * See XXH3_NEON_LANES for details on the pipsline. + * + * XXH3_64bits_withSeed, len == 256, Snapdragon 835 + * without hack: 2654.4 MB/s + * with hack: 3202.9 MB/s + */ + XXH_COMPILER_GUARD(kSecretPtr); + #endif + { + + int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16; + int i; + for (i = 0; i < nbRounds; i++) { + + /* + * The asm hack causes the compiler to assume that kSecretPtr aliases with + * customSecret, and on aarch64, this prevented LDP from merging two + * loads together for free. Putting the loads together before the stores + * properly generates LDP. + */ + xxh_u64 lo = XXH_readLE64(kSecretPtr + 16 * i) + seed64; + xxh_u64 hi = XXH_readLE64(kSecretPtr + 16 * i + 8) - seed64; + XXH_writeLE64((xxh_u8 *)customSecret + 16 * i, lo); + XXH_writeLE64((xxh_u8 *)customSecret + 16 * i + 8, hi); + } -} -XXH_FORCE_INLINE void -XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64) -{ - /* - * We need a separate pointer for the hack below, - * which requires a non-const pointer. - * Any decent compiler will optimize this out otherwise. 
- */ - const xxh_u8* kSecretPtr = XXH3_kSecret; - XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); + } -#if defined(__GNUC__) && defined(__aarch64__) - /* - * UGLY HACK: - * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are - * placed sequentially, in order, at the top of the unrolled loop. - * - * While MOVK is great for generating constants (2 cycles for a 64-bit - * constant compared to 4 cycles for LDR), it fights for bandwidth with - * the arithmetic instructions. - * - * I L S - * MOVK - * MOVK - * MOVK - * MOVK - * ADD - * SUB STR - * STR - * By forcing loads from memory (as the asm line causes the compiler to assume - * that XXH3_kSecretPtr has been changed), the pipelines are used more - * efficiently: - * I L S - * LDR - * ADD LDR - * SUB STR - * STR - * - * See XXH3_NEON_LANES for details on the pipsline. - * - * XXH3_64bits_withSeed, len == 256, Snapdragon 835 - * without hack: 2654.4 MB/s - * with hack: 3202.9 MB/s - */ - XXH_COMPILER_GUARD(kSecretPtr); -#endif - { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16; - int i; - for (i=0; i < nbRounds; i++) { - /* - * The asm hack causes the compiler to assume that kSecretPtr aliases with - * customSecret, and on aarch64, this prevented LDP from merging two - * loads together for free. Putting the loads together before the stores - * properly generates LDP. - */ - xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64; - xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64; - XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo); - XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi); - } } } +typedef void (*XXH3_f_accumulate)(xxh_u64 *XXH_RESTRICT, + const xxh_u8 *XXH_RESTRICT, + const xxh_u8 *XXH_RESTRICT, size_t); +typedef void (*XXH3_f_scrambleAcc)(void *XXH_RESTRICT, const void *); +typedef void (*XXH3_f_initCustomSecret)(void *XXH_RESTRICT, xxh_u64); -typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t); -typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*); -typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64); + #if (XXH_VECTOR == XXH_AVX512) + #define XXH3_accumulate_512 XXH3_accumulate_512_avx512 + #define XXH3_accumulate XXH3_accumulate_avx512 + #define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 + #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 -#if (XXH_VECTOR == XXH_AVX512) + #elif (XXH_VECTOR == XXH_AVX2) -#define XXH3_accumulate_512 XXH3_accumulate_512_avx512 -#define XXH3_accumulate XXH3_accumulate_avx512 -#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 -#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 + #define XXH3_accumulate_512 XXH3_accumulate_512_avx2 + #define XXH3_accumulate XXH3_accumulate_avx2 + #define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 + #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 -#elif (XXH_VECTOR == XXH_AVX2) + #elif (XXH_VECTOR == XXH_SSE2) -#define XXH3_accumulate_512 XXH3_accumulate_512_avx2 -#define XXH3_accumulate XXH3_accumulate_avx2 -#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 -#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 + #define XXH3_accumulate_512 XXH3_accumulate_512_sse2 + #define XXH3_accumulate XXH3_accumulate_sse2 + #define XXH3_scrambleAcc XXH3_scrambleAcc_sse2 + #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 -#elif (XXH_VECTOR == XXH_SSE2) + #elif (XXH_VECTOR == XXH_NEON) -#define XXH3_accumulate_512 XXH3_accumulate_512_sse2 -#define XXH3_accumulate XXH3_accumulate_sse2 -#define XXH3_scrambleAcc 
XXH3_scrambleAcc_sse2 -#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 + #define XXH3_accumulate_512 XXH3_accumulate_512_neon + #define XXH3_accumulate XXH3_accumulate_neon + #define XXH3_scrambleAcc XXH3_scrambleAcc_neon + #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar -#elif (XXH_VECTOR == XXH_NEON) + #elif (XXH_VECTOR == XXH_VSX) -#define XXH3_accumulate_512 XXH3_accumulate_512_neon -#define XXH3_accumulate XXH3_accumulate_neon -#define XXH3_scrambleAcc XXH3_scrambleAcc_neon -#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + #define XXH3_accumulate_512 XXH3_accumulate_512_vsx + #define XXH3_accumulate XXH3_accumulate_vsx + #define XXH3_scrambleAcc XXH3_scrambleAcc_vsx + #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar -#elif (XXH_VECTOR == XXH_VSX) + #elif (XXH_VECTOR == XXH_SVE) + #define XXH3_accumulate_512 XXH3_accumulate_512_sve + #define XXH3_accumulate XXH3_accumulate_sve + #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar + #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar -#define XXH3_accumulate_512 XXH3_accumulate_512_vsx -#define XXH3_accumulate XXH3_accumulate_vsx -#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx -#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + #else /* scalar */ -#elif (XXH_VECTOR == XXH_SVE) -#define XXH3_accumulate_512 XXH3_accumulate_512_sve -#define XXH3_accumulate XXH3_accumulate_sve -#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar -#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + #define XXH3_accumulate_512 XXH3_accumulate_512_scalar + #define XXH3_accumulate XXH3_accumulate_scalar + #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar + #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar -#else /* scalar */ + #endif -#define XXH3_accumulate_512 XXH3_accumulate_512_scalar -#define XXH3_accumulate XXH3_accumulate_scalar -#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar -#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + #if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */ + #undef XXH3_initCustomSecret + #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + #endif -#endif +XXH_FORCE_INLINE void XXH3_hashLong_internal_loop( + xxh_u64 *XXH_RESTRICT acc, const xxh_u8 *XXH_RESTRICT input, size_t len, + const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { -#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */ -# undef XXH3_initCustomSecret -# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar -#endif + size_t const nbStripesPerBlock = + (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; + size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock; + size_t const nb_blocks = (len - 1) / block_len; -XXH_FORCE_INLINE void -XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, - const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble) -{ - size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; - size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock; - size_t const nb_blocks = (len - 1) / block_len; + size_t n; + + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + + for (n = 0; n < nb_blocks; n++) { + + f_acc(acc, input + n * block_len, secret, nbStripesPerBlock); + f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); + + } - size_t n; + /* last partial block */ + XXH_ASSERT(len > XXH_STRIPE_LEN); + { - XXH_ASSERT(secretSize >= 
XXH3_SECRET_SIZE_MIN); + size_t const nbStripes = + ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; + XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); + f_acc(acc, input + nb_blocks * block_len, secret, nbStripes); + + /* last stripe */ + { + + const xxh_u8 *const p = input + len - XXH_STRIPE_LEN; + #define XXH_SECRET_LASTACC_START \ + 7 /* not aligned on 8, last secret is different from acc & scrambler \ + */ + XXH3_accumulate_512( + acc, p, + secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START); - for (n = 0; n < nb_blocks; n++) { - f_acc(acc, input + n*block_len, secret, nbStripesPerBlock); - f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); } - /* last partial block */ - XXH_ASSERT(len > XXH_STRIPE_LEN); - { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; - XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); - f_acc(acc, input + nb_blocks*block_len, secret, nbStripes); + } - /* last stripe */ - { const xxh_u8* const p = input + len - XXH_STRIPE_LEN; -#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */ - XXH3_accumulate_512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START); - } } } -XXH_FORCE_INLINE xxh_u64 -XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret) -{ - return XXH3_mul128_fold64( - acc[0] ^ XXH_readLE64(secret), - acc[1] ^ XXH_readLE64(secret+8) ); +XXH_FORCE_INLINE xxh_u64 XXH3_mix2Accs(const xxh_u64 *XXH_RESTRICT acc, + const xxh_u8 *XXH_RESTRICT secret) { + + return XXH3_mul128_fold64(acc[0] ^ XXH_readLE64(secret), + acc[1] ^ XXH_readLE64(secret + 8)); + } -static XXH64_hash_t -XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start) -{ - xxh_u64 result64 = start; - size_t i = 0; - - for (i = 0; i < 4; i++) { - result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i); -#if defined(__clang__) /* Clang */ \ - && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \ - && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ - && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ - /* - * UGLY HACK: - * Prevent autovectorization on Clang ARMv7-a. Exact same problem as - * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b. - * XXH3_64bits, len == 256, Snapdragon 835: - * without hack: 2063.7 MB/s - * with hack: 2560.7 MB/s - */ - XXH_COMPILER_GUARD(result64); -#endif - } +static XXH64_hash_t XXH3_mergeAccs(const xxh_u64 *XXH_RESTRICT acc, + const xxh_u8 *XXH_RESTRICT secret, + xxh_u64 start) { + + xxh_u64 result64 = start; + size_t i = 0; + + for (i = 0; i < 4; i++) { + + result64 += XXH3_mix2Accs(acc + 2 * i, secret + 16 * i); + #if defined(__clang__) /* Clang */ \ + && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \ + && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ + /* + * UGLY HACK: + * Prevent autovectorization on Clang ARMv7-a. Exact same problem as + * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b. 
+ * XXH3_64bits, len == 256, Snapdragon 835: + * without hack: 2063.7 MB/s + * with hack: 2560.7 MB/s + */ + XXH_COMPILER_GUARD(result64); + #endif + + } + + return XXH3_avalanche(result64); - return XXH3_avalanche(result64); } -#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \ - XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 } + #define XXH3_INIT_ACC \ + { \ + \ + XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \ + XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 \ + \ + } -XXH_FORCE_INLINE XXH64_hash_t -XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len, - const void* XXH_RESTRICT secret, size_t secretSize, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble) -{ - XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; +XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_internal( + const void *XXH_RESTRICT input, size_t len, const void *XXH_RESTRICT secret, + size_t secretSize, XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { + + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; - XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble); + XXH3_hashLong_internal_loop(acc, (const xxh_u8 *)input, len, + (const xxh_u8 *)secret, secretSize, f_acc, + f_scramble); + + /* converge into final hash */ + XXH_STATIC_ASSERT(sizeof(acc) == 64); + /* do not align on 8, so that the secret is different from the accumulator + */ + #define XXH_SECRET_MERGEACCS_START 11 + XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); + return XXH3_mergeAccs(acc, + (const xxh_u8 *)secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)len * XXH_PRIME64_1); - /* converge into final hash */ - XXH_STATIC_ASSERT(sizeof(acc) == 64); - /* do not align on 8, so that the secret is different from the accumulator */ -#define XXH_SECRET_MERGEACCS_START 11 - XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); - return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1); } /* * It's important for performance to transmit secret's size (when it's static) * so that the compiler can properly optimize the vectorized loop. - * This makes a big performance difference for "medium" keys (<1 KB) when using AVX instruction set. - * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE - * breaks -Og, this is XXH_NO_INLINE. + * This makes a big performance difference for "medium" keys (<1 KB) when using + * AVX instruction set. When the secret size is unknown, or on GCC 12 where the + * mix of NO_INLINE and FORCE_INLINE breaks -Og, this is XXH_NO_INLINE. */ -XXH3_WITH_SECRET_INLINE XXH64_hash_t -XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) -{ - (void)seed64; - return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc); +XXH3_WITH_SECRET_INLINE XXH64_hash_t XXH3_hashLong_64b_withSecret( + const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, + const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) { + + (void)seed64; + return XXH3_hashLong_64b_internal(input, len, secret, secretLen, + XXH3_accumulate, XXH3_scrambleAcc); + } /* * It's preferable for performance that XXH3_hashLong is not inlined, - * as it results in a smaller function for small data, easier to the instruction cache. 
- * Note that inside this no_inline function, we do inline the internal loop, - * and provide a statically defined secret size to allow optimization of vector loop. + * as it results in a smaller function for small data, easier to the instruction + * cache. Note that inside this no_inline function, we do inline the internal + * loop, and provide a statically defined secret size to allow optimization of + * vector loop. */ -XXH_NO_INLINE XXH_PUREF XXH64_hash_t -XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) -{ - (void)seed64; (void)secret; (void)secretLen; - return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc); +XXH_NO_INLINE XXH_PUREF XXH64_hash_t XXH3_hashLong_64b_default( + const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, + const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) { + + (void)seed64; + (void)secret; + (void)secretLen; + return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, + sizeof(XXH3_kSecret), XXH3_accumulate, + XXH3_scrambleAcc); + } /* * XXH3_hashLong_64b_withSeed(): - * Generate a custom key based on alteration of default XXH3_kSecret with the seed, - * and then use this key for long mode hashing. + * Generate a custom key based on alteration of default XXH3_kSecret with the + * seed, and then use this key for long mode hashing. * * This operation is decently fast but nonetheless costs a little bit of time. * Try to avoid it whenever possible (typically when seed==0). @@ -5921,98 +6688,116 @@ XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, * It's important for performance that XXH3_hashLong is not inlined. Not sure * why (uop cache maybe?), but the difference is large and easily measurable. */ -XXH_FORCE_INLINE XXH64_hash_t -XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len, - XXH64_hash_t seed, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble, - XXH3_f_initCustomSecret f_initSec) -{ -#if XXH_SIZE_OPT <= 0 - if (seed == 0) - return XXH3_hashLong_64b_internal(input, len, - XXH3_kSecret, sizeof(XXH3_kSecret), - f_acc, f_scramble); -#endif - { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; - f_initSec(secret, seed); - return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret), - f_acc, f_scramble); - } +XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_withSeed_internal( + const void *input, size_t len, XXH64_hash_t seed, XXH3_f_accumulate f_acc, + XXH3_f_scrambleAcc f_scramble, XXH3_f_initCustomSecret f_initSec) { + + #if XXH_SIZE_OPT <= 0 + if (seed == 0) + return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, + sizeof(XXH3_kSecret), f_acc, f_scramble); + #endif + { + + XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed); + return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret), f_acc, + f_scramble); + + } + } /* * It's important for performance that XXH3_hashLong is not inlined. 
*/ -XXH_NO_INLINE XXH64_hash_t -XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) -{ - (void)secret; (void)secretLen; - return XXH3_hashLong_64b_withSeed_internal(input, len, seed, - XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); -} +XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_withSeed( + const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed, + const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) { + (void)secret; + (void)secretLen; + return XXH3_hashLong_64b_withSeed_internal(input, len, seed, XXH3_accumulate, + XXH3_scrambleAcc, + XXH3_initCustomSecret); + +} -typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t, - XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t); +typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void *XXH_RESTRICT, size_t, + XXH64_hash_t, + const xxh_u8 *XXH_RESTRICT, size_t); XXH_FORCE_INLINE XXH64_hash_t -XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, - XXH3_hashLong64_f f_hashLong) -{ - XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); - /* - * If an action is to be taken if `secretLen` condition is not respected, - * it should be done here. - * For now, it's a contract pre-condition. - * Adding a check and a branch here would cost performance at every hash. - * Also, note that function signature doesn't offer room to return an error. - */ - if (len <= 16) - return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); - if (len <= 128) - return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); - if (len <= XXH3_MIDSIZE_MAX) - return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); - return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen); -} +XXH3_64bits_internal(const void *XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const void *XXH_RESTRICT secret, + size_t secretLen, XXH3_hashLong64_f f_hashLong) { + + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secretLen` condition is not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + * Also, note that function signature doesn't offer room to return an error. + */ + if (len <= 16) + return XXH3_len_0to16_64b((const xxh_u8 *)input, len, + (const xxh_u8 *)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_64b((const xxh_u8 *)input, len, + (const xxh_u8 *)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_64b((const xxh_u8 *)input, len, + (const xxh_u8 *)secret, secretLen, seed64); + return f_hashLong(input, len, seed64, (const xxh_u8 *)secret, secretLen); +} /* === Public entry point === */ /*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length) -{ - return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void *input, + size_t length) { + + return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, + sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); + } /*! 
@ingroup XXH3_family */ XXH_PUBLIC_API XXH64_hash_t -XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize) -{ - return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); +XXH3_64bits_withSecret(XXH_NOESCAPE const void *input, size_t length, + XXH_NOESCAPE const void *secret, size_t secretSize) { + + return XXH3_64bits_internal(input, length, 0, secret, secretSize, + XXH3_hashLong_64b_withSecret); + } /*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH64_hash_t -XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed) -{ - return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed); -} +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void *input, + size_t length, + XXH64_hash_t seed) { + + return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, + sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed); -XXH_PUBLIC_API XXH64_hash_t -XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) -{ - if (length <= XXH3_MIDSIZE_MAX) - return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); - return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize); } +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecretandSeed( + XXH_NOESCAPE const void *input, size_t length, + XXH_NOESCAPE const void *secret, size_t secretSize, XXH64_hash_t seed) { + + if (length <= XXH3_MIDSIZE_MAX) + return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, + sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_64b_withSecret(input, length, seed, + (const xxh_u8 *)secret, secretSize); + +} -/* === XXH3 streaming === */ -#ifndef XXH_NO_STREAM + /* === XXH3 streaming === */ + #ifndef XXH_NO_STREAM /* * Malloc's a pointer that is always aligned to align. * @@ -6036,48 +6821,58 @@ XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH * * Align must be a power of 2 and 8 <= align <= 128. */ -static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align) -{ - XXH_ASSERT(align <= 128 && align >= 8); /* range check */ - XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */ - XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */ - { /* Overallocate to make room for manual realignment and an offset byte */ - xxh_u8* base = (xxh_u8*)XXH_malloc(s + align); - if (base != NULL) { - /* - * Get the offset needed to align this pointer. - * - * Even if the returned pointer is aligned, there will always be - * at least one byte to store the offset to the original pointer. - */ - size_t offset = align - ((size_t)base & (align - 1)); /* base % align */ - /* Add the offset for the now-aligned pointer */ - xxh_u8* ptr = base + offset; - - XXH_ASSERT((size_t)ptr % align == 0); - - /* Store the offset immediately before the returned pointer. 
*/ - ptr[-1] = (xxh_u8)offset; - return ptr; - } - return NULL; +static XXH_MALLOCF void *XXH_alignedMalloc(size_t s, size_t align) { + + XXH_ASSERT(align <= 128 && align >= 8); /* range check */ + XXH_ASSERT((align & (align - 1)) == 0); /* power of 2 */ + XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */ + { /* Overallocate to make room for manual realignment and an offset byte */ + xxh_u8 *base = (xxh_u8 *)XXH_malloc(s + align); + if (base != NULL) { + + /* + * Get the offset needed to align this pointer. + * + * Even if the returned pointer is aligned, there will always be + * at least one byte to store the offset to the original pointer. + */ + size_t offset = align - ((size_t)base & (align - 1)); /* base % align */ + /* Add the offset for the now-aligned pointer */ + xxh_u8 *ptr = base + offset; + + XXH_ASSERT((size_t)ptr % align == 0); + + /* Store the offset immediately before the returned pointer. */ + ptr[-1] = (xxh_u8)offset; + return ptr; + } + + return NULL; + + } + } + /* * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout. */ -static void XXH_alignedFree(void* p) -{ - if (p != NULL) { - xxh_u8* ptr = (xxh_u8*)p; - /* Get the offset byte we added in XXH_malloc. */ - xxh_u8 offset = ptr[-1]; - /* Free the original malloc'd pointer */ - xxh_u8* base = ptr - offset; - XXH_free(base); - } +static void XXH_alignedFree(void *p) { + + if (p != NULL) { + + xxh_u8 *ptr = (xxh_u8 *)p; + /* Get the offset byte we added in XXH_malloc. */ + xxh_u8 offset = ptr[-1]; + /* Free the original malloc'd pointer */ + xxh_u8 *base = ptr - offset; + XXH_free(base); + + } + } + /*! @ingroup XXH3_family */ /*! * @brief Allocate an @ref XXH3_state_t. @@ -6089,19 +6884,22 @@ static void XXH_alignedFree(void* p) * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) -{ - XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64); - if (state==NULL) return NULL; - XXH3_INITSTATE(state); - return state; +XXH_PUBLIC_API XXH3_state_t *XXH3_createState(void) { + + XXH3_state_t *const state = + (XXH3_state_t *)XXH_alignedMalloc(sizeof(XXH3_state_t), 64); + if (state == NULL) return NULL; + XXH3_INITSTATE(state); + return state; + } /*! @ingroup XXH3_family */ /*! * @brief Frees an @ref XXH3_state_t. * - * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState(). + * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref + * XXH3_createState(). * * @return @ref XXH_OK. * @@ -6109,98 +6907,108 @@ XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) * * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr) -{ - XXH_alignedFree(statePtr); - return XXH_OK; +XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t *statePtr) { + + XXH_alignedFree(statePtr); + return XXH_OK; + } /*! 
@ingroup XXH3_family */ -XXH_PUBLIC_API void -XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state) -{ - XXH_memcpy(dst_state, src_state, sizeof(*dst_state)); +XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t *dst_state, + XXH_NOESCAPE const XXH3_state_t *src_state) { + + XXH_memcpy(dst_state, src_state, sizeof(*dst_state)); + } -static void -XXH3_reset_internal(XXH3_state_t* statePtr, - XXH64_hash_t seed, - const void* secret, size_t secretSize) -{ - size_t const initStart = offsetof(XXH3_state_t, bufferedSize); - size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart; - XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart); - XXH_ASSERT(statePtr != NULL); - /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */ - memset((char*)statePtr + initStart, 0, initLength); - statePtr->acc[0] = XXH_PRIME32_3; - statePtr->acc[1] = XXH_PRIME64_1; - statePtr->acc[2] = XXH_PRIME64_2; - statePtr->acc[3] = XXH_PRIME64_3; - statePtr->acc[4] = XXH_PRIME64_4; - statePtr->acc[5] = XXH_PRIME32_2; - statePtr->acc[6] = XXH_PRIME64_5; - statePtr->acc[7] = XXH_PRIME32_1; - statePtr->seed = seed; - statePtr->useSeed = (seed != 0); - statePtr->extSecret = (const unsigned char*)secret; - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); - statePtr->secretLimit = secretSize - XXH_STRIPE_LEN; - statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; +static void XXH3_reset_internal(XXH3_state_t *statePtr, XXH64_hash_t seed, + const void *secret, size_t secretSize) { + + size_t const initStart = offsetof(XXH3_state_t, bufferedSize); + size_t const initLength = + offsetof(XXH3_state_t, nbStripesPerBlock) - initStart; + XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart); + XXH_ASSERT(statePtr != NULL); + /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */ + memset((char *)statePtr + initStart, 0, initLength); + statePtr->acc[0] = XXH_PRIME32_3; + statePtr->acc[1] = XXH_PRIME64_1; + statePtr->acc[2] = XXH_PRIME64_2; + statePtr->acc[3] = XXH_PRIME64_3; + statePtr->acc[4] = XXH_PRIME64_4; + statePtr->acc[5] = XXH_PRIME32_2; + statePtr->acc[6] = XXH_PRIME64_5; + statePtr->acc[7] = XXH_PRIME32_1; + statePtr->seed = seed; + statePtr->useSeed = (seed != 0); + statePtr->extSecret = (const unsigned char *)secret; + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + statePtr->secretLimit = secretSize - XXH_STRIPE_LEN; + statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; + } /*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) -{ - if (statePtr == NULL) return XXH_ERROR; - XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); - return XXH_OK; +XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t *statePtr) { + + if (statePtr == NULL) return XXH_ERROR; + XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; + } /*! 
@ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) -{ - if (statePtr == NULL) return XXH_ERROR; - XXH3_reset_internal(statePtr, 0, secret, secretSize); - if (secret == NULL) return XXH_ERROR; - if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; - return XXH_OK; +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret, + size_t secretSize) { + + if (statePtr == NULL) return XXH_ERROR; + XXH3_reset_internal(statePtr, 0, secret, secretSize); + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + return XXH_OK; + } /*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) -{ - if (statePtr == NULL) return XXH_ERROR; - if (seed==0) return XXH3_64bits_reset(statePtr); - if ((seed != statePtr->seed) || (statePtr->extSecret != NULL)) - XXH3_initCustomSecret(statePtr->customSecret, seed); - XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); - return XXH_OK; +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH64_hash_t seed) { + + if (statePtr == NULL) return XXH_ERROR; + if (seed == 0) return XXH3_64bits_reset(statePtr); + if ((seed != statePtr->seed) || (statePtr->extSecret != NULL)) + XXH3_initCustomSecret(statePtr->customSecret, seed); + XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; + } /*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64) -{ - if (statePtr == NULL) return XXH_ERROR; - if (secret == NULL) return XXH_ERROR; - if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; - XXH3_reset_internal(statePtr, seed64, secret, secretSize); - statePtr->useSeed = 1; /* always, even if seed64==0 */ - return XXH_OK; +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecretandSeed( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret, + size_t secretSize, XXH64_hash_t seed64) { + + if (statePtr == NULL) return XXH_ERROR; + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + XXH3_reset_internal(statePtr, seed64, secret, secretSize); + statePtr->useSeed = 1; /* always, even if seed64==0 */ + return XXH_OK; + } /*! * @internal * @brief Processes a large input for XXH3_update() and XXH3_digest_long(). * - * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block. + * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a + * block. 
* * @param acc Pointer to the 8 accumulator lanes - * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in the block* + * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in + * the block* * @param nbStripesPerBlock Number of stripes in a block * @param input Input pointer * @param nbStripes Number of stripes to process @@ -6210,200 +7018,233 @@ XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOE * @param f_scramble Pointer to an XXH3_scrambleAcc implementation * @return Pointer past the end of @p input after processing */ -XXH_FORCE_INLINE const xxh_u8 * -XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc, - size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock, - const xxh_u8* XXH_RESTRICT input, size_t nbStripes, - const xxh_u8* XXH_RESTRICT secret, size_t secretLimit, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble) -{ - const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE; - /* Process full blocks */ - if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) { - /* Process the initial partial block... */ - size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr; - - do { - /* Accumulate and scramble */ - f_acc(acc, input, initialSecret, nbStripesThisIter); - f_scramble(acc, secret + secretLimit); - input += nbStripesThisIter * XXH_STRIPE_LEN; - nbStripes -= nbStripesThisIter; - /* Then continue the loop with the full block size */ - nbStripesThisIter = nbStripesPerBlock; - initialSecret = secret; - } while (nbStripes >= nbStripesPerBlock); - *nbStripesSoFarPtr = 0; - } - /* Process a partial block */ - if (nbStripes > 0) { - f_acc(acc, input, initialSecret, nbStripes); - input += nbStripes * XXH_STRIPE_LEN; - *nbStripesSoFarPtr += nbStripes; - } - /* Return end pointer */ - return input; +XXH_FORCE_INLINE const xxh_u8 *XXH3_consumeStripes( + xxh_u64 *XXH_RESTRICT acc, size_t *XXH_RESTRICT nbStripesSoFarPtr, + size_t nbStripesPerBlock, const xxh_u8 *XXH_RESTRICT input, + size_t nbStripes, const xxh_u8 *XXH_RESTRICT secret, size_t secretLimit, + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { + + const xxh_u8 *initialSecret = + secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE; + /* Process full blocks */ + if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) { + + /* Process the initial partial block... 
*/ + size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr; + + do { + + /* Accumulate and scramble */ + f_acc(acc, input, initialSecret, nbStripesThisIter); + f_scramble(acc, secret + secretLimit); + input += nbStripesThisIter * XXH_STRIPE_LEN; + nbStripes -= nbStripesThisIter; + /* Then continue the loop with the full block size */ + nbStripesThisIter = nbStripesPerBlock; + initialSecret = secret; + + } while (nbStripes >= nbStripesPerBlock); + + *nbStripesSoFarPtr = 0; + + } + + /* Process a partial block */ + if (nbStripes > 0) { + + f_acc(acc, input, initialSecret, nbStripes); + input += nbStripes * XXH_STRIPE_LEN; + *nbStripesSoFarPtr += nbStripes; + + } + + /* Return end pointer */ + return input; + } -#ifndef XXH3_STREAM_USE_STACK -# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */ -# define XXH3_STREAM_USE_STACK 1 -# endif -#endif + #ifndef XXH3_STREAM_USE_STACK + #if XXH_SIZE_OPT <= 0 && \ + !defined( \ + __clang__) /* clang doesn't need additional stack space */ + #define XXH3_STREAM_USE_STACK 1 + #endif + #endif /* * Both XXH3_64bits_update and XXH3_128bits_update use this routine. */ -XXH_FORCE_INLINE XXH_errorcode -XXH3_update(XXH3_state_t* XXH_RESTRICT const state, - const xxh_u8* XXH_RESTRICT input, size_t len, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble) -{ - if (input==NULL) { - XXH_ASSERT(len == 0); - return XXH_OK; - } +XXH_FORCE_INLINE XXH_errorcode XXH3_update( + XXH3_state_t *XXH_RESTRICT const state, const xxh_u8 *XXH_RESTRICT input, + size_t len, XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { - XXH_ASSERT(state != NULL); - { const xxh_u8* const bEnd = input + len; - const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; -#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 - /* For some reason, gcc and MSVC seem to suffer greatly - * when operating accumulators directly into state. - * Operating into stack space seems to enable proper optimization. - * clang, on the other hand, doesn't seem to need this trick */ - XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; - XXH_memcpy(acc, state->acc, sizeof(acc)); -#else - xxh_u64* XXH_RESTRICT const acc = state->acc; -#endif - state->totalLen += len; - XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE); - - /* small input : just fill in tmp buffer */ - if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) { - XXH_memcpy(state->buffer + state->bufferedSize, input, len); - state->bufferedSize += (XXH32_hash_t)len; - return XXH_OK; - } + if (input == NULL) { + + XXH_ASSERT(len == 0); + return XXH_OK; + + } + + XXH_ASSERT(state != NULL); + { + + const xxh_u8 *const bEnd = input + len; + const unsigned char *const secret = + (state->extSecret == NULL) ? state->customSecret : state->extSecret; + #if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* For some reason, gcc and MSVC seem to suffer greatly + * when operating accumulators directly into state. + * Operating into stack space seems to enable proper optimization. 
+ * clang, on the other hand, doesn't seem to need this trick */ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; + XXH_memcpy(acc, state->acc, sizeof(acc)); + #else + xxh_u64 *XXH_RESTRICT const acc = state->acc; + #endif + state->totalLen += len; + XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE); + + /* small input : just fill in tmp buffer */ + if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) { + + XXH_memcpy(state->buffer + state->bufferedSize, input, len); + state->bufferedSize += (XXH32_hash_t)len; + return XXH_OK; + + } /* total input is now > XXH3_INTERNALBUFFER_SIZE */ - #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) - XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */ + #define XXH3_INTERNALBUFFER_STRIPES \ + (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) + XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == + 0); /* clean multiple */ - /* - * Internal buffer is partially filled (always, except at beginning) - * Complete it, then consume it. - */ - if (state->bufferedSize) { - size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; - XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); - input += loadSize; - XXH3_consumeStripes(acc, - &state->nbStripesSoFar, state->nbStripesPerBlock, - state->buffer, XXH3_INTERNALBUFFER_STRIPES, - secret, state->secretLimit, - f_acc, f_scramble); - state->bufferedSize = 0; - } - XXH_ASSERT(input < bEnd); - if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { - size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN; - input = XXH3_consumeStripes(acc, - &state->nbStripesSoFar, state->nbStripesPerBlock, - input, nbStripes, - secret, state->secretLimit, - f_acc, f_scramble); - XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); + /* + * Internal buffer is partially filled (always, except at beginning) + * Complete it, then consume it. 
+ */ + if (state->bufferedSize) { + + size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; + XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); + input += loadSize; + XXH3_consumeStripes(acc, &state->nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, XXH3_INTERNALBUFFER_STRIPES, secret, + state->secretLimit, f_acc, f_scramble); + state->bufferedSize = 0; + + } + + XXH_ASSERT(input < bEnd); + if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { + + size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN; + input = XXH3_consumeStripes( + acc, &state->nbStripesSoFar, state->nbStripesPerBlock, input, + nbStripes, secret, state->secretLimit, f_acc, f_scramble); + XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, + input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); - } - /* Some remaining input (always) : buffer it */ - XXH_ASSERT(input < bEnd); - XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE); - XXH_ASSERT(state->bufferedSize == 0); - XXH_memcpy(state->buffer, input, (size_t)(bEnd-input)); - state->bufferedSize = (XXH32_hash_t)(bEnd-input); -#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 - /* save stack accumulators into state */ - XXH_memcpy(state->acc, acc, sizeof(acc)); -#endif } - return XXH_OK; + /* Some remaining input (always) : buffer it */ + XXH_ASSERT(input < bEnd); + XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE); + XXH_ASSERT(state->bufferedSize == 0); + XXH_memcpy(state->buffer, input, (size_t)(bEnd - input)); + state->bufferedSize = (XXH32_hash_t)(bEnd - input); + #if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* save stack accumulators into state */ + XXH_memcpy(state->acc, acc, sizeof(acc)); + #endif + + } + + return XXH_OK; + } /*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) -{ - return XXH3_update(state, (const xxh_u8*)input, len, - XXH3_accumulate, XXH3_scrambleAcc); +XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t *state, + XXH_NOESCAPE const void *input, size_t len) { + + return XXH3_update(state, (const xxh_u8 *)input, len, XXH3_accumulate, + XXH3_scrambleAcc); + } +XXH_FORCE_INLINE void XXH3_digest_long(XXH64_hash_t *acc, + const XXH3_state_t *state, + const unsigned char *secret) { -XXH_FORCE_INLINE void -XXH3_digest_long (XXH64_hash_t* acc, - const XXH3_state_t* state, - const unsigned char* secret) -{ - xxh_u8 lastStripe[XXH_STRIPE_LEN]; - const xxh_u8* lastStripePtr; + xxh_u8 lastStripe[XXH_STRIPE_LEN]; + const xxh_u8 *lastStripePtr; + + /* + * Digest on a local copy. This way, the state remains unaltered, and it can + * continue ingesting more input afterwards. 
+ */ + XXH_memcpy(acc, state->acc, sizeof(state->acc)); + if (state->bufferedSize >= XXH_STRIPE_LEN) { + + /* Consume remaining stripes then point to remaining data in buffer */ + size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; + size_t nbStripesSoFar = state->nbStripesSoFar; + XXH3_consumeStripes(acc, &nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, nbStripes, secret, state->secretLimit, + XXH3_accumulate, XXH3_scrambleAcc); + lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN; + + } else { /* bufferedSize < XXH_STRIPE_LEN */ + + /* Copy to temp buffer */ + size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; + XXH_ASSERT(state->bufferedSize > + 0); /* there is always some input buffered */ + XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, + catchupSize); + XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); + lastStripePtr = lastStripe; + + } + + /* Last stripe */ + XXH3_accumulate_512(acc, lastStripePtr, + secret + state->secretLimit - XXH_SECRET_LASTACC_START); - /* - * Digest on a local copy. This way, the state remains unaltered, and it can - * continue ingesting more input afterwards. - */ - XXH_memcpy(acc, state->acc, sizeof(state->acc)); - if (state->bufferedSize >= XXH_STRIPE_LEN) { - /* Consume remaining stripes then point to remaining data in buffer */ - size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; - size_t nbStripesSoFar = state->nbStripesSoFar; - XXH3_consumeStripes(acc, - &nbStripesSoFar, state->nbStripesPerBlock, - state->buffer, nbStripes, - secret, state->secretLimit, - XXH3_accumulate, XXH3_scrambleAcc); - lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN; - } else { /* bufferedSize < XXH_STRIPE_LEN */ - /* Copy to temp buffer */ - size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; - XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */ - XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); - XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); - lastStripePtr = lastStripe; - } - /* Last stripe */ - XXH3_accumulate_512(acc, - lastStripePtr, - secret + state->secretLimit - XXH_SECRET_LASTACC_START); } /*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state) -{ - const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; - if (state->totalLen > XXH3_MIDSIZE_MAX) { - XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; - XXH3_digest_long(acc, state, secret); - return XXH3_mergeAccs(acc, - secret + XXH_SECRET_MERGEACCS_START, - (xxh_u64)state->totalLen * XXH_PRIME64_1); - } - /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */ - if (state->useSeed) - return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); - return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), - secret, state->secretLimit + XXH_STRIPE_LEN); +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_digest(XXH_NOESCAPE const XXH3_state_t *state) { + + const unsigned char *const secret = + (state->extSecret == NULL) ? 
state->customSecret : state->extSecret; + if (state->totalLen > XXH3_MIDSIZE_MAX) { + + XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; + XXH3_digest_long(acc, state, secret); + return XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)state->totalLen * XXH_PRIME64_1); + + } + + /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */ + if (state->useSeed) + return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, + state->seed); + return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), + secret, state->secretLimit + XXH_STRIPE_LEN); + } -#endif /* !XXH_NO_STREAM */ + #endif /* !XXH_NO_STREAM */ /* ========================================== * XXH3 128 bits (a.k.a XXH128) * ========================================== - * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant, - * even without counting the significantly larger output size. + * XXH3's 128-bit variant has better mixing and strength than the 64-bit + * variant, even without counting the significantly larger output size. * * For example, extra steps are taken to avoid the seed-dependent collisions * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B). @@ -6416,503 +7257,614 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64). */ -XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t -XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - /* A doubled version of 1to3_64b with different constants. */ - XXH_ASSERT(input != NULL); - XXH_ASSERT(1 <= len && len <= 3); - XXH_ASSERT(secret != NULL); - /* - * len = 1: combinedl = { input[0], 0x01, input[0], input[0] } - * len = 2: combinedl = { input[1], 0x02, input[0], input[1] } - * len = 3: combinedl = { input[2], 0x03, input[0], input[1] } +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_1to3_128b( + const xxh_u8 *input, size_t len, const xxh_u8 *secret, XXH64_hash_t seed) { + + /* A doubled version of 1to3_64b with different constants. 
*/ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combinedl = { input[0], 0x01, input[0], input[0] } + * len = 2: combinedl = { input[1], 0x02, input[0], input[1] } + * len = 3: combinedl = { input[2], 0x03, input[0], input[1] } + */ + { + + xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combinedl = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) | + ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13); + xxh_u64 const bitflipl = + (XXH_readLE32(secret) ^ XXH_readLE32(secret + 4)) + seed; + xxh_u64 const bitfliph = + (XXH_readLE32(secret + 8) ^ XXH_readLE32(secret + 12)) - seed; + xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl; + xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph; + XXH128_hash_t h128; + h128.low64 = XXH64_avalanche(keyed_lo); + h128.high64 = XXH64_avalanche(keyed_hi); + return h128; + + } + +} + +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_4to8_128b( + const xxh_u8 *input, size_t len, const xxh_u8 *secret, XXH64_hash_t seed) { + + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { + + xxh_u32 const input_lo = XXH_readLE32(input); + xxh_u32 const input_hi = XXH_readLE32(input + len - 4); + xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32); + xxh_u64 const bitflip = + (XXH_readLE64(secret + 16) ^ XXH_readLE64(secret + 24)) + seed; + xxh_u64 const keyed = input_64 ^ bitflip; + + /* Shift len to the left to ensure it is even, this avoids even multiplies. */ - { xxh_u8 const c1 = input[0]; - xxh_u8 const c2 = input[len >> 1]; - xxh_u8 const c3 = input[len - 1]; - xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24) - | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); - xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13); - xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; - xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed; - xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl; - xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph; - XXH128_hash_t h128; - h128.low64 = XXH64_avalanche(keyed_lo); - h128.high64 = XXH64_avalanche(keyed_hi); - return h128; - } + XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2)); + + m128.high64 += (m128.low64 << 1); + m128.low64 ^= (m128.high64 >> 3); + + m128.low64 = XXH_xorshift64(m128.low64, 35); + m128.low64 *= PRIME_MX2; + m128.low64 = XXH_xorshift64(m128.low64, 28); + m128.high64 = XXH3_avalanche(m128.high64); + return m128; + + } + } -XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t -XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(secret != NULL); - XXH_ASSERT(4 <= len && len <= 8); - seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; - { xxh_u32 const input_lo = XXH_readLE32(input); - xxh_u32 const input_hi = XXH_readLE32(input + len - 4); - xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32); - xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed; - xxh_u64 const keyed = input_64 ^ bitflip; - - /* Shift len to the left to ensure it is even, this avoids even multiplies. 
*/ - XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2)); - - m128.high64 += (m128.low64 << 1); - m128.low64 ^= (m128.high64 >> 3); - - m128.low64 = XXH_xorshift64(m128.low64, 35); - m128.low64 *= PRIME_MX2; - m128.low64 = XXH_xorshift64(m128.low64, 28); - m128.high64 = XXH3_avalanche(m128.high64); - return m128; +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_9to16_128b( + const xxh_u8 *input, size_t len, const xxh_u8 *secret, XXH64_hash_t seed) { + + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { + + xxh_u64 const bitflipl = + (XXH_readLE64(secret + 32) ^ XXH_readLE64(secret + 40)) - seed; + xxh_u64 const bitfliph = + (XXH_readLE64(secret + 48) ^ XXH_readLE64(secret + 56)) + seed; + xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 input_hi = XXH_readLE64(input + len - 8); + XXH128_hash_t m128 = + XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1); + /* + * Put len in the middle of m128 to ensure that the length gets mixed to + * both the low and high bits in the 128x64 multiply below. + */ + m128.low64 += (xxh_u64)(len - 1) << 54; + input_hi ^= bitfliph; + /* + * Add the high 32 bits of input_hi to the high 32 bits of m128, then + * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to + * the high 64 bits of m128. + * + * The best approach to this operation is different on 32-bit and 64-bit. + */ + if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */ + /* + * 32-bit optimized version, which is more readable. + * + * On 32-bit, it removes an ADC and delays a dependency between the two + * halves of m128.high64, but it generates an extra mask on 64-bit. + */ + m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2); + + } else { + + /* + * 64-bit optimized (albeit more confusing) version. + * + * Uses some properties of addition and multiplication to remove the mask: + * + * Let: + * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF) + * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000) + * c = XXH_PRIME32_2 + * + * a + (b * c) + * Inverse Property: x + y - x == y + * a + (b * (1 + c - 1)) + * Distributive Property: x * (y + z) == (x * y) + (x * z) + * a + (b * 1) + (b * (c - 1)) + * Identity Property: x * 1 == x + * a + b + (b * (c - 1)) + * + * Substitute a, b, and c: + * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - + * 1)) + * + * Since input_hi.hi + input_hi.lo == input_hi, we get this: + * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) + */ + m128.high64 += + input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1); + } -} -XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t -XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(input != NULL); - XXH_ASSERT(secret != NULL); - XXH_ASSERT(9 <= len && len <= 16); - { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed; - xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed; - xxh_u64 const input_lo = XXH_readLE64(input); - xxh_u64 input_hi = XXH_readLE64(input + len - 8); - XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1); - /* - * Put len in the middle of m128 to ensure that the length gets mixed to - * both the low and high bits in the 128x64 multiply below. 
- */ - m128.low64 += (xxh_u64)(len - 1) << 54; - input_hi ^= bitfliph; - /* - * Add the high 32 bits of input_hi to the high 32 bits of m128, then - * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to - * the high 64 bits of m128. - * - * The best approach to this operation is different on 32-bit and 64-bit. - */ - if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */ - /* - * 32-bit optimized version, which is more readable. - * - * On 32-bit, it removes an ADC and delays a dependency between the two - * halves of m128.high64, but it generates an extra mask on 64-bit. - */ - m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2); - } else { - /* - * 64-bit optimized (albeit more confusing) version. - * - * Uses some properties of addition and multiplication to remove the mask: - * - * Let: - * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF) - * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000) - * c = XXH_PRIME32_2 - * - * a + (b * c) - * Inverse Property: x + y - x == y - * a + (b * (1 + c - 1)) - * Distributive Property: x * (y + z) == (x * y) + (x * z) - * a + (b * 1) + (b * (c - 1)) - * Identity Property: x * 1 == x - * a + b + (b * (c - 1)) - * - * Substitute a, b, and c: - * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) - * - * Since input_hi.hi + input_hi.lo == input_hi, we get this: - * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) - */ - m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1); - } - /* m128 ^= XXH_swap64(m128 >> 64); */ - m128.low64 ^= XXH_swap64(m128.high64); + /* m128 ^= XXH_swap64(m128 >> 64); */ + m128.low64 ^= XXH_swap64(m128.high64); + + { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */ + XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2); + h128.high64 += m128.high64 * XXH_PRIME64_2; - { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */ - XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2); - h128.high64 += m128.high64 * XXH_PRIME64_2; + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = XXH3_avalanche(h128.high64); + return h128; + + } + + } - h128.low64 = XXH3_avalanche(h128.low64); - h128.high64 = XXH3_avalanche(h128.high64); - return h128; - } } } /* * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN */ -XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t -XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) -{ - XXH_ASSERT(len <= 16); - { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed); - if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed); - if (len) return XXH3_len_1to3_128b(input, len, secret, seed); - { XXH128_hash_t h128; - xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72); - xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88); - h128.low64 = XXH64_avalanche(seed ^ bitflipl); - h128.high64 = XXH64_avalanche( seed ^ bitfliph); - return h128; - } } +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_0to16_128b( + const xxh_u8 *input, size_t len, const xxh_u8 *secret, XXH64_hash_t seed) { + + XXH_ASSERT(len <= 16); + { + + if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed); + if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed); + if (len) return XXH3_len_1to3_128b(input, len, secret, seed); + { + + XXH128_hash_t h128; + xxh_u64 const bitflipl = + XXH_readLE64(secret + 64) ^ XXH_readLE64(secret + 72); + xxh_u64 const bitfliph = + 
XXH_readLE64(secret + 80) ^ XXH_readLE64(secret + 88); + h128.low64 = XXH64_avalanche(seed ^ bitflipl); + h128.high64 = XXH64_avalanche(seed ^ bitfliph); + return h128; + + } + + } + } /* * A bit slower than XXH3_mix16B, but handles multiply by zero better. */ -XXH_FORCE_INLINE XXH128_hash_t -XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, - const xxh_u8* secret, XXH64_hash_t seed) -{ - acc.low64 += XXH3_mix16B (input_1, secret+0, seed); - acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8); - acc.high64 += XXH3_mix16B (input_2, secret+16, seed); - acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8); - return acc; +XXH_FORCE_INLINE XXH128_hash_t XXH128_mix32B(XXH128_hash_t acc, + const xxh_u8 *input_1, + const xxh_u8 *input_2, + const xxh_u8 *secret, + XXH64_hash_t seed) { + + acc.low64 += XXH3_mix16B(input_1, secret + 0, seed); + acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8); + acc.high64 += XXH3_mix16B(input_2, secret + 16, seed); + acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8); + return acc; + } +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_17to128_128b( + const xxh_u8 *XXH_RESTRICT input, size_t len, + const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) { + + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { + + XXH128_hash_t acc; + acc.low64 = len * XXH_PRIME64_1; + acc.high64 = 0; + + #if XXH_SIZE_OPT >= 1 + { + + /* Smaller, but slightly slower. */ + unsigned int i = (unsigned int)(len - 1) / 32; + do { + + acc = XXH128_mix32B(acc, input + 16 * i, input + len - 16 * (i + 1), + secret + 32 * i, seed); + + } while (i-- != 0); -XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t -XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH64_hash_t seed) -{ - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; - XXH_ASSERT(16 < len && len <= 128); - - { XXH128_hash_t acc; - acc.low64 = len * XXH_PRIME64_1; - acc.high64 = 0; - -#if XXH_SIZE_OPT >= 1 - { - /* Smaller, but slightly slower. 
*/ - unsigned int i = (unsigned int)(len - 1) / 32; - do { - acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed); - } while (i-- != 0); - } -#else - if (len > 32) { - if (len > 64) { - if (len > 96) { - acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed); - } - acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed); - } - acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed); - } - acc = XXH128_mix32B(acc, input, input+len-16, secret, seed); -#endif - { XXH128_hash_t h128; - h128.low64 = acc.low64 + acc.high64; - h128.high64 = (acc.low64 * XXH_PRIME64_1) - + (acc.high64 * XXH_PRIME64_4) - + ((len - seed) * XXH_PRIME64_2); - h128.low64 = XXH3_avalanche(h128.low64); - h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); - return h128; - } } -} -XXH_NO_INLINE XXH_PUREF XXH128_hash_t -XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH64_hash_t seed) -{ - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; - XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + #else + if (len > 32) { + + if (len > 64) { + + if (len > 96) { + + acc = XXH128_mix32B(acc, input + 48, input + len - 64, secret + 96, + seed); - { XXH128_hash_t acc; - unsigned i; - acc.low64 = len * XXH_PRIME64_1; - acc.high64 = 0; - /* - * We set as `i` as offset + 32. We do this so that unchanged - * `len` can be used as upper bound. This reaches a sweet spot - * where both x86 and aarch64 get simple agen and good codegen - * for the loop. - */ - for (i = 32; i < 160; i += 32) { - acc = XXH128_mix32B(acc, - input + i - 32, - input + i - 16, - secret + i - 32, - seed); - } - acc.low64 = XXH3_avalanche(acc.low64); - acc.high64 = XXH3_avalanche(acc.high64); - /* - * NB: `i <= len` will duplicate the last 32-bytes if - * len % 32 was zero. This is an unfortunate necessity to keep - * the hash result stable. 
- */ - for (i=160; i <= len; i += 32) { - acc = XXH128_mix32B(acc, - input + i - 32, - input + i - 16, - secret + XXH3_MIDSIZE_STARTOFFSET + i - 160, - seed); - } - /* last bytes */ - acc = XXH128_mix32B(acc, - input + len - 16, - input + len - 32, - secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16, - (XXH64_hash_t)0 - seed); - - { XXH128_hash_t h128; - h128.low64 = acc.low64 + acc.high64; - h128.high64 = (acc.low64 * XXH_PRIME64_1) - + (acc.high64 * XXH_PRIME64_4) - + ((len - seed) * XXH_PRIME64_2); - h128.low64 = XXH3_avalanche(h128.low64); - h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); - return h128; } + + acc = + XXH128_mix32B(acc, input + 32, input + len - 48, secret + 64, seed); + + } + + acc = XXH128_mix32B(acc, input + 16, input + len - 32, secret + 32, seed); + + } + + acc = XXH128_mix32B(acc, input, input + len - 16, secret, seed); + #endif + { + + XXH128_hash_t h128; + h128.low64 = acc.low64 + acc.high64; + h128.high64 = (acc.low64 * XXH_PRIME64_1) + (acc.high64 * XXH_PRIME64_4) + + ((len - seed) * XXH_PRIME64_2); + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); + return h128; + } + + } + } -XXH_FORCE_INLINE XXH128_hash_t -XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len, - const xxh_u8* XXH_RESTRICT secret, size_t secretSize, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble) -{ - XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; - - XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble); - - /* converge into final hash */ - XXH_STATIC_ASSERT(sizeof(acc) == 64); - XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); - { XXH128_hash_t h128; - h128.low64 = XXH3_mergeAccs(acc, - secret + XXH_SECRET_MERGEACCS_START, - (xxh_u64)len * XXH_PRIME64_1); - h128.high64 = XXH3_mergeAccs(acc, - secret + secretSize - - sizeof(acc) - XXH_SECRET_MERGEACCS_START, - ~((xxh_u64)len * XXH_PRIME64_2)); - return h128; +XXH_NO_INLINE XXH_PUREF XXH128_hash_t XXH3_len_129to240_128b( + const xxh_u8 *XXH_RESTRICT input, size_t len, + const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) { + + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + (void)secretSize; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + + { + + XXH128_hash_t acc; + unsigned i; + acc.low64 = len * XXH_PRIME64_1; + acc.high64 = 0; + /* + * We set as `i` as offset + 32. We do this so that unchanged + * `len` can be used as upper bound. This reaches a sweet spot + * where both x86 and aarch64 get simple agen and good codegen + * for the loop. + */ + for (i = 32; i < 160; i += 32) { + + acc = XXH128_mix32B(acc, input + i - 32, input + i - 16, secret + i - 32, + seed); + + } + + acc.low64 = XXH3_avalanche(acc.low64); + acc.high64 = XXH3_avalanche(acc.high64); + /* + * NB: `i <= len` will duplicate the last 32-bytes if + * len % 32 was zero. This is an unfortunate necessity to keep + * the hash result stable. 
+ */ + for (i = 160; i <= len; i += 32) { + + acc = XXH128_mix32B(acc, input + i - 32, input + i - 16, + secret + XXH3_MIDSIZE_STARTOFFSET + i - 160, seed); + + } + + /* last bytes */ + acc = XXH128_mix32B( + acc, input + len - 16, input + len - 32, + secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16, + (XXH64_hash_t)0 - seed); + + { + + XXH128_hash_t h128; + h128.low64 = acc.low64 + acc.high64; + h128.high64 = (acc.low64 * XXH_PRIME64_1) + (acc.high64 * XXH_PRIME64_4) + + ((len - seed) * XXH_PRIME64_2); + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); + return h128; + } + + } + +} + +XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_internal( + const void *XXH_RESTRICT input, size_t len, + const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { + + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; + + XXH3_hashLong_internal_loop(acc, (const xxh_u8 *)input, len, secret, + secretSize, f_acc, f_scramble); + + /* converge into final hash */ + XXH_STATIC_ASSERT(sizeof(acc) == 64); + XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); + { + + XXH128_hash_t h128; + h128.low64 = XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)len * XXH_PRIME64_1); + h128.high64 = XXH3_mergeAccs( + acc, secret + secretSize - sizeof(acc) - XXH_SECRET_MERGEACCS_START, + ~((xxh_u64)len * XXH_PRIME64_2)); + return h128; + + } + } /* * It's important for performance that XXH3_hashLong() is not inlined. */ -XXH_NO_INLINE XXH_PUREF XXH128_hash_t -XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed64, - const void* XXH_RESTRICT secret, size_t secretLen) -{ - (void)seed64; (void)secret; (void)secretLen; - return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), - XXH3_accumulate, XXH3_scrambleAcc); +XXH_NO_INLINE XXH_PUREF XXH128_hash_t XXH3_hashLong_128b_default( + const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, + const void *XXH_RESTRICT secret, size_t secretLen) { + + (void)seed64; + (void)secret; + (void)secretLen; + return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, + sizeof(XXH3_kSecret), XXH3_accumulate, + XXH3_scrambleAcc); + } /* * It's important for performance to pass @p secretLen (when it's static) * to the compiler, so that it can properly optimize the vectorized loop. * - * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE - * breaks -Og, this is XXH_NO_INLINE. + * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and + * FORCE_INLINE breaks -Og, this is XXH_NO_INLINE. 
*/ -XXH3_WITH_SECRET_INLINE XXH128_hash_t -XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed64, - const void* XXH_RESTRICT secret, size_t secretLen) -{ - (void)seed64; - return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen, - XXH3_accumulate, XXH3_scrambleAcc); +XXH3_WITH_SECRET_INLINE XXH128_hash_t XXH3_hashLong_128b_withSecret( + const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, + const void *XXH_RESTRICT secret, size_t secretLen) { + + (void)seed64; + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8 *)secret, + secretLen, XXH3_accumulate, + XXH3_scrambleAcc); + } -XXH_FORCE_INLINE XXH128_hash_t -XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len, - XXH64_hash_t seed64, - XXH3_f_accumulate f_acc, - XXH3_f_scrambleAcc f_scramble, - XXH3_f_initCustomSecret f_initSec) -{ - if (seed64 == 0) - return XXH3_hashLong_128b_internal(input, len, - XXH3_kSecret, sizeof(XXH3_kSecret), - f_acc, f_scramble); - { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; - f_initSec(secret, seed64); - return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret), - f_acc, f_scramble); - } +XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_withSeed_internal( + const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) { + + if (seed64 == 0) + return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, + sizeof(XXH3_kSecret), f_acc, f_scramble); + { + + XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed64); + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8 *)secret, + sizeof(secret), f_acc, f_scramble); + + } + } /* * It's important for performance that XXH3_hashLong is not inlined. */ XXH_NO_INLINE XXH128_hash_t -XXH3_hashLong_128b_withSeed(const void* input, size_t len, - XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen) -{ - (void)secret; (void)secretLen; - return XXH3_hashLong_128b_withSeed_internal(input, len, seed64, - XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret); +XXH3_hashLong_128b_withSeed(const void *input, size_t len, XXH64_hash_t seed64, + const void *XXH_RESTRICT secret, size_t secretLen) { + + (void)secret; + (void)secretLen; + return XXH3_hashLong_128b_withSeed_internal(input, len, seed64, + XXH3_accumulate, XXH3_scrambleAcc, + XXH3_initCustomSecret); + } -typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t, - XXH64_hash_t, const void* XXH_RESTRICT, size_t); +typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void *XXH_RESTRICT, size_t, + XXH64_hash_t, + const void *XXH_RESTRICT, size_t); XXH_FORCE_INLINE XXH128_hash_t -XXH3_128bits_internal(const void* input, size_t len, - XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, - XXH3_hashLong128_f f_hl128) -{ - XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); - /* - * If an action is to be taken if `secret` conditions are not respected, - * it should be done here. - * For now, it's a contract pre-condition. - * Adding a check and a branch here would cost performance at every hash. 
- */ - if (len <= 16) - return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); - if (len <= 128) - return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); - if (len <= XXH3_MIDSIZE_MAX) - return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); - return f_hl128(input, len, seed64, secret, secretLen); -} +XXH3_128bits_internal(const void *input, size_t len, XXH64_hash_t seed64, + const void *XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong128_f f_hl128) { + + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secret` conditions are not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + */ + if (len <= 16) + return XXH3_len_0to16_128b((const xxh_u8 *)input, len, + (const xxh_u8 *)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_128b((const xxh_u8 *)input, len, + (const xxh_u8 *)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_128b((const xxh_u8 *)input, len, + (const xxh_u8 *)secret, secretLen, seed64); + return f_hl128(input, len, seed64, secret, secretLen); +} /* === Public XXH128 API === */ /*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len) -{ - return XXH3_128bits_internal(input, len, 0, - XXH3_kSecret, sizeof(XXH3_kSecret), - XXH3_hashLong_128b_default); +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void *input, + size_t len) { + + return XXH3_128bits_internal(input, len, 0, XXH3_kSecret, + sizeof(XXH3_kSecret), + XXH3_hashLong_128b_default); + } /*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH128_hash_t -XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize) -{ - return XXH3_128bits_internal(input, len, 0, - (const xxh_u8*)secret, secretSize, - XXH3_hashLong_128b_withSecret); +XXH3_128bits_withSecret(XXH_NOESCAPE const void *input, size_t len, + XXH_NOESCAPE const void *secret, size_t secretSize) { + + return XXH3_128bits_internal(input, len, 0, (const xxh_u8 *)secret, + secretSize, XXH3_hashLong_128b_withSecret); + } /*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH128_hash_t -XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) -{ - return XXH3_128bits_internal(input, len, seed, - XXH3_kSecret, sizeof(XXH3_kSecret), - XXH3_hashLong_128b_withSeed); +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed( + XXH_NOESCAPE const void *input, size_t len, XXH64_hash_t seed) { + + return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, + sizeof(XXH3_kSecret), + XXH3_hashLong_128b_withSeed); + } /*! 
@ingroup XXH3_family */ -XXH_PUBLIC_API XXH128_hash_t -XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) -{ - if (len <= XXH3_MIDSIZE_MAX) - return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); - return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize); +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecretandSeed( + XXH_NOESCAPE const void *input, size_t len, XXH_NOESCAPE const void *secret, + size_t secretSize, XXH64_hash_t seed) { + + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, + sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize); + } /*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH128_hash_t -XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed) -{ - return XXH3_128bits_withSeed(input, len, seed); -} +XXH_PUBLIC_API XXH128_hash_t XXH128(XXH_NOESCAPE const void *input, size_t len, + XXH64_hash_t seed) { + return XXH3_128bits_withSeed(input, len, seed); + +} -/* === XXH3 128-bit streaming === */ -#ifndef XXH_NO_STREAM + /* === XXH3 128-bit streaming === */ + #ifndef XXH_NO_STREAM /* - * All initialization and update functions are identical to 64-bit streaming variant. - * The only difference is the finalization routine. + * All initialization and update functions are identical to 64-bit streaming + * variant. The only difference is the finalization routine. */ /*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr) -{ - return XXH3_64bits_reset(statePtr); +XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t *statePtr) { + + return XXH3_64bits_reset(statePtr); + } /*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize) -{ - return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize); +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret, + size_t secretSize) { + + return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize); + } /*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed) -{ - return XXH3_64bits_reset_withSeed(statePtr, seed); +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH64_hash_t seed) { + + return XXH3_64bits_reset_withSeed(statePtr, seed); + } /*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed) -{ - return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed); +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecretandSeed( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret, + size_t secretSize, XXH64_hash_t seed) { + + return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, + seed); + } /*! 
@ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode -XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len) -{ - return XXH3_64bits_update(state, input, len); +XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t *state, + XXH_NOESCAPE const void *input, size_t len) { + + return XXH3_64bits_update(state, input, len); + } /*! @ingroup XXH3_family */ -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state) -{ - const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; - if (state->totalLen > XXH3_MIDSIZE_MAX) { - XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; - XXH3_digest_long(acc, state, secret); - XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); - { XXH128_hash_t h128; - h128.low64 = XXH3_mergeAccs(acc, - secret + XXH_SECRET_MERGEACCS_START, - (xxh_u64)state->totalLen * XXH_PRIME64_1); - h128.high64 = XXH3_mergeAccs(acc, - secret + state->secretLimit + XXH_STRIPE_LEN - - sizeof(acc) - XXH_SECRET_MERGEACCS_START, - ~((xxh_u64)state->totalLen * XXH_PRIME64_2)); - return h128; - } +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_digest(XXH_NOESCAPE const XXH3_state_t *state) { + + const unsigned char *const secret = + (state->extSecret == NULL) ? state->customSecret : state->extSecret; + if (state->totalLen > XXH3_MIDSIZE_MAX) { + + XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; + XXH3_digest_long(acc, state, secret); + XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= + sizeof(acc) + XXH_SECRET_MERGEACCS_START); + { + + XXH128_hash_t h128; + h128.low64 = XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)state->totalLen * XXH_PRIME64_1); + h128.high64 = + XXH3_mergeAccs(acc, + secret + state->secretLimit + XXH_STRIPE_LEN - + sizeof(acc) - XXH_SECRET_MERGEACCS_START, + ~((xxh_u64)state->totalLen * XXH_PRIME64_2)); + return h128; + } - /* len <= XXH3_MIDSIZE_MAX : short code */ - if (state->seed) - return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); - return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen), - secret, state->secretLimit + XXH_STRIPE_LEN); + + } + + /* len <= XXH3_MIDSIZE_MAX : short code */ + if (state->seed) + return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, + state->seed); + return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen), + secret, state->secretLimit + XXH_STRIPE_LEN); + } -#endif /* !XXH_NO_STREAM */ -/* 128-bit utility functions */ -#include /* memcmp, memcpy */ + #endif /* !XXH_NO_STREAM */ + /* 128-bit utility functions */ + + #include /* memcmp, memcpy */ /* return : 1 is equal, 0 if different */ /*! @ingroup XXH3_family */ -XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) -{ - /* note : XXH128_hash_t is compact, it has no padding byte */ - return !(memcmp(&h1, &h2, sizeof(h1))); +XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) { + + /* note : XXH128_hash_t is compact, it has no padding byte */ + return !(memcmp(&h1, &h2, sizeof(h1))); + } /* This prototype is compatible with stdlib's qsort(). @@ -6920,129 +7872,156 @@ XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) * <0 if *h128_1 < *h128_2 * =0 if *h128_1 == *h128_2 */ /*! 
@ingroup XXH3_family */ -XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2) -{ - XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1; - XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2; - int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64); - /* note : bets that, in most cases, hash values are different */ - if (hcmp) return hcmp; - return (h1.low64 > h2.low64) - (h2.low64 > h1.low64); -} +XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void *h128_1, + XXH_NOESCAPE const void *h128_2) { + + XXH128_hash_t const h1 = *(const XXH128_hash_t *)h128_1; + XXH128_hash_t const h2 = *(const XXH128_hash_t *)h128_2; + int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64); + /* note : bets that, in most cases, hash values are different */ + if (hcmp) return hcmp; + return (h1.low64 > h2.low64) - (h2.low64 > h1.low64); +} /*====== Canonical representation ======*/ /*! @ingroup XXH3_family */ -XXH_PUBLIC_API void -XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) { - hash.high64 = XXH_swap64(hash.high64); - hash.low64 = XXH_swap64(hash.low64); - } - XXH_memcpy(dst, &hash.high64, sizeof(hash.high64)); - XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64)); +XXH_PUBLIC_API void XXH128_canonicalFromHash( + XXH_NOESCAPE XXH128_canonical_t *dst, XXH128_hash_t hash) { + + XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) { + + hash.high64 = XXH_swap64(hash.high64); + hash.low64 = XXH_swap64(hash.low64); + + } + + XXH_memcpy(dst, &hash.high64, sizeof(hash.high64)); + XXH_memcpy((char *)dst + sizeof(hash.high64), &hash.low64, + sizeof(hash.low64)); + } /*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH128_hash_t -XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src) -{ - XXH128_hash_t h; - h.high64 = XXH_readBE64(src); - h.low64 = XXH_readBE64(src->digest + 8); - return h; +XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t *src) { + + XXH128_hash_t h; + h.high64 = XXH_readBE64(src); + h.low64 = XXH_readBE64(src->digest + 8); + return h; + } + /* ========================================== + * Secret generators + * ========================================== + */ + #define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x)) +XXH_FORCE_INLINE void XXH3_combine16(void *dst, XXH128_hash_t h128) { -/* ========================================== - * Secret generators - * ========================================== - */ -#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x)) + XXH_writeLE64(dst, XXH_readLE64(dst) ^ h128.low64); + XXH_writeLE64((char *)dst + 8, XXH_readLE64((char *)dst + 8) ^ h128.high64); -XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128) -{ - XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 ); - XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 ); } /*! 
@ingroup XXH3_family */ -XXH_PUBLIC_API XXH_errorcode -XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize) -{ -#if (XXH_DEBUGLEVEL >= 1) - XXH_ASSERT(secretBuffer != NULL); - XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); -#else - /* production mode, assert() are disabled */ - if (secretBuffer == NULL) return XXH_ERROR; - if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; -#endif +XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret( + XXH_NOESCAPE void *secretBuffer, size_t secretSize, + XXH_NOESCAPE const void *customSeed, size_t customSeedSize) { + + #if (XXH_DEBUGLEVEL >= 1) + XXH_ASSERT(secretBuffer != NULL); + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + #else + /* production mode, assert() are disabled */ + if (secretBuffer == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + #endif + + if (customSeedSize == 0) { + + customSeed = XXH3_kSecret; + customSeedSize = XXH_SECRET_DEFAULT_SIZE; + + } + + #if (XXH_DEBUGLEVEL >= 1) + XXH_ASSERT(customSeed != NULL); + #else + if (customSeed == NULL) return XXH_ERROR; + #endif + + /* Fill secretBuffer with a copy of customSeed - repeat as needed */ + { + + size_t pos = 0; + while (pos < secretSize) { + + size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize); + memcpy((char *)secretBuffer + pos, customSeed, toCopy); + pos += toCopy; - if (customSeedSize == 0) { - customSeed = XXH3_kSecret; - customSeedSize = XXH_SECRET_DEFAULT_SIZE; } -#if (XXH_DEBUGLEVEL >= 1) - XXH_ASSERT(customSeed != NULL); -#else - if (customSeed == NULL) return XXH_ERROR; -#endif - /* Fill secretBuffer with a copy of customSeed - repeat as needed */ - { size_t pos = 0; - while (pos < secretSize) { - size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize); - memcpy((char*)secretBuffer + pos, customSeed, toCopy); - pos += toCopy; - } } - - { size_t const nbSeg16 = secretSize / 16; - size_t n; - XXH128_canonical_t scrambler; - XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0)); - for (n=0; ninit_seed = init_seed; diff --git a/utils/bench/hash.c b/utils/bench/hash.c index 013a5321..d4be0ab4 100644 --- a/utils/bench/hash.c +++ b/utils/bench/hash.c @@ -13,30 +13,41 @@ #undef XXH_INLINE_ALL int main() { - char *data = malloc(4097); + + char *data = malloc(4097); struct timespec start, end; - long long duration; - int i; - uint64_t res; + long long duration; + int i; + uint64_t res; clock_gettime(CLOCK_MONOTONIC, &start); for (i = 0; i < 100000000; ++i) { - res = XXH3_64bits(data, 4097); - memcpy(data + 16, (char*)&res, 8); + + res = XXH3_64bits(data, 4097); + memcpy(data + 16, (char *)&res, 8); + } + clock_gettime(CLOCK_MONOTONIC, &end); - duration = (end.tv_sec - start.tv_sec) * 1000000000LL + (end.tv_nsec - start.tv_nsec); + duration = (end.tv_sec - start.tv_sec) * 1000000000LL + + (end.tv_nsec - start.tv_nsec); printf("xxh3 duration: %lld ns\n", duration); memset(data, 0, 4097); clock_gettime(CLOCK_MONOTONIC, &start); for (i = 0; i < 100000000; ++i) { - res = t1ha0_ia32aes(data, 4097); - memcpy(data + 16, (char*)&res, 8); + + res = t1ha0_ia32aes(data, 4097); + memcpy(data + 16, (char *)&res, 8); + } + clock_gettime(CLOCK_MONOTONIC, &end); - duration = (end.tv_sec - start.tv_sec) * 1000000000LL + (end.tv_nsec - start.tv_nsec); + duration = (end.tv_sec - start.tv_sec) * 1000000000LL + + (end.tv_nsec - start.tv_nsec); printf("t1ha0_ia32aes duration: %lld ns\n", duration); return 0; + } + -- cgit 1.4.1 From 
5ae4a7ae023e7acdefc95cc9ec899763e6e4f69f Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 20 Feb 2024 15:46:51 +0100 Subject: afl-whatsup current speed --- afl-whatsup | 41 ++++++++++++++++++++++++----------------- docs/Changelog.md | 3 +++ 2 files changed, 27 insertions(+), 17 deletions(-) (limited to 'docs/Changelog.md') diff --git a/afl-whatsup b/afl-whatsup index de3c3022..55ef2473 100755 --- a/afl-whatsup +++ b/afl-whatsup @@ -123,6 +123,7 @@ START_CNT=0 TOTAL_TIME=0 TOTAL_EXECS=0 TOTAL_EPS=0 +TOTAL_EPLM=0 TOTAL_CRASHES=0 TOTAL_HANGS=0 TOTAL_PFAV=0 @@ -182,6 +183,8 @@ for j in `find . -maxdepth 2 -iname fuzzer_setup | sort`; do if [ -f "$i" ]; then + IS_STARTING= + IS_DEAD= sed 's/^command_line.*$/_skip:1/;s/[ ]*:[ ]*/="/;s/$/"/' "$i" >"$TMP" . "$TMP" DIRECTORY=$DIR @@ -212,9 +215,6 @@ for j in `find . -maxdepth 2 -iname fuzzer_setup | sort`; do if ! kill -0 "$fuzzer_pid" 2>/dev/null; then - IS_STARTING= - IS_DEAD= - if [ -e "$i" ] && [ -e "$j" ] && [ -n "$FUSER" ]; then if [ "$i" -ot "$j" ]; then @@ -273,11 +273,15 @@ for j in `find . -maxdepth 2 -iname fuzzer_setup | sort`; do ALIVE_CNT=$((ALIVE_CNT + 1)) EXEC_SEC=0 + EXEC_MIN=0 test -z "$RUN_UNIX" -o "$RUN_UNIX" = 0 || EXEC_SEC=$((execs_done / RUN_UNIX)) PATH_PERC=$((cur_item * 100 / corpus_count)) + + test "$IS_DEAD" = 1 || EXEC_MIN=$(echo $execs_ps_last_min|sed 's/\..*//') TOTAL_TIME=$((TOTAL_TIME + RUN_UNIX)) TOTAL_EPS=$((TOTAL_EPS + EXEC_SEC)) + TOTAL_EPLM=$((TOTAL_EPLM + EXEC_MIN)) TOTAL_EXECS=$((TOTAL_EXECS + execs_done)) TOTAL_CRASHES=$((TOTAL_CRASHES + saved_crashes)) TOTAL_HANGS=$((TOTAL_HANGS + saved_hangs)) @@ -399,41 +403,44 @@ if [ -z "$SUMMARY_ONLY" -o -z "$MINIMAL_ONLY" ]; then echo fi -echo " Fuzzers alive : $ALIVE_CNT" +echo " Fuzzers alive : $ALIVE_CNT" if [ ! "$START_CNT" = "0" ]; then - echo " Starting up : $START_CNT ($TXT)" + echo " Starting up : $START_CNT ($TXT)" fi if [ ! 
"$DEAD_CNT" = "0" ]; then - echo " Dead or remote : $DEAD_CNT ($TXT)" + echo " Dead or remote : $DEAD_CNT ($TXT)" fi -echo " Total run time : $FMT_TIME" +echo " Total run time : $FMT_TIME" if [ -z "$MINIMAL_ONLY" ]; then - echo " Total execs : $FMT_EXECS" - echo " Cumulative speed : $TOTAL_EPS execs/sec" + echo " Total execs : $FMT_EXECS" + echo " Cumulative speed : $TOTAL_EPS execs/sec" + if [ "$ALIVE_CNT" -gt "0" ]; then + echo " Total average speed : $((TOTAL_EPS / ALIVE_CNT)) execs/sec" + fi fi if [ "$ALIVE_CNT" -gt "0" ]; then - echo " Average speed : $((TOTAL_EPS / ALIVE_CNT)) execs/sec" + echo "Current average speed : $TOTAL_EPLM execs/sec" fi if [ -z "$MINIMAL_ONLY" ]; then - echo " Pending items : $TOTAL_PFAV faves, $TOTAL_PENDING total" + echo " Pending items : $TOTAL_PFAV faves, $TOTAL_PENDING total" fi if [ "$ALIVE_CNT" -gt "1" -o -n "$MINIMAL_ONLY" ]; then if [ "$ALIVE_CNT" -gt "0" ]; then - echo " Pending per fuzzer : $((TOTAL_PFAV/ALIVE_CNT)) faves, $((TOTAL_PENDING/ALIVE_CNT)) total (on average)" + echo " Pending per fuzzer : $((TOTAL_PFAV/ALIVE_CNT)) faves, $((TOTAL_PENDING/ALIVE_CNT)) total (on average)" fi fi -echo " Coverage reached : ${TOTAL_COVERAGE}%" -echo " Crashes saved : $TOTAL_CRASHES" +echo " Coverage reached : ${TOTAL_COVERAGE}%" +echo " Crashes saved : $TOTAL_CRASHES" if [ -z "$MINIMAL_ONLY" ]; then - echo " Hangs saved : $TOTAL_HANGS" - echo "Cycles without finds : $TOTAL_WCOP" + echo " Hangs saved : $TOTAL_HANGS" + echo " Cycles without finds : $TOTAL_WCOP" fi -echo " Time without finds : $TOTAL_LAST_FIND" +echo " Time without finds : $TOTAL_LAST_FIND" echo exit 0 diff --git a/docs/Changelog.md b/docs/Changelog.md index 3415150a..6408785a 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -26,6 +26,9 @@ - added collision free caller instrumentation to LTO mode. activate with `AFL_LLVM_LTO_CALLER=1`. You can set a max depth to go through single block functions with `AFL_LLVM_LTO_CALLER_DEPTH` (default 0) + - afl-whatsup: + - Now also displays current average speed + - small bugfixes - Minor edits to afl-persistent-config - Prevent temporary files being left behind on aborted afl-whatsup - More CPU benchmarks added to benchmark/ -- cgit 1.4.1 From 849994dedde124ee3ba2491ccb9b6a18d4e52d29 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 23 Feb 2024 14:09:22 +0100 Subject: update changelog --- docs/Changelog.md | 1 + 1 file changed, 1 insertion(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 6408785a..ead015eb 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -22,6 +22,7 @@ - small improvements to CMPLOG/redqueen - workround for a bug with MOpt -L when used with -M - in the future we will either remove or rewrite MOpt. + - fix for `-t xxx+` feature - afl-cc: - added collision free caller instrumentation to LTO mode. activate with `AFL_LLVM_LTO_CALLER=1`. 
You can set a max depth to go through single -- cgit 1.4.1 From 036a79268b48a0e3e061d5e3387711f69bed8d56 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 29 Feb 2024 09:10:22 +0100 Subject: gcc cmplog fix --- docs/Changelog.md | 3 ++- include/t1ha_bits.h | 15 ++++++++------- include/xxhash.h | 30 ++++++++++++++++++------------ instrumentation/afl-gcc-cmptrs-pass.so.cc | 15 +++++++++------ src/afl-cc.c | 3 ++- src/afl-fuzz.c | 14 ++++++-------- 6 files changed, 45 insertions(+), 35 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index ead015eb..da4b3a20 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -27,8 +27,9 @@ - added collision free caller instrumentation to LTO mode. activate with `AFL_LLVM_LTO_CALLER=1`. You can set a max depth to go through single block functions with `AFL_LLVM_LTO_CALLER_DEPTH` (default 0) + - fix for GCC_PLUGIN cmplog that broke on std::strings - afl-whatsup: - - Now also displays current average speed + - now also displays current average speed - small bugfixes - Minor edits to afl-persistent-config - Prevent temporary files being left behind on aborted afl-whatsup diff --git a/include/t1ha_bits.h b/include/t1ha_bits.h index e7a8d53c..0b9bbda5 100644 --- a/include/t1ha_bits.h +++ b/include/t1ha_bits.h @@ -207,7 +207,7 @@ static __maybe_unused __always_inline unsigned e2k_add64carry_first( return (unsigned)__builtin_e2k_addcd_c(base, addend, 0); } -\ + #define add64carry_first(base, addend, sum) \ e2k_add64carry_first(base, addend, sum) @@ -218,7 +218,7 @@ static __maybe_unused __always_inline unsigned e2k_add64carry_next( return (unsigned)__builtin_e2k_addcd_c(base, addend, carry); } -\ + #define add64carry_next(carry, base, addend, sum) \ e2k_add64carry_next(carry, base, addend, sum) @@ -230,7 +230,7 @@ static __maybe_unused __always_inline void e2k_add64carry_last(unsigned carry, *sum = __builtin_e2k_addcd(base, addend, carry); } -\ + #define add64carry_last(carry, base, addend, sum) \ e2k_add64carry_last(carry, base, addend, sum) #endif /* __iset__ >= 5 */ @@ -311,7 +311,7 @@ static __forceinline char msvc32_add64carry_first(uint64_t base, base_32h, addend_32h, sum32 + 1); } -\ + #define add64carry_first(base, addend, sum) \ msvc32_add64carry_first(base, addend, sum) @@ -328,7 +328,7 @@ static __forceinline char msvc32_add64carry_next(char carry, uint64_t base, base_32h, addend_32h, sum32 + 1); } -\ + #define add64carry_next(carry, base, addend, sum) \ msvc32_add64carry_next(carry, base, addend, sum) @@ -345,7 +345,7 @@ static __forceinline void msvc32_add64carry_last(char carry, uint64_t base, addend_32h, sum32 + 1); } -\ + #define add64carry_last(carry, base, addend, sum) \ msvc32_add64carry_last(carry, base, addend, sum) #endif /* _MSC_FULL_VER >= 190024231 */ @@ -454,7 +454,7 @@ typedef struct { uint64_t unaligned_64; } __attribute__((__packed__)) t1ha_unaligned_proxy; -\ + #define read_unaligned(ptr, bits) \ (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \ t1ha_unaligned_proxy, unaligned_##bits))) \ @@ -539,6 +539,7 @@ static __always_inline const uint64_t *__attribute__(( (void)(ptr); \ \ } while (0) + #endif #endif /* prefetch */ diff --git a/include/xxhash.h b/include/xxhash.h index 7697d0f2..991a8f1e 100644 --- a/include/xxhash.h +++ b/include/xxhash.h @@ -1734,7 +1734,7 @@ XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t *src); * These declarations should only be used with static linking. * Never use them in association with dynamic linking! 
***************************************************************************** -*/ + */ /* * These definitions are only present to allow static allocation @@ -2399,9 +2399,9 @@ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecretandSeed( #define XXH_NO_STREAM #undef XXH_NO_STREAM /* don't actually */ #endif /* XXH_DOXYGEN */ -/*! - * @} - */ + /*! + * @} + */ #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command \ line for example */ @@ -2614,6 +2614,7 @@ static void *XXH_memcpy(void *dest, const void *src, size_t size) { _Static_assert((c), m); \ \ } while (0) + #elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */ #define XXH_STATIC_ASSERT_WITH_MESSAGE(c, m) \ do { \ @@ -2621,6 +2622,7 @@ static void *XXH_memcpy(void *dest, const void *src, size_t size) { static_assert((c), m); \ \ } while (0) + #else #define XXH_STATIC_ASSERT_WITH_MESSAGE(c, m) \ do { \ @@ -2632,6 +2634,7 @@ static void *XXH_memcpy(void *dest, const void *src, size_t size) { }; \ \ } while (0) + #endif #define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c), #c) #endif @@ -2850,7 +2853,7 @@ static int XXH_isLittleEndian(void) { return one.c[0]; } -\ + #define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() #endif #endif @@ -4679,6 +4682,7 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) { acc = svadd_u64_x(mask, acc, mul); \ \ } while (0) + #endif /* XXH_VECTOR == XXH_SVE */ /* prefetch @@ -4737,12 +4741,14 @@ static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { }; -static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< - 0b0001011001010110011001111001000110011110001101110111100111111001 - */ -static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< - 0b1001111110110010000111000110010100011110100110001101111100100101 - */ +static const xxh_u64 PRIME_MX1 = + 0x165667919E3779F9ULL; /*!< + 0b0001011001010110011001111001000110011110001101110111100111111001 + */ +static const xxh_u64 PRIME_MX2 = + 0x9FB21C651E98DF25ULL; /*!< + 0b1001111110110010000111000110010100011110100110001101111100100101 + */ #ifdef XXH_OLD_NAMES #define kSecret XXH3_kSecret @@ -7854,7 +7860,7 @@ XXH3_128bits_digest(XXH_NOESCAPE const XXH3_state_t *state) { } #endif /* !XXH_NO_STREAM */ - /* 128-bit utility functions */ + /* 128-bit utility functions */ #include /* memcmp, memcpy */ diff --git a/instrumentation/afl-gcc-cmptrs-pass.so.cc b/instrumentation/afl-gcc-cmptrs-pass.so.cc index 929a9d7a..96bd5ba8 100644 --- a/instrumentation/afl-gcc-cmptrs-pass.so.cc +++ b/instrumentation/afl-gcc-cmptrs-pass.so.cc @@ -180,19 +180,19 @@ struct afl_cmptrs_pass : afl_base_pass { c = DECL_CONTEXT(c); if (c && TREE_CODE(c) != TRANSLATION_UNIT_DECL) return false; - /* Check that the first nonstatic data member of the record type + /* Check that the first nonstatic named data member of the record type is named _M_dataplus. */ for (c = TYPE_FIELDS(t); c; c = DECL_CHAIN(c)) - if (TREE_CODE(c) == FIELD_DECL) break; + if (TREE_CODE(c) == FIELD_DECL && DECL_NAME(c)) break; if (!c || !integer_zerop(DECL_FIELD_BIT_OFFSET(c)) || strcmp(IDENTIFIER_POINTER(DECL_NAME(c)), "_M_dataplus") != 0) return false; - /* Check that the second nonstatic data member of the record type + /* Check that the second nonstatic named data member of the record type is named _M_string_length. */ tree f2; for (f2 = DECL_CHAIN(c); f2; f2 = DECL_CHAIN(f2)) - if (TREE_CODE(f2) == FIELD_DECL) break; + if (TREE_CODE(f2) == FIELD_DECL && DECL_NAME(f2)) break; if (!f2 /* No need to check this field's offset. 
*/ || strcmp(IDENTIFIER_POINTER(DECL_NAME(f2)), "_M_string_length") != 0) return false; @@ -208,9 +208,12 @@ struct afl_cmptrs_pass : afl_base_pass { strcmp(IDENTIFIER_POINTER(TYPE_IDENTIFIER(c)), "_Alloc_hider") != 0) return false; - /* And its first data member is named _M_p. */ + /* And its first nonstatic named data member should be named _M_p. + There may be (unnamed) subobjects from empty base classes. We + skip the subobjects, then check the offset of the first data + member. */ for (c = TYPE_FIELDS(c); c; c = DECL_CHAIN(c)) - if (TREE_CODE(c) == FIELD_DECL) break; + if (TREE_CODE(c) == FIELD_DECL && DECL_NAME(c)) break; if (!c || !integer_zerop(DECL_FIELD_BIT_OFFSET(c)) || strcmp(IDENTIFIER_POINTER(DECL_NAME(c)), "_M_p") != 0) return false; diff --git a/src/afl-cc.c b/src/afl-cc.c index 6aa0da6a..faa46103 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -828,7 +828,8 @@ static void instrument_mode_old_environ(aflcc_state_t *aflcc) { } if (getenv("AFL_LLVM_CTX")) aflcc->instrument_opt_mode |= INSTRUMENT_OPT_CTX; - if (getenv("AFL_LLVM_CALLER") || getenv("AFL_LLVM_LTO_CALLER") || getenv("AFL_LLVM_LTO_CTX")) + if (getenv("AFL_LLVM_CALLER") || getenv("AFL_LLVM_LTO_CALLER") || + getenv("AFL_LLVM_LTO_CTX")) aflcc->instrument_opt_mode |= INSTRUMENT_OPT_CALLER; if (getenv("AFL_LLVM_NGRAM_SIZE")) { diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 08f716fa..443d93b0 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -2493,17 +2493,15 @@ int main(int argc, char **argv_orig, char **envp) { for (entry = 0; entry < afl->queued_items; ++entry) if (!afl->queue_buf[entry]->disabled) - if ((afl->queue_buf[entry]->exec_us/1000) > max_ms) - max_ms = afl->queue_buf[entry]->exec_us/1000; - + if ((afl->queue_buf[entry]->exec_us / 1000) > max_ms) + max_ms = afl->queue_buf[entry]->exec_us / 1000; + // Add 20% as a safety margin, capped to exec_tmout given in -t option max_ms *= 1.2; - if(max_ms > afl->fsrv.exec_tmout) - max_ms = afl->fsrv.exec_tmout; - + if (max_ms > afl->fsrv.exec_tmout) max_ms = afl->fsrv.exec_tmout; + // Ensure that there is a sensible timeout even for very fast binaries - if(max_ms < 5) - max_ms = 5; + if (max_ms < 5) max_ms = 5; afl->fsrv.exec_tmout = max_ms; afl->timeout_given = 1; -- cgit 1.4.1 From acc178e5dd572d166404531ebe948c6719299e8b Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 29 Feb 2024 14:16:56 +0100 Subject: log --- docs/Changelog.md | 2 ++ 1 file changed, 2 insertions(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index da4b3a20..51f8dc4f 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -23,6 +23,8 @@ - workround for a bug with MOpt -L when used with -M - in the future we will either remove or rewrite MOpt. - fix for `-t xxx+` feature + - -e extension option now saves the queue items crashes etc. with the + extension too - afl-cc: - added collision free caller instrumentation to LTO mode. activate with `AFL_LLVM_LTO_CALLER=1`. 
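
For illustration only: the afl-fuzz entries a few lines above mention the `-t xxx+` timeout handling and the `-e` extension option. A hedged invocation sketch, with the timeout value, extension and target as placeholders:

# the '+' suffix lets afl-fuzz derive the timeout from measured seed execution
# times and use 1000 ms only as the upper bound (the max_ms logic shown above);
# 'json' is stored as '.json' - afl-fuzz adds the dot itself
afl-fuzz -i in -o out -t 1000+ -e json -- ./target @@
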
You can set a max depth to go through single -- cgit 1.4.1 From 443edcd77162b901b7785eeedf669b12a82f822a Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 12 Mar 2024 07:42:16 +0100 Subject: nits --- docs/Changelog.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 51f8dc4f..94ea5fca 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -8,7 +8,7 @@ backward compatible to old compiled targets if they are not built for CMPLOG/Redqueen, but new compiled targets will not work with old afl-fuzz versions! - ! Recompiled all targets that are instrumented for CMPLOG/Redqueen! + ! Recompile all targets that are instrumented for CMPLOG/Redqueen! - AFL++ now supports up to 4 billion coverage edges, up from 6 million. - New compile option: `make PERFORMANCE=1` - this will enable special CPU dependent optimizations that make everything more performant - but @@ -23,7 +23,7 @@ - workround for a bug with MOpt -L when used with -M - in the future we will either remove or rewrite MOpt. - fix for `-t xxx+` feature - - -e extension option now saves the queue items crashes etc. with the + - -e extension option now saves the queue items, crashes, etc. with the extension too - afl-cc: - added collision free caller instrumentation to LTO mode. activate with -- cgit 1.4.1 From f7ea0f569fa57e22548c1dc8eaba2903213e496e Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 5 Apr 2024 14:52:53 +0200 Subject: fix aflpp custom mutator + standalone tool --- custom_mutators/aflpp/aflpp.c | 1 + custom_mutators/aflpp/standalone/aflpp-standalone.c | 7 ++----- docs/Changelog.md | 1 + include/afl-mutations.h | 5 ++++- src/afl-fuzz-state.c | 4 ---- 5 files changed, 8 insertions(+), 10 deletions(-) (limited to 'docs/Changelog.md') diff --git a/custom_mutators/aflpp/aflpp.c b/custom_mutators/aflpp/aflpp.c index e15d0391..0b236f76 100644 --- a/custom_mutators/aflpp/aflpp.c +++ b/custom_mutators/aflpp/aflpp.c @@ -1,3 +1,4 @@ +#include "afl-fuzz.h" #include "afl-mutations.h" typedef struct my_mutator { diff --git a/custom_mutators/aflpp/standalone/aflpp-standalone.c b/custom_mutators/aflpp/standalone/aflpp-standalone.c index 361feaba..3a2cbc2f 100644 --- a/custom_mutators/aflpp/standalone/aflpp-standalone.c +++ b/custom_mutators/aflpp/standalone/aflpp-standalone.c @@ -1,9 +1,6 @@ +#include "afl-fuzz.h" #include "afl-mutations.h" -s8 interesting_8[] = {INTERESTING_8}; -s16 interesting_16[] = {INTERESTING_8, INTERESTING_16}; -s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32}; - typedef struct my_mutator { afl_state_t *afl; @@ -155,7 +152,7 @@ int main(int argc, char *argv[]) { return -1; } - if (verbose) fprintf(stderr, "Mutation output length: %zu\n", outlen); + if (verbose) fprintf(stderr, "Mutation output length: %u\n", outlen); if (fwrite(outbuf, 1, outlen, out) != outlen) { fprintf(stderr, "Warning: incomplete write.\n"); diff --git a/docs/Changelog.md b/docs/Changelog.md index 94ea5fca..70f4e375 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -33,6 +33,7 @@ - afl-whatsup: - now also displays current average speed - small bugfixes + - Fixes for aflpp custom mutator and standalone tool - Minor edits to afl-persistent-config - Prevent temporary files being left behind on aborted afl-whatsup - More CPU benchmarks added to benchmark/ diff --git a/include/afl-mutations.h b/include/afl-mutations.h index 75e66484..79cf7c6a 100644 --- a/include/afl-mutations.h +++ b/include/afl-mutations.h @@ 
-30,10 +30,13 @@ #include #include -#include "afl-fuzz.h" #define MUT_STRATEGY_ARRAY_SIZE 256 +s8 interesting_8[] = {INTERESTING_8}; +s16 interesting_16[] = {INTERESTING_8, INTERESTING_16}; +s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32}; + enum { /* 00 */ MUT_FLIPBIT, diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c index ae327117..c61f00bd 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -28,10 +28,6 @@ #include "afl-fuzz.h" #include "envs.h" -s8 interesting_8[] = {INTERESTING_8}; -s16 interesting_16[] = {INTERESTING_8, INTERESTING_16}; -s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32}; - char *power_names[POWER_SCHEDULES_NUM] = {"explore", "mmopt", "exploit", "fast", "coe", "lin", "quad", "rare", "seek"}; -- cgit 1.4.1 From 45603367bfb71948f56715ac88e34c05c0dc0486 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sun, 7 Apr 2024 09:44:33 +0200 Subject: fix llvm modules --- docs/Changelog.md | 1 + instrumentation/SanitizerCoverageLTO.so.cc | 9 +++++-- instrumentation/afl-llvm-dict2file.so.cc | 2 +- instrumentation/afl-llvm-pass.so.cc | 10 ++++---- instrumentation/cmplog-instructions-pass.cc | 9 ++++--- instrumentation/cmplog-routines-pass.cc | 12 ++++----- instrumentation/cmplog-switches-pass.cc | 12 ++++----- instrumentation/compare-transform-pass.so.cc | 27 ++++++++++++++------ instrumentation/injection-pass.cc | 17 +++++++------ instrumentation/split-compares-pass.so.cc | 37 +++++++++++++--------------- instrumentation/split-switches-pass.so.cc | 17 +++++++------ src/afl-cc.c | 7 ++++++ 12 files changed, 95 insertions(+), 65 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 70f4e375..72e20a18 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -29,6 +29,7 @@ - added collision free caller instrumentation to LTO mode. activate with `AFL_LLVM_LTO_CALLER=1`. 
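
A hedged build sketch for the collision-free caller instrumentation described in this entry. Only the AFL_LLVM_LTO_CALLER and AFL_LLVM_LTO_CALLER_DEPTH variables come from the changelog text; the depth value, compiler wrapper names and build commands are illustrative assumptions:

export AFL_LLVM_LTO_CALLER=1
export AFL_LLVM_LTO_CALLER_DEPTH=2   # placeholder; the entry states the default is 0
CC=afl-clang-lto CXX=afl-clang-lto++ ./configure
make
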
You can set a max depth to go through single block functions with `AFL_LLVM_LTO_CALLER_DEPTH` (default 0) + - fixes for COMPCOV/LAF and most other modules - fix for GCC_PLUGIN cmplog that broke on std::strings - afl-whatsup: - now also displays current average speed diff --git a/instrumentation/SanitizerCoverageLTO.so.cc b/instrumentation/SanitizerCoverageLTO.so.cc index 43c6ca40..4518c1c7 100644 --- a/instrumentation/SanitizerCoverageLTO.so.cc +++ b/instrumentation/SanitizerCoverageLTO.so.cc @@ -341,7 +341,7 @@ llvmGetPassPluginInfo() { using OptimizationLevel = typename PassBuilder::OptimizationLevel; #endif #if LLVM_VERSION_MAJOR >= 15 - PB.registerFullLinkTimeOptimizationLastEPCallback( + PB.registerFullLinkTimeOptimizationEarlyEPCallback( #else PB.registerOptimizerLastEPCallback( #endif @@ -1304,7 +1304,12 @@ u32 countCallers(Function *F) { for (auto *U : F->users()) { - if (auto *CI = dyn_cast(U)) { ++callers; } + if (auto *CI = dyn_cast(U)) { + + ++callers; + (void)(CI); + + } } diff --git a/instrumentation/afl-llvm-dict2file.so.cc b/instrumentation/afl-llvm-dict2file.so.cc index ac497b5b..b93f61f0 100644 --- a/instrumentation/afl-llvm-dict2file.so.cc +++ b/instrumentation/afl-llvm-dict2file.so.cc @@ -746,7 +746,7 @@ bool AFLdict2filePass::runOnModule(Module &M) { auto PA = PreservedAnalyses::all(); return PA; #else - return true; + return false; #endif } diff --git a/instrumentation/afl-llvm-pass.so.cc b/instrumentation/afl-llvm-pass.so.cc index 62f5023d..75b8532b 100644 --- a/instrumentation/afl-llvm-pass.so.cc +++ b/instrumentation/afl-llvm-pass.so.cc @@ -128,7 +128,11 @@ llvmGetPassPluginInfo() { #if LLVM_VERSION_MAJOR <= 13 using OptimizationLevel = typename PassBuilder::OptimizationLevel; #endif + #if LLVM_VERSION_MAJOR >= 16 + PB.registerOptimizerEarlyEPCallback( + #else PB.registerOptimizerLastEPCallback( + #endif [](ModulePassManager &MPM, OptimizationLevel OL) { MPM.addPass(AFLCoverage()); @@ -212,10 +216,6 @@ bool AFLCoverage::runOnModule(Module &M) { u32 rand_seed; unsigned int cur_loc = 0; -#if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */ - auto PA = PreservedAnalyses::all(); -#endif - /* Setup random() so we get Actually Random(TM) outputs from AFL_R() */ gettimeofday(&tv, &tz); rand_seed = tv.tv_sec ^ tv.tv_usec ^ getpid(); @@ -1081,7 +1081,7 @@ bool AFLCoverage::runOnModule(Module &M) { } #if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */ - return PA; + return PreservedAnalyses(); #else return true; #endif diff --git a/instrumentation/cmplog-instructions-pass.cc b/instrumentation/cmplog-instructions-pass.cc index dc60221e..fe5c2926 100644 --- a/instrumentation/cmplog-instructions-pass.cc +++ b/instrumentation/cmplog-instructions-pass.cc @@ -680,13 +680,16 @@ bool CmpLogInstructions::runOnModule(Module &M) { printf("Running cmplog-instructions-pass by andreafioraldi@gmail.com\n"); else be_quiet = 1; - hookInstrs(M); + bool ret = hookInstrs(M); verifyModule(M); #if LLVM_MAJOR >= 11 /* use new pass manager */ - return PreservedAnalyses::all(); + if (ret == false) + return PreservedAnalyses::all(); + else + return PreservedAnalyses(); #else - return true; + return ret; #endif } diff --git a/instrumentation/cmplog-routines-pass.cc b/instrumentation/cmplog-routines-pass.cc index 78317d5d..560bd73b 100644 --- a/instrumentation/cmplog-routines-pass.cc +++ b/instrumentation/cmplog-routines-pass.cc @@ -758,16 +758,16 @@ bool CmpLogRoutines::runOnModule(Module &M) { printf("Running cmplog-routines-pass by andreafioraldi@gmail.com\n"); else be_quiet = 1; - 
hookRtns(M); -#if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */ - auto PA = PreservedAnalyses::all(); -#endif + bool ret = hookRtns(M); verifyModule(M); #if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */ - return PA; + if (ret == false) + return PreservedAnalyses::all(); + else + return PreservedAnalyses(); #else - return true; + return ret; #endif } diff --git a/instrumentation/cmplog-switches-pass.cc b/instrumentation/cmplog-switches-pass.cc index 3e05c13d..2b87ea8c 100644 --- a/instrumentation/cmplog-switches-pass.cc +++ b/instrumentation/cmplog-switches-pass.cc @@ -442,16 +442,16 @@ bool CmplogSwitches::runOnModule(Module &M) { printf("Running cmplog-switches-pass by andreafioraldi@gmail.com\n"); else be_quiet = 1; - hookInstrs(M); -#if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */ - auto PA = PreservedAnalyses::all(); -#endif + bool ret = hookInstrs(M); verifyModule(M); #if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */ - return PA; + if (ret == false) + return PreservedAnalyses::all(); + else + return PreservedAnalyses(); #else - return true; + return ret; #endif } diff --git a/instrumentation/compare-transform-pass.so.cc b/instrumentation/compare-transform-pass.so.cc index b0d6355a..f8ba9de5 100644 --- a/instrumentation/compare-transform-pass.so.cc +++ b/instrumentation/compare-transform-pass.so.cc @@ -89,7 +89,7 @@ class CompareTransform : public ModulePass { #endif - return "cmplog transform"; + return "compcov transform"; } @@ -123,7 +123,11 @@ llvmGetPassPluginInfo() { #if LLVM_VERSION_MAJOR <= 13 using OptimizationLevel = typename PassBuilder::OptimizationLevel; #endif + #if LLVM_VERSION_MAJOR >= 16 + PB.registerOptimizerEarlyEPCallback( + #else PB.registerOptimizerLastEPCallback( + #endif [](ModulePassManager &MPM, OptimizationLevel OL) { MPM.addPass(CompareTransform()); @@ -746,6 +750,8 @@ bool CompareTransform::runOnModule(Module &M) { #endif + bool ret = false; + if ((isatty(2) && getenv("AFL_QUIET") == NULL) || getenv("AFL_DEBUG") != NULL) printf( "Running compare-transform-pass by laf.intel@gmail.com, extended by " @@ -753,11 +759,7 @@ bool CompareTransform::runOnModule(Module &M) { else be_quiet = 1; -#if LLVM_MAJOR >= 11 /* use new pass manager */ - auto PA = PreservedAnalyses::all(); -#endif - - transformCmps(M, true, true, true, true, true); + if (transformCmps(M, true, true, true, true, true) == true) ret = true; verifyModule(M); #if LLVM_MAJOR >= 11 /* use new pass manager */ @@ -767,9 +769,18 @@ bool CompareTransform::runOnModule(Module &M) { }*/ - return PA; + if (ret == true) { + + return PreservedAnalyses(); + + } else { + + return PreservedAnalyses::all(); + + } + #else - return true; + return ret; #endif } diff --git a/instrumentation/injection-pass.cc b/instrumentation/injection-pass.cc index 2280208b..47ddabd9 100644 --- a/instrumentation/injection-pass.cc +++ b/instrumentation/injection-pass.cc @@ -204,6 +204,8 @@ bool InjectionRoutines::hookRtns(Module &M) { Function *FuncPtr; #endif + bool ret = false; + /* iterate over all functions, bbs and instruction and add suitable calls */ for (auto &F : M) { @@ -281,6 +283,7 @@ bool InjectionRoutines::hookRtns(Module &M) { IRBuilder<> IRB(callInst->getParent()); IRB.SetInsertPoint(callInst); + ret = true; Value *parameter = callInst->getArgOperand(param); @@ -299,7 +302,7 @@ bool InjectionRoutines::hookRtns(Module &M) { } - return true; + return ret; } @@ -328,16 +331,16 @@ bool InjectionRoutines::runOnModule(Module &M) { if (getenv("AFL_LLVM_INJECTIONS_LDAP")) { doLDAP = true; } if 
(getenv("AFL_LLVM_INJECTIONS_XSS")) { doXSS = true; } - hookRtns(M); -#if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */ - auto PA = PreservedAnalyses::all(); -#endif + bool ret = hookRtns(M); verifyModule(M); #if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */ - return PA; + if (ret == false) + return PreservedAnalyses::all(); + else + return PreservedAnalyses(); #else - return true; + return ret; #endif } diff --git a/instrumentation/split-compares-pass.so.cc b/instrumentation/split-compares-pass.so.cc index 144025fb..421a7c39 100644 --- a/instrumentation/split-compares-pass.so.cc +++ b/instrumentation/split-compares-pass.so.cc @@ -189,7 +189,11 @@ llvmGetPassPluginInfo() { #if LLVM_VERSION_MAJOR <= 13 using OptimizationLevel = typename PassBuilder::OptimizationLevel; #endif + #if LLVM_VERSION_MAJOR >= 16 + PB.registerOptimizerEarlyEPCallback( + #else PB.registerOptimizerLastEPCallback( + #endif [](ModulePassManager &MPM, OptimizationLevel OL) { MPM.addPass(SplitComparesTransform()); @@ -935,7 +939,7 @@ size_t SplitComparesTransform::nextPowerOfTwo(size_t in) { /* splits fcmps into two nested fcmps with sign compare and the rest */ size_t SplitComparesTransform::splitFPCompares(Module &M) { - size_t count = 0; + size_t counts = 0; LLVMContext &C = M.getContext(); @@ -951,7 +955,7 @@ size_t SplitComparesTransform::splitFPCompares(Module &M) { } else { - return count; + return counts; } @@ -1004,7 +1008,7 @@ size_t SplitComparesTransform::splitFPCompares(Module &M) { } - if (!fcomps.size()) { return count; } + if (!fcomps.size()) { return counts; } IntegerType *Int1Ty = IntegerType::getInt1Ty(C); @@ -1690,11 +1694,11 @@ size_t SplitComparesTransform::splitFPCompares(Module &M) { #else ReplaceInstWithInst(FcmpInst->getParent()->getInstList(), ii, PN); #endif - ++count; + ++counts; } - return count; + return counts; } @@ -1743,10 +1747,6 @@ bool SplitComparesTransform::runOnModule(Module &M) { } -#if LLVM_MAJOR >= 11 - auto PA = PreservedAnalyses::all(); -#endif - if (enableFPSplit) { simplifyFPCompares(M); @@ -1778,15 +1778,7 @@ bool SplitComparesTransform::runOnModule(Module &M) { auto op0 = CI->getOperand(0); auto op1 = CI->getOperand(1); - if (!op0 || !op1) { - -#if LLVM_MAJOR >= 11 - return PA; -#else - return false; -#endif - - } + if (!op0 || !op1) { continue; } auto iTy1 = dyn_cast(op0->getType()); if (iTy1 && isa(op1->getType())) { @@ -1814,6 +1806,8 @@ bool SplitComparesTransform::runOnModule(Module &M) { } + bool ret = count == 0 ? 
false : true; + bool brokenDebug = false; if (verifyModule(M, &errs() #if LLVM_VERSION_MAJOR >= 4 || \ @@ -1852,9 +1846,12 @@ bool SplitComparesTransform::runOnModule(Module &M) { }*/ - return PA; + if (ret == false) + return PreservedAnalyses::all(); + else + return PreservedAnalyses(); #else - return true; + return ret; #endif } diff --git a/instrumentation/split-switches-pass.so.cc b/instrumentation/split-switches-pass.so.cc index e3dfea0d..aa552a42 100644 --- a/instrumentation/split-switches-pass.so.cc +++ b/instrumentation/split-switches-pass.so.cc @@ -137,7 +137,11 @@ llvmGetPassPluginInfo() { #if LLVM_VERSION_MAJOR <= 13 using OptimizationLevel = typename PassBuilder::OptimizationLevel; #endif + #if LLVM_VERSION_MAJOR >= 16 + PB.registerOptimizerEarlyEPCallback( + #else PB.registerOptimizerLastEPCallback( + #endif [](ModulePassManager &MPM, OptimizationLevel OL) { MPM.addPass(SplitSwitchesTransform()); @@ -516,11 +520,7 @@ bool SplitSwitchesTransform::runOnModule(Module &M) { else be_quiet = 1; -#if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */ - auto PA = PreservedAnalyses::all(); -#endif - - splitSwitches(M); + bool ret = splitSwitches(M); verifyModule(M); #if LLVM_VERSION_MAJOR >= 11 /* use new pass manager */ @@ -530,9 +530,12 @@ bool SplitSwitchesTransform::runOnModule(Module &M) { }*/ - return PA; + if (ret == false) + return PreservedAnalyses::all(); + else + return PreservedAnalyses(); #else - return true; + return ret; #endif } diff --git a/src/afl-cc.c b/src/afl-cc.c index faa46103..45fd398b 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -1369,6 +1369,13 @@ void mode_final_checkout(aflcc_state_t *aflcc, int argc, char **argv) { } + if (getenv("AFL_LLVM_DICT2FILE") && + (getenv("AFL_LLVM_LAF_SPLIT_SWITCHES") || + getenv("AFL_LLVM_LAF_SPLIT_COMPARES") || + getenv("AFL_LLVM_LAF_SPLIT_FLOATS") || + getenv("AFL_LLVM_LAF_TRANSFORM_COMPARES"))) + FATAL("AFL_LLVM_DICT2FILE is incompatible with AFL_LLVM_LAF_*"); + aflcc->cmplog_mode = getenv("AFL_CMPLOG") || getenv("AFL_LLVM_CMPLOG") || getenv("AFL_GCC_CMPLOG"); -- cgit 1.4.1 From 40adc344136c954cdc58e62acb46708816f5870a Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 9 Apr 2024 09:24:19 +0200 Subject: fix -V, code format --- docs/Changelog.md | 2 ++ include/afl-fuzz.h | 13 +++---- src/afl-fuzz-run.c | 25 ++++++++++---- src/afl-fuzz-stats.c | 98 +++++++++++++++++++++++++++++++++++++++------------- src/afl-fuzz.c | 21 ++++++++--- 5 files changed, 117 insertions(+), 42 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 72e20a18..116134ff 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -25,6 +25,8 @@ - fix for `-t xxx+` feature - -e extension option now saves the queue items, crashes, etc. with the extension too + - fixes for trimmming, correct -V time and reading stats on resume by eqv + thanks a lot! - afl-cc: - added collision free caller instrumentation to LTO mode. activate with `AFL_LLVM_LTO_CALLER=1`. You can set a max depth to go through single diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 91eb6887..c813ae7e 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -5,9 +5,9 @@ Originally written by Michal Zalewski Now maintained by Marc Heuse , - Heiko Eißfeldt , - Andrea Fioraldi , - Dominik Maier + Dominik Maier , + Andrea Fioraldi , and + Heiko Eissfeldt Copyright 2016, 2017 Google Inc. All rights reserved. Copyright 2019-2024 AFLplusplus Project. All rights reserved. 
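
The afl-cc hunk above makes AFL_LLVM_DICT2FILE fatal when combined with any AFL_LLVM_LAF_* transform, so the dictionary workflow has to run on its own. A sketch under that assumption; the file names are placeholders and the usual AFL_LLVM_DICT2FILE behaviour (write a dictionary while compiling) is assumed rather than taken from this patch:

# compile once with dictionary extraction, then hand the result to afl-fuzz;
# do not set AFL_LLVM_LAF_* in the same build - afl-cc now aborts on that combination
AFL_LLVM_DICT2FILE=$PWD/target.dict afl-clang-fast -o target target.c
afl-fuzz -i in -o out -x target.dict -- ./target @@
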
@@ -1218,9 +1218,9 @@ void show_stats_normal(afl_state_t *); void show_stats_pizza(afl_state_t *); void show_init_stats(afl_state_t *); -void update_calibration_time(afl_state_t *afl, u64* time); -void update_trim_time(afl_state_t *afl, u64* time); -void update_sync_time(afl_state_t *afl, u64* time); +void update_calibration_time(afl_state_t *afl, u64 *time); +void update_trim_time(afl_state_t *afl, u64 *time); +void update_sync_time(afl_state_t *afl, u64 *time); /* StatsD */ @@ -1409,3 +1409,4 @@ void queue_testcase_store_mem(afl_state_t *afl, struct queue_entry *q, u8 *mem); #endif #endif + diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 82cdeb81..1c6ce56a 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -505,7 +505,8 @@ u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem, fault = fuzz_run_target(afl, &afl->fsrv, use_tmout); - // update the time spend in calibration after each execution, as those may be slow + // update the time spend in calibration after each execution, as those may + // be slow update_calibration_time(afl, &calibration_start_us); /* afl->stop_soon is set by the handler for Ctrl+C. When it's pressed, @@ -680,7 +681,8 @@ void sync_fuzzers(afl_state_t *afl) { while ((sd_ent = readdir(sd))) { - // since sync can take substantial amounts of time, update time spend every iteration + // since sync can take substantial amounts of time, update time spend every + // iteration update_sync_time(afl, &sync_start_us); u8 qd_synced_path[PATH_MAX], qd_path[PATH_MAX]; @@ -870,7 +872,7 @@ void sync_fuzzers(afl_state_t *afl) { if (afl->foreign_sync_cnt) read_foreign_testcases(afl, 0); - //add time in sync one last time + // add time in sync one last time update_sync_time(afl, &sync_start_us); afl->last_sync_time = get_cur_time(); @@ -910,7 +912,12 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { } - if (custom_trimmed) { fault = trimmed_case; goto abort_trimming; } + if (custom_trimmed) { + + fault = trimmed_case; + goto abort_trimming; + + } } @@ -924,7 +931,12 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { detected, it will still work to some extent, so we don't check for this. */ - if (unlikely(q->len < 5)) { fault = 0; goto abort_trimming; } + if (unlikely(q->len < 5)) { + + fault = 0; + goto abort_trimming; + + } afl->stage_name = afl->stage_name_buf; afl->bytes_trim_in += q->len; @@ -986,7 +998,6 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { /* Let's save a clean trace, which will be needed by update_bitmap_score once we're done with the trimming stuff. */ - if (!needs_write) { needs_write = 1; @@ -1001,7 +1012,6 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { } /* Since this can be slow, update the screen every now and then. 
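
The hunks above add separate time accounting for calibration, trimming and syncing. Trimming can also be disabled entirely via AFL_DISABLE_TRIM, which appears in the environment variable lists later in this log; a minimal sketch with placeholder paths:

AFL_DISABLE_TRIM=1 afl-fuzz -i in -o out -- ./target @@
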
*/ - if (!(trim_exec++ % afl->stats_update_freq)) { show_stats(afl); } ++afl->stage_cur; @@ -1119,3 +1129,4 @@ common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) { return 0; } + diff --git a/src/afl-fuzz-stats.c b/src/afl-fuzz-stats.c index b39c8299..7e1a3b92 100644 --- a/src/afl-fuzz-stats.c +++ b/src/afl-fuzz-stats.c @@ -133,8 +133,10 @@ void write_setup_file(afl_state_t *afl, u32 argc, char **argv) { } -static bool starts_with(char* key, char* line) { +static bool starts_with(char *key, char *line) { + return strncmp(key, line, strlen(key)) == 0; + } /* load some of the existing stats file when resuming.*/ @@ -179,25 +181,43 @@ void load_stats_file(afl_state_t *afl) { strcpy(keystring, lstartptr); lptr++; char *nptr; - if (starts_with("run_time", keystring)){ + if (starts_with("run_time", keystring)) { + afl->prev_run_time = 1000 * strtoull(lptr, &nptr, 10); + } - if (starts_with("cycles_done", keystring)){ + + if (starts_with("cycles_done", keystring)) { + afl->queue_cycle = strtoull(lptr, &nptr, 10) ? strtoull(lptr, &nptr, 10) + 1 : 0; + } - if (starts_with("calibration_time", keystring)){ + + if (starts_with("calibration_time", keystring)) { + afl->calibration_time_us = strtoull(lptr, &nptr, 10) * 1000000; + } - if (starts_with("sync_time", keystring)){ + + if (starts_with("sync_time", keystring)) { + afl->sync_time_us = strtoull(lptr, &nptr, 10) * 1000000; + } - if (starts_with("trim_time", keystring)){ + + if (starts_with("trim_time", keystring)) { + afl->trim_time_us = strtoull(lptr, &nptr, 10) * 1000000; + } - if (starts_with("execs_done", keystring)){ + + if (starts_with("execs_done", keystring)) { + afl->fsrv.total_execs = strtoull(lptr, &nptr, 10); + } + if (starts_with("corpus_count", keystring)) { u32 corpus_count = strtoul(lptr, &nptr, 10); @@ -206,27 +226,46 @@ void load_stats_file(afl_state_t *afl) { WARNF( "queue/ has been modified -- things might not work, you're " "on your own!"); + sleep(3); } } - if (starts_with("corpus_found", keystring)){ + + if (starts_with("corpus_found", keystring)) { + afl->queued_discovered = strtoul(lptr, &nptr, 10); + } - if (starts_with("corpus_imported", keystring)){ + + if (starts_with("corpus_imported", keystring)) { + afl->queued_imported = strtoul(lptr, &nptr, 10); + } + if (starts_with("max_depth", keystring)) { + afl->max_depth = strtoul(lptr, &nptr, 10); + } + if (starts_with("saved_crashes", keystring)) { + afl->saved_crashes = strtoull(lptr, &nptr, 10); + } + if (starts_with("saved_hangs", keystring)) { + afl->saved_hangs = strtoull(lptr, &nptr, 10); + } + } + } + if (afl->saved_crashes) { write_crash_readme(afl); } return; @@ -334,7 +373,7 @@ void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg, "\n" "target_mode : %s%s%s%s%s%s%s%s%s%s\n" "command_line : %s\n", - (afl->start_time - afl->prev_run_time) / 1000, cur_time / 1000, + (afl->start_time /*- afl->prev_run_time*/) / 1000, cur_time / 1000, runtime / 1000, (u32)getpid(), afl->queue_cycle ? (afl->queue_cycle - 1) : 0, afl->cycles_wo_finds, afl->longest_find_time > cur_time - afl->last_find_time @@ -342,11 +381,13 @@ void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg, : ((afl->start_time == 0 || afl->last_find_time == 0) ? 
0 : (cur_time - afl->last_find_time) / 1000), - (runtime - (afl->calibration_time_us + afl->sync_time_us + afl->trim_time_us) / 1000) / 1000, - afl->calibration_time_us / 1000000, - afl->sync_time_us / 1000000, - afl->trim_time_us / 1000000, - afl->fsrv.total_execs, afl->fsrv.total_execs / ((double)(runtime) / 1000), + (runtime - + (afl->calibration_time_us + afl->sync_time_us + afl->trim_time_us) / + 1000) / + 1000, + afl->calibration_time_us / 1000000, afl->sync_time_us / 1000000, + afl->trim_time_us / 1000000, afl->fsrv.total_execs, + afl->fsrv.total_execs / ((double)(runtime) / 1000), afl->last_avg_execs_saved, afl->queued_items, afl->queued_favored, afl->queued_discovered, afl->queued_imported, afl->queued_variable, afl->max_depth, afl->current_entry, afl->pending_favored, @@ -415,6 +456,7 @@ void write_stats_file(afl_state_t *afl, u32 t_bytes, double bitmap_cvg, fclose(f); rename(fn_tmp, fn_final); + } #ifdef INTROSPECTION @@ -2438,20 +2480,28 @@ void show_init_stats(afl_state_t *afl) { #undef IB } -void update_calibration_time(afl_state_t *afl, u64* time){ - u64 cur = get_cur_time_us(); - afl->calibration_time_us += cur-*time; + +void update_calibration_time(afl_state_t *afl, u64 *time) { + + u64 cur = get_cur_time_us(); + afl->calibration_time_us += cur - *time; *time = cur; + } -void update_trim_time(afl_state_t *afl, u64* time){ - u64 cur = get_cur_time_us(); - afl->trim_time_us += cur-*time; +void update_trim_time(afl_state_t *afl, u64 *time) { + + u64 cur = get_cur_time_us(); + afl->trim_time_us += cur - *time; *time = cur; + } -void update_sync_time(afl_state_t *afl, u64* time){ - u64 cur = get_cur_time_us(); - afl->sync_time_us += cur-*time; +void update_sync_time(afl_state_t *afl, u64 *time) { + + u64 cur = get_cur_time_us(); + afl->sync_time_us += cur - *time; *time = cur; + } + diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 102809cd..00d24ab1 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -5,8 +5,9 @@ Originally written by Michal Zalewski Now maintained by Marc Heuse , - Heiko Eißfeldt and - Andrea Fioraldi + Dominik Meier , + Andrea Fioraldi , and + Heiko Eissfeldt Copyright 2016, 2017 Google Inc. All rights reserved. Copyright 2019-2024 AFLplusplus Project. All rights reserved. @@ -199,7 +200,8 @@ static void usage(u8 *argv0, int more_help) { "Test settings:\n" " -s seed - use a fixed seed for the RNG\n" - " -V seconds - fuzz for a specified time then terminate\n" + " -V seconds - fuzz for a specified time then terminate (fuzz time " + "only!)\n" " -E execs - fuzz for an approx. no. 
of total executions then " "terminate\n" " Note: not precise and can have several more " @@ -2543,8 +2545,6 @@ int main(int argc, char **argv_orig, char **envp) { } // (void)nice(-20); // does not improve the speed - // real start time, we reset, so this works correctly with -V - afl->start_time = get_cur_time(); #ifdef INTROSPECTION u32 prev_saved_crashes = 0, prev_saved_tmouts = 0; @@ -2565,6 +2565,9 @@ int main(int argc, char **argv_orig, char **envp) { OKF("Writing mutation introspection to '%s'", ifn); #endif + // real start time, we reset, so this works correctly with -V + afl->start_time = get_cur_time(); + while (likely(!afl->stop_soon)) { cull_queue(afl); @@ -2585,6 +2588,13 @@ int main(int argc, char **argv_orig, char **envp) { sync_fuzzers(afl); + if (!afl->queue_cycle && afl->afl_env.afl_import_first) { + + // real start time, we reset, so this works correctly with -V + afl->start_time = get_cur_time(); + + } + } ++afl->queue_cycle; @@ -3099,3 +3109,4 @@ stop_fuzzing: } #endif /* !AFL_LIB */ + -- cgit 1.4.1 From e01307a993387bfe842df1deb23ec7facffd4859 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sat, 13 Apr 2024 11:39:19 +0200 Subject: v4.20c --- README.md | 4 ++-- docs/Changelog.md | 2 +- include/config.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'docs/Changelog.md') diff --git a/README.md b/README.md index f15089c2..2583407e 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,9 @@ AFL++ logo -Release version: [4.10c](https://github.com/AFLplusplus/AFLplusplus/releases) +Release version: [4.20c](https://github.com/AFLplusplus/AFLplusplus/releases) -GitHub version: 4.20a +GitHub version: 4.20c Repository: [https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus) diff --git a/docs/Changelog.md b/docs/Changelog.md index 116134ff..2428d63f 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -3,7 +3,7 @@ This is the list of all noteworthy changes made in every public release of the tool. See README.md for the general instruction manual. -### Version ++4.20a (dev) +### Version ++4.20c (release) ! A new forkserver communication model is now introduced. 
afl-fuzz is backward compatible to old compiled targets if they are not built for CMPLOG/Redqueen, but new compiled targets will not work with diff --git a/include/config.h b/include/config.h index 31d66b14..3ea059ff 100644 --- a/include/config.h +++ b/include/config.h @@ -26,7 +26,7 @@ /* Version string: */ // c = release, a = volatile github dev, e = experimental branch -#define VERSION "++4.20a" +#define VERSION "++4.20c" /****************************************************** * * -- cgit 1.4.1 From 6b049536f1614892df99f7c1ebd7710192b607a8 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sat, 13 Apr 2024 11:54:02 +0200 Subject: v4.21 init --- .github/workflows/ci.yml | 1 - README.md | 2 +- docs/Changelog.md | 4 ++++ include/config.h | 2 +- 4 files changed, 6 insertions(+), 3 deletions(-) (limited to 'docs/Changelog.md') diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dd0d13e9..ed382fbb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -5,7 +5,6 @@ on: branches: - stable - dev - - 420 pull_request: branches: - dev # No need for stable-pull-request, as that equals dev-push diff --git a/README.md b/README.md index 2583407e..34d73890 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ Release version: [4.20c](https://github.com/AFLplusplus/AFLplusplus/releases) -GitHub version: 4.20c +GitHub version: 4.21a Repository: [https://github.com/AFLplusplus/AFLplusplus](https://github.com/AFLplusplus/AFLplusplus) diff --git a/docs/Changelog.md b/docs/Changelog.md index 2428d63f..a7eb239b 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -3,6 +3,10 @@ This is the list of all noteworthy changes made in every public release of the tool. See README.md for the general instruction manual. +### Version ++4.21a (dev) + * your PR? :-) + + ### Version ++4.20c (release) ! A new forkserver communication model is now introduced. afl-fuzz is backward compatible to old compiled targets if they are not built diff --git a/include/config.h b/include/config.h index 3ea059ff..a2ff68ea 100644 --- a/include/config.h +++ b/include/config.h @@ -26,7 +26,7 @@ /* Version string: */ // c = release, a = volatile github dev, e = experimental branch -#define VERSION "++4.20c" +#define VERSION "++4.21a" /****************************************************** * * -- cgit 1.4.1 From 458b939bc4f0ed4016c2741529435a72283ffc74 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 19 Apr 2024 17:34:50 +0200 Subject: LTO fix --- docs/Changelog.md | 3 ++- instrumentation/SanitizerCoverageLTO.so.cc | 2 +- src/afl-cc.c | 5 ----- 3 files changed, 3 insertions(+), 7 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index a7eb239b..4e34baea 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -4,7 +4,8 @@ release of the tool. See README.md for the general instruction manual. ### Version ++4.21a (dev) - * your PR? 
:-) + * afl-cc: + - fixes for LTO and outdated afl-gcc mode ### Version ++4.20c (release) diff --git a/instrumentation/SanitizerCoverageLTO.so.cc b/instrumentation/SanitizerCoverageLTO.so.cc index 4518c1c7..14482deb 100644 --- a/instrumentation/SanitizerCoverageLTO.so.cc +++ b/instrumentation/SanitizerCoverageLTO.so.cc @@ -341,7 +341,7 @@ llvmGetPassPluginInfo() { using OptimizationLevel = typename PassBuilder::OptimizationLevel; #endif #if LLVM_VERSION_MAJOR >= 15 - PB.registerFullLinkTimeOptimizationEarlyEPCallback( + PB.registerFullLinkTimeOptimizationLastEPCallback( #else PB.registerOptimizerLastEPCallback( #endif diff --git a/src/afl-cc.c b/src/afl-cc.c index 202e8145..15a5bd8e 100644 --- a/src/afl-cc.c +++ b/src/afl-cc.c @@ -1269,13 +1269,8 @@ void mode_final_checkout(aflcc_state_t *aflcc, int argc, char **argv) { aflcc->instrument_mode == INSTRUMENT_PCGUARD) { aflcc->lto_mode = 1; - // force CFG - // if (!aflcc->instrument_mode) { - aflcc->instrument_mode = INSTRUMENT_PCGUARD; - // } - } else if (aflcc->instrument_mode == INSTRUMENT_CLASSIC) { aflcc->lto_mode = 1; -- cgit 1.4.1 From 951a0e52254d873dd0f1a3a80d9acda44563edd5 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 25 Apr 2024 10:04:58 +0200 Subject: fix AFL_PERSISTENT_RECORD --- docs/Changelog.md | 2 ++ src/afl-forkserver.c | 14 ++++++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 4e34baea..48c0ab06 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -4,6 +4,8 @@ release of the tool. See README.md for the general instruction manual. ### Version ++4.21a (dev) + * afl-fuzz + - fix AFL_PERSISTENT_RECORD * afl-cc: - fixes for LTO and outdated afl-gcc mode diff --git a/src/afl-forkserver.c b/src/afl-forkserver.c index 149a973e..e5f64c81 100644 --- a/src/afl-forkserver.c +++ b/src/afl-forkserver.c @@ -27,6 +27,9 @@ */ #include "config.h" +#ifdef AFL_PERSISTENT_RECORD + #include "afl-fuzz.h" +#endif #include "types.h" #include "debug.h" #include "common.h" @@ -2078,10 +2081,13 @@ store_persistent_record: { u32 len = fsrv->persistent_record_len[entry]; if (likely(len && data)) { - snprintf(fn, sizeof(fn), persistent_out_fmt, fsrv->persistent_record_dir, - fsrv->persistent_record_cnt, writecnt++, - afl->file_extension ? "." : "", - afl->file_extension ? (const char *)afl->file_extension : ""); + snprintf( + fn, sizeof(fn), persistent_out_fmt, fsrv->persistent_record_dir, + fsrv->persistent_record_cnt, writecnt++, + ((afl_state_t *)(fsrv->afl_ptr))->file_extension ? "." : "", + ((afl_state_t *)(fsrv->afl_ptr))->file_extension + ? 
(const char *)((afl_state_t *)(fsrv->afl_ptr))->file_extension + : ""); int fd = open(fn, O_CREAT | O_TRUNC | O_WRONLY, 0644); if (fd >= 0) { -- cgit 1.4.1 From 70c60cfba798d4c7349280746e9f2488778be25e Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 26 Apr 2024 16:14:45 +0200 Subject: work with spaces in filenames --- afl-cmin | 6 +++--- afl-cmin.bash | 1 + docs/Changelog.md | 3 +++ src/afl-fuzz-init.c | 25 +++++++++++++++++++++++-- 4 files changed, 30 insertions(+), 5 deletions(-) (limited to 'docs/Changelog.md') diff --git a/afl-cmin b/afl-cmin index a1d5401f..a88460a8 100755 --- a/afl-cmin +++ b/afl-cmin @@ -13,7 +13,7 @@ awk -f - -- ${@+"$@"} <<'EOF' # awk script to minimize a test corpus of input files # # based on afl-cmin bash script written by Michal Zalewski -# rewritten by Heiko Eißfeldt (hexcoder-) +# rewritten by Heiko Eissfeldt (hexcoder-) # tested with: # gnu awk (x86 Linux) # bsd awk (x86 *BSD) @@ -603,8 +603,8 @@ BEGIN { # create path for the trace file from afl-showmap tracefile_path = trace_dir"/"fn # ensure the file size is not zero - cmd = "du -b "tracefile_path - "ls -l "tracefile_path + cmd = "du -b \""tracefile_path"\"" + # "ls -l \""tracefile_path"\"" cmd | getline output close(cmd) split(output, result, "\t") diff --git a/afl-cmin.bash b/afl-cmin.bash index 6c271220..99ae80d9 100755 --- a/afl-cmin.bash +++ b/afl-cmin.bash @@ -152,6 +152,7 @@ Minimization settings: -e - solve for edge coverage only, ignore hit counts For additional tips, please consult README.md. +This script cannot read filenames that end with a space ' '. Environment variables used: AFL_KEEP_TRACES: leave the temporary \.traces directory diff --git a/docs/Changelog.md b/docs/Changelog.md index 48c0ab06..f288c33c 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -6,8 +6,11 @@ ### Version ++4.21a (dev) * afl-fuzz - fix AFL_PERSISTENT_RECORD + - prevent filenames in the queue that have spaces * afl-cc: - fixes for LTO and outdated afl-gcc mode + * afl-cmin + - work with input files that have a space ### Version ++4.20c (release) diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index b844123d..2d540eb1 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -459,6 +459,24 @@ void bind_to_free_cpu(afl_state_t *afl) { #endif /* HAVE_AFFINITY */ +/* transforms spaces in a string to underscores (inplace) */ + +static void no_spaces(u8 *string) { + + if (string) { + + u8 *ptr = string; + while (*ptr != 0) { + + if (*ptr == ' ') { *ptr = '_'; } + ++ptr; + + } + + } + +} + /* Shuffle an array of pointers. Might be slightly biased. */ static void shuffle_ptrs(afl_state_t *afl, void **ptrs, u32 cnt) { @@ -1381,11 +1399,11 @@ void perform_dry_run(afl_state_t *afl) { static void link_or_copy(u8 *old_path, u8 *new_path) { s32 i = link(old_path, new_path); + if (!i) { return; } + s32 sfd, dfd; u8 *tmp; - if (!i) { return; } - sfd = open(old_path, O_RDONLY); if (sfd < 0) { PFATAL("Unable to open '%s'", old_path); } @@ -1495,6 +1513,9 @@ void pivot_inputs(afl_state_t *afl) { afl->fsrv.total_execs, use_name, afl->file_extension ? "." : "", afl->file_extension ? 
(const char *)afl->file_extension : ""); + u8 *pos = strrchr(nfn, '/'); + no_spaces(pos + 30); + #else nfn = alloc_printf( -- cgit 1.4.1 From 2c3f761ede22c132277a855f2219b85a34c6048a Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 26 Apr 2024 16:16:21 +0200 Subject: changes --- docs/Changelog.md | 2 ++ 1 file changed, 2 insertions(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index f288c33c..c1b2f62a 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -7,8 +7,10 @@ * afl-fuzz - fix AFL_PERSISTENT_RECORD - prevent filenames in the queue that have spaces + - minor fix for FAST schedules * afl-cc: - fixes for LTO and outdated afl-gcc mode + - ensure shared memory variables are visible in weird build setups * afl-cmin - work with input files that have a space -- cgit 1.4.1 From 26eaf53a832be0b12dadbbd290b4a7e676818347 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 2 May 2024 08:35:24 +0200 Subject: AFL_DISABLE_REDUNDANT --- docs/Changelog.md | 2 ++ docs/env_variables.md | 3 +++ include/afl-fuzz.h | 2 +- include/envs.h | 3 ++- src/afl-fuzz-init.c | 7 +++++-- src/afl-fuzz-queue.c | 1 + src/afl-fuzz-redqueen.c | 9 +++++---- src/afl-fuzz-state.c | 7 +++++++ src/afl-fuzz.c | 1 + 9 files changed, 27 insertions(+), 8 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index c1b2f62a..5cb6973a 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -5,6 +5,7 @@ ### Version ++4.21a (dev) * afl-fuzz + - added AFL_DISABLE_REDUNDANT for huge queues - fix AFL_PERSISTENT_RECORD - prevent filenames in the queue that have spaces - minor fix for FAST schedules @@ -13,6 +14,7 @@ - ensure shared memory variables are visible in weird build setups * afl-cmin - work with input files that have a space + * enhanced the ASAN configuration ### Version ++4.20c (release) diff --git a/docs/env_variables.md b/docs/env_variables.md index 1e4fc7ba..01904aea 100644 --- a/docs/env_variables.md +++ b/docs/env_variables.md @@ -381,6 +381,9 @@ checks or alter some of the more exotic semantics of the tool: - Setting `AFL_DISABLE_TRIM` tells afl-fuzz not to trim test cases. This is usually a bad idea! + - Setting `AFL_DISABLE_REDUNDANT` disables any queue items that are redundant. + This can be useful with huge queues. 
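
A minimal usage sketch for the AFL_DISABLE_REDUNDANT variable documented just above, e.g. when resuming a fuzzing job with a very large queue; the paths and target are placeholders:

AFL_DISABLE_REDUNDANT=1 afl-fuzz -i - -o out -- ./target @@
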
+ - Setting `AFL_KEEP_TIMEOUTS` will keep longer running inputs if they reach new coverage diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index c813ae7e..1a958006 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -452,7 +452,7 @@ typedef struct afl_env_vars { afl_keep_timeouts, afl_no_crash_readme, afl_ignore_timeouts, afl_no_startup_calibration, afl_no_warn_instability, afl_post_process_keep_original, afl_crashing_seeds_as_new_crash, - afl_final_sync, afl_ignore_seed_problems; + afl_final_sync, afl_ignore_seed_problems, afl_disable_redundant; u8 *afl_tmpdir, *afl_custom_mutator_library, *afl_python_module, *afl_path, *afl_hang_tmout, *afl_forksrv_init_tmout, *afl_preload, diff --git a/include/envs.h b/include/envs.h index 56a4916c..c895f726 100644 --- a/include/envs.h +++ b/include/envs.h @@ -26,7 +26,8 @@ static char *afl_environment_variables[] = { "AFL_CUSTOM_MUTATOR_ONLY", "AFL_CUSTOM_INFO_PROGRAM", "AFL_CUSTOM_INFO_PROGRAM_ARGV", "AFL_CUSTOM_INFO_PROGRAM_INPUT", "AFL_CUSTOM_INFO_OUT", "AFL_CXX", "AFL_CYCLE_SCHEDULES", "AFL_DEBUG", - "AFL_DEBUG_CHILD", "AFL_DEBUG_GDB", "AFL_DEBUG_UNICORN", "AFL_DISABLE_TRIM", + "AFL_DEBUG_CHILD", "AFL_DEBUG_GDB", "AFL_DEBUG_UNICORN", + "AFL_DISABLE_REDUNDANT", "AFL_DISABLE_TRIM", "AFL_DISABLE_LLVM_INSTRUMENTATION", "AFL_DONT_OPTIMIZE", "AFL_DRIVER_STDERR_DUPLICATE_FILENAME", "AFL_DUMB_FORKSRV", "AFL_EARLY_FORKSERVER", "AFL_ENTRYPOINT", "AFL_EXIT_WHEN_DONE", diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index 2d540eb1..b3fe9318 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -933,10 +933,13 @@ void perform_dry_run(afl_state_t *afl) { res = calibrate_case(afl, q, use_mem, 0, 1); /* For AFLFast schedules we update the queue entry */ - if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE) && likely(q->exec_cksum)) { + if (unlikely(afl->schedule >= FAST && afl->schedule <= RARE) && + likely(q->exec_cksum)) { + q->n_fuzz_entry = q->exec_cksum % N_FUZZ_SIZE; + } - + if (afl->stop_soon) { return; } if (res == afl->crash_mode || res == FSRV_RUN_NOBITS) { diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index df4e7d79..5987ad0c 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -370,6 +370,7 @@ void mark_as_redundant(afl_state_t *afl, struct queue_entry *q, u8 state) { s32 fd; + if (unlikely(afl->afl_env.afl_disable_redundant)) { q->disabled = 1; } fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION); if (fd < 0) { PFATAL("Unable to create '%s'", fn); } close(fd); diff --git a/src/afl-fuzz-redqueen.c b/src/afl-fuzz-redqueen.c index cfa57c1d..9316da71 100644 --- a/src/afl-fuzz-redqueen.c +++ b/src/afl-fuzz-redqueen.c @@ -2764,15 +2764,15 @@ static u8 rtn_fuzz(afl_state_t *afl, u32 key, u8 *orig_buf, u8 *buf, u8 *cbuf, #ifdef _DEBUG u32 j; struct cmp_header *hh = &afl->orig_cmp_map->headers[key]; - fprintf(stderr, "RTN N hits=%u shape=%u attr=%u v0=", h->hits, - hshape, h->attribute); + fprintf(stderr, "RTN N hits=%u shape=%u attr=%u v0=", h->hits, hshape, + h->attribute); for (j = 0; j < 8; j++) fprintf(stderr, "%02x", o->v0[j]); fprintf(stderr, " v1="); for (j = 0; j < 8; j++) fprintf(stderr, "%02x", o->v1[j]); - fprintf(stderr, "\nRTN O hits=%u shape=%u attr=%u o0=", hh->hits, - hshape, hh->attribute); + fprintf(stderr, "\nRTN O hits=%u shape=%u attr=%u o0=", hh->hits, hshape, + hh->attribute); for (j = 0; j < 8; j++) fprintf(stderr, "%02x", orig_o->v0[j]); fprintf(stderr, " o1="); @@ -3273,3 +3273,4 @@ exit_its: return r; } + diff --git a/src/afl-fuzz-state.c b/src/afl-fuzz-state.c 
index c21ae6be..543fdc1c 100644 --- a/src/afl-fuzz-state.c +++ b/src/afl-fuzz-state.c @@ -293,6 +293,13 @@ void read_afl_environment(afl_state_t *afl, char **envp) { afl->afl_env.afl_cmplog_only_new = get_afl_env(afl_environment_variables[i]) ? 1 : 0; + } else if (!strncmp(env, "AFL_DISABLE_REDUNDANT", + + afl_environment_variable_len)) { + + afl->afl_env.afl_disable_redundant = + get_afl_env(afl_environment_variables[i]) ? 1 : 0; + } else if (!strncmp(env, "AFL_NO_STARTUP_CALIBRATION", afl_environment_variable_len)) { diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 00d24ab1..329ce942 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -264,6 +264,7 @@ static void usage(u8 *argv0, int more_help) { "AFL_CYCLE_SCHEDULES: after completing a cycle, switch to a different -p schedule\n" "AFL_DEBUG: extra debugging output for Python mode trimming\n" "AFL_DEBUG_CHILD: do not suppress stdout/stderr from target\n" + "AFL_DISABLE_REDUNDANT: disable any queue item that is redundant\n" "AFL_DISABLE_TRIM: disable the trimming of test cases\n" "AFL_DUMB_FORKSRV: use fork server without feedback from target\n" "AFL_EXIT_WHEN_DONE: exit when all inputs are run and no new finds are found\n" -- cgit 1.4.1 From ac6ccd53dff5a43050ad8a0922c8fa47e69333a8 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 7 May 2024 16:46:15 +0200 Subject: stat update during syncing --- docs/Changelog.md | 1 + src/afl-fuzz-init.c | 9 ++++++++- src/afl-fuzz-run.c | 3 +++ 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 5cb6973a..87311b1b 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -9,6 +9,7 @@ - fix AFL_PERSISTENT_RECORD - prevent filenames in the queue that have spaces - minor fix for FAST schedules + - more frequent stats update when syncing (todo: check performance impact) * afl-cc: - fixes for LTO and outdated afl-gcc mode - ensure shared memory variables are visible in weird build setups diff --git a/src/afl-fuzz-init.c b/src/afl-fuzz-init.c index b3fe9318..01d0730d 100644 --- a/src/afl-fuzz-init.c +++ b/src/afl-fuzz-init.c @@ -577,6 +577,8 @@ void read_foreign_testcases(afl_state_t *afl, int first) { afl->stage_cur = 0; afl->stage_max = 0; + show_stats(afl); + for (i = 0; i < (u32)nl_cnt; ++i) { struct stat st; @@ -655,7 +657,12 @@ void read_foreign_testcases(afl_state_t *afl, int first) { munmap(mem, st.st_size); close(fd); - if (st.st_mtime > mtime_max) mtime_max = st.st_mtime; + if (st.st_mtime > mtime_max) { + + mtime_max = st.st_mtime; + show_stats(afl); + + } } diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index ab96c778..ed7cb4ce 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -771,6 +771,8 @@ void sync_fuzzers(afl_state_t *afl) { afl->stage_cur = 0; afl->stage_max = 0; + show_stats(afl); + /* For every file queued by this fuzzer, parse ID and see if we have looked at it before; exec a test case if not. 
*/ @@ -830,6 +832,7 @@ void sync_fuzzers(afl_state_t *afl) { afl->syncing_party = sd_ent->d_name; afl->queued_imported += save_if_interesting(afl, mem, new_len, fault); + show_stats(afl); afl->syncing_party = 0; munmap(mem, st.st_size); -- cgit 1.4.1 From db60555c1b01e72301d249eed4cbb478827c4d50 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Sat, 11 May 2024 08:58:57 +0200 Subject: update changelog --- docs/Changelog.md | 1 + 1 file changed, 1 insertion(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 87311b1b..aa142274 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -11,6 +11,7 @@ - minor fix for FAST schedules - more frequent stats update when syncing (todo: check performance impact) * afl-cc: + - re-enable i386 support that was accidentally disabled - fixes for LTO and outdated afl-gcc mode - ensure shared memory variables are visible in weird build setups * afl-cmin -- cgit 1.4.1 From 24b9d74e70107a4517396d7fa940140e206398bf Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Mon, 13 May 2024 08:44:43 +0200 Subject: compcov int fix --- docs/Changelog.md | 1 + instrumentation/split-compares-pass.so.cc | 6 ++++++ 2 files changed, 7 insertions(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index aa142274..9a95e343 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -13,6 +13,7 @@ * afl-cc: - re-enable i386 support that was accidentally disabled - fixes for LTO and outdated afl-gcc mode + - fix COMPCOV split compare for old LLVMs - ensure shared memory variables are visible in weird build setups * afl-cmin - work with input files that have a space diff --git a/instrumentation/split-compares-pass.so.cc b/instrumentation/split-compares-pass.so.cc index 728ebc22..9b7bf256 100644 --- a/instrumentation/split-compares-pass.so.cc +++ b/instrumentation/split-compares-pass.so.cc @@ -1778,7 +1778,13 @@ bool SplitComparesTransform::runOnModule(Module &M) { auto op0 = CI->getOperand(0); auto op1 = CI->getOperand(1); + // has to have valid operands if (!op0 || !op1) { continue; } + // has exactly one constant and one variable + int constants = 0; + if (dyn_cast<ConstantInt>(op0)) { ++constants; } + if (dyn_cast<ConstantInt>(op1)) { ++constants; } + if (constants != 1) { continue; } auto iTy1 = dyn_cast<IntegerType>(op0->getType()); if (iTy1 && isa<IntegerType>(op1->getType())) { -- cgit 1.4.1 From b282ce999d2ab9428210deb0e838f45a6a534084 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Mon, 13 May 2024 13:42:58 +0200 Subject: post_process after trim --- docs/Changelog.md | 1 + docs/custom_mutators.md | 5 ++++ src/afl-fuzz-run.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++++ test/test-llvm.sh | 5 ++-- 4 files changed, 71 insertions(+), 2 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 9a95e343..818010a7 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -7,6 +7,7 @@ * afl-fuzz - added AFL_DISABLE_REDUNDANT for huge queues - fix AFL_PERSISTENT_RECORD + - run custom_post_process after standard trimming - prevent filenames in the queue that have spaces - minor fix for FAST schedules - more frequent stats update when syncing (todo: check performance impact) diff --git a/docs/custom_mutators.md b/docs/custom_mutators.md index 73e3c802..b7a7032f 100644 --- a/docs/custom_mutators.md +++ b/docs/custom_mutators.md @@ -266,6 +266,11 @@ trimmed input.
Here's a quick API description: Omitting any of three trimming methods will cause the trimming to be disabled and trigger a fallback to the built-in default trimming routine. +**IMPORTANT** If you have a custom post process mutator that needs to be run +after trimming, you must call it yourself at the end of your successful +trimming! + + ### Environment Variables Optionally, the following environment variables are supported: diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index ed7cb4ce..2a55da00 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -1028,6 +1028,68 @@ u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) { if (needs_write) { + // run afl_custom_post_process + + if (unlikely(afl->custom_mutators_count) && + likely(!afl->afl_env.afl_post_process_keep_original)) { + + ssize_t new_size = q->len; + u8 *new_mem = in_buf; + u8 *new_buf = NULL; + + LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, { + + if (el->afl_custom_post_process) { + + new_size = el->afl_custom_post_process(el->data, new_mem, new_size, + &new_buf); + + if (unlikely(!new_buf || new_size <= 0)) { + + new_size = 0; + new_buf = new_mem; + + } else { + + new_mem = new_buf; + + } + + } + + }); + + if (unlikely(!new_size)) { + + new_size = q->len; + new_mem = in_buf; + + } + + if (unlikely(new_size < afl->min_length)) { + + new_size = afl->min_length; + + } else if (unlikely(new_size > afl->max_length)) { + + new_size = afl->max_length; + + } + + q->len = new_size; + + if (new_mem != in_buf && new_mem != NULL) { + + new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), new_size); + if (unlikely(!new_buf)) { PFATAL("alloc"); } + memcpy(new_buf, new_mem, new_size); + + in_buf = new_buf; + + } + + } + s32 fd; if (unlikely(afl->no_unlink)) { diff --git a/test/test-llvm.sh b/test/test-llvm.sh index aef7a5e2..13e1bad1 100755 --- a/test/test-llvm.sh +++ b/test/test-llvm.sh @@ -197,7 +197,8 @@ test -e ../afl-clang-fast -a -e ../split-switches-pass.so && { for I in char short int long "long long"; do for BITS in 8 16 32 64; do bin="$testcase-split-$I-$BITS.compcov" - AFL_LLVM_INSTRUMENT=AFL AFL_DEBUG=1 AFL_LLVM_LAF_SPLIT_COMPARES_BITW=$BITS AFL_LLVM_LAF_SPLIT_COMPARES=1 ../afl-clang-fast -fsigned-char -DINT_TYPE="$I" -o "$bin" "$testcase" > test.out 2>&1; + #AFL_LLVM_INSTRUMENT=AFL + AFL_DEBUG=1 AFL_LLVM_LAF_SPLIT_COMPARES_BITW=$BITS AFL_LLVM_LAF_SPLIT_COMPARES=1 ../afl-clang-fast -fsigned-char -DINT_TYPE="$I" -o "$bin" "$testcase" > test.out 2>&1; if ! test -e "$bin"; then cat test.out $ECHO "$RED[!] llvm_mode laf-intel/compcov integer splitting failed! 
($testcase with type $I split to $BITS)!"; @@ -269,7 +270,7 @@ test -e ../afl-clang-fast -a -e ../split-switches-pass.so && { { mkdir -p in echo 00000000000000000000000000000000 > in/in - AFL_BENCH_UNTIL_CRASH=1 ../afl-fuzz -l 3 -m none -V30 -i in -o out -c ./test-cmplog -- ./test-c >>errors 2>&1 + AFL_BENCH_UNTIL_CRASH=1 ../afl-fuzz -Z -l 3 -m none -V30 -i in -o out -c ./test-cmplog -- ./test-c >>errors 2>&1 } >>errors 2>&1 test -n "$( ls out/default/crashes/id:000000* out/default/hangs/id:000000* 2>/dev/null )" && { $ECHO "$GREEN[+] afl-fuzz is working correctly with llvm_mode cmplog" -- cgit 1.4.1 From b6c4f3775a229a5c760052cad1580358f5c44f56 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Tue, 14 May 2024 12:34:51 +0200 Subject: disable xml/curl/g_ string transform compare --- docs/Changelog.md | 2 ++ instrumentation/compare-transform-pass.so.cc | 36 +++++++++++++++++++++------- 2 files changed, 29 insertions(+), 9 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 818010a7..79594e38 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -15,6 +15,8 @@ - re-enable i386 support that was accidently disabled - fixes for LTO and outdated afl-gcc mode - fix COMPCOV split compare for old LLVMs + - disable xml/curl/g_ string transform functions because we do not check + for null pointers ... TODO - ensure shared memory variables are visible in weird build setups * afl-cmin - work with input files that have a space diff --git a/instrumentation/compare-transform-pass.so.cc b/instrumentation/compare-transform-pass.so.cc index f8ba9de5..ebab9cbb 100644 --- a/instrumentation/compare-transform-pass.so.cc +++ b/instrumentation/compare-transform-pass.so.cc @@ -230,38 +230,38 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, if (callInst->getCallingConv() != llvm::CallingConv::C) continue; StringRef FuncName = Callee->getName(); isStrcmp &= - (!FuncName.compare("strcmp") || !FuncName.compare("xmlStrcmp") || + (!FuncName.compare("strcmp") /*|| !FuncName.compare("xmlStrcmp") || !FuncName.compare("xmlStrEqual") || !FuncName.compare("curl_strequal") || !FuncName.compare("strcsequal") || - !FuncName.compare("g_strcmp0")); + !FuncName.compare("g_strcmp0")*/); isMemcmp &= (!FuncName.compare("memcmp") || !FuncName.compare("bcmp") || !FuncName.compare("CRYPTO_memcmp") || !FuncName.compare("OPENSSL_memcmp") || !FuncName.compare("memcmp_const_time") || !FuncName.compare("memcmpct")); - isStrncmp &= (!FuncName.compare("strncmp") || + isStrncmp &= (!FuncName.compare("strncmp")/* || !FuncName.compare("curl_strnequal") || - !FuncName.compare("xmlStrncmp")); + !FuncName.compare("xmlStrncmp")*/); isStrcasecmp &= (!FuncName.compare("strcasecmp") || !FuncName.compare("stricmp") || !FuncName.compare("ap_cstr_casecmp") || !FuncName.compare("OPENSSL_strcasecmp") || - !FuncName.compare("xmlStrcasecmp") || + /*!FuncName.compare("xmlStrcasecmp") || !FuncName.compare("g_strcasecmp") || !FuncName.compare("g_ascii_strcasecmp") || !FuncName.compare("Curl_strcasecompare") || - !FuncName.compare("Curl_safe_strcasecompare") || + !FuncName.compare("Curl_safe_strcasecompare") ||*/ !FuncName.compare("cmsstrcasecmp")); isStrncasecmp &= (!FuncName.compare("strncasecmp") || !FuncName.compare("strnicmp") || !FuncName.compare("ap_cstr_casecmpn") || - !FuncName.compare("OPENSSL_strncasecmp") || + !FuncName.compare("OPENSSL_strncasecmp") /*|| !FuncName.compare("xmlStrncasecmp") || !FuncName.compare("g_ascii_strncasecmp") || 
!FuncName.compare("Curl_strncasecompare") || - !FuncName.compare("g_strncasecmp")); + !FuncName.compare("g_strncasecmp")*/); isIntMemcpy &= !FuncName.compare("llvm.memcpy.p0i8.p0i8.i64"); if (!isStrcmp && !isMemcmp && !isStrncmp && !isStrcasecmp && @@ -465,8 +465,19 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, bool isCaseInsensitive = false; bool needs_null = false; bool success_is_one = false; + bool nullCheck = false; Function *Callee = callInst->getCalledFunction(); + fprintf(stderr, "%s - %s - %s\n", + callInst->getParent() + ->getParent() + ->getParent() + ->getName() + .str() + .c_str(), + callInst->getParent()->getParent()->getName().str().c_str(), + Callee ? Callee->getName().str().c_str() : "NULL"); + if (Callee) { if (!Callee->getName().compare("memcmp") || @@ -520,6 +531,11 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, } if (!isSizedcmp) needs_null = true; + if (Callee->getName().startswith("g_") || + Callee->getName().startswith("curl_") || + Callee->getName().startswith("Curl_") || + Callee->getName().startswith("xml")) + nullCheck = true; Value *sizedValue = isSizedcmp ? callInst->getArgOperand(2) : NULL; bool isConstSized = sizedValue && isa<ConstantInt>(sizedValue); @@ -604,8 +620,10 @@ bool CompareTransform::transformCmps(Module &M, const bool processStrcmp, /* split before the call instruction */ BasicBlock *bb = callInst->getParent(); BasicBlock *end_bb = bb->splitBasicBlock(BasicBlock::iterator(callInst)); - BasicBlock *next_lenchk_bb = NULL; + + if (nullCheck) { fprintf(stderr, "TODO: null check\n"); } + if (isSizedcmp && !isConstSized) { next_lenchk_bb = -- cgit 1.4.1 From 6ae95271becde1cd35a7792fd31ff84a548561ea Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Thu, 16 May 2024 09:17:59 +0200 Subject: nits --- docs/Changelog.md | 4 +++- src/afl-showmap.c | 11 +++++------ 2 files changed, 8 insertions(+), 7 deletions(-) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index 79594e38..a4501818 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -13,13 +13,15 @@ - more frequent stats update when syncing (todo: check performance impact) * afl-cc: - re-enable i386 support that was accidentally disabled - - fixes for LTO and outdated afl-gcc mode + - fixes for LTO and outdated afl-gcc mode for i386 - fix COMPCOV split compare for old LLVMs - disable xml/curl/g_ string transform functions because we do not check for null pointers ... TODO - ensure shared memory variables are visible in weird build setups * afl-cmin - work with input files that have a space + * afl-showmap - minor fix to collect coverage -C (thanks to @bet4it) * enhanced the ASAN configuration diff --git a/src/afl-showmap.c b/src/afl-showmap.c index 4ce01444..7e875040 100644 --- a/src/afl-showmap.c +++ b/src/afl-showmap.c @@ -178,7 +178,8 @@ fsrv_run_result_t fuzz_run_target(afl_state_t *afl, afl_forkserver_t *fsrv, void classify_counts(afl_forkserver_t *fsrv) { u8 *mem = fsrv->trace_bits; - const u8 *map = (binary_mode || collect_coverage) ? count_class_binary : count_class_human; + const u8 *map = (binary_mode || collect_coverage) ?
count_class_binary + : count_class_human; u32 i = map_size; @@ -240,11 +241,7 @@ static void analyze_results(afl_forkserver_t *fsrv) { u32 i; for (i = 0; i < map_size; i++) { - if (fsrv->trace_bits[i]) { - - coverage_map[i] |= fsrv->trace_bits[i]; - - } + if (fsrv->trace_bits[i]) { coverage_map[i] |= fsrv->trace_bits[i]; } } @@ -1336,6 +1333,8 @@ int main(int argc, char **argv_orig, char **envp) { } + if (collect_coverage) { binary_mode = false; } // ensure this + if (optind == argc || !out_file) { usage(argv[0]); } if (in_dir && in_filelist) { FATAL("you can only specify either -i or -I"); } -- cgit 1.4.1 From 56d5aa3101945e81519a3fac8783d0d8fad82779 Mon Sep 17 00:00:00 2001 From: vanhauser-thc Date: Fri, 17 May 2024 23:55:43 +0200 Subject: log --- docs/Changelog.md | 5 +++++ src/afl-fuzz.c | 2 ++ 2 files changed, 7 insertions(+) (limited to 'docs/Changelog.md') diff --git a/docs/Changelog.md b/docs/Changelog.md index a4501818..6736e42b 100644 --- a/docs/Changelog.md +++ b/docs/Changelog.md @@ -11,6 +11,11 @@ - prevent filenames in the queue that have spaces - minor fix for FAST schedules - more frequent stats update when syncing (todo: check performance impact) + - now timing of calibration, trimming and syncing is measured seperately, + thanks to @eqv! + - -V timing is now accurately the fuzz time (without syncing), before + long calibration times and syncing could result in now fuzzing being + made when the time was already run out until then, thanks to @eqv! * afl-cc: - re-enable i386 support that was accidently disabled - fixes for LTO and outdated afl-gcc mode for i386 diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index cf3940f1..70ab983c 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -2593,6 +2593,7 @@ int main(int argc, char **argv_orig, char **envp) { } sync_fuzzers(afl); + } ++afl->queue_cycle; @@ -3107,3 +3108,4 @@ stop_fuzzing: } #endif /* !AFL_LIB */ + -- cgit 1.4.1
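
As a companion to the custom_mutators.md note and the new post-process handling in trim_case() shown above, the following is a minimal, illustrative sketch of a custom mutator that implements only afl_custom_post_process, using the same call shape afl-fuzz uses (data, buf, buf_size, &out_buf). This is not AFL++ source: the file name, the my_mutator_t struct and the "MAGC" magic-byte fix-up are invented for illustration, and the real afl_custom_init receives an afl_state_t pointer (an opaque void pointer is used here only to keep the sketch header-free).

/* minimal_post_process.c -- illustrative sketch only, not part of AFL++.
   Build as a shared object and load it via AFL_CUSTOM_MUTATOR_LIBRARY.   */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct my_mutator {

  uint8_t *buf;
  size_t   size;

} my_mutator_t;

/* the real API passes afl_state_t *afl; a void pointer is an assumption
   made here to keep the sketch free of AFL++ headers */
void *afl_custom_init(void *afl, unsigned int seed) {

  (void)afl;
  (void)seed;
  return calloc(1, sizeof(my_mutator_t));

}

/* called on every generated input before execution; per the 4.21a changelog
   entry above it is also run on the result of standard trimming */
size_t afl_custom_post_process(void *data_v, uint8_t *buf, size_t buf_size,
                               uint8_t **out_buf) {

  my_mutator_t *data = (my_mutator_t *)data_v;

  if (buf_size < 4) {

    *out_buf = buf;                      /* nothing to fix up, pass through */
    return buf_size;

  }

  if (data->size < buf_size) {

    uint8_t *tmp = realloc(data->buf, buf_size);
    if (!tmp) {

      *out_buf = buf;                    /* allocation failed, pass through */
      return buf_size;

    }

    data->buf = tmp;
    data->size = buf_size;

  }

  memcpy(data->buf, buf, buf_size);
  memcpy(data->buf, "MAGC", 4);          /* hypothetical magic-byte repair */

  *out_buf = data->buf;
  return buf_size;

}

void afl_custom_deinit(void *data_v) {

  my_mutator_t *data = (my_mutator_t *)data_v;
  if (data) { free(data->buf); }
  free(data);

}

Compiled with something like "cc -shared -fPIC -o minimal_post_process.so minimal_post_process.c", it could then be loaded with AFL_CUSTOM_MUTATOR_LIBRARY=./minimal_post_process.so afl-fuzz -i in -o out -- ./target.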