author    vanhauser-thc <vh@thc.org>  2024-02-08 12:46:00 +0100
committer vanhauser-thc <vh@thc.org>  2024-02-08 12:46:08 +0100
commit    038fef962c3d85fe7e37fcd8717270654f927881 (patch)
tree      03a7e572705e1d2ec813822b1ad09bfff1a39a2c
parent    42c663e7c76bd3abee2c6a84dd689bcfea3f59dc (diff)
download  afl++-038fef962c3d85fe7e37fcd8717270654f927881.tar.gz
performance
-rw-r--r--  .gitignore                     1
-rw-r--r--  GNUmakefile                   59
-rw-r--r--  docs/INSTALL.md                5
-rw-r--r--  include/config.h               2
-rw-r--r--  include/t1ha.h               719
-rw-r--r--  include/t1ha0_ia32aes_b.h    167
-rw-r--r--  include/t1ha_bits.h         1254
-rw-r--r--  include/t1ha_selfcheck.h      76
-rw-r--r--  include/xxhash.h           10631
-rw-r--r--  src/afl-fuzz.c                 4
-rw-r--r--  src/afl-performance.c          9
-rw-r--r--  utils/bench/Makefile           8
-rw-r--r--  utils/bench/README.md          2
-rw-r--r--  utils/bench/hash.c            42
14 files changed, 8024 insertions, 4955 deletions
diff --git a/.gitignore b/.gitignore
index 67feb240..8e191e29 100644
--- a/.gitignore
+++ b/.gitignore
@@ -99,6 +99,7 @@ unicorn_mode/samples/*/\.test-*
utils/afl_network_proxy/afl-network-client
utils/afl_network_proxy/afl-network-server
utils/afl_proxy/afl-proxy
+utils/bench/hash
utils/optimin/build
utils/optimin/optimin
utils/persistent_mode/persistent_demo
diff --git a/GNUmakefile b/GNUmakefile
index 283c57c2..64dfc37f 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -84,21 +84,27 @@ else
endif
endif
-#ifeq "$(shell echo 'int main() {return 0; }' | $(CC) -fno-move-loop-invariants -fdisable-tree-cunrolli -x c - -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
-# SPECIAL_PERFORMANCE += -fno-move-loop-invariants -fdisable-tree-cunrolli
-#endif
-
-#ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -march=native -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
-# ifndef SOURCE_DATE_EPOCH
-# HAVE_MARCHNATIVE = 1
-# CFLAGS_OPT += -march=native
-# endif
-#endif
+ifdef PERFORMANCE
+ SPECIAL_PERFORMANCE := -D_AFL_SPECIAL_PERFORMANCE
+ ifeq "$(SYS)" "Linux"
+ ifeq "$(shell grep avx2 /proc/cpuinfo)" ""
+ else
+ SPECIAL_PERFORMANCE += -mavx2 -D_HAVE_AVX2
+ endif
+ endif
+ ifeq "$(shell echo 'int main() {return 0; }' | $(CC) $(CFLAGS) -Werror -x c - -march=native -o .test 2>/dev/null && echo 1 || echo 0 ; rm -f .test )" "1"
+ HAVE_MARCHNATIVE = 1
+ SPECIAL_PERFORMANCE += -march=native
+ endif
+ $(info SPECIAL_PERFORMANCE=$(SPECIAL_PERFORMANCE))
+else
+ SPECIAL_PERFORMANCE :=
+endif
ifneq "$(SYS)" "Darwin"
- #ifeq "$(HAVE_MARCHNATIVE)" "1"
- # SPECIAL_PERFORMANCE += -march=native
- #endif
+ #ifeq "$(HAVE_MARCHNATIVE)" "1"
+ # SPECIAL_PERFORMANCE += -march=native
+ #endif
#ifndef DEBUG
# CFLAGS_OPT += -D_FORTIFY_SOURCE=1
#endif
@@ -389,6 +395,7 @@ help:
@echo
@echo Known build environment options:
@echo "=========================================="
+ @echo "PERFORMANCE - compile with performance options that make the binary not transferable to other systems. Recommended!"
@echo STATIC - compile AFL++ static
@echo "CODE_COVERAGE - compile the target for code coverage (see docs/instrumentation/README.llvm.md)"
@echo ASAN_BUILD - compiles AFL++ with memory sanitizer for debug purposes
@@ -453,31 +460,31 @@ afl-as: src/afl-as.c include/afl-as.h $(COMM_HDR) | test_x86
@ln -sf afl-as as
src/afl-performance.o : $(COMM_HDR) src/afl-performance.c include/hash.h
- $(CC) $(CFLAGS) $(CFLAGS_OPT) -Iinclude -c src/afl-performance.c -o src/afl-performance.o
+ $(CC) $(CFLAGS) $(CFLAGS_OPT) $(SPECIAL_PERFORMANCE) -Iinclude -c src/afl-performance.c -o src/afl-performance.o
src/afl-common.o : $(COMM_HDR) src/afl-common.c include/common.h
- $(CC) $(CFLAGS) $(CFLAGS_FLTO) -c src/afl-common.c -o src/afl-common.o
+ $(CC) $(CFLAGS) $(CFLAGS_FLTO) $(SPECIAL_PERFORMANCE) -c src/afl-common.c -o src/afl-common.o
src/afl-forkserver.o : $(COMM_HDR) src/afl-forkserver.c include/forkserver.h
- $(CC) $(CFLAGS) $(CFLAGS_FLTO) -c src/afl-forkserver.c -o src/afl-forkserver.o
+ $(CC) $(CFLAGS) $(CFLAGS_FLTO) $(SPECIAL_PERFORMANCE) -c src/afl-forkserver.c -o src/afl-forkserver.o
src/afl-sharedmem.o : $(COMM_HDR) src/afl-sharedmem.c include/sharedmem.h
- $(CC) $(CFLAGS) $(CFLAGS_FLTO) -c src/afl-sharedmem.c -o src/afl-sharedmem.o
+ $(CC) $(CFLAGS) $(CFLAGS_FLTO) $(SPECIAL_PERFORMANCE) -c src/afl-sharedmem.c -o src/afl-sharedmem.o
afl-fuzz: $(COMM_HDR) include/afl-fuzz.h $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o | test_x86
- $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(PYFLAGS) $(LDFLAGS) -lm
+ $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(SPECIAL_PERFORMANCE) $(AFL_FUZZ_FILES) src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(PYFLAGS) $(LDFLAGS) -lm
afl-showmap: src/afl-showmap.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o $(COMM_HDR) | test_x86
- $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-fuzz-mutators.c src/afl-fuzz-python.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(PYFLAGS) $(LDFLAGS)
+ $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(SPECIAL_PERFORMANCE) src/$@.c src/afl-fuzz-mutators.c src/afl-fuzz-python.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(PYFLAGS) $(LDFLAGS)
afl-tmin: src/afl-tmin.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o $(COMM_HDR) | test_x86
- $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(LDFLAGS)
+ $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(SPECIAL_PERFORMANCE) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-forkserver.o src/afl-performance.o -o $@ $(LDFLAGS)
afl-analyze: src/afl-analyze.c src/afl-common.o src/afl-sharedmem.o src/afl-performance.o src/afl-forkserver.o $(COMM_HDR) | test_x86
- $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-performance.o src/afl-forkserver.o -o $@ $(LDFLAGS)
+ $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(SPECIAL_PERFORMANCE) src/$@.c src/afl-common.o src/afl-sharedmem.o src/afl-performance.o src/afl-forkserver.o -o $@ $(LDFLAGS)
afl-gotcpu: src/afl-gotcpu.c src/afl-common.o $(COMM_HDR) | test_x86
- $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) src/$@.c src/afl-common.o -o $@ $(LDFLAGS)
+ $(CC) $(CFLAGS) $(COMPILE_STATIC) $(CFLAGS_FLTO) $(SPECIAL_PERFORMANCE) src/$@.c src/afl-common.o -o $@ $(LDFLAGS)
.PHONY: document
document: afl-fuzz-document
@@ -494,17 +501,17 @@ unit_maybe_alloc: test/unittests/unit_maybe_alloc.o
./test/unittests/unit_maybe_alloc
test/unittests/unit_hash.o : $(COMM_HDR) include/alloc-inl.h test/unittests/unit_hash.c $(AFL_FUZZ_FILES) src/afl-performance.o
- @$(CC) $(CFLAGS) $(ASAN_CFLAGS) -c test/unittests/unit_hash.c -o test/unittests/unit_hash.o
+ @$(CC) $(CFLAGS) $(ASAN_CFLAGS) $(SPECIAL_PERFORMANCE) -c test/unittests/unit_hash.c -o test/unittests/unit_hash.o
unit_hash: test/unittests/unit_hash.o src/afl-performance.o
- @$(CC) $(CFLAGS) -Wl,--wrap=exit -Wl,--wrap=printf $^ -o test/unittests/unit_hash $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka
+ @$(CC) $(CFLAGS) $(SPECIAL_PERFORMANCE) -Wl,--wrap=exit -Wl,--wrap=printf $^ -o test/unittests/unit_hash $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka
./test/unittests/unit_hash
test/unittests/unit_rand.o : $(COMM_HDR) include/alloc-inl.h test/unittests/unit_rand.c $(AFL_FUZZ_FILES) src/afl-performance.o
- @$(CC) $(CFLAGS) $(ASAN_CFLAGS) -c test/unittests/unit_rand.c -o test/unittests/unit_rand.o
+ @$(CC) $(CFLAGS) $(ASAN_CFLAGS) $(SPECIAL_PERFORMANCE) -c test/unittests/unit_rand.c -o test/unittests/unit_rand.o
unit_rand: test/unittests/unit_rand.o src/afl-common.o src/afl-performance.o
- @$(CC) $(CFLAGS) $(ASAN_CFLAGS) -Wl,--wrap=exit -Wl,--wrap=printf $^ -o test/unittests/unit_rand $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka
+ @$(CC) $(CFLAGS) $(ASAN_CFLAGS) $(SPECIAL_PERFORMANCE) -Wl,--wrap=exit -Wl,--wrap=printf $^ -o test/unittests/unit_rand $(LDFLAGS) $(ASAN_LDFLAGS) -lcmocka
./test/unittests/unit_rand
test/unittests/unit_list.o : $(COMM_HDR) include/list.h test/unittests/unit_list.c $(AFL_FUZZ_FILES)
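For context, a minimal sketch of how a source file could consume the `_HAVE_AVX2` define that the `PERFORMANCE=1` path above injects; the function below is illustrative only and not part of this commit:

```c
/* Illustrative sketch: gating an AVX2 fast path on the -D_HAVE_AVX2 define
 * set by the PERFORMANCE=1 build path above. Hypothetical code, not AFL++. */
#include <stddef.h>
#include <stdint.h>
#ifdef _HAVE_AVX2
  #include <immintrin.h>
#endif

static uint64_t sum_bytes(const uint8_t *buf, size_t len) {
  uint64_t sum = 0;
  size_t   i = 0;
#ifdef _HAVE_AVX2
  /* Process 32 bytes per iteration when built with -mavx2. */
  __m256i acc = _mm256_setzero_si256();
  for (; i + 32 <= len; i += 32) {
    __m256i v = _mm256_loadu_si256((const __m256i *)(buf + i));
    /* Per-8-byte-group byte sums via sum-of-absolute-differences vs zero. */
    acc = _mm256_add_epi64(acc, _mm256_sad_epu8(v, _mm256_setzero_si256()));
  }
  uint64_t lane[4];
  _mm256_storeu_si256((__m256i *)lane, acc);
  sum = lane[0] + lane[1] + lane[2] + lane[3];
#endif
  for (; i < len; ++i)
    sum += buf[i]; /* portable tail, and the whole loop without AVX2 */
  return sum;
}
```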
diff --git a/docs/INSTALL.md b/docs/INSTALL.md
index 84bbe3ea..9f53afed 100644
--- a/docs/INSTALL.md
+++ b/docs/INSTALL.md
@@ -69,14 +69,15 @@ These build targets exist:
[Unless you are on Mac OS X](https://developer.apple.com/library/archive/qa/qa1118/_index.html),
you can also build statically linked versions of the AFL++ binaries by passing
-the `STATIC=1` argument to make:
+the `PERFORMANCE=1` argument to make:
```shell
-make STATIC=1
+make PERFORMANCE=1
```
These build options exist:
+* PERFORMANCE - compile with performance options that make the binary not transferable to other systems. Recommended!
* STATIC - compile AFL++ static
* CODE_COVERAGE - compile the target for code coverage (see docs/instrumentation/README.llvm.md)
* ASAN_BUILD - compiles AFL++ with memory sanitizer for debug purposes
diff --git a/include/config.h b/include/config.h
index 70ce2ae3..31d66b14 100644
--- a/include/config.h
+++ b/include/config.h
@@ -26,7 +26,7 @@
/* Version string: */
// c = release, a = volatile github dev, e = experimental branch
-#define VERSION "++4.10c"
+#define VERSION "++4.20a"
/******************************************************
* *
diff --git a/include/t1ha.h b/include/t1ha.h
new file mode 100644
index 00000000..498f0dd6
--- /dev/null
+++ b/include/t1ha.h
@@ -0,0 +1,719 @@
+/*
+ * Copyright (c) 2016-2020 Positive Technologies, https://www.ptsecurity.com,
+ * Fast Positive Hash.
+ *
+ * Portions Copyright (c) 2010-2020 Leonid Yuriev <leo@yuriev.ru>,
+ * The 1Hippeus project (t1h).
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgement in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/*
+ * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" }
+ * by [Positive Technologies](https://www.ptsecurity.ru)
+ *
+ * Briefly, it is a 64-bit Hash Function:
+ * 1. Created for 64-bit little-endian platforms, predominantly for x86_64,
+ *    but portable; it can run without penalty on any 64-bit CPU.
+ * 2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash
+ *    and all other portable hash functions (which do not use specific
+ *    hardware tricks).
+ * 3. Not suitable for cryptography.
+ *
+ * The Future will (be) Positive. Всё будет хорошо.
+ *
+ * ACKNOWLEDGEMENT:
+ * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев)
+ * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta!
+ */
+
+#pragma once
+
+/*****************************************************************************
+ *
+ * PLEASE PAY ATTENTION TO THE FOLLOWING NOTES
+ * about the macro definitions which control t1ha behaviour and/or performance.
+ *
+ *
+ * 1) T1HA_SYS_UNALIGNED_ACCESS = Defines the system/platform/CPU/architecture
+ *    capabilities for unaligned data access.
+ *
+ *    By default, when T1HA_SYS_UNALIGNED_ACCESS is not defined, it is
+ *    derived from hardcoded knowledge about the capabilities of the most
+ *    common CPU architectures. But you can override this default
+ *    behavior when building the t1ha library itself:
+ *
+ * // To disable unaligned access at all.
+ * #define T1HA_SYS_UNALIGNED_ACCESS 0
+ *
+ *    // To enable unaligned access, but indicate that it is significantly slow.
+ *    #define T1HA_SYS_UNALIGNED_ACCESS 1
+ *
+ *    // To enable unaligned access, and indicate that it is efficient.
+ * #define T1HA_SYS_UNALIGNED_ACCESS 2
+ *
+ *
+ * 2) T1HA_USE_FAST_ONESHOT_READ = Controls data reads at the end of the buffer.
+ *
+ *    When defined to non-zero, t1ha uses a 'one shot' method for reading
+ *    up to 8 bytes at the end of the data: a single 64-bit read is
+ *    performed even when fewer than 8 bytes are available.
+ *
+ *    This is slightly faster than switching on the length of the data tail.
+ *    Unfortunately, it will trigger false-positive alarms from Valgrind,
+ *    AddressSanitizer and similar tools.
+ *
+ *    By default, t1ha defines it to 1, but you can override this
+ *    default behavior when building the t1ha library itself:
+ *
+ *    // For slightly faster and smaller code.
+ *    #define T1HA_USE_FAST_ONESHOT_READ 1
+ *
+ *    // For peace of mind if in doubt.
+ *    #define T1HA_USE_FAST_ONESHOT_READ 0
+ *
+ *
+ * 3) T1HA0_RUNTIME_SELECT = Controls runtime choice of the fastest function.
+ *
+ *    The t1ha library offers t1ha0() as the fastest function for the current
+ *    CPU. But actual CPU features/capabilities may differ significantly,
+ *    especially on the x86 platform. Therefore, internally, t1ha0() may
+ *    require dynamic dispatching to choose the best implementation.
+ *
+ *    By default, t1ha enables such a runtime choice (and the corresponding
+ *    indirect calls) where reasonable, but you can override this default
+ *    behavior when building the t1ha library itself:
+ *
+ *    // To enable runtime choice of the fastest implementation.
+ *    #define T1HA0_RUNTIME_SELECT 1
+ *
+ *    // To disable runtime choice of the fastest implementation.
+ *    #define T1HA0_RUNTIME_SELECT 0
+ *
+ *    When T1HA0_RUNTIME_SELECT is nonzero, the t1ha0_resolve() function can
+ *    be used to get the address of the actual t1ha0() implementation at
+ *    runtime. This is useful in two cases:
+ *      - calling through a local function pointer is usually slightly
+ *        faster (less overhead) than via the PLT across a DSO boundary;
+ *      - GNU indirect functions (see below) are not supported by the
+ *        environment, so calling via t1ha0_funcptr is unavailable or costly.
+ *
+ * 4) T1HA_USE_INDIRECT_FUNCTIONS = Controls usage of GNU indirect functions.
+ *
+ *    In conjunction with T1HA0_RUNTIME_SELECT, T1HA_USE_INDIRECT_FUNCTIONS
+ *    controls usage of the ELF indirect functions feature. In general, when
+ *    available, this reduces the overhead of indirect function calls across
+ *    a DSO boundary (https://sourceware.org/glibc/wiki/GNU_IFUNC).
+ *
+ *    By default, t1ha engages GNU indirect functions when they are available
+ *    and useful, but you can override this default behavior when building
+ *    the t1ha library itself:
+ *
+ *    // To enable use of GNU ELF indirect functions.
+ *    #define T1HA_USE_INDIRECT_FUNCTIONS 1
+ *
+ *    // To disable use of GNU ELF indirect functions. This may be useful
+ *    // if the actual toolchain or the system's loader does not support them.
+ *    #define T1HA_USE_INDIRECT_FUNCTIONS 0
+ *
+ * 5) T1HA0_AESNI_AVAILABLE = Controls AES-NI detection and dispatching on x86.
+ *
+ *    In conjunction with T1HA0_RUNTIME_SELECT, T1HA0_AESNI_AVAILABLE controls
+ *    detection and usage of the AES-NI CPU feature. On the other hand, this
+ *    requires compiling parts of the t1ha library with particular options,
+ *    which could be difficult or inconvenient in some cases.
+ *
+ *    By default, t1ha engages AES-NI for t1ha0() on the x86 platform, but you
+ *    can override this default behavior when building the t1ha library itself:
+ *
+ *    // To disable detection and usage of AES-NI instructions for t1ha0().
+ *    // This may be useful when you are unable to build the t1ha library
+ *    // properly, or know that AES-NI will be unavailable at deployment.
+ *    #define T1HA0_AESNI_AVAILABLE 0
+ *
+ *    // To force detection and usage of AES-NI instructions for t1ha0(),
+ *    // though there is no known reason anyone would need this.
+ *    #define T1HA0_AESNI_AVAILABLE 1
+ *
+ * 6) T1HA0_DISABLED, T1HA1_DISABLED, T1HA2_DISABLED = Controls availability of
+ * t1ha functions.
+ *
+ *    In some cases it could be useful to import/use only a few of the t1ha
+ *    functions, or just one. So, these definitions allow disabling the
+ *    corresponding parts of the t1ha library.
+ *
+ * // To disable t1ha0(), t1ha0_32le(), t1ha0_32be() and all AES-NI.
+ * #define T1HA0_DISABLED
+ *
+ * // To disable t1ha1_le() and t1ha1_be().
+ * #define T1HA1_DISABLED
+ *
+ * // To disable t1ha2_atonce(), t1ha2_atonce128() and so on.
+ * #define T1HA2_DISABLED
+ *
+ *****************************************************************************/
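A minimal sketch of how these knobs are typically set when building the library itself; the header name `t1ha_config.h` and the `-include` mechanism are illustrative assumptions, not part of t1ha:

```c
/* Illustrative only: local build configuration applied when compiling the
 * t1ha sources, e.g. via `cc -include t1ha_config.h -c t1ha*.c`, or as
 * plain -D flags on the compiler command line. */
#define T1HA_USE_FAST_ONESHOT_READ 0 /* calm Valgrind/ASan, per note 2) */
#define T1HA0_RUNTIME_SELECT 1       /* keep runtime CPU dispatch, note 3) */
#define T1HA1_DISABLED               /* drop t1ha1_le()/t1ha1_be(), note 6) */
```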
+
+#define T1HA_VERSION_MAJOR 2
+#define T1HA_VERSION_MINOR 1
+#define T1HA_VERSION_RELEASE 1
+
+#ifndef __has_attribute
+#define __has_attribute(x) (0)
+#endif
+
+#ifndef __has_include
+#define __has_include(x) (0)
+#endif
+
+#ifndef __GNUC_PREREQ
+#if defined(__GNUC__) && defined(__GNUC_MINOR__)
+#define __GNUC_PREREQ(maj, min) \
+ ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
+#else
+#define __GNUC_PREREQ(maj, min) 0
+#endif
+#endif /* __GNUC_PREREQ */
+
+#ifndef __CLANG_PREREQ
+#ifdef __clang__
+#define __CLANG_PREREQ(maj, min) \
+ ((__clang_major__ << 16) + __clang_minor__ >= ((maj) << 16) + (min))
+#else
+#define __CLANG_PREREQ(maj, min) (0)
+#endif
+#endif /* __CLANG_PREREQ */
+
+#ifndef __LCC_PREREQ
+#ifdef __LCC__
+#define __LCC_PREREQ(maj, min) \
+ ((__LCC__ << 16) + __LCC_MINOR__ >= ((maj) << 16) + (min))
+#else
+#define __LCC_PREREQ(maj, min) (0)
+#endif
+#endif /* __LCC_PREREQ */
+
+/*****************************************************************************/
+
+#ifdef _MSC_VER
+/* Avoid '16' bytes padding added after data member 't1ha_context::total'
+ * and other warnings from std-headers if warning-level > 3. */
+#pragma warning(push, 3)
+#endif
+
+#if defined(__cplusplus) && __cplusplus >= 201103L
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#else
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#endif
+
+/*****************************************************************************/
+
+#if defined(i386) || defined(__386) || defined(__i386) || defined(__i386__) || \
+ defined(i486) || defined(__i486) || defined(__i486__) || \
+    defined(i586) || defined(__i586) || defined(__i586__) || defined(i686) || \
+ defined(__i686) || defined(__i686__) || defined(_M_IX86) || \
+ defined(_X86_) || defined(__THW_INTEL__) || defined(__I86__) || \
+ defined(__INTEL__) || defined(__x86_64) || defined(__x86_64__) || \
+ defined(__amd64__) || defined(__amd64) || defined(_M_X64) || \
+ defined(_M_AMD64) || defined(__IA32__) || defined(__INTEL__)
+#ifndef __ia32__
+/* LY: define neutral __ia32__ for x86 and x86-64 archs */
+#define __ia32__ 1
+#endif /* __ia32__ */
+#if !defined(__amd64__) && (defined(__x86_64) || defined(__x86_64__) || \
+ defined(__amd64) || defined(_M_X64))
+/* LY: define trusty __amd64__ for all AMD64/x86-64 arch */
+#define __amd64__ 1
+#endif /* __amd64__ */
+#endif /* all x86 */
+
+#if !defined(__BYTE_ORDER__) || !defined(__ORDER_LITTLE_ENDIAN__) || \
+ !defined(__ORDER_BIG_ENDIAN__)
+
+/* *INDENT-OFF* */
+/* clang-format off */
+
+#if defined(__GLIBC__) || defined(__GNU_LIBRARY__) || defined(__ANDROID__) || \
+ defined(HAVE_ENDIAN_H) || __has_include(<endian.h>)
+#include <endian.h>
+#elif defined(__APPLE__) || defined(__MACH__) || defined(__OpenBSD__) || \
+ defined(HAVE_MACHINE_ENDIAN_H) || __has_include(<machine/endian.h>)
+#include <machine/endian.h>
+#elif defined(HAVE_SYS_ISA_DEFS_H) || __has_include(<sys/isa_defs.h>)
+#include <sys/isa_defs.h>
+#elif (defined(HAVE_SYS_TYPES_H) && defined(HAVE_SYS_ENDIAN_H)) || \
+ (__has_include(<sys/types.h>) && __has_include(<sys/endian.h>))
+#include <sys/endian.h>
+#include <sys/types.h>
+#elif defined(__bsdi__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
+ defined(__NETBSD__) || defined(__NetBSD__) || \
+ defined(HAVE_SYS_PARAM_H) || __has_include(<sys/param.h>)
+#include <sys/param.h>
+#endif /* OS */
+
+/* *INDENT-ON* */
+/* clang-format on */
+
+#if defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN)
+#define __ORDER_LITTLE_ENDIAN__ __LITTLE_ENDIAN
+#define __ORDER_BIG_ENDIAN__ __BIG_ENDIAN
+#define __BYTE_ORDER__ __BYTE_ORDER
+#elif defined(_BYTE_ORDER) && defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)
+#define __ORDER_LITTLE_ENDIAN__ _LITTLE_ENDIAN
+#define __ORDER_BIG_ENDIAN__ _BIG_ENDIAN
+#define __BYTE_ORDER__ _BYTE_ORDER
+#else
+#define __ORDER_LITTLE_ENDIAN__ 1234
+#define __ORDER_BIG_ENDIAN__ 4321
+
+#if defined(__LITTLE_ENDIAN__) || \
+ (defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)) || \
+ defined(__ARMEL__) || defined(__THUMBEL__) || defined(__AARCH64EL__) || \
+ defined(__MIPSEL__) || defined(_MIPSEL) || defined(__MIPSEL) || \
+ defined(_M_ARM) || defined(_M_ARM64) || defined(__e2k__) || \
+ defined(__elbrus_4c__) || defined(__elbrus_8c__) || defined(__bfin__) || \
+ defined(__BFIN__) || defined(__ia64__) || defined(_IA64) || \
+ defined(__IA64__) || defined(__ia64) || defined(_M_IA64) || \
+ defined(__itanium__) || defined(__ia32__) || defined(__CYGWIN__) || \
+ defined(_WIN64) || defined(_WIN32) || defined(__TOS_WIN__) || \
+ defined(__WINDOWS__)
+#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__
+
+#elif defined(__BIG_ENDIAN__) || \
+ (defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)) || \
+ defined(__ARMEB__) || defined(__THUMBEB__) || defined(__AARCH64EB__) || \
+ defined(__MIPSEB__) || defined(_MIPSEB) || defined(__MIPSEB) || \
+ defined(__m68k__) || defined(M68000) || defined(__hppa__) || \
+ defined(__hppa) || defined(__HPPA__) || defined(__sparc__) || \
+ defined(__sparc) || defined(__370__) || defined(__THW_370__) || \
+ defined(__s390__) || defined(__s390x__) || defined(__SYSC_ZARCH__)
+#define __BYTE_ORDER__ __ORDER_BIG_ENDIAN__
+
+#else
+#error __BYTE_ORDER__ should be defined.
+#endif /* Arch */
+
+#endif
+#endif /* __BYTE_ORDER__ || __ORDER_LITTLE_ENDIAN__ || __ORDER_BIG_ENDIAN__ */
+
+/*****************************************************************************/
+
+#ifndef __dll_export
+#if defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+#if defined(__GNUC__) || __has_attribute(dllexport)
+#define __dll_export __attribute__((dllexport))
+#else
+#define __dll_export __declspec(dllexport)
+#endif
+#elif defined(__GNUC__) || __has_attribute(__visibility__)
+#define __dll_export __attribute__((__visibility__("default")))
+#else
+#define __dll_export
+#endif
+#endif /* __dll_export */
+
+#ifndef __dll_import
+#if defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+#if defined(__GNUC__) || __has_attribute(dllimport)
+#define __dll_import __attribute__((dllimport))
+#else
+#define __dll_import __declspec(dllimport)
+#endif
+#elif defined(__GNUC__) || __has_attribute(__visibility__)
+#define __dll_import __attribute__((__visibility__("default")))
+#else
+#define __dll_import
+#endif
+#endif /* __dll_import */
+
+#ifndef __force_inline
+#ifdef _MSC_VER
+#define __force_inline __forceinline
+#elif __GNUC_PREREQ(3, 2) || __has_attribute(__always_inline__)
+#define __force_inline __inline __attribute__((__always_inline__))
+#else
+#define __force_inline __inline
+#endif
+#endif /* __force_inline */
+
+#ifndef T1HA_API
+#if defined(t1ha_EXPORTS)
+#define T1HA_API __dll_export
+#elif defined(t1ha_IMPORTS)
+#define T1HA_API __dll_import
+#else
+#define T1HA_API
+#endif
+#endif /* T1HA_API */
+
+#if defined(_MSC_VER) && defined(__ia32__)
+#define T1HA_ALIGN_PREFIX __declspec(align(32)) /* required only for SIMD */
+#else
+#define T1HA_ALIGN_PREFIX
+#endif /* _MSC_VER */
+
+#if defined(__GNUC__) && defined(__ia32__)
+#define T1HA_ALIGN_SUFFIX \
+ __attribute__((__aligned__(32))) /* required only for SIMD */
+#else
+#define T1HA_ALIGN_SUFFIX
+#endif /* GCC x86 */
+
+#ifndef T1HA_USE_INDIRECT_FUNCTIONS
+/* GNU ELF indirect functions usage control. For more info please see
+ * https://en.wikipedia.org/wiki/Executable_and_Linkable_Format
+ * and https://sourceware.org/glibc/wiki/GNU_IFUNC */
+#if defined(__ELF__) && defined(__amd64__) && \
+ (__has_attribute(__ifunc__) || \
+ (!defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && \
+ !defined(__SANITIZE_ADDRESS__) && !defined(__SSP_ALL__)))
+/* Enable gnu_indirect_function by default if :
+ * - ELF AND x86_64
+ * - attribute(__ifunc__) is available OR
+ * GCC >= 4 WITHOUT -fsanitize=address NOR -fstack-protector-all */
+#define T1HA_USE_INDIRECT_FUNCTIONS 1
+#else
+#define T1HA_USE_INDIRECT_FUNCTIONS 0
+#endif
+#endif /* T1HA_USE_INDIRECT_FUNCTIONS */
+
+#if __GNUC_PREREQ(4, 0)
+#pragma GCC visibility push(hidden)
+#endif /* __GNUC_PREREQ(4,0) */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef union T1HA_ALIGN_PREFIX t1ha_state256 {
+ uint8_t bytes[32];
+ uint32_t u32[8];
+ uint64_t u64[4];
+ struct {
+ uint64_t a, b, c, d;
+ } n;
+} t1ha_state256_t T1HA_ALIGN_SUFFIX;
+
+typedef struct t1ha_context {
+ t1ha_state256_t state;
+ t1ha_state256_t buffer;
+ size_t partial;
+ uint64_t total;
+} t1ha_context_t;
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+/******************************************************************************
+ *
+ * Self-testing API.
+ *
+ * Unfortunately, some compilers (specifically Microsoft Visual C/C++) have
+ * bugs which lead the t1ha functions to produce wrong results. This API
+ * allows checking the correctness of the actual code at runtime.
+ *
+ * All check functions return 0 on success, or -1 in case the corresponding
+ * hash function failed verification. PLEASE always perform such checking at
+ * initialization of your code if you use MSVC or other troublesome compilers.
+ */
+
+T1HA_API int t1ha_selfcheck__all_enabled(void);
+
+#ifndef T1HA2_DISABLED
+T1HA_API int t1ha_selfcheck__t1ha2_atonce(void);
+T1HA_API int t1ha_selfcheck__t1ha2_atonce128(void);
+T1HA_API int t1ha_selfcheck__t1ha2_stream(void);
+T1HA_API int t1ha_selfcheck__t1ha2(void);
+#endif /* T1HA2_DISABLED */
+
+#ifndef T1HA1_DISABLED
+T1HA_API int t1ha_selfcheck__t1ha1_le(void);
+T1HA_API int t1ha_selfcheck__t1ha1_be(void);
+T1HA_API int t1ha_selfcheck__t1ha1(void);
+#endif /* T1HA1_DISABLED */
+
+#ifndef T1HA0_DISABLED
+T1HA_API int t1ha_selfcheck__t1ha0_32le(void);
+T1HA_API int t1ha_selfcheck__t1ha0_32be(void);
+T1HA_API int t1ha_selfcheck__t1ha0(void);
+
+/* Define T1HA0_AESNI_AVAILABLE to 0 to disable AES-NI support. */
+#ifndef T1HA0_AESNI_AVAILABLE
+#if defined(__e2k__) || \
+ (defined(__ia32__) && (!defined(_M_IX86) || _MSC_VER > 1800))
+#define T1HA0_AESNI_AVAILABLE 1
+#else
+#define T1HA0_AESNI_AVAILABLE 0
+#endif
+#endif /* ifndef T1HA0_AESNI_AVAILABLE */
+
+#if T1HA0_AESNI_AVAILABLE
+T1HA_API int t1ha_selfcheck__t1ha0_ia32aes_noavx(void);
+T1HA_API int t1ha_selfcheck__t1ha0_ia32aes_avx(void);
+#ifndef __e2k__
+T1HA_API int t1ha_selfcheck__t1ha0_ia32aes_avx2(void);
+#endif
+#endif /* if T1HA0_AESNI_AVAILABLE */
+#endif /* T1HA0_DISABLED */
+
+/******************************************************************************
+ *
+ * t1ha2 = 64- and 128-bit, SLIGHTLY MORE ATTENTION FOR QUALITY AND STRENGTH.
+ *
+ *  - The recommended version of "Fast Positive Hash", with good quality
+ *    for checksums, hash tables and fingerprinting.
+ *  - Portable and extremely efficient on modern 64-bit CPUs.
+ *    Designed for 64-bit little-endian platforms;
+ *    in other cases it will run slowly.
+ *  - Great hashing quality, and still faster than other non-t1ha hashes.
+ *    Provides a streaming mode and a 128-bit result.
+ *
+ * Note: For performance reasons the 64- and 128-bit results are completely
+ * different from each other, i.e. the 64-bit result is NOT any part of the 128-bit one.
+ */
+#ifndef T1HA2_DISABLED
+
+/* The at-once variant with 64-bit result */
+T1HA_API uint64_t t1ha2_atonce(const void *data, size_t length, uint64_t seed);
+
+/* The at-once variant with 128-bit result.
+ * Argument `extra_result` is NOT optional and MUST be valid.
+ * The high 64-bit part of the 128-bit hash is always stored unconditionally
+ * to the address given by the `extra_result` argument. */
+T1HA_API uint64_t t1ha2_atonce128(uint64_t *__restrict extra_result,
+ const void *__restrict data, size_t length,
+ uint64_t seed);
+
+/* The init/update/final trinity for streaming.
+ * Returns a 64- or 128-bit result depending on the `extra_result` argument. */
+T1HA_API void t1ha2_init(t1ha_context_t *ctx, uint64_t seed_x, uint64_t seed_y);
+T1HA_API void t1ha2_update(t1ha_context_t *__restrict ctx,
+ const void *__restrict data, size_t length);
+
+/* Argument `extra_result` is optional and MAY be NULL.
+ *  - If `extra_result` is NOT NULL then the 128-bit hash will be calculated,
+ *    and the high 64-bit part of it will be stored to the address given
+ *    by the `extra_result` argument.
+ *  - Otherwise the 64-bit hash will be calculated
+ *    and returned from the function directly.
+ *
+ * Note: For performance reasons the 64- and 128-bit results are completely
+ * different from each other, i.e. the 64-bit result is NOT any part of the 128-bit one. */
+T1HA_API uint64_t t1ha2_final(t1ha_context_t *__restrict ctx,
+ uint64_t *__restrict extra_result /* optional */);
+
+#endif /* T1HA2_DISABLED */
+
+/******************************************************************************
+ *
+ * t1ha1 = 64-bit, BASELINE FAST PORTABLE HASH:
+ *
+ *  - Runs faster on 64-bit platforms; in other cases it may run slowly.
+ *  - Portable and stable, returns the same 64-bit result
+ *    on all architectures and CPUs.
+ *  - Unfortunately it fails the "strict avalanche criteria";
+ *    see test results at https://github.com/demerphq/smhasher.
+ *
+ * This flaw is insignificant for t1ha1()'s purposes and imperceptible
+ * from a practical point of view.
+ * However, this issue has since been resolved in the newer t1ha2(),
+ * which was planned from the start to provide a bit more quality.
+ */
+#ifndef T1HA1_DISABLED
+
+/* The little-endian variant. */
+T1HA_API uint64_t t1ha1_le(const void *data, size_t length, uint64_t seed);
+
+/* The big-endian variant. */
+T1HA_API uint64_t t1ha1_be(const void *data, size_t length, uint64_t seed);
+
+#endif /* T1HA1_DISABLED */
+
+/******************************************************************************
+ *
+ * t1ha0 = 64-bit, JUST ONLY FASTER:
+ *
+ *  - Provides the fastest possible hashing for the current CPU, including
+ *    32-bit systems, engaging the available hardware acceleration.
+ *  - It is a facade that selects the most quick-and-dirty hash
+ *    for the current processor. For instance, on IA32 (x86) the actual
+ *    function will be selected at runtime, depending on current CPU capabilities.
+ *
+ * BE CAREFUL!!! THIS MEANS:
+ *
+ *   1. The quality of the hash is subject to tradeoffs with performance.
+ *      So the quality and strength of t1ha0() may be lower than t1ha1(),
+ *      especially on 32-bit targets, but it is then much faster.
+ *      However, it is guaranteed to pass all SMHasher tests.
+ *
+ *   2. There is no warranty that the hash result will be the same for a
+ *      particular key on another machine or another version of libt1ha.
+ *
+ *      Briefly, such hash results and their derivatives should be
+ *      used only at runtime, and should not be persisted or transferred
+ *      over a network.
+ *
+ *
+ * When T1HA0_RUNTIME_SELECT is nonzero, the t1ha0_resolve() function can
+ * be used to get the address of the actual t1ha0() implementation at
+ * runtime. This is useful in two cases:
+ *   - calling through a local function pointer is usually slightly
+ *     faster (less overhead) than via the PLT across a DSO boundary;
+ *   - GNU indirect functions (see below) are not supported by the
+ *     environment, so calling via t1ha0_funcptr is unavailable or costly.
+ */
+
+#ifndef T1HA0_DISABLED
+
+/* The little-endian variant for 32-bit CPU. */
+uint64_t t1ha0_32le(const void *data, size_t length, uint64_t seed);
+/* The big-endian variant for 32-bit CPU. */
+uint64_t t1ha0_32be(const void *data, size_t length, uint64_t seed);
+
+/* Define T1HA0_AESNI_AVAILABLE to 0 to disable AES-NI support. */
+#ifndef T1HA0_AESNI_AVAILABLE
+#if defined(__e2k__) || \
+ (defined(__ia32__) && (!defined(_M_IX86) || _MSC_VER > 1800))
+#define T1HA0_AESNI_AVAILABLE 1
+#else
+#define T1HA0_AESNI_AVAILABLE 0
+#endif
+#endif /* T1HA0_AESNI_AVAILABLE */
+
+/* Define T1HA0_RUNTIME_SELECT to 0 to disable dispatching t1ha0 at runtime. */
+#ifndef T1HA0_RUNTIME_SELECT
+#if T1HA0_AESNI_AVAILABLE && !defined(__e2k__)
+#define T1HA0_RUNTIME_SELECT 1
+#else
+#define T1HA0_RUNTIME_SELECT 0
+#endif
+#endif /* T1HA0_RUNTIME_SELECT */
+
+#if !T1HA0_RUNTIME_SELECT && !defined(T1HA0_USE_DEFINE)
+#if defined(__LCC__)
+#define T1HA0_USE_DEFINE 1
+#else
+#define T1HA0_USE_DEFINE 0
+#endif
+#endif /* T1HA0_USE_DEFINE */
+
+#if T1HA0_AESNI_AVAILABLE
+uint64_t t1ha0_ia32aes_noavx(const void *data, size_t length, uint64_t seed);
+uint64_t t1ha0_ia32aes_avx(const void *data, size_t length, uint64_t seed);
+#ifndef __e2k__
+uint64_t t1ha0_ia32aes_avx2(const void *data, size_t length, uint64_t seed);
+#endif
+#endif /* T1HA0_AESNI_AVAILABLE */
+
+#if T1HA0_RUNTIME_SELECT
+typedef uint64_t (*t1ha0_function_t)(const void *, size_t, uint64_t);
+T1HA_API t1ha0_function_t t1ha0_resolve(void);
+#if T1HA_USE_INDIRECT_FUNCTIONS
+T1HA_API uint64_t t1ha0(const void *data, size_t length, uint64_t seed);
+#else
+/* Otherwise a function pointer will be used.
+ * Unfortunately this may cause some call overhead. */
+T1HA_API extern uint64_t (*t1ha0_funcptr)(const void *data, size_t length,
+ uint64_t seed);
+static __force_inline uint64_t t1ha0(const void *data, size_t length,
+ uint64_t seed) {
+ return t1ha0_funcptr(data, length, seed);
+}
+#endif /* T1HA_USE_INDIRECT_FUNCTIONS */
+
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+
+#if T1HA0_USE_DEFINE
+
+#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \
+ (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED))
+#if defined(T1HA1_DISABLED)
+#define t1ha0 t1ha2_atonce
+#else
+#define t1ha0 t1ha1_be
+#endif /* T1HA1_DISABLED */
+#else /* 32/64 */
+#define t1ha0 t1ha0_32be
+#endif /* 32/64 */
+
+#else /* T1HA0_USE_DEFINE */
+
+static __force_inline uint64_t t1ha0(const void *data, size_t length,
+ uint64_t seed) {
+#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \
+ (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED))
+#if defined(T1HA1_DISABLED)
+ return t1ha2_atonce(data, length, seed);
+#else
+ return t1ha1_be(data, length, seed);
+#endif /* T1HA1_DISABLED */
+#else /* 32/64 */
+ return t1ha0_32be(data, length, seed);
+#endif /* 32/64 */
+}
+
+#endif /* !T1HA0_USE_DEFINE */
+
+#else /* !T1HA0_RUNTIME_SELECT && __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ */
+
+#if T1HA0_USE_DEFINE
+
+#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \
+ (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED))
+#if defined(T1HA1_DISABLED)
+#define t1ha0 t1ha2_atonce
+#else
+#define t1ha0 t1ha1_le
+#endif /* T1HA1_DISABLED */
+#else /* 32/64 */
+#define t1ha0 t1ha0_32le
+#endif /* 32/64 */
+
+#else
+
+static __force_inline uint64_t t1ha0(const void *data, size_t length,
+ uint64_t seed) {
+#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \
+ (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED))
+#if defined(T1HA1_DISABLED)
+ return t1ha2_atonce(data, length, seed);
+#else
+ return t1ha1_le(data, length, seed);
+#endif /* T1HA1_DISABLED */
+#else /* 32/64 */
+ return t1ha0_32le(data, length, seed);
+#endif /* 32/64 */
+}
+
+#endif /* !T1HA0_USE_DEFINE */
+
+#endif /* !T1HA0_RUNTIME_SELECT */
+
+#endif /* T1HA0_DISABLED */
+
+#ifdef __cplusplus
+}
+#endif
+
+#if __GNUC_PREREQ(4, 0)
+#pragma GCC visibility pop
+#endif /* __GNUC_PREREQ(4,0) */
diff --git a/include/t1ha0_ia32aes_b.h b/include/t1ha0_ia32aes_b.h
new file mode 100644
index 00000000..e8e52638
--- /dev/null
+++ b/include/t1ha0_ia32aes_b.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2016-2020 Positive Technologies, https://www.ptsecurity.com,
+ * Fast Positive Hash.
+ *
+ * Portions Copyright (c) 2010-2020 Leonid Yuriev <leo@yuriev.ru>,
+ * The 1Hippeus project (t1h).
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgement in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/*
+ * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" }
+ * by [Positive Technologies](https://www.ptsecurity.ru)
+ *
+ * Briefly, it is a 64-bit Hash Function:
+ * 1. Created for 64-bit little-endian platforms, predominantly for x86_64,
+ *    but portable; it can run without penalty on any 64-bit CPU.
+ * 2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash
+ *    and all other portable hash functions (which do not use specific
+ *    hardware tricks).
+ * 3. Not suitable for cryptography.
+ *
+ * The Future will (be) Positive. Всё будет хорошо.
+ *
+ * ACKNOWLEDGEMENT:
+ * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев)
+ * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta!
+ */
+
+#include "t1ha_bits.h"
+#include "t1ha_selfcheck.h"
+
+#if T1HA0_AESNI_AVAILABLE
+
+uint64_t T1HA_IA32AES_NAME(const void *data, uint32_t len) {
+ uint64_t a = 0;
+ uint64_t b = len;
+
+ if (likely(len > 32)) {
+ __m128i x = _mm_set_epi64x(a, b);
+ __m128i y = _mm_aesenc_si128(x, _mm_set_epi64x(prime_0, prime_1));
+
+ const __m128i *v = (const __m128i *)data;
+ const __m128i *const detent =
+ (const __m128i *)((const uint8_t *)data + (len & ~15ul));
+ data = detent;
+
+ if (len & 16) {
+ x = _mm_add_epi64(x, _mm_loadu_si128(v++));
+ y = _mm_aesenc_si128(x, y);
+ }
+ len &= 15;
+
+ if (v + 7 < detent) {
+ __m128i salt = y;
+ do {
+ __m128i t = _mm_aesenc_si128(_mm_loadu_si128(v++), salt);
+ t = _mm_aesdec_si128(t, _mm_loadu_si128(v++));
+ t = _mm_aesdec_si128(t, _mm_loadu_si128(v++));
+ t = _mm_aesdec_si128(t, _mm_loadu_si128(v++));
+
+ t = _mm_aesdec_si128(t, _mm_loadu_si128(v++));
+ t = _mm_aesdec_si128(t, _mm_loadu_si128(v++));
+ t = _mm_aesdec_si128(t, _mm_loadu_si128(v++));
+ t = _mm_aesdec_si128(t, _mm_loadu_si128(v++));
+
+ salt = _mm_add_epi64(salt, _mm_set_epi64x(prime_5, prime_6));
+ t = _mm_aesenc_si128(x, t);
+ x = _mm_add_epi64(y, x);
+ y = t;
+ } while (v + 7 < detent);
+ }
+
+ while (v < detent) {
+ __m128i v0y = _mm_add_epi64(y, _mm_loadu_si128(v++));
+ __m128i v1x = _mm_sub_epi64(x, _mm_loadu_si128(v++));
+ x = _mm_aesdec_si128(x, v0y);
+ y = _mm_aesdec_si128(y, v1x);
+ }
+
+ x = _mm_add_epi64(_mm_aesdec_si128(x, _mm_aesenc_si128(y, x)), y);
+#if defined(__x86_64__) || defined(_M_X64)
+#if defined(__SSE4_1__) || defined(__AVX__)
+ a = _mm_extract_epi64(x, 0);
+ b = _mm_extract_epi64(x, 1);
+#else
+ a = _mm_cvtsi128_si64(x);
+ b = _mm_cvtsi128_si64(_mm_unpackhi_epi64(x, x));
+#endif
+#else
+#if defined(__SSE4_1__) || defined(__AVX__)
+ a = (uint32_t)_mm_extract_epi32(x, 0) | (uint64_t)_mm_extract_epi32(x, 1)
+ << 32;
+ b = (uint32_t)_mm_extract_epi32(x, 2) | (uint64_t)_mm_extract_epi32(x, 3)
+ << 32;
+#else
+ a = (uint32_t)_mm_cvtsi128_si32(x);
+ a |= (uint64_t)_mm_cvtsi128_si32(_mm_shuffle_epi32(x, 1)) << 32;
+ x = _mm_unpackhi_epi64(x, x);
+ b = (uint32_t)_mm_cvtsi128_si32(x);
+ b |= (uint64_t)_mm_cvtsi128_si32(_mm_shuffle_epi32(x, 1)) << 32;
+#endif
+#endif
+#ifdef __AVX__
+ _mm256_zeroupper();
+#elif !(defined(_X86_64_) || defined(__x86_64__) || defined(_M_X64) || \
+ defined(__e2k__))
+ _mm_empty();
+#endif
+ }
+
+ const uint64_t *v = (const uint64_t *)data;
+ switch (len) {
+ default:
+ mixup64(&a, &b, fetch64_le_unaligned(v++), prime_4);
+ /* fall through */
+ case 24:
+ case 23:
+ case 22:
+ case 21:
+ case 20:
+ case 19:
+ case 18:
+ case 17:
+ mixup64(&b, &a, fetch64_le_unaligned(v++), prime_3);
+ /* fall through */
+ case 16:
+ case 15:
+ case 14:
+ case 13:
+ case 12:
+ case 11:
+ case 10:
+ case 9:
+ mixup64(&a, &b, fetch64_le_unaligned(v++), prime_2);
+ /* fall through */
+ case 8:
+ case 7:
+ case 6:
+ case 5:
+ case 4:
+ case 3:
+ case 2:
+ case 1:
+ mixup64(&b, &a, tail64_le_unaligned(v, len), prime_1);
+ /* fall through */
+ case 0:
+ return final64(a, b);
+ }
+}
+
+#endif /* T1HA0_AESNI_AVAILABLE */
+#undef T1HA_IA32AES_NAME
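This header is a template: the trailing `#undef T1HA_IA32AES_NAME` lets it be included repeatedly to stamp out one specialized function per instruction-set level. A sketch of the presumed consumption pattern (the per-file compiler flags are assumptions, not verbatim from this commit):

```c
/* Presumed usage: each inclusion, compiled in its own translation unit
 * with the matching -maes/-mavx/-mavx2 flags, emits one specialization. */
#define T1HA_IA32AES_NAME t1ha0_ia32aes_avx2
#include "t1ha0_ia32aes_b.h" /* defines t1ha0_ia32aes_avx2(), then #undef's */
```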
diff --git a/include/t1ha_bits.h b/include/t1ha_bits.h
new file mode 100644
index 00000000..539369aa
--- /dev/null
+++ b/include/t1ha_bits.h
@@ -0,0 +1,1254 @@
+/*
+ * Copyright (c) 2016-2020 Positive Technologies, https://www.ptsecurity.com,
+ * Fast Positive Hash.
+ *
+ * Portions Copyright (c) 2010-2020 Leonid Yuriev <leo@yuriev.ru>,
+ * The 1Hippeus project (t1h).
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgement in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/*
+ * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" }
+ * by [Positive Technologies](https://www.ptsecurity.ru)
+ *
+ * Briefly, it is a 64-bit Hash Function:
+ * 1. Created for 64-bit little-endian platforms, predominantly for x86_64,
+ *    but portable; it can run without penalty on any 64-bit CPU.
+ * 2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash
+ *    and all other portable hash functions (which do not use specific
+ *    hardware tricks).
+ * 3. Not suitable for cryptography.
+ *
+ * The Future will (be) Positive. Всё будет хорошо.
+ *
+ * ACKNOWLEDGEMENT:
+ * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев)
+ * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta!
+ */
+
+#pragma once
+
+#if defined(_MSC_VER)
+#pragma warning(disable : 4201) /* nameless struct/union */
+#if _MSC_VER > 1800
+#pragma warning(disable : 4464) /* relative include path contains '..' */
+#endif /* 1800 */
+#endif /* MSVC */
+#include "t1ha.h"
+
+#ifndef T1HA_USE_FAST_ONESHOT_READ
+/* Define it to 1 for slightly faster code.
+ * Unfortunately this may trigger false-positive alarms from Valgrind,
+ * AddressSanitizer and similar tools.
+ * So, define it to 0 for peace of mind if in doubt. */
+#define T1HA_USE_FAST_ONESHOT_READ 1
+#endif /* T1HA_USE_FAST_ONESHOT_READ */
+
+/*****************************************************************************/
+
+#include <assert.h> /* for assert() */
+#include <stdbool.h> /* for bool */
+#include <string.h> /* for memcpy() */
+
+#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ && \
+ __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__
+#error Unsupported byte order.
+#endif
+
+#define T1HA_UNALIGNED_ACCESS__UNABLE 0
+#define T1HA_UNALIGNED_ACCESS__SLOW 1
+#define T1HA_UNALIGNED_ACCESS__EFFICIENT 2
+
+#ifndef T1HA_SYS_UNALIGNED_ACCESS
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT
+#elif defined(__ia32__)
+#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT
+#elif defined(__e2k__)
+#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__SLOW
+#elif defined(__ARM_FEATURE_UNALIGNED)
+#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT
+#else
+#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__UNABLE
+#endif
+#endif /* T1HA_SYS_UNALIGNED_ACCESS */
+
+#define ALIGNMENT_16 2
+#define ALIGNMENT_32 4
+#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul
+#define ALIGNMENT_64 8
+#else
+#define ALIGNMENT_64 4
+#endif
+
+#ifndef PAGESIZE
+#define PAGESIZE 4096
+#endif /* PAGESIZE */
+
+/***************************************************************************/
+
+#ifndef __has_builtin
+#define __has_builtin(x) (0)
+#endif
+
+#ifndef __has_warning
+#define __has_warning(x) (0)
+#endif
+
+#ifndef __has_feature
+#define __has_feature(x) (0)
+#endif
+
+#ifndef __has_extension
+#define __has_extension(x) (0)
+#endif
+
+#if __has_feature(address_sanitizer)
+#define __SANITIZE_ADDRESS__ 1
+#endif
+
+#ifndef __optimize
+#if defined(__clang__) && !__has_attribute(__optimize__)
+#define __optimize(ops)
+#elif defined(__GNUC__) || __has_attribute(__optimize__)
+#define __optimize(ops) __attribute__((__optimize__(ops)))
+#else
+#define __optimize(ops)
+#endif
+#endif /* __optimize */
+
+#ifndef __cold
+#if defined(__OPTIMIZE__)
+#if defined(__e2k__)
+#define __cold __optimize(1) __attribute__((__cold__))
+#elif defined(__clang__) && !__has_attribute(__cold__) && \
+ __has_attribute(__section__)
+/* just put infrequently used functions in separate section */
+#define __cold __attribute__((__section__("text.unlikely"))) __optimize("Os")
+#elif defined(__GNUC__) || __has_attribute(__cold__)
+#define __cold __attribute__((__cold__)) __optimize("Os")
+#else
+#define __cold __optimize("Os")
+#endif
+#else
+#define __cold
+#endif
+#endif /* __cold */
+
+#if __GNUC_PREREQ(4, 4) || defined(__clang__)
+
+#if defined(__ia32__) || defined(__e2k__)
+#include <x86intrin.h>
+#endif
+
+#if defined(__ia32__) && !defined(__cpuid_count)
+#include <cpuid.h>
+#endif
+
+#if defined(__e2k__)
+#include <e2kbuiltin.h>
+#endif
+
+#ifndef likely
+#define likely(cond) __builtin_expect(!!(cond), 1)
+#endif
+
+#ifndef unlikely
+#define unlikely(cond) __builtin_expect(!!(cond), 0)
+#endif
+
+#if __GNUC_PREREQ(4, 5) || __has_builtin(__builtin_unreachable)
+#define unreachable() __builtin_unreachable()
+#endif
+
+#define bswap64(v) __builtin_bswap64(v)
+#define bswap32(v) __builtin_bswap32(v)
+#if __GNUC_PREREQ(4, 8) || __has_builtin(__builtin_bswap16)
+#define bswap16(v) __builtin_bswap16(v)
+#endif
+
+#if !defined(__maybe_unused) && \
+ (__GNUC_PREREQ(4, 3) || __has_attribute(__unused__))
+#define __maybe_unused __attribute__((__unused__))
+#endif
+
+#if !defined(__always_inline) && \
+ (__GNUC_PREREQ(3, 2) || __has_attribute(__always_inline__))
+#define __always_inline __inline __attribute__((__always_inline__))
+#endif
+
+#if defined(__e2k__)
+
+#if __iset__ >= 3
+#define mul_64x64_high(a, b) __builtin_e2k_umulhd(a, b)
+#endif /* __iset__ >= 3 */
+
+#if __iset__ >= 5
+static __maybe_unused __always_inline unsigned
+e2k_add64carry_first(uint64_t base, uint64_t addend, uint64_t *sum) {
+ *sum = base + addend;
+ return (unsigned)__builtin_e2k_addcd_c(base, addend, 0);
+}
+#define add64carry_first(base, addend, sum) \
+ e2k_add64carry_first(base, addend, sum)
+
+static __maybe_unused __always_inline unsigned
+e2k_add64carry_next(unsigned carry, uint64_t base, uint64_t addend,
+ uint64_t *sum) {
+ *sum = __builtin_e2k_addcd(base, addend, carry);
+ return (unsigned)__builtin_e2k_addcd_c(base, addend, carry);
+}
+#define add64carry_next(carry, base, addend, sum) \
+ e2k_add64carry_next(carry, base, addend, sum)
+
+static __maybe_unused __always_inline void e2k_add64carry_last(unsigned carry,
+ uint64_t base,
+ uint64_t addend,
+ uint64_t *sum) {
+ *sum = __builtin_e2k_addcd(base, addend, carry);
+}
+#define add64carry_last(carry, base, addend, sum) \
+ e2k_add64carry_last(carry, base, addend, sum)
+#endif /* __iset__ >= 5 */
+
+#define fetch64_be_aligned(ptr) ((uint64_t)__builtin_e2k_ld_64s_be(ptr))
+#define fetch32_be_aligned(ptr) ((uint32_t)__builtin_e2k_ld_32u_be(ptr))
+
+#endif /* __e2k__ Elbrus */
+
+#elif defined(_MSC_VER)
+
+#if _MSC_FULL_VER < 190024234 && defined(_M_IX86)
+#pragma message( \
+ "For AES-NI at least \"Microsoft C/C++ Compiler\" version 19.00.24234 (Visual Studio 2015 Update 3) is required.")
+#endif
+#if _MSC_FULL_VER < 191526730
+#pragma message( \
+ "It is recommended to use \"Microsoft C/C++ Compiler\" version 19.15.26730 (Visual Studio 2017 15.8) or newer.")
+#endif
+#if _MSC_FULL_VER < 180040629
+#error At least "Microsoft C/C++ Compiler" version 18.00.40629 (Visual Studio 2013 Update 5) is required.
+#endif
+
+#pragma warning(push, 1)
+
+#include <intrin.h>
+#include <stdlib.h>
+#define likely(cond) (cond)
+#define unlikely(cond) (cond)
+#define unreachable() __assume(0)
+#define bswap64(v) _byteswap_uint64(v)
+#define bswap32(v) _byteswap_ulong(v)
+#define bswap16(v) _byteswap_ushort(v)
+#define rot64(v, s) _rotr64(v, s)
+#define rot32(v, s) _rotr(v, s)
+#define __always_inline __forceinline
+
+#if defined(_M_X64) || defined(_M_IA64)
+#pragma intrinsic(_umul128)
+#define mul_64x64_128(a, b, ph) _umul128(a, b, ph)
+#pragma intrinsic(_addcarry_u64)
+#define add64carry_first(base, addend, sum) _addcarry_u64(0, base, addend, sum)
+#define add64carry_next(carry, base, addend, sum) \
+ _addcarry_u64(carry, base, addend, sum)
+#define add64carry_last(carry, base, addend, sum) \
+ (void)_addcarry_u64(carry, base, addend, sum)
+#endif
+
+#if defined(_M_ARM64) || defined(_M_X64) || defined(_M_IA64)
+#pragma intrinsic(__umulh)
+#define mul_64x64_high(a, b) __umulh(a, b)
+#endif
+
+#if defined(_M_IX86)
+#pragma intrinsic(__emulu)
+#define mul_32x32_64(a, b) __emulu(a, b)
+
+#if _MSC_VER >= 1915 /* LY: workaround for SSA-optimizer bug */
+#pragma intrinsic(_addcarry_u32)
+#define add32carry_first(base, addend, sum) _addcarry_u32(0, base, addend, sum)
+#define add32carry_next(carry, base, addend, sum) \
+ _addcarry_u32(carry, base, addend, sum)
+#define add32carry_last(carry, base, addend, sum) \
+ (void)_addcarry_u32(carry, base, addend, sum)
+
+static __forceinline char
+msvc32_add64carry_first(uint64_t base, uint64_t addend, uint64_t *sum) {
+ uint32_t *const sum32 = (uint32_t *)sum;
+ const uint32_t base_32l = (uint32_t)base;
+ const uint32_t base_32h = (uint32_t)(base >> 32);
+ const uint32_t addend_32l = (uint32_t)addend;
+ const uint32_t addend_32h = (uint32_t)(addend >> 32);
+ return add32carry_next(add32carry_first(base_32l, addend_32l, sum32),
+ base_32h, addend_32h, sum32 + 1);
+}
+#define add64carry_first(base, addend, sum) \
+ msvc32_add64carry_first(base, addend, sum)
+
+static __forceinline char msvc32_add64carry_next(char carry, uint64_t base,
+ uint64_t addend,
+ uint64_t *sum) {
+ uint32_t *const sum32 = (uint32_t *)sum;
+ const uint32_t base_32l = (uint32_t)base;
+ const uint32_t base_32h = (uint32_t)(base >> 32);
+ const uint32_t addend_32l = (uint32_t)addend;
+ const uint32_t addend_32h = (uint32_t)(addend >> 32);
+ return add32carry_next(add32carry_next(carry, base_32l, addend_32l, sum32),
+ base_32h, addend_32h, sum32 + 1);
+}
+#define add64carry_next(carry, base, addend, sum) \
+ msvc32_add64carry_next(carry, base, addend, sum)
+
+static __forceinline void msvc32_add64carry_last(char carry, uint64_t base,
+ uint64_t addend,
+ uint64_t *sum) {
+ uint32_t *const sum32 = (uint32_t *)sum;
+ const uint32_t base_32l = (uint32_t)base;
+ const uint32_t base_32h = (uint32_t)(base >> 32);
+ const uint32_t addend_32l = (uint32_t)addend;
+ const uint32_t addend_32h = (uint32_t)(addend >> 32);
+ add32carry_last(add32carry_next(carry, base_32l, addend_32l, sum32), base_32h,
+ addend_32h, sum32 + 1);
+}
+#define add64carry_last(carry, base, addend, sum) \
+ msvc32_add64carry_last(carry, base, addend, sum)
+#endif /* _MSC_FULL_VER >= 190024231 */
+
+#elif defined(_M_ARM)
+#define mul_32x32_64(a, b) _arm_umull(a, b)
+#endif
+
+#pragma warning(pop)
+#pragma warning(disable : 4514) /* 'xyz': unreferenced inline function \
+ has been removed */
+#pragma warning(disable : 4710) /* 'xyz': function not inlined */
+#pragma warning(disable : 4711) /* function 'xyz' selected for \
+ automatic inline expansion */
+#pragma warning(disable : 4127) /* conditional expression is constant */
+#pragma warning(disable : 4702) /* unreachable code */
+#endif /* Compiler */
+
+#ifndef likely
+#define likely(cond) (cond)
+#endif
+#ifndef unlikely
+#define unlikely(cond) (cond)
+#endif
+#ifndef __maybe_unused
+#define __maybe_unused
+#endif
+#ifndef __always_inline
+#define __always_inline __inline
+#endif
+#ifndef unreachable
+#define unreachable() \
+ do { \
+ } while (1)
+#endif
+
+#ifndef bswap64
+#if defined(bswap_64)
+#define bswap64 bswap_64
+#elif defined(__bswap_64)
+#define bswap64 __bswap_64
+#else
+static __always_inline uint64_t bswap64(uint64_t v) {
+ return v << 56 | v >> 56 | ((v << 40) & UINT64_C(0x00ff000000000000)) |
+ ((v << 24) & UINT64_C(0x0000ff0000000000)) |
+ ((v << 8) & UINT64_C(0x000000ff00000000)) |
+ ((v >> 8) & UINT64_C(0x00000000ff000000)) |
+ ((v >> 24) & UINT64_C(0x0000000000ff0000)) |
+ ((v >> 40) & UINT64_C(0x000000000000ff00));
+}
+#endif
+#endif /* bswap64 */
+
+#ifndef bswap32
+#if defined(bswap_32)
+#define bswap32 bswap_32
+#elif defined(__bswap_32)
+#define bswap32 __bswap_32
+#else
+static __always_inline uint32_t bswap32(uint32_t v) {
+ return v << 24 | v >> 24 | ((v << 8) & UINT32_C(0x00ff0000)) |
+ ((v >> 8) & UINT32_C(0x0000ff00));
+}
+#endif
+#endif /* bswap32 */
+
+#ifndef bswap16
+#if defined(bswap_16)
+#define bswap16 bswap_16
+#elif defined(__bswap_16)
+#define bswap16 __bswap_16
+#else
+static __always_inline uint16_t bswap16(uint16_t v) { return v << 8 | v >> 8; }
+#endif
+#endif /* bswap16 */
+
+#if defined(__ia32__) || \
+ T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT
+/* The __builtin_assume_aligned() leads gcc/clang to load values into the
+ * registers, even when it is possible to directly use an operand from memory.
+ * This can lead to a shortage of registers and a significant slowdown.
+ * Therefore avoid unnecessary use of __builtin_assume_aligned() for x86. */
+#define read_unaligned(ptr, bits) (*(const uint##bits##_t *__restrict)(ptr))
+#define read_aligned(ptr, bits) (*(const uint##bits##_t *__restrict)(ptr))
+#endif /* __ia32__ */
+
+#ifndef read_unaligned
+#if defined(__GNUC__) || __has_attribute(__packed__)
+typedef struct {
+ uint8_t unaligned_8;
+ uint16_t unaligned_16;
+ uint32_t unaligned_32;
+ uint64_t unaligned_64;
+} __attribute__((__packed__)) t1ha_unaligned_proxy;
+#define read_unaligned(ptr, bits) \
+ (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \
+ t1ha_unaligned_proxy, unaligned_##bits))) \
+ ->unaligned_##bits)
+#elif defined(_MSC_VER)
+#pragma warning( \
+ disable : 4235) /* nonstandard extension used: '__unaligned' \
+ * keyword not supported on this architecture */
+#define read_unaligned(ptr, bits) (*(const __unaligned uint##bits##_t *)(ptr))
+#else
+#pragma pack(push, 1)
+typedef struct {
+ uint8_t unaligned_8;
+ uint16_t unaligned_16;
+ uint32_t unaligned_32;
+ uint64_t unaligned_64;
+} t1ha_unaligned_proxy;
+#pragma pack(pop)
+#define read_unaligned(ptr, bits) \
+ (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \
+ t1ha_unaligned_proxy, unaligned_##bits))) \
+ ->unaligned_##bits)
+#endif
+#endif /* read_unaligned */
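For clarity, the packed-proxy trick above simply re-types the pointer so the compiler emits an unaligned load; a self-contained miniature, with a memcpy-based load as the fully portable alternative (names here are local to this sketch):

```c
/* Self-contained miniature of the packed-proxy unaligned read
 * (GCC/Clang attribute syntax). */
#include <stdint.h>
#include <string.h>

typedef struct { uint32_t u32; } __attribute__((__packed__)) proxy32;

static uint32_t load32_unaligned(const void *p) {
  return ((const proxy32 *)p)->u32; /* compiler emits an unaligned load */
}

static uint32_t load32_memcpy(const void *p) {
  uint32_t v;
  memcpy(&v, p, sizeof v); /* portable; typically optimizes to the same load */
  return v;
}
```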
+
+#ifndef read_aligned
+#if __GNUC_PREREQ(4, 8) || __has_builtin(__builtin_assume_aligned)
+#define read_aligned(ptr, bits) \
+ (*(const uint##bits##_t *)__builtin_assume_aligned(ptr, ALIGNMENT_##bits))
+#elif (__GNUC_PREREQ(3, 3) || __has_attribute(__aligned__)) && \
+ !defined(__clang__)
+#define read_aligned(ptr, bits) \
+ (*(const uint##bits##_t \
+ __attribute__((__aligned__(ALIGNMENT_##bits))) *)(ptr))
+#elif __has_attribute(__assume_aligned__)
+
+static __always_inline const
+ uint16_t *__attribute__((__assume_aligned__(ALIGNMENT_16)))
+ cast_aligned_16(const void *ptr) {
+ return (const uint16_t *)ptr;
+}
+static __always_inline const
+ uint32_t *__attribute__((__assume_aligned__(ALIGNMENT_32)))
+ cast_aligned_32(const void *ptr) {
+ return (const uint32_t *)ptr;
+}
+static __always_inline const
+ uint64_t *__attribute__((__assume_aligned__(ALIGNMENT_64)))
+ cast_aligned_64(const void *ptr) {
+ return (const uint64_t *)ptr;
+}
+
+#define read_aligned(ptr, bits) (*cast_aligned_##bits(ptr))
+
+#elif defined(_MSC_VER)
+#define read_aligned(ptr, bits) \
+ (*(const __declspec(align(ALIGNMENT_##bits)) uint##bits##_t *)(ptr))
+#else
+#define read_aligned(ptr, bits) (*(const uint##bits##_t *)(ptr))
+#endif
+#endif /* read_aligned */
+
+#ifndef prefetch
+#if (__GNUC_PREREQ(4, 0) || __has_builtin(__builtin_prefetch)) && \
+ !defined(__ia32__)
+#define prefetch(ptr) __builtin_prefetch(ptr)
+#elif defined(_M_ARM64) || defined(_M_ARM)
+#define prefetch(ptr) __prefetch(ptr)
+#else
+#define prefetch(ptr) \
+ do { \
+ (void)(ptr); \
+ } while (0)
+#endif
+#endif /* prefetch */
+
+#if __has_warning("-Wconstant-logical-operand")
+#if defined(__clang__)
+#pragma clang diagnostic ignored "-Wconstant-logical-operand"
+#elif defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wconstant-logical-operand"
+#else
+#pragma warning disable "constant-logical-operand"
+#endif
+#endif /* -Wconstant-logical-operand */
+
+#if __has_warning("-Wtautological-pointer-compare")
+#if defined(__clang__)
+#pragma clang diagnostic ignored "-Wtautological-pointer-compare"
+#elif defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wtautological-pointer-compare"
+#else
+#pragma warning disable "tautological-pointer-compare"
+#endif
+#endif /* -Wtautological-pointer-compare */
+
+/***************************************************************************/
+
+#if __GNUC_PREREQ(4, 0)
+#pragma GCC visibility push(hidden)
+#endif /* __GNUC_PREREQ(4,0) */
+
+/*---------------------------------------------------------- Little Endian */
+
+#ifndef fetch16_le_aligned
+static __maybe_unused __always_inline uint16_t
+fetch16_le_aligned(const void *v) {
+ assert(((uintptr_t)v) % ALIGNMENT_16 == 0);
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return read_aligned(v, 16);
+#else
+ return bswap16(read_aligned(v, 16));
+#endif
+}
+#endif /* fetch16_le_aligned */
+
+#ifndef fetch16_le_unaligned
+static __maybe_unused __always_inline uint16_t
+fetch16_le_unaligned(const void *v) {
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
+ const uint8_t *p = (const uint8_t *)v;
+ return p[0] | (uint16_t)p[1] << 8;
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return read_unaligned(v, 16);
+#else
+ return bswap16(read_unaligned(v, 16));
+#endif
+}
+#endif /* fetch16_le_unaligned */
+
+#ifndef fetch32_le_aligned
+static __maybe_unused __always_inline uint32_t
+fetch32_le_aligned(const void *v) {
+ assert(((uintptr_t)v) % ALIGNMENT_32 == 0);
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return read_aligned(v, 32);
+#else
+ return bswap32(read_aligned(v, 32));
+#endif
+}
+#endif /* fetch32_le_aligned */
+
+#ifndef fetch32_le_unaligned
+static __maybe_unused __always_inline uint32_t
+fetch32_le_unaligned(const void *v) {
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
+ return fetch16_le_unaligned(v) |
+ (uint32_t)fetch16_le_unaligned((const uint8_t *)v + 2) << 16;
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return read_unaligned(v, 32);
+#else
+ return bswap32(read_unaligned(v, 32));
+#endif
+}
+#endif /* fetch32_le_unaligned */
+
+#ifndef fetch64_le_aligned
+static __maybe_unused __always_inline uint64_t
+fetch64_le_aligned(const void *v) {
+ assert(((uintptr_t)v) % ALIGNMENT_64 == 0);
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return read_aligned(v, 64);
+#else
+ return bswap64(read_aligned(v, 64));
+#endif
+}
+#endif /* fetch64_le_aligned */
+
+#ifndef fetch64_le_unaligned
+static __maybe_unused __always_inline uint64_t
+fetch64_le_unaligned(const void *v) {
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
+ return fetch32_le_unaligned(v) |
+ (uint64_t)fetch32_le_unaligned((const uint8_t *)v + 4) << 32;
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return read_unaligned(v, 64);
+#else
+ return bswap64(read_unaligned(v, 64));
+#endif
+}
+#endif /* fetch64_le_unaligned */
+
+static __maybe_unused __always_inline uint64_t tail64_le_aligned(const void *v,
+ size_t tail) {
+ const uint8_t *const p = (const uint8_t *)v;
+#if T1HA_USE_FAST_ONESHOT_READ && !defined(__SANITIZE_ADDRESS__)
+  /* We can perform a 'oneshot' read, which is a little bit faster. */
+ const unsigned shift = ((8 - tail) & 7) << 3;
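+  /* e.g. tail == 3 gives shift == 40, so the mask keeps only the low 3 bytes */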
+ return fetch64_le_aligned(p) & ((~UINT64_C(0)) >> shift);
+#else
+ uint64_t r = 0;
+ switch (tail & 7) {
+ default:
+ unreachable();
+/* fall through */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  /* For most CPUs this code is better when byte reordering is not needed. */
+ case 0:
+ return fetch64_le_aligned(p);
+ case 7:
+ r = (uint64_t)p[6] << 8;
+ /* fall through */
+ case 6:
+ r += p[5];
+ r <<= 8;
+ /* fall through */
+ case 5:
+ r += p[4];
+ r <<= 32;
+ /* fall through */
+ case 4:
+ return r + fetch32_le_aligned(p);
+ case 3:
+ r = (uint64_t)p[2] << 16;
+ /* fall through */
+ case 2:
+ return r + fetch16_le_aligned(p);
+ case 1:
+ return p[0];
+#else
+ case 0:
+ r = p[7] << 8;
+ /* fall through */
+ case 7:
+ r += p[6];
+ r <<= 8;
+ /* fall through */
+ case 6:
+ r += p[5];
+ r <<= 8;
+ /* fall through */
+ case 5:
+ r += p[4];
+ r <<= 8;
+ /* fall through */
+ case 4:
+ r += p[3];
+ r <<= 8;
+ /* fall through */
+ case 3:
+ r += p[2];
+ r <<= 8;
+ /* fall through */
+ case 2:
+ r += p[1];
+ r <<= 8;
+ /* fall through */
+ case 1:
+ return r + p[0];
+#endif
+ }
+#endif /* T1HA_USE_FAST_ONESHOT_READ */
+}
+
+#if T1HA_USE_FAST_ONESHOT_READ && \
+ T1HA_SYS_UNALIGNED_ACCESS != T1HA_UNALIGNED_ACCESS__UNABLE && \
+ defined(PAGESIZE) && PAGESIZE > 42 && !defined(__SANITIZE_ADDRESS__)
+#define can_read_underside(ptr, size) \
+ (((PAGESIZE - (size)) & (uintptr_t)(ptr)) != 0)
+#endif /* T1HA_USE_FAST_ONESHOT_READ */
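+
+/* Illustration (assuming PAGESIZE is a power of two, e.g. 4096): for
+ * size == 8 the mask is PAGESIZE - 8, so the test is non-zero exactly when
+ * ptr points at least 8 bytes past a page boundary, i.e. the bytes just
+ * below ptr are guaranteed to lie on the same mapped page. */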
+
+static __maybe_unused __always_inline uint64_t
+tail64_le_unaligned(const void *v, size_t tail) {
+ const uint8_t *p = (const uint8_t *)v;
+#if defined(can_read_underside) && \
+ (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul)
+  /* On some systems (e.g. x86_64) we can perform a 'oneshot' read, which
+   * is a little bit faster. Thanks to Marcin Żukowski
+   * <marcin.zukowski@gmail.com> for the reminder. */
+ const unsigned offset = (8 - tail) & 7;
+ const unsigned shift = offset << 3;
+ if (likely(can_read_underside(p, 8))) {
+ p -= offset;
+ return fetch64_le_unaligned(p) >> shift;
+ }
+ return fetch64_le_unaligned(p) & ((~UINT64_C(0)) >> shift);
+#else
+ uint64_t r = 0;
+ switch (tail & 7) {
+ default:
+ unreachable();
+/* fall through */
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT && \
+ __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+  /* For most CPUs this code is better when neither copying
+   * for alignment nor byte reordering is needed. */
+ case 0:
+ return fetch64_le_unaligned(p);
+ case 7:
+ r = (uint64_t)p[6] << 8;
+ /* fall through */
+ case 6:
+ r += p[5];
+ r <<= 8;
+ /* fall through */
+ case 5:
+ r += p[4];
+ r <<= 32;
+ /* fall through */
+ case 4:
+ return r + fetch32_le_unaligned(p);
+ case 3:
+ r = (uint64_t)p[2] << 16;
+ /* fall through */
+ case 2:
+ return r + fetch16_le_unaligned(p);
+ case 1:
+ return p[0];
+#else
+  /* For most CPUs this code is better than copying
+   * for alignment and/or byte reordering. */
+ case 0:
+ r = p[7] << 8;
+ /* fall through */
+ case 7:
+ r += p[6];
+ r <<= 8;
+ /* fall through */
+ case 6:
+ r += p[5];
+ r <<= 8;
+ /* fall through */
+ case 5:
+ r += p[4];
+ r <<= 8;
+ /* fall through */
+ case 4:
+ r += p[3];
+ r <<= 8;
+ /* fall through */
+ case 3:
+ r += p[2];
+ r <<= 8;
+ /* fall through */
+ case 2:
+ r += p[1];
+ r <<= 8;
+ /* fall through */
+ case 1:
+ return r + p[0];
+#endif
+ }
+#endif /* can_read_underside */
+}
+
+/*------------------------------------------------------------- Big Endian */
+
+#ifndef fetch16_be_aligned
+static __maybe_unused __always_inline uint16_t
+fetch16_be_aligned(const void *v) {
+ assert(((uintptr_t)v) % ALIGNMENT_16 == 0);
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return read_aligned(v, 16);
+#else
+ return bswap16(read_aligned(v, 16));
+#endif
+}
+#endif /* fetch16_be_aligned */
+
+#ifndef fetch16_be_unaligned
+static __maybe_unused __always_inline uint16_t
+fetch16_be_unaligned(const void *v) {
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
+ const uint8_t *p = (const uint8_t *)v;
+ return (uint16_t)p[0] << 8 | p[1];
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return read_unaligned(v, 16);
+#else
+ return bswap16(read_unaligned(v, 16));
+#endif
+}
+#endif /* fetch16_be_unaligned */
+
+#ifndef fetch32_be_aligned
+static __maybe_unused __always_inline uint32_t
+fetch32_be_aligned(const void *v) {
+ assert(((uintptr_t)v) % ALIGNMENT_32 == 0);
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return read_aligned(v, 32);
+#else
+ return bswap32(read_aligned(v, 32));
+#endif
+}
+#endif /* fetch32_be_aligned */
+
+#ifndef fetch32_be_unaligned
+static __maybe_unused __always_inline uint32_t
+fetch32_be_unaligned(const void *v) {
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
+ return (uint32_t)fetch16_be_unaligned(v) << 16 |
+ fetch16_be_unaligned((const uint8_t *)v + 2);
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return read_unaligned(v, 32);
+#else
+ return bswap32(read_unaligned(v, 32));
+#endif
+}
+#endif /* fetch32_be_unaligned */
+
+#ifndef fetch64_be_aligned
+static __maybe_unused __always_inline uint64_t
+fetch64_be_aligned(const void *v) {
+ assert(((uintptr_t)v) % ALIGNMENT_64 == 0);
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return read_aligned(v, 64);
+#else
+ return bswap64(read_aligned(v, 64));
+#endif
+}
+#endif /* fetch64_be_aligned */
+
+#ifndef fetch64_be_unaligned
+static __maybe_unused __always_inline uint64_t
+fetch64_be_unaligned(const void *v) {
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
+ return (uint64_t)fetch32_be_unaligned(v) << 32 |
+ fetch32_be_unaligned((const uint8_t *)v + 4);
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return read_unaligned(v, 64);
+#else
+ return bswap64(read_unaligned(v, 64));
+#endif
+}
+#endif /* fetch64_be_unaligned */
+
+static __maybe_unused __always_inline uint64_t tail64_be_aligned(const void *v,
+ size_t tail) {
+ const uint8_t *const p = (const uint8_t *)v;
+#if T1HA_USE_FAST_ONESHOT_READ && !defined(__SANITIZE_ADDRESS__)
+  /* We can perform a 'oneshot' read, which is a little bit faster. */
+ const unsigned shift = ((8 - tail) & 7) << 3;
+ return fetch64_be_aligned(p) >> shift;
+#else
+ switch (tail & 7) {
+ default:
+ unreachable();
+/* fall through */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  /* For most CPUs this code is better when no byte reordering is needed. */
+ case 1:
+ return p[0];
+ case 2:
+ return fetch16_be_aligned(p);
+ case 3:
+ return (uint32_t)fetch16_be_aligned(p) << 8 | p[2];
+ case 4:
+ return fetch32_be_aligned(p);
+ case 5:
+ return (uint64_t)fetch32_be_aligned(p) << 8 | p[4];
+ case 6:
+ return (uint64_t)fetch32_be_aligned(p) << 16 | fetch16_be_aligned(p + 4);
+ case 7:
+ return (uint64_t)fetch32_be_aligned(p) << 24 |
+ (uint32_t)fetch16_be_aligned(p + 4) << 8 | p[6];
+ case 0:
+ return fetch64_be_aligned(p);
+#else
+ case 1:
+ return p[0];
+ case 2:
+ return p[1] | (uint32_t)p[0] << 8;
+ case 3:
+ return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16;
+ case 4:
+ return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 |
+ (uint32_t)p[0] << 24;
+ case 5:
+ return p[4] | (uint32_t)p[3] << 8 | (uint32_t)p[2] << 16 |
+ (uint32_t)p[1] << 24 | (uint64_t)p[0] << 32;
+ case 6:
+ return p[5] | (uint32_t)p[4] << 8 | (uint32_t)p[3] << 16 |
+ (uint32_t)p[2] << 24 | (uint64_t)p[1] << 32 | (uint64_t)p[0] << 40;
+ case 7:
+ return p[6] | (uint32_t)p[5] << 8 | (uint32_t)p[4] << 16 |
+ (uint32_t)p[3] << 24 | (uint64_t)p[2] << 32 | (uint64_t)p[1] << 40 |
+ (uint64_t)p[0] << 48;
+ case 0:
+ return p[7] | (uint32_t)p[6] << 8 | (uint32_t)p[5] << 16 |
+ (uint32_t)p[4] << 24 | (uint64_t)p[3] << 32 | (uint64_t)p[2] << 40 |
+ (uint64_t)p[1] << 48 | (uint64_t)p[0] << 56;
+#endif
+ }
+#endif /* T1HA_USE_FAST_ONESHOT_READ */
+}
+
+static __maybe_unused __always_inline uint64_t
+tail64_be_unaligned(const void *v, size_t tail) {
+ const uint8_t *p = (const uint8_t *)v;
+#if defined(can_read_underside) && \
+ (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul)
+  /* On some systems (e.g. x86_64) we can perform a 'oneshot' read, which
+   * is a little bit faster. Thanks to Marcin Żukowski
+   * <marcin.zukowski@gmail.com> for the reminder. */
+ const unsigned offset = (8 - tail) & 7;
+ const unsigned shift = offset << 3;
+ if (likely(can_read_underside(p, 8))) {
+ p -= offset;
+ return fetch64_be_unaligned(p) & ((~UINT64_C(0)) >> shift);
+ }
+ return fetch64_be_unaligned(p) >> shift;
+#else
+ switch (tail & 7) {
+ default:
+ unreachable();
+/* fall through */
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT && \
+ __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  /* For most CPUs this code is better when neither copying
+   * for alignment nor byte reordering is needed. */
+ case 1:
+ return p[0];
+ case 2:
+ return fetch16_be_unaligned(p);
+ case 3:
+ return (uint32_t)fetch16_be_unaligned(p) << 8 | p[2];
+ case 4:
+      return fetch32_be_unaligned(p);
+ case 5:
+ return (uint64_t)fetch32_be_unaligned(p) << 8 | p[4];
+ case 6:
+ return (uint64_t)fetch32_be_unaligned(p) << 16 |
+ fetch16_be_unaligned(p + 4);
+ case 7:
+ return (uint64_t)fetch32_be_unaligned(p) << 24 |
+ (uint32_t)fetch16_be_unaligned(p + 4) << 8 | p[6];
+ case 0:
+ return fetch64_be_unaligned(p);
+#else
+  /* For most CPUs this code is better than copying
+   * for alignment and/or byte reordering. */
+ case 1:
+ return p[0];
+ case 2:
+ return p[1] | (uint32_t)p[0] << 8;
+ case 3:
+ return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16;
+ case 4:
+ return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 |
+ (uint32_t)p[0] << 24;
+ case 5:
+ return p[4] | (uint32_t)p[3] << 8 | (uint32_t)p[2] << 16 |
+ (uint32_t)p[1] << 24 | (uint64_t)p[0] << 32;
+ case 6:
+ return p[5] | (uint32_t)p[4] << 8 | (uint32_t)p[3] << 16 |
+ (uint32_t)p[2] << 24 | (uint64_t)p[1] << 32 | (uint64_t)p[0] << 40;
+ case 7:
+ return p[6] | (uint32_t)p[5] << 8 | (uint32_t)p[4] << 16 |
+ (uint32_t)p[3] << 24 | (uint64_t)p[2] << 32 | (uint64_t)p[1] << 40 |
+ (uint64_t)p[0] << 48;
+ case 0:
+ return p[7] | (uint32_t)p[6] << 8 | (uint32_t)p[5] << 16 |
+ (uint32_t)p[4] << 24 | (uint64_t)p[3] << 32 | (uint64_t)p[2] << 40 |
+ (uint64_t)p[1] << 48 | (uint64_t)p[0] << 56;
+#endif
+ }
+#endif /* can_read_underside */
+}
+
+/***************************************************************************/
+
+#ifndef rot64
+static __maybe_unused __always_inline uint64_t rot64(uint64_t v, unsigned s) {
+ return (v >> s) | (v << (64 - s));
+}
+#endif /* rot64 */
+
+#ifndef mul_32x32_64
+static __maybe_unused __always_inline uint64_t mul_32x32_64(uint32_t a,
+ uint32_t b) {
+ return a * (uint64_t)b;
+}
+#endif /* mul_32x32_64 */
+
+#ifndef add64carry_first
+static __maybe_unused __always_inline unsigned
+add64carry_first(uint64_t base, uint64_t addend, uint64_t *sum) {
+#if __has_builtin(__builtin_addcll)
+ unsigned long long carryout;
+ *sum = __builtin_addcll(base, addend, 0, &carryout);
+ return (unsigned)carryout;
+#else
+ *sum = base + addend;
+ return *sum < addend;
+#endif /* __has_builtin(__builtin_addcll) */
+}
+#endif /* add64carry_first */
+
+#ifndef add64carry_next
+static __maybe_unused __always_inline unsigned
+add64carry_next(unsigned carry, uint64_t base, uint64_t addend, uint64_t *sum) {
+#if __has_builtin(__builtin_addcll)
+ unsigned long long carryout;
+ *sum = __builtin_addcll(base, addend, carry, &carryout);
+ return (unsigned)carryout;
+#else
+ *sum = base + addend + carry;
+ return *sum < addend || (carry && *sum == addend);
+#endif /* __has_builtin(__builtin_addcll) */
+}
+#endif /* add64carry_next */
+
+#ifndef add64carry_last
+static __maybe_unused __always_inline void
+add64carry_last(unsigned carry, uint64_t base, uint64_t addend, uint64_t *sum) {
+#if __has_builtin(__builtin_addcll)
+ unsigned long long carryout;
+ *sum = __builtin_addcll(base, addend, carry, &carryout);
+ (void)carryout;
+#else
+ *sum = base + addend + carry;
+#endif /* __has_builtin(__builtin_addcll) */
+}
+#endif /* add64carry_last */
+
+#ifndef mul_64x64_128
+static __maybe_unused __always_inline uint64_t mul_64x64_128(uint64_t a,
+ uint64_t b,
+ uint64_t *h) {
+#if (defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)) && \
+ (!defined(__LCC__) || __LCC__ != 124)
+ __uint128_t r = (__uint128_t)a * (__uint128_t)b;
+ /* modern GCC could nicely optimize this */
+ *h = (uint64_t)(r >> 64);
+ return (uint64_t)r;
+#elif defined(mul_64x64_high)
+ *h = mul_64x64_high(a, b);
+ return a * b;
+#else
+ /* performs 64x64 to 128 bit multiplication */
+ const uint64_t ll = mul_32x32_64((uint32_t)a, (uint32_t)b);
+ const uint64_t lh = mul_32x32_64(a >> 32, (uint32_t)b);
+ const uint64_t hl = mul_32x32_64((uint32_t)a, b >> 32);
+ const uint64_t hh = mul_32x32_64(a >> 32, b >> 32);
+
+  /* A few simplifications are possible here for 32-bit architectures,
+   * but they would break compatibility with the original 64-bit
+   * version. That would be a very bad idea: the 32-bit t1ha would
+   * still be (relatively) slow, yet no longer compatible. */
+ uint64_t l;
+ add64carry_last(add64carry_first(ll, lh << 32, &l), hh, lh >> 32, h);
+ add64carry_last(add64carry_first(l, hl << 32, &l), *h, hl >> 32, h);
+ return l;
+#endif
+}
+#endif /* mul_64x64_128() */
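+
+/* Sanity example: a == b == ((uint64_t)1 << 32) yields l == 0 and h == 1,
+ * i.e. the product 2^64 lands entirely in the high word. */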
+
+#ifndef mul_64x64_high
+static __maybe_unused __always_inline uint64_t mul_64x64_high(uint64_t a,
+ uint64_t b) {
+ uint64_t h;
+ mul_64x64_128(a, b, &h);
+ return h;
+}
+#endif /* mul_64x64_high */
+
+/***************************************************************************/
+
+/* 'magic' primes */
+static const uint64_t prime_0 = UINT64_C(0xEC99BF0D8372CAAB);
+static const uint64_t prime_1 = UINT64_C(0x82434FE90EDCEF39);
+static const uint64_t prime_2 = UINT64_C(0xD4F06DB99D67BE4B);
+static const uint64_t prime_3 = UINT64_C(0xBD9CACC22C6E9571);
+static const uint64_t prime_4 = UINT64_C(0x9C06FAF4D023E3AB);
+static const uint64_t prime_5 = UINT64_C(0xC060724A8424F345);
+static const uint64_t prime_6 = UINT64_C(0xCB5AF53AE3AAAC31);
+
+/* xor high and low parts of full 128-bit product */
+static __maybe_unused __always_inline uint64_t mux64(uint64_t v,
+ uint64_t prime) {
+ uint64_t l, h;
+ l = mul_64x64_128(v, prime, &h);
+ return l ^ h;
+}
+
+static __maybe_unused __always_inline uint64_t final64(uint64_t a, uint64_t b) {
+ uint64_t x = (a + rot64(b, 41)) * prime_0;
+ uint64_t y = (rot64(a, 23) + b) * prime_6;
+ return mux64(x ^ y, prime_5);
+}
+
+static __maybe_unused __always_inline void mixup64(uint64_t *__restrict a,
+ uint64_t *__restrict b,
+ uint64_t v, uint64_t prime) {
+ uint64_t h;
+ *a ^= mul_64x64_128(*b + v, prime, &h);
+ *b += h;
+}
+
+/***************************************************************************/
+
+typedef union t1ha_uint128 {
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ __uint128_t v;
+#endif
+ struct {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ uint64_t l, h;
+#else
+ uint64_t h, l;
+#endif
+ };
+} t1ha_uint128_t;
+
+static __maybe_unused __always_inline t1ha_uint128_t
+not128(const t1ha_uint128_t v) {
+ t1ha_uint128_t r;
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = ~v.v;
+#else
+ r.l = ~v.l;
+ r.h = ~v.h;
+#endif
+ return r;
+}
+
+static __maybe_unused __always_inline t1ha_uint128_t
+left128(const t1ha_uint128_t v, unsigned s) {
+ t1ha_uint128_t r;
+ assert(s < 128);
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = v.v << s;
+#else
+ r.l = (s < 64) ? v.l << s : 0;
+ r.h = (s < 64) ? (v.h << s) | (s ? v.l >> (64 - s) : 0) : v.l << (s - 64);
+#endif
+ return r;
+}
+
+static __maybe_unused __always_inline t1ha_uint128_t
+right128(const t1ha_uint128_t v, unsigned s) {
+ t1ha_uint128_t r;
+ assert(s < 128);
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = v.v >> s;
+#else
+ r.l = (s < 64) ? (s ? v.h << (64 - s) : 0) | (v.l >> s) : v.h >> (s - 64);
+ r.h = (s < 64) ? v.h >> s : 0;
+#endif
+ return r;
+}
+
+static __maybe_unused __always_inline t1ha_uint128_t or128(t1ha_uint128_t x,
+ t1ha_uint128_t y) {
+ t1ha_uint128_t r;
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = x.v | y.v;
+#else
+ r.l = x.l | y.l;
+ r.h = x.h | y.h;
+#endif
+ return r;
+}
+
+static __maybe_unused __always_inline t1ha_uint128_t xor128(t1ha_uint128_t x,
+ t1ha_uint128_t y) {
+ t1ha_uint128_t r;
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = x.v ^ y.v;
+#else
+ r.l = x.l ^ y.l;
+ r.h = x.h ^ y.h;
+#endif
+ return r;
+}
+
+static __maybe_unused __always_inline t1ha_uint128_t rot128(t1ha_uint128_t v,
+ unsigned s) {
+ s &= 127;
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+  /* guard s == 0: shifting a 128-bit value by 128 bits would be undefined */
+  if (s) v.v = (v.v << (128 - s)) | (v.v >> s);
+  return v;
+#else
+ return s ? or128(left128(v, 128 - s), right128(v, s)) : v;
+#endif
+}
+
+static __maybe_unused __always_inline t1ha_uint128_t add128(t1ha_uint128_t x,
+ t1ha_uint128_t y) {
+ t1ha_uint128_t r;
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = x.v + y.v;
+#else
+ add64carry_last(add64carry_first(x.l, y.l, &r.l), x.h, y.h, &r.h);
+#endif
+ return r;
+}
+
+static __maybe_unused __always_inline t1ha_uint128_t mul128(t1ha_uint128_t x,
+ t1ha_uint128_t y) {
+ t1ha_uint128_t r;
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = x.v * y.v;
+#else
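+  /* schoolbook 128x128 product truncated to 128 bits: the cross terms
+   * x.l * y.h and y.l * x.h contribute only to the high 64 bits */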
+ r.l = mul_64x64_128(x.l, y.l, &r.h);
+ r.h += x.l * y.h + y.l * x.h;
+#endif
+ return r;
+}
+
+/***************************************************************************/
+
+#if T1HA0_AESNI_AVAILABLE && defined(__ia32__)
+uint64_t t1ha_ia32cpu_features(void);
+
+static __maybe_unused __always_inline bool
+t1ha_ia32_AESNI_avail(uint64_t ia32cpu_features) {
+ /* check for AES-NI */
+ return (ia32cpu_features & UINT32_C(0x02000000)) != 0;
+}
+
+static __maybe_unused __always_inline bool
+t1ha_ia32_AVX_avail(uint64_t ia32cpu_features) {
+  /* check for AVX, together with OSXSAVE and AES-NI (CPUID leaf 1,
+   * ECX bits 28, 27 and 25) */
+ return (ia32cpu_features & UINT32_C(0x1A000000)) == UINT32_C(0x1A000000);
+}
+
+static __maybe_unused __always_inline bool
+t1ha_ia32_AVX2_avail(uint64_t ia32cpu_features) {
+ /* check for 'Advanced Vector Extensions 2' */
+ return ((ia32cpu_features >> 32) & 32) != 0;
+}
+
+#endif /* T1HA0_AESNI_AVAILABLE && __ia32__ */
diff --git a/include/t1ha_selfcheck.h b/include/t1ha_selfcheck.h
new file mode 100644
index 00000000..ff7c589c
--- /dev/null
+++ b/include/t1ha_selfcheck.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016-2020 Positive Technologies, https://www.ptsecurity.com,
+ * Fast Positive Hash.
+ *
+ * Portions Copyright (c) 2010-2020 Leonid Yuriev <leo@yuriev.ru>,
+ * The 1Hippeus project (t1h).
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgement in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/*
+ * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" }
+ * by [Positive Technologies](https://www.ptsecurity.ru)
+ *
+ * Briefly, it is a 64-bit Hash Function:
+ * 1. Created for 64-bit little-endian platforms, predominantly for x86_64,
+ *    but portable; it can run on any 64-bit CPU without penalty.
+ * 2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash
+ *    and all other portable hash functions (which do not use specific
+ *    hardware tricks).
+ * 3. Not suitable for cryptography.
+ *
+ * The Future will (be) Positive. Всё будет хорошо. (Everything will be fine.)
+ *
+ * ACKNOWLEDGEMENT:
+ * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев)
+ * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta!
+ */
+
+#pragma once
+#if defined(_MSC_VER) && _MSC_VER > 1800
+#pragma warning(disable : 4464) /* relative include path contains '..' */
+#endif /* MSVC */
+#include "t1ha.h"
+
+/***************************************************************************/
+/* Self-checking */
+
+extern const uint8_t t1ha_test_pattern[64];
+int t1ha_selfcheck(uint64_t (*hash)(const void *, size_t, uint64_t),
+ const uint64_t *reference_values);
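+
+/* Illustrative use, assuming T1HA2 is enabled and t1ha2_atonce() is declared
+ * in t1ha.h; a zero result means all 81 reference values matched:
+ *
+ *   if (t1ha_selfcheck(t1ha2_atonce, t1ha_refval_2atonce) != 0)
+ *     abort();
+ */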
+
+#ifndef T1HA2_DISABLED
+extern const uint64_t t1ha_refval_2atonce[81];
+extern const uint64_t t1ha_refval_2atonce128[81];
+extern const uint64_t t1ha_refval_2stream[81];
+extern const uint64_t t1ha_refval_2stream128[81];
+#endif /* T1HA2_DISABLED */
+
+#ifndef T1HA1_DISABLED
+extern const uint64_t t1ha_refval_64le[81];
+extern const uint64_t t1ha_refval_64be[81];
+#endif /* T1HA1_DISABLED */
+
+#ifndef T1HA0_DISABLED
+extern const uint64_t t1ha_refval_32le[81];
+extern const uint64_t t1ha_refval_32be[81];
+#if T1HA0_AESNI_AVAILABLE
+extern const uint64_t t1ha_refval_ia32aes_a[81];
+extern const uint64_t t1ha_refval_ia32aes_b[81];
+#endif /* T1HA0_AESNI_AVAILABLE */
+#endif /* T1HA0_DISABLED */
diff --git a/include/xxhash.h b/include/xxhash.h
index 9a880470..d11f0f63 100644
--- a/include/xxhash.h
+++ b/include/xxhash.h
@@ -1,7 +1,7 @@
/*
* xxHash - Extremely Fast Hash algorithm
* Header File
- * Copyright (C) 2012-2024 Yann Collet
+ * Copyright (C) 2012-2023 Yann Collet
*
* BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
*
@@ -32,328 +32,553 @@
* - xxHash homepage: https://www.xxhash.com
* - xxHash source repository: https://github.com/Cyan4973/xxHash
*/
+
/*!
* @mainpage xxHash
*
+ * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed
+ * limits.
+ *
+ * It is proposed in four flavors, in three families:
+ * 1. @ref XXH32_family
+ * - Classic 32-bit hash function. Simple, compact, and runs on almost all
+ * 32-bit and 64-bit systems.
+ * 2. @ref XXH64_family
+ * - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most
+ * 64-bit systems (but _not_ 32-bit systems).
+ * 3. @ref XXH3_family
+ * - Modern 64-bit and 128-bit hash function family which features improved
+ * strength and performance across the board, especially on smaller data.
+ *     It benefits greatly from SIMD and 64-bit arithmetic, without requiring them.
+ *
+ * Benchmarks
+ * ---
+ * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04.
+ * The open source benchmark program is compiled with clang v10.0 using -O3 flag.
+ *
+ * | Hash Name | ISA ext | Width | Large Data Speed | Small Data Velocity |
+ * | -------------------- | ------- | ----: | ---------------: | ------------------: |
+ * | XXH3_64bits() | @b AVX2 | 64 | 59.4 GB/s | 133.1 |
+ * | MeowHash | AES-NI | 128 | 58.2 GB/s | 52.5 |
+ * | XXH3_128bits() | @b AVX2 | 128 | 57.9 GB/s | 118.1 |
+ * | CLHash | PCLMUL | 64 | 37.1 GB/s | 58.1 |
+ * | XXH3_64bits() | @b SSE2 | 64 | 31.5 GB/s | 133.1 |
+ * | XXH3_128bits() | @b SSE2 | 128 | 29.6 GB/s | 118.1 |
+ * | RAM sequential read | | N/A | 28.0 GB/s | N/A |
+ * | ahash | AES-NI | 64 | 22.5 GB/s | 107.2 |
+ * | City64 | | 64 | 22.0 GB/s | 76.6 |
+ * | T1ha2 | | 64 | 22.0 GB/s | 99.0 |
+ * | City128 | | 128 | 21.7 GB/s | 57.7 |
+ * | FarmHash | AES-NI | 64 | 21.3 GB/s | 71.9 |
+ * | XXH64() | | 64 | 19.4 GB/s | 71.0 |
+ * | SpookyHash | | 64 | 19.3 GB/s | 53.2 |
+ * | Mum | | 64 | 18.0 GB/s | 67.0 |
+ * | CRC32C | SSE4.2 | 32 | 13.0 GB/s | 57.9 |
+ * | XXH32() | | 32 | 9.7 GB/s | 71.9 |
+ * | City32 | | 32 | 9.1 GB/s | 66.0 |
+ * | Blake3* | @b AVX2 | 256 | 4.4 GB/s | 8.1 |
+ * | Murmur3 | | 32 | 3.9 GB/s | 56.1 |
+ * | SipHash* | | 64 | 3.0 GB/s | 43.2 |
+ * | Blake3* | @b SSE2 | 256 | 2.4 GB/s | 8.1 |
+ * | HighwayHash | | 64 | 1.4 GB/s | 6.0 |
+ * | FNV64 | | 64 | 1.2 GB/s | 62.7 |
+ * | Blake2* | | 256 | 1.1 GB/s | 5.1 |
+ * | SHA1* | | 160 | 0.8 GB/s | 5.6 |
+ * | MD5* | | 128 | 0.6 GB/s | 7.8 |
+ * @note
+ * - Hashes which require a specific ISA extension are noted. SSE2 is also noted,
+ * even though it is mandatory on x64.
+ * - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic
+ * by modern standards.
+ * - Small data velocity is a rough average of an algorithm's efficiency on
+ *   small data. For more accurate information, see the wiki.
+ * - More benchmarks and strength tests are found on the wiki:
+ * https://github.com/Cyan4973/xxHash/wiki
+ *
+ * Usage
+ * ------
+ * All xxHash variants use a similar API. Changing the algorithm is a trivial
+ * substitution.
+ *
+ * @pre
+ * For functions which take an input and length parameter, the following
+ * requirements are assumed:
+ * - The range from [`input`, `input + length`) is valid, readable memory.
+ *     - The only exception is that if `length` is `0`, `input` may be `NULL`.
+ * - For C++, the objects must have the *TriviallyCopyable* property, as the
+ *     functions access bytes directly, as if they were an array of `unsigned char`.
+ *
+ * @anchor single_shot_example
+ * **Single Shot**
+ *
+ * These are stateless functions which hash a contiguous block of memory,
+ * immediately returning the result. They are the easiest and usually the fastest
+ * option.
+ *
+ * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
+ *
+ * @code{.c}
+ * #include <string.h>
+ * #include "xxhash.h"
+ *
+ * // Example for a function which hashes a null terminated string with XXH32().
+ * XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
+ * {
+ * // NULL pointers are only valid if the length is zero
+ * size_t length = (string == NULL) ? 0 : strlen(string);
+ * return XXH32(string, length, seed);
+ * }
+ * @endcode
+ *
+ *
+ * @anchor streaming_example
+ * **Streaming**
+ *
+ * These groups of functions allow incremental hashing of data of unknown
+ * size, even larger than what would fit in a `size_t`.
+ *
+ * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
+ *
+ * @code{.c}
+ * #include <stdio.h>
+ * #include <assert.h>
+ * #include "xxhash.h"
+ * // Example for a function which hashes a FILE incrementally with XXH3_64bits().
+ * XXH64_hash_t hashFile(FILE* f)
+ * {
+ * // Allocate a state struct. Do not just use malloc() or new.
+ * XXH3_state_t* state = XXH3_createState();
+ * assert(state != NULL && "Out of memory!");
+ * // Reset the state to start a new hashing session.
+ * XXH3_64bits_reset(state);
+ * char buffer[4096];
+ * size_t count;
+ * // Read the file in chunks
+ * while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
+ * // Run update() as many times as necessary to process the data
+ * XXH3_64bits_update(state, buffer, count);
+ * }
+ * // Retrieve the finalized hash. This will not change the state.
+ * XXH64_hash_t result = XXH3_64bits_digest(state);
+ * // Free the state. Do not use free().
+ * XXH3_freeState(state);
+ * return result;
+ * }
+ * @endcode
+ *
+ * Streaming functions generate the xxHash value from an incremental input.
+ * This method is slower than single-call functions, due to state management.
+ * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
+ *
+ * An XXH state must first be allocated using `XXH*_createState()`.
+ *
+ * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
+ *
+ * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
+ *
+ * The function returns an error code, with 0 meaning OK, and any other value
+ * meaning there is an error.
+ *
+ * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
+ * This function returns the nn-bit hash as an unsigned int or unsigned long long.
+ *
+ * It's still possible to continue inserting input into the hash state after a
+ * digest, and generate new hash values later on by invoking `XXH*_digest()`.
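+ *
+ * For example (illustrative; `part1`/`part2` are arbitrary buffers):
+ * @code{.c}
+ * XXH3_64bits_update(state, part1, len1);
+ * XXH64_hash_t h1 = XXH3_64bits_digest(state); // hash of part1
+ * XXH3_64bits_update(state, part2, len2);
+ * XXH64_hash_t h2 = XXH3_64bits_digest(state); // hash of part1 then part2
+ * @endcode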
+ *
+ * When done, release the state using `XXH*_freeState()`.
+ *
+ *
+ * @anchor canonical_representation_example
+ * **Canonical Representation**
+ *
+ * The default return values from XXH functions are unsigned 32, 64 and 128 bit
+ * integers.
+ * This is the simplest and fastest format for further post-processing.
+ *
+ * However, this leaves open the question of byte-level ordering, since
+ * little- and big-endian conventions store the same number differently.
+ *
+ * The canonical representation settles this issue by mandating big-endian
+ * convention, the same convention as human-readable numbers (large digits first).
+ *
+ * When writing hash values to storage, sending them over a network, or printing
+ * them, it's highly recommended to use the canonical representation to ensure
+ * portability across a wider range of systems, present and future.
+ *
+ * The following functions allow transformation of hash values to and from
+ * canonical format.
+ *
+ * XXH32_canonicalFromHash(), XXH32_hashFromCanonical(),
+ * XXH64_canonicalFromHash(), XXH64_hashFromCanonical(),
+ * XXH128_canonicalFromHash(), XXH128_hashFromCanonical(),
+ *
+ * @code{.c}
+ * #include <stdio.h>
+ * #include "xxhash.h"
+ *
+ * // Example for a function which prints XXH32_hash_t in human readable format
+ * void printXxh32(XXH32_hash_t hash)
+ * {
+ * XXH32_canonical_t cano;
+ * XXH32_canonicalFromHash(&cano, hash);
+ * size_t i;
+ * for(i = 0; i < sizeof(cano.digest); ++i) {
+ * printf("%02x", cano.digest[i]);
+ * }
+ * printf("\n");
+ * }
+ *
+ * // Example for a function which converts XXH32_canonical_t to XXH32_hash_t
+ * XXH32_hash_t convertCanonicalToXxh32(XXH32_canonical_t cano)
+ * {
+ * XXH32_hash_t hash = XXH32_hashFromCanonical(&cano);
+ * return hash;
+ * }
+ * @endcode
+ *
+ *
* @file xxhash.h
* xxHash prototypes and implementation
*/
-/* TODO: update */
-/* Notice extracted from xxHash homepage:
-
-xxHash is an extremely fast hash algorithm, running at RAM speed limits.
-It also successfully passes all tests from the SMHasher suite.
-
-Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo
-@3GHz)
-
-Name Speed Q.Score Author
-xxHash 5.4 GB/s 10
-CrapWow 3.2 GB/s 2 Andrew
-MurmurHash 3a 2.7 GB/s 10 Austin Appleby
-SpookyHash 2.0 GB/s 10 Bob Jenkins
-SBox 1.4 GB/s 9 Bret Mulvey
-Lookup3 1.2 GB/s 9 Bob Jenkins
-SuperFastHash 1.2 GB/s 1 Paul Hsieh
-CityHash64 1.05 GB/s 10 Pike & Alakuijala
-FNV 0.55 GB/s 5 Fowler, Noll, Vo
-CRC32 0.43 GB/s 9
-MD5-32 0.33 GB/s 10 Ronald L. Rivest
-SHA1-32 0.28 GB/s 10
-
-Q.Score is a measure of quality of the hash function.
-It depends on successfully passing SMHasher test set.
-10 is a perfect score.
-
-Note: SMHasher's CRC32 implementation is not the fastest one.
-Other speed-oriented implementations can be faster,
-especially in combination with PCLMUL instruction:
-https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735
-
-A 64-bit version, named XXH64, is available since r35.
-It offers much better speed, but for 64-bit applications only.
-Name Speed on 64 bits Speed on 32 bits
-XXH64 13.8 GB/s 1.9 GB/s
-XXH32 6.8 GB/s 6.0 GB/s
-*/
-
-#if defined(__cplusplus)
-extern "C" {
+#if defined (__cplusplus)
+extern "C" {
#endif
/* ****************************
* INLINE mode
******************************/
/*!
- * XXH_INLINE_ALL (and XXH_PRIVATE_API)
+ * @defgroup public Public API
+ * Contains details on the public xxHash functions.
+ * @{
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Gives access to internal state declaration, required for static allocation.
+ *
+ * Incompatible with dynamic linking, due to risks of ABI changes.
+ *
+ * Usage:
+ * @code{.c}
+ * #define XXH_STATIC_LINKING_ONLY
+ * #include "xxhash.h"
+ * @endcode
+ */
+# define XXH_STATIC_LINKING_ONLY
+/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */
+
+/*!
+ * @brief Gives access to internal definitions.
+ *
+ * Usage:
+ * @code{.c}
+ * #define XXH_STATIC_LINKING_ONLY
+ * #define XXH_IMPLEMENTATION
+ * #include "xxhash.h"
+ * @endcode
+ */
+# define XXH_IMPLEMENTATION
+/* Do not undef XXH_IMPLEMENTATION for Doxygen */
+
+/*!
+ * @brief Exposes the implementation and marks all functions as `inline`.
+ *
* Use these build macros to inline xxhash into the target unit.
* Inlining improves performance on small inputs, especially when the length is
* expressed as a compile-time constant:
*
- * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
+ * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
*
* It also keeps xxHash symbols private to the unit, so they are not exported.
*
* Usage:
+ * @code{.c}
* #define XXH_INLINE_ALL
* #include "xxhash.h"
- *
+ * @endcode
* Do not compile and link xxhash.o as a separate object, as it is not useful.
*/
-#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) && \
- !defined(XXH_INLINE_ALL_31684351384)
-/* this section should be traversed only once */
- #define XXH_INLINE_ALL_31684351384
-/* give access to the advanced API, required to compile implementations */
- #undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */
- #define XXH_STATIC_LINKING_ONLY
-/* make all functions private */
- #undef XXH_PUBLIC_API
- #if defined(__GNUC__)
- #define XXH_PUBLIC_API static __inline __attribute__((unused))
- #elif defined(__cplusplus) || \
- (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
- #define XXH_PUBLIC_API static inline
- #elif defined(_MSC_VER)
- #define XXH_PUBLIC_API static __inline
- #else
- /* note: this version may generate warnings for unused static functions */
- #define XXH_PUBLIC_API static
- #endif
+# define XXH_INLINE_ALL
+# undef XXH_INLINE_ALL
+/*!
+ * @brief Exposes the implementation without marking functions as inline.
+ */
+# define XXH_PRIVATE_API
+# undef XXH_PRIVATE_API
+/*!
+ * @brief Emulate a namespace by transparently prefixing all symbols.
+ *
+ * If you want to include _and expose_ xxHash functions from within your own
+ * library, but also want to avoid symbol collisions with other libraries which
+ * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix
+ * any public symbol from the xxhash library with the value of @ref XXH_NAMESPACE
+ * (therefore, avoid empty or numeric values).
+ *
+ * Note that no change is required within the calling program as long as it
+ * includes `xxhash.h`: Regular symbol names will be automatically translated
+ * by this header.
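+ *
+ * Usage (illustrative; the prefix name is arbitrary):
+ * @code{.c}
+ * #define XXH_NAMESPACE mylib_
+ * #include "xxhash.h"
+ * // XXH64() now resolves to the symbol mylib_XXH64
+ * @endcode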
+ */
+# define XXH_NAMESPACE /* YOUR NAME HERE */
+# undef XXH_NAMESPACE
+#endif
-/*
- * This part deals with the special case where a unit wants to inline xxHash,
- * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
- * such as part of some previously included *.h header file.
- * Without further action, the new include would just be ignored,
- * and functions would effectively _not_ be inlined (silent failure).
- * The following macros solve this situation by prefixing all inlined names,
- * avoiding naming collision with previous inclusions.
- */
-/* Before that, we unconditionally #undef all symbols,
- * in case they were already defined with XXH_NAMESPACE.
- * They will then be redefined for XXH_INLINE_ALL
- */
- #undef XXH_versionNumber
-/* XXH32 */
- #undef XXH32
- #undef XXH32_createState
- #undef XXH32_freeState
- #undef XXH32_reset
- #undef XXH32_update
- #undef XXH32_digest
- #undef XXH32_copyState
- #undef XXH32_canonicalFromHash
- #undef XXH32_hashFromCanonical
-/* XXH64 */
- #undef XXH64
- #undef XXH64_createState
- #undef XXH64_freeState
- #undef XXH64_reset
- #undef XXH64_update
- #undef XXH64_digest
- #undef XXH64_copyState
- #undef XXH64_canonicalFromHash
- #undef XXH64_hashFromCanonical
-/* XXH3_64bits */
- #undef XXH3_64bits
- #undef XXH3_64bits_withSecret
- #undef XXH3_64bits_withSeed
- #undef XXH3_createState
- #undef XXH3_freeState
- #undef XXH3_copyState
- #undef XXH3_64bits_reset
- #undef XXH3_64bits_reset_withSeed
- #undef XXH3_64bits_reset_withSecret
- #undef XXH3_64bits_update
- #undef XXH3_64bits_digest
- #undef XXH3_generateSecret
-/* XXH3_128bits */
- #undef XXH128
- #undef XXH3_128bits
- #undef XXH3_128bits_withSeed
- #undef XXH3_128bits_withSecret
- #undef XXH3_128bits_reset
- #undef XXH3_128bits_reset_withSeed
- #undef XXH3_128bits_reset_withSecret
- #undef XXH3_128bits_update
- #undef XXH3_128bits_digest
- #undef XXH128_isEqual
- #undef XXH128_cmp
- #undef XXH128_canonicalFromHash
- #undef XXH128_hashFromCanonical
-/* Finally, free the namespace itself */
- #undef XXH_NAMESPACE
-
-/* employ the namespace for XXH_INLINE_ALL */
- #define XXH_NAMESPACE XXH_INLINE_
-/*
- * Some identifiers (enums, type names) are not symbols,
- * but they must nonetheless be renamed to avoid redeclaration.
- * Alternative solution: do not redeclare them.
- * However, this requires some #ifdefs, and has a more dispersed impact.
- * Meanwhile, renaming can be achieved in a single place.
- */
- #define XXH_IPREF(Id) XXH_NAMESPACE##Id
- #define XXH_OK XXH_IPREF(XXH_OK)
- #define XXH_ERROR XXH_IPREF(XXH_ERROR)
- #define XXH_errorcode XXH_IPREF(XXH_errorcode)
- #define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
- #define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
- #define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
- #define XXH32_state_s XXH_IPREF(XXH32_state_s)
- #define XXH32_state_t XXH_IPREF(XXH32_state_t)
- #define XXH64_state_s XXH_IPREF(XXH64_state_s)
- #define XXH64_state_t XXH_IPREF(XXH64_state_t)
- #define XXH3_state_s XXH_IPREF(XXH3_state_s)
- #define XXH3_state_t XXH_IPREF(XXH3_state_t)
- #define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
-/* Ensure the header is parsed again, even if it was previously included */
- #undef XXHASH_H_5627135585666179
- #undef XXHASH_H_STATIC_13879238742
-#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
+#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
+ && !defined(XXH_INLINE_ALL_31684351384)
+ /* this section should be traversed only once */
+# define XXH_INLINE_ALL_31684351384
+ /* give access to the advanced API, required to compile implementations */
+# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */
+# define XXH_STATIC_LINKING_ONLY
+ /* make all functions private */
+# undef XXH_PUBLIC_API
+# if defined(__GNUC__)
+# define XXH_PUBLIC_API static __inline __attribute__((unused))
+# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define XXH_PUBLIC_API static inline
+# elif defined(_MSC_VER)
+# define XXH_PUBLIC_API static __inline
+# else
+ /* note: this version may generate warnings for unused static functions */
+# define XXH_PUBLIC_API static
+# endif
+
+ /*
+ * This part deals with the special case where a unit wants to inline xxHash,
+ * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
+ * such as part of some previously included *.h header file.
+ * Without further action, the new include would just be ignored,
+ * and functions would effectively _not_ be inlined (silent failure).
+ * The following macros solve this situation by prefixing all inlined names,
+ * avoiding naming collision with previous inclusions.
+ */
+ /* Before that, we unconditionally #undef all symbols,
+ * in case they were already defined with XXH_NAMESPACE.
+ * They will then be redefined for XXH_INLINE_ALL
+ */
+# undef XXH_versionNumber
+ /* XXH32 */
+# undef XXH32
+# undef XXH32_createState
+# undef XXH32_freeState
+# undef XXH32_reset
+# undef XXH32_update
+# undef XXH32_digest
+# undef XXH32_copyState
+# undef XXH32_canonicalFromHash
+# undef XXH32_hashFromCanonical
+ /* XXH64 */
+# undef XXH64
+# undef XXH64_createState
+# undef XXH64_freeState
+# undef XXH64_reset
+# undef XXH64_update
+# undef XXH64_digest
+# undef XXH64_copyState
+# undef XXH64_canonicalFromHash
+# undef XXH64_hashFromCanonical
+ /* XXH3_64bits */
+# undef XXH3_64bits
+# undef XXH3_64bits_withSecret
+# undef XXH3_64bits_withSeed
+# undef XXH3_64bits_withSecretandSeed
+# undef XXH3_createState
+# undef XXH3_freeState
+# undef XXH3_copyState
+# undef XXH3_64bits_reset
+# undef XXH3_64bits_reset_withSeed
+# undef XXH3_64bits_reset_withSecret
+# undef XXH3_64bits_update
+# undef XXH3_64bits_digest
+# undef XXH3_generateSecret
+ /* XXH3_128bits */
+# undef XXH128
+# undef XXH3_128bits
+# undef XXH3_128bits_withSeed
+# undef XXH3_128bits_withSecret
+# undef XXH3_128bits_reset
+# undef XXH3_128bits_reset_withSeed
+# undef XXH3_128bits_reset_withSecret
+# undef XXH3_128bits_reset_withSecretandSeed
+# undef XXH3_128bits_update
+# undef XXH3_128bits_digest
+# undef XXH128_isEqual
+# undef XXH128_cmp
+# undef XXH128_canonicalFromHash
+# undef XXH128_hashFromCanonical
+ /* Finally, free the namespace itself */
+# undef XXH_NAMESPACE
+
+ /* employ the namespace for XXH_INLINE_ALL */
+# define XXH_NAMESPACE XXH_INLINE_
+ /*
+ * Some identifiers (enums, type names) are not symbols,
+ * but they must nonetheless be renamed to avoid redeclaration.
+ * Alternative solution: do not redeclare them.
+ * However, this requires some #ifdefs, and has a more dispersed impact.
+ * Meanwhile, renaming can be achieved in a single place.
+ */
+# define XXH_IPREF(Id) XXH_NAMESPACE ## Id
+# define XXH_OK XXH_IPREF(XXH_OK)
+# define XXH_ERROR XXH_IPREF(XXH_ERROR)
+# define XXH_errorcode XXH_IPREF(XXH_errorcode)
+# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
+# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
+# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
+# define XXH32_state_s XXH_IPREF(XXH32_state_s)
+# define XXH32_state_t XXH_IPREF(XXH32_state_t)
+# define XXH64_state_s XXH_IPREF(XXH64_state_s)
+# define XXH64_state_t XXH_IPREF(XXH64_state_t)
+# define XXH3_state_s XXH_IPREF(XXH3_state_s)
+# define XXH3_state_t XXH_IPREF(XXH3_state_t)
+# define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
+ /* Ensure the header is parsed again, even if it was previously included */
+# undef XXHASH_H_5627135585666179
+# undef XXHASH_H_STATIC_13879238742
+#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
/* ****************************************************************
* Stable API
*****************************************************************/
#ifndef XXHASH_H_5627135585666179
- #define XXHASH_H_5627135585666179 1
-
- /*!
- * @defgroup public Public API
- * Contains details on the public xxHash functions.
- * @{
-
- */
- /* specific declaration modes for Windows */
- #if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
- #if defined(WIN32) && defined(_MSC_VER) && \
- (defined(XXH_IMPORT) || defined(XXH_EXPORT))
- #ifdef XXH_EXPORT
- #define XXH_PUBLIC_API __declspec(dllexport)
- #elif XXH_IMPORT
- #define XXH_PUBLIC_API __declspec(dllimport)
- #endif
- #else
- #define XXH_PUBLIC_API /* do nothing */
- #endif
- #endif
-
- #ifdef XXH_DOXYGEN
- /*!
- * @brief Emulate a namespace by transparently prefixing all symbols.
- *
- * If you want to include _and expose_ xxHash functions from within your own
- * library, but also want to avoid symbol collisions with other libraries
- * which may also include xxHash, you can use XXH_NAMESPACE to automatically
- * prefix any public symbol from xxhash library with the value of
- * XXH_NAMESPACE (therefore, avoid empty or numeric values).
- *
- * Note that no change is required within the calling program as long as it
- * includes `xxhash.h`: Regular symbol names will be automatically
- * translated by this header.
- */
- #define XXH_NAMESPACE /* YOUR NAME HERE */
- #undef XXH_NAMESPACE
- #endif
-
- #ifdef XXH_NAMESPACE
- #define XXH_CAT(A, B) A##B
- #define XXH_NAME2(A, B) XXH_CAT(A, B)
- #define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
- /* XXH32 */
- #define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
- #define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
- #define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
- #define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
- #define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
- #define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
- #define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
- #define XXH32_canonicalFromHash \
- XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
- #define XXH32_hashFromCanonical \
- XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
- /* XXH64 */
- #define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
- #define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
- #define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
- #define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
- #define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
- #define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
- #define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
- #define XXH64_canonicalFromHash \
- XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
- #define XXH64_hashFromCanonical \
- XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
- /* XXH3_64bits */
- #define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
- #define XXH3_64bits_withSecret \
- XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
- #define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
- #define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
- #define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
- #define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
- #define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
- #define XXH3_64bits_reset_withSeed \
- XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
- #define XXH3_64bits_reset_withSecret \
- XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
- #define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
- #define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
- #define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
- /* XXH3_128bits */
- #define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
- #define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
- #define XXH3_128bits_withSeed \
- XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
- #define XXH3_128bits_withSecret \
- XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
- #define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
- #define XXH3_128bits_reset_withSeed \
- XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
- #define XXH3_128bits_reset_withSecret \
- XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
- #define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
- #define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
- #define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
- #define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
- #define XXH128_canonicalFromHash \
- XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
- #define XXH128_hashFromCanonical \
- XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
- #endif
-
- /* *************************************
- * Version
- ***************************************/
- #define XXH_VERSION_MAJOR 0
- #define XXH_VERSION_MINOR 8
- #define XXH_VERSION_RELEASE 1
- #define XXH_VERSION_NUMBER \
- (XXH_VERSION_MAJOR * 100 * 100 + XXH_VERSION_MINOR * 100 + \
- XXH_VERSION_RELEASE)
+#define XXHASH_H_5627135585666179 1
+
+/*! @brief Marks a global symbol. */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+# ifdef XXH_EXPORT
+# define XXH_PUBLIC_API __declspec(dllexport)
+# elif XXH_IMPORT
+# define XXH_PUBLIC_API __declspec(dllimport)
+# endif
+# else
+# define XXH_PUBLIC_API /* do nothing */
+# endif
+#endif
+
+#ifdef XXH_NAMESPACE
+# define XXH_CAT(A,B) A##B
+# define XXH_NAME2(A,B) XXH_CAT(A,B)
+# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+/* XXH32 */
+# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+/* XXH64 */
+# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+/* XXH3_64bits */
+# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
+# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
+# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
+# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
+# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
+# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
+# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
+# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
+# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
+# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
+# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
+# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
+# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
+# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
+# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
+/* XXH3_128bits */
+# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
+# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
+# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
+# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
+# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
+# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
+# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
+# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
+# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
+# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
+# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
+# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
+# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
+# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
+# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
+#endif
+
+
+/* *************************************
+* Compiler specifics
+***************************************/
+
+/* specific declaration modes for Windows */
+#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
+# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+# ifdef XXH_EXPORT
+# define XXH_PUBLIC_API __declspec(dllexport)
+# elif XXH_IMPORT
+# define XXH_PUBLIC_API __declspec(dllimport)
+# endif
+# else
+# define XXH_PUBLIC_API /* do nothing */
+# endif
+#endif
+
+#if defined (__GNUC__)
+# define XXH_CONSTF __attribute__((const))
+# define XXH_PUREF __attribute__((pure))
+# define XXH_MALLOCF __attribute__((malloc))
+#else
+# define XXH_CONSTF /* disable */
+# define XXH_PUREF
+# define XXH_MALLOCF
+#endif
+
+/* *************************************
+* Version
+***************************************/
+#define XXH_VERSION_MAJOR 0
+#define XXH_VERSION_MINOR 8
+#define XXH_VERSION_RELEASE 2
+/*! @brief Version number, encoded as two digits each */
+#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
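+/* e.g. for version 0.8.2: 0*100*100 + 8*100 + 2 == 802 */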
/*!
* @brief Obtains the xxHash version.
*
- * This is only useful when xxHash is compiled as a shared library, as it is
- * independent of the version defined in the header.
+ * This is mostly useful when xxHash is compiled as a shared library,
+ * since the returned value comes from the library, as opposed to the header file.
*
- * @return `XXH_VERSION_NUMBER` as of when the libray was compiled.
+ * @return @ref XXH_VERSION_NUMBER of the invoked library.
+ */
+XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);
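+
+/*
+ * A quick sketch to check that the header and the linked library agree on the
+ * version (illustrative only):
+ * @code{.c}
+ * #include <stdio.h>
+ * #include "xxhash.h"
+ *
+ * int main(void)
+ * {
+ *     printf("header : %u\n", (unsigned)XXH_VERSION_NUMBER);  // compile-time
+ *     printf("library: %u\n", XXH_versionNumber());           // run-time
+ *     return 0;
+ * }
+ * @endcode
+ */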
+
+
+/* ****************************
+* Common basic types
+******************************/
+#include <stddef.h> /* size_t */
+/*!
+ * @brief Exit code for the streaming API.
*/
-XXH_PUBLIC_API unsigned XXH_versionNumber(void);
+typedef enum {
+ XXH_OK = 0, /*!< OK */
+ XXH_ERROR /*!< Error */
+} XXH_errorcode;
- /* ****************************
- * Definitions
- ******************************/
- #include <stddef.h> /* size_t */
-typedef enum { XXH_OK = 0, XXH_ERROR } XXH_errorcode;
- /*-**********************************************************************
- * 32-bit hash
- ************************************************************************/
- #if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
+/*-**********************************************************************
+* 32-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
* @brief An unsigned 32-bit integer.
*
@@ -361,51 +586,44 @@ typedef enum { XXH_OK = 0, XXH_ERROR } XXH_errorcode;
*/
typedef uint32_t XXH32_hash_t;
- #elif !defined(__VMS) && \
- (defined(__cplusplus) || \
- (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */))
- #include <stdint.h>
-typedef uint32_t XXH32_hash_t;
-
- #else
- #include <limits.h>
- #if UINT_MAX == 0xFFFFFFFFUL
-typedef unsigned int XXH32_hash_t;
- #else
- #if ULONG_MAX == 0xFFFFFFFFUL
-typedef unsigned long XXH32_hash_t;
- #else
- #error "unsupported platform: need a 32-bit type"
- #endif
- #endif
- #endif
+#elif !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint32_t XXH32_hash_t;
+
+#else
+# include <limits.h>
+# if UINT_MAX == 0xFFFFFFFFUL
+ typedef unsigned int XXH32_hash_t;
+# elif ULONG_MAX == 0xFFFFFFFFUL
+ typedef unsigned long XXH32_hash_t;
+# else
+# error "unsupported platform: need a 32-bit type"
+# endif
+#endif
/*!
* @}
*
- * @defgroup xxh32_family XXH32 family
+ * @defgroup XXH32_family XXH32 family
* @ingroup public
* Contains functions used in the classic 32-bit xxHash algorithm.
*
* @note
- * XXH32 is considered rather weak by today's standards.
- * The @ref xxh3_family provides competitive speed for both 32-bit and 64-bit
- * systems, and offers true 64/128 bit hash results. It provides a superior
- * level of dispersion, and greatly reduces the risks of collisions.
+ * XXH32 is useful for older platforms, with no or poor 64-bit performance.
+ * Note that the @ref XXH3_family provides competitive speed for both 32-bit
+ * and 64-bit systems, and offers true 64/128 bit hash results.
*
- * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families
- * @see @ref xxh32_impl for implementation details
+ * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families
+ * @see @ref XXH32_impl for implementation details
* @{
-
*/
/*!
* @brief Calculates the 32-bit hash of @p input using xxHash32.
*
- * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
- *
- * @param input The block of data to be hashed, at least @p length bytes in
- * size.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
* @param length The length of @p input, in bytes.
* @param seed The 32-bit seed to alter the hash's output predictably.
*
@@ -414,94 +632,46 @@ typedef unsigned long XXH32_hash_t;
* readable, contiguous memory. However, if @p length is `0`, @p input may be
* `NULL`. In C++, this also must be *TriviallyCopyable*.
*
- * @return The calculated 32-bit hash value.
+ * @return The calculated 32-bit xxHash32 value.
*
- * @see
- * XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
- * Direct equivalents for the other variants of xxHash.
- * @see
- * XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
- */
-XXH_PUBLIC_API XXH32_hash_t XXH32(const void *input, size_t length,
- XXH32_hash_t seed);
-
-/*!
- * Streaming functions generate the xxHash value from an incremental input.
- * This method is slower than single-call functions, due to state management.
- * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
- *
- * An XXH state must first be allocated using `XXH*_createState()`.
- *
- * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
- *
- * Then, feed the hash state by calling `XXH*_update()` as many times as
- * necessary.
- *
- * The function returns an error code, with 0 meaning OK, and any other value
- * meaning there is an error.
- *
- * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
- * This function returns the nn-bits hash as an int or long long.
- *
- * It's still possible to continue inserting input into the hash state after a
- * digest, and generate new hash values later on by invoking `XXH*_digest()`.
- *
- * When done, release the state using `XXH*_freeState()`.
- *
- * Example code for incrementally hashing a file:
- * @code{.c}
- * #include <stdio.h>
- * #include <xxhash.h>
- * #define BUFFER_SIZE 256
- *
- * // Note: XXH64 and XXH3 use the same interface.
- * XXH32_hash_t
- * hashFile(FILE* stream)
- * {
-
- * XXH32_state_t* state;
- * unsigned char buf[BUFFER_SIZE];
- * size_t amt;
- * XXH32_hash_t hash;
- *
- * state = XXH32_createState(); // Create a state
- * assert(state != NULL); // Error check here
- * XXH32_reset(state, 0xbaad5eed); // Reset state with our seed
- * while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) {
-
- * XXH32_update(state, buf, amt); // Hash the file in chunks
- * }
- * hash = XXH32_digest(state); // Finalize the hash
- * XXH32_freeState(state); // Clean up
- * return hash;
- * }
- * @endcode
+ * @see @ref single_shot_example "Single Shot Example" for an example.
*/
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
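+
+/*
+ * A minimal single-shot sketch; the buffer contents and the zero seed are
+ * illustrative:
+ * @code{.c}
+ * #include <stdio.h>
+ * #include "xxhash.h"
+ *
+ * int main(void)
+ * {
+ *     const char data[] = "hello world";
+ *     XXH32_hash_t hash = XXH32(data, sizeof(data) - 1, 0);  // seed == 0
+ *     printf("%08x\n", (unsigned)hash);
+ *     return 0;
+ * }
+ * @endcode
+ */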
+#ifndef XXH_NO_STREAM
/*!
* @typedef struct XXH32_state_s XXH32_state_t
* @brief The opaque state struct for the XXH32 streaming API.
*
* @see XXH32_state_s for details.
+ * @see @ref streaming_example "Streaming Example"
*/
typedef struct XXH32_state_s XXH32_state_t;
/*!
* @brief Allocates an @ref XXH32_state_t.
*
- * Must be freed with XXH32_freeState().
- * @return An allocated XXH32_state_t on success, `NULL` on failure.
+ * @return An allocated pointer of @ref XXH32_state_t on success.
+ * @return `NULL` on failure.
+ *
+ * @note Must be freed with XXH32_freeState().
+ *
+ * @see @ref streaming_example "Streaming Example"
*/
-XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void);
+XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void);
/*!
* @brief Frees an @ref XXH32_state_t.
*
- * Must be allocated with XXH32_createState().
- * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref
- * XXH32_createState().
- * @return XXH_OK.
+ * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
+ *
+ * @return @ref XXH_OK.
+ *
+ * @note @p statePtr must be allocated with XXH32_createState().
+ *
+ * @see @ref streaming_example "Streaming Example"
+ *
*/
-XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr);
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
/*!
* @brief Copies one @ref XXH32_state_t to another.
*
@@ -510,33 +680,31 @@ XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr);
* @pre
* @p dst_state and @p src_state must not be `NULL` and must not overlap.
*/
-XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dst_state,
- const XXH32_state_t *src_state);
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
/*!
* @brief Resets an @ref XXH32_state_t to begin a new hash.
*
- * This function resets and seeds a state. Call it before @ref XXH32_update().
- *
* @param statePtr The state struct to reset.
* @param seed The 32-bit seed to alter the hash result predictably.
*
* @pre
* @p statePtr must not be `NULL`.
*
- * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note This function resets and seeds a state. Call it before @ref XXH32_update().
+ *
+ * @see @ref streaming_example "Streaming Example"
*/
-XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr,
- XXH32_hash_t seed);
+XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
/*!
* @brief Consumes a block of @p input to an @ref XXH32_state_t.
*
- * Call this to incrementally consume blocks of data.
- *
* @param statePtr The state struct to update.
- * @param input The block of data to be hashed, at least @p length bytes in
- * size.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
* @param length The length of @p input, in bytes.
*
* @pre
@@ -546,70 +714,55 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr,
* readable, contiguous memory. However, if @p length is `0`, @p input may be
* `NULL`. In C++, this also must be *TriviallyCopyable*.
*
- * @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note Call this to incrementally consume blocks of data.
+ *
+ * @see @ref streaming_example "Streaming Example"
*/
-XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *statePtr,
- const void *input, size_t length);
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
/*!
* @brief Returns the calculated hash value from an @ref XXH32_state_t.
*
- * @note
- * Calling XXH32_digest() will not affect @p statePtr, so you can update,
- * digest, and update again.
- *
* @param statePtr The state struct to calculate the hash from.
*
* @pre
* @p statePtr must not be `NULL`.
*
- * @return The calculated xxHash32 value from that state.
- */
-XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t *statePtr);
-
-/******* Canonical representation *******/
-
-/*
- * The default return values from XXH functions are unsigned 32 and 64 bit
- * integers.
- * This the simplest and fastest format for further post-processing.
- *
- * However, this leaves open the question of what is the order on the byte
- * level, since little and big endian conventions will store the same number
- * differently.
- *
- * The canonical representation settles this issue by mandating big-endian
- * convention, the same convention as human-readable numbers (large digits
- * first).
+ * @return The calculated 32-bit xxHash32 value from that state.
*
- * When writing hash values to storage, sending them over a network, or printing
- * them, it's highly recommended to use the canonical representation to ensure
- * portability across a wider range of systems, present and future.
+ * @note
+ * Calling XXH32_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
*
- * The following functions allow transformation of hash values to and from
- * canonical format.
+ * @see @ref streaming_example "Streaming Example"
*/
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
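+
+/*
+ * A compact streaming sketch for hashing a file in chunks; the buffer size
+ * and the zero seed are illustrative:
+ * @code{.c}
+ * #include <assert.h>
+ * #include <stdio.h>
+ * #include "xxhash.h"
+ *
+ * XXH32_hash_t hashFile(FILE* stream)
+ * {
+ *     unsigned char buf[4096];
+ *     size_t amt;
+ *     XXH32_hash_t hash;
+ *     XXH32_state_t* state = XXH32_createState();  // create a state
+ *     assert(state != NULL);                       // error check here
+ *     XXH32_reset(state, 0);                       // reset with our seed
+ *     while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) {
+ *         XXH32_update(state, buf, amt);           // hash the file in chunks
+ *     }
+ *     hash = XXH32_digest(state);                  // finalize the hash
+ *     XXH32_freeState(state);                      // clean up
+ *     return hash;
+ * }
+ * @endcode
+ */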
+#endif /* !XXH_NO_STREAM */
+
+/******* Canonical representation *******/
/*!
* @brief Canonical (big endian) representation of @ref XXH32_hash_t.
*/
typedef struct {
-
- unsigned char digest[4]; /*!< Hash bytes, big endian */
-
+ unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;
/*!
* @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
*
- * @param dst The @ref XXH32_canonical_t pointer to be stored to.
+ * @param dst The @ref XXH32_canonical_t pointer to be stored to.
* @param hash The @ref XXH32_hash_t to be converted.
*
* @pre
* @p dst must not be `NULL`.
+ *
+ * @see @ref canonical_representation_example "Canonical Representation Example"
*/
-XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst,
- XXH32_hash_t hash);
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
/*!
* @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
@@ -620,103 +773,127 @@ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst,
* @p src must not be `NULL`.
*
* @return The converted hash.
+ *
+ * @see @ref canonical_representation_example "Canonical Representation Example"
+ */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
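+
+/*
+ * A round-trip sketch of the canonical form; actually writing the digest
+ * bytes to disk or a socket is left out:
+ * @code{.c}
+ * #include "xxhash.h"
+ *
+ * XXH32_hash_t roundtrip(XXH32_hash_t hash)
+ * {
+ *     XXH32_canonical_t canonical;
+ *     XXH32_canonicalFromHash(&canonical, hash);   // native -> big endian
+ *     // canonical.digest is now safe to store or transmit
+ *     return XXH32_hashFromCanonical(&canonical);  // big endian -> native
+ * }
+ * @endcode
+ */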
+
+
+/*! @cond Doxygen ignores this part */
+#ifdef __has_attribute
+# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+# define XXH_HAS_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * C23 __STDC_VERSION__ number hasn't been specified yet. For now
+ * leave as `201711L` (C17 + 1).
+ * TODO: Update to the correct value when it has been specified.
+ */
+#define XXH_C23_VN 201711L
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/* C-language Attributes are added in C23. */
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
+# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
+#else
+# define XXH_HAS_C_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+# define XXH_HAS_CPP_ATTRIBUTE(x) 0
+#endif
+/*! @endcond */
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define the XXH_FALLTHROUGH macro for annotating switch cases with the
+ * 'fallthrough' attribute introduced in C++17 and C23.
+ * C++17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
+ * C23 : https://en.cppreference.com/w/c/language/attributes/fallthrough
+ */
+#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
+# define XXH_FALLTHROUGH [[fallthrough]]
+#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
+# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
+#else
+# define XXH_FALLTHROUGH /* fallthrough */
+#endif
+/*! @endcond */
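+
+/*
+ * A sketch of the intended use; the tail-processing helper below is
+ * illustrative and not part of the API:
+ * @code{.c}
+ * #include "xxhash.h"
+ *
+ * unsigned addTail(const unsigned char* p, size_t len, unsigned acc)
+ * {
+ *     switch (len & 3) {
+ *         case 3: acc += p[2];
+ *                 XXH_FALLTHROUGH;  // deliberate fall through
+ *         case 2: acc += p[1];
+ *                 XXH_FALLTHROUGH;  // deliberate fall through
+ *         case 1: acc += p[0];
+ *     }
+ *     return acc;
+ * }
+ * @endcode
+ */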
+
+/*! @cond Doxygen ignores this part */
+/*
+ * Define XXH_NOESCAPE for annotated pointers in public API.
+ * https://clang.llvm.org/docs/AttributeReference.html#noescape
+ * As of writing this, only supported by clang.
*/
-XXH_PUBLIC_API XXH32_hash_t
-XXH32_hashFromCanonical(const XXH32_canonical_t *src);
-
- #ifdef __has_attribute
- #define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
- #else
- #define XXH_HAS_ATTRIBUTE(x) 0
- #endif
-
- /* C-language Attributes are added in C23. */
- #if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && \
- defined(__has_c_attribute)
- #define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
- #else
- #define XXH_HAS_C_ATTRIBUTE(x) 0
- #endif
-
- #if defined(__cplusplus) && defined(__has_cpp_attribute)
- #define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
- #else
- #define XXH_HAS_CPP_ATTRIBUTE(x) 0
- #endif
-
- /*
- Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough'
- attribute introduced in CPP17 and C23. CPP17 :
- https://en.cppreference.com/w/cpp/language/attributes/fallthrough C23 :
- https://en.cppreference.com/w/c/language/attributes/fallthrough
- */
- #if XXH_HAS_C_ATTRIBUTE(x)
- #define XXH_FALLTHROUGH [[fallthrough]]
- #elif XXH_HAS_CPP_ATTRIBUTE(x)
- #define XXH_FALLTHROUGH [[fallthrough]]
- #elif XXH_HAS_ATTRIBUTE(__fallthrough__)
- #define XXH_FALLTHROUGH __attribute__((fallthrough))
- #else
- #define XXH_FALLTHROUGH
- #endif
+#if XXH_HAS_ATTRIBUTE(noescape)
+# define XXH_NOESCAPE __attribute__((noescape))
+#else
+# define XXH_NOESCAPE
+#endif
+/*! @endcond */
+
/*!
* @}
* @ingroup public
* @{
-
*/
- #ifndef XXH_NO_LONG_LONG
- /*-**********************************************************************
- * 64-bit hash
- ************************************************************************/
- #if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+* 64-bit hash
+************************************************************************/
+#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
* @brief An unsigned 64-bit integer.
*
* Not necessarily defined to `uint64_t` but functionally equivalent.
*/
typedef uint64_t XXH64_hash_t;
- #elif !defined(__VMS) && \
- (defined(__cplusplus) || (defined(__STDC_VERSION__) && \
- (__STDC_VERSION__ >= 199901L) /* C99 */))
- #include <stdint.h>
-typedef uint64_t XXH64_hash_t;
- #else
- #include <limits.h>
- #if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
-/* LP64 ABI says uint64_t is unsigned long */
-typedef unsigned long XXH64_hash_t;
- #else
-/* the following type must have a width of 64-bit */
-typedef unsigned long long XXH64_hash_t;
- #endif
- #endif
+#elif !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint64_t XXH64_hash_t;
+#else
+# include <limits.h>
+# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
+ /* LP64 ABI says uint64_t is unsigned long */
+ typedef unsigned long XXH64_hash_t;
+# else
+ /* the following type must have a width of 64-bit */
+ typedef unsigned long long XXH64_hash_t;
+# endif
+#endif
/*!
* @}
*
- * @defgroup xxh64_family XXH64 family
+ * @defgroup XXH64_family XXH64 family
* @ingroup public
* @{
-
* Contains functions used in the classic 64-bit xxHash algorithm.
*
* @note
* XXH3 provides competitive speed for both 32-bit and 64-bit systems,
- * and offers true 64/128 bit hash results. It provides a superior level of
- * dispersion, and greatly reduces the risks of collisions.
+ * and offers true 64/128 bit hash results.
+ * It provides better speed for systems with vector processing capabilities.
*/
/*!
* @brief Calculates the 64-bit hash of @p input using xxHash64.
*
- * This function usually runs faster on 64-bit systems, but slower on 32-bit
- * systems (see benchmark).
- *
- * @param input The block of data to be hashed, at least @p length bytes in
- * size.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
* @param length The length of @p input, in bytes.
* @param seed The 64-bit seed to alter the hash's output predictably.
*
@@ -725,54 +902,158 @@ typedef unsigned long long XXH64_hash_t;
* readable, contiguous memory. However, if @p length is `0`, @p input may be
* `NULL`. In C++, this also must be *TriviallyCopyable*.
*
- * @return The calculated 64-bit hash.
+ * @return The calculated 64-bit xxHash64 value.
*
- * @see
- * XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
- * Direct equivalents for the other variants of xxHash.
- * @see
- * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
+ * @see @ref single_shot_example "Single Shot Example" for an example.
*/
-XXH_PUBLIC_API XXH64_hash_t XXH64(const void *input, size_t length,
- XXH64_hash_t seed);
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
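+
+/*
+ * A minimal single-shot sketch; the seed constant is illustrative:
+ * @code{.c}
+ * #include "xxhash.h"
+ *
+ * XXH64_hash_t hashMessage(const void* msg, size_t len)
+ * {
+ *     return XXH64(msg, len, 0xcafebabeULL);  // 64-bit seed
+ * }
+ * @endcode
+ */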
/******* Streaming *******/
+#ifndef XXH_NO_STREAM
/*!
* @brief The opaque state struct for the XXH64 streaming API.
*
* @see XXH64_state_s for details.
+ * @see @ref streaming_example "Streaming Example"
+ */
+typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
+
+/*!
+ * @brief Allocates an @ref XXH64_state_t.
+ *
+ * @return An allocated pointer of @ref XXH64_state_t on success.
+ * @return `NULL` on failure.
+ *
+ * @note Must be freed with XXH64_freeState().
+ *
+ * @see @ref streaming_example "Streaming Example"
+ */
+XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void);
+
+/*!
+ * @brief Frees an @ref XXH64_state_t.
+ *
+ * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState().
+ *
+ * @return @ref XXH_OK.
+ *
+ * @note @p statePtr must be allocated with XXH64_createState().
+ *
+ * @see @ref streaming_example "Streaming Example"
*/
-typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
-XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void);
-XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr);
-XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dst_state,
- const XXH64_state_t *src_state);
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
-XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr,
- XXH64_hash_t seed);
-XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *statePtr,
- const void *input, size_t length);
-XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t *statePtr);
+/*!
+ * @brief Copies one @ref XXH64_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ * @p dst_state and @p src_state must not be `NULL` and must not overlap.
+ */
+XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH64_state_t to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note This function resets and seeds a state. Call it before @ref XXH64_update().
+ *
+ * @see @ref streaming_example "Streaming Example"
+ */
+XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed);
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH64_state_t.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note Call this to incrementally consume blocks of data.
+ *
+ * @see @ref streaming_example "Streaming Example"
+ */
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated hash value from an @ref XXH64_state_t.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated 64-bit xxHash64 value from that state.
+ *
+ * @note
+ * Calling XXH64_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @see @ref streaming_example "Streaming Example"
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
/******* Canonical representation *******/
-typedef struct {
- unsigned char digest[sizeof(XXH64_hash_t)];
+/*!
+ * @brief Canonical (big endian) representation of @ref XXH64_hash_t.
+ */
+typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
-} XXH64_canonical_t;
+/*!
+ * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t.
+ *
+ * @param dst The @ref XXH64_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH64_hash_t to be converted.
+ *
+ * @pre
+ * @p dst must not be `NULL`.
+ *
+ * @see @ref canonical_representation_example "Canonical Representation Example"
+ */
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash);
-XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst,
- XXH64_hash_t hash);
-XXH_PUBLIC_API XXH64_hash_t
-XXH64_hashFromCanonical(const XXH64_canonical_t *src);
+/*!
+ * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t.
+ *
+ * @param src The @ref XXH64_canonical_t to convert.
+ *
+ * @pre
+ * @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ *
+ * @see @ref canonical_representation_example "Canonical Representation Example"
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src);
+
+#ifndef XXH_NO_XXH3
/*!
* @}
* ************************************************************************
- * @defgroup xxh3_family XXH3 family
+ * @defgroup XXH3_family XXH3 family
* @ingroup public
* @{
-
*
* XXH3 is a more recent hash algorithm featuring:
* - Improved speed for both small and large inputs
@@ -790,16 +1071,26 @@ XXH64_hashFromCanonical(const XXH64_canonical_t *src);
*
* XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
* but does not require it.
- * Any 32-bit and 64-bit targets that can run XXH32 smoothly
- * can run XXH3 at competitive speeds, even without vector support.
- * Further details are explained in the implementation.
- *
- * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
- * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro.
+ * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3
+ * at competitive speeds, even without vector support. Further details are
+ * explained in the implementation.
+ *
+ * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD
+ * implementations for many common platforms:
+ * - AVX512
+ * - AVX2
+ * - SSE2
+ * - ARM NEON
+ * - WebAssembly SIMD128
+ * - POWER8 VSX
+ * - s390x ZVector
+ * This can be controlled via the @ref XXH_VECTOR macro, but it automatically
+ * selects the best version according to predefined macros. For the x86 family, an
+ * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c.
*
* XXH3 implementation is portable:
* it has a generic C90 formulation that can be compiled on any platform,
- * all implementations generage exactly the same hash value on all platforms.
+ * all implementations generate exactly the same hash value on all platforms.
* Starting from v0.8.0, it's also labelled "stable", meaning that
* any future version will also generate the same hash value.
*
@@ -811,53 +1102,106 @@ XXH64_hashFromCanonical(const XXH64_canonical_t *src);
*
* The API supports one-shot hashing, streaming mode, and custom secrets.
*/
-
/*-**********************************************************************
- * XXH3 64-bit variant
- ************************************************************************/
+* XXH3 64-bit variant
+************************************************************************/
-/* XXH3_64bits():
- * default 64-bit variant, using default secret and default seed of 0.
- * It's the fastest variant. */
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void *data, size_t len);
+/*!
+ * @brief Calculates 64-bit unseeded variant of XXH3 hash of @p input.
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 64-bit XXH3 hash value.
+ *
+ * @note
+ * This is equivalent to @ref XXH3_64bits_withSeed() with a seed of `0`;
+ * however, it may have slightly better performance due to constant propagation of the
+ * defaults.
+ *
+ * @see
+ * XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length);
-/*
- * XXH3_64bits_withSeed():
- * This variant generates a custom secret on the fly
- * based on default secret altered using the `seed` value.
+/*!
+ * @brief Calculates 64-bit seeded variant of XXH3 hash of @p input.
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 64-bit XXH3 hash value.
+ *
+ * @note
+ * seed == 0 produces the same results as @ref XXH3_64bits().
+ *
+ * This variant generates a custom secret on the fly based on default secret
+ * altered using the @p seed value.
+ *
* While this operation is decently fast, note that it's not completely free.
- * Note: seed==0 produces the same results as XXH3_64bits().
+ *
+ * @see @ref single_shot_example "Single Shot Example" for an example.
*/
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void *data, size_t len,
- XXH64_hash_t seed);
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
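+
+/*
+ * A sketch contrasting the unseeded and seeded variants; the seed value is
+ * illustrative:
+ * @code{.c}
+ * #include "xxhash.h"
+ *
+ * void xxh3Variants(const void* data, size_t len)
+ * {
+ *     XXH64_hash_t h0 = XXH3_64bits(data, len);               // same as seed == 0
+ *     XXH64_hash_t h1 = XXH3_64bits_withSeed(data, len, 42);  // custom secret on the fly
+ *     (void)h0; (void)h1;
+ * }
+ * @endcode
+ */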
- /*!
- * The bare minimum size for a custom secret.
- *
- * @see
- * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
- * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
- */
- #define XXH3_SECRET_SIZE_MIN 136
+/*!
+ * The bare minimum size for a custom secret.
+ *
+ * @see
+ * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
+ * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
+ */
+#define XXH3_SECRET_SIZE_MIN 136
+
+/*!
+ * @brief Calculates 64-bit variant of XXH3 with a custom "secret".
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ *
+ * @return The calculated 64-bit XXH3 hash value.
+ *
+ * @pre
+ * The memory between @p data and @p data + @p len must be valid,
+ * readable, contiguous memory. However, if @p len is `0`, @p data may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * It's possible to provide any blob of bytes as a "secret" to generate the hash.
+ * This makes it more difficult for an external actor to prepare an intentional collision.
+ * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN).
+ * However, the quality of the secret impacts the dispersion of the hash algorithm.
+ * Therefore, the secret _must_ look like a bunch of random bytes.
+ * Avoid "trivial" or structured data such as repeated sequences or a text document.
+ * Whenever in doubt about the "randomness" of the blob of bytes,
+ * consider employing @ref XXH3_generateSecret() instead (see below).
+ * It will generate a proper high entropy secret derived from the blob of bytes.
+ * Another advantage of using XXH3_generateSecret() is that
+ * it guarantees that all bits within the initial blob of bytes
+ * will impact every bit of the output.
+ * This is not necessarily the case when using the blob of bytes directly
+ * because, when hashing _small_ inputs, only a portion of the secret is employed.
+ *
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
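+
+/*
+ * A sketch that derives a proper high-entropy secret from arbitrary key
+ * material via XXH3_generateSecret() (declared further below), then hashes
+ * with it; the key string is illustrative:
+ * @code{.c}
+ * #include "xxhash.h"
+ *
+ * XXH64_hash_t hashWithSecret(const void* data, size_t len)
+ * {
+ *     static unsigned char secret[XXH3_SECRET_SIZE_MIN];
+ *     static int ready = 0;
+ *     if (!ready) {  // one-time setup (not thread-safe)
+ *         const char key[] = "application-specific key material";
+ *         XXH3_generateSecret(secret, sizeof(secret), key, sizeof(key) - 1);
+ *         ready = 1;
+ *     }
+ *     return XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
+ * }
+ * @endcode
+ */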
-/*
- * XXH3_64bits_withSecret():
- * It's possible to provide any blob of bytes as a "secret" to generate the
- * hash. This makes it more difficult for an external actor to prepare an
- * intentional collision. The main condition is that secretSize *must* be large
- * enough (>= XXH3_SECRET_SIZE_MIN). However, the quality of produced hash
- * values depends on secret's entropy. Technically, the secret must look like a
- * bunch of random bytes. Avoid "trivial" or structured data such as repeated
- * sequences or a text document. Whenever unsure about the "randomness" of the
- * blob of bytes, consider relabelling it as a "custom seed" instead, and employ
- * "XXH3_generateSecret()" (see below) to generate a high entropy secret derived
- * from the custom seed.
- */
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void *data, size_t len,
- const void *secret,
- size_t secretSize);
/******* Streaming *******/
+#ifndef XXH_NO_STREAM
/*
* Streaming requires state maintenance.
* This operation costs memory and CPU.
@@ -866,52 +1210,143 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void *data, size_t len,
*/
/*!
- * @brief The state struct for the XXH3 streaming API.
+ * @brief The opaque state struct for the XXH3 streaming API.
*
* @see XXH3_state_s for details.
+ * @see @ref streaming_example "Streaming Example"
*/
-typedef struct XXH3_state_s XXH3_state_t;
-XXH_PUBLIC_API XXH3_state_t *XXH3_createState(void);
-XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t *statePtr);
-XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t *dst_state,
- const XXH3_state_t *src_state);
+typedef struct XXH3_state_s XXH3_state_t;
+XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
-/*
- * XXH3_64bits_reset():
- * Initialize with default parameters.
- * digest will be equivalent to `XXH3_64bits()`.
+/*!
+ * @brief Copies one @ref XXH3_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ * @p dst_state and @p src_state must not be `NULL` and must not overlap.
*/
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t *statePtr);
-/*
- * XXH3_64bits_reset_withSeed():
- * Generate a custom secret from `seed`, and store it into `statePtr`.
- * digest will be equivalent to `XXH3_64bits_withSeed()`.
+XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ * - This function resets `statePtr` and generates a secret with default parameters.
+ * - Call this function before @ref XXH3_64bits_update().
+ * - Digest will be equivalent to `XXH3_64bits()`.
+ *
+ * @see @ref streaming_example "Streaming Example"
+ *
*/
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t *statePtr,
- XXH64_hash_t seed);
-/*
- * XXH3_64bits_reset_withSecret():
- * `secret` is referenced, it _must outlive_ the hash streaming session.
- * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ * - This function resets `statePtr` and generates a secret from `seed`.
+ * - Call this function before @ref XXH3_64bits_update().
+ * - Digest will be equivalent to `XXH3_64bits_withSeed()`.
+ *
+ * @see @ref streaming_example "Streaming Example"
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ * `secret` is referenced; it _must outlive_ the hash streaming session.
+ *
+ * Similar to one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN,
* and the quality of produced hash values depends on secret's entropy
* (secret's content should look like a bunch of random bytes).
* When in doubt about the randomness of a candidate `secret`,
* consider employing `XXH3_generateSecret()` instead (see below).
+ *
+ * @see @ref streaming_example "Streaming Example"
*/
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(
- XXH3_state_t *statePtr, const void *secret, size_t secretSize);
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update(XXH3_state_t *statePtr,
- const void *input,
- size_t length);
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest(const XXH3_state_t *statePtr);
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ * @pre
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note Call this to incrementally consume blocks of data.
+ *
+ * @see @ref streaming_example "Streaming Example"
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
+
+/*!
+ * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 64-bit hash value from that state.
+ *
+ * @note
+ * Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ * @see @ref streaming_example "Streaming Example"
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
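+
+/*
+ * A compact streaming sketch for in-memory data; the 256-byte chunk size is
+ * illustrative:
+ * @code{.c}
+ * #include <assert.h>
+ * #include "xxhash.h"
+ *
+ * XXH64_hash_t xxh3Stream(const unsigned char* data, size_t len)
+ * {
+ *     XXH64_hash_t hash;
+ *     size_t i;
+ *     XXH3_state_t* state = XXH3_createState();
+ *     assert(state != NULL);
+ *     XXH3_64bits_reset(state);                    // default, unseeded mode
+ *     for (i = 0; i < len; i += 256) {
+ *         size_t chunk = (len - i) < 256 ? (len - i) : 256;
+ *         XXH3_64bits_update(state, data + i, chunk);
+ *     }
+ *     hash = XXH3_64bits_digest(state);
+ *     XXH3_freeState(state);
+ *     return hash;
+ * }
+ * @endcode
+ */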
+#endif /* !XXH_NO_STREAM */
/* note : canonical representation of XXH3 is the same as XXH64
* since they both produce XXH64_hash_t values */
+
/*-**********************************************************************
- * XXH3 128-bit variant
- ************************************************************************/
+* XXH3 128-bit variant
+************************************************************************/
/*!
* @brief The return value from 128-bit hashes.
@@ -920,21 +1355,80 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest(const XXH3_state_t *statePtr);
* endianness.
*/
typedef struct {
-
- XXH64_hash_t low64; /*!< `value & 0xFFFFFFFFFFFFFFFF` */
- XXH64_hash_t high64; /*!< `value >> 64` */
-
+ XXH64_hash_t low64; /*!< `value & 0xFFFFFFFFFFFFFFFF` */
+ XXH64_hash_t high64; /*!< `value >> 64` */
} XXH128_hash_t;
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void *data, size_t len);
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void *data, size_t len,
- XXH64_hash_t seed);
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void *data,
- size_t len,
- const void *secret,
- size_t secretSize);
+/*!
+ * @brief Calculates 128-bit unseeded variant of XXH3 of @p data.
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ *
+ * @return The calculated 128-bit variant of XXH3 value.
+ *
+ * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
+ * for shorter inputs.
+ *
+ * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of `0`;
+ * however, it may have slightly better performance due to constant propagation of the
+ * defaults.
+ *
+ * @see XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
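+
+/*
+ * A minimal single-shot sketch printing both 64-bit halves:
+ * @code{.c}
+ * #include <stdio.h>
+ * #include "xxhash.h"
+ *
+ * void printXXH128(const void* data, size_t len)
+ * {
+ *     XXH128_hash_t h = XXH3_128bits(data, len);
+ *     printf("%016llx%016llx\n",
+ *            (unsigned long long)h.high64, (unsigned long long)h.low64);
+ * }
+ * @endcode
+ */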
+/*! @brief Calculates 128-bit seeded variant of XXH3 hash of @p data.
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @return The calculated 128-bit variant of XXH3 value.
+ *
+ * @note
+ * seed == 0 produces the same results as @ref XXH3_128bits().
+ *
+ * This variant generates a custom secret on the fly based on default secret
+ * altered using the @p seed value.
+ *
+ * While this operation is decently fast, note that it's not completely free.
+ *
+ * @see XXH3_128bits(), XXH3_128bits_withSecret(): other seeding variants
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
+/*!
+ * @brief Calculates 128-bit variant of XXH3 with a custom "secret".
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ *
+ * @return The calculated 128-bit variant of XXH3 value.
+ *
+ * It's possible to provide any blob of bytes as a "secret" to generate the hash.
+ * This makes it more difficult for an external actor to prepare an intentional collision.
+ * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN).
+ * However, the quality of the secret impacts the dispersion of the hash algorithm.
+ * Therefore, the secret _must_ look like a bunch of random bytes.
+ * Avoid "trivial" or structured data such as repeated sequences or a text document.
+ * Whenever in doubt about the "randomness" of the blob of bytes,
+ * consider employing @ref XXH3_generateSecret() instead (see below).
+ * It will generate a proper high entropy secret derived from the blob of bytes.
+ * Another advantage of using XXH3_generateSecret() is that
+ * it guarantees that all bits within the initial blob of bytes
+ * will impact every bit of the output.
+ * This is not necessarily the case when using the blob of bytes directly
+ * because, when hashing _small_ inputs, only a portion of the secret is employed.
+ *
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
/******* Streaming *******/
+#ifndef XXH_NO_STREAM
/*
* Streaming requires state maintenance.
* This operation costs memory and CPU.
@@ -944,73 +1438,193 @@ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void *data,
* XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
* Use already declared XXH3_createState() and XXH3_freeState().
*
- * All reset and streaming functions have same meaning as their 64-bit
- * counterpart.
+ * All reset and streaming functions have same meaning as their 64-bit counterpart.
+ */
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ * - This function resets `statePtr` and generates a secret with default parameters.
+ * - Call it before @ref XXH3_128bits_update().
+ * - Digest will be equivalent to `XXH3_128bits()`.
+ *
+ * @see @ref streaming_example "Streaming Example"
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ * - This function resets `statePtr` and generates a secret from `seed`.
+ * - Call it before @ref XXH3_128bits_update().
+ * - Digest will be equivalent to `XXH3_128bits_withSeed()`.
+ *
+ * @see @ref streaming_example "Streaming Example"
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
+/*!
+ * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * `secret` is referenced; it _must outlive_ the hash streaming session.
+ * Similar to one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN,
+ * and the quality of produced hash values depends on secret's entropy
+ * (secret's content should look like a bunch of random bytes).
+ * When in doubt about the randomness of a candidate `secret`,
+ * consider employing `XXH3_generateSecret()` instead (see below).
+ *
+ * @see @ref streaming_example "Streaming Example"
*/
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t *statePtr);
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t *statePtr,
- XXH64_hash_t seed);
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(
- XXH3_state_t *statePtr, const void *secret, size_t secretSize);
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ * The memory between @p input and @p input + @p length must be valid,
+ * readable, contiguous memory. However, if @p length is `0`, @p input may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update(XXH3_state_t *statePtr,
- const void *input,
- size_t length);
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest(const XXH3_state_t *statePtr);
+/*!
+ * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ * @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 128-bit hash value from that state.
+ *
+ * @note
+ * Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
+ *
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
+#endif /* !XXH_NO_STREAM */
 /* Following helper functions make it possible to compare XXH128_hash_t values.
- * Since XXH128_hash_t is a structure, this capability is not offered by the
- * language.
- * Note: For better performance, these functions can be inlined using
- * XXH_INLINE_ALL */
+ * Since XXH128_hash_t is a structure, this capability is not offered by the language.
+ * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
/*!
- * XXH128_isEqual():
- * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
+ * @brief Checks equality of two XXH128_hash_t values.
+ *
+ * @param h1 The 128-bit hash value.
+ * @param h2 Another 128-bit hash value.
+ *
+ * @return `1` if `h1` and `h2` are equal.
+ * @return `0` if they are not.
*/
-XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
+XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
/*!
- * XXH128_cmp():
+ * @brief Compares two @ref XXH128_hash_t values.
*
* This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
*
- * return: >0 if *h128_1 > *h128_2
- * =0 if *h128_1 == *h128_2
- * <0 if *h128_1 < *h128_2
+ * @param h128_1 Left-hand side value
+ * @param h128_2 Right-hand side value
+ *
+ * @return >0 if @p h128_1 > @p h128_2
+ * @return =0 if @p h128_1 == @p h128_2
+ * @return <0 if @p h128_1 < @p h128_2
*/
-XXH_PUBLIC_API int XXH128_cmp(const void *h128_1, const void *h128_2);
+XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);
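+
+/*
+ * A sketch of sorting an array of hash values with the comparator; the array
+ * is assumed to be caller-provided:
+ * @code{.c}
+ * #include <stdlib.h>
+ * #include "xxhash.h"
+ *
+ * void sortHashes(XXH128_hash_t* hashes, size_t count)
+ * {
+ *     qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
+ * }
+ * @endcode
+ */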
+
/******* Canonical representation *******/
-typedef struct {
+typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
- unsigned char digest[sizeof(XXH128_hash_t)];
-} XXH128_canonical_t;
+/*!
+ * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t.
+ *
+ * @param dst The @ref XXH128_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH128_hash_t to be converted.
+ *
+ * @pre
+ * @p dst must not be `NULL`.
+ * @see @ref canonical_representation_example "Canonical Representation Example"
+ */
+XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);
-XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t *dst,
- XXH128_hash_t hash);
-XXH_PUBLIC_API XXH128_hash_t
-XXH128_hashFromCanonical(const XXH128_canonical_t *src);
+/*!
+ * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t.
+ *
+ * @param src The @ref XXH128_canonical_t to convert.
+ *
+ * @pre
+ * @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ * @see @ref canonical_representation_example "Canonical Representation Example"
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);
- #endif /* XXH_NO_LONG_LONG */
+
+#endif /* !XXH_NO_XXH3 */
+#endif /* XXH_NO_LONG_LONG */
/*!
* @}
*/
-#endif /* XXHASH_H_5627135585666179 */
+#endif /* XXHASH_H_5627135585666179 */
+
+
#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
- #define XXHASH_H_STATIC_13879238742
+#define XXHASH_H_STATIC_13879238742
/* ****************************************************************************
* This section contains declarations which are not guaranteed to remain stable.
* They may change in future versions, becoming incompatible with a different
* version of the library.
* These declarations should only be used with static linking.
* Never use them in association with dynamic linking!
- *****************************************************************************
- */
+ ***************************************************************************** */
/*
* These definitions are only present to allow static allocation
@@ -1031,23 +1645,16 @@ XXH128_hashFromCanonical(const XXH128_canonical_t *src);
* @see XXH64_state_s, XXH3_state_s
*/
struct XXH32_state_s {
+ XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
+ XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
+ XXH32_hash_t v[4]; /*!< Accumulator lanes */
+ XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
+ XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */
+ XXH32_hash_t reserved; /*!< Reserved field. Do not read nor write to it. */
+}; /* typedef'd to XXH32_state_t */
- XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
- XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref
- total_len_32 overflow) */
- XXH32_hash_t v1; /*!< First accumulator lane */
- XXH32_hash_t v2; /*!< Second accumulator lane */
- XXH32_hash_t v3; /*!< Third accumulator lane */
- XXH32_hash_t v4; /*!< Fourth accumulator lane */
- XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as
- unsigned char[16]. */
- XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */
- XXH32_hash_t reserved; /*!< Reserved field. Do not read or write to it, it may
- be removed. */
-}; /* typedef'd to XXH32_state_t */
-
- #ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */
+#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */
/*!
* @internal
@@ -1062,64 +1669,57 @@ struct XXH32_state_s {
* @see XXH32_state_s, XXH3_state_s
*/
struct XXH64_state_s {
+ XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */
+ XXH64_hash_t v[4]; /*!< Accumulator lanes */
+ XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
+ XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */
+ XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyway. */
+ XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. */
+}; /* typedef'd to XXH64_state_t */
+
+#ifndef XXH_NO_XXH3
+
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
+# include <stdalign.h>
+# define XXH_ALIGN(n) alignas(n)
+#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
+/* In C++ alignas() is a keyword */
+# define XXH_ALIGN(n) alignas(n)
+#elif defined(__GNUC__)
+# define XXH_ALIGN(n) __attribute__ ((aligned(n)))
+#elif defined(_MSC_VER)
+# define XXH_ALIGN(n) __declspec(align(n))
+#else
+# define XXH_ALIGN(n) /* disabled */
+#endif
- XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */
- XXH64_hash_t v1; /*!< First accumulator lane */
- XXH64_hash_t v2; /*!< Second accumulator lane */
- XXH64_hash_t v3; /*!< Third accumulator lane */
- XXH64_hash_t v4; /*!< Fourth accumulator lane */
- XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as
- unsigned char[32]. */
- XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */
- XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyways*/
- XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it, it
- may be removed. */
-
-}; /* typedef'd to XXH64_state_t */
-
- #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 \
- */
- #include <stdalign.h>
- #define XXH_ALIGN(n) alignas(n)
- #elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
- /* In C++ alignas() is a keyword */
- #define XXH_ALIGN(n) alignas(n)
- #elif defined(__GNUC__)
- #define XXH_ALIGN(n) __attribute__((aligned(n)))
- #elif defined(_MSC_VER)
- #define XXH_ALIGN(n) __declspec(align(n))
- #else
- #define XXH_ALIGN(n) /* disabled */
- #endif
-
- /* Old GCC versions only accept the attribute after the type in structures.
- */
- #if !(defined(__STDC_VERSION__) && \
- (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
- && !(defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
- && defined(__GNUC__)
- #define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
- #else
- #define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
- #endif
-
- /*!
- * @brief The size of the internal XXH3 buffer.
- *
- * This is the optimal update size for incremental hashing.
- *
- * @see XXH3_64b_update(), XXH3_128b_update().
- */
- #define XXH3_INTERNALBUFFER_SIZE 256
+/* Old GCC versions only accept the attribute after the type in structures. */
+#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
+ && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
+ && defined(__GNUC__)
+# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
+#else
+# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
+#endif
- /*!
- * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
- *
- * This is the size used in @ref XXH3_kSecret and the seeded functions.
- *
- * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
- */
- #define XXH3_SECRET_DEFAULT_SIZE 192
+/*!
+ * @brief The size of the internal XXH3 buffer.
+ *
+ * This is the optimal update size for incremental hashing.
+ *
+ * @see XXH3_64b_update(), XXH3_128b_update().
+ */
+#define XXH3_INTERNALBUFFER_SIZE 256
+
+/*!
+ * @internal
+ * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
+ *
+ * This is the size used in @ref XXH3_kSecret and the seeded functions.
+ *
+ * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
+ */
+#define XXH3_SECRET_DEFAULT_SIZE 192
/*!
* @internal
@@ -1144,111 +1744,284 @@ struct XXH64_state_s {
* @see XXH32_state_s, XXH64_state_s
*/
struct XXH3_state_s {
+ XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
+ /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
+ XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
+ /*!< Used to store a custom secret generated from a seed. */
+ XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
+ /*!< The internal buffer. @see XXH32_state_s::mem32 */
+ XXH32_hash_t bufferedSize;
+ /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
+ XXH32_hash_t useSeed;
+ /*!< Reserved field. Needed for padding on 64-bit. */
+ size_t nbStripesSoFar;
+ /*!< Number of stripes processed. */
+ XXH64_hash_t totalLen;
+ /*!< Total length hashed. 64-bit even on 32-bit targets. */
+ size_t nbStripesPerBlock;
+ /*!< Number of stripes per block. */
+ size_t secretLimit;
+ /*!< Size of @ref customSecret or @ref extSecret */
+ XXH64_hash_t seed;
+ /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
+ XXH64_hash_t reserved64;
+ /*!< Reserved field. */
+ const unsigned char* extSecret;
+ /*!< Reference to an external secret for the _withSecret variants, NULL
+ * for other variants. */
+ /* note: there may be some padding at the end due to alignment on 64 bytes */
+}; /* typedef'd to XXH3_state_t */
+
+#undef XXH_ALIGN_MEMBER
- XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
- /*!< The 8 accumulators. Similar to `vN` in @ref XXH32_state_s::v1 and @ref
- * XXH64_state_s */
- XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
- /*!< Used to store a custom secret generated from a seed. */
- XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
- /*!< The internal buffer. @see XXH32_state_s::mem32 */
- XXH32_hash_t bufferedSize;
- /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
- XXH32_hash_t reserved32;
- /*!< Reserved field. Needed for padding on 64-bit. */
- size_t nbStripesSoFar;
- /*!< Number or stripes processed. */
- XXH64_hash_t totalLen;
- /*!< Total length hashed. 64-bit even on 32-bit targets. */
- size_t nbStripesPerBlock;
- /*!< Number of stripes per block. */
- size_t secretLimit;
- /*!< Size of @ref customSecret or @ref extSecret */
- XXH64_hash_t seed;
- /*!< Seed for _withSeed variants. Must be zero otherwise, @see
- * XXH3_INITSTATE() */
- XXH64_hash_t reserved64;
- /*!< Reserved field. */
- const unsigned char *extSecret;
- /*!< Reference to an external secret for the _withSecret variants, NULL
- * for other variants. */
- /* note: there may be some padding at the end due to alignment on 64 bytes */
-
-}; /* typedef'd to XXH3_state_t */
-
- #undef XXH_ALIGN_MEMBER
-
- /*!
- * @brief Initializes a stack-allocated `XXH3_state_s`.
- *
- * When the @ref XXH3_state_t structure is merely emplaced on stack,
- * it should be initialized with XXH3_INITSTATE() or a memset()
- * in case its first reset uses XXH3_NNbits_reset_withSeed().
- * This init can be omitted if the first reset uses default or _withSecret
- * mode. This operation isn't necessary when the state is created with
- * XXH3_createState(). Note that this doesn't prepare the state for a
- * streaming operation, it's still necessary to use XXH3_NNbits_reset*()
- * afterwards.
- */
- #define XXH3_INITSTATE(XXH3_state_ptr) \
- { (XXH3_state_ptr)->seed = 0; }
+/*!
+ * @brief Initializes a stack-allocated `XXH3_state_s`.
+ *
+ * When the @ref XXH3_state_t structure is merely emplaced on stack,
+ * it should be initialized with XXH3_INITSTATE() or a memset()
+ * in case its first reset uses XXH3_NNbits_reset_withSeed().
+ * This init can be omitted if the first reset uses default or _withSecret mode.
+ * This operation isn't necessary when the state is created with XXH3_createState().
+ * Note that this doesn't prepare the state for a streaming operation;
+ * it's still necessary to use XXH3_NNbits_reset*() afterwards.
+ */
+#define XXH3_INITSTATE(XXH3_state_ptr) \
+ do { \
+ XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
+ tmp_xxh3_state_ptr->seed = 0; \
+ tmp_xxh3_state_ptr->extSecret = NULL; \
+ } while(0)
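+/*
+ * Editor's sketch of the pattern described above; `buf` and `bufSize` are
+ * assumed caller-provided data, and error checks are omitted for brevity:
+ * @code{.c}
+ * XXH3_state_t state;                 // on the stack, not from XXH3_createState()
+ * XXH3_INITSTATE(&state);             // required before the seeded reset below
+ * XXH3_64bits_reset_withSeed(&state, (XXH64_hash_t)1234);
+ * XXH3_64bits_update(&state, buf, bufSize);
+ * XXH64_hash_t h = XXH3_64bits_digest(&state);
+ * @endcode
+ */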
-/* === Experimental API === */
-/* Symbols defined below must be considered tied to a specific library version.
+
+/*!
+ * @brief Calculates the 128-bit hash of @p data using XXH3.
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ * @param seed The 64-bit seed to alter the hash's output predictably.
+ *
+ * @pre
+ * The memory between @p data and @p data + @p len must be valid,
+ * readable, contiguous memory. However, if @p len is `0`, @p data may be
+ * `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 128-bit XXH3 value.
+ *
+ * @see @ref single_shot_example "Single Shot Example" for an example.
*/
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
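+/*
+ * Editor's sketch of a single-shot call, with `buf`/`bufSize` standing in for
+ * caller data:
+ * @code{.c}
+ * XXH128_hash_t const h = XXH128(buf, bufSize, 0);
+ * // h.low64 and h.high64 hold the two halves of the 128-bit result
+ * @endcode
+ */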
-/*
- * XXH3_generateSecret():
+
+/* === Experimental API === */
+/* Symbols defined below must be considered tied to a specific library version. */
+
+/*!
+ * @brief Derive a high-entropy secret from any user-defined content, named customSeed.
+ *
+ * @param secretBuffer A writable buffer for derived high-entropy secret data.
+ * @param secretSize Size of secretBuffer, in bytes. Must be >= XXH3_SECRET_SIZE_MIN.
+ * @param customSeed User-defined content.
+ * @param customSeedSize Size of customSeed, in bytes.
*
- * Derive a high-entropy secret from any user-defined content, named customSeed.
- * The generated secret can be used in combination with `*_withSecret()`
- * functions. The `_withSecret()` variants are useful to provide a higher level
- * of protection than 64-bit seed, as it becomes much more difficult for an
- * external actor to guess how to impact the calculation logic.
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * The generated secret can be used in combination with `*_withSecret()` functions.
+ * The `_withSecret()` variants are useful to provide a higher level of protection
+ * than a 64-bit seed, as it becomes much more difficult for an external actor to
+ * guess how to impact the calculation logic.
*
* The function accepts as input a custom seed of any length and any content,
- * and derives from it a high-entropy secret of length XXH3_SECRET_DEFAULT_SIZE
- * into an already allocated buffer secretBuffer.
- * The generated secret is _always_ XXH_SECRET_DEFAULT_SIZE bytes long.
+ * and derives from it a high-entropy secret of length @p secretSize into an
+ * already allocated buffer @p secretBuffer.
*
* The generated secret can then be used with any `*_withSecret()` variant.
- * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
- * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
+ * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
+ * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
* are part of this list. They all accept a `secret` parameter
- * which must be very long for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
+ * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
* _and_ feature very high entropy (consist of random-looking bytes).
- * These conditions can be a high bar to meet, so
- * this function can be used to generate a secret of proper quality.
+ * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
+ * be employed to ensure proper quality.
+ *
+ * @p customSeed can be anything. It can have any size, even a small one,
+ * and its content can be anything, even a "poor entropy" source such as a
+ * bunch of zeroes. The resulting `secret` will nonetheless provide all required qualities.
*
- * customSeed can be anything. It can have any size, even small ones,
- * and its content can be anything, even stupidly "low entropy" source such as a
- * bunch of zeroes. The resulting `secret` will nonetheless provide all expected
- * qualities.
+ * @pre
+ * - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
+ * - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
*
- * Supplying NULL as the customSeed copies the default secret into
- * `secretBuffer`. When customSeedSize > 0, supplying NULL as customSeed is
- * undefined behavior.
+ * Example code:
+ * @code{.c}
+ * #include <stdio.h>
+ * #include <stdlib.h>
+ * #include <string.h>
+ * #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ * #include "xxhash.h"
+ * // Hashes argv[2] using the entropy from argv[1].
+ * int main(int argc, char* argv[])
+ * {
+ * char secret[XXH3_SECRET_SIZE_MIN];
+ *     if (argc != 3) { return 1; }
+ * XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
+ * XXH64_hash_t h = XXH3_64bits_withSecret(
+ * argv[2], strlen(argv[2]),
+ * secret, sizeof(secret)
+ * );
+ * printf("%016llx\n", (unsigned long long) h);
+ * }
+ * @endcode
*/
-XXH_PUBLIC_API void XXH3_generateSecret(void *secretBuffer,
- const void *customSeed,
- size_t customSeedSize);
+XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
-/* simple short-cut to pre-selected XXH3_128bits variant */
-XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
- XXH64_hash_t seed);
+/*!
+ * @brief Generate the same secret as the _withSeed() variants.
+ *
+ * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * The generated secret can be used in combination with
+ * `*_withSecret()` and `_withSecretandSeed()` variants.
+ *
+ * Example C++ `std::string` hash class:
+ * @code{.cpp}
+ * #include <string>
+ * #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ * #include "xxhash.h"
+ * // Slow, seeds each time
+ * class HashSlow {
+ * XXH64_hash_t seed;
+ * public:
+ * HashSlow(XXH64_hash_t s) : seed{s} {}
+ * size_t operator()(const std::string& x) const {
+ * return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
+ * }
+ * };
+ * // Fast, caches the seeded secret for future uses.
+ * class HashFast {
+ *     unsigned char secret[XXH3_SECRET_DEFAULT_SIZE]; // _fromSeed() fills this many bytes
+ * public:
+ *     HashFast(XXH64_hash_t s) {
+ *         XXH3_generateSecret_fromSeed(secret, s);
+ *     }
+ * size_t operator()(const std::string& x) const {
+ * return size_t{
+ * XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
+ * };
+ * }
+ * };
+ * @endcode
+ */
+XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
+
+/*!
+ * @brief Calculates 64/128-bit seeded variant of XXH3 hash of @p data.
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * These variants generate hash values using either
+ * @p seed for "short" keys (< @ref XXH3_MIDSIZE_MAX = 240 bytes)
+ * or @p secret for "large" keys (>= @ref XXH3_MIDSIZE_MAX).
+ *
+ * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
+ * `_withSeed()` has to generate the secret on the fly for "large" keys; that
+ * generation is fast, but its cost can be perceptible for "not so large" keys (< 1 KB).
+ * `_withSecret()` has to generate the masks on the fly for "small" keys,
+ * which requires more instructions than the _withSeed() variants.
+ * Therefore, the _withSecretandSeed() variant combines the best of both worlds.
+ *
+ * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
+ * this variant produces *exactly* the same results as `_withSeed()` variant,
+ * hence offering only a pure speed benefit on "large" input,
+ * by skipping the need to regenerate the secret for every large input.
+ *
+ * Another usage scenario is to hash the secret to a 64-bit hash value,
+ * for example with XXH3_64bits(), which then becomes the seed,
+ * and then employ both the seed and the secret in _withSecretandSeed().
+ * On top of speed, an added benefit is that each bit in the secret
+ * has a 50% chance of flipping each bit of the output, via its impact on the seed.
+ *
+ * This is not guaranteed when using the secret directly in "small data" scenarios,
+ * because only portions of the secret are employed for small data.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed);
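+/*
+ * Editor's sketch: generate the seeded secret once, then reuse it for many
+ * inputs. `data`, `len` and `seed` are assumed caller values; per the note
+ * above, the result matches the plain _withSeed() variant:
+ * @code{.c}
+ * unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ * XXH3_generateSecret_fromSeed(secret, seed);
+ * XXH64_hash_t h = XXH3_64bits_withSecretandSeed(data, len,
+ *                                                secret, sizeof(secret), seed);
+ * // h == XXH3_64bits_withSeed(data, len, seed)
+ * @endcode
+ */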
+/*!
+ * @brief Calculates 128-bit seeded variant of XXH3 hash of @p data.
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in size.
+ * @param length The length of @p input, in bytes.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ * @param seed64 The 64-bit seed to alter the hash result predictably.
+ *
+ * @return The calculated 128-bit XXH3 value.
+ *
+ * @see XXH3_64bits_withSecretandSeed()
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
+XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed64);
+#ifndef XXH_NO_STREAM
+/*!
+ * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
+ *
+ * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ * @param seed64 The 64-bit seed to alter the hash result predictably.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @see XXH3_64bits_withSecretandSeed()
+ */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed64);
+/*!
+ * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
+ *
+ * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ * @param seed64 The 64-bit seed to alter the hash result predictably.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @see XXH3_64bits_withSecretandSeed()
+ */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
+ XXH_NOESCAPE const void* secret, size_t secretSize,
+ XXH64_hash_t seed64);
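+/*
+ * Editor's sketch of the streaming counterpart, reusing the `secret` and
+ * `seed` prepared as in the sketch above; error handling is abbreviated:
+ * @code{.c}
+ * XXH3_state_t* const st = XXH3_createState();
+ * if (st == NULL) return XXH_ERROR;
+ * if (XXH3_64bits_reset_withSecretandSeed(st, secret, sizeof(secret), seed)
+ *     != XXH_OK) return XXH_ERROR;
+ * XXH3_64bits_update(st, data, len);
+ * XXH64_hash_t const h = XXH3_64bits_digest(st);
+ * XXH3_freeState(st);
+ * @endcode
+ */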
+#endif /* !XXH_NO_STREAM */
+
+#endif /* !XXH_NO_XXH3 */
+#endif /* XXH_NO_LONG_LONG */
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+# define XXH_IMPLEMENTATION
+#endif
- #endif /* XXH_NO_LONG_LONG */
- #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
- #define XXH_IMPLEMENTATION
- #endif
+#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
-#endif /* defined(XXH_STATIC_LINKING_ONLY) && \
- !defined(XXHASH_H_STATIC_13879238742) */
/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */
+
/*-**********************************************************************
* xxHash implementation
*-**********************************************************************
@@ -1271,401 +2044,454 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
* which can then be linked into the final binary.
************************************************************************/
-#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) || \
- defined(XXH_IMPLEMENTATION)) && \
- !defined(XXH_IMPLEM_13a8737387)
- #define XXH_IMPLEM_13a8737387
-
- /* *************************************
- * Tuning parameters
- ***************************************/
-
- /*!
- * @defgroup tuning Tuning parameters
- * @{
-
- *
- * Various macros to control xxHash's behavior.
- */
- #ifdef XXH_DOXYGEN
- /*!
- * @brief Define this to disable 64-bit code.
- *
- * Useful if only using the @ref xxh32_family and you have a strict C90
- * compiler.
- */
- #define XXH_NO_LONG_LONG
- #undef XXH_NO_LONG_LONG /* don't actually */
- /*!
- * @brief Controls how unaligned memory is accessed.
- *
- * By default, access to unaligned memory is controlled by `memcpy()`, which
- * is safe and portable.
- *
- * Unfortunately, on some target/compiler combinations, the generated
- * assembly is sub-optimal.
- *
- * The below switch allow selection of a different access method
- * in the search for improved performance.
- *
- * @par Possible options:
- *
- * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
- * @par
- * Use `memcpy()`. Safe and portable. Note that most modern compilers
- * will eliminate the function call and treat it as an unaligned access.
- *
- * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))`
- * @par
- * Depends on compiler extensions and is therefore not portable.
- * This method is safe _if_ your compiler supports it,
- * and *generally* as fast or faster than `memcpy`.
- *
- * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
- * @par
- * Casts directly and dereferences. This method doesn't depend on the
- * compiler, but it violates the C standard as it directly dereferences
- * an unaligned pointer. It can generate buggy code on targets which do not
- * support unaligned memory accesses, but in some circumstances, it's
- * the only known way to get the most performance.
- *
- * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
- * @par
- * Also portable. This can generate the best code on old compilers which
- * don't inline small `memcpy()` calls, and it might also be faster on
- * big-endian systems which lack a native byteswap instruction. However,
- * some compilers will emit literal byteshifts even if the target supports
- * unaligned access.
- * .
- *
- * @warning
- * Methods 1 and 2 rely on implementation-defined behavior. Use these with
- * care, as what works on one compiler/platform/optimization level may
- * cause another to read garbage data or even crash.
- *
- * See https://stackoverflow.com/a/32095106/646947 for details.
- *
- * Prefer these methods in priority order (0 > 3 > 1 > 2)
- */
- #define XXH_FORCE_MEMORY_ACCESS 0
- /*!
- * @def XXH_ACCEPT_NULL_INPUT_POINTER
- * @brief Whether to add explicit `NULL` checks.
- *
- * If the input pointer is `NULL` and the length is non-zero, xxHash's
- * default behavior is to dereference it, triggering a segfault.
- *
- * When this macro is enabled, xxHash actively checks the input for a null
- * pointer. If it is, the result for null input pointers is the same as a
- * zero-length input.
- */
- #define XXH_ACCEPT_NULL_INPUT_POINTER 0
- /*!
- * @def XXH_FORCE_ALIGN_CHECK
- * @brief If defined to non-zero, adds a special path for aligned inputs
- * (XXH32() and XXH64() only).
- *
- * This is an important performance trick for architectures without decent
- * unaligned memory access performance.
- *
- * It checks for input alignment, and when conditions are met, uses a "fast
- * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
- * faster_ read speed.
- *
- * The check costs one initial branch per hash, which is generally
- * negligible, but not zero.
- *
- * Moreover, it's not useful to generate an additional code path if memory
- * access uses the same instruction for both aligned and unaligned
- * addresses (e.g. x86 and aarch64).
- *
- * In these cases, the alignment check can be removed by setting this macro
- * to 0. Then the code will always use unaligned memory access. Align check
- * is automatically disabled on x86, x64 & arm64, which are platforms known
- * to offer good unaligned memory accesses performance.
- *
- * This option does not affect XXH3 (only XXH32 and XXH64).
- */
- #define XXH_FORCE_ALIGN_CHECK 0
+#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
+ || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
+# define XXH_IMPLEM_13a8737387
- /*!
- * @def XXH_NO_INLINE_HINTS
- * @brief When non-zero, sets all functions to `static`.
- *
- * By default, xxHash tries to force the compiler to inline almost all
- * internal functions.
- *
- * This can usually improve performance due to reduced jumping and improved
- * constant folding, but significantly increases the size of the binary
- * which might not be favorable.
- *
- * Additionally, sometimes the forced inlining can be detrimental to
- * performance, depending on the architecture.
- *
- * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
- * compiler full control on whether to inline or not.
- *
- * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using
- * -fno-inline with GCC or Clang, this will automatically be defined.
- */
- #define XXH_NO_INLINE_HINTS 0
+/* *************************************
+* Tuning parameters
+***************************************/
- /*!
- * @def XXH_REROLL
- * @brief Whether to reroll `XXH32_finalize`.
- *
- * For performance, `XXH32_finalize` uses an unrolled loop
- * in the form of a switch statement.
- *
- * This is not always desirable, as it generates larger code,
- * and depending on the architecture, may even be slower
- *
- * This is automatically defined with `-Os`/`-Oz` on GCC and Clang.
- */
- #define XXH_REROLL 0
+/*!
+ * @defgroup tuning Tuning parameters
+ * @{
+ *
+ * Various macros to control xxHash's behavior.
+ */
+#ifdef XXH_DOXYGEN
+/*!
+ * @brief Define this to disable 64-bit code.
+ *
+ * Useful if only using the @ref XXH32_family and you have a strict C90 compiler.
+ */
+# define XXH_NO_LONG_LONG
+# undef XXH_NO_LONG_LONG /* don't actually */
+/*!
+ * @brief Controls how unaligned memory is accessed.
+ *
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is
+ * safe and portable.
+ *
+ * Unfortunately, on some target/compiler combinations, the generated assembly
+ * is sub-optimal.
+ *
+ * The switch below allows selection of a different access method
+ * in the search for improved performance.
+ *
+ * @par Possible options:
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
+ * @par
+ * Use `memcpy()`. Safe and portable. Note that most modern compilers will
+ * eliminate the function call and treat it as an unaligned access.
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
+ * @par
+ * Depends on compiler extensions and is therefore not portable.
+ * This method is safe _if_ your compiler supports it,
+ * and *generally* as fast or faster than `memcpy`.
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
+ * @par
+ * Casts directly and dereferences. This method doesn't depend on the
+ * compiler, but it violates the C standard as it directly dereferences an
+ * unaligned pointer. It can generate buggy code on targets which do not
+ * support unaligned memory accesses, but in some circumstances, it's the
+ * only known way to get the most performance.
+ *
+ * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
+ * @par
+ * Also portable. This can generate the best code on old compilers which don't
+ * inline small `memcpy()` calls, and it might also be faster on big-endian
+ * systems which lack a native byteswap instruction. However, some compilers
+ * will emit literal byteshifts even if the target supports unaligned access.
+ *
+ * @warning
+ * Methods 1 and 2 rely on implementation-defined behavior. Use these with
+ * care, as what works on one compiler/platform/optimization level may cause
+ * another to read garbage data or even crash.
+ *
+ * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
+ *
+ * Prefer these methods in priority order (0 > 3 > 1 > 2)
+ */
+# define XXH_FORCE_MEMORY_ACCESS 0
- /*!
- * @internal
- * @brief Redefines old internal names.
- *
- * For compatibility with code that uses xxHash's internals before the names
- * were changed to improve namespacing. There is no other reason to use
- * this.
- */
- #define XXH_OLD_NAMES
- #undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
- #endif /* XXH_DOXYGEN */
- /*!
- * @}
- */
-
- #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command \
- line for example */
- /* prefer __packed__ structures (method 1) for gcc on armv7+ and mips */
- #if !defined(__clang__) && \
- ((defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
- (defined(__GNUC__) && \
- ((defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \
- (defined(__mips__) && (__mips <= 5 || __mips_isa_rev < 6) && \
- (!defined(__mips16) || defined(__mips_mips16e2))))))
- #define XXH_FORCE_MEMORY_ACCESS 1
- #endif
- #endif
-
- #ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */
- #define XXH_ACCEPT_NULL_INPUT_POINTER 0
- #endif
-
- #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
- #if defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || \
- defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) /* visual */
- #define XXH_FORCE_ALIGN_CHECK 0
- #else
- #define XXH_FORCE_ALIGN_CHECK 1
- #endif
- #endif
-
- #ifndef XXH_NO_INLINE_HINTS
- #if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
- || defined(__NO_INLINE__) /* -O0, -fno-inline */
- #define XXH_NO_INLINE_HINTS 1
- #else
- #define XXH_NO_INLINE_HINTS 0
- #endif
- #endif
-
- #ifndef XXH_REROLL
- #if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ || \
- (defined(__GNUC__) && !defined(__clang__))
- /* The if/then loop is preferable to switch/case on gcc (on x64) */
- #define XXH_REROLL 1
- #else
- #define XXH_REROLL 0
- #endif
- #endif
-
- /*!
- * @defgroup impl Implementation
- * @{
-
- */
-
- /* *************************************
- * Includes & Memory related functions
- ***************************************/
- /*
- * Modify the local functions below should you wish to use
- * different memory routines for malloc() and free()
- */
- #include <stdlib.h>
+/*!
+ * @def XXH_SIZE_OPT
+ * @brief Controls how much xxHash optimizes for size.
+ *
+ * xxHash, when compiled, tends to result in a rather large binary size. This
+ * is mostly due to heavy usage of forced inlining and constant folding of the
+ * @ref XXH3_family to increase performance.
+ *
+ * However, some developers prefer size over speed. This option can
+ * significantly reduce the size of the generated code. When using the `-Os`
+ * or `-Oz` options on GCC or Clang, this is defined to 1 by default,
+ * otherwise it is defined to 0.
+ *
+ * Most of these size optimizations can be controlled manually.
+ *
+ * This is a number from 0-2.
+ * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed
+ * comes first.
+ * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more
+ * conservative and disables hacks that increase code size. It implies the
+ * options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0,
+ * and @ref XXH3_NEON_LANES == 8 if they are not already defined.
+ * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible.
+ * Performance may cry. For example, the single shot functions just use the
+ * streaming API.
+ */
+# define XXH_SIZE_OPT 0
+
+/*!
+ * @def XXH_FORCE_ALIGN_CHECK
+ * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
+ * and XXH64() only).
+ *
+ * This is an important performance trick for architectures without decent
+ * unaligned memory access performance.
+ *
+ * It checks for input alignment, and when conditions are met, uses a "fast
+ * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
+ * faster_ read speed.
+ *
+ * The check costs one initial branch per hash, which is generally negligible,
+ * but not zero.
+ *
+ * Moreover, it's not useful to generate an additional code path if memory
+ * access uses the same instruction for both aligned and unaligned
+ * addresses (e.g. x86 and aarch64).
+ *
+ * In these cases, the alignment check can be removed by setting this macro to 0.
+ * Then the code will always use unaligned memory access.
+ * The alignment check is automatically disabled on x86, x64, ARM64, and some ARM chips,
+ * which are platforms known to offer good unaligned memory access performance.
+ *
+ * It is also disabled by default when @ref XXH_SIZE_OPT >= 1.
+ *
+ * This option does not affect XXH3 (only XXH32 and XXH64).
+ */
+# define XXH_FORCE_ALIGN_CHECK 0
+
+/*!
+ * @def XXH_NO_INLINE_HINTS
+ * @brief When non-zero, sets all functions to `static`.
+ *
+ * By default, xxHash tries to force the compiler to inline almost all internal
+ * functions.
+ *
+ * This can usually improve performance due to reduced jumping and improved
+ * constant folding, but significantly increases the size of the binary which
+ * might not be favorable.
+ *
+ * Additionally, sometimes the forced inlining can be detrimental to performance,
+ * depending on the architecture.
+ *
+ * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
+ * compiler full control on whether to inline or not.
+ *
+ * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if
+ * @ref XXH_SIZE_OPT >= 1, this will automatically be defined.
+ */
+# define XXH_NO_INLINE_HINTS 0
+
+/*!
+ * @def XXH3_INLINE_SECRET
+ * @brief Determines whether to inline the XXH3 withSecret code.
+ *
+ * When the secret size is known, the compiler can improve the performance
+ * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret().
+ *
+ * However, if the secret size is not known, it doesn't have any benefit. This
+ * happens when xxHash is compiled into a global symbol. Therefore, if
+ * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0.
+ *
+ * Additionally, this defaults to 0 on GCC 12+, which has an issue with function pointers
+ * that are *sometimes* force-inlined on -Og, and it is impossible to automatically
+ * detect this optimization level.
+ */
+# define XXH3_INLINE_SECRET 0
+
+/*!
+ * @def XXH32_ENDJMP
+ * @brief Whether to use a jump for `XXH32_finalize`.
+ *
+ * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
+ * This is generally preferable for performance,
+ * but depending on exact architecture, a jmp may be preferable.
+ *
+ * This setting is only expected to make a difference for very small inputs.
+ */
+# define XXH32_ENDJMP 0
/*!
* @internal
- * @brief Modify this function to use a different routine than malloc().
+ * @brief Redefines old internal names.
+ *
+ * For compatibility with code that uses xxHash's internals before the names
+ * were changed to improve namespacing. There is no other reason to use this.
*/
-static void *XXH_malloc(size_t s) {
+# define XXH_OLD_NAMES
+# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
- return malloc(s);
+/*!
+ * @def XXH_NO_STREAM
+ * @brief Disables the streaming API.
+ *
+ * When xxHash is not inlined and the streaming functions are not used, disabling
+ * the streaming functions can improve code size significantly, especially with
+ * the @ref XXH3_family which tends to make constant folded copies of itself.
+ */
+# define XXH_NO_STREAM
+# undef XXH_NO_STREAM /* don't actually */
+#endif /* XXH_DOXYGEN */
+/*!
+ * @}
+ */
-}
+#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+ /* prefer __packed__ structures (method 1) for GCC.
+  * On < ARMv7 with unaligned access (e.g. Raspbian armhf), __packed__ still compiles
+  * to byte shifting, so we use memcpy, which for some reason does emit unaligned loads. */
+# if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
+# define XXH_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+#ifndef XXH_SIZE_OPT
+ /* default to 1 for -Os or -Oz */
+# if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
+# define XXH_SIZE_OPT 1
+# else
+# define XXH_SIZE_OPT 0
+# endif
+#endif
+
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+ /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
+# if XXH_SIZE_OPT >= 1 || \
+ defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
+ || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM) /* visual */
+# define XXH_FORCE_ALIGN_CHECK 0
+# else
+# define XXH_FORCE_ALIGN_CHECK 1
+# endif
+#endif
+
+#ifndef XXH_NO_INLINE_HINTS
+# if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */
+# define XXH_NO_INLINE_HINTS 1
+# else
+# define XXH_NO_INLINE_HINTS 0
+# endif
+#endif
+
+#ifndef XXH3_INLINE_SECRET
+# if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
+ || !defined(XXH_INLINE_ALL)
+# define XXH3_INLINE_SECRET 0
+# else
+# define XXH3_INLINE_SECRET 1
+# endif
+#endif
+
+#ifndef XXH32_ENDJMP
+/* generally preferable for performance */
+# define XXH32_ENDJMP 0
+#endif
+
+/*!
+ * @defgroup impl Implementation
+ * @{
+ */
+
+
+/* *************************************
+* Includes & Memory related functions
+***************************************/
+#if defined(XXH_NO_STREAM)
+/* nothing */
+#elif defined(XXH_NO_STDLIB)
+
+/* When any mention of the stdlib is disabled,
+ * the library loses the ability to invoke malloc() / free().
+ * In practice, it means that functions like `XXH*_createState()`
+ * will always fail, and return NULL.
+ * This flag is useful in situations where
+ * xxhash.h is integrated into a kernel, embedded, or otherwise limited
+ * environment without access to dynamic allocation.
+ */
+
+static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
+static void XXH_free(void* p) { (void)p; }
+
+#else
+
+/*
+ * Modify the local functions below should you wish to use
+ * different memory routines for malloc() and free()
+ */
+#include <stdlib.h>
+
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than malloc().
+ */
+static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }
/*!
* @internal
* @brief Modify this function to use a different routine than free().
*/
-static void XXH_free(void *p) {
+static void XXH_free(void* p) { free(p); }
+
+#endif /* XXH_NO_STDLIB */
- free(p);
+#include <string.h>
+/*!
+ * @internal
+ * @brief Modify this function to use a different routine than memcpy().
+ */
+static void* XXH_memcpy(void* dest, const void* src, size_t size)
+{
+ return memcpy(dest,src,size);
}
- #include <string.h>
+#include <limits.h> /* ULLONG_MAX */
+
+
+/* *************************************
+* Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER /* Visual Studio warning fix */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+#if XXH_NO_INLINE_HINTS /* disable inlining hints */
+# if defined(__GNUC__) || defined(__clang__)
+# define XXH_FORCE_INLINE static __attribute__((unused))
+# else
+# define XXH_FORCE_INLINE static
+# endif
+# define XXH_NO_INLINE static
+/* enable inlining hints */
+#elif defined(__GNUC__) || defined(__clang__)
+# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
+# define XXH_NO_INLINE static __attribute__((noinline))
+#elif defined(_MSC_VER) /* Visual Studio */
+# define XXH_FORCE_INLINE static __forceinline
+# define XXH_NO_INLINE static __declspec(noinline)
+#elif defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
+# define XXH_FORCE_INLINE static inline
+# define XXH_NO_INLINE static
+#else
+# define XXH_FORCE_INLINE static
+# define XXH_NO_INLINE static
+#endif
+
+#if XXH3_INLINE_SECRET
+# define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
+#else
+# define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
+#endif
+
+
+/* *************************************
+* Debug
+***************************************/
+/*!
+ * @ingroup tuning
+ * @def XXH_DEBUGLEVEL
+ * @brief Sets the debugging level.
+ *
+ * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
+ * compiler's command line options. The value must be a number.
+ */
+#ifndef XXH_DEBUGLEVEL
+# ifdef DEBUGLEVEL /* backwards compat */
+# define XXH_DEBUGLEVEL DEBUGLEVEL
+# else
+# define XXH_DEBUGLEVEL 0
+# endif
+#endif
+
+#if (XXH_DEBUGLEVEL>=1)
+# include <assert.h> /* note: can still be disabled with NDEBUG */
+# define XXH_ASSERT(c) assert(c)
+#else
+# if defined(__INTEL_COMPILER)
+# define XXH_ASSERT(c) XXH_ASSUME((unsigned char) (c))
+# else
+# define XXH_ASSERT(c) XXH_ASSUME(c)
+# endif
+#endif
+
+/* note: use after variable declarations */
+#ifndef XXH_STATIC_ASSERT
+# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */
+# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
+# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */
+# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
+# else
+# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
+# endif
+# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
+#endif
/*!
* @internal
- * @brief Modify this function to use a different routine than memcpy().
+ * @def XXH_COMPILER_GUARD(var)
+ * @brief Used to prevent unwanted optimizations for @p var.
+ *
+ * It uses an empty GCC inline assembly statement with a register constraint
+ * which forces @p var into a general purpose register (eg eax, ebx, ecx
+ * on x86) and marks it as modified.
+ *
+ * This is used in a few places to avoid unwanted autovectorization (e.g.
+ * XXH32_round()). All vectorization we want is explicit via intrinsics,
+ * and _usually_ isn't wanted elsewhere.
+ *
+ * We also use it to prevent unwanted constant folding for AArch64 in
+ * XXH3_initCustomSecret_scalar().
*/
-static void *XXH_memcpy(void *dest, const void *src, size_t size) {
-
- return memcpy(dest, src, size);
-
-}
-
- #include <limits.h> /* ULLONG_MAX */
-
- /* *************************************
- * Compiler Specific Options
- ***************************************/
- #ifdef _MSC_VER /* Visual Studio warning fix */
- #pragma warning(disable : 4127) /* disable: C4127: conditional expression \
- is constant */
- #endif
-
- #if XXH_NO_INLINE_HINTS /* disable inlining hints */
- #if defined(__GNUC__)
- #define XXH_FORCE_INLINE static __attribute__((unused))
- #else
- #define XXH_FORCE_INLINE static
- #endif
- #define XXH_NO_INLINE static
- /* enable inlining hints */
- #elif defined(_MSC_VER) /* Visual Studio */
- #define XXH_FORCE_INLINE static __forceinline
- #define XXH_NO_INLINE static __declspec(noinline)
- #elif defined(__GNUC__)
- #define XXH_FORCE_INLINE \
- static __inline__ __attribute__((always_inline, unused))
- #define XXH_NO_INLINE static __attribute__((noinline))
- #elif defined(__cplusplus) || \
- (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
- #define XXH_FORCE_INLINE static inline
- #define XXH_NO_INLINE static
- #else
- #define XXH_FORCE_INLINE static
- #define XXH_NO_INLINE static
- #endif
-
- /* *************************************
- * Debug
- ***************************************/
- /*!
- * @ingroup tuning
- * @def XXH_DEBUGLEVEL
- * @brief Sets the debugging level.
- *
- * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
- * compiler's command line options. The value must be a number.
- */
- #ifndef XXH_DEBUGLEVEL
- #ifdef DEBUGLEVEL /* backwards compat */
- #define XXH_DEBUGLEVEL DEBUGLEVEL
- #else
- #define XXH_DEBUGLEVEL 0
- #endif
- #endif
-
- #if (XXH_DEBUGLEVEL >= 1)
- #include <assert.h> /* note: can still be disabled with NDEBUG */
- #define XXH_ASSERT(c) assert(c)
- #else
- #define XXH_ASSERT(c) ((void)0)
- #endif
-
- /* note: use after variable declarations */
- #ifndef XXH_STATIC_ASSERT
- #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */
- #include <assert.h>
- #define XXH_STATIC_ASSERT_WITH_MESSAGE(c, m) \
- do { \
- \
- static_assert((c), m); \
- \
- } while (0)
-
- #elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */
- #define XXH_STATIC_ASSERT_WITH_MESSAGE(c, m) \
- do { \
- \
- static_assert((c), m); \
- \
- } while (0)
-
- #else
- #define XXH_STATIC_ASSERT_WITH_MESSAGE(c, m) \
- do { \
- \
- struct xxh_sa { \
- \
- char x[(c) ? 1 : -1]; \
- \
- }; \
- \
- } while (0)
-
- #endif
- #define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c), #c)
- #endif
-
- /*!
- * @internal
- * @def XXH_COMPILER_GUARD(var)
- * @brief Used to prevent unwanted optimizations for @p var.
- *
- * It uses an empty GCC inline assembly statement with a register constraint
- * which forces @p var into a general purpose register (eg eax, ebx, ecx
- * on x86) and marks it as modified.
- *
- * This is used in a few places to avoid unwanted autovectorization (e.g.
- * XXH32_round()). All vectorization we want is explicit via intrinsics,
- * and _usually_ isn't wanted elsewhere.
- *
- * We also use it to prevent unwanted constant folding for AArch64 in
- * XXH3_initCustomSecret_scalar().
- */
- #ifdef __GNUC__
- #define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r"(var))
- #else
- #define XXH_COMPILER_GUARD(var) ((void)0)
- #endif
-
- /* *************************************
- * Basic Types
- ***************************************/
- #if !defined(__VMS) && \
- (defined(__cplusplus) || \
- (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */))
- #include <stdint.h>
-typedef uint8_t xxh_u8;
- #else
-typedef unsigned char xxh_u8;
- #endif
+#if defined(__GNUC__) || defined(__clang__)
+# define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
+#else
+# define XXH_COMPILER_GUARD(var) ((void)0)
+#endif
+
+/* Specifically for NEON vectors, which use the "w" constraint on Clang. */
+#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__)
+# define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var))
+#else
+# define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0)
+#endif
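+/*
+ * Editor's illustration with a hypothetical helper (not part of xxHash): the
+ * empty asm ties `acc` to a general-purpose register, so loops over this mix
+ * step are not autovectorized by GCC/Clang:
+ * @code{.c}
+ * static unsigned mix_scalar_only(unsigned acc, unsigned input)
+ * {
+ *     acc += input * 2654435761U;   // example multiply step
+ *     XXH_COMPILER_GUARD(acc);      // acc must pass through a GPR here
+ *     return acc;
+ * }
+ * @endcode
+ */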
+
+/* *************************************
+* Basic Types
+***************************************/
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t xxh_u8;
+#else
+ typedef unsigned char xxh_u8;
+#endif
typedef XXH32_hash_t xxh_u32;
- #ifdef XXH_OLD_NAMES
- #define BYTE xxh_u8
- #define U8 xxh_u8
- #define U32 xxh_u32
- #endif
+#ifdef XXH_OLD_NAMES
+# warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly"
+# define BYTE xxh_u8
+# define U8 xxh_u8
+# define U32 xxh_u32
+#endif
/* *** Memory access *** */
@@ -1719,288 +2545,304 @@ typedef XXH32_hash_t xxh_u32;
* @return The 32-bit little endian integer from the bytes at @p ptr.
*/
- #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 3))
- /*
- * Manual byteshift. Best for old compilers which don't inline memcpy.
- * We actually directly use XXH_readLE32 and XXH_readBE32.
- */
- #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 2))
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+/*
+ * Manual byteshift. Best for old compilers which don't inline memcpy.
+ * We actually directly use XXH_readLE32 and XXH_readBE32.
+ */
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
/*
* Force direct memory access. Only works on CPU which support unaligned memory
* access in hardware.
*/
-static xxh_u32 XXH_read32(const void *memPtr) {
+static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
- return *(const xxh_u32 *)memPtr;
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+/*
+ * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
+ * documentation claimed that it only increased the alignment, but actually it
+ * can decrease it on gcc, clang, and icc:
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
+ * https://gcc.godbolt.org/z/xYez1j67Y.
+ */
+#ifdef XXH_OLD_NAMES
+typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
+#endif
+static xxh_u32 XXH_read32(const void* ptr)
+{
+ typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32;
+ return *((const xxh_unalign32*)ptr);
}
- #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1))
+#else
- /*
- * __pack instructions are safer but compiler specific, hence potentially
- * problematic for some compilers.
- *
- * Currently only defined for GCC and ICC.
- */
- #ifdef XXH_OLD_NAMES
-typedef union {
+/*
+ * Portable and safe solution. Generally efficient.
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
+ */
+static xxh_u32 XXH_read32(const void* memPtr)
+{
+ xxh_u32 val;
+ XXH_memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
- xxh_u32 u32;
+#endif /* XXH_FORCE_MEMORY_ACCESS */
-} __attribute__((packed)) unalign;
- #endif
-static xxh_u32 XXH_read32(const void *ptr) {
+/* *** Endianness *** */
- typedef union {
+/*!
+ * @ingroup tuning
+ * @def XXH_CPU_LITTLE_ENDIAN
+ * @brief Whether the target is little endian.
+ *
+ * Defined to 1 if the target is little endian, or 0 if it is big endian.
+ * It can be defined externally, for example on the compiler command line.
+ *
+ * If it is not defined,
+ * a runtime check (which is usually constant folded) is used instead.
+ *
+ * @note
+ * This is not necessarily defined to an integer constant.
+ *
+ * @see XXH_isLittleEndian() for the runtime check.
+ */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+/*
+ * Try to detect endianness automatically, to avoid the nonstandard behavior
+ * in `XXH_isLittleEndian()`
+ */
+# if defined(_WIN32) /* Windows is always little endian */ \
+ || defined(__LITTLE_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+# define XXH_CPU_LITTLE_ENDIAN 1
+# elif defined(__BIG_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+# define XXH_CPU_LITTLE_ENDIAN 0
+# else
+/*!
+ * @internal
+ * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
+ *
+ * Most compilers will constant fold this.
+ */
+static int XXH_isLittleEndian(void)
+{
+ /*
+ * Portable and well-defined behavior.
+ * Don't use static: it is detrimental to performance.
+ */
+ const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
+ return one.c[0];
+}
+# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
+# endif
+#endif
- xxh_u32 u32;
- } __attribute__((packed)) xxh_unalign;
- return ((const xxh_unalign *)ptr)->u32;
-}
+/* ****************************************
+* Compiler-specific Functions and Macros
+******************************************/
+#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+#ifdef __has_builtin
+# define XXH_HAS_BUILTIN(x) __has_builtin(x)
+#else
+# define XXH_HAS_BUILTIN(x) 0
+#endif
+
- #else
/*
- * Portable and safe solution. Generally efficient.
- * see: https://stackoverflow.com/a/32095106/646947
+ * C23 and future versions have standard "unreachable()".
+ * Once it has been implemented reliably we can add it as an
+ * additional case:
+ *
+ * ```
+ * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN)
+ * # include <stddef.h>
+ * # ifdef unreachable
+ * # define XXH_UNREACHABLE() unreachable()
+ * # endif
+ * #endif
+ * ```
+ *
+ * Note C++23 also has std::unreachable() which can be detected
+ * as follows:
+ * ```
+ * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
+ * # include <utility>
+ * # define XXH_UNREACHABLE() std::unreachable()
+ * #endif
+ * ```
+ * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
+ * We don't use that as including `<utility>` in `extern "C"` blocks
+ * doesn't work on GCC 12.
*/
-static xxh_u32 XXH_read32(const void *memPtr) {
- xxh_u32 val;
- memcpy(&val, memPtr, sizeof(val));
- return val;
+#if XXH_HAS_BUILTIN(__builtin_unreachable)
+# define XXH_UNREACHABLE() __builtin_unreachable()
-}
+#elif defined(_MSC_VER)
+# define XXH_UNREACHABLE() __assume(0)
- #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+#else
+# define XXH_UNREACHABLE()
+#endif
- /* *** Endianness *** */
+#if XXH_HAS_BUILTIN(__builtin_assume)
+# define XXH_ASSUME(c) __builtin_assume(c)
+#else
+# define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
+#endif
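+/*
+ * Editor's note, a minimal use sketch (`len` being any in-scope size variable):
+ * with XXH_DEBUGLEVEL >= 1, XXH_ASSERT() checks the condition via assert();
+ * otherwise it lowers to XXH_ASSUME(), letting the optimizer treat the
+ * condition as true:
+ * @code{.c}
+ * XXH_ASSERT(len <= 16);   // runtime check or optimizer hint, per build
+ * @endcode
+ */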
- /*!
- * @ingroup tuning
- * @def XXH_CPU_LITTLE_ENDIAN
- * @brief Whether the target is little endian.
- *
- * Defined to 1 if the target is little endian, or 0 if it is big endian.
- * It can be defined externally, for example on the compiler command line.
- *
- * If it is not defined,
- * a runtime check (which is usually constant folded) is used instead.
- *
- * @note
- * This is not necessarily defined to an integer constant.
- *
- * @see XXH_isLittleEndian() for the runtime check.
- */
- #ifndef XXH_CPU_LITTLE_ENDIAN
- /*
- * Try to detect endianness automatically, to avoid the nonstandard behavior
- * in `XXH_isLittleEndian()`
- */
- #if defined(_WIN32) /* Windows is always little endian */ \
- || defined(__LITTLE_ENDIAN__) || \
- (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
- #define XXH_CPU_LITTLE_ENDIAN 1
- #elif defined(__BIG_ENDIAN__) || \
- (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
- #define XXH_CPU_LITTLE_ENDIAN 0
- #else
/*!
* @internal
- * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
+ * @def XXH_rotl32(x,r)
+ * @brief 32-bit rotate left.
*
- * Most compilers will constant fold this.
+ * @param x The 32-bit integer to be rotated.
+ * @param r The number of bits to rotate.
+ * @pre
+ * @p r > 0 && @p r < 32
+ * @note
+ * @p x and @p r may be evaluated multiple times.
+ * @return The rotated result.
*/
-static int XXH_isLittleEndian(void) {
-
- /*
- * Portable and well-defined behavior.
- * Don't use static: it is detrimental to performance.
- */
- const union {
-
- xxh_u32 u;
- xxh_u8 c[4];
-
- } one = {1};
-
- return one.c[0];
-
-}
-
- #define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
- #endif
- #endif
-
- /* ****************************************
- * Compiler-specific Functions and Macros
- ******************************************/
- #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
-
- #ifdef __has_builtin
- #define XXH_HAS_BUILTIN(x) __has_builtin(x)
- #else
- #define XXH_HAS_BUILTIN(x) 0
- #endif
-
- /*!
- * @internal
- * @def XXH_rotl32(x,r)
- * @brief 32-bit rotate left.
- *
- * @param x The 32-bit integer to be rotated.
- * @param r The number of bits to rotate.
- * @pre
- * @p r > 0 && @p r < 32
- * @note
- * @p x and @p r may be evaluated multiple times.
- * @return The rotated result.
- */
- #if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) && \
- XXH_HAS_BUILTIN(__builtin_rotateleft64)
- #define XXH_rotl32 __builtin_rotateleft32
- #define XXH_rotl64 __builtin_rotateleft64
- /* Note: although _rotl exists for minGW (GCC under windows), performance
- * seems poor */
- #elif defined(_MSC_VER)
- #define XXH_rotl32(x, r) _rotl(x, r)
- #define XXH_rotl64(x, r) _rotl64(x, r)
- #else
- #define XXH_rotl32(x, r) (((x) << (r)) | ((x) >> (32 - (r))))
- #define XXH_rotl64(x, r) (((x) << (r)) | ((x) >> (64 - (r))))
- #endif
-
- /*!
- * @internal
- * @fn xxh_u32 XXH_swap32(xxh_u32 x)
- * @brief A 32-bit byteswap.
- *
- * @param x The 32-bit integer to byteswap.
- * @return @p x, byteswapped.
- */
- #if defined(_MSC_VER) /* Visual Studio */
- #define XXH_swap32 _byteswap_ulong
- #elif XXH_GCC_VERSION >= 403
- #define XXH_swap32 __builtin_bswap32
- #else
-static xxh_u32 XXH_swap32(xxh_u32 x) {
-
- return ((x << 24) & 0xff000000) | ((x << 8) & 0x00ff0000) |
- ((x >> 8) & 0x0000ff00) | ((x >> 24) & 0x000000ff);
-
-}
-
- #endif
+#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
+ && XXH_HAS_BUILTIN(__builtin_rotateleft64)
+# define XXH_rotl32 __builtin_rotateleft32
+# define XXH_rotl64 __builtin_rotateleft64
+/* Note: although _rotl exists for MinGW (GCC under Windows), performance seems poor */
+#elif defined(_MSC_VER)
+# define XXH_rotl32(x,r) _rotl(x,r)
+# define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
+# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
+#endif
+
+/*!
+ * @internal
+ * @fn xxh_u32 XXH_swap32(xxh_u32 x)
+ * @brief A 32-bit byteswap.
+ *
+ * @param x The 32-bit integer to byteswap.
+ * @return @p x, byteswapped.
+ */
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap32 _byteswap_ulong
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap32 __builtin_bswap32
+#else
+static xxh_u32 XXH_swap32 (xxh_u32 x)
+{
+ return ((x << 24) & 0xff000000 ) |
+ ((x << 8) & 0x00ff0000 ) |
+ ((x >> 8) & 0x0000ff00 ) |
+ ((x >> 24) & 0x000000ff );
+}
+#endif
+
/* ***************************
- * Memory reads
- *****************************/
+* Memory reads
+*****************************/
/*!
* @internal
* @brief Enum to indicate whether a pointer is aligned.
*/
typedef enum {
-
- XXH_aligned, /*!< Aligned */
- XXH_unaligned /*!< Possibly unaligned */
-
+ XXH_aligned, /*!< Aligned */
+ XXH_unaligned /*!< Possibly unaligned */
} XXH_alignment;
- /*
- * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
- *
- * This is ideal for older compilers which don't inline memcpy.
- */
- #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 3))
-
-XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void *memPtr) {
-
- const xxh_u8 *bytePtr = (const xxh_u8 *)memPtr;
- return bytePtr[0] | ((xxh_u32)bytePtr[1] << 8) | ((xxh_u32)bytePtr[2] << 16) |
- ((xxh_u32)bytePtr[3] << 24);
+/*
+ * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
+ *
+ * This is ideal for older compilers which don't inline memcpy.
+ */
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[0]
+ | ((xxh_u32)bytePtr[1] << 8)
+ | ((xxh_u32)bytePtr[2] << 16)
+ | ((xxh_u32)bytePtr[3] << 24);
}
-XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void *memPtr) {
-
- const xxh_u8 *bytePtr = (const xxh_u8 *)memPtr;
- return bytePtr[3] | ((xxh_u32)bytePtr[2] << 8) | ((xxh_u32)bytePtr[1] << 16) |
- ((xxh_u32)bytePtr[0] << 24);
-
+XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[3]
+ | ((xxh_u32)bytePtr[2] << 8)
+ | ((xxh_u32)bytePtr[1] << 16)
+ | ((xxh_u32)bytePtr[0] << 24);
}
- #else
-XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void *ptr) {
-
- return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
-
+#else
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}
-static xxh_u32 XXH_readBE32(const void *ptr) {
-
- return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
-
+static xxh_u32 XXH_readBE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
+#endif
- #endif
-
-XXH_FORCE_INLINE xxh_u32 XXH_readLE32_align(const void *ptr,
- XXH_alignment align) {
-
- if (align == XXH_unaligned) {
-
- return XXH_readLE32(ptr);
-
- } else {
-
- return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32 *)ptr
- : XXH_swap32(*(const xxh_u32 *)ptr);
-
- }
-
+XXH_FORCE_INLINE xxh_u32
+XXH_readLE32_align(const void* ptr, XXH_alignment align)
+{
+ if (align==XXH_unaligned) {
+ return XXH_readLE32(ptr);
+ } else {
+ return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
+ }
}
+
/* *************************************
- * Misc
- ***************************************/
+* Misc
+***************************************/
/*! @ingroup public */
-XXH_PUBLIC_API unsigned XXH_versionNumber(void) {
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
- return XXH_VERSION_NUMBER;
-
-}
/* *******************************************************************
- * 32-bit hash functions
- *********************************************************************/
+* 32-bit hash functions
+*********************************************************************/
/*!
* @}
- * @defgroup xxh32_impl XXH32 implementation
+ * @defgroup XXH32_impl XXH32 implementation
* @ingroup impl
+ *
+ * Details on the XXH32 implementation.
* @{
-
*/
-/* #define instead of static const, to be used as initializers */
- #define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */
- #define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */
- #define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */
- #define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */
- #define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */
-
- #ifdef XXH_OLD_NAMES
- #define PRIME32_1 XXH_PRIME32_1
- #define PRIME32_2 XXH_PRIME32_2
- #define PRIME32_3 XXH_PRIME32_3
- #define PRIME32_4 XXH_PRIME32_4
- #define PRIME32_5 XXH_PRIME32_5
- #endif
+ /* #define instead of static const, to be used as initializers */
+#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */
+#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */
+#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */
+#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */
+#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */
+
+#ifdef XXH_OLD_NAMES
+# define PRIME32_1 XXH_PRIME32_1
+# define PRIME32_2 XXH_PRIME32_2
+# define PRIME32_3 XXH_PRIME32_3
+# define PRIME32_4 XXH_PRIME32_4
+# define PRIME32_5 XXH_PRIME32_5
+#endif
/*!
* @internal
@@ -2013,50 +2855,51 @@ XXH_PUBLIC_API unsigned XXH_versionNumber(void) {
* @param input The stripe of input to mix.
* @return The mixed accumulator lane.
*/
-static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) {
-
- acc += input * XXH_PRIME32_2;
- acc = XXH_rotl32(acc, 13);
- acc *= XXH_PRIME32_1;
- #if (defined(__SSE4_1__) || defined(__aarch64__)) && \
- !defined(XXH_ENABLE_AUTOVECTORIZE)
- /*
- * UGLY HACK:
- * A compiler fence is the only thing that prevents GCC and Clang from
- * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
- * reason) without globally disabling SSE4.1.
- *
- * The reason we want to avoid vectorization is because despite working on
- * 4 integers at a time, there are multiple factors slowing XXH32 down on
- * SSE4:
- * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
- * newer chips!) making it slightly slower to multiply four integers at
- * once compared to four integers independently. Even when pmulld was
- * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE
- * just to multiply unless doing a long operation.
- *
- * - Four instructions are required to rotate,
- * movqda tmp, v // not required with VEX encoding
- * pslld tmp, 13 // tmp <<= 13
- * psrld v, 19 // x >>= 19
- * por v, tmp // x |= tmp
- * compared to one for scalar:
- * roll v, 13 // reliably fast across the board
- * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
- *
- * - Instruction level parallelism is actually more beneficial here because
- * the SIMD actually serializes this operation: While v1 is rotating, v2
- * can load data, while v3 can multiply. SSE forces them to operate
- * together.
- *
- * This is also enabled on AArch64, as Clang autovectorizes it incorrectly
- * and it is pointless writing a NEON implementation that is basically the
- * same speed as scalar for XXH32.
- */
- XXH_COMPILER_GUARD(acc);
- #endif
- return acc;
-
+static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
+{
+ acc += input * XXH_PRIME32_2;
+ acc = XXH_rotl32(acc, 13);
+ acc *= XXH_PRIME32_1;
+#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
+ /*
+ * UGLY HACK:
+ * A compiler fence is the only thing that prevents GCC and Clang from
+ * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
+ * reason) without globally disabling SSE4.1.
+ *
+ * The reason we want to avoid vectorization is because despite working on
+ * 4 integers at a time, there are multiple factors slowing XXH32 down on
+ * SSE4:
+ * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
+ * newer chips!) making it slightly slower to multiply four integers at
+ * once compared to four integers independently. Even when pmulld was
+ * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE
+ * just to multiply unless doing a long operation.
+ *
+ * - Four instructions are required to rotate,
+ *      movdqa tmp, v            // not required with VEX encoding
+ * pslld tmp, 13 // tmp <<= 13
+ * psrld v, 19 // x >>= 19
+ * por v, tmp // x |= tmp
+ * compared to one for scalar:
+ * roll v, 13 // reliably fast across the board
+ * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
+ *
+ * - Instruction level parallelism is actually more beneficial here because
+ * the SIMD actually serializes this operation: While v1 is rotating, v2
+ * can load data, while v3 can multiply. SSE forces them to operate
+ * together.
+ *
+ * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing
+ * the loop. NEON is only faster on the A53, and with the newer cores, it is less
+ * than half the speed.
+ *
+ * Additionally, this is used on WASM SIMD128 because it JITs to the same
+ * SIMD instructions and has the same issue.
+ */
+ XXH_COMPILER_GUARD(acc);
+#endif
+ return acc;
}
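
/*
 * A sketch of the compiler-fence idiom that XXH_COMPILER_GUARD (defined
 * earlier in this header) relies on, assuming GCC or Clang: an empty asm
 * statement that claims to read and modify `var` forces the value into a
 * general-purpose register and hides its data flow from the
 * autovectorizer, while emitting no instruction at all.
 */
#if defined(__GNUC__) || defined(__clang__)
#  define DEMO_COMPILER_GUARD(var) __asm__("" : "+r" (var))
#else
#  define DEMO_COMPILER_GUARD(var) ((void)(var)) /* no-op fallback */
#endif
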
/*!
@@ -2066,38 +2909,20 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) {
* The final mix ensures that all input bits have a chance to impact any bit in
* the output digest, resulting in an unbiased distribution.
*
- * @param h32 The hash to avalanche.
+ * @param hash The hash to avalanche.
* @return The avalanched hash.
*/
-static xxh_u32 XXH32_avalanche(xxh_u32 h32) {
-
- h32 ^= h32 >> 15;
- h32 *= XXH_PRIME32_2;
- h32 ^= h32 >> 13;
- h32 *= XXH_PRIME32_3;
- h32 ^= h32 >> 16;
- return (h32);
-
+static xxh_u32 XXH32_avalanche(xxh_u32 hash)
+{
+ hash ^= hash >> 15;
+ hash *= XXH_PRIME32_2;
+ hash ^= hash >> 13;
+ hash *= XXH_PRIME32_3;
+ hash ^= hash >> 16;
+ return hash;
}
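
/*
 * A quick sketch of the avalanche property the mix above provides (local
 * demo_* copy, reimplemented here for illustration only): flipping a
 * single input bit should flip roughly half of the 32 output bits.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_avalanche(uint32_t h)
{
    h ^= h >> 15;  h *= 0x85EBCA77U; /* XXH_PRIME32_2 */
    h ^= h >> 13;  h *= 0xC2B2AE3DU; /* XXH_PRIME32_3 */
    h ^= h >> 16;
    return h;
}

int main(void)
{
    uint32_t diff = demo_avalanche(0xDEADBEEFU) ^ demo_avalanche(0xDEADBEEFU ^ 1U);
    int flipped = 0;
    while (diff) { flipped += (int)(diff & 1U); diff >>= 1; }
    printf("output bits flipped: %d of 32\n", flipped); /* expect about 16 */
    return 0;
}
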
- #define XXH_get32bits(p) XXH_readLE32_align(p, align)
-
- #define XXH_PROCESS1 \
- do { \
- \
- h32 += (*ptr++) * XXH_PRIME32_5; \
- h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1; \
- \
- } while (0)
-
- #define XXH_PROCESS4 \
- do { \
- \
- h32 += XXH_get32bits(ptr) * XXH_PRIME32_3; \
- ptr += 4; \
- h32 = XXH_rotl32(h32, 17) * XXH_PRIME32_4; \
- \
- } while (0)
+#define XXH_get32bits(p) XXH_readLE32_align(p, align)
/*!
* @internal
@@ -2107,1053 +2932,871 @@ static xxh_u32 XXH32_avalanche(xxh_u32 h32) {
* This final stage will digest them to ensure that all input bytes are present
* in the final mix.
*
- * @param h32 The hash to finalize.
+ * @param hash The hash to finalize.
* @param ptr The pointer to the remaining input.
* @param len The remaining length, modulo 16.
* @param align Whether @p ptr is aligned.
* @return The finalized hash.
+ * @see XXH64_finalize().
*/
-static xxh_u32 XXH32_finalize(xxh_u32 h32, const xxh_u8 *ptr, size_t len,
- XXH_alignment align) {
-
- /* Compact rerolled version */
- if (XXH_REROLL) {
-
- len &= 15;
- while (len >= 4) {
-
- XXH_PROCESS4;
- len -= 4;
-
- }
-
- while (len > 0) {
-
- XXH_PROCESS1;
- --len;
-
- }
-
- return XXH32_avalanche(h32);
-
- } else {
-
- switch (len & 15) /* or switch(bEnd - p) */ {
-
- case 12:
- XXH_PROCESS4;
- XXH_FALLTHROUGH;
- case 8:
- XXH_PROCESS4;
- XXH_FALLTHROUGH;
- case 4:
- XXH_PROCESS4;
- return XXH32_avalanche(h32);
-
- case 13:
- XXH_PROCESS4;
- XXH_FALLTHROUGH;
- case 9:
- XXH_PROCESS4;
- XXH_FALLTHROUGH;
- case 5:
- XXH_PROCESS4;
- XXH_PROCESS1;
- return XXH32_avalanche(h32);
-
- case 14:
- XXH_PROCESS4;
- XXH_FALLTHROUGH;
- case 10:
- XXH_PROCESS4;
- XXH_FALLTHROUGH;
- case 6:
- XXH_PROCESS4;
- XXH_PROCESS1;
- XXH_PROCESS1;
- return XXH32_avalanche(h32);
-
- case 15:
- XXH_PROCESS4;
- XXH_FALLTHROUGH;
- case 11:
- XXH_PROCESS4;
- XXH_FALLTHROUGH;
- case 7:
- XXH_PROCESS4;
- XXH_FALLTHROUGH;
- case 3:
- XXH_PROCESS1;
- XXH_FALLTHROUGH;
- case 2:
- XXH_PROCESS1;
- XXH_FALLTHROUGH;
- case 1:
- XXH_PROCESS1;
- XXH_FALLTHROUGH;
- case 0:
- return XXH32_avalanche(h32);
-
+static XXH_PUREF xxh_u32
+XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
+{
+#define XXH_PROCESS1 do { \
+ hash += (*ptr++) * XXH_PRIME32_5; \
+ hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \
+} while (0)
+
+#define XXH_PROCESS4 do { \
+ hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \
+ ptr += 4; \
+ hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \
+} while (0)
+
+ if (ptr==NULL) XXH_ASSERT(len == 0);
+
+ /* Compact rerolled version; generally faster */
+ if (!XXH32_ENDJMP) {
+ len &= 15;
+ while (len >= 4) {
+ XXH_PROCESS4;
+ len -= 4;
+ }
+ while (len > 0) {
+ XXH_PROCESS1;
+ --len;
+ }
+ return XXH32_avalanche(hash);
+ } else {
+ switch(len&15) /* or switch(bEnd - p) */ {
+ case 12: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 8: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 4: XXH_PROCESS4;
+ return XXH32_avalanche(hash);
+
+ case 13: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 9: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 5: XXH_PROCESS4;
+ XXH_PROCESS1;
+ return XXH32_avalanche(hash);
+
+ case 14: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 10: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 6: XXH_PROCESS4;
+ XXH_PROCESS1;
+ XXH_PROCESS1;
+ return XXH32_avalanche(hash);
+
+ case 15: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 11: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 7: XXH_PROCESS4;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 3: XXH_PROCESS1;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 2: XXH_PROCESS1;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 1: XXH_PROCESS1;
+ XXH_FALLTHROUGH; /* fallthrough */
+ case 0: return XXH32_avalanche(hash);
+ }
+ XXH_ASSERT(0);
+ return hash; /* reaching this point is deemed impossible */
}
-
- XXH_ASSERT(0);
- return h32; /* reaching this point is deemed impossible */
-
- }
-
}
- #ifdef XXH_OLD_NAMES
- #define PROCESS1 XXH_PROCESS1
- #define PROCESS4 XXH_PROCESS4
- #else
- #undef XXH_PROCESS1
- #undef XXH_PROCESS4
- #endif
+#ifdef XXH_OLD_NAMES
+# define PROCESS1 XXH_PROCESS1
+# define PROCESS4 XXH_PROCESS4
+#else
+# undef XXH_PROCESS1
+# undef XXH_PROCESS4
+#endif
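
/*
 * A small sketch of why the rerolled tail loop and the unrolled switch in
 * XXH32_finalize() are equivalent: for every remainder 0..15, the loop
 * performs len/4 four-byte steps followed by len%4 single-byte steps,
 * exactly the combinations the switch spells out case by case.
 */
#include <stddef.h>
#include <stdio.h>

int main(void)
{
    for (size_t len = 0; len < 16; len++) {
        size_t n = len, p4 = 0, p1 = 0;
        while (n >= 4) { p4++; n -= 4; } /* XXH_PROCESS4 steps */
        while (n > 0)  { p1++; n--;    } /* XXH_PROCESS1 steps */
        printf("len=%2zu -> %zu x PROCESS4, %zu x PROCESS1\n", len, p4, p1);
    }
    return 0;
}
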
/*!
* @internal
* @brief The implementation for @ref XXH32().
*
- * @param input, len, seed Directly passed from @ref XXH32().
+ * @param input , len , seed Directly passed from @ref XXH32().
* @param align Whether @p input is aligned.
* @return The calculated hash.
*/
-XXH_FORCE_INLINE xxh_u32 XXH32_endian_align(const xxh_u8 *input, size_t len,
- xxh_u32 seed, XXH_alignment align) {
-
- const xxh_u8 *bEnd = input ? input + len : NULL;
- xxh_u32 h32;
-
- #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
- (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
- if (input == NULL) {
-
- len = 0;
- bEnd = input = (const xxh_u8 *)(size_t)16;
-
- }
-
- #endif
-
- if (len >= 16) {
-
- const xxh_u8 *const limit = bEnd - 15;
- xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
- xxh_u32 v2 = seed + XXH_PRIME32_2;
- xxh_u32 v3 = seed + 0;
- xxh_u32 v4 = seed - XXH_PRIME32_1;
-
- do {
-
- v1 = XXH32_round(v1, XXH_get32bits(input));
- input += 4;
- v2 = XXH32_round(v2, XXH_get32bits(input));
- input += 4;
- v3 = XXH32_round(v3, XXH_get32bits(input));
- input += 4;
- v4 = XXH32_round(v4, XXH_get32bits(input));
- input += 4;
-
- } while (input < limit);
-
- h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) +
- XXH_rotl32(v4, 18);
-
- } else {
-
- h32 = seed + XXH_PRIME32_5;
-
- }
-
- h32 += (xxh_u32)len;
+XXH_FORCE_INLINE XXH_PUREF xxh_u32
+XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
+{
+ xxh_u32 h32;
+
+ if (input==NULL) XXH_ASSERT(len == 0);
+
+ if (len>=16) {
+ const xxh_u8* const bEnd = input + len;
+ const xxh_u8* const limit = bEnd - 15;
+ xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
+ xxh_u32 v2 = seed + XXH_PRIME32_2;
+ xxh_u32 v3 = seed + 0;
+ xxh_u32 v4 = seed - XXH_PRIME32_1;
+
+ do {
+ v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
+ v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
+ v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
+ v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
+ } while (input < limit);
+
+ h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
+ + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+ } else {
+ h32 = seed + XXH_PRIME32_5;
+ }
- return XXH32_finalize(h32, input, len & 15, align);
+ h32 += (xxh_u32)len;
+ return XXH32_finalize(h32, input, len&15, align);
}
-/*! @ingroup xxh32_family */
-XXH_PUBLIC_API XXH32_hash_t XXH32(const void *input, size_t len,
- XXH32_hash_t seed) {
-
- #if 0
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
+{
+#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
XXH32_state_t state;
XXH32_reset(&state, seed);
XXH32_update(&state, (const xxh_u8*)input, len);
return XXH32_digest(&state);
- #else
- if (XXH_FORCE_ALIGN_CHECK) {
-
- if ((((size_t)input) & 3) ==
- 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
- return XXH32_endian_align((const xxh_u8 *)input, len, seed, XXH_aligned);
-
- }
-
- }
-
- return XXH32_endian_align((const xxh_u8 *)input, len, seed, XXH_unaligned);
- #endif
-
-}
-
-/******* Hash streaming *******/
-/*!
- * @ingroup xxh32_family
- */
-XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void) {
-
- return (XXH32_state_t *)XXH_malloc(sizeof(XXH32_state_t));
-
-}
-
-/*! @ingroup xxh32_family */
-XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr) {
-
- XXH_free(statePtr);
- return XXH_OK;
+#else
+ if (XXH_FORCE_ALIGN_CHECK) {
+        if ((((size_t)input) & 3) == 0) {   /* Input is 4-byte aligned, leverage the speed benefit */
+ return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+ } }
+ return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+#endif
}
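
/*
 * A one-shot usage sketch, assuming header-only use via XXH_INLINE_ALL
 * (the way src/afl-performance.c consumes this header): the seed lets
 * callers derive independent hash functions from the same input.
 */
#define XXH_INLINE_ALL
#include "xxhash.h"
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char msg[] = "hello, xxhash";
    XXH32_hash_t h0 = XXH32(msg, strlen(msg), 0);
    XXH32_hash_t h1 = XXH32(msg, strlen(msg), 1); /* different seed, unrelated hash */
    printf("seed 0: %08x\nseed 1: %08x\n", (unsigned)h0, (unsigned)h1);
    return 0;
}
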
-/*! @ingroup xxh32_family */
-XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dstState,
- const XXH32_state_t *srcState) {
- memcpy(dstState, srcState, sizeof(*dstState));
+/******* Hash streaming *******/
+#ifndef XXH_NO_STREAM
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
+{
+ return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
+}
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
}
-/*! @ingroup xxh32_family */
-XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr,
- XXH32_hash_t seed) {
-
- XXH32_state_t state; /* using a local state to memcpy() in order to avoid
- strict-aliasing warnings */
- memset(&state, 0, sizeof(state));
- state.v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
- state.v2 = seed + XXH_PRIME32_2;
- state.v3 = seed + 0;
- state.v4 = seed - XXH_PRIME32_1;
- /* do not write into reserved, planned to be removed in a future version */
- memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
- return XXH_OK;
-
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
+{
+ XXH_memcpy(dstState, srcState, sizeof(*dstState));
}
-/*! @ingroup xxh32_family */
-XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *state,
- const void *input, size_t len) {
-
- if (input == NULL)
- #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
- (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
+{
+ XXH_ASSERT(statePtr != NULL);
+ memset(statePtr, 0, sizeof(*statePtr));
+ statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
+ statePtr->v[1] = seed + XXH_PRIME32_2;
+ statePtr->v[2] = seed + 0;
+ statePtr->v[3] = seed - XXH_PRIME32_1;
return XXH_OK;
- #else
- return XXH_ERROR;
- #endif
-
- {
-
- const xxh_u8 *p = (const xxh_u8 *)input;
- const xxh_u8 *const bEnd = p + len;
-
- state->total_len_32 += (XXH32_hash_t)len;
- state->large_len |=
- (XXH32_hash_t)((len >= 16) | (state->total_len_32 >= 16));
-
- if (state->memsize + len < 16) { /* fill in tmp buffer */
- XXH_memcpy((xxh_u8 *)(state->mem32) + state->memsize, input, len);
- state->memsize += (XXH32_hash_t)len;
- return XXH_OK;
-
- }
-
- if (state->memsize) { /* some data left from previous update */
- XXH_memcpy((xxh_u8 *)(state->mem32) + state->memsize, input,
- 16 - state->memsize);
- {
-
- const xxh_u32 *p32 = state->mem32;
- state->v1 = XXH32_round(state->v1, XXH_readLE32(p32));
- p32++;
- state->v2 = XXH32_round(state->v2, XXH_readLE32(p32));
- p32++;
- state->v3 = XXH32_round(state->v3, XXH_readLE32(p32));
- p32++;
- state->v4 = XXH32_round(state->v4, XXH_readLE32(p32));
-
- }
+}
- p += 16 - state->memsize;
- state->memsize = 0;
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH32_update(XXH32_state_t* state, const void* input, size_t len)
+{
+ if (input==NULL) {
+ XXH_ASSERT(len == 0);
+ return XXH_OK;
}
- if (p <= bEnd - 16) {
-
- const xxh_u8 *const limit = bEnd - 16;
- xxh_u32 v1 = state->v1;
- xxh_u32 v2 = state->v2;
- xxh_u32 v3 = state->v3;
- xxh_u32 v4 = state->v4;
-
- do {
+ { const xxh_u8* p = (const xxh_u8*)input;
+ const xxh_u8* const bEnd = p + len;
- v1 = XXH32_round(v1, XXH_readLE32(p));
- p += 4;
- v2 = XXH32_round(v2, XXH_readLE32(p));
- p += 4;
- v3 = XXH32_round(v3, XXH_readLE32(p));
- p += 4;
- v4 = XXH32_round(v4, XXH_readLE32(p));
- p += 4;
+ state->total_len_32 += (XXH32_hash_t)len;
+ state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
- } while (p <= limit);
+ if (state->memsize + len < 16) { /* fill in tmp buffer */
+ XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
+ state->memsize += (XXH32_hash_t)len;
+ return XXH_OK;
+ }
- state->v1 = v1;
- state->v2 = v2;
- state->v3 = v3;
- state->v4 = v4;
+ if (state->memsize) { /* some data left from previous update */
+ XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
+ { const xxh_u32* p32 = state->mem32;
+ state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
+ state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
+ state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
+ state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
+ }
+ p += 16-state->memsize;
+ state->memsize = 0;
+ }
- }
+ if (p <= bEnd-16) {
+ const xxh_u8* const limit = bEnd - 16;
- if (p < bEnd) {
+ do {
+ state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
+ state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
+ state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
+ state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
+ } while (p<=limit);
- XXH_memcpy(state->mem32, p, (size_t)(bEnd - p));
- state->memsize = (unsigned)(bEnd - p);
+ }
+ if (p < bEnd) {
+ XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
}
- }
-
- return XXH_OK;
-
+ return XXH_OK;
}
-/*! @ingroup xxh32_family */
-XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t *state) {
-
- xxh_u32 h32;
-
- if (state->large_len) {
-
- h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) +
- XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
- } else {
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
+{
+ xxh_u32 h32;
- h32 = state->v3 /* == seed */ + XXH_PRIME32_5;
-
- }
-
- h32 += state->total_len_32;
+ if (state->large_len) {
+ h32 = XXH_rotl32(state->v[0], 1)
+ + XXH_rotl32(state->v[1], 7)
+ + XXH_rotl32(state->v[2], 12)
+ + XXH_rotl32(state->v[3], 18);
+ } else {
+ h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
+ }
- return XXH32_finalize(h32, (const xxh_u8 *)state->mem32, state->memsize,
- XXH_aligned);
+ h32 += state->total_len_32;
+ return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
}
+#endif /* !XXH_NO_STREAM */
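
/*
 * A streaming usage sketch, assuming XXH_INLINE_ALL and that
 * XXH_NO_STREAM is not defined: input may be fed in arbitrary chunks,
 * and the digest matches the one-shot XXH32() over the concatenation.
 */
#define XXH_INLINE_ALL
#include "xxhash.h"
#include <stdio.h>

int main(void)
{
    XXH32_state_t *st = XXH32_createState();
    if (st == NULL) return 1;
    XXH32_reset(st, 0 /* seed */);
    XXH32_update(st, "hello, ", 7);
    XXH32_update(st, "xxhash", 6);
    {
        XXH32_hash_t h = XXH32_digest(st); /* == XXH32("hello, xxhash", 13, 0) */
        printf("%08x\n", (unsigned)h);
    }
    XXH32_freeState(st);
    return 0;
}
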
/******* Canonical representation *******/
-/*!
- * @ingroup xxh32_family
- * The default return values from XXH functions are unsigned 32 and 64 bit
- * integers.
- *
- * The canonical representation uses big endian convention, the same convention
- * as human-readable numbers (large digits first).
- *
- * This way, hash values can be written into a file or buffer, remaining
- * comparable across different systems.
- *
- * The following functions allow transformation of hash values to and from their
- * canonical format.
- */
-XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst,
- XXH32_hash_t hash) {
-
- XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
- if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
- memcpy(dst, &hash, sizeof(*dst));
-
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+ XXH_memcpy(dst, &hash, sizeof(*dst));
}
-
-/*! @ingroup xxh32_family */
-XXH_PUBLIC_API XXH32_hash_t
-XXH32_hashFromCanonical(const XXH32_canonical_t *src) {
-
- return XXH_readBE32(src);
-
+/*! @ingroup XXH32_family */
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+{
+ return XXH_readBE32(src);
}
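
/*
 * A canonical-representation sketch, assuming XXH_INLINE_ALL: the
 * canonical form stores the digest big-endian, so it can be written to a
 * file or sent over the wire and read back identically on any host.
 */
#define XXH_INLINE_ALL
#include "xxhash.h"
#include <assert.h>

int main(void)
{
    XXH32_hash_t      h = XXH32("abc", 3, 0);
    XXH32_canonical_t canon;
    XXH32_canonicalFromHash(&canon, h); /* canon.digest holds big-endian bytes */
    assert(XXH32_hashFromCanonical(&canon) == h);
    return 0;
}
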
- #ifndef XXH_NO_LONG_LONG
+
+#ifndef XXH_NO_LONG_LONG
/* *******************************************************************
- * 64-bit hash functions
- *********************************************************************/
+* 64-bit hash functions
+*********************************************************************/
/*!
* @}
* @ingroup impl
* @{
-
*/
/******* Memory access *******/
typedef XXH64_hash_t xxh_u64;
- #ifdef XXH_OLD_NAMES
- #define U64 xxh_u64
- #endif
-
- #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 3))
- /*
- * Manual byteshift. Best for old compilers which don't inline memcpy.
- * We actually directly use XXH_readLE64 and XXH_readBE64.
- */
- #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 2))
-
-/* Force direct memory access. Only works on CPU which support unaligned memory
- * access in hardware */
-static xxh_u64 XXH_read64(const void *memPtr) {
+#ifdef XXH_OLD_NAMES
+# define U64 xxh_u64
+#endif
- return *(const xxh_u64 *)memPtr;
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+/*
+ * Manual byteshift. Best for old compilers which don't inline memcpy.
+ * We actually directly use XXH_readLE64 and XXH_readBE64.
+ */
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static xxh_u64 XXH_read64(const void* memPtr)
+{
+ return *(const xxh_u64*) memPtr;
}
- #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1))
-
- /*
- * __pack instructions are safer, but compiler specific, hence potentially
- * problematic for some compilers.
- *
- * Currently only defined for GCC and ICC.
- */
- #ifdef XXH_OLD_NAMES
-typedef union {
-
- xxh_u32 u32;
- xxh_u64 u64;
-
-} __attribute__((packed)) unalign64;
-
- #endif
-static xxh_u64 XXH_read64(const void *ptr) {
-
- typedef union {
-
- xxh_u32 u32;
- xxh_u64 u64;
-
- } __attribute__((packed)) xxh_unalign64;
-
- return ((const xxh_unalign64 *)ptr)->u64;
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+/*
+ * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
+ * documentation claimed that it only increased the alignment, but actually it
+ * can decrease it on gcc, clang, and icc:
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
+ * https://gcc.godbolt.org/z/xYez1j67Y.
+ */
+#ifdef XXH_OLD_NAMES
+typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
+#endif
+static xxh_u64 XXH_read64(const void* ptr)
+{
+ typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64;
+ return *((const xxh_unalign64*)ptr);
}
- #else
+#else
/*
* Portable and safe solution. Generally efficient.
- * see: https://stackoverflow.com/a/32095106/646947
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
*/
-static xxh_u64 XXH_read64(const void *memPtr) {
-
- xxh_u64 val;
- memcpy(&val, memPtr, sizeof(val));
- return val;
-
-}
-
- #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
-
- #if defined(_MSC_VER) /* Visual Studio */
- #define XXH_swap64 _byteswap_uint64
- #elif XXH_GCC_VERSION >= 403
- #define XXH_swap64 __builtin_bswap64
- #else
-static xxh_u64 XXH_swap64(xxh_u64 x) {
-
- return ((x << 56) & 0xff00000000000000ULL) |
- ((x << 40) & 0x00ff000000000000ULL) |
- ((x << 24) & 0x0000ff0000000000ULL) |
- ((x << 8) & 0x000000ff00000000ULL) |
- ((x >> 8) & 0x00000000ff000000ULL) |
- ((x >> 24) & 0x0000000000ff0000ULL) |
- ((x >> 40) & 0x000000000000ff00ULL) |
- ((x >> 56) & 0x00000000000000ffULL);
-
+static xxh_u64 XXH_read64(const void* memPtr)
+{
+ xxh_u64 val;
+ XXH_memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap64 _byteswap_uint64
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap64 __builtin_bswap64
+#else
+static xxh_u64 XXH_swap64(xxh_u64 x)
+{
+ return ((x << 56) & 0xff00000000000000ULL) |
+ ((x << 40) & 0x00ff000000000000ULL) |
+ ((x << 24) & 0x0000ff0000000000ULL) |
+ ((x << 8) & 0x000000ff00000000ULL) |
+ ((x >> 8) & 0x00000000ff000000ULL) |
+ ((x >> 24) & 0x0000000000ff0000ULL) |
+ ((x >> 40) & 0x000000000000ff00ULL) |
+ ((x >> 56) & 0x00000000000000ffULL);
}
+#endif
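
/*
 * A quick sanity sketch for the fallback byte swap above (local demo_*
 * copy): the shift-and-mask form must agree with the compiler builtin
 * wherever one exists.
 */
#include <stdint.h>
#include <assert.h>

static uint64_t demo_swap64(uint64_t x)
{
    return ((x << 56) & 0xff00000000000000ULL) | ((x << 40) & 0x00ff000000000000ULL) |
           ((x << 24) & 0x0000ff0000000000ULL) | ((x <<  8) & 0x000000ff00000000ULL) |
           ((x >>  8) & 0x00000000ff000000ULL) | ((x >> 24) & 0x0000000000ff0000ULL) |
           ((x >> 40) & 0x000000000000ff00ULL) | ((x >> 56) & 0x00000000000000ffULL);
}

int main(void)
{
    assert(demo_swap64(0x0102030405060708ULL) == 0x0807060504030201ULL);
#if defined(__GNUC__)
    assert(demo_swap64(0xCAFEF00DDEADBEEFULL) == __builtin_bswap64(0xCAFEF00DDEADBEEFULL));
#endif
    return 0;
}
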
- #endif
-
- /* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
- #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 3))
-
-XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void *memPtr) {
- const xxh_u8 *bytePtr = (const xxh_u8 *)memPtr;
- return bytePtr[0] | ((xxh_u64)bytePtr[1] << 8) | ((xxh_u64)bytePtr[2] << 16) |
- ((xxh_u64)bytePtr[3] << 24) | ((xxh_u64)bytePtr[4] << 32) |
- ((xxh_u64)bytePtr[5] << 40) | ((xxh_u64)bytePtr[6] << 48) |
- ((xxh_u64)bytePtr[7] << 56);
+/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
+XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[0]
+ | ((xxh_u64)bytePtr[1] << 8)
+ | ((xxh_u64)bytePtr[2] << 16)
+ | ((xxh_u64)bytePtr[3] << 24)
+ | ((xxh_u64)bytePtr[4] << 32)
+ | ((xxh_u64)bytePtr[5] << 40)
+ | ((xxh_u64)bytePtr[6] << 48)
+ | ((xxh_u64)bytePtr[7] << 56);
}
-XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void *memPtr) {
-
- const xxh_u8 *bytePtr = (const xxh_u8 *)memPtr;
- return bytePtr[7] | ((xxh_u64)bytePtr[6] << 8) | ((xxh_u64)bytePtr[5] << 16) |
- ((xxh_u64)bytePtr[4] << 24) | ((xxh_u64)bytePtr[3] << 32) |
- ((xxh_u64)bytePtr[2] << 40) | ((xxh_u64)bytePtr[1] << 48) |
- ((xxh_u64)bytePtr[0] << 56);
-
+XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
+{
+ const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
+ return bytePtr[7]
+ | ((xxh_u64)bytePtr[6] << 8)
+ | ((xxh_u64)bytePtr[5] << 16)
+ | ((xxh_u64)bytePtr[4] << 24)
+ | ((xxh_u64)bytePtr[3] << 32)
+ | ((xxh_u64)bytePtr[2] << 40)
+ | ((xxh_u64)bytePtr[1] << 48)
+ | ((xxh_u64)bytePtr[0] << 56);
}
- #else
-XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void *ptr) {
-
- return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
-
+#else
+XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
}
-static xxh_u64 XXH_readBE64(const void *ptr) {
-
- return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
-
+static xxh_u64 XXH_readBE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
+#endif
- #endif
-
-XXH_FORCE_INLINE xxh_u64 XXH_readLE64_align(const void *ptr,
- XXH_alignment align) {
-
- if (align == XXH_unaligned)
- return XXH_readLE64(ptr);
- else
- return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64 *)ptr
- : XXH_swap64(*(const xxh_u64 *)ptr);
-
+XXH_FORCE_INLINE xxh_u64
+XXH_readLE64_align(const void* ptr, XXH_alignment align)
+{
+ if (align==XXH_unaligned)
+ return XXH_readLE64(ptr);
+ else
+ return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
}
- /******* xxh64 *******/
- /*!
- * @}
- * @defgroup xxh64_impl XXH64 implementation
- * @ingroup impl
- * @{
-
- */
- /* #define rather that static const, to be used as initializers */
- #define XXH_PRIME64_1 \
- 0x9E3779B185EBCA87ULL /*!< \
- 0b1001111000110111011110011011000110000101111010111100101010000111 \
- */
- #define XXH_PRIME64_2 \
- 0xC2B2AE3D27D4EB4FULL /*!< \
- 0b1100001010110010101011100011110100100111110101001110101101001111 \
- */
- #define XXH_PRIME64_3 \
- 0x165667B19E3779F9ULL /*!< \
- 0b0001011001010110011001111011000110011110001101110111100111111001 \
- */
- #define XXH_PRIME64_4 \
- 0x85EBCA77C2B2AE63ULL /*!< \
- 0b1000010111101011110010100111011111000010101100101010111001100011 \
- */
- #define XXH_PRIME64_5 \
- 0x27D4EB2F165667C5ULL /*!< \
- 0b0010011111010100111010110010111100010110010101100110011111000101 \
- */
-
- #ifdef XXH_OLD_NAMES
- #define PRIME64_1 XXH_PRIME64_1
- #define PRIME64_2 XXH_PRIME64_2
- #define PRIME64_3 XXH_PRIME64_3
- #define PRIME64_4 XXH_PRIME64_4
- #define PRIME64_5 XXH_PRIME64_5
- #endif
-static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) {
-
- acc += input * XXH_PRIME64_2;
- acc = XXH_rotl64(acc, 31);
- acc *= XXH_PRIME64_1;
- return acc;
+/******* xxh64 *******/
+/*!
+ * @}
+ * @defgroup XXH64_impl XXH64 implementation
+ * @ingroup impl
+ *
+ * Details on the XXH64 implementation.
+ * @{
+ */
+/* #define rather than static const, to be used as initializers */
+#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
+#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
+#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
+#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
+#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
+
+#ifdef XXH_OLD_NAMES
+# define PRIME64_1 XXH_PRIME64_1
+# define PRIME64_2 XXH_PRIME64_2
+# define PRIME64_3 XXH_PRIME64_3
+# define PRIME64_4 XXH_PRIME64_4
+# define PRIME64_5 XXH_PRIME64_5
+#endif
+/*! @copydoc XXH32_round */
+static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
+{
+ acc += input * XXH_PRIME64_2;
+ acc = XXH_rotl64(acc, 31);
+ acc *= XXH_PRIME64_1;
+ return acc;
}
-static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) {
-
- val = XXH64_round(0, val);
- acc ^= val;
- acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
- return acc;
-
+static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
+{
+ val = XXH64_round(0, val);
+ acc ^= val;
+ acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
+ return acc;
}
-static xxh_u64 XXH64_avalanche(xxh_u64 h64) {
-
- h64 ^= h64 >> 33;
- h64 *= XXH_PRIME64_2;
- h64 ^= h64 >> 29;
- h64 *= XXH_PRIME64_3;
- h64 ^= h64 >> 32;
- return h64;
-
+/*! @copydoc XXH32_avalanche */
+static xxh_u64 XXH64_avalanche(xxh_u64 hash)
+{
+ hash ^= hash >> 33;
+ hash *= XXH_PRIME64_2;
+ hash ^= hash >> 29;
+ hash *= XXH_PRIME64_3;
+ hash ^= hash >> 32;
+ return hash;
}
- #define XXH_get64bits(p) XXH_readLE64_align(p, align)
-
-static xxh_u64 XXH64_finalize(xxh_u64 h64, const xxh_u8 *ptr, size_t len,
- XXH_alignment align) {
-
- len &= 31;
- while (len >= 8) {
-
- xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
- ptr += 8;
- h64 ^= k1;
- h64 = XXH_rotl64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;
- len -= 8;
-
- }
- if (len >= 4) {
-
- h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
- ptr += 4;
- h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
- len -= 4;
-
- }
-
- while (len > 0) {
-
- h64 ^= (*ptr++) * XXH_PRIME64_5;
- h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;
- --len;
-
- }
-
- return XXH64_avalanche(h64);
+#define XXH_get64bits(p) XXH_readLE64_align(p, align)
+/*!
+ * @internal
+ * @brief Processes the last 0-31 bytes of @p ptr.
+ *
+ * There may be up to 31 bytes remaining to consume from the input.
+ * This final stage will digest them to ensure that all input bytes are present
+ * in the final mix.
+ *
+ * @param hash The hash to finalize.
+ * @param ptr The pointer to the remaining input.
+ * @param len The remaining length, modulo 32.
+ * @param align Whether @p ptr is aligned.
+ * @return The finalized hash
+ * @see XXH32_finalize().
+ */
+static XXH_PUREF xxh_u64
+XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
+{
+ if (ptr==NULL) XXH_ASSERT(len == 0);
+ len &= 31;
+ while (len >= 8) {
+ xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
+ ptr += 8;
+ hash ^= k1;
+ hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
+ len -= 8;
+ }
+ if (len >= 4) {
+ hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
+ ptr += 4;
+ hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
+ len -= 4;
+ }
+ while (len > 0) {
+ hash ^= (*ptr++) * XXH_PRIME64_5;
+ hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
+ --len;
+ }
+ return XXH64_avalanche(hash);
}
- #ifdef XXH_OLD_NAMES
- #define PROCESS1_64 XXH_PROCESS1_64
- #define PROCESS4_64 XXH_PROCESS4_64
- #define PROCESS8_64 XXH_PROCESS8_64
- #else
- #undef XXH_PROCESS1_64
- #undef XXH_PROCESS4_64
- #undef XXH_PROCESS8_64
- #endif
-
-XXH_FORCE_INLINE xxh_u64 XXH64_endian_align(const xxh_u8 *input, size_t len,
- xxh_u64 seed, XXH_alignment align) {
-
- const xxh_u8 *bEnd = input ? input + len : NULL;
- xxh_u64 h64;
-
- #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
- (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
- if (input == NULL) {
-
- len = 0;
- bEnd = input = (const xxh_u8 *)(size_t)32;
-
- }
-
- #endif
-
- if (len >= 32) {
-
- const xxh_u8 *const limit = bEnd - 32;
- xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
- xxh_u64 v2 = seed + XXH_PRIME64_2;
- xxh_u64 v3 = seed + 0;
- xxh_u64 v4 = seed - XXH_PRIME64_1;
-
- do {
-
- v1 = XXH64_round(v1, XXH_get64bits(input));
- input += 8;
- v2 = XXH64_round(v2, XXH_get64bits(input));
- input += 8;
- v3 = XXH64_round(v3, XXH_get64bits(input));
- input += 8;
- v4 = XXH64_round(v4, XXH_get64bits(input));
- input += 8;
-
- } while (input <= limit);
-
- h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) +
- XXH_rotl64(v4, 18);
- h64 = XXH64_mergeRound(h64, v1);
- h64 = XXH64_mergeRound(h64, v2);
- h64 = XXH64_mergeRound(h64, v3);
- h64 = XXH64_mergeRound(h64, v4);
-
- } else {
-
- h64 = seed + XXH_PRIME64_5;
+#ifdef XXH_OLD_NAMES
+# define PROCESS1_64 XXH_PROCESS1_64
+# define PROCESS4_64 XXH_PROCESS4_64
+# define PROCESS8_64 XXH_PROCESS8_64
+#else
+# undef XXH_PROCESS1_64
+# undef XXH_PROCESS4_64
+# undef XXH_PROCESS8_64
+#endif
- }
+/*!
+ * @internal
+ * @brief The implementation for @ref XXH64().
+ *
+ * @param input , len , seed Directly passed from @ref XXH64().
+ * @param align Whether @p input is aligned.
+ * @return The calculated hash.
+ */
+XXH_FORCE_INLINE XXH_PUREF xxh_u64
+XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
+{
+ xxh_u64 h64;
+ if (input==NULL) XXH_ASSERT(len == 0);
+
+ if (len>=32) {
+ const xxh_u8* const bEnd = input + len;
+ const xxh_u8* const limit = bEnd - 31;
+ xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+ xxh_u64 v2 = seed + XXH_PRIME64_2;
+ xxh_u64 v3 = seed + 0;
+ xxh_u64 v4 = seed - XXH_PRIME64_1;
+
+ do {
+ v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
+ v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
+ v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
+ v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
+ } while (input<limit);
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
- h64 += (xxh_u64)len;
+ } else {
+ h64 = seed + XXH_PRIME64_5;
+ }
- return XXH64_finalize(h64, input, len, align);
+ h64 += (xxh_u64) len;
+ return XXH64_finalize(h64, input, len, align);
}
-/*! @ingroup xxh64_family */
-XXH_PUBLIC_API XXH64_hash_t XXH64(const void *input, size_t len,
- XXH64_hash_t seed) {
- #if 0
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
XXH64_state_t state;
XXH64_reset(&state, seed);
XXH64_update(&state, (const xxh_u8*)input, len);
return XXH64_digest(&state);
- #else
- if (XXH_FORCE_ALIGN_CHECK) {
-
- if ((((size_t)input) & 7) ==
- 0) { /* Input is aligned, let's leverage the speed advantage */
- return XXH64_endian_align((const xxh_u8 *)input, len, seed, XXH_aligned);
-
- }
-
- }
+#else
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
+ return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+ } }
- return XXH64_endian_align((const xxh_u8 *)input, len, seed, XXH_unaligned);
-
- #endif
+ return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+#endif
}
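
/*
 * A one-shot XXH64 sketch, assuming XXH_INLINE_ALL; on 64-bit hosts the
 * 64-bit variant processes long inputs roughly twice as fast as XXH32.
 */
#define XXH_INLINE_ALL
#include "xxhash.h"
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char msg[] = "the quick brown fox";
    XXH64_hash_t h = XXH64(msg, strlen(msg), 0xA5B35705ULL /* arbitrary seed */);
    printf("%016llx\n", (unsigned long long)h);
    return 0;
}
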
/******* Hash Streaming *******/
-
-/*! @ingroup xxh64_family*/
-XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void) {
-
- return (XXH64_state_t *)XXH_malloc(sizeof(XXH64_state_t));
-
-}
-
-/*! @ingroup xxh64_family */
-XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr) {
-
- XXH_free(statePtr);
- return XXH_OK;
-
-}
-
-/*! @ingroup xxh64_family */
-XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dstState,
- const XXH64_state_t *srcState) {
-
- memcpy(dstState, srcState, sizeof(*dstState));
-
+#ifndef XXH_NO_STREAM
+/*! @ingroup XXH64_family*/
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+ return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
}
-/*! @ingroup xxh64_family */
-XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr,
- XXH64_hash_t seed) {
-
- XXH64_state_t state; /* use a local state to memcpy() in order to avoid
- strict-aliasing warnings */
- memset(&state, 0, sizeof(state));
- state.v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
- state.v2 = seed + XXH_PRIME64_2;
- state.v3 = seed + 0;
- state.v4 = seed - XXH_PRIME64_1;
- /* do not write into reserved64, might be removed in a future version */
- memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
- return XXH_OK;
-
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
+{
+ XXH_memcpy(dstState, srcState, sizeof(*dstState));
}
-/*! @ingroup xxh64_family */
-XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *state,
- const void *input, size_t len) {
-
- if (input == NULL)
- #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
- (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
+{
+ XXH_ASSERT(statePtr != NULL);
+ memset(statePtr, 0, sizeof(*statePtr));
+ statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+ statePtr->v[1] = seed + XXH_PRIME64_2;
+ statePtr->v[2] = seed + 0;
+ statePtr->v[3] = seed - XXH_PRIME64_1;
return XXH_OK;
- #else
- return XXH_ERROR;
- #endif
-
- {
-
- const xxh_u8 *p = (const xxh_u8 *)input;
- const xxh_u8 *const bEnd = p + len;
-
- state->total_len += len;
-
- if (state->memsize + len < 32) { /* fill in tmp buffer */
- XXH_memcpy(((xxh_u8 *)state->mem64) + state->memsize, input, len);
- state->memsize += (xxh_u32)len;
- return XXH_OK;
-
- }
-
- if (state->memsize) { /* tmp buffer is full */
- XXH_memcpy(((xxh_u8 *)state->mem64) + state->memsize, input,
- 32 - state->memsize);
- state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64 + 0));
- state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64 + 1));
- state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64 + 2));
- state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64 + 3));
- p += 32 - state->memsize;
- state->memsize = 0;
+}
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len)
+{
+ if (input==NULL) {
+ XXH_ASSERT(len == 0);
+ return XXH_OK;
}
- if (p + 32 <= bEnd) {
-
- const xxh_u8 *const limit = bEnd - 32;
- xxh_u64 v1 = state->v1;
- xxh_u64 v2 = state->v2;
- xxh_u64 v3 = state->v3;
- xxh_u64 v4 = state->v4;
+ { const xxh_u8* p = (const xxh_u8*)input;
+ const xxh_u8* const bEnd = p + len;
- do {
+ state->total_len += len;
- v1 = XXH64_round(v1, XXH_readLE64(p));
- p += 8;
- v2 = XXH64_round(v2, XXH_readLE64(p));
- p += 8;
- v3 = XXH64_round(v3, XXH_readLE64(p));
- p += 8;
- v4 = XXH64_round(v4, XXH_readLE64(p));
- p += 8;
-
- } while (p <= limit);
+ if (state->memsize + len < 32) { /* fill in tmp buffer */
+ XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
+ state->memsize += (xxh_u32)len;
+ return XXH_OK;
+ }
- state->v1 = v1;
- state->v2 = v2;
- state->v3 = v3;
- state->v4 = v4;
+ if (state->memsize) { /* tmp buffer is full */
+ XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
+ state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
+ state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
+ state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
+ state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
+ p += 32 - state->memsize;
+ state->memsize = 0;
+ }
- }
+ if (p+32 <= bEnd) {
+ const xxh_u8* const limit = bEnd - 32;
- if (p < bEnd) {
+ do {
+ state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
+ state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
+ state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
+ state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
+ } while (p<=limit);
- XXH_memcpy(state->mem64, p, (size_t)(bEnd - p));
- state->memsize = (unsigned)(bEnd - p);
+ }
+ if (p < bEnd) {
+ XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
}
- }
-
- return XXH_OK;
-
+ return XXH_OK;
}
-/*! @ingroup xxh64_family */
-XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t *state) {
- xxh_u64 h64;
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state)
+{
+ xxh_u64 h64;
- if (state->total_len >= 32) {
-
- xxh_u64 const v1 = state->v1;
- xxh_u64 const v2 = state->v2;
- xxh_u64 const v3 = state->v3;
- xxh_u64 const v4 = state->v4;
-
- h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) +
- XXH_rotl64(v4, 18);
- h64 = XXH64_mergeRound(h64, v1);
- h64 = XXH64_mergeRound(h64, v2);
- h64 = XXH64_mergeRound(h64, v3);
- h64 = XXH64_mergeRound(h64, v4);
-
- } else {
+ if (state->total_len >= 32) {
+ h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
+ h64 = XXH64_mergeRound(h64, state->v[0]);
+ h64 = XXH64_mergeRound(h64, state->v[1]);
+ h64 = XXH64_mergeRound(h64, state->v[2]);
+ h64 = XXH64_mergeRound(h64, state->v[3]);
+ } else {
+ h64 = state->v[2] /*seed*/ + XXH_PRIME64_5;
+ }
- h64 = state->v3 /*seed*/ + XXH_PRIME64_5;
+ h64 += (xxh_u64) state->total_len;
- }
+ return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
+}
+#endif /* !XXH_NO_STREAM */
- h64 += (xxh_u64)state->total_len;
+/******* Canonical representation *******/
- return XXH64_finalize(h64, (const xxh_u8 *)state->mem64,
- (size_t)state->total_len, XXH_aligned);
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+ XXH_memcpy(dst, &hash, sizeof(*dst));
+}
+/*! @ingroup XXH64_family */
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src)
+{
+ return XXH_readBE64(src);
}
-/******* Canonical representation *******/
+#ifndef XXH_NO_XXH3
-/*! @ingroup xxh64_family */
-XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst,
- XXH64_hash_t hash) {
+/* *********************************************************************
+* XXH3
+* New generation hash designed for speed on small keys and vectorization
+************************************************************************ */
+/*!
+ * @}
+ * @defgroup XXH3_impl XXH3 implementation
+ * @ingroup impl
+ * @{
+ */
- XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
- if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
- memcpy(dst, &hash, sizeof(*dst));
+/* === Compiler specifics === */
-}
+#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
+# define XXH_RESTRICT /* disable */
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
+# define XXH_RESTRICT restrict
+#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
+ || (defined (__clang__)) \
+ || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
+ || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
+/*
+ * There are a LOT more compilers that recognize __restrict but this
+ * covers the major ones.
+ */
+# define XXH_RESTRICT __restrict
+#else
+# define XXH_RESTRICT /* disable */
+#endif
-/*! @ingroup xxh64_family */
-XXH_PUBLIC_API XXH64_hash_t
-XXH64_hashFromCanonical(const XXH64_canonical_t *src) {
+#if (defined(__GNUC__) && (__GNUC__ >= 3)) \
+ || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
+ || defined(__clang__)
+# define XXH_likely(x) __builtin_expect(x, 1)
+# define XXH_unlikely(x) __builtin_expect(x, 0)
+#else
+# define XXH_likely(x) (x)
+# define XXH_unlikely(x) (x)
+#endif
- return XXH_readBE64(src);
+#ifndef XXH_HAS_INCLUDE
+# ifdef __has_include
+/*
+ * Not defined as XXH_HAS_INCLUDE(x) (function-like) because
+ * this causes segfaults in Apple Clang 4.2 (on Mac OS X 10.7 Lion)
+ */
+# define XXH_HAS_INCLUDE __has_include
+# else
+# define XXH_HAS_INCLUDE(x) 0
+# endif
+#endif
-}
+#if defined(__GNUC__) || defined(__clang__)
+# if defined(__ARM_FEATURE_SVE)
+# include <arm_sve.h>
+# endif
+# if defined(__ARM_NEON__) || defined(__ARM_NEON) \
+ || (defined(_M_ARM) && _M_ARM >= 7) \
+ || defined(_M_ARM64) || defined(_M_ARM64EC) \
+ || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */
+# define inline __inline__ /* circumvent a clang bug */
+# include <arm_neon.h>
+# undef inline
+# elif defined(__AVX2__)
+# include <immintrin.h>
+# elif defined(__SSE2__)
+# include <emmintrin.h>
+# endif
+#endif
- #ifndef XXH_NO_XXH3
+#if defined(_MSC_VER)
+# include <intrin.h>
+#endif
- /* *********************************************************************
- * XXH3
- * New generation hash designed for speed on small keys and vectorization
- ************************************************************************ */
- /*!
- * @}
- * @defgroup xxh3_impl XXH3 implementation
- * @ingroup impl
- * @{
+/*
+ * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
+ * remaining a true 64-bit/128-bit hash function.
+ *
+ * This is done by prioritizing a subset of 64-bit operations that can be
+ * emulated without too many steps on the average 32-bit machine.
+ *
+ * For example, these two lines seem similar, and run equally fast on 64-bit:
+ *
+ * xxh_u64 x;
+ * x ^= (x >> 47); // good
+ * x ^= (x >> 13); // bad
+ *
+ * However, to a 32-bit machine, there is a major difference.
+ *
+ * x ^= (x >> 47) looks like this:
+ *
+ * x.lo ^= (x.hi >> (47 - 32));
+ *
+ * while x ^= (x >> 13) looks like this:
+ *
+ * // note: funnel shifts are not usually cheap.
+ * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
+ * x.hi ^= (x.hi >> 13);
+ *
+ * The first one is significantly faster than the second, simply because the
+ * shift is larger than 32. This means:
+ * - All the bits we need are in the upper 32 bits, so we can ignore the lower
+ * 32 bits in the shift.
+ * - The shift result will always fit in the lower 32 bits, and therefore,
+ * we can ignore the upper 32 bits in the xor.
+ *
+ * Thanks to this optimization, XXH3 only requires these features to be efficient:
+ *
+ * - Usable unaligned access
+ * - A 32-bit or 64-bit ALU
+ * - If 32-bit, a decent ADC instruction
+ * - A 32 or 64-bit multiply with a 64-bit result
+ * - For the 128-bit variant, a decent byteswap helps short inputs.
+ *
+ * The first two are already required by XXH32, and almost all 32-bit and 64-bit
+ * platforms which can run XXH32 can run XXH3 efficiently.
+ *
+ * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
+ * notable exception.
+ *
+ * First of all, Thumb-1 lacks support for the UMULL instruction which
+ * performs the important long multiply. This means numerous __aeabi_lmul
+ * calls.
+ *
+ * Second of all, the 8 functional registers are just not enough.
+ * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
+ * Lo registers, and this shuffling results in thousands more MOVs than A32.
+ *
+ * A32 and T32 don't have this limitation. They can access all 14 registers,
+ * do a 32->64 multiply with UMULL, and the flexible operand allowing free
+ * shifts is helpful, too.
+ *
+ * Therefore, we do a quick sanity check.
+ *
+ * If compiling Thumb-1 for a target which supports ARM instructions, we will
+ * emit a warning, as it is not a "sane" platform to compile for.
+ *
+ * Usually, if this happens, it is because of an accident and you probably need
+ * to specify -march, as you likely meant to compile for a newer architecture.
+ *
+ * Credit: large sections of the vectorial and asm source code paths
+ * have been contributed by @easyaspi314
+ */
+#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
+# warning "XXH3 is highly inefficient without ARM or Thumb-2."
+#endif
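
/*
 * A worked sketch of the two shift forms discussed above, with a 64-bit
 * value emulated as two 32-bit halves (hypothetical u64_pair type): a
 * shift by 47 touches only the high half, while a shift by 13 needs a
 * funnel shift to move high bits down into the low half.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t lo, hi; } u64_pair;

static u64_pair xorshift47(u64_pair x) /* x ^= x >> 47: one shift, one xor */
{
    x.lo ^= x.hi >> (47 - 32);
    return x;
}

static u64_pair xorshift13(u64_pair x) /* x ^= x >> 13: funnel shift needed */
{
    x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
    x.hi ^= x.hi >> 13;
    return x;
}

int main(void)
{
    u64_pair x = { 0x89ABCDEFU, 0x01234567U }; /* emulates 0x0123456789ABCDEF */
    u64_pair a = xorshift47(x), b = xorshift13(x);
    printf("x ^= x>>47 -> hi=%08x lo=%08x\n", a.hi, a.lo);
    printf("x ^= x>>13 -> hi=%08x lo=%08x\n", b.hi, b.lo);
    return 0;
}
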
- */
+/* ==========================================
+ * Vectorization detection
+ * ========================================== */
- /* === Compiler specifics === */
-
- #if ((defined(sun) || defined(__sun)) && \
- __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested \
- with GCC 5.5 */
- #define XXH_RESTRICT /* disable */
- #elif defined(__STDC_VERSION__) && \
- __STDC_VERSION__ >= 199901L /* >= C99 */
- #define XXH_RESTRICT restrict
- #else
- /* Note: it might be useful to define __restrict or __restrict__ for
- * some C++ compilers */
- #define XXH_RESTRICT /* disable */
- #endif
-
- #if (defined(__GNUC__) && (__GNUC__ >= 3)) || \
- (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || \
- defined(__clang__)
- #define XXH_likely(x) __builtin_expect(x, 1)
- #define XXH_unlikely(x) __builtin_expect(x, 0)
- #else
- #define XXH_likely(x) (x)
- #define XXH_unlikely(x) (x)
- #endif
-
- #if defined(__GNUC__)
- #if defined(__AVX2__)
- #include <immintrin.h>
- #elif defined(__SSE2__)
- #include <emmintrin.h>
- #elif defined(__ARM_NEON__) || defined(__ARM_NEON)
- #define inline __inline__ /* circumvent a clang bug */
- #include <arm_neon.h>
- #undef inline
- #endif
- #elif defined(_MSC_VER)
- #include <intrin.h>
- #endif
-
- /*
- * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
- * remaining a true 64-bit/128-bit hash function.
- *
- * This is done by prioritizing a subset of 64-bit operations that can be
- * emulated without too many steps on the average 32-bit machine.
- *
- * For example, these two lines seem similar, and run equally fast on
- * 64-bit:
- *
- * xxh_u64 x;
- * x ^= (x >> 47); // good
- * x ^= (x >> 13); // bad
- *
- * However, to a 32-bit machine, there is a major difference.
- *
- * x ^= (x >> 47) looks like this:
- *
- * x.lo ^= (x.hi >> (47 - 32));
- *
- * while x ^= (x >> 13) looks like this:
- *
- * // note: funnel shifts are not usually cheap.
- * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
- * x.hi ^= (x.hi >> 13);
- *
- * The first one is significantly faster than the second, simply because
- * the shift is larger than 32. This means:
- * - All the bits we need are in the upper 32 bits, so we can ignore the
- * lower 32 bits in the shift.
- * - The shift result will always fit in the lower 32 bits, and
- * therefore, we can ignore the upper 32 bits in the xor.
- *
- * Thanks to this optimization, XXH3 only requires these features to be
- * efficient:
- *
- * - Usable unaligned access
- * - A 32-bit or 64-bit ALU
- * - If 32-bit, a decent ADC instruction
- * - A 32 or 64-bit multiply with a 64-bit result
- * - For the 128-bit variant, a decent byteswap helps short inputs.
- *
- * The first two are already required by XXH32, and almost all 32-bit and
- * 64-bit platforms which can run XXH32 can run XXH3 efficiently.
- *
- * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is
- * one notable exception.
- *
- * First of all, Thumb-1 lacks support for the UMULL instruction which
- * performs the important long multiply. This means numerous __aeabi_lmul
- * calls.
- *
- * Second of all, the 8 functional registers are just not enough.
- * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic
- * need Lo registers, and this shuffling results in thousands more MOVs
- * than A32.
- *
- * A32 and T32 don't have this limitation. They can access all 14
- * registers, do a 32->64 multiply with UMULL, and the flexible operand
- * allowing free shifts is helpful, too.
- *
- * Therefore, we do a quick sanity check.
- *
- * If compiling Thumb-1 for a target which supports ARM instructions, we
- * will emit a warning, as it is not a "sane" platform to compile for.
- *
- * Usually, if this happens, it is because of an accident and you probably
- * need to specify -march, as you likely meant to compile for a newer
- * architecture.
- *
- * Credit: large sections of the vectorial and asm source code paths
- * have been contributed by @easyaspi314
- */
- #if defined(__thumb__) && !defined(__thumb2__) && \
- defined(__ARM_ARCH_ISA_ARM)
- #warning "XXH3 is highly inefficient without ARM or Thumb-2."
- #endif
-
- /* ==========================================
- * Vectorization detection
- * ========================================== */
-
- #ifdef XXH_DOXYGEN
- /*!
- * @ingroup tuning
- * @brief Overrides the vectorization implementation chosen for XXH3.
- *
- * Can be defined to 0 to disable SIMD or any of the values mentioned in
- * @ref XXH_VECTOR_TYPE.
- *
- * If this is not defined, it uses predefined macros to determine the
- * best implementation.
- */
- #define XXH_VECTOR XXH_SCALAR
+#ifdef XXH_DOXYGEN
+/*!
+ * @ingroup tuning
+ * @brief Overrides the vectorization implementation chosen for XXH3.
+ *
+ * Can be defined to 0 to disable SIMD or any of the values mentioned in
+ * @ref XXH_VECTOR_TYPE.
+ *
+ * If this is not defined, it uses predefined macros to determine the best
+ * implementation.
+ */
+# define XXH_VECTOR XXH_SCALAR
/*!
* @ingroup tuning
* @brief Possible values for @ref XXH_VECTOR.
@@ -3161,461 +3804,494 @@ XXH64_hashFromCanonical(const XXH64_canonical_t *src) {
* Note that these are actually implemented as macros.
*
* If this is not defined, it is detected automatically.
- * @ref XXH_X86DISPATCH overrides this.
+ * internal macro XXH_X86DISPATCH overrides this.
*/
enum XXH_VECTOR_TYPE /* fake enum */ {
+ XXH_SCALAR = 0, /*!< Portable scalar version */
+ XXH_SSE2 = 1, /*!<
+ * SSE2 for Pentium 4, Opteron, all x86_64.
+ *
+ * @note SSE2 is also guaranteed on Windows 10, macOS, and
+ * Android x86.
+ */
+ XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */
+ XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */
+ XXH_NEON = 4, /*!<
+ * NEON for most ARMv7-A, all AArch64, and WASM SIMD128
+ * via the SIMDeverywhere polyfill provided with the
+ * Emscripten SDK.
+ */
+ XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */
+ XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */
+};
+/*!
+ * @ingroup tuning
+ * @brief Selects the minimum alignment for XXH3's accumulators.
+ *
+ * When using SIMD, this should match the alignment required for said vector
+ * type, so, for example, 32 for AVX2.
+ *
+ * Default: Auto detected.
+ */
+# define XXH_ACC_ALIGN 8
+#endif
- XXH_SCALAR = 0, /*!< Portable scalar version */
- XXH_SSE2 = 1, /*!<
- * SSE2 for Pentium 4, Opteron, all x86_64.
- *
- * @note SSE2 is also guaranteed on Windows 10, macOS, and
- * Android x86.
- */
- XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */
- XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */
- XXH_NEON = 4, /*!< NEON for most ARMv7-A and all AArch64 */
- XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */
+/* Actual definition */
+#ifndef XXH_DOXYGEN
+# define XXH_SCALAR 0
+# define XXH_SSE2 1
+# define XXH_AVX2 2
+# define XXH_AVX512 3
+# define XXH_NEON 4
+# define XXH_VSX 5
+# define XXH_SVE 6
+#endif
-};
+#ifndef XXH_VECTOR /* can be defined on command line */
+# if defined(__ARM_FEATURE_SVE)
+# define XXH_VECTOR XXH_SVE
+# elif ( \
+ defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
+ || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
+ || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \
+ ) && ( \
+ defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
+ )
+# define XXH_VECTOR XXH_NEON
+# elif defined(__AVX512F__)
+# define XXH_VECTOR XXH_AVX512
+# elif defined(__AVX2__)
+# define XXH_VECTOR XXH_AVX2
+# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
+# define XXH_VECTOR XXH_SSE2
+# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
+ || (defined(__s390x__) && defined(__VEC__)) \
+ && defined(__GNUC__) /* TODO: IBM XL */
+# define XXH_VECTOR XXH_VSX
+# else
+# define XXH_VECTOR XXH_SCALAR
+# endif
+#endif
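
Because the detection block above is wrapped in #ifndef, XXH_VECTOR can also
be set on the command line to override auto-detection. A minimal sketch,
assuming the usual single-header setup via XXH_INLINE_ALL:

/* Force the scalar path (0 == XXH_SCALAR in the fake enum above),
 * e.g. to rule out SIMD-specific bugs while debugging: */
#define XXH_VECTOR 0
#define XXH_INLINE_ALL
#include "xxhash.h"

#include <stdio.h>

int main(void) {
    const char msg[] = "hello";
    printf("%016llx\n", (unsigned long long)XXH3_64bits(msg, sizeof(msg) - 1));
    return 0;
}
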
- /*!
- * @ingroup tuning
- * @brief Selects the minimum alignment for XXH3's accumulators.
- *
- * When using SIMD, this should match the alignment reqired for said
- * vector type, so, for example, 32 for AVX2.
- *
- * Default: Auto detected.
- */
- #define XXH_ACC_ALIGN 8
- #endif
-
- /* Actual definition */
- #ifndef XXH_DOXYGEN
- #define XXH_SCALAR 0
- #define XXH_SSE2 1
- #define XXH_AVX2 2
- #define XXH_AVX512 3
- #define XXH_NEON 4
- #define XXH_VSX 5
- #endif
-
- #ifndef XXH_VECTOR /* can be defined on command line */
- #if defined(__AVX512F__)
- #define XXH_VECTOR XXH_AVX512
- #elif defined(__AVX2__)
- #define XXH_VECTOR XXH_AVX2
- #elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || \
- (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
- #define XXH_VECTOR XXH_SSE2
- #elif defined(__GNUC__) /* msvc support maybe later */ \
- && (defined(__ARM_NEON__) || defined(__ARM_NEON)) && \
- (defined( \
- __LITTLE_ENDIAN__) /* We only support little endian NEON */ \
- || (defined(__BYTE_ORDER__) && \
- __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
- #define XXH_VECTOR XXH_NEON
- #elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) || \
- (defined(__s390x__) && defined(__VEC__)) && \
- defined(__GNUC__) /* TODO: IBM XL */
- #define XXH_VECTOR XXH_VSX
- #else
- #define XXH_VECTOR XXH_SCALAR
- #endif
- #endif
-
- /*
- * Controls the alignment of the accumulator,
- * for compatibility with aligned vector loads, which are usually faster.
- */
- #ifndef XXH_ACC_ALIGN
- #if defined(XXH_X86DISPATCH)
- #define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
- #elif XXH_VECTOR == XXH_SCALAR /* scalar */
- #define XXH_ACC_ALIGN 8
- #elif XXH_VECTOR == XXH_SSE2 /* sse2 */
- #define XXH_ACC_ALIGN 16
- #elif XXH_VECTOR == XXH_AVX2 /* avx2 */
- #define XXH_ACC_ALIGN 32
- #elif XXH_VECTOR == XXH_NEON /* neon */
- #define XXH_ACC_ALIGN 16
- #elif XXH_VECTOR == XXH_VSX /* vsx */
- #define XXH_ACC_ALIGN 16
- #elif XXH_VECTOR == XXH_AVX512 /* avx512 */
- #define XXH_ACC_ALIGN 64
- #endif
- #endif
-
- #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 || \
- XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
- #define XXH_SEC_ALIGN XXH_ACC_ALIGN
- #else
- #define XXH_SEC_ALIGN 8
- #endif
-
- /*
- * UGLY HACK:
- * GCC usually generates the best code with -O3 for xxHash.
- *
- * However, when targeting AVX2, it is overzealous in its unrolling
- * resulting in code roughly 3/4 the speed of Clang.
- *
- * There are other issues, such as GCC splitting _mm256_loadu_si256 into
- * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization
- * which only applies to Sandy and Ivy Bridge... which don't even support
- * AVX2.
- *
- * That is why when compiling the AVX2 version, it is recommended to use
- * either -O2 -mavx2 -march=haswell or -O2 -mavx2
- * -mno-avx256-split-unaligned-load for decent performance, or to use
- * Clang instead.
- *
- * Fortunately, we can control the first one with a pragma that forces GCC
- * into -O2, but the other one we can't control without "failed to inline
- * always inline function due to target mismatch" warnings.
- */
- #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
- && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
- && defined(__OPTIMIZE__) && \
- !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
- #pragma GCC push_options
- #pragma GCC optimize("-O2")
- #endif
-
- #if XXH_VECTOR == XXH_NEON
- /*
- * NEON's setup for vmlal_u32 is a little more complicated than it is on
- * SSE2, AVX2, and VSX.
- *
- * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an
- * upcast.
- *
- * To do the same operation, the 128-bit 'Q' register needs to be split
- * into two 64-bit 'D' registers, performing this operation::
- *
- * [ a | b ] |
- * '---------. .--------' | | x |
- * | .---------' '--------. |
- * [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[ a >> 32 | b >> 32
- * ]
- *
- * Due to significant changes in aarch64, the fastest method for aarch64
- * is completely different than the fastest method for ARMv7-A.
- *
- * ARMv7-A treats D registers as unions overlaying Q registers, so
- * modifying D11 will modify the high half of Q5. This is similar to how
- * modifying AH will only affect bits 8-15 of AX on x86.
- *
- * VZIP takes two registers, and puts even lanes in one register and odd
- * lanes in the other.
- *
- * On ARMv7-A, this strangely modifies both parameters in place instead
- * of taking the usual 3-operand form.
- *
- * Therefore, if we want to do this, we can simply use a D-form VZIP.32
- * on the lower and upper halves of the Q register to end up with the
- * high and low halves where we want - all in one instruction.
- *
- * vzip.32 d10, d11 @ d10 = { d10[0], d11[0] }; d11 = {
+/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
+#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
+# ifdef _MSC_VER
+# pragma warning(once : 4606)
+# else
+# warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
+# endif
+# undef XXH_VECTOR
+# define XXH_VECTOR XXH_SCALAR
+#endif
- * d10[1], d11[1] }
- *
- * Unfortunately we need inline assembly for this: Instructions
- * modifying two registers at once is not possible in GCC or Clang's IR,
- * and they have to create a copy.
- *
- * aarch64 requires a different approach.
- *
- * In order to make it easier to write a decent compiler for aarch64,
- * many quirks were removed, such as conditional execution.
- *
- * NEON was also affected by this.
- *
- * aarch64 cannot access the high bits of a Q-form register, and writes
- * to a D-form register zero the high bits, similar to how writes to
- * W-form scalar registers (or DWORD registers on x86_64) work.
- *
- * The formerly free vget_high intrinsics now require a vext (with a few
- * exceptions)
- *
- * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the
- * equivalent of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to
- * only modify one operand.
- *
- * The equivalent of the VZIP.32 on the lower and upper halves would be
- * this mess:
- *
- * ext v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0],
- * v0[1] } zip1 v1.2s, v0.2s, v2.2s // v1 = { v0[0], v2[0] } zip2
- * v0.2s, v0.2s, v1.2s // v0 = { v0[1], v2[1] }
- *
- * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64
- * (SHRN):
- *
- * shrn v1.2s, v0.2d, #32 // v1 = (uint32x2_t)(v0 >> 32);
- * xtn v0.2s, v0.2d // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
- *
- * This is available on ARMv7-A, but is less efficient than a single
- * VZIP.32.
- */
+/*
+ * Controls the alignment of the accumulator,
+ * for compatibility with aligned vector loads, which are usually faster.
+ */
+#ifndef XXH_ACC_ALIGN
+# if defined(XXH_X86DISPATCH)
+# define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
+# elif XXH_VECTOR == XXH_SCALAR /* scalar */
+# define XXH_ACC_ALIGN 8
+# elif XXH_VECTOR == XXH_SSE2 /* sse2 */
+# define XXH_ACC_ALIGN 16
+# elif XXH_VECTOR == XXH_AVX2 /* avx2 */
+# define XXH_ACC_ALIGN 32
+# elif XXH_VECTOR == XXH_NEON /* neon */
+# define XXH_ACC_ALIGN 16
+# elif XXH_VECTOR == XXH_VSX /* vsx */
+# define XXH_ACC_ALIGN 16
+# elif XXH_VECTOR == XXH_AVX512 /* avx512 */
+# define XXH_ACC_ALIGN 64
+# elif XXH_VECTOR == XXH_SVE /* sve */
+# define XXH_ACC_ALIGN 64
+# endif
+#endif
- /*!
- * Function-like macro:
- * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t
- * &outHi)
- * {
+#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
+ || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
+# define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#elif XXH_VECTOR == XXH_SVE
+# define XXH_SEC_ALIGN XXH_ACC_ALIGN
+#else
+# define XXH_SEC_ALIGN 8
+#endif
- * outLo = (uint32x2_t)(in & 0xFFFFFFFF);
- * outHi = (uint32x2_t)(in >> 32);
- * in = UNDEFINED;
- * }
- */
- #if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
- && defined(__GNUC__) && !defined(__aarch64__) && \
- !defined(__arm64__)
- #define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
- do { \
- \
- /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, \
- * %f0 = upper D half */ \
- /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 \
- */ \
- /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 \
- */ \
- __asm__("vzip.32 %e0, %f0" : "+w"(in)); \
- (outLo) = vget_low_u32(vreinterpretq_u32_u64(in)); \
- (outHi) = vget_high_u32(vreinterpretq_u32_u64(in)); \
- \
- } while (0)
-
- #else
- #define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
- do { \
- \
- (outLo) = vmovn_u64(in); \
- (outHi) = vshrn_n_u64((in), 32); \
- \
- } while (0)
-
- #endif
- #endif /* XXH_VECTOR == XXH_NEON */
-
- /*
- * VSX and Z Vector helpers.
- *
- * This is very messy, and any pull requests to clean this up are welcome.
- *
- * There are a lot of problems with supporting VSX and s390x, due to
- * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
- */
- #if XXH_VECTOR == XXH_VSX
- #if defined(__s390x__)
- #include <s390intrin.h>
- #else
- /* gcc's altivec.h can have the unwanted consequence to
- * unconditionally #define bool, vector, and pixel keywords, with bad
- * consequences for programs already using these keywords for other
- * purposes. The paragraph defining these macros is skipped when
- * __APPLE_ALTIVEC__ is defined.
- * __APPLE_ALTIVEC__ is _generally_ defined automatically by the
- * compiler, but it seems that, in some cases, it isn't. Force the
- * build macro to be defined, so that keywords are not altered.
- */
- #if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
- #define __APPLE_ALTIVEC__
- #endif
- #include <altivec.h>
- #endif
+#if defined(__GNUC__) || defined(__clang__)
+# define XXH_ALIASING __attribute__((may_alias))
+#else
+# define XXH_ALIASING /* nothing */
+#endif
-typedef __vector unsigned long long xxh_u64x2;
-typedef __vector unsigned char xxh_u8x16;
-typedef __vector unsigned xxh_u32x4;
-
- #ifndef XXH_VSX_BE
- #if defined(__BIG_ENDIAN__) || \
- (defined(__BYTE_ORDER__) && \
- __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
- #define XXH_VSX_BE 1
- #elif defined(__VEC_ELEMENT_REG_ORDER__) && \
- __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
- #warning \
- "-maltivec=be is not recommended. Please use native endianness."
- #define XXH_VSX_BE 1
- #else
- #define XXH_VSX_BE 0
- #endif
- #endif /* !defined(XXH_VSX_BE) */
-
- #if XXH_VSX_BE
- #if defined(__POWER9_VECTOR__) || \
- (defined(__clang__) && defined(__s390x__))
- #define XXH_vec_revb vec_revb
- #else
-/*!
- * A polyfill for POWER9's vec_revb().
+/*
+ * UGLY HACK:
+ * GCC usually generates the best code with -O3 for xxHash.
+ *
+ * However, when targeting AVX2, it is overzealous in its unrolling, resulting
+ * in code roughly 3/4 the speed of Clang.
+ *
+ * There are other issues, such as GCC splitting _mm256_loadu_si256 into
+ * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
+ * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
+ *
+ * That is why when compiling the AVX2 version, it is recommended to use either
+ * -O2 -mavx2 -march=haswell
+ * or
+ * -O2 -mavx2 -mno-avx256-split-unaligned-load
+ * for decent performance, or to use Clang instead.
+ *
+ * Fortunately, we can control the first one with a pragma that forces GCC into
+ * -O2, but the other one we can't control without "failed to inline always
+ * inline function due to target mismatch" warnings.
*/
-XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) {
-
- xxh_u8x16 const vByteSwap = {0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
- 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08};
- return vec_perm(val, val, vByteSwap);
+#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
+ && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+ && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
+# pragma GCC push_options
+# pragma GCC optimize("-O2")
+#endif
-}
+#if XXH_VECTOR == XXH_NEON
- #endif
- #endif /* XXH_VSX_BE */
+/*
+ * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3
+ * optimizes out the entire hashLong loop because of the aliasing violation.
+ *
+ * However, GCC is also inefficient at load-store optimization with vld1q/vst1q,
+ * so the only option is to mark it as aliasing.
+ */
+typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING;
/*!
- * Performs an unaligned vector load and byte swaps it on big endian.
+ * @internal
+ * @brief `vld1q_u64` but faster and alignment-safe.
+ *
+ * On AArch64, unaligned access is always safe, but on ARMv7-A, it is only
+ * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86).
+ *
+ * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it
+ * prohibits load-store optimizations. Therefore, a direct dereference is used.
+ *
+ * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe
+ * unaligned load.
*/
-XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) {
-
- xxh_u64x2 ret;
- memcpy(&ret, ptr, sizeof(xxh_u64x2));
- #if XXH_VSX_BE
- ret = XXH_vec_revb(ret);
- #endif
- return ret;
-
+#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__)
+XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */
+{
+ return *(xxh_aliasing_uint64x2_t const *)ptr;
}
+#else
+XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr)
+{
+ return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr));
+}
+#endif
- /*
- * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
- *
- * These intrinsics weren't added until GCC 8, despite existing for a
- * while, and they are endian dependent. Also, their meaning swap
- * depending on version.
- * */
- #if defined(__s390x__)
- /* s390x is always big endian, no issue on this platform */
- #define XXH_vec_mulo vec_mulo
- #define XXH_vec_mule vec_mule
- #elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
- /* Clang has a better way to control this, we can just use the builtin
- * which doesn't swap. */
- #define XXH_vec_mulo __builtin_altivec_vmulouw
- #define XXH_vec_mule __builtin_altivec_vmuleuw
- #else
-/* gcc needs inline assembly */
-/* Adapted from
- * https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
-XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b) {
+/*!
+ * @internal
+ * @brief `vmlal_u32` on low and high halves of a vector.
+ *
+ * This is a workaround for AArch64 GCC < 11, which implemented arm_neon.h with
+ * inline assembly and was therefore incapable of merging the `vget_{low, high}_u32`
+ * with `vmlal_u32`.
+ */
+#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ /* Inline assembly is the only way */
+ __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs));
+ return acc;
+}
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ /* This intrinsic works as expected */
+ return vmlal_high_u32(acc, lhs, rhs);
+}
+#else
+/* Portable intrinsic versions */
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs));
+}
+/*! @copydoc XXH_vmlal_low_u32
+ * Assume the compiler converts this to vmlal_high_u32 on aarch64 */
+XXH_FORCE_INLINE uint64x2_t
+XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
+{
+ return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs));
+}
+#endif
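
Ignoring the vector width, each lane of XXH_vmlal_low_u32 widens the two low
32-bit inputs, multiplies them to 64 bits, and adds the product into the
accumulator. A scalar model of one lane, for reference only (this helper is
illustrative, not part of the header):

#include <stdint.h>

/* One lane of a widening multiply-accumulate (UMLAL):
 * acc += (uint64)a * (uint64)b, with no truncation of the product. */
static uint64_t umlal_lane(uint64_t acc, uint32_t a, uint32_t b) {
    return acc + (uint64_t)a * (uint64_t)b;
}
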
- xxh_u64x2 result;
- __asm__("vmulouw %0, %1, %2" : "=v"(result) : "v"(a), "v"(b));
- return result;
+/*!
+ * @ingroup tuning
+ * @brief Controls the NEON to scalar ratio for XXH3
+ *
+ * This can be set to 2, 4, 6, or 8.
+ *
+ * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used.
+ *
+ * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those
+ * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU
+ * bandwidth.
+ *
+ * This is even more noticeable on the more advanced cores like the Cortex-A76 which
+ * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
+ *
+ * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes
+ * and 2 scalar lanes, which is chosen by default.
+ *
+ * This does not apply to Apple processors or 32-bit processors, which run better with
+ * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes.
+ *
+ * This change benefits CPUs with large micro-op buffers without negatively affecting
+ * most other CPUs:
+ *
+ * | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. |
+ * |:----------------------|:--------------------|----------:|-----------:|------:|
+ * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% |
+ * | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% |
+ * | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% |
+ * | Apple M1 | 4 NEON/8 micro-ops | 37.3 GB/s | 36.1 GB/s | ~-3% |
+ *
+ * It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
+ *
+ * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the
+ * lanes, meaning it effectively becomes a worse 4.
+ *
+ * @see XXH3_accumulate_512_neon()
+ */
+# ifndef XXH3_NEON_LANES
+# if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
+ && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
+# define XXH3_NEON_LANES 6
+# else
+# define XXH3_NEON_LANES XXH_ACC_NB
+# endif
+# endif
+#endif /* XXH_VECTOR == XXH_NEON */
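
Since XXH3_NEON_LANES is only defined under #ifndef, it can be overridden at
build time. A hypothetical override, forcing full NEON on a core where the
6:2 hybrid does not pay off (assuming the usual single-header setup via
XXH_INLINE_ALL):

/* Valid values are 2, 4, 6, or 8 (see the table above); 8 keeps
 * all lanes in NEON, as on Apple cores. */
#define XXH3_NEON_LANES 8
#define XXH_INLINE_ALL
#include "xxhash.h"
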
-}
+/*
+ * VSX and Z Vector helpers.
+ *
+ * This is very messy, and any pull requests to clean this up are welcome.
+ *
+ * There are a lot of problems with supporting VSX and s390x, due to
+ * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
+ */
+#if XXH_VECTOR == XXH_VSX
+/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
+ * and `pixel`. This is a problem for obvious reasons.
+ *
+ * These keywords are unnecessary; the spec literally says they are
+ * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
+ * after including the header.
+ *
+ * We use pragma push_macro/pop_macro to keep the namespace clean. */
+# pragma push_macro("bool")
+# pragma push_macro("vector")
+# pragma push_macro("pixel")
+/* silence potential macro redefined warnings */
+# undef bool
+# undef vector
+# undef pixel
+
+# if defined(__s390x__)
+# include <s390intrin.h>
+# else
+# include <altivec.h>
+# endif
+
+/* Restore the original macro values, if applicable. */
+# pragma pop_macro("pixel")
+# pragma pop_macro("vector")
+# pragma pop_macro("bool")
-XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) {
+typedef __vector unsigned long long xxh_u64x2;
+typedef __vector unsigned char xxh_u8x16;
+typedef __vector unsigned xxh_u32x4;
- xxh_u64x2 result;
- __asm__("vmuleuw %0, %1, %2" : "=v"(result) : "v"(a), "v"(b));
- return result;
+/*
+ * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue.
+ */
+typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;
+
+# ifndef XXH_VSX_BE
+# if defined(__BIG_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+# define XXH_VSX_BE 1
+# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
+# warning "-maltivec=be is not recommended. Please use native endianness."
+# define XXH_VSX_BE 1
+# else
+# define XXH_VSX_BE 0
+# endif
+# endif /* !defined(XXH_VSX_BE) */
+
+# if XXH_VSX_BE
+# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
+# define XXH_vec_revb vec_revb
+# else
+/*!
+ * A polyfill for POWER9's vec_revb().
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
+{
+ xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+ 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
+ return vec_perm(val, val, vByteSwap);
+}
+# endif
+# endif /* XXH_VSX_BE */
+/*!
+ * Performs an unaligned vector load and byte swaps it on big endian.
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
+{
+ xxh_u64x2 ret;
+ XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
+# if XXH_VSX_BE
+ ret = XXH_vec_revb(ret);
+# endif
+ return ret;
}
- #endif /* XXH_vec_mulo, XXH_vec_mule */
- #endif /* XXH_VECTOR == XXH_VSX */
+/*
+ * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
+ *
+ * These intrinsics weren't added until GCC 8, despite existing for a while,
+ * and they are endian-dependent. Also, their meaning swaps depending on the version.
+ * */
+# if defined(__s390x__)
+ /* s390x is always big endian, no issue on this platform */
+# define XXH_vec_mulo vec_mulo
+# define XXH_vec_mule vec_mule
+# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
+/* Clang has a better way to control this: we can just use the builtin, which doesn't swap. */
+ /* The IBM XL Compiler (which defines __clang__) only implements the vec_* operations. */
+# define XXH_vec_mulo __builtin_altivec_vmulouw
+# define XXH_vec_mule __builtin_altivec_vmuleuw
+# else
+/* gcc needs inline assembly */
+/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
+{
+ xxh_u64x2 result;
+ __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+ return result;
+}
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
+{
+ xxh_u64x2 result;
+ __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
+ return result;
+}
+# endif /* XXH_vec_mulo, XXH_vec_mule */
+#endif /* XXH_VECTOR == XXH_VSX */
+
+#if XXH_VECTOR == XXH_SVE
+#define ACCRND(acc, offset) \
+do { \
+ svuint64_t input_vec = svld1_u64(mask, xinput + offset); \
+ svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \
+ svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \
+ svuint64_t swapped = svtbl_u64(input_vec, kSwap); \
+ svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \
+ svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \
+ svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \
+ acc = svadd_u64_x(mask, acc, mul); \
+} while (0)
+#endif /* XXH_VECTOR == XXH_SVE */
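
Stripped of the SVE intrinsics, each ACCRND round computes, per 64-bit lane:
mix the input with the secret, multiply the low and high 32-bit halves of the
mix, add the pair-swapped input word, and accumulate. A scalar sketch of a
two-lane slice, assuming kSwap pairs adjacent lanes (i ^ 1) as in the scalar
XXH3 round:

#include <stdint.h>

static void accrnd_scalar(uint64_t acc[2], const uint64_t input[2],
                          const uint64_t secret[2]) {
    for (int i = 0; i < 2; i++) {
        uint64_t mixed = input[i] ^ secret[i];   /* sveor          */
        uint64_t lo    = mixed & 0xFFFFFFFFULL;  /* svextw         */
        uint64_t hi    = mixed >> 32;            /* svlsr #32      */
        acc[i] += lo * hi + input[i ^ 1];        /* svmad + svadd,
                                                    svtbl swap     */
    }
}
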
+
+/* prefetch
+ * can be disabled by declaring the XXH_NO_PREFETCH build macro */
+#if defined(XXH_NO_PREFETCH)
+# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
+#else
+# if XXH_SIZE_OPT >= 1
+# define XXH_PREFETCH(ptr) (void)(ptr)
+# elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
+# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
+# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
+# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+# else
+# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
+# endif
+#endif /* XXH_NO_PREFETCH */
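
On GCC and Clang, XXH_PREFETCH expands to a read prefetch with maximum
temporal locality. A rough hand-written equivalent (warm_span is a
hypothetical helper, not part of the header):

#include <stddef.h>

/* Touch a buffer one cache line at a time before hashing it;
 * 0 = prefetch for read, 3 = keep in all cache levels. */
static void warm_span(const char *p, size_t n) {
    for (size_t i = 0; i < n; i += 64)
        __builtin_prefetch(p + i, 0, 3);
}
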
- /* prefetch
- * can be disabled, by declaring XXH_NO_PREFETCH build macro */
- #if defined(XXH_NO_PREFETCH)
- #define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
- #else
- #if defined(_MSC_VER) && \
- (defined(_M_X64) || \
- defined( \
- _M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
- #include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
- #define XXH_PREFETCH(ptr) \
- _mm_prefetch((const char *)(ptr), _MM_HINT_T0)
- #elif defined(__GNUC__) && \
- ((__GNUC__ >= 4) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)))
- #define XXH_PREFETCH(ptr) \
- __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
- #else
- #define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
- #endif
- #endif /* XXH_NO_PREFETCH */
- /* ==========================================
- * XXH3 default settings
- * ========================================== */
+/* ==========================================
+ * XXH3 default settings
+ * ========================================== */
- #define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
+#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
- #if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
- #error "default keyset is not large enough"
- #endif
+#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
+# error "default keyset is not large enough"
+#endif
/*! Pseudorandom secret taken directly from FARSH. */
-XXH_ALIGN(64)
-static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
-
- 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c,
- 0xf7, 0x21, 0xad, 0x1c, 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb,
- 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, 0xcb, 0x79, 0xe6, 0x4e,
- 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
- 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6,
- 0x81, 0x3a, 0x26, 0x4c, 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb,
- 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, 0x71, 0x64, 0x48, 0x97,
- 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
- 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7,
- 0xc7, 0x0b, 0x4f, 0x1d, 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31,
- 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, 0xea, 0xc5, 0xac, 0x83,
- 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
- 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26,
- 0x29, 0xd4, 0x68, 0x9e, 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc,
- 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, 0x45, 0xcb, 0x3a, 0x8f,
- 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
-
+XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
+ 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
+ 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
+ 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
+ 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
+ 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
+ 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
+ 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
+ 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
+ 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
+ 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
+ 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
+ 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
};
- #ifdef XXH_OLD_NAMES
- #define kSecret XXH3_kSecret
- #endif
+static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */
+static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */
- #ifdef XXH_DOXYGEN
+#ifdef XXH_OLD_NAMES
+# define kSecret XXH3_kSecret
+#endif
+
+#ifdef XXH_DOXYGEN
/*!
* @brief Calculates a 32-bit to 64-bit long multiply.
*
* Implemented as a macro.
*
- * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it
- * doesn't need to (but it shouldn't need to anyways, it is about 7 instructions
- * to do a 64x64 multiply...). Since we know that this will _always_ emit
- * `MULL`, we use that instead of the normal method.
+ * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
+ * need to (but it shouldn't need to anyways, it is about 7 instructions to do
+ * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
+ * use that instead of the normal method.
*
- * If you are compiling for platforms like Thumb-1 and don't have a better
- * option, you may also want to write your own long multiply routine here.
+ * If you are compiling for platforms like Thumb-1 and don't have a better option,
+ * you may also want to write your own long multiply routine here.
*
* @param x, y Numbers to be multiplied
* @return 64-bit product of the low 32 bits of @p x and @p y.
*/
-XXH_FORCE_INLINE xxh_u64 XXH_mult32to64(xxh_u64 x, xxh_u64 y) {
-
- return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
-
-}
-
- #elif defined(_MSC_VER) && defined(_M_IX86)
- #include <intrin.h>
- #define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
- #else
- /*
- * Downcast + upcast is usually better than masking on older compilers
- * like GCC 4.2 (especially 32-bit ones), all without affecting newer
- * compilers.
- *
- * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both
- * operands and perform a full 64x64 multiply -- entirely redundant on
- * 32-bit.
- */
- #define XXH_mult32to64(x, y) \
- ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
- #endif
+XXH_FORCE_INLINE xxh_u64
+XXH_mult32to64(xxh_u64 x, xxh_u64 y)
+{
+ return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
+}
+#elif defined(_MSC_VER) && defined(_M_IX86)
+# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
+#else
+/*
+ * Downcast + upcast is usually better than masking on older compilers like
+ * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
+ *
+ * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
+ * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
+ */
+# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
+#endif
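
All three variants must agree on using only the low 32 bits of each operand.
A quick check of the portable fallback at the extremes (the macro name here
is illustrative, mirroring XXH_mult32to64):

#include <assert.h>
#include <stdint.h>

#define MULT32TO64(x, y) ((uint64_t)(uint32_t)(x) * (uint64_t)(uint32_t)(y))

int main(void) {
    /* (2^32 - 1)^2 = 2^64 - 2^33 + 1 */
    assert(MULT32TO64(0xFFFFFFFFu, 0xFFFFFFFFu) == 0xFFFFFFFE00000001ULL);
    /* High 32 bits of an operand must not leak into the product. */
    assert(MULT32TO64(0x100000001ULL, 3) == 3);
    return 0;
}
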
/*!
* @brief Calculates a 64->128-bit long multiply.
@@ -3623,157 +4299,167 @@ XXH_FORCE_INLINE xxh_u64 XXH_mult32to64(xxh_u64 x, xxh_u64 y) {
* Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
* version.
*
- * @param lhs, rhs The 64-bit integers to be multiplied
+ * @param lhs , rhs The 64-bit integers to be multiplied
* @return The 128-bit result represented in an @ref XXH128_hash_t.
*/
-static XXH128_hash_t XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) {
-
- /*
- * GCC/Clang __uint128_t method.
- *
- * On most 64-bit targets, GCC and Clang define a __uint128_t type.
- * This is usually the best way as it usually uses a native long 64-bit
- * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
- *
- * Usually.
- *
- * Despite being a 32-bit platform, Clang (and emscripten) define this
- * type despite not having the arithmetic for it. This results in a laggy
- * compiler builtin call which calculates a full 128-bit multiply.
- * In that case it is best to use the portable one.
- * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
- */
- #if defined(__GNUC__) && !defined(__wasm__) && \
- defined(__SIZEOF_INT128__) || \
- (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
-
- __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
- XXH128_hash_t r128;
- r128.low64 = (xxh_u64)(product);
- r128.high64 = (xxh_u64)(product >> 64);
- return r128;
+static XXH128_hash_t
+XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
+{
+ /*
+ * GCC/Clang __uint128_t method.
+ *
+ * On most 64-bit targets, GCC and Clang define a __uint128_t type.
+ * This is usually the best way as it usually uses a native long 64-bit
+ * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
+ *
+ * Usually.
+ *
+ * Despite being a 32-bit platform, Clang (and emscripten) define this type
+ * despite not having the arithmetic for it. This results in a laggy
+ * compiler builtin call which calculates a full 128-bit multiply.
+ * In that case it is best to use the portable one.
+ * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
+ */
+#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
+ && defined(__SIZEOF_INT128__) \
+ || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
- /*
- * MSVC for x64's _umul128 method.
- *
- * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64
- * *HighProduct);
- *
- * This compiles to single operand MUL on x64.
- */
- #elif defined(_M_X64) || defined(_M_IA64)
-
- #ifndef _MSC_VER
- #pragma intrinsic(_umul128)
- #endif
- xxh_u64 product_high;
- xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
- XXH128_hash_t r128;
- r128.low64 = product_low;
- r128.high64 = product_high;
- return r128;
-
- #else
- /*
- * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
- *
- * This is a fast and simple grade school multiply, which is shown below
- * with base 10 arithmetic instead of base 0x100000000.
- *
- * 9 3 // D2 lhs = 93
- * x 7 5 // D2 rhs = 75
- * ----------
- * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
- * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
- * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
- * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
- * ---------
- * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
- * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
- * ---------
- * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
- *
- * The reasons for adding the products like this are:
- * 1. It avoids manual carry tracking. Just like how
- * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
- * This avoids a lot of complexity.
- *
- * 2. It hints for, and on Clang, compiles to, the powerful UMAAL
- * instruction available in ARM's Digital Signal Processing extension
- * in 32-bit ARMv6 and later, which is shown below:
- *
- * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
- * {
-
- * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
- * *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
- * *RdHi = (xxh_u32)(product >> 32);
- * }
- *
- * This instruction was designed for efficient long multiplication, and
- * allows this to be calculated in only 4 instructions at speeds
- * comparable to some 64-bit ALUs.
- *
- * 3. It isn't terrible on other platforms. Usually this will be a couple
- * of 32-bit ADD/ADCs.
- */
-
- /* First calculate all of the cross products. */
- xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
- xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
- xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
- xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
-
- /* Now add the products together. These will never overflow. */
- xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
- xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
- xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
-
- XXH128_hash_t r128;
- r128.low64 = lower;
- r128.high64 = upper;
- return r128;
- #endif
+ __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
+ XXH128_hash_t r128;
+ r128.low64 = (xxh_u64)(product);
+ r128.high64 = (xxh_u64)(product >> 64);
+ return r128;
+
+ /*
+ * MSVC for x64's _umul128 method.
+ *
+ * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
+ *
+ * This compiles to single operand MUL on x64.
+ */
+#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)
+
+#ifndef _MSC_VER
+# pragma intrinsic(_umul128)
+#endif
+ xxh_u64 product_high;
+ xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
+ XXH128_hash_t r128;
+ r128.low64 = product_low;
+ r128.high64 = product_high;
+ return r128;
+
+ /*
+ * MSVC for ARM64's __umulh method.
+ *
+ * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
+ */
+#elif defined(_M_ARM64) || defined(_M_ARM64EC)
+
+#ifndef _MSC_VER
+# pragma intrinsic(__umulh)
+#endif
+ XXH128_hash_t r128;
+ r128.low64 = lhs * rhs;
+ r128.high64 = __umulh(lhs, rhs);
+ return r128;
+#else
+ /*
+ * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
+ *
+ * This is a fast and simple grade school multiply, which is shown below
+ * with base 10 arithmetic instead of base 0x100000000.
+ *
+ * 9 3 // D2 lhs = 93
+ * x 7 5 // D2 rhs = 75
+ * ----------
+ * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
+ * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
+ * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
+ * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
+ * ---------
+ * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
+ * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
+ * ---------
+ * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
+ *
+ * The reasons for adding the products like this are:
+ * 1. It avoids manual carry tracking. Just like how
+ * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
+ * This avoids a lot of complexity.
+ *
+ * 2. It hints for, and on Clang, compiles to, the powerful UMAAL
+ * instruction available in ARM's Digital Signal Processing extension
+ * in 32-bit ARMv6 and later, which is shown below:
+ *
+ * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
+ * {
+ * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
+ * *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
+ * *RdHi = (xxh_u32)(product >> 32);
+ * }
+ *
+ * This instruction was designed for efficient long multiplication, and
+ * allows this to be calculated in only 4 instructions at speeds
+ * comparable to some 64-bit ALUs.
+ *
+ * 3. It isn't terrible on other platforms. Usually this will be a couple
+ * of 32-bit ADD/ADCs.
+ */
+
+ /* First calculate all of the cross products. */
+ xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
+ xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
+ xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
+ xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
+
+ /* Now add the products together. These will never overflow. */
+ xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
+ xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
+ xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
+
+ XXH128_hash_t r128;
+ r128.low64 = lower;
+ r128.high64 = upper;
+ return r128;
+#endif
}
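
The portable grade-school path must produce the same 128-bit result as the
__uint128_t path. A standalone cross-check sketch, reimplementing the scalar
branch outside the header (names hypothetical; test values arbitrary):

#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t low64, high64; } u128;

/* Portable 64x64->128 multiply, as in the scalar branch above. */
static u128 mul64to128_portable(uint64_t lhs, uint64_t rhs) {
    uint64_t lo_lo = (lhs & 0xFFFFFFFF) * (rhs & 0xFFFFFFFF);
    uint64_t hi_lo = (lhs >> 32)        * (rhs & 0xFFFFFFFF);
    uint64_t lo_hi = (lhs & 0xFFFFFFFF) * (rhs >> 32);
    uint64_t hi_hi = (lhs >> 32)        * (rhs >> 32);
    uint64_t cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
    u128 r;
    r.high64 = (hi_lo >> 32) + (cross >> 32) + hi_hi;
    r.low64  = (cross << 32) | (lo_lo & 0xFFFFFFFF);
    return r;
}

int main(void) {
#if defined(__SIZEOF_INT128__)
    uint64_t a = 0x9E3779B97F4A7C15ULL, b = 0xC2B2AE3D27D4EB4FULL;
    __uint128_t ref = (__uint128_t)a * b;
    u128 got = mul64to128_portable(a, b);
    assert(got.low64 == (uint64_t)ref && got.high64 == (uint64_t)(ref >> 64));
#endif
    return 0;
}
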
/*!
* @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
*
* The reason for the separate function is to prevent passing too many structs
- * around by value. This will hopefully inline the multiply, but we don't force
- * it.
+ * around by value. This will hopefully inline the multiply, but we don't force it.
*
- * @param lhs, rhs The 64-bit integers to multiply
+ * @param lhs , rhs The 64-bit integers to multiply
* @return The low 64 bits of the product XOR'd by the high 64 bits.
* @see XXH_mult64to128()
*/
-static xxh_u64 XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) {
-
- XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
- return product.low64 ^ product.high64;
-
+static xxh_u64
+XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
+{
+ XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
+ return product.low64 ^ product.high64;
}
/*! Seems to produce slightly better code on GCC for some reason. */
-XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) {
-
- XXH_ASSERT(0 <= shift && shift < 64);
- return v64 ^ (v64 >> shift);
-
+XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
+{
+ XXH_ASSERT(0 <= shift && shift < 64);
+ return v64 ^ (v64 >> shift);
}
/*
* This is a fast avalanche stage,
* suitable when input bits are already partially mixed
*/
-static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) {
-
- h64 = XXH_xorshift64(h64, 37);
- h64 *= 0x165667919E3779F9ULL;
- h64 = XXH_xorshift64(h64, 32);
- return h64;
-
+static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
+{
+ h64 = XXH_xorshift64(h64, 37);
+ h64 *= PRIME_MX1;
+ h64 = XXH_xorshift64(h64, 32);
+ return h64;
}
/*
@@ -3781,17 +4467,17 @@ static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) {
* inspired by Pelle Evensen's rrmxmx
* preferable when input has not been previously mixed
*/
-static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) {
-
- /* this mix is inspired by Pelle Evensen's rrmxmx */
- h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
- h64 *= 0x9FB21C651E98DF25ULL;
- h64 ^= (h64 >> 35) + len;
- h64 *= 0x9FB21C651E98DF25ULL;
- return XXH_xorshift64(h64, 28);
-
+static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
+{
+ /* this mix is inspired by Pelle Evensen's rrmxmx */
+ h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
+ h64 *= PRIME_MX2;
+ h64 ^= (h64 >> 35) + len;
+ h64 *= PRIME_MX2;
+ return XXH_xorshift64(h64, 28);
}
+
/* ==========================================
* Short keys
* ==========================================
@@ -3800,8 +4486,7 @@ static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) {
* favored lengths that were a multiple of 4 or 8.
*
* Instead of iterating over individual inputs, we use a set of single shot
- * functions which piece together a range of lengths and operate in constant
- * time.
+ * functions which piece together a range of lengths and operate in constant time.
*
* Additionally, the number of multiplies has been significantly reduced. This
* reduces latency, especially when emulating 64-bit multiplies on 32-bit.
@@ -3826,98 +4511,70 @@ static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) {
*
* This adds an extra layer of strength for custom secrets.
*/
-XXH_FORCE_INLINE XXH64_hash_t XXH3_len_1to3_64b(const xxh_u8 *input, size_t len,
- const xxh_u8 *secret,
- XXH64_hash_t seed) {
-
- XXH_ASSERT(input != NULL);
- XXH_ASSERT(1 <= len && len <= 3);
- XXH_ASSERT(secret != NULL);
- /*
- * len = 1: combined = { input[0], 0x01, input[0], input[0] }
- * len = 2: combined = { input[1], 0x02, input[0], input[1] }
- * len = 3: combined = { input[2], 0x03, input[0], input[1] }
- */
- {
-
- xxh_u8 const c1 = input[0];
- xxh_u8 const c2 = input[len >> 1];
- xxh_u8 const c3 = input[len - 1];
- xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) |
- ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
- xxh_u64 const bitflip =
- (XXH_readLE32(secret) ^ XXH_readLE32(secret + 4)) + seed;
- xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
- return XXH64_avalanche(keyed);
-
- }
-
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(1 <= len && len <= 3);
+ XXH_ASSERT(secret != NULL);
+ /*
+ * len = 1: combined = { input[0], 0x01, input[0], input[0] }
+ * len = 2: combined = { input[1], 0x02, input[0], input[1] }
+ * len = 3: combined = { input[2], 0x03, input[0], input[1] }
+ */
+ { xxh_u8 const c1 = input[0];
+ xxh_u8 const c2 = input[len >> 1];
+ xxh_u8 const c3 = input[len - 1];
+ xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
+ | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
+ xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
+ xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
+ return XXH64_avalanche(keyed);
+ }
}
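
A worked example of the `combined` packing above, for the input "ab"
(len = 2): c1 = 'a', c2 = input[len >> 1] = 'b', c3 = input[len - 1] = 'b',
so the bytes in LSB-first order are { 'b', 0x02, 'a', 'b' }, matching the
comment's pattern for len = 2:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void) {
    const uint8_t input[] = { 0x61, 0x62 };  /* "ab" */
    size_t   len = 2;
    uint8_t  c1 = input[0], c2 = input[len >> 1], c3 = input[len - 1];
    uint32_t combined = ((uint32_t)c1 << 16) | ((uint32_t)c2 << 24)
                      | ((uint32_t)c3 <<  0) | ((uint32_t)len <<  8);
    assert(combined == 0x62610262u);
    return 0;
}
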
-XXH_FORCE_INLINE XXH64_hash_t XXH3_len_4to8_64b(const xxh_u8 *input, size_t len,
- const xxh_u8 *secret,
- XXH64_hash_t seed) {
-
- XXH_ASSERT(input != NULL);
- XXH_ASSERT(secret != NULL);
- XXH_ASSERT(4 <= len && len <= 8);
- seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
- {
-
- xxh_u32 const input1 = XXH_readLE32(input);
- xxh_u32 const input2 = XXH_readLE32(input + len - 4);
- xxh_u64 const bitflip =
- (XXH_readLE64(secret + 8) ^ XXH_readLE64(secret + 16)) - seed;
- xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
- xxh_u64 const keyed = input64 ^ bitflip;
- return XXH3_rrmxmx(keyed, len);
-
- }
-
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(4 <= len && len <= 8);
+ seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
+ { xxh_u32 const input1 = XXH_readLE32(input);
+ xxh_u32 const input2 = XXH_readLE32(input + len - 4);
+ xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
+ xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
+ xxh_u64 const keyed = input64 ^ bitflip;
+ return XXH3_rrmxmx(keyed, len);
+ }
}
-XXH_FORCE_INLINE XXH64_hash_t XXH3_len_9to16_64b(const xxh_u8 *input,
- size_t len,
- const xxh_u8 *secret,
- XXH64_hash_t seed) {
-
- XXH_ASSERT(input != NULL);
- XXH_ASSERT(secret != NULL);
- XXH_ASSERT(9 <= len && len <= 16);
- {
-
- xxh_u64 const bitflip1 =
- (XXH_readLE64(secret + 24) ^ XXH_readLE64(secret + 32)) + seed;
- xxh_u64 const bitflip2 =
- (XXH_readLE64(secret + 40) ^ XXH_readLE64(secret + 48)) - seed;
- xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
- xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
- xxh_u64 const acc = len + XXH_swap64(input_lo) + input_hi +
- XXH3_mul128_fold64(input_lo, input_hi);
- return XXH3_avalanche(acc);
-
- }
-
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(9 <= len && len <= 16);
+ { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
+ xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
+ xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
+ xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
+ xxh_u64 const acc = len
+ + XXH_swap64(input_lo) + input_hi
+ + XXH3_mul128_fold64(input_lo, input_hi);
+ return XXH3_avalanche(acc);
+ }
}
-XXH_FORCE_INLINE XXH64_hash_t XXH3_len_0to16_64b(const xxh_u8 *input,
- size_t len,
- const xxh_u8 *secret,
- XXH64_hash_t seed) {
-
- XXH_ASSERT(len <= 16);
- {
-
- if (XXH_likely(len > 8))
- return XXH3_len_9to16_64b(input, len, secret, seed);
- if (XXH_likely(len >= 4))
- return XXH3_len_4to8_64b(input, len, secret, seed);
- if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
- return XXH64_avalanche(
- seed ^ (XXH_readLE64(secret + 56) ^ XXH_readLE64(secret + 64)));
-
- }
-
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(len <= 16);
+ { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed);
+ if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
+ if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
+ return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
+ }
}
/*
@@ -3946,113 +4603,106 @@ XXH_FORCE_INLINE XXH64_hash_t XXH3_len_0to16_64b(const xxh_u8 *input,
* by this, although it is always a good idea to use a proper seed if you care
* about strength.
*/
-XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8 *XXH_RESTRICT input,
- const xxh_u8 *XXH_RESTRICT secret,
- xxh_u64 seed64) {
-
- #if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
- && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \
- && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like \
- XXH32 hack */
- /*
- * UGLY HACK:
- * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
- * slower code.
- *
- * By forcing seed64 into a register, we disrupt the cost model and
- * cause it to scalarize. See `XXH32_round()`
- *
- * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
- * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
- * GCC 9.2, despite both emitting scalar code.
- *
- * GCC generates much better scalar code than Clang for the rest of XXH3,
- * which is why finding a more optimal codepath is an interest.
- */
- XXH_COMPILER_GUARD(seed64);
- #endif
- {
-
- xxh_u64 const input_lo = XXH_readLE64(input);
- xxh_u64 const input_hi = XXH_readLE64(input + 8);
- return XXH3_mul128_fold64(input_lo ^ (XXH_readLE64(secret) + seed64),
- input_hi ^ (XXH_readLE64(secret + 8) - seed64));
-
- }
-
+XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
+ const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
+{
+#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+ && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \
+ && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */
+ /*
+ * UGLY HACK:
+ * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
+ * slower code.
+ *
+ * By forcing seed64 into a register, we disrupt the cost model and
+ * cause it to scalarize. See `XXH32_round()`
+ *
+ * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
+ * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
+ * GCC 9.2, despite both emitting scalar code.
+ *
+ * GCC generates much better scalar code than Clang for the rest of XXH3,
+ * which is why finding a more optimal codepath is an interest.
+ */
+ XXH_COMPILER_GUARD(seed64);
+#endif
+ { xxh_u64 const input_lo = XXH_readLE64(input);
+ xxh_u64 const input_hi = XXH_readLE64(input+8);
+ return XXH3_mul128_fold64(
+ input_lo ^ (XXH_readLE64(secret) + seed64),
+ input_hi ^ (XXH_readLE64(secret+8) - seed64)
+ );
+ }
}
/* For mid range keys, XXH3 uses a Mum-hash variant. */
-XXH_FORCE_INLINE XXH64_hash_t XXH3_len_17to128_64b(
- const xxh_u8 *XXH_RESTRICT input, size_t len,
- const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) {
-
- XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
- (void)secretSize;
- XXH_ASSERT(16 < len && len <= 128);
-
- {
-
- xxh_u64 acc = len * XXH_PRIME64_1;
- if (len > 32) {
-
- if (len > 64) {
-
- if (len > 96) {
-
- acc += XXH3_mix16B(input + 48, secret + 96, seed);
- acc += XXH3_mix16B(input + len - 64, secret + 112, seed);
-
+XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(16 < len && len <= 128);
+
+ { xxh_u64 acc = len * XXH_PRIME64_1;
+#if XXH_SIZE_OPT >= 1
+ /* Smaller and cleaner, but slightly slower. */
+ unsigned int i = (unsigned int)(len - 1) / 32;
+ do {
+ acc += XXH3_mix16B(input+16 * i, secret+32*i, seed);
+ acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
+ } while (i-- != 0);
+#else
+ if (len > 32) {
+ if (len > 64) {
+ if (len > 96) {
+ acc += XXH3_mix16B(input+48, secret+96, seed);
+ acc += XXH3_mix16B(input+len-64, secret+112, seed);
+ }
+ acc += XXH3_mix16B(input+32, secret+64, seed);
+ acc += XXH3_mix16B(input+len-48, secret+80, seed);
+ }
+ acc += XXH3_mix16B(input+16, secret+32, seed);
+ acc += XXH3_mix16B(input+len-32, secret+48, seed);
}
-
- acc += XXH3_mix16B(input + 32, secret + 64, seed);
- acc += XXH3_mix16B(input + len - 48, secret + 80, seed);
-
- }
-
- acc += XXH3_mix16B(input + 16, secret + 32, seed);
- acc += XXH3_mix16B(input + len - 32, secret + 48, seed);
-
+ acc += XXH3_mix16B(input+0, secret+0, seed);
+ acc += XXH3_mix16B(input+len-16, secret+16, seed);
+#endif
+ return XXH3_avalanche(acc);
}
-
- acc += XXH3_mix16B(input + 0, secret + 0, seed);
- acc += XXH3_mix16B(input + len - 16, secret + 16, seed);
-
- return XXH3_avalanche(acc);
-
- }
-
}
- #define XXH3_MIDSIZE_MAX 240
-
-XXH_NO_INLINE XXH64_hash_t XXH3_len_129to240_64b(
- const xxh_u8 *XXH_RESTRICT input, size_t len,
- const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) {
-
- XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
- (void)secretSize;
- XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
-
- #define XXH3_MIDSIZE_STARTOFFSET 3
- #define XXH3_MIDSIZE_LASTOFFSET 17
-
- {
-
- xxh_u64 acc = len * XXH_PRIME64_1;
- int const nbRounds = (int)len / 16;
- int i;
- for (i = 0; i < 8; i++) {
-
- acc += XXH3_mix16B(input + (16 * i), secret + (16 * i), seed);
-
- }
-
- acc = XXH3_avalanche(acc);
- XXH_ASSERT(nbRounds >= 8);
- #if defined(__clang__) /* Clang */ \
- && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
- && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
+/*!
+ * @brief Maximum size of "short" key in bytes.
+ */
+#define XXH3_MIDSIZE_MAX 240
+
+XXH_NO_INLINE XXH_PUREF XXH64_hash_t
+XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+ #define XXH3_MIDSIZE_STARTOFFSET 3
+ #define XXH3_MIDSIZE_LASTOFFSET 17
+
+ { xxh_u64 acc = len * XXH_PRIME64_1;
+ xxh_u64 acc_end;
+ unsigned int const nbRounds = (unsigned int)len / 16;
+ unsigned int i;
+ XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+ for (i=0; i<8; i++) {
+ acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
+ }
+ /* last bytes */
+ acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
+ XXH_ASSERT(nbRounds >= 8);
+ acc = XXH3_avalanche(acc);
+#if defined(__clang__) /* Clang */ \
+ && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
+ && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
/*
* UGLY HACK:
* Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
@@ -4074,122 +4724,150 @@ XXH_NO_INLINE XXH64_hash_t XXH3_len_129to240_64b(
* SLP vectorization.
*/
#pragma clang loop vectorize(disable)
- #endif
- for (i = 8; i < nbRounds; i++) {
-
- acc +=
- XXH3_mix16B(input + (16 * i),
- secret + (16 * (i - 8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
-
+#endif
+ for (i=8 ; i < nbRounds; i++) {
+ /*
+ * Prevents clang from unrolling the acc loop and interleaving it with this one.
+ */
+ XXH_COMPILER_GUARD(acc);
+ acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
+ }
+ return XXH3_avalanche(acc + acc_end);
}
-
- /* last bytes */
- acc += XXH3_mix16B(input + len - 16,
- secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET,
- seed);
- return XXH3_avalanche(acc);
-
- }
-
}
- /* ======= Long Keys ======= */
- #define XXH_STRIPE_LEN 64
- #define XXH_SECRET_CONSUME_RATE \
- 8 /* nb of secret bytes consumed at each accumulation */
- #define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
+/* ======= Long Keys ======= */
- #ifdef XXH_OLD_NAMES
- #define STRIPE_LEN XXH_STRIPE_LEN
- #define ACC_NB XXH_ACC_NB
- #endif
+#define XXH_STRIPE_LEN 64
+#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */
+#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
-XXH_FORCE_INLINE void XXH_writeLE64(void *dst, xxh_u64 v64) {
-
- if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
- memcpy(dst, &v64, sizeof(v64));
-
-}
-
- /* Several intrinsic functions below are supposed to accept __int64 as
- * argument, as documented in
- * https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
- * However, several environments do not define __int64 type,
- * requiring a workaround.
- */
- #if !defined(__VMS) && \
- (defined(__cplusplus) || (defined(__STDC_VERSION__) && \
- (__STDC_VERSION__ >= 199901L) /* C99 */))
-typedef int64_t xxh_i64;
- #else
-/* the following type must have a width of 64-bit */
-typedef long long xxh_i64;
- #endif
-
- /*
- * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the
- * most optimized.
- *
- * It is a hardened version of UMAC, based off of FARSH's implementation.
- *
- * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
- * implementations, and it is ridiculously fast.
- *
- * We harden it by mixing the original input to the accumulators as well as
- * the product.
- *
- * This means that in the (relatively likely) case of a multiply by zero,
- * the original input is preserved.
- *
- * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
- * cross-pollination, as otherwise the upper and lower halves would be
- * essentially independent.
- *
- * This doesn't matter on 64-bit hashes since they all get merged together
- * in the end, so we skip the extra step.
- *
- * Both XXH3_64bits and XXH3_128bits use this subroutine.
- */
+#ifdef XXH_OLD_NAMES
+# define STRIPE_LEN XXH_STRIPE_LEN
+# define ACC_NB XXH_ACC_NB
+#endif
- #if (XXH_VECTOR == XXH_AVX512) || \
- (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
+#ifndef XXH_PREFETCH_DIST
+# ifdef __clang__
+# define XXH_PREFETCH_DIST 320
+# else
+# if (XXH_VECTOR == XXH_AVX512)
+# define XXH_PREFETCH_DIST 512
+# else
+# define XXH_PREFETCH_DIST 384
+# endif
+# endif /* __clang__ */
+#endif /* XXH_PREFETCH_DIST */
- #ifndef XXH_TARGET_AVX512
- #define XXH_TARGET_AVX512 /* disable attribute target */
- #endif
+/*
+ * These macros are to generate an XXH3_accumulate() function.
+ * The two arguments select the name suffix and target attribute.
+ *
+ * The name of this symbol is XXH3_accumulate_<name>() and it calls
+ * XXH3_accumulate_512_<name>().
+ *
+ * It may be useful to hand implement this function if the compiler fails to
+ * optimize the inline function.
+ */
+#define XXH3_ACCUMULATE_TEMPLATE(name) \
+void \
+XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc, \
+ const xxh_u8* XXH_RESTRICT input, \
+ const xxh_u8* XXH_RESTRICT secret, \
+ size_t nbStripes) \
+{ \
+ size_t n; \
+ for (n = 0; n < nbStripes; n++ ) { \
+ const xxh_u8* const in = input + n*XXH_STRIPE_LEN; \
+ XXH_PREFETCH(in + XXH_PREFETCH_DIST); \
+ XXH3_accumulate_512_##name( \
+ acc, \
+ in, \
+ secret + n*XXH_SECRET_CONSUME_RATE); \
+ } \
+}
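
What "XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar)" expands to, written out by hand for readability (the real expansion is of course produced by the preprocessor):

XXH_FORCE_INLINE void
XXH3_accumulate_scalar(xxh_u64* XXH_RESTRICT acc,
                       const xxh_u8* XXH_RESTRICT input,
                       const xxh_u8* XXH_RESTRICT secret,
                       size_t nbStripes)
{
    size_t n;
    for (n = 0; n < nbStripes; n++) {
        const xxh_u8* const in = input + n*XXH_STRIPE_LEN;  /* 64 bytes/stripe */
        XXH_PREFETCH(in + XXH_PREFETCH_DIST);   /* stay ahead of the loads */
        XXH3_accumulate_512_scalar(acc, in, secret + n*XXH_SECRET_CONSUME_RATE);
    }
}
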
+
+
+XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
+{
+ if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
+ XXH_memcpy(dst, &v64, sizeof(v64));
+}
+
+/* Several intrinsic functions below are supposed to accept __int64 as argument,
+ * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
+ * However, several environments do not define __int64 type,
+ * requiring a workaround.
+ */
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+ typedef int64_t xxh_i64;
+#else
+ /* the following type must have a width of 64-bit */
+ typedef long long xxh_i64;
+#endif
-XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_accumulate_512_avx512(
- void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
- const void *XXH_RESTRICT secret) {
- __m512i *const xacc = (__m512i *)acc;
- XXH_ASSERT((((size_t)acc) & 63) == 0);
- XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
+/*
+ * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
+ *
+ * It is a hardened version of UMAC, based off of FARSH's implementation.
+ *
+ * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
+ * implementations, and it is ridiculously fast.
+ *
+ * We harden it by mixing the original input to the accumulators as well as the product.
+ *
+ * This means that in the (relatively likely) case of a multiply by zero, the
+ * original input is preserved.
+ *
+ * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
+ * cross-pollination, as otherwise the upper and lower halves would be
+ * essentially independent.
+ *
+ * This doesn't matter on 64-bit hashes since they all get merged together in
+ * the end, so we skip the extra step.
+ *
+ * Both XXH3_64bits and XXH3_128bits use this subroutine.
+ */
- {
+#if (XXH_VECTOR == XXH_AVX512) \
+ || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
- /* data_vec = input[0]; */
- __m512i const data_vec = _mm512_loadu_si512(input);
- /* key_vec = secret[0]; */
- __m512i const key_vec = _mm512_loadu_si512(secret);
- /* data_key = data_vec ^ key_vec; */
- __m512i const data_key = _mm512_xor_si512(data_vec, key_vec);
- /* data_key_lo = data_key >> 32; */
- __m512i const data_key_lo =
- _mm512_shuffle_epi32(data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
- /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
- __m512i const product = _mm512_mul_epu32(data_key, data_key_lo);
- /* xacc[0] += swap(data_vec); */
- __m512i const data_swap =
- _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
- __m512i const sum = _mm512_add_epi64(*xacc, data_swap);
- /* xacc[0] += product; */
- *xacc = _mm512_add_epi64(product, sum);
+#ifndef XXH_TARGET_AVX512
+# define XXH_TARGET_AVX512 /* disable attribute target */
+#endif
- }
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void
+XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ __m512i* const xacc = (__m512i *) acc;
+ XXH_ASSERT((((size_t)acc) & 63) == 0);
+ XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
+ {
+ /* data_vec = input[0]; */
+ __m512i const data_vec = _mm512_loadu_si512 (input);
+ /* key_vec = secret[0]; */
+ __m512i const key_vec = _mm512_loadu_si512 (secret);
+ /* data_key = data_vec ^ key_vec; */
+ __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
+ /* data_key_lo = data_key >> 32; */
+ __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32);
+ /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+ __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo);
+ /* xacc[0] += swap(data_vec); */
+ __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
+ __m512i const sum = _mm512_add_epi64(*xacc, data_swap);
+ /* xacc[0] += product; */
+ *xacc = _mm512_add_epi64(product, sum);
+ }
}
+XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512)
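
The per-lane arithmetic that every SIMD variant implements is easiest to read in scalar form; a self-contained sketch of one 64-bit accumulator lane (the hypothetical accumulate_lane mirrors XXH3_scalarRound further below):

#include <stddef.h>
#include <stdint.h>

static void accumulate_lane(uint64_t acc[8], const uint64_t data[8],
                            const uint64_t key[8], size_t lane) {
    uint64_t const data_key = data[lane] ^ key[lane];
    acc[lane ^ 1] += data[lane];             /* xacc += swap(data_vec)      */
    acc[lane] += (data_key & 0xFFFFFFFF)     /* low 32 bits of data^key ... */
               * (data_key >> 32);           /* ... times the high 32 bits  */
}
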
/*
* XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
@@ -4201,12 +4879,10 @@ XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_accumulate_512_avx512(
* // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
* // As expected, the upper and lower bytes are much worse.
*
- * Source:
- * https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
+ * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
*
* Since our algorithm uses a pseudorandom secret to add some variance into the
- * mix, we don't need to (or want to) mix as often or as much as HighwayHash
- * does.
+ * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
*
* This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
* extraction.
@@ -4214,869 +4890,1030 @@ XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_accumulate_512_avx512(
* Both XXH3_64bits and XXH3_128bits use this subroutine.
*/
-XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_scrambleAcc_avx512(
- void *XXH_RESTRICT acc, const void *XXH_RESTRICT secret) {
-
- XXH_ASSERT((((size_t)acc) & 63) == 0);
- XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
- {
-
- __m512i *const xacc = (__m512i *)acc;
- const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
-
- /* xacc[0] ^= (xacc[0] >> 47) */
- __m512i const acc_vec = *xacc;
- __m512i const shifted = _mm512_srli_epi64(acc_vec, 47);
- __m512i const data_vec = _mm512_xor_si512(acc_vec, shifted);
- /* xacc[0] ^= secret; */
- __m512i const key_vec = _mm512_loadu_si512(secret);
- __m512i const data_key = _mm512_xor_si512(data_vec, key_vec);
-
- /* xacc[0] *= XXH_PRIME32_1; */
- __m512i const data_key_hi =
- _mm512_shuffle_epi32(data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
- __m512i const prod_lo = _mm512_mul_epu32(data_key, prime32);
- __m512i const prod_hi = _mm512_mul_epu32(data_key_hi, prime32);
- *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
-
- }
-
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void
+XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 63) == 0);
+ XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
+ { __m512i* const xacc = (__m512i*) acc;
+ const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
+
+ /* xacc[0] ^= (xacc[0] >> 47) */
+ __m512i const acc_vec = *xacc;
+ __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47);
+ /* xacc[0] ^= secret; */
+ __m512i const key_vec = _mm512_loadu_si512 (secret);
+ __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */);
+
+ /* xacc[0] *= XXH_PRIME32_1; */
+ __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32);
+ __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32);
+ __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32);
+ *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
+ }
}
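
The _mm512_ternarylogic_epi32 call above folds both XORs into a single instruction; its 0x96 immediate is exactly the truth table of a ^ b ^ c, which this stand-alone check (illustration only) reproduces:

#include <stdio.h>

int main(void) {
    unsigned imm8 = 0, i;
    for (i = 0; i < 8; i++) {            /* i encodes the input bits (a,b,c) */
        unsigned const a = (i >> 2) & 1, b = (i >> 1) & 1, c = i & 1;
        imm8 |= (a ^ b ^ c) << i;        /* output bit for this combination  */
    }
    printf("0x%02X\n", imm8);            /* prints 0x96 */
    return 0;
}
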
-XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_initCustomSecret_avx512(
- void *XXH_RESTRICT customSecret, xxh_u64 seed64) {
-
- XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
- XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
- XXH_ASSERT(((size_t)customSecret & 63) == 0);
- (void)(&XXH_writeLE64);
- {
-
- int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
- __m512i const seed = _mm512_mask_set1_epi64(
- _mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64));
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void
+XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
+ XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
+ XXH_ASSERT(((size_t)customSecret & 63) == 0);
+ (void)(&XXH_writeLE64);
+ { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
+ __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64);
+ __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos);
- const __m512i *const src = (const __m512i *)((const void *)XXH3_kSecret);
- __m512i *const dest = (__m512i *)customSecret;
- int i;
- XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
- XXH_ASSERT(((size_t)dest & 63) == 0);
- for (i = 0; i < nbRounds; ++i) {
-
- /* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void
- * const*', this will warn "discards 'const' qualifier". */
- union {
+ const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret);
+ __m512i* const dest = ( __m512i*) customSecret;
+ int i;
+ XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
+ XXH_ASSERT(((size_t)dest & 63) == 0);
+ for (i=0; i < nbRounds; ++i) {
+ dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed);
+ } }
+}
- const __m512i *cp;
- void *p;
+#endif
- } remote_const_void;
+#if (XXH_VECTOR == XXH_AVX2) \
+ || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
- remote_const_void.cp = src + i;
- dest[i] =
- _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
+#ifndef XXH_TARGET_AVX2
+# define XXH_TARGET_AVX2 /* disable attribute target */
+#endif
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void
+XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 31) == 0);
+ { __m256i* const xacc = (__m256i *) acc;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+ const __m256i* const xinput = (const __m256i *) input;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+ const __m256i* const xsecret = (const __m256i *) secret;
+
+ size_t i;
+ for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
+ /* data_vec = xinput[i]; */
+ __m256i const data_vec = _mm256_loadu_si256 (xinput+i);
+ /* key_vec = xsecret[i]; */
+ __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
+ /* data_key = data_vec ^ key_vec; */
+ __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
+ /* data_key_lo = data_key >> 32; */
+ __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32);
+ /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+ __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo);
+ /* xacc[i] += swap(data_vec); */
+ __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
+ __m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
+ /* xacc[i] += product; */
+ xacc[i] = _mm256_add_epi64(product, sum);
+ } }
+}
+XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2)
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void
+XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 31) == 0);
+ { __m256i* const xacc = (__m256i*) acc;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+ const __m256i* const xsecret = (const __m256i *) secret;
+ const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
+
+ size_t i;
+ for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
+ /* xacc[i] ^= (xacc[i] >> 47) */
+ __m256i const acc_vec = xacc[i];
+ __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47);
+ __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted);
+ /* xacc[i] ^= xsecret; */
+ __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
+ __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
+
+ /* xacc[i] *= XXH_PRIME32_1; */
+ __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32);
+ __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32);
+ __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32);
+ xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
+ }
}
-
- }
-
}
- #endif
-
- #if (XXH_VECTOR == XXH_AVX2) || \
- (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
-
- #ifndef XXH_TARGET_AVX2
- #define XXH_TARGET_AVX2 /* disable attribute target */
- #endif
-
-XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_accumulate_512_avx2(
- void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
- const void *XXH_RESTRICT secret) {
-
- XXH_ASSERT((((size_t)acc) & 31) == 0);
- {
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
+ XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
+ (void)(&XXH_writeLE64);
+ XXH_PREFETCH(customSecret);
+ { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
- __m256i *const xacc = (__m256i *)acc;
- /* Unaligned. This is mainly for pointer arithmetic, and because
- * _mm256_loadu_si256 requires a const __m256i * pointer for some reason.
- */
- const __m256i *const xinput = (const __m256i *)input;
- /* Unaligned. This is mainly for pointer arithmetic, and because
- * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
- const __m256i *const xsecret = (const __m256i *)secret;
-
- size_t i;
- for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m256i); i++) {
-
- /* data_vec = xinput[i]; */
- __m256i const data_vec = _mm256_loadu_si256(xinput + i);
- /* key_vec = xsecret[i]; */
- __m256i const key_vec = _mm256_loadu_si256(xsecret + i);
- /* data_key = data_vec ^ key_vec; */
- __m256i const data_key = _mm256_xor_si256(data_vec, key_vec);
- /* data_key_lo = data_key >> 32; */
- __m256i const data_key_lo =
- _mm256_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
- /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
- __m256i const product = _mm256_mul_epu32(data_key, data_key_lo);
- /* xacc[i] += swap(data_vec); */
- __m256i const data_swap =
- _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
- __m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
- /* xacc[i] += product; */
- xacc[i] = _mm256_add_epi64(product, sum);
+ const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret);
+ __m256i* dest = ( __m256i*) customSecret;
+# if defined(__GNUC__) || defined(__clang__)
+ /*
+         * On GCC & Clang, marking 'dest' as modified will cause the compiler to:
+         *   - not extract the secret from sse registers in the internal loop
+         *   - use fewer registers, and avoid pushing them onto the stack
+ */
+ XXH_COMPILER_GUARD(dest);
+# endif
+ XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
+ XXH_ASSERT(((size_t)dest & 31) == 0);
+
+        /* GCC at -O2 needs this loop unrolled manually */
+ dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed);
+ dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed);
+ dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed);
+ dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed);
+ dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed);
+ dest[5] = _mm256_add_epi64(_mm256_load_si256(src+5), seed);
}
-
- }
-
}
-XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_scrambleAcc_avx2(
- void *XXH_RESTRICT acc, const void *XXH_RESTRICT secret) {
-
- XXH_ASSERT((((size_t)acc) & 31) == 0);
- {
+#endif
- __m256i *const xacc = (__m256i *)acc;
- /* Unaligned. This is mainly for pointer arithmetic, and because
- * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
- const __m256i *const xsecret = (const __m256i *)secret;
- const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
+/* x86dispatch always generates SSE2 */
+#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
- size_t i;
- for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m256i); i++) {
-
- /* xacc[i] ^= (xacc[i] >> 47) */
- __m256i const acc_vec = xacc[i];
- __m256i const shifted = _mm256_srli_epi64(acc_vec, 47);
- __m256i const data_vec = _mm256_xor_si256(acc_vec, shifted);
- /* xacc[i] ^= xsecret; */
- __m256i const key_vec = _mm256_loadu_si256(xsecret + i);
- __m256i const data_key = _mm256_xor_si256(data_vec, key_vec);
-
- /* xacc[i] *= XXH_PRIME32_1; */
- __m256i const data_key_hi =
- _mm256_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
- __m256i const prod_lo = _mm256_mul_epu32(data_key, prime32);
- __m256i const prod_hi = _mm256_mul_epu32(data_key_hi, prime32);
- xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
+#ifndef XXH_TARGET_SSE2
+# define XXH_TARGET_SSE2 /* disable attribute target */
+#endif
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void
+XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ /* SSE2 is just a half-scale version of the AVX2 version. */
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+ { __m128i* const xacc = (__m128i *) acc;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+ const __m128i* const xinput = (const __m128i *) input;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+ const __m128i* const xsecret = (const __m128i *) secret;
+
+ size_t i;
+ for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
+ /* data_vec = xinput[i]; */
+ __m128i const data_vec = _mm_loadu_si128 (xinput+i);
+ /* key_vec = xsecret[i]; */
+ __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
+ /* data_key = data_vec ^ key_vec; */
+ __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
+ /* data_key_lo = data_key >> 32; */
+ __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
+ /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+ __m128i const product = _mm_mul_epu32 (data_key, data_key_lo);
+ /* xacc[i] += swap(data_vec); */
+ __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
+ __m128i const sum = _mm_add_epi64(xacc[i], data_swap);
+ /* xacc[i] += product; */
+ xacc[i] = _mm_add_epi64(product, sum);
+ } }
+}
+XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2)
+
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void
+XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+ { __m128i* const xacc = (__m128i*) acc;
+ /* Unaligned. This is mainly for pointer arithmetic, and because
+ * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+ const __m128i* const xsecret = (const __m128i *) secret;
+ const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
+
+ size_t i;
+ for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
+ /* xacc[i] ^= (xacc[i] >> 47) */
+ __m128i const acc_vec = xacc[i];
+ __m128i const shifted = _mm_srli_epi64 (acc_vec, 47);
+ __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted);
+ /* xacc[i] ^= xsecret[i]; */
+ __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
+ __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
+
+ /* xacc[i] *= XXH_PRIME32_1; */
+ __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
+ __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32);
+ __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32);
+ xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
+ }
}
-
- }
-
}
-XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(
- void *XXH_RESTRICT customSecret, xxh_u64 seed64) {
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+ (void)(&XXH_writeLE64);
+ { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
+
+# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
+ /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
+ XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
+ __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
+# else
+ __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
+# endif
+ int i;
+
+ const void* const src16 = XXH3_kSecret;
+ __m128i* dst16 = (__m128i*) customSecret;
+# if defined(__GNUC__) || defined(__clang__)
+ /*
+         * On GCC & Clang, marking 'dst16' as modified will cause the compiler to:
+         *   - not extract the secret from sse registers in the internal loop
+         *   - use fewer registers, and avoid pushing them onto the stack
+ */
+ XXH_COMPILER_GUARD(dst16);
+# endif
+ XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
+ XXH_ASSERT(((size_t)dst16 & 15) == 0);
- XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
- XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
- XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
- (void)(&XXH_writeLE64);
- XXH_PREFETCH(customSecret);
- {
+ for (i=0; i < nbRounds; ++i) {
+ dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
+ } }
+}
- __m256i const seed =
- _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64,
- (xxh_i64)(0U - seed64), (xxh_i64)seed64);
+#endif
- const __m256i *const src = (const __m256i *)((const void *)XXH3_kSecret);
- __m256i *dest = (__m256i *)customSecret;
+#if (XXH_VECTOR == XXH_NEON)
- #if defined(__GNUC__) || defined(__clang__)
- /*
- * On GCC & Clang, marking 'dest' as modified will cause the compiler:
- * - do not extract the secret from sse registers in the internal loop
- * - use less common registers, and avoid pushing these reg into stack
- */
- XXH_COMPILER_GUARD(dest);
- #endif
- XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
- XXH_ASSERT(((size_t)dest & 31) == 0);
+/* forward declarations for the scalar routines */
+XXH_FORCE_INLINE void
+XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
+ void const* XXH_RESTRICT secret, size_t lane);
- /* GCC -O2 need unroll loop manually */
- dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src + 0), seed);
- dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src + 1), seed);
- dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src + 2), seed);
- dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src + 3), seed);
- dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src + 4), seed);
- dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src + 5), seed);
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+ void const* XXH_RESTRICT secret, size_t lane);
- }
+/*!
+ * @internal
+ * @brief The bulk processing loop for NEON and WASM SIMD128.
+ *
+ * The NEON code path is actually partially scalar when running on AArch64. This
+ * is to optimize the pipelining and can have up to 15% speedup depending on the
+ * CPU, and it also mitigates some GCC codegen issues.
+ *
+ * @see XXH3_NEON_LANES for configuring this and details about this optimization.
+ *
+ * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit
+ * integers instead of the other platforms which mask full 64-bit vectors,
+ * so the setup is more complicated than just shifting right.
+ *
+ * Additionally, there is an optimization for 4 lanes at once noted below.
+ *
+ * Since, as stated, the optimal number of lanes for Cortexes is 6, three
+ * versions of the accumulate operation are used: a 4-lane block, a 2-lane
+ * block for the remaining NEON pair, and scalar rounds for the last 2 lanes.
+ *
+ * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap
+ * nearly perfectly.
+ */
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+ XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
+ { /* GCC for darwin arm64 does not like aliasing here */
+ xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc;
+ /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
+ uint8_t const* xinput = (const uint8_t *) input;
+ uint8_t const* xsecret = (const uint8_t *) secret;
+
+ size_t i;
+#ifdef __wasm_simd128__
+ /*
+ * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret
+         * is constant propagated, which results in code like this
+ * inside the loop:
+ *
+ * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0)
+ * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0)
+ * ...
+ *
+ * This requires a full 32-bit address immediate (and therefore a 6 byte
+ * instruction) as well as an add for each offset.
+ *
+ * Putting an asm guard prevents it from folding (at the cost of losing
+ * the alignment hint), and uses the free offset in `v128.load` instead
+ * of adding secret_offset each time which overall reduces code size by
+ * about a kilobyte and improves performance.
+ */
+ XXH_COMPILER_GUARD(xsecret);
+#endif
+ /* Scalar lanes use the normal scalarRound routine */
+ for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
+ XXH3_scalarRound(acc, input, secret, i);
+ }
+ i = 0;
+ /* 4 NEON lanes at a time. */
+ for (; i+1 < XXH3_NEON_LANES / 2; i+=2) {
+ /* data_vec = xinput[i]; */
+ uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16));
+ uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16));
+ /* key_vec = xsecret[i]; */
+ uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16));
+ uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16));
+ /* data_swap = swap(data_vec) */
+ uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1);
+ uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1);
+ /* data_key = data_vec ^ key_vec; */
+ uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1);
+ uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2);
+
+ /*
+ * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a
+ * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to
+ * get one vector with the low 32 bits of each lane, and one vector
+ * with the high 32 bits of each lane.
+ *
+ * The intrinsic returns a double vector because the original ARMv7-a
+ * instruction modified both arguments in place. AArch64 and SIMD128 emit
+ * two instructions from this intrinsic.
+ *
+ * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ]
+ * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ]
+ */
+ uint32x4x2_t unzipped = vuzpq_u32(
+ vreinterpretq_u32_u64(data_key_1),
+ vreinterpretq_u32_u64(data_key_2)
+ );
+ /* data_key_lo = data_key & 0xFFFFFFFF */
+ uint32x4_t data_key_lo = unzipped.val[0];
+ /* data_key_hi = data_key >> 32 */
+ uint32x4_t data_key_hi = unzipped.val[1];
+ /*
+             * Then we can split the vectors horizontally and multiply; as with
+             * most widening intrinsics, there is a variant that works on the high
+             * half vectors for free on AArch64. A similar instruction is available on SIMD128.
+ *
+ * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi
+ */
+ uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi);
+ uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi);
+ /*
+ * Clang reorders
+ * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s
+ * c += a; // add acc.2d, acc.2d, swap.2d
+ * to
+ * c += a; // add acc.2d, acc.2d, swap.2d
+ * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s
+ *
+ * While it would make sense in theory since the addition is faster,
+ * for reasons likely related to umlal being limited to certain NEON
+ * pipelines, this is worse. A compiler guard fixes this.
+ */
+ XXH_COMPILER_GUARD_CLANG_NEON(sum_1);
+ XXH_COMPILER_GUARD_CLANG_NEON(sum_2);
+ /* xacc[i] = acc_vec + sum; */
+ xacc[i] = vaddq_u64(xacc[i], sum_1);
+ xacc[i+1] = vaddq_u64(xacc[i+1], sum_2);
+ }
+ /* Operate on the remaining NEON lanes 2 at a time. */
+ for (; i < XXH3_NEON_LANES / 2; i++) {
+ /* data_vec = xinput[i]; */
+ uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16));
+ /* key_vec = xsecret[i]; */
+ uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16));
+ /* acc_vec_2 = swap(data_vec) */
+ uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1);
+ /* data_key = data_vec ^ key_vec; */
+ uint64x2_t data_key = veorq_u64(data_vec, key_vec);
+ /* For two lanes, just use VMOVN and VSHRN. */
+ /* data_key_lo = data_key & 0xFFFFFFFF; */
+ uint32x2_t data_key_lo = vmovn_u64(data_key);
+ /* data_key_hi = data_key >> 32; */
+ uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32);
+ /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */
+ uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi);
+ /* Same Clang workaround as before */
+ XXH_COMPILER_GUARD_CLANG_NEON(sum);
+ /* xacc[i] = acc_vec + sum; */
+ xacc[i] = vaddq_u64 (xacc[i], sum);
+ }
+ }
}
+XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon)
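
How the eight accumulator lanes are split between the paths above, sketched for an assumed XXH3_NEON_LANES of 6 (the value the comment above cites as optimal for Cortex cores):

#include <stdio.h>

int main(void) {
    unsigned const ACC_NB = 8, NEON_LANES = 6;   /* 8 x 64-bit accumulators */
    unsigned i;
    for (i = NEON_LANES; i < ACC_NB; i++)        /* lanes 6 and 7 */
        printf("lane %u: XXH3_scalarRound\n", i);
    for (i = 0; i + 1 < NEON_LANES / 2; i += 2)  /* pairs 0 and 1 */
        printf("pairs %u+%u: 4-lane NEON path\n", i, i + 1);
    for (; i < NEON_LANES / 2; i++)              /* pair 2 */
        printf("pair %u: 2-lane NEON path\n", i);
    return 0;
}
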
- #endif
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
- /* x86dispatch always generates SSE2 */
- #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
+ { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc;
+ uint8_t const* xsecret = (uint8_t const*) secret;
- #ifndef XXH_TARGET_SSE2
- #define XXH_TARGET_SSE2 /* disable attribute target */
- #endif
-
-XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_accumulate_512_sse2(
- void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
- const void *XXH_RESTRICT secret) {
-
- /* SSE2 is just a half-scale version of the AVX2 version. */
- XXH_ASSERT((((size_t)acc) & 15) == 0);
- {
-
- __m128i *const xacc = (__m128i *)acc;
- /* Unaligned. This is mainly for pointer arithmetic, and because
- * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
- const __m128i *const xinput = (const __m128i *)input;
- /* Unaligned. This is mainly for pointer arithmetic, and because
- * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
- const __m128i *const xsecret = (const __m128i *)secret;
-
- size_t i;
- for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m128i); i++) {
-
- /* data_vec = xinput[i]; */
- __m128i const data_vec = _mm_loadu_si128(xinput + i);
- /* key_vec = xsecret[i]; */
- __m128i const key_vec = _mm_loadu_si128(xsecret + i);
- /* data_key = data_vec ^ key_vec; */
- __m128i const data_key = _mm_xor_si128(data_vec, key_vec);
- /* data_key_lo = data_key >> 32; */
- __m128i const data_key_lo =
- _mm_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
- /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
- __m128i const product = _mm_mul_epu32(data_key, data_key_lo);
- /* xacc[i] += swap(data_vec); */
- __m128i const data_swap =
- _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
- __m128i const sum = _mm_add_epi64(xacc[i], data_swap);
- /* xacc[i] += product; */
- xacc[i] = _mm_add_epi64(product, sum);
+ size_t i;
+ /* WASM uses operator overloads and doesn't need these. */
+#ifndef __wasm_simd128__
+ /* { prime32_1, prime32_1 } */
+ uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1);
+ /* { 0, prime32_1, 0, prime32_1 } */
+ uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32));
+#endif
+ /* AArch64 uses both scalar and neon at the same time */
+ for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
+ XXH3_scalarScrambleRound(acc, secret, i);
+ }
+ for (i=0; i < XXH3_NEON_LANES / 2; i++) {
+ /* xacc[i] ^= (xacc[i] >> 47); */
+ uint64x2_t acc_vec = xacc[i];
+ uint64x2_t shifted = vshrq_n_u64(acc_vec, 47);
+ uint64x2_t data_vec = veorq_u64(acc_vec, shifted);
+
+ /* xacc[i] ^= xsecret[i]; */
+ uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16));
+ uint64x2_t data_key = veorq_u64(data_vec, key_vec);
+ /* xacc[i] *= XXH_PRIME32_1 */
+#ifdef __wasm_simd128__
+ /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */
+ xacc[i] = data_key * XXH_PRIME32_1;
+#else
+ /*
+ * Expanded version with portable NEON intrinsics
+ *
+ * lo(x) * lo(y) + (hi(x) * lo(y) << 32)
+ *
+ * prod_hi = hi(data_key) * lo(prime) << 32
+ *
+             * Since we only need 32 bits of this multiply, a trick can be used: reinterpreting the vector
+ * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits
+ * and avoid the shift.
+ */
+ uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi);
+ /* Extract low bits for vmlal_u32 */
+ uint32x2_t data_key_lo = vmovn_u64(data_key);
+ /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */
+ xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo);
+#endif
+ }
}
-
- }
-
}
+#endif
-XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_scrambleAcc_sse2(
- void *XXH_RESTRICT acc, const void *XXH_RESTRICT secret) {
-
- XXH_ASSERT((((size_t)acc) & 15) == 0);
- {
-
- __m128i *const xacc = (__m128i *)acc;
- /* Unaligned. This is mainly for pointer arithmetic, and because
- * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
- const __m128i *const xsecret = (const __m128i *)secret;
- const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
-
+#if (XXH_VECTOR == XXH_VSX)
+
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ /* presumed aligned */
+ xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
+ xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */
+ xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */
+ xxh_u64x2 const v32 = { 32, 32 };
size_t i;
- for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m128i); i++) {
-
- /* xacc[i] ^= (xacc[i] >> 47) */
- __m128i const acc_vec = xacc[i];
- __m128i const shifted = _mm_srli_epi64(acc_vec, 47);
- __m128i const data_vec = _mm_xor_si128(acc_vec, shifted);
- /* xacc[i] ^= xsecret[i]; */
- __m128i const key_vec = _mm_loadu_si128(xsecret + i);
- __m128i const data_key = _mm_xor_si128(data_vec, key_vec);
-
- /* xacc[i] *= XXH_PRIME32_1; */
- __m128i const data_key_hi =
- _mm_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
- __m128i const prod_lo = _mm_mul_epu32(data_key, prime32);
- __m128i const prod_hi = _mm_mul_epu32(data_key_hi, prime32);
- xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
-
+ for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
+ /* data_vec = xinput[i]; */
+ xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i);
+ /* key_vec = xsecret[i]; */
+ xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i);
+ xxh_u64x2 const data_key = data_vec ^ key_vec;
+ /* shuffled = (data_key << 32) | (data_key >> 32); */
+ xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
+ /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
+ xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
+ /* acc_vec = xacc[i]; */
+ xxh_u64x2 acc_vec = xacc[i];
+ acc_vec += product;
+
+ /* swap high and low halves */
+#ifdef __s390x__
+ acc_vec += vec_permi(data_vec, data_vec, 2);
+#else
+ acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
+#endif
+ xacc[i] = acc_vec;
}
-
- }
-
+}
+XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx)
+
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ XXH_ASSERT((((size_t)acc) & 15) == 0);
+
+ { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
+ const xxh_u8* const xsecret = (const xxh_u8*) secret;
+ /* constants */
+ xxh_u64x2 const v32 = { 32, 32 };
+ xxh_u64x2 const v47 = { 47, 47 };
+ xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
+ size_t i;
+ for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
+ /* xacc[i] ^= (xacc[i] >> 47); */
+ xxh_u64x2 const acc_vec = xacc[i];
+ xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
+
+ /* xacc[i] ^= xsecret[i]; */
+ xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i);
+ xxh_u64x2 const data_key = data_vec ^ key_vec;
+
+ /* xacc[i] *= XXH_PRIME32_1 */
+ /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
+ xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
+ /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
+ xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
+ xacc[i] = prod_odd + (prod_even << v32);
+ } }
}
-XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(
- void *XXH_RESTRICT customSecret, xxh_u64 seed64) {
-
- XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
- (void)(&XXH_writeLE64);
- {
-
- int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
-
- #if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
- /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
- XXH_ALIGN(16)
- const xxh_i64 seed64x2[2] = {(xxh_i64)seed64, (xxh_i64)(0U - seed64)};
- __m128i const seed = _mm_load_si128((__m128i const *)seed64x2);
- #else
- __m128i const seed =
- _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
- #endif
- int i;
-
- const void *const src16 = XXH3_kSecret;
- __m128i *dst16 = (__m128i *)customSecret;
- #if defined(__GNUC__) || defined(__clang__)
- /*
- * On GCC & Clang, marking 'dest' as modified will cause the compiler:
- * - do not extract the secret from sse registers in the internal loop
- * - use less common registers, and avoid pushing these reg into stack
- */
- XXH_COMPILER_GUARD(dst16);
- #endif
- XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
- XXH_ASSERT(((size_t)dst16 & 15) == 0);
-
- for (i = 0; i < nbRounds; ++i) {
-
- dst16[i] =
- _mm_add_epi64(_mm_load_si128((const __m128i *)src16 + i), seed);
+#endif
+#if (XXH_VECTOR == XXH_SVE)
+
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_sve( void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ uint64_t *xacc = (uint64_t *)acc;
+ const uint64_t *xinput = (const uint64_t *)(const void *)input;
+ const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
+ svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
+ uint64_t element_count = svcntd();
+ if (element_count >= 8) {
+ svbool_t mask = svptrue_pat_b64(SV_VL8);
+ svuint64_t vacc = svld1_u64(mask, xacc);
+ ACCRND(vacc, 0);
+ svst1_u64(mask, xacc, vacc);
+ } else if (element_count == 2) { /* sve128 */
+ svbool_t mask = svptrue_pat_b64(SV_VL2);
+ svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+ svuint64_t acc1 = svld1_u64(mask, xacc + 2);
+ svuint64_t acc2 = svld1_u64(mask, xacc + 4);
+ svuint64_t acc3 = svld1_u64(mask, xacc + 6);
+ ACCRND(acc0, 0);
+ ACCRND(acc1, 2);
+ ACCRND(acc2, 4);
+ ACCRND(acc3, 6);
+ svst1_u64(mask, xacc + 0, acc0);
+ svst1_u64(mask, xacc + 2, acc1);
+ svst1_u64(mask, xacc + 4, acc2);
+ svst1_u64(mask, xacc + 6, acc3);
+ } else {
+ svbool_t mask = svptrue_pat_b64(SV_VL4);
+ svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+ svuint64_t acc1 = svld1_u64(mask, xacc + 4);
+ ACCRND(acc0, 0);
+ ACCRND(acc1, 4);
+ svst1_u64(mask, xacc + 0, acc0);
+ svst1_u64(mask, xacc + 4, acc1);
}
-
- }
-
}
- #endif
-
- #if (XXH_VECTOR == XXH_NEON)
-
-XXH_FORCE_INLINE void XXH3_accumulate_512_neon(
- void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
- const void *XXH_RESTRICT secret) {
-
- XXH_ASSERT((((size_t)acc) & 15) == 0);
- {
-
- uint64x2_t *const xacc = (uint64x2_t *)acc;
- /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7.
- */
- uint8_t const *const xinput = (const uint8_t *)input;
- uint8_t const *const xsecret = (const uint8_t *)secret;
-
- size_t i;
- for (i = 0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) {
-
- /* data_vec = xinput[i]; */
- uint8x16_t data_vec = vld1q_u8(xinput + (i * 16));
- /* key_vec = xsecret[i]; */
- uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16));
- uint64x2_t data_key;
- uint32x2_t data_key_lo, data_key_hi;
- /* xacc[i] += swap(data_vec); */
- uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec);
- uint64x2_t const swapped = vextq_u64(data64, data64, 1);
- xacc[i] = vaddq_u64(xacc[i], swapped);
- /* data_key = data_vec ^ key_vec; */
- data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
- /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
- * data_key_hi = (uint32x2_t) (data_key >> 32);
- * data_key = UNDEFINED; */
- XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
- /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
- xacc[i] = vmlal_u32(xacc[i], data_key_lo, data_key_hi);
-
+XXH_FORCE_INLINE void
+XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc,
+ const xxh_u8* XXH_RESTRICT input,
+ const xxh_u8* XXH_RESTRICT secret,
+ size_t nbStripes)
+{
+ if (nbStripes != 0) {
+ uint64_t *xacc = (uint64_t *)acc;
+ const uint64_t *xinput = (const uint64_t *)(const void *)input;
+ const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
+ svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
+ uint64_t element_count = svcntd();
+ if (element_count >= 8) {
+ svbool_t mask = svptrue_pat_b64(SV_VL8);
+ svuint64_t vacc = svld1_u64(mask, xacc + 0);
+ do {
+ /* svprfd(svbool_t, void *, enum svfprop); */
+ svprfd(mask, xinput + 128, SV_PLDL1STRM);
+ ACCRND(vacc, 0);
+ xinput += 8;
+ xsecret += 1;
+ nbStripes--;
+ } while (nbStripes != 0);
+
+ svst1_u64(mask, xacc + 0, vacc);
+ } else if (element_count == 2) { /* sve128 */
+ svbool_t mask = svptrue_pat_b64(SV_VL2);
+ svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+ svuint64_t acc1 = svld1_u64(mask, xacc + 2);
+ svuint64_t acc2 = svld1_u64(mask, xacc + 4);
+ svuint64_t acc3 = svld1_u64(mask, xacc + 6);
+ do {
+ svprfd(mask, xinput + 128, SV_PLDL1STRM);
+ ACCRND(acc0, 0);
+ ACCRND(acc1, 2);
+ ACCRND(acc2, 4);
+ ACCRND(acc3, 6);
+ xinput += 8;
+ xsecret += 1;
+ nbStripes--;
+ } while (nbStripes != 0);
+
+ svst1_u64(mask, xacc + 0, acc0);
+ svst1_u64(mask, xacc + 2, acc1);
+ svst1_u64(mask, xacc + 4, acc2);
+ svst1_u64(mask, xacc + 6, acc3);
+ } else {
+ svbool_t mask = svptrue_pat_b64(SV_VL4);
+ svuint64_t acc0 = svld1_u64(mask, xacc + 0);
+ svuint64_t acc1 = svld1_u64(mask, xacc + 4);
+ do {
+ svprfd(mask, xinput + 128, SV_PLDL1STRM);
+ ACCRND(acc0, 0);
+ ACCRND(acc1, 4);
+ xinput += 8;
+ xsecret += 1;
+ nbStripes--;
+ } while (nbStripes != 0);
+
+ svst1_u64(mask, xacc + 0, acc0);
+ svst1_u64(mask, xacc + 4, acc1);
+ }
}
-
- }
-
}
-XXH_FORCE_INLINE void XXH3_scrambleAcc_neon(void *XXH_RESTRICT acc,
- const void *XXH_RESTRICT secret) {
-
- XXH_ASSERT((((size_t)acc) & 15) == 0);
-
- {
-
- uint64x2_t *xacc = (uint64x2_t *)acc;
- uint8_t const *xsecret = (uint8_t const *)secret;
- uint32x2_t prime = vdup_n_u32(XXH_PRIME32_1);
+#endif
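
The SVE paths key their grouping off svcntd(), the number of 64-bit elements per vector; a sketch (with a hypothetical sve_grouping helper) of the selection logic for the three supported widths:

#include <stdio.h>

static const char *sve_grouping(unsigned element_count) {
    if (element_count >= 8) return "1 vector of 8 lanes (SV_VL8)";
    if (element_count == 2) return "4 vectors of 2 lanes (SV_VL2, sve128)";
    return "2 vectors of 4 lanes (SV_VL4)";
}

int main(void) {
    unsigned const counts[] = { 2, 4, 8 };   /* 128-, 256-, 512-bit SVE */
    unsigned i;
    for (i = 0; i < 3; i++)
        printf("svcntd()=%u -> %s\n", counts[i], sve_grouping(counts[i]));
    return 0;
}
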
- size_t i;
- for (i = 0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) {
-
- /* xacc[i] ^= (xacc[i] >> 47); */
- uint64x2_t acc_vec = xacc[i];
- uint64x2_t shifted = vshrq_n_u64(acc_vec, 47);
- uint64x2_t data_vec = veorq_u64(acc_vec, shifted);
-
- /* xacc[i] ^= xsecret[i]; */
- uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16));
- uint64x2_t data_key = veorq_u64(data_vec, vreinterpretq_u64_u8(key_vec));
-
- /* xacc[i] *= XXH_PRIME32_1 */
- uint32x2_t data_key_lo, data_key_hi;
- /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
- * data_key_hi = (uint32x2_t) (xacc[i] >> 32);
- * xacc[i] = UNDEFINED; */
- XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
- { /*
- * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
- *
- * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
- * incorrectly "optimize" this:
- * tmp = vmul_u32(vmovn_u64(a), vmovn_u64(b));
- * shifted = vshll_n_u32(tmp, 32);
- * to this:
- * tmp = "vmulq_u64"(a, b); // no such thing!
- * shifted = vshlq_n_u64(tmp, 32);
- *
- * However, unlike SSE, Clang lacks a 64-bit multiply routine
- * for NEON, and it scalarizes two 64-bit multiplies instead.
- *
- * vmull_u32 has the same timing as vmul_u32, and it avoids
- * this bug completely.
- * See https://bugs.llvm.org/show_bug.cgi?id=39967
- */
- uint64x2_t prod_hi = vmull_u32(data_key_hi, prime);
- /* xacc[i] = prod_hi << 32; */
- xacc[i] = vshlq_n_u64(prod_hi, 32);
- /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */
- xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
+/* scalar variants - universal */
- }
+#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__))
+/*
+ * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they
+ * emit an excess mask and a full 64-bit multiply-add (MADD X-form).
+ *
+ * While this might not seem like much, as AArch64 is a 64-bit architecture, only
+ * big Cortex designs have a full 64-bit multiplier.
+ *
+ * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit
+ * multiplies expand to 2-3 multiplies in microcode. This has a major penalty
+ * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline.
+ *
+ * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does
+ * not have this penalty and does the mask automatically.
+ */
+XXH_FORCE_INLINE xxh_u64
+XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
+{
+ xxh_u64 ret;
+ /* note: %x = 64-bit register, %w = 32-bit register */
+ __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc));
+ return ret;
+}
+#else
+XXH_FORCE_INLINE xxh_u64
+XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
+{
+ return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc;
+}
+#endif
+/*!
+ * @internal
+ * @brief Scalar round for @ref XXH3_accumulate_512_scalar().
+ *
+ * This is extracted to its own function because the NEON path uses a combination
+ * of NEON and scalar.
+ */
+XXH_FORCE_INLINE void
+XXH3_scalarRound(void* XXH_RESTRICT acc,
+ void const* XXH_RESTRICT input,
+ void const* XXH_RESTRICT secret,
+ size_t lane)
+{
+ xxh_u64* xacc = (xxh_u64*) acc;
+ xxh_u8 const* xinput = (xxh_u8 const*) input;
+ xxh_u8 const* xsecret = (xxh_u8 const*) secret;
+ XXH_ASSERT(lane < XXH_ACC_NB);
+ XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
+ {
+ xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
+ xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
+ xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
+ xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]);
}
-
- }
-
}
- #endif
-
- #if (XXH_VECTOR == XXH_VSX)
-
-XXH_FORCE_INLINE void XXH3_accumulate_512_vsx(void *XXH_RESTRICT acc,
- const void *XXH_RESTRICT input,
- const void *XXH_RESTRICT secret) {
-
- xxh_u64x2 *const xacc = (xxh_u64x2 *)acc; /* presumed aligned */
- xxh_u64x2 const *const xinput =
- (xxh_u64x2 const *)input; /* no alignment restriction */
- xxh_u64x2 const *const xsecret =
- (xxh_u64x2 const *)secret; /* no alignment restriction */
- xxh_u64x2 const v32 = {32, 32};
- size_t i;
- for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
-
- /* data_vec = xinput[i]; */
- xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
- /* key_vec = xsecret[i]; */
- xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
- xxh_u64x2 const data_key = data_vec ^ key_vec;
- /* shuffled = (data_key << 32) | (data_key >> 32); */
- xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
- /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled &
- * 0xFFFFFFFF); */
- xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
- xacc[i] += product;
-
- /* swap high and low halves */
- #ifdef __s390x__
- xacc[i] += vec_permi(data_vec, data_vec, 2);
- #else
- xacc[i] += vec_xxpermdi(data_vec, data_vec, 2);
- #endif
-
- }
-
+/*!
+ * @internal
+ * @brief Processes a 64 byte block of data using the scalar path.
+ */
+XXH_FORCE_INLINE void
+XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
+ const void* XXH_RESTRICT input,
+ const void* XXH_RESTRICT secret)
+{
+ size_t i;
+ /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */
+#if defined(__GNUC__) && !defined(__clang__) \
+ && (defined(__arm__) || defined(__thumb2__)) \
+ && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \
+ && XXH_SIZE_OPT <= 0
+# pragma GCC unroll 8
+#endif
+ for (i=0; i < XXH_ACC_NB; i++) {
+ XXH3_scalarRound(acc, input, secret, i);
+ }
}
+XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar)
-XXH_FORCE_INLINE void XXH3_scrambleAcc_vsx(void *XXH_RESTRICT acc,
- const void *XXH_RESTRICT secret) {
-
- XXH_ASSERT((((size_t)acc) & 15) == 0);
-
- {
-
- xxh_u64x2 *const xacc = (xxh_u64x2 *)acc;
- const xxh_u64x2 *const xsecret = (const xxh_u64x2 *)secret;
- /* constants */
- xxh_u64x2 const v32 = {32, 32};
- xxh_u64x2 const v47 = {47, 47};
- xxh_u32x4 const prime = {XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1,
- XXH_PRIME32_1};
- size_t i;
- for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
-
- /* xacc[i] ^= (xacc[i] >> 47); */
- xxh_u64x2 const acc_vec = xacc[i];
- xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
-
- /* xacc[i] ^= xsecret[i]; */
- xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
- xxh_u64x2 const data_key = data_vec ^ key_vec;
-
- /* xacc[i] *= XXH_PRIME32_1 */
- /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime &
- * 0xFFFFFFFF); */
- xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
- /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
- xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
- xacc[i] = prod_odd + (prod_even << v32);
-
+/*!
+ * @internal
+ * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
+ *
+ * This is extracted to its own function because the NEON path uses a combination
+ * of NEON and scalar.
+ */
+XXH_FORCE_INLINE void
+XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
+ void const* XXH_RESTRICT secret,
+ size_t lane)
+{
+ xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
+ const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
+ XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
+ XXH_ASSERT(lane < XXH_ACC_NB);
+ {
+ xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
+ xxh_u64 acc64 = xacc[lane];
+ acc64 = XXH_xorshift64(acc64, 47);
+ acc64 ^= key64;
+ acc64 *= XXH_PRIME32_1;
+ xacc[lane] = acc64;
}
-
- }
-
}
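
Every scramble variant performs this same per-lane update; in isolation (a sketch with a hypothetical scramble_lane; 0x9E3779B1 is the value of XXH_PRIME32_1):

#include <stdint.h>

static uint64_t scramble_lane(uint64_t acc64, uint64_t key64) {
    acc64 ^= acc64 >> 47;        /* XXH_xorshift64(acc64, 47)  */
    acc64 ^= key64;              /* mix in one secret word     */
    acc64 *= 0x9E3779B1ULL;      /* multiply by XXH_PRIME32_1  */
    return acc64;
}
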
- #endif
-
-/* scalar variants - universal */
-
-XXH_FORCE_INLINE void XXH3_accumulate_512_scalar(
- void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
- const void *XXH_RESTRICT secret) {
-
- xxh_u64 *const xacc = (xxh_u64 *)acc; /* presumed aligned */
- const xxh_u8 *const xinput =
- (const xxh_u8 *)input; /* no alignment restriction */
- const xxh_u8 *const xsecret =
- (const xxh_u8 *)secret; /* no alignment restriction */
- size_t i;
- XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN - 1)) == 0);
- for (i = 0; i < XXH_ACC_NB; i++) {
-
- xxh_u64 const data_val = XXH_readLE64(xinput + 8 * i);
- xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i * 8);
- xacc[i ^ 1] += data_val; /* swap adjacent lanes */
- xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
-
- }
-
-}
-
-XXH_FORCE_INLINE void XXH3_scrambleAcc_scalar(void *XXH_RESTRICT acc,
- const void *XXH_RESTRICT secret) {
-
- xxh_u64 *const xacc = (xxh_u64 *)acc; /* presumed aligned */
- const xxh_u8 *const xsecret =
- (const xxh_u8 *)secret; /* no alignment restriction */
- size_t i;
- XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN - 1)) == 0);
- for (i = 0; i < XXH_ACC_NB; i++) {
-
- xxh_u64 const key64 = XXH_readLE64(xsecret + 8 * i);
- xxh_u64 acc64 = xacc[i];
- acc64 = XXH_xorshift64(acc64, 47);
- acc64 ^= key64;
- acc64 *= XXH_PRIME32_1;
- xacc[i] = acc64;
-
- }
-
-}
-
-XXH_FORCE_INLINE void XXH3_initCustomSecret_scalar(
- void *XXH_RESTRICT customSecret, xxh_u64 seed64) {
-
- /*
- * We need a separate pointer for the hack below,
- * which requires a non-const pointer.
- * Any decent compiler will optimize this out otherwise.
- */
- const xxh_u8 *kSecretPtr = XXH3_kSecret;
- XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
-
- #if defined(__clang__) && defined(__aarch64__)
- /*
- * UGLY HACK:
- * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
- * placed sequentially, in order, at the top of the unrolled loop.
- *
- * While MOVK is great for generating constants (2 cycles for a 64-bit
- * constant compared to 4 cycles for LDR), long MOVK chains stall the
- * integer pipelines:
- * I L S
- * MOVK
- * MOVK
- * MOVK
- * MOVK
- * ADD
- * SUB STR
- * STR
- * By forcing loads from memory (as the asm line causes Clang to assume
- * that XXH3_kSecretPtr has been changed), the pipelines are used more
- * efficiently:
- * I L S
- * LDR
- * ADD LDR
- * SUB STR
- * STR
- * XXH3_64bits_withSeed, len == 256, Snapdragon 835
- * without hack: 2654.4 MB/s
- * with hack: 3202.9 MB/s
- */
- XXH_COMPILER_GUARD(kSecretPtr);
- #endif
- /*
- * Note: in debug mode, this overrides the asm optimization
- * and Clang will emit MOVK chains again.
- */
- XXH_ASSERT(kSecretPtr == XXH3_kSecret);
-
- {
-
- int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
- int i;
- for (i = 0; i < nbRounds; i++) {
-
- /*
- * The asm hack causes Clang to assume that kSecretPtr aliases with
- * customSecret, and on aarch64, this prevented LDP from merging two
- * loads together for free. Putting the loads together before the stores
- * properly generates LDP.
- */
- xxh_u64 lo = XXH_readLE64(kSecretPtr + 16 * i) + seed64;
- xxh_u64 hi = XXH_readLE64(kSecretPtr + 16 * i + 8) - seed64;
- XXH_writeLE64((xxh_u8 *)customSecret + 16 * i, lo);
- XXH_writeLE64((xxh_u8 *)customSecret + 16 * i + 8, hi);
-
+/*!
+ * @internal
+ * @brief Scrambles the accumulators after a large chunk has been read
+ */
+XXH_FORCE_INLINE void
+XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
+{
+ size_t i;
+ for (i=0; i < XXH_ACC_NB; i++) {
+ XXH3_scalarScrambleRound(acc, secret, i);
}
-
- }
-
}
-typedef void (*XXH3_f_accumulate_512)(void *XXH_RESTRICT, const void *,
- const void *);
-typedef void (*XXH3_f_scrambleAcc)(void *XXH_RESTRICT, const void *);
-typedef void (*XXH3_f_initCustomSecret)(void *XXH_RESTRICT, xxh_u64);
-
- #if (XXH_VECTOR == XXH_AVX512)
-
- #define XXH3_accumulate_512 XXH3_accumulate_512_avx512
- #define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
- #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
-
- #elif (XXH_VECTOR == XXH_AVX2)
-
- #define XXH3_accumulate_512 XXH3_accumulate_512_avx2
- #define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
- #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
-
- #elif (XXH_VECTOR == XXH_SSE2)
-
- #define XXH3_accumulate_512 XXH3_accumulate_512_sse2
- #define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
- #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
-
- #elif (XXH_VECTOR == XXH_NEON)
-
- #define XXH3_accumulate_512 XXH3_accumulate_512_neon
- #define XXH3_scrambleAcc XXH3_scrambleAcc_neon
- #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+XXH_FORCE_INLINE void
+XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
+{
+ /*
+ * We need a separate pointer for the hack below,
+ * which requires a non-const pointer.
+ * Any decent compiler will optimize this out otherwise.
+ */
+ const xxh_u8* kSecretPtr = XXH3_kSecret;
+ XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
- #elif (XXH_VECTOR == XXH_VSX)
+#if defined(__GNUC__) && defined(__aarch64__)
+ /*
+ * UGLY HACK:
+ * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
+ * placed sequentially, in order, at the top of the unrolled loop.
+ *
+ * While MOVK is great for generating constants (2 cycles for a 64-bit
+ * constant compared to 4 cycles for LDR), it fights for bandwidth with
+ * the arithmetic instructions.
+ *
+ * I L S
+ * MOVK
+ * MOVK
+ * MOVK
+ * MOVK
+ * ADD
+ * SUB STR
+ * STR
+ * By forcing loads from memory (as the asm line causes the compiler to assume
+ * that XXH3_kSecretPtr has been changed), the pipelines are used more
+ * efficiently:
+ * I L S
+ * LDR
+ * ADD LDR
+ * SUB STR
+ * STR
+ *
+ * See XXH3_NEON_LANES for details on the pipeline.
+ *
+ * XXH3_64bits_withSeed, len == 256, Snapdragon 835
+ * without hack: 2654.4 MB/s
+ * with hack: 3202.9 MB/s
+ */
+ XXH_COMPILER_GUARD(kSecretPtr);
+#endif
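    /*
     * For context: XXH_COMPILER_GUARD is defined earlier in this header; on
     * GCC-compatible compilers it is essentially an empty inline-asm
     * statement that marks its operand as modified, roughly (a sketch; the
     * exact definition varies by compiler):
     *
     *   #define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
     *
     * The empty asm emits no instructions; it only blocks the constant
     * propagation described above.
     */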
+ { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
+ int i;
+ for (i=0; i < nbRounds; i++) {
+ /*
+ * The asm hack causes the compiler to assume that kSecretPtr aliases with
+ * customSecret, and on aarch64, this prevented LDP from merging two
+ * loads together for free. Putting the loads together before the stores
+ * properly generates LDP.
+ */
+ xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64;
+ xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
+ XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo);
+ XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
+ } }
+}
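/*
 * A worked example of the loop above, using the default 192-byte secret
 * (XXH_SECRET_DEFAULT_SIZE == 192): nbRounds == 192/16 == 12, and round i
 * rewrites one 16-byte pair of the output:
 *
 *   customSecret[16*i   .. 16*i+7 ] = readLE64(kSecret + 16*i)     + seed64
 *   customSecret[16*i+8 .. 16*i+15] = readLE64(kSecret + 16*i + 8) - seed64
 */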
- #define XXH3_accumulate_512 XXH3_accumulate_512_vsx
- #define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
- #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
- #else /* scalar */
+typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t);
+typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
+typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
- #define XXH3_accumulate_512 XXH3_accumulate_512_scalar
- #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
- #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
- #endif
+#if (XXH_VECTOR == XXH_AVX512)
- #ifndef XXH_PREFETCH_DIST
- #ifdef __clang__
- #define XXH_PREFETCH_DIST 320
- #else
- #if (XXH_VECTOR == XXH_AVX512)
- #define XXH_PREFETCH_DIST 512
- #else
- #define XXH_PREFETCH_DIST 384
- #endif
- #endif /* __clang__ */
- #endif /* XXH_PREFETCH_DIST */
+#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
+#define XXH3_accumulate XXH3_accumulate_avx512
+#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
+#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
-/*
- * XXH3_accumulate()
- * Loops over XXH3_accumulate_512().
- * Assumption: nbStripes will not overflow the secret size
- */
-XXH_FORCE_INLINE void XXH3_accumulate(xxh_u64 *XXH_RESTRICT acc,
- const xxh_u8 *XXH_RESTRICT input,
- const xxh_u8 *XXH_RESTRICT secret,
- size_t nbStripes,
- XXH3_f_accumulate_512 f_acc512) {
+#elif (XXH_VECTOR == XXH_AVX2)
- size_t n;
- for (n = 0; n < nbStripes; n++) {
+#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
+#define XXH3_accumulate XXH3_accumulate_avx2
+#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
+#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
- const xxh_u8 *const in = input + n * XXH_STRIPE_LEN;
- XXH_PREFETCH(in + XXH_PREFETCH_DIST);
- f_acc512(acc, in, secret + n * XXH_SECRET_CONSUME_RATE);
+#elif (XXH_VECTOR == XXH_SSE2)
- }
+#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
+#define XXH3_accumulate XXH3_accumulate_sse2
+#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
+#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
-}
+#elif (XXH_VECTOR == XXH_NEON)
-XXH_FORCE_INLINE void XXH3_hashLong_internal_loop(
- xxh_u64 *XXH_RESTRICT acc, const xxh_u8 *XXH_RESTRICT input, size_t len,
- const xxh_u8 *XXH_RESTRICT secret, size_t secretSize,
- XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble) {
+#define XXH3_accumulate_512 XXH3_accumulate_512_neon
+#define XXH3_accumulate XXH3_accumulate_neon
+#define XXH3_scrambleAcc XXH3_scrambleAcc_neon
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
- size_t const nbStripesPerBlock =
- (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
- size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
- size_t const nb_blocks = (len - 1) / block_len;
+#elif (XXH_VECTOR == XXH_VSX)
- size_t n;
+#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
+#define XXH3_accumulate XXH3_accumulate_vsx
+#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
- XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+#elif (XXH_VECTOR == XXH_SVE)
+#define XXH3_accumulate_512 XXH3_accumulate_512_sve
+#define XXH3_accumulate XXH3_accumulate_sve
+#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
- for (n = 0; n < nb_blocks; n++) {
+#else /* scalar */
- XXH3_accumulate(acc, input + n * block_len, secret, nbStripesPerBlock,
- f_acc512);
- f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
+#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
+#define XXH3_accumulate XXH3_accumulate_scalar
+#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
+#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
- }
+#endif
- /* last partial block */
- XXH_ASSERT(len > XXH_STRIPE_LEN);
- {
+#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */
+# undef XXH3_initCustomSecret
+# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+#endif
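/*
 * For reference, the dispatch above can be steered at build time by defining
 * XXH_VECTOR before this point; e.g. the portable scalar paths can be forced
 * with (a sketch; XXH_SCALAR is one of the XXH_VECTOR values defined earlier
 * in this header):
 *
 *   #define XXH_VECTOR XXH_SCALAR
 */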
- size_t const nbStripes =
- ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
- XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
- XXH3_accumulate(acc, input + nb_blocks * block_len, secret, nbStripes,
- f_acc512);
+XXH_FORCE_INLINE void
+XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
+ const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
+ size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
+ size_t const nb_blocks = (len - 1) / block_len;
- /* last stripe */
- {
+ size_t n;
- const xxh_u8 *const p = input + len - XXH_STRIPE_LEN;
- #define XXH_SECRET_LASTACC_START \
- 7 /* not aligned on 8, last secret is different from acc & scrambler \
- */
- f_acc512(acc, p,
- secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+ for (n = 0; n < nb_blocks; n++) {
+ f_acc(acc, input + n*block_len, secret, nbStripesPerBlock);
+ f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
}
- }
+ /* last partial block */
+ XXH_ASSERT(len > XXH_STRIPE_LEN);
+ { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
+ XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
+ f_acc(acc, input + nb_blocks*block_len, secret, nbStripes);
+ /* last stripe */
+ { const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
+#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */
+ XXH3_accumulate_512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
+ } }
}
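/*
 * Block geometry of the loop above, worked through for the default 192-byte
 * secret (XXH_STRIPE_LEN == 64, XXH_SECRET_CONSUME_RATE == 8):
 *
 *   nbStripesPerBlock = (192 - 64) / 8 = 16 stripes
 *   block_len         = 64 * 16       = 1024 bytes
 *
 * so a 4096-byte input runs (4096 - 1) / 1024 = 3 full blocks (each followed
 * by a scramble), then a partial block of 15 stripes, then the last stripe.
 */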
-XXH_FORCE_INLINE xxh_u64 XXH3_mix2Accs(const xxh_u64 *XXH_RESTRICT acc,
- const xxh_u8 *XXH_RESTRICT secret) {
-
- return XXH3_mul128_fold64(acc[0] ^ XXH_readLE64(secret),
- acc[1] ^ XXH_readLE64(secret + 8));
-
+XXH_FORCE_INLINE xxh_u64
+XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
+{
+ return XXH3_mul128_fold64(
+ acc[0] ^ XXH_readLE64(secret),
+ acc[1] ^ XXH_readLE64(secret+8) );
}
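/*
 * For reference, XXH3_mul128_fold64() (defined earlier in this header)
 * computes the full 128-bit product of its operands and folds it to 64 bits
 * by XORing the high and low halves. On a 64-bit target the mix above is
 * therefore roughly (a sketch using a GCC/Clang extension; key0/key1 stand
 * for the two secret words read above):
 *
 *   __uint128_t product = (__uint128_t)(acc[0] ^ key0) * (acc[1] ^ key1);
 *   return (xxh_u64)product ^ (xxh_u64)(product >> 64);
 */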
-static XXH64_hash_t XXH3_mergeAccs(const xxh_u64 *XXH_RESTRICT acc,
- const xxh_u8 *XXH_RESTRICT secret,
- xxh_u64 start) {
-
- xxh_u64 result64 = start;
- size_t i = 0;
+static XXH64_hash_t
+XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
+{
+ xxh_u64 result64 = start;
+ size_t i = 0;
- for (i = 0; i < 4; i++) {
-
- result64 += XXH3_mix2Accs(acc + 2 * i, secret + 16 * i);
- #if defined(__clang__) /* Clang */ \
- && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \
- && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
- && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
- /*
- * UGLY HACK:
- * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
- * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
- * XXH3_64bits, len == 256, Snapdragon 835:
- * without hack: 2063.7 MB/s
- * with hack: 2560.7 MB/s
- */
- XXH_COMPILER_GUARD(result64);
- #endif
-
- }
-
- return XXH3_avalanche(result64);
+ for (i = 0; i < 4; i++) {
+ result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
+#if defined(__clang__) /* Clang */ \
+ && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \
+ && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
+ && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
+ /*
+ * UGLY HACK:
+ * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
+ * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
+ * XXH3_64bits, len == 256, Snapdragon 835:
+ * without hack: 2063.7 MB/s
+ * with hack: 2560.7 MB/s
+ */
+ XXH_COMPILER_GUARD(result64);
+#endif
+ }
+ return XXH3_avalanche(result64);
}
- #define XXH3_INIT_ACC \
- { \
- \
- XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
- XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 \
- \
- }
-
-XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_internal(
- const void *XXH_RESTRICT input, size_t len, const void *XXH_RESTRICT secret,
- size_t secretSize, XXH3_f_accumulate_512 f_acc512,
- XXH3_f_scrambleAcc f_scramble) {
+#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
+ XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
- XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
-
- XXH3_hashLong_internal_loop(acc, (const xxh_u8 *)input, len,
- (const xxh_u8 *)secret, secretSize, f_acc512,
- f_scramble);
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
+ const void* XXH_RESTRICT secret, size_t secretSize,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
- /* converge into final hash */
- XXH_STATIC_ASSERT(sizeof(acc) == 64);
- /* do not align on 8, so that the secret is different from the accumulator
- */
- #define XXH_SECRET_MERGEACCS_START 11
- XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
- return XXH3_mergeAccs(acc,
- (const xxh_u8 *)secret + XXH_SECRET_MERGEACCS_START,
- (xxh_u64)len * XXH_PRIME64_1);
+ XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);
+ /* converge into final hash */
+ XXH_STATIC_ASSERT(sizeof(acc) == 64);
+ /* do not align on 8, so that the secret is different from the accumulator */
+#define XXH_SECRET_MERGEACCS_START 11
+ XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+ return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
}
/*
- * It's important for performance that XXH3_hashLong is not inlined.
+ * It's important for performance to transmit the secret's size (when it's static)
+ * so that the compiler can properly optimize the vectorized loop.
+ * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
+ * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
+ * breaks -Og, this function is XXH_NO_INLINE.
*/
-XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_withSecret(
- const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
- const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) {
-
- (void)seed64;
- return XXH3_hashLong_64b_internal(input, len, secret, secretLen,
- XXH3_accumulate_512, XXH3_scrambleAcc);
-
+XXH3_WITH_SECRET_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64;
+ return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
}
/*
- * It's important for performance that XXH3_hashLong is not inlined.
- * Since the function is not inlined, the compiler may not be able to understand
- * that, in some scenarios, its `secret` argument is actually a compile time
- * constant. This variant enforces that the compiler can detect that, and uses
- * this opportunity to streamline the generated code for better performance.
+ * It's preferable for performance that XXH3_hashLong is not inlined,
+ * as it results in a smaller function for small data, which is easier on the instruction cache.
+ * Note that inside this no_inline function, we do inline the internal loop,
+ * and provide a statically defined secret size to allow optimization of the vector loop.
*/
-XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_default(
- const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
- const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) {
-
- (void)seed64;
- (void)secret;
- (void)secretLen;
- return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret,
- sizeof(XXH3_kSecret), XXH3_accumulate_512,
- XXH3_scrambleAcc);
-
+XXH_NO_INLINE XXH_PUREF XXH64_hash_t
+XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64; (void)secret; (void)secretLen;
+ return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc);
}
/*
* XXH3_hashLong_64b_withSeed():
- * Generate a custom key based on alteration of default XXH3_kSecret with the
- * seed, and then use this key for long mode hashing.
+ * Generate a custom key by altering the default XXH3_kSecret with the seed,
+ * and then use this key for long mode hashing.
*
* This operation is decently fast but nonetheless costs a little bit of time.
* Try to avoid it whenever possible (typically when seed==0).
@@ -5084,104 +5921,98 @@ XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_default(
* It's important for performance that XXH3_hashLong is not inlined. Not sure
* why (uop cache maybe?), but the difference is large and easily measurable.
*/
-XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_withSeed_internal(
- const void *input, size_t len, XXH64_hash_t seed,
- XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble,
- XXH3_f_initCustomSecret f_initSec) {
-
- if (seed == 0)
- return XXH3_hashLong_64b_internal(
- input, len, XXH3_kSecret, sizeof(XXH3_kSecret), f_acc512, f_scramble);
- {
-
- XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
- f_initSec(secret, seed);
- return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
- f_acc512, f_scramble);
-
- }
-
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
+ XXH64_hash_t seed,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble,
+ XXH3_f_initCustomSecret f_initSec)
+{
+#if XXH_SIZE_OPT <= 0
+ if (seed == 0)
+ return XXH3_hashLong_64b_internal(input, len,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ f_acc, f_scramble);
+#endif
+ { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+ f_initSec(secret, seed);
+ return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
+ f_acc, f_scramble);
+ }
}
/*
* It's important for performance that XXH3_hashLong is not inlined.
*/
-XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_withSeed(const void *input,
- size_t len,
- XXH64_hash_t seed,
- const xxh_u8 *secret,
- size_t secretLen) {
-
- (void)secret;
- (void)secretLen;
- return XXH3_hashLong_64b_withSeed_internal(
- input, len, seed, XXH3_accumulate_512, XXH3_scrambleAcc,
- XXH3_initCustomSecret);
-
+XXH_NO_INLINE XXH64_hash_t
+XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)secret; (void)secretLen;
+ return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
+ XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
}
-typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void *XXH_RESTRICT, size_t,
- XXH64_hash_t,
- const xxh_u8 *XXH_RESTRICT, size_t);
-XXH_FORCE_INLINE XXH64_hash_t
-XXH3_64bits_internal(const void *XXH_RESTRICT input, size_t len,
- XXH64_hash_t seed64, const void *XXH_RESTRICT secret,
- size_t secretLen, XXH3_hashLong64_f f_hashLong) {
-
- XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
- /*
- * If an action is to be taken if `secretLen` condition is not respected,
- * it should be done here.
- * For now, it's a contract pre-condition.
- * Adding a check and a branch here would cost performance at every hash.
- * Also, note that function signature doesn't offer room to return an error.
- */
- if (len <= 16)
- return XXH3_len_0to16_64b((const xxh_u8 *)input, len,
- (const xxh_u8 *)secret, seed64);
- if (len <= 128)
- return XXH3_len_17to128_64b((const xxh_u8 *)input, len,
- (const xxh_u8 *)secret, secretLen, seed64);
- if (len <= XXH3_MIDSIZE_MAX)
- return XXH3_len_129to240_64b((const xxh_u8 *)input, len,
- (const xxh_u8 *)secret, secretLen, seed64);
- return f_hashLong(input, len, seed64, (const xxh_u8 *)secret, secretLen);
+typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
+ XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
+ XXH3_hashLong64_f f_hashLong)
+{
+ XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
+ /*
+ * If an action is to be taken if `secretLen` condition is not respected,
+ * it should be done here.
+ * For now, it's a contract pre-condition.
+ * Adding a check and a branch here would cost performance at every hash.
+ * Also, note that function signature doesn't offer room to return an error.
+ */
+ if (len <= 16)
+ return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
+ if (len <= 128)
+ return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ if (len <= XXH3_MIDSIZE_MAX)
+ return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
}
-/* === Public entry point === */
-
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void *input, size_t len) {
- return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret),
- XXH3_hashLong_64b_default);
+/* === Public entry point === */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length)
+{
+ return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
}
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void *input,
- size_t len,
- const void *secret,
- size_t secretSize) {
-
- return XXH3_64bits_internal(input, len, 0, secret, secretSize,
- XXH3_hashLong_64b_withSecret);
-
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
}
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void *input, size_t len,
- XXH64_hash_t seed) {
-
- return XXH3_64bits_internal(input, len, seed, XXH3_kSecret,
- sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
+{
+ return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
+}
+XXH_PUBLIC_API XXH64_hash_t
+XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+ if (length <= XXH3_MIDSIZE_MAX)
+ return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
+ return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
}
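/*
 * A minimal one-shot usage sketch of the public entry points above (buffer
 * and seed values are illustrative):
 *
 *   const char msg[] = "hello world";
 *   XXH64_hash_t h0 = XXH3_64bits(msg, sizeof(msg) - 1);
 *   XXH64_hash_t h1 = XXH3_64bits_withSeed(msg, sizeof(msg) - 1, 42);
 */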
-/* === XXH3 streaming === */
+/* === XXH3 streaming === */
+#ifndef XXH_NO_STREAM
/*
 * Mallocs a pointer that is always aligned to align.
*
@@ -5205,412 +6036,374 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void *input, size_t len,
*
* Align must be a power of 2 and 8 <= align <= 128.
*/
-static void *XXH_alignedMalloc(size_t s, size_t align) {
-
- XXH_ASSERT(align <= 128 && align >= 8); /* range check */
- XXH_ASSERT((align & (align - 1)) == 0); /* power of 2 */
- XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */
- { /* Overallocate to make room for manual realignment and an offset byte */
- xxh_u8 *base = (xxh_u8 *)XXH_malloc(s + align);
- if (base != NULL) {
-
- /*
- * Get the offset needed to align this pointer.
- *
- * Even if the returned pointer is aligned, there will always be
- * at least one byte to store the offset to the original pointer.
- */
- size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
- /* Add the offset for the now-aligned pointer */
- xxh_u8 *ptr = base + offset;
-
- XXH_ASSERT((size_t)ptr % align == 0);
-
- /* Store the offset immediately before the returned pointer. */
- ptr[-1] = (xxh_u8)offset;
- return ptr;
-
+static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
+{
+ XXH_ASSERT(align <= 128 && align >= 8); /* range check */
+ XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */
+ XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */
+ { /* Overallocate to make room for manual realignment and an offset byte */
+ xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
+ if (base != NULL) {
+ /*
+ * Get the offset needed to align this pointer.
+ *
+ * Even if the returned pointer is aligned, there will always be
+ * at least one byte to store the offset to the original pointer.
+ */
+ size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
+ /* Add the offset for the now-aligned pointer */
+ xxh_u8* ptr = base + offset;
+
+ XXH_ASSERT((size_t)ptr % align == 0);
+
+ /* Store the offset immediately before the returned pointer. */
+ ptr[-1] = (xxh_u8)offset;
+ return ptr;
+ }
+ return NULL;
}
-
- return NULL;
-
- }
-
}
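/*
 * A worked example of the realignment above: with align == 64 and a base
 * pointer of, say, 0x1028, offset == 64 - (0x1028 & 63) == 24, so the
 * returned pointer is 0x1040 (64-byte aligned) and the byte at 0x103F stores
 * 24, letting XXH_alignedFree() below recover base as ptr - ptr[-1].
 */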
-
/*
* Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
 * normal malloc'd pointers; XXH_alignedMalloc has a specific data layout.
*/
-static void XXH_alignedFree(void *p) {
-
- if (p != NULL) {
-
- xxh_u8 *ptr = (xxh_u8 *)p;
- /* Get the offset byte we added in XXH_malloc. */
- xxh_u8 offset = ptr[-1];
- /* Free the original malloc'd pointer */
- xxh_u8 *base = ptr - offset;
- XXH_free(base);
-
- }
-
-}
-
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH3_state_t *XXH3_createState(void) {
-
- XXH3_state_t *const state =
- (XXH3_state_t *)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
- if (state == NULL) return NULL;
- XXH3_INITSTATE(state);
- return state;
-
+static void XXH_alignedFree(void* p)
+{
+ if (p != NULL) {
+ xxh_u8* ptr = (xxh_u8*)p;
+ /* Get the offset byte we added in XXH_malloc. */
+ xxh_u8 offset = ptr[-1];
+ /* Free the original malloc'd pointer */
+ xxh_u8* base = ptr - offset;
+ XXH_free(base);
+ }
}
-
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t *statePtr) {
-
- XXH_alignedFree(statePtr);
- return XXH_OK;
-
+/*! @ingroup XXH3_family */
+/*!
+ * @brief Allocate an @ref XXH3_state_t.
+ *
+ * @return An allocated pointer of @ref XXH3_state_t on success.
+ * @return `NULL` on failure.
+ *
+ * @note Must be freed with XXH3_freeState().
+ *
+ * @see @ref streaming_example "Streaming Example"
+ */
+XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
+{
+ XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
+ if (state==NULL) return NULL;
+ XXH3_INITSTATE(state);
+ return state;
}
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t *dst_state,
- const XXH3_state_t *src_state) {
-
- memcpy(dst_state, src_state, sizeof(*dst_state));
-
+/*! @ingroup XXH3_family */
+/*!
+ * @brief Frees an @ref XXH3_state_t.
+ *
+ * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
+ *
+ * @return @ref XXH_OK.
+ *
+ * @note Must be allocated with XXH3_createState().
+ *
+ * @see @ref streaming_example "Streaming Example"
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
+{
+ XXH_alignedFree(statePtr);
+ return XXH_OK;
}
-static void XXH3_reset_internal(XXH3_state_t *statePtr, XXH64_hash_t seed,
- const void *secret, size_t secretSize) {
-
- size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
- size_t const initLength =
- offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
- XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
- XXH_ASSERT(statePtr != NULL);
- /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
- memset((char *)statePtr + initStart, 0, initLength);
- statePtr->acc[0] = XXH_PRIME32_3;
- statePtr->acc[1] = XXH_PRIME64_1;
- statePtr->acc[2] = XXH_PRIME64_2;
- statePtr->acc[3] = XXH_PRIME64_3;
- statePtr->acc[4] = XXH_PRIME64_4;
- statePtr->acc[5] = XXH_PRIME32_2;
- statePtr->acc[6] = XXH_PRIME64_5;
- statePtr->acc[7] = XXH_PRIME32_1;
- statePtr->seed = seed;
- statePtr->extSecret = (const unsigned char *)secret;
- XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
- statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
- statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
-
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state)
+{
+ XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
+}
+
+static void
+XXH3_reset_internal(XXH3_state_t* statePtr,
+ XXH64_hash_t seed,
+ const void* secret, size_t secretSize)
+{
+ size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
+ size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
+ XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
+ XXH_ASSERT(statePtr != NULL);
+ /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
+ memset((char*)statePtr + initStart, 0, initLength);
+ statePtr->acc[0] = XXH_PRIME32_3;
+ statePtr->acc[1] = XXH_PRIME64_1;
+ statePtr->acc[2] = XXH_PRIME64_2;
+ statePtr->acc[3] = XXH_PRIME64_3;
+ statePtr->acc[4] = XXH_PRIME64_4;
+ statePtr->acc[5] = XXH_PRIME32_2;
+ statePtr->acc[6] = XXH_PRIME64_5;
+ statePtr->acc[7] = XXH_PRIME32_1;
+ statePtr->seed = seed;
+ statePtr->useSeed = (seed != 0);
+ statePtr->extSecret = (const unsigned char*)secret;
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+ statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
+ statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
+ return XXH_OK;
}
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t *statePtr) {
-
- if (statePtr == NULL) return XXH_ERROR;
- XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
- return XXH_OK;
-
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ XXH3_reset_internal(statePtr, 0, secret, secretSize);
+ if (secret == NULL) return XXH_ERROR;
+ if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+ return XXH_OK;
}
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(
- XXH3_state_t *statePtr, const void *secret, size_t secretSize) {
-
- if (statePtr == NULL) return XXH_ERROR;
- XXH3_reset_internal(statePtr, 0, secret, secretSize);
- if (secret == NULL) return XXH_ERROR;
- if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
- return XXH_OK;
-
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ if (seed==0) return XXH3_64bits_reset(statePtr);
+ if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
+ XXH3_initCustomSecret(statePtr->customSecret, seed);
+ XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
+ return XXH_OK;
}
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t *statePtr,
- XXH64_hash_t seed) {
-
- if (statePtr == NULL) return XXH_ERROR;
- if (seed == 0) return XXH3_64bits_reset(statePtr);
- if (seed != statePtr->seed)
- XXH3_initCustomSecret(statePtr->customSecret, seed);
- XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
- return XXH_OK;
-
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64)
+{
+ if (statePtr == NULL) return XXH_ERROR;
+ if (secret == NULL) return XXH_ERROR;
+ if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+ XXH3_reset_internal(statePtr, seed64, secret, secretSize);
+ statePtr->useSeed = 1; /* always, even if seed64==0 */
+ return XXH_OK;
}
-/* Note : when XXH3_consumeStripes() is invoked,
- * there must be a guarantee that at least one more byte must be consumed from
- * input
- * so that the function can blindly consume all stripes using the "normal"
- * secret segment */
-XXH_FORCE_INLINE void XXH3_consumeStripes(
- xxh_u64 *XXH_RESTRICT acc, size_t *XXH_RESTRICT nbStripesSoFarPtr,
- size_t nbStripesPerBlock, const xxh_u8 *XXH_RESTRICT input,
- size_t nbStripes, const xxh_u8 *XXH_RESTRICT secret, size_t secretLimit,
- XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble) {
-
- XXH_ASSERT(nbStripes <=
- nbStripesPerBlock); /* can handle max 1 scramble per invocation */
- XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
- if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
-
- /* need a scrambling operation */
- size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
- size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
- XXH3_accumulate(acc, input,
- secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE,
- nbStripesToEndofBlock, f_acc512);
- f_scramble(acc, secret + secretLimit);
- XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret,
- nbStripesAfterBlock, f_acc512);
- *nbStripesSoFarPtr = nbStripesAfterBlock;
-
- } else {
-
- XXH3_accumulate(acc, input,
- secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE,
- nbStripes, f_acc512);
- *nbStripesSoFarPtr += nbStripes;
-
- }
-
+/*!
+ * @internal
+ * @brief Processes a large input for XXH3_update() and XXH3_digest_long().
+ *
+ * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block.
+ *
+ * @param acc Pointer to the 8 accumulator lanes
+ * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in the block
+ * @param nbStripesPerBlock Number of stripes in a block
+ * @param input Input pointer
+ * @param nbStripes Number of stripes to process
+ * @param secret Secret pointer
+ * @param secretLimit Offset of the last block in @p secret
+ * @param f_acc Pointer to an XXH3_accumulate implementation
+ * @param f_scramble Pointer to an XXH3_scrambleAcc implementation
+ * @return Pointer past the end of @p input after processing
+ */
+XXH_FORCE_INLINE const xxh_u8 *
+XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
+ size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
+ const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
+ /* Process full blocks */
+ if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
+ /* Process the initial partial block... */
+ size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;
+
+ do {
+ /* Accumulate and scramble */
+ f_acc(acc, input, initialSecret, nbStripesThisIter);
+ f_scramble(acc, secret + secretLimit);
+ input += nbStripesThisIter * XXH_STRIPE_LEN;
+ nbStripes -= nbStripesThisIter;
+ /* Then continue the loop with the full block size */
+ nbStripesThisIter = nbStripesPerBlock;
+ initialSecret = secret;
+ } while (nbStripes >= nbStripesPerBlock);
+ *nbStripesSoFarPtr = 0;
+ }
+ /* Process a partial block */
+ if (nbStripes > 0) {
+ f_acc(acc, input, initialSecret, nbStripes);
+ input += nbStripes * XXH_STRIPE_LEN;
+ *nbStripesSoFarPtr += nbStripes;
+ }
+ /* Return end pointer */
+ return input;
}
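/*
 * A worked example of the logic above: with nbStripesPerBlock == 16,
 * *nbStripesSoFarPtr == 10 and nbStripes == 20, the first loop iteration
 * accumulates the 6 stripes finishing the current block and scrambles; the
 * remaining 14 stripes are less than a full block, so the loop exits and the
 * partial-block path accumulates them, leaving *nbStripesSoFarPtr == 14 and
 * returning input advanced by 20 * XXH_STRIPE_LEN bytes.
 */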
+#ifndef XXH3_STREAM_USE_STACK
+# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
+# define XXH3_STREAM_USE_STACK 1
+# endif
+#endif
/*
* Both XXH3_64bits_update and XXH3_128bits_update use this routine.
*/
-XXH_FORCE_INLINE XXH_errorcode XXH3_update(XXH3_state_t *state,
- const xxh_u8 *input, size_t len,
- XXH3_f_accumulate_512 f_acc512,
- XXH3_f_scrambleAcc f_scramble) {
-
- if (input == NULL)
- #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
- (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
- return XXH_OK;
- #else
- return XXH_ERROR;
- #endif
-
- {
-
- const xxh_u8 *const bEnd = input + len;
- const unsigned char *const secret =
- (state->extSecret == NULL) ? state->customSecret : state->extSecret;
-
- state->totalLen += len;
- XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
-
- if (state->bufferedSize + len <=
- XXH3_INTERNALBUFFER_SIZE) { /* fill in tmp buffer */
- XXH_memcpy(state->buffer + state->bufferedSize, input, len);
- state->bufferedSize += (XXH32_hash_t)len;
- return XXH_OK;
-
- }
-
- /* total input is now > XXH3_INTERNALBUFFER_SIZE */
-
- #define XXH3_INTERNALBUFFER_STRIPES \
- (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
- XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN ==
- 0); /* clean multiple */
-
- /*
- * Internal buffer is partially filled (always, except at beginning)
- * Complete it, then consume it.
- */
- if (state->bufferedSize) {
-
- size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
- XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
- input += loadSize;
- XXH3_consumeStripes(state->acc, &state->nbStripesSoFar,
- state->nbStripesPerBlock, state->buffer,
- XXH3_INTERNALBUFFER_STRIPES, secret,
- state->secretLimit, f_acc512, f_scramble);
- state->bufferedSize = 0;
-
+XXH_FORCE_INLINE XXH_errorcode
+XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
+ const xxh_u8* XXH_RESTRICT input, size_t len,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ if (input==NULL) {
+ XXH_ASSERT(len == 0);
+ return XXH_OK;
}
- XXH_ASSERT(input < bEnd);
-
- /* Consume input by a multiple of internal buffer size */
- if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
-
- const xxh_u8 *const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
- do {
-
- XXH3_consumeStripes(state->acc, &state->nbStripesSoFar,
- state->nbStripesPerBlock, input,
- XXH3_INTERNALBUFFER_STRIPES, secret,
- state->secretLimit, f_acc512, f_scramble);
- input += XXH3_INTERNALBUFFER_SIZE;
+ XXH_ASSERT(state != NULL);
+ { const xxh_u8* const bEnd = input + len;
+ const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
+ /* For some reason, gcc and MSVC seem to suffer greatly
+ * when operating on accumulators held directly in state.
+ * Operating on stack space instead seems to enable proper optimization.
+ * clang, on the other hand, doesn't seem to need this trick */
+ XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8];
+ XXH_memcpy(acc, state->acc, sizeof(acc));
+#else
+ xxh_u64* XXH_RESTRICT const acc = state->acc;
+#endif
+ state->totalLen += len;
+ XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
+
+ /* small input: just fill in tmp buffer */
+ if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) {
+ XXH_memcpy(state->buffer + state->bufferedSize, input, len);
+ state->bufferedSize += (XXH32_hash_t)len;
+ return XXH_OK;
+ }
- } while (input < limit);
+ /* total input is now > XXH3_INTERNALBUFFER_SIZE */
+ #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
+ XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */
- /* for last partial stripe */
- memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN,
- input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
+ /*
+ * Internal buffer is partially filled (always, except at beginning).
+ * Complete it, then consume it.
+ */
+ if (state->bufferedSize) {
+ size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
+ XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
+ input += loadSize;
+ XXH3_consumeStripes(acc,
+ &state->nbStripesSoFar, state->nbStripesPerBlock,
+ state->buffer, XXH3_INTERNALBUFFER_STRIPES,
+ secret, state->secretLimit,
+ f_acc, f_scramble);
+ state->bufferedSize = 0;
+ }
+ XXH_ASSERT(input < bEnd);
+ if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
+ size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
+ input = XXH3_consumeStripes(acc,
+ &state->nbStripesSoFar, state->nbStripesPerBlock,
+ input, nbStripes,
+ secret, state->secretLimit,
+ f_acc, f_scramble);
+ XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
+ }
+ /* Some remaining input (always): buffer it */
+ XXH_ASSERT(input < bEnd);
+ XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
+ XXH_ASSERT(state->bufferedSize == 0);
+ XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
+ state->bufferedSize = (XXH32_hash_t)(bEnd-input);
+#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
+ /* save stack accumulators into state */
+ XXH_memcpy(state->acc, acc, sizeof(acc));
+#endif
}
- XXH_ASSERT(input < bEnd);
-
- /* Some remaining input (always) : buffer it */
- XXH_memcpy(state->buffer, input, (size_t)(bEnd - input));
- state->bufferedSize = (XXH32_hash_t)(bEnd - input);
-
- }
-
- return XXH_OK;
-
-}
-
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update(XXH3_state_t *state,
- const void *input, size_t len) {
-
- return XXH3_update(state, (const xxh_u8 *)input, len, XXH3_accumulate_512,
- XXH3_scrambleAcc);
-
+ return XXH_OK;
}
-XXH_FORCE_INLINE void XXH3_digest_long(XXH64_hash_t *acc,
- const XXH3_state_t *state,
- const unsigned char *secret) {
-
- /*
- * Digest on a local copy. This way, the state remains unaltered, and it can
- * continue ingesting more input afterwards.
- */
- memcpy(acc, state->acc, sizeof(state->acc));
- if (state->bufferedSize >= XXH_STRIPE_LEN) {
-
- size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
- size_t nbStripesSoFar = state->nbStripesSoFar;
- XXH3_consumeStripes(acc, &nbStripesSoFar, state->nbStripesPerBlock,
- state->buffer, nbStripes, secret, state->secretLimit,
- XXH3_accumulate_512, XXH3_scrambleAcc);
- /* last stripe */
- XXH3_accumulate_512(acc,
- state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
- secret + state->secretLimit - XXH_SECRET_LASTACC_START);
-
- } else { /* bufferedSize < XXH_STRIPE_LEN */
-
- xxh_u8 lastStripe[XXH_STRIPE_LEN];
- size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
- XXH_ASSERT(state->bufferedSize >
- 0); /* there is always some input buffered */
- memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize,
- catchupSize);
- memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
- XXH3_accumulate_512(acc, lastStripe,
- secret + state->secretLimit - XXH_SECRET_LASTACC_START);
-
- }
-
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
+{
+ return XXH3_update(state, (const xxh_u8*)input, len,
+ XXH3_accumulate, XXH3_scrambleAcc);
}
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest(const XXH3_state_t *state) {
- const unsigned char *const secret =
- (state->extSecret == NULL) ? state->customSecret : state->extSecret;
- if (state->totalLen > XXH3_MIDSIZE_MAX) {
-
- XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
- XXH3_digest_long(acc, state, secret);
- return XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START,
- (xxh_u64)state->totalLen * XXH_PRIME64_1);
-
- }
-
- /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
- if (state->seed)
- return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen,
- state->seed);
- return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
- secret, state->secretLimit + XXH_STRIPE_LEN);
-
-}
-
- #define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
-
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API void XXH3_generateSecret(void *secretBuffer,
- const void *customSeed,
- size_t customSeedSize) {
-
- XXH_ASSERT(secretBuffer != NULL);
- if (customSeedSize == 0) {
-
- memcpy(secretBuffer, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
- return;
-
- }
-
- XXH_ASSERT(customSeed != NULL);
-
- {
-
- size_t const segmentSize = sizeof(XXH128_hash_t);
- size_t const nbSegments = XXH_SECRET_DEFAULT_SIZE / segmentSize;
- XXH128_canonical_t scrambler;
- XXH64_hash_t seeds[12];
- size_t segnb;
- XXH_ASSERT(nbSegments == 12);
- XXH_ASSERT(segmentSize * nbSegments ==
- XXH_SECRET_DEFAULT_SIZE); /* exact multiple */
- XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
+XXH_FORCE_INLINE void
+XXH3_digest_long (XXH64_hash_t* acc,
+ const XXH3_state_t* state,
+ const unsigned char* secret)
+{
+ xxh_u8 lastStripe[XXH_STRIPE_LEN];
+ const xxh_u8* lastStripePtr;
/*
- * Copy customSeed to seeds[], truncating or repeating as necessary.
+ * Digest on a local copy. This way, the state remains unaltered, and it can
+ * continue ingesting more input afterwards.
*/
- {
-
- size_t toFill = XXH_MIN(customSeedSize, sizeof(seeds));
- size_t filled = toFill;
- memcpy(seeds, customSeed, toFill);
- while (filled < sizeof(seeds)) {
-
- toFill = XXH_MIN(filled, sizeof(seeds) - filled);
- memcpy((char *)seeds + filled, seeds, toFill);
- filled += toFill;
-
- }
-
+ XXH_memcpy(acc, state->acc, sizeof(state->acc));
+ if (state->bufferedSize >= XXH_STRIPE_LEN) {
+ /* Consume remaining stripes, then point to remaining data in buffer */
+ size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
+ size_t nbStripesSoFar = state->nbStripesSoFar;
+ XXH3_consumeStripes(acc,
+ &nbStripesSoFar, state->nbStripesPerBlock,
+ state->buffer, nbStripes,
+ secret, state->secretLimit,
+ XXH3_accumulate, XXH3_scrambleAcc);
+ lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN;
+ } else { /* bufferedSize < XXH_STRIPE_LEN */
+ /* Copy to temp buffer */
+ size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
+ XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */
+ XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
+ XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
+ lastStripePtr = lastStripe;
}
+ /* Last stripe */
+ XXH3_accumulate_512(acc,
+ lastStripePtr,
+ secret + state->secretLimit - XXH_SECRET_LASTACC_START);
+}
- /* generate secret */
- memcpy(secretBuffer, &scrambler, sizeof(scrambler));
- for (segnb = 1; segnb < nbSegments; segnb++) {
-
- size_t const segmentStart = segnb * segmentSize;
- XXH128_canonical_t segment;
- XXH128_canonicalFromHash(&segment,
- XXH128(&scrambler, sizeof(scrambler),
- XXH_readLE64(seeds + segnb) + segnb));
- memcpy((char *)secretBuffer + segmentStart, &segment, sizeof(segment));
-
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
+{
+ const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+ if (state->totalLen > XXH3_MIDSIZE_MAX) {
+ XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+ XXH3_digest_long(acc, state, secret);
+ return XXH3_mergeAccs(acc,
+ secret + XXH_SECRET_MERGEACCS_START,
+ (xxh_u64)state->totalLen * XXH_PRIME64_1);
}
-
- }
-
+ /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
+ if (state->useSeed)
+ return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+ return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
+ secret, state->secretLimit + XXH_STRIPE_LEN);
}
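/*
 * A minimal streaming usage sketch of the API above (error handling elided;
 * input chunks are illustrative):
 *
 *   XXH3_state_t* const st = XXH3_createState();
 *   XXH3_64bits_reset(st);
 *   XXH3_64bits_update(st, "hello ", 6);
 *   XXH3_64bits_update(st, "world", 5);
 *   { XXH64_hash_t const h = XXH3_64bits_digest(st); (void)h; }
 *   XXH3_freeState(st);
 */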
+#endif /* !XXH_NO_STREAM */
+
/* ==========================================
* XXH3 128 bits (a.k.a XXH128)
* ==========================================
- * XXH3's 128-bit variant has better mixing and strength than the 64-bit
- * variant, even without counting the significantly larger output size.
+ * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
+ * even without counting the significantly larger output size.
*
* For example, extra steps are taken to avoid the seed-dependent collisions
* in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
@@ -5623,647 +6416,633 @@ XXH_PUBLIC_API void XXH3_generateSecret(void *secretBuffer,
* fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
*/
-XXH_FORCE_INLINE XXH128_hash_t XXH3_len_1to3_128b(const xxh_u8 *input,
- size_t len,
- const xxh_u8 *secret,
- XXH64_hash_t seed) {
-
- /* A doubled version of 1to3_64b with different constants. */
- XXH_ASSERT(input != NULL);
- XXH_ASSERT(1 <= len && len <= 3);
- XXH_ASSERT(secret != NULL);
- /*
- * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
- * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
- * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
- */
- {
-
- xxh_u8 const c1 = input[0];
- xxh_u8 const c2 = input[len >> 1];
- xxh_u8 const c3 = input[len - 1];
- xxh_u32 const combinedl = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) |
- ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
- xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
- xxh_u64 const bitflipl =
- (XXH_readLE32(secret) ^ XXH_readLE32(secret + 4)) + seed;
- xxh_u64 const bitfliph =
- (XXH_readLE32(secret + 8) ^ XXH_readLE32(secret + 12)) - seed;
- xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
- xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
- XXH128_hash_t h128;
- h128.low64 = XXH64_avalanche(keyed_lo);
- h128.high64 = XXH64_avalanche(keyed_hi);
- return h128;
-
- }
-
-}
-
-XXH_FORCE_INLINE XXH128_hash_t XXH3_len_4to8_128b(const xxh_u8 *input,
- size_t len,
- const xxh_u8 *secret,
- XXH64_hash_t seed) {
-
- XXH_ASSERT(input != NULL);
- XXH_ASSERT(secret != NULL);
- XXH_ASSERT(4 <= len && len <= 8);
- seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
- {
-
- xxh_u32 const input_lo = XXH_readLE32(input);
- xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
- xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
- xxh_u64 const bitflip =
- (XXH_readLE64(secret + 16) ^ XXH_readLE64(secret + 24)) + seed;
- xxh_u64 const keyed = input_64 ^ bitflip;
-
- /* Shift len to the left to ensure it is even, this avoids even multiplies.
- */
- XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
-
- m128.high64 += (m128.low64 << 1);
- m128.low64 ^= (m128.high64 >> 3);
-
- m128.low64 = XXH_xorshift64(m128.low64, 35);
- m128.low64 *= 0x9FB21C651E98DF25ULL;
- m128.low64 = XXH_xorshift64(m128.low64, 28);
- m128.high64 = XXH3_avalanche(m128.high64);
- return m128;
-
- }
-
-}
-
-XXH_FORCE_INLINE XXH128_hash_t XXH3_len_9to16_128b(const xxh_u8 *input,
- size_t len,
- const xxh_u8 *secret,
- XXH64_hash_t seed) {
-
- XXH_ASSERT(input != NULL);
- XXH_ASSERT(secret != NULL);
- XXH_ASSERT(9 <= len && len <= 16);
- {
-
- xxh_u64 const bitflipl =
- (XXH_readLE64(secret + 32) ^ XXH_readLE64(secret + 40)) - seed;
- xxh_u64 const bitfliph =
- (XXH_readLE64(secret + 48) ^ XXH_readLE64(secret + 56)) + seed;
- xxh_u64 const input_lo = XXH_readLE64(input);
- xxh_u64 input_hi = XXH_readLE64(input + len - 8);
- XXH128_hash_t m128 =
- XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ /* A doubled version of 1to3_64b with different constants. */
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(1 <= len && len <= 3);
+ XXH_ASSERT(secret != NULL);
/*
- * Put len in the middle of m128 to ensure that the length gets mixed to
- * both the low and high bits in the 128x64 multiply below.
+ * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
+ * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
+ * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
*/
- m128.low64 += (xxh_u64)(len - 1) << 54;
- input_hi ^= bitfliph;
- /*
- * Add the high 32 bits of input_hi to the high 32 bits of m128, then
- * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
- * the high 64 bits of m128.
- *
- * The best approach to this operation is different on 32-bit and 64-bit.
- */
- if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
- /*
- * 32-bit optimized version, which is more readable.
- *
- * On 32-bit, it removes an ADC and delays a dependency between the two
- * halves of m128.high64, but it generates an extra mask on 64-bit.
- */
- m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) +
- XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
-
- } else {
-
- /*
- * 64-bit optimized (albeit more confusing) version.
- *
- * Uses some properties of addition and multiplication to remove the mask:
- *
- * Let:
- * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
- * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
- * c = XXH_PRIME32_2
- *
- * a + (b * c)
- * Inverse Property: x + y - x == y
- * a + (b * (1 + c - 1))
- * Distributive Property: x * (y + z) == (x * y) + (x * z)
- * a + (b * 1) + (b * (c - 1))
- * Identity Property: x * 1 == x
- * a + b + (b * (c - 1))
- *
- * Substitute a, b, and c:
- * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 -
- * 1))
- *
- * Since input_hi.hi + input_hi.lo == input_hi, we get this:
- * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
- */
- m128.high64 +=
- input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
-
+ { xxh_u8 const c1 = input[0];
+ xxh_u8 const c2 = input[len >> 1];
+ xxh_u8 const c3 = input[len - 1];
+ xxh_u32 const combinedl = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
+ | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
+ xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
+ xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
+ xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
+ xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
+ xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
+ XXH128_hash_t h128;
+ h128.low64 = XXH64_avalanche(keyed_lo);
+ h128.high64 = XXH64_avalanche(keyed_hi);
+ return h128;
}
+}
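/*
 * A worked example of the byte packing above: for len == 2 and
 * input == { 0x41, 0x42 }: c1 == 0x41, c2 == c3 == 0x42, so
 * combinedl == (0x41 << 16) | (0x42 << 24) | 0x42 | (2 << 8) == 0x42410242,
 * i.e. bytes { 0x42, 0x02, 0x41, 0x42 } from low to high, matching the
 * { input[1], 0x02, input[0], input[1] } layout documented above.
 */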
- /* m128 ^= XXH_swap64(m128 >> 64); */
- m128.low64 ^= XXH_swap64(m128.high64);
-
- { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
- XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
- h128.high64 += m128.high64 * XXH_PRIME64_2;
-
- h128.low64 = XXH3_avalanche(h128.low64);
- h128.high64 = XXH3_avalanche(h128.high64);
- return h128;
-
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(4 <= len && len <= 8);
+ seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
+ { xxh_u32 const input_lo = XXH_readLE32(input);
+ xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
+ xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
+ xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
+ xxh_u64 const keyed = input_64 ^ bitflip;
+
+ /* Shift len to the left to ensure it is even; this avoids even multiplies. */
+ XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
+
+ m128.high64 += (m128.low64 << 1);
+ m128.low64 ^= (m128.high64 >> 3);
+
+ m128.low64 = XXH_xorshift64(m128.low64, 35);
+ m128.low64 *= PRIME_MX2;
+ m128.low64 = XXH_xorshift64(m128.low64, 28);
+ m128.high64 = XXH3_avalanche(m128.high64);
+ return m128;
}
+}
+
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(input != NULL);
+ XXH_ASSERT(secret != NULL);
+ XXH_ASSERT(9 <= len && len <= 16);
+ { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
+ xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
+ xxh_u64 const input_lo = XXH_readLE64(input);
+ xxh_u64 input_hi = XXH_readLE64(input + len - 8);
+ XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
+ /*
+ * Put len in the middle of m128 to ensure that the length gets mixed to
+ * both the low and high bits in the 128x64 multiply below.
+ */
+ m128.low64 += (xxh_u64)(len - 1) << 54;
+ input_hi ^= bitfliph;
+ /*
+ * Add the high 32 bits of input_hi to the high 32 bits of m128, then
+ * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
+ * the high 64 bits of m128.
+ *
+ * The best approach to this operation is different on 32-bit and 64-bit.
+ */
+ if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
+ /*
+ * 32-bit optimized version, which is more readable.
+ *
+ * On 32-bit, it removes an ADC and delays a dependency between the two
+ * halves of m128.high64, but it generates an extra mask on 64-bit.
+ */
+ m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
+ } else {
+ /*
+ * 64-bit optimized (albeit more confusing) version.
+ *
+ * Uses some properties of addition and multiplication to remove the mask:
+ *
+ * Let:
+ * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
+ * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
+ * c = XXH_PRIME32_2
+ *
+ * a + (b * c)
+ * Inverse Property: x + y - x == y
+ * a + (b * (1 + c - 1))
+ * Distributive Property: x * (y + z) == (x * y) + (x * z)
+ * a + (b * 1) + (b * (c - 1))
+ * Identity Property: x * 1 == x
+ * a + b + (b * (c - 1))
+ *
+ * Substitute a, b, and c:
+ * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
+ *
+ * Since input_hi.hi + input_hi.lo == input_hi, we get this:
+ * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
+ */
+ m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
+ }
+ /* m128 ^= XXH_swap64(m128 >> 64); */
+ m128.low64 ^= XXH_swap64(m128.high64);
- }
+ { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
+ XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
+ h128.high64 += m128.high64 * XXH_PRIME64_2;
+ h128.low64 = XXH3_avalanche(h128.low64);
+ h128.high64 = XXH3_avalanche(h128.high64);
+ return h128;
+ } }
}
/*
* Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
*/
-XXH_FORCE_INLINE XXH128_hash_t XXH3_len_0to16_128b(const xxh_u8 *input,
- size_t len,
- const xxh_u8 *secret,
- XXH64_hash_t seed) {
-
- XXH_ASSERT(len <= 16);
- {
-
- if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
- if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
- if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
- {
-
- XXH128_hash_t h128;
- xxh_u64 const bitflipl =
- XXH_readLE64(secret + 64) ^ XXH_readLE64(secret + 72);
- xxh_u64 const bitfliph =
- XXH_readLE64(secret + 80) ^ XXH_readLE64(secret + 88);
- h128.low64 = XXH64_avalanche(seed ^ bitflipl);
- h128.high64 = XXH64_avalanche(seed ^ bitfliph);
- return h128;
-
- }
-
- }
-
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
+{
+ XXH_ASSERT(len <= 16);
+ { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
+ if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
+ if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
+ { XXH128_hash_t h128;
+ xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
+ xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
+ h128.low64 = XXH64_avalanche(seed ^ bitflipl);
+ h128.high64 = XXH64_avalanche( seed ^ bitfliph);
+ return h128;
+ } }
}
/*
* A bit slower than XXH3_mix16B, but handles multiply by zero better.
*/
-XXH_FORCE_INLINE XXH128_hash_t XXH128_mix32B(XXH128_hash_t acc,
- const xxh_u8 *input_1,
- const xxh_u8 *input_2,
- const xxh_u8 *secret,
- XXH64_hash_t seed) {
-
- acc.low64 += XXH3_mix16B(input_1, secret + 0, seed);
- acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
- acc.high64 += XXH3_mix16B(input_2, secret + 16, seed);
- acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
- return acc;
-
-}
-
-XXH_FORCE_INLINE XXH128_hash_t XXH3_len_17to128_128b(
- const xxh_u8 *XXH_RESTRICT input, size_t len,
- const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) {
-
- XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
- (void)secretSize;
- XXH_ASSERT(16 < len && len <= 128);
-
- {
-
- XXH128_hash_t acc;
- acc.low64 = len * XXH_PRIME64_1;
- acc.high64 = 0;
- if (len > 32) {
-
- if (len > 64) {
-
- if (len > 96) {
-
- acc = XXH128_mix32B(acc, input + 48, input + len - 64, secret + 96,
- seed);
-
+XXH_FORCE_INLINE XXH128_hash_t
+XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
+ const xxh_u8* secret, XXH64_hash_t seed)
+{
+ acc.low64 += XXH3_mix16B (input_1, secret+0, seed);
+ acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
+ acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
+ acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
+ return acc;
+}
+
+
+XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(16 < len && len <= 128);
+
+ { XXH128_hash_t acc;
+ acc.low64 = len * XXH_PRIME64_1;
+ acc.high64 = 0;
+
+#if XXH_SIZE_OPT >= 1
+ {
+ /* Smaller, but slightly slower. */
+ unsigned int i = (unsigned int)(len - 1) / 32;
+ do {
+ acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
+ } while (i-- != 0);
+ }
+#else
+ if (len > 32) {
+ if (len > 64) {
+ if (len > 96) {
+ acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
+ }
+ acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
+ }
+ acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
+ }
+ acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
+#endif
+ { XXH128_hash_t h128;
+ h128.low64 = acc.low64 + acc.high64;
+ h128.high64 = (acc.low64 * XXH_PRIME64_1)
+ + (acc.high64 * XXH_PRIME64_4)
+ + ((len - seed) * XXH_PRIME64_2);
+ h128.low64 = XXH3_avalanche(h128.low64);
+ h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+ return h128;
}
-
- acc =
- XXH128_mix32B(acc, input + 32, input + len - 48, secret + 64, seed);
-
- }
-
- acc = XXH128_mix32B(acc, input + 16, input + len - 32, secret + 32, seed);
-
- }
-
- acc = XXH128_mix32B(acc, input, input + len - 16, secret, seed);
- {
-
- XXH128_hash_t h128;
- h128.low64 = acc.low64 + acc.high64;
- h128.high64 = (acc.low64 * XXH_PRIME64_1) + (acc.high64 * XXH_PRIME64_4) +
- ((len - seed) * XXH_PRIME64_2);
- h128.low64 = XXH3_avalanche(h128.low64);
- h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
- return h128;
-
}
-
- }
-
}
-XXH_NO_INLINE XXH128_hash_t XXH3_len_129to240_128b(
- const xxh_u8 *XXH_RESTRICT input, size_t len,
- const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) {
-
- XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
- (void)secretSize;
- XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
-
- {
-
- XXH128_hash_t acc;
- int const nbRounds = (int)len / 32;
- int i;
- acc.low64 = len * XXH_PRIME64_1;
- acc.high64 = 0;
- for (i = 0; i < 4; i++) {
-
- acc = XXH128_mix32B(acc, input + (32 * i), input + (32 * i) + 16,
- secret + (32 * i), seed);
-
- }
-
- acc.low64 = XXH3_avalanche(acc.low64);
- acc.high64 = XXH3_avalanche(acc.high64);
- XXH_ASSERT(nbRounds >= 4);
- for (i = 4; i < nbRounds; i++) {
-
- acc = XXH128_mix32B(acc, input + (32 * i), input + (32 * i) + 16,
- secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
- seed);
-
- }
-
- /* last bytes */
- acc = XXH128_mix32B(
- acc, input + len - 16, input + len - 32,
- secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
- 0ULL - seed);
-
- {
-
- XXH128_hash_t h128;
- h128.low64 = acc.low64 + acc.high64;
- h128.high64 = (acc.low64 * XXH_PRIME64_1) + (acc.high64 * XXH_PRIME64_4) +
- ((len - seed) * XXH_PRIME64_2);
- h128.low64 = XXH3_avalanche(h128.low64);
- h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
- return h128;
+XXH_NO_INLINE XXH_PUREF XXH128_hash_t
+XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH64_hash_t seed)
+{
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
+ XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+ { XXH128_hash_t acc;
+ unsigned i;
+ acc.low64 = len * XXH_PRIME64_1;
+ acc.high64 = 0;
+ /*
+ * We set `i` to offset + 32 so that the unchanged `len` can be
+ * used as the upper bound. This hits a sweet spot where both
+ * x86 and aarch64 get simple address generation and good codegen
+ * for the loop.
+ */
+ for (i = 32; i < 160; i += 32) {
+ acc = XXH128_mix32B(acc,
+ input + i - 32,
+ input + i - 16,
+ secret + i - 32,
+ seed);
+ }
+ acc.low64 = XXH3_avalanche(acc.low64);
+ acc.high64 = XXH3_avalanche(acc.high64);
+ /*
+ * NB: `i <= len` will process the last 32 bytes twice if
+ * len % 32 is zero. This is an unfortunate necessity to keep
+ * the hash result stable.
+ */
+ for (i=160; i <= len; i += 32) {
+ acc = XXH128_mix32B(acc,
+ input + i - 32,
+ input + i - 16,
+ secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
+ seed);
+ }
+ /* last bytes */
+ acc = XXH128_mix32B(acc,
+ input + len - 16,
+ input + len - 32,
+ secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
+ (XXH64_hash_t)0 - seed);
+
+ { XXH128_hash_t h128;
+ h128.low64 = acc.low64 + acc.high64;
+ h128.high64 = (acc.low64 * XXH_PRIME64_1)
+ + (acc.high64 * XXH_PRIME64_4)
+ + ((len - seed) * XXH_PRIME64_2);
+ h128.low64 = XXH3_avalanche(h128.low64);
+ h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+ return h128;
+ }
}
-
- }
-
}
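The two loops translate into a simple round count: four fixed 32-byte mixes over the first 128 bytes, one more per full 32 bytes beyond that (with the duplicated tail noted above when len % 32 == 0), plus the final mix. A sketch, assuming 129 <= len <= 240:

#include <stddef.h>

static unsigned midsize_mix_rounds(size_t len) {
  unsigned rounds = 4;                        /* i = 32, 64, 96, 128     */
  size_t i;
  for (i = 160; i <= len; i += 32) rounds++;  /* mirrors the second loop */
  return rounds + 1;                          /* the final 32-byte mix   */
}
/* e.g. midsize_mix_rounds(200) == 7: 4 fixed + 2 variable + 1 final */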
-XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_internal(
- const void *XXH_RESTRICT input, size_t len,
- const xxh_u8 *XXH_RESTRICT secret, size_t secretSize,
- XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble) {
-
- XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
-
- XXH3_hashLong_internal_loop(acc, (const xxh_u8 *)input, len, secret,
- secretSize, f_acc512, f_scramble);
-
- /* converge into final hash */
- XXH_STATIC_ASSERT(sizeof(acc) == 64);
- XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
- {
-
- XXH128_hash_t h128;
- h128.low64 = XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START,
- (xxh_u64)len * XXH_PRIME64_1);
- h128.high64 = XXH3_mergeAccs(
- acc, secret + secretSize - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
- ~((xxh_u64)len * XXH_PRIME64_2));
- return h128;
-
- }
-
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
+ const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble)
+{
+ XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+ XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);
+
+ /* converge into final hash */
+ XXH_STATIC_ASSERT(sizeof(acc) == 64);
+ XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+ { XXH128_hash_t h128;
+ h128.low64 = XXH3_mergeAccs(acc,
+ secret + XXH_SECRET_MERGEACCS_START,
+ (xxh_u64)len * XXH_PRIME64_1);
+ h128.high64 = XXH3_mergeAccs(acc,
+ secret + secretSize
+ - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+ ~((xxh_u64)len * XXH_PRIME64_2));
+ return h128;
+ }
}
/*
- * It's important for performance that XXH3_hashLong is not inlined.
+ * It's important for performance that XXH3_hashLong() is not inlined.
*/
-XXH_NO_INLINE XXH128_hash_t XXH3_hashLong_128b_default(
- const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
- const void *XXH_RESTRICT secret, size_t secretLen) {
-
- (void)seed64;
- (void)secret;
- (void)secretLen;
- return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret,
- sizeof(XXH3_kSecret), XXH3_accumulate_512,
- XXH3_scrambleAcc);
-
+XXH_NO_INLINE XXH_PUREF XXH128_hash_t
+XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64,
+ const void* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64; (void)secret; (void)secretLen;
+ return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_accumulate, XXH3_scrambleAcc);
}
/*
- * It's important for performance that XXH3_hashLong is not inlined.
+ * It's important for performance to pass @p secretLen (when it's static)
+ * to the compiler, so that it can properly optimize the vectorized loop.
+ *
+ * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
+ * breaks -Og, this is XXH_NO_INLINE.
*/
-XXH_NO_INLINE XXH128_hash_t XXH3_hashLong_128b_withSecret(
- const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
- const void *XXH_RESTRICT secret, size_t secretLen) {
-
- (void)seed64;
- return XXH3_hashLong_128b_internal(input, len, (const xxh_u8 *)secret,
- secretLen, XXH3_accumulate_512,
- XXH3_scrambleAcc);
-
+XXH3_WITH_SECRET_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64,
+ const void* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)seed64;
+ return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
+ XXH3_accumulate, XXH3_scrambleAcc);
}
-XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_withSeed_internal(
- const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
- XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble,
- XXH3_f_initCustomSecret f_initSec) {
-
- if (seed64 == 0)
- return XXH3_hashLong_128b_internal(
- input, len, XXH3_kSecret, sizeof(XXH3_kSecret), f_acc512, f_scramble);
- {
-
- XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
- f_initSec(secret, seed64);
- return XXH3_hashLong_128b_internal(input, len, (const xxh_u8 *)secret,
- sizeof(secret), f_acc512, f_scramble);
-
- }
-
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
+ XXH64_hash_t seed64,
+ XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble,
+ XXH3_f_initCustomSecret f_initSec)
+{
+ if (seed64 == 0)
+ return XXH3_hashLong_128b_internal(input, len,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ f_acc, f_scramble);
+ { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+ f_initSec(secret, seed64);
+ return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
+ f_acc, f_scramble);
+ }
}
/*
* It's important for performance that XXH3_hashLong is not inlined.
*/
XXH_NO_INLINE XXH128_hash_t
-XXH3_hashLong_128b_withSeed(const void *input, size_t len, XXH64_hash_t seed64,
- const void *XXH_RESTRICT secret, size_t secretLen) {
-
- (void)secret;
- (void)secretLen;
- return XXH3_hashLong_128b_withSeed_internal(
- input, len, seed64, XXH3_accumulate_512, XXH3_scrambleAcc,
- XXH3_initCustomSecret);
-
+XXH3_hashLong_128b_withSeed(const void* input, size_t len,
+ XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
+{
+ (void)secret; (void)secretLen;
+ return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
+ XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
}
-typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void *XXH_RESTRICT, size_t,
- XXH64_hash_t,
- const void *XXH_RESTRICT, size_t);
+typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
+ XXH64_hash_t, const void* XXH_RESTRICT, size_t);
XXH_FORCE_INLINE XXH128_hash_t
-XXH3_128bits_internal(const void *input, size_t len, XXH64_hash_t seed64,
- const void *XXH_RESTRICT secret, size_t secretLen,
- XXH3_hashLong128_f f_hl128) {
-
- XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
- /*
- * If an action is to be taken if `secret` conditions are not respected,
- * it should be done here.
- * For now, it's a contract pre-condition.
- * Adding a check and a branch here would cost performance at every hash.
- */
- if (len <= 16)
- return XXH3_len_0to16_128b((const xxh_u8 *)input, len,
- (const xxh_u8 *)secret, seed64);
- if (len <= 128)
- return XXH3_len_17to128_128b((const xxh_u8 *)input, len,
- (const xxh_u8 *)secret, secretLen, seed64);
- if (len <= XXH3_MIDSIZE_MAX)
- return XXH3_len_129to240_128b((const xxh_u8 *)input, len,
- (const xxh_u8 *)secret, secretLen, seed64);
- return f_hl128(input, len, seed64, secret, secretLen);
-
+XXH3_128bits_internal(const void* input, size_t len,
+ XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
+ XXH3_hashLong128_f f_hl128)
+{
+ XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
+ /*
+ * If an action is to be taken if `secret` conditions are not respected,
+ * it should be done here.
+ * For now, it's a contract pre-condition.
+ * Adding a check and a branch here would cost performance at every hash.
+ */
+ if (len <= 16)
+ return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
+ if (len <= 128)
+ return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ if (len <= XXH3_MIDSIZE_MAX)
+ return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
+ return f_hl128(input, len, seed64, secret, secretLen);
}
-/* === Public XXH128 API === */
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void *input, size_t len) {
-
- return XXH3_128bits_internal(input, len, 0, XXH3_kSecret,
- sizeof(XXH3_kSecret),
- XXH3_hashLong_128b_default);
+/* === Public XXH128 API === */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len)
+{
+ return XXH3_128bits_internal(input, len, 0,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_hashLong_128b_default);
}
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void *input,
- size_t len,
- const void *secret,
- size_t secretSize) {
-
- return XXH3_128bits_internal(input, len, 0, (const xxh_u8 *)secret,
- secretSize, XXH3_hashLong_128b_withSecret);
-
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ return XXH3_128bits_internal(input, len, 0,
+ (const xxh_u8*)secret, secretSize,
+ XXH3_hashLong_128b_withSecret);
}
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void *input,
- size_t len,
- XXH64_hash_t seed) {
-
- return XXH3_128bits_internal(input, len, seed, XXH3_kSecret,
- sizeof(XXH3_kSecret),
- XXH3_hashLong_128b_withSeed);
-
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+ return XXH3_128bits_internal(input, len, seed,
+ XXH3_kSecret, sizeof(XXH3_kSecret),
+ XXH3_hashLong_128b_withSeed);
}
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH128_hash_t XXH128(const void *input, size_t len,
- XXH64_hash_t seed) {
-
- return XXH3_128bits_withSeed(input, len, seed);
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+ if (len <= XXH3_MIDSIZE_MAX)
+ return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
+ return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
+}
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
+{
+ return XXH3_128bits_withSeed(input, len, seed);
}
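For context, the one-shot entry points above compose as follows; a minimal usage sketch, assuming the header is consumed with XXH_INLINE_ALL as src/afl-performance.c does:

#define XXH_INLINE_ALL
#include "xxhash.h"
#include <stdio.h>

int main(void) {
  const char msg[] = "hello";
  XXH128_hash_t plain  = XXH3_128bits(msg, sizeof(msg) - 1); /* seed 0  */
  XXH128_hash_t seeded = XXH128(msg, sizeof(msg) - 1, 42);   /* seed 42 */
  printf("plain : %016llx%016llx\n",
         (unsigned long long)plain.high64, (unsigned long long)plain.low64);
  printf("equal : %d\n", XXH128_isEqual(plain, seeded)); /* almost surely 0 */
  return 0;
}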
-/* === XXH3 128-bit streaming === */
+/* === XXH3 128-bit streaming === */
+#ifndef XXH_NO_STREAM
/*
- * All the functions are actually the same as for 64-bit streaming variant.
+ * All initialization and update functions are identical to the 64-bit streaming variant.
* The only difference is the finalization routine.
*/
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t *statePtr) {
-
- if (statePtr == NULL) return XXH_ERROR;
- XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
- return XXH_OK;
-
-}
-
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(
- XXH3_state_t *statePtr, const void *secret, size_t secretSize) {
-
- if (statePtr == NULL) return XXH_ERROR;
- XXH3_reset_internal(statePtr, 0, secret, secretSize);
- if (secret == NULL) return XXH_ERROR;
- if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
- return XXH_OK;
-
-}
-
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t *statePtr,
- XXH64_hash_t seed) {
-
- if (statePtr == NULL) return XXH_ERROR;
- if (seed == 0) return XXH3_128bits_reset(statePtr);
- if (seed != statePtr->seed)
- XXH3_initCustomSecret(statePtr->customSecret, seed);
- XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
- return XXH_OK;
-
-}
-
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update(XXH3_state_t *state,
- const void *input,
- size_t len) {
-
- return XXH3_update(state, (const xxh_u8 *)input, len, XXH3_accumulate_512,
- XXH3_scrambleAcc);
-
-}
-
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest(const XXH3_state_t *state) {
-
- const unsigned char *const secret =
- (state->extSecret == NULL) ? state->customSecret : state->extSecret;
- if (state->totalLen > XXH3_MIDSIZE_MAX) {
-
- XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
- XXH3_digest_long(acc, state, secret);
- XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >=
- sizeof(acc) + XXH_SECRET_MERGEACCS_START);
- {
-
- XXH128_hash_t h128;
- h128.low64 = XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START,
- (xxh_u64)state->totalLen * XXH_PRIME64_1);
- h128.high64 =
- XXH3_mergeAccs(acc,
- secret + state->secretLimit + XXH_STRIPE_LEN -
- sizeof(acc) - XXH_SECRET_MERGEACCS_START,
- ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
- return h128;
-
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
+{
+ return XXH3_64bits_reset(statePtr);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
+{
+ return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
+{
+ return XXH3_64bits_reset_withSeed(statePtr, seed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
+{
+ return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
+{
+ return XXH3_64bits_update(state, input, len);
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
+{
+ const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+ if (state->totalLen > XXH3_MIDSIZE_MAX) {
+ XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+ XXH3_digest_long(acc, state, secret);
+ XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+ { XXH128_hash_t h128;
+ h128.low64 = XXH3_mergeAccs(acc,
+ secret + XXH_SECRET_MERGEACCS_START,
+ (xxh_u64)state->totalLen * XXH_PRIME64_1);
+ h128.high64 = XXH3_mergeAccs(acc,
+ secret + state->secretLimit + XXH_STRIPE_LEN
+ - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+ ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
+ return h128;
+ }
}
-
- }
-
- /* len <= XXH3_MIDSIZE_MAX : short code */
- if (state->seed)
- return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen,
- state->seed);
- return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
- secret, state->secretLimit + XXH_STRIPE_LEN);
-
+ /* len <= XXH3_MIDSIZE_MAX : short code */
+ if (state->seed)
+ return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
+ return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
+ secret, state->secretLimit + XXH_STRIPE_LEN);
}
+#endif /* !XXH_NO_STREAM */
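Since the 128-bit reset/update calls now simply forward to their 64-bit twins, streaming use looks the same as for XXH3_64bits; a minimal sketch (error handling elided, same XXH_INLINE_ALL assumption as above):

#define XXH_INLINE_ALL
#include "xxhash.h"

static XXH128_hash_t hash_two_chunks(void) {
  XXH3_state_t *st = XXH3_createState();
  XXH128_hash_t h;
  XXH3_128bits_reset(st);
  XXH3_128bits_update(st, "abc", 3);
  XXH3_128bits_update(st, "def", 3); /* equals hashing "abcdef" at once */
  h = XXH3_128bits_digest(st);
  XXH3_freeState(st);
  return h;
}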
+/* 128-bit utility functions */
- /* 128-bit utility functions */
-
- #include <string.h> /* memcmp, memcpy */
+#include <string.h> /* memcmp, memcpy */
/* return : 1 is equal, 0 if different */
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) {
-
- /* note : XXH128_hash_t is compact, it has no padding byte */
- return !(memcmp(&h1, &h2, sizeof(h1)));
-
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
+{
+ /* note: XXH128_hash_t is compact; it has no padding bytes */
+ return !(memcmp(&h1, &h2, sizeof(h1)));
}
/* This prototype is compatible with stdlib's qsort().
- * return : >0 if *h128_1 > *h128_2
- * <0 if *h128_1 < *h128_2
- * =0 if *h128_1 == *h128_2 */
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API int XXH128_cmp(const void *h128_1, const void *h128_2) {
+ * @return : >0 if *h128_1 > *h128_2
+ * <0 if *h128_1 < *h128_2
+ * =0 if *h128_1 == *h128_2 */
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
+{
+ XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
+ XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
+ int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
+ /* note: assumes that, in most cases, hash values are different */
+ if (hcmp) return hcmp;
+ return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
+}
- XXH128_hash_t const h1 = *(const XXH128_hash_t *)h128_1;
- XXH128_hash_t const h2 = *(const XXH128_hash_t *)h128_2;
- int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
- /* note : bets that, in most cases, hash values are different */
- if (hcmp) return hcmp;
- return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
+/*====== Canonical representation ======*/
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) {
+ hash.high64 = XXH_swap64(hash.high64);
+ hash.low64 = XXH_swap64(hash.low64);
+ }
+ XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
+ XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}
-/*====== Canonical representation ======*/
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t *dst,
- XXH128_hash_t hash) {
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH128_hash_t
+XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
+{
+ XXH128_hash_t h;
+ h.high64 = XXH_readBE64(src);
+ h.low64 = XXH_readBE64(src->digest + 8);
+ return h;
+}
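The canonical form is a fixed big-endian byte layout, so writing a hash out and reading it back is endian-independent; a round-trip sketch under the same XXH_INLINE_ALL assumption:

#define XXH_INLINE_ALL
#include "xxhash.h"

static int canonical_roundtrip_ok(XXH128_hash_t h) {
  XXH128_canonical_t c;
  XXH128_hash_t back;
  XXH128_canonicalFromHash(&c, h);     /* serialize, big-endian */
  back = XXH128_hashFromCanonical(&c); /* deserialize           */
  return XXH128_isEqual(h, back);      /* always 1              */
}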
- XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
- if (XXH_CPU_LITTLE_ENDIAN) {
- hash.high64 = XXH_swap64(hash.high64);
- hash.low64 = XXH_swap64(hash.low64);
- }
+/* ==========================================
+ * Secret generators
+ * ==========================================
+ */
+#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
+
+XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
+{
+ XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
+ XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
+{
+#if (XXH_DEBUGLEVEL >= 1)
+ XXH_ASSERT(secretBuffer != NULL);
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+#else
+ /* production mode, assert() are disabled */
+ if (secretBuffer == NULL) return XXH_ERROR;
+ if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+#endif
- memcpy(dst, &hash.high64, sizeof(hash.high64));
- memcpy((char *)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
+ if (customSeedSize == 0) {
+ customSeed = XXH3_kSecret;
+ customSeedSize = XXH_SECRET_DEFAULT_SIZE;
+ }
+#if (XXH_DEBUGLEVEL >= 1)
+ XXH_ASSERT(customSeed != NULL);
+#else
+ if (customSeed == NULL) return XXH_ERROR;
+#endif
+ /* Fill secretBuffer with a copy of customSeed - repeat as needed */
+ { size_t pos = 0;
+ while (pos < secretSize) {
+ size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
+ memcpy((char*)secretBuffer + pos, customSeed, toCopy);
+ pos += toCopy;
+ } }
+
+ { size_t const nbSeg16 = secretSize / 16;
+ size_t n;
+ XXH128_canonical_t scrambler;
+ XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
+ for (n=0; n<nbSeg16; n++) {
+ XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
+ XXH3_combine16((char*)secretBuffer + n*16, h128);
+ }
+ /* last segment */
+ XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
+ }
+ return XXH_OK;
}
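A usage sketch for the generator above: it stretches arbitrary, possibly low-entropy seed material into a full-size secret, which the _withSecret variants then consume. The 192-byte buffer below mirrors XXH_SECRET_DEFAULT_SIZE; anything >= XXH3_SECRET_SIZE_MIN (136) works, and the return code is ignored here for brevity:

#define XXH_INLINE_ALL
#include "xxhash.h"

static XXH128_hash_t hash_with_custom_secret(const void *data, size_t len) {
  unsigned char secret[192]; /* >= XXH3_SECRET_SIZE_MIN */
  const char seed_material[] = "application-specific seed";
  XXH3_generateSecret(secret, sizeof(secret),
                      seed_material, sizeof(seed_material) - 1);
  return XXH3_128bits_withSecret(data, len, secret, sizeof(secret));
}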
-/*! @ingroup xxh3_family */
-XXH_PUBLIC_API XXH128_hash_t
-XXH128_hashFromCanonical(const XXH128_canonical_t *src) {
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void
+XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed)
+{
+ XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+ XXH3_initCustomSecret(secret, seed);
+ XXH_ASSERT(secretBuffer != NULL);
+ memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
+}
- XXH128_hash_t h;
- h.high64 = XXH_readBE64(src);
- h.low64 = XXH_readBE64(src->digest + 8);
- return h;
-}
- /* Pop our optimization override from above */
- #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
- && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
- && defined(__OPTIMIZE__) && \
- !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
- #pragma GCC pop_options
- #endif
+/* Pop our optimization override from above */
+#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
+ && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+ && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
+# pragma GCC pop_options
+#endif
- #endif /* XXH_NO_LONG_LONG */
+#endif /* XXH_NO_LONG_LONG */
- #endif /* XXH_NO_XXH3 */
+#endif /* XXH_NO_XXH3 */
/*!
* @}
*/
-#endif /* XXH_IMPLEMENTATION */
+#endif /* XXH_IMPLEMENTATION */
-#if defined(__cplusplus)
-
-}
+#if defined (__cplusplus)
+} /* extern "C" */
#endif
-
diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c
index ea8f1423..48e32996 100644
--- a/src/afl-fuzz.c
+++ b/src/afl-fuzz.c
@@ -401,6 +401,10 @@ static void usage(u8 *argv0, int more_help) {
SAYF("Compiled with _AFL_DOCUMENT_MUTATIONS.\n");
#endif
+#ifdef _AFL_SPECIAL_PERFORMANCE
+ SAYF("Compiled with special performance options for this specific system, it might not work on other platforms!\n");
+#endif
+
SAYF("For additional help please consult %s/README.md :)\n\n", doc_path);
exit(1);
diff --git a/src/afl-performance.c b/src/afl-performance.c
index 07c1b527..22cf4dec 100644
--- a/src/afl-performance.c
+++ b/src/afl-performance.c
@@ -2,9 +2,18 @@
#include "afl-fuzz.h"
#include "types.h"
+#ifdef _HAVE_AVX2
+#define T1HA0_AESNI_AVAILABLE 1
+#define T1HA_USE_FAST_ONESHOT_READ 1
+#define T1HA_USE_INDIRECT_FUNCTIONS 1
+#define T1HA_IA32AES_NAME XXH3_64bits
+#include "t1ha0_ia32aes_b.h"
+#else
#define XXH_INLINE_ALL
#include "xxhash.h"
#undef XXH_INLINE_ALL
+#endif
+
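The effect of the block above is a compile-time function swap: t1ha0_ia32aes_b.h emits its hash under whatever name T1HA_IA32AES_NAME expands to, so on AVX2 builds the symbol XXH3_64bits is provided by the t1ha0 AES-NI core and every existing call site picks it up unchanged. A minimal sketch of the pattern, with illustrative names and a placeholder body:

#include <stdint.h>
#include <stddef.h>

/* the "header" side: emits its function under a caller-chosen name */
#ifndef HASH_IMPL_NAME
#define HASH_IMPL_NAME default_hash64
#endif
static uint64_t HASH_IMPL_NAME(const void *data, size_t len) {
  const unsigned char *p = (const unsigned char *)data;
  uint64_t h = 0xcbf29ce484222325ULL;           /* placeholder FNV-1a body */
  while (len--) h = (h ^ *p++) * 0x100000001b3ULL;
  return h;
}
/* the "consumer" side: #define HASH_IMPL_NAME XXH3_64bits before inclusion,
 * and every call to XXH3_64bits() resolves to this implementation */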
void rand_set_seed(afl_state_t *afl, s64 init_seed) {
diff --git a/utils/bench/Makefile b/utils/bench/Makefile
new file mode 100644
index 00000000..e7d2f3a1
--- /dev/null
+++ b/utils/bench/Makefile
@@ -0,0 +1,8 @@
+all: hash
+
+hash: hash.c
+ gcc -O3 -mavx2 -march=native -I../../include -o hash hash.c
+
+clean:
+ rm -f hash
+
diff --git a/utils/bench/README.md b/utils/bench/README.md
new file mode 100644
index 00000000..772c117b
--- /dev/null
+++ b/utils/bench/README.md
@@ -0,0 +1,2 @@
+# Internal AFL++ benchmarking
+
diff --git a/utils/bench/hash.c b/utils/bench/hash.c
new file mode 100644
index 00000000..013a5321
--- /dev/null
+++ b/utils/bench/hash.c
@@ -0,0 +1,42 @@
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h> /* malloc, free */
+#include <string.h> /* memcpy, memset */
+#include <time.h>
+
+#define T1HA0_AESNI_AVAILABLE 1
+#define T1HA_USE_FAST_ONESHOT_READ 1
+#define T1HA_USE_INDIRECT_FUNCTIONS 1
+#define T1HA_IA32AES_NAME t1ha0_ia32aes
+#include "t1ha0_ia32aes_b.h"
+
+#define XXH_INLINE_ALL
+#include "xxhash.h"
+#undef XXH_INLINE_ALL
+
+int main() {
+ char *data = malloc(4097);
+ struct timespec start, end;
+ long long duration;
+ int i;
+ uint64_t res;
+
+ if (!data) return 1;
+ memset(data, 0, 4097); /* defined contents for the first timing loop */
+
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ for (i = 0; i < 100000000; ++i) {
+ res = XXH3_64bits(data, 4097);
+ memcpy(data + 16, (char*)&res, 8);
+ }
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ duration = (end.tv_sec - start.tv_sec) * 1000000000LL + (end.tv_nsec - start.tv_nsec);
+ printf("xxh3 duration: %lld ns\n", duration);
+
+ memset(data, 0, 4097);
+ clock_gettime(CLOCK_MONOTONIC, &start);
+ for (i = 0; i < 100000000; ++i) {
+ res = t1ha0_ia32aes(data, 4097);
+ memcpy(data + 16, (char*)&res, 8);
+ }
+ clock_gettime(CLOCK_MONOTONIC, &end);
+ duration = (end.tv_sec - start.tv_sec) * 1000000000LL + (end.tv_nsec - start.tv_nsec);
+ printf("t1ha0_ia32aes duration: %lld ns\n", duration);
+
+ free(data);
+ return 0;
+}
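One way to read the printed totals: each loop performs 1e8 dependent hashes of 4097 bytes, and the memcpy feeds every digest back into the input so the compiler cannot hoist the otherwise loop-invariant call. A hypothetical helper for turning a total into per-hash figures:

#include <stdio.h>

static void report(const char *name, long long total_ns) {
  double const ns_per_hash = (double)total_ns / 100000000.0;
  double const gb_per_s    = 4097.0 / ns_per_hash; /* bytes/ns == GB/s */
  printf("%s: %.2f ns/hash, %.2f GB/s\n", name, ns_per_hash, gb_per_s);
}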