Diffstat (limited to 'include')
-rw-r--r--  include/afl-as.h                |    4
-rw-r--r--  include/afl-fuzz.h              |  156
-rw-r--r--  include/afl-mutations.h         | 2681
-rw-r--r--  include/afl-persistent-replay.h |  131
-rw-r--r--  include/afl-prealloc.h          |    4
-rw-r--r--  include/afl-record-compat.h     |   67
-rw-r--r--  include/alloc-inl.h             |   12
-rw-r--r--  include/android-ashmem.h        |    4
-rw-r--r--  include/cmplog.h                |   30
-rw-r--r--  include/common.h                |    9
-rw-r--r--  include/config.h                |   56
-rw-r--r--  include/debug.h                 |   59
-rw-r--r--  include/envs.h                  |  311
-rw-r--r--  include/forkserver.h            |   13
-rw-r--r--  include/hash.h                  |    2
-rw-r--r--  include/list.h                  |    4
-rw-r--r--  include/sharedmem.h             |    4
-rw-r--r--  include/snapshot-inl.h          |    4
-rw-r--r--  include/t1ha.h                  |  738
-rw-r--r--  include/t1ha0_ia32aes_b.h       |  183
-rw-r--r--  include/t1ha_bits.h             | 1423
-rw-r--r--  include/t1ha_selfcheck.h        |   77
-rw-r--r--  include/types.h                 |   12
-rw-r--r--  include/xxhash.h                | 4688
24 files changed, 8893 insertions, 1779 deletions
diff --git a/include/afl-as.h b/include/afl-as.h index 486314e2..c005d43d 100644 --- a/include/afl-as.h +++ b/include/afl-as.h @@ -5,12 +5,12 @@ Originally written by Michal Zalewski Now maintained by Marc Heuse <mh@mh-sec.de>, - Heiko Eißfeldt <heiko.eissfeldt@hexco.de>, + Heiko Eissfeldt <heiko.eissfeldt@hexco.de>, Andrea Fioraldi <andreafioraldi@gmail.com>, Dominik Maier <mail@dmnk.co> Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 8fb7ecb1..5efe5144 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -5,12 +5,12 @@ Originally written by Michal Zalewski Now maintained by Marc Heuse <mh@mh-sec.de>, - Heiko Eißfeldt <heiko.eissfeldt@hexco.de>, - Andrea Fioraldi <andreafioraldi@gmail.com>, - Dominik Maier <mail@dmnk.co> + Dominik Maier <mail@dmnk.co>, + Andrea Fioraldi <andreafioraldi@gmail.com>, and + Heiko Eissfeldt <heiko.eissfeldt@hexco.de> Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -31,7 +31,7 @@ #define MESSAGES_TO_STDOUT #ifndef _GNU_SOURCE - #define _GNU_SOURCE 1 + #define _GNU_SOURCE #endif #ifndef _FILE_OFFSET_BITS #define _FILE_OFFSET_BITS 64 @@ -124,6 +124,10 @@ #define CASE_PREFIX "id_" #endif /* ^!SIMPLE_FILES */ +#ifdef AFL_PERSISTENT_RECORD + #define RECORD_PREFIX "RECORD:" +#endif + #define STAGE_BUF_SIZE (64) /* usable size for stage name buf in afl_state */ // Little helper to access the ptr to afl->##name_buf - for use in afl_realloc. @@ -149,6 +153,48 @@ struct tainted { }; +struct inf_profile { + + u32 inf_skipped_bytes; /* Inference Stage Profiling */ + u64 inf_execs_cost, inf_time_cost; + +}; + +/* ToDo: add cmplog profile as well */ +struct havoc_profile { + + u32 queued_det_stage, /* Det/Havoc Stage Profiling */ + queued_havoc_stage, total_queued_det, edge_det_stage, edge_havoc_stage, + total_det_edge; + + u64 det_stage_time, havoc_stage_time, total_det_time; + +}; + +struct skipdet_entry { + + u8 continue_inf, done_eff; + u32 undet_bits, quick_eff_bytes; + + u8 *skip_eff_map, /* we've finished the eff_map */ + *done_inf_map; /* some bytes are not done yet */ + +}; + +struct skipdet_global { + + u8 use_skip_havoc; + + u32 undet_bits_threshold; + + u64 last_cov_undet; + + u8 *virgin_det_bits; /* global fuzzed bits */ + + struct inf_profile *inf_prof; + +}; + struct queue_entry { u8 *fname; /* File name for the test case */ @@ -157,6 +203,7 @@ struct queue_entry { u8 colorized, /* Do not run redqueen stage again */ cal_failed; /* Calibration failed? */ + bool trim_done, /* Trimmed? */ was_fuzzed, /* historical, but needed for MOpt */ passed_det, /* Deterministic stages passed? 
*/ @@ -168,22 +215,21 @@ struct queue_entry { disabled; /* Is disabled from fuzz selection */ u32 bitmap_size, /* Number of bits set in bitmap */ - fuzz_level, /* Number of fuzzing iterations */ - n_fuzz_entry /* offset in n_fuzz */ #ifdef INTROSPECTION - , stats_selected, /* stats: how often selected */ stats_skipped, /* stats: how often skipped */ stats_finds, /* stats: # of saved finds */ stats_crashes, /* stats: # of saved crashes */ - stats_tmouts /* stats: # of saved timeouts */ + stats_tmouts, /* stats: # of saved timeouts */ #endif - ; + fuzz_level, /* Number of fuzzing iterations */ + n_fuzz_entry; /* offset in n_fuzz */ u64 exec_us, /* Execution time (us) */ handicap, /* Number of queue cycles behind */ depth, /* Path depth */ exec_cksum, /* Checksum of the execution trace */ + custom, /* Marker for custom mutators */ stats_mutated; /* stats: # of mutations performed */ u8 *trace_mini; /* Trace bytes, if kept */ @@ -203,6 +249,8 @@ struct queue_entry { struct queue_entry *mother; /* queue entry this based on */ + struct skipdet_entry *skipdet_e; + }; struct extra_data { @@ -247,6 +295,8 @@ enum { /* 19 */ STAGE_CUSTOM_MUTATOR, /* 20 */ STAGE_COLORIZATION, /* 21 */ STAGE_ITS, + /* 22 */ STAGE_INF, + /* 23 */ STAGE_QUICK, STAGE_NUM_MAX @@ -345,6 +395,7 @@ enum { /* 13 */ PY_FUNC_DESCRIBE, /* 14 */ PY_FUNC_FUZZ_SEND, /* 15 */ PY_FUNC_SPLICE_OPTOUT, + /* 16 */ PY_FUNC_POST_RUN, PY_FUNC_COUNT }; @@ -400,7 +451,9 @@ typedef struct afl_env_vars { afl_exit_on_seed_issues, afl_try_affinity, afl_ignore_problems, afl_keep_timeouts, afl_no_crash_readme, afl_ignore_timeouts, afl_no_startup_calibration, afl_no_warn_instability, - afl_post_process_keep_original; + afl_post_process_keep_original, afl_crashing_seeds_as_new_crash, + afl_final_sync, afl_ignore_seed_problems, afl_disable_redundant, + afl_sha1_filenames; u8 *afl_tmpdir, *afl_custom_mutator_library, *afl_python_module, *afl_path, *afl_hang_tmout, *afl_forksrv_init_tmout, *afl_preload, @@ -493,7 +546,8 @@ typedef struct afl_state { *orig_cmdline, /* Original command line */ *infoexec; /* Command to execute on a new crash */ - u32 hang_tmout; /* Timeout used for hang det (ms) */ + u32 hang_tmout, /* Timeout used for hang det (ms) */ + stats_update_freq; /* Stats update frequency (execs) */ u8 havoc_stack_pow2, /* HAVOC_STACK_POW2 */ no_unlink, /* do not unlink cur_input */ @@ -502,14 +556,12 @@ typedef struct afl_state { custom_splice_optout, /* Custom mutator no splice buffer */ is_main_node, /* if this is the main node */ is_secondary_node, /* if this is a secondary instance */ - pizza_is_served; /* pizza mode */ - - u32 stats_update_freq; /* Stats update frequency (execs) */ - - u8 schedule; /* Power schedule (default: EXPLORE)*/ - u8 havoc_max_mult; - - u8 skip_deterministic, /* Skip deterministic stages? */ + pizza_is_served, /* pizza mode */ + input_mode, /* target wants text inputs */ + fuzz_mode, /* coverage/exploration or crash/exploitation mode */ + schedule, /* Power schedule (default: EXPLORE)*/ + havoc_max_mult, /* havoc multiplier */ + skip_deterministic, /* Skip deterministic stages? */ use_splicing, /* Recombine input files? */ non_instrumented_mode, /* Run in non-instrumented mode? */ score_changed, /* Scoring for favorites changed? 
*/ @@ -596,7 +648,11 @@ typedef struct afl_state { last_hang_time, /* Time for most recent hang (ms) */ longest_find_time, /* Longest time taken for a find */ exit_on_time, /* Delay to exit if no new paths */ - sync_time; /* Sync time (ms) */ + sync_time, /* Sync time (ms) */ + switch_fuzz_mode, /* auto or fixed fuzz mode */ + calibration_time_us, /* Time spent on calibration */ + sync_time_us, /* Time spent on sync */ + trim_time_us; /* Time spent on trimming */ u32 slowest_exec_ms, /* Slowest testcase non hang in ms */ subseq_tmouts; /* Number of timeouts in a row */ @@ -609,6 +665,7 @@ u32 stage_cur, stage_max; /* Stage progression */ s32 splicing_with; /* Splicing with which test case? */ + s64 smallest_favored; /* smallest queue id favored */ u32 main_node_id, main_node_max; /* Main instance job splitting */ @@ -673,7 +730,8 @@ typedef struct afl_state { u32 cmplog_max_filesize; u32 cmplog_lvl; u32 colorize_success; - u8 cmplog_enable_arith, cmplog_enable_transform, cmplog_random_colorization; + u8 cmplog_enable_arith, cmplog_enable_transform, cmplog_enable_scale, + cmplog_enable_xtreme_transform, cmplog_random_colorization; struct afl_pass_stat *pass_stats; struct cmp_map *orig_cmp_map; @@ -778,6 +836,11 @@ typedef struct afl_state { * is too large) */ struct queue_entry **q_testcase_cache; + /* Global Profile Data for deterministic/havoc-splice stage */ + struct havoc_profile *havoc_prof; + + struct skipdet_global *skipdet_g; + #ifdef INTROSPECTION char mutation[8072]; char m_tmp[4096]; @@ -1018,6 +1081,16 @@ struct custom_mutator { void (*afl_custom_fuzz_send)(void *data, const u8 *buf, size_t buf_size); /** + * This method can be used if you want to run some code or scripts each time + * AFL++ executes the target with afl-fuzz. + * + * (Optional) + * + * @param data pointer returned in afl_custom_init by this custom mutator + */ + void (*afl_custom_post_run)(void *data); + + /** * Allow for additional analysis (e.g. calling a different tool that does a * different kind of coverage and saves this for the custom mutator). * @@ -1072,6 +1145,7 @@ void finalize_py_module(void *); u32 fuzz_count_py(void *, const u8 *, size_t); void fuzz_send_py(void *, const u8 *, size_t); +void post_run_py(void *); size_t post_process_py(void *, u8 *, size_t, u8 **); s32 init_trim_py(void *, u8 *, size_t); s32 post_trim_py(void *, u8); @@ -1145,6 +1219,10 @@ void show_stats_normal(afl_state_t *); void show_stats_pizza(afl_state_t *); void show_init_stats(afl_state_t *); +void update_calibration_time(afl_state_t *afl, u64 *time); +void update_trim_time(afl_state_t *afl, u64 *time); +void update_sync_time(afl_state_t *afl, u64 *time); + /* StatsD */ void statsd_setup_format(afl_state_t *afl); @@ -1202,6 +1280,7 @@ u8 check_if_text_buf(u8 *buf, u32 len); #ifndef AFL_SHOWMAP void setup_signal_handlers(void); #endif +char *get_fuzzing_state(afl_state_t *afl); /* CmpLog */ @@ -1216,6 +1295,13 @@ AFL_RAND_RETURN rand_next(afl_state_t *afl); /* probability between 0.0 and 1.0 */ double rand_next_percent(afl_state_t *afl); +/* SkipDet Functions */ + +u8 skip_deterministic_stage(afl_state_t *, u8 *, u8 *, u32, u64); +u8 is_det_timeout(u64, u8); + +void plot_profile_data(afl_state_t *, struct queue_entry *); + /**** Inline routines ****/ /* Generate a random number (from 0 to limit - 1). 
This may @@ -1319,6 +1405,32 @@ void queue_testcase_retake_mem(afl_state_t *afl, struct queue_entry *q, u8 *in, void queue_testcase_store_mem(afl_state_t *afl, struct queue_entry *q, u8 *mem); +/* Compute the SHA1 hash of `data`, which is of `len` bytes, and return the + * result as a `\0`-terminated hex string, which the caller must `ck_free`. */ +char *sha1_hex(const u8 *data, size_t len); + +/* Apply `sha1_hex` to the first `len` bytes of the file at `fname`. */ +char *sha1_hex_for_file(const char *fname, u32 len); + +/* Create file `fn`, but allow it to already exist if `AFL_SHA1_FILENAMES` is + * enabled. */ +static inline int permissive_create(afl_state_t *afl, const char *fn) { + + int fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION); + if (unlikely(fd < 0)) { + + if (!(afl->afl_env.afl_sha1_filenames && errno == EEXIST)) { + + PFATAL("Unable to create '%s'", fn); + + } + + } + + return fd; + +} + #if TESTCASE_CACHE == 1 #error define of TESTCASE_CACHE must be zero or larger than 1 #endif diff --git a/include/afl-mutations.h b/include/afl-mutations.h new file mode 100644 index 00000000..79cf7c6a --- /dev/null +++ b/include/afl-mutations.h @@ -0,0 +1,2681 @@ +/* Implementation of afl havoc mutation to be used in AFL++ custom mutators and + partially in afl-fuzz itself. + + How to use: + + #include "afl-mutations.h" // needs afl-fuzz.h + + u32 afl_mutate(afl_state_t *afl, u8 *buf, u32 len, u32 steps, bool is_text, + bool is_exploration, u8 *splice_buf, u32 splice_len, + u32 max_len); + + Returns: + u32 - the length of the mutated data returned in *buf. 0 = error + Parameters: + afl_state_t *afl - the *afl state pointer + u8 *buf - the input buffer to mutate, which is mutated in place. + NOTE: must be able to hold at least max_len bytes! (see below) + u32 len - the length of the input + u32 steps - how many mutations to perform on the input + bool is_text - is the target expecting text inputs + bool is_exploration - mutate for exploration mode (instead of exploitation) + splice_buf - a buffer from another corpus item to splice with. + If NULL then no splicing is done (obviously). + splice_len - the length of the splice buffer. If 0 then no splicing. 
+ u32 max_len - the maximum size the mutated buffer may grow to +*/ + +#ifndef AFL_MUTATIONS_H +#define AFL_MUTATIONS_H + +#include <stdbool.h> +#include <inttypes.h> + +#define MUT_STRATEGY_ARRAY_SIZE 256 + +s8 interesting_8[] = {INTERESTING_8}; +s16 interesting_16[] = {INTERESTING_8, INTERESTING_16}; +s32 interesting_32[] = {INTERESTING_8, INTERESTING_16, INTERESTING_32}; + +enum { + + /* 00 */ MUT_FLIPBIT, + /* 01 */ MUT_INTERESTING8, + /* 02 */ MUT_INTERESTING16, + /* 03 */ MUT_INTERESTING16BE, + /* 04 */ MUT_INTERESTING32, + /* 05 */ MUT_INTERESTING32BE, + /* 06 */ MUT_ARITH8_, + /* 07 */ MUT_ARITH8, + /* 08 */ MUT_ARITH16_, + /* 09 */ MUT_ARITH16BE_, + /* 10 */ MUT_ARITH16, + /* 11 */ MUT_ARITH16BE, + /* 12 */ MUT_ARITH32_, + /* 13 */ MUT_ARITH32BE_, + /* 14 */ MUT_ARITH32, + /* 15 */ MUT_ARITH32BE, + /* 16 */ MUT_RAND8, + /* 17 */ MUT_CLONE_COPY, + /* 18 */ MUT_CLONE_FIXED, + /* 19 */ MUT_OVERWRITE_COPY, + /* 20 */ MUT_OVERWRITE_FIXED, + /* 21 */ MUT_BYTEADD, + /* 22 */ MUT_BYTESUB, + /* 23 */ MUT_FLIP8, + /* 24 */ MUT_SWITCH, + /* 25 */ MUT_DEL, + /* 26 */ MUT_SHUFFLE, + /* 27 */ MUT_DELONE, + /* 28 */ MUT_INSERTONE, + /* 29 */ MUT_ASCIINUM, + /* 30 */ MUT_INSERTASCIINUM, + /* 31 */ MUT_EXTRA_OVERWRITE, + /* 32 */ MUT_EXTRA_INSERT, + /* 33 */ MUT_AUTO_EXTRA_OVERWRITE, + /* 34 */ MUT_AUTO_EXTRA_INSERT, + /* 35 */ MUT_SPLICE_OVERWRITE, + /* 36 */ MUT_SPLICE_INSERT, + + MUT_MAX + +}; + +#define MUT_TXT_ARRAY_SIZE 200 +u32 text_array[MUT_TXT_ARRAY_SIZE] = {MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + 
MUT_OVERWRITE_FIXED, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT}; + +#define MUT_BIN_ARRAY_SIZE 256 +u32 binary_array[MUT_BIN_ARRAY_SIZE] = {MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + 
MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT}; + +#define MUT_NORMAL_ARRAY_SIZE 77 +u32 normal_splice_array[MUT_NORMAL_ARRAY_SIZE] = {MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH32_, + 
MUT_ARITH32_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_FIXED, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_FIXED, + MUT_BYTEADD, + MUT_BYTESUB, + MUT_FLIP8, + MUT_SWITCH, + MUT_SWITCH, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT}; + +#define MUT_SPLICE_ARRAY_SIZE 81 +u32 full_splice_array[MUT_SPLICE_ARRAY_SIZE] = {MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_FIXED, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_FIXED, + MUT_BYTEADD, + MUT_BYTESUB, + MUT_FLIP8, + MUT_SWITCH, + MUT_SWITCH, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT}; + +u32 mutation_strategy_exploration_text[MUT_STRATEGY_ARRAY_SIZE] = { + + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + 
MUT_ARITH16BE, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT + +}; + +u32 mutation_strategy_exploration_binary[MUT_STRATEGY_ARRAY_SIZE] 
= { + + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + 
MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT + +}; + +u32 mutation_strategy_exploitation_text[MUT_STRATEGY_ARRAY_SIZE] = { + + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + 
MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT + +}; + +u32 mutation_strategy_exploitation_binary[MUT_STRATEGY_ARRAY_SIZE] = { + + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_FLIPBIT, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING8, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING16BE, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_INTERESTING32BE, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8_, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH8, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + 
MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16BE_, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH16BE, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32BE_, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_ARITH32BE, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_RAND8, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_COPY, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_CLONE_FIXED, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_COPY, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_OVERWRITE_FIXED, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTEADD, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_BYTESUB, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_FLIP8, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_SWITCH, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_DEL, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_SHUFFLE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_DELONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_INSERTONE, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_ASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_INSERTASCIINUM, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_OVERWRITE, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_EXTRA_INSERT, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_OVERWRITE, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_AUTO_EXTRA_INSERT, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_OVERWRITE, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT, + MUT_SPLICE_INSERT + +}; + +u32 
afl_mutate(afl_state_t *, u8 *, u32, u32, bool, bool, u8 *, u32, u32); +u32 choose_block_len(afl_state_t *, u32); + +/* Helper to choose random block len for block operations in fuzz_one(). + Doesn't return zero, provided that max_len is > 0. */ + +inline u32 choose_block_len(afl_state_t *afl, u32 limit) { + + u32 min_value, max_value; + u32 rlim = MIN(afl->queue_cycle, (u32)3); + + if (unlikely(!afl->run_over10m)) { rlim = 1; } + + switch (rand_below(afl, rlim)) { + + case 0: + min_value = 1; + max_value = HAVOC_BLK_SMALL; + break; + + case 1: + min_value = HAVOC_BLK_SMALL; + max_value = HAVOC_BLK_MEDIUM; + break; + + default: + + if (likely(rand_below(afl, 10))) { + + min_value = HAVOC_BLK_MEDIUM; + max_value = HAVOC_BLK_LARGE; + + } else { + + min_value = HAVOC_BLK_LARGE; + max_value = HAVOC_BLK_XL; + + } + + } + + if (min_value >= limit) { min_value = 1; } + + return min_value + rand_below(afl, MIN(max_value, limit) - min_value + 1); + +} + +inline u32 afl_mutate(afl_state_t *afl, u8 *buf, u32 len, u32 steps, + bool is_text, bool is_exploration, u8 *splice_buf, + u32 splice_len, u32 max_len) { + + if (!buf || !len) { return 0; } + + u32 *mutation_array; + static u8 *tmp_buf = NULL; + static u32 tmp_buf_size = 0; + + if (max_len > tmp_buf_size) { + + if (tmp_buf) { + + u8 *ptr = realloc(tmp_buf, max_len); + + if (!ptr) { + + return 0; + + } else { + + tmp_buf = ptr; + + } + + } else { + + if ((tmp_buf = malloc(max_len)) == NULL) { return 0; } + + } + + tmp_buf_size = max_len; + + } + + if (is_text) { + + if (is_exploration) { + + mutation_array = (u32 *)&mutation_strategy_exploration_text; + + } else { + + mutation_array = (u32 *)&mutation_strategy_exploitation_text; + + } + + } else { + + if (is_exploration) { + + mutation_array = (u32 *)&mutation_strategy_exploration_binary; + + } else { + + mutation_array = (u32 *)&mutation_strategy_exploitation_binary; + + } + + } + + for (u32 step = 0; step < steps; ++step) { + + retry_havoc_step: { + + u32 r = rand_below(afl, MUT_STRATEGY_ARRAY_SIZE), item; + + switch (mutation_array[r]) { + + case MUT_FLIPBIT: { + + /* Flip a single bit somewhere. Spooky! */ + u8 bit = rand_below(afl, 8); + u32 off = rand_below(afl, len); + buf[off] ^= 1 << bit; + + break; + + } + + case MUT_INTERESTING8: { + + /* Set byte to interesting value. */ + + item = rand_below(afl, sizeof(interesting_8)); + buf[rand_below(afl, len)] = interesting_8[item]; + break; + + } + + case MUT_INTERESTING16: { + + /* Set word to interesting value, little endian. */ + + if (unlikely(len < 2)) { break; } // no retry + + item = rand_below(afl, sizeof(interesting_16) >> 1); + *(u16 *)(buf + rand_below(afl, len - 1)) = interesting_16[item]; + + break; + + } + + case MUT_INTERESTING16BE: { + + /* Set word to interesting value, big endian. */ + + if (unlikely(len < 2)) { break; } // no retry + + item = rand_below(afl, sizeof(interesting_16) >> 1); + *(u16 *)(buf + rand_below(afl, len - 1)) = SWAP16(interesting_16[item]); + + break; + + } + + case MUT_INTERESTING32: { + + /* Set dword to interesting value, little endian. */ + + if (unlikely(len < 4)) { break; } // no retry + + item = rand_below(afl, sizeof(interesting_32) >> 2); + *(u32 *)(buf + rand_below(afl, len - 3)) = interesting_32[item]; + + break; + + } + + case MUT_INTERESTING32BE: { + + /* Set dword to interesting value, big endian. 
*/ + + if (unlikely(len < 4)) { break; } // no retry + + item = rand_below(afl, sizeof(interesting_32) >> 2); + *(u32 *)(buf + rand_below(afl, len - 3)) = SWAP32(interesting_32[item]); + + break; + + } + + case MUT_ARITH8_: { + + /* Randomly subtract from byte. */ + + item = 1 + rand_below(afl, ARITH_MAX); + buf[rand_below(afl, len)] -= item; + break; + + } + + case MUT_ARITH8: { + + /* Randomly add to byte. */ + + item = 1 + rand_below(afl, ARITH_MAX); + buf[rand_below(afl, len)] += item; + break; + + } + + case MUT_ARITH16_: { + + /* Randomly subtract from word, little endian. */ + + if (unlikely(len < 2)) { break; } // no retry + + u32 pos = rand_below(afl, len - 1); + item = 1 + rand_below(afl, ARITH_MAX); + *(u16 *)(buf + pos) -= item; + + break; + + } + + case MUT_ARITH16BE_: { + + /* Randomly subtract from word, big endian. */ + + if (unlikely(len < 2)) { break; } // no retry + + u32 pos = rand_below(afl, len - 1); + u16 num = 1 + rand_below(afl, ARITH_MAX); + *(u16 *)(buf + pos) = SWAP16(SWAP16(*(u16 *)(buf + pos)) - num); + + break; + + } + + case MUT_ARITH16: { + + /* Randomly add to word, little endian. */ + + if (unlikely(len < 2)) { break; } // no retry + + u32 pos = rand_below(afl, len - 1); + item = 1 + rand_below(afl, ARITH_MAX); + *(u16 *)(buf + pos) += item; + + break; + + } + + case MUT_ARITH16BE: { + + /* Randomly add to word, big endian. */ + + if (unlikely(len < 2)) { break; } // no retry + + u32 pos = rand_below(afl, len - 1); + u16 num = 1 + rand_below(afl, ARITH_MAX); + *(u16 *)(buf + pos) = SWAP16(SWAP16(*(u16 *)(buf + pos)) + num); + + break; + + } + + case MUT_ARITH32_: { + + /* Randomly subtract from dword, little endian. */ + + if (unlikely(len < 4)) { break; } // no retry + + u32 pos = rand_below(afl, len - 3); + item = 1 + rand_below(afl, ARITH_MAX); + *(u32 *)(buf + pos) -= item; + + break; + + } + + case MUT_ARITH32BE_: { + + /* Randomly subtract from dword, big endian. */ + + if (unlikely(len < 4)) { break; } // no retry + + u32 pos = rand_below(afl, len - 3); + u32 num = 1 + rand_below(afl, ARITH_MAX); + *(u32 *)(buf + pos) = SWAP32(SWAP32(*(u32 *)(buf + pos)) - num); + + break; + + } + + case MUT_ARITH32: { + + /* Randomly add to dword, little endian. */ + + if (unlikely(len < 4)) { break; } // no retry + + u32 pos = rand_below(afl, len - 3); + item = 1 + rand_below(afl, ARITH_MAX); + *(u32 *)(buf + pos) += item; + + break; + + } + + case MUT_ARITH32BE: { + + /* Randomly add to dword, big endian. */ + + if (unlikely(len < 4)) { break; } // no retry + + u32 pos = rand_below(afl, len - 3); + u32 num = 1 + rand_below(afl, ARITH_MAX); + *(u32 *)(buf + pos) = SWAP32(SWAP32(*(u32 *)(buf + pos)) + num); + + break; + + } + + case MUT_RAND8: { + + /* Just set a random byte to a random value. Because, + why not. We use XOR with 1-255 to eliminate the + possibility of a no-op. */ + + u32 pos = rand_below(afl, len); + item = 1 + rand_below(afl, 255); + buf[pos] ^= item; + break; + + } + + case MUT_CLONE_COPY: { + + if (likely(len + HAVOC_BLK_XL < max_len)) { + + /* Clone bytes. 
*/ + + u32 clone_len = choose_block_len(afl, len); + u32 clone_from = rand_below(afl, len - clone_len + 1); + u32 clone_to = rand_below(afl, len); + + /* Head */ + + memcpy(tmp_buf, buf, clone_to); + + /* Inserted part */ + + memcpy(tmp_buf + clone_to, buf + clone_from, clone_len); + + /* Tail */ + memcpy(tmp_buf + clone_to + clone_len, buf + clone_to, + len - clone_to); + + len += clone_len; + memcpy(buf, tmp_buf, len); + + } else if (unlikely(len < 8)) { + + break; + + } else { + + goto retry_havoc_step; + + } + + break; + + } + + case MUT_CLONE_FIXED: { + + if (likely(len + HAVOC_BLK_XL < max_len)) { + + /* Insert a block of constant bytes (25%). */ + + u32 clone_len = choose_block_len(afl, HAVOC_BLK_XL); + u32 clone_to = rand_below(afl, len); + u32 strat = rand_below(afl, 2); + u32 clone_from = clone_to ? clone_to - 1 : 0; + item = strat ? rand_below(afl, 256) : buf[clone_from]; + + /* Head */ + + memcpy(tmp_buf, buf, clone_to); + + /* Inserted part */ + + memset(tmp_buf + clone_to, item, clone_len); + + /* Tail */ + memcpy(tmp_buf + clone_to + clone_len, buf + clone_to, + len - clone_to); + + len += clone_len; + memcpy(buf, tmp_buf, len); + + } else if (unlikely(len < 8)) { + + break; + + } else { + + goto retry_havoc_step; + + } + + break; + + } + + case MUT_OVERWRITE_COPY: { + + /* Overwrite bytes with a randomly selected chunk of bytes. */ + + if (unlikely(len < 2)) { break; } // no retry + + u32 copy_len = choose_block_len(afl, len - 1); + u32 copy_from = rand_below(afl, len - copy_len + 1); + u32 copy_to = rand_below(afl, len - copy_len + 1); + + if (likely(copy_from != copy_to)) { + + memmove(buf + copy_to, buf + copy_from, copy_len); + + } + + break; + + } + + case MUT_OVERWRITE_FIXED: { + + /* Overwrite bytes with fixed bytes. */ + + if (unlikely(len < 2)) { break; } // no retry + + u32 copy_len = choose_block_len(afl, len - 1); + u32 copy_to = rand_below(afl, len - copy_len + 1); + u32 strat = rand_below(afl, 2); + u32 copy_from = copy_to ? copy_to - 1 : 0; + item = strat ? rand_below(afl, 256) : buf[copy_from]; + memset(buf + copy_to, item, copy_len); + + break; + + } + + case MUT_BYTEADD: { + + /* Increase byte by 1. */ + + buf[rand_below(afl, len)]++; + break; + + } + + case MUT_BYTESUB: { + + /* Decrease byte by 1. */ + + buf[rand_below(afl, len)]--; + break; + + } + + case MUT_FLIP8: { + + /* Flip byte. */ + + buf[rand_below(afl, len)] ^= 0xff; + break; + + } + + case MUT_SWITCH: { + + if (unlikely(len < 4)) { break; } // no retry + + /* Switch bytes. */ + + u32 to_end, switch_to, switch_len, switch_from; + switch_from = rand_below(afl, len); + do { + + switch_to = rand_below(afl, len); + + } while (unlikely(switch_from == switch_to)); + + if (switch_from < switch_to) { + + switch_len = switch_to - switch_from; + to_end = len - switch_to; + + } else { + + switch_len = switch_from - switch_to; + to_end = len - switch_from; + + } + + switch_len = choose_block_len(afl, MIN(switch_len, to_end)); + + /* Backup */ + + memcpy(tmp_buf, buf + switch_from, switch_len); + + /* Switch 1 */ + + memcpy(buf + switch_from, buf + switch_to, switch_len); + + /* Switch 2 */ + + memcpy(buf + switch_to, tmp_buf, switch_len); + + break; + + } + + case MUT_DEL: { + + /* Delete bytes. */ + + if (unlikely(len < 2)) { break; } // no retry + + /* Don't delete too much. 
*/ + + u32 del_len = choose_block_len(afl, len - 1); + u32 del_from = rand_below(afl, len - del_len + 1); + memmove(buf + del_from, buf + del_from + del_len, + len - del_from - del_len); + len -= del_len; + + break; + + } + + case MUT_SHUFFLE: { + + /* Shuffle bytes. */ + + if (unlikely(len < 4)) { break; } // no retry + + u32 blen = choose_block_len(afl, len - 1); + u32 off = rand_below(afl, len - blen + 1); + + for (u32 i = blen - 1; i > 0; i--) { + + u32 j; + do { + + j = rand_below(afl, i + 1); + + } while (unlikely(i == j)); + + u8 temp = buf[off + i]; + buf[off + i] = buf[off + j]; + buf[off + j] = temp; + + } + + break; + + } + + case MUT_DELONE: { + + /* Delete bytes. */ + + if (unlikely(len < 2)) { break; } // no retry + + /* Don't delete too much. */ + + u32 del_len = 1; + u32 del_from = rand_below(afl, len - del_len + 1); + memmove(buf + del_from, buf + del_from + del_len, + len - del_from - del_len); + + len -= del_len; + + break; + + } + + case MUT_INSERTONE: { + + if (unlikely(len < 2)) { break; } // no retry + + u32 clone_len = 1; + u32 clone_to = rand_below(afl, len); + u32 strat = rand_below(afl, 2); + u32 clone_from = clone_to ? clone_to - 1 : 0; + item = strat ? rand_below(afl, 256) : buf[clone_from]; + + /* Head */ + + memcpy(tmp_buf, buf, clone_to); + + /* Inserted part */ + + memset(tmp_buf + clone_to, item, clone_len); + + /* Tail */ + memcpy(tmp_buf + clone_to + clone_len, buf + clone_to, len - clone_to); + + len += clone_len; + memcpy(buf, tmp_buf, len); + + break; + + } + + case MUT_ASCIINUM: { + + if (unlikely(len < 4)) { break; } // no retry + + u32 off = rand_below(afl, len), off2 = off, cnt = 0; + + while (off2 + cnt < len && !isdigit(buf[off2 + cnt])) { + + ++cnt; + + } + + // none found, wrap + if (off2 + cnt == len) { + + off2 = 0; + cnt = 0; + + while (cnt < off && !isdigit(buf[off2 + cnt])) { + + ++cnt; + + } + + if (cnt == off) { + + if (len < 8) { + + break; + + } else { + + goto retry_havoc_step; + + } + + } + + } + + off = off2 + cnt; + off2 = off + 1; + + while (off2 < len && isdigit(buf[off2])) { + + ++off2; + + } + + s64 val = buf[off] - '0'; + for (u32 i = off + 1; i < off2; ++i) { + + val = (val * 10) + buf[i] - '0'; + + } + + if (off && buf[off - 1] == '-') { val = -val; } + + u32 strat = rand_below(afl, 8); + switch (strat) { + + case 0: + val++; + break; + case 1: + val--; + break; + case 2: + val *= 2; + break; + case 3: + val /= 2; + break; + case 4: + if (likely(val && (u64)val < 0x19999999)) { + + val = (u64)rand_next(afl) % (u64)((u64)val * 10); + + } else { + + val = rand_below(afl, 256); + + } + + break; + case 5: + val += rand_below(afl, 256); + break; + case 6: + val -= rand_below(afl, 256); + break; + case 7: + val = ~(val); + break; + + } + + char numbuf[32]; + snprintf(numbuf, sizeof(numbuf), "%" PRId64, val); + u32 old_len = off2 - off; + u32 new_len = strlen(numbuf); + + if (old_len == new_len) { + + memcpy(buf + off, numbuf, new_len); + + } else { + + /* Head */ + + memcpy(tmp_buf, buf, off); + + /* Inserted part */ + + memcpy(tmp_buf + off, numbuf, new_len); + + /* Tail */ + memcpy(tmp_buf + off + new_len, buf + off2, len - off2); + + len += (new_len - old_len); + memcpy(buf, tmp_buf, len); + + } + + // fprintf(stderr, "AFTER : %s\n", buf); + break; + + } + + case MUT_INSERTASCIINUM: { + + u32 ins_len = 1 + rand_below(afl, 8); + u32 pos = rand_below(afl, len); + + /* Insert ascii number. 
*/ + if (unlikely(len < pos + ins_len)) { + + // no retry if we have a small input + if (unlikely(len < 8)) { + + break; + + } else { + + goto retry_havoc_step; + + } + + } + + u64 val = rand_next(afl); + char numbuf[32]; + snprintf(numbuf, sizeof(numbuf), "%llu", val); + size_t val_len = strlen(numbuf), off; + + if (ins_len > val_len) { + + ins_len = val_len; + off = 0; + + } else { + + off = val_len - ins_len; + + } + + memcpy(buf + pos, numbuf + off, ins_len); + + break; + + } + + case MUT_EXTRA_OVERWRITE: { + + if (unlikely(!afl->extras_cnt)) { goto retry_havoc_step; } + + /* Use the dictionary. */ + + u32 use_extra = rand_below(afl, afl->extras_cnt); + u32 extra_len = afl->extras[use_extra].len; + + if (unlikely(extra_len > len)) { goto retry_havoc_step; } + + u32 insert_at = rand_below(afl, len - extra_len + 1); + memcpy(buf + insert_at, afl->extras[use_extra].data, extra_len); + + break; + + } + + case MUT_EXTRA_INSERT: { + + if (unlikely(!afl->extras_cnt)) { goto retry_havoc_step; } + + u32 use_extra = rand_below(afl, afl->extras_cnt); + u32 extra_len = afl->extras[use_extra].len; + if (unlikely(len + extra_len >= max_len)) { goto retry_havoc_step; } + + u8 *ptr = afl->extras[use_extra].data; + u32 insert_at = rand_below(afl, len + 1); + + /* Tail */ + memmove(buf + insert_at + extra_len, buf + insert_at, len - insert_at); + + /* Inserted part */ + memcpy(buf + insert_at, ptr, extra_len); + len += extra_len; + + break; + + } + + case MUT_AUTO_EXTRA_OVERWRITE: { + + if (unlikely(!afl->a_extras_cnt)) { goto retry_havoc_step; } + + /* Use the dictionary. */ + + u32 use_extra = rand_below(afl, afl->a_extras_cnt); + u32 extra_len = afl->a_extras[use_extra].len; + + if (unlikely(extra_len > len)) { goto retry_havoc_step; } + + u32 insert_at = rand_below(afl, len - extra_len + 1); + memcpy(buf + insert_at, afl->a_extras[use_extra].data, extra_len); + + break; + + } + + case MUT_AUTO_EXTRA_INSERT: { + + if (unlikely(!afl->a_extras_cnt)) { goto retry_havoc_step; } + + u32 use_extra = rand_below(afl, afl->a_extras_cnt); + u32 extra_len = afl->a_extras[use_extra].len; + if (unlikely(len + extra_len >= max_len)) { goto retry_havoc_step; } + + u8 *ptr = afl->a_extras[use_extra].data; + u32 insert_at = rand_below(afl, len + 1); + + /* Tail */ + memmove(buf + insert_at + extra_len, buf + insert_at, len - insert_at); + + /* Inserted part */ + memcpy(buf + insert_at, ptr, extra_len); + len += extra_len; + + break; + + } + + case MUT_SPLICE_OVERWRITE: { + + if (unlikely(!splice_buf || !splice_len)) { goto retry_havoc_step; } + + /* overwrite mode */ + + u32 copy_from, copy_to, copy_len; + + copy_len = choose_block_len(afl, splice_len - 1); + + if (copy_len > len) copy_len = len; + + copy_from = rand_below(afl, splice_len - copy_len + 1); + copy_to = rand_below(afl, len - copy_len + 1); + memmove(buf + copy_to, splice_buf + copy_from, copy_len); + + break; + + } + + case MUT_SPLICE_INSERT: { + + if (unlikely(!splice_buf || !splice_len)) { goto retry_havoc_step; } + + if (unlikely(len + HAVOC_BLK_XL >= max_len)) { goto retry_havoc_step; } + + /* insert mode */ + + u32 clone_from, clone_to, clone_len; + + clone_len = choose_block_len(afl, splice_len); + clone_from = rand_below(afl, splice_len - clone_len + 1); + clone_to = rand_below(afl, len + 1); + + /* Head */ + + memcpy(tmp_buf, buf, clone_to); + + /* Inserted part */ + + memcpy(tmp_buf + clone_to, splice_buf + clone_from, clone_len); + + /* Tail */ + memcpy(tmp_buf + clone_to + clone_len, buf + clone_to, len - clone_to); + + len += clone_len; + 
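    /* Editorial note: MUT_CLONE_OVERWRITE, MUT_CLONE_FIXED, MUT_INSERTONE
       and MUT_SPLICE_INSERT above all build their result with the same
       three-step head / inserted-part / tail splice through tmp_buf. A
       minimal stand-alone sketch of that pattern (insert_bytes is a
       hypothetical helper, not part of afl-mutations.h; it assumes tmp_buf
       can hold at least len + n bytes):

         static u32 insert_bytes(u8 *buf, u32 len, u8 *tmp_buf, u32 to,
                                 const u8 *src, u32 n) {

           memcpy(tmp_buf, buf, to);                      // head
           memcpy(tmp_buf + to, src, n);                  // inserted part
           memcpy(tmp_buf + to + n, buf + to, len - to);  // tail
           memcpy(buf, tmp_buf, len + n);                 // copy back
           return len + n;                                // new length

         }

       The overwrite variants write into buf in place without changing len,
       which is why only the inserting mutators check against max_len. */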
memcpy(buf, tmp_buf, len); + + break; + + } + + } + + } + + } + + return len; + +} + +#endif /* !AFL_MUTATIONS_H */ + diff --git a/include/afl-persistent-replay.h b/include/afl-persistent-replay.h new file mode 100644 index 00000000..9e60ff9c --- /dev/null +++ b/include/afl-persistent-replay.h @@ -0,0 +1,131 @@ +#ifndef _HAVE_PERSISTENT_REPLAY_H +#define _HAVE_PERSISTENT_REPLAY_H + +#include <dirent.h> +#include <string.h> +#include <stdio.h> +#include <stdlib.h> +#include <malloc.h> +#include <unistd.h> +#include <sys/stat.h> +#include <fcntl.h> + +#ifndef PATH_MAX + #define PATH_MAX 4096 +#endif + +static unsigned short int is_replay_record; +static unsigned int replay_record; +static unsigned int replay_record_cnt; +static char replay_record_path[PATH_MAX]; +static char *replay_record_dir; +static struct dirent **record_list; + +#ifdef AFL_PERSISTENT_REPLAY_ARGPARSE +static char **record_arg = NULL; +#endif // AFL_PERSISTENT_REPLAY_ARGPARSE + +static int select_files(const struct dirent *dirbuf) { + + char fn[PATH_MAX]; + + if (dirbuf->d_name[0] == '.') { + + return 0; + + } else { + + snprintf(fn, sizeof(fn), "RECORD:%06u", replay_record); + return !!strstr(dirbuf->d_name, fn); + + } + +} + +static int compare_files(const struct dirent **da, const struct dirent **db) { + + unsigned int c1 = 0, c2 = 0; + + sscanf((*da)->d_name, "RECORD:%*u,cnt:%06u", &c1); + sscanf((*db)->d_name, "RECORD:%*u,cnt:%06u", &c2); + + return c1 - c2; + +} + +__attribute__((destructor)) static void __afl_record_replay_destroy(void) { + + for (int i = 0; i < replay_record_cnt; i++) { + + free(record_list[i]); + + } + + free(record_list); + +} + +__attribute__((constructor)) static void __afl_record_replay_init( +#ifdef AFL_PERSISTENT_REPLAY_ARGPARSE + int argc, char **argv +#endif // AFL_PERSISTENT_REPLAY_ARGPARSE +) { + +#ifdef AFL_PERSISTENT_REPLAY_ARGPARSE + char **argp; +#endif // AFL_PERSISTENT_REPLAY_ARGPARSE + + struct stat sb; + + /* caveat: if harness uses @@ and we don't pass it, it will regardless loop + * the number of iterations defined for AFL_LOOP (on the same file)*/ + if (!(is_replay_record = !!getenv("AFL_PERSISTENT_REPLAY"))) { + + // printf("[warning] AFL_PERSISTENT_REPLAY not set.\n"); + return; + + } + + replay_record = atoi(getenv("AFL_PERSISTENT_REPLAY")); + replay_record_dir = getenv("AFL_PERSISTENT_DIR"); + + if (!(stat(replay_record_dir, &sb) == 0 && S_ISDIR(sb.st_mode))) { + + fprintf(stderr, "[error] Can't find the requested record directory!\n"); + is_replay_record = 0; + return; + + } + + replay_record_cnt = scandir(replay_record_dir ? replay_record_dir : "./", + &record_list, select_files, compare_files); + + if (!replay_record_cnt) { + + fprintf(stderr, "[error] Can't find the requested record!\n"); + is_replay_record = 0; + + } + +#ifdef AFL_PERSISTENT_REPLAY_ARGPARSE + argp = argv; + while (*argp) { + + if (!strcmp(*argp, "@@")) { + + record_arg = argp; + *record_arg = replay_record_path; + break; + + } + + ++argp; + + } + +#endif // AFL_PERSISTENT_REPLAY_ARGPARSE + +} + +#endif // _HAVE_PERSISTENT_REPLAY_H + diff --git a/include/afl-prealloc.h b/include/afl-prealloc.h index d19a7b52..bcccb6b4 100644 --- a/include/afl-prealloc.h +++ b/include/afl-prealloc.h @@ -5,12 +5,12 @@ Originally written by Michal Zalewski Now maintained by Marc Heuse <mh@mh-sec.de>, - Heiko Eißfeldt <heiko.eissfeldt@hexco.de>, + Heiko Eissfeldt <heiko.eissfeldt@hexco.de>, Andrea Fioraldi <andreafioraldi@gmail.com>, Dominik Maier <mail@dmnk.co> Copyright 2016, 2017 Google Inc. All rights reserved. 
- Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/afl-record-compat.h b/include/afl-record-compat.h new file mode 100644 index 00000000..3e5d60e3 --- /dev/null +++ b/include/afl-record-compat.h @@ -0,0 +1,67 @@ +#ifndef _HAVE_AFL_COMPAT_H +#define _HAVE_AFL_COMPAT_H + +#include <afl-persistent-replay.h> + +#define FUZZ_BUF_SIZE 1024000 + +// extern ssize_t read(int fildes, void *buf, size_t nbyte); + +// extern int __afl_persistent_loop(unsigned int max_cnt); +// extern unsigned char fuzz_buf[]; + +#ifndef __AFL_HAVE_MANUAL_CONTROL + #define __AFL_HAVE_MANUAL_CONTROL +#endif + +#define __AFL_FUZZ_TESTCASE_LEN (read(0, fuzz_buf, FUZZ_BUF_SIZE)) +#define __AFL_FUZZ_TESTCASE_BUF fuzz_buf +#define __AFL_FUZZ_INIT() void sync(void); +#define __AFL_INIT() sync() +#define __AFL_LOOP(x) __afl_persistent_loop(x) + +unsigned char fuzz_buf[FUZZ_BUF_SIZE]; + +int __afl_persistent_loop(unsigned int max_cnt) { + + static unsigned int cycle_cnt = 1; + static unsigned short int inited = 0; + char tcase[PATH_MAX]; + + if (is_replay_record && cycle_cnt) { + + if (!inited) { + + cycle_cnt = replay_record_cnt; + inited = 1; + + } + + snprintf(tcase, PATH_MAX, "%s/%s", + replay_record_dir ? replay_record_dir : "./", + record_list[replay_record_cnt - cycle_cnt]->d_name); + +#ifdef AFL_PERSISTENT_REPLAY_ARGPARSE + if (record_arg) { + + *record_arg = tcase; + + } else + +#endif // AFL_PERSISTENT_REPLAY_ARGPARSE + { + + int fd = open(tcase, O_RDONLY); + dup2(fd, 0); + close(fd); + + } + + } + + return cycle_cnt--; + +} + +#endif // _HAVE_AFL_COMPAT_H + diff --git a/include/alloc-inl.h b/include/alloc-inl.h index 1e9a192b..dad0652f 100644 --- a/include/alloc-inl.h +++ b/include/alloc-inl.h @@ -5,12 +5,12 @@ Originally written by Michal Zalewski Now maintained by Marc Heuse <mh@mh-sec.de>, - Heiko Eißfeldt <heiko.eissfeldt@hexco.de>, + Heiko Eissfeldt <heiko.eissfeldt@hexco.de>, Andrea Fioraldi <andreafioraldi@gmail.com>, Dominik Maier <mail@dmnk.co> Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
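Editorial note: the two new headers above (afl-persistent-replay.h and
afl-record-compat.h) together let a persistent-mode harness re-execute saved
crash records outside of afl-fuzz. A minimal sketch of such a harness
(process() and its declaration are hypothetical stand-ins, not part of the
patch):

    #include "afl-record-compat.h"

    extern void process(unsigned char *data, size_t len); /* hypothetical */

    __AFL_FUZZ_INIT();

    int main(void) {

      unsigned char *buf = __AFL_FUZZ_TESTCASE_BUF;

      while (__AFL_LOOP(1000)) {

        /* With AFL_PERSISTENT_REPLAY=N set, __afl_persistent_loop() dup2()s
           each matching RECORD:... file onto stdin, so this macro reads one
           recorded input per iteration. */
        ssize_t len = __AFL_FUZZ_TESTCASE_LEN;
        if (len > 0) { process(buf, (size_t)len); }

      }

      return 0;

    }

Run e.g. as AFL_PERSISTENT_REPLAY=3 AFL_PERSISTENT_DIR=out/default/crashes
./harness to replay the records saved for crash #3, in cnt order.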
@@ -322,7 +322,7 @@ static inline void DFL_ck_free(void *mem) { static inline void *DFL_ck_realloc(void *orig, u32 size) { void *ret; - u32 old_size = 0; + u32 old_size = 0; if (!size) { @@ -392,7 +392,7 @@ static inline void *DFL_ck_realloc(void *orig, u32 size) { static inline u8 *DFL_ck_strdup(u8 *str) { void *ret; - u32 size; + u32 size; if (!str) return NULL; @@ -438,14 +438,14 @@ struct TRK_obj { void *ptr; char *file, *func; - u32 line; + u32 line; }; #ifdef AFL_MAIN struct TRK_obj *TRK[ALLOC_BUCKETS]; -u32 TRK_cnt[ALLOC_BUCKETS]; +u32 TRK_cnt[ALLOC_BUCKETS]; #define alloc_report() TRK_report() diff --git a/include/android-ashmem.h b/include/android-ashmem.h index 1bfd3220..065c213b 100644 --- a/include/android-ashmem.h +++ b/include/android-ashmem.h @@ -2,7 +2,9 @@ #ifndef _ANDROID_ASHMEM_H #define _ANDROID_ASHMEM_H - #define _GNU_SOURCE + #ifndef _GNU_SOURCE + #define _GNU_SOURCE + #endif #include <sys/syscall.h> #include <unistd.h> #include <fcntl.h> diff --git a/include/cmplog.h b/include/cmplog.h index 6e16e6b0..a4449a60 100644 --- a/include/cmplog.h +++ b/include/cmplog.h @@ -7,12 +7,12 @@ Forkserver design by Jann Horn <jannhorn@googlemail.com> Now maintained by Marc Heuse <mh@mh-sec.de>, - Heiko Eißfeldt <heiko.eissfeldt@hexco.de>, + Heiko Eissfeldt <heiko.eissfeldt@hexco.de>, Andrea Fioraldi <andreafioraldi@gmail.com>, Dominik Maier <mail@dmnk.co> Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -34,22 +34,19 @@ #define CMP_MAP_W 65536 #define CMP_MAP_H 32 -#define CMP_MAP_RTN_H (CMP_MAP_H / 4) +#define CMP_MAP_RTN_H (CMP_MAP_H / 2) #define SHAPE_BYTES(x) (x + 1) -#define CMP_TYPE_INS 1 -#define CMP_TYPE_RTN 2 +#define CMP_TYPE_INS 0 +#define CMP_TYPE_RTN 1 -struct cmp_header { +struct cmp_header { // 16 bit = 2 bytes - unsigned hits : 24; - unsigned id : 24; - unsigned shape : 5; - unsigned type : 2; - unsigned attribute : 4; - unsigned overflow : 1; - unsigned reserved : 4; + unsigned hits : 6; // up to 63 entries, we have CMP_MAP_H = 32 + unsigned shape : 5; // 31+1 bytes max + unsigned type : 1; // 2: cmp, rtn + unsigned attribute : 4; // 16 for arithmetic comparison types } __attribute__((packed)); @@ -59,14 +56,17 @@ struct cmp_operands { u64 v1; u64 v0_128; u64 v1_128; + u64 unused; + u8 unused1; + u8 unused2; } __attribute__((packed)); struct cmpfn_operands { - u8 v0[31]; + u8 v0[32]; u8 v0_len; - u8 v1[31]; + u8 v1[32]; u8 v1_len; } __attribute__((packed)); diff --git a/include/common.h b/include/common.h index 8d85d201..a78dd60a 100644 --- a/include/common.h +++ b/include/common.h @@ -5,12 +5,12 @@ Originally written by Michal Zalewski Now maintained by Marc Heuse <mh@mh-sec.de>, - Heiko Eißfeldt <heiko.eissfeldt@hexco.de>, + Heiko Eissfeldt <heiko.eissfeldt@hexco.de>, Andrea Fioraldi <andreafioraldi@gmail.com>, Dominik Maier <mail@dmnk.co> Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
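Editorial note on the cmplog.h hunk above: the repacked cmp_header bit-fields
(6 + 5 + 1 + 4) now total exactly 16 bits, matching the new "16 bit = 2
bytes" comment. A compile-time check of that invariant could look like this
(a C11 sketch that restates the struct instead of including the real header):

    #include <assert.h>

    struct cmp_header {        /* mirrors the new include/cmplog.h layout */

      unsigned hits : 6;       /* up to 63, CMP_MAP_H is 32      */
      unsigned shape : 5;      /* 31 + 1 operand bytes max       */
      unsigned type : 1;       /* CMP_TYPE_INS=0, CMP_TYPE_RTN=1 */
      unsigned attribute : 4;  /* arithmetic comparison type     */

    } __attribute__((packed));

    static_assert(sizeof(struct cmp_header) == 2,
                  "cmp_header must pack into two bytes");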
@@ -115,6 +115,11 @@ u8 *stringify_mem_size(u8 *buf, size_t len, u64 val); u8 *stringify_time_diff(u8 *buf, size_t len, u64 cur_ms, u64 event_ms); +/* Unsafe: describe a time delta as a simple string. + Returns a pointer to buf for convenience. */ + +u8 *u_simplestring_time_diff(u8 *buf, u64 cur_ms, u64 event_ms); + /* Unsafe Describe integer. The buf sizes are not checked. This is unsafe but fast. Will return buf for convenience. */ diff --git a/include/config.h b/include/config.h index 764c29dc..3727dab1 100644 --- a/include/config.h +++ b/include/config.h @@ -5,12 +5,12 @@ Originally written by Michal Zalewski Now maintained by Marc Heuse <mh@mh-sec.de>, - Heiko Eißfeldt <heiko.eissfeldt@hexco.de>, - Andrea Fioraldi <andreafioraldi@gmail.com>, Dominik Maier <mail@dmnk.co> + Andrea Fioraldi <andreafioraldi@gmail.com>, + Heiko Eissfeldt <heiko.eissfeldt@hexco.de>, Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ /* Version string: */ // c = release, a = volatile github dev, e = experimental branch -#define VERSION "++4.07a" +#define VERSION "++4.21a" /****************************************************** * * @@ -43,9 +43,27 @@ Default: 8MB (defined in bytes) */ #define DEFAULT_SHMEM_SIZE (8 * 1024 * 1024) +/* Default time without any new coverage finds after which afl-fuzz + switches to exploitation mode. It automatically switches back when new + coverage is found. + Default: 1000 (seconds) */ +#define STRATEGY_SWITCH_TIME 1000 + /* Default file permission umode when creating files (default: 0600) */ #define DEFAULT_PERMISSION 0600 +/* SkipDet's global configuration */ + +#define MINIMAL_BLOCK_SIZE 64 +#define SMALL_DET_TIME (60 * 1000 * 1000U) +#define MAXIMUM_INF_EXECS (16 * 1024U) +#define MAXIMUM_QUICK_EFF_EXECS (64 * 1024U) +#define THRESHOLD_DEC_TIME (20 * 60 * 1000U) + +/* Probability of selecting eff_bytes is set 3 times higher than for other + bytes; currently disabled */ +#define EFF_HAVOC_RATE 3 + /* CMPLOG/REDQUEEN TUNING * * Here you can modify tuning and solving options for CMPLOG. @@ -54,10 +72,6 @@ * */ -/* if TRANSFORM is enabled with '-l T', this additionally enables base64 - encoding/decoding */ -// #define CMPLOG_SOLVE_TRANSFORM_BASE64 - /* If a redqueen pass finds more than one solution, try to combine them? */ #define CMPLOG_COMBINE @@ -65,10 +79,10 @@ #define CMPLOG_CORPUS_PERCENT 5U /* Number of potential positions from which we decide if cmplog becomes - useless, default 8096 */ + useless, default 12288 */ #define CMPLOG_POSITIONS_MAX (12 * 1024) -/* Maximum allowed fails per CMP value. Default: 128 */ +/* Maximum allowed fails per CMP value. Default: 96 */ #define CMPLOG_FAIL_MAX 96 /* -------------------------------------*/ @@ -81,7 +95,12 @@ will be kept and written to the crash/ directory as RECORD:... files. Note that every crash will be written, not only unique ones! */ -//#define AFL_PERSISTENT_RECORD +// #define AFL_PERSISTENT_RECORD + +/* Adds support in compiler-rt to replay persistent records in @@-style + * harnesses */ + +// #define AFL_PERSISTENT_REPLAY_ARGPARSE /* console output colors: There are three ways to configure its behavior * 1. 
default: colored outputs fixed on: defined USE_COLOR && defined @@ -118,9 +137,9 @@ // #define _WANT_ORIGINAL_AFL_ALLOC -/* Comment out to disable fancy ANSI boxes and use poor man's 7-bit UI: */ +/* Comment out to disable fancy boxes and use poor man's 7-bit UI: */ -#ifndef ANDROID_DISABLE_FANCY // Fancy boxes are ugly from adb +#ifndef DISABLE_FANCY #define FANCY_BOXES #endif @@ -354,9 +373,10 @@ 65535, /* Overflow unsig 16-bit when incremented */ \ 65536, /* Overflow unsig 16 bit */ \ 100663045, /* Large positive number (endian-agnostic) */ \ + 2139095040, /* float infinite */ \ 2147483647 /* Overflow signed 32-bit when incremented */ -#define INTERESTING_32_LEN 8 +#define INTERESTING_32_LEN 9 /*********************************************************** * * @@ -440,7 +460,15 @@ after changing this - otherwise, SEGVs may ensue. */ #define MAP_SIZE_POW2 16 + +/* Do not change this unless you really know what you are doing. */ + #define MAP_SIZE (1U << MAP_SIZE_POW2) +#if MAP_SIZE <= 2097152 + #define MAP_INITIAL_SIZE (2 << 20) // = 2097152 +#else + #define MAP_INITIAL_SIZE MAP_SIZE +#endif /* Maximum allocator request size (keep well under INT_MAX): */ diff --git a/include/debug.h b/include/debug.h index cd621a72..5496135c 100644 --- a/include/debug.h +++ b/include/debug.h @@ -5,12 +5,12 @@ Originally written by Michal Zalewski Now maintained by Marc Heuse <mh@mh-sec.de>, - Heiko Eißfeldt <heiko.eissfeldt@hexco.de>, + Heiko Eissfeldt <heiko.eissfeldt@hexco.de>, Andrea Fioraldi <andreafioraldi@gmail.com>, Dominik Maier <mail@dmnk.co> Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
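Editorial note on the config.h hunk above: the new INTERESTING_32 entry
2139095040 equals 0x7F800000, the IEEE-754 bit pattern of single-precision
+infinity, so the "float infinite" comment is meant literally. A quick
stand-alone check (not part of the patch):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {

      uint32_t bits = 2139095040;   /* == 0x7F800000 */
      float    f;
      memcpy(&f, &bits, sizeof f);  /* bit-cast via memcpy, no UB */
      printf("%f\n", f);            /* prints "inf" on IEEE-754 hosts */
      return 0;

    }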
@@ -116,7 +116,7 @@ * Box drawing sequences * *************************/ -#ifdef FANCY_BOXES +#ifdef FANCY_BOXES_NO_UTF #define SET_G1 "\x1b)0" /* Set G1 for box drawing */ #define RESET_G1 "\x1b)B" /* Reset G1 to ASCII */ @@ -136,22 +136,43 @@ #else - #define SET_G1 "" - #define RESET_G1 "" - #define bSTART "" - #define bSTOP "" - #define bH "-" - #define bV "|" - #define bLT "+" - #define bRT "+" - #define bLB "+" - #define bRB "+" - #define bX "+" - #define bVR "+" - #define bVL "+" - #define bHT "+" - #define bHB "+" - + #ifdef FANCY_BOXES + + #define SET_G1 "" + #define RESET_G1 "" + #define bSTART "" + #define bSTOP "" + #define bH "\u2500" /* Horizontal line */ + #define bV "\u2502" /* Vertical line */ + #define bLT "\u250c" /* Left top corner */ + #define bRT "\u2510" /* Right top corner */ + #define bLB "\u2514" /* Left bottom corner */ + #define bRB "\u2518" /* Right bottom corner */ + #define bX "\u253c" /* Cross */ + #define bVR "\u251c" /* Vertical, branch right */ + #define bVL "\u2524" /* Vertical, branch left */ + #define bHT "\u2534" /* Horizontal, branch top */ + #define bHB "\u252c" /* Horizontal, branch bottom */ + + #else + + #define SET_G1 "" + #define RESET_G1 "" + #define bSTART "" + #define bSTOP "" + #define bH "-" + #define bV "|" + #define bLT "+" + #define bRT "+" + #define bLB "+" + #define bRB "+" + #define bX "+" + #define bVR "+" + #define bVL "+" + #define bHT "+" + #define bHB "+" + + #endif #endif /* ^FANCY_BOXES */ /*********************** diff --git a/include/envs.h b/include/envs.h index edfd06e4..5b516905 100644 --- a/include/envs.h +++ b/include/envs.h @@ -16,240 +16,107 @@ static char *afl_environment_deprecated[] = { static char *afl_environment_variables[] = { - "AFL_ALIGNED_ALLOC", - "AFL_ALLOW_TMP", - "AFL_ANALYZE_HEX", - "AFL_AS", - "AFL_AUTORESUME", - "AFL_AS_FORCE_INSTRUMENT", - "AFL_BENCH_JUST_ONE", - "AFL_BENCH_UNTIL_CRASH", - "AFL_CAL_FAST", - "AFL_CC", - "AFL_CC_COMPILER", - "AFL_CMIN_ALLOW_ANY", - "AFL_CMIN_CRASHES_ONLY", - "AFL_CMPLOG_ONLY_NEW", - "AFL_CODE_END", - "AFL_CODE_START", - "AFL_COMPCOV_BINNAME", - "AFL_COMPCOV_LEVEL", - "AFL_CRASH_EXITCODE", - "AFL_CUSTOM_MUTATOR_LIBRARY", - "AFL_CUSTOM_MUTATOR_ONLY", - "AFL_CUSTOM_INFO_PROGRAM", - "AFL_CUSTOM_INFO_PROGRAM_ARGV", - "AFL_CUSTOM_INFO_PROGRAM_INPUT", - "AFL_CUSTOM_INFO_OUT", - "AFL_CXX", - "AFL_CYCLE_SCHEDULES", - "AFL_DEBUG", - "AFL_DEBUG_CHILD", - "AFL_DEBUG_GDB", - "AFL_DEBUG_UNICORN", - "AFL_DISABLE_TRIM", - "AFL_DISABLE_LLVM_INSTRUMENTATION", - "AFL_DONT_OPTIMIZE", - "AFL_DRIVER_STDERR_DUPLICATE_FILENAME", - "AFL_DUMB_FORKSRV", - "AFL_EARLY_FORKSERVER", - "AFL_ENTRYPOINT", - "AFL_EXIT_WHEN_DONE", - "AFL_EXIT_ON_TIME", - "AFL_EXIT_ON_SEED_ISSUES", - "AFL_FAST_CAL", - "AFL_FORCE_UI", - "AFL_FRIDA_DEBUG_MAPS", - "AFL_FRIDA_DRIVER_NO_HOOK", - "AFL_FRIDA_EXCLUDE_RANGES", - "AFL_FRIDA_INST_CACHE_SIZE", - "AFL_FRIDA_INST_COVERAGE_ABSOLUTE", - "AFL_FRIDA_INST_COVERAGE_FILE", - "AFL_FRIDA_INST_DEBUG_FILE", - "AFL_FRIDA_INST_INSN", - "AFL_FRIDA_INST_JIT", - "AFL_FRIDA_INST_NO_CACHE", - "AFL_FRIDA_INST_NO_DYNAMIC_LOAD", - "AFL_FRIDA_INST_NO_OPTIMIZE", - "AFL_FRIDA_INST_NO_PREFETCH", - "AFL_FRIDA_INST_NO_PREFETCH_BACKPATCH", + "AFL_ALIGNED_ALLOC", "AFL_ALLOW_TMP", "AFL_ANALYZE_HEX", "AFL_AS", + "AFL_AUTORESUME", "AFL_AS_FORCE_INSTRUMENT", "AFL_BENCH_JUST_ONE", + "AFL_BENCH_UNTIL_CRASH", "AFL_CAL_FAST", "AFL_CC", "AFL_CC_COMPILER", + "AFL_CMIN_ALLOW_ANY", "AFL_CMIN_CRASHES_ONLY", "AFL_CMPLOG_ONLY_NEW", + "AFL_CODE_END", "AFL_CODE_START", "AFL_COMPCOV_BINNAME", + 
"AFL_DUMP_CYCLOMATIC_COMPLEXITY", "AFL_CMPLOG_MAX_LEN", "AFL_COMPCOV_LEVEL", + "AFL_CRASH_EXITCODE", "AFL_CRASHING_SEEDS_AS_NEW_CRASH", + "AFL_CUSTOM_MUTATOR_LIBRARY", "AFL_CUSTOM_MUTATOR_ONLY", + "AFL_CUSTOM_INFO_PROGRAM", "AFL_CUSTOM_INFO_PROGRAM_ARGV", + "AFL_CUSTOM_INFO_PROGRAM_INPUT", "AFL_CUSTOM_INFO_OUT", "AFL_CXX", + "AFL_CYCLE_SCHEDULES", "AFL_DEBUG", "AFL_DEBUG_CHILD", "AFL_DEBUG_GDB", + "AFL_DEBUG_UNICORN", "AFL_DISABLE_REDUNDANT", "AFL_NO_REDUNDANT", + "AFL_DISABLE_TRIM", "AFL_NO_TRIM", "AFL_DISABLE_LLVM_INSTRUMENTATION", + "AFL_DONT_OPTIMIZE", "AFL_DRIVER_STDERR_DUPLICATE_FILENAME", + "AFL_DUMB_FORKSRV", "AFL_EARLY_FORKSERVER", "AFL_ENTRYPOINT", + "AFL_EXIT_WHEN_DONE", "AFL_EXIT_ON_TIME", "AFL_EXIT_ON_SEED_ISSUES", + "AFL_FAST_CAL", "AFL_FINAL_SYNC", "AFL_FORCE_UI", "AFL_FRIDA_DEBUG_MAPS", + "AFL_FRIDA_DRIVER_NO_HOOK", "AFL_FRIDA_EXCLUDE_RANGES", + "AFL_FRIDA_INST_CACHE_SIZE", "AFL_FRIDA_INST_COVERAGE_ABSOLUTE", + "AFL_FRIDA_INST_COVERAGE_FILE", "AFL_FRIDA_INST_DEBUG_FILE", + "AFL_FRIDA_INST_INSN", "AFL_FRIDA_INST_JIT", "AFL_FRIDA_INST_NO_CACHE", + "AFL_FRIDA_INST_NO_DYNAMIC_LOAD", "AFL_FRIDA_INST_NO_OPTIMIZE", + "AFL_FRIDA_INST_NO_PREFETCH", "AFL_FRIDA_INST_NO_PREFETCH_BACKPATCH", "AFL_FRIDA_INST_NO_SUPPRESS" "AFL_FRIDA_INST_RANGES", - "AFL_FRIDA_INST_REGS_FILE", - "AFL_FRIDA_INST_SEED", - "AFL_FRIDA_INST_TRACE", - "AFL_FRIDA_INST_TRACE_UNIQUE", - "AFL_FRIDA_INST_UNSTABLE_COVERAGE_FILE", - "AFL_FRIDA_JS_SCRIPT", - "AFL_FRIDA_OUTPUT_STDOUT", - "AFL_FRIDA_OUTPUT_STDERR", - "AFL_FRIDA_PERSISTENT_ADDR", - "AFL_FRIDA_PERSISTENT_CNT", - "AFL_FRIDA_PERSISTENT_DEBUG", - "AFL_FRIDA_PERSISTENT_HOOK", - "AFL_FRIDA_PERSISTENT_RET", - "AFL_FRIDA_STALKER_ADJACENT_BLOCKS", - "AFL_FRIDA_STALKER_IC_ENTRIES", - "AFL_FRIDA_STALKER_NO_BACKPATCH", - "AFL_FRIDA_STATS_FILE", - "AFL_FRIDA_STATS_INTERVAL", - "AFL_FRIDA_TRACEABLE", + "AFL_FRIDA_INST_REGS_FILE", "AFL_FRIDA_INST_SEED", "AFL_FRIDA_INST_TRACE", + "AFL_FRIDA_INST_TRACE_UNIQUE", "AFL_FRIDA_INST_UNSTABLE_COVERAGE_FILE", + "AFL_FRIDA_JS_SCRIPT", "AFL_FRIDA_OUTPUT_STDOUT", "AFL_FRIDA_OUTPUT_STDERR", + "AFL_FRIDA_PERSISTENT_ADDR", "AFL_FRIDA_PERSISTENT_CNT", + "AFL_FRIDA_PERSISTENT_DEBUG", "AFL_FRIDA_PERSISTENT_HOOK", + "AFL_FRIDA_PERSISTENT_RET", "AFL_FRIDA_STALKER_ADJACENT_BLOCKS", + "AFL_FRIDA_STALKER_IC_ENTRIES", "AFL_FRIDA_STALKER_NO_BACKPATCH", + "AFL_FRIDA_STATS_FILE", "AFL_FRIDA_STATS_INTERVAL", "AFL_FRIDA_TRACEABLE", "AFL_FRIDA_VERBOSE", "AFL_FUZZER_ARGS", // oss-fuzz - "AFL_FUZZER_STATS_UPDATE_INTERVAL", - "AFL_GDB", - "AFL_GCC_ALLOWLIST", - "AFL_GCC_DENYLIST", - "AFL_GCC_BLOCKLIST", - "AFL_GCC_INSTRUMENT_FILE", - "AFL_GCC_OUT_OF_LINE", - "AFL_GCC_SKIP_NEVERZERO", - "AFL_GCJ", - "AFL_HANG_TMOUT", - "AFL_FORKSRV_INIT_TMOUT", - "AFL_HARDEN", - "AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES", - "AFL_IGNORE_PROBLEMS", - "AFL_IGNORE_PROBLEMS_COVERAGE", - "AFL_IGNORE_TIMEOUTS", - "AFL_IGNORE_UNKNOWN_ENVS", - "AFL_IMPORT_FIRST", - "AFL_INPUT_LEN_MIN", - "AFL_INPUT_LEN_MAX", - "AFL_INST_LIBS", - "AFL_INST_RATIO", - "AFL_KEEP_TIMEOUTS", - "AFL_KILL_SIGNAL", - "AFL_FORK_SERVER_KILL_SIGNAL", - "AFL_KEEP_TRACES", - "AFL_KEEP_ASSEMBLY", - "AFL_LD_HARD_FAIL", - "AFL_LD_LIMIT_MB", - "AFL_LD_NO_CALLOC_OVER", - "AFL_LD_PASSTHROUGH", - "AFL_REAL_LD", - "AFL_LD_PRELOAD", - "AFL_LD_VERBOSE", - "AFL_LLVM_ALLOWLIST", - "AFL_LLVM_DENYLIST", - "AFL_LLVM_BLOCKLIST", - "AFL_CMPLOG", - "AFL_LLVM_CMPLOG", - "AFL_GCC_CMPLOG", - "AFL_LLVM_INSTRIM", - "AFL_LLVM_CALLER", - "AFL_LLVM_CTX", - "AFL_LLVM_CTX_K", - "AFL_LLVM_DICT2FILE", - "AFL_LLVM_DICT2FILE_NO_MAIN", - 
"AFL_LLVM_DOCUMENT_IDS", - "AFL_LLVM_INSTRIM_LOOPHEAD", - "AFL_LLVM_INSTRUMENT", - "AFL_LLVM_LTO_AUTODICTIONARY", - "AFL_LLVM_AUTODICTIONARY", + "AFL_FUZZER_STATS_UPDATE_INTERVAL", "AFL_GDB", "AFL_GCC_ALLOWLIST", + "AFL_GCC_DENYLIST", "AFL_GCC_BLOCKLIST", "AFL_GCC_INSTRUMENT_FILE", + "AFL_GCC_OUT_OF_LINE", "AFL_GCC_SKIP_NEVERZERO", "AFL_GCJ", + "AFL_HANG_TMOUT", "AFL_FORKSRV_INIT_TMOUT", "AFL_HARDEN", + "AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES", "AFL_IGNORE_PROBLEMS", + "AFL_IGNORE_PROBLEMS_COVERAGE", "AFL_IGNORE_SEED_PROBLEMS", + "AFL_IGNORE_TIMEOUTS", "AFL_IGNORE_UNKNOWN_ENVS", "AFL_IMPORT_FIRST", + "AFL_INPUT_LEN_MIN", "AFL_INPUT_LEN_MAX", "AFL_INST_LIBS", "AFL_INST_RATIO", + "AFL_KEEP_TIMEOUTS", "AFL_KILL_SIGNAL", "AFL_FORK_SERVER_KILL_SIGNAL", + "AFL_KEEP_TRACES", "AFL_KEEP_ASSEMBLY", "AFL_LD_HARD_FAIL", + "AFL_LD_LIMIT_MB", "AFL_LD_NO_CALLOC_OVER", "AFL_LD_PASSTHROUGH", + "AFL_REAL_LD", "AFL_LD_PRELOAD", "AFL_LD_VERBOSE", "AFL_LLVM_ALLOWLIST", + "AFL_LLVM_DENYLIST", "AFL_LLVM_BLOCKLIST", "AFL_CMPLOG", "AFL_LLVM_CMPLOG", + "AFL_GCC_CMPLOG", "AFL_LLVM_INSTRIM", "AFL_LLVM_CALLER", "AFL_LLVM_CTX", + "AFL_LLVM_LTO_CALLER", "AFL_LLVM_LTO_CTX", "AFL_LLVM_LTO_CALLER_DEPTH", + "AFL_LLVM_LTO_CTX_DEPTH", "AFL_LLVM_CALLER_DEPTH", "AFL_LLVM_CTX_DEPTH", + "AFL_LLVM_CTX_K", "AFL_LLVM_DICT2FILE", "AFL_LLVM_DICT2FILE_NO_MAIN", + "AFL_LLVM_DOCUMENT_IDS", "AFL_LLVM_INSTRIM_LOOPHEAD", "AFL_LLVM_INSTRUMENT", + "AFL_LLVM_LTO_AUTODICTIONARY", "AFL_LLVM_AUTODICTIONARY", "AFL_LLVM_SKIPSINGLEBLOCK", - "AFL_LLVM_INSTRIM_SKIPSINGLEBLOCK", - "AFL_LLVM_LAF_SPLIT_COMPARES", - "AFL_LLVM_LAF_SPLIT_COMPARES_BITW", - "AFL_LLVM_LAF_SPLIT_FLOATS", - "AFL_LLVM_LAF_SPLIT_SWITCHES", - "AFL_LLVM_LAF_ALL", - "AFL_LLVM_LAF_TRANSFORM_COMPARES", - "AFL_LLVM_MAP_ADDR", - "AFL_LLVM_MAP_DYNAMIC", - "AFL_LLVM_NGRAM_SIZE", - "AFL_NGRAM_SIZE", - "AFL_LLVM_NOT_ZERO", - "AFL_LLVM_INSTRUMENT_FILE", - "AFL_LLVM_THREADSAFE_INST", - "AFL_LLVM_SKIP_NEVERZERO", - "AFL_NO_AFFINITY", - "AFL_TRY_AFFINITY", - "AFL_LLVM_LTO_DONTWRITEID", + // Marker: ADD_TO_INJECTIONS + "AFL_LLVM_INJECTIONS_ALL", "AFL_LLVM_INJECTIONS_SQL", + "AFL_LLVM_INJECTIONS_LDAP", "AFL_LLVM_INJECTIONS_XSS", + "AFL_LLVM_INSTRIM_SKIPSINGLEBLOCK", "AFL_LLVM_LAF_SPLIT_COMPARES", + "AFL_LLVM_LAF_SPLIT_COMPARES_BITW", "AFL_LLVM_LAF_SPLIT_FLOATS", + "AFL_LLVM_LAF_SPLIT_SWITCHES", "AFL_LLVM_LAF_ALL", + "AFL_LLVM_LAF_TRANSFORM_COMPARES", "AFL_LLVM_MAP_ADDR", + "AFL_LLVM_MAP_DYNAMIC", "AFL_LLVM_NGRAM_SIZE", "AFL_NGRAM_SIZE", + "AFL_LLVM_NO_RPATH", "AFL_LLVM_NOT_ZERO", "AFL_LLVM_INSTRUMENT_FILE", + "AFL_LLVM_THREADSAFE_INST", "AFL_LLVM_SKIP_NEVERZERO", "AFL_NO_AFFINITY", + "AFL_TRY_AFFINITY", "AFL_LLVM_LTO_DONTWRITEID", "AFL_LLVM_LTO_SKIPINIT" "AFL_LLVM_LTO_STARTID", - "AFL_NO_ARITH", - "AFL_NO_AUTODICT", - "AFL_NO_BUILTIN", + "AFL_FUZZER_LOOPCOUNT", "AFL_NO_ARITH", "AFL_NO_AUTODICT", "AFL_NO_BUILTIN", #if defined USE_COLOR && !defined ALWAYS_COLORED - "AFL_NO_COLOR", - "AFL_NO_COLOUR", + "AFL_NO_COLOR", "AFL_NO_COLOUR", #endif "AFL_NO_CPU_RED", - "AFL_NO_CRASH_README", - "AFL_NO_FORKSRV", - "AFL_NO_UI", - "AFL_NO_PYTHON", - "AFL_NO_STARTUP_CALIBRATION", - "AFL_NO_WARN_INSTABILITY", - "AFL_UNTRACER_FILE", - "AFL_LLVM_USE_TRACE_PC", - "AFL_MAP_SIZE", - "AFL_MAPSIZE", + "AFL_NO_CFG_FUZZING", // afl.rs rust crate option + "AFL_NO_CRASH_README", "AFL_NO_FORKSRV", "AFL_NO_UI", "AFL_NO_PYTHON", + "AFL_NO_STARTUP_CALIBRATION", "AFL_NO_WARN_INSTABILITY", + "AFL_UNTRACER_FILE", "AFL_LLVM_USE_TRACE_PC", "AFL_MAP_SIZE", "AFL_MAPSIZE", "AFL_MAX_DET_EXTRAS", "AFL_NO_X86", // not really an env but we 
dont want to warn on it - "AFL_NOOPT", - "AFL_PASSTHROUGH", - "AFL_PATH", - "AFL_PERFORMANCE_FILE", - "AFL_PERSISTENT_RECORD", - "AFL_POST_PROCESS_KEEP_ORIGINAL", - "AFL_PRELOAD", - "AFL_TARGET_ENV", - "AFL_PYTHON_MODULE", - "AFL_QEMU_CUSTOM_BIN", - "AFL_QEMU_COMPCOV", - "AFL_QEMU_COMPCOV_DEBUG", - "AFL_QEMU_DEBUG_MAPS", - "AFL_QEMU_DISABLE_CACHE", - "AFL_QEMU_DRIVER_NO_HOOK", - "AFL_QEMU_FORCE_DFL", - "AFL_QEMU_PERSISTENT_ADDR", - "AFL_QEMU_PERSISTENT_CNT", - "AFL_QEMU_PERSISTENT_GPR", - "AFL_QEMU_PERSISTENT_HOOK", - "AFL_QEMU_PERSISTENT_MEM", - "AFL_QEMU_PERSISTENT_RET", - "AFL_QEMU_PERSISTENT_RETADDR_OFFSET", - "AFL_QEMU_PERSISTENT_EXITS", - "AFL_QEMU_INST_RANGES", - "AFL_QEMU_EXCLUDE_RANGES", - "AFL_QEMU_SNAPSHOT", - "AFL_QEMU_TRACK_UNSTABLE", - "AFL_QUIET", - "AFL_RANDOM_ALLOC_CANARY", - "AFL_REAL_PATH", - "AFL_SHUFFLE_QUEUE", - "AFL_SKIP_BIN_CHECK", - "AFL_SKIP_CPUFREQ", - "AFL_SKIP_CRASHES", - "AFL_SKIP_OSSFUZZ", - "AFL_STATSD", - "AFL_STATSD_HOST", - "AFL_STATSD_PORT", - "AFL_STATSD_TAGS_FLAVOR", - "AFL_SYNC_TIME", - "AFL_TESTCACHE_SIZE", - "AFL_TESTCACHE_ENTRIES", - "AFL_TMIN_EXACT", - "AFL_TMPDIR", - "AFL_TOKEN_FILE", - "AFL_TRACE_PC", - "AFL_USE_ASAN", - "AFL_USE_MSAN", - "AFL_USE_TRACE_PC", - "AFL_USE_UBSAN", - "AFL_USE_TSAN", - "AFL_USE_CFISAN", - "AFL_USE_LSAN", - "AFL_WINE_PATH", - "AFL_NO_SNAPSHOT", - "AFL_EXPAND_HAVOC_NOW", - "AFL_USE_FASAN", - "AFL_USE_QASAN", - "AFL_PRINT_FILENAMES", - "AFL_PIZZA_MODE", - NULL + "AFL_NOOPT", "AFL_NYX_AUX_SIZE", "AFL_NYX_DISABLE_SNAPSHOT_MODE", + "AFL_NYX_HANDLE_INVALID_WRITE", "AFL_NYX_LOG", "AFL_NYX_REUSE_SNAPSHOT", + "AFL_PASSTHROUGH", "AFL_PATH", "AFL_PERFORMANCE_FILE", + "AFL_PERSISTENT_RECORD", "AFL_POST_PROCESS_KEEP_ORIGINAL", "AFL_PRELOAD", + "AFL_TARGET_ENV", "AFL_PYTHON_MODULE", "AFL_QEMU_CUSTOM_BIN", + "AFL_QEMU_COMPCOV", "AFL_QEMU_COMPCOV_DEBUG", "AFL_QEMU_DEBUG_MAPS", + "AFL_QEMU_DISABLE_CACHE", "AFL_QEMU_DRIVER_NO_HOOK", "AFL_QEMU_FORCE_DFL", + "AFL_QEMU_PERSISTENT_ADDR", "AFL_QEMU_PERSISTENT_CNT", + "AFL_QEMU_PERSISTENT_GPR", "AFL_QEMU_PERSISTENT_HOOK", + "AFL_QEMU_PERSISTENT_MEM", "AFL_QEMU_PERSISTENT_RET", + "AFL_QEMU_PERSISTENT_RETADDR_OFFSET", "AFL_QEMU_PERSISTENT_EXITS", + "AFL_QEMU_INST_RANGES", "AFL_QEMU_EXCLUDE_RANGES", "AFL_QEMU_SNAPSHOT", + "AFL_QEMU_TRACK_UNSTABLE", "AFL_QUIET", "AFL_RANDOM_ALLOC_CANARY", + "AFL_REAL_PATH", "AFL_SHA1_FILENAMES", "AFL_SHUFFLE_QUEUE", + "AFL_SKIP_BIN_CHECK", "AFL_SKIP_CPUFREQ", "AFL_SKIP_CRASHES", + "AFL_SKIP_OSSFUZZ", "AFL_STATSD", "AFL_STATSD_HOST", "AFL_STATSD_PORT", + "AFL_STATSD_TAGS_FLAVOR", "AFL_SYNC_TIME", "AFL_TESTCACHE_SIZE", + "AFL_TESTCACHE_ENTRIES", "AFL_TMIN_EXACT", "AFL_TMPDIR", "AFL_TOKEN_FILE", + "AFL_TRACE_PC", "AFL_USE_ASAN", "AFL_USE_MSAN", "AFL_USE_TRACE_PC", + "AFL_USE_UBSAN", "AFL_USE_TSAN", "AFL_USE_CFISAN", "AFL_USE_LSAN", + "AFL_WINE_PATH", "AFL_NO_SNAPSHOT", "AFL_EXPAND_HAVOC_NOW", "AFL_USE_FASAN", + "AFL_USE_QASAN", "AFL_PRINT_FILENAMES", "AFL_PIZZA_MODE", NULL }; diff --git a/include/forkserver.h b/include/forkserver.h index f5069ce2..593e34a2 100644 --- a/include/forkserver.h +++ b/include/forkserver.h @@ -7,12 +7,12 @@ Forkserver design by Jann Horn <jannhorn@googlemail.com> Now maintained by Marc Heuse <mh@mh-sec.de>, - Heiko Eißfeldt <heiko.eissfeldt@hexco.de>, + Heiko Eissfeldt <heiko.eissfeldt@hexco.de>, Andrea Fioraldi <andreafioraldi@gmail.com>, Dominik Maier <mail@dmnk.co>> Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. 
All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -86,6 +86,8 @@ typedef struct { uint32_t size); bool (*nyx_remove_work_dir)(const char *workdir); + bool (*nyx_config_set_aux_buffer_size)(void *config, + uint32_t aux_buffer_size); } nyx_plugin_handler_t; @@ -124,7 +126,8 @@ typedef struct afl_forkserver { u8 *out_file, /* File to fuzz, if any */ *target_path; /* Path of the target */ - FILE *plot_file; /* Gnuplot output file */ + FILE *plot_file, /* Gnuplot output file */ + *det_plot_file; /* Note: last_run_timed_out is u32 to send it to the child as 4 byte array */ u32 last_run_timed_out; /* Traced process timed out? */ @@ -185,6 +188,8 @@ typedef struct afl_forkserver { u8 persistent_mode; + u32 max_length; + #ifdef __linux__ nyx_plugin_handler_t *nyx_handlers; char *out_dir_path; /* path to the output directory */ @@ -195,8 +200,10 @@ typedef struct afl_forkserver { u32 nyx_id; /* nyx runner id (0 -> master) */ u32 nyx_bind_cpu_id; /* nyx runner cpu id */ char *nyx_aux_string; + u32 nyx_aux_string_len; bool nyx_use_tmp_workdir; char *nyx_tmp_workdir_path; + s32 nyx_log_fd; #endif } afl_forkserver_t; diff --git a/include/hash.h b/include/hash.h index 0243c5b7..5d56a108 100644 --- a/include/hash.h +++ b/include/hash.h @@ -15,7 +15,7 @@ Other code written by Michal Zalewski Copyright 2016 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/list.h b/include/list.h index 283bf035..bec9abbc 100644 --- a/include/list.h +++ b/include/list.h @@ -5,12 +5,12 @@ Originally written by Michal Zalewski Now maintained by Marc Heuse <mh@mh-sec.de>, - Heiko Eißfeldt <heiko.eissfeldt@hexco.de>, + Heiko Eissfeldt <heiko.eissfeldt@hexco.de>, Andrea Fioraldi <andreafioraldi@gmail.com>, Dominik Maier <mail@dmnk.co> Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/sharedmem.h b/include/sharedmem.h index d32bd845..036fa560 100644 --- a/include/sharedmem.h +++ b/include/sharedmem.h @@ -7,12 +7,12 @@ Forkserver design by Jann Horn <jannhorn@googlemail.com> Now maintained by Marc Heuse <mh@mh-sec.de>, - Heiko Eißfeldt <heiko.eissfeldt@hexco.de>, + Heiko Eissfeldt <heiko.eissfeldt@hexco.de>, Andrea Fioraldi <andreafioraldi@gmail.com>, Dominik Maier <mail@dmnk.co> Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/include/snapshot-inl.h b/include/snapshot-inl.h index 3864e473..e577b013 100644 --- a/include/snapshot-inl.h +++ b/include/snapshot-inl.h @@ -7,12 +7,12 @@ Forkserver design by Jann Horn <jannhorn@googlemail.com> Now maintained by Marc Heuse <mh@mh-sec.de>, - Heiko Eißfeldt <heiko.eissfeldt@hexco.de>, + Heiko Eissfeldt <heiko.eissfeldt@hexco.de>, Andrea Fioraldi <andreafioraldi@gmail.com>, Dominik Maier <mail@dmnk.co> Copyright 2016, 2017 Google Inc. All rights reserved. - Copyright 2019-2023 AFLplusplus Project. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/include/t1ha.h b/include/t1ha.h new file mode 100644 index 00000000..1af29395 --- /dev/null +++ b/include/t1ha.h @@ -0,0 +1,738 @@ +/* + * Copyright (c) 2016-2020 Positive Technologies, https://www.ptsecurity.com, + * Fast Positive Hash. + * + * Portions Copyright (c) 2010-2020 Leonid Yuriev <leo@yuriev.ru>, + * The 1Hippeus project (t1h). + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgement in the product documentation would be + * appreciated but is not required. + * 2. Altered source versions must be plainly marked as such, and must not be + * misrepresented as being the original software. + * 3. This notice may not be removed or altered from any source distribution. + */ + +/* + * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" } + * by [Positive Technologies](https://www.ptsecurity.ru) + * + * Briefly, it is a 64-bit Hash Function: + * 1. Created for 64-bit little-endian platforms, in predominantly for x86_64, + * but portable and without penalties it can run on any 64-bit CPU. + * 2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash + * and all others portable hash-functions (which do not use specific + * hardware tricks). + * 3. Not suitable for cryptography. + * + * The Future will (be) Positive. Всё будет хорошо. + * + * ACKNOWLEDGEMENT: + * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев) + * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta! + */ + +#pragma once + +/***************************************************************************** + * + * PLEASE PAY ATTENTION TO THE FOLLOWING NOTES + * about macros definitions which controls t1ha behaviour and/or performance. + * + * + * 1) T1HA_SYS_UNALIGNED_ACCESS = Defines the system/platform/CPU/architecture + * abilities for unaligned data access. + * + * By default, when the T1HA_SYS_UNALIGNED_ACCESS not defined, + * it will defined on the basis hardcoded knowledge about of capabilities + * of most common CPU architectures. But you could override this + * default behavior when build t1ha library itself: + * + * // To disable unaligned access at all. + * #define T1HA_SYS_UNALIGNED_ACCESS 0 + * + * // To enable unaligned access, but indicate that it significantly slow. 
+ * #define T1HA_SYS_UNALIGNED_ACCESS 1 + * + * // To enable unaligned access, and indicate that it effecient. + * #define T1HA_SYS_UNALIGNED_ACCESS 2 + * + * + * 2) T1HA_USE_FAST_ONESHOT_READ = Controls the data reads at the end of buffer. + * + * When defined to non-zero, t1ha will use 'one shot' method for reading + * up to 8 bytes at the end of data. In this case just the one 64-bit read + * will be performed even when the available less than 8 bytes. + * + * This is little bit faster that switching by length of data tail. + * Unfortunately this will triggering a false-positive alarms from Valgrind, + * AddressSanitizer and other similar tool. + * + * By default, t1ha defines it to 1, but you could override this + * default behavior when build t1ha library itself: + * + * // For little bit faster and small code. + * #define T1HA_USE_FAST_ONESHOT_READ 1 + * + * // For calmness if doubt. + * #define T1HA_USE_FAST_ONESHOT_READ 0 + * + * + * 3) T1HA0_RUNTIME_SELECT = Controls choice fastest function in runtime. + * + * t1ha library offers the t1ha0() function as the fastest for current CPU. + * But actual CPU's features/capabilities and may be significantly different, + * especially on x86 platform. Therefore, internally, t1ha0() may require + * dynamic dispatching for choice best implementation. + * + * By default, t1ha enables such runtime choice and (may be) corresponding + * indirect calls if it reasonable, but you could override this default + * behavior when build t1ha library itself: + * + * // To enable runtime choice of fastest implementation. + * #define T1HA0_RUNTIME_SELECT 1 + * + * // To disable runtime choice of fastest implementation. + * #define T1HA0_RUNTIME_SELECT 0 + * + * When T1HA0_RUNTIME_SELECT is nonzero the t1ha0_resolve() function could + * be used to get actual t1ha0() implementation address at runtime. This is + * useful for two cases: + * - calling by local pointer-to-function usually is little + * bit faster (less overhead) than via a PLT thru the DSO boundary. + * - GNU Indirect functions (see below) don't supported by environment + * and calling by t1ha0_funcptr is not available and/or expensive. + * + * 4) T1HA_USE_INDIRECT_FUNCTIONS = Controls usage of GNU Indirect functions. + * + * In continue of T1HA0_RUNTIME_SELECT the T1HA_USE_INDIRECT_FUNCTIONS + * controls usage of ELF indirect functions feature. In general, when + * available, this reduces overhead of indirect function's calls though + * a DSO-bundary (https://sourceware.org/glibc/wiki/GNU_IFUNC). + * + * By default, t1ha engage GNU Indirect functions when it available + * and useful, but you could override this default behavior when build + * t1ha library itself: + * + * // To enable use of GNU ELF Indirect functions. + * #define T1HA_USE_INDIRECT_FUNCTIONS 1 + * + * // To disable use of GNU ELF Indirect functions. This may be useful + * // if the actual toolchain or the system's loader don't support ones. + * #define T1HA_USE_INDIRECT_FUNCTIONS 0 + * + * 5) T1HA0_AESNI_AVAILABLE = Controls AES-NI detection and dispatching on x86. + * + * In continue of T1HA0_RUNTIME_SELECT the T1HA0_AESNI_AVAILABLE controls + * detection and usage of AES-NI CPU's feature. On the other hand, this + * requires compiling parts of t1ha library with certain properly options, + * and could be difficult or inconvenient in some cases. 
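Editorial note: when T1HA0_RUNTIME_SELECT is enabled but the IFUNC mechanism
is not usable, a caller can still avoid paying the function-pointer
indirection on every hash by resolving once and caching the result. A sketch
(assumes T1HA0_RUNTIME_SELECT is nonzero, so that t1ha0_function_t and
t1ha0_resolve(), declared further down in this header, are available):

    #include "t1ha.h"

    static t1ha0_function_t hash_impl;  /* resolved once at startup */

    static void hash_init(void) { hash_impl = t1ha0_resolve(); }

    static uint64_t hash_bytes(const void *data, size_t len) {

      return hash_impl(data, len, 0 /* seed */);

    }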
+ * + * By default, t1ha engade AES-NI for t1ha0() on the x86 platform, but + * you could override this default behavior when build t1ha library itself: + * + * // To disable detection and usage of AES-NI instructions for t1ha0(). + * // This may be useful when you unable to build t1ha library properly + * // or known that AES-NI will be unavailable at the deploy. + * #define T1HA0_AESNI_AVAILABLE 0 + * + * // To force detection and usage of AES-NI instructions for t1ha0(), + * // but I don't known reasons to anybody would need this. + * #define T1HA0_AESNI_AVAILABLE 1 + * + * 6) T1HA0_DISABLED, T1HA1_DISABLED, T1HA2_DISABLED = Controls availability of + * t1ha functions. + * + * In some cases could be useful to import/use only few of t1ha functions + * or just the one. So, this definitions allows disable corresponding parts + * of t1ha library. + * + * // To disable t1ha0(), t1ha0_32le(), t1ha0_32be() and all AES-NI. + * #define T1HA0_DISABLED + * + * // To disable t1ha1_le() and t1ha1_be(). + * #define T1HA1_DISABLED + * + * // To disable t1ha2_atonce(), t1ha2_atonce128() and so on. + * #define T1HA2_DISABLED + * + *****************************************************************************/ + +#define T1HA_VERSION_MAJOR 2 +#define T1HA_VERSION_MINOR 1 +#define T1HA_VERSION_RELEASE 1 + +#ifndef __has_attribute + #define __has_attribute(x) (0) +#endif + +#ifndef __has_include + #define __has_include(x) (0) +#endif + +#ifndef __GNUC_PREREQ + #if defined(__GNUC__) && defined(__GNUC_MINOR__) + #define __GNUC_PREREQ(maj, min) \ + ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) + #else + #define __GNUC_PREREQ(maj, min) 0 + #endif +#endif /* __GNUC_PREREQ */ + +#ifndef __CLANG_PREREQ + #ifdef __clang__ + #define __CLANG_PREREQ(maj, min) \ + ((__clang_major__ << 16) + __clang_minor__ >= ((maj) << 16) + (min)) + #else + #define __CLANG_PREREQ(maj, min) (0) + #endif +#endif /* __CLANG_PREREQ */ + +#ifndef __LCC_PREREQ + #ifdef __LCC__ + #define __LCC_PREREQ(maj, min) \ + ((__LCC__ << 16) + __LCC_MINOR__ >= ((maj) << 16) + (min)) + #else + #define __LCC_PREREQ(maj, min) (0) + #endif +#endif /* __LCC_PREREQ */ + +/*****************************************************************************/ + +#ifdef _MSC_VER + /* Avoid '16' bytes padding added after data member 't1ha_context::total' + * and other warnings from std-headers if warning-level > 3. 
*/ + #pragma warning(push, 3) +#endif + +#if defined(__cplusplus) && __cplusplus >= 201103L + #include <climits> + #include <cstddef> + #include <cstdint> +#else + #include <limits.h> + #include <stddef.h> + #include <stdint.h> +#endif + +/*****************************************************************************/ + +#if defined(i386) || defined(__386) || defined(__i386) || defined(__i386__) || \ + defined(i486) || defined(__i486) || defined(__i486__) || \ + defined(i586) | defined(__i586) || defined(__i586__) || defined(i686) || \ + defined(__i686) || defined(__i686__) || defined(_M_IX86) || \ + defined(_X86_) || defined(__THW_INTEL__) || defined(__I86__) || \ + defined(__INTEL__) || defined(__x86_64) || defined(__x86_64__) || \ + defined(__amd64__) || defined(__amd64) || defined(_M_X64) || \ + defined(_M_AMD64) || defined(__IA32__) || defined(__INTEL__) + #ifndef __ia32__ + /* LY: define neutral __ia32__ for x86 and x86-64 archs */ + #define __ia32__ 1 + #endif /* __ia32__ */ + #if !defined(__amd64__) && (defined(__x86_64) || defined(__x86_64__) || \ + defined(__amd64) || defined(_M_X64)) + /* LY: define trusty __amd64__ for all AMD64/x86-64 arch */ + #define __amd64__ 1 + #endif /* __amd64__ */ +#endif /* all x86 */ + +#if !defined(__BYTE_ORDER__) || !defined(__ORDER_LITTLE_ENDIAN__) || \ + !defined(__ORDER_BIG_ENDIAN__) + +/* *INDENT-OFF* */ +/* clang-format off */ + +#if defined(__GLIBC__) || defined(__GNU_LIBRARY__) || defined(__ANDROID__) || \ + defined(HAVE_ENDIAN_H) || __has_include(<endian.h>) +#include <endian.h> +#elif defined(__APPLE__) || defined(__MACH__) || defined(__OpenBSD__) || \ + defined(HAVE_MACHINE_ENDIAN_H) || __has_include(<machine/endian.h>) +#include <machine/endian.h> +#elif defined(HAVE_SYS_ISA_DEFS_H) || __has_include(<sys/isa_defs.h>) +#include <sys/isa_defs.h> +#elif (defined(HAVE_SYS_TYPES_H) && defined(HAVE_SYS_ENDIAN_H)) || \ + (__has_include(<sys/types.h>) && __has_include(<sys/endian.h>)) +#include <sys/endian.h> +#include <sys/types.h> +#elif defined(__bsdi__) || defined(__DragonFly__) || defined(__FreeBSD__) || \ + defined(__NETBSD__) || defined(__NetBSD__) || \ + defined(HAVE_SYS_PARAM_H) || __has_include(<sys/param.h>) +#include <sys/param.h> +#endif /* OS */ + +/* *INDENT-ON* */ +/* clang-format on */ + + #if defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN) + #define __ORDER_LITTLE_ENDIAN__ __LITTLE_ENDIAN + #define __ORDER_BIG_ENDIAN__ __BIG_ENDIAN + #define __BYTE_ORDER__ __BYTE_ORDER + #elif defined(_BYTE_ORDER) && defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN) + #define __ORDER_LITTLE_ENDIAN__ _LITTLE_ENDIAN + #define __ORDER_BIG_ENDIAN__ _BIG_ENDIAN + #define __BYTE_ORDER__ _BYTE_ORDER + #else + #define __ORDER_LITTLE_ENDIAN__ 1234 + #define __ORDER_BIG_ENDIAN__ 4321 + + #if defined(__LITTLE_ENDIAN__) || \ + (defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)) || \ + defined(__ARMEL__) || defined(__THUMBEL__) || \ + defined(__AARCH64EL__) || defined(__MIPSEL__) || defined(_MIPSEL) || \ + defined(__MIPSEL) || defined(_M_ARM) || defined(_M_ARM64) || \ + defined(__e2k__) || defined(__elbrus_4c__) || \ + defined(__elbrus_8c__) || defined(__bfin__) || defined(__BFIN__) || \ + defined(__ia64__) || defined(_IA64) || defined(__IA64__) || \ + defined(__ia64) || defined(_M_IA64) || defined(__itanium__) || \ + defined(__ia32__) || defined(__CYGWIN__) || defined(_WIN64) || \ + defined(_WIN32) || defined(__TOS_WIN__) || defined(__WINDOWS__) + #define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__ + + #elif defined(__BIG_ENDIAN__) || \ + 
(defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)) || \ + defined(__ARMEB__) || defined(__THUMBEB__) || \ + defined(__AARCH64EB__) || defined(__MIPSEB__) || defined(_MIPSEB) || \ + defined(__MIPSEB) || defined(__m68k__) || defined(M68000) || \ + defined(__hppa__) || defined(__hppa) || defined(__HPPA__) || \ + defined(__sparc__) || defined(__sparc) || defined(__370__) || \ + defined(__THW_370__) || defined(__s390__) || defined(__s390x__) || \ + defined(__SYSC_ZARCH__) + #define __BYTE_ORDER__ __ORDER_BIG_ENDIAN__ + + #else + #error __BYTE_ORDER__ should be defined. + #endif /* Arch */ + + #endif +#endif /* __BYTE_ORDER__ || __ORDER_LITTLE_ENDIAN__ || __ORDER_BIG_ENDIAN__ */ + +/*****************************************************************************/ + +#ifndef __dll_export + #if defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__) + #if defined(__GNUC__) || __has_attribute(dllexport) + #define __dll_export __attribute__((dllexport)) + #else + #define __dll_export __declspec(dllexport) + #endif + #elif defined(__GNUC__) || __has_attribute(__visibility__) + #define __dll_export __attribute__((__visibility__("default"))) + #else + #define __dll_export + #endif +#endif /* __dll_export */ + +#ifndef __dll_import + #if defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__) + #if defined(__GNUC__) || __has_attribute(dllimport) + #define __dll_import __attribute__((dllimport)) + #else + #define __dll_import __declspec(dllimport) + #endif + #elif defined(__GNUC__) || __has_attribute(__visibility__) + #define __dll_import __attribute__((__visibility__("default"))) + #else + #define __dll_import + #endif +#endif /* __dll_import */ + +#ifndef __force_inline + #ifdef _MSC_VER + #define __force_inline __forceinline + #elif __GNUC_PREREQ(3, 2) || __has_attribute(__always_inline__) + #define __force_inline __inline __attribute__((__always_inline__)) + #else + #define __force_inline __inline + #endif +#endif /* __force_inline */ + +#ifndef T1HA_API + #if defined(t1ha_EXPORTS) + #define T1HA_API __dll_export + #elif defined(t1ha_IMPORTS) + #define T1HA_API __dll_import + #else + #define T1HA_API + #endif +#endif /* T1HA_API */ + +#if defined(_MSC_VER) && defined(__ia32__) + #define T1HA_ALIGN_PREFIX __declspec(align(32)) /* required only for SIMD */ +#else + #define T1HA_ALIGN_PREFIX +#endif /* _MSC_VER */ + +#if defined(__GNUC__) && defined(__ia32__) + #define T1HA_ALIGN_SUFFIX \ + __attribute__((__aligned__(32))) /* required only for SIMD */ +#else + #define T1HA_ALIGN_SUFFIX +#endif /* GCC x86 */ + +#ifndef T1HA_USE_INDIRECT_FUNCTIONS + /* GNU ELF indirect functions usage control. 
For more info please see + * https://en.wikipedia.org/wiki/Executable_and_Linkable_Format + * and https://sourceware.org/glibc/wiki/GNU_IFUNC */ + #if defined(__ELF__) && defined(__amd64__) && \ + (__has_attribute(__ifunc__) || \ + (!defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && \ + !defined(__SANITIZE_ADDRESS__) && !defined(__SSP_ALL__))) + /* Enable gnu_indirect_function by default if : + * - ELF AND x86_64 + * - attribute(__ifunc__) is available OR + * GCC >= 4 WITHOUT -fsanitize=address NOR -fstack-protector-all */ + #define T1HA_USE_INDIRECT_FUNCTIONS 1 + #else + #define T1HA_USE_INDIRECT_FUNCTIONS 0 + #endif +#endif /* T1HA_USE_INDIRECT_FUNCTIONS */ + +#if __GNUC_PREREQ(4, 0) + #pragma GCC visibility push(hidden) +#endif /* __GNUC_PREREQ(4,0) */ + +#ifdef __cplusplus +extern "C" { + +#endif + +typedef union T1HA_ALIGN_PREFIX t1ha_state256 { + + uint8_t bytes[32]; + uint32_t u32[8]; + uint64_t u64[4]; + struct { + + uint64_t a, b, c, d; + + } n; + +} t1ha_state256_t T1HA_ALIGN_SUFFIX; + +typedef struct t1ha_context { + + t1ha_state256_t state; + t1ha_state256_t buffer; + size_t partial; + uint64_t total; + +} t1ha_context_t; + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + +/****************************************************************************** + * + * Self-testing API. + * + * Unfortunately, some compilers (exactly only Microsoft Visual C/C++) has + * a bugs which leads t1ha-functions to produce wrong results. This API allows + * check the correctness of the actual code in runtime. + * + * All check-functions returns 0 on success, or -1 in case the corresponding + * hash-function failed verification. PLEASE, always perform such checking at + * initialization of your code, if you using MSVC or other troubleful compilers. + */ + +T1HA_API int t1ha_selfcheck__all_enabled(void); + +#ifndef T1HA2_DISABLED +T1HA_API int t1ha_selfcheck__t1ha2_atonce(void); +T1HA_API int t1ha_selfcheck__t1ha2_atonce128(void); +T1HA_API int t1ha_selfcheck__t1ha2_stream(void); +T1HA_API int t1ha_selfcheck__t1ha2(void); +#endif /* T1HA2_DISABLED */ + +#ifndef T1HA1_DISABLED +T1HA_API int t1ha_selfcheck__t1ha1_le(void); +T1HA_API int t1ha_selfcheck__t1ha1_be(void); +T1HA_API int t1ha_selfcheck__t1ha1(void); +#endif /* T1HA1_DISABLED */ + +#ifndef T1HA0_DISABLED +T1HA_API int t1ha_selfcheck__t1ha0_32le(void); +T1HA_API int t1ha_selfcheck__t1ha0_32be(void); +T1HA_API int t1ha_selfcheck__t1ha0(void); + + /* Define T1HA0_AESNI_AVAILABLE to 0 for disable AES-NI support. */ + #ifndef T1HA0_AESNI_AVAILABLE + #if defined(__e2k__) || \ + (defined(__ia32__) && (!defined(_M_IX86) || _MSC_VER > 1800)) + #define T1HA0_AESNI_AVAILABLE 1 + #else + #define T1HA0_AESNI_AVAILABLE 0 + #endif + #endif /* ifndef T1HA0_AESNI_AVAILABLE */ + + #if T1HA0_AESNI_AVAILABLE +T1HA_API int t1ha_selfcheck__t1ha0_ia32aes_noavx(void); +T1HA_API int t1ha_selfcheck__t1ha0_ia32aes_avx(void); + #ifndef __e2k__ +T1HA_API int t1ha_selfcheck__t1ha0_ia32aes_avx2(void); + #endif + #endif /* if T1HA0_AESNI_AVAILABLE */ +#endif /* T1HA0_DISABLED */ + +/****************************************************************************** + * + * t1ha2 = 64 and 128-bit, SLIGHTLY MORE ATTENTION FOR QUALITY AND STRENGTH. + * + * - The recommended version of "Fast Positive Hash" with good quality + * for checksum, hash tables and fingerprinting. + * - Portable and extremely efficiency on modern 64-bit CPUs. + * Designed for 64-bit little-endian platforms, + * in other cases will runs slowly. 
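Editorial note: given the self-testing API above and its warning about
compilers that miscompile t1ha, a defensive program would run the self-check
once at startup before trusting any hash values. A sketch:

    #include "t1ha.h"
    #include <stdlib.h>

    static void hash_selfcheck(void) {

      /* All self-check functions return 0 on success, -1 on failure. */
      if (t1ha_selfcheck__all_enabled() != 0) { abort(); }

    }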
+ * - Great quality of hashing and still faster than other non-t1ha hashes. + * Provides streaming mode and 128-bit result. + * + * Note: Due performance reason 64- and 128-bit results are completely + * different each other, i.e. 64-bit result is NOT any part of 128-bit. + */ +#ifndef T1HA2_DISABLED + +/* The at-once variant with 64-bit result */ +T1HA_API uint64_t t1ha2_atonce(const void *data, size_t length, uint64_t seed); + +/* The at-once variant with 128-bit result. + * Argument `extra_result` is NOT optional and MUST be valid. + * The high 64-bit part of 128-bit hash will be always unconditionally + * stored to the address given by `extra_result` argument. */ +T1HA_API uint64_t t1ha2_atonce128(uint64_t *__restrict extra_result, + const void *__restrict data, size_t length, + uint64_t seed); + +/* The init/update/final trinity for streaming. + * Return 64 or 128-bit result depentently from `extra_result` argument. */ +T1HA_API void t1ha2_init(t1ha_context_t *ctx, uint64_t seed_x, uint64_t seed_y); +T1HA_API void t1ha2_update(t1ha_context_t *__restrict ctx, + const void *__restrict data, size_t length); + +/* Argument `extra_result` is optional and MAY be NULL. + * - If `extra_result` is NOT NULL then the 128-bit hash will be calculated, + * and high 64-bit part of it will be stored to the address given + * by `extra_result` argument. + * - Otherwise the 64-bit hash will be calculated + * and returned from function directly. + * + * Note: Due performance reason 64- and 128-bit results are completely + * different each other, i.e. 64-bit result is NOT any part of 128-bit. */ +T1HA_API uint64_t t1ha2_final(t1ha_context_t *__restrict ctx, + uint64_t *__restrict extra_result /* optional */); + +#endif /* T1HA2_DISABLED */ + +/****************************************************************************** + * + * t1ha1 = 64-bit, BASELINE FAST PORTABLE HASH: + * + * - Runs faster on 64-bit platforms in other cases may runs slowly. + * - Portable and stable, returns same 64-bit result + * on all architectures and CPUs. + * - Unfortunately it fails the "strict avalanche criteria", + * see test results at https://github.com/demerphq/smhasher. + * + * This flaw is insignificant for the t1ha1() purposes and imperceptible + * from a practical point of view. + * However, nowadays this issue has resolved in the next t1ha2(), + * that was initially planned to providing a bit more quality. + */ +#ifndef T1HA1_DISABLED + +/* The little-endian variant. */ +T1HA_API uint64_t t1ha1_le(const void *data, size_t length, uint64_t seed); + +/* The big-endian variant. */ +T1HA_API uint64_t t1ha1_be(const void *data, size_t length, uint64_t seed); + +#endif /* T1HA1_DISABLED */ + +/****************************************************************************** + * + * t1ha0 = 64-bit, JUST ONLY FASTER: + * + * - Provides fast-as-possible hashing for current CPU, including + * 32-bit systems and engaging the available hardware acceleration. + * - It is a facade that selects most quick-and-dirty hash + * for the current processor. For instance, on IA32 (x86) actual function + * will be selected in runtime, depending on current CPU capabilities + * + * BE CAREFUL!!! THIS IS MEANS: + * + * 1. The quality of hash is a subject for tradeoffs with performance. + * So, the quality and strength of t1ha0() may be lower than t1ha1(), + * especially on 32-bit targets, but then much faster. + * However, guaranteed that it passes all SMHasher tests. + * + * 2. 
+
+/******************************************************************************
+ *
+ *  t1ha1 = 64-bit, BASELINE FAST PORTABLE HASH:
+ *
+ *    - Runs fastest on 64-bit platforms; in other cases it may run
+ *      more slowly.
+ *    - Portable and stable: returns the same 64-bit result
+ *      on all architectures and CPUs.
+ *    - Unfortunately it fails the "strict avalanche criterion";
+ *      see test results at https://github.com/demerphq/smhasher.
+ *
+ *      This flaw is insignificant for t1ha1()'s purposes and imperceptible
+ *      from a practical point of view.
+ *      However, this issue has since been resolved in the newer t1ha2(),
+ *      which was planned from the start to provide slightly better quality.
+ */
+#ifndef T1HA1_DISABLED
+
+/* The little-endian variant. */
+T1HA_API uint64_t t1ha1_le(const void *data, size_t length, uint64_t seed);
+
+/* The big-endian variant. */
+T1HA_API uint64_t t1ha1_be(const void *data, size_t length, uint64_t seed);
+
+#endif /* T1HA1_DISABLED */
+
+/******************************************************************************
+ *
+ *  t1ha0 = 64-bit, JUST ONLY FASTER:
+ *
+ *    - Provides the fastest possible hashing for the current CPU, including
+ *      32-bit systems, engaging any available hardware acceleration.
+ *    - It is a facade that selects the quickest (and dirtiest) hash for the
+ *      current processor. For instance, on IA32 (x86) the actual function is
+ *      selected at runtime, depending on current CPU capabilities.
+ *
+ *  BE CAREFUL!!! THIS MEANS:
+ *
+ *    1. The quality of the hash is subject to tradeoffs with performance.
+ *       So the quality and strength of t1ha0() may be lower than t1ha1(),
+ *       especially on 32-bit targets, but it is then much faster.
+ *       However, it is guaranteed to pass all SMHasher tests.
+ *
+ *    2. There is no warranty that the hash result will be the same for a
+ *       particular key on another machine or another version of libt1ha.
+ *
+ *       Briefly: such hash results and their derivatives should be used
+ *       only at runtime, and should not be persisted or transferred over
+ *       a network.
+ *
+ *
+ *  When T1HA0_RUNTIME_SELECT is nonzero, the t1ha0_resolve() function can be
+ *  used to get the actual t1ha0() implementation address at runtime. This is
+ *  useful in two cases:
+ *    - calling through a local function pointer is usually a little faster
+ *      (less overhead) than via a PLT across a DSO boundary;
+ *    - GNU indirect functions (see below) are not supported by the
+ *      environment, so calling via t1ha0_funcptr is unavailable and/or
+ *      expensive.
+ */
+
+#ifndef T1HA0_DISABLED
+
+/* The little-endian variant for 32-bit CPUs. */
+uint64_t t1ha0_32le(const void *data, size_t length, uint64_t seed);
+/* The big-endian variant for 32-bit CPUs. */
+uint64_t t1ha0_32be(const void *data, size_t length, uint64_t seed);
+
+  /* Define T1HA0_AESNI_AVAILABLE to 0 to disable AES-NI support. */
+  #ifndef T1HA0_AESNI_AVAILABLE
+    #if defined(__e2k__) ||                                                   \
+        (defined(__ia32__) && (!defined(_M_IX86) || _MSC_VER > 1800))
+      #define T1HA0_AESNI_AVAILABLE 1
+    #else
+      #define T1HA0_AESNI_AVAILABLE 0
+    #endif
+  #endif /* T1HA0_AESNI_AVAILABLE */
+
+  /* Define T1HA0_RUNTIME_SELECT to 0 to disable dispatching t1ha0 at
+   * runtime. */
+  #ifndef T1HA0_RUNTIME_SELECT
+    #if T1HA0_AESNI_AVAILABLE && !defined(__e2k__)
+      #define T1HA0_RUNTIME_SELECT 1
+    #else
+      #define T1HA0_RUNTIME_SELECT 0
+    #endif
+  #endif /* T1HA0_RUNTIME_SELECT */
+
+  #if !T1HA0_RUNTIME_SELECT && !defined(T1HA0_USE_DEFINE)
+    #if defined(__LCC__)
+      #define T1HA0_USE_DEFINE 1
+    #else
+      #define T1HA0_USE_DEFINE 0
+    #endif
+  #endif /* T1HA0_USE_DEFINE */
+
+  #if T1HA0_AESNI_AVAILABLE
+uint64_t t1ha0_ia32aes_noavx(const void *data, size_t length, uint64_t seed);
+uint64_t t1ha0_ia32aes_avx(const void *data, size_t length, uint64_t seed);
+    #ifndef __e2k__
+uint64_t t1ha0_ia32aes_avx2(const void *data, size_t length, uint64_t seed);
+    #endif
+  #endif /* T1HA0_AESNI_AVAILABLE */
+
+  #if T1HA0_RUNTIME_SELECT
+typedef uint64_t (*t1ha0_function_t)(const void *, size_t, uint64_t);
+T1HA_API t1ha0_function_t t1ha0_resolve(void);
+    #if T1HA_USE_INDIRECT_FUNCTIONS
+T1HA_API uint64_t t1ha0(const void *data, size_t length, uint64_t seed);
+    #else
+/* Otherwise a function pointer is used.
+ * Unfortunately this may add some calling overhead.
*/ +T1HA_API extern uint64_t (*t1ha0_funcptr)(const void *data, size_t length, + uint64_t seed); +static __force_inline uint64_t t1ha0(const void *data, size_t length, + uint64_t seed) { + + return t1ha0_funcptr(data, length, seed); + +} + + #endif /* T1HA_USE_INDIRECT_FUNCTIONS */ + + #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + + #if T1HA0_USE_DEFINE + + #if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \ + (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED)) + #if defined(T1HA1_DISABLED) + #define t1ha0 t1ha2_atonce + #else + #define t1ha0 t1ha1_be + #endif /* T1HA1_DISABLED */ + #else /* 32/64 */ + #define t1ha0 t1ha0_32be + #endif /* 32/64 */ + + #else /* T1HA0_USE_DEFINE */ + +static __force_inline uint64_t t1ha0(const void *data, size_t length, + uint64_t seed) { + + #if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \ + (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED)) + #if defined(T1HA1_DISABLED) + return t1ha2_atonce(data, length, seed); + #else + return t1ha1_be(data, length, seed); + #endif /* T1HA1_DISABLED */ + #else /* 32/64 */ + return t1ha0_32be(data, length, seed); + #endif /* 32/64 */ + +} + + #endif /* !T1HA0_USE_DEFINE */ + + #else /* !T1HA0_RUNTIME_SELECT && __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ */ + + #if T1HA0_USE_DEFINE + + #if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \ + (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED)) + #if defined(T1HA1_DISABLED) + #define t1ha0 t1ha2_atonce + #else + #define t1ha0 t1ha1_le + #endif /* T1HA1_DISABLED */ + #else /* 32/64 */ + #define t1ha0 t1ha0_32le + #endif /* 32/64 */ + + #else + +static __force_inline uint64_t t1ha0(const void *data, size_t length, + uint64_t seed) { + + #if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \ + (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED)) + #if defined(T1HA1_DISABLED) + return t1ha2_atonce(data, length, seed); + #else + return t1ha1_le(data, length, seed); + #endif /* T1HA1_DISABLED */ + #else /* 32/64 */ + return t1ha0_32le(data, length, seed); + #endif /* 32/64 */ + +} + + #endif /* !T1HA0_USE_DEFINE */ + + #endif /* !T1HA0_RUNTIME_SELECT */ + +#endif /* T1HA0_DISABLED */ + +#ifdef __cplusplus + +} + +#endif + +#if __GNUC_PREREQ(4, 0) + #pragma GCC visibility pop +#endif /* __GNUC_PREREQ(4,0) */ + diff --git a/include/t1ha0_ia32aes_b.h b/include/t1ha0_ia32aes_b.h new file mode 100644 index 00000000..93b16771 --- /dev/null +++ b/include/t1ha0_ia32aes_b.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2016-2020 Positive Technologies, https://www.ptsecurity.com, + * Fast Positive Hash. + * + * Portions Copyright (c) 2010-2020 Leonid Yuriev <leo@yuriev.ru>, + * The 1Hippeus project (t1h). + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgement in the product documentation would be + * appreciated but is not required. + * 2. Altered source versions must be plainly marked as such, and must not be + * misrepresented as being the original software. + * 3. 
This notice may not be removed or altered from any source distribution. + */ + +/* + * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" } + * by [Positive Technologies](https://www.ptsecurity.ru) + * + * Briefly, it is a 64-bit Hash Function: + * 1. Created for 64-bit little-endian platforms, in predominantly for x86_64, + * but portable and without penalties it can run on any 64-bit CPU. + * 2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash + * and all others portable hash-functions (which do not use specific + * hardware tricks). + * 3. Not suitable for cryptography. + * + * The Future will (be) Positive. Всё будет хорошо. + * + * ACKNOWLEDGEMENT: + * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев) + * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta! + */ + +#include "t1ha_bits.h" +#include "t1ha_selfcheck.h" + +#if T1HA0_AESNI_AVAILABLE + +uint64_t T1HA_IA32AES_NAME(const void *data, uint32_t len) { + + uint64_t a = 0; + uint64_t b = len; + + if (likely(len > 32)) { + + __m128i x = _mm_set_epi64x(a, b); + __m128i y = _mm_aesenc_si128(x, _mm_set_epi64x(prime_0, prime_1)); + + const __m128i *v = (const __m128i *)data; + const __m128i *const detent = + (const __m128i *)((const uint8_t *)data + (len & ~15ul)); + data = detent; + + if (len & 16) { + + x = _mm_add_epi64(x, _mm_loadu_si128(v++)); + y = _mm_aesenc_si128(x, y); + + } + + len &= 15; + + if (v + 7 < detent) { + + __m128i salt = y; + do { + + __m128i t = _mm_aesenc_si128(_mm_loadu_si128(v++), salt); + t = _mm_aesdec_si128(t, _mm_loadu_si128(v++)); + t = _mm_aesdec_si128(t, _mm_loadu_si128(v++)); + t = _mm_aesdec_si128(t, _mm_loadu_si128(v++)); + + t = _mm_aesdec_si128(t, _mm_loadu_si128(v++)); + t = _mm_aesdec_si128(t, _mm_loadu_si128(v++)); + t = _mm_aesdec_si128(t, _mm_loadu_si128(v++)); + t = _mm_aesdec_si128(t, _mm_loadu_si128(v++)); + + salt = _mm_add_epi64(salt, _mm_set_epi64x(prime_5, prime_6)); + t = _mm_aesenc_si128(x, t); + x = _mm_add_epi64(y, x); + y = t; + + } while (v + 7 < detent); + + } + + while (v < detent) { + + __m128i v0y = _mm_add_epi64(y, _mm_loadu_si128(v++)); + __m128i v1x = _mm_sub_epi64(x, _mm_loadu_si128(v++)); + x = _mm_aesdec_si128(x, v0y); + y = _mm_aesdec_si128(y, v1x); + + } + + x = _mm_add_epi64(_mm_aesdec_si128(x, _mm_aesenc_si128(y, x)), y); + #if defined(__x86_64__) || defined(_M_X64) + #if defined(__SSE4_1__) || defined(__AVX__) + a = _mm_extract_epi64(x, 0); + b = _mm_extract_epi64(x, 1); + #else + a = _mm_cvtsi128_si64(x); + b = _mm_cvtsi128_si64(_mm_unpackhi_epi64(x, x)); + #endif + #else + #if defined(__SSE4_1__) || defined(__AVX__) + a = (uint32_t)_mm_extract_epi32(x, 0) | (uint64_t)_mm_extract_epi32(x, 1) + << 32; + b = (uint32_t)_mm_extract_epi32(x, 2) | (uint64_t)_mm_extract_epi32(x, 3) + << 32; + #else + a = (uint32_t)_mm_cvtsi128_si32(x); + a |= (uint64_t)_mm_cvtsi128_si32(_mm_shuffle_epi32(x, 1)) << 32; + x = _mm_unpackhi_epi64(x, x); + b = (uint32_t)_mm_cvtsi128_si32(x); + b |= (uint64_t)_mm_cvtsi128_si32(_mm_shuffle_epi32(x, 1)) << 32; + #endif + #endif + #ifdef __AVX__ + _mm256_zeroupper(); + #elif !(defined(_X86_64_) || defined(__x86_64__) || defined(_M_X64) || \ + defined(__e2k__)) + _mm_empty(); + #endif + + } + + const uint64_t *v = (const uint64_t *)data; + switch (len) { + + default: + mixup64(&a, &b, fetch64_le_unaligned(v++), prime_4); + /* fall through */ + case 24: + case 23: + case 22: + case 21: + case 20: + case 19: + case 18: + case 17: + mixup64(&b, &a, fetch64_le_unaligned(v++), prime_3); + /* fall 
through */ + case 16: + case 15: + case 14: + case 13: + case 12: + case 11: + case 10: + case 9: + mixup64(&a, &b, fetch64_le_unaligned(v++), prime_2); + /* fall through */ + case 8: + case 7: + case 6: + case 5: + case 4: + case 3: + case 2: + case 1: + mixup64(&b, &a, tail64_le_unaligned(v, len), prime_1); + /* fall through */ + case 0: + return final64(a, b); + + } + +} + +#endif /* T1HA0_AESNI_AVAILABLE */ +#undef T1HA_IA32AES_NAME + diff --git a/include/t1ha_bits.h b/include/t1ha_bits.h new file mode 100644 index 00000000..0b9bbda5 --- /dev/null +++ b/include/t1ha_bits.h @@ -0,0 +1,1423 @@ +/* + * Copyright (c) 2016-2020 Positive Technologies, https://www.ptsecurity.com, + * Fast Positive Hash. + * + * Portions Copyright (c) 2010-2020 Leonid Yuriev <leo@yuriev.ru>, + * The 1Hippeus project (t1h). + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgement in the product documentation would be + * appreciated but is not required. + * 2. Altered source versions must be plainly marked as such, and must not be + * misrepresented as being the original software. + * 3. This notice may not be removed or altered from any source distribution. + */ + +/* + * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" } + * by [Positive Technologies](https://www.ptsecurity.ru) + * + * Briefly, it is a 64-bit Hash Function: + * 1. Created for 64-bit little-endian platforms, in predominantly for x86_64, + * but portable and without penalties it can run on any 64-bit CPU. + * 2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash + * and all others portable hash-functions (which do not use specific + * hardware tricks). + * 3. Not suitable for cryptography. + * + * The Future will (be) Positive. Всё будет хорошо. + * + * ACKNOWLEDGEMENT: + * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев) + * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta! + */ + +#pragma once + +#if defined(_MSC_VER) + #pragma warning(disable : 4201) /* nameless struct/union */ + #if _MSC_VER > 1800 + #pragma warning(disable : 4464) /* relative include path contains '..' */ + #endif /* 1800 */ +#endif /* MSVC */ +#include "t1ha.h" + +#ifndef T1HA_USE_FAST_ONESHOT_READ + /* Define it to 1 for little bit faster code. + * Unfortunately this may triggering a false-positive alarms from Valgrind, + * AddressSanitizer and other similar tool. + * So, define it to 0 for calmness if doubt. */ + #define T1HA_USE_FAST_ONESHOT_READ 1 +#endif /* T1HA_USE_FAST_ONESHOT_READ */ + +/*****************************************************************************/ + +#include <assert.h> /* for assert() */ +#include <stdbool.h> /* for bool */ +#include <string.h> /* for memcpy() */ + +#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ && \ + __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ + #error Unsupported byte order. 
+#endif + +#define T1HA_UNALIGNED_ACCESS__UNABLE 0 +#define T1HA_UNALIGNED_ACCESS__SLOW 1 +#define T1HA_UNALIGNED_ACCESS__EFFICIENT 2 + +#ifndef T1HA_SYS_UNALIGNED_ACCESS + #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT + #elif defined(__ia32__) + #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT + #elif defined(__e2k__) + #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__SLOW + #elif defined(__ARM_FEATURE_UNALIGNED) + #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT + #else + #define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__UNABLE + #endif +#endif /* T1HA_SYS_UNALIGNED_ACCESS */ + +#define ALIGNMENT_16 2 +#define ALIGNMENT_32 4 +#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul + #define ALIGNMENT_64 8 +#else + #define ALIGNMENT_64 4 +#endif + +#ifndef PAGESIZE + #define PAGESIZE 4096 +#endif /* PAGESIZE */ + +/***************************************************************************/ + +#ifndef __has_builtin + #define __has_builtin(x) (0) +#endif + +#ifndef __has_warning + #define __has_warning(x) (0) +#endif + +#ifndef __has_feature + #define __has_feature(x) (0) +#endif + +#ifndef __has_extension + #define __has_extension(x) (0) +#endif + +#if __has_feature(address_sanitizer) + #define __SANITIZE_ADDRESS__ 1 +#endif + +#ifndef __optimize + #if defined(__clang__) && !__has_attribute(__optimize__) + #define __optimize(ops) + #elif defined(__GNUC__) || __has_attribute(__optimize__) + #define __optimize(ops) __attribute__((__optimize__(ops))) + #else + #define __optimize(ops) + #endif +#endif /* __optimize */ + +#ifndef __cold + #if defined(__OPTIMIZE__) + #if defined(__e2k__) + #define __cold __optimize(1) __attribute__((__cold__)) + #elif defined(__clang__) && !__has_attribute(__cold__) && \ + __has_attribute(__section__) + /* just put infrequently used functions in separate section */ + #define __cold \ + __attribute__((__section__("text.unlikely"))) __optimize("Os") + #elif defined(__GNUC__) || __has_attribute(__cold__) + #define __cold __attribute__((__cold__)) __optimize("Os") + #else + #define __cold __optimize("Os") + #endif + #else + #define __cold + #endif +#endif /* __cold */ + +#if __GNUC_PREREQ(4, 4) || defined(__clang__) + + #if defined(__ia32__) || defined(__e2k__) + #include <x86intrin.h> + #endif + + #if defined(__ia32__) && !defined(__cpuid_count) + #include <cpuid.h> + #endif + + #if defined(__e2k__) + #include <e2kbuiltin.h> + #endif + + #ifndef likely + #define likely(cond) __builtin_expect(!!(cond), 1) + #endif + + #ifndef unlikely + #define unlikely(cond) __builtin_expect(!!(cond), 0) + #endif + + #if __GNUC_PREREQ(4, 5) || __has_builtin(__builtin_unreachable) + #define unreachable() __builtin_unreachable() + #endif + + #define bswap64(v) __builtin_bswap64(v) + #define bswap32(v) __builtin_bswap32(v) + #if __GNUC_PREREQ(4, 8) || __has_builtin(__builtin_bswap16) + #define bswap16(v) __builtin_bswap16(v) + #endif + + #if !defined(__maybe_unused) && \ + (__GNUC_PREREQ(4, 3) || __has_attribute(__unused__)) + #define __maybe_unused __attribute__((__unused__)) + #endif + + #if !defined(__always_inline) && \ + (__GNUC_PREREQ(3, 2) || __has_attribute(__always_inline__)) + #define __always_inline __inline __attribute__((__always_inline__)) + #endif + + #if defined(__e2k__) + + #if __iset__ >= 3 + #define mul_64x64_high(a, b) __builtin_e2k_umulhd(a, b) + #endif /* __iset__ >= 3 */ + + #if __iset__ >= 5 +static __maybe_unused __always_inline 
unsigned e2k_add64carry_first( + uint64_t base, uint64_t addend, uint64_t *sum) { + + *sum = base + addend; + return (unsigned)__builtin_e2k_addcd_c(base, addend, 0); + +} + + #define add64carry_first(base, addend, sum) \ + e2k_add64carry_first(base, addend, sum) + +static __maybe_unused __always_inline unsigned e2k_add64carry_next( + unsigned carry, uint64_t base, uint64_t addend, uint64_t *sum) { + + *sum = __builtin_e2k_addcd(base, addend, carry); + return (unsigned)__builtin_e2k_addcd_c(base, addend, carry); + +} + + #define add64carry_next(carry, base, addend, sum) \ + e2k_add64carry_next(carry, base, addend, sum) + +static __maybe_unused __always_inline void e2k_add64carry_last(unsigned carry, + uint64_t base, + uint64_t addend, + uint64_t *sum) { + + *sum = __builtin_e2k_addcd(base, addend, carry); + +} + + #define add64carry_last(carry, base, addend, sum) \ + e2k_add64carry_last(carry, base, addend, sum) + #endif /* __iset__ >= 5 */ + + #define fetch64_be_aligned(ptr) ((uint64_t)__builtin_e2k_ld_64s_be(ptr)) + #define fetch32_be_aligned(ptr) ((uint32_t)__builtin_e2k_ld_32u_be(ptr)) + + #endif /* __e2k__ Elbrus */ + +#elif defined(_MSC_VER) + + #if _MSC_FULL_VER < 190024234 && defined(_M_IX86) + #pragma message( \ + "For AES-NI at least \"Microsoft C/C++ Compiler\" version 19.00.24234 (Visual Studio 2015 Update 3) is required.") + #endif + #if _MSC_FULL_VER < 191526730 + #pragma message( \ + "It is recommended to use \"Microsoft C/C++ Compiler\" version 19.15.26730 (Visual Studio 2017 15.8) or newer.") + #endif + #if _MSC_FULL_VER < 180040629 + #error At least "Microsoft C/C++ Compiler" version 18.00.40629 (Visual Studio 2013 Update 5) is required. + #endif + + #pragma warning(push, 1) + + #include <intrin.h> + #include <stdlib.h> + #define likely(cond) (cond) + #define unlikely(cond) (cond) + #define unreachable() __assume(0) + #define bswap64(v) _byteswap_uint64(v) + #define bswap32(v) _byteswap_ulong(v) + #define bswap16(v) _byteswap_ushort(v) + #define rot64(v, s) _rotr64(v, s) + #define rot32(v, s) _rotr(v, s) + #define __always_inline __forceinline + + #if defined(_M_X64) || defined(_M_IA64) + #pragma intrinsic(_umul128) + #define mul_64x64_128(a, b, ph) _umul128(a, b, ph) + #pragma intrinsic(_addcarry_u64) + #define add64carry_first(base, addend, sum) \ + _addcarry_u64(0, base, addend, sum) + #define add64carry_next(carry, base, addend, sum) \ + _addcarry_u64(carry, base, addend, sum) + #define add64carry_last(carry, base, addend, sum) \ + (void)_addcarry_u64(carry, base, addend, sum) + #endif + + #if defined(_M_ARM64) || defined(_M_X64) || defined(_M_IA64) + #pragma intrinsic(__umulh) + #define mul_64x64_high(a, b) __umulh(a, b) + #endif + + #if defined(_M_IX86) + #pragma intrinsic(__emulu) + #define mul_32x32_64(a, b) __emulu(a, b) + + #if _MSC_VER >= 1915 /* LY: workaround for SSA-optimizer bug */ + #pragma intrinsic(_addcarry_u32) + #define add32carry_first(base, addend, sum) \ + _addcarry_u32(0, base, addend, sum) + #define add32carry_next(carry, base, addend, sum) \ + _addcarry_u32(carry, base, addend, sum) + #define add32carry_last(carry, base, addend, sum) \ + (void)_addcarry_u32(carry, base, addend, sum) + +static __forceinline char msvc32_add64carry_first(uint64_t base, + uint64_t addend, + uint64_t *sum) { + + uint32_t *const sum32 = (uint32_t *)sum; + const uint32_t base_32l = (uint32_t)base; + const uint32_t base_32h = (uint32_t)(base >> 32); + const uint32_t addend_32l = (uint32_t)addend; + const uint32_t addend_32h = (uint32_t)(addend >> 32); + return 
add32carry_next(add32carry_first(base_32l, addend_32l, sum32), + base_32h, addend_32h, sum32 + 1); + +} + + #define add64carry_first(base, addend, sum) \ + msvc32_add64carry_first(base, addend, sum) + +static __forceinline char msvc32_add64carry_next(char carry, uint64_t base, + uint64_t addend, + uint64_t *sum) { + + uint32_t *const sum32 = (uint32_t *)sum; + const uint32_t base_32l = (uint32_t)base; + const uint32_t base_32h = (uint32_t)(base >> 32); + const uint32_t addend_32l = (uint32_t)addend; + const uint32_t addend_32h = (uint32_t)(addend >> 32); + return add32carry_next(add32carry_next(carry, base_32l, addend_32l, sum32), + base_32h, addend_32h, sum32 + 1); + +} + + #define add64carry_next(carry, base, addend, sum) \ + msvc32_add64carry_next(carry, base, addend, sum) + +static __forceinline void msvc32_add64carry_last(char carry, uint64_t base, + uint64_t addend, + uint64_t *sum) { + + uint32_t *const sum32 = (uint32_t *)sum; + const uint32_t base_32l = (uint32_t)base; + const uint32_t base_32h = (uint32_t)(base >> 32); + const uint32_t addend_32l = (uint32_t)addend; + const uint32_t addend_32h = (uint32_t)(addend >> 32); + add32carry_last(add32carry_next(carry, base_32l, addend_32l, sum32), base_32h, + addend_32h, sum32 + 1); + +} + + #define add64carry_last(carry, base, addend, sum) \ + msvc32_add64carry_last(carry, base, addend, sum) + #endif /* _MSC_FULL_VER >= 190024231 */ + + #elif defined(_M_ARM) + #define mul_32x32_64(a, b) _arm_umull(a, b) + #endif + + #pragma warning(pop) + #pragma warning(disable : 4514) /* 'xyz': unreferenced inline function \ + has been removed */ + #pragma warning(disable : 4710) /* 'xyz': function not inlined */ + #pragma warning(disable : 4711) /* function 'xyz' selected for \ + automatic inline expansion */ + #pragma warning(disable : 4127) /* conditional expression is constant */ + #pragma warning(disable : 4702) /* unreachable code */ +#endif /* Compiler */ + +#ifndef likely + #define likely(cond) (cond) +#endif +#ifndef unlikely + #define unlikely(cond) (cond) +#endif +#ifndef __maybe_unused + #define __maybe_unused +#endif +#ifndef __always_inline + #define __always_inline __inline +#endif +#ifndef unreachable + #define unreachable() \ + do { \ + \ + } while (1) +#endif + +#ifndef bswap64 + #if defined(bswap_64) + #define bswap64 bswap_64 + #elif defined(__bswap_64) + #define bswap64 __bswap_64 + #else +static __always_inline uint64_t bswap64(uint64_t v) { + + return v << 56 | v >> 56 | ((v << 40) & UINT64_C(0x00ff000000000000)) | + ((v << 24) & UINT64_C(0x0000ff0000000000)) | + ((v << 8) & UINT64_C(0x000000ff00000000)) | + ((v >> 8) & UINT64_C(0x00000000ff000000)) | + ((v >> 24) & UINT64_C(0x0000000000ff0000)) | + ((v >> 40) & UINT64_C(0x000000000000ff00)); + +} + + #endif +#endif /* bswap64 */ + +#ifndef bswap32 + #if defined(bswap_32) + #define bswap32 bswap_32 + #elif defined(__bswap_32) + #define bswap32 __bswap_32 + #else +static __always_inline uint32_t bswap32(uint32_t v) { + + return v << 24 | v >> 24 | ((v << 8) & UINT32_C(0x00ff0000)) | + ((v >> 8) & UINT32_C(0x0000ff00)); + +} + + #endif +#endif /* bswap32 */ + +#ifndef bswap16 + #if defined(bswap_16) + #define bswap16 bswap_16 + #elif defined(__bswap_16) + #define bswap16 __bswap_16 + #else +static __always_inline uint16_t bswap16(uint16_t v) { + + return v << 8 | v >> 8; + +} + + #endif +#endif /* bswap16 */ + +#if defined(__ia32__) || \ + T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT + /* The __builtin_assume_aligned() leads gcc/clang to load values into the + * 
registers, even when it is possible to directly use an operand from memory. + * This can lead to a shortage of registers and a significant slowdown. + * Therefore avoid unnecessary use of __builtin_assume_aligned() for x86. */ + #define read_unaligned(ptr, bits) (*(const uint##bits##_t *__restrict)(ptr)) + #define read_aligned(ptr, bits) (*(const uint##bits##_t *__restrict)(ptr)) +#endif /* __ia32__ */ + +#ifndef read_unaligned + #if defined(__GNUC__) || __has_attribute(__packed__) +typedef struct { + + uint8_t unaligned_8; + uint16_t unaligned_16; + uint32_t unaligned_32; + uint64_t unaligned_64; + +} __attribute__((__packed__)) t1ha_unaligned_proxy; + + #define read_unaligned(ptr, bits) \ + (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \ + t1ha_unaligned_proxy, unaligned_##bits))) \ + ->unaligned_##bits) + #elif defined(_MSC_VER) + #pragma warning( \ + disable : 4235) /* nonstandard extension used: '__unaligned' \ + * keyword not supported on this architecture */ + #define read_unaligned(ptr, bits) \ + (*(const __unaligned uint##bits##_t *)(ptr)) + #else + #pragma pack(push, 1) +typedef struct { + + uint8_t unaligned_8; + uint16_t unaligned_16; + uint32_t unaligned_32; + uint64_t unaligned_64; + +} t1ha_unaligned_proxy; + + #pragma pack(pop) + #define read_unaligned(ptr, bits) \ + (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \ + t1ha_unaligned_proxy, unaligned_##bits))) \ + ->unaligned_##bits) + #endif +#endif /* read_unaligned */ + +#ifndef read_aligned + #if __GNUC_PREREQ(4, 8) || __has_builtin(__builtin_assume_aligned) + #define read_aligned(ptr, bits) \ + (*(const uint##bits##_t *)__builtin_assume_aligned(ptr, ALIGNMENT_##bits)) + #elif (__GNUC_PREREQ(3, 3) || __has_attribute(__aligned__)) && \ + !defined(__clang__) + #define read_aligned(ptr, bits) \ + (*(const uint##bits##_t \ + __attribute__((__aligned__(ALIGNMENT_##bits))) *)(ptr)) + #elif __has_attribute(__assume_aligned__) + +static __always_inline const uint16_t *__attribute__(( + __assume_aligned__(ALIGNMENT_16))) cast_aligned_16(const void *ptr) { + + return (const uint16_t *)ptr; + +} + +static __always_inline const uint32_t *__attribute__(( + __assume_aligned__(ALIGNMENT_32))) cast_aligned_32(const void *ptr) { + + return (const uint32_t *)ptr; + +} + +static __always_inline const uint64_t *__attribute__(( + __assume_aligned__(ALIGNMENT_64))) cast_aligned_64(const void *ptr) { + + return (const uint64_t *)ptr; + +} + + #define read_aligned(ptr, bits) (*cast_aligned_##bits(ptr)) + + #elif defined(_MSC_VER) + #define read_aligned(ptr, bits) \ + (*(const __declspec(align(ALIGNMENT_##bits)) uint##bits##_t *)(ptr)) + #else + #define read_aligned(ptr, bits) (*(const uint##bits##_t *)(ptr)) + #endif +#endif /* read_aligned */ + +#ifndef prefetch + #if (__GNUC_PREREQ(4, 0) || __has_builtin(__builtin_prefetch)) && \ + !defined(__ia32__) + #define prefetch(ptr) __builtin_prefetch(ptr) + #elif defined(_M_ARM64) || defined(_M_ARM) + #define prefetch(ptr) __prefetch(ptr) + #else + #define prefetch(ptr) \ + do { \ + \ + (void)(ptr); \ + \ + } while (0) + + #endif +#endif /* prefetch */ + +#if __has_warning("-Wconstant-logical-operand") + #if defined(__clang__) + #pragma clang diagnostic ignored "-Wconstant-logical-operand" + #elif defined(__GNUC__) + #pragma GCC diagnostic ignored "-Wconstant-logical-operand" + #else + #pragma warning disable "constant-logical-operand" + #endif +#endif /* -Wconstant-logical-operand */ + +#if __has_warning("-Wtautological-pointer-compare") + #if defined(__clang__) 
+ #pragma clang diagnostic ignored "-Wtautological-pointer-compare" + #elif defined(__GNUC__) + #pragma GCC diagnostic ignored "-Wtautological-pointer-compare" + #else + #pragma warning disable "tautological-pointer-compare" + #endif +#endif /* -Wtautological-pointer-compare */ + +/***************************************************************************/ + +#if __GNUC_PREREQ(4, 0) + #pragma GCC visibility push(hidden) +#endif /* __GNUC_PREREQ(4,0) */ + +/*---------------------------------------------------------- Little Endian */ + +#ifndef fetch16_le_aligned +static __maybe_unused __always_inline uint16_t +fetch16_le_aligned(const void *v) { + + assert(((uintptr_t)v) % ALIGNMENT_16 == 0); + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + return read_aligned(v, 16); + #else + return bswap16(read_aligned(v, 16)); + #endif + +} + +#endif /* fetch16_le_aligned */ + +#ifndef fetch16_le_unaligned +static __maybe_unused __always_inline uint16_t +fetch16_le_unaligned(const void *v) { + + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE + const uint8_t *p = (const uint8_t *)v; + return p[0] | (uint16_t)p[1] << 8; + #elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + return read_unaligned(v, 16); + #else + return bswap16(read_unaligned(v, 16)); + #endif + +} + +#endif /* fetch16_le_unaligned */ + +#ifndef fetch32_le_aligned +static __maybe_unused __always_inline uint32_t +fetch32_le_aligned(const void *v) { + + assert(((uintptr_t)v) % ALIGNMENT_32 == 0); + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + return read_aligned(v, 32); + #else + return bswap32(read_aligned(v, 32)); + #endif + +} + +#endif /* fetch32_le_aligned */ + +#ifndef fetch32_le_unaligned +static __maybe_unused __always_inline uint32_t +fetch32_le_unaligned(const void *v) { + + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE + return fetch16_le_unaligned(v) | + (uint32_t)fetch16_le_unaligned((const uint8_t *)v + 2) << 16; + #elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + return read_unaligned(v, 32); + #else + return bswap32(read_unaligned(v, 32)); + #endif + +} + +#endif /* fetch32_le_unaligned */ + +#ifndef fetch64_le_aligned +static __maybe_unused __always_inline uint64_t +fetch64_le_aligned(const void *v) { + + assert(((uintptr_t)v) % ALIGNMENT_64 == 0); + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + return read_aligned(v, 64); + #else + return bswap64(read_aligned(v, 64)); + #endif + +} + +#endif /* fetch64_le_aligned */ + +#ifndef fetch64_le_unaligned +static __maybe_unused __always_inline uint64_t +fetch64_le_unaligned(const void *v) { + + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE + return fetch32_le_unaligned(v) | + (uint64_t)fetch32_le_unaligned((const uint8_t *)v + 4) << 32; + #elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + return read_unaligned(v, 64); + #else + return bswap64(read_unaligned(v, 64)); + #endif + +} + +#endif /* fetch64_le_unaligned */ + +static __maybe_unused __always_inline uint64_t tail64_le_aligned(const void *v, + size_t tail) { + + const uint8_t *const p = (const uint8_t *)v; +#if T1HA_USE_FAST_ONESHOT_READ && !defined(__SANITIZE_ADDRESS__) + /* We can perform a 'oneshot' read, which is little bit faster. */ + const unsigned shift = ((8 - tail) & 7) << 3; + return fetch64_le_aligned(p) & ((~UINT64_C(0)) >> shift); +#else + uint64_t r = 0; + switch (tail & 7) { + + default: + unreachable(); + /* fall through */ + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + /* For most CPUs this code is better when not needed byte reordering. 
*/ + case 0: + return fetch64_le_aligned(p); + case 7: + r = (uint64_t)p[6] << 8; + /* fall through */ + case 6: + r += p[5]; + r <<= 8; + /* fall through */ + case 5: + r += p[4]; + r <<= 32; + /* fall through */ + case 4: + return r + fetch32_le_aligned(p); + case 3: + r = (uint64_t)p[2] << 16; + /* fall through */ + case 2: + return r + fetch16_le_aligned(p); + case 1: + return p[0]; + #else + case 0: + r = p[7] << 8; + /* fall through */ + case 7: + r += p[6]; + r <<= 8; + /* fall through */ + case 6: + r += p[5]; + r <<= 8; + /* fall through */ + case 5: + r += p[4]; + r <<= 8; + /* fall through */ + case 4: + r += p[3]; + r <<= 8; + /* fall through */ + case 3: + r += p[2]; + r <<= 8; + /* fall through */ + case 2: + r += p[1]; + r <<= 8; + /* fall through */ + case 1: + return r + p[0]; + #endif + + } + +#endif /* T1HA_USE_FAST_ONESHOT_READ */ + +} + +#if T1HA_USE_FAST_ONESHOT_READ && \ + T1HA_SYS_UNALIGNED_ACCESS != T1HA_UNALIGNED_ACCESS__UNABLE && \ + defined(PAGESIZE) && PAGESIZE > 42 && !defined(__SANITIZE_ADDRESS__) + #define can_read_underside(ptr, size) \ + (((PAGESIZE - (size)) & (uintptr_t)(ptr)) != 0) +#endif /* T1HA_USE_FAST_ONESHOT_READ */ + +static __maybe_unused __always_inline uint64_t +tail64_le_unaligned(const void *v, size_t tail) { + + const uint8_t *p = (const uint8_t *)v; +#if defined(can_read_underside) && \ + (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) + /* On some systems (e.g. x86_64) we can perform a 'oneshot' read, which + * is little bit faster. Thanks Marcin Żukowski <marcin.zukowski@gmail.com> + * for the reminder. */ + const unsigned offset = (8 - tail) & 7; + const unsigned shift = offset << 3; + if (likely(can_read_underside(p, 8))) { + + p -= offset; + return fetch64_le_unaligned(p) >> shift; + + } + + return fetch64_le_unaligned(p) & ((~UINT64_C(0)) >> shift); +#else + uint64_t r = 0; + switch (tail & 7) { + + default: + unreachable(); + /* fall through */ + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT && \ + __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + /* For most CPUs this code is better when not needed + * copying for alignment or byte reordering. */ + case 0: + return fetch64_le_unaligned(p); + case 7: + r = (uint64_t)p[6] << 8; + /* fall through */ + case 6: + r += p[5]; + r <<= 8; + /* fall through */ + case 5: + r += p[4]; + r <<= 32; + /* fall through */ + case 4: + return r + fetch32_le_unaligned(p); + case 3: + r = (uint64_t)p[2] << 16; + /* fall through */ + case 2: + return r + fetch16_le_unaligned(p); + case 1: + return p[0]; + #else + /* For most CPUs this code is better than a + * copying for alignment and/or byte reordering. 
*/ + case 0: + r = p[7] << 8; + /* fall through */ + case 7: + r += p[6]; + r <<= 8; + /* fall through */ + case 6: + r += p[5]; + r <<= 8; + /* fall through */ + case 5: + r += p[4]; + r <<= 8; + /* fall through */ + case 4: + r += p[3]; + r <<= 8; + /* fall through */ + case 3: + r += p[2]; + r <<= 8; + /* fall through */ + case 2: + r += p[1]; + r <<= 8; + /* fall through */ + case 1: + return r + p[0]; + #endif + + } + +#endif /* can_read_underside */ + +} + +/*------------------------------------------------------------- Big Endian */ + +#ifndef fetch16_be_aligned +static __maybe_unused __always_inline uint16_t +fetch16_be_aligned(const void *v) { + + assert(((uintptr_t)v) % ALIGNMENT_16 == 0); + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + return read_aligned(v, 16); + #else + return bswap16(read_aligned(v, 16)); + #endif + +} + +#endif /* fetch16_be_aligned */ + +#ifndef fetch16_be_unaligned +static __maybe_unused __always_inline uint16_t +fetch16_be_unaligned(const void *v) { + + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE + const uint8_t *p = (const uint8_t *)v; + return (uint16_t)p[0] << 8 | p[1]; + #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + return read_unaligned(v, 16); + #else + return bswap16(read_unaligned(v, 16)); + #endif + +} + +#endif /* fetch16_be_unaligned */ + +#ifndef fetch32_be_aligned +static __maybe_unused __always_inline uint32_t +fetch32_be_aligned(const void *v) { + + assert(((uintptr_t)v) % ALIGNMENT_32 == 0); + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + return read_aligned(v, 32); + #else + return bswap32(read_aligned(v, 32)); + #endif + +} + +#endif /* fetch32_be_aligned */ + +#ifndef fetch32_be_unaligned +static __maybe_unused __always_inline uint32_t +fetch32_be_unaligned(const void *v) { + + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE + return (uint32_t)fetch16_be_unaligned(v) << 16 | + fetch16_be_unaligned((const uint8_t *)v + 2); + #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + return read_unaligned(v, 32); + #else + return bswap32(read_unaligned(v, 32)); + #endif + +} + +#endif /* fetch32_be_unaligned */ + +#ifndef fetch64_be_aligned +static __maybe_unused __always_inline uint64_t +fetch64_be_aligned(const void *v) { + + assert(((uintptr_t)v) % ALIGNMENT_64 == 0); + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + return read_aligned(v, 64); + #else + return bswap64(read_aligned(v, 64)); + #endif + +} + +#endif /* fetch64_be_aligned */ + +#ifndef fetch64_be_unaligned +static __maybe_unused __always_inline uint64_t +fetch64_be_unaligned(const void *v) { + + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE + return (uint64_t)fetch32_be_unaligned(v) << 32 | + fetch32_be_unaligned((const uint8_t *)v + 4); + #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + return read_unaligned(v, 64); + #else + return bswap64(read_unaligned(v, 64)); + #endif + +} + +#endif /* fetch64_be_unaligned */ + +static __maybe_unused __always_inline uint64_t tail64_be_aligned(const void *v, + size_t tail) { + + const uint8_t *const p = (const uint8_t *)v; +#if T1HA_USE_FAST_ONESHOT_READ && !defined(__SANITIZE_ADDRESS__) + /* We can perform a 'oneshot' read, which is little bit faster. */ + const unsigned shift = ((8 - tail) & 7) << 3; + return fetch64_be_aligned(p) >> shift; +#else + switch (tail & 7) { + + default: + unreachable(); + /* fall through */ + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + /* For most CPUs this code is better when not byte reordering. 
*/ + case 1: + return p[0]; + case 2: + return fetch16_be_aligned(p); + case 3: + return (uint32_t)fetch16_be_aligned(p) << 8 | p[2]; + case 4: + return fetch32_be_aligned(p); + case 5: + return (uint64_t)fetch32_be_aligned(p) << 8 | p[4]; + case 6: + return (uint64_t)fetch32_be_aligned(p) << 16 | fetch16_be_aligned(p + 4); + case 7: + return (uint64_t)fetch32_be_aligned(p) << 24 | + (uint32_t)fetch16_be_aligned(p + 4) << 8 | p[6]; + case 0: + return fetch64_be_aligned(p); + #else + case 1: + return p[0]; + case 2: + return p[1] | (uint32_t)p[0] << 8; + case 3: + return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16; + case 4: + return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 | + (uint32_t)p[0] << 24; + case 5: + return p[4] | (uint32_t)p[3] << 8 | (uint32_t)p[2] << 16 | + (uint32_t)p[1] << 24 | (uint64_t)p[0] << 32; + case 6: + return p[5] | (uint32_t)p[4] << 8 | (uint32_t)p[3] << 16 | + (uint32_t)p[2] << 24 | (uint64_t)p[1] << 32 | (uint64_t)p[0] << 40; + case 7: + return p[6] | (uint32_t)p[5] << 8 | (uint32_t)p[4] << 16 | + (uint32_t)p[3] << 24 | (uint64_t)p[2] << 32 | + (uint64_t)p[1] << 40 | (uint64_t)p[0] << 48; + case 0: + return p[7] | (uint32_t)p[6] << 8 | (uint32_t)p[5] << 16 | + (uint32_t)p[4] << 24 | (uint64_t)p[3] << 32 | + (uint64_t)p[2] << 40 | (uint64_t)p[1] << 48 | (uint64_t)p[0] << 56; + #endif + + } + +#endif /* T1HA_USE_FAST_ONESHOT_READ */ + +} + +static __maybe_unused __always_inline uint64_t +tail64_be_unaligned(const void *v, size_t tail) { + + const uint8_t *p = (const uint8_t *)v; +#if defined(can_read_underside) && \ + (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) + /* On some systems (e.g. x86_64) we can perform a 'oneshot' read, which + * is little bit faster. Thanks Marcin Żukowski <marcin.zukowski@gmail.com> + * for the reminder. */ + const unsigned offset = (8 - tail) & 7; + const unsigned shift = offset << 3; + if (likely(can_read_underside(p, 8))) { + + p -= offset; + return fetch64_be_unaligned(p) & ((~UINT64_C(0)) >> shift); + + } + + return fetch64_be_unaligned(p) >> shift; +#else + switch (tail & 7) { + + default: + unreachable(); + /* fall through */ + #if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT && \ + __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + /* For most CPUs this code is better when not needed + * copying for alignment or byte reordering. */ + case 1: + return p[0]; + case 2: + return fetch16_be_unaligned(p); + case 3: + return (uint32_t)fetch16_be_unaligned(p) << 8 | p[2]; + case 4: + return fetch32_be(p); + case 5: + return (uint64_t)fetch32_be_unaligned(p) << 8 | p[4]; + case 6: + return (uint64_t)fetch32_be_unaligned(p) << 16 | + fetch16_be_unaligned(p + 4); + case 7: + return (uint64_t)fetch32_be_unaligned(p) << 24 | + (uint32_t)fetch16_be_unaligned(p + 4) << 8 | p[6]; + case 0: + return fetch64_be_unaligned(p); + #else + /* For most CPUs this code is better than a + * copying for alignment and/or byte reordering. 
*/ + case 1: + return p[0]; + case 2: + return p[1] | (uint32_t)p[0] << 8; + case 3: + return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16; + case 4: + return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 | + (uint32_t)p[0] << 24; + case 5: + return p[4] | (uint32_t)p[3] << 8 | (uint32_t)p[2] << 16 | + (uint32_t)p[1] << 24 | (uint64_t)p[0] << 32; + case 6: + return p[5] | (uint32_t)p[4] << 8 | (uint32_t)p[3] << 16 | + (uint32_t)p[2] << 24 | (uint64_t)p[1] << 32 | (uint64_t)p[0] << 40; + case 7: + return p[6] | (uint32_t)p[5] << 8 | (uint32_t)p[4] << 16 | + (uint32_t)p[3] << 24 | (uint64_t)p[2] << 32 | + (uint64_t)p[1] << 40 | (uint64_t)p[0] << 48; + case 0: + return p[7] | (uint32_t)p[6] << 8 | (uint32_t)p[5] << 16 | + (uint32_t)p[4] << 24 | (uint64_t)p[3] << 32 | + (uint64_t)p[2] << 40 | (uint64_t)p[1] << 48 | (uint64_t)p[0] << 56; + #endif + + } + +#endif /* can_read_underside */ + +} + +/***************************************************************************/ + +#ifndef rot64 +static __maybe_unused __always_inline uint64_t rot64(uint64_t v, unsigned s) { + + return (v >> s) | (v << (64 - s)); + +} + +#endif /* rot64 */ + +#ifndef mul_32x32_64 +static __maybe_unused __always_inline uint64_t mul_32x32_64(uint32_t a, + uint32_t b) { + + return a * (uint64_t)b; + +} + +#endif /* mul_32x32_64 */ + +#ifndef add64carry_first +static __maybe_unused __always_inline unsigned add64carry_first(uint64_t base, + uint64_t addend, + uint64_t *sum) { + + #if __has_builtin(__builtin_addcll) + unsigned long long carryout; + *sum = __builtin_addcll(base, addend, 0, &carryout); + return (unsigned)carryout; + #else + *sum = base + addend; + return *sum < addend; + #endif /* __has_builtin(__builtin_addcll) */ + +} + +#endif /* add64carry_fist */ + +#ifndef add64carry_next +static __maybe_unused __always_inline unsigned add64carry_next(unsigned carry, + uint64_t base, + uint64_t addend, + uint64_t *sum) { + + #if __has_builtin(__builtin_addcll) + unsigned long long carryout; + *sum = __builtin_addcll(base, addend, carry, &carryout); + return (unsigned)carryout; + #else + *sum = base + addend + carry; + return *sum < addend || (carry && *sum == addend); + #endif /* __has_builtin(__builtin_addcll) */ + +} + +#endif /* add64carry_next */ + +#ifndef add64carry_last +static __maybe_unused __always_inline void add64carry_last(unsigned carry, + uint64_t base, + uint64_t addend, + uint64_t *sum) { + + #if __has_builtin(__builtin_addcll) + unsigned long long carryout; + *sum = __builtin_addcll(base, addend, carry, &carryout); + (void)carryout; + #else + *sum = base + addend + carry; + #endif /* __has_builtin(__builtin_addcll) */ + +} + +#endif /* add64carry_last */ + +#ifndef mul_64x64_128 +static __maybe_unused __always_inline uint64_t mul_64x64_128(uint64_t a, + uint64_t b, + uint64_t *h) { + + #if (defined(__SIZEOF_INT128__) || \ + (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)) && \ + (!defined(__LCC__) || __LCC__ != 124) + __uint128_t r = (__uint128_t)a * (__uint128_t)b; + /* modern GCC could nicely optimize this */ + *h = (uint64_t)(r >> 64); + return (uint64_t)r; + #elif defined(mul_64x64_high) + *h = mul_64x64_high(a, b); + return a * b; + #else + /* performs 64x64 to 128 bit multiplication */ + const uint64_t ll = mul_32x32_64((uint32_t)a, (uint32_t)b); + const uint64_t lh = mul_32x32_64(a >> 32, (uint32_t)b); + const uint64_t hl = mul_32x32_64((uint32_t)a, b >> 32); + const uint64_t hh = mul_32x32_64(a >> 32, b >> 32); + + /* Few simplification are possible here for 32-bit 
architectures, + * but thus we would lost compatibility with the original 64-bit + * version. Think is very bad idea, because then 32-bit t1ha will + * still (relatively) very slowly and well yet not compatible. */ + uint64_t l; + add64carry_last(add64carry_first(ll, lh << 32, &l), hh, lh >> 32, h); + add64carry_last(add64carry_first(l, hl << 32, &l), *h, hl >> 32, h); + return l; + #endif + +} + +#endif /* mul_64x64_128() */ + +#ifndef mul_64x64_high +static __maybe_unused __always_inline uint64_t mul_64x64_high(uint64_t a, + uint64_t b) { + + uint64_t h; + mul_64x64_128(a, b, &h); + return h; + +} + +#endif /* mul_64x64_high */ + +/***************************************************************************/ + +/* 'magic' primes */ +static const uint64_t prime_0 = UINT64_C(0xEC99BF0D8372CAAB); +static const uint64_t prime_1 = UINT64_C(0x82434FE90EDCEF39); +static const uint64_t prime_2 = UINT64_C(0xD4F06DB99D67BE4B); +static const uint64_t prime_3 = UINT64_C(0xBD9CACC22C6E9571); +static const uint64_t prime_4 = UINT64_C(0x9C06FAF4D023E3AB); +static const uint64_t prime_5 = UINT64_C(0xC060724A8424F345); +static const uint64_t prime_6 = UINT64_C(0xCB5AF53AE3AAAC31); + +/* xor high and low parts of full 128-bit product */ +static __maybe_unused __always_inline uint64_t mux64(uint64_t v, + uint64_t prime) { + + uint64_t l, h; + l = mul_64x64_128(v, prime, &h); + return l ^ h; + +} + +static __maybe_unused __always_inline uint64_t final64(uint64_t a, uint64_t b) { + + uint64_t x = (a + rot64(b, 41)) * prime_0; + uint64_t y = (rot64(a, 23) + b) * prime_6; + return mux64(x ^ y, prime_5); + +} + +static __maybe_unused __always_inline void mixup64(uint64_t *__restrict a, + uint64_t *__restrict b, + uint64_t v, uint64_t prime) { + + uint64_t h; + *a ^= mul_64x64_128(*b + v, prime, &h); + *b += h; + +} + +/***************************************************************************/ + +typedef union t1ha_uint128 { + +#if defined(__SIZEOF_INT128__) || \ + (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + __uint128_t v; +#endif + struct { + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + uint64_t l, h; +#else + uint64_t h, l; +#endif + + }; + +} t1ha_uint128_t; + +static __maybe_unused __always_inline t1ha_uint128_t +not128(const t1ha_uint128_t v) { + + t1ha_uint128_t r; +#if defined(__SIZEOF_INT128__) || \ + (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + r.v = ~v.v; +#else + r.l = ~v.l; + r.h = ~v.h; +#endif + return r; + +} + +static __maybe_unused __always_inline t1ha_uint128_t +left128(const t1ha_uint128_t v, unsigned s) { + + t1ha_uint128_t r; + assert(s < 128); +#if defined(__SIZEOF_INT128__) || \ + (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + r.v = v.v << s; +#else + r.l = (s < 64) ? v.l << s : 0; + r.h = (s < 64) ? (v.h << s) | (s ? v.l >> (64 - s) : 0) : v.l << (s - 64); +#endif + return r; + +} + +static __maybe_unused __always_inline t1ha_uint128_t +right128(const t1ha_uint128_t v, unsigned s) { + + t1ha_uint128_t r; + assert(s < 128); +#if defined(__SIZEOF_INT128__) || \ + (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + r.v = v.v >> s; +#else + r.l = (s < 64) ? (s ? v.h << (64 - s) : 0) | (v.l >> s) : v.h >> (s - 64); + r.h = (s < 64) ? 
v.h >> s : 0; +#endif + return r; + +} + +static __maybe_unused __always_inline t1ha_uint128_t or128(t1ha_uint128_t x, + t1ha_uint128_t y) { + + t1ha_uint128_t r; +#if defined(__SIZEOF_INT128__) || \ + (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + r.v = x.v | y.v; +#else + r.l = x.l | y.l; + r.h = x.h | y.h; +#endif + return r; + +} + +static __maybe_unused __always_inline t1ha_uint128_t xor128(t1ha_uint128_t x, + t1ha_uint128_t y) { + + t1ha_uint128_t r; +#if defined(__SIZEOF_INT128__) || \ + (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + r.v = x.v ^ y.v; +#else + r.l = x.l ^ y.l; + r.h = x.h ^ y.h; +#endif + return r; + +} + +static __maybe_unused __always_inline t1ha_uint128_t rot128(t1ha_uint128_t v, + unsigned s) { + + s &= 127; +#if defined(__SIZEOF_INT128__) || \ + (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + v.v = (v.v << (128 - s)) | (v.v >> s); + return v; +#else + return s ? or128(left128(v, 128 - s), right128(v, s)) : v; +#endif + +} + +static __maybe_unused __always_inline t1ha_uint128_t add128(t1ha_uint128_t x, + t1ha_uint128_t y) { + + t1ha_uint128_t r; +#if defined(__SIZEOF_INT128__) || \ + (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + r.v = x.v + y.v; +#else + add64carry_last(add64carry_first(x.l, y.l, &r.l), x.h, y.h, &r.h); +#endif + return r; + +} + +static __maybe_unused __always_inline t1ha_uint128_t mul128(t1ha_uint128_t x, + t1ha_uint128_t y) { + + t1ha_uint128_t r; +#if defined(__SIZEOF_INT128__) || \ + (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + r.v = x.v * y.v; +#else + r.l = mul_64x64_128(x.l, y.l, &r.h); + r.h += x.l * y.h + y.l * x.h; +#endif + return r; + +} + +/***************************************************************************/ + +#if T1HA0_AESNI_AVAILABLE && defined(__ia32__) +uint64_t t1ha_ia32cpu_features(void); + +static __maybe_unused __always_inline bool t1ha_ia32_AESNI_avail( + uint64_t ia32cpu_features) { + + /* check for AES-NI */ + return (ia32cpu_features & UINT32_C(0x02000000)) != 0; + +} + +static __maybe_unused __always_inline bool t1ha_ia32_AVX_avail( + uint64_t ia32cpu_features) { + + /* check for any AVX */ + return (ia32cpu_features & UINT32_C(0x1A000000)) == UINT32_C(0x1A000000); + +} + +static __maybe_unused __always_inline bool t1ha_ia32_AVX2_avail( + uint64_t ia32cpu_features) { + + /* check for 'Advanced Vector Extensions 2' */ + return ((ia32cpu_features >> 32) & 32) != 0; + +} + +#endif /* T1HA0_AESNI_AVAILABLE && __ia32__ */ + diff --git a/include/t1ha_selfcheck.h b/include/t1ha_selfcheck.h new file mode 100644 index 00000000..65343bfe --- /dev/null +++ b/include/t1ha_selfcheck.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2016-2020 Positive Technologies, https://www.ptsecurity.com, + * Fast Positive Hash. + * + * Portions Copyright (c) 2010-2020 Leonid Yuriev <leo@yuriev.ru>, + * The 1Hippeus project (t1h). + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. 
If you use this software
+ *    in a product, an acknowledgement in the product documentation would be
+ *    appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ *    misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/*
+ * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" }
+ * by [Positive Technologies](https://www.ptsecurity.ru)
+ *
+ * Briefly, it is a 64-bit Hash Function:
+ *  1. Created for 64-bit little-endian platforms, predominantly for x86_64,
+ *     but portable; it can run on any 64-bit CPU without penalties.
+ *  2. In most cases up to 15% faster than City64, xxHash, mum-hash,
+ *     metro-hash and all other portable hash functions (which do not use
+ *     specific hardware tricks).
+ *  3. Not suitable for cryptography.
+ *
+ * The Future will (be) Positive. Всё будет хорошо.
+ *
+ * ACKNOWLEDGEMENT:
+ * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев)
+ * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta!
+ */
+
+#pragma once
+#if defined(_MSC_VER) && _MSC_VER > 1800
+  #pragma warning(disable : 4464) /* relative include path contains '..' */
+#endif /* MSVC */
+#include "t1ha.h"
+
+/***************************************************************************/
+/* Self-checking */
+
+extern const uint8_t t1ha_test_pattern[64];
+int t1ha_selfcheck(uint64_t (*hash)(const void *, size_t, uint64_t),
+                   const uint64_t *reference_values);
+
+#ifndef T1HA2_DISABLED
+extern const uint64_t t1ha_refval_2atonce[81];
+extern const uint64_t t1ha_refval_2atonce128[81];
+extern const uint64_t t1ha_refval_2stream[81];
+extern const uint64_t t1ha_refval_2stream128[81];
+#endif /* T1HA2_DISABLED */
+
+#ifndef T1HA1_DISABLED
+extern const uint64_t t1ha_refval_64le[81];
+extern const uint64_t t1ha_refval_64be[81];
+#endif /* T1HA1_DISABLED */
+
+#ifndef T1HA0_DISABLED
+extern const uint64_t t1ha_refval_32le[81];
+extern const uint64_t t1ha_refval_32be[81];
+  #if T1HA0_AESNI_AVAILABLE
+extern const uint64_t t1ha_refval_ia32aes_a[81];
+extern const uint64_t t1ha_refval_ia32aes_b[81];
+  #endif /* T1HA0_AESNI_AVAILABLE */
+#endif /* T1HA0_DISABLED */
+
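+/* Editor's note: an illustrative sketch, not from upstream. It shows how the
+ * generic checker above pairs one hash flavor with its reference vector; the
+ * wrapper name is hypothetical.
+ *
+ *   #include "t1ha_selfcheck.h"
+ *
+ *   int check_t1ha2_atonce(void) {
+ *
+ *     // returns 0 on success, -1 if t1ha2_atonce() fails verification
+ *     return t1ha_selfcheck(t1ha2_atonce, t1ha_refval_2atonce);
+ *
+ *   }
+ */
+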
diff --git a/include/types.h b/include/types.h
index d6476d82..cfb2f3d5 100644
--- a/include/types.h
+++ b/include/types.h
@@ -5,12 +5,12 @@
 
    Originally written by Michal Zalewski
 
    Now maintained by Marc Heuse <mh@mh-sec.de>,
-                     Heiko Eißfeldt <heiko.eissfeldt@hexco.de>,
+                     Heiko Eissfeldt <heiko.eissfeldt@hexco.de>,
                      Andrea Fioraldi <andreafioraldi@gmail.com>,
                      Dominik Maier <mail@dmnk.co>
 
   Copyright 2016, 2017 Google Inc. All rights reserved.
-  Copyright 2019-2023 AFLplusplus Project. All rights reserved.
+  Copyright 2019-2024 AFLplusplus Project. All rights reserved.
 
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
@@ -49,6 +49,14 @@ typedef uint128_t u128;
 #define FS_ERROR_OLD_CMPLOG      32
 #define FS_ERROR_OLD_CMPLOG_QEMU 64
 
+/* New Forkserver */
+#define FS_NEW_VERSION_MIN 1
+#define FS_NEW_VERSION_MAX 1
+#define FS_NEW_ERROR 0xeffe0000
+#define FS_NEW_OPT_MAPSIZE 0x00000001      // parameter: 32-bit value
+#define FS_NEW_OPT_SHDMEM_FUZZ 0x00000002  // parameter: none
+#define FS_NEW_OPT_AUTODICT 0x00000800     // autodictionary data
+
 /* Reporting options */
 #define FS_OPT_ENABLED 0x80000001
 #define FS_OPT_MAPSIZE 0x40000000
diff --git a/include/xxhash.h b/include/xxhash.h
index 7bc0a14e..991a8f1e 100644
--- a/include/xxhash.h
+++ b/include/xxhash.h
@@ -32,50 +32,226 @@
  * - xxHash homepage: https://www.xxhash.com
  * - xxHash source repository: https://github.com/Cyan4973/xxHash
  */
+
+/*!
+ * @mainpage xxHash
+ *
+ * xxHash is an extremely fast non-cryptographic hash algorithm, working at
+ * RAM speed limits.
+ *
+ * It is proposed in four flavors, in three families:
+ * 1. @ref XXH32_family
+ *   - Classic 32-bit hash function. Simple, compact, and runs on almost all
+ *     32-bit and 64-bit systems.
+ * 2. @ref XXH64_family
+ *   - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on
+ *     most 64-bit systems (but _not_ 32-bit systems).
+ * 3. @ref XXH3_family
+ *   - Modern 64-bit and 128-bit hash function family which features improved
+ *     strength and performance across the board, especially on smaller data.
+ *     It benefits greatly from SIMD and 64-bit without requiring it.
+ *
+ * Benchmarks
+ * ---
+ * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04.
+ * The open source benchmark program is compiled with clang v10.0 using the
+ * -O3 flag.
+ *
+ * | Hash Name            | ISA ext | Width | Large Data Speed | Small Data Velocity |
+ * | -------------------- | ------- | ----: | ---------------: | ------------------: |
+ * | XXH3_64bits()        | @b AVX2 |    64 |        59.4 GB/s |               133.1 |
+ * | MeowHash             | AES-NI  |   128 |        58.2 GB/s |                52.5 |
+ * | XXH3_128bits()       | @b AVX2 |   128 |        57.9 GB/s |               118.1 |
+ * | CLHash               | PCLMUL  |    64 |        37.1 GB/s |                58.1 |
+ * | XXH3_64bits()        | @b SSE2 |    64 |        31.5 GB/s |               133.1 |
+ * | XXH3_128bits()       | @b SSE2 |   128 |        29.6 GB/s |               118.1 |
+ * | RAM sequential read  |         |   N/A |        28.0 GB/s |                 N/A |
+ * | ahash                | AES-NI  |    64 |        22.5 GB/s |               107.2 |
+ * | City64               |         |    64 |        22.0 GB/s |                76.6 |
+ * | T1ha2                |         |    64 |        22.0 GB/s |                99.0 |
+ * | City128              |         |   128 |        21.7 GB/s |                57.7 |
+ * | FarmHash             | AES-NI  |    64 |        21.3 GB/s |                71.9 |
+ * | XXH64()              |         |    64 |        19.4 GB/s |                71.0 |
+ * | SpookyHash           |         |    64 |        19.3 GB/s |                53.2 |
+ * | Mum                  |         |    64 |        18.0 GB/s |                67.0 |
+ * | CRC32C               | SSE4.2  |    32 |        13.0 GB/s |                57.9 |
+ * | XXH32()              |         |    32 |         9.7 GB/s |                71.9 |
+ * | City32               |         |    32 |         9.1 GB/s |                66.0 |
+ * | Blake3*              | @b AVX2 |   256 |         4.4 GB/s |                 8.1 |
+ * | Murmur3              |         |    32 |         3.9 GB/s |                56.1 |
+ * | SipHash*             |         |    64 |         3.0 GB/s |                43.2 |
+ * | Blake3*              | @b SSE2 |   256 |         2.4 GB/s |                 8.1 |
+ * | HighwayHash          |         |    64 |         1.4 GB/s |                 6.0 |
+ * | FNV64                |         |    64 |         1.2 GB/s |                62.7 |
+ * | Blake2*              |         |   256 |         1.1 GB/s |                 5.1 |
+ * | SHA1*                |         |   160 |         0.8 GB/s |                 5.6 |
+ * | MD5*                 |         |   128 |         0.6 GB/s |                 7.8 |
+ * @note
+ *   - Hashes which require a specific ISA extension are noted. SSE2 is also
+ *     noted, even though it is mandatory on x64.
+ *   - Hashes with an asterisk are cryptographic. Note that MD5 is
+ *     non-cryptographic by modern standards.
+ *   - Small data velocity is a rough average of algorithm's efficiency for
+ *     small data. For more accurate information, see the wiki.
+ *   - More benchmarks and strength tests are found on the wiki:
+ *     https://github.com/Cyan4973/xxHash/wiki
+ *
+ * Usage
+ * ------
+ * All xxHash variants use a similar API. Changing the algorithm is a trivial
+ * substitution.
+ *
+ * @pre
+ * For functions which take an input and length parameter, the following
+ * requirements are assumed:
+ *   - The range from [`input`, `input + length`) is valid, readable memory.
+ *     - The only exception is if the `length` is `0`, `input` may be `NULL`.
+ *   - For C++, the objects must have the *TriviallyCopyable* property, as the
+ *     functions access bytes directly as if it was an array of `unsigned
+ *     char`.
+ *
+ * @anchor single_shot_example
+ * **Single Shot**
+ *
+ * These functions are stateless functions which hash a contiguous block of
+ * memory, immediately returning the result. They are the easiest and usually
+ * the fastest option.
+ *
+ * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
+ *
+ * @code{.c}
+ * #include <string.h>
+ * #include "xxhash.h"
+ *
+ * // Example for a function which hashes a null terminated string with
+ * // XXH32().
+ * XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
+ * {
+ *     // NULL pointers are only valid if the length is zero
+ *     size_t length = (string == NULL) ? 0 : strlen(string);
+ *     return XXH32(string, length, seed);
+ * }
+ * @endcode
+ *
+ *
+ * @anchor streaming_example
+ * **Streaming**
+ *
+ * These groups of functions allow incremental hashing of unknown size, even
+ * more than what would fit in a size_t.
+ *
+ * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
+ *
+ * @code{.c}
+ * #include <stdio.h>
+ * #include <assert.h>
+ * #include "xxhash.h"
+ *
+ * // Example for a function which hashes a FILE incrementally with
+ * // XXH3_64bits().
+ * XXH64_hash_t hashFile(FILE* f)
+ * {
+ *     // Allocate a state struct. Do not just use malloc() or new.
+ *     XXH3_state_t* state = XXH3_createState();
+ *     assert(state != NULL && "Out of memory!");
+ *
+ *     // Reset the state to start a new hashing session.
+ *     XXH3_64bits_reset(state);
+ *
+ *     char buffer[4096];
+ *     size_t count;
+ *     // Read the file in chunks
+ *     while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
+ *         // Run update() as many times as necessary to process the data
+ *         XXH3_64bits_update(state, buffer, count);
+ *     }
+ *
+ *     // Retrieve the finalized hash. This will not change the state.
+ *     XXH64_hash_t result = XXH3_64bits_digest(state);
+ *
+ *     // Free the state. Do not use free().
+ *     XXH3_freeState(state);
+ *
+ *     return result;
+ * }
+ * @endcode
+ *
+ * Streaming functions generate the xxHash value from an incremental input.
+ * This method is slower than single-call functions, due to state management.
+ * For small inputs, prefer `XXH32()` and `XXH64()`, which are better
+ * optimized.
+ *
+ * An XXH state must first be allocated using `XXH*_createState()`.
+ *
+ * Start a new hash by initializing the state with a seed using
+ * `XXH*_reset()`.
+ *
+ * Then, feed the hash state by calling `XXH*_update()` as many times as
+ * necessary.
+ *
+ * The function returns an error code, with 0 meaning OK, and any other value
+ * meaning there is an error.
+ *
+ * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
+ * This function returns the nn-bits hash as an int or long long.
+ *
+ * It's still possible to continue inserting input into the hash state after
+ * a digest, and generate new hash values later on by invoking
+ * `XXH*_digest()`.
+ *
+ * When done, release the state using `XXH*_freeState()`.
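+ *
+ * (Editor's note: the following sketch is an illustrative addition, not from
+ * the upstream xxHash documentation; the function name is hypothetical. It
+ * demonstrates the mid-stream digest behavior described above.)
+ *
+ * @code{.c}
+ * #include <assert.h>
+ * #include "xxhash.h"
+ *
+ * // Digest mid-stream, then keep feeding the same state.
+ * void rolling_digest(const void* a, size_t na, const void* b, size_t nb)
+ * {
+ *     XXH3_state_t* state = XXH3_createState();
+ *     assert(state != NULL);
+ *     XXH3_64bits_reset(state);
+ *     XXH3_64bits_update(state, a, na);
+ *     XXH64_hash_t h1 = XXH3_64bits_digest(state);  // hash of a
+ *     XXH3_64bits_update(state, b, nb);
+ *     XXH64_hash_t h2 = XXH3_64bits_digest(state);  // hash of a||b
+ *     (void)h1; (void)h2;
+ *     XXH3_freeState(state);
+ * }
+ * @endcode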
+ *
+ *
+ * @anchor canonical_representation_example
+ * **Canonical Representation**
+ *
+ * The default return values from XXH functions are unsigned 32, 64 and 128 bit
+ * integers.
+ * This is the simplest and fastest format for further post-processing.
+ *
+ * However, this leaves open the question of what is the order on the byte
+ * level, since little and big endian conventions will store the same number
+ * differently.
+ *
+ * The canonical representation settles this issue by mandating big-endian
+ * convention, the same convention as human-readable numbers (large digits
+ * first).
+ *
+ * When writing hash values to storage, sending them over a network, or printing
+ * them, it's highly recommended to use the canonical representation to ensure
+ * portability across a wider range of systems, present and future.
+ *
+ * The following functions allow transformation of hash values to and from
+ * canonical format.
+ *
+ * XXH32_canonicalFromHash(), XXH32_hashFromCanonical(),
+ * XXH64_canonicalFromHash(), XXH64_hashFromCanonical(),
+ * XXH128_canonicalFromHash(), XXH128_hashFromCanonical(),
+ *
+ * @code{.c}
+ *   #include <stdio.h>
+ *   #include "xxhash.h"
+ *
+ *   // Example for a function which prints XXH32_hash_t in human readable
+ *   // format.
+ *   void printXxh32(XXH32_hash_t hash)
+ *   {
+ *       XXH32_canonical_t cano;
+ *       XXH32_canonicalFromHash(&cano, hash);
+ *       size_t i;
+ *       for(i = 0; i < sizeof(cano.digest); ++i) {
+ *           printf("%02x", cano.digest[i]);
+ *       }
+ *       printf("\n");
+ *   }
+ *
+ *   // Example for a function which converts XXH32_canonical_t to XXH32_hash_t
+ *   XXH32_hash_t convertCanonicalToXxh32(XXH32_canonical_t cano)
+ *   {
+ *       XXH32_hash_t hash = XXH32_hashFromCanonical(&cano);
+ *       return hash;
+ *   }
+ * @endcode
+ *
+ *
+ * @file xxhash.h
+ * xxHash prototypes and implementation
+ */
-/* TODO: update */
-/* Notice extracted from xxHash homepage:
-
-xxHash is an extremely fast hash algorithm, running at RAM speed limits.
-It also successfully passes all tests from the SMHasher suite.
-
-Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo
-@3GHz)
-
-Name Speed Q.Score Author
-xxHash 5.4 GB/s 10
-CrapWow 3.2 GB/s 2 Andrew
-MurmurHash 3a 2.7 GB/s 10 Austin Appleby
-SpookyHash 2.0 GB/s 10 Bob Jenkins
-SBox 1.4 GB/s 9 Bret Mulvey
-Lookup3 1.2 GB/s 9 Bob Jenkins
-SuperFastHash 1.2 GB/s 1 Paul Hsieh
-CityHash64 1.05 GB/s 10 Pike & Alakuijala
-FNV 0.55 GB/s 5 Fowler, Noll, Vo
-CRC32 0.43 GB/s 9
-MD5-32 0.33 GB/s 10 Ronald L. Rivest
-SHA1-32 0.28 GB/s 10
-
-Q.Score is a measure of quality of the hash function.
-It depends on successfully passing SMHasher test set.
-10 is a perfect score.
-
-Note: SMHasher's CRC32 implementation is not the fastest one.
-Other speed-oriented implementations can be faster,
-especially in combination with PCLMUL instruction:
-https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735
-
-A 64-bit version, named XXH64, is available since r35.
-It offers much better speed, but for 64-bit applications only.
-Name Speed on 64 bits Speed on 32 bits
-XXH64 13.8 GB/s 1.9 GB/s
-XXH32 6.8 GB/s 6.0 GB/s
-*/
 
 #if defined(__cplusplus)
extern "C" {
 
@@ -86,21 +262,82 @@ extern "C" {
 * INLINE mode
 ******************************/
 /*!
- * XXH_INLINE_ALL (and XXH_PRIVATE_API)
- * Use these build macros to inline xxhash into the target unit.
- * Inlining improves performance on small inputs, especially when the length is - * expressed as a compile-time constant: - * - * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html - * - * It also keeps xxHash symbols private to the unit, so they are not exported. - * - * Usage: - * #define XXH_INLINE_ALL - * #include "xxhash.h" - * - * Do not compile and link xxhash.o as a separate object, as it is not useful. + * @defgroup public Public API + * Contains details on the public xxHash functions. + * @{ + */ +#ifdef XXH_DOXYGEN + /*! + * @brief Gives access to internal state declaration, required for static + * allocation. + * + * Incompatible with dynamic linking, due to risks of ABI changes. + * + * Usage: + * @code{.c} + * #define XXH_STATIC_LINKING_ONLY + * #include "xxhash.h" + * @endcode + */ + #define XXH_STATIC_LINKING_ONLY + /* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */ + + /*! + * @brief Gives access to internal definitions. + * + * Usage: + * @code{.c} + * #define XXH_STATIC_LINKING_ONLY + * #define XXH_IMPLEMENTATION + * #include "xxhash.h" + * @endcode + */ + #define XXH_IMPLEMENTATION + /* Do not undef XXH_IMPLEMENTATION for Doxygen */ + + /*! + * @brief Exposes the implementation and marks all functions as `inline`. + * + * Use these build macros to inline xxhash into the target unit. + * Inlining improves performance on small inputs, especially when the length + * is expressed as a compile-time constant: + * + * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html + * + * It also keeps xxHash symbols private to the unit, so they are not exported. + * + * Usage: + * @code{.c} + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * @endcode + * Do not compile and link xxhash.o as a separate object, as it is not useful. + */ + #define XXH_INLINE_ALL + #undef XXH_INLINE_ALL + /*! + * @brief Exposes the implementation without marking functions as inline. + */ + #define XXH_PRIVATE_API + #undef XXH_PRIVATE_API + /*! + * @brief Emulate a namespace by transparently prefixing all symbols. + * + * If you want to include _and expose_ xxHash functions from within your own + * library, but also want to avoid symbol collisions with other libraries + * which may also include xxHash, you can use @ref XXH_NAMESPACE to + * automatically prefix any public symbol from xxhash library with the value + * of @ref XXH_NAMESPACE (therefore, avoid empty or numeric values). + * + * Note that no change is required within the calling program as long as it + * includes `xxhash.h`: Regular symbol names will be automatically translated + * by this header. + */ + #define XXH_NAMESPACE /* YOUR NAME HERE */ + #undef XXH_NAMESPACE +#endif + #if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) && \ !defined(XXH_INLINE_ALL_31684351384) /* this section should be traversed only once */ @@ -160,6 +397,7 @@ extern "C" { #undef XXH3_64bits #undef XXH3_64bits_withSecret #undef XXH3_64bits_withSeed + #undef XXH3_64bits_withSecretandSeed #undef XXH3_createState #undef XXH3_freeState #undef XXH3_copyState @@ -177,6 +415,7 @@ extern "C" { #undef XXH3_128bits_reset #undef XXH3_128bits_reset_withSeed #undef XXH3_128bits_reset_withSecret + #undef XXH3_128bits_reset_withSecretandSeed #undef XXH3_128bits_update #undef XXH3_128bits_digest #undef XXH128_isEqual @@ -220,13 +459,7 @@ extern "C" { #ifndef XXHASH_H_5627135585666179 #define XXHASH_H_5627135585666179 1 - /*! 
- * @defgroup public Public API - * Contains details on the public xxHash functions. - * @{ - - */ - /* specific declaration modes for Windows */ + /*! @brief Marks a global symbol. */ #if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) #if defined(WIN32) && defined(_MSC_VER) && \ (defined(XXH_IMPORT) || defined(XXH_EXPORT)) @@ -240,24 +473,6 @@ extern "C" { #endif #endif - #ifdef XXH_DOXYGEN - /*! - * @brief Emulate a namespace by transparently prefixing all symbols. - * - * If you want to include _and expose_ xxHash functions from within your own - * library, but also want to avoid symbol collisions with other libraries - * which may also include xxHash, you can use XXH_NAMESPACE to automatically - * prefix any public symbol from xxhash library with the value of - * XXH_NAMESPACE (therefore, avoid empty or numeric values). - * - * Note that no change is required within the calling program as long as it - * includes `xxhash.h`: Regular symbol names will be automatically - * translated by this header. - */ - #define XXH_NAMESPACE /* YOUR NAME HERE */ - #undef XXH_NAMESPACE - #endif - #ifdef XXH_NAMESPACE #define XXH_CAT(A, B) A##B #define XXH_NAME2(A, B) XXH_CAT(A, B) @@ -291,6 +506,8 @@ extern "C" { #define XXH3_64bits_withSecret \ XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) #define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) + #define XXH3_64bits_withSecretandSeed \ + XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed) #define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) #define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) #define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) @@ -299,9 +516,13 @@ extern "C" { XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) #define XXH3_64bits_reset_withSecret \ XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) + #define XXH3_64bits_reset_withSecretandSeed \ + XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed) #define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) #define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) #define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret) + #define XXH3_generateSecret_fromSeed \ + XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed) /* XXH3_128bits */ #define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) #define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) @@ -309,11 +530,15 @@ extern "C" { XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) #define XXH3_128bits_withSecret \ XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret) + #define XXH3_128bits_withSecretandSeed \ + XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed) #define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) #define XXH3_128bits_reset_withSeed \ XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) #define XXH3_128bits_reset_withSecret \ XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret) + #define XXH3_128bits_reset_withSecretandSeed \ + XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed) #define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update) #define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest) #define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual) @@ -325,11 +550,40 @@ extern "C" { #endif /* ************************************* + * Compiler specifics + ***************************************/ + + /* specific declaration modes for Windows */ + #if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) + #if defined(WIN32) && 
defined(_MSC_VER) && \
+        (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+      #ifdef XXH_EXPORT
+        #define XXH_PUBLIC_API __declspec(dllexport)
+      #elif XXH_IMPORT
+        #define XXH_PUBLIC_API __declspec(dllimport)
+      #endif
+    #else
+      #define XXH_PUBLIC_API /* do nothing */
+    #endif
+  #endif
+
+  #if defined(__GNUC__)
+    #define XXH_CONSTF __attribute__((const))
+    #define XXH_PUREF __attribute__((pure))
+    #define XXH_MALLOCF __attribute__((malloc))
+  #else
+    #define XXH_CONSTF /* disable */
+    #define XXH_PUREF
+    #define XXH_MALLOCF
+  #endif
+
+  /* *************************************
   * Version
   ***************************************/
  #define XXH_VERSION_MAJOR 0
  #define XXH_VERSION_MINOR 8
-  #define XXH_VERSION_RELEASE 1
+  #define XXH_VERSION_RELEASE 2
+  /*! @brief Version number, encoded as two digits each */
  #define XXH_VERSION_NUMBER \
    (XXH_VERSION_MAJOR * 100 * 100 + XXH_VERSION_MINOR * 100 + \
     XXH_VERSION_RELEASE)
@@ -337,18 +591,26 @@ extern "C" {
/*!
 * @brief Obtains the xxHash version.
 *
- * This is only useful when xxHash is compiled as a shared library, as it is
- * independent of the version defined in the header.
+ * This is mostly useful when xxHash is compiled as a shared library,
+ * since the returned value comes from the library, as opposed to the header
+ * file.
 *
- * @return `XXH_VERSION_NUMBER` as of when the libray was compiled.
+ * @return @ref XXH_VERSION_NUMBER of the invoked library.
 */
-XXH_PUBLIC_API unsigned XXH_versionNumber(void);
+XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber(void);

/* ****************************
- * Definitions
+ * Common basic types
 ******************************/
 #include <stddef.h> /* size_t */
-typedef enum { XXH_OK = 0, XXH_ERROR } XXH_errorcode;
+/*!
+ * @brief Exit code for the streaming API.
+ */
+typedef enum {

+  XXH_OK = 0, /*!< OK */
+  XXH_ERROR   /*!< Error */

+} XXH_errorcode;

/*-**********************************************************************
 * 32-bit hash
 ***********************************************************************/
@@ -365,36 +627,33 @@ typedef uint32_t XXH32_hash_t;
         (defined(__cplusplus) || \
          (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */))
    #include <stdint.h>
-typedef uint32_t XXH32_hash_t;
+typedef uint32_t XXH32_hash_t;
  #else
    #include <limits.h>
    #if UINT_MAX == 0xFFFFFFFFUL
typedef unsigned int XXH32_hash_t;
-    #else
-      #if ULONG_MAX == 0xFFFFFFFFUL
+    #elif ULONG_MAX == 0xFFFFFFFFUL
typedef unsigned long XXH32_hash_t;
-      #else
-        #error "unsupported platform: need a 32-bit type"
-      #endif
+    #else
+      #error "unsupported platform: need a 32-bit type"
    #endif
  #endif

/*!
 * @}
 *
- * @defgroup xxh32_family XXH32 family
+ * @defgroup XXH32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
- *   XXH32 is considered rather weak by today's standards.
- *   The @ref xxh3_family provides competitive speed for both 32-bit and 64-bit
- *   systems, and offers true 64/128 bit hash results. It provides a superior
- *   level of dispersion, and greatly reduces the risks of collisions.
+ *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
+ *   Note that the @ref XXH3_family provides competitive speed for both 32-bit
+ *   and 64-bit systems, and offers true 64/128 bit hash results.
 *
- * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families
- * @see @ref xxh32_impl for implementation details
+ * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families
+ * @see @ref XXH32_impl for implementation details
 * @{

 */

@@ -402,8 +661,6 @@ typedef unsigned long XXH32_hash_t;
 /*!
* @brief Calculates the 32-bit hash of @p input using xxHash32. * - * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s - * * @param input The block of data to be hashed, at least @p length bytes in * size. * @param length The length of @p input, in bytes. @@ -414,92 +671,46 @@ typedef unsigned long XXH32_hash_t; * readable, contiguous memory. However, if @p length is `0`, @p input may be * `NULL`. In C++, this also must be *TriviallyCopyable*. * - * @return The calculated 32-bit hash value. - * - * @see - * XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): - * Direct equivalents for the other variants of xxHash. - * @see - * XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version. - */ -XXH_PUBLIC_API XXH32_hash_t XXH32(const void *input, size_t length, - XXH32_hash_t seed); - -/*! - * Streaming functions generate the xxHash value from an incremental input. - * This method is slower than single-call functions, due to state management. - * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. - * - * An XXH state must first be allocated using `XXH*_createState()`. - * - * Start a new hash by initializing the state with a seed using `XXH*_reset()`. - * - * Then, feed the hash state by calling `XXH*_update()` as many times as - * necessary. - * - * The function returns an error code, with 0 meaning OK, and any other value - * meaning there is an error. - * - * Finally, a hash value can be produced anytime, by using `XXH*_digest()`. - * This function returns the nn-bits hash as an int or long long. - * - * It's still possible to continue inserting input into the hash state after a - * digest, and generate new hash values later on by invoking `XXH*_digest()`. - * - * When done, release the state using `XXH*_freeState()`. - * - * Example code for incrementally hashing a file: - * @code{.c} - * #include <stdio.h> - * #include <xxhash.h> - * #define BUFFER_SIZE 256 - * - * // Note: XXH64 and XXH3 use the same interface. - * XXH32_hash_t - * hashFile(FILE* stream) - * { - - * XXH32_state_t* state; - * unsigned char buf[BUFFER_SIZE]; - * size_t amt; - * XXH32_hash_t hash; + * @return The calculated 32-bit xxHash32 value. * - * state = XXH32_createState(); // Create a state - * assert(state != NULL); // Error check here - * XXH32_reset(state, 0xbaad5eed); // Reset state with our seed - * while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) { - - * XXH32_update(state, buf, amt); // Hash the file in chunks - * } - * hash = XXH32_digest(state); // Finalize the hash - * XXH32_freeState(state); // Clean up - * return hash; - * } - * @endcode + * @see @ref single_shot_example "Single Shot Example" for an example. */ +XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32(const void *input, size_t length, + XXH32_hash_t seed); + #ifndef XXH_NO_STREAM /*! * @typedef struct XXH32_state_s XXH32_state_t * @brief The opaque state struct for the XXH32 streaming API. * * @see XXH32_state_s for details. + * @see @ref streaming_example "Streaming Example" */ typedef struct XXH32_state_s XXH32_state_t; /*! * @brief Allocates an @ref XXH32_state_t. * - * Must be freed with XXH32_freeState(). - * @return An allocated XXH32_state_t on success, `NULL` on failure. + * @return An allocated pointer of @ref XXH32_state_t on success. + * @return `NULL` on failure. + * + * @note Must be freed with XXH32_freeState(). 
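+ *
+ * A minimal allocation sketch (the error handling shown is illustrative):
+ *
+ * @code{.c}
+ *   XXH32_state_t* const state = XXH32_createState();
+ *   assert(state != NULL && "Out of memory!");
+ *   // ... XXH32_reset(), XXH32_update(), XXH32_digest() ...
+ *   XXH32_freeState(state);
+ * @endcode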
+ * + * @see @ref streaming_example "Streaming Example" */ -XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void); +XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t *XXH32_createState(void); /*! * @brief Frees an @ref XXH32_state_t. * - * Must be allocated with XXH32_createState(). * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref * XXH32_createState(). - * @return XXH_OK. + * + * @return @ref XXH_OK. + * + * @note @p statePtr must be allocated with XXH32_createState(). + * + * @see @ref streaming_example "Streaming Example" + * */ XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr); /*! @@ -516,15 +727,19 @@ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dst_state, /*! * @brief Resets an @ref XXH32_state_t to begin a new hash. * - * This function resets and seeds a state. Call it before @ref XXH32_update(). - * * @param statePtr The state struct to reset. * @param seed The 32-bit seed to alter the hash result predictably. * * @pre * @p statePtr must not be `NULL`. * - * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note This function resets and seeds a state. Call it before @ref + * XXH32_update(). + * + * @see @ref streaming_example "Streaming Example" */ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr, XXH32_hash_t seed); @@ -532,8 +747,6 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr, /*! * @brief Consumes a block of @p input to an @ref XXH32_state_t. * - * Call this to incrementally consume blocks of data. - * * @param statePtr The state struct to update. * @param input The block of data to be hashed, at least @p length bytes in * size. @@ -546,7 +759,12 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr, * readable, contiguous memory. However, if @p length is `0`, @p input may be * `NULL`. In C++, this also must be *TriviallyCopyable*. * - * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note Call this to incrementally consume blocks of data. + * + * @see @ref streaming_example "Streaming Example" */ XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *statePtr, const void *input, size_t length); @@ -554,41 +772,24 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *statePtr, /*! * @brief Returns the calculated hash value from an @ref XXH32_state_t. * - * @note - * Calling XXH32_digest() will not affect @p statePtr, so you can update, - * digest, and update again. - * * @param statePtr The state struct to calculate the hash from. * * @pre * @p statePtr must not be `NULL`. * - * @return The calculated xxHash32 value from that state. - */ -XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t *statePtr); - -/******* Canonical representation *******/ - -/* - * The default return values from XXH functions are unsigned 32 and 64 bit - * integers. - * This the simplest and fastest format for further post-processing. + * @return The calculated 32-bit xxHash32 value from that state. * - * However, this leaves open the question of what is the order on the byte - * level, since little and big endian conventions will store the same number - * differently. - * - * The canonical representation settles this issue by mandating big-endian - * convention, the same convention as human-readable numbers (large digits - * first). 
- *
- * When writing hash values to storage, sending them over a network, or printing
- * them, it's highly recommended to use the canonical representation to ensure
- * portability across a wider range of systems, present and future.
+ * @note
+ * Calling XXH32_digest() will not affect @p statePtr, so you can update,
+ * digest, and update again.
 *
- * The following functions allow transformation of hash values to and from
- * canonical format.
+ * @see @ref streaming_example "Streaming Example"
 */
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t
+XXH32_digest(const XXH32_state_t *statePtr);
+  #endif /* !XXH_NO_STREAM */
+
+/******* Canonical representation *******/

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
@@ -602,11 +803,13 @@ typedef struct {
/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
- * @param dst The @ref XXH32_canonical_t pointer to be stored to.
+ * @param dst  The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
+ *
+ * @see @ref canonical_representation_example "Canonical Representation Example"
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst,
                                            XXH32_hash_t hash);
@@ -620,45 +823,75 @@ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst,
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
+ *
+ * @see @ref canonical_representation_example "Canonical Representation Example"
 */
-XXH_PUBLIC_API XXH32_hash_t
+XXH_PUBLIC_API XXH_PUREF XXH32_hash_t
XXH32_hashFromCanonical(const XXH32_canonical_t *src);

+  /*! @cond Doxygen ignores this part */
  #ifdef __has_attribute
    #define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
  #else
    #define XXH_HAS_ATTRIBUTE(x) 0
  #endif
+  /*! @endcond */
+
+  /*! @cond Doxygen ignores this part */
+  /*
+   * C23 __STDC_VERSION__ number hasn't been specified yet. For now
+   * leave as `201711L` (C17 + 1).
+   * TODO: Update to the correct value when it's been specified.
+   */
+  #define XXH_C23_VN 201711L
+  /*! @endcond */

+  /*! @cond Doxygen ignores this part */
  /* C-language Attributes are added in C23. */
-  #if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && \
+  #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && \
      defined(__has_c_attribute)
    #define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
  #else
    #define XXH_HAS_C_ATTRIBUTE(x) 0
  #endif
+  /*! @endcond */

+  /*! @cond Doxygen ignores this part */
  #if defined(__cplusplus) && defined(__has_cpp_attribute)
    #define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
  #else
    #define XXH_HAS_CPP_ATTRIBUTE(x) 0
  #endif
+  /*! @endcond */

+  /*! @cond Doxygen ignores this part */
  /*
- Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough'
- attribute introduced in CPP17 and C23. CPP17 :
- https://en.cppreference.com/w/cpp/language/attributes/fallthrough C23 :
- https://en.cppreference.com/w/c/language/attributes/fallthrough
- */
-  #if XXH_HAS_C_ATTRIBUTE(x)
-    #define XXH_FALLTHROUGH [[fallthrough]]
-  #elif XXH_HAS_CPP_ATTRIBUTE(x)
+ * Define XXH_FALLTHROUGH macro for annotating switch case with the
+ * 'fallthrough' attribute introduced in CPP17 and C23.
CPP17 : + * https://en.cppreference.com/w/cpp/language/attributes/fallthrough C23 : + * https://en.cppreference.com/w/c/language/attributes/fallthrough + */ + #if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough) #define XXH_FALLTHROUGH [[fallthrough]] #elif XXH_HAS_ATTRIBUTE(__fallthrough__) - #define XXH_FALLTHROUGH __attribute__((fallthrough)) + #define XXH_FALLTHROUGH __attribute__((__fallthrough__)) #else - #define XXH_FALLTHROUGH + #define XXH_FALLTHROUGH /* fallthrough */ #endif + /*! @endcond */ + + /*! @cond Doxygen ignores this part */ + /* + * Define XXH_NOESCAPE for annotated pointers in public API. + * https://clang.llvm.org/docs/AttributeReference.html#noescape + * As of writing this, only supported by clang. + */ + #if XXH_HAS_ATTRIBUTE(noescape) + #define XXH_NOESCAPE __attribute__((noescape)) + #else + #define XXH_NOESCAPE + #endif +/*! @endcond */ /*! * @} @@ -697,7 +930,7 @@ typedef unsigned long long XXH64_hash_t; /*! * @} * - * @defgroup xxh64_family XXH64 family + * @defgroup XXH64_family XXH64 family * @ingroup public * @{ @@ -705,16 +938,13 @@ typedef unsigned long long XXH64_hash_t; * * @note * XXH3 provides competitive speed for both 32-bit and 64-bit systems, - * and offers true 64/128 bit hash results. It provides a superior level of - * dispersion, and greatly reduces the risks of collisions. + * and offers true 64/128 bit hash results. + * It provides better speed for systems with vector processing capabilities. */ /*! * @brief Calculates the 64-bit hash of @p input using xxHash64. * - * This function usually runs faster on 64-bit systems, but slower on 32-bit - * systems (see benchmark). - * * @param input The block of data to be hashed, at least @p length bytes in * size. * @param length The length of @p input, in bytes. @@ -725,51 +955,171 @@ typedef unsigned long long XXH64_hash_t; * readable, contiguous memory. However, if @p length is `0`, @p input may be * `NULL`. In C++, this also must be *TriviallyCopyable*. * - * @return The calculated 64-bit hash. + * @return The calculated 64-bit xxHash64 value. * - * @see - * XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): - * Direct equivalents for the other variants of xxHash. - * @see - * XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version. + * @see @ref single_shot_example "Single Shot Example" for an example. */ -XXH_PUBLIC_API XXH64_hash_t XXH64(const void *input, size_t length, - XXH64_hash_t seed); +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void *input, + size_t length, XXH64_hash_t seed); -/******* Streaming *******/ + /******* Streaming *******/ + #ifndef XXH_NO_STREAM /*! * @brief The opaque state struct for the XXH64 streaming API. * * @see XXH64_state_s for details. + * @see @ref streaming_example "Streaming Example" */ -typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ -XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr); -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dst_state, - const XXH64_state_t *src_state); +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ -XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr, - XXH64_hash_t seed); -XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *statePtr, - const void *input, size_t length); -XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t *statePtr); +/*! + * @brief Allocates an @ref XXH64_state_t. 
+ * + * @return An allocated pointer of @ref XXH64_state_t on success. + * @return `NULL` on failure. + * + * @note Must be freed with XXH64_freeState(). + * + * @see @ref streaming_example "Streaming Example" + */ +XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t *XXH64_createState(void); + +/*! + * @brief Frees an @ref XXH64_state_t. + * + * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref + * XXH64_createState(). + * + * @return @ref XXH_OK. + * + * @note @p statePtr must be allocated with XXH64_createState(). + * + * @see @ref streaming_example "Streaming Example" + */ +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr); + +/*! + * @brief Copies one @ref XXH64_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ +XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t *dst_state, + const XXH64_state_t *src_state); +/*! + * @brief Resets an @ref XXH64_state_t to begin a new hash. + * + * @param statePtr The state struct to reset. + * @param seed The 64-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note This function resets and seeds a state. Call it before @ref + * XXH64_update(). + * + * @see @ref streaming_example "Streaming Example" + */ +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t *statePtr, + XXH64_hash_t seed); + +/*! + * @brief Consumes a block of @p input to an @ref XXH64_state_t. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in + * size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note Call this to incrementally consume blocks of data. + * + * @see @ref streaming_example "Streaming Example" + */ +XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH_NOESCAPE XXH64_state_t *statePtr, + XXH_NOESCAPE const void *input, + size_t length); + +/*! + * @brief Returns the calculated hash value from an @ref XXH64_state_t. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated 64-bit xxHash64 value from that state. + * + * @note + * Calling XXH64_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + * + * @see @ref streaming_example "Streaming Example" + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t +XXH64_digest(XXH_NOESCAPE const XXH64_state_t *statePtr); + #endif /* !XXH_NO_STREAM */ /******* Canonical representation *******/ + +/*! + * @brief Canonical (big endian) representation of @ref XXH64_hash_t. + */ typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t; -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst, - XXH64_hash_t hash); -XXH_PUBLIC_API XXH64_hash_t -XXH64_hashFromCanonical(const XXH64_canonical_t *src); +/*! + * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t. 
+ * + * @param dst The @ref XXH64_canonical_t pointer to be stored to. + * @param hash The @ref XXH64_hash_t to be converted. + * + * @pre + * @p dst must not be `NULL`. + * + * @see @ref canonical_representation_example "Canonical Representation Example" + */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t *dst, + XXH64_hash_t hash); + +/*! + * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t. + * + * @param src The @ref XXH64_canonical_t to convert. + * + * @pre + * @p src must not be `NULL`. + * + * @return The converted hash. + * + * @see @ref canonical_representation_example "Canonical Representation Example" + */ +XXH_PUBLIC_API XXH_PUREF XXH64_hash_t +XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t *src); + + #ifndef XXH_NO_XXH3 /*! * @} * ************************************************************************ - * @defgroup xxh3_family XXH3 family + * @defgroup XXH3_family XXH3 family * @ingroup public * @{ @@ -790,16 +1140,27 @@ XXH64_hashFromCanonical(const XXH64_canonical_t *src); * * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic, * but does not require it. - * Any 32-bit and 64-bit targets that can run XXH32 smoothly - * can run XXH3 at competitive speeds, even without vector support. - * Further details are explained in the implementation. - * - * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8, - * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro. + * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3 + * at competitive speeds, even without vector support. Further details are + * explained in the implementation. + * + * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD + * implementations for many common platforms: + * - AVX512 + * - AVX2 + * - SSE2 + * - ARM NEON + * - WebAssembly SIMD128 + * - POWER8 VSX + * - s390x ZVector + * This can be controlled via the @ref XXH_VECTOR macro, but it automatically + * selects the best version according to predefined macros. For the x86 family, + * an automatic runtime dispatcher is included separately in @ref + * xxh_x86dispatch.c. * * XXH3 implementation is portable: * it has a generic C90 formulation that can be compiled on any platform, - * all implementations generage exactly the same hash value on all platforms. + * all implementations generate exactly the same hash value on all platforms. * Starting from v0.8.0, it's also labelled "stable", meaning that * any future version will also generate the same hash value. * @@ -811,53 +1172,112 @@ XXH64_hashFromCanonical(const XXH64_canonical_t *src); * * The API supports one-shot hashing, streaming mode, and custom secrets. */ - /*-********************************************************************** * XXH3 64-bit variant ************************************************************************/ -/* XXH3_64bits(): - * default 64-bit variant, using default secret and default seed of 0. - * It's the fastest variant. */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void *data, size_t len); +/*! + * @brief Calculates 64-bit unseeded variant of XXH3 hash of @p input. + * + * @param input The block of data to be hashed, at least @p length bytes in + * size. + * @param length The length of @p input, in bytes. + * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. 
In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 64-bit XXH3 hash value.
+ *
+ * @note
+ *   This is equivalent to @ref XXH3_64bits_withSeed() with a seed of `0`;
+ *   however, it may have slightly better performance due to constant
+ *   propagation of the defaults.
+ *
+ * @see
+ *    XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
+XXH3_64bits(XXH_NOESCAPE const void *input, size_t length);

-/*
- * XXH3_64bits_withSeed():
- * This variant generates a custom secret on the fly
- * based on default secret altered using the `seed` value.
+/*!
+ * @brief Calculates 64-bit seeded variant of XXH3 hash of @p input.
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in
+ * size.
+ * @param length The length of @p input, in bytes.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ *   The memory between @p input and @p input + @p length must be valid,
+ *   readable, contiguous memory. However, if @p length is `0`, @p input may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 64-bit XXH3 hash value.
+ *
+ * @note
+ *   seed == 0 produces the same results as @ref XXH3_64bits().
+ *
+ * This variant generates a custom secret on the fly based on default secret
+ * altered using the @p seed value.
+ *
 * While this operation is decently fast, note that it's not completely free.
- * Note: seed==0 produces the same results as XXH3_64bits().
+ *
+ * @see @ref single_shot_example "Single Shot Example" for an example.
 */
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void *data, size_t len,
-                                                 XXH64_hash_t seed);
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(
+    XXH_NOESCAPE const void *input, size_t length, XXH64_hash_t seed);

-  /*!
-   * The bare minimum size for a custom secret.
-   *
-   * @see
-   *   XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
-   *   XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
-   */
-  #define XXH3_SECRET_SIZE_MIN 136
+  /*!
+   * The bare minimum size for a custom secret.
+   *
+   * @see
+   *   XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
+   *   XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
+   */
+  #define XXH3_SECRET_SIZE_MIN 136

-/*
- * XXH3_64bits_withSecret():
+/*!
+ * @brief Calculates 64-bit variant of XXH3 with a custom "secret".
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in
+ * size.
+ * @param len The length of @p data, in bytes.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ *
+ * @return The calculated 64-bit XXH3 hash value.
+ *
+ * @pre
+ *   The memory between @p data and @p data + @p len must be valid,
+ *   readable, contiguous memory. However, if @p len is `0`, @p data may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
 * It's possible to provide any blob of bytes as a "secret" to generate the
 * hash. This makes it more difficult for an external actor to prepare an
- * intentional collision. The main condition is that secretSize *must* be large
- * enough (>= XXH3_SECRET_SIZE_MIN). However, the quality of produced hash
- * values depends on secret's entropy. Technically, the secret must look like a
- * bunch of random bytes. Avoid "trivial" or structured data such as repeated
- * sequences or a text document.
Whenever unsure about the "randomness" of the
- * blob of bytes, consider relabelling it as a "custom seed" instead, and employ
- * "XXH3_generateSecret()" (see below) to generate a high entropy secret derived
- * from the custom seed.
- */
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void *data, size_t len,
-                                                   const void *secret,
-                                                   size_t secretSize);
-
-/******* Streaming *******/
+ * intentional collision. The main condition is that @p secretSize *must* be
+ * large enough (>= @ref XXH3_SECRET_SIZE_MIN). However, the quality of the
+ * secret impacts the dispersion of the hash algorithm. Therefore, the secret
+ * _must_ look like a bunch of random bytes. Avoid "trivial" or structured data
+ * such as repeated sequences or a text document. Whenever in doubt about the
+ * "randomness" of the blob of bytes, consider employing @ref
+ * XXH3_generateSecret() instead (see below). It will generate a proper high
+ * entropy secret derived from the blob of bytes. Another advantage of using
+ * XXH3_generateSecret() is that it guarantees that all bits within the initial
+ * blob of bytes will impact every bit of the output. This is not necessarily
+ * the case when using the blob of bytes directly because, when hashing _small_
+ * inputs, only a portion of the secret is employed.
+ *
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
+XXH3_64bits_withSecret(XXH_NOESCAPE const void *data, size_t len,
+                       XXH_NOESCAPE const void *secret, size_t secretSize);
+
+  /******* Streaming *******/
+  #ifndef XXH_NO_STREAM
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
@@ -866,45 +1286,146 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void *data, size_t len,
 */

/*!
- * @brief The state struct for the XXH3 streaming API.
+ * @brief The opaque state struct for the XXH3 streaming API.
 *
 * @see XXH3_state_s for details.
+ * @see @ref streaming_example "Streaming Example"
 */
-typedef struct XXH3_state_s XXH3_state_t;
-XXH_PUBLIC_API XXH3_state_t *XXH3_createState(void);
-XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t *statePtr);
-XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t *dst_state,
-                                   const XXH3_state_t *src_state);
+typedef struct XXH3_state_s XXH3_state_t;
+XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t *XXH3_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t *statePtr);

-/*
- * XXH3_64bits_reset():
- * Initialize with default parameters.
- * digest will be equivalent to `XXH3_64bits()`.
+/*!
+ * @brief Copies one @ref XXH3_state_t to another.
+ *
+ * @param dst_state The state to copy to.
+ * @param src_state The state to copy from.
+ * @pre
+ *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t *statePtr);
-/*
- * XXH3_64bits_reset_withSeed():
- * Generate a custom secret from `seed`, and store it into `statePtr`.
- * digest will be equivalent to `XXH3_64bits_withSeed()`.
+XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t *dst_state,
+                                   XXH_NOESCAPE const XXH3_state_t *src_state);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ *   - This function resets `statePtr` and generates a secret with default
+ * parameters.
+ *   - Call this function before @ref XXH3_64bits_update().
+ *   - Digest will be equivalent to `XXH3_64bits()`.
+ *
+ * @see @ref streaming_example "Streaming Example"
+ *
 */
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t *statePtr,
-                                                        XXH64_hash_t seed);
-/*
- * XXH3_64bits_reset_withSecret():
- * `secret` is referenced, it _must outlive_ the hash streaming session.
- * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t *statePtr);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ *   - This function resets `statePtr` and generates a secret from `seed`.
+ *   - Call this function before @ref XXH3_64bits_update().
+ *   - Digest will be equivalent to `XXH3_64bits_withSeed()`.
+ *
+ * @see @ref streaming_example "Streaming Example"
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(
+    XXH_NOESCAPE XXH3_state_t *statePtr, XXH64_hash_t seed);
+
+/*!
+ * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
+ *
+ * @param statePtr The state struct to reset.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ * `secret` is referenced, it _must outlive_ the hash streaming session.
+ *
+ * Similar to one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN,
 * and the quality of produced hash values depends on secret's entropy
 * (secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
+ *
+ * @see @ref streaming_example "Streaming Example"
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(
-    XXH3_state_t *statePtr, const void *secret, size_t secretSize);
+    XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret,
+    size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in
+ * size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ * @pre
+ *   The memory between @p input and @p input + @p length must be valid,
+ *   readable, contiguous memory. However, if @p length is `0`, @p input may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note Call this to incrementally consume blocks of data.
+ *
+ * @see @ref streaming_example "Streaming Example"
+ */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t *statePtr,
+                   XXH_NOESCAPE const void *input, size_t length);

-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update(XXH3_state_t *statePtr,
-                                                const void *input,
-                                                size_t length);
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest(const XXH3_state_t *statePtr);
+/*!
+ * @brief Returns the calculated XXH3 64-bit hash value from an @ref
+ * XXH3_state_t.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ *  @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 64-bit hash value from that state.
+ *
+ * @note
+ *   Calling XXH3_64bits_digest() will not affect @p statePtr, so you can
+ *   update, digest, and update again.
+ *
+ * @see @ref streaming_example "Streaming Example"
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
+XXH3_64bits_digest(XXH_NOESCAPE const XXH3_state_t *statePtr);
+  #endif /* !XXH_NO_STREAM */

/* note : canonical representation of XXH3 is the same as XXH64
 * since they both produce XXH64_hash_t values */
@@ -926,15 +1447,82 @@ typedef struct {

} XXH128_hash_t;

-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void *data, size_t len);
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void *data, size_t len,
-                                                   XXH64_hash_t seed);
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void *data,
-                                                     size_t len,
-                                                     const void *secret,
-                                                     size_t secretSize);
+/*!
+ * @brief Calculates 128-bit unseeded variant of XXH3 of @p data.
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ *
+ * @return The calculated 128-bit variant of XXH3 value.
+ *
+ * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
+ * for shorter inputs.
+ *
+ * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of `0`;
+ * however, it may have slightly better performance due to constant
+ * propagation of the defaults.
+ *
+ * @see XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding
+ * variants
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
+XXH3_128bits(XXH_NOESCAPE const void *data, size_t len);
+/*! @brief Calculates 128-bit seeded variant of XXH3 hash of @p data.
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * @return The calculated 128-bit variant of XXH3 value.
+ *
+ * @note
+ *   seed == 0 produces the same results as @ref XXH3_128bits().
+ *
+ * This variant generates a custom secret on the fly based on default secret
+ * altered using the @p seed value.
+ *
+ * While this operation is decently fast, note that it's not completely free.
+ *
+ * @see XXH3_128bits(), XXH3_128bits_withSecret(): other seeding variants
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(
+    XXH_NOESCAPE const void *data, size_t len, XXH64_hash_t seed);
+/*!
+ * @brief Calculates 128-bit variant of XXH3 with a custom "secret".
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in
+ * size.
+ * @param len The length of @p data, in bytes.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ *
+ * @return The calculated 128-bit variant of XXH3 value.
+ *
+ * It's possible to provide any blob of bytes as a "secret" to generate the
+ * hash. This makes it more difficult for an external actor to prepare an
+ * intentional collision. The main condition is that @p secretSize *must* be
+ * large enough (>= @ref XXH3_SECRET_SIZE_MIN). However, the quality of the
+ * secret impacts the dispersion of the hash algorithm. Therefore, the secret
+ * _must_ look like a bunch of random bytes.
Avoid "trivial" or structured data + * such as repeated sequences or a text document. Whenever in doubt about the + * "randomness" of the blob of bytes, consider employing @ref + * XXH3_generateSecret() instead (see below). It will generate a proper high + * entropy secret derived from the blob of bytes. Another advantage of using + * XXH3_generateSecret() is that it guarantees that all bits within the initial + * blob of bytes will impact every bit of the output. This is not necessarily + * the case when using the blob of bytes directly because, when hashing _small_ + * inputs, only a portion of the secret is employed. + * + * @see @ref single_shot_example "Single Shot Example" for an example. + */ +XXH_PUBLIC_API XXH_PUREF XXH128_hash_t +XXH3_128bits_withSecret(XXH_NOESCAPE const void *data, size_t len, + XXH_NOESCAPE const void *secret, size_t secretSize); -/******* Streaming *******/ + /******* Streaming *******/ + #ifndef XXH_NO_STREAM /* * Streaming requires state maintenance. * This operation costs memory and CPU. @@ -948,39 +1536,151 @@ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void *data, * counterpart. */ -XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t *statePtr); -XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t *statePtr, - XXH64_hash_t seed); +/*! + * @brief Resets an @ref XXH3_state_t to begin a new hash. + * + * @param statePtr The state struct to reset. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note + * - This function resets `statePtr` and generate a secret with default + * parameters. + * - Call it before @ref XXH3_128bits_update(). + * - Digest will be equivalent to `XXH3_128bits()`. + * + * @see @ref streaming_example "Streaming Example" + */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t *statePtr); + +/*! + * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash. + * + * @param statePtr The state struct to reset. + * @param seed The 64-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * @note + * - This function resets `statePtr` and generate a secret from `seed`. + * - Call it before @ref XXH3_128bits_update(). + * - Digest will be equivalent to `XXH3_128bits_withSeed()`. + * + * @see @ref streaming_example "Streaming Example" + */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH64_hash_t seed); +/*! + * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash. + * + * @param statePtr The state struct to reset. + * @param secret The secret data. + * @param secretSize The length of @p secret, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success. + * @return @ref XXH_ERROR on failure. + * + * `secret` is referenced, it _must outlive_ the hash streaming session. + * Similar to one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN, + * and the quality of produced hash values depends on secret's entropy + * (secret's content should look like a bunch of random bytes). + * When in doubt about the randomness of a candidate `secret`, + * consider employing `XXH3_generateSecret()` instead (see below). 
+ *
+ * @see @ref streaming_example "Streaming Example"
+ */
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(
-    XXH3_state_t *statePtr, const void *secret, size_t secretSize);
+    XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret,
+    size_t secretSize);
+
+/*!
+ * @brief Consumes a block of @p input to an @ref XXH3_state_t.
+ *
+ * Call this to incrementally consume blocks of data.
+ *
+ * @param statePtr The state struct to update.
+ * @param input The block of data to be hashed, at least @p length bytes in
+ * size.
+ * @param length The length of @p input, in bytes.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @note
+ *   The memory between @p input and @p input + @p length must be valid,
+ *   readable, contiguous memory. However, if @p length is `0`, @p input may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ */
+XXH_PUBLIC_API XXH_errorcode
+XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t *statePtr,
+                    XXH_NOESCAPE const void *input, size_t length);

-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update(XXH3_state_t *statePtr,
-                                                 const void *input,
-                                                 size_t length);
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest(const XXH3_state_t *statePtr);
+/*!
+ * @brief Returns the calculated XXH3 128-bit hash value from an @ref
+ * XXH3_state_t.
+ *
+ * @param statePtr The state struct to calculate the hash from.
+ *
+ * @pre
+ *   @p statePtr must not be `NULL`.
+ *
+ * @return The calculated XXH3 128-bit hash value from that state.
+ *
+ * @note
+ *   Calling XXH3_128bits_digest() will not affect @p statePtr, so you can
+ *   update, digest, and update again.
+ *
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
+XXH3_128bits_digest(XXH_NOESCAPE const XXH3_state_t *statePtr);
+  #endif /* !XXH_NO_STREAM */

/* Following helper functions make it possible to compare XXH128_hash_t values.
 * Since XXH128_hash_t is a structure, this capability is not offered by the
- * language.
- * Note: For better performance, these functions can be inlined using
+ * language. Note: For better performance, these functions can be inlined using
 * XXH_INLINE_ALL
 */

/*!
- * XXH128_isEqual():
- * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
+ * @brief Check equality of two XXH128_hash_t values
+ *
+ * @param h1 The 128-bit hash value.
+ * @param h2 Another 128-bit hash value.
+ *
+ * @return `1` if `h1` and `h2` are equal.
+ * @return `0` if they are not.
 */
-XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
+XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);

/*!
- * XXH128_cmp():
+ * @brief Compares two @ref XXH128_hash_t
 *
 * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
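+ *
+ * A minimal qsort() sketch (the `hashes` array and its size are
+ * illustrative):
+ *
+ * @code{.c}
+ *   XXH128_hash_t hashes[16];  // requires <stdlib.h> for qsort()
+ *   // ... fill the array ...
+ *   qsort(hashes, 16, sizeof(hashes[0]), XXH128_cmp);
+ * @endcode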
 *
- * return: >0 if *h128_1 > *h128_2
- *         =0 if *h128_1 == *h128_2
- *         <0 if *h128_1 < *h128_2
+ * @param h128_1 Left-hand side value
+ * @param h128_2 Right-hand side value
+ *
+ * @return >0 if @p h128_1 > @p h128_2
+ * @return =0 if @p h128_1 == @p h128_2
+ * @return <0 if @p h128_1 < @p h128_2
 */
-XXH_PUBLIC_API int XXH128_cmp(const void *h128_1, const void *h128_2);
+XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void *h128_1,
+                                        XXH_NOESCAPE const void *h128_2);

/******* Canonical representation *******/
typedef struct {

 unsigned char digest[sizeof(XXH128_hash_t)];

} XXH128_canonical_t;

-XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t *dst,
-                                             XXH128_hash_t hash);
-XXH_PUBLIC_API XXH128_hash_t
-XXH128_hashFromCanonical(const XXH128_canonical_t *src);
+/*!
+ * @brief Converts an @ref XXH128_hash_t to a big endian @ref
+ * XXH128_canonical_t.
+ *
+ * @param dst The @ref XXH128_canonical_t pointer to be stored to.
+ * @param hash The @ref XXH128_hash_t to be converted.
+ *
+ * @pre
+ *   @p dst must not be `NULL`.
+ * @see @ref canonical_representation_example "Canonical Representation Example"
+ */
+XXH_PUBLIC_API void XXH128_canonicalFromHash(
+    XXH_NOESCAPE XXH128_canonical_t *dst, XXH128_hash_t hash);
+/*!
+ * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t.
+ *
+ * @param src The @ref XXH128_canonical_t to convert.
+ *
+ * @pre
+ *   @p src must not be `NULL`.
+ *
+ * @return The converted hash.
+ * @see @ref canonical_representation_example "Canonical Representation Example"
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
+XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t *src);
+
+  #endif /* !XXH_NO_XXH3 */

 #endif /* XXH_NO_LONG_LONG */

/*!
@@ -1035,15 +1759,11 @@ struct XXH32_state_s {

 XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
 XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref
                              total_len_32 overflow) */
-  XXH32_hash_t v1;       /*!< First accumulator lane */
-  XXH32_hash_t v2;       /*!< Second accumulator lane */
-  XXH32_hash_t v3;       /*!< Third accumulator lane */
-  XXH32_hash_t v4;       /*!< Fourth accumulator lane */
+  XXH32_hash_t v[4];     /*!< Accumulator lanes */
 XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as
                             unsigned char[16]. */
 XXH32_hash_t memsize;  /*!< Amount of data in @ref mem32 */
-  XXH32_hash_t reserved; /*!< Reserved field. Do not read or write to it, it may
-  be removed. */
+  XXH32_hash_t reserved; /*!< Reserved field. Do not read or write to it. */

}; /* typedef'd to XXH32_state_t */

@@ -1064,62 +1784,62 @@ struct XXH32_state_s {
struct XXH64_state_s {

 XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */
-  XXH64_hash_t v1;       /*!< First accumulator lane */
-  XXH64_hash_t v2;       /*!< Second accumulator lane */
-  XXH64_hash_t v3;       /*!< Third accumulator lane */
-  XXH64_hash_t v4;       /*!< Fourth accumulator lane */
+  XXH64_hash_t v[4];     /*!< Accumulator lanes */
 XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as
                             unsigned char[32]. */
 XXH32_hash_t memsize;  /*!< Amount of data in @ref mem64 */
 XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyways*/
-  XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it, it
-  may be removed. */
+  XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it.
*/ }; /* typedef'd to XXH64_state_t */ - #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 \ - */ - #include <stdalign.h> - #define XXH_ALIGN(n) alignas(n) - #elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */ + #ifndef XXH_NO_XXH3 + + #if defined(__STDC_VERSION__) && \ + (__STDC_VERSION__ >= 201112L) /* >= C11 */ + #include <stdalign.h> + #define XXH_ALIGN(n) alignas(n) + #elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */ /* In C++ alignas() is a keyword */ - #define XXH_ALIGN(n) alignas(n) - #elif defined(__GNUC__) - #define XXH_ALIGN(n) __attribute__((aligned(n))) - #elif defined(_MSC_VER) - #define XXH_ALIGN(n) __declspec(align(n)) - #else - #define XXH_ALIGN(n) /* disabled */ - #endif + #define XXH_ALIGN(n) alignas(n) + #elif defined(__GNUC__) + #define XXH_ALIGN(n) __attribute__((aligned(n))) + #elif defined(_MSC_VER) + #define XXH_ALIGN(n) __declspec(align(n)) + #else + #define XXH_ALIGN(n) /* disabled */ + #endif - /* Old GCC versions only accept the attribute after the type in structures. - */ - #if !(defined(__STDC_VERSION__) && \ - (__STDC_VERSION__ >= 201112L)) /* C11+ */ \ - && !(defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \ - && defined(__GNUC__) - #define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align) - #else - #define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type - #endif + /* Old GCC versions only accept the attribute after the type in + * structures. */ + #if !(defined(__STDC_VERSION__) && \ + (__STDC_VERSION__ >= 201112L)) /* C11+ */ \ + && \ + !(defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \ + && defined(__GNUC__) + #define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align) + #else + #define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type + #endif - /*! - * @brief The size of the internal XXH3 buffer. - * - * This is the optimal update size for incremental hashing. - * - * @see XXH3_64b_update(), XXH3_128b_update(). - */ - #define XXH3_INTERNALBUFFER_SIZE 256 + /*! + * @brief The size of the internal XXH3 buffer. + * + * This is the optimal update size for incremental hashing. + * + * @see XXH3_64b_update(), XXH3_128b_update(). + */ + #define XXH3_INTERNALBUFFER_SIZE 256 - /*! - * @brief Default size of the secret buffer (and @ref XXH3_kSecret). - * - * This is the size used in @ref XXH3_kSecret and the seeded functions. - * - * Not to be confused with @ref XXH3_SECRET_SIZE_MIN. - */ - #define XXH3_SECRET_DEFAULT_SIZE 192 + /*! + * @internal + * @brief Default size of the secret buffer (and @ref XXH3_kSecret). + * + * This is the size used in @ref XXH3_kSecret and the seeded functions. + * + * Not to be confused with @ref XXH3_SECRET_SIZE_MIN. + */ + #define XXH3_SECRET_DEFAULT_SIZE 192 /*! * @internal @@ -1146,15 +1866,15 @@ struct XXH64_state_s { struct XXH3_state_s { XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]); - /*!< The 8 accumulators. Similar to `vN` in @ref XXH32_state_s::v1 and @ref - * XXH64_state_s */ + /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v + */ XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]); /*!< Used to store a custom secret generated from a seed. */ XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]); /*!< The internal buffer. @see XXH32_state_s::mem32 */ XXH32_hash_t bufferedSize; /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */ - XXH32_hash_t reserved32; + XXH32_hash_t useSeed; /*!< Reserved field. Needed for padding on 64-bit. 
                                                                             */
  size_t nbStripesSoFar;              /*!< Number of stripes processed. */
@@ -1176,67 +1896,262 @@ struct XXH3_state_s {

};                                             /* typedef'd to XXH3_state_t */

-  #undef XXH_ALIGN_MEMBER
+    #undef XXH_ALIGN_MEMBER

-  /*!
-   * @brief Initializes a stack-allocated `XXH3_state_s`.
-   *
-   * When the @ref XXH3_state_t structure is merely emplaced on stack,
-   * it should be initialized with XXH3_INITSTATE() or a memset()
-   * in case its first reset uses XXH3_NNbits_reset_withSeed().
-   * This init can be omitted if the first reset uses default or _withSecret
-   * mode. This operation isn't necessary when the state is created with
-   * XXH3_createState(). Note that this doesn't prepare the state for a
-   * streaming operation, it's still necessary to use XXH3_NNbits_reset*()
-   * afterwards.
-   */
-  #define XXH3_INITSTATE(XXH3_state_ptr) \
-    { (XXH3_state_ptr)->seed = 0; }
+    /*!
+     * @brief Initializes a stack-allocated `XXH3_state_s`.
+     *
+     * When the @ref XXH3_state_t structure is merely emplaced on stack,
+     * it should be initialized with XXH3_INITSTATE() or a memset()
+     * in case its first reset uses XXH3_NNbits_reset_withSeed().
+     * This init can be omitted if the first reset uses default or _withSecret
+     * mode. This operation isn't necessary when the state is created with
+     * XXH3_createState(). Note that this doesn't prepare the state for a
+     * streaming operation, it's still necessary to use XXH3_NNbits_reset*()
+     * afterwards.
+     */
+    #define XXH3_INITSTATE(XXH3_state_ptr) \
+      do { \
+ \
+        XXH3_state_t *tmp_xxh3_state_ptr = (XXH3_state_ptr); \
+        tmp_xxh3_state_ptr->seed = 0; \
+        tmp_xxh3_state_ptr->extSecret = NULL; \
+ \
+      } while (0)
+
+/*!
+ * @brief Calculates the 128-bit hash of @p data using XXH3.
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in size.
+ * @param len The length of @p data, in bytes.
+ * @param seed The 64-bit seed to alter the hash's output predictably.
+ *
+ * @pre
+ *   The memory between @p data and @p data + @p len must be valid,
+ *   readable, contiguous memory. However, if @p len is `0`, @p data may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 128-bit XXH3 value.
+ *
+ * @see @ref single_shot_example "Single Shot Example" for an example.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void *data,
+                                              size_t len, XXH64_hash_t seed);

/* === Experimental API === */

/* Symbols defined below must be considered tied to a specific library version.
 */

-/*
- * XXH3_generateSecret():
+/*!
+ * @brief Derive a high-entropy secret from any user-defined content, named
+ * customSeed.
+ *
+ * @param secretBuffer A writable buffer for derived high-entropy secret
+ * data.
+ * @param secretSize Size of secretBuffer, in bytes. Must be >=
+ * @ref XXH3_SECRET_SIZE_MIN.
+ * @param customSeed User-defined content.
+ * @param customSeedSize Size of customSeed, in bytes.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
 *
- * Derive a high-entropy secret from any user-defined content, named customSeed.
 * The generated secret can be used in combination with `*_withSecret()`
 * functions. The `_withSecret()` variants are useful to provide a higher level
 * of protection than 64-bit seed, as it becomes much more difficult for an
 * external actor to guess how to impact the calculation logic.
 *
 * The function accepts as input a custom seed of any length and any content,
- * and derives from it a high-entropy secret of length XXH3_SECRET_DEFAULT_SIZE
- * into an already allocated buffer secretBuffer.
- * The generated secret is _always_ XXH_SECRET_DEFAULT_SIZE bytes long.
+ * and derives from it a high-entropy secret of length @p secretSize into an
+ * already allocated buffer @p secretBuffer.
 *
 * The generated secret can then be used with any `*_withSecret()` variant.
- * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
- * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
+ * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
+ * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
 * are part of this list. They all accept a `secret` parameter
- * which must be very long for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
- * _and_ feature very high entropy (consist of random-looking bytes).
- * These conditions can be a high bar to meet, so
- * this function can be used to generate a secret of proper quality.
- *
- * customSeed can be anything. It can have any size, even small ones,
- * and its content can be anything, even stupidly "low entropy" source such as a
- * bunch of zeroes. The resulting `secret` will nonetheless provide all expected
+ * which must be large enough for implementation reasons (>= @ref
+ * XXH3_SECRET_SIZE_MIN) _and_ feature very high entropy (consist of
+ * random-looking bytes). These conditions can be a high bar to meet, so @ref
+ * XXH3_generateSecret() can be employed to ensure proper quality.
+ *
+ * @p customSeed can be anything. It can have any size, even small ones,
+ * and its content can be anything, even "poor entropy" sources such as a bunch
+ * of zeroes. The resulting `secret` will nonetheless provide all required
 * qualities.
 *
- * Supplying NULL as the customSeed copies the default secret into
- * `secretBuffer`. When customSeedSize > 0, supplying NULL as customSeed is
- * undefined behavior.
+ * @pre
+ *   - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
+ *   - When @p customSeedSize > 0, supplying NULL as customSeed is undefined
+ *     behavior.
+ *
+ * Example code:
+ * @code{.c}
+ *    #include <stdio.h>
+ *    #include <stdlib.h>
+ *    #include <string.h>
+ *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ *    #include "xxhash.h"
+ *    // Hashes argv[2] using the entropy from argv[1].
+ *    int main(int argc, char* argv[])
+ *    {

+ *        char secret[XXH3_SECRET_SIZE_MIN];
+ *        if (argc != 3) { return 1; }
+ *        XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
+ *        XXH64_hash_t h = XXH3_64bits_withSecret(
+ *             argv[2], strlen(argv[2]),
+ *             secret, sizeof(secret)
+ *        );
+ *        printf("%016llx\n", (unsigned long long) h);
+ *    }
+ * @endcode
 */
-XXH_PUBLIC_API void XXH3_generateSecret(void *secretBuffer,
-                                        const void *customSeed,
-                                        size_t customSeedSize);
+XXH_PUBLIC_API XXH_errorcode
+XXH3_generateSecret(XXH_NOESCAPE void *secretBuffer, size_t secretSize,
+                    XXH_NOESCAPE const void *customSeed, size_t customSeedSize);
+
+/*!
+ * @brief Generate the same secret as the _withSeed() variants.
+ *
+ * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * The generated secret can be used in combination with
+ * `*_withSecret()` and `_withSecretandSeed()` variants.
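+ *
+ * A minimal C sketch (`buf` and `bufSize` are placeholders for the caller's
+ * data): the secret derived from a seed is computed once, then reused.
+ * @code{.c}
+ *    unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ *    XXH3_generateSecret_fromSeed(secret, 1234);   // derive once
+ *    XXH64_hash_t h = XXH3_64bits_withSecret(buf, bufSize,
+ *                                            secret, sizeof(secret));
+ * @endcode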
+ *
+ * Example C++ `std::string` hash class:
+ * @code{.cpp}
+ *    #include <string>
+ *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
+ *    #include "xxhash.h"
+ *    // Slow, seeds each time
+ *    class HashSlow {

+ *        XXH64_hash_t seed;
+ *    public:
+ *        HashSlow(XXH64_hash_t s) : seed{s} {}
+ *        size_t operator()(const std::string& x) const {

+ *            return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
+ *        }
+ *    };
+ *    // Fast, caches the seeded secret for future uses.
+ *    class HashFast {

+ *        unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ *    public:
+ *        HashFast(XXH64_hash_t s) {

+ *            XXH3_generateSecret_fromSeed(secret, s);
+ *        }
+ *        size_t operator()(const std::string& x) const {

-/* simple short-cut to pre-selected XXH3_128bits variant */
-XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
-                                    XXH64_hash_t seed);

+ *            return size_t{

+ *                XXH3_64bits_withSecret(x.c_str(), x.length(), secret,
+ *                                       sizeof(secret))
+ *            };
+ *        }
+ *    };
+ * @endcode
+ */
+XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(
+    XXH_NOESCAPE void *secretBuffer, XXH64_hash_t seed);
+
+/*!
+ * @brief Calculates 64/128-bit seeded variant of XXH3 hash of @p data.
+ *
+ * @param data The block of data to be hashed, at least @p len bytes in
+ * size.
+ * @param len The length of @p data, in bytes.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ * @param seed The 64-bit seed to alter the hash result predictably.
+ *
+ * These variants generate hash values using either
+ * @p seed for "short" keys (< @ref XXH3_MIDSIZE_MAX = 240 bytes)
+ * or @p secret for "large" keys (>= @ref XXH3_MIDSIZE_MAX).
+ *
+ * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
+ * `_withSeed()` has to generate the secret on the fly for "large" keys.
+ * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
+ * `_withSecret()` has to generate the masks on the fly for "small" keys,
+ * which requires more instructions than _withSeed() variants.
+ * Therefore, the _withSecretandSeed variant combines the best of both worlds.
+ *
+ * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
+ * this variant produces *exactly* the same results as the `_withSeed()`
+ * variant, hence offering only a pure speed benefit on "large" input,
+ * by skipping the need to regenerate the secret for every large input.
+ *
+ * Another usage scenario is to hash the secret to a 64-bit hash value,
+ * for example with XXH3_64bits(), which then becomes the seed,
+ * and then employ both the seed and the secret in _withSecretandSeed().
+ * On top of speed, an added benefit is that each bit in the secret
+ * has a 50% chance to swap each bit in the output, via its impact on the seed.
+ *
+ * This is not guaranteed when using the secret directly in "small data"
+ * scenarios, because only portions of the secret are employed for small data.
+ */
+XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecretandSeed(
+    XXH_NOESCAPE const void *data, size_t len, XXH_NOESCAPE const void *secret,
+    size_t secretSize, XXH64_hash_t seed);
+/*!
+ * @brief Calculates the 128-bit seeded variant of XXH3 hash of @p input.
+ *
+ * @param input The block of data to be hashed, at least @p length bytes in
+ * size.
+ * @param length The length of @p input, in bytes.
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ * @param seed64 The 64-bit seed to alter the hash result predictably.
+ *
+ * @return The calculated 128-bit variant of XXH3 value.
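+ *
+ * A minimal sketch of the intended workflow (`buf`/`bufSize` are
+ * placeholders for the caller's data):
+ * @code{.c}
+ *    unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ *    XXH64_hash_t const seed = 42;
+ *    XXH3_generateSecret_fromSeed(secret, seed);   // pay the cost once
+ *    // Same result as XXH3_128bits_withSeed(buf, bufSize, seed), but
+ *    // without regenerating the secret for every large input:
+ *    XXH128_hash_t h = XXH3_128bits_withSecretandSeed(
+ *        buf, bufSize, secret, sizeof(secret), seed);
+ * @endcode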
+ *
+ * @see XXH3_64bits_withSecretandSeed()
+ */
+XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecretandSeed(
+    XXH_NOESCAPE const void *input, size_t length,
+    XXH_NOESCAPE const void *secret, size_t secretSize, XXH64_hash_t seed64);
  #ifndef XXH_NO_STREAM
+/*!
+ * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
+ *
+ * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref
+ * XXH3_createState().
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ * @param seed64 The 64-bit seed to alter the hash result predictably.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @see XXH3_64bits_withSecretandSeed()
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecretandSeed(
+    XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret,
+    size_t secretSize, XXH64_hash_t seed64);
+/*!
+ * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
+ *
+ * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref
+ * XXH3_createState().
+ * @param secret The secret data.
+ * @param secretSize The length of @p secret, in bytes.
+ * @param seed64 The 64-bit seed to alter the hash result predictably.
+ *
+ * @return @ref XXH_OK on success.
+ * @return @ref XXH_ERROR on failure.
+ *
+ * @see XXH3_64bits_withSecretandSeed()
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecretandSeed(
+    XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret,
+    size_t secretSize, XXH64_hash_t seed64);
+  #endif                                                  /* !XXH_NO_STREAM */
+
  #endif                                                      /* !XXH_NO_XXH3 */
#endif                                                     /* XXH_NO_LONG_LONG */

#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
  #define XXH_IMPLEMENTATION
@@ -1291,7 +2206,7 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
 /*!
  * @brief Define this to disable 64-bit code.
  *
- * Useful if only using the @ref xxh32_family and you have a strict C90
+ * Useful if only using the @ref XXH32_family and you have a strict C90
  * compiler.
  */
  #define XXH_NO_LONG_LONG
@@ -1315,7 +2230,7 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
 * Use `memcpy()`. Safe and portable. Note that most modern compilers
 * will eliminate the function call and treat it as an unaligned access.
 *
- * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))`
+ * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
 * @par
 * Depends on compiler extensions and is therefore not portable.
 * This method is safe _if_ your compiler supports it,
@@ -1336,30 +2251,50 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
 * big-endian systems which lack a native byteswap instruction. However,
 * some compilers will emit literal byteshifts even if the target supports
 * unaligned access.
- * .
+ *
 *
 * @warning
 * Methods 1 and 2 rely on implementation-defined behavior. Use these with
 * care, as what works on one compiler/platform/optimization level may
 * cause another to read garbage data or even crash.
 *
- * See https://stackoverflow.com/a/32095106/646947 for details.
+ * See
+ * https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
+ * for details.
 *
 * Prefer these methods in priority order (0 > 3 > 1 > 2)
 */
  #define XXH_FORCE_MEMORY_ACCESS 0
+
 /*!
- * @def XXH_ACCEPT_NULL_INPUT_POINTER
- * @brief Whether to add explicit `NULL` checks.
+ * @def XXH_SIZE_OPT
+ * @brief Controls how much xxHash optimizes for size.
 *
- * If the input pointer is `NULL` and the length is non-zero, xxHash's
- * default behavior is to dereference it, triggering a segfault.
+ * xxHash, when compiled, tends to result in a rather large binary size.
+ * This is mostly due to heavy use of forced inlining and constant folding
+ * of the @ref XXH3_family to increase performance.
 *
- * When this macro is enabled, xxHash actively checks the input for a null
- * pointer. If it is, the result for null input pointers is the same as a
- * zero-length input.
+ * However, some developers prefer size over speed. This option can
+ * significantly reduce the size of the generated code. When using the `-Os`
+ * or `-Oz` options on GCC or Clang, this is defined to 1 by default,
+ * otherwise it is defined to 0.
+ *
+ * Most of these size optimizations can be controlled manually.
+ *
+ * This is a number from 0-2.
+ * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations.
+ *   Speed comes first.
+ * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more
+ *   conservative and disables hacks that increase code size. It implies
+ *   the options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK ==
+ *   0, and @ref XXH3_NEON_LANES == 8 if they are not already defined.
+ * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible.
+ *   Performance may cry. For example, the single shot functions just use
+ *   the streaming API.
 */
-  #define XXH_ACCEPT_NULL_INPUT_POINTER 0
+  #define XXH_SIZE_OPT 0
+
 /*!
 * @def XXH_FORCE_ALIGN_CHECK
 * @brief If defined to non-zero, adds a special path for aligned inputs
@@ -1381,8 +2316,10 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
 *
 * In these cases, the alignment check can be removed by setting this macro
 * to 0. Then the code will always use unaligned memory access. Align check
- * is automatically disabled on x86, x64 & arm64, which are platforms known
- * to offer good unaligned memory accesses performance.
+ * is automatically disabled on x86, x64, ARM64, and some ARM chips, which
+ * are platforms known to offer good unaligned memory access performance.
+ *
+ * It is also disabled by default when @ref XXH_SIZE_OPT >= 1.
 *
 * This option does not affect XXH3 (only XXH32 and XXH64).
 */
@@ -1405,24 +2342,39 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
 * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
 * compiler full control on whether to inline or not.
 *
- * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using
- * -fno-inline with GCC or Clang, this will automatically be defined.
+ * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if
+ * @ref XXH_SIZE_OPT >= 1, this will automatically be defined.
 */
  #define XXH_NO_INLINE_HINTS 0

 /*!
- * @def XXH_REROLL
- * @brief Whether to reroll `XXH32_finalize`.
+ * @def XXH3_INLINE_SECRET
+ * @brief Determines whether to inline the XXH3 withSecret code.
+ *
+ * When the secret size is known, the compiler can improve the performance
+ * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret().
 *
- * For performance, `XXH32_finalize` uses an unrolled loop
- * in the form of a switch statement.
+ * However, if the secret size is not known, it doesn't have any benefit.
+ * This happens when xxHash is compiled into a global symbol. Therefore, if
+ * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0.
 *
- * This is not always desirable, as it generates larger code,
- * and depending on the architecture, may even be slower
+ * Additionally, this defaults to 0 on GCC 12+, which has an issue with
+ * function pointers that are *sometimes* force inline on -Og, and it is
+ * impossible to automatically detect this optimization level.
+ */
+  #define XXH3_INLINE_SECRET 0
+
+  /*!
+  * @def XXH32_ENDJMP
+  * @brief Whether to use a jump for `XXH32_finalize`.
 *
- * This is automatically defined with `-Os`/`-Oz` on GCC and Clang.
+  * For performance, `XXH32_finalize` uses multiple branches in the
+  * finalizer. This is generally preferable for performance, but depending on
+  * the exact architecture, a jmp may be preferable.
+  *
+  * This setting is only likely to make a difference for very small inputs.
 */
-  #define XXH_REROLL 0
+  #define XXH32_ENDJMP 0

 /*!
  * @internal
@@ -1434,6 +2386,18 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
 */
  #define XXH_OLD_NAMES
  #undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
+
+  /*!
+  * @def XXH_NO_STREAM
+  * @brief Disables the streaming API.
+  *
+  * When xxHash is not inlined and the streaming functions are not used,
+  * disabling the streaming functions can improve code size significantly,
+  * especially with the @ref XXH3_family which tends to make constant folded
+  * copies of itself.
+  */
+  #define XXH_NO_STREAM
+  #undef XXH_NO_STREAM /* don't actually */
#endif                                                       /* XXH_DOXYGEN */
/*!
 * @}
@@ -1441,24 +2405,31 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
 */

#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command \
                                   line for example */
-  /* prefer __packed__ structures (method 1) for gcc on armv7+ and mips */
-  #if !defined(__clang__) && \
-      ((defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
-       (defined(__GNUC__) && \
-        ((defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \
-         (defined(__mips__) && (__mips <= 5 || __mips_isa_rev < 6) && \
-          (!defined(__mips16) || defined(__mips_mips16e2))))))
+  /* prefer __packed__ structures (method 1) for GCC
+   * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte
+   * shifting, so we use memcpy which for some reason does unaligned loads.
   */
  #if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && \
                             defined(__ARM_FEATURE_UNALIGNED))
    #define XXH_FORCE_MEMORY_ACCESS 1
  #endif
#endif

-  #ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */
-    #define XXH_ACCEPT_NULL_INPUT_POINTER 0
+  #ifndef XXH_SIZE_OPT
+    /* default to 1 for -Os or -Oz */
+    #if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
+      #define XXH_SIZE_OPT 1
+    #else
+      #define XXH_SIZE_OPT 0
+    #endif
  #endif

#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
-  #if defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || \
-      defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) /* visual */
+  /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is
+   * available */
+  #if XXH_SIZE_OPT >= 1 || defined(__i386) || defined(__x86_64__) || \
+      defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) || \
+      defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || \
+      defined(_M_ARM) /* visual */
    #define XXH_FORCE_ALIGN_CHECK 0
  #else
    #define XXH_FORCE_ALIGN_CHECK 1
@@ -1466,24 +2437,27 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
#endif

#ifndef XXH_NO_INLINE_HINTS
-  #if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
-      || defined(__NO_INLINE__) /* -O0, -fno-inline */
+  #if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */
    #define XXH_NO_INLINE_HINTS 1
  #else
    #define XXH_NO_INLINE_HINTS 0
  #endif
#endif

-  #ifndef XXH_REROLL
-    #if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ || \
-        (defined(__GNUC__) && !defined(__clang__))
-      /* The if/then loop is preferable to switch/case on gcc (on x64) */
-      #define XXH_REROLL 1
+  #ifndef XXH3_INLINE_SECRET
+    #if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) || \
+        !defined(XXH_INLINE_ALL)
+      #define XXH3_INLINE_SECRET 0
    #else
-      #define XXH_REROLL 0
+      #define XXH3_INLINE_SECRET 1
    #endif
  #endif

+  #ifndef XXH32_ENDJMP
+    /* generally preferable for performance */
+    #define XXH32_ENDJMP 0
+  #endif
+
/*!
 * @defgroup impl Implementation
 * @{
@@ -1493,17 +2467,45 @@ XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
 */

/* *************************************
 *  Includes & Memory related functions
 ***************************************/
  #if defined(XXH_NO_STREAM)
  /* nothing */
  #elif defined(XXH_NO_STDLIB)
+
+/* When requesting to disable any mention of stdlib,
 * the library loses the ability to invoke malloc / free.
 * In practice, it means that functions like `XXH*_createState()`
 * will always fail, and return NULL.
 * This flag is useful in situations where
 * xxhash.h is integrated into some kernel, embedded or limited environment
 * without access to dynamic allocation.
 */
+
+static XXH_CONSTF void *XXH_malloc(size_t s) {
+
+  (void)s;
+  return NULL;
+
+}
+
+static void XXH_free(void *p) {
+
+  (void)p;
+
+}
+
+  #else
+
/*
 * Modify the local functions below should you wish to use
 * different memory routines for malloc() and free()
 */
-  #include <stdlib.h>
+    #include <stdlib.h>

/*!
 * @internal
 * @brief Modify this function to use a different routine than malloc().
 */
-static void *XXH_malloc(size_t s) {
+static XXH_MALLOCF void *XXH_malloc(size_t s) {

  return malloc(s);

@@ -1519,6 +2521,8 @@ static void XXH_free(void *p) {

}

+  #endif /* XXH_NO_STDLIB */
+
  #include <string.h>

/*!
@@ -1542,20 +2546,20 @@ static void *XXH_memcpy(void *dest, const void *src, size_t size) { #endif #if XXH_NO_INLINE_HINTS /* disable inlining hints */ - #if defined(__GNUC__) + #if defined(__GNUC__) || defined(__clang__) #define XXH_FORCE_INLINE static __attribute__((unused)) #else #define XXH_FORCE_INLINE static #endif #define XXH_NO_INLINE static /* enable inlining hints */ - #elif defined(_MSC_VER) /* Visual Studio */ - #define XXH_FORCE_INLINE static __forceinline - #define XXH_NO_INLINE static __declspec(noinline) - #elif defined(__GNUC__) + #elif defined(__GNUC__) || defined(__clang__) #define XXH_FORCE_INLINE \ static __inline__ __attribute__((always_inline, unused)) #define XXH_NO_INLINE static __attribute__((noinline)) + #elif defined(_MSC_VER) /* Visual Studio */ + #define XXH_FORCE_INLINE static __forceinline + #define XXH_NO_INLINE static __declspec(noinline) #elif defined(__cplusplus) || \ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */ #define XXH_FORCE_INLINE static inline @@ -1565,6 +2569,12 @@ static void *XXH_memcpy(void *dest, const void *src, size_t size) { #define XXH_NO_INLINE static #endif + #if XXH3_INLINE_SECRET + #define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE + #else + #define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE + #endif + /* ************************************* * Debug ***************************************/ @@ -1588,17 +2598,20 @@ static void *XXH_memcpy(void *dest, const void *src, size_t size) { #include <assert.h> /* note: can still be disabled with NDEBUG */ #define XXH_ASSERT(c) assert(c) #else - #define XXH_ASSERT(c) ((void)0) + #if defined(__INTEL_COMPILER) + #define XXH_ASSERT(c) XXH_ASSUME((unsigned char)(c)) + #else + #define XXH_ASSERT(c) XXH_ASSUME(c) + #endif #endif /* note: use after variable declarations */ #ifndef XXH_STATIC_ASSERT #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */ - #include <assert.h> #define XXH_STATIC_ASSERT_WITH_MESSAGE(c, m) \ do { \ \ - static_assert((c), m); \ + _Static_assert((c), m); \ \ } while (0) @@ -1642,12 +2655,20 @@ static void *XXH_memcpy(void *dest, const void *src, size_t size) { * We also use it to prevent unwanted constant folding for AArch64 in * XXH3_initCustomSecret_scalar(). */ - #ifdef __GNUC__ - #define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r"(var)) + #if defined(__GNUC__) || defined(__clang__) + #define XXH_COMPILER_GUARD(var) __asm__("" : "+r"(var)) #else #define XXH_COMPILER_GUARD(var) ((void)0) #endif + /* Specifically for NEON vectors which use the "w" constraint, on + * Clang. */ + #if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__) + #define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w"(var)) + #else + #define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0) + #endif + /* ************************************* * Basic Types ***************************************/ @@ -1662,6 +2683,8 @@ typedef unsigned char xxh_u8; typedef XXH32_hash_t xxh_u32; #ifdef XXH_OLD_NAMES + #warning \ + "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly" #define BYTE xxh_u8 #define U8 xxh_u8 #define U32 xxh_u32 @@ -1739,10 +2762,11 @@ static xxh_u32 XXH_read32(const void *memPtr) { #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1)) /* - * __pack instructions are safer but compiler specific, hence potentially - * problematic for some compilers. - * - * Currently only defined for GCC and ICC. 
+ * __attribute__((aligned(1))) is supported by gcc and clang. Originally the + * documentation claimed that it only increased the alignment, but actually + * it can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. */ #ifdef XXH_OLD_NAMES typedef union { @@ -1754,13 +2778,8 @@ typedef union { #endif static xxh_u32 XXH_read32(const void *ptr) { - typedef union { - - xxh_u32 u32; - - } __attribute__((packed)) xxh_unalign; - - return ((const xxh_unalign *)ptr)->u32; + typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32; + return *((const xxh_unalign32 *)ptr); } @@ -1768,12 +2787,13 @@ static xxh_u32 XXH_read32(const void *ptr) { /* * Portable and safe solution. Generally efficient. - * see: https://stackoverflow.com/a/32095106/646947 + * see: + * https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html */ static xxh_u32 XXH_read32(const void *memPtr) { xxh_u32 val; - memcpy(&val, memPtr, sizeof(val)); + XXH_memcpy(&val, memPtr, sizeof(val)); return val; } @@ -1849,6 +2869,50 @@ static int XXH_isLittleEndian(void) { #define XXH_HAS_BUILTIN(x) 0 #endif +/* + * C23 and future versions have standard "unreachable()". + * Once it has been implemented reliably we can add it as an + * additional case: + * + * ``` + * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) + * # include <stddef.h> + * # ifdef unreachable + * # define XXH_UNREACHABLE() unreachable() + * # endif + * #endif + * ``` + * + * Note C++23 also has std::unreachable() which can be detected + * as follows: + * ``` + * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L) + * # include <utility> + * # define XXH_UNREACHABLE() std::unreachable() + * #endif + * ``` + * NB: `__cpp_lib_unreachable` is defined in the `<version>` header. + * We don't use that as including `<utility>` in `extern "C"` blocks + * doesn't work on GCC12 + */ + + #if XXH_HAS_BUILTIN(__builtin_unreachable) + #define XXH_UNREACHABLE() __builtin_unreachable() + + #elif defined(_MSC_VER) + #define XXH_UNREACHABLE() __assume(0) + + #else + #define XXH_UNREACHABLE() + #endif + + #if XXH_HAS_BUILTIN(__builtin_assume) + #define XXH_ASSUME(c) __builtin_assume(c) + #else + #define XXH_ASSUME(c) \ + if (!(c)) { XXH_UNREACHABLE(); } + #endif + /*! * @internal * @def XXH_rotl32(x,r) @@ -1982,8 +3046,10 @@ XXH_PUBLIC_API unsigned XXH_versionNumber(void) { *********************************************************************/ /*! * @} - * @defgroup xxh32_impl XXH32 implementation + * @defgroup XXH32_impl XXH32 implementation * @ingroup impl + * + * Details on the XXH32 implementation. * @{ */ @@ -2018,7 +3084,8 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) { acc += input * XXH_PRIME32_2; acc = XXH_rotl32(acc, 13); acc *= XXH_PRIME32_1; - #if (defined(__SSE4_1__) || defined(__aarch64__)) && \ + #if (defined(__SSE4_1__) || defined(__aarch64__) || \ + defined(__wasm_simd128__)) && \ !defined(XXH_ENABLE_AUTOVECTORIZE) /* * UGLY HACK: @@ -2049,9 +3116,12 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) { * can load data, while v3 can multiply. SSE forces them to operate * together. * - * This is also enabled on AArch64, as Clang autovectorizes it incorrectly - * and it is pointless writing a NEON implementation that is basically the - * same speed as scalar for XXH32. + * This is also enabled on AArch64, as Clang is *very aggressive* in + * vectorizing the loop. 
NEON is only faster on the A53, and with the newer + * cores, it is less than half the speed. + * + * Additionally, this is used on WASM SIMD128 because it JITs to the same + * SIMD instructions and has the same issue. */ XXH_COMPILER_GUARD(acc); #endif @@ -2066,39 +3136,22 @@ static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) { * The final mix ensures that all input bits have a chance to impact any bit in * the output digest, resulting in an unbiased distribution. * - * @param h32 The hash to avalanche. + * @param hash The hash to avalanche. * @return The avalanched hash. */ -static xxh_u32 XXH32_avalanche(xxh_u32 h32) { +static xxh_u32 XXH32_avalanche(xxh_u32 hash) { - h32 ^= h32 >> 15; - h32 *= XXH_PRIME32_2; - h32 ^= h32 >> 13; - h32 *= XXH_PRIME32_3; - h32 ^= h32 >> 16; - return (h32); + hash ^= hash >> 15; + hash *= XXH_PRIME32_2; + hash ^= hash >> 13; + hash *= XXH_PRIME32_3; + hash ^= hash >> 16; + return hash; } #define XXH_get32bits(p) XXH_readLE32_align(p, align) - #define XXH_PROCESS1 \ - do { \ - \ - h32 += (*ptr++) * XXH_PRIME32_5; \ - h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1; \ - \ - } while (0) - - #define XXH_PROCESS4 \ - do { \ - \ - h32 += XXH_get32bits(ptr) * XXH_PRIME32_3; \ - ptr += 4; \ - h32 = XXH_rotl32(h32, 17) * XXH_PRIME32_4; \ - \ - } while (0) - /*! * @internal * @brief Processes the last 0-15 bytes of @p ptr. @@ -2107,17 +3160,37 @@ static xxh_u32 XXH32_avalanche(xxh_u32 h32) { * This final stage will digest them to ensure that all input bytes are present * in the final mix. * - * @param h32 The hash to finalize. + * @param hash The hash to finalize. * @param ptr The pointer to the remaining input. * @param len The remaining length, modulo 16. * @param align Whether @p ptr is aligned. * @return The finalized hash. + * @see XXH64_finalize(). 
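+ * @note At most 15 bytes remain at this point; the 16-byte stripes have
+ *       already been consumed by XXH32_round() in the caller.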
*/ -static xxh_u32 XXH32_finalize(xxh_u32 h32, const xxh_u8 *ptr, size_t len, - XXH_alignment align) { +static XXH_PUREF xxh_u32 XXH32_finalize(xxh_u32 hash, const xxh_u8 *ptr, + size_t len, XXH_alignment align) { +\ + #define XXH_PROCESS1 \ + do { \ + \ + hash += (*ptr++) * XXH_PRIME32_5; \ + hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \ + \ + } while (0) + + #define XXH_PROCESS4 \ + do { \ + \ + hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \ + ptr += 4; \ + hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \ + \ + } while (0) - /* Compact rerolled version */ - if (XXH_REROLL) { + if (ptr == NULL) XXH_ASSERT(len == 0); + + /* Compact rerolled version; generally faster */ + if (!XXH32_ENDJMP) { len &= 15; while (len >= 4) { @@ -2134,7 +3207,7 @@ static xxh_u32 XXH32_finalize(xxh_u32 h32, const xxh_u8 *ptr, size_t len, } - return XXH32_avalanche(h32); + return XXH32_avalanche(hash); } else { @@ -2142,62 +3215,62 @@ static xxh_u32 XXH32_finalize(xxh_u32 h32, const xxh_u8 *ptr, size_t len, case 12: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 8: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 4: XXH_PROCESS4; - return XXH32_avalanche(h32); + return XXH32_avalanche(hash); case 13: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 9: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 5: XXH_PROCESS4; XXH_PROCESS1; - return XXH32_avalanche(h32); + return XXH32_avalanche(hash); case 14: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 10: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 6: XXH_PROCESS4; XXH_PROCESS1; XXH_PROCESS1; - return XXH32_avalanche(h32); + return XXH32_avalanche(hash); case 15: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 11: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 7: XXH_PROCESS4; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 3: XXH_PROCESS1; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 2: XXH_PROCESS1; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 1: XXH_PROCESS1; - XXH_FALLTHROUGH; + XXH_FALLTHROUGH; /* fallthrough */ case 0: - return XXH32_avalanche(h32); + return XXH32_avalanche(hash); } XXH_ASSERT(0); - return h32; /* reaching this point is deemed impossible */ + return hash; /* reaching this point is deemed impossible */ } @@ -2215,29 +3288,21 @@ static xxh_u32 XXH32_finalize(xxh_u32 h32, const xxh_u8 *ptr, size_t len, * @internal * @brief The implementation for @ref XXH32(). * - * @param input, len, seed Directly passed from @ref XXH32(). + * @param input , len , seed Directly passed from @ref XXH32(). * @param align Whether @p input is aligned. * @return The calculated hash. */ -XXH_FORCE_INLINE xxh_u32 XXH32_endian_align(const xxh_u8 *input, size_t len, - xxh_u32 seed, XXH_alignment align) { - - const xxh_u8 *bEnd = input ? 
input + len : NULL; - xxh_u32 h32; - - #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \ - (XXH_ACCEPT_NULL_INPUT_POINTER >= 1) - if (input == NULL) { - - len = 0; - bEnd = input = (const xxh_u8 *)(size_t)16; +XXH_FORCE_INLINE XXH_PUREF xxh_u32 XXH32_endian_align(const xxh_u8 *input, + size_t len, xxh_u32 seed, + XXH_alignment align) { - } + xxh_u32 h32; - #endif + if (input == NULL) XXH_ASSERT(len == 0); if (len >= 16) { + const xxh_u8 *const bEnd = input + len; const xxh_u8 *const limit = bEnd - 15; xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; xxh_u32 v2 = seed + XXH_PRIME32_2; @@ -2272,16 +3337,17 @@ XXH_FORCE_INLINE xxh_u32 XXH32_endian_align(const xxh_u8 *input, size_t len, } -/*! @ingroup xxh32_family */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API XXH32_hash_t XXH32(const void *input, size_t len, XXH32_hash_t seed) { - #if 0 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH32_state_t state; - XXH32_reset(&state, seed); - XXH32_update(&state, (const xxh_u8*)input, len); - return XXH32_digest(&state); + #if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 + /* Simple version, good for code maintenance, but unfortunately slow for small + * inputs */ + XXH32_state_t state; + XXH32_reset(&state, seed); + XXH32_update(&state, (const xxh_u8 *)input, len); + return XXH32_digest(&state); #else if (XXH_FORCE_ALIGN_CHECK) { @@ -2298,17 +3364,16 @@ XXH_PUBLIC_API XXH32_hash_t XXH32(const void *input, size_t len, } -/******* Hash streaming *******/ -/*! - * @ingroup xxh32_family - */ + /******* Hash streaming *******/ + #ifndef XXH_NO_STREAM +/*! @ingroup XXH32_family */ XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void) { return (XXH32_state_t *)XXH_malloc(sizeof(XXH32_state_t)); } -/*! @ingroup xxh32_family */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr) { XXH_free(statePtr); @@ -2316,42 +3381,38 @@ XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr) { } -/*! @ingroup xxh32_family */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dstState, const XXH32_state_t *srcState) { - memcpy(dstState, srcState, sizeof(*dstState)); + XXH_memcpy(dstState, srcState, sizeof(*dstState)); } -/*! @ingroup xxh32_family */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr, XXH32_hash_t seed) { - XXH32_state_t state; /* using a local state to memcpy() in order to avoid - strict-aliasing warnings */ - memset(&state, 0, sizeof(state)); - state.v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; - state.v2 = seed + XXH_PRIME32_2; - state.v3 = seed + 0; - state.v4 = seed - XXH_PRIME32_1; - /* do not write into reserved, planned to be removed in a future version */ - memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + XXH_ASSERT(statePtr != NULL); + memset(statePtr, 0, sizeof(*statePtr)); + statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + statePtr->v[1] = seed + XXH_PRIME32_2; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - XXH_PRIME32_1; return XXH_OK; } -/*! @ingroup xxh32_family */ +/*! 
@ingroup XXH32_family */ XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *state, const void *input, size_t len) { - if (input == NULL) - #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \ - (XXH_ACCEPT_NULL_INPUT_POINTER >= 1) + if (input == NULL) { + + XXH_ASSERT(len == 0); return XXH_OK; - #else - return XXH_ERROR; - #endif + + } { @@ -2375,13 +3436,13 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *state, { const xxh_u32 *p32 = state->mem32; - state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++; - state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++; - state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++; - state->v4 = XXH32_round(state->v4, XXH_readLE32(p32)); + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32)); } @@ -2393,29 +3454,20 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *state, if (p <= bEnd - 16) { const xxh_u8 *const limit = bEnd - 16; - xxh_u32 v1 = state->v1; - xxh_u32 v2 = state->v2; - xxh_u32 v3 = state->v3; - xxh_u32 v4 = state->v4; do { - v1 = XXH32_round(v1, XXH_readLE32(p)); + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p += 4; - v2 = XXH32_round(v2, XXH_readLE32(p)); + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p += 4; - v3 = XXH32_round(v3, XXH_readLE32(p)); + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p += 4; - v4 = XXH32_round(v4, XXH_readLE32(p)); + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p += 4; } while (p <= limit); - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } if (p < bEnd) { @@ -2431,19 +3483,19 @@ XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *state, } -/*! @ingroup xxh32_family */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t *state) { xxh_u32 h32; if (state->large_len) { - h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + - XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); + h32 = XXH_rotl32(state->v[0], 1) + XXH_rotl32(state->v[1], 7) + + XXH_rotl32(state->v[2], 12) + XXH_rotl32(state->v[3], 18); } else { - h32 = state->v3 /* == seed */ + XXH_PRIME32_5; + h32 = state->v[2] /* == seed */ + XXH_PRIME32_5; } @@ -2454,32 +3506,21 @@ XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t *state) { } + #endif /* !XXH_NO_STREAM */ + /******* Canonical representation *******/ -/*! - * @ingroup xxh32_family - * The default return values from XXH functions are unsigned 32 and 64 bit - * integers. - * - * The canonical representation uses big endian convention, the same convention - * as human-readable numbers (large digits first). - * - * This way, hash values can be written into a file or buffer, remaining - * comparable across different systems. - * - * The following functions allow transformation of hash values to and from their - * canonical format. - */ +/*! @ingroup XXH32_family */ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst, XXH32_hash_t hash) { XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); - memcpy(dst, &hash, sizeof(*dst)); + XXH_memcpy(dst, &hash, sizeof(*dst)); } -/*! @ingroup xxh32_family */ +/*! 
@ingroup XXH32_family */ XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t *src) { @@ -2524,10 +3565,11 @@ static xxh_u64 XXH_read64(const void *memPtr) { #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1)) /* - * __pack instructions are safer, but compiler specific, hence potentially - * problematic for some compilers. - * - * Currently only defined for GCC and ICC. + * __attribute__((aligned(1))) is supported by gcc and clang. Originally + * the documentation claimed that it only increased the alignment, but + * actually it can decrease it on gcc, clang, and icc: + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502, + * https://gcc.godbolt.org/z/xYez1j67Y. */ #ifdef XXH_OLD_NAMES typedef union { @@ -2540,14 +3582,8 @@ typedef union { #endif static xxh_u64 XXH_read64(const void *ptr) { - typedef union { - - xxh_u32 u32; - xxh_u64 u64; - - } __attribute__((packed)) xxh_unalign64; - - return ((const xxh_unalign64 *)ptr)->u64; + typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64; + return *((const xxh_unalign64 *)ptr); } @@ -2555,12 +3591,13 @@ static xxh_u64 XXH_read64(const void *ptr) { /* * Portable and safe solution. Generally efficient. - * see: https://stackoverflow.com/a/32095106/646947 + * see: + * https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html */ static xxh_u64 XXH_read64(const void *memPtr) { xxh_u64 val; - memcpy(&val, memPtr, sizeof(val)); + XXH_memcpy(&val, memPtr, sizeof(val)); return val; } @@ -2639,8 +3676,10 @@ XXH_FORCE_INLINE xxh_u64 XXH_readLE64_align(const void *ptr, /******* xxh64 *******/ /*! * @} - * @defgroup xxh64_impl XXH64 implementation + * @defgroup XXH64_impl XXH64 implementation * @ingroup impl + * + * Details on the XXH64 implementation. * @{ */ @@ -2674,6 +3713,7 @@ XXH_FORCE_INLINE xxh_u64 XXH_readLE64_align(const void *ptr, #define PRIME64_5 XXH_PRIME64_5 #endif +/*! @copydoc XXH32_round */ static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) { acc += input * XXH_PRIME64_2; @@ -2692,51 +3732,68 @@ static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) { } -static xxh_u64 XXH64_avalanche(xxh_u64 h64) { +/*! @copydoc XXH32_avalanche */ +static xxh_u64 XXH64_avalanche(xxh_u64 hash) { - h64 ^= h64 >> 33; - h64 *= XXH_PRIME64_2; - h64 ^= h64 >> 29; - h64 *= XXH_PRIME64_3; - h64 ^= h64 >> 32; - return h64; + hash ^= hash >> 33; + hash *= XXH_PRIME64_2; + hash ^= hash >> 29; + hash *= XXH_PRIME64_3; + hash ^= hash >> 32; + return hash; } #define XXH_get64bits(p) XXH_readLE64_align(p, align) -static xxh_u64 XXH64_finalize(xxh_u64 h64, const xxh_u8 *ptr, size_t len, - XXH_alignment align) { +/*! + * @internal + * @brief Processes the last 0-31 bytes of @p ptr. + * + * There may be up to 31 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param hash The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 32. + * @param align Whether @p ptr is aligned. + * @return The finalized hash + * @see XXH32_finalize(). 
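+ * @note Up to 31 bytes may remain; they are folded in with 8-byte rounds,
+ *       then an optional 4-byte step, then single bytes.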
+ */ +static XXH_PUREF xxh_u64 XXH64_finalize(xxh_u64 hash, const xxh_u8 *ptr, + size_t len, XXH_alignment align) { + if (ptr == NULL) XXH_ASSERT(len == 0); len &= 31; while (len >= 8) { xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); ptr += 8; - h64 ^= k1; - h64 = XXH_rotl64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4; + hash ^= k1; + hash = XXH_rotl64(hash, 27) * XXH_PRIME64_1 + XXH_PRIME64_4; len -= 8; } if (len >= 4) { - h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; + hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; ptr += 4; - h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; + hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; len -= 4; } while (len > 0) { - h64 ^= (*ptr++) * XXH_PRIME64_5; - h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1; + hash ^= (*ptr++) * XXH_PRIME64_5; + hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1; --len; } - return XXH64_avalanche(h64); + return XXH64_avalanche(hash); } @@ -2750,26 +3807,25 @@ static xxh_u64 XXH64_finalize(xxh_u64 h64, const xxh_u8 *ptr, size_t len, #undef XXH_PROCESS8_64 #endif -XXH_FORCE_INLINE xxh_u64 XXH64_endian_align(const xxh_u8 *input, size_t len, - xxh_u64 seed, XXH_alignment align) { - - const xxh_u8 *bEnd = input ? input + len : NULL; - xxh_u64 h64; - - #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \ - (XXH_ACCEPT_NULL_INPUT_POINTER >= 1) - if (input == NULL) { - - len = 0; - bEnd = input = (const xxh_u8 *)(size_t)32; - - } +/*! + * @internal + * @brief The implementation for @ref XXH64(). + * + * @param input , len , seed Directly passed from @ref XXH64(). + * @param align Whether @p input is aligned. + * @return The calculated hash. + */ +XXH_FORCE_INLINE XXH_PUREF xxh_u64 XXH64_endian_align(const xxh_u8 *input, + size_t len, xxh_u64 seed, + XXH_alignment align) { - #endif + xxh_u64 h64; + if (input == NULL) XXH_ASSERT(len == 0); if (len >= 32) { - const xxh_u8 *const limit = bEnd - 32; + const xxh_u8 *const bEnd = input + len; + const xxh_u8 *const limit = bEnd - 31; xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; xxh_u64 v2 = seed + XXH_PRIME64_2; xxh_u64 v3 = seed + 0; @@ -2786,7 +3842,7 @@ XXH_FORCE_INLINE xxh_u64 XXH64_endian_align(const xxh_u8 *input, size_t len, v4 = XXH64_round(v4, XXH_get64bits(input)); input += 8; - } while (input <= limit); + } while (input < limit); h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); @@ -2807,16 +3863,17 @@ XXH_FORCE_INLINE xxh_u64 XXH64_endian_align(const xxh_u8 *input, size_t len, } -/*! @ingroup xxh64_family */ -XXH_PUBLIC_API XXH64_hash_t XXH64(const void *input, size_t len, +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64(XXH_NOESCAPE const void *input, size_t len, XXH64_hash_t seed) { - #if 0 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH64_state_t state; - XXH64_reset(&state, seed); - XXH64_update(&state, (const xxh_u8*)input, len); - return XXH64_digest(&state); + #if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2 + /* Simple version, good for code maintenance, but unfortunately slow for small + * inputs */ + XXH64_state_t state; + XXH64_reset(&state, seed); + XXH64_update(&state, (const xxh_u8 *)input, len); + return XXH64_digest(&state); #else if (XXH_FORCE_ALIGN_CHECK) { @@ -2834,16 +3891,16 @@ XXH_PUBLIC_API XXH64_hash_t XXH64(const void *input, size_t len, } -/******* Hash Streaming *******/ - -/*! @ingroup xxh64_family*/ + /******* Hash Streaming *******/ + #ifndef XXH_NO_STREAM +/*! 
@ingroup XXH64_family*/ XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void) { return (XXH64_state_t *)XXH_malloc(sizeof(XXH64_state_t)); } -/*! @ingroup xxh64_family */ +/*! @ingroup XXH64_family */ XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr) { XXH_free(statePtr); @@ -2851,42 +3908,39 @@ XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr) { } -/*! @ingroup xxh64_family */ -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dstState, - const XXH64_state_t *srcState) { +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t *dstState, + const XXH64_state_t *srcState) { - memcpy(dstState, srcState, sizeof(*dstState)); + XXH_memcpy(dstState, srcState, sizeof(*dstState)); } -/*! @ingroup xxh64_family */ -XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr, - XXH64_hash_t seed) { +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t *statePtr, + XXH64_hash_t seed) { - XXH64_state_t state; /* use a local state to memcpy() in order to avoid - strict-aliasing warnings */ - memset(&state, 0, sizeof(state)); - state.v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; - state.v2 = seed + XXH_PRIME64_2; - state.v3 = seed + 0; - state.v4 = seed - XXH_PRIME64_1; - /* do not write into reserved64, might be removed in a future version */ - memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64)); + XXH_ASSERT(statePtr != NULL); + memset(statePtr, 0, sizeof(*statePtr)); + statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2; + statePtr->v[1] = seed + XXH_PRIME64_2; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - XXH_PRIME64_1; return XXH_OK; } -/*! @ingroup xxh64_family */ -XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *state, - const void *input, size_t len) { +/*! 
@ingroup XXH64_family */ +XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH_NOESCAPE XXH64_state_t *state, + XXH_NOESCAPE const void *input, + size_t len) { - if (input == NULL) - #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \ - (XXH_ACCEPT_NULL_INPUT_POINTER >= 1) + if (input == NULL) { + + XXH_ASSERT(len == 0); return XXH_OK; - #else - return XXH_ERROR; - #endif + + } { @@ -2905,10 +3959,10 @@ XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *state, if (state->memsize) { /* tmp buffer is full */ XXH_memcpy(((xxh_u8 *)state->mem64) + state->memsize, input, 32 - state->memsize); - state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64 + 0)); - state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64 + 1)); - state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64 + 2)); - state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64 + 3)); + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64 + 0)); + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64 + 1)); + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64 + 2)); + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64 + 3)); p += 32 - state->memsize; state->memsize = 0; @@ -2917,29 +3971,20 @@ XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *state, if (p + 32 <= bEnd) { const xxh_u8 *const limit = bEnd - 32; - xxh_u64 v1 = state->v1; - xxh_u64 v2 = state->v2; - xxh_u64 v3 = state->v3; - xxh_u64 v4 = state->v4; do { - v1 = XXH64_round(v1, XXH_readLE64(p)); + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p += 8; - v2 = XXH64_round(v2, XXH_readLE64(p)); + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p += 8; - v3 = XXH64_round(v3, XXH_readLE64(p)); + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p += 8; - v4 = XXH64_round(v4, XXH_readLE64(p)); + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p += 8; } while (p <= limit); - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } if (p < bEnd) { @@ -2955,28 +4000,24 @@ XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *state, } -/*! @ingroup xxh64_family */ -XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t *state) { +/*! @ingroup XXH64_family */ +XXH_PUBLIC_API XXH64_hash_t +XXH64_digest(XXH_NOESCAPE const XXH64_state_t *state) { xxh_u64 h64; if (state->total_len >= 32) { - xxh_u64 const v1 = state->v1; - xxh_u64 const v2 = state->v2; - xxh_u64 const v3 = state->v3; - xxh_u64 const v4 = state->v4; - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + - XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); + h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18); + h64 = XXH64_mergeRound(h64, state->v[0]); + h64 = XXH64_mergeRound(h64, state->v[1]); + h64 = XXH64_mergeRound(h64, state->v[2]); + h64 = XXH64_mergeRound(h64, state->v[3]); } else { - h64 = state->v3 /*seed*/ + XXH_PRIME64_5; + h64 = state->v[2] /*seed*/ + XXH_PRIME64_5; } @@ -2987,21 +4028,23 @@ XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t *state) { } + #endif /* !XXH_NO_STREAM */ + /******* Canonical representation *******/ -/*! @ingroup xxh64_family */ -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst, - XXH64_hash_t hash) { +/*! 
@ingroup XXH64_family */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t *dst, + XXH64_hash_t hash) { XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); - memcpy(dst, &hash, sizeof(*dst)); + XXH_memcpy(dst, &hash, sizeof(*dst)); } -/*! @ingroup xxh64_family */ +/*! @ingroup XXH64_family */ XXH_PUBLIC_API XXH64_hash_t -XXH64_hashFromCanonical(const XXH64_canonical_t *src) { +XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t *src) { return XXH_readBE64(src); @@ -3015,7 +4058,7 @@ XXH64_hashFromCanonical(const XXH64_canonical_t *src) { ************************************************************************ */ /*! * @} - * @defgroup xxh3_impl XXH3 implementation + * @defgroup XXH3_impl XXH3 implementation * @ingroup impl * @{ @@ -3030,9 +4073,16 @@ XXH64_hashFromCanonical(const XXH64_canonical_t *src) { #elif defined(__STDC_VERSION__) && \ __STDC_VERSION__ >= 199901L /* >= C99 */ #define XXH_RESTRICT restrict + #elif (defined(__GNUC__) && \ + ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) || \ + (defined(__clang__)) || (defined(_MSC_VER) && (_MSC_VER >= 1400)) || \ + (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300)) + /* + * There are a LOT more compilers that recognize __restrict but this + * covers the major ones. + */ + #define XXH_RESTRICT __restrict #else - /* Note: it might be useful to define __restrict or __restrict__ for - * some C++ compilers */ #define XXH_RESTRICT /* disable */ #endif @@ -3046,17 +4096,38 @@ XXH64_hashFromCanonical(const XXH64_canonical_t *src) { #define XXH_unlikely(x) (x) #endif - #if defined(__GNUC__) - #if defined(__AVX2__) - #include <immintrin.h> - #elif defined(__SSE2__) - #include <emmintrin.h> - #elif defined(__ARM_NEON__) || defined(__ARM_NEON) + #ifndef XXH_HAS_INCLUDE + #ifdef __has_include + /* + * Not defined as XXH_HAS_INCLUDE(x) (function-like) because + * this causes segfaults in Apple Clang 4.2 (on Mac OS X 10.7 Lion) + */ + #define XXH_HAS_INCLUDE __has_include + #else + #define XXH_HAS_INCLUDE(x) 0 + #endif + #endif + + #if defined(__GNUC__) || defined(__clang__) + #if defined(__ARM_FEATURE_SVE) + #include <arm_sve.h> + #endif + #if defined(__ARM_NEON__) || defined(__ARM_NEON) || \ + (defined(_M_ARM) && _M_ARM >= 7) || defined(_M_ARM64) || \ + defined(_M_ARM64EC) || \ + (defined(__wasm_simd128__) && \ + XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */ #define inline __inline__ /* circumvent a clang bug */ #include <arm_neon.h> #undef inline + #elif defined(__AVX2__) + #include <immintrin.h> + #elif defined(__SSE2__) + #include <emmintrin.h> #endif - #elif defined(_MSC_VER) + #endif + + #if defined(_MSC_VER) #include <intrin.h> #endif @@ -3161,7 +4232,7 @@ XXH64_hashFromCanonical(const XXH64_canonical_t *src) { * Note that these are actually implemented as macros. * * If this is not defined, it is detected automatically. - * @ref XXH_X86DISPATCH overrides this. + * internal macro XXH_X86DISPATCH overrides this. */ enum XXH_VECTOR_TYPE /* fake enum */ { @@ -3174,8 +4245,13 @@ enum XXH_VECTOR_TYPE /* fake enum */ { */ XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */ XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */ - XXH_NEON = 4, /*!< NEON for most ARMv7-A and all AArch64 */ + XXH_NEON = 4, /*!< + * NEON for most ARMv7-A, all AArch64, and WASM SIMD128 + * via the SIMDeverywhere polyfill provided with the + * Emscripten SDK. 
+ */ XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */ + XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */ }; @@ -3183,7 +4259,7 @@ enum XXH_VECTOR_TYPE /* fake enum */ { * @ingroup tuning * @brief Selects the minimum alignment for XXH3's accumulators. * - * When using SIMD, this should match the alignment reqired for said + * When using SIMD, this should match the alignment required for said * vector type, so, for example, 32 for AVX2. * * Default: Auto detected. @@ -3199,23 +4275,30 @@ enum XXH_VECTOR_TYPE /* fake enum */ { #define XXH_AVX512 3 #define XXH_NEON 4 #define XXH_VSX 5 + #define XXH_SVE 6 #endif #ifndef XXH_VECTOR /* can be defined on command line */ - #if defined(__AVX512F__) + #if defined(__ARM_FEATURE_SVE) + #define XXH_VECTOR XXH_SVE + #elif (defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \ + || defined(_M_ARM) || defined(_M_ARM64) || \ + defined(_M_ARM64EC) /* msvc */ \ + || (defined(__wasm_simd128__) && \ + XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \ + ) && \ + (defined(_WIN32) || \ + defined(__LITTLE_ENDIAN__) /* little endian only */ \ + || (defined(__BYTE_ORDER__) && \ + __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) + #define XXH_VECTOR XXH_NEON + #elif defined(__AVX512F__) #define XXH_VECTOR XXH_AVX512 #elif defined(__AVX2__) #define XXH_VECTOR XXH_AVX2 #elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || \ (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) #define XXH_VECTOR XXH_SSE2 - #elif defined(__GNUC__) /* msvc support maybe later */ \ - && (defined(__ARM_NEON__) || defined(__ARM_NEON)) && \ - (defined( \ - __LITTLE_ENDIAN__) /* We only support little endian NEON */ \ - || (defined(__BYTE_ORDER__) && \ - __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) - #define XXH_VECTOR XXH_NEON #elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) || \ (defined(__s390x__) && defined(__VEC__)) && \ defined(__GNUC__) /* TODO: IBM XL */ @@ -3225,6 +4308,17 @@ enum XXH_VECTOR_TYPE /* fake enum */ { #endif #endif + /* __ARM_FEATURE_SVE is only supported by GCC & Clang. */ + #if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE) + #ifdef _MSC_VER + #pragma warning(once : 4606) + #else + #warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead." + #endif + #undef XXH_VECTOR + #define XXH_VECTOR XXH_SCALAR + #endif + /* * Controls the alignment of the accumulator, * for compatibility with aligned vector loads, which are usually faster. @@ -3244,16 +4338,26 @@ enum XXH_VECTOR_TYPE /* fake enum */ { #define XXH_ACC_ALIGN 16 #elif XXH_VECTOR == XXH_AVX512 /* avx512 */ #define XXH_ACC_ALIGN 64 + #elif XXH_VECTOR == XXH_SVE /* sve */ + #define XXH_ACC_ALIGN 64 #endif #endif #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 || \ XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512 #define XXH_SEC_ALIGN XXH_ACC_ALIGN + #elif XXH_VECTOR == XXH_SVE + #define XXH_SEC_ALIGN XXH_ACC_ALIGN #else #define XXH_SEC_ALIGN 8 #endif + #if defined(__GNUC__) || defined(__clang__) + #define XXH_ALIASING __attribute__((may_alias)) + #else + #define XXH_ALIASING /* nothing */ + #endif + /* * UGLY HACK: * GCC usually generates the best code with -O3 for xxHash. 
@@ -3278,126 +4382,153 @@ enum XXH_VECTOR_TYPE /* fake enum */ { #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ && defined(__OPTIMIZE__) && \ - !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */ + XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */ #pragma GCC push_options #pragma GCC optimize("-O2") #endif #if XXH_VECTOR == XXH_NEON - /* - * NEON's setup for vmlal_u32 is a little more complicated than it is on - * SSE2, AVX2, and VSX. - * - * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an - * upcast. - * - * To do the same operation, the 128-bit 'Q' register needs to be split - * into two 64-bit 'D' registers, performing this operation:: - * - * [ a | b ] | - * '---------. .--------' | | x | - * | .---------' '--------. | - * [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[ a >> 32 | b >> 32 - * ] - * - * Due to significant changes in aarch64, the fastest method for aarch64 - * is completely different than the fastest method for ARMv7-A. - * - * ARMv7-A treats D registers as unions overlaying Q registers, so - * modifying D11 will modify the high half of Q5. This is similar to how - * modifying AH will only affect bits 8-15 of AX on x86. - * - * VZIP takes two registers, and puts even lanes in one register and odd - * lanes in the other. + +/* + * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC + * -O3 optimizes out the entire hashLong loop because of the aliasing violation. + * + * However, GCC is also inefficient at load-store optimization with vld1q/vst1q, + * so the only option is to mark it as aliasing. + */ +typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING; + + /*! + * @internal + * @brief `vld1q_u64` but faster and alignment-safe. * - * On ARMv7-A, this strangely modifies both parameters in place instead - * of taking the usual 3-operand form. + * On AArch64, unaligned access is always safe, but on ARMv7-a, it is + * only *conditionally* safe (`vld1` has an alignment bit like + * `movdq[ua]` in x86). * - * Therefore, if we want to do this, we can simply use a D-form VZIP.32 - * on the lower and upper halves of the Q register to end up with the - * high and low halves where we want - all in one instruction. + * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so + * it prohibits load-store optimizations. Therefore, a direct + * dereference is used. * - * vzip.32 d10, d11 @ d10 = { d10[0], d11[0] }; d11 = { + * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a + * safe unaligned load. + */ + #if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) +XXH_FORCE_INLINE uint64x2_t +XXH_vld1q_u64(void const *ptr) /* silence -Wcast-align */ +{ - * d10[1], d11[1] } + return *(xxh_aliasing_uint64x2_t const *)ptr; + +} + + #else +XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const *ptr) { + + return vreinterpretq_u64_u8(vld1q_u8((uint8_t const *)ptr)); + +} + + #endif + + /*! + * @internal + * @brief `vmlal_u32` on low and high halves of a vector. * - * Unfortunately we need inline assembly for this: Instructions - * modifying two registers at once is not possible in GCC or Clang's IR, - * and they have to create a copy. + * This is a workaround for AArch64 GCC < 11 which implemented + * arm_neon.h with inline assembly and were therefore incapable of + * merging the `vget_{low, high}_u32` with `vmlal_u32`. 
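A scalar model of what the XXH_vmlal_{low,high}_u32 helpers introduced here compute per lane (a sketch, not part of the patch): NEON's UMLAL widens two 32-bit lanes to 64 bits, multiplies, and accumulates.

    #include <stdint.h>

    /* One lane of vmlal_u32: 32x32 -> 64 widening multiply-accumulate. */
    static uint64_t umlal_lane(uint64_t acc, uint32_t a, uint32_t b) {
      return acc + (uint64_t)a * (uint64_t)b;
    }

The "low" variant feeds lanes 0-1 of each uint32x4_t through this operation; the "high" variant feeds lanes 2-3.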
+ */
+  #if defined(__aarch64__) && defined(__GNUC__) && \
+      !defined(__clang__) && __GNUC__ < 11
+XXH_FORCE_INLINE uint64x2_t XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs,
+                                              uint32x4_t rhs) {
+
+  /* Inline assembly is the only way */
+  __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w"(acc) : "w"(lhs), "w"(rhs));
+  return acc;
+
+}
+
+XXH_FORCE_INLINE uint64x2_t XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs,
+                                               uint32x4_t rhs) {
+
+  /* This intrinsic works as expected */
+  return vmlal_high_u32(acc, lhs, rhs);
+
+}
+
+  #else
+/* Portable intrinsic versions */
+XXH_FORCE_INLINE uint64x2_t XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs,
+                                              uint32x4_t rhs) {
+
+  return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs));
+
+}
+
+/*! @copydoc XXH_vmlal_low_u32
+ * Assume the compiler converts this to vmlal_high_u32 on aarch64 */
+XXH_FORCE_INLINE uint64x2_t XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs,
+                                               uint32x4_t rhs) {
+
+  return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs));
+
+}
+
+  #endif
+
+  /*!
+   * @ingroup tuning
+   * @brief Controls the NEON to scalar ratio for XXH3
    *
+   * This can be set to 2, 4, 6, or 8.
    *
+   * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used.
    *
+   * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but
+   * only 2 of those can be NEON. If you are only using NEON instructions,
+   * you are only using 2/3 of the CPU bandwidth.
    *
+   * This is even more noticeable on the more advanced cores like the
+   * Cortex-A76 which can dispatch 8 micro-ops per cycle, but still only 2
+   * NEON micro-ops at once.
    *
+   * Therefore, to make the most out of the pipeline, it is beneficial to
+   * run 6 NEON lanes and 2 scalar lanes, which is chosen by default.
    *
+   * This does not apply to Apple processors or 32-bit processors, which
+   * run better with full NEON. These will default to 8. Additionally,
+   * size-optimized builds run 8 lanes.
    *
+   * This change benefits CPUs with large micro-op buffers without
+   * negatively affecting most other CPUs:
    *
+   * | Chipset               | Dispatch type       | NEON only | 6:2 hybrid | Diff. |
+   * |:----------------------|:--------------------|----------:|-----------:|------:|
+   * | Snapdragon 730 (A76)  | 2 NEON/8 micro-ops  |  8.8 GB/s |  10.1 GB/s |  ~16% |
+   * | Snapdragon 835 (A73)  | 2 NEON/3 micro-ops  |  5.1 GB/s |   5.3 GB/s |   ~5% |
+   * | Marvell PXA1928 (A53) | In-order dual-issue |  1.9 GB/s |   1.9 GB/s |    0% |
+   * | Apple M1              | 4 NEON/8 micro-ops  | 37.3 GB/s |  36.1 GB/s |  ~-3% |
    *
-   * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64
-   * (SHRN):
+   * It also seems to fix some bad codegen on GCC, making it almost as
+   * fast as clang.
    *
-   *      shrn v1.2s, v0.2d, #32  // v1 = (uint32x2_t)(v0 >> 32);
-   *      xtn  v0.2s, v0.2d       // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
+   * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of
+   * the lanes, meaning it effectively becomes a slower version of 4.
    *
-   * This is available on ARMv7-A, but is less efficient than a single
-   * VZIP.32.
+   * @see XXH3_accumulate_512_neon()
    */
-
-  /*!
-   * Function-like macro:
-   * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t
-   * &outHi)
-   * {
-   *     outLo = (uint32x2_t)(in & 0xFFFFFFFF);
-   *     outHi = (uint32x2_t)(in >> 32);
-   *     in = UNDEFINED;
-   * }
-   */
-    #if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
-        && defined(__GNUC__) && !defined(__aarch64__) && \
-        !defined(__arm64__)
-      #define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                          \
-        do {                                                                \
-                                                                            \
-          /* Undocumented GCC/Clang operand modifier: %e0 = lower D half,   \
-           * %f0 = upper D half */                                          \
-          /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 \
-           */                                                               \
-          /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 \
-           */                                                               \
-          __asm__("vzip.32 %e0, %f0" : "+w"(in));                           \
-          (outLo) = vget_low_u32(vreinterpretq_u32_u64(in));                \
-          (outHi) = vget_high_u32(vreinterpretq_u32_u64(in));               \
-                                                                            \
-        } while (0)
-
-    #else
-      #define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
-        do {                                       \
-                                                   \
-          (outLo) = vmovn_u64(in);                 \
-          (outHi) = vshrn_n_u64((in), 32);         \
-                                                   \
-        } while (0)
-
+  #ifndef XXH3_NEON_LANES
+    #if (defined(__aarch64__) || defined(__arm64__) || \
+         defined(_M_ARM64) || defined(_M_ARM64EC)) && \
+        !defined(__APPLE__) && XXH_SIZE_OPT <= 0
+      #define XXH3_NEON_LANES 6
+    #else
+      #define XXH3_NEON_LANES XXH_ACC_NB
+    #endif
   #endif

#endif                                             /* XXH_VECTOR == XXH_NEON */
@@ -3410,28 +4541,43 @@ enum XXH_VECTOR_TYPE /* fake enum */ {
 * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
 */
#if XXH_VECTOR == XXH_VSX
+  /* Annoyingly, these headers _may_ define three macros: `bool`,
+   * `vector`, and `pixel`. This is a problem for obvious reasons.
+   *
+   * These keywords are unnecessary; the spec literally says they are
+   * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
+   * after including the header.
+   *
+   * We use pragma push_macro/pop_macro to keep the namespace clean. */
+  #pragma push_macro("bool")
+  #pragma push_macro("vector")
+  #pragma push_macro("pixel")
+  /* silence potential macro redefined warnings */
+  #undef bool
+  #undef vector
+  #undef pixel
+
  #if defined(__s390x__)
    #include <s390intrin.h>
  #else
-    /* gcc's altivec.h can have the unwanted consequence to
-     * unconditionally #define bool, vector, and pixel keywords, with bad
-     * consequences for programs already using these keywords for other
-     * purposes. The paragraph defining these macros is skipped when
-     * __APPLE_ALTIVEC__ is defined.
-     * __APPLE_ALTIVEC__ is _generally_ defined automatically by the
-     * compiler, but it seems that, in some cases, it isn't.
Force the - * build macro to be defined, so that keywords are not altered. - */ - #if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__) - #define __APPLE_ALTIVEC__ - #endif #include <altivec.h> #endif + /* Restore the original macro values, if applicable. */ + #pragma pop_macro("pixel") + #pragma pop_macro("vector") + #pragma pop_macro("bool") + typedef __vector unsigned long long xxh_u64x2; typedef __vector unsigned char xxh_u8x16; typedef __vector unsigned xxh_u32x4; +/* + * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing + * issue. + */ +typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING; + #ifndef XXH_VSX_BE #if defined(__BIG_ENDIAN__) || \ (defined(__BYTE_ORDER__) && \ @@ -3472,7 +4618,7 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) { XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) { xxh_u64x2 ret; - memcpy(&ret, ptr, sizeof(xxh_u64x2)); + XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2)); #if XXH_VSX_BE ret = XXH_vec_revb(ret); #endif @@ -3491,9 +4637,12 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) { /* s390x is always big endian, no issue on this platform */ #define XXH_vec_mulo vec_mulo #define XXH_vec_mule vec_mule - #elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) - /* Clang has a better way to control this, we can just use the builtin - * which doesn't swap. */ + #elif defined(__clang__) && \ + XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__) + /* Clang has a better way to control this, we can just use the builtin + * which doesn't swap. */ + /* The IBM XL Compiler (which defined __clang__) only implements the + * vec_* operations */ #define XXH_vec_mulo __builtin_altivec_vmulouw #define XXH_vec_mule __builtin_altivec_vmuleuw #else @@ -3519,14 +4668,33 @@ XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) { #endif /* XXH_vec_mulo, XXH_vec_mule */ #endif /* XXH_VECTOR == XXH_VSX */ + #if XXH_VECTOR == XXH_SVE + #define ACCRND(acc, offset) \ + do { \ + \ + svuint64_t input_vec = svld1_u64(mask, xinput + offset); \ + svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \ + svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \ + svuint64_t swapped = svtbl_u64(input_vec, kSwap); \ + svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \ + svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \ + svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \ + acc = svadd_u64_x(mask, acc, mul); \ + \ + } while (0) + + #endif /* XXH_VECTOR == XXH_SVE */ + /* prefetch * can be disabled, by declaring XXH_NO_PREFETCH build macro */ #if defined(XXH_NO_PREFETCH) #define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ #else - #if defined(_MSC_VER) && \ - (defined(_M_X64) || \ - defined( \ + #if XXH_SIZE_OPT >= 1 + #define XXH_PREFETCH(ptr) (void)(ptr) + #elif defined(_MSC_VER) && \ + (defined(_M_X64) || \ + defined( \ _M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */ #include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ #define XXH_PREFETCH(ptr) \ @@ -3573,6 +4741,15 @@ static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { }; +static const xxh_u64 PRIME_MX1 = + 0x165667919E3779F9ULL; /*!< + 0b0001011001010110011001111001000110011110001101110111100111111001 + */ +static const xxh_u64 PRIME_MX2 = + 0x9FB21C651E98DF25ULL; /*!< + 0b1001111110110010000111000110010100011110100110001101111100100101 + */ + #ifdef XXH_OLD_NAMES #define kSecret XXH3_kSecret #endif @@ -3601,18 +4778,17 @@ XXH_FORCE_INLINE xxh_u64 
XXH_mult32to64(xxh_u64 x, xxh_u64 y) { } #elif defined(_MSC_VER) && defined(_M_IX86) - #include <intrin.h> #define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) #else - /* - * Downcast + upcast is usually better than masking on older compilers - * like GCC 4.2 (especially 32-bit ones), all without affecting newer - * compilers. - * - * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both - * operands and perform a full 64x64 multiply -- entirely redundant on - * 32-bit. - */ + /* + * Downcast + upcast is usually better than masking on older compilers + * like GCC 4.2 (especially 32-bit ones), all without affecting newer + * compilers. + * + * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both + * operands and perform a full 64x64 multiply -- entirely redundant on + * 32-bit. + */ #define XXH_mult32to64(x, y) \ ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) #endif @@ -3623,7 +4799,7 @@ XXH_FORCE_INLINE xxh_u64 XXH_mult32to64(xxh_u64 x, xxh_u64 y) { * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar * version. * - * @param lhs, rhs The 64-bit integers to be multiplied + * @param lhs , rhs The 64-bit integers to be multiplied * @return The 128-bit result represented in an @ref XXH128_hash_t. */ static XXH128_hash_t XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) { @@ -3643,8 +4819,8 @@ static XXH128_hash_t XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) { * In that case it is best to use the portable one. * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 */ - #if defined(__GNUC__) && !defined(__wasm__) && \ - defined(__SIZEOF_INT128__) || \ + #if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) && \ + defined(__SIZEOF_INT128__) || \ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs; @@ -3661,7 +4837,7 @@ static XXH128_hash_t XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) { * * This compiles to single operand MUL on x64. */ - #elif defined(_M_X64) || defined(_M_IA64) + #elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC) #ifndef _MSC_VER #pragma intrinsic(_umul128) @@ -3673,6 +4849,22 @@ static XXH128_hash_t XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) { r128.high64 = product_high; return r128; + /* + * MSVC for ARM64's __umulh method. + * + * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t + * method. + */ + #elif defined(_M_ARM64) || defined(_M_ARM64EC) + + #ifndef _MSC_VER + #pragma intrinsic(__umulh) + #endif + XXH128_hash_t r128; + r128.low64 = lhs * rhs; + r128.high64 = __umulh(lhs, rhs); + return r128; + #else /* * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. @@ -3744,7 +4936,7 @@ static XXH128_hash_t XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) { * around by value. This will hopefully inline the multiply, but we don't force * it. * - * @param lhs, rhs The 64-bit integers to multiply + * @param lhs , rhs The 64-bit integers to multiply * @return The low 64 bits of the product XOR'd by the high 64 bits. * @see XXH_mult64to128() */ @@ -3756,7 +4948,7 @@ static xxh_u64 XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) { } /*! Seems to produce slightly better code on GCC for some reason. 
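Both formulations discussed in the comment above compute the same 32x32 -> 64 product; only the codegen differs on older 32-bit compilers. An illustrative side-by-side (not part of the patch):

    #include <stdint.h>

    static uint64_t mult32to64_cast(uint64_t x, uint64_t y) {
      return (uint64_t)(uint32_t)x * (uint64_t)(uint32_t)y; /* downcast + upcast */
    }

    static uint64_t mult32to64_mask(uint64_t x, uint64_t y) {
      return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); /* may emit a full 64x64 mul */
    }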
*/ -XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) { +XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) { XXH_ASSERT(0 <= shift && shift < 64); return v64 ^ (v64 >> shift); @@ -3770,7 +4962,7 @@ XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) { static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) { h64 = XXH_xorshift64(h64, 37); - h64 *= 0x165667919E3779F9ULL; + h64 *= PRIME_MX1; h64 = XXH_xorshift64(h64, 32); return h64; @@ -3785,9 +4977,9 @@ static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) { /* this mix is inspired by Pelle Evensen's rrmxmx */ h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); - h64 *= 0x9FB21C651E98DF25ULL; + h64 *= PRIME_MX2; h64 ^= (h64 >> 35) + len; - h64 *= 0x9FB21C651E98DF25ULL; + h64 *= PRIME_MX2; return XXH_xorshift64(h64, 28); } @@ -3826,9 +5018,10 @@ static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) { * * This adds an extra layer of strength for custom secrets. */ -XXH_FORCE_INLINE XXH64_hash_t XXH3_len_1to3_64b(const xxh_u8 *input, size_t len, - const xxh_u8 *secret, - XXH64_hash_t seed) { +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_1to3_64b(const xxh_u8 *input, + size_t len, + const xxh_u8 *secret, + XXH64_hash_t seed) { XXH_ASSERT(input != NULL); XXH_ASSERT(1 <= len && len <= 3); @@ -3854,9 +5047,10 @@ XXH_FORCE_INLINE XXH64_hash_t XXH3_len_1to3_64b(const xxh_u8 *input, size_t len, } -XXH_FORCE_INLINE XXH64_hash_t XXH3_len_4to8_64b(const xxh_u8 *input, size_t len, - const xxh_u8 *secret, - XXH64_hash_t seed) { +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_4to8_64b(const xxh_u8 *input, + size_t len, + const xxh_u8 *secret, + XXH64_hash_t seed) { XXH_ASSERT(input != NULL); XXH_ASSERT(secret != NULL); @@ -3876,10 +5070,10 @@ XXH_FORCE_INLINE XXH64_hash_t XXH3_len_4to8_64b(const xxh_u8 *input, size_t len, } -XXH_FORCE_INLINE XXH64_hash_t XXH3_len_9to16_64b(const xxh_u8 *input, - size_t len, - const xxh_u8 *secret, - XXH64_hash_t seed) { +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_9to16_64b(const xxh_u8 *input, + size_t len, + const xxh_u8 *secret, + XXH64_hash_t seed) { XXH_ASSERT(input != NULL); XXH_ASSERT(secret != NULL); @@ -3900,10 +5094,10 @@ XXH_FORCE_INLINE XXH64_hash_t XXH3_len_9to16_64b(const xxh_u8 *input, } -XXH_FORCE_INLINE XXH64_hash_t XXH3_len_0to16_64b(const xxh_u8 *input, - size_t len, - const xxh_u8 *secret, - XXH64_hash_t seed) { +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_0to16_64b(const xxh_u8 *input, + size_t len, + const xxh_u8 *secret, + XXH64_hash_t seed) { XXH_ASSERT(len <= 16); { @@ -3983,7 +5177,7 @@ XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8 *XXH_RESTRICT input, } /* For mid range keys, XXH3 uses a Mum-hash variant. */ -XXH_FORCE_INLINE XXH64_hash_t XXH3_len_17to128_64b( +XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t XXH3_len_17to128_64b( const xxh_u8 *XXH_RESTRICT input, size_t len, const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) { @@ -3994,6 +5188,18 @@ XXH_FORCE_INLINE XXH64_hash_t XXH3_len_17to128_64b( { xxh_u64 acc = len * XXH_PRIME64_1; + #if XXH_SIZE_OPT >= 1 + /* Smaller and cleaner, but slightly slower. 
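For clarity, this is the multiply-fold that XXH3_mul128_fold64() implements, sketched via the __uint128_t path (GCC/Clang on 64-bit targets); the portable path in the patch assembles the same 128-bit product from four 32x32 partial products:

    #include <stdint.h>

    static uint64_t mul128_fold64_sketch(uint64_t lhs, uint64_t rhs) {
      __uint128_t product = (__uint128_t)lhs * (__uint128_t)rhs;
      return (uint64_t)product ^ (uint64_t)(product >> 64); /* low64 ^ high64 */
    }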
 */
+    unsigned int i = (unsigned int)(len - 1) / 32;
+    do {
+
+      acc += XXH3_mix16B(input + 16 * i, secret + 32 * i, seed);
+      acc +=
+          XXH3_mix16B(input + len - 16 * (i + 1), secret + 32 * i + 16, seed);
+
+    } while (i-- != 0);
+
+  #else
    if (len > 32) {

      if (len > 64) {

@@ -4017,16 +5223,19 @@ XXH_FORCE_INLINE XXH64_hash_t XXH3_len_17to128_64b(
    acc += XXH3_mix16B(input + 0, secret + 0, seed);
    acc += XXH3_mix16B(input + len - 16, secret + 16, seed);
-
+  #endif
    return XXH3_avalanche(acc);

  }

}

+/*!
+ * @brief Maximum size of "short" key in bytes.
+ */
  #define XXH3_MIDSIZE_MAX 240

-XXH_NO_INLINE XXH64_hash_t XXH3_len_129to240_64b(
+XXH_NO_INLINE XXH_PUREF XXH64_hash_t XXH3_len_129to240_64b(
    const xxh_u8 *XXH_RESTRICT input, size_t len,
    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) {

@@ -4039,17 +5248,23 @@ XXH_NO_INLINE XXH64_hash_t XXH3_len_129to240_64b(

  {

-    xxh_u64 acc = len * XXH_PRIME64_1;
-    int const nbRounds = (int)len / 16;
-    int i;
+    xxh_u64            acc = len * XXH_PRIME64_1;
+    xxh_u64            acc_end;
+    unsigned int const nbRounds = (unsigned int)len / 16;
+    unsigned int       i;
+    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
    for (i = 0; i < 8; i++) {

      acc += XXH3_mix16B(input + (16 * i), secret + (16 * i), seed);

    }

-    acc = XXH3_avalanche(acc);
+    /* last bytes */
+    acc_end = XXH3_mix16B(
+        input + len - 16,
+        secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
    XXH_ASSERT(nbRounds >= 8);
+    acc = XXH3_avalanche(acc);
  #if defined(__clang__)                                /* Clang */ \
      && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
      && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
@@ -4077,17 +5292,18 @@ XXH_NO_INLINE XXH64_hash_t XXH3_len_129to240_64b(
  #endif
    for (i = 8; i < nbRounds; i++) {

-      acc +=
+      /*
+       * Prevents clang from unrolling the acc loop and interleaving with this
+       * one.
+       */
+      XXH_COMPILER_GUARD(acc);
+      acc_end +=
          XXH3_mix16B(input + (16 * i),
                      secret + (16 * (i - 8)) + XXH3_MIDSIZE_STARTOFFSET, seed);

    }

-    /* last bytes */
-    acc += XXH3_mix16B(input + len - 16,
-                       secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET,
-                       seed);
-
-    return XXH3_avalanche(acc);
+    return XXH3_avalanche(acc + acc_end);

  }

@@ -4105,10 +5321,49 @@ XXH_NO_INLINE XXH64_hash_t XXH3_len_129to240_64b(
    #define ACC_NB XXH_ACC_NB
  #endif

+  #ifndef XXH_PREFETCH_DIST
+    #ifdef __clang__
+      #define XXH_PREFETCH_DIST 320
+    #else
+      #if (XXH_VECTOR == XXH_AVX512)
+        #define XXH_PREFETCH_DIST 512
+      #else
+        #define XXH_PREFETCH_DIST 384
+      #endif
+    #endif                                                      /* __clang__ */
+  #endif                                               /* XXH_PREFETCH_DIST */
+
+  /*
+   * These macros are to generate an XXH3_accumulate() function.
+   * The two arguments select the name suffix and target attribute.
+   *
+   * The name of this symbol is XXH3_accumulate_<name>() and it calls
+   * XXH3_accumulate_512_<name>().
+   *
+   * It may be useful to hand implement this function if the compiler fails
+   * to optimize the inline function.
+ */ + #define XXH3_ACCUMULATE_TEMPLATE(name) \ + void XXH3_accumulate_##name( \ + xxh_u64 *XXH_RESTRICT acc, const xxh_u8 *XXH_RESTRICT input, \ + const xxh_u8 *XXH_RESTRICT secret, size_t nbStripes) { \ + \ + size_t n; \ + for (n = 0; n < nbStripes; n++) { \ + \ + const xxh_u8 *const in = input + n * XXH_STRIPE_LEN; \ + XXH_PREFETCH(in + XXH_PREFETCH_DIST); \ + XXH3_accumulate_512_##name(acc, in, \ + secret + n * XXH_SECRET_CONSUME_RATE); \ + \ + } \ + \ + } + XXH_FORCE_INLINE void XXH_writeLE64(void *dst, xxh_u64 v64) { if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64); - memcpy(dst, &v64, sizeof(v64)); + XXH_memcpy(dst, &v64, sizeof(v64)); } @@ -4176,8 +5431,7 @@ XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_accumulate_512_avx512( /* data_key = data_vec ^ key_vec; */ __m512i const data_key = _mm512_xor_si512(data_vec, key_vec); /* data_key_lo = data_key >> 32; */ - __m512i const data_key_lo = - _mm512_shuffle_epi32(data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1)); + __m512i const data_key_lo = _mm512_srli_epi64(data_key, 32); /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ __m512i const product = _mm512_mul_epu32(data_key, data_key_lo); /* xacc[0] += swap(data_vec); */ @@ -4191,31 +5445,34 @@ XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_accumulate_512_avx512( } -/* - * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. - * - * Multiplication isn't perfect, as explained by Google in HighwayHash: - * - * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to - * // varying degrees. In descending order of goodness, bytes - * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. - * // As expected, the upper and lower bytes are much worse. - * - * Source: - * https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 - * - * Since our algorithm uses a pseudorandom secret to add some variance into the - * mix, we don't need to (or want to) mix as often or as much as HighwayHash - * does. - * - * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid - * extraction. - * - * Both XXH3_64bits and XXH3_128bits use this subroutine. - */ +XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512) -XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_scrambleAcc_avx512( - void *XXH_RESTRICT acc, const void *XXH_RESTRICT secret) { + /* + * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. + * + * Multiplication isn't perfect, as explained by Google in HighwayHash: + * + * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to + * // varying degrees. In descending order of goodness, bytes + * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. + * // As expected, the upper and lower bytes are much worse. + * + * Source: + * https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 + * + * Since our algorithm uses a pseudorandom secret to add some variance into + * the mix, we don't need to (or want to) mix as often or as much as + * HighwayHash does. + * + * This isn't as tight as XXH3_accumulate, but still written in SIMD to + * avoid extraction. + * + * Both XXH3_64bits and XXH3_128bits use this subroutine. 
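To make the template concrete, XXH3_ACCUMULATE_TEMPLATE(scalar) from the macro above expands to roughly the following (names taken from the patch; the prefetch constant is elided):

    void XXH3_accumulate_scalar(xxh_u64 *XXH_RESTRICT acc,
                                const xxh_u8 *XXH_RESTRICT input,
                                const xxh_u8 *XXH_RESTRICT secret,
                                size_t nbStripes) {
      size_t n;
      for (n = 0; n < nbStripes; n++) {
        /* each stripe is XXH_STRIPE_LEN (64) bytes */
        const xxh_u8 *const in = input + n * XXH_STRIPE_LEN;
        XXH_PREFETCH(in + XXH_PREFETCH_DIST);
        XXH3_accumulate_512_scalar(acc, in,
                                   secret + n * XXH_SECRET_CONSUME_RATE);
      }
    }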
+ */ + + XXH_FORCE_INLINE XXH_TARGET_AVX512 + void XXH3_scrambleAcc_avx512(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT secret) { XXH_ASSERT((((size_t)acc) & 63) == 0); XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); @@ -4227,14 +5484,13 @@ XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_scrambleAcc_avx512( /* xacc[0] ^= (xacc[0] >> 47) */ __m512i const acc_vec = *xacc; __m512i const shifted = _mm512_srli_epi64(acc_vec, 47); - __m512i const data_vec = _mm512_xor_si512(acc_vec, shifted); /* xacc[0] ^= secret; */ __m512i const key_vec = _mm512_loadu_si512(secret); - __m512i const data_key = _mm512_xor_si512(data_vec, key_vec); + __m512i const data_key = _mm512_ternarylogic_epi32( + key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */); /* xacc[0] *= XXH_PRIME32_1; */ - __m512i const data_key_hi = - _mm512_shuffle_epi32(data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1)); + __m512i const data_key_hi = _mm512_srli_epi64(data_key, 32); __m512i const prod_lo = _mm512_mul_epu32(data_key, prime32); __m512i const prod_hi = _mm512_mul_epu32(data_key_hi, prime32); *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); @@ -4253,8 +5509,9 @@ XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_initCustomSecret_avx512( { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); - __m512i const seed = _mm512_mask_set1_epi64( - _mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64)); + __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64); + __m512i const seed = + _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos); const __m512i *const src = (const __m512i *)((const void *)XXH3_kSecret); __m512i *const dest = (__m512i *)customSecret; @@ -4263,18 +5520,7 @@ XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_initCustomSecret_avx512( XXH_ASSERT(((size_t)dest & 63) == 0); for (i = 0; i < nbRounds; ++i) { - /* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void - * const*', this will warn "discards 'const' qualifier". 
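The 0x96 immediate in the _mm512_ternarylogic_epi32 call above is the truth table of a three-way XOR, which lets one VPTERNLOGD replace two vector XORs. A scalar model (illustrative only):

    #include <stdint.h>

    /* bit i of 0x96 (0b10010110) equals a ^ b ^ c for the inputs
     * (a, b, c) encoded in the bits of i */
    static uint64_t xor3(uint64_t key, uint64_t acc, uint64_t shifted) {
      return key ^ acc ^ shifted;
    }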
*/ - union { - - const __m512i *cp; - void *p; - - } remote_const_void; - - remote_const_void.cp = src + i; - dest[i] = - _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed); + dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed); } @@ -4317,8 +5563,7 @@ XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_accumulate_512_avx2( /* data_key = data_vec ^ key_vec; */ __m256i const data_key = _mm256_xor_si256(data_vec, key_vec); /* data_key_lo = data_key >> 32; */ - __m256i const data_key_lo = - _mm256_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1)); + __m256i const data_key_lo = _mm256_srli_epi64(data_key, 32); /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ __m256i const product = _mm256_mul_epu32(data_key, data_key_lo); /* xacc[i] += swap(data_vec); */ @@ -4334,8 +5579,11 @@ XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_accumulate_512_avx2( } -XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_scrambleAcc_avx2( - void *XXH_RESTRICT acc, const void *XXH_RESTRICT secret) { +XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2) + + XXH_FORCE_INLINE XXH_TARGET_AVX2 + void XXH3_scrambleAcc_avx2(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT secret) { XXH_ASSERT((((size_t)acc) & 31) == 0); { @@ -4358,8 +5606,7 @@ XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_scrambleAcc_avx2( __m256i const data_key = _mm256_xor_si256(data_vec, key_vec); /* xacc[i] *= XXH_PRIME32_1; */ - __m256i const data_key_hi = - _mm256_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1)); + __m256i const data_key_hi = _mm256_srli_epi64(data_key, 32); __m256i const prod_lo = _mm256_mul_epu32(data_key, prime32); __m256i const prod_hi = _mm256_mul_epu32(data_key_hi, prime32); xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32)); @@ -4399,12 +5646,12 @@ XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2( XXH_ASSERT(((size_t)dest & 31) == 0); /* GCC -O2 need unroll loop manually */ - dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src + 0), seed); - dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src + 1), seed); - dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src + 2), seed); - dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src + 3), seed); - dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src + 4), seed); - dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src + 5), seed); + dest[0] = _mm256_add_epi64(_mm256_load_si256(src + 0), seed); + dest[1] = _mm256_add_epi64(_mm256_load_si256(src + 1), seed); + dest[2] = _mm256_add_epi64(_mm256_load_si256(src + 2), seed); + dest[3] = _mm256_add_epi64(_mm256_load_si256(src + 3), seed); + dest[4] = _mm256_add_epi64(_mm256_load_si256(src + 4), seed); + dest[5] = _mm256_add_epi64(_mm256_load_si256(src + 5), seed); } @@ -4462,8 +5709,11 @@ XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_accumulate_512_sse2( } -XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_scrambleAcc_sse2( - void *XXH_RESTRICT acc, const void *XXH_RESTRICT secret) { +XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2) + + XXH_FORCE_INLINE XXH_TARGET_SSE2 + void XXH3_scrambleAcc_sse2(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT secret) { XXH_ASSERT((((size_t)acc) & 15) == 0); { @@ -4546,40 +5796,173 @@ XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2( #if (XXH_VECTOR == XXH_NEON) +/* forward declarations for the scalar routines */ +XXH_FORCE_INLINE void XXH3_scalarRound(void *XXH_RESTRICT acc, + void const *XXH_RESTRICT input, + void const *XXH_RESTRICT secret, + size_t lane); + +XXH_FORCE_INLINE void 
XXH3_scalarScrambleRound(void *XXH_RESTRICT acc, + void const *XXH_RESTRICT secret, + size_t lane); + +/*! + * @internal + * @brief The bulk processing loop for NEON and WASM SIMD128. + * + * The NEON code path is actually partially scalar when running on AArch64. This + * is to optimize the pipelining and can have up to 15% speedup depending on the + * CPU, and it also mitigates some GCC codegen issues. + * + * @see XXH3_NEON_LANES for configuring this and details about this + * optimization. + * + * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit + * integers instead of the other platforms which mask full 64-bit vectors, + * so the setup is more complicated than just shifting right. + * + * Additionally, there is an optimization for 4 lanes at once noted below. + * + * Since, as stated, the most optimal amount of lanes for Cortexes is 6, + * there needs to be *three* versions of the accumulate operation used + * for the remaining 2 lanes. + * + * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics + * overlap nearly perfectly. + */ + XXH_FORCE_INLINE void XXH3_accumulate_512_neon( void *XXH_RESTRICT acc, const void *XXH_RESTRICT input, const void *XXH_RESTRICT secret) { XXH_ASSERT((((size_t)acc) & 15) == 0); - { - - uint64x2_t *const xacc = (uint64x2_t *)acc; + XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && + XXH3_NEON_LANES % 2 == 0); + { /* GCC for darwin arm64 does not like aliasing here */ + xxh_aliasing_uint64x2_t *const xacc = (xxh_aliasing_uint64x2_t *)acc; /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */ - uint8_t const *const xinput = (const uint8_t *)input; - uint8_t const *const xsecret = (const uint8_t *)secret; + uint8_t const *xinput = (const uint8_t *)input; + uint8_t const *xsecret = (const uint8_t *)secret; size_t i; - for (i = 0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) { + #ifdef __wasm_simd128__ + /* + * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret + * is constant propagated, which results in it converting it to this + * inside the loop: + * + * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0) + * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0) + * ... + * + * This requires a full 32-bit address immediate (and therefore a 6 byte + * instruction) as well as an add for each offset. + * + * Putting an asm guard prevents it from folding (at the cost of losing + * the alignment hint), and uses the free offset in `v128.load` instead + * of adding secret_offset each time which overall reduces code size by + * about a kilobyte and improves performance. + */ + XXH_COMPILER_GUARD(xsecret); + #endif + /* Scalar lanes use the normal scalarRound routine */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + + XXH3_scalarRound(acc, input, secret, i); + + } + + i = 0; + /* 4 NEON lanes at a time. 
*/ + for (; i + 1 < XXH3_NEON_LANES / 2; i += 2) { /* data_vec = xinput[i]; */ - uint8x16_t data_vec = vld1q_u8(xinput + (i * 16)); + uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16)); + uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i + 1) * 16)); /* key_vec = xsecret[i]; */ - uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16)); - uint64x2_t data_key; - uint32x2_t data_key_lo, data_key_hi; - /* xacc[i] += swap(data_vec); */ - uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec); - uint64x2_t const swapped = vextq_u64(data64, data64, 1); - xacc[i] = vaddq_u64(xacc[i], swapped); + uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i + 1) * 16)); + /* data_swap = swap(data_vec) */ + uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1); + uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1); /* data_key = data_vec ^ key_vec; */ - data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec)); - /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF); - * data_key_hi = (uint32x2_t) (data_key >> 32); - * data_key = UNDEFINED; */ - XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi); - /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */ - xacc[i] = vmlal_u32(xacc[i], data_key_lo, data_key_hi); + uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1); + uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2); + + /* + * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a + * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to + * get one vector with the low 32 bits of each lane, and one vector + * with the high 32 bits of each lane. + * + * The intrinsic returns a double vector because the original ARMv7-a + * instruction modified both arguments in place. AArch64 and SIMD128 emit + * two instructions from this intrinsic. + * + * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ] + * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ] + */ + uint32x4x2_t unzipped = vuzpq_u32(vreinterpretq_u32_u64(data_key_1), + vreinterpretq_u32_u64(data_key_2)); + /* data_key_lo = data_key & 0xFFFFFFFF */ + uint32x4_t data_key_lo = unzipped.val[0]; + /* data_key_hi = data_key >> 32 */ + uint32x4_t data_key_hi = unzipped.val[1]; + /* + * Then, we can split the vectors horizontally and multiply which, as for + * most widening intrinsics, have a variant that works on both high half + * vectors for free on AArch64. A similar instruction is available on + * SIMD128. + * + * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi + */ + uint64x2_t sum_1 = + XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi); + uint64x2_t sum_2 = + XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi); + /* + * Clang reorders + * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s + * c += a; // add acc.2d, acc.2d, swap.2d + * to + * c += a; // add acc.2d, acc.2d, swap.2d + * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s + * + * While it would make sense in theory since the addition is faster, + * for reasons likely related to umlal being limited to certain NEON + * pipelines, this is worse. A compiler guard fixes this. + */ + XXH_COMPILER_GUARD_CLANG_NEON(sum_1); + XXH_COMPILER_GUARD_CLANG_NEON(sum_2); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64(xacc[i], sum_1); + xacc[i + 1] = vaddq_u64(xacc[i + 1], sum_2); + + } + + /* Operate on the remaining NEON lanes 2 at a time. 
*/ + for (; i < XXH3_NEON_LANES / 2; i++) { + + /* data_vec = xinput[i]; */ + uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16)); + /* key_vec = xsecret[i]; */ + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + /* acc_vec_2 = swap(data_vec) */ + uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1); + /* data_key = data_vec ^ key_vec; */ + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* For two lanes, just use VMOVN and VSHRN. */ + /* data_key_lo = data_key & 0xFFFFFFFF; */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* data_key_hi = data_key >> 32; */ + uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32); + /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */ + uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi); + /* Same Clang workaround as before */ + XXH_COMPILER_GUARD_CLANG_NEON(sum); + /* xacc[i] = acc_vec + sum; */ + xacc[i] = vaddq_u64(xacc[i], sum); } @@ -4587,19 +5970,37 @@ XXH_FORCE_INLINE void XXH3_accumulate_512_neon( } -XXH_FORCE_INLINE void XXH3_scrambleAcc_neon(void *XXH_RESTRICT acc, - const void *XXH_RESTRICT secret) { +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon) + + XXH_FORCE_INLINE + void XXH3_scrambleAcc_neon(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT secret) { XXH_ASSERT((((size_t)acc) & 15) == 0); { - uint64x2_t *xacc = (uint64x2_t *)acc; - uint8_t const *xsecret = (uint8_t const *)secret; - uint32x2_t prime = vdup_n_u32(XXH_PRIME32_1); + xxh_aliasing_uint64x2_t *xacc = (xxh_aliasing_uint64x2_t *)acc; + uint8_t const *xsecret = (uint8_t const *)secret; size_t i; - for (i = 0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) { + /* WASM uses operator overloads and doesn't need these. */ + #ifndef __wasm_simd128__ + /* { prime32_1, prime32_1 } */ + uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1); + /* { 0, prime32_1, 0, prime32_1 } */ + uint32x4_t const kPrimeHi = + vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32)); + #endif + + /* AArch64 uses both scalar and neon at the same time */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + + XXH3_scalarScrambleRound(acc, secret, i); + + } + + for (i = 0; i < XXH3_NEON_LANES / 2; i++) { /* xacc[i] ^= (xacc[i] >> 47); */ uint64x2_t acc_vec = xacc[i]; @@ -4607,40 +6008,32 @@ XXH_FORCE_INLINE void XXH3_scrambleAcc_neon(void *XXH_RESTRICT acc, uint64x2_t data_vec = veorq_u64(acc_vec, shifted); /* xacc[i] ^= xsecret[i]; */ - uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16)); - uint64x2_t data_key = veorq_u64(data_vec, vreinterpretq_u64_u8(key_vec)); - - /* xacc[i] *= XXH_PRIME32_1 */ - uint32x2_t data_key_lo, data_key_hi; - /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF); - * data_key_hi = (uint32x2_t) (xacc[i] >> 32); - * xacc[i] = UNDEFINED; */ - XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi); - { /* - * prod_hi = (data_key >> 32) * XXH_PRIME32_1; - * - * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will - * incorrectly "optimize" this: - * tmp = vmul_u32(vmovn_u64(a), vmovn_u64(b)); - * shifted = vshll_n_u32(tmp, 32); - * to this: - * tmp = "vmulq_u64"(a, b); // no such thing! - * shifted = vshlq_n_u64(tmp, 32); - * - * However, unlike SSE, Clang lacks a 64-bit multiply routine - * for NEON, and it scalarizes two 64-bit multiplies instead. - * - * vmull_u32 has the same timing as vmul_u32, and it avoids - * this bug completely. 
- * See https://bugs.llvm.org/show_bug.cgi?id=39967 - */ - uint64x2_t prod_hi = vmull_u32(data_key_hi, prime); - /* xacc[i] = prod_hi << 32; */ - xacc[i] = vshlq_n_u64(prod_hi, 32); - /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */ - xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime); - - } + uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16)); + uint64x2_t data_key = veorq_u64(data_vec, key_vec); + /* xacc[i] *= XXH_PRIME32_1 */ + #ifdef __wasm_simd128__ + /* SIMD128 has multiply by u64x2, use it instead of expanding and + * scalarizing */ + xacc[i] = data_key * XXH_PRIME32_1; + #else + /* + * Expanded version with portable NEON intrinsics + * + * lo(x) * lo(y) + (hi(x) * lo(y) << 32) + * + * prod_hi = hi(data_key) * lo(prime) << 32 + * + * Since we only need 32 bits of this multiply a trick can be used, + * reinterpreting the vector as a uint32x4_t and multiplying by { 0, + * prime, 0, prime } to cancel out the unwanted bits and avoid the shift. + */ + uint32x4_t prod_hi = vmulq_u32(vreinterpretq_u32_u64(data_key), kPrimeHi); + /* Extract low bits for vmlal_u32 */ + uint32x2_t data_key_lo = vmovn_u64(data_key); + /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */ + xacc[i] = + vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo); + #endif } @@ -4656,47 +6049,54 @@ XXH_FORCE_INLINE void XXH3_accumulate_512_vsx(void *XXH_RESTRICT acc, const void *XXH_RESTRICT input, const void *XXH_RESTRICT secret) { - xxh_u64x2 *const xacc = (xxh_u64x2 *)acc; /* presumed aligned */ - xxh_u64x2 const *const xinput = - (xxh_u64x2 const *)input; /* no alignment restriction */ - xxh_u64x2 const *const xsecret = - (xxh_u64x2 const *)secret; /* no alignment restriction */ + /* presumed aligned */ + xxh_aliasing_u64x2 *const xacc = (xxh_aliasing_u64x2 *)acc; + xxh_u8 const *const xinput = + (xxh_u8 const *)input; /* no alignment restriction */ + xxh_u8 const *const xsecret = + (xxh_u8 const *)secret; /* no alignment restriction */ xxh_u64x2 const v32 = {32, 32}; size_t i; for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { /* data_vec = xinput[i]; */ - xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i); + xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16 * i); /* key_vec = xsecret[i]; */ - xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i); + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16 * i); xxh_u64x2 const data_key = data_vec ^ key_vec; /* shuffled = (data_key << 32) | (data_key >> 32); */ xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & * 0xFFFFFFFF); */ xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); - xacc[i] += product; + /* acc_vec = xacc[i]; */ + xxh_u64x2 acc_vec = xacc[i]; + acc_vec += product; /* swap high and low halves */ #ifdef __s390x__ - xacc[i] += vec_permi(data_vec, data_vec, 2); + acc_vec += vec_permi(data_vec, data_vec, 2); #else - xacc[i] += vec_xxpermdi(data_vec, data_vec, 2); + acc_vec += vec_xxpermdi(data_vec, data_vec, 2); #endif + xacc[i] = acc_vec; } } -XXH_FORCE_INLINE void XXH3_scrambleAcc_vsx(void *XXH_RESTRICT acc, - const void *XXH_RESTRICT secret) { +XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx) + + XXH_FORCE_INLINE + void XXH3_scrambleAcc_vsx(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT secret) { XXH_ASSERT((((size_t)acc) & 15) == 0); { - xxh_u64x2 *const xacc = (xxh_u64x2 *)acc; - const xxh_u64x2 *const xsecret = (const xxh_u64x2 *)secret; + xxh_aliasing_u64x2 *const xacc = (xxh_aliasing_u64x2 *)acc; + const xxh_u8 
*const xsecret = (const xxh_u8 *)secret; /* constants */ xxh_u64x2 const v32 = {32, 32}; xxh_u64x2 const v47 = {47, 47}; @@ -4710,7 +6110,7 @@ XXH_FORCE_INLINE void XXH3_scrambleAcc_vsx(void *XXH_RESTRICT acc, xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); /* xacc[i] ^= xsecret[i]; */ - xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i); + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16 * i); xxh_u64x2 const data_key = data_vec ^ key_vec; /* xacc[i] *= XXH_PRIME32_1 */ @@ -4729,46 +6129,272 @@ XXH_FORCE_INLINE void XXH3_scrambleAcc_vsx(void *XXH_RESTRICT acc, #endif -/* scalar variants - universal */ + #if (XXH_VECTOR == XXH_SVE) + +XXH_FORCE_INLINE void XXH3_accumulate_512_sve(void *XXH_RESTRICT acc, + const void *XXH_RESTRICT input, + const void *XXH_RESTRICT secret) { + + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc); + ACCRND(vacc, 0); + svst1_u64(mask, xacc, vacc); + + } else if (element_count == 2) { /* sve128 */ + + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + + } else { + + svbool_t mask = svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + + } + +} + +XXH_FORCE_INLINE void XXH3_accumulate_sve(xxh_u64 *XXH_RESTRICT acc, + const xxh_u8 *XXH_RESTRICT input, + const xxh_u8 *XXH_RESTRICT secret, + size_t nbStripes) { + + if (nbStripes != 0) { + + uint64_t *xacc = (uint64_t *)acc; + const uint64_t *xinput = (const uint64_t *)(const void *)input; + const uint64_t *xsecret = (const uint64_t *)(const void *)secret; + svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1); + uint64_t element_count = svcntd(); + if (element_count >= 8) { + + svbool_t mask = svptrue_pat_b64(SV_VL8); + svuint64_t vacc = svld1_u64(mask, xacc + 0); + do { + + /* svprfd(svbool_t, void *, enum svfprop); */ + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(vacc, 0); + xinput += 8; + xsecret += 1; + nbStripes--; + + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, vacc); + + } else if (element_count == 2) { /* sve128 */ + + svbool_t mask = svptrue_pat_b64(SV_VL2); + svuint64_t acc0 = svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 2); + svuint64_t acc2 = svld1_u64(mask, xacc + 4); + svuint64_t acc3 = svld1_u64(mask, xacc + 6); + do { + + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 2); + ACCRND(acc2, 4); + ACCRND(acc3, 6); + xinput += 8; + xsecret += 1; + nbStripes--; + + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 2, acc1); + svst1_u64(mask, xacc + 4, acc2); + svst1_u64(mask, xacc + 6, acc3); + + } else { + + svbool_t mask = svptrue_pat_b64(SV_VL4); + svuint64_t acc0 = 
svld1_u64(mask, xacc + 0); + svuint64_t acc1 = svld1_u64(mask, xacc + 4); + do { + + svprfd(mask, xinput + 128, SV_PLDL1STRM); + ACCRND(acc0, 0); + ACCRND(acc1, 4); + xinput += 8; + xsecret += 1; + nbStripes--; + + } while (nbStripes != 0); + + svst1_u64(mask, xacc + 0, acc0); + svst1_u64(mask, xacc + 4, acc1); + + } + + } + +} + + #endif + + /* scalar variants - universal */ + + #if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__)) +/* + * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they + * emit an excess mask and a full 64-bit multiply-add (MADD X-form). + * + * While this might not seem like much, as AArch64 is a 64-bit architecture, + * only big Cortex designs have a full 64-bit multiplier. + * + * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit + * multiplies expand to 2-3 multiplies in microcode. This has a major penalty + * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline. + * + * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) + * which does not have this penalty and does the mask automatically. + */ +XXH_FORCE_INLINE xxh_u64 XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, + xxh_u64 acc) { + + xxh_u64 ret; + /* note: %x = 64-bit register, %w = 32-bit register */ + __asm__("umaddl %x0, %w1, %w2, %x3" + : "=r"(ret) + : "r"(lhs), "r"(rhs), "r"(acc)); + return ret; + +} + + #else +XXH_FORCE_INLINE xxh_u64 XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, + xxh_u64 acc) { + return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc; + +} + + #endif + +/*! + * @internal + * @brief Scalar round for @ref XXH3_accumulate_512_scalar(). + * + * This is extracted to its own function because the NEON path uses a + * combination of NEON and scalar. + */ +XXH_FORCE_INLINE void XXH3_scalarRound(void *XXH_RESTRICT acc, + void const *XXH_RESTRICT input, + void const *XXH_RESTRICT secret, + size_t lane) { + + xxh_u64 *xacc = (xxh_u64 *)acc; + xxh_u8 const *xinput = (xxh_u8 const *)input; + xxh_u8 const *xsecret = (xxh_u8 const *)secret; + XXH_ASSERT(lane < XXH_ACC_NB); + XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN - 1)) == 0); + { + + xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8); + xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8); + xacc[lane ^ 1] += data_val; /* swap adjacent lanes */ + xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, + data_key >> 32, xacc[lane]); + + } + +} + +/*! + * @internal + * @brief Processes a 64 byte block of data using the scalar path. + */ XXH_FORCE_INLINE void XXH3_accumulate_512_scalar( void *XXH_RESTRICT acc, const void *XXH_RESTRICT input, const void *XXH_RESTRICT secret) { - xxh_u64 *const xacc = (xxh_u64 *)acc; /* presumed aligned */ - const xxh_u8 *const xinput = - (const xxh_u8 *)input; /* no alignment restriction */ - const xxh_u8 *const xsecret = - (const xxh_u8 *)secret; /* no alignment restriction */ size_t i; - XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN - 1)) == 0); + /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on + * ARMv6. 
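A self-contained model of XXH3_scalarRound() above (a sketch using native-endian arrays instead of XXH_readLE64): mix one 64-bit word with the secret, add the raw word to the adjacent lane, then do the 32x32 -> 64 multiply-add that UMADDL performs in a single instruction.

    #include <stddef.h>
    #include <stdint.h>

    static void scalar_round_model(uint64_t acc[8], const uint64_t data[8],
                                   const uint64_t secret[8], size_t lane) {
      uint64_t data_val = data[lane];
      uint64_t data_key = data_val ^ secret[lane];
      acc[lane ^ 1] += data_val; /* swap adjacent lanes */
      acc[lane] += (uint64_t)(uint32_t)data_key * (data_key >> 32);
    }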
 */
+  #if defined(__GNUC__) && !defined(__clang__) && \
+      (defined(__arm__) || defined(__thumb2__)) && \
+      defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes \
+                                          bytes */ \
+      && XXH_SIZE_OPT <= 0
+    #pragma GCC unroll 8
+  #endif
  for (i = 0; i < XXH_ACC_NB; i++) {

-    xxh_u64 const data_val = XXH_readLE64(xinput + 8 * i);
-    xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i * 8);
-    xacc[i ^ 1] += data_val; /* swap adjacent lanes */
-    xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
+    XXH3_scalarRound(acc, input, secret, i);

  }

}

-XXH_FORCE_INLINE void XXH3_scrambleAcc_scalar(void *XXH_RESTRICT acc,
-                                              const void *XXH_RESTRICT secret) {
+XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar)
+
+    /*!
+     * @internal
+     * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
+     *
+     * This is extracted to its own function because the NEON path uses a
+     * combination of NEON and scalar.
+     */
+    XXH_FORCE_INLINE
+    void XXH3_scalarScrambleRound(void *XXH_RESTRICT acc,
+                                  void const *XXH_RESTRICT secret,
+                                  size_t lane) {

  xxh_u64 *const      xacc = (xxh_u64 *)acc;            /* presumed aligned */
  const xxh_u8 *const xsecret =
      (const xxh_u8 *)secret;                  /* no alignment restriction */
-  size_t i;
  XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN - 1)) == 0);
-  for (i = 0; i < XXH_ACC_NB; i++) {
+  XXH_ASSERT(lane < XXH_ACC_NB);
+  {

-    xxh_u64 const key64 = XXH_readLE64(xsecret + 8 * i);
-    xxh_u64       acc64 = xacc[i];
+    xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
+    xxh_u64       acc64 = xacc[lane];
    acc64 = XXH_xorshift64(acc64, 47);
    acc64 ^= key64;
    acc64 *= XXH_PRIME32_1;
-    xacc[i] = acc64;
+    xacc[lane] = acc64;
+
+  }
+
+}
+
+/*!
+ * @internal
+ * @brief Scrambles the accumulators after a large chunk has been read
+ */
+XXH_FORCE_INLINE void XXH3_scrambleAcc_scalar(void *XXH_RESTRICT acc,
+                                              const void *XXH_RESTRICT secret) {
+
+  size_t i;
+  for (i = 0; i < XXH_ACC_NB; i++) {
+
+    XXH3_scalarScrambleRound(acc, secret, i);

  }

@@ -4785,15 +6411,16 @@ XXH_FORCE_INLINE void XXH3_initCustomSecret_scalar(

  const xxh_u8 *kSecretPtr = XXH3_kSecret;
  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);

-  #if defined(__clang__) && defined(__aarch64__)
+  #if defined(__GNUC__) && defined(__aarch64__)
  /*
   * UGLY HACK:
-   * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
+   * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
   * placed sequentially, in order, at the top of the unrolled loop.
   *
   * While MOVK is great for generating constants (2 cycles for a 64-bit
-   * constant compared to 4 cycles for LDR), long MOVK chains stall the
-   * integer pipelines:
+   * constant compared to 4 cycles for LDR), it fights for bandwidth with
+   * the arithmetic instructions.
+   *
   *   I   L   S
   * MOVK
   * MOVK
   * MOVK
   * MOVK
   * ADD
   * SUB      STR
   *          STR
-   * By forcing loads from memory (as the asm line causes Clang to assume
+   * By forcing loads from memory (as the asm line causes the compiler to assume
   * that XXH3_kSecretPtr has been changed), the pipelines are used more
   * efficiently:
   *   I   L   S
   *      LDR
   *  ADD LDR
   *  SUB     STR
   *          STR
+   *
+   * See XXH3_NEON_LANES for details on the pipeline.
+   *
   * XXH3_64bits_withSeed, len == 256, Snapdragon 835
   * without hack: 2654.4 MB/s
   * with hack:    3202.9 MB/s
   */
  XXH_COMPILER_GUARD(kSecretPtr);
  #endif
-  /*
-   * Note: in debug mode, this overrides the asm optimization
-   * and Clang will emit MOVK chains again.
- */ - XXH_ASSERT(kSecretPtr == XXH3_kSecret); - { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16; @@ -4829,7 +6453,7 @@ XXH_FORCE_INLINE void XXH3_initCustomSecret_scalar( for (i = 0; i < nbRounds; i++) { /* - * The asm hack causes Clang to assume that kSecretPtr aliases with + * The asm hack causes the compiler to assume that kSecretPtr aliases with * customSecret, and on aarch64, this prevented LDP from merging two * loads together for free. Putting the loads together before the stores * properly generates LDP. @@ -4845,87 +6469,71 @@ XXH_FORCE_INLINE void XXH3_initCustomSecret_scalar( } -typedef void (*XXH3_f_accumulate_512)(void *XXH_RESTRICT, const void *, - const void *); +typedef void (*XXH3_f_accumulate)(xxh_u64 *XXH_RESTRICT, + const xxh_u8 *XXH_RESTRICT, + const xxh_u8 *XXH_RESTRICT, size_t); typedef void (*XXH3_f_scrambleAcc)(void *XXH_RESTRICT, const void *); typedef void (*XXH3_f_initCustomSecret)(void *XXH_RESTRICT, xxh_u64); #if (XXH_VECTOR == XXH_AVX512) #define XXH3_accumulate_512 XXH3_accumulate_512_avx512 + #define XXH3_accumulate XXH3_accumulate_avx512 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 #elif (XXH_VECTOR == XXH_AVX2) #define XXH3_accumulate_512 XXH3_accumulate_512_avx2 + #define XXH3_accumulate XXH3_accumulate_avx2 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 #elif (XXH_VECTOR == XXH_SSE2) #define XXH3_accumulate_512 XXH3_accumulate_512_sse2 + #define XXH3_accumulate XXH3_accumulate_sse2 #define XXH3_scrambleAcc XXH3_scrambleAcc_sse2 #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 #elif (XXH_VECTOR == XXH_NEON) #define XXH3_accumulate_512 XXH3_accumulate_512_neon + #define XXH3_accumulate XXH3_accumulate_neon #define XXH3_scrambleAcc XXH3_scrambleAcc_neon #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar #elif (XXH_VECTOR == XXH_VSX) #define XXH3_accumulate_512 XXH3_accumulate_512_vsx + #define XXH3_accumulate XXH3_accumulate_vsx #define XXH3_scrambleAcc XXH3_scrambleAcc_vsx #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + #elif (XXH_VECTOR == XXH_SVE) + #define XXH3_accumulate_512 XXH3_accumulate_512_sve + #define XXH3_accumulate XXH3_accumulate_sve + #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar + #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + #else /* scalar */ #define XXH3_accumulate_512 XXH3_accumulate_512_scalar + #define XXH3_accumulate XXH3_accumulate_scalar #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar #endif - #ifndef XXH_PREFETCH_DIST - #ifdef __clang__ - #define XXH_PREFETCH_DIST 320 - #else - #if (XXH_VECTOR == XXH_AVX512) - #define XXH_PREFETCH_DIST 512 - #else - #define XXH_PREFETCH_DIST 384 - #endif - #endif /* __clang__ */ - #endif /* XXH_PREFETCH_DIST */ - -/* - * XXH3_accumulate() - * Loops over XXH3_accumulate_512(). 
- * Assumption: nbStripes will not overflow the secret size - */ -XXH_FORCE_INLINE void XXH3_accumulate(xxh_u64 *XXH_RESTRICT acc, - const xxh_u8 *XXH_RESTRICT input, - const xxh_u8 *XXH_RESTRICT secret, - size_t nbStripes, - XXH3_f_accumulate_512 f_acc512) { - - size_t n; - for (n = 0; n < nbStripes; n++) { - - const xxh_u8 *const in = input + n * XXH_STRIPE_LEN; - XXH_PREFETCH(in + XXH_PREFETCH_DIST); - f_acc512(acc, in, secret + n * XXH_SECRET_CONSUME_RATE); - - } - -} + #if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */ + #undef XXH3_initCustomSecret + #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + #endif XXH_FORCE_INLINE void XXH3_hashLong_internal_loop( xxh_u64 *XXH_RESTRICT acc, const xxh_u8 *XXH_RESTRICT input, size_t len, const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, - XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble) { + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; @@ -4938,8 +6546,7 @@ XXH_FORCE_INLINE void XXH3_hashLong_internal_loop( for (n = 0; n < nb_blocks; n++) { - XXH3_accumulate(acc, input + n * block_len, secret, nbStripesPerBlock, - f_acc512); + f_acc(acc, input + n * block_len, secret, nbStripesPerBlock); f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); } @@ -4951,8 +6558,7 @@ XXH_FORCE_INLINE void XXH3_hashLong_internal_loop( size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); - XXH3_accumulate(acc, input + nb_blocks * block_len, secret, nbStripes, - f_acc512); + f_acc(acc, input + nb_blocks * block_len, secret, nbStripes); /* last stripe */ { @@ -4961,8 +6567,9 @@ XXH_FORCE_INLINE void XXH3_hashLong_internal_loop( #define XXH_SECRET_LASTACC_START \ 7 /* not aligned on 8, last secret is different from acc & scrambler \ */ - f_acc512(acc, p, - secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START); + XXH3_accumulate_512( + acc, p, + secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START); } @@ -5019,13 +6626,12 @@ static XXH64_hash_t XXH3_mergeAccs(const xxh_u64 *XXH_RESTRICT acc, XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_internal( const void *XXH_RESTRICT input, size_t len, const void *XXH_RESTRICT secret, - size_t secretSize, XXH3_f_accumulate_512 f_acc512, - XXH3_f_scrambleAcc f_scramble) { + size_t secretSize, XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; XXH3_hashLong_internal_loop(acc, (const xxh_u8 *)input, len, - (const xxh_u8 *)secret, secretSize, f_acc512, + (const xxh_u8 *)secret, secretSize, f_acc, f_scramble); /* converge into final hash */ @@ -5041,26 +6647,30 @@ XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_internal( } /* - * It's important for performance that XXH3_hashLong is not inlined. + * It's important for performance to transmit secret's size (when it's static) + * so that the compiler can properly optimize the vectorized loop. + * This makes a big performance difference for "medium" keys (<1 KB) when using + * AVX instruction set. When the secret size is unknown, or on GCC 12 where the + * mix of NO_INLINE and FORCE_INLINE breaks -Og, this is XXH_NO_INLINE. 
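For intuition about the loop bounds in XXH3_hashLong_internal_loop() above, here is a worked sketch of the block geometry, assuming xxHash's usual constants (XXH_STRIPE_LEN = 64, XXH_SECRET_CONSUME_RATE = 8, and the 192-byte default secret):

#include <assert.h>
#include <stddef.h>

int main(void) {
  size_t const stripeLen = 64, consumeRate = 8, secretSize = 192;
  size_t const nbStripesPerBlock = (secretSize - stripeLen) / consumeRate;
  size_t const block_len = stripeLen * nbStripesPerBlock;
  assert(nbStripesPerBlock == 16); /* 16 stripes, then one scramble */
  assert(block_len == 1024);       /* accumulators fold once per KiB */
  return 0;
}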
*/
-XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_withSecret(
+XXH3_WITH_SECRET_INLINE XXH64_hash_t XXH3_hashLong_64b_withSecret(
const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) {
(void)seed64;
return XXH3_hashLong_64b_internal(input, len, secret, secretLen,
- XXH3_accumulate_512, XXH3_scrambleAcc);
+ XXH3_accumulate, XXH3_scrambleAcc);
}
/*
- * It's important for performance that XXH3_hashLong is not inlined.
- * Since the function is not inlined, the compiler may not be able to understand
- * that, in some scenarios, its `secret` argument is actually a compile time
- * constant. This variant enforces that the compiler can detect that, and uses
- * this opportunity to streamline the generated code for better performance.
+ * It's preferable for performance that XXH3_hashLong is not inlined,
+ * as it results in a smaller function for small data, easier on the instruction
+ * cache. Note that inside this no_inline function, we do inline the internal
+ * loop, and provide a statically defined secret size to allow optimization of
+ * the vector loop.
*/
-XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_default(
+XXH_NO_INLINE XXH_PUREF XXH64_hash_t XXH3_hashLong_64b_default(
const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) {
@@ -5068,7 +6678,7 @@ XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_default(
(void)secret;
(void)secretLen;
return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret,
- sizeof(XXH3_kSecret), XXH3_accumulate_512,
+ sizeof(XXH3_kSecret), XXH3_accumulate,
XXH3_scrambleAcc);
}
@@ -5085,19 +6695,20 @@ XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_default(
* why (uop cache maybe?), but the difference is large and easily measurable.
*/
XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_withSeed_internal(
- const void *input, size_t len, XXH64_hash_t seed,
- XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble,
- XXH3_f_initCustomSecret f_initSec) {
+ const void *input, size_t len, XXH64_hash_t seed, XXH3_f_accumulate f_acc,
+ XXH3_f_scrambleAcc f_scramble, XXH3_f_initCustomSecret f_initSec) {
+ #if XXH_SIZE_OPT <= 0
if (seed == 0)
- return XXH3_hashLong_64b_internal(
- input, len, XXH3_kSecret, sizeof(XXH3_kSecret), f_acc512, f_scramble);
+ return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret,
+ sizeof(XXH3_kSecret), f_acc, f_scramble);
+ #endif
{
XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
f_initSec(secret, seed);
- return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
- f_acc512, f_scramble);
+ return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret), f_acc,
+ f_scramble);
}
@@ -5106,17 +6717,15 @@ XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_withSeed_internal(
/*
* It's important for performance that XXH3_hashLong is not inlined.
*/ -XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_withSeed(const void *input, - size_t len, - XXH64_hash_t seed, - const xxh_u8 *secret, - size_t secretLen) { +XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_withSeed( + const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed, + const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) { (void)secret; (void)secretLen; - return XXH3_hashLong_64b_withSeed_internal( - input, len, seed, XXH3_accumulate_512, XXH3_scrambleAcc, - XXH3_initCustomSecret); + return XXH3_hashLong_64b_withSeed_internal(input, len, seed, XXH3_accumulate, + XXH3_scrambleAcc, + XXH3_initCustomSecret); } @@ -5152,36 +6761,49 @@ XXH3_64bits_internal(const void *XXH_RESTRICT input, size_t len, /* === Public entry point === */ -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void *input, size_t len) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void *input, + size_t length) { - return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), - XXH3_hashLong_64b_default); + return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, + sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void *input, - size_t len, - const void *secret, - size_t secretSize) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecret(XXH_NOESCAPE const void *input, size_t length, + XXH_NOESCAPE const void *secret, size_t secretSize) { - return XXH3_64bits_internal(input, len, 0, secret, secretSize, + return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void *input, size_t len, +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void *input, + size_t length, XXH64_hash_t seed) { - return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, + return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed); } -/* === XXH3 streaming === */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecretandSeed( + XXH_NOESCAPE const void *input, size_t length, + XXH_NOESCAPE const void *secret, size_t secretSize, XXH64_hash_t seed) { + + if (length <= XXH3_MIDSIZE_MAX) + return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, + sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_64b_withSecret(input, length, seed, + (const xxh_u8 *)secret, secretSize); +} + + /* === XXH3 streaming === */ + #ifndef XXH_NO_STREAM /* * Malloc's a pointer that is always aligned to align. * @@ -5205,7 +6827,7 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void *input, size_t len, * * Align must be a power of 2 and 8 <= align <= 128. */ -static void *XXH_alignedMalloc(size_t s, size_t align) { +static XXH_MALLOCF void *XXH_alignedMalloc(size_t s, size_t align) { XXH_ASSERT(align <= 128 && align >= 8); /* range check */ XXH_ASSERT((align & (align - 1)) == 0); /* power of 2 */ @@ -5257,7 +6879,17 @@ static void XXH_alignedFree(void *p) { } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ +/*! + * @brief Allocate an @ref XXH3_state_t. + * + * @return An allocated pointer of @ref XXH3_state_t on success. + * @return `NULL` on failure. + * + * @note Must be freed with XXH3_freeState(). 
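For reference, a small usage sketch of the one-shot entry points above (assumes xxhash.h is compiled together with its implementation, e.g. xxhash.c):

#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void) {
  const char *msg = "hello world";
  XXH64_hash_t const h1 = XXH3_64bits(msg, strlen(msg));
  XXH64_hash_t const h2 = XXH3_64bits_withSeed(msg, strlen(msg), 42);
  printf("%016llx %016llx\n", (unsigned long long)h1, (unsigned long long)h2);
  return 0;
}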
+ * + * @see @ref streaming_example "Streaming Example" + */ XXH_PUBLIC_API XXH3_state_t *XXH3_createState(void) { XXH3_state_t *const state = @@ -5268,7 +6900,19 @@ XXH_PUBLIC_API XXH3_state_t *XXH3_createState(void) { } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ +/*! + * @brief Frees an @ref XXH3_state_t. + * + * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref + * XXH3_createState(). + * + * @return @ref XXH_OK. + * + * @note Must be allocated with XXH3_createState(). + * + * @see @ref streaming_example "Streaming Example" + */ XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t *statePtr) { XXH_alignedFree(statePtr); @@ -5276,11 +6920,11 @@ XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t *statePtr) { } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t *dst_state, - const XXH3_state_t *src_state) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t *dst_state, + XXH_NOESCAPE const XXH3_state_t *src_state) { - memcpy(dst_state, src_state, sizeof(*dst_state)); + XXH_memcpy(dst_state, src_state, sizeof(*dst_state)); } @@ -5303,6 +6947,7 @@ static void XXH3_reset_internal(XXH3_state_t *statePtr, XXH64_hash_t seed, statePtr->acc[6] = XXH_PRIME64_5; statePtr->acc[7] = XXH_PRIME32_1; statePtr->seed = seed; + statePtr->useSeed = (seed != 0); statePtr->extSecret = (const unsigned char *)secret; XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); statePtr->secretLimit = secretSize - XXH_STRIPE_LEN; @@ -5310,8 +6955,9 @@ static void XXH3_reset_internal(XXH3_state_t *statePtr, XXH64_hash_t seed, } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t *statePtr) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t *statePtr) { if (statePtr == NULL) return XXH_ERROR; XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); @@ -5319,9 +6965,10 @@ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t *statePtr) { } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret( - XXH3_state_t *statePtr, const void *secret, size_t secretSize) { + XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret, + size_t secretSize) { if (statePtr == NULL) return XXH_ERROR; XXH3_reset_internal(statePtr, 0, secret, secretSize); @@ -5331,84 +6978,140 @@ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret( } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t *statePtr, - XXH64_hash_t seed) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH64_hash_t seed) { if (statePtr == NULL) return XXH_ERROR; if (seed == 0) return XXH3_64bits_reset(statePtr); - if (seed != statePtr->seed) + if ((seed != statePtr->seed) || (statePtr->extSecret != NULL)) XXH3_initCustomSecret(statePtr->customSecret, seed); XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); return XXH_OK; } -/* Note : when XXH3_consumeStripes() is invoked, - * there must be a guarantee that at least one more byte must be consumed from - * input - * so that the function can blindly consume all stripes using the "normal" - * secret segment */ -XXH_FORCE_INLINE void XXH3_consumeStripes( +/*! 
@ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecretandSeed( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret, + size_t secretSize, XXH64_hash_t seed64) { + + if (statePtr == NULL) return XXH_ERROR; + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + XXH3_reset_internal(statePtr, seed64, secret, secretSize); + statePtr->useSeed = 1; /* always, even if seed64==0 */ + return XXH_OK; + +} + +/*! + * @internal + * @brief Processes a large input for XXH3_update() and XXH3_digest_long(). + * + * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a + * block. + * + * @param acc Pointer to the 8 accumulator lanes + * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in + * the block* + * @param nbStripesPerBlock Number of stripes in a block + * @param input Input pointer + * @param nbStripes Number of stripes to process + * @param secret Secret pointer + * @param secretLimit Offset of the last block in @p secret + * @param f_acc Pointer to an XXH3_accumulate implementation + * @param f_scramble Pointer to an XXH3_scrambleAcc implementation + * @return Pointer past the end of @p input after processing + */ +XXH_FORCE_INLINE const xxh_u8 *XXH3_consumeStripes( xxh_u64 *XXH_RESTRICT acc, size_t *XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock, const xxh_u8 *XXH_RESTRICT input, size_t nbStripes, const xxh_u8 *XXH_RESTRICT secret, size_t secretLimit, - XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble) { - - XXH_ASSERT(nbStripes <= - nbStripesPerBlock); /* can handle max 1 scramble per invocation */ - XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock); - if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) { - - /* need a scrambling operation */ - size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr; - size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock; - XXH3_accumulate(acc, input, - secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, - nbStripesToEndofBlock, f_acc512); - f_scramble(acc, secret + secretLimit); - XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, - nbStripesAfterBlock, f_acc512); - *nbStripesSoFarPtr = nbStripesAfterBlock; + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { - } else { + const xxh_u8 *initialSecret = + secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE; + /* Process full blocks */ + if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) { + + /* Process the initial partial block... 
*/ + size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr; + + do { + + /* Accumulate and scramble */ + f_acc(acc, input, initialSecret, nbStripesThisIter); + f_scramble(acc, secret + secretLimit); + input += nbStripesThisIter * XXH_STRIPE_LEN; + nbStripes -= nbStripesThisIter; + /* Then continue the loop with the full block size */ + nbStripesThisIter = nbStripesPerBlock; + initialSecret = secret; + + } while (nbStripes >= nbStripesPerBlock); + + *nbStripesSoFarPtr = 0; + + } + + /* Process a partial block */ + if (nbStripes > 0) { - XXH3_accumulate(acc, input, - secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, - nbStripes, f_acc512); + f_acc(acc, input, initialSecret, nbStripes); + input += nbStripes * XXH_STRIPE_LEN; *nbStripesSoFarPtr += nbStripes; } + /* Return end pointer */ + return input; + } + #ifndef XXH3_STREAM_USE_STACK + #if XXH_SIZE_OPT <= 0 && \ + !defined( \ + __clang__) /* clang doesn't need additional stack space */ + #define XXH3_STREAM_USE_STACK 1 + #endif + #endif /* * Both XXH3_64bits_update and XXH3_128bits_update use this routine. */ -XXH_FORCE_INLINE XXH_errorcode XXH3_update(XXH3_state_t *state, - const xxh_u8 *input, size_t len, - XXH3_f_accumulate_512 f_acc512, - XXH3_f_scrambleAcc f_scramble) { +XXH_FORCE_INLINE XXH_errorcode XXH3_update( + XXH3_state_t *XXH_RESTRICT const state, const xxh_u8 *XXH_RESTRICT input, + size_t len, XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { - if (input == NULL) - #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \ - (XXH_ACCEPT_NULL_INPUT_POINTER >= 1) + if (input == NULL) { + + XXH_ASSERT(len == 0); return XXH_OK; - #else - return XXH_ERROR; - #endif + } + + XXH_ASSERT(state != NULL); { const xxh_u8 *const bEnd = input + len; const unsigned char *const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; - + #if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* For some reason, gcc and MSVC seem to suffer greatly + * when operating accumulators directly into state. + * Operating into stack space seems to enable proper optimization. 
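The rewritten XXH3_consumeStripes() above can be checked with a small counting model (a sketch with made-up numbers, not the patch's code): starting 5 stripes into a 16-stripe block, feeding 40 stripes should close the open block, run one full block, and leave 13 stripes pending:

#include <assert.h>
#include <stddef.h>

int main(void) {
  size_t soFar = 5, perBlock = 16, nbStripes = 40, scrambles = 0;
  if (nbStripes >= perBlock - soFar) {
    size_t thisIter = perBlock - soFar; /* finish the open block first */
    do {
      scrambles++; /* stands in for one f_acc() + f_scramble() pair */
      nbStripes -= thisIter;
      thisIter = perBlock; /* later iterations consume full blocks */
    } while (nbStripes >= perBlock);
    soFar = 0;
  }
  soFar += nbStripes; /* trailing partial block, no scramble */
  assert(scrambles == 2 && soFar == 13);
  return 0;
}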
+ * clang, on the other hand, doesn't seem to need this trick */ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; + XXH_memcpy(acc, state->acc, sizeof(acc)); + #else + xxh_u64 *XXH_RESTRICT const acc = state->acc; + #endif state->totalLen += len; XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE); - if (state->bufferedSize + len <= - XXH3_INTERNALBUFFER_SIZE) { /* fill in tmp buffer */ + /* small input : just fill in tmp buffer */ + if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) { + XXH_memcpy(state->buffer + state->bufferedSize, input, len); state->bufferedSize += (XXH32_hash_t)len; return XXH_OK; @@ -5416,9 +7119,8 @@ XXH_FORCE_INLINE XXH_errorcode XXH3_update(XXH3_state_t *state, } /* total input is now > XXH3_INTERNALBUFFER_SIZE */ - - #define XXH3_INTERNALBUFFER_STRIPES \ - (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) + #define XXH3_INTERNALBUFFER_STRIPES \ + (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */ @@ -5431,41 +7133,35 @@ XXH_FORCE_INLINE XXH_errorcode XXH3_update(XXH3_state_t *state, size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); input += loadSize; - XXH3_consumeStripes(state->acc, &state->nbStripesSoFar, - state->nbStripesPerBlock, state->buffer, - XXH3_INTERNALBUFFER_STRIPES, secret, - state->secretLimit, f_acc512, f_scramble); + XXH3_consumeStripes(acc, &state->nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, XXH3_INTERNALBUFFER_STRIPES, secret, + state->secretLimit, f_acc, f_scramble); state->bufferedSize = 0; } XXH_ASSERT(input < bEnd); - - /* Consume input by a multiple of internal buffer size */ if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { - const xxh_u8 *const limit = bEnd - XXH3_INTERNALBUFFER_SIZE; - do { - - XXH3_consumeStripes(state->acc, &state->nbStripesSoFar, - state->nbStripesPerBlock, input, - XXH3_INTERNALBUFFER_STRIPES, secret, - state->secretLimit, f_acc512, f_scramble); - input += XXH3_INTERNALBUFFER_SIZE; - - } while (input < limit); - - /* for last partial stripe */ - memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, - input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); + size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN; + input = XXH3_consumeStripes( + acc, &state->nbStripesSoFar, state->nbStripesPerBlock, input, + nbStripes, secret, state->secretLimit, f_acc, f_scramble); + XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, + input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); } - XXH_ASSERT(input < bEnd); - /* Some remaining input (always) : buffer it */ + XXH_ASSERT(input < bEnd); + XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE); + XXH_ASSERT(state->bufferedSize == 0); XXH_memcpy(state->buffer, input, (size_t)(bEnd - input)); state->bufferedSize = (XXH32_hash_t)(bEnd - input); + #if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* save stack accumulators into state */ + XXH_memcpy(state->acc, acc, sizeof(acc)); + #endif } @@ -5473,11 +7169,12 @@ XXH_FORCE_INLINE XXH_errorcode XXH3_update(XXH3_state_t *state, } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update(XXH3_state_t *state, - const void *input, size_t len) { +/*! 
@ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t *state, + XXH_NOESCAPE const void *input, size_t len) { - return XXH3_update(state, (const xxh_u8 *)input, len, XXH3_accumulate_512, + return XXH3_update(state, (const xxh_u8 *)input, len, XXH3_accumulate, XXH3_scrambleAcc); } @@ -5486,41 +7183,46 @@ XXH_FORCE_INLINE void XXH3_digest_long(XXH64_hash_t *acc, const XXH3_state_t *state, const unsigned char *secret) { + xxh_u8 lastStripe[XXH_STRIPE_LEN]; + const xxh_u8 *lastStripePtr; + /* * Digest on a local copy. This way, the state remains unaltered, and it can * continue ingesting more input afterwards. */ - memcpy(acc, state->acc, sizeof(state->acc)); + XXH_memcpy(acc, state->acc, sizeof(state->acc)); if (state->bufferedSize >= XXH_STRIPE_LEN) { + /* Consume remaining stripes then point to remaining data in buffer */ size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; size_t nbStripesSoFar = state->nbStripesSoFar; XXH3_consumeStripes(acc, &nbStripesSoFar, state->nbStripesPerBlock, state->buffer, nbStripes, secret, state->secretLimit, - XXH3_accumulate_512, XXH3_scrambleAcc); - /* last stripe */ - XXH3_accumulate_512(acc, - state->buffer + state->bufferedSize - XXH_STRIPE_LEN, - secret + state->secretLimit - XXH_SECRET_LASTACC_START); + XXH3_accumulate, XXH3_scrambleAcc); + lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN; } else { /* bufferedSize < XXH_STRIPE_LEN */ - xxh_u8 lastStripe[XXH_STRIPE_LEN]; + /* Copy to temp buffer */ size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */ - memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, - catchupSize); - memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); - XXH3_accumulate_512(acc, lastStripe, - secret + state->secretLimit - XXH_SECRET_LASTACC_START); + XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, + catchupSize); + XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); + lastStripePtr = lastStripe; } + /* Last stripe */ + XXH3_accumulate_512(acc, lastStripePtr, + secret + state->secretLimit - XXH_SECRET_LASTACC_START); + } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest(const XXH3_state_t *state) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_digest(XXH_NOESCAPE const XXH3_state_t *state) { const unsigned char *const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; @@ -5534,7 +7236,7 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest(const XXH3_state_t *state) { } /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */ - if (state->seed) + if (state->useSeed) return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), @@ -5542,69 +7244,7 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest(const XXH3_state_t *state) { } - #define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x)) - -/*! 
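A minimal streaming sketch using the API above (error handling abbreviated; assumes the implementation is linked in):

#include <stdio.h>
#include "xxhash.h"

int main(void) {
  XXH3_state_t *const st = XXH3_createState();
  if (st == NULL) return 1;
  if (XXH3_64bits_reset(st) != XXH_OK) return 1;
  XXH3_64bits_update(st, "hello ", 6);
  XXH3_64bits_update(st, "world", 5);
  /* digest() copies the accumulators, so updates may continue after it */
  printf("%016llx\n", (unsigned long long)XXH3_64bits_digest(st));
  XXH3_freeState(st);
  return 0;
}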
@ingroup xxh3_family */ -XXH_PUBLIC_API void XXH3_generateSecret(void *secretBuffer, - const void *customSeed, - size_t customSeedSize) { - - XXH_ASSERT(secretBuffer != NULL); - if (customSeedSize == 0) { - - memcpy(secretBuffer, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); - return; - - } - - XXH_ASSERT(customSeed != NULL); - - { - - size_t const segmentSize = sizeof(XXH128_hash_t); - size_t const nbSegments = XXH_SECRET_DEFAULT_SIZE / segmentSize; - XXH128_canonical_t scrambler; - XXH64_hash_t seeds[12]; - size_t segnb; - XXH_ASSERT(nbSegments == 12); - XXH_ASSERT(segmentSize * nbSegments == - XXH_SECRET_DEFAULT_SIZE); /* exact multiple */ - XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0)); - - /* - * Copy customSeed to seeds[], truncating or repeating as necessary. - */ - { - - size_t toFill = XXH_MIN(customSeedSize, sizeof(seeds)); - size_t filled = toFill; - memcpy(seeds, customSeed, toFill); - while (filled < sizeof(seeds)) { - - toFill = XXH_MIN(filled, sizeof(seeds) - filled); - memcpy((char *)seeds + filled, seeds, toFill); - filled += toFill; - - } - - } - - /* generate secret */ - memcpy(secretBuffer, &scrambler, sizeof(scrambler)); - for (segnb = 1; segnb < nbSegments; segnb++) { - - size_t const segmentStart = segnb * segmentSize; - XXH128_canonical_t segment; - XXH128_canonicalFromHash(&segment, - XXH128(&scrambler, sizeof(scrambler), - XXH_readLE64(seeds + segnb) + segnb)); - memcpy((char *)secretBuffer + segmentStart, &segment, sizeof(segment)); - - } - - } - -} + #endif /* !XXH_NO_STREAM */ /* ========================================== * XXH3 128 bits (a.k.a XXH128) @@ -5623,10 +7263,8 @@ XXH_PUBLIC_API void XXH3_generateSecret(void *secretBuffer, * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64). */ -XXH_FORCE_INLINE XXH128_hash_t XXH3_len_1to3_128b(const xxh_u8 *input, - size_t len, - const xxh_u8 *secret, - XXH64_hash_t seed) { +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_1to3_128b( + const xxh_u8 *input, size_t len, const xxh_u8 *secret, XXH64_hash_t seed) { /* A doubled version of 1to3_64b with different constants. 
*/ XXH_ASSERT(input != NULL); @@ -5660,10 +7298,8 @@ XXH_FORCE_INLINE XXH128_hash_t XXH3_len_1to3_128b(const xxh_u8 *input, } -XXH_FORCE_INLINE XXH128_hash_t XXH3_len_4to8_128b(const xxh_u8 *input, - size_t len, - const xxh_u8 *secret, - XXH64_hash_t seed) { +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_4to8_128b( + const xxh_u8 *input, size_t len, const xxh_u8 *secret, XXH64_hash_t seed) { XXH_ASSERT(input != NULL); XXH_ASSERT(secret != NULL); @@ -5686,7 +7322,7 @@ XXH_FORCE_INLINE XXH128_hash_t XXH3_len_4to8_128b(const xxh_u8 *input, m128.low64 ^= (m128.high64 >> 3); m128.low64 = XXH_xorshift64(m128.low64, 35); - m128.low64 *= 0x9FB21C651E98DF25ULL; + m128.low64 *= PRIME_MX2; m128.low64 = XXH_xorshift64(m128.low64, 28); m128.high64 = XXH3_avalanche(m128.high64); return m128; @@ -5695,10 +7331,8 @@ XXH_FORCE_INLINE XXH128_hash_t XXH3_len_4to8_128b(const xxh_u8 *input, } -XXH_FORCE_INLINE XXH128_hash_t XXH3_len_9to16_128b(const xxh_u8 *input, - size_t len, - const xxh_u8 *secret, - XXH64_hash_t seed) { +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_9to16_128b( + const xxh_u8 *input, size_t len, const xxh_u8 *secret, XXH64_hash_t seed) { XXH_ASSERT(input != NULL); XXH_ASSERT(secret != NULL); @@ -5788,10 +7422,8 @@ XXH_FORCE_INLINE XXH128_hash_t XXH3_len_9to16_128b(const xxh_u8 *input, /* * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN */ -XXH_FORCE_INLINE XXH128_hash_t XXH3_len_0to16_128b(const xxh_u8 *input, - size_t len, - const xxh_u8 *secret, - XXH64_hash_t seed) { +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_0to16_128b( + const xxh_u8 *input, size_t len, const xxh_u8 *secret, XXH64_hash_t seed) { XXH_ASSERT(len <= 16); { @@ -5833,7 +7465,7 @@ XXH_FORCE_INLINE XXH128_hash_t XXH128_mix32B(XXH128_hash_t acc, } -XXH_FORCE_INLINE XXH128_hash_t XXH3_len_17to128_128b( +XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t XXH3_len_17to128_128b( const xxh_u8 *XXH_RESTRICT input, size_t len, const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) { @@ -5846,6 +7478,22 @@ XXH_FORCE_INLINE XXH128_hash_t XXH3_len_17to128_128b( XXH128_hash_t acc; acc.low64 = len * XXH_PRIME64_1; acc.high64 = 0; + + #if XXH_SIZE_OPT >= 1 + { + + /* Smaller, but slightly slower. */ + unsigned int i = (unsigned int)(len - 1) / 32; + do { + + acc = XXH128_mix32B(acc, input + 16 * i, input + len - 16 * (i + 1), + secret + 32 * i, seed); + + } while (i-- != 0); + + } + + #else if (len > 32) { if (len > 64) { @@ -5867,6 +7515,7 @@ XXH_FORCE_INLINE XXH128_hash_t XXH3_len_17to128_128b( } acc = XXH128_mix32B(acc, input, input + len - 16, secret, seed); + #endif { XXH128_hash_t h128; @@ -5883,7 +7532,7 @@ XXH_FORCE_INLINE XXH128_hash_t XXH3_len_17to128_128b( } -XXH_NO_INLINE XXH128_hash_t XXH3_len_129to240_128b( +XXH_NO_INLINE XXH_PUREF XXH128_hash_t XXH3_len_129to240_128b( const xxh_u8 *XXH_RESTRICT input, size_t len, const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) { @@ -5894,25 +7543,33 @@ XXH_NO_INLINE XXH128_hash_t XXH3_len_129to240_128b( { XXH128_hash_t acc; - int const nbRounds = (int)len / 32; - int i; + unsigned i; acc.low64 = len * XXH_PRIME64_1; acc.high64 = 0; - for (i = 0; i < 4; i++) { + /* + * We set as `i` as offset + 32. We do this so that unchanged + * `len` can be used as upper bound. This reaches a sweet spot + * where both x86 and aarch64 get simple agen and good codegen + * for the loop. 
+ */ + for (i = 32; i < 160; i += 32) { - acc = XXH128_mix32B(acc, input + (32 * i), input + (32 * i) + 16, - secret + (32 * i), seed); + acc = XXH128_mix32B(acc, input + i - 32, input + i - 16, secret + i - 32, + seed); } acc.low64 = XXH3_avalanche(acc.low64); acc.high64 = XXH3_avalanche(acc.high64); - XXH_ASSERT(nbRounds >= 4); - for (i = 4; i < nbRounds; i++) { + /* + * NB: `i <= len` will duplicate the last 32-bytes if + * len % 32 was zero. This is an unfortunate necessity to keep + * the hash result stable. + */ + for (i = 160; i <= len; i += 32) { - acc = XXH128_mix32B(acc, input + (32 * i), input + (32 * i) + 16, - secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)), - seed); + acc = XXH128_mix32B(acc, input + i - 32, input + i - 16, + secret + XXH3_MIDSIZE_STARTOFFSET + i - 160, seed); } @@ -5920,7 +7577,7 @@ XXH_NO_INLINE XXH128_hash_t XXH3_len_129to240_128b( acc = XXH128_mix32B( acc, input + len - 16, input + len - 32, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16, - 0ULL - seed); + (XXH64_hash_t)0 - seed); { @@ -5941,12 +7598,12 @@ XXH_NO_INLINE XXH128_hash_t XXH3_len_129to240_128b( XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_internal( const void *XXH_RESTRICT input, size_t len, const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, - XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble) { + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble) { XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; XXH3_hashLong_internal_loop(acc, (const xxh_u8 *)input, len, secret, - secretSize, f_acc512, f_scramble); + secretSize, f_acc, f_scramble); /* converge into final hash */ XXH_STATIC_ASSERT(sizeof(acc) == 64); @@ -5966,9 +7623,9 @@ XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_internal( } /* - * It's important for performance that XXH3_hashLong is not inlined. + * It's important for performance that XXH3_hashLong() is not inlined. */ -XXH_NO_INLINE XXH128_hash_t XXH3_hashLong_128b_default( +XXH_NO_INLINE XXH_PUREF XXH128_hash_t XXH3_hashLong_128b_default( const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, const void *XXH_RESTRICT secret, size_t secretLen) { @@ -5976,39 +7633,43 @@ XXH_NO_INLINE XXH128_hash_t XXH3_hashLong_128b_default( (void)secret; (void)secretLen; return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, - sizeof(XXH3_kSecret), XXH3_accumulate_512, + sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc); } /* - * It's important for performance that XXH3_hashLong is not inlined. + * It's important for performance to pass @p secretLen (when it's static) + * to the compiler, so that it can properly optimize the vectorized loop. + * + * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and + * FORCE_INLINE breaks -Og, this is XXH_NO_INLINE. 
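The index rewrite in the first 129-240 byte loop above can be sanity-checked in isolation: "for (i = 32; i < 160; i += 32)" reading at offset i - 32 visits exactly the offsets 32 * i of the removed "for (i = 0; i < 4; i++)" form. A short sketch:

#include <assert.h>

int main(void) {
  unsigned i, rounds = 0;
  for (i = 32; i < 160; i += 32) {
    assert(i - 32 == 32 * rounds); /* same input/secret offsets as before */
    rounds++;
  }
  assert(rounds == 4); /* the old loop also ran exactly 4 rounds */
  return 0;
}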
*/ -XXH_NO_INLINE XXH128_hash_t XXH3_hashLong_128b_withSecret( +XXH3_WITH_SECRET_INLINE XXH128_hash_t XXH3_hashLong_128b_withSecret( const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, const void *XXH_RESTRICT secret, size_t secretLen) { (void)seed64; return XXH3_hashLong_128b_internal(input, len, (const xxh_u8 *)secret, - secretLen, XXH3_accumulate_512, + secretLen, XXH3_accumulate, XXH3_scrambleAcc); } XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_withSeed_internal( const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64, - XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble, + XXH3_f_accumulate f_acc, XXH3_f_scrambleAcc f_scramble, XXH3_f_initCustomSecret f_initSec) { if (seed64 == 0) - return XXH3_hashLong_128b_internal( - input, len, XXH3_kSecret, sizeof(XXH3_kSecret), f_acc512, f_scramble); + return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, + sizeof(XXH3_kSecret), f_acc, f_scramble); { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; f_initSec(secret, seed64); return XXH3_hashLong_128b_internal(input, len, (const xxh_u8 *)secret, - sizeof(secret), f_acc512, f_scramble); + sizeof(secret), f_acc, f_scramble); } @@ -6023,9 +7684,9 @@ XXH3_hashLong_128b_withSeed(const void *input, size_t len, XXH64_hash_t seed64, (void)secret; (void)secretLen; - return XXH3_hashLong_128b_withSeed_internal( - input, len, seed64, XXH3_accumulate_512, XXH3_scrambleAcc, - XXH3_initCustomSecret); + return XXH3_hashLong_128b_withSeed_internal(input, len, seed64, + XXH3_accumulate, XXH3_scrambleAcc, + XXH3_initCustomSecret); } @@ -6060,8 +7721,9 @@ XXH3_128bits_internal(const void *input, size_t len, XXH64_hash_t seed64, /* === Public XXH128 API === */ -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void *input, size_t len) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void *input, + size_t len) { return XXH3_128bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), @@ -6069,21 +7731,19 @@ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void *input, size_t len) { } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void *input, - size_t len, - const void *secret, - size_t secretSize) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecret(XXH_NOESCAPE const void *input, size_t len, + XXH_NOESCAPE const void *secret, size_t secretSize) { return XXH3_128bits_internal(input, len, 0, (const xxh_u8 *)secret, secretSize, XXH3_hashLong_128b_withSecret); } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void *input, - size_t len, - XXH64_hash_t seed) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed( + XXH_NOESCAPE const void *input, size_t len, XXH64_hash_t seed) { return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), @@ -6091,67 +7751,80 @@ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void *input, } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH128_hash_t XXH128(const void *input, size_t len, +/*! 
@ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecretandSeed( + XXH_NOESCAPE const void *input, size_t len, XXH_NOESCAPE const void *secret, + size_t secretSize, XXH64_hash_t seed) { + + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, + sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize); + +} + +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH128(XXH_NOESCAPE const void *input, size_t len, XXH64_hash_t seed) { return XXH3_128bits_withSeed(input, len, seed); } -/* === XXH3 128-bit streaming === */ - + /* === XXH3 128-bit streaming === */ + #ifndef XXH_NO_STREAM /* - * All the functions are actually the same as for 64-bit streaming variant. - * The only difference is the finalization routine. + * All initialization and update functions are identical to 64-bit streaming + * variant. The only difference is the finalization routine. */ -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t *statePtr) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t *statePtr) { - if (statePtr == NULL) return XXH_ERROR; - XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); - return XXH_OK; + return XXH3_64bits_reset(statePtr); } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret( - XXH3_state_t *statePtr, const void *secret, size_t secretSize) { + XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret, + size_t secretSize) { - if (statePtr == NULL) return XXH_ERROR; - XXH3_reset_internal(statePtr, 0, secret, secretSize); - if (secret == NULL) return XXH_ERROR; - if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; - return XXH_OK; + return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize); } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t *statePtr, - XXH64_hash_t seed) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH64_hash_t seed) { - if (statePtr == NULL) return XXH_ERROR; - if (seed == 0) return XXH3_128bits_reset(statePtr); - if (seed != statePtr->seed) - XXH3_initCustomSecret(statePtr->customSecret, seed); - XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); - return XXH_OK; + return XXH3_64bits_reset_withSeed(statePtr, seed); } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update(XXH3_state_t *state, - const void *input, - size_t len) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecretandSeed( + XXH_NOESCAPE XXH3_state_t *statePtr, XXH_NOESCAPE const void *secret, + size_t secretSize, XXH64_hash_t seed) { - return XXH3_update(state, (const xxh_u8 *)input, len, XXH3_accumulate_512, - XXH3_scrambleAcc); + return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, + seed); } -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest(const XXH3_state_t *state) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t *state, + XXH_NOESCAPE const void *input, size_t len) { + + return XXH3_64bits_update(state, input, len); + +} + +/*! 
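A sketch of the dispatch above: up to XXH3_MIDSIZE_MAX (240 bytes) the *_withSecretandSeed variants take the seeded path with the built-in secret, so for short inputs they agree with the plain seeded variant. Assumes a secret of at least XXH3_SECRET_SIZE_MIN bytes and the implementation linked in:

#include <assert.h>
#include <string.h>
#include "xxhash.h"

int main(void) {
  unsigned char secret[XXH3_SECRET_SIZE_MIN];
  unsigned char buf[64] = {0};
  memset(secret, 0x5a, sizeof(secret));
  /* 64 <= XXH3_MIDSIZE_MAX, so the custom secret is not consulted here */
  assert(XXH128_isEqual(XXH3_128bits_withSecretandSeed(
                            buf, sizeof(buf), secret, sizeof(secret), 7),
                        XXH3_128bits_withSeed(buf, sizeof(buf), 7)));
  return 0;
}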
@ingroup XXH3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_digest(XXH_NOESCAPE const XXH3_state_t *state) { const unsigned char *const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; @@ -6186,12 +7859,13 @@ XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest(const XXH3_state_t *state) { } - /* 128-bit utility functions */ + #endif /* !XXH_NO_STREAM */ + /* 128-bit utility functions */ #include <string.h> /* memcmp, memcpy */ /* return : 1 is equal, 0 if different */ -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) { /* note : XXH128_hash_t is compact, it has no padding byte */ @@ -6200,11 +7874,12 @@ XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) { } /* This prototype is compatible with stdlib's qsort(). - * return : >0 if *h128_1 > *h128_2 - * <0 if *h128_1 < *h128_2 - * =0 if *h128_1 == *h128_2 */ -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API int XXH128_cmp(const void *h128_1, const void *h128_2) { + * @return : >0 if *h128_1 > *h128_2 + * <0 if *h128_1 < *h128_2 + * =0 if *h128_1 == *h128_2 */ +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void *h128_1, + XXH_NOESCAPE const void *h128_2) { XXH128_hash_t const h1 = *(const XXH128_hash_t *)h128_1; XXH128_hash_t const h2 = *(const XXH128_hash_t *)h128_2; @@ -6216,9 +7891,9 @@ XXH_PUBLIC_API int XXH128_cmp(const void *h128_1, const void *h128_2) { } /*====== Canonical representation ======*/ -/*! @ingroup xxh3_family */ -XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t *dst, - XXH128_hash_t hash) { +/*! @ingroup XXH3_family */ +XXH_PUBLIC_API void XXH128_canonicalFromHash( + XXH_NOESCAPE XXH128_canonical_t *dst, XXH128_hash_t hash) { XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t)); if (XXH_CPU_LITTLE_ENDIAN) { @@ -6228,14 +7903,15 @@ XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t *dst, } - memcpy(dst, &hash.high64, sizeof(hash.high64)); - memcpy((char *)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64)); + XXH_memcpy(dst, &hash.high64, sizeof(hash.high64)); + XXH_memcpy((char *)dst + sizeof(hash.high64), &hash.low64, + sizeof(hash.low64)); } -/*! @ingroup xxh3_family */ +/*! @ingroup XXH3_family */ XXH_PUBLIC_API XXH128_hash_t -XXH128_hashFromCanonical(const XXH128_canonical_t *src) { +XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t *src) { XXH128_hash_t h; h.high64 = XXH_readBE64(src); @@ -6244,11 +7920,99 @@ XXH128_hashFromCanonical(const XXH128_canonical_t *src) { } + /* ========================================== + * Secret generators + * ========================================== + */ + #define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x)) + +XXH_FORCE_INLINE void XXH3_combine16(void *dst, XXH128_hash_t h128) { + + XXH_writeLE64(dst, XXH_readLE64(dst) ^ h128.low64); + XXH_writeLE64((char *)dst + 8, XXH_readLE64((char *)dst + 8) ^ h128.high64); + +} + +/*! 
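The canonical form round-trips, per the functions above; a short sketch:

#include <assert.h>
#include "xxhash.h"

int main(void) {
  XXH128_hash_t const h = XXH3_128bits("abc", 3);
  XXH128_canonical_t canon;
  XXH128_canonicalFromHash(&canon, h); /* big-endian serialized form */
  assert(XXH128_isEqual(h, XXH128_hashFromCanonical(&canon)));
  return 0;
}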
@ingroup XXH3_family */
+XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(
+ XXH_NOESCAPE void *secretBuffer, size_t secretSize,
+ XXH_NOESCAPE const void *customSeed, size_t customSeedSize) {
+
+ #if (XXH_DEBUGLEVEL >= 1)
+ XXH_ASSERT(secretBuffer != NULL);
+ XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+ #else
+ /* production mode, assert() are disabled */
+ if (secretBuffer == NULL) return XXH_ERROR;
+ if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+ #endif
+
+ if (customSeedSize == 0) {
+
+ customSeed = XXH3_kSecret;
+ customSeedSize = XXH_SECRET_DEFAULT_SIZE;
+
+ }
+
+ #if (XXH_DEBUGLEVEL >= 1)
+ XXH_ASSERT(customSeed != NULL);
+ #else
+ if (customSeed == NULL) return XXH_ERROR;
+ #endif
+
+ /* Fill secretBuffer with a copy of customSeed - repeat as needed */
+ {
+
+ size_t pos = 0;
+ while (pos < secretSize) {
+
+ size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
+ memcpy((char *)secretBuffer + pos, customSeed, toCopy);
+ pos += toCopy;
+
+ }
+
+ }
+
+ {
+
+ size_t const nbSeg16 = secretSize / 16;
+ size_t n;
+ XXH128_canonical_t scrambler;
+ XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
+ for (n = 0; n < nbSeg16; n++) {
+
+ XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
+ XXH3_combine16((char *)secretBuffer + n * 16, h128);
+
+ }
+
+ /* last segment */
+ XXH3_combine16((char *)secretBuffer + secretSize - 16,
+ XXH128_hashFromCanonical(&scrambler));
+
+ }
+
+ return XXH_OK;
+
+}
+
+/*! @ingroup XXH3_family */
+XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(
+ XXH_NOESCAPE void *secretBuffer, XXH64_hash_t seed) {
+
+ XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+ XXH3_initCustomSecret(secret, seed);
+ XXH_ASSERT(secretBuffer != NULL);
+ memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
+
+}
+
/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
&& defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
&& defined(__OPTIMIZE__) && \
- !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
+ XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
#pragma GCC pop_options
#endif
@@ -6263,7 +8027,7 @@ XXH128_hashFromCanonical(const XXH128_canonical_t *src) {
#if defined(__cplusplus)
-}
+} /* extern "C" */
#endif
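Usage sketch for the secret generators above: derive a full secret from low-entropy material, then feed it to a _withSecret variant (XXH3_SECRET_DEFAULT_SIZE, assumed here to be the 192-byte default, sizes the buffer):

#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void) {
  unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
  const char *material = "low entropy seed material";
  if (XXH3_generateSecret(secret, sizeof(secret), material,
                          strlen(material)) != XXH_OK)
    return 1;
  printf("%016llx\n", (unsigned long long)XXH3_64bits_withSecret(
                          "abc", 3, secret, sizeof(secret)));
  return 0;
}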