Diffstat (limited to 'include')

 include/afl-fuzz.h       |  434
 include/afl-prealloc.h   |    2
 include/alloc-inl.h      |  355
 include/android-ashmem.h |   87
 include/cmplog.h         |   27
 include/common.h         |   16
 include/config.h         |  157
 include/coverage-32.h    |  112
 include/coverage-64.h    |  189
 include/debug.h          |  169
 include/envs.h           |   50
 include/forkserver.h     |   35
 include/list.h           |    1
 include/sharedmem.h      |    1
 include/snapshot-inl.h   |    2
 include/types.h          |   38
 include/xxh3.h           | 3187
 include/xxhash.h         | 3688

 18 files changed, 4594 insertions(+), 3956 deletions(-)
diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h
index 1c1be711..5003b563 100644
--- a/include/afl-fuzz.h
+++ b/include/afl-fuzz.h
@@ -37,10 +37,6 @@
   #define _FILE_OFFSET_BITS 64
 #endif
 
-#ifdef __ANDROID__
-  #include "android-ashmem.h"
-#endif
-
 #include "config.h"
 #include "types.h"
 #include "debug.h"
@@ -65,6 +61,9 @@
 #include <dlfcn.h>
 #include <sched.h>
 
+#include <netdb.h>
+#include <netinet/in.h>
+
 #include <sys/wait.h>
 #include <sys/time.h>
 #ifndef USEMMAP
@@ -76,12 +75,18 @@
 #include <sys/mman.h>
 #include <sys/ioctl.h>
 #include <sys/file.h>
+#include <sys/types.h>
 
 #if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) || \
     defined(__NetBSD__) || defined(__DragonFly__)
   #include <sys/sysctl.h>
 #endif                           /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
 
+#if defined(__HAIKU__)
+  #include <kernel/OS.h>
+  #include <kernel/scheduler.h>
+#endif
+
 /* For systems that have sched_setaffinity; right now just Linux, but one
    can hope... */
 
@@ -104,6 +109,7 @@
     #include <kstat.h>
     #include <sys/sysinfo.h>
     #include <sys/pset.h>
+    #include <strings.h>
   #endif
 #endif                                                         /* __linux__ */
 
@@ -121,41 +127,72 @@
 
 #define STAGE_BUF_SIZE (64)  /* usable size for stage name buf in afl_state */
 
+// Little helper to access the ptr to afl->name##_buf - for use in afl_realloc.
+#define AFL_BUF_PARAM(name) ((void **)&afl->name##_buf)
+
+#ifdef WORD_SIZE_64
+  #define AFL_RAND_RETURN u64
+#else
+  #define AFL_RAND_RETURN u32
+#endif
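
For illustration, the two macros above are meant to be used together with the afl_realloc() API introduced in alloc-inl.h further down. A minimal sketch, growing a dynamic buffer before copying into it; `len` and `in_buf` are hypothetical locals:

    u8 *out_buf = afl_realloc(AFL_BUF_PARAM(out), len);  /* &afl->out_buf */
    if (unlikely(!out_buf)) { PFATAL("alloc"); }
    memcpy(out_buf, in_buf, len);              /* capacity is now >= len */
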
+
 extern s8  interesting_8[INTERESTING_8_LEN];
 extern s16 interesting_16[INTERESTING_8_LEN + INTERESTING_16_LEN];
 extern s32
     interesting_32[INTERESTING_8_LEN + INTERESTING_16_LEN + INTERESTING_32_LEN];
 
+struct tainted {
+
+  u32             pos;
+  u32             len;
+  struct tainted *next;
+  struct tainted *prev;
+
+};
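
A sketch of how such a per-entry taint list can be walked once CmpLog colorization has filled it; `q`, `buf` and process_region() are hypothetical names here:

    struct tainted *t;
    for (t = q->taint; t; t = t->next) {

      /* bytes [t->pos, t->pos + t->len) of the input influenced at least
         one comparison during colorization */
      process_region(buf + t->pos, t->len);

    }
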
+
 struct queue_entry {
 
   u8 *fname;                            /* File name for the test case      */
   u32 len;                              /* Input length                     */
+  u32 id;                               /* entry number in queue_buf        */
 
-  u8 cal_failed,                        /* Calibration failed?              */
-      trim_done,                        /* Trimmed?                         */
+  u8 colorized,                         /* Do not run redqueen stage again  */
+      cal_failed;                       /* Calibration failed?              */
+  bool trim_done,                       /* Trimmed?                         */
       was_fuzzed,                       /* historical, but needed for MOpt  */
       passed_det,                       /* Deterministic stages passed?     */
       has_new_cov,                      /* Triggers new coverage?           */
       var_behavior,                     /* Variable behavior?               */
       favored,                          /* Currently favored?               */
       fs_redundant,                     /* Marked as redundant in the fs?   */
-      fully_colorized,                  /* Do not run redqueen stage again  */
-      is_ascii;                         /* Is the input just ascii text?    */
+      is_ascii,                         /* Is the input just ascii text?    */
+      disabled;                         /* Is disabled from fuzz selection  */
 
   u32 bitmap_size,                      /* Number of bits set in bitmap     */
-      fuzz_level;                       /* Number of fuzzing iterations     */
+      fuzz_level,                       /* Number of fuzzing iterations     */
+      n_fuzz_entry;                     /* offset in n_fuzz                 */
 
   u64 exec_us,                          /* Execution time (us)              */
       handicap,                         /* Number of queue cycles behind    */
-      n_fuzz,                           /* Number of fuzz, does not overflow*/
       depth,                            /* Path depth                       */
       exec_cksum;                       /* Checksum of the execution trace  */
 
   u8 *trace_mini;                       /* Trace bytes, if kept             */
   u32 tc_ref;                           /* Trace bytes ref count            */
 
-  struct queue_entry *next,             /* Next element, if any             */
-      *next_100;                        /* 100 elements ahead               */
+#ifdef INTROSPECTION
+  u32 bitsmap_size;
+#endif
+
+  double perf_score,                    /* performance score                */
+      weight;
+
+  u8 *testcase_buf;                     /* The testcase buffer, if loaded.  */
+
+  u8 *            cmplog_colorinput;    /* the result buf of colorization   */
+  struct tainted *taint;                /* Taint information from CmpLog    */
+
+  struct queue_entry *mother;           /* queue entry this is based on     */
 
 };
 
@@ -167,6 +204,14 @@ struct extra_data {
 
 };
 
+struct auto_extra_data {
+
+  u8  data[MAX_AUTO_EXTRA];             /* Dictionary token data            */
+  u32 len;                              /* Dictionary token length          */
+  u32 hit_cnt;                          /* Use count in the corpus          */
+
+};
+
 /* Fuzzing stages */
 
 enum {
@@ -207,7 +252,7 @@ enum {
 
 };
 
-#define operator_num 18
+#define operator_num 19
 #define swarm_num 5
 #define period_core 500000
 
@@ -223,18 +268,19 @@ enum {
 #define STAGE_OverWrite75 15
 #define STAGE_OverWriteExtra 16
 #define STAGE_InsertExtra 17
+#define STAGE_Splice 18
 #define period_pilot 50000
 
 enum {
 
   /* 00 */ EXPLORE, /* AFL default, Exploration-based constant schedule */
-  /* 01 */ EXPLOIT, /* AFL's exploitation-based const.  */
-  /* 02 */ FAST,    /* Exponential schedule             */
-  /* 03 */ COE,     /* Cut-Off Exponential schedule     */
-  /* 04 */ LIN,     /* Linear schedule                  */
-  /* 05 */ QUAD,    /* Quadratic schedule               */
-  /* 06 */ RARE,    /* Rare edges                       */
-  /* 07 */ MMOPT,   /* Modified MOPT schedule           */
+  /* 01 */ MMOPT,   /* Modified MOPT schedule           */
+  /* 02 */ EXPLOIT, /* AFL's exploitation-based const.  */
+  /* 03 */ FAST,    /* Exponential schedule             */
+  /* 04 */ COE,     /* Cut-Off Exponential schedule     */
+  /* 05 */ LIN,     /* Linear schedule                  */
+  /* 06 */ QUAD,    /* Quadratic schedule               */
+  /* 07 */ RARE,    /* Rare edges                       */
   /* 08 */ SEEK,    /* EXPLORE that ignores timings     */
 
   POWER_SCHEDULES_NUM
@@ -272,16 +318,21 @@ enum {
 enum {
 
   /* 00 */ PY_FUNC_INIT,
-  /* 01 */ PY_FUNC_FUZZ,
-  /* 02 */ PY_FUNC_POST_PROCESS,
-  /* 03 */ PY_FUNC_INIT_TRIM,
-  /* 04 */ PY_FUNC_POST_TRIM,
-  /* 05 */ PY_FUNC_TRIM,
-  /* 06 */ PY_FUNC_HAVOC_MUTATION,
-  /* 07 */ PY_FUNC_HAVOC_MUTATION_PROBABILITY,
-  /* 08 */ PY_FUNC_QUEUE_GET,
-  /* 09 */ PY_FUNC_QUEUE_NEW_ENTRY,
-  /* 10 */ PY_FUNC_DEINIT,
+  /* 01 */ PY_FUNC_DEINIT,
+  /* FROM HERE ON BELOW ALL ARE OPTIONAL */
+  /* 02 */ PY_OPTIONAL = 2,
+  /* 02 */ PY_FUNC_FUZZ = 2,
+  /* 03 */ PY_FUNC_FUZZ_COUNT,
+  /* 04 */ PY_FUNC_POST_PROCESS,
+  /* 05 */ PY_FUNC_INIT_TRIM,
+  /* 06 */ PY_FUNC_POST_TRIM,
+  /* 07 */ PY_FUNC_TRIM,
+  /* 08 */ PY_FUNC_HAVOC_MUTATION,
+  /* 09 */ PY_FUNC_HAVOC_MUTATION_PROBABILITY,
+  /* 10 */ PY_FUNC_QUEUE_GET,
+  /* 11 */ PY_FUNC_QUEUE_NEW_ENTRY,
+  /* 12 */ PY_FUNC_INTROSPECTION,
+  /* 13 */ PY_FUNC_DESCRIBE,
   PY_FUNC_COUNT
 
 };
@@ -296,8 +347,7 @@ typedef struct py_mutator {
   u8 *   fuzz_buf;
   size_t fuzz_size;
 
-  u8 *   post_process_buf;
-  size_t post_process_size;
+  Py_buffer post_process_buf;
 
   u8 *   trim_buf;
   size_t trim_size;
@@ -333,11 +383,14 @@ typedef struct afl_env_vars {
   u8 afl_skip_cpufreq, afl_exit_when_done, afl_no_affinity, afl_skip_bin_check,
       afl_dumb_forksrv, afl_import_first, afl_custom_mutator_only, afl_no_ui,
       afl_force_ui, afl_i_dont_care_about_missing_crashes, afl_bench_just_one,
-      afl_bench_until_crash, afl_debug_child_output, afl_autoresume,
-      afl_cal_fast, afl_cycle_schedules, afl_expand_havoc;
+      afl_bench_until_crash, afl_debug_child, afl_autoresume, afl_cal_fast,
+      afl_cycle_schedules, afl_expand_havoc, afl_statsd, afl_cmplog_only_new;
 
   u8 *afl_tmpdir, *afl_custom_mutator_library, *afl_python_module, *afl_path,
-      *afl_hang_tmout, *afl_skip_crashes, *afl_preload;
+      *afl_hang_tmout, *afl_forksrv_init_tmout, *afl_skip_crashes, *afl_preload,
+      *afl_max_det_extras, *afl_statsd_host, *afl_statsd_port,
+      *afl_crash_exitcode, *afl_statsd_tags_flavor, *afl_testcache_size,
+      *afl_testcache_entries, *afl_kill_signal;
 
 } afl_env_vars_t;
 
@@ -351,7 +404,7 @@ struct afl_pass_stat {
 struct foreign_sync {
 
   u8 *   dir;
-  time_t ctime;
+  time_t mtime;
 
 };
 
@@ -372,7 +425,8 @@ typedef struct afl_state {
     really makes no sense to haul them around as function parameters. */
   u64 orig_hit_cnt_puppet, last_limit_time_start, tmp_pilot_time,
       total_pacemaker_time, total_puppet_find, temp_puppet_find, most_time_key,
-      most_time, most_execs_key, most_execs, old_hit_count, force_ui_update;
+      most_time, most_execs_key, most_execs, old_hit_count, force_ui_update,
+      prev_run_time;
 
   MOpt_globals_t mopt_globals_core, mopt_globals_pilot;
 
@@ -424,6 +478,7 @@ typedef struct afl_state {
 
   u8 cal_cycles,                        /* Calibration cycles defaults      */
       cal_cycles_long,                  /* Calibration cycles defaults      */
+      havoc_stack_pow2,                 /* HAVOC_STACK_POW2                 */
       no_unlink,                        /* do not unlink cur_input          */
       debug,                            /* Debug mode                       */
       custom_only,                      /* Custom mutator only mode         */
@@ -464,14 +519,22 @@ typedef struct afl_state {
       disable_trim,                     /* Never trim in fuzz_one           */
       shmem_testcase_mode,              /* If sharedmem testcases are used  */
       expand_havoc,                /* perform expensive havoc after no find */
-      cycle_schedules;                  /* cycle power schedules ?          */
+      cycle_schedules,                  /* cycle power schedules?           */
+      old_seed_selection;               /* use vanilla afl seed selection   */
 
   u8 *virgin_bits,                      /* Regions yet untouched by fuzzing */
       *virgin_tmout,                    /* Bits we haven't seen in tmouts   */
       *virgin_crash;                    /* Bits we haven't seen in crashes  */
 
+  double *alias_probability;            /* alias weighted probabilities     */
+  u32 *   alias_table;                /* alias weighted random lookup table */
+  u32     active_paths;                 /* enabled entries in the queue     */
+
   u8 *var_bytes;                        /* Bytes that appear to be variable */
 
+#define N_FUZZ_SIZE (1 << 21)
+  u32 *n_fuzz;
+
   volatile u8 stop_soon,                /* Ctrl-C pressed?                  */
       clear_screen;                     /* Window resized?                  */
 
@@ -490,7 +553,8 @@ typedef struct afl_state {
       useless_at_start,                 /* Number of useless starting paths */
       var_byte_count,                   /* Bitmap bytes with var behavior   */
       current_entry,                    /* Current queue entry ID           */
-      havoc_div;                        /* Cycle count divisor for havoc    */
+      havoc_div,                        /* Cycle count divisor for havoc    */
+      max_det_extras;                   /* deterministic extra count (dicts)*/
 
   u64 total_crashes,                    /* Total number of crashes          */
       unique_crashes,                   /* Crashes with unique signatures   */
@@ -506,6 +570,7 @@ typedef struct afl_state {
       blocks_eff_total,                 /* Blocks subject to effector maps  */
       blocks_eff_select,                /* Blocks selected as fuzzable      */
       start_time,                       /* Unix start time (ms)             */
+      last_sync_time,                   /* Time of last sync                */
       last_path_time,                   /* Time for most recent path (ms)   */
       last_crash_time,                  /* Time for most recent crash (ms)  */
       last_hang_time;                   /* Time for most recent hang (ms)   */
@@ -519,7 +584,7 @@ typedef struct afl_state {
 
   u8 stage_name_buf[STAGE_BUF_SIZE];    /* reused stagename buf with len 64 */
 
-  s32 stage_cur, stage_max;             /* Stage progression                */
+  u32 stage_cur, stage_max;             /* Stage progression                */
   s32 splicing_with;                    /* Splicing with which test case?   */
 
   u32 main_node_id, main_node_max;      /*   Main instance job splitting    */
@@ -536,8 +601,9 @@ typedef struct afl_state {
 
   u32 rand_cnt;                         /* Random number counter            */
 
-  u64 rand_seed[4];
-  s64 init_seed;
+  /*  unsigned long rand_seed[3]; would also work */
+  AFL_RAND_RETURN rand_seed[3];
+  s64             init_seed;
 
   u64 total_cal_us,                     /* Total calibration time (us)      */
       total_cal_cycles;                 /* Total calibration cycles         */
@@ -545,7 +611,8 @@ typedef struct afl_state {
   u64 total_bitmap_size,                /* Total bit count for all bitmaps  */
       total_bitmap_entries;             /* Number of bitmaps counted        */
 
-  s32 cpu_core_count;                   /* CPU core count                   */
+  s32 cpu_core_count,                   /* CPU core count                   */
+      cpu_to_bind;                      /* bind to specific CPU             */
 
 #ifdef HAVE_AFFINITY
   s32 cpu_aff;                          /* Selected CPU core                */
@@ -553,20 +620,19 @@ typedef struct afl_state {
 
   struct queue_entry *queue,            /* Fuzzing queue (linked list)      */
       *queue_cur,                       /* Current offset within the queue  */
-      *queue_top,                       /* Top of the list                  */
-      *q_prev100;                       /* Previous 100 marker              */
+      *queue_top;                       /* Top of the list                  */
 
   // growing buf
   struct queue_entry **queue_buf;
-  size_t               queue_size;
 
   struct queue_entry **top_rated;           /* Top entries for bitmap bytes */
 
   struct extra_data *extras;            /* Extra tokens to fuzz with        */
   u32                extras_cnt;        /* Total number of tokens read      */
 
-  struct extra_data *a_extras;          /* Automatically selected extras    */
-  u32                a_extras_cnt;      /* Total number of tokens available */
+  struct auto_extra_data
+      a_extras[MAX_AUTO_EXTRAS];        /* Automatically selected extras    */
+  u32 a_extras_cnt;                     /* Total number of tokens available */
 
   /* afl_postprocess API - Now supported via custom mutators */
 
@@ -581,6 +647,10 @@ typedef struct afl_state {
   /* cmplog forkserver ids */
   s32 cmplog_fsrv_ctl_fd, cmplog_fsrv_st_fd;
   u32 cmplog_prev_timed_out;
+  u32 cmplog_max_filesize;
+  u32 cmplog_lvl;
+  u32 colorize_success;
+  u8  cmplog_enable_arith, cmplog_enable_transform;
 
   struct afl_pass_stat *pass_stats;
   struct cmp_map *      orig_cmp_map;
@@ -590,10 +660,10 @@ typedef struct afl_state {
 
   unsigned long long int last_avg_exec_update;
   u32                    last_avg_execs;
-  float                  last_avg_execs_saved;
+  double                 last_avg_execs_saved;
 
 /* foreign sync */
-#define FOREIGN_SYNCS_MAX 32
+#define FOREIGN_SYNCS_MAX 32U
   u8                  foreign_sync_cnt;
   struct foreign_sync foreign_syncs[FOREIGN_SYNCS_MAX];
 
@@ -602,16 +672,24 @@ typedef struct afl_state {
   u32 document_counter;
 #endif
 
-  void *maybe_add_auto;
-
   /* statistics file */
   double last_bitmap_cvg, last_stability, last_eps;
 
   /* plot file saves from last run */
   u32 plot_prev_qp, plot_prev_pf, plot_prev_pnf, plot_prev_ce, plot_prev_md;
-  u64 plot_prev_qc, plot_prev_uc, plot_prev_uh;
+  u64 plot_prev_qc, plot_prev_uc, plot_prev_uh, plot_prev_ed;
 
   u64 stats_last_stats_ms, stats_last_plot_ms, stats_last_ms, stats_last_execs;
+
+  /* StatsD */
+  u64                statsd_last_send_ms;
+  struct sockaddr_in statsd_server;
+  int                statsd_sock;
+  char *             statsd_tags_flavor;
+  char *             statsd_tags_format;
+  char *             statsd_metric_format;
+  int                statsd_metric_format_type;
+
   double stats_avg_exec;
 
   u8 *clean_trace;
@@ -620,24 +698,23 @@ typedef struct afl_state {
 
   /* needed for afl_fuzz_one */
   // TODO: see which we can reuse
-  u8 *   out_buf;
-  size_t out_size;
+  u8 *out_buf;
+
+  u8 *out_scratch_buf;
 
-  u8 *   out_scratch_buf;
-  size_t out_scratch_size;
+  u8 *eff_buf;
 
-  u8 *   eff_buf;
-  size_t eff_size;
+  u8 *in_buf;
 
-  u8 *   in_buf;
-  size_t in_size;
+  u8 *in_scratch_buf;
 
-  u8 *   in_scratch_buf;
-  size_t in_scratch_size;
+  u8 *ex_buf;
 
-  u8 *   ex_buf;
-  size_t ex_size;
-  u32    custom_mutators_count;
+  u8 *testcase_buf, *splicecase_buf;
+
+  u32 custom_mutators_count;
+
+  struct custom_mutator *current_custom_fuzz;
 
   list_t custom_mutator_list;
 
@@ -645,14 +722,49 @@ typedef struct afl_state {
    * they do not call another function */
   u8 *map_tmp_buf;
 
+  /* queue entries ready for splicing count (len > 4) */
+  u32 ready_for_splicing_count;
+
+  /* This is the user specified maximum size to use for the testcase cache */
+  u64 q_testcase_max_cache_size;
+
+  /* This is the user specified maximum entries in the testcase cache */
+  u32 q_testcase_max_cache_entries;
+
+  /* How much of the testcase cache is used so far */
+  u64 q_testcase_cache_size;
+
+  /* highest cache count so far */
+  u32 q_testcase_max_cache_count;
+
+  /* How many queue entries currently have cached testcases */
+  u32 q_testcase_cache_count;
+
+  /* the smallest id of a currently known free entry */
+  u32 q_testcase_smallest_free;
+
+  /* How often did we evict from the cache (for statistics only) */
+  u32 q_testcase_evictions;
+
+  /* Refs to each queue entry with cached testcase (for eviction, if cache_count
+   * is too large) */
+  struct queue_entry **q_testcase_cache;
+
+#ifdef INTROSPECTION
+  char  mutation[8072];
+  char  m_tmp[4096];
+  FILE *introspection_file;
+  u32   bitsmap_size;
+#endif
+
 } afl_state_t;
 
 struct custom_mutator {
 
   const char *name;
+  char *      name_short;
   void *      dh;
   u8 *        post_process_buf;
-  size_t      post_process_size;
   u8          stacked_custom_prob, stacked_custom;
 
   void *data;                                    /* custom mutator data ptr */
@@ -669,11 +781,38 @@ struct custom_mutator {
   void *(*afl_custom_init)(afl_state_t *afl, unsigned int seed);
 
   /**
+   * When afl-fuzz was compiled with INTROSPECTION=1 then custom mutators can
+   * also give introspection information back with this function.
+   *
+   * @param data pointer returned in afl_custom_init by this custom mutator
+   * @return pointer to a text string (const char*)
+   */
+  const char *(*afl_custom_introspection)(void *data);
+
+  /**
+   * This method is called just before fuzzing a queue entry with the custom
+   * mutator, and receives the initial buffer. It should return the number of
+   * fuzzes to perform.
+   *
+   * A value of 0 means no fuzzing of this queue entry.
+   *
+   * The function is not allowed to change the data.
+   *
+   * (Optional)
+   *
+   * @param data pointer returned in afl_custom_init by this custom mutator
+   * @param buf Buffer containing the test case
+   * @param buf_size Size of the test case
+   * @return The amount of fuzzes to perform on this queue entry, 0 = skip
+   */
+  u32 (*afl_custom_fuzz_count)(void *data, const u8 *buf, size_t buf_size);
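
As a hedged sketch of this new optional callback, a shared-object custom mutator might implement it like this; the scheduling policy is invented purely for illustration:

    #include <stddef.h>
    #include <stdint.h>

    uint32_t afl_custom_fuzz_count(void *data, const uint8_t *buf,
                                   size_t buf_size) {

      (void)data;
      (void)buf;
      if (buf_size > 1024 * 1024) { return 0; }    /* skip huge inputs   */
      return buf_size < 1024 ? 2048 : 256;         /* favor small inputs */

    }
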
+
+  /**
    * Perform custom mutations on a given input
    *
    * (Optional for now. Required in the future)
    *
-   * @param data pointer returned in afl_custom_init for this fuzz case
+   * @param data pointer returned in afl_custom_init by this custom mutator
    * @param[in] buf Pointer to the input data to be mutated and the mutated
    *     output
    * @param[in] buf_size Size of the input/output data
@@ -689,13 +828,28 @@ struct custom_mutator {
                             u8 *add_buf, size_t add_buf_size, size_t max_size);
 
   /**
+   * Describe the current testcase, generated by the last mutation.
+   * This will be called, for example, to give the written testcase a name
+   * after a crash occurred. It can help to reproduce crashing mutations.
+   *
+   * (Optional)
+   *
+   * @param data pointer returned by afl_custom_init for this custom mutator
+   * @param[in] max_description_len maximum size available for the description.
+   *             A longer return string is legal, but will be truncated.
+   * @return A valid ptr to a 0-terminated string.
+   *         An empty or NULL return will result in a default description
+   */
+  const char *(*afl_custom_describe)(void *data, size_t max_description_len);
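
A minimal sketch of an implementation; the returned tag and the `last_strategy` variable are hypothetical, set elsewhere by the mutator's fuzz step:

    static const char *last_strategy = "flip_header_magic";

    const char *afl_custom_describe(void *data, size_t max_description_len) {

      (void)data;
      (void)max_description_len;     /* afl-fuzz truncates longer strings */
      return last_strategy;

    }
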
+
+  /**
    * A post-processing function to use right before AFL writes the test case to
    * disk in order to execute the target.
    *
    * (Optional) If this functionality is not needed, simply don't define this
    * function.
    *
-   * @param[in] data pointer returned in afl_custom_init for this fuzz case
+   * @param[in] data pointer returned in afl_custom_init by this custom mutator
    * @param[in] buf Buffer containing the test case to be executed
    * @param[in] buf_size Size of the test case
    * @param[out] out_buf Pointer to the buffer storing the test case after
@@ -722,7 +876,7 @@ struct custom_mutator {
    *
    * (Optional)
    *
-   * @param data pointer returned in afl_custom_init for this fuzz case
+   * @param data pointer returned in afl_custom_init by this custom mutator
    * @param buf Buffer containing the test case
    * @param buf_size Size of the test case
    * @return The amount of possible iteration steps to trim the input.
@@ -741,7 +895,7 @@ struct custom_mutator {
    *
    * (Optional)
    *
-   * @param data pointer returned in afl_custom_init for this fuzz case
+   * @param data pointer returned in afl_custom_init by this custom mutator
    * @param[out] out_buf Pointer to the buffer containing the trimmed test case.
    *             The library can reuse a buffer for each call
    *             and will have to free the buf (for example in deinit)
@@ -756,7 +910,7 @@ struct custom_mutator {
    *
    * (Optional)
    *
-   * @param data pointer returned in afl_custom_init for this fuzz case
+   * @param data pointer returned in afl_custom_init by this custom mutator
    * @param success Indicates if the last trim operation was successful.
    * @return The next trim iteration index (from 0 to the maximum amount of
    *     steps returned in init_trim). Negative on error.
@@ -769,7 +923,7 @@ struct custom_mutator {
    *
    * (Optional)
    *
-   * @param[in] data pointer returned in afl_custom_init for this fuzz case
+   * @param[in] data pointer returned in afl_custom_init by this custom mutator
    * @param[in] buf Pointer to the input data to be mutated and the mutated
    *     output
    * @param[in] buf_size Size of input data
@@ -788,7 +942,7 @@ struct custom_mutator {
    *
    * (Optional)
    *
-   * @param data pointer returned in afl_custom_init for this fuzz case
+   * @param data pointer returned in afl_custom_init by this custom mutator
    * @return The probability (0-100).
    */
   u8 (*afl_custom_havoc_mutation_probability)(void *data);
@@ -798,7 +952,7 @@ struct custom_mutator {
    *
    * (Optional)
    *
-   * @param data pointer returned in afl_custom_init for this fuzz case
+   * @param data pointer returned in afl_custom_init by this custom mutator
    * @param filename File name of the test case in the queue entry
    * @return Return True(1) if the fuzzer will fuzz the queue entry, and
    *     False(0) otherwise.
@@ -811,7 +965,7 @@ struct custom_mutator {
    *
    * (Optional)
    *
-   * @param data pointer returned in afl_custom_init for this fuzz case
+   * @param data pointer returned in afl_custom_init by this custom mutator
    * @param filename_new_queue File name of the new queue entry
    * @param filename_orig_queue File name of the original queue entry. This
    *     argument can be NULL while initializing the fuzzer
@@ -821,7 +975,7 @@ struct custom_mutator {
   /**
    * Deinitialize the custom mutator.
    *
-   * @param data pointer returned in afl_custom_init for this fuzz case
+   * @param data pointer returned in afl_custom_init by this custom mutator
    */
   void (*afl_custom_deinit)(void *data);
 
@@ -856,15 +1010,17 @@ u8   trim_case_custom(afl_state_t *, struct queue_entry *q, u8 *in_buf,
 struct custom_mutator *load_custom_mutator_py(afl_state_t *, char *);
 void                   finalize_py_module(void *);
 
-size_t post_process_py(void *, u8 *, size_t, u8 **);
-s32    init_trim_py(void *, u8 *, size_t);
-s32    post_trim_py(void *, u8);
-size_t trim_py(void *, u8 **);
-size_t havoc_mutation_py(void *, u8 *, size_t, u8 **, size_t);
-u8     havoc_mutation_probability_py(void *);
-u8     queue_get_py(void *, const u8 *);
-void   queue_new_entry_py(void *, const u8 *, const u8 *);
-void   deinit_py(void *);
+u32         fuzz_count_py(void *, const u8 *, size_t);
+size_t      post_process_py(void *, u8 *, size_t, u8 **);
+s32         init_trim_py(void *, u8 *, size_t);
+s32         post_trim_py(void *, u8);
+size_t      trim_py(void *, u8 **);
+size_t      havoc_mutation_py(void *, u8 *, size_t, u8 **, size_t);
+u8          havoc_mutation_probability_py(void *);
+u8          queue_get_py(void *, const u8 *);
+const char *introspection_py(void *);
+void        queue_new_entry_py(void *, const u8 *, const u8 *);
+void        deinit_py(void *);
 
 #endif
 
@@ -885,37 +1041,50 @@ void write_bitmap(afl_state_t *);
 u32  count_bits(afl_state_t *, u8 *);
 u32  count_bytes(afl_state_t *, u8 *);
 u32  count_non_255_bytes(afl_state_t *, u8 *);
-#ifdef WORD_SIZE_64
-void simplify_trace(afl_state_t *, u64 *);
+void simplify_trace(afl_state_t *, u8 *);
 void classify_counts(afl_forkserver_t *);
+#ifdef WORD_SIZE_64
+void discover_word(u8 *ret, u64 *current, u64 *virgin);
 #else
-void simplify_trace(afl_state_t *, u32 *);
-void classify_counts(afl_forkserver_t *);
+void discover_word(u8 *ret, u32 *current, u32 *virgin);
 #endif
 void init_count_class16(void);
 void minimize_bits(afl_state_t *, u8 *, u8 *);
 #ifndef SIMPLE_FILES
-u8 *describe_op(afl_state_t *, u8);
+u8 *describe_op(afl_state_t *, u8, size_t);
 #endif
 u8 save_if_interesting(afl_state_t *, void *, u32, u8);
 u8 has_new_bits(afl_state_t *, u8 *);
+u8 has_new_bits_unclassified(afl_state_t *, u8 *);
 
 /* Extras */
 
 void load_extras_file(afl_state_t *, u8 *, u32 *, u32 *, u32);
 void load_extras(afl_state_t *, u8 *);
-void maybe_add_auto(void *, u8 *, u32);
+void dedup_extras(afl_state_t *);
+void deunicode_extras(afl_state_t *);
+void add_extra(afl_state_t *afl, u8 *mem, u32 len);
+void maybe_add_auto(afl_state_t *, u8 *, u32);
 void save_auto(afl_state_t *);
 void load_auto(afl_state_t *);
 void destroy_extras(afl_state_t *);
 
 /* Stats */
 
-void write_stats_file(afl_state_t *, double, double, double);
-void maybe_update_plot_file(afl_state_t *, double, double);
+void load_stats_file(afl_state_t *);
+void write_setup_file(afl_state_t *, u32, char **);
+void write_stats_file(afl_state_t *, u32, double, double, double);
+void maybe_update_plot_file(afl_state_t *, u32, double, double);
 void show_stats(afl_state_t *);
 void show_init_stats(afl_state_t *);
 
+/* StatsD */
+
+void statsd_setup_format(afl_state_t *afl);
+int  statsd_socket_init(afl_state_t *afl);
+int  statsd_send_metric(afl_state_t *afl);
+int  statsd_format_metric(afl_state_t *afl, char *buff, size_t bufflen);
+
 /* Run */
 
 fsrv_run_result_t fuzz_run_target(afl_state_t *, afl_forkserver_t *fsrv, u32);
@@ -939,7 +1108,7 @@ u8   fuzz_one(afl_state_t *);
 void bind_to_free_cpu(afl_state_t *);
 #endif
 void   setup_post(afl_state_t *);
-void   read_testcases(afl_state_t *);
+void   read_testcases(afl_state_t *, u8 *);
 void   perform_dry_run(afl_state_t *);
 void   pivot_inputs(afl_state_t *);
 u32    find_start_position(afl_state_t *);
@@ -947,6 +1116,8 @@ void   find_timeout(afl_state_t *);
 double get_runnable_processes(void);
 void   nuke_resume_dir(afl_state_t *);
 int    check_main_node_exists(afl_state_t *);
+u32    select_next_queue_entry(afl_state_t *afl);
+void   create_alias_table(afl_state_t *afl);
 void   setup_dirs_fds(afl_state_t *);
 void   setup_cmdline_file(afl_state_t *, char **);
 void   setup_stdio_file(afl_state_t *);
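
select_next_queue_entry() and create_alias_table() declared above implement the new weighted seed selection via the alias method over the alias_table/alias_probability arrays in afl_state_t. Each draw is O(1); conceptually it works like the following sketch (not the exact function body, and assuming the queued_paths counter holds the queue length):

    u32 select_next_queue_entry_sketch(afl_state_t *afl) {

      u32 s = rand_below(afl, afl->queued_paths);
      /* keep the slot or follow its alias, per precomputed probability */
      return (rand_next_percent(afl) < afl->alias_probability[s])
                 ? s
                 : afl->alias_table[s];

    }
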
@@ -954,7 +1125,7 @@ void   check_crash_handling(void);
 void   check_cpu_governor(afl_state_t *);
 void   get_core_count(afl_state_t *);
 void   fix_up_sync(afl_state_t *);
-void   check_asan_opts(void);
+void   check_asan_opts(afl_state_t *);
 void   check_binary(afl_state_t *, u8 *);
 void   fix_up_banner(afl_state_t *, u8 *);
 void   check_if_tty(afl_state_t *);
@@ -967,11 +1138,13 @@ void   read_foreign_testcases(afl_state_t *, int);
 u8 common_fuzz_cmplog_stuff(afl_state_t *afl, u8 *out_buf, u32 len);
 
 /* RedQueen */
-u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len,
-                        u64 exec_cksum);
+u8 input_to_state_stage(afl_state_t *afl, u8 *orig_buf, u8 *buf, u32 len);
+
+/* our RNG wrapper */
+AFL_RAND_RETURN rand_next(afl_state_t *afl);
 
-/* xoshiro256** */
-uint64_t rand_next(afl_state_t *afl);
+/* probability between 0.0 and 1.0 */
+double rand_next_percent(afl_state_t *afl);
 
 /**** Inline routines ****/
 
@@ -980,6 +1153,8 @@ uint64_t rand_next(afl_state_t *afl);
 
 static inline u32 rand_below(afl_state_t *afl, u32 limit) {
 
+  if (limit <= 1) return 0;
+
   /* The boundary not being necessarily a power of 2,
      we need to ensure the result uniformity. */
   if (unlikely(!afl->rand_cnt--) && likely(!afl->fixed_seed)) {
@@ -991,7 +1166,44 @@ static inline u32 rand_below(afl_state_t *afl, u32 limit) {
 
   }
 
-  return rand_next(afl) % limit;
+  /* Modulo is biased - we don't want our fuzzing to be biased so let's do it
+   right. See:
+   https://stackoverflow.com/questions/10984974/why-do-people-say-there-is-modulo-bias-when-using-a-random-number-generator
+   */
+  u64 unbiased_rnd;
+  do {
+
+    unbiased_rnd = rand_next(afl);
+
+  } while (unlikely(unbiased_rnd >= (UINT64_MAX - (UINT64_MAX % limit))));
+
+  return unbiased_rnd % limit;
+
+}
+
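To see the bias concretely: a 3-bit RNG (0..7) reduced % 3 hits 0 and 1 three ways each but 2 only twice. The loop above redraws whenever the value lands in the incomplete top bucket; a self-contained version of the same idea, with the RNG passed in as a callback:

    #include <stdint.h>

    static uint32_t unbiased_below(uint64_t (*next)(void *), void *rng,
                                   uint32_t limit) {

      /* largest multiple of limit representable in 64 bits; draws at or
         above it belong to a partial bucket and must be redrawn */
      const uint64_t cutoff = UINT64_MAX - (UINT64_MAX % limit);
      uint64_t       r;
      do {

        r = next(rng);

      } while (r >= cutoff);

      return (uint32_t)(r % limit);

    }
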
+/* we prefer lower range values here */
+/* this is only called with normal havoc, not MOpt, to have an equalizer for
+   expand havoc mode */
+static inline u32 rand_below_datalen(afl_state_t *afl, u32 limit) {
+
+  if (limit <= 1) return 0;
+
+  switch (rand_below(afl, 3)) {
+
+    case 2:
+      return (rand_below(afl, limit) % (1 + rand_below(afl, limit - 1))) %
+             (1 + rand_below(afl, limit - 1));
+      break;
+    case 1:
+      return rand_below(afl, limit) % (1 + rand_below(afl, limit - 1));
+      break;
+    case 0:
+      return rand_below(afl, limit);
+      break;
+
+  }
+
+  return 1;  // cannot be reached
 
 }
 
@@ -1021,5 +1233,25 @@ static inline u64 next_p2(u64 val) {
 
 }
 
+/* Returns the testcase buf from the file behind this queue entry.
+  Increases the refcount. */
+u8 *queue_testcase_get(afl_state_t *afl, struct queue_entry *q);
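
A hedged sketch of the intended call site (fuzz_one-style code); the cache owns the returned buffer, so the caller must not free it:

    u32 len = afl->queue_cur->len;
    u8 *in_buf = queue_testcase_get(afl, afl->queue_cur);
    /* read or copy in_buf[0 .. len); later get() calls may evict the
       cached q->testcase_buf of *other* entries to stay under the
       configured cache limits */
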
+
+/* If trimming changes the testcase size we have to reload it */
+void queue_testcase_retake(afl_state_t *afl, struct queue_entry *q,
+                           u32 old_len);
+
+/* If trimming changes the testcase size we have to replace it  */
+void queue_testcase_retake_mem(afl_state_t *afl, struct queue_entry *q, u8 *in,
+                               u32 len, u32 old_len);
+
+/* Add a new queue entry directly to the cache */
+
+void queue_testcase_store_mem(afl_state_t *afl, struct queue_entry *q, u8 *mem);
+
+#if TESTCASE_CACHE == 1
+  #error define of TESTCASE_CACHE must be zero or larger than 1
+#endif
+
 #endif
 
diff --git a/include/afl-prealloc.h b/include/afl-prealloc.h
index edf69a67..fa6c9b70 100644
--- a/include/afl-prealloc.h
+++ b/include/afl-prealloc.h
@@ -60,7 +60,7 @@ typedef enum prealloc_status {
                                                                                \
     if ((prealloc_counter) >= (prealloc_size)) {                               \
                                                                                \
-      el_ptr = (void *)malloc(sizeof(*el_ptr));                                \
+      el_ptr = (element_t *)malloc(sizeof(*el_ptr));                           \
       if (!el_ptr) { FATAL("error in list.h -> out of memory for element!"); } \
       el_ptr->pre_status = PRE_STATUS_MALLOC;                                  \
                                                                                \
diff --git a/include/alloc-inl.h b/include/alloc-inl.h
index 832b2de4..c914da5f 100644
--- a/include/alloc-inl.h
+++ b/include/alloc-inl.h
@@ -30,12 +30,13 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <stddef.h>
 
 #include "config.h"
 #include "types.h"
 #include "debug.h"
 
-/* Initial size used for ck_maybe_grow */
+/* Initial size used for afl_realloc */
 #define INITIAL_GROWTH_SIZE (64)
 
 // Be careful! _WANT_ORIGINAL_AFL_ALLOC is not compatible with custom mutators
@@ -76,10 +77,6 @@
                                                                         \
     } while (0)
 
-  /* Allocator increments for ck_realloc_block(). */
-
-  #define ALLOC_BLK_INC 256
-
 /* Allocate a buffer, explicitly not zeroing it. Returns NULL for zero-sized
    requests. */
 
@@ -97,7 +94,8 @@ static inline void *DFL_ck_alloc_nozero(u32 size) {
 
 }
 
-/* Allocate a buffer, returning zeroed memory. */
+/* Allocate a buffer, returning zeroed memory.
+  Returns NULL for zero-sized requests. */
 
 static inline void *DFL_ck_alloc(u32 size) {
 
@@ -149,15 +147,6 @@ static inline void *DFL_ck_realloc(void *orig, u32 size) {
 
 }
 
-/* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up
-   repeated small reallocs without complicating the user code). */
-
-static inline void *DFL_ck_realloc_block(void *orig, u32 size) {
-
-  return DFL_ck_realloc(orig, size);
-
-}
-
 /* Create a buffer with a copy of a string. Returns NULL for NULL inputs. */
 
 static inline u8 *DFL_ck_strdup(u8 *str) {
@@ -177,53 +166,13 @@ static inline u8 *DFL_ck_strdup(u8 *str) {
 
 }
 
-/* Create a buffer with a copy of a memory block. Returns NULL for zero-sized
-   or NULL inputs. */
-
-static inline void *DFL_ck_memdup(void *mem, u32 size) {
-
-  void *ret;
-
-  if (!mem || !size) { return NULL; }
-
-  ALLOC_CHECK_SIZE(size);
-  ret = malloc(size);
-  ALLOC_CHECK_RESULT(ret, size);
-
-  return memcpy(ret, mem, size);
-
-}
-
-/* Create a buffer with a block of text, appending a NUL terminator at the end.
-   Returns NULL for zero-sized or NULL inputs. */
-
-static inline u8 *DFL_ck_memdup_str(u8 *mem, u32 size) {
-
-  u8 *ret;
-
-  if (!mem || !size) { return NULL; }
-
-  ALLOC_CHECK_SIZE(size);
-  ret = (u8 *)malloc(size + 1);
-  ALLOC_CHECK_RESULT(ret, size);
-
-  memcpy(ret, mem, size);
-  ret[size] = 0;
-
-  return ret;
-
-}
-
   /* In non-debug mode, we just do straightforward aliasing of the above
      functions to user-visible names such as ck_alloc(). */
 
   #define ck_alloc DFL_ck_alloc
   #define ck_alloc_nozero DFL_ck_alloc_nozero
   #define ck_realloc DFL_ck_realloc
-  #define ck_realloc_block DFL_ck_realloc_block
   #define ck_strdup DFL_ck_strdup
-  #define ck_memdup DFL_ck_memdup
-  #define ck_memdup_str DFL_ck_memdup_str
   #define ck_free DFL_ck_free
 
   #define alloc_report()
@@ -278,10 +227,6 @@ static inline u8 *DFL_ck_memdup_str(u8 *mem, u32 size) {
   #define ALLOC_OFF_HEAD 8
   #define ALLOC_OFF_TOTAL (ALLOC_OFF_HEAD + 1)
 
-  /* Allocator increments for ck_realloc_block(). */
-
-  #define ALLOC_BLK_INC 256
-
   /* Sanity-checking macros for pointers. */
 
   #define CHECK_PTR(_p)                            \
@@ -326,7 +271,7 @@ static inline void *DFL_ck_alloc_nozero(u32 size) {
   ret = malloc(size + ALLOC_OFF_TOTAL);
   ALLOC_CHECK_RESULT(ret, size);
 
-  ret += ALLOC_OFF_HEAD;
+  ret = (char *)ret + ALLOC_OFF_HEAD;
 
   ALLOC_C1(ret) = ALLOC_MAGIC_C1;
   ALLOC_S(ret) = size;
@@ -366,7 +311,7 @@ static inline void DFL_ck_free(void *mem) {
 
   ALLOC_C1(mem) = ALLOC_MAGIC_F;
 
-  free(mem - ALLOC_OFF_HEAD);
+  free((char *)mem - ALLOC_OFF_HEAD);
 
 }
 
@@ -395,7 +340,7 @@ static inline void *DFL_ck_realloc(void *orig, u32 size) {
   #endif                                                    /* !DEBUG_BUILD */
 
     old_size = ALLOC_S(orig);
-    orig -= ALLOC_OFF_HEAD;
+    orig = (char *)orig - ALLOC_OFF_HEAD;
 
     ALLOC_CHECK_SIZE(old_size);
 
@@ -418,10 +363,11 @@ static inline void *DFL_ck_realloc(void *orig, u32 size) {
 
   if (orig) {
 
-    memcpy(ret + ALLOC_OFF_HEAD, orig + ALLOC_OFF_HEAD, MIN(size, old_size));
-    memset(orig + ALLOC_OFF_HEAD, 0xFF, old_size);
+    memcpy((char *)ret + ALLOC_OFF_HEAD, (char *)orig + ALLOC_OFF_HEAD,
+           MIN(size, old_size));
+    memset((char *)orig + ALLOC_OFF_HEAD, 0xFF, old_size);
 
-    ALLOC_C1(orig + ALLOC_OFF_HEAD) = ALLOC_MAGIC_F;
+    ALLOC_C1((char *)orig + ALLOC_OFF_HEAD) = ALLOC_MAGIC_F;
 
     free(orig);
 
@@ -429,41 +375,18 @@ static inline void *DFL_ck_realloc(void *orig, u32 size) {
 
   #endif                                                   /* ^!DEBUG_BUILD */
 
-  ret += ALLOC_OFF_HEAD;
+  ret = (char *)ret + ALLOC_OFF_HEAD;
 
   ALLOC_C1(ret) = ALLOC_MAGIC_C1;
   ALLOC_S(ret) = size;
   ALLOC_C2(ret) = ALLOC_MAGIC_C2;
 
-  if (size > old_size) memset(ret + old_size, 0, size - old_size);
+  if (size > old_size) memset((char *)ret + old_size, 0, size - old_size);
 
   return ret;
 
 }
 
-/* Re-allocate a buffer with ALLOC_BLK_INC increments (used to speed up
-   repeated small reallocs without complicating the user code). */
-
-static inline void *DFL_ck_realloc_block(void *orig, u32 size) {
-
-  #ifndef DEBUG_BUILD
-
-  if (orig) {
-
-    CHECK_PTR(orig);
-
-    if (ALLOC_S(orig) >= size) return orig;
-
-    size += ALLOC_BLK_INC;
-
-  }
-
-  #endif                                                    /* !DEBUG_BUILD */
-
-  return DFL_ck_realloc(orig, size);
-
-}
-
 /* Create a buffer with a copy of a string. Returns NULL for NULL inputs. */
 
 static inline u8 *DFL_ck_strdup(u8 *str) {
@@ -479,7 +402,7 @@ static inline u8 *DFL_ck_strdup(u8 *str) {
   ret = malloc(size + ALLOC_OFF_TOTAL);
   ALLOC_CHECK_RESULT(ret, size);
 
-  ret += ALLOC_OFF_HEAD;
+  ret = (char *)ret + ALLOC_OFF_HEAD;
 
   ALLOC_C1(ret) = ALLOC_MAGIC_C1;
   ALLOC_S(ret) = size;
@@ -489,55 +412,6 @@ static inline u8 *DFL_ck_strdup(u8 *str) {
 
 }
 
-/* Create a buffer with a copy of a memory block. Returns NULL for zero-sized
-   or NULL inputs. */
-
-static inline void *DFL_ck_memdup(void *mem, u32 size) {
-
-  void *ret;
-
-  if (!mem || !size) return NULL;
-
-  ALLOC_CHECK_SIZE(size);
-  ret = malloc(size + ALLOC_OFF_TOTAL);
-  ALLOC_CHECK_RESULT(ret, size);
-
-  ret += ALLOC_OFF_HEAD;
-
-  ALLOC_C1(ret) = ALLOC_MAGIC_C1;
-  ALLOC_S(ret) = size;
-  ALLOC_C2(ret) = ALLOC_MAGIC_C2;
-
-  return memcpy(ret, mem, size);
-
-}
-
-/* Create a buffer with a block of text, appending a NUL terminator at the end.
-   Returns NULL for zero-sized or NULL inputs. */
-
-static inline u8 *DFL_ck_memdup_str(u8 *mem, u32 size) {
-
-  u8 *ret;
-
-  if (!mem || !size) return NULL;
-
-  ALLOC_CHECK_SIZE(size);
-  ret = malloc(size + ALLOC_OFF_TOTAL + 1);
-  ALLOC_CHECK_RESULT(ret, size);
-
-  ret += ALLOC_OFF_HEAD;
-
-  ALLOC_C1(ret) = ALLOC_MAGIC_C1;
-  ALLOC_S(ret) = size;
-  ALLOC_C2(ret) = ALLOC_MAGIC_C2;
-
-  memcpy(ret, mem, size);
-  ret[size] = 0;
-
-  return ret;
-
-}
-
   #ifndef DEBUG_BUILD
 
     /* In non-debug mode, we just do straightforward aliasing of the above
@@ -546,10 +420,7 @@ static inline u8 *DFL_ck_memdup_str(u8 *mem, u32 size) {
     #define ck_alloc DFL_ck_alloc
     #define ck_alloc_nozero DFL_ck_alloc_nozero
     #define ck_realloc DFL_ck_realloc
-    #define ck_realloc_block DFL_ck_realloc_block
     #define ck_strdup DFL_ck_strdup
-    #define ck_memdup DFL_ck_memdup
-    #define ck_memdup_str DFL_ck_memdup_str
     #define ck_free DFL_ck_free
 
     #define alloc_report()
@@ -618,8 +489,8 @@ static inline void TRK_alloc_buf(void *ptr, const char *file, const char *func,
 
   /* No space available - allocate more. */
 
-  TRK[bucket] = DFL_ck_realloc_block(
-      TRK[bucket], (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj));
+  TRK[bucket] = DFL_ck_realloc(TRK[bucket],
+                               (TRK_cnt[bucket] + 1) * sizeof(struct TRK_obj));
 
   TRK[bucket][i].ptr = ptr;
   TRK[bucket][i].file = (char *)file;
@@ -694,16 +565,6 @@ static inline void *TRK_ck_realloc(void *orig, u32 size, const char *file,
 
 }
 
-static inline void *TRK_ck_realloc_block(void *orig, u32 size, const char *file,
-                                         const char *func, u32 line) {
-
-  void *ret = DFL_ck_realloc_block(orig, size);
-  TRK_free_buf(orig, file, func, line);
-  TRK_alloc_buf(ret, file, func, line);
-  return ret;
-
-}
-
 static inline void *TRK_ck_strdup(u8 *str, const char *file, const char *func,
                                   u32 line) {
 
@@ -713,24 +574,6 @@ static inline void *TRK_ck_strdup(u8 *str, const char *file, const char *func,
 
 }
 
-static inline void *TRK_ck_memdup(void *mem, u32 size, const char *file,
-                                  const char *func, u32 line) {
-
-  void *ret = DFL_ck_memdup(mem, size);
-  TRK_alloc_buf(ret, file, func, line);
-  return ret;
-
-}
-
-static inline void *TRK_ck_memdup_str(void *mem, u32 size, const char *file,
-                                      const char *func, u32 line) {
-
-  void *ret = DFL_ck_memdup_str(mem, size);
-  TRK_alloc_buf(ret, file, func, line);
-  return ret;
-
-}
-
 static inline void TRK_ck_free(void *ptr, const char *file, const char *func,
                                u32 line) {
 
@@ -749,17 +592,8 @@ static inline void TRK_ck_free(void *ptr, const char *file, const char *func,
     #define ck_realloc(_p1, _p2) \
       TRK_ck_realloc(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
 
-    #define ck_realloc_block(_p1, _p2) \
-      TRK_ck_realloc_block(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
-
     #define ck_strdup(_p1) TRK_ck_strdup(_p1, __FILE__, __FUNCTION__, __LINE__)
 
-    #define ck_memdup(_p1, _p2) \
-      TRK_ck_memdup(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
-
-    #define ck_memdup_str(_p1, _p2) \
-      TRK_ck_memdup_str(_p1, _p2, __FILE__, __FUNCTION__, __LINE__)
-
     #define ck_free(_p1) TRK_ck_free(_p1, __FILE__, __FUNCTION__, __LINE__)
 
   #endif                                                   /* ^!DEBUG_BUILD */
@@ -771,11 +605,14 @@ static inline void TRK_ck_free(void *ptr, const char *file, const char *func,
 */
 static inline size_t next_pow2(size_t in) {
 
-  if (in == 0 || in > (size_t)-1) {
-
-    return 0;                  /* avoid undefined behaviour under-/overflow */
+  // Commented this out as this behavior doesn't change, according to unittests
+  // if (in == 0 || in > (size_t)-1) {
 
-  }
+  //
+  //   return 0;                  /* avoid undefined behaviour under-/overflow
+  //   */
+  //
+  // }
 
   size_t out = in - 1;
   out |= out >> 1;
@@ -787,6 +624,35 @@ static inline size_t next_pow2(size_t in) {
 
 }
 
+/* AFL alloc buffer, the struct is here so we don't need to do fancy pointer
+ * arithmetic */
+struct afl_alloc_buf {
+
+  /* The complete allocated size, including the header of len
+   * AFL_ALLOC_SIZE_OFFSET */
+  size_t complete_size;
+  /* ptr to the first element of the actual buffer */
+  u8 buf[0];
+
+};
+
+#define AFL_ALLOC_SIZE_OFFSET (offsetof(struct afl_alloc_buf, buf))
+
+/* Returns the container element to this ptr */
+static inline struct afl_alloc_buf *afl_alloc_bufptr(void *buf) {
+
+  return (struct afl_alloc_buf *)((u8 *)buf - AFL_ALLOC_SIZE_OFFSET);
+
+}
+
+/* Gets the maximum size of the buf contents (ptr->complete_size -
+ * AFL_ALLOC_SIZE_OFFSET) */
+static inline size_t afl_alloc_bufsize(void *buf) {
+
+  return afl_alloc_bufptr(buf)->complete_size - AFL_ALLOC_SIZE_OFFSET;
+
+}
+
 /* This function makes sure the capacity stored with *buf is
  > size_needed after the call, reallocating *buf if necessary.
  The capacity will grow exponentially as per:
@@ -794,71 +660,116 @@ static inline size_t next_pow2(size_t in) {
  Will return NULL and free *buf if the realloc failed.
  @return For convenience, this function returns *buf.
  */
-static inline void *maybe_grow(void **buf, size_t *size, size_t size_needed) {
+static inline void *afl_realloc(void **buf, size_t size_needed) {
+
+  struct afl_alloc_buf *new_buf = NULL;
+
+  size_t current_size = 0;
+  size_t next_size = 0;
+
+  if (likely(*buf)) {
+
+    /* the size is always stored at buf - 1*size_t */
+    new_buf = (struct afl_alloc_buf *)afl_alloc_bufptr(*buf);
+    current_size = new_buf->complete_size;
+
+  }
+
+  size_needed += AFL_ALLOC_SIZE_OFFSET;
 
   /* No need to realloc */
-  if (likely(size_needed && *size >= size_needed)) { return *buf; }
+  if (likely(current_size >= size_needed)) { return *buf; }
 
   /* No initial size was set */
-  if (size_needed < INITIAL_GROWTH_SIZE) { size_needed = INITIAL_GROWTH_SIZE; }
+  if (size_needed < INITIAL_GROWTH_SIZE) {
+
+    next_size = INITIAL_GROWTH_SIZE;
 
-  /* grow exponentially */
-  size_t next_size = next_pow2(size_needed);
+  } else {
 
-  /* handle overflow and zero size_needed */
-  if (!next_size) { next_size = size_needed; }
+    /* grow exponentially */
+    next_size = next_pow2(size_needed);
+
+    /* handle overflow: fall back to the original size_needed */
+    if (unlikely(!next_size)) { next_size = size_needed; }
+
+  }
 
   /* alloc */
-  *buf = realloc(*buf, next_size);
-  *size = *buf ? next_size : 0;
+  struct afl_alloc_buf *newer_buf =
+      (struct afl_alloc_buf *)realloc(new_buf, next_size);
+  if (unlikely(!newer_buf)) {
+
+    free(new_buf);  // avoid a leak
+    *buf = NULL;
+    return NULL;
+
+  } else {
+
+    new_buf = newer_buf;
 
+  }
+
+  new_buf->complete_size = next_size;
+  *buf = (void *)(new_buf->buf);
   return *buf;
 
 }
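
A self-contained sketch of the life cycle: start from NULL so the header is created on first use, take the return value on every call because the buffer may move, and release with afl_free() (defined below):

    void example(void) {

      u8 *buf = NULL;                         /* MUST start out NULL       */
      buf = afl_realloc((void **)&buf, 100);  /* capacity becomes >= 100   */
      if (!buf) { FATAL("out of memory"); }
      memset(buf, 0, 100);
      buf = afl_realloc((void **)&buf, 200);  /* may move; take the return */
      if (!buf) { FATAL("out of memory"); }
      afl_free(buf);                          /* frees header + contents   */

    }
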
 
-/* This function makes sure *size is > size_needed after call.
- It will realloc *buf otherwise.
- *size will grow exponentially as per:
- https://blog.mozilla.org/nnethercote/2014/11/04/please-grow-your-buffers-exponentially/
- Will FATAL if size_needed is <1.
- @return For convenience, this function returns *buf.
- */
-static inline void *ck_maybe_grow(void **buf, size_t *size,
-                                  size_t size_needed) {
+/* afl_realloc_exact uses afl alloc buffers but sets them to an exact size */
 
-  /* Oops. found a bug? */
-  if (unlikely(size_needed < 1)) { FATAL("cannot grow to non-positive size"); }
+static inline void *afl_realloc_exact(void **buf, size_t size_needed) {
 
-  /* No need to realloc */
-  if (likely(*size >= size_needed)) { return *buf; }
+  struct afl_alloc_buf *new_buf = NULL;
 
-  /* No initial size was set */
-  if (size_needed < INITIAL_GROWTH_SIZE) { size_needed = INITIAL_GROWTH_SIZE; }
+  size_t current_size = 0;
 
-  /* grow exponentially */
-  size_t next_size = next_pow2(size_needed);
+  if (likely(*buf)) {
 
-  /* handle overflow */
-  if (!next_size) { next_size = size_needed; }
+    /* the size is always stored at buf - 1*size_t */
+    new_buf = (struct afl_alloc_buf *)afl_alloc_bufptr(*buf);
+    current_size = new_buf->complete_size;
+
+  }
+
+  size_needed += AFL_ALLOC_SIZE_OFFSET;
+
+  /* No need to realloc */
+  if (unlikely(current_size == size_needed)) { return *buf; }
 
   /* alloc */
-  *buf = ck_realloc(*buf, next_size);
-  *size = next_size;
+  struct afl_alloc_buf *newer_buf =
+      (struct afl_alloc_buf *)realloc(new_buf, size_needed);
+  if (unlikely(!newer_buf)) {
 
+    free(new_buf);  // avoid a leak
+    *buf = NULL;
+    return NULL;
+
+  } else {
+
+    new_buf = newer_buf;
+
+  }
+
+  new_buf->complete_size = size_needed;
+  *buf = (void *)(new_buf->buf);
   return *buf;
 
 }
 
+static inline void afl_free(void *buf) {
+
+  if (buf) { free(afl_alloc_bufptr(buf)); }
+
+}
+
 /* Swaps buf1 ptr and buf2 ptr, as well as their sizes */
-static inline void swap_bufs(void **buf1, size_t *size1, void **buf2,
-                             size_t *size2) {
+static inline void afl_swap_bufs(void **buf1, void **buf2) {
 
-  void * scratch_buf = *buf1;
-  size_t scratch_size = *size1;
+  void *scratch_buf = *buf1;
   *buf1 = *buf2;
-  *size1 = *size2;
   *buf2 = scratch_buf;
-  *size2 = scratch_size;
 
 }
 
diff --git a/include/android-ashmem.h b/include/android-ashmem.h
index 41d4d2da..91699b27 100644
--- a/include/android-ashmem.h
+++ b/include/android-ashmem.h
@@ -1,62 +1,34 @@
-/*
-   american fuzzy lop++ - android shared memory compatibility layer
-   ----------------------------------------------------------------
-
-   Originally written by Michal Zalewski
-
-   Now maintained by Marc Heuse <mh@mh-sec.de>,
-                     Heiko Eißfeldt <heiko.eissfeldt@hexco.de>,
-                     Andrea Fioraldi <andreafioraldi@gmail.com>,
-                     Dominik Maier <mail@dmnk.co>
-
-   Copyright 2016, 2017 Google Inc. All rights reserved.
-   Copyright 2019-2020 AFLplusplus Project. All rights reserved.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at:
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-   This header re-defines the shared memory routines used by AFL++
-   using the Andoid API.
-
- */
-
-#ifndef _ANDROID_ASHMEM_H
-#define _ANDROID_ASHMEM_H
-
 #ifdef __ANDROID__
-
-  #include <fcntl.h>
-  #include <linux/shm.h>
-  #include <linux/ashmem.h>
-  #include <sys/ioctl.h>
-  #include <sys/mman.h>
-
-  #if __ANDROID_API__ >= 26
-    #define shmat bionic_shmat
-    #define shmctl bionic_shmctl
-    #define shmdt bionic_shmdt
-    #define shmget bionic_shmget
-  #endif
-
-  #include <sys/shm.h>
-  #undef shmat
-  #undef shmctl
-  #undef shmdt
-  #undef shmget
-  #include <stdio.h>
-
-  #define ASHMEM_DEVICE "/dev/ashmem"
-
-static inline int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) {
+  #ifndef _ANDROID_ASHMEM_H
+    #define _ANDROID_ASHMEM_H
+
+    #include <fcntl.h>
+    #include <linux/ashmem.h>
+    #include <sys/ioctl.h>
+    #include <sys/mman.h>
+
+    #if __ANDROID_API__ >= 26
+      #define shmat bionic_shmat
+      #define shmctl bionic_shmctl
+      #define shmdt bionic_shmdt
+      #define shmget bionic_shmget
+    #endif
+    #include <sys/shm.h>
+    #undef shmat
+    #undef shmctl
+    #undef shmdt
+    #undef shmget
+    #include <stdio.h>
+
+    #define ASHMEM_DEVICE "/dev/ashmem"
+
+int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) {
 
   int ret = 0;
   if (__cmd == IPC_RMID) {
 
     int               length = ioctl(__shmid, ASHMEM_GET_SIZE, NULL);
-    struct ashmem_pin pin = {0, (unsigned int)length};
+    struct ashmem_pin pin = {0, length};
     ret = ioctl(__shmid, ASHMEM_UNPIN, &pin);
     close(__shmid);
 
@@ -66,7 +38,7 @@ static inline int shmctl(int __shmid, int __cmd, struct shmid_ds *__buf) {
 
 }
 
-static inline int shmget(key_t __key, size_t __size, int __shmflg) {
+int shmget(key_t __key, size_t __size, int __shmflg) {
 
   (void)__shmflg;
   int  fd, ret;
@@ -90,7 +62,7 @@ error:
 
 }
 
-static inline void *shmat(int __shmid, const void *__shmaddr, int __shmflg) {
+void *shmat(int __shmid, const void *__shmaddr, int __shmflg) {
 
   (void)__shmflg;
   int   size;
@@ -106,7 +78,6 @@ static inline void *shmat(int __shmid, const void *__shmaddr, int __shmflg) {
 
 }
 
-#endif                                                       /* __ANDROID__ */
-
-#endif
+  #endif                                              /* !_ANDROID_ASHMEM_H */
+#endif                                                      /* !__ANDROID__ */
 
diff --git a/include/cmplog.h b/include/cmplog.h
index 74e6a3bb..878ed60c 100644
--- a/include/cmplog.h
+++ b/include/cmplog.h
@@ -29,26 +29,26 @@
 #define _AFL_CMPLOG_H
 
 #include "config.h"
-#include "forkserver.h"
+
+#define CMPLOG_LVL_MAX 3
 
 #define CMP_MAP_W 65536
-#define CMP_MAP_H 256
+#define CMP_MAP_H 32
 #define CMP_MAP_RTN_H (CMP_MAP_H / 4)
 
 #define SHAPE_BYTES(x) (x + 1)
 
-#define CMP_TYPE_INS 0
-#define CMP_TYPE_RTN 1
+#define CMP_TYPE_INS 1
+#define CMP_TYPE_RTN 2
 
 struct cmp_header {
 
-  unsigned hits : 20;
-
-  unsigned cnt : 20;
-  unsigned id : 16;
-
-  unsigned shape : 5;  // from 0 to 31
-  unsigned type : 1;
+  unsigned hits : 24;
+  unsigned id : 24;
+  unsigned shape : 5;
+  unsigned type : 2;
+  unsigned attribute : 4;
+  unsigned reserved : 5;
 
 } __attribute__((packed));
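
The new field widths sum to 24 + 24 + 5 + 2 + 4 + 5 = 64 bits, so each header packs into a single u64. A compile-time check one could add (assumes C11):

    _Static_assert(sizeof(struct cmp_header) == 8,
                   "cmp_header must pack into one u64");
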
 
@@ -56,6 +56,8 @@ struct cmp_operands {
 
   u64 v0;
   u64 v1;
+  u64 v0_128;
+  u64 v1_128;
 
 };
 
@@ -77,7 +79,8 @@ struct cmp_map {
 
 /* Execs the child */
 
-void cmplog_exec_child(afl_forkserver_t *fsrv, char **argv);
+struct afl_forkserver;
+void cmplog_exec_child(struct afl_forkserver *fsrv, char **argv);
 
 #endif
 
diff --git a/include/common.h b/include/common.h
index 87a7425b..b7adbaec 100644
--- a/include/common.h
+++ b/include/common.h
@@ -31,14 +31,15 @@
 #include <string.h>
 #include <unistd.h>
 #include <sys/time.h>
+#include <stdbool.h>
 #include "types.h"
-#include "stdbool.h"
 
 /* STRINGIFY_VAL_SIZE_MAX will fit all stringify_ strings. */
 
 #define STRINGIFY_VAL_SIZE_MAX (16)
 
-void detect_file_args(char **argv, u8 *prog_in, u8 *use_stdin);
+void detect_file_args(char **argv, u8 *prog_in, bool *use_stdin);
+void print_suggested_envs(char *mispelled_env);
 void check_environment_vars(char **env);
 
 char **argv_cpy_dup(int argc, char **argv);
@@ -56,6 +57,11 @@ extern u8 *doc_path;                    /* path to documentation dir        */
 
 u8 *find_binary(u8 *fname);
 
+/* Parses the kill signal environment variable, FATALs on error.
+  If the env is not set, sets the env to default_signal for the signal handlers
+  and returns the default_signal. */
+int parse_afl_kill_signal_env(u8 *afl_kill_signal_env, int default_signal);
+
 /* Read a bitmap from file fname to memory
    This is for the -B option again. */
 
@@ -110,5 +116,11 @@ u8 *u_stringify_time_diff(u8 *buf, u64 cur_ms, u64 event_ms);
 /* Reads the map size from ENV */
 u32 get_map_size(void);
 
+/* create a stream file */
+FILE *create_ffile(u8 *fn);
+
+/* create a file */
+s32 create_file(u8 *fn);
+
 #endif
 
diff --git a/include/config.h b/include/config.h
index 344a368f..29225f6b 100644
--- a/include/config.h
+++ b/include/config.h
@@ -10,7 +10,7 @@
                      Dominik Maier <mail@dmnk.co>
 
    Copyright 2016, 2017 Google Inc. All rights reserved.
-   Copyright 2019-2020 AFLplusplus Project. All rights reserved.
+   Copyright 2019-2021 AFLplusplus Project. All rights reserved.
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
@@ -23,12 +23,10 @@
 #ifndef _HAVE_CONFIG_H
 #define _HAVE_CONFIG_H
 
-#include "types.h"
-
 /* Version string: */
 
-// c = release, d = volatile github dev, e = experimental branch
-#define VERSION "++2.66d"
+// c = release, a = volatile github dev, e = experimental branch
+#define VERSION "++3.12a"
 
 /******************************************************
  *                                                    *
@@ -36,15 +34,76 @@
  *                                                    *
  ******************************************************/
 
+/* Default shared memory map size. Most targets just need a coverage map
+   between 20-250kb. Plus there is an auto-detection feature in afl-fuzz.
+   However if a target has problematic constructors and init arrays then
+   this can fail. Hence afl-fuzz deploys a larger default map. The largest
+   map seen so far is the xlsx fuzzer for libreoffice which is 5MB.
+   At runtime this value can be overridden via AFL_MAP_SIZE.
+   Default: 8MB (defined in bytes) */
+#define DEFAULT_SHMEM_SIZE (8 * 1024 * 1024)
+
+/* CMPLOG/REDQUEEN TUNING
+ *
+ * Here you can modify tuning and solving options for CMPLOG.
+ * Note that these are run-time options for afl-fuzz, no target
+ * recompilation required.
+ *
+ */
+
+/* if TRANSFORM is enabled with '-l T', this additionally enables base64
+   encoding/decoding */
+// #define CMPLOG_SOLVE_TRANSFORM_BASE64
+
+/* If a redqueen pass finds more than one solution, try to combine them? */
+#define CMPLOG_COMBINE
+
+/* Minimum % of the corpus to perform cmplog on. Default: 5% */
+#define CMPLOG_CORPUS_PERCENT 5U
+
+/* Number of potential positions after which we consider cmplog to have
+   become useless. Default: 12288 (12 * 1024) */
+#define CMPLOG_POSITIONS_MAX (12 * 1024)
+
+/* Maximum allowed fails per CMP value. Default: 96 */
+#define CMPLOG_FAIL_MAX 96
+
+/* Now non-cmplog configuration options */
+
+/* Console output colors: there are three ways to configure the behavior:
+ * 1. Default: colored output fixed on (USE_COLOR and ALWAYS_COLORED both
+ *    defined). The env var AFL_NO_COLOR has no effect.
+ * 2. USE_COLOR defined, ALWAYS_COLORED not defined:
+ *    colors can be switched off at run-time via the env var AFL_NO_COLOR=1.
+ *    Default is to use colors.
+ * 3. Colored output fixed off: USE_COLOR not defined.
+ *    The env var AFL_NO_COLOR has no effect.
+ */
+
 /* Comment out to disable terminal colors (note that this makes afl-analyze
    a lot less nice): */
 
 #define USE_COLOR
 
+#ifdef USE_COLOR
+  /* Comment in to always enable terminal colors */
+  /* Comment out to enable runtime controlled terminal colors via AFL_NO_COLOR
+   */
+  #define ALWAYS_COLORED 1
+#endif
+
+/* StatsD config
+   Config can be adjusted via AFL_STATSD_HOST and AFL_STATSD_PORT environment
+   variable.
+*/
+#define STATSD_UPDATE_SEC 1
+#define STATSD_DEFAULT_PORT 8125
+#define STATSD_DEFAULT_HOST "127.0.0.1"
+
 /* If you want to have the original afl internal memory corruption checks.
    Disabled by default for speed. It is better to use "make ASAN_BUILD=1". */
 
-//#define _WANT_ORIGINAL_AFL_ALLOC
+// #define _WANT_ORIGINAL_AFL_ALLOC
 
 /* Comment out to disable fancy ANSI boxes and use poor man's 7-bit UI: */
 
@@ -55,69 +114,61 @@
 /* Default timeout for fuzzed code (milliseconds). This is the upper bound,
    also used for detecting hangs; the actual value is auto-scaled: */
 
-#define EXEC_TIMEOUT 1000
+#define EXEC_TIMEOUT 1000U
 
 /* Timeout rounding factor when auto-scaling (milliseconds): */
 
-#define EXEC_TM_ROUND 20
+#define EXEC_TM_ROUND 20U
 
 /* 64bit arch MACRO */
 #if (defined(__x86_64__) || defined(__arm64__) || defined(__aarch64__))
   #define WORD_SIZE_64 1
 #endif
 
-/* Default memory limit for child process (MB): */
-
-#ifndef __NetBSD__
-  #ifndef WORD_SIZE_64
-    #define MEM_LIMIT 25
-  #else
-    #define MEM_LIMIT 50
-  #endif                                                  /* ^!WORD_SIZE_64 */
-#else /* NetBSD's kernel needs more space for stack, see discussion for issue \
-         #165 */
-  #define MEM_LIMIT 200
-#endif
-/* Default memory limit when running in QEMU mode (MB): */
+/* Default memory limit for child process (MB), 0 = disabled: */
+
+#define MEM_LIMIT 0U
 
-#define MEM_LIMIT_QEMU 200
+/* Default memory limit when running in QEMU mode (MB), 0 = disabled: */
 
-/* Default memory limit when running in Unicorn mode (MB): */
+#define MEM_LIMIT_QEMU 0U
 
-#define MEM_LIMIT_UNICORN 200
+/* Default memory limit when running in Unicorn mode (MB), 0 = disabled: */
+
+#define MEM_LIMIT_UNICORN 0U
 
 /* Number of calibration cycles per every new test case (and for test
    cases that show variable behavior): */
 
-#define CAL_CYCLES 8
-#define CAL_CYCLES_LONG 40
+#define CAL_CYCLES 8U
+#define CAL_CYCLES_LONG 40U
 
 /* Number of subsequent timeouts before abandoning an input file: */
 
-#define TMOUT_LIMIT 250
+#define TMOUT_LIMIT 250U
 
 /* Maximum number of unique hangs or crashes to record: */
 
-#define KEEP_UNIQUE_HANG 500
-#define KEEP_UNIQUE_CRASH 5000
+#define KEEP_UNIQUE_HANG 500U
+#define KEEP_UNIQUE_CRASH 5000U
 
 /* Baseline number of random tweaks during a single 'havoc' stage: */
 
-#define HAVOC_CYCLES 256
-#define HAVOC_CYCLES_INIT 1024
+#define HAVOC_CYCLES 256U
+#define HAVOC_CYCLES_INIT 1024U
 
 /* Maximum multiplier for the above (should be a power of two, beware
    of 32-bit int overflows): */
 
-#define HAVOC_MAX_MULT 16
-#define HAVOC_MAX_MULT_MOPT 32
+#define HAVOC_MAX_MULT 64U
+#define HAVOC_MAX_MULT_MOPT 64U
 
 /* Absolute minimum number of havoc cycles (after all adjustments): */
 
-#define HAVOC_MIN 16
+#define HAVOC_MIN 12U
 
 /* Power Schedule Divisor */
-#define POWER_BETA 1
+#define POWER_BETA 1U
 #define MAX_FACTOR (POWER_BETA * 32)
 
 /* Maximum stacking for havoc-stage tweaks. The actual value is calculated
@@ -126,22 +177,22 @@
    n = random between 1 and HAVOC_STACK_POW2
    stacking = 2^n
 
-   In other words, the default (n = 7) produces 2, 4, 8, 16, 32, 64, or
-   128 stacked tweaks: */
+   In other words, the default (n = 4) produces 2, 4, 8, or 16
+   stacked tweaks: */
 
-#define HAVOC_STACK_POW2 7
+#define HAVOC_STACK_POW2 4U
 
 /* Caps on block sizes for cloning and deletion operations. Each of these
    ranges has a 33% probability of getting picked, except for the first
    two cycles where smaller blocks are favored: */
 
-#define HAVOC_BLK_SMALL 32
-#define HAVOC_BLK_MEDIUM 128
-#define HAVOC_BLK_LARGE 1500
+#define HAVOC_BLK_SMALL 32U
+#define HAVOC_BLK_MEDIUM 128U
+#define HAVOC_BLK_LARGE 1500U
 
 /* Extra-large blocks, selected very rarely (<5% of the time): */
 
-#define HAVOC_BLK_XL 32768
+#define HAVOC_BLK_XL 32768U
 
 /* Probabilities of skipping non-favored entries in the queue, expressed as
    percentages: */
@@ -169,9 +220,11 @@
 #define TRIM_START_STEPS 16
 #define TRIM_END_STEPS 1024
 
-/* Maximum size of input file, in bytes (keep under 100MB): */
+/* Maximum size of input file, in bytes (keep under 100MB, default 1MB):
+   (note that if this value is changed, several areas in afl-cc.c, afl-fuzz.c
+   and afl-fuzz-state.c have to be changed as well!) */
 
-#define MAX_FILE (1 * 1024 * 1024)
+#define MAX_FILE (1 * 1024 * 1024U)
 
 /* The same, for the test case minimizer: */
 
@@ -195,7 +248,7 @@
    steps; past this point, the "extras/user" step will be still carried out,
    but with proportionally lower odds: */
 
-#define MAX_DET_EXTRAS 200
+#define MAX_DET_EXTRAS 256
 
 /* Maximum number of auto-extracted dictionary tokens to actually use in fuzzing
    (first value), and to keep in memory as candidates. The latter should be much
@@ -236,6 +289,11 @@
 
 #define SYNC_INTERVAL 8
 
+/* Sync time (minimum time between syncing in ms, time is halved for -M main
+   nodes) - default is 30 minutes: */
+
+#define SYNC_TIME (30 * 60 * 1000)
+
 /* Output directory reuse grace period (minutes): */
 
 #define OUTPUT_GRACE 25
@@ -295,6 +353,13 @@
 
 #define RESEED_RNG 100000
 
+/* The default maximum testcase cache size in MB, 0 = disable.
+   A value between 50 and 250 is a good default. Note that the
+   number of entries will be auto-assigned if not specified via the
+   AFL_TESTCACHE_ENTRIES env variable */
+
+#define TESTCASE_CACHE_SIZE 50
+
 /* Maximum line length passed from GCC to 'as' and used for parsing
    configuration files: */
 
@@ -356,7 +421,7 @@
    after changing this - otherwise, SEGVs may ensue. */
 
 #define MAP_SIZE_POW2 16
-#define MAP_SIZE (1 << MAP_SIZE_POW2)
+#define MAP_SIZE (1U << MAP_SIZE_POW2)
 
 /* Maximum allocator request size (keep well under INT_MAX): */
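
The HAVOC_STACK_POW2 comment earlier in this file boils down to a one-liner
at the call site; a sketch of the arithmetic, where rand_below() stands in
for afl-fuzz's RNG helper and is an assumption:

    /* n is drawn from 1..HAVOC_STACK_POW2 inclusive; the number of
       stacked mutations is 2^n, i.e. 2, 4, 8 or 16 with the new
       default of 4 (previously up to 128 with the old default of 7). */
    u32 n = 1 + rand_below(afl, HAVOC_STACK_POW2);
    u32 use_stacking = 1U << n;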
 
diff --git a/include/coverage-32.h b/include/coverage-32.h
new file mode 100644
index 00000000..ca36c29f
--- /dev/null
+++ b/include/coverage-32.h
@@ -0,0 +1,112 @@
+#include "config.h"
+#include "types.h"
+
+u32 skim(const u32 *virgin, const u32 *current, const u32 *current_end);
+u32 classify_word(u32 word);
+
+inline u32 classify_word(u32 word) {
+
+  u16 mem16[2];
+  memcpy(mem16, &word, sizeof(mem16));
+
+  mem16[0] = count_class_lookup16[mem16[0]];
+  mem16[1] = count_class_lookup16[mem16[1]];
+
+  memcpy(&word, mem16, sizeof(mem16));
+  return word;
+
+}
+
+void simplify_trace(afl_state_t *afl, u8 *bytes) {
+
+  u32 *mem = (u32 *)bytes;
+  u32  i = (afl->fsrv.map_size >> 2);
+
+  while (i--) {
+
+    /* Optimize for sparse bitmaps. */
+
+    if (unlikely(*mem)) {
+
+      u8 *mem8 = (u8 *)mem;
+
+      mem8[0] = simplify_lookup[mem8[0]];
+      mem8[1] = simplify_lookup[mem8[1]];
+      mem8[2] = simplify_lookup[mem8[2]];
+      mem8[3] = simplify_lookup[mem8[3]];
+
+    } else
+
+      *mem = 0x01010101;
+
+    mem++;
+
+  }
+
+}
+
+inline void classify_counts(afl_forkserver_t *fsrv) {
+
+  u32 *mem = (u32 *)fsrv->trace_bits;
+  u32  i = (fsrv->map_size >> 2);
+
+  while (i--) {
+
+    /* Optimize for sparse bitmaps. */
+
+    if (unlikely(*mem)) { *mem = classify_word(*mem); }
+
+    mem++;
+
+  }
+
+}
+
+/* Updates the virgin bits, then reflects whether a new count or a new tuple is
+ * seen in ret. */
+inline void discover_word(u8 *ret, u32 *current, u32 *virgin) {
+
+  /* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap
+     that have not been already cleared from the virgin map - since this will
+     almost always be the case. */
+
+  if (*current & *virgin) {
+
+    if (likely(*ret < 2)) {
+
+      u8 *cur = (u8 *)current;
+      u8 *vir = (u8 *)virgin;
+
+      /* Looks like we have not found any new bytes yet; see if any non-zero
+         bytes in current[] are pristine in virgin[]. */
+
+      if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
+          (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff))
+        *ret = 2;
+      else
+        *ret = 1;
+
+    }
+
+    *virgin &= ~*current;
+
+  }
+
+}
+
+#define PACK_SIZE 16
+inline u32 skim(const u32 *virgin, const u32 *current, const u32 *current_end) {
+
+  for (; current < current_end; virgin += 4, current += 4) {
+
+    if (current[0] && classify_word(current[0]) & virgin[0]) return 1;
+    if (current[1] && classify_word(current[1]) & virgin[1]) return 1;
+    if (current[2] && classify_word(current[2]) & virgin[2]) return 1;
+    if (current[3] && classify_word(current[3]) & virgin[3]) return 1;
+
+  }
+
+  return 0;
+
+}
+
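classify_word() above buckets raw edge hit counts through
count_class_lookup16. That 16-bit table is conventionally built from the
classic 8-bit AFL bucket table; a sketch of the derivation (the init function
name is illustrative, the buckets are the long-standing AFL ones):

    static const u8 count_class_lookup8[256] = {

      [0] = 0, [1] = 1, [2] = 2, [3] = 4,
      [4 ... 7] = 8, [8 ... 15] = 16, [16 ... 31] = 32,
      [32 ... 127] = 64, [128 ... 255] = 128

    };

    static u16 count_class_lookup16[65536];

    /* Build the 16-bit table by classifying both bytes of the index. */
    static void init_count_class16(void) {

      u32 b1, b2;
      for (b1 = 0; b1 < 256; b1++)
        for (b2 = 0; b2 < 256; b2++)
          count_class_lookup16[(b1 << 8) + b2] =
              (count_class_lookup8[b1] << 8) | count_class_lookup8[b2];

    }
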
diff --git a/include/coverage-64.h b/include/coverage-64.h
new file mode 100644
index 00000000..54fe9d33
--- /dev/null
+++ b/include/coverage-64.h
@@ -0,0 +1,189 @@
+#include "config.h"
+#include "types.h"
+
+#if (defined(__AVX512F__) && defined(__AVX512DQ__)) || defined(__AVX2__)
+  #include <immintrin.h>
+#endif
+
+u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end);
+u64 classify_word(u64 word);
+
+inline u64 classify_word(u64 word) {
+
+  u16 mem16[4];
+  memcpy(mem16, &word, sizeof(mem16));
+
+  mem16[0] = count_class_lookup16[mem16[0]];
+  mem16[1] = count_class_lookup16[mem16[1]];
+  mem16[2] = count_class_lookup16[mem16[2]];
+  mem16[3] = count_class_lookup16[mem16[3]];
+
+  memcpy(&word, mem16, sizeof(mem16));
+  return word;
+
+}
+
+void simplify_trace(afl_state_t *afl, u8 *bytes) {
+
+  u64 *mem = (u64 *)bytes;
+  u32  i = (afl->fsrv.map_size >> 3);
+
+  while (i--) {
+
+    /* Optimize for sparse bitmaps. */
+
+    if (unlikely(*mem)) {
+
+      u8 *mem8 = (u8 *)mem;
+
+      mem8[0] = simplify_lookup[mem8[0]];
+      mem8[1] = simplify_lookup[mem8[1]];
+      mem8[2] = simplify_lookup[mem8[2]];
+      mem8[3] = simplify_lookup[mem8[3]];
+      mem8[4] = simplify_lookup[mem8[4]];
+      mem8[5] = simplify_lookup[mem8[5]];
+      mem8[6] = simplify_lookup[mem8[6]];
+      mem8[7] = simplify_lookup[mem8[7]];
+
+    } else
+
+      *mem = 0x0101010101010101ULL;
+
+    mem++;
+
+  }
+
+}
+
+inline void classify_counts(afl_forkserver_t *fsrv) {
+
+  u64 *mem = (u64 *)fsrv->trace_bits;
+  u32  i = (fsrv->map_size >> 3);
+
+  while (i--) {
+
+    /* Optimize for sparse bitmaps. */
+
+    if (unlikely(*mem)) { *mem = classify_word(*mem); }
+
+    mem++;
+
+  }
+
+}
+
+/* Updates the virgin bits, then reflects whether a new count or a new tuple is
+ * seen in ret. */
+inline void discover_word(u8 *ret, u64 *current, u64 *virgin) {
+
+  /* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap
+     that have not been already cleared from the virgin map - since this will
+     almost always be the case. */
+
+  if (*current & *virgin) {
+
+    if (likely(*ret < 2)) {
+
+      u8 *cur = (u8 *)current;
+      u8 *vir = (u8 *)virgin;
+
+      /* Looks like we have not found any new bytes yet; see if any non-zero
+         bytes in current[] are pristine in virgin[]. */
+
+      if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
+          (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
+          (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
+          (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff))
+        *ret = 2;
+      else
+        *ret = 1;
+
+    }
+
+    *virgin &= ~*current;
+
+  }
+
+}
+
+#if defined(__AVX512F__) && defined(__AVX512DQ__)
+  #define PACK_SIZE 64
+inline u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end) {
+
+  for (; current != current_end; virgin += 8, current += 8) {
+
+    __m512i  value = *(__m512i *)current;
+    __mmask8 mask = _mm512_testn_epi64_mask(value, value);
+
+    /* All bytes are zero. */
+    if (mask == 0xff) continue;
+
+      /* Look for nonzero bytes and check for new bits. */
+  #define UNROLL(x) \
+    if (!(mask & (1 << x)) && classify_word(current[x]) & virgin[x]) return 1
+    UNROLL(0);
+    UNROLL(1);
+    UNROLL(2);
+    UNROLL(3);
+    UNROLL(4);
+    UNROLL(5);
+    UNROLL(6);
+    UNROLL(7);
+  #undef UNROLL
+
+  }
+
+  return 0;
+
+}
+
+#endif
+
+#if !defined(PACK_SIZE) && defined(__AVX2__)
+  #define PACK_SIZE 32
+inline u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end) {
+
+  __m256i zeroes = _mm256_setzero_si256();
+
+  for (; current < current_end; virgin += 4, current += 4) {
+
+    __m256i value = *(__m256i *)current;
+    __m256i cmp = _mm256_cmpeq_epi64(value, zeroes);
+    u32     mask = _mm256_movemask_epi8(cmp);
+
+    /* All bytes are zero. */
+    if (mask == (u32)-1) continue;
+
+    /* Look for nonzero bytes and check for new bits. */
+    if (!(mask & 0xff) && classify_word(current[0]) & virgin[0]) return 1;
+    if (!(mask & 0xff00) && classify_word(current[1]) & virgin[1]) return 1;
+    if (!(mask & 0xff0000) && classify_word(current[2]) & virgin[2]) return 1;
+    if (!(mask & 0xff000000) && classify_word(current[3]) & virgin[3]) return 1;
+
+  }
+
+  return 0;
+
+}
+
+#endif
+
+#if !defined(PACK_SIZE)
+  #define PACK_SIZE 32
+inline u32 skim(const u64 *virgin, const u64 *current, const u64 *current_end) {
+
+  for (; current < current_end; virgin += 4, current += 4) {
+
+    if (current[0] && classify_word(current[0]) & virgin[0]) return 1;
+    if (current[1] && classify_word(current[1]) & virgin[1]) return 1;
+    if (current[2] && classify_word(current[2]) & virgin[2]) return 1;
+    if (current[3] && classify_word(current[3]) & virgin[3]) return 1;
+
+  }
+
+  return 0;
+
+}
+
+#endif
+
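All three skim() variants share one contract: return 1 as soon as any
classified word in the current trace would flip a bit that is still virgin,
without modifying either map - a cheap read-only pre-check before the full
bitmap update. A hedged call-site sketch (variable names are assumptions):

    u64 *current = (u64 *)fsrv->trace_bits;
    u64 *virgin = (u64 *)virgin_bits;
    u64 *current_end = current + (fsrv->map_size >> 3);

    if (skim(virgin, current, current_end)) {

      /* something looks new: run the full classify/update path */

    }
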
diff --git a/include/debug.h b/include/debug.h
index d1bd971b..fc1f39cb 100644
--- a/include/debug.h
+++ b/include/debug.h
@@ -28,11 +28,6 @@
 #include "types.h"
 #include "config.h"
 
-/* __FUNCTION__ is non-iso */
-#ifdef __func__
-  #define __FUNCTION__ __func__
-#endif
-
 /*******************
  * Terminal colors *
  *******************/
@@ -173,12 +168,84 @@
  * Debug & error macros *
  ************************/
 
-/* Just print stuff to the appropriate stream. */
+#if defined USE_COLOR && !defined ALWAYS_COLORED
+  #include <unistd.h>
+  #pragma GCC diagnostic ignored "-Wformat-security"
+static inline const char *colorfilter(const char *x) {
+
+  static int once = 1;
+  static int disabled = 0;
+
+  if (once) {
+
+    /* when there is no tty -> we always want filtering
+     * when AFL_NO_UI is set, filtering depends on AFL_NO_COLOR
+     * otherwise we always want colors
+     */
+    disabled =
+        isatty(2) && (!getenv("AFL_NO_UI") ||
+                      (!getenv("AFL_NO_COLOR") && !getenv("AFL_NO_COLOUR")));
+    once = 0;
+
+  }
+
+  if (likely(disabled)) return x;
+
+  static char monochromestring[4096];
+  char *      d = monochromestring;
+  int         in_seq = 0;
+
+  while (*x) {
+
+    if (in_seq && *x == 'm') {
+
+      in_seq = 0;
+
+    } else {
+
+      if (!in_seq && *x == '\x1b') { in_seq = 1; }
+      if (!in_seq) { *d++ = *x; }
 
+    }
+
+    ++x;
+
+  }
+
+  *d = '\0';
+  return monochromestring;
+
+}
+
+#else
+  #define colorfilter(x) x                        /* no filtering necessary */
+#endif
+
+/* macro magic to pass the first parameter of SAYF
+ * through colorfilter, which strips coloring */
+#define GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, \
+                  _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26,  \
+                  _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38,  \
+                  _39, _40, NAME, ...)                                         \
+  NAME
+
+#define SAYF(...)                                                           \
+  GET_MACRO(__VA_ARGS__, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N,    \
+            SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, \
+            SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, \
+            SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, \
+            SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, SAYF_N, \
+            SAYF_N, SAYF_1)                                                 \
+  (__VA_ARGS__)
+
+#define SAYF_1(x) MY_SAYF(colorfilter(x))
+#define SAYF_N(x, ...) MY_SAYF(colorfilter(x), __VA_ARGS__)
+
+/* Just print stuff to the appropriate stream. */
 #ifdef MESSAGES_TO_STDOUT
-  #define SAYF(x...) printf(x)
+  #define MY_SAYF(x...) printf(x)
 #else
-  #define SAYF(x...) fprintf(stderr, x)
+  #define MY_SAYF(x...) fprintf(stderr, x)
 #endif                                               /* ^MESSAGES_TO_STDOUT */
 
 /* Show a prefixed warning. */
@@ -223,43 +290,43 @@
 
 /* Die with a verbose non-OS fatal error message. */
 
-#define FATAL(x...)                                                          \
-  do {                                                                       \
-                                                                             \
-    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD                                \
-         "\n[-] PROGRAM ABORT : " cRST   x);                                   \
-    SAYF(cLRD "\n         Location : " cRST "%s(), %s:%u\n\n", __FUNCTION__, \
-         __FILE__, __LINE__);                                                \
-    exit(1);                                                                 \
-                                                                             \
+#define FATAL(x...)                                                      \
+  do {                                                                   \
+                                                                         \
+    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD                            \
+         "\n[-] PROGRAM ABORT : " cRST   x);                               \
+    SAYF(cLRD "\n         Location : " cRST "%s(), %s:%u\n\n", __func__, \
+         __FILE__, (u32)__LINE__);                                       \
+    exit(1);                                                             \
+                                                                         \
   } while (0)
 
 /* Die by calling abort() to provide a core dump. */
 
-#define ABORT(x...)                                                          \
-  do {                                                                       \
-                                                                             \
-    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD                                \
-         "\n[-] PROGRAM ABORT : " cRST   x);                                   \
-    SAYF(cLRD "\n    Stop location : " cRST "%s(), %s:%u\n\n", __FUNCTION__, \
-         __FILE__, __LINE__);                                                \
-    abort();                                                                 \
-                                                                             \
+#define ABORT(x...)                                                      \
+  do {                                                                   \
+                                                                         \
+    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD                            \
+         "\n[-] PROGRAM ABORT : " cRST   x);                               \
+    SAYF(cLRD "\n    Stop location : " cRST "%s(), %s:%u\n\n", __func__, \
+         __FILE__, (u32)__LINE__);                                       \
+    abort();                                                             \
+                                                                         \
   } while (0)
 
 /* Die while also including the output of perror(). */
 
-#define PFATAL(x...)                                                       \
-  do {                                                                     \
-                                                                           \
-    fflush(stdout);                                                        \
-    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD                              \
-         "\n[-]  SYSTEM ERROR : " cRST   x);                                 \
-    SAYF(cLRD "\n    Stop location : " cRST "%s(), %s:%u\n", __FUNCTION__, \
-         __FILE__, __LINE__);                                              \
-    SAYF(cLRD "       OS message : " cRST "%s\n", strerror(errno));        \
-    exit(1);                                                               \
-                                                                           \
+#define PFATAL(x...)                                                   \
+  do {                                                                 \
+                                                                       \
+    fflush(stdout);                                                    \
+    SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD                          \
+         "\n[-]  SYSTEM ERROR : " cRST   x);                             \
+    SAYF(cLRD "\n    Stop location : " cRST "%s(), %s:%u\n", __func__, \
+         __FILE__, (u32)__LINE__);                                     \
+    SAYF(cLRD "       OS message : " cRST "%s\n", strerror(errno));    \
+    exit(1);                                                           \
+                                                                       \
   } while (0)
 
 /* Die with FATAL() or PFATAL() depending on the value of res (used to
@@ -275,22 +342,34 @@
                            \
   } while (0)
 
+/* Show a prefixed debug output. */
+
+#define DEBUGF(x...)                                    \
+  do {                                                  \
+                                                        \
+    fprintf(stderr, cMGN "[D] " cBRI "DEBUG: " cRST x); \
+    fprintf(stderr, cRST "");                           \
+                                                        \
+  } while (0)
+
 /* Error-checking versions of read() and write() that call RPFATAL() as
    appropriate. */
 
-#define ck_write(fd, buf, len, fn)                            \
-  do {                                                        \
-                                                              \
-    u32 _len = (len);                                         \
-    s32 _res = write(fd, buf, _len);                          \
-    if (_res != _len) RPFATAL(_res, "Short write to %s", fn); \
-                                                              \
+#define ck_write(fd, buf, len, fn)                                        \
+  do {                                                                    \
+                                                                          \
+    int _fd = (fd);                                                       \
+                                                                          \
+    s32 _len = (s32)(len);                                                \
+    s32 _res = write(_fd, (buf), _len);                                   \
+    if (_res != _len) RPFATAL(_res, "Short write to %s, fd %d", fn, _fd); \
+                                                                          \
   } while (0)
 
 #define ck_read(fd, buf, len, fn)                              \
   do {                                                         \
                                                                \
-    u32 _len = (len);                                          \
+    s32 _len = (s32)(len);                                     \
     s32 _res = read(fd, buf, _len);                            \
     if (_res != _len) RPFATAL(_res, "Short read from %s", fn); \
                                                                \
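
The GET_MACRO construct above is the standard argument-counting dispatch:
with up to 40 arguments the literal list shifts so that NAME lands on SAYF_1
for a single argument and on SAYF_N otherwise. A minimal standalone sketch of
the same pattern, cut down to three arguments (macro names here are
illustrative):

    #include <stdio.h>

    #define GET_MACRO3(_1, _2, _3, NAME, ...) NAME
    #define SHOW(...) \
      GET_MACRO3(__VA_ARGS__, SHOW_N, SHOW_N, SHOW_1)(__VA_ARGS__)

    #define SHOW_1(x) printf("%s", (x))             /* one argument   */
    #define SHOW_N(x, ...) printf((x), __VA_ARGS__) /* two or more    */

    int main(void) {

      SHOW("hello\n");               /* dispatches to SHOW_1 */
      SHOW("%s %d\n", "count:", 2);  /* dispatches to SHOW_N */
      return 0;

    }
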
diff --git a/include/envs.h b/include/envs.h
index c1c7d387..cfd73b68 100644
--- a/include/envs.h
+++ b/include/envs.h
@@ -6,6 +6,7 @@ static char *afl_environment_deprecated[] = {
 
     "AFL_LLVM_WHITELIST",
     "AFL_GCC_WHITELIST",
+    "AFL_DEBUG_CHILD_OUTPUT",
     "AFL_DEFER_FORKSRV",
     "AFL_POST_LIBRARY",
     "AFL_PERSISTENT",
@@ -27,32 +28,45 @@ static char *afl_environment_variables[] = {
     "AFL_CC",
     "AFL_CMIN_ALLOW_ANY",
     "AFL_CMIN_CRASHES_ONLY",
+    "AFL_CMPLOG_ONLY_NEW",
     "AFL_CODE_END",
     "AFL_CODE_START",
     "AFL_COMPCOV_BINNAME",
     "AFL_COMPCOV_LEVEL",
+    "AFL_CRASH_EXITCODE",
     "AFL_CUSTOM_MUTATOR_LIBRARY",
     "AFL_CUSTOM_MUTATOR_ONLY",
     "AFL_CXX",
     "AFL_CYCLE_SCHEDULES",
     "AFL_DEBUG",
-    "AFL_DEBUG_CHILD_OUTPUT",
+    "AFL_DEBUG_CHILD",
     "AFL_DEBUG_GDB",
     "AFL_DISABLE_TRIM",
     "AFL_DONT_OPTIMIZE",
+    "AFL_DRIVER_STDERR_DUPLICATE_FILENAME",
     "AFL_DUMB_FORKSRV",
     "AFL_ENTRYPOINT",
     "AFL_EXIT_WHEN_DONE",
     "AFL_FAST_CAL",
     "AFL_FORCE_UI",
+    "AFL_FUZZER_ARGS",  // oss-fuzz
+    "AFL_GDB",
+    "AFL_GCC_ALLOWLIST",
+    "AFL_GCC_DENYLIST",
+    "AFL_GCC_BLOCKLIST",
     "AFL_GCC_INSTRUMENT_FILE",
+    "AFL_GCC_OUT_OF_LINE",
+    "AFL_GCC_SKIP_NEVERZERO",
     "AFL_GCJ",
     "AFL_HANG_TMOUT",
+    "AFL_FORKSRV_INIT_TMOUT",
     "AFL_HARDEN",
     "AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES",
+    "AFL_IGNORE_UNKNOWN_ENVS",
     "AFL_IMPORT_FIRST",
     "AFL_INST_LIBS",
     "AFL_INST_RATIO",
+    "AFL_KILL_SIGNAL",
     "AFL_KEEP_TRACES",
     "AFL_KEEP_ASSEMBLY",
     "AFL_LD_HARD_FAIL",
@@ -62,11 +76,18 @@ static char *afl_environment_variables[] = {
     "AFL_REAL_LD",
     "AFL_LD_PRELOAD",
     "AFL_LD_VERBOSE",
+    "AFL_LLVM_ALLOWLIST",
+    "AFL_LLVM_DENYLIST",
+    "AFL_LLVM_BLOCKLIST",
     "AFL_LLVM_CMPLOG",
     "AFL_LLVM_INSTRIM",
+    "AFL_LLVM_CALLER",
     "AFL_LLVM_CTX",
-    "AFL_LLVM_INSTRUMENT",
+    "AFL_LLVM_CTX_K",
+    "AFL_LLVM_DICT2FILE",
+    "AFL_LLVM_DOCUMENT_IDS",
     "AFL_LLVM_INSTRIM_LOOPHEAD",
+    "AFL_LLVM_INSTRUMENT",
     "AFL_LLVM_LTO_AUTODICTIONARY",
     "AFL_LLVM_AUTODICTIONARY",
     "AFL_LLVM_SKIPSINGLEBLOCK",
@@ -88,30 +109,46 @@ static char *afl_environment_variables[] = {
     "AFL_LLVM_LTO_STARTID",
     "AFL_LLVM_LTO_DONTWRITEID",
     "AFL_NO_ARITH",
+    "AFL_NO_AUTODICT",
     "AFL_NO_BUILTIN",
+#if defined USE_COLOR && !defined ALWAYS_COLORED
+    "AFL_NO_COLOR",
+    "AFL_NO_COLOUR",
+#endif
     "AFL_NO_CPU_RED",
     "AFL_NO_FORKSRV",
     "AFL_NO_UI",
     "AFL_NO_PYTHON",
     "AFL_UNTRACER_FILE",
     "AFL_LLVM_USE_TRACE_PC",
-    "AFL_NO_X86",  // not really an env but we dont want to warn on it
     "AFL_MAP_SIZE",
     "AFL_MAPSIZE",
+    "AFL_MAX_DET_EXTRAS",
+    "AFL_NO_X86",  // not really an env but we dont want to warn on it
+    "AFL_NOOPT",
+    "AFL_PASSTHROUGH",
     "AFL_PATH",
     "AFL_PERFORMANCE_FILE",
     "AFL_PRELOAD",
     "AFL_PYTHON_MODULE",
+    "AFL_QEMU_CUSTOM_BIN",
     "AFL_QEMU_COMPCOV",
     "AFL_QEMU_COMPCOV_DEBUG",
     "AFL_QEMU_DEBUG_MAPS",
     "AFL_QEMU_DISABLE_CACHE",
+    "AFL_QEMU_DRIVER_NO_HOOK",
+    "AFL_QEMU_FORCE_DFL",
     "AFL_QEMU_PERSISTENT_ADDR",
     "AFL_QEMU_PERSISTENT_CNT",
     "AFL_QEMU_PERSISTENT_GPR",
     "AFL_QEMU_PERSISTENT_HOOK",
+    "AFL_QEMU_PERSISTENT_MEM",
     "AFL_QEMU_PERSISTENT_RET",
     "AFL_QEMU_PERSISTENT_RETADDR_OFFSET",
+    "AFL_QEMU_PERSISTENT_EXITS",
+    "AFL_QEMU_INST_RANGES",
+    "AFL_QEMU_EXCLUDE_RANGES",
+    "AFL_QEMU_SNAPSHOT",
     "AFL_QUIET",
     "AFL_RANDOM_ALLOC_CANARY",
     "AFL_REAL_PATH",
@@ -119,6 +156,12 @@ static char *afl_environment_variables[] = {
     "AFL_SKIP_BIN_CHECK",
     "AFL_SKIP_CPUFREQ",
     "AFL_SKIP_CRASHES",
+    "AFL_STATSD",
+    "AFL_STATSD_HOST",
+    "AFL_STATSD_PORT",
+    "AFL_STATSD_TAGS_FLAVOR",
+    "AFL_TESTCACHE_SIZE",
+    "AFL_TESTCACHE_ENTRIES",
     "AFL_TMIN_EXACT",
     "AFL_TMPDIR",
     "AFL_TOKEN_FILE",
@@ -131,6 +174,7 @@ static char *afl_environment_variables[] = {
     "AFL_WINE_PATH",
     "AFL_NO_SNAPSHOT",
     "AFL_EXPAND_HAVOC_NOW",
+    "AFL_USE_QASAN",
     NULL
 
 };
diff --git a/include/forkserver.h b/include/forkserver.h
index 717493db..ac027f81 100644
--- a/include/forkserver.h
+++ b/include/forkserver.h
@@ -37,9 +37,7 @@ typedef struct afl_forkserver {
 
   /* a program that includes afl-forkserver needs to define these */
 
-  u8  uses_asan;                        /* Target uses ASAN?                */
   u8 *trace_bits;                       /* SHM with instrumentation bitmap  */
-  u8  use_stdin;                        /* use stdin for sending data       */
 
   s32 fsrv_pid,                         /* PID of the fork server           */
       child_pid,                        /* PID of the fuzzed program        */
@@ -53,9 +51,8 @@ typedef struct afl_forkserver {
       fsrv_ctl_fd,                      /* Fork server control pipe (write) */
       fsrv_st_fd;                       /* Fork server status pipe (read)   */
 
-  u8 no_unlink;                         /* do not unlink cur_input          */
-
   u32 exec_tmout;                       /* Configurable exec timeout (ms)   */
+  u32 init_tmout;                       /* Configurable init timeout (ms)   */
   u32 map_size;                         /* map size used by the target      */
   u32 snapshot;                         /* is snapshot feature used         */
   u64 mem_limit;                        /* Memory cap for child (MB)        */
@@ -67,18 +64,29 @@ typedef struct afl_forkserver {
 
   FILE *plot_file;                      /* Gnuplot output file              */
 
-  /* Note: lat_run_timed_out is u32 to send it to the child as 4 byte array */
+  /* Note: last_run_timed_out is u32 to send it to the child as 4 byte array */
   u32 last_run_timed_out;               /* Traced process timed out?        */
 
   u8 last_kill_signal;                  /* Signal that killed the child     */
 
-  u8 use_shmem_fuzz;                    /* use shared mem for test cases    */
+  bool use_shmem_fuzz;                  /* use shared mem for test cases    */
+
+  bool support_shmem_fuzz;              /* set by afl-fuzz                  */
+
+  bool use_fauxsrv;                     /* Fauxsrv for non-forking targets? */
+
+  bool qemu_mode;                       /* if running in qemu mode or not   */
 
-  u8 support_shmem_fuzz;                /* set by afl-fuzz                  */
+  bool use_stdin;                       /* use stdin for sending data       */
 
-  u8 use_fauxsrv;                       /* Fauxsrv for non-forking targets? */
+  bool no_unlink;                       /* do not unlink cur_input          */
 
-  u8 qemu_mode;                         /* if running in qemu mode or not   */
+  bool uses_asan;                       /* Target uses ASAN?                */
+
+  bool debug;                           /* debug mode?                      */
+
+  bool uses_crash_exitcode;             /* Custom crash exitcode specified? */
+  u8   crash_exitcode;                  /* The crash exitcode specified     */
 
   u32 *shmem_fuzz_len;                  /* length of the fuzzing test case  */
 
@@ -89,9 +97,11 @@ typedef struct afl_forkserver {
   /* Function to kick off the forkserver child */
   void (*init_child_func)(struct afl_forkserver *fsrv, char **argv);
 
-  u8 *function_opt;                     /* for autodictionary: afl ptr      */
+  u8 *afl_ptr;                          /* for autodictionary: afl ptr      */
+
+  void (*add_extra_func)(void *afl_ptr, u8 *mem, u32 len);
 
-  void (*function_ptr)(void *afl_tmp, u8 *mem, u32 len);
+  u8 kill_signal;
 
 } afl_forkserver_t;
 
@@ -110,11 +120,14 @@ void afl_fsrv_init(afl_forkserver_t *fsrv);
 void afl_fsrv_init_dup(afl_forkserver_t *fsrv_to, afl_forkserver_t *from);
 void afl_fsrv_start(afl_forkserver_t *fsrv, char **argv,
                     volatile u8 *stop_soon_p, u8 debug_child_output);
+u32  afl_fsrv_get_mapsize(afl_forkserver_t *fsrv, char **argv,
+                          volatile u8 *stop_soon_p, u8 debug_child_output);
 void afl_fsrv_write_to_testcase(afl_forkserver_t *fsrv, u8 *buf, size_t len);
 fsrv_run_result_t afl_fsrv_run_target(afl_forkserver_t *fsrv, u32 timeout,
                                       volatile u8 *stop_soon_p);
 void              afl_fsrv_killall(void);
 void              afl_fsrv_deinit(afl_forkserver_t *fsrv);
+void              afl_fsrv_kill(afl_forkserver_t *fsrv);
 
 #ifdef __APPLE__
   #define MSG_FORK_ON_APPLE                                                    \
diff --git a/include/list.h b/include/list.h
index 88cbe062..7ec81cbe 100644
--- a/include/list.h
+++ b/include/list.h
@@ -81,6 +81,7 @@ static inline void list_append(list_t *list, void *el) {
   }
 
   element_t *el_box = NULL;
+
   PRE_ALLOC(el_box, list->element_prealloc_buf, LIST_PREALLOC_SIZE,
             list->element_prealloc_count);
   if (!el_box) { FATAL("failed to allocate list element"); }
diff --git a/include/sharedmem.h b/include/sharedmem.h
index b15d0535..fdc947f9 100644
--- a/include/sharedmem.h
+++ b/include/sharedmem.h
@@ -51,6 +51,7 @@ typedef struct sharedmem {
   size_t map_size;                                 /* actual allocated size */
 
   int             cmplog_mode;
+  int             shmemfuzz_mode;
   struct cmp_map *cmp_map;
 
 } sharedmem_t;
diff --git a/include/snapshot-inl.h b/include/snapshot-inl.h
index a75d69c0..a18187ef 100644
--- a/include/snapshot-inl.h
+++ b/include/snapshot-inl.h
@@ -66,7 +66,7 @@ struct afl_snapshot_vmrange_args {
 
 static int afl_snapshot_dev_fd;
 
-static int afl_snapshot_init() {
+static int afl_snapshot_init(void) {
 
   afl_snapshot_dev_fd = open(AFL_SNAPSHOT_FILE_NAME, 0);
   return afl_snapshot_dev_fd;
diff --git a/include/types.h b/include/types.h
index 39f599a0..7b94fb83 100644
--- a/include/types.h
+++ b/include/types.h
@@ -25,10 +25,15 @@
 
 #include <stdint.h>
 #include <stdlib.h>
+#include "config.h"
 
 typedef uint8_t  u8;
 typedef uint16_t u16;
 typedef uint32_t u32;
+#ifdef WORD_SIZE_64
+typedef unsigned __int128 uint128_t;
+typedef uint128_t         u128;
+#endif
 
 /* Extended forkserver option values */
 
@@ -50,7 +55,7 @@ typedef uint32_t u32;
 #define FS_OPT_SHDMEM_FUZZ 0x01000000
 #define FS_OPT_OLD_AFLPP_WORKAROUND 0x0f000000
 // FS_OPT_MAX_MAPSIZE is 8388608 = 0x800000 = 2^23 = 1 << 22
-#define FS_OPT_MAX_MAPSIZE ((0x00fffffe >> 1) + 1)
+#define FS_OPT_MAX_MAPSIZE ((0x00fffffeU >> 1) + 1)
 #define FS_OPT_GET_MAPSIZE(x) (((x & 0x00fffffe) >> 1) + 1)
 #define FS_OPT_SET_MAPSIZE(x) \
   (x <= 1 || x > FS_OPT_MAX_MAPSIZE ? 0 : ((x - 1) << 1))
@@ -61,6 +66,10 @@ typedef int8_t  s8;
 typedef int16_t s16;
 typedef int32_t s32;
 typedef int64_t s64;
+#ifdef WORD_SIZE_64
+typedef __int128 int128_t;
+typedef int128_t s128;
+#endif
 
 #ifndef MIN
   #define MIN(a, b)           \
@@ -114,6 +123,33 @@ typedef int64_t s64;
                                                                                \
   })
 
+// It is impossible to define 128-bit constants, so ...
+#ifdef WORD_SIZE_64
+  #define SWAPN(_x, _l)                            \
+    ({                                             \
+                                                   \
+      u128  _res = (_x), _ret;                     \
+      char *d = (char *)&_ret, *s = (char *)&_res; \
+      int   i;                                     \
+      for (i = 0; i < 16; i++)                     \
+        d[15 - i] = s[i];                          \
+      u32 sr = 128U - ((_l) << 3U);                \
+      (_ret >>= sr);                               \
+      (u128) _ret;                                 \
+                                                   \
+    })
+#endif
+
+#define SWAPNN(_x, _y, _l)                     \
+  ({                                           \
+                                               \
+    char *d = (char *)(_x), *s = (char *)(_y); \
+    u32   i, l = (_l)-1;                       \
+    for (i = 0; i <= l; i++)                   \
+      d[l - i] = s[i];                         \
+                                               \
+  })
+
 #ifdef AFL_LLVM_PASS
   #if defined(__linux__) || !defined(__ANDROID__)
     #define AFL_SR(s) (srandom(s))
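
SWAPN above byte-swaps the significant _l bytes of a value by reversing all
16 bytes of the u128 and then shifting right by 128 - 8*_l bits; SWAPNN does
the same for a raw byte buffer. Two worked examples under those definitions:

    /* reverse(0x11223344) as a u128 lands 0x44332211 in the top 32
       bits; shifting right by 128 - 32 = 96 bits brings it back down. */
    u128 x = SWAPN(0x11223344, 4);            /* x == 0x44332211 */

    u8 src[4] = {0x11, 0x22, 0x33, 0x44};
    u8 dst[4];
    SWAPNN(dst, src, 4);                      /* dst == 44 33 22 11 */
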
diff --git a/include/xxh3.h b/include/xxh3.h
deleted file mode 100644
index 2354bde9..00000000
--- a/include/xxh3.h
+++ /dev/null
@@ -1,3187 +0,0 @@
-/*
- * xxHash - Extremely Fast Hash algorithm
- * Development source file for `xxh3`
- * Copyright (C) 2019-2020 Yann Collet
- *
- * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *    * Redistributions of source code must retain the above copyright
- *      notice, this list of conditions and the following disclaimer.
- *    * Redistributions in binary form must reproduce the above
- *      copyright notice, this list of conditions and the following disclaimer
- *      in the documentation and/or other materials provided with the
- *      distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You can contact the author at:
- *   - xxHash homepage: https://www.xxhash.com
- *   - xxHash source repository: https://github.com/Cyan4973/xxHash
- */
-
-/*
- * Note: This file is separated for development purposes.
- * It will be integrated into `xxhash.h` when development stage is completed.
- *
- * Credit: most of the work on vectorial and asm variants comes from
- * @easyaspi314
- */
-
-#ifndef XXH3_H_1397135465
-#define XXH3_H_1397135465
-
-/* ===   Dependencies   === */
-#ifndef XXHASH_H_5627135585666179
-  /* special: when including `xxh3.h` directly, turn on XXH_INLINE_ALL */
-  #undef XXH_INLINE_ALL                               /* avoid redefinition */
-  #define XXH_INLINE_ALL
-#endif
-#include "xxhash.h"
-
-/* ===   Compiler specifics   === */
-
-#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L      /* >= C99 */
-  #define XXH_RESTRICT restrict
-#else
-  /* Note: it might be useful to define __restrict or __restrict__ for some C++
-   * compilers */
-  #define XXH_RESTRICT                                           /* disable */
-#endif
-
-#if (defined(__GNUC__) && (__GNUC__ >= 3)) ||                   \
-    (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || \
-    defined(__clang__)
-  #define XXH_likely(x) __builtin_expect(x, 1)
-  #define XXH_unlikely(x) __builtin_expect(x, 0)
-#else
-  #define XXH_likely(x) (x)
-  #define XXH_unlikely(x) (x)
-#endif
-
-#if defined(__GNUC__)
-  #if defined(__AVX2__)
-    #include <immintrin.h>
-  #elif defined(__SSE2__)
-    #include <emmintrin.h>
-  #elif defined(__ARM_NEON__) || defined(__ARM_NEON)
-    #define inline __inline__                                  /* clang bug */
-    #include <arm_neon.h>
-    #undef inline
-  #endif
-#elif defined(_MSC_VER)
-  #include <intrin.h>
-#endif
-
-/*
- * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
- * remaining a true 64-bit/128-bit hash function.
- *
- * This is done by prioritizing a subset of 64-bit operations that can be
- * emulated without too many steps on the average 32-bit machine.
- *
- * For example, these two lines seem similar, and run equally fast on 64-bit:
- *
- *   xxh_u64 x;
- *   x ^= (x >> 47); // good
- *   x ^= (x >> 13); // bad
- *
- * However, to a 32-bit machine, there is a major difference.
- *
- * x ^= (x >> 47) looks like this:
- *
- *   x.lo ^= (x.hi >> (47 - 32));
- *
- * while x ^= (x >> 13) looks like this:
- *
- *   // note: funnel shifts are not usually cheap.
- *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
- *   x.hi ^= (x.hi >> 13);
- *
- * The first one is significantly faster than the second, simply because the
- * shift is larger than 32. This means:
- *  - All the bits we need are in the upper 32 bits, so we can ignore the lower
- *    32 bits in the shift.
- *  - The shift result will always fit in the lower 32 bits, and therefore,
- *    we can ignore the upper 32 bits in the xor.
- *
- * Thanks to this optimization, XXH3 only requires these features to be
- * efficient:
- *
- *  - Usable unaligned access
- *  - A 32-bit or 64-bit ALU
- *      - If 32-bit, a decent ADC instruction
- *  - A 32 or 64-bit multiply with a 64-bit result
- *  - For the 128-bit variant, a decent byteswap helps short inputs.
- *
- * The first two are already required by XXH32, and almost all 32-bit and 64-bit
- * platforms which can run XXH32 can run XXH3 efficiently.
- *
- * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
- * notable exception.
- *
- * First of all, Thumb-1 lacks support for the UMULL instruction which
- * performs the important long multiply. This means numerous __aeabi_lmul
- * calls.
- *
- * Second of all, the 8 functional registers are just not enough.
- * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
- * Lo registers, and this shuffling results in thousands more MOVs than A32.
- *
- * A32 and T32 don't have this limitation. They can access all 14 registers,
- * do a 32->64 multiply with UMULL, and the flexible operand allowing free
- * shifts is helpful, too.
- *
- * Therefore, we do a quick sanity check.
- *
- * If compiling Thumb-1 for a target which supports ARM instructions, we will
- * emit a warning, as it is not a "sane" platform to compile for.
- *
- * Usually, if this happens, it is because of an accident and you probably need
- * to specify -march, as you likely meant to compile for a newer architecture.
- */
-#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
-  #warning "XXH3 is highly inefficient without ARM or Thumb-2."
-#endif
-
-/* ==========================================
- * Vectorization detection
- * ========================================== */
-#define XXH_SCALAR 0                             /* Portable scalar version */
-#define XXH_SSE2 1                     /* SSE2 for Pentium 4 and all x86_64 */
-#define XXH_AVX2 2                        /* AVX2 for Haswell and Bulldozer */
-#define XXH_AVX512 3                      /* AVX512 for Skylake and Icelake */
-#define XXH_NEON 4                 /* NEON for most ARMv7-A and all AArch64 */
-#define XXH_VSX 5                         /* VSX and ZVector for POWER8/z13 */
-
-#ifndef XXH_VECTOR                        /* can be defined on command line */
-  #if defined(__AVX512F__)
-    #define XXH_VECTOR XXH_AVX512
-  #elif defined(__AVX2__)
-    #define XXH_VECTOR XXH_AVX2
-  #elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || \
-      (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
-    #define XXH_VECTOR XXH_SSE2
-  #elif defined(__GNUC__) /* msvc support maybe later */                   \
-      && (defined(__ARM_NEON__) || defined(__ARM_NEON)) &&                 \
-      (defined(__LITTLE_ENDIAN__) /* We only support little endian NEON */ \
-       ||                                                                  \
-       (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
-    #define XXH_VECTOR XXH_NEON
-  #elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) || \
-      (defined(__s390x__) && defined(__VEC__)) &&             \
-          defined(__GNUC__)                                 /* TODO: IBM XL */
-    #define XXH_VECTOR XXH_VSX
-  #else
-    #define XXH_VECTOR XXH_SCALAR
-  #endif
-#endif
-
-/*
- * Controls the alignment of the accumulator,
- * for compatibility with aligned vector loads, which are usually faster.
- */
-#ifndef XXH_ACC_ALIGN
-  #if defined(XXH_X86DISPATCH)
-    #define XXH_ACC_ALIGN 64               /* for compatibility with avx512 */
-  #elif XXH_VECTOR == XXH_SCALAR                                  /* scalar */
-    #define XXH_ACC_ALIGN 8
-  #elif XXH_VECTOR == XXH_SSE2                                      /* sse2 */
-    #define XXH_ACC_ALIGN 16
-  #elif XXH_VECTOR == XXH_AVX2                                      /* avx2 */
-    #define XXH_ACC_ALIGN 32
-  #elif XXH_VECTOR == XXH_NEON                                      /* neon */
-    #define XXH_ACC_ALIGN 16
-  #elif XXH_VECTOR == XXH_VSX                                        /* vsx */
-    #define XXH_ACC_ALIGN 16
-  #elif XXH_VECTOR == XXH_AVX512                                  /* avx512 */
-    #define XXH_ACC_ALIGN 64
-  #endif
-#endif
-
-#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 || \
-    XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
-  #define XXH_SEC_ALIGN XXH_ACC_ALIGN
-#else
-  #define XXH_SEC_ALIGN 8
-#endif
-
-/*
- * UGLY HACK:
- * GCC usually generates the best code with -O3 for xxHash.
- *
- * However, when targeting AVX2, it is overzealous in its unrolling resulting
- * in code roughly 3/4 the speed of Clang.
- *
- * There are other issues, such as GCC splitting _mm256_loadu_si256 into
- * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
- * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
- *
- * That is why when compiling the AVX2 version, it is recommended to use either
- *   -O2 -mavx2 -march=haswell
- * or
- *   -O2 -mavx2 -mno-avx256-split-unaligned-load
- * for decent performance, or to use Clang instead.
- *
- * Fortunately, we can control the first one with a pragma that forces GCC into
- * -O2, but the other one we can't control without "failed to inline always
- * inline function due to target mismatch" warnings.
- */
-#if XXH_VECTOR == XXH_AVX2                      /* AVX2 */           \
-    && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
-    && defined(__OPTIMIZE__) &&                                      \
-    !defined(__OPTIMIZE_SIZE__)                      /* respect -O0 and -Os */
-  #pragma GCC push_options
-  #pragma GCC optimize("-O2")
-#endif
-
-#if XXH_VECTOR == XXH_NEON
-  /*
-   * NEON's setup for vmlal_u32 is a little more complicated than it is on
-   * SSE2, AVX2, and VSX.
-   *
-   * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an
-   * upcast.
-   *
-   * To do the same operation, the 128-bit 'Q' register needs to be split into
-   * two 64-bit 'D' registers, performing this operation::
-   *
-   *   [                a                 |                 b                ]
-   *            |              '---------. .--------'                |
-   *            |                         x                          |
-   *            |              .---------' '--------.                |
-   *   [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[    a >> 32     |     b >> 32    ]
-   *
-   * Due to significant changes in aarch64, the fastest method for aarch64 is
-   * completely different than the fastest method for ARMv7-A.
-   *
-   * ARMv7-A treats D registers as unions overlaying Q registers, so modifying
-   * D11 will modify the high half of Q5. This is similar to how modifying AH
-   * will only affect bits 8-15 of AX on x86.
-   *
-   * VZIP takes two registers, and puts even lanes in one register and odd lanes
-   * in the other.
-   *
-   * On ARMv7-A, this strangely modifies both parameters in place instead of
-   * taking the usual 3-operand form.
-   *
-   * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the
-   * lower and upper halves of the Q register to end up with the high and low
-   * halves where we want - all in one instruction.
-   *
-   *   vzip.32   d10, d11       @ d10 = { d10[0], d11[0] }; d11 = { d10[1],
-   * d11[1] }
-   *
-   * Unfortunately we need inline assembly for this: Instructions modifying two
-   * registers at once is not possible in GCC or Clang's IR, and they have to
-   * create a copy.
-   *
-   * aarch64 requires a different approach.
-   *
-   * In order to make it easier to write a decent compiler for aarch64, many
-   * quirks were removed, such as conditional execution.
-   *
-   * NEON was also affected by this.
-   *
-   * aarch64 cannot access the high bits of a Q-form register, and writes to a
-   * D-form register zero the high bits, similar to how writes to W-form scalar
-   * registers (or DWORD registers on x86_64) work.
-   *
-   * The formerly free vget_high intrinsics now require a vext (with a few
-   * exceptions)
-   *
-   * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent
-   * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one
-   * operand.
-   *
-   * The equivalent of the VZIP.32 on the lower and upper halves would be this
-   * mess:
-   *
-   *   ext     v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
-   *   zip1    v1.2s, v0.2s, v2.2s     // v1 = { v0[0], v2[0] }
-   *   zip2    v0.2s, v0.2s, v1.2s     // v0 = { v0[1], v2[1] }
-   *
-   * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64
-   * (SHRN):
-   *
-   *   shrn    v1.2s, v0.2d, #32  // v1 = (uint32x2_t)(v0 >> 32);
-   *   xtn     v0.2s, v0.2d       // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
-   *
-   * This is available on ARMv7-A, but is less efficient than a single VZIP.32.
-   */
-
-  /*
-   * Function-like macro:
-   * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t
-   * &outHi)
-   * {
-
-   *     outLo = (uint32x2_t)(in & 0xFFFFFFFF);
-   *     outHi = (uint32x2_t)(in >> 32);
-   *     in = UNDEFINED;
-   * }
-   */
-  #if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
-      && defined(__GNUC__) && !defined(__aarch64__) && !defined(__arm64__)
-    #define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                                   \
-      do {                                                                                         \
-                                                                                                   \
-        /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 =                      \
-         * upper D half */                                                                         \
-        /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486             \
-         */                                                                                        \
-        /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 \
-         */                                                                                        \
-        __asm__("vzip.32  %e0, %f0" : "+w"(in));                                                   \
-        (outLo) = vget_low_u32(vreinterpretq_u32_u64(in));                                         \
-        (outHi) = vget_high_u32(vreinterpretq_u32_u64(in));                                        \
-                                                                                                   \
-      } while (0)
-
-  #else
-    #define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
-      do {                                       \
-                                                 \
-        (outLo) = vmovn_u64(in);                 \
-        (outHi) = vshrn_n_u64((in), 32);         \
-                                                 \
-      } while (0)
-
-  #endif
-#endif                                            /* XXH_VECTOR == XXH_NEON */
-
-/*
- * VSX and Z Vector helpers.
- *
- * This is very messy, and any pull requests to clean this up are welcome.
- *
- * There are a lot of problems with supporting VSX and s390x, due to
- * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
- */
-#if XXH_VECTOR == XXH_VSX
-  #if defined(__s390x__)
-    #include <s390intrin.h>
-  #else
-    #include <altivec.h>
-  #endif
-
-  #undef vector                                       /* Undo the pollution */
-
-typedef __vector unsigned long long xxh_u64x2;
-typedef __vector unsigned char      xxh_u8x16;
-typedef __vector unsigned           xxh_u32x4;
-
-  #ifndef XXH_VSX_BE
-    #if defined(__BIG_ENDIAN__) || \
-        (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
-      #define XXH_VSX_BE 1
-    #elif defined(__VEC_ELEMENT_REG_ORDER__) && \
-        __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
-      #warning "-maltivec=be is not recommended. Please use native endianness."
-      #define XXH_VSX_BE 1
-    #else
-      #define XXH_VSX_BE 0
-    #endif
-  #endif                                            /* !defined(XXH_VSX_BE) */
-
-  #if XXH_VSX_BE
-    /* A wrapper for POWER9's vec_revb. */
-    #if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
-      #define XXH_vec_revb vec_revb
-    #else
-XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) {
-
-  xxh_u8x16 const vByteSwap = {0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
-                               0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08};
-  return vec_perm(val, val, vByteSwap);
-
-}
-
-    #endif
-  #endif                                                      /* XXH_VSX_BE */
-
-/*
- * Performs an unaligned load and byte swaps it on big endian.
- */
-XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) {
-
-  xxh_u64x2 ret;
-  memcpy(&ret, ptr, sizeof(xxh_u64x2));
-  #if XXH_VSX_BE
-  ret = XXH_vec_revb(ret);
-  #endif
-  return ret;
-
-}
-
-  /*
-   * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
-   *
-   * These intrinsics weren't added until GCC 8, despite existing for a while,
-   * and they are endian dependent. Also, their meaning swap depending on
-   * version.
-   * */
-  #if defined(__s390x__)
-    /* s390x is always big endian, no issue on this platform */
-    #define XXH_vec_mulo vec_mulo
-    #define XXH_vec_mule vec_mule
-  #elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
-    /* Clang has a better way to control this, we can just use the builtin which
-     * doesn't swap. */
-    #define XXH_vec_mulo __builtin_altivec_vmulouw
-    #define XXH_vec_mule __builtin_altivec_vmuleuw
-  #else
-/* gcc needs inline assembly */
-/* Adapted from
- * https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
-XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b) {
-
-  xxh_u64x2 result;
-  __asm__("vmulouw %0, %1, %2" : "=v"(result) : "v"(a), "v"(b));
-  return result;
-
-}
-
-XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) {
-
-  xxh_u64x2 result;
-  __asm__("vmuleuw %0, %1, %2" : "=v"(result) : "v"(a), "v"(b));
-  return result;
-
-}
-
-  #endif                                      /* XXH_vec_mulo, XXH_vec_mule */
-#endif                                             /* XXH_VECTOR == XXH_VSX */
-
-/* prefetch
- * can be disabled, by declaring XXH_NO_PREFETCH build macro */
-#if defined(XXH_NO_PREFETCH)
-  #define XXH_PREFETCH(ptr) (void)(ptr)                         /* disabled */
-#else
-  #if defined(_MSC_VER) && \
-      (defined(_M_X64) ||  \
-       defined(_M_IX86)) /* _mm_prefetch() is not defined outside of x86/x64 */
-    #include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
-    #define XXH_PREFETCH(ptr) _mm_prefetch((const char *)(ptr), _MM_HINT_T0)
-  #elif defined(__GNUC__) && \
-      ((__GNUC__ >= 4) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)))
-    #define XXH_PREFETCH(ptr) \
-      __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
-  #else
-    #define XXH_PREFETCH(ptr) (void)(ptr)                       /* disabled */
-  #endif
-#endif                                                   /* XXH_NO_PREFETCH */
-
-/* ==========================================
- * XXH3 default settings
- * ========================================== */
-
-#define XXH_SECRET_DEFAULT_SIZE 192         /* minimum XXH3_SECRET_SIZE_MIN */
-
-#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
-  #error "default keyset is not large enough"
-#endif
-
-/* Pseudorandom secret taken directly from FARSH */
-XXH_ALIGN(64)
-static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
-
-    0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c,
-    0xf7, 0x21, 0xad, 0x1c, 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb,
-    0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, 0xcb, 0x79, 0xe6, 0x4e,
-    0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
-    0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6,
-    0x81, 0x3a, 0x26, 0x4c, 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb,
-    0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, 0x71, 0x64, 0x48, 0x97,
-    0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
-    0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7,
-    0xc7, 0x0b, 0x4f, 0x1d, 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31,
-    0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
-
-    0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff,
-    0xfa, 0x13, 0x63, 0xeb, 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49,
-    0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, 0x2b, 0x16, 0xbe, 0x58,
-    0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
-    0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca,
-    0xbb, 0x4b, 0x40, 0x7e,
-
-};
-
-#ifdef XXH_OLD_NAMES
-  #define kSecret XXH3_kSecret
-#endif
-
-/*
- * Calculates a 32-bit to 64-bit long multiply.
- *
- * Wraps __emulu on MSVC x86 because it tends to call __allmul when it doesn't
- * need to (even though a full 64x64 multiply is only about 7 instructions).
- * Since we know that this will _always_ emit MULL, we use that instead of the
- * normal method.
- *
- * If you are compiling for platforms like Thumb-1 and don't have a better
- * option, you may also want to write your own long multiply routine here.
- *
- * XXH_FORCE_INLINE xxh_u64 XXH_mult32to64(xxh_u64 x, xxh_u64 y)
- * {
- *    return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
- * }
- */
-#if defined(_MSC_VER) && defined(_M_IX86)
-  #include <intrin.h>
-  #define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
-#else
-  /*
-   * Downcast + upcast is usually better than masking on older compilers like
-   * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
-   *
-   * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both
-   * operands and perform a full 64x64 multiply -- entirely redundant on 32-bit.
-   */
-  #define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
-#endif
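-
-/*
- * A quick sanity-check sketch (hypothetical helper, illustration only, not
- * part of the hashing paths): the largest 32x32 product fits exactly in 64
- * bits, since (2^32 - 1)^2 == 2^64 - 2^33 + 1 == 0xFFFFFFFE00000001.
- */
-XXH_FORCE_INLINE int XXH_mult32to64_selftest(void) {
-
-  /* illustration only; returns 1 on any conforming implementation */
-  return XXH_mult32to64(0xFFFFFFFFU, 0xFFFFFFFFU) == 0xFFFFFFFE00000001ULL;
-
-}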
-
-/*
- * Calculates a 64->128-bit long multiply.
- *
- * Uses __uint128_t and _umul128 if available, otherwise uses a scalar version.
- */
-static XXH128_hash_t XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) {
-
-  /*
-   * GCC/Clang __uint128_t method.
-   *
-   * On most 64-bit targets, GCC and Clang define a __uint128_t type.
-   * This is usually the best way, as it typically compiles to a native
-   * long 64-bit multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
-   *
-   * Usually.
-   *
-   * However, on 32-bit wasm, Clang and Emscripten define this type despite
-   * not having hardware arithmetic for it. This results in a slow compiler
-   * builtin call which calculates a full 128-bit multiply.
-   * In that case it is best to use the portable one.
-   * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
-   */
-#if defined(__GNUC__) && !defined(__wasm__) && defined(__SIZEOF_INT128__) || \
-    (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
-
-  __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
-  XXH128_hash_t     r128;
-  r128.low64 = (xxh_u64)(product);
-  r128.high64 = (xxh_u64)(product >> 64);
-  return r128;
-
-    /*
-     * MSVC for x64's _umul128 method.
-     *
-     * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64
-     * *HighProduct);
-     *
-     * This compiles to single operand MUL on x64.
-     */
-#elif defined(_M_X64) || defined(_M_IA64)
-
-  #ifndef _MSC_VER
-    #pragma intrinsic(_umul128)
-  #endif
-  xxh_u64       product_high;
-  xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
-  XXH128_hash_t r128;
-  r128.low64 = product_low;
-  r128.high64 = product_high;
-  return r128;
-
-#else
-  /*
-   * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
-   *
-   * This is a fast and simple grade school multiply, which is shown below
-   * with base 10 arithmetic instead of base 0x100000000.
-   *
-   *           9 3 // D2 lhs = 93
-   *         x 7 5 // D2 rhs = 75
-   *     ----------
-   *           1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
-   *         4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
-   *         2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
-   *     + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
-   *     ---------
-   *         2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
-   *     + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
-   *     ---------
-   *       6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
-   *
-   * The reasons for adding the products like this are:
-   *  1. It avoids manual carry tracking. Just like how
-   *     (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
-   *     This avoids a lot of complexity.
-   *
-   *  2. It hints for, and on Clang, compiles to, the powerful UMAAL
-   *     instruction available in ARM's Digital Signal Processing extension
-   *     in 32-bit ARMv6 and later, which is shown below:
-   *
-   *         void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
-   *         {
-   *             xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
-   *             *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
-   *             *RdHi = (xxh_u32)(product >> 32);
-   *         }
-   *
-   *     This instruction was designed for efficient long multiplication, and
-   *     allows this to be calculated in only 4 instructions at speeds
-   *     comparable to some 64-bit ALUs.
-   *
-   *  3. It isn't terrible on other platforms. Usually this will be a couple
-   *     of 32-bit ADD/ADCs.
-   */
-
-  /* First calculate all of the cross products. */
-  xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
-  xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
-  xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
-  xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
-
-  /* Now add the products together. These will never overflow. */
-  xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
-  xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
-  xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
-
-  XXH128_hash_t r128;
-  r128.low64 = lower;
-  r128.high64 = upper;
-  return r128;
-#endif
-
-}
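-
-/*
- * A matching sanity-check sketch for the function above (hypothetical
- * helper, illustration only): squaring UINT64_MAX must give high64 ==
- * 0xFFFFFFFFFFFFFFFE and low64 == 1, since (2^64 - 1)^2 == 2^128 - 2^65 + 1.
- */
-XXH_FORCE_INLINE int XXH_mult64to128_selftest(void) {
-
-  XXH128_hash_t const r =
-      XXH_mult64to128(0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL);
-  return r.high64 == 0xFFFFFFFFFFFFFFFEULL && r.low64 == 1;
-
-}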
-
-/*
- * Does a 64-bit to 128-bit multiply, then XOR folds it.
- *
- * The reason for the separate function is to prevent passing too many structs
- * around by value. This will hopefully inline the multiply, but we don't force
- * it.
- */
-static xxh_u64 XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) {
-
-  XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
-  return product.low64 ^ product.high64;
-
-}
-
-/* Seems to produce slightly better code on GCC for some reason. */
-XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) {
-
-  XXH_ASSERT(0 <= shift && shift < 64);
-  return v64 ^ (v64 >> shift);
-
-}
-
-/*
- * We don't need to (or want to) mix as much as XXH64.
- *
- * Short hashes are more evenly distributed, so it isn't necessary.
- */
-static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) {
-
-  h64 = XXH_xorshift64(h64, 37);
-  h64 *= 0x165667919E3779F9ULL;
-  h64 = XXH_xorshift64(h64, 32);
-  return h64;
-
-}
-
-/* ==========================================
- * Short keys
- * ==========================================
- * One of the shortcomings of XXH32 and XXH64 was that their performance was
- * sub-optimal on short lengths. They used an iterative algorithm which
- * strongly favored lengths that were a multiple of 4 or 8.
- *
- * Instead of iterating over individual inputs, we use a set of single shot
- * functions which piece together a range of lengths and operate in constant
- * time.
- *
- * Additionally, the number of multiplies has been significantly reduced. This
- * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
- *
- * Depending on the platform, this may or may not be faster than XXH32, but it
- * is almost guaranteed to be faster than XXH64.
- */
-
-/*
- * At very short lengths, there isn't enough input to fully hide secrets, or use
- * the entire secret.
- *
- * There is also only a limited amount of mixing we can do before significantly
- * impacting performance.
- *
- * Therefore, we use different sections of the secret and always mix two secret
- * samples with an XOR. This should have no effect on performance on the
- * seedless or withSeed variants because everything _should_ be constant folded
- * by modern compilers.
- *
- * The XOR mixing hides individual parts of the secret and increases entropy.
- *
- * This adds an extra layer of strength for custom secrets.
- */
-XXH_FORCE_INLINE XXH64_hash_t XXH3_len_1to3_64b(const xxh_u8 *input, size_t len,
-                                                const xxh_u8 *secret,
-                                                XXH64_hash_t  seed) {
-
-  XXH_ASSERT(input != NULL);
-  XXH_ASSERT(1 <= len && len <= 3);
-  XXH_ASSERT(secret != NULL);
-  /*
-   * len = 1: combined = { input[0], 0x01, input[0], input[0] }
-   * len = 2: combined = { input[1], 0x02, input[0], input[1] }
-   * len = 3: combined = { input[2], 0x03, input[0], input[1] }
-   */
-  {
-
-    xxh_u8 const  c1 = input[0];
-    xxh_u8 const  c2 = input[len >> 1];
-    xxh_u8 const  c3 = input[len - 1];
-    xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) |
-                             ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
-    xxh_u64 const bitflip =
-        (XXH_readLE32(secret) ^ XXH_readLE32(secret + 4)) + seed;
-    xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
-    xxh_u64 const mixed = keyed * XXH_PRIME64_1;
-    return XXH3_avalanche(mixed);
-
-  }
-
-}
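-
-/*
- * Worked example of the byte packing above (a sketch for the one-byte input
- * { 0xAB }): c1 == c2 == c3 == 0xAB, so
- *   combined == (0xAB << 16) | (0xAB << 24) | 0xAB | (1 << 8) == 0xABAB01AB,
- * which matches the { input[0], 0x01, input[0], input[0] } layout for len 1.
- */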
-
-XXH_FORCE_INLINE XXH64_hash_t XXH3_len_4to8_64b(const xxh_u8 *input, size_t len,
-                                                const xxh_u8 *secret,
-                                                XXH64_hash_t  seed) {
-
-  XXH_ASSERT(input != NULL);
-  XXH_ASSERT(secret != NULL);
-  XXH_ASSERT(4 <= len && len < 8);
-  seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
-  {
-
-    xxh_u32 const input1 = XXH_readLE32(input);
-    xxh_u32 const input2 = XXH_readLE32(input + len - 4);
-    xxh_u64 const bitflip =
-        (XXH_readLE64(secret + 8) ^ XXH_readLE64(secret + 16)) - seed;
-    xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
-    xxh_u64       x = input64 ^ bitflip;
-    /* this mix is inspired by Pelle Evensen's rrmxmx */
-    x ^= XXH_rotl64(x, 49) ^ XXH_rotl64(x, 24);
-    x *= 0x9FB21C651E98DF25ULL;
-    x ^= (x >> 35) + len;
-    x *= 0x9FB21C651E98DF25ULL;
-    return XXH_xorshift64(x, 28);
-
-  }
-
-}
-
-XXH_FORCE_INLINE XXH64_hash_t XXH3_len_9to16_64b(const xxh_u8 *input,
-                                                 size_t        len,
-                                                 const xxh_u8 *secret,
-                                                 XXH64_hash_t  seed) {
-
-  XXH_ASSERT(input != NULL);
-  XXH_ASSERT(secret != NULL);
-  XXH_ASSERT(8 <= len && len <= 16);
-  {
-
-    xxh_u64 const bitflip1 =
-        (XXH_readLE64(secret + 24) ^ XXH_readLE64(secret + 32)) + seed;
-    xxh_u64 const bitflip2 =
-        (XXH_readLE64(secret + 40) ^ XXH_readLE64(secret + 48)) - seed;
-    xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
-    xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
-    xxh_u64 const acc = len + XXH_swap64(input_lo) + input_hi +
-                        XXH3_mul128_fold64(input_lo, input_hi);
-    return XXH3_avalanche(acc);
-
-  }
-
-}
-
-XXH_FORCE_INLINE XXH64_hash_t XXH3_len_0to16_64b(const xxh_u8 *input,
-                                                 size_t        len,
-                                                 const xxh_u8 *secret,
-                                                 XXH64_hash_t  seed) {
-
-  XXH_ASSERT(len <= 16);
-  {
-
-    if (XXH_likely(len > 8))
-      return XXH3_len_9to16_64b(input, len, secret, seed);
-    if (XXH_likely(len >= 4))
-      return XXH3_len_4to8_64b(input, len, secret, seed);
-    if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
-    return XXH3_avalanche((XXH_PRIME64_1 + seed) ^ (XXH_readLE64(secret + 56) ^
-                                                    XXH_readLE64(secret + 64)));
-
-  }
-
-}
-
-/*
- * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
- * multiplication by zero, affecting hashes of lengths 17 to 240.
- *
- * However, they are very unlikely.
- *
- * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
- * unseeded non-cryptographic hashes, it does not attempt to defend itself
- * against specially crafted inputs, only random inputs.
- *
- * Compared to classic UMAC, where a 1 in 2^31 chance of 4 consecutive bytes
- * cancelling out the secret is taken an arbitrary number of times (addressed
- * in XXH3_accumulate_512), this collision is very unlikely with random inputs
- * and/or proper seeding:
- *
- * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
- * function that is only called up to 16 times per hash with up to 240 bytes of
- * input.
- *
- * This is not too bad for a non-cryptographic hash function, especially with
- * only 64 bit outputs.
- *
- * The 128-bit variant (which trades some speed for strength) is NOT affected
- * by this, although it is always a good idea to use a proper seed if you care
- * about strength.
- */
-XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8 *XXH_RESTRICT input,
-                                     const xxh_u8 *XXH_RESTRICT secret,
-                                     xxh_u64                    seed64) {
-
-#if defined(__GNUC__) && !defined(__clang__)  /* GCC, not Clang */ \
-    && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */     \
-    &&                                                             \
-    !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable, like the XXH32 hack */
-  /*
-   * UGLY HACK:
-   * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
-   * slower code.
-   *
-   * By forcing seed64 into a register, we disrupt the cost model and
-   * cause it to scalarize. See `XXH32_round()`
-   *
-   * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
-   * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
-   * GCC 9.2, despite both emitting scalar code.
-   *
-   * GCC generates much better scalar code than Clang for the rest of XXH3,
-   * which is why finding a more optimal codepath is an interest.
-   */
-  __asm__("" : "+r"(seed64));
-#endif
-  {
-
-    xxh_u64 const input_lo = XXH_readLE64(input);
-    xxh_u64 const input_hi = XXH_readLE64(input + 8);
-    return XXH3_mul128_fold64(input_lo ^ (XXH_readLE64(secret) + seed64),
-                              input_hi ^ (XXH_readLE64(secret + 8) - seed64));
-
-  }
-
-}
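-
-/*
- * Restating XXH3_mix16B as an equation, with fold64 denoting
- * XXH3_mul128_fold64:
- *   mix16B(in, sec, seed) = fold64( in[0..7]  ^ (sec[0..7]  + seed),
- *                                   in[8..15] ^ (sec[8..15] - seed) )
- * where each in[a..b] / sec[a..b] is read as a little-endian 64-bit word.
- */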
-
-/* For mid range keys, XXH3 uses a Mum-hash variant. */
-XXH_FORCE_INLINE XXH64_hash_t XXH3_len_17to128_64b(
-    const xxh_u8 *XXH_RESTRICT input, size_t len,
-    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) {
-
-  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
-  (void)secretSize;
-  XXH_ASSERT(16 < len && len <= 128);
-
-  {
-
-    xxh_u64 acc = len * XXH_PRIME64_1;
-    if (len > 32) {
-
-      if (len > 64) {
-
-        if (len > 96) {
-
-          acc += XXH3_mix16B(input + 48, secret + 96, seed);
-          acc += XXH3_mix16B(input + len - 64, secret + 112, seed);
-
-        }
-
-        acc += XXH3_mix16B(input + 32, secret + 64, seed);
-        acc += XXH3_mix16B(input + len - 48, secret + 80, seed);
-
-      }
-
-      acc += XXH3_mix16B(input + 16, secret + 32, seed);
-      acc += XXH3_mix16B(input + len - 32, secret + 48, seed);
-
-    }
-
-    acc += XXH3_mix16B(input + 0, secret + 0, seed);
-    acc += XXH3_mix16B(input + len - 16, secret + 16, seed);
-
-    return XXH3_avalanche(acc);
-
-  }
-
-}
-
-#define XXH3_MIDSIZE_MAX 240
-
-XXH_NO_INLINE XXH64_hash_t XXH3_len_129to240_64b(
-    const xxh_u8 *XXH_RESTRICT input, size_t len,
-    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) {
-
-  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
-  (void)secretSize;
-  XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
-
-#define XXH3_MIDSIZE_STARTOFFSET 3
-#define XXH3_MIDSIZE_LASTOFFSET 17
-
-  {
-
-    xxh_u64   acc = len * XXH_PRIME64_1;
-    int const nbRounds = (int)len / 16;
-    int       i;
-    for (i = 0; i < 8; i++) {
-
-      acc += XXH3_mix16B(input + (16 * i), secret + (16 * i), seed);
-
-    }
-
-    acc = XXH3_avalanche(acc);
-    XXH_ASSERT(nbRounds >= 8);
-#if defined(__clang__)                                /* Clang */ \
-    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
-    && !defined(XXH_ENABLE_AUTOVECTORIZE)              /* Define to disable */
-  /*
-   * UGLY HACK:
-   * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
-   * Everywhere else, it uses scalar code.
-   *
-   * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
-   * would still be slower than UMAAL (see XXH_mult64to128).
-   *
-   * Unfortunately, Clang doesn't handle the long multiplies properly and
-   * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
-   * scalarized into an ugly mess of VMOV.32 instructions.
-   *
-   * This mess is difficult to avoid without turning autovectorization
-   * off completely, but these cases are usually relatively minor and/or
-   * not worth fixing.
-   *
-   * This loop is the easiest to fix, as unlike XXH32, this pragma
-   * _actually works_ because it is a loop vectorization instead of an
-   * SLP vectorization.
-   */
-  #pragma clang loop vectorize(disable)
-#endif
-    for (i = 8; i < nbRounds; i++) {
-
-      acc +=
-          XXH3_mix16B(input + (16 * i),
-                      secret + (16 * (i - 8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
-
-    }
-
-    /* last bytes */
-    acc += XXH3_mix16B(input + len - 16,
-                       secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET,
-                       seed);
-    return XXH3_avalanche(acc);
-
-  }
-
-}
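-
-/*
- * Worked bounds check for the midsize path (a sketch following the constants
- * above, with len == 240): nbRounds == 15; the last loop round (i == 14)
- * reads secret[99..114], and the final mix reads secret[119..134], both of
- * which stay below XXH3_SECRET_SIZE_MIN == 136.
- */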
-
-/* =======     Long Keys     ======= */
-
-#define XXH_STRIPE_LEN 64
-#define XXH_SECRET_CONSUME_RATE \
-  8                     /* nb of secret bytes consumed at each accumulation */
-#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
-
-#ifdef XXH_OLD_NAMES
-  #define STRIPE_LEN XXH_STRIPE_LEN
-  #define ACC_NB XXH_ACC_NB
-#endif
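-
-/*
- * Stripe geometry, spelled out (follows directly from the defines above):
- * one stripe is 64 bytes, i.e. XXH_ACC_NB == 64 / sizeof(xxh_u64) == 8
- * accumulator lanes, and each accumulated stripe consumes 8 further bytes
- * of secret.
- */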
-
-typedef enum { XXH3_acc_64bits, XXH3_acc_128bits } XXH3_accWidth_e;
-
-XXH_FORCE_INLINE void XXH_writeLE64(void *dst, xxh_u64 v64) {
-
-  if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
-  memcpy(dst, &v64, sizeof(v64));
-
-}
-
-/* Several intrinsic functions below are supposed to accept __int64 as argument,
- * as documented in
- * https://software.intel.com/sites/landingpage/IntrinsicsGuide/ . However,
- * several environments do not define the __int64 type, requiring a workaround.
- */
-#if !defined(__VMS) &&       \
-    (defined(__cplusplus) || \
-     (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */))
-typedef int64_t xxh_i64;
-#else
-/* the following type must have a width of 64 bits */
-typedef long long xxh_i64;
-#endif
-
-/*
- * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most
- * optimized.
- *
- * It is a hardened version of UMAC, based off of FARSH's implementation.
- *
- * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
- * implementations, and it is ridiculously fast.
- *
- * We harden it by mixing the original input to the accumulators as well as the
- * product.
- *
- * This means that in the (relatively likely) case of a multiply by zero, the
- * original input is preserved.
- *
- * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
- * cross-pollination, as otherwise the upper and lower halves would be
- * essentially independent.
- *
- * This doesn't matter on 64-bit hashes since they all get merged together in
- * the end, so we skip the extra step.
- *
- * Both XXH3_64bits and XXH3_128bits use this subroutine.
- */
-
-#if (XXH_VECTOR == XXH_AVX512) || defined(XXH_X86DISPATCH)
-
-  #ifndef XXH_TARGET_AVX512
-    #define XXH_TARGET_AVX512                   /* disable attribute target */
-  #endif
-
-XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_accumulate_512_avx512(
-    void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
-    const void *XXH_RESTRICT secret, XXH3_accWidth_e accWidth) {
-
-  XXH_ALIGN(64) __m512i *const xacc = (__m512i *)acc;
-  XXH_ASSERT((((size_t)acc) & 63) == 0);
-  XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
-
-  {
-
-    /* data_vec    = input[0]; */
-    __m512i const data_vec = _mm512_loadu_si512(input);
-    /* key_vec     = secret[0]; */
-    __m512i const key_vec = _mm512_loadu_si512(secret);
-    /* data_key    = data_vec ^ key_vec; */
-    __m512i const data_key = _mm512_xor_si512(data_vec, key_vec);
-    /* data_key_lo = data_key >> 32; */
-    __m512i const data_key_lo =
-        _mm512_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
-    /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
-    __m512i const product = _mm512_mul_epu32(data_key, data_key_lo);
-    if (accWidth == XXH3_acc_128bits) {
-
-      /* xacc[0] += swap(data_vec); */
-      __m512i const data_swap =
-          _mm512_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
-      __m512i const sum = _mm512_add_epi64(*xacc, data_swap);
-      /* xacc[0] += product; */
-      *xacc = _mm512_add_epi64(product, sum);
-
-    } else {                                             /* XXH3_acc_64bits */
-
-      /* xacc[0] += data_vec; */
-      __m512i const sum = _mm512_add_epi64(*xacc, data_vec);
-      /* xacc[0] += product; */
-      *xacc = _mm512_add_epi64(product, sum);
-
-    }
-
-  }
-
-}
-
-/*
- * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
- *
- * Multiplication isn't perfect, as explained by Google in HighwayHash:
- *
- *  // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
- *  // varying degrees. In descending order of goodness, bytes
- *  // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
- *  // As expected, the upper and lower bytes are much worse.
- *
- * Source:
- * https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
- *
- * Since our algorithm uses a pseudorandom secret to add some variance into the
- * mix, we don't need to (or want to) mix as often or as much as HighwayHash
- * does.
- *
- * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
- * extraction.
- *
- * Both XXH3_64bits and XXH3_128bits use this subroutine.
- */
-
-XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_scrambleAcc_avx512(
-    void *XXH_RESTRICT acc, const void *XXH_RESTRICT secret) {
-
-  XXH_ASSERT((((size_t)acc) & 63) == 0);
-  XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
-  {
-
-    XXH_ALIGN(64) __m512i *const xacc = (__m512i *)acc;
-    const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
-
-    /* xacc[0] ^= (xacc[0] >> 47) */
-    __m512i const acc_vec = *xacc;
-    __m512i const shifted = _mm512_srli_epi64(acc_vec, 47);
-    __m512i const data_vec = _mm512_xor_si512(acc_vec, shifted);
-    /* xacc[0] ^= secret; */
-    __m512i const key_vec = _mm512_loadu_si512(secret);
-    __m512i const data_key = _mm512_xor_si512(data_vec, key_vec);
-
-    /* xacc[0] *= XXH_PRIME32_1; */
-    __m512i const data_key_hi =
-        _mm512_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
-    __m512i const prod_lo = _mm512_mul_epu32(data_key, prime32);
-    __m512i const prod_hi = _mm512_mul_epu32(data_key_hi, prime32);
-    *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
-
-  }
-
-}
-
-XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_initCustomSecret_avx512(
-    void *XXH_RESTRICT customSecret, xxh_u64 seed64) {
-
-  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
-  XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
-  XXH_ASSERT(((size_t)customSecret & 63) == 0);
-  (void)(&XXH_writeLE64);
-  {
-
-    int const     nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
-    __m512i const seed = _mm512_mask_set1_epi64(
-        _mm512_set1_epi64((xxh_i64)seed64), 0xAA, -(xxh_i64)seed64);
-
-    XXH_ALIGN(64) const __m512i *const src = (const __m512i *)XXH3_kSecret;
-    XXH_ALIGN(64) __m512i *const       dest = (__m512i *)customSecret;
-    int                                i;
-    for (i = 0; i < nbRounds; ++i) {
-
-      // GCC has a bug: _mm512_stream_load_si512 accepts 'void*', not 'void
-      // const*', so this will warn "discards ‘const’ qualifier".
-      union {
-
-        XXH_ALIGN(64) const __m512i *const cp;
-        XXH_ALIGN(64) void *const p;
-
-      } const remote_const_void = {.cp = src + i};
-
-      dest[i] =
-          _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
-
-    }
-
-  }
-
-}
-
-#endif
-
-#if (XXH_VECTOR == XXH_AVX2) || defined(XXH_X86DISPATCH)
-
-  #ifndef XXH_TARGET_AVX2
-    #define XXH_TARGET_AVX2                     /* disable attribute target */
-  #endif
-
-XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_accumulate_512_avx2(
-    void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
-    const void *XXH_RESTRICT secret, XXH3_accWidth_e accWidth) {
-
-  XXH_ASSERT((((size_t)acc) & 31) == 0);
-  {
-
-    XXH_ALIGN(32) __m256i *const xacc = (__m256i *)acc;
-    /* Unaligned. This is mainly for pointer arithmetic, and because
-     * _mm256_loadu_si256 requires a const __m256i * pointer for some reason.
-     */
-    const __m256i *const xinput = (const __m256i *)input;
-    /* Unaligned. This is mainly for pointer arithmetic, and because
-     * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
-    const __m256i *const xsecret = (const __m256i *)secret;
-
-    size_t i;
-    for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m256i); i++) {
-
-      /* data_vec    = xinput[i]; */
-      __m256i const data_vec = _mm256_loadu_si256(xinput + i);
-      /* key_vec     = xsecret[i]; */
-      __m256i const key_vec = _mm256_loadu_si256(xsecret + i);
-      /* data_key    = data_vec ^ key_vec; */
-      __m256i const data_key = _mm256_xor_si256(data_vec, key_vec);
-      /* data_key_lo = data_key >> 32; */
-      __m256i const data_key_lo =
-          _mm256_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
-      /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
-      __m256i const product = _mm256_mul_epu32(data_key, data_key_lo);
-      if (accWidth == XXH3_acc_128bits) {
-
-        /* xacc[i] += swap(data_vec); */
-        __m256i const data_swap =
-            _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
-        __m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
-        /* xacc[i] += product; */
-        xacc[i] = _mm256_add_epi64(product, sum);
-
-      } else {                                           /* XXH3_acc_64bits */
-
-        /* xacc[i] += data_vec; */
-        __m256i const sum = _mm256_add_epi64(xacc[i], data_vec);
-        /* xacc[i] += product; */
-        xacc[i] = _mm256_add_epi64(product, sum);
-
-      }
-
-    }
-
-  }
-
-}
-
-XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_scrambleAcc_avx2(
-    void *XXH_RESTRICT acc, const void *XXH_RESTRICT secret) {
-
-  XXH_ASSERT((((size_t)acc) & 31) == 0);
-  {
-
-    XXH_ALIGN(32) __m256i *const xacc = (__m256i *)acc;
-    /* Unaligned. This is mainly for pointer arithmetic, and because
-     * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
-    const __m256i *const xsecret = (const __m256i *)secret;
-    const __m256i        prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
-
-    size_t i;
-    for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m256i); i++) {
-
-      /* xacc[i] ^= (xacc[i] >> 47) */
-      __m256i const acc_vec = xacc[i];
-      __m256i const shifted = _mm256_srli_epi64(acc_vec, 47);
-      __m256i const data_vec = _mm256_xor_si256(acc_vec, shifted);
-      /* xacc[i] ^= xsecret; */
-      __m256i const key_vec = _mm256_loadu_si256(xsecret + i);
-      __m256i const data_key = _mm256_xor_si256(data_vec, key_vec);
-
-      /* xacc[i] *= XXH_PRIME32_1; */
-      __m256i const data_key_hi =
-          _mm256_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
-      __m256i const prod_lo = _mm256_mul_epu32(data_key, prime32);
-      __m256i const prod_hi = _mm256_mul_epu32(data_key_hi, prime32);
-      xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
-
-    }
-
-  }
-
-}
-
-XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(
-    void *XXH_RESTRICT customSecret, xxh_u64 seed64) {
-
-  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
-  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
-  XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
-  (void)(&XXH_writeLE64);
-  XXH_PREFETCH(customSecret);
-  {
-
-    __m256i const seed = _mm256_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64,
-                                           -(xxh_i64)seed64, (xxh_i64)seed64);
-
-    XXH_ALIGN(64) const __m256i *const src = (const __m256i *)XXH3_kSecret;
-    XXH_ALIGN(64) __m256i *            dest = (__m256i *)customSecret;
-
-  #if defined(__GNUC__) || defined(__clang__)
-    /*
-     * On GCC & Clang, marking 'dest' as modified causes the compiler to:
-     *   - not extract the secret from SSE registers in the internal loop
-     *   - use fewer common registers, and avoid pushing them onto the stack
-     * The asm hack causes Clang to assume that XXH3_kSecretPtr aliases with
-     * customSecret, and on aarch64, this prevented LDP from merging two
-     * loads together for free. Putting the loads together before the stores
-     * properly generates LDP.
-     */
-    __asm__("" : "+r"(dest));
-  #endif
-
-    /* GCC -O2 needs the loop unrolled manually */
-    dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src + 0), seed);
-    dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src + 1), seed);
-    dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src + 2), seed);
-    dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src + 3), seed);
-    dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src + 4), seed);
-    dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src + 5), seed);
-
-  }
-
-}
-
-#endif
-
-#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
-
-  #ifndef XXH_TARGET_SSE2
-    #define XXH_TARGET_SSE2                     /* disable attribute target */
-  #endif
-
-XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_accumulate_512_sse2(
-    void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
-    const void *XXH_RESTRICT secret, XXH3_accWidth_e accWidth) {
-
-  /* SSE2 is just a half-scale version of the AVX2 version. */
-  XXH_ASSERT((((size_t)acc) & 15) == 0);
-  {
-
-    XXH_ALIGN(16) __m128i *const xacc = (__m128i *)acc;
-    /* Unaligned. This is mainly for pointer arithmetic, and because
-     * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
-    const __m128i *const xinput = (const __m128i *)input;
-    /* Unaligned. This is mainly for pointer arithmetic, and because
-     * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
-    const __m128i *const xsecret = (const __m128i *)secret;
-
-    size_t i;
-    for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m128i); i++) {
-
-      /* data_vec    = xinput[i]; */
-      __m128i const data_vec = _mm_loadu_si128(xinput + i);
-      /* key_vec     = xsecret[i]; */
-      __m128i const key_vec = _mm_loadu_si128(xsecret + i);
-      /* data_key    = data_vec ^ key_vec; */
-      __m128i const data_key = _mm_xor_si128(data_vec, key_vec);
-      /* data_key_lo = data_key >> 32; */
-      __m128i const data_key_lo =
-          _mm_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
-      /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
-      __m128i const product = _mm_mul_epu32(data_key, data_key_lo);
-      if (accWidth == XXH3_acc_128bits) {
-
-        /* xacc[i] += swap(data_vec); */
-        __m128i const data_swap =
-            _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
-        __m128i const sum = _mm_add_epi64(xacc[i], data_swap);
-        /* xacc[i] += product; */
-        xacc[i] = _mm_add_epi64(product, sum);
-
-      } else {                                           /* XXH3_acc_64bits */
-
-        /* xacc[i] += data_vec; */
-        __m128i const sum = _mm_add_epi64(xacc[i], data_vec);
-        /* xacc[i] += product; */
-        xacc[i] = _mm_add_epi64(product, sum);
-
-      }
-
-    }
-
-  }
-
-}
-
-XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_scrambleAcc_sse2(
-    void *XXH_RESTRICT acc, const void *XXH_RESTRICT secret) {
-
-  XXH_ASSERT((((size_t)acc) & 15) == 0);
-  {
-
-    XXH_ALIGN(16) __m128i *const xacc = (__m128i *)acc;
-    /* Unaligned. This is mainly for pointer arithmetic, and because
-     * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
-    const __m128i *const xsecret = (const __m128i *)secret;
-    const __m128i        prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
-
-    size_t i;
-    for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m128i); i++) {
-
-      /* xacc[i] ^= (xacc[i] >> 47) */
-      __m128i const acc_vec = xacc[i];
-      __m128i const shifted = _mm_srli_epi64(acc_vec, 47);
-      __m128i const data_vec = _mm_xor_si128(acc_vec, shifted);
-      /* xacc[i] ^= xsecret[i]; */
-      __m128i const key_vec = _mm_loadu_si128(xsecret + i);
-      __m128i const data_key = _mm_xor_si128(data_vec, key_vec);
-
-      /* xacc[i] *= XXH_PRIME32_1; */
-      __m128i const data_key_hi =
-          _mm_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
-      __m128i const prod_lo = _mm_mul_epu32(data_key, prime32);
-      __m128i const prod_hi = _mm_mul_epu32(data_key_hi, prime32);
-      xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
-
-    }
-
-  }
-
-}
-
-XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(
-    void *XXH_RESTRICT customSecret, xxh_u64 seed64) {
-
-  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
-  (void)(&XXH_writeLE64);
-  {
-
-    int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
-
-  #if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
-    // MSVC 32bit mode does not support _mm_set_epi64x before 2015
-    XXH_ALIGN(16)
-    const xxh_i64 seed64x2[2] = {(xxh_i64)seed64, -(xxh_i64)seed64};
-    __m128i const seed = _mm_load_si128((__m128i const *)seed64x2);
-  #else
-    __m128i const seed = _mm_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64);
-  #endif
-    int i;
-
-    XXH_ALIGN(64) const float *const  src = (float const *)XXH3_kSecret;
-    XXH_ALIGN(XXH_SEC_ALIGN) __m128i *dest = (__m128i *)customSecret;
-  #if defined(__GNUC__) || defined(__clang__)
-    /*
-     * On GCC & Clang, marking 'dest' as modified causes the compiler to:
-     *   - not extract the secret from SSE registers in the internal loop
-     *   - use fewer common registers, and avoid pushing them onto the stack
-     */
-    __asm__("" : "+r"(dest));
-  #endif
-
-    for (i = 0; i < nbRounds; ++i) {
-
-      dest[i] = _mm_add_epi64(_mm_castps_si128(_mm_load_ps(src + i * 4)), seed);
-
-    }
-
-  }
-
-}
-
-#endif
-
-#if (XXH_VECTOR == XXH_NEON)
-
-XXH_FORCE_INLINE void XXH3_accumulate_512_neon(void *XXH_RESTRICT       acc,
-                                               const void *XXH_RESTRICT input,
-                                               const void *XXH_RESTRICT secret,
-                                               XXH3_accWidth_e accWidth) {
-
-  XXH_ASSERT((((size_t)acc) & 15) == 0);
-  {
-
-    XXH_ALIGN(16) uint64x2_t *const xacc = (uint64x2_t *)acc;
-    /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7.
-     */
-    uint8_t const *const xinput = (const uint8_t *)input;
-    uint8_t const *const xsecret = (const uint8_t *)secret;
-
-    size_t i;
-    for (i = 0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) {
-
-      /* data_vec = xinput[i]; */
-      uint8x16_t data_vec = vld1q_u8(xinput + (i * 16));
-      /* key_vec  = xsecret[i];  */
-      uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16));
-      uint64x2_t data_key;
-      uint32x2_t data_key_lo, data_key_hi;
-      if (accWidth == XXH3_acc_64bits) {
-
-        /* xacc[i] += data_vec; */
-        xacc[i] = vaddq_u64(xacc[i], vreinterpretq_u64_u8(data_vec));
-
-      } else {                                          /* XXH3_acc_128bits */
-
-        /* xacc[i] += swap(data_vec); */
-        uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec);
-        uint64x2_t const swapped = vextq_u64(data64, data64, 1);
-        xacc[i] = vaddq_u64(xacc[i], swapped);
-
-      }
-
-      /* data_key = data_vec ^ key_vec; */
-      data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
-      /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
-       * data_key_hi = (uint32x2_t) (data_key >> 32);
-       * data_key = UNDEFINED; */
-      XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
-      /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
-      xacc[i] = vmlal_u32(xacc[i], data_key_lo, data_key_hi);
-
-    }
-
-  }
-
-}
-
-XXH_FORCE_INLINE void XXH3_scrambleAcc_neon(void *XXH_RESTRICT       acc,
-                                            const void *XXH_RESTRICT secret) {
-
-  XXH_ASSERT((((size_t)acc) & 15) == 0);
-
-  {
-
-    uint64x2_t *   xacc = (uint64x2_t *)acc;
-    uint8_t const *xsecret = (uint8_t const *)secret;
-    uint32x2_t     prime = vdup_n_u32(XXH_PRIME32_1);
-
-    size_t i;
-    for (i = 0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) {
-
-      /* xacc[i] ^= (xacc[i] >> 47); */
-      uint64x2_t acc_vec = xacc[i];
-      uint64x2_t shifted = vshrq_n_u64(acc_vec, 47);
-      uint64x2_t data_vec = veorq_u64(acc_vec, shifted);
-
-      /* xacc[i] ^= xsecret[i]; */
-      uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16));
-      uint64x2_t data_key = veorq_u64(data_vec, vreinterpretq_u64_u8(key_vec));
-
-      /* xacc[i] *= XXH_PRIME32_1 */
-      uint32x2_t data_key_lo, data_key_hi;
-      /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
-       * data_key_hi = (uint32x2_t) (xacc[i] >> 32);
-       * xacc[i] = UNDEFINED; */
-      XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
-      { /*
-         * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
-         *
-         * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
-         * incorrectly "optimize" this:
-         *   tmp     = vmul_u32(vmovn_u64(a), vmovn_u64(b));
-         *   shifted = vshll_n_u32(tmp, 32);
-         * to this:
-         *   tmp     = "vmulq_u64"(a, b); // no such thing!
-         *   shifted = vshlq_n_u64(tmp, 32);
-         *
-         * However, unlike SSE, Clang lacks a 64-bit multiply routine
-         * for NEON, and it scalarizes two 64-bit multiplies instead.
-         *
-         * vmull_u32 has the same timing as vmul_u32, and it avoids
-         * this bug completely.
-         * See https://bugs.llvm.org/show_bug.cgi?id=39967
-         */
-        uint64x2_t prod_hi = vmull_u32(data_key_hi, prime);
-        /* xacc[i] = prod_hi << 32; */
-        xacc[i] = vshlq_n_u64(prod_hi, 32);
-        /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */
-        xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
-
-      }
-
-    }
-
-  }
-
-}
-
-#endif
-
-#if (XXH_VECTOR == XXH_VSX)
-
-XXH_FORCE_INLINE void XXH3_accumulate_512_vsx(void *XXH_RESTRICT       acc,
-                                              const void *XXH_RESTRICT input,
-                                              const void *XXH_RESTRICT secret,
-                                              XXH3_accWidth_e accWidth) {
-
-  xxh_u64x2 *const       xacc = (xxh_u64x2 *)acc;       /* presumed aligned */
-  xxh_u64x2 const *const xinput =
-      (xxh_u64x2 const *)input;                 /* no alignment restriction */
-  xxh_u64x2 const *const xsecret =
-      (xxh_u64x2 const *)secret;                /* no alignment restriction */
-  xxh_u64x2 const v32 = {32, 32};
-  size_t          i;
-  for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
-
-    /* data_vec = xinput[i]; */
-    xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
-    /* key_vec = xsecret[i]; */
-    xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
-    xxh_u64x2 const data_key = data_vec ^ key_vec;
-    /* shuffled = (data_key << 32) | (data_key >> 32); */
-    xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
-    /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled &
-     * 0xFFFFFFFF); */
-    xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
-    xacc[i] += product;
-
-    if (accWidth == XXH3_acc_64bits) {
-
-      xacc[i] += data_vec;
-
-    } else {                                            /* XXH3_acc_128bits */
-
-        /* swap high and low halves */
-  #ifdef __s390x__
-      xxh_u64x2 const data_swapped = vec_permi(data_vec, data_vec, 2);
-  #else
-      xxh_u64x2 const data_swapped = vec_xxpermdi(data_vec, data_vec, 2);
-  #endif
-      xacc[i] += data_swapped;
-
-    }
-
-  }
-
-}
-
-XXH_FORCE_INLINE void XXH3_scrambleAcc_vsx(void *XXH_RESTRICT       acc,
-                                           const void *XXH_RESTRICT secret) {
-
-  XXH_ASSERT((((size_t)acc) & 15) == 0);
-
-  {
-
-    xxh_u64x2 *const       xacc = (xxh_u64x2 *)acc;
-    const xxh_u64x2 *const xsecret = (const xxh_u64x2 *)secret;
-    /* constants */
-    xxh_u64x2 const v32 = {32, 32};
-    xxh_u64x2 const v47 = {47, 47};
-    xxh_u32x4 const prime = {XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1,
-                             XXH_PRIME32_1};
-    size_t          i;
-    for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
-
-      /* xacc[i] ^= (xacc[i] >> 47); */
-      xxh_u64x2 const acc_vec = xacc[i];
-      xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
-
-      /* xacc[i] ^= xsecret[i]; */
-      xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
-      xxh_u64x2 const data_key = data_vec ^ key_vec;
-
-      /* xacc[i] *= XXH_PRIME32_1 */
-      /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime &
-       * 0xFFFFFFFF);  */
-      xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
-      /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32);  */
-      xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
-      xacc[i] = prod_odd + (prod_even << v32);
-
-    }
-
-  }
-
-}
-
-#endif
-
-/* scalar variants - universal */
-
-XXH_FORCE_INLINE void XXH3_accumulate_512_scalar(
-    void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
-    const void *XXH_RESTRICT secret, XXH3_accWidth_e accWidth) {
-
-  XXH_ALIGN(XXH_ACC_ALIGN)
-  xxh_u64 *const      xacc = (xxh_u64 *)acc;            /* presumed aligned */
-  const xxh_u8 *const xinput =
-      (const xxh_u8 *)input;                    /* no alignment restriction */
-  const xxh_u8 *const xsecret =
-      (const xxh_u8 *)secret;                   /* no alignment restriction */
-  size_t i;
-  XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN - 1)) == 0);
-  for (i = 0; i < XXH_ACC_NB; i++) {
-
-    xxh_u64 const data_val = XXH_readLE64(xinput + 8 * i);
-    xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i * 8);
-
-    if (accWidth == XXH3_acc_64bits) {
-
-      xacc[i] += data_val;
-
-    } else {
-
-      xacc[i ^ 1] += data_val;                       /* swap adjacent lanes */
-
-    }
-
-    xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
-
-  }
-
-}
-
-XXH_FORCE_INLINE void XXH3_scrambleAcc_scalar(void *XXH_RESTRICT       acc,
-                                              const void *XXH_RESTRICT secret) {
-
-  XXH_ALIGN(XXH_ACC_ALIGN)
-  xxh_u64 *const      xacc = (xxh_u64 *)acc;            /* presumed aligned */
-  const xxh_u8 *const xsecret =
-      (const xxh_u8 *)secret;                   /* no alignment restriction */
-  size_t i;
-  XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN - 1)) == 0);
-  for (i = 0; i < XXH_ACC_NB; i++) {
-
-    xxh_u64 const key64 = XXH_readLE64(xsecret + 8 * i);
-    xxh_u64       acc64 = xacc[i];
-    acc64 = XXH_xorshift64(acc64, 47);
-    acc64 ^= key64;
-    acc64 *= XXH_PRIME32_1;
-    xacc[i] = acc64;
-
-  }
-
-}
-
-XXH_FORCE_INLINE void XXH3_initCustomSecret_scalar(
-    void *XXH_RESTRICT customSecret, xxh_u64 seed64) {
-
-  /*
-   * We need a separate pointer for the hack below,
-   * which requires a non-const pointer.
-   * Any decent compiler will optimize this out otherwise.
-   */
-  const xxh_u8 *kSecretPtr = XXH3_kSecret;
-  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
-
-#if defined(__clang__) && defined(__aarch64__)
-  /*
-   * UGLY HACK:
-   * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
-   * placed sequentially, in order, at the top of the unrolled loop.
-   *
-   * While MOVK is great for generating constants (2 cycles for a 64-bit
-   * constant compared to 4 cycles for LDR), long MOVK chains stall the
-   * integer pipelines:
-   *   I   L   S
-   * MOVK
-   * MOVK
-   * MOVK
-   * MOVK
-   * ADD
-   * SUB      STR
-   *          STR
-   * By forcing loads from memory (as the asm line causes Clang to assume
-   * that XXH3_kSecretPtr has been changed), the pipelines are used more
-   * efficiently:
-   *   I   L   S
-   *      LDR
-   *  ADD LDR
-   *  SUB     STR
-   *          STR
-   * XXH3_64bits_withSeed, len == 256, Snapdragon 835
-   *   without hack: 2654.4 MB/s
-   *   with hack:    3202.9 MB/s
-   */
-  __asm__("" : "+r"(kSecretPtr));
-#endif
-  /*
-   * Note: in debug mode, this overrides the asm optimization
-   * and Clang will emit MOVK chains again.
-   */
-  XXH_ASSERT(kSecretPtr == XXH3_kSecret);
-
-  {
-
-    int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
-    int       i;
-    for (i = 0; i < nbRounds; i++) {
-
-      /*
-       * The asm hack causes Clang to assume that kSecretPtr aliases with
-       * customSecret, and on aarch64, this prevented LDP from merging two
-       * loads together for free. Putting the loads together before the stores
-       * properly generates LDP.
-       */
-      xxh_u64 lo = XXH_readLE64(kSecretPtr + 16 * i) + seed64;
-      xxh_u64 hi = XXH_readLE64(kSecretPtr + 16 * i + 8) - seed64;
-      XXH_writeLE64((xxh_u8 *)customSecret + 16 * i, lo);
-      XXH_writeLE64((xxh_u8 *)customSecret + 16 * i + 8, hi);
-
-    }
-
-  }
-
-}
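-
-/*
- * In other words (restating the loop above): for each 16-byte block i,
- *   customSecret[16*i .. 16*i+7]    == kSecret[16*i .. 16*i+7]    + seed64
- *   customSecret[16*i+8 .. 16*i+15] == kSecret[16*i+8 .. 16*i+15] - seed64
- * with both halves read and written as little-endian 64-bit words, so a
- * seed of zero reproduces kSecret exactly.
- */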
-
-typedef void (*XXH3_f_accumulate_512)(void *XXH_RESTRICT, const void *,
-                                      const void *, XXH3_accWidth_e);
-typedef void (*XXH3_f_scrambleAcc)(void *XXH_RESTRICT, const void *);
-typedef void (*XXH3_f_initCustomSecret)(void *XXH_RESTRICT, xxh_u64);
-
-#if (XXH_VECTOR == XXH_AVX512)
-
-  #define XXH3_accumulate_512 XXH3_accumulate_512_avx512
-  #define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
-  #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
-
-#elif (XXH_VECTOR == XXH_AVX2)
-
-  #define XXH3_accumulate_512 XXH3_accumulate_512_avx2
-  #define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
-  #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
-
-#elif (XXH_VECTOR == XXH_SSE2)
-
-  #define XXH3_accumulate_512 XXH3_accumulate_512_sse2
-  #define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
-  #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
-
-#elif (XXH_VECTOR == XXH_NEON)
-
-  #define XXH3_accumulate_512 XXH3_accumulate_512_neon
-  #define XXH3_scrambleAcc XXH3_scrambleAcc_neon
-  #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
-
-#elif (XXH_VECTOR == XXH_VSX)
-
-  #define XXH3_accumulate_512 XXH3_accumulate_512_vsx
-  #define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
-  #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
-
-#else                                                             /* scalar */
-
-  #define XXH3_accumulate_512 XXH3_accumulate_512_scalar
-  #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
-  #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
-
-#endif
-
-#ifndef XXH_PREFETCH_DIST
-  #ifdef __clang__
-    #define XXH_PREFETCH_DIST 320
-  #else
-    #if (XXH_VECTOR == XXH_AVX512)
-      #define XXH_PREFETCH_DIST 512
-    #else
-      #define XXH_PREFETCH_DIST 384
-    #endif
-  #endif                                                       /* __clang__ */
-#endif                                                 /* XXH_PREFETCH_DIST */
-
-/*
- * XXH3_accumulate()
- * Loops over XXH3_accumulate_512().
- * Assumption: nbStripes will not overflow the secret size
- */
-XXH_FORCE_INLINE void XXH3_accumulate(xxh_u64 *XXH_RESTRICT      acc,
-                                      const xxh_u8 *XXH_RESTRICT input,
-                                      const xxh_u8 *XXH_RESTRICT secret,
-                                      size_t                     nbStripes,
-                                      XXH3_accWidth_e            accWidth,
-                                      XXH3_f_accumulate_512      f_acc512) {
-
-  size_t n;
-  for (n = 0; n < nbStripes; n++) {
-
-    const xxh_u8 *const in = input + n * XXH_STRIPE_LEN;
-    XXH_PREFETCH(in + XXH_PREFETCH_DIST);
-    f_acc512(acc, in, secret + n * XXH_SECRET_CONSUME_RATE, accWidth);
-
-  }
-
-}
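-
-/*
- * The offset arithmetic above, spelled out (a sketch using the default
- * 192-byte secret): stripe n reads input[64*n .. 64*n+63] against
- * secret[8*n .. 8*n+63]. A full block has nbStripes == (192 - 64) / 8 == 16,
- * so the last stripe of a block reads secret[120..183], which stays inside
- * the 192-byte secret.
- */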
-
-XXH_FORCE_INLINE void XXH3_hashLong_internal_loop(
-    xxh_u64 *XXH_RESTRICT acc, const xxh_u8 *XXH_RESTRICT input, size_t len,
-    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize,
-    XXH3_accWidth_e accWidth, XXH3_f_accumulate_512 f_acc512,
-    XXH3_f_scrambleAcc f_scramble) {
-
-  size_t const nb_rounds =
-      (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
-  size_t const block_len = XXH_STRIPE_LEN * nb_rounds;
-  size_t const nb_blocks = len / block_len;
-
-  size_t n;
-
-  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
-
-  for (n = 0; n < nb_blocks; n++) {
-
-    XXH3_accumulate(acc, input + n * block_len, secret, nb_rounds, accWidth,
-                    f_acc512);
-    f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
-
-  }
-
-  /* last partial block */
-  XXH_ASSERT(len > XXH_STRIPE_LEN);
-  {
-
-    size_t const nbStripes = (len - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
-    XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
-    XXH3_accumulate(acc, input + nb_blocks * block_len, secret, nbStripes,
-                    accWidth, f_acc512);
-
-    /* last stripe */
-    if (len & (XXH_STRIPE_LEN - 1)) {
-
-      const xxh_u8 *const p = input + len - XXH_STRIPE_LEN;
-      /* Do not align on 8, so that the secret is different from the scrambler
-       */
-#define XXH_SECRET_LASTACC_START 7
-      f_acc512(acc, p,
-               secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START,
-               accWidth);
-
-    }
-
-  }
-
-}
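-
-/*
- * Worked example of the block math above (a sketch assuming the default
- * 192-byte secret): nb_rounds == (192 - 64) / 8 == 16, so block_len == 1024.
- * A 4000-byte input is then 3 full blocks (3072 bytes), then 14 full stripes
- * (896 bytes), then one final overlapping stripe read at input + len - 64 to
- * cover the remaining 32 bytes.
- */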
-
-XXH_FORCE_INLINE xxh_u64 XXH3_mix2Accs(const xxh_u64 *XXH_RESTRICT acc,
-                                       const xxh_u8 *XXH_RESTRICT  secret) {
-
-  return XXH3_mul128_fold64(acc[0] ^ XXH_readLE64(secret),
-                            acc[1] ^ XXH_readLE64(secret + 8));
-
-}
-
-static XXH64_hash_t XXH3_mergeAccs(const xxh_u64 *XXH_RESTRICT acc,
-                                   const xxh_u8 *XXH_RESTRICT  secret,
-                                   xxh_u64                     start) {
-
-  xxh_u64 result64 = start;
-  size_t  i = 0;
-
-  for (i = 0; i < 4; i++) {
-
-    result64 += XXH3_mix2Accs(acc + 2 * i, secret + 16 * i);
-#if defined(__clang__)                                /* Clang */ \
-    && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
-    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
-    && !defined(XXH_ENABLE_AUTOVECTORIZE)              /* Define to disable */
-    /*
-     * UGLY HACK:
-     * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
-     * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
-     * XXH3_64bits, len == 256, Snapdragon 835:
-     *   without hack: 2063.7 MB/s
-     *   with hack:    2560.7 MB/s
-     */
-    __asm__("" : "+r"(result64));
-#endif
-
-  }
-
-  return XXH3_avalanche(result64);
-
-}
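-
-/*
- * As an equation (restating the loop above):
- *   mergeAccs(acc, sec, start) =
- *     avalanche( start + sum over i in 0..3 of
- *                fold64( acc[2*i] ^ sec64[16*i], acc[2*i+1] ^ sec64[16*i+8] ) )
- * where sec64[k] is the little-endian 64-bit word at secret offset k.
- */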
-
-#define XXH3_INIT_ACC                                                          \
-  {                                                                            \
-                                                                               \
-    XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, XXH_PRIME64_4, \
-        XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1                            \
-                                                                               \
-  }
-
-XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_internal(
-    const xxh_u8 *XXH_RESTRICT input, size_t len,
-    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize,
-    XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble) {
-
-  XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
-
-  XXH3_hashLong_internal_loop(acc, input, len, secret, secretSize,
-                              XXH3_acc_64bits, f_acc512, f_scramble);
-
-  /* converge into final hash */
-  XXH_STATIC_ASSERT(sizeof(acc) == 64);
-  /* do not align on 8, so that the secret is different from the accumulator */
-#define XXH_SECRET_MERGEACCS_START 11
-  XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
-  return XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START,
-                        (xxh_u64)len * XXH_PRIME64_1);
-
-}
-
-/*
- * It's important for performance that XXH3_hashLong is not inlined.
- */
-XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_withSecret(
-    const xxh_u8 *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
-    const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) {
-
-  (void)seed64;
-  return XXH3_hashLong_64b_internal(input, len, secret, secretLen,
-                                    XXH3_accumulate_512, XXH3_scrambleAcc);
-
-}
-
-/*
- * XXH3_hashLong_64b_withSeed():
- * Generate a custom key based on alteration of default XXH3_kSecret with the
- * seed, and then use this key for long mode hashing.
- *
- * This operation is decently fast but nonetheless costs a little bit of time.
- * Try to avoid it whenever possible (typically when seed==0).
- *
- * It's important for performance that XXH3_hashLong is not inlined. Not sure
- * why (uop cache maybe?), but the difference is large and easily measurable.
- */
-XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_withSeed_internal(
-    const xxh_u8 *input, size_t len, XXH64_hash_t seed,
-    XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble,
-    XXH3_f_initCustomSecret f_initSec) {
-
-  if (seed == 0)
-    return XXH3_hashLong_64b_internal(
-        input, len, XXH3_kSecret, sizeof(XXH3_kSecret), f_acc512, f_scramble);
-  {
-
-    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
-    f_initSec(secret, seed);
-    return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
-                                      f_acc512, f_scramble);
-
-  }
-
-}
-
-/*
- * It's important for performance that XXH3_hashLong is not inlined.
- */
-XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_withSeed(const xxh_u8 *input,
-                                                      size_t        len,
-                                                      XXH64_hash_t  seed,
-                                                      const xxh_u8 *secret,
-                                                      size_t        secretLen) {
-
-  (void)secret;
-  (void)secretLen;
-  return XXH3_hashLong_64b_withSeed_internal(
-      input, len, seed, XXH3_accumulate_512, XXH3_scrambleAcc,
-      XXH3_initCustomSecret);
-
-}
-
-typedef XXH64_hash_t (*XXH3_hashLong64_f)(const xxh_u8 *XXH_RESTRICT, size_t,
-                                          XXH64_hash_t,
-                                          const xxh_u8 *XXH_RESTRICT, size_t);
-
-XXH_FORCE_INLINE XXH64_hash_t
-XXH3_64bits_internal(const void *XXH_RESTRICT input, size_t len,
-                     XXH64_hash_t seed64, const void *XXH_RESTRICT secret,
-                     size_t secretLen, XXH3_hashLong64_f f_hashLong) {
-
-  XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
-  /*
-   * If an action is to be taken if `secretLen` condition is not respected,
-   * it should be done here.
-   * For now, it's a contract pre-condition.
-   * Adding a check and a branch here would cost performance at every hash.
-   * Also, note that function signature doesn't offer room to return an error.
-   */
-  if (len <= 16)
-    return XXH3_len_0to16_64b((const xxh_u8 *)input, len,
-                              (const xxh_u8 *)secret, seed64);
-  if (len <= 128)
-    return XXH3_len_17to128_64b((const xxh_u8 *)input, len,
-                                (const xxh_u8 *)secret, secretLen, seed64);
-  if (len <= XXH3_MIDSIZE_MAX)
-    return XXH3_len_129to240_64b((const xxh_u8 *)input, len,
-                                 (const xxh_u8 *)secret, secretLen, seed64);
-  return f_hashLong((const xxh_u8 *)input, len, seed64, (const xxh_u8 *)secret,
-                    secretLen);
-
-}
-
-/* ===   Public entry point   === */
-
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void *input, size_t len) {
-
-  return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret),
-                              XXH3_hashLong_64b_withSecret);
-
-}
-
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void *input,
-                                                   size_t      len,
-                                                   const void *secret,
-                                                   size_t      secretSize) {
-
-  return XXH3_64bits_internal(input, len, 0, secret, secretSize,
-                              XXH3_hashLong_64b_withSecret);
-
-}
-
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void *input, size_t len,
-                                                 XXH64_hash_t seed) {
-
-  return XXH3_64bits_internal(input, len, seed, XXH3_kSecret,
-                              sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
-
-}
-
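For orientation, a minimal one-shot usage sketch of the three entry points above; it assumes header-only use via XXH_INLINE_ALL (an assumption — linking a compiled implementation works equally well):

#define XXH_INLINE_ALL            /* header-only: pull in the implementation */
#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void) {
  const char  *msg = "hello";
  /* Default secret, seed 0: the fastest variant. */
  XXH64_hash_t h0 = XXH3_64bits(msg, strlen(msg));
  /* Seeded: derives a custom secret from the seed on the fly. */
  XXH64_hash_t h1 = XXH3_64bits_withSeed(msg, strlen(msg), 42);
  printf("%016llx %016llx\n", (unsigned long long)h0, (unsigned long long)h1);
  return 0;
}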
-/* ===   XXH3 streaming   === */
-
-/*
- * Allocates memory whose address is always aligned to `align`.
- *
- * This must be freed with `XXH_alignedFree()`.
- *
- * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
- * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in
- * AVX2 or, on 32-bit, for the 16 byte aligned loads in SSE2 and NEON.
- *
- * This underalignment previously caused a rather obvious crash which went
- * completely unnoticed due to XXH3_createState() not actually being tested.
- * Credit to RedSpah for noticing this bug.
- *
- * The alignment is done manually: functions like posix_memalign or _mm_malloc
- * are avoided. To maintain portability, we would have to write a fallback
- * like this anyway, and besides, testing for the existence of library
- * functions without relying on external build tools is impossible.
- *
- * The method is simple: Overallocate, manually align, and store the offset
- * to the original behind the returned pointer.
- *
- * Align must be a power of 2 and 8 <= align <= 128.
- */
-static void *XXH_alignedMalloc(size_t s, size_t align) {
-
-  XXH_ASSERT(align <= 128 && align >= 8);                    /* range check */
-  XXH_ASSERT((align & (align - 1)) == 0);                     /* power of 2 */
-  XXH_ASSERT(s != 0 && s < (s + align));                  /* empty/overflow */
-  {  /* Overallocate to make room for manual realignment and an offset byte */
-    xxh_u8 *base = (xxh_u8 *)XXH_malloc(s + align);
-    if (base != NULL) {
-
-      /*
-       * Get the offset needed to align this pointer.
-       *
-       * Even if the returned pointer is aligned, there will always be
-       * at least one byte to store the offset to the original pointer.
-       */
-      size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
-      /* Add the offset for the now-aligned pointer */
-      xxh_u8 *ptr = base + offset;
-
-      XXH_ASSERT((size_t)ptr % align == 0);
-
-      /* Store the offset immediately before the returned pointer. */
-      ptr[-1] = (xxh_u8)offset;
-      return ptr;
-
-    }
-
-    return NULL;
-
-  }
-
-}
-
-/*
- * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
- * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
- */
-static void XXH_alignedFree(void *p) {
-
-  if (p != NULL) {
-
-    xxh_u8 *ptr = (xxh_u8 *)p;
-    /* Get the offset byte we added in XXH_alignedMalloc. */
-    xxh_u8 offset = ptr[-1];
-    /* Free the original malloc'd pointer */
-    xxh_u8 *base = ptr - offset;
-    XXH_free(base);
-
-  }
-
-}
-
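The overallocate-and-tag scheme above can be restated standalone. A sketch under the same constraints (align is a power of two in 8..128, so the offset always fits in the tag byte); the names here are illustrative, not part of the library:

#include <stdint.h>
#include <stdlib.h>

static void *aligned_malloc(size_t size, size_t align) {
  uint8_t *base = (uint8_t *)malloc(size + align);
  if (base == NULL) return NULL;
  /* Round up to the next multiple of align; offset is in 1..align, so at
   * least one spare byte always exists below the returned pointer. */
  size_t   offset = align - ((uintptr_t)base & (align - 1));
  uint8_t *ptr = base + offset;
  ptr[-1] = (uint8_t)offset;             /* tag byte: distance back to base */
  return ptr;
}

static void aligned_free(void *p) {
  if (p != NULL) {
    uint8_t *ptr = (uint8_t *)p;
    free(ptr - ptr[-1]);                 /* recover the original pointer */
  }
}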
-XXH_PUBLIC_API XXH3_state_t *XXH3_createState(void) {
-
-  return (XXH3_state_t *)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
-
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t *statePtr) {
-
-  XXH_alignedFree(statePtr);
-  return XXH_OK;
-
-}
-
-XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t *      dst_state,
-                                   const XXH3_state_t *src_state) {
-
-  memcpy(dst_state, src_state, sizeof(*dst_state));
-
-}
-
-static void XXH3_64bits_reset_internal(XXH3_state_t *statePtr,
-                                       XXH64_hash_t seed, const xxh_u8 *secret,
-                                       size_t secretSize) {
-
-  XXH_ASSERT(statePtr != NULL);
-  memset(statePtr, 0, sizeof(*statePtr));
-  statePtr->acc[0] = XXH_PRIME32_3;
-  statePtr->acc[1] = XXH_PRIME64_1;
-  statePtr->acc[2] = XXH_PRIME64_2;
-  statePtr->acc[3] = XXH_PRIME64_3;
-  statePtr->acc[4] = XXH_PRIME64_4;
-  statePtr->acc[5] = XXH_PRIME32_2;
-  statePtr->acc[6] = XXH_PRIME64_5;
-  statePtr->acc[7] = XXH_PRIME32_1;
-  statePtr->seed = seed;
-  XXH_ASSERT(secret != NULL);
-  statePtr->extSecret = secret;
-  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
-  statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
-  statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
-
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t *statePtr) {
-
-  if (statePtr == NULL) return XXH_ERROR;
-  XXH3_64bits_reset_internal(statePtr, 0, XXH3_kSecret,
-                             XXH_SECRET_DEFAULT_SIZE);
-  return XXH_OK;
-
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(
-    XXH3_state_t *statePtr, const void *secret, size_t secretSize) {
-
-  if (statePtr == NULL) return XXH_ERROR;
-  XXH3_64bits_reset_internal(statePtr, 0, (const xxh_u8 *)secret, secretSize);
-  if (secret == NULL) return XXH_ERROR;
-  if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
-  return XXH_OK;
-
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t *statePtr,
-                                                        XXH64_hash_t  seed) {
-
-  if (statePtr == NULL) return XXH_ERROR;
-  XXH3_64bits_reset_internal(statePtr, seed, XXH3_kSecret,
-                             XXH_SECRET_DEFAULT_SIZE);
-  XXH3_initCustomSecret(statePtr->customSecret, seed);
-  statePtr->extSecret = NULL;
-  return XXH_OK;
-
-}
-
-XXH_FORCE_INLINE void XXH3_consumeStripes(
-    xxh_u64 *XXH_RESTRICT acc, size_t *XXH_RESTRICT nbStripesSoFarPtr,
-    size_t nbStripesPerBlock, const xxh_u8 *XXH_RESTRICT input,
-    size_t totalStripes, const xxh_u8 *XXH_RESTRICT secret, size_t secretLimit,
-    XXH3_accWidth_e accWidth, XXH3_f_accumulate_512 f_acc512,
-    XXH3_f_scrambleAcc f_scramble) {
-
-  XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
-  if (nbStripesPerBlock - *nbStripesSoFarPtr <= totalStripes) {
-
-    /* need a scrambling operation */
-    size_t const nbStripes = nbStripesPerBlock - *nbStripesSoFarPtr;
-    XXH3_accumulate(acc, input,
-                    secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE,
-                    nbStripes, accWidth, f_acc512);
-    f_scramble(acc, secret + secretLimit);
-    XXH3_accumulate(acc, input + nbStripes * XXH_STRIPE_LEN, secret,
-                    totalStripes - nbStripes, accWidth, f_acc512);
-    *nbStripesSoFarPtr = totalStripes - nbStripes;
-
-  } else {
-
-    XXH3_accumulate(acc, input,
-                    secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE,
-                    totalStripes, accWidth, f_acc512);
-    *nbStripesSoFarPtr += totalStripes;
-
-  }
-
-}
-
-/*
- * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
- */
-XXH_FORCE_INLINE XXH_errorcode XXH3_update(XXH3_state_t *state,
-                                           const xxh_u8 *input, size_t len,
-                                           XXH3_accWidth_e       accWidth,
-                                           XXH3_f_accumulate_512 f_acc512,
-                                           XXH3_f_scrambleAcc    f_scramble) {
-
-  if (input == NULL)
-#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
-    (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
-    return XXH_OK;
-#else
-    return XXH_ERROR;
-#endif
-
-  {
-
-    const xxh_u8 *const        bEnd = input + len;
-    const unsigned char *const secret =
-        (state->extSecret == NULL) ? state->customSecret : state->extSecret;
-
-    state->totalLen += len;
-
-    if (state->bufferedSize + len <=
-        XXH3_INTERNALBUFFER_SIZE) {                   /* fill in tmp buffer */
-      XXH_memcpy(state->buffer + state->bufferedSize, input, len);
-      state->bufferedSize += (XXH32_hash_t)len;
-      return XXH_OK;
-
-    }
-
-    /* input is now > XXH3_INTERNALBUFFER_SIZE */
-
-#define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
-    XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN ==
-                      0);                                 /* clean multiple */
-
-    /*
-     * There is some input left inside the internal buffer.
-     * Fill it, then consume it.
-     */
-    if (state->bufferedSize) {
-
-      size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
-      XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
-      input += loadSize;
-      XXH3_consumeStripes(state->acc, &state->nbStripesSoFar,
-                          state->nbStripesPerBlock, state->buffer,
-                          XXH3_INTERNALBUFFER_STRIPES, secret,
-                          state->secretLimit, accWidth, f_acc512, f_scramble);
-      state->bufferedSize = 0;
-
-    }
-
-    /* Consume input by full buffer quantities */
-    if (input + XXH3_INTERNALBUFFER_SIZE <= bEnd) {
-
-      const xxh_u8 *const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
-      do {
-
-        XXH3_consumeStripes(state->acc, &state->nbStripesSoFar,
-                            state->nbStripesPerBlock, input,
-                            XXH3_INTERNALBUFFER_STRIPES, secret,
-                            state->secretLimit, accWidth, f_acc512, f_scramble);
-        input += XXH3_INTERNALBUFFER_SIZE;
-
-      } while (input <= limit);
-
-      /* for last partial stripe */
-      memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN,
-             input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
-
-    }
-
-    if (input < bEnd) {                  /* Some remaining input: buffer it */
-      XXH_memcpy(state->buffer, input, (size_t)(bEnd - input));
-      state->bufferedSize = (XXH32_hash_t)(bEnd - input);
-
-    }
-
-  }
-
-  return XXH_OK;
-
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update(XXH3_state_t *state,
-                                                const void *input, size_t len) {
-
-  return XXH3_update(state, (const xxh_u8 *)input, len, XXH3_acc_64bits,
-                     XXH3_accumulate_512, XXH3_scrambleAcc);
-
-}
-
-XXH_FORCE_INLINE void XXH3_digest_long(XXH64_hash_t *       acc,
-                                       const XXH3_state_t * state,
-                                       const unsigned char *secret,
-                                       XXH3_accWidth_e      accWidth) {
-
-  /*
-   * Digest on a local copy. This way, the state remains unaltered, and it can
-   * continue ingesting more input afterwards.
-   */
-  memcpy(acc, state->acc, sizeof(state->acc));
-  if (state->bufferedSize >= XXH_STRIPE_LEN) {
-
-    size_t const nbStripes = state->bufferedSize / XXH_STRIPE_LEN;
-    size_t       nbStripesSoFar = state->nbStripesSoFar;
-    XXH3_consumeStripes(acc, &nbStripesSoFar, state->nbStripesPerBlock,
-                        state->buffer, nbStripes, secret, state->secretLimit,
-                        accWidth, XXH3_accumulate_512, XXH3_scrambleAcc);
-    if (state->bufferedSize % XXH_STRIPE_LEN) {  /* one last partial stripe */
-      XXH3_accumulate_512(
-          acc, state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
-          secret + state->secretLimit - XXH_SECRET_LASTACC_START, accWidth);
-
-    }
-
-  } else {                                 /* bufferedSize < XXH_STRIPE_LEN */
-
-    if (state->bufferedSize) {                           /* one last stripe */
-      xxh_u8       lastStripe[XXH_STRIPE_LEN];
-      size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
-      memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize,
-             catchupSize);
-      memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
-      XXH3_accumulate_512(
-          acc, lastStripe,
-          secret + state->secretLimit - XXH_SECRET_LASTACC_START, accWidth);
-
-    }
-
-  }
-
-}
-
-XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest(const XXH3_state_t *state) {
-
-  const unsigned char *const secret =
-      (state->extSecret == NULL) ? state->customSecret : state->extSecret;
-  if (state->totalLen > XXH3_MIDSIZE_MAX) {
-
-    XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
-    XXH3_digest_long(acc, state, secret, XXH3_acc_64bits);
-    return XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START,
-                          (xxh_u64)state->totalLen * XXH_PRIME64_1);
-
-  }
-
-  /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
-  if (state->seed)
-    return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen,
-                                state->seed);
-  return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
-                                secret, state->secretLimit + XXH_STRIPE_LEN);
-
-}
-
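A streaming usage sketch to go with the reset/update/digest trio above, assuming the same header-only setup as in the earlier sketch:

#define XXH_INLINE_ALL
#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void) {
  XXH3_state_t *st = XXH3_createState();
  if (st == NULL || XXH3_64bits_reset(st) != XXH_OK) return 1;
  const char *chunks[] = {"split ", "input, ", "same hash"};
  for (size_t i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++)
    XXH3_64bits_update(st, chunks[i], strlen(chunks[i]));
  /* digest works on a local copy, so the state can keep ingesting after. */
  XXH64_hash_t h = XXH3_64bits_digest(st);
  XXH3_freeState(st);
  printf("%016llx\n", (unsigned long long)h);
  return 0;
}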
-#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
-
-XXH_PUBLIC_API void XXH3_generateSecret(void *      secretBuffer,
-                                        const void *customSeed,
-                                        size_t      customSeedSize) {
-
-  XXH_ASSERT(secretBuffer != NULL);
-  if (customSeedSize == 0) {
-
-    memcpy(secretBuffer, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
-    return;
-
-  }
-
-  XXH_ASSERT(customSeed != NULL);
-
-  {
-
-    size_t const       segmentSize = sizeof(XXH128_hash_t);
-    size_t const       nbSegments = XXH_SECRET_DEFAULT_SIZE / segmentSize;
-    XXH128_canonical_t scrambler;
-    XXH64_hash_t       seeds[12];
-    size_t             segnb;
-    XXH_ASSERT(nbSegments == 12);
-    XXH_ASSERT(segmentSize * nbSegments ==
-               XXH_SECRET_DEFAULT_SIZE);                  /* exact multiple */
-    XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
-
-    /*
-     * Copy customSeed to seeds[], truncating or repeating as necessary.
-     */
-    {
-
-      size_t toFill = XXH_MIN(customSeedSize, sizeof(seeds));
-      size_t filled = toFill;
-      memcpy(seeds, customSeed, toFill);
-      while (filled < sizeof(seeds)) {
-
-        toFill = XXH_MIN(filled, sizeof(seeds) - filled);
-        memcpy((char *)seeds + filled, seeds, toFill);
-        filled += toFill;
-
-      }
-
-    }
-
-    /* generate secret */
-    memcpy(secretBuffer, &scrambler, sizeof(scrambler));
-    for (segnb = 1; segnb < nbSegments; segnb++) {
-
-      size_t const       segmentStart = segnb * segmentSize;
-      XXH128_canonical_t segment;
-      XXH128_canonicalFromHash(&segment,
-                               XXH128(&scrambler, sizeof(scrambler),
-                                      XXH_readLE64(seeds + segnb) + segnb));
-      memcpy((char *)secretBuffer + segmentStart, &segment, sizeof(segment));
-
-    }
-
-  }
-
-}
-
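A sketch of the intended use: turn a low-entropy blob into a full-entropy secret, then hash with it. MY_SECRET_SIZE is a local stand-in for XXH_SECRET_DEFAULT_SIZE (192 in this version), and the helper name is hypothetical:

#include "xxhash.h"

#define MY_SECRET_SIZE 192         /* mirrors XXH_SECRET_DEFAULT_SIZE above */

XXH64_hash_t hash_with_weak_seed(const void *data, size_t len,
                                 const void *weak_seed, size_t seed_len) {
  unsigned char secret[MY_SECRET_SIZE];
  /* Derives 192 high-entropy bytes from an arbitrary custom seed. */
  XXH3_generateSecret(secret, weak_seed, seed_len);
  return XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
}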
-/* ==========================================
- * XXH3 128 bits (a.k.a XXH128)
- * ==========================================
- * XXH3's 128-bit variant has better mixing and strength than the 64-bit
- * variant, even without counting the significantly larger output size.
- *
- * For example, extra steps are taken to avoid the seed-dependent collisions
- * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
- *
- * This strength naturally comes at the cost of some speed, especially on short
- * lengths. Note that hashing long inputs is about as fast as the 64-bit
- * version, since it uses only a slight modification of the 64-bit loop.
- *
- * XXH128 is also more oriented towards 64-bit machines. It is still extremely
- * fast for a _128-bit_ hash on 32-bit (it usually outperforms XXH64).
- */
-
-XXH_FORCE_INLINE XXH128_hash_t XXH3_len_1to3_128b(const xxh_u8 *input,
-                                                  size_t        len,
-                                                  const xxh_u8 *secret,
-                                                  XXH64_hash_t  seed) {
-
-  /* A doubled version of 1to3_64b with different constants. */
-  XXH_ASSERT(input != NULL);
-  XXH_ASSERT(1 <= len && len <= 3);
-  XXH_ASSERT(secret != NULL);
-  /*
-   * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
-   * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
-   * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
-   */
-  {
-
-    xxh_u8 const  c1 = input[0];
-    xxh_u8 const  c2 = input[len >> 1];
-    xxh_u8 const  c3 = input[len - 1];
-    xxh_u32 const combinedl = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) |
-                              ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
-    xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
-    xxh_u64 const bitflipl =
-        (XXH_readLE32(secret) ^ XXH_readLE32(secret + 4)) + seed;
-    xxh_u64 const bitfliph =
-        (XXH_readLE32(secret + 8) ^ XXH_readLE32(secret + 12)) - seed;
-    xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
-    xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
-    xxh_u64 const mixedl = keyed_lo * XXH_PRIME64_1;
-    xxh_u64 const mixedh = keyed_hi * XXH_PRIME64_5;
-    XXH128_hash_t h128;
-    h128.low64 = XXH3_avalanche(mixedl);
-    h128.high64 = XXH3_avalanche(mixedh);
-    return h128;
-
-  }
-
-}
-
-XXH_FORCE_INLINE XXH128_hash_t XXH3_len_4to8_128b(const xxh_u8 *input,
-                                                  size_t        len,
-                                                  const xxh_u8 *secret,
-                                                  XXH64_hash_t  seed) {
-
-  XXH_ASSERT(input != NULL);
-  XXH_ASSERT(secret != NULL);
-  XXH_ASSERT(4 <= len && len <= 8);
-  seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
-  {
-
-    xxh_u32 const input_lo = XXH_readLE32(input);
-    xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
-    xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
-    xxh_u64 const bitflip =
-        (XXH_readLE64(secret + 16) ^ XXH_readLE64(secret + 24)) + seed;
-    xxh_u64 const keyed = input_64 ^ bitflip;
-
-    /* Shift len left so the multiplier XXH_PRIME64_1 + (len << 2) stays odd;
-     * an even multiplier would discard entropy in the low bits. */
-    XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
-
-    m128.high64 += (m128.low64 << 1);
-    m128.low64 ^= (m128.high64 >> 3);
-
-    m128.low64 = XXH_xorshift64(m128.low64, 35);
-    m128.low64 *= 0x9FB21C651E98DF25ULL;
-    m128.low64 = XXH_xorshift64(m128.low64, 28);
-    m128.high64 = XXH3_avalanche(m128.high64);
-    return m128;
-
-  }
-
-}
-
-XXH_FORCE_INLINE XXH128_hash_t XXH3_len_9to16_128b(const xxh_u8 *input,
-                                                   size_t        len,
-                                                   const xxh_u8 *secret,
-                                                   XXH64_hash_t  seed) {
-
-  XXH_ASSERT(input != NULL);
-  XXH_ASSERT(secret != NULL);
-  XXH_ASSERT(9 <= len && len <= 16);
-  {
-
-    xxh_u64 const bitflipl =
-        (XXH_readLE64(secret + 32) ^ XXH_readLE64(secret + 40)) - seed;
-    xxh_u64 const bitfliph =
-        (XXH_readLE64(secret + 48) ^ XXH_readLE64(secret + 56)) + seed;
-    xxh_u64 const input_lo = XXH_readLE64(input);
-    xxh_u64       input_hi = XXH_readLE64(input + len - 8);
-    XXH128_hash_t m128 =
-        XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
-    /*
-     * Put len in the middle of m128 to ensure that the length gets mixed to
-     * both the low and high bits in the 128x64 multiply below.
-     */
-    m128.low64 += (xxh_u64)(len - 1) << 54;
-    input_hi ^= bitfliph;
-    /*
-     * Add the high 32 bits of input_hi to the high 32 bits of m128, then
-     * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
-     * the high 64 bits of m128.
-     *
-     * The best approach to this operation is different on 32-bit and 64-bit.
-     */
-    if (sizeof(void *) < sizeof(xxh_u64)) {                       /* 32-bit */
-      /*
-       * 32-bit optimized version, which is more readable.
-       *
-       * On 32-bit, it removes an ADC and delays a dependency between the two
-       * halves of m128.high64, but it generates an extra mask on 64-bit.
-       */
-      m128.high64 += (input_hi & 0xFFFFFFFF00000000) +
-                     XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
-
-    } else {
-
-      /*
-       * 64-bit optimized (albeit more confusing) version.
-       *
-       * Uses some properties of addition and multiplication to remove the mask:
-       *
-       * Let:
-       *    a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
-       *    b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
-       *    c = XXH_PRIME32_2
-       *
-       *    a + (b * c)
-       * Inverse Property: x + y - x == y
-       *    a + (b * (1 + c - 1))
-       * Distributive Property: x * (y + z) == (x * y) + (x * z)
-       *    a + (b * 1) + (b * (c - 1))
-       * Identity Property: x * 1 == x
-       *    a + b + (b * (c - 1))
-       *
-       * Substitute a, b, and c:
-       *    input_hi.hi + input_hi.lo
-       *      + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
-       *
-       * Since input_hi.hi + input_hi.lo == input_hi, we get this:
-       *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
-       */
-      m128.high64 +=
-          input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
-
-    }
-
-    /* m128 ^= XXH_swap64(m128 >> 64); */
-    m128.low64 ^= XXH_swap64(m128.high64);
-
-    {                      /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
-      XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
-      h128.high64 += m128.high64 * XXH_PRIME64_2;
-
-      h128.low64 = XXH3_avalanche(h128.low64);
-      h128.high64 = XXH3_avalanche(h128.high64);
-      return h128;
-
-    }
-
-  }
-
-}
-
-/*
- * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
- */
-XXH_FORCE_INLINE XXH128_hash_t XXH3_len_0to16_128b(const xxh_u8 *input,
-                                                   size_t        len,
-                                                   const xxh_u8 *secret,
-                                                   XXH64_hash_t  seed) {
-
-  XXH_ASSERT(len <= 16);
-  {
-
-    if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
-    if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
-    if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
-    {
-
-      XXH128_hash_t h128;
-      xxh_u64 const bitflipl =
-          XXH_readLE64(secret + 64) ^ XXH_readLE64(secret + 72);
-      xxh_u64 const bitfliph =
-          XXH_readLE64(secret + 80) ^ XXH_readLE64(secret + 88);
-      h128.low64 = XXH3_avalanche((XXH_PRIME64_1 + seed) ^ bitflipl);
-      h128.high64 = XXH3_avalanche((XXH_PRIME64_2 - seed) ^ bitfliph);
-      return h128;
-
-    }
-
-  }
-
-}
-
-/*
- * A bit slower than XXH3_mix16B, but handles multiply by zero better.
- */
-XXH_FORCE_INLINE XXH128_hash_t XXH128_mix32B(XXH128_hash_t acc,
-                                             const xxh_u8 *input_1,
-                                             const xxh_u8 *input_2,
-                                             const xxh_u8 *secret,
-                                             XXH64_hash_t  seed) {
-
-  acc.low64 += XXH3_mix16B(input_1, secret + 0, seed);
-  acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
-  acc.high64 += XXH3_mix16B(input_2, secret + 16, seed);
-  acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
-  return acc;
-
-}
-
-XXH_FORCE_INLINE XXH128_hash_t XXH3_len_17to128_128b(
-    const xxh_u8 *XXH_RESTRICT input, size_t len,
-    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) {
-
-  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
-  (void)secretSize;
-  XXH_ASSERT(16 < len && len <= 128);
-
-  {
-
-    XXH128_hash_t acc;
-    acc.low64 = len * XXH_PRIME64_1;
-    acc.high64 = 0;
-    if (len > 32) {
-
-      if (len > 64) {
-
-        if (len > 96) {
-
-          acc = XXH128_mix32B(acc, input + 48, input + len - 64, secret + 96,
-                              seed);
-
-        }
-
-        acc =
-            XXH128_mix32B(acc, input + 32, input + len - 48, secret + 64, seed);
-
-      }
-
-      acc = XXH128_mix32B(acc, input + 16, input + len - 32, secret + 32, seed);
-
-    }
-
-    acc = XXH128_mix32B(acc, input, input + len - 16, secret, seed);
-    {
-
-      XXH128_hash_t h128;
-      h128.low64 = acc.low64 + acc.high64;
-      h128.high64 = (acc.low64 * XXH_PRIME64_1) + (acc.high64 * XXH_PRIME64_4) +
-                    ((len - seed) * XXH_PRIME64_2);
-      h128.low64 = XXH3_avalanche(h128.low64);
-      h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
-      return h128;
-
-    }
-
-  }
-
-}
-
-XXH_NO_INLINE XXH128_hash_t XXH3_len_129to240_128b(
-    const xxh_u8 *XXH_RESTRICT input, size_t len,
-    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) {
-
-  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
-  (void)secretSize;
-  XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
-
-  {
-
-    XXH128_hash_t acc;
-    int const     nbRounds = (int)len / 32;
-    int           i;
-    acc.low64 = len * XXH_PRIME64_1;
-    acc.high64 = 0;
-    for (i = 0; i < 4; i++) {
-
-      acc = XXH128_mix32B(acc, input + (32 * i), input + (32 * i) + 16,
-                          secret + (32 * i), seed);
-
-    }
-
-    acc.low64 = XXH3_avalanche(acc.low64);
-    acc.high64 = XXH3_avalanche(acc.high64);
-    XXH_ASSERT(nbRounds >= 4);
-    for (i = 4; i < nbRounds; i++) {
-
-      acc = XXH128_mix32B(acc, input + (32 * i), input + (32 * i) + 16,
-                          secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
-                          seed);
-
-    }
-
-    /* last bytes */
-    acc = XXH128_mix32B(
-        acc, input + len - 16, input + len - 32,
-        secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
-        0ULL - seed);
-
-    {
-
-      XXH128_hash_t h128;
-      h128.low64 = acc.low64 + acc.high64;
-      h128.high64 = (acc.low64 * XXH_PRIME64_1) + (acc.high64 * XXH_PRIME64_4) +
-                    ((len - seed) * XXH_PRIME64_2);
-      h128.low64 = XXH3_avalanche(h128.low64);
-      h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
-      return h128;
-
-    }
-
-  }
-
-}
-
-XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_internal(
-    const xxh_u8 *XXH_RESTRICT input, size_t len,
-    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize,
-    XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble) {
-
-  XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
-
-  XXH3_hashLong_internal_loop(acc, input, len, secret, secretSize,
-                              XXH3_acc_128bits, f_acc512, f_scramble);
-
-  /* converge into final hash */
-  XXH_STATIC_ASSERT(sizeof(acc) == 64);
-  XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
-  {
-
-    XXH128_hash_t h128;
-    h128.low64 = XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START,
-                                (xxh_u64)len * XXH_PRIME64_1);
-    h128.high64 = XXH3_mergeAccs(
-        acc, secret + secretSize - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
-        ~((xxh_u64)len * XXH_PRIME64_2));
-    return h128;
-
-  }
-
-}
-
-/*
- * It's important for performance that XXH3_hashLong is not inlined.
- */
-XXH_NO_INLINE XXH128_hash_t XXH3_hashLong_128b_defaultSecret(
-    const xxh_u8 *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
-    const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) {
-
-  (void)seed64;
-  (void)secret;
-  (void)secretLen;
-  return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret,
-                                     sizeof(XXH3_kSecret), XXH3_accumulate_512,
-                                     XXH3_scrambleAcc);
-
-}
-
-/*
- * It's important for performance that XXH3_hashLong is not inlined.
- */
-XXH_NO_INLINE XXH128_hash_t XXH3_hashLong_128b_withSecret(
-    const xxh_u8 *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
-    const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) {
-
-  (void)seed64;
-  return XXH3_hashLong_128b_internal(input, len, secret, secretLen,
-                                     XXH3_accumulate_512, XXH3_scrambleAcc);
-
-}
-
-XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_withSeed_internal(
-    const xxh_u8 *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
-    XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble,
-    XXH3_f_initCustomSecret f_initSec) {
-
-  if (seed64 == 0)
-    return XXH3_hashLong_128b_internal(
-        input, len, XXH3_kSecret, sizeof(XXH3_kSecret), f_acc512, f_scramble);
-  {
-
-    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
-    f_initSec(secret, seed64);
-    return XXH3_hashLong_128b_internal(input, len, secret, sizeof(secret),
-                                       f_acc512, f_scramble);
-
-  }
-
-}
-
-/*
- * It's important for performance that XXH3_hashLong is not inlined.
- */
-XXH_NO_INLINE XXH128_hash_t XXH3_hashLong_128b_withSeed(
-    const xxh_u8 *input, size_t len, XXH64_hash_t seed64,
-    const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) {
-
-  (void)secret;
-  (void)secretLen;
-  return XXH3_hashLong_128b_withSeed_internal(
-      input, len, seed64, XXH3_accumulate_512, XXH3_scrambleAcc,
-      XXH3_initCustomSecret);
-
-}
-
-typedef XXH128_hash_t (*XXH3_hashLong128_f)(const xxh_u8 *XXH_RESTRICT, size_t,
-                                            XXH64_hash_t,
-                                            const xxh_u8 *XXH_RESTRICT, size_t);
-
-XXH_FORCE_INLINE XXH128_hash_t
-XXH3_128bits_internal(const void *input, size_t len, XXH64_hash_t seed64,
-                      const xxh_u8 *XXH_RESTRICT secret, size_t secretLen,
-                      XXH3_hashLong128_f f_hl128) {
-
-  XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
-  /*
-   * If an action is to be taken if `secret` conditions are not respected,
-   * it should be done here.
-   * For now, it's a contract pre-condition.
-   * Adding a check and a branch here would cost performance at every hash.
-   */
-  if (len <= 16)
-    return XXH3_len_0to16_128b((const xxh_u8 *)input, len, secret, seed64);
-  if (len <= 128)
-    return XXH3_len_17to128_128b((const xxh_u8 *)input, len, secret, secretLen,
-                                 seed64);
-  if (len <= XXH3_MIDSIZE_MAX)
-    return XXH3_len_129to240_128b((const xxh_u8 *)input, len, secret, secretLen,
-                                  seed64);
-  return f_hl128((const xxh_u8 *)input, len, seed64, secret, secretLen);
-
-}
-
-/* ===   Public XXH128 API   === */
-
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void *input, size_t len) {
-
-  return XXH3_128bits_internal(input, len, 0, XXH3_kSecret,
-                               sizeof(XXH3_kSecret),
-                               XXH3_hashLong_128b_withSecret);
-
-}
-
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void *input,
-                                                     size_t      len,
-                                                     const void *secret,
-                                                     size_t      secretSize) {
-
-  return XXH3_128bits_internal(input, len, 0, (const xxh_u8 *)secret,
-                               secretSize, XXH3_hashLong_128b_defaultSecret);
-
-}
-
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void * input,
-                                                   size_t       len,
-                                                   XXH64_hash_t seed) {
-
-  return XXH3_128bits_internal(input, len, seed, XXH3_kSecret,
-                               sizeof(XXH3_kSecret),
-                               XXH3_hashLong_128b_withSeed);
-
-}
-
-XXH_PUBLIC_API XXH128_hash_t XXH128(const void *input, size_t len,
-                                    XXH64_hash_t seed) {
-
-  return XXH3_128bits_withSeed(input, len, seed);
-
-}
-
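One-shot 128-bit usage, mirroring the 64-bit sketch earlier (XXH128 is the seeded convenience wrapper just above):

#define XXH_INLINE_ALL
#include <stdio.h>
#include <string.h>
#include "xxhash.h"

int main(void) {
  const char   *msg = "xxh3";
  XXH128_hash_t h = XXH128(msg, strlen(msg), 0); /* == XXH3_128bits_withSeed */
  printf("%016llx%016llx\n", (unsigned long long)h.high64,
         (unsigned long long)h.low64);
  return 0;
}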
-/* ===   XXH3 128-bit streaming   === */
-
-/*
- * All the functions are actually the same as for the 64-bit streaming variant.
- * The only difference is the finalization routine.
- */
-
-static void XXH3_128bits_reset_internal(XXH3_state_t *statePtr,
-                                        XXH64_hash_t seed, const xxh_u8 *secret,
-                                        size_t secretSize) {
-
-  XXH3_64bits_reset_internal(statePtr, seed, secret, secretSize);
-
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t *statePtr) {
-
-  if (statePtr == NULL) return XXH_ERROR;
-  XXH3_128bits_reset_internal(statePtr, 0, XXH3_kSecret,
-                              XXH_SECRET_DEFAULT_SIZE);
-  return XXH_OK;
-
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(
-    XXH3_state_t *statePtr, const void *secret, size_t secretSize) {
-
-  if (statePtr == NULL) return XXH_ERROR;
-  XXH3_128bits_reset_internal(statePtr, 0, (const xxh_u8 *)secret, secretSize);
-  if (secret == NULL) return XXH_ERROR;
-  if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
-  return XXH_OK;
-
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t *statePtr,
-                                                         XXH64_hash_t  seed) {
-
-  if (statePtr == NULL) return XXH_ERROR;
-  XXH3_128bits_reset_internal(statePtr, seed, XXH3_kSecret,
-                              XXH_SECRET_DEFAULT_SIZE);
-  XXH3_initCustomSecret(statePtr->customSecret, seed);
-  statePtr->extSecret = NULL;
-  return XXH_OK;
-
-}
-
-XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update(XXH3_state_t *state,
-                                                 const void *  input,
-                                                 size_t        len) {
-
-  return XXH3_update(state, (const xxh_u8 *)input, len, XXH3_acc_128bits,
-                     XXH3_accumulate_512, XXH3_scrambleAcc);
-
-}
-
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest(const XXH3_state_t *state) {
-
-  const unsigned char *const secret =
-      (state->extSecret == NULL) ? state->customSecret : state->extSecret;
-  if (state->totalLen > XXH3_MIDSIZE_MAX) {
-
-    XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
-    XXH3_digest_long(acc, state, secret, XXH3_acc_128bits);
-    XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >=
-               sizeof(acc) + XXH_SECRET_MERGEACCS_START);
-    {
-
-      XXH128_hash_t h128;
-      h128.low64 = XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START,
-                                  (xxh_u64)state->totalLen * XXH_PRIME64_1);
-      h128.high64 =
-          XXH3_mergeAccs(acc,
-                         secret + state->secretLimit + XXH_STRIPE_LEN -
-                             sizeof(acc) - XXH_SECRET_MERGEACCS_START,
-                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
-      return h128;
-
-    }
-
-  }
-
-  /* len <= XXH3_MIDSIZE_MAX : short code */
-  if (state->seed)
-    return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen,
-                                 state->seed);
-  return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
-                                 secret, state->secretLimit + XXH_STRIPE_LEN);
-
-}
-
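A 128-bit streaming sketch; the flow is identical to the 64-bit one, only the digest call differs (the helper name is illustrative):

#include "xxhash.h"

XXH128_hash_t digest128_of(const void *data, size_t len) {
  XXH128_hash_t h = {0, 0};
  XXH3_state_t *st = XXH3_createState();
  if (st != NULL && XXH3_128bits_reset(st) == XXH_OK) {
    XXH3_128bits_update(st, data, len);
    h = XXH3_128bits_digest(st);
  }
  XXH3_freeState(st);        /* safe on NULL: the aligned free checks first */
  return h;
}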
-/* 128-bit utility functions */
-
-#include <string.h>                                       /* memcmp, memcpy */
-
-/* return: 1 if equal, 0 if different */
-XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) {
-
-  /* note: XXH128_hash_t is compact; it has no padding bytes */
-  return !(memcmp(&h1, &h2, sizeof(h1)));
-
-}
-
-/* This prototype is compatible with stdlib's qsort().
- * return : >0 if *h128_1  > *h128_2
- *          <0 if *h128_1  < *h128_2
- *          =0 if *h128_1 == *h128_2  */
-XXH_PUBLIC_API int XXH128_cmp(const void *h128_1, const void *h128_2) {
-
-  XXH128_hash_t const h1 = *(const XXH128_hash_t *)h128_1;
-  XXH128_hash_t const h2 = *(const XXH128_hash_t *)h128_2;
-  int const           hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
-  /* note: assumes that, in most cases, hash values differ */
-  if (hcmp) return hcmp;
-  return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
-
-}
-
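Since XXH128_cmp has qsort's comparator shape, sorting an array of hashes needs no adapter; a sketch:

#include <stdlib.h>
#include "xxhash.h"

void sort_hashes(XXH128_hash_t *hashes, size_t n) {
  /* Orders by high64 first, then low64, as implemented above. */
  qsort(hashes, n, sizeof(XXH128_hash_t), XXH128_cmp);
}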
-/*======   Canonical representation   ======*/
-XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t *dst,
-                                             XXH128_hash_t       hash) {
-
-  XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
-  if (XXH_CPU_LITTLE_ENDIAN) {
-
-    hash.high64 = XXH_swap64(hash.high64);
-    hash.low64 = XXH_swap64(hash.low64);
-
-  }
-
-  memcpy(dst, &hash.high64, sizeof(hash.high64));
-  memcpy((char *)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
-
-}
-
-XXH_PUBLIC_API XXH128_hash_t
-XXH128_hashFromCanonical(const XXH128_canonical_t *src) {
-
-  XXH128_hash_t h;
-  h.high64 = XXH_readBE64(src);
-  h.low64 = XXH_readBE64(src->digest + 8);
-  return h;
-
-}
-
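A round-trip sketch of the canonical form: serialization is big-endian with high64 first, so a stored value survives any endianness:

#include <assert.h>
#include "xxhash.h"

void canonical_roundtrip(XXH128_hash_t h) {
  XXH128_canonical_t canon;
  XXH128_canonicalFromHash(&canon, h);   /* 16 bytes, portable byte order */
  assert(XXH128_isEqual(h, XXH128_hashFromCanonical(&canon)));
}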
-/* Pop our optimization override from above */
-#if XXH_VECTOR == XXH_AVX2                      /* AVX2 */           \
-    && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
-    && defined(__OPTIMIZE__) &&                                      \
-    !defined(__OPTIMIZE_SIZE__)                      /* respect -O0 and -Os */
-  #pragma GCC pop_options
-#endif
-
-#endif                                                 /* XXH3_H_1397135465 */
-
diff --git a/include/xxhash.h b/include/xxhash.h
index 826f39bd..006d3f3d 100644
--- a/include/xxhash.h
+++ b/include/xxhash.h
@@ -197,6 +197,7 @@ extern "C" {
     #define XXH_CAT(A, B) A##B
     #define XXH_NAME2(A, B) XXH_CAT(A, B)
     #define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+    /* XXH32 */
     #define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
     #define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
     #define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
@@ -208,6 +209,7 @@ extern "C" {
       XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
     #define XXH32_hashFromCanonical \
       XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+    /* XXH64 */
     #define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
     #define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
     #define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
@@ -219,14 +221,50 @@ extern "C" {
       XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
     #define XXH64_hashFromCanonical \
       XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+    /* XXH3_64bits */
+    #define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
+    #define XXH3_64bits_withSecret \
+      XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
+    #define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
+    #define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
+    #define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
+    #define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
+    #define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
+    #define XXH3_64bits_reset_withSeed \
+      XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
+    #define XXH3_64bits_reset_withSecret \
+      XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
+    #define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
+    #define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
+    #define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
+    /* XXH3_128bits */
+    #define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
+    #define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
+    #define XXH3_128bits_withSeed \
+      XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
+    #define XXH3_128bits_withSecret \
+      XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
+    #define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
+    #define XXH3_128bits_reset_withSeed \
+      XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
+    #define XXH3_128bits_reset_withSecret \
+      XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
+    #define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
+    #define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
+    #define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
+    #define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
+    #define XXH128_canonicalFromHash \
+      XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
+    #define XXH128_hashFromCanonical \
+      XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
   #endif
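The block above extends symbol renaming to the whole XXH3/XXH128 family. A hypothetical use (the AFL_ prefix is illustrative): define the namespace before the include, and call sites keep the plain names:

#define XXH_NAMESPACE AFL_
#include "xxhash.h"
/* XXH3_64bits(...) now expands to AFL_XXH3_64bits(...), so this copy can
 * coexist with a system libxxhash in the same binary. */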
 
   /* *************************************
    *  Version
    ***************************************/
   #define XXH_VERSION_MAJOR 0
-  #define XXH_VERSION_MINOR 7
-  #define XXH_VERSION_RELEASE 4
+  #define XXH_VERSION_MINOR 8
+  #define XXH_VERSION_RELEASE 0
   #define XXH_VERSION_NUMBER                                   \
     (XXH_VERSION_MAJOR * 100 * 100 + XXH_VERSION_MINOR * 100 + \
      XXH_VERSION_RELEASE)
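With 0.8.0 the formula evaluates to 0*10000 + 8*100 + 0 = 800, which allows a compile-time floor check; a sketch, relevant because XXH3 output was only frozen at this release:

#include "xxhash.h"

#if XXH_VERSION_NUMBER < 800
  #error "need xxHash >= 0.8.0: XXH3/XXH128 output is only stable from there"
#endif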
@@ -401,145 +439,56 @@ XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst,
 XXH_PUBLIC_API XXH64_hash_t
 XXH64_hashFromCanonical(const XXH64_canonical_t *src);
 
-  #endif                                                /* XXH_NO_LONG_LONG */
-
-#endif                                         /* XXHASH_H_5627135585666179 */
-
-#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
-  #define XXHASH_H_STATIC_13879238742
-/* ****************************************************************************
- * This section contains declarations which are not guaranteed to remain stable.
- * They may change in future versions, becoming incompatible with a different
- * version of the library.
- * These declarations should only be used with static linking.
- * Never use them in association with dynamic linking!
- *****************************************************************************
- */
+/*-**********************************************************************
+ *  XXH3 64-bit variant
+ ************************************************************************/
 
-/*
- * These definitions are only present to allow static allocation of an XXH
- * state, for example, on the stack or in a struct.
- * Never **ever** access members directly.
+/* ************************************************************************
+ * XXH3 is a new hash algorithm featuring:
+ *  - Improved speed for both small and large inputs
+ *  - True 64-bit and 128-bit outputs
+ *  - SIMD acceleration
+ *  - Improved 32-bit viability
+ *
+ * Speed analysis methodology is explained here:
+ *
+ *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
+ *
+ * In general, expect XXH3 to run ~2x faster on large inputs and >3x
+ * faster on small ones compared to XXH64, though exact differences depend on
+ * the platform.
+ *
+ * The algorithm is portable: Like XXH32 and XXH64, it generates the same hash
+ * on all platforms.
+ *
+ * It benefits greatly from SIMD and 64-bit arithmetic, but does not require them.
+ *
+ * Almost all 32-bit and 64-bit targets that can run XXH32 smoothly can run
+ * XXH3 at competitive speeds, even if XXH64 runs slowly. Further details are
+ * explained in the implementation.
+ *
+ * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
+ * ZVector and scalar targets. This can be controlled with the XXH_VECTOR macro.
+ *
+ * XXH3 offers 2 variants, _64bits and _128bits.
+ * When only 64 bits are needed, prefer calling the _64bits variant, as it
+ * reduces the amount of mixing, resulting in faster speed on small inputs.
+ *
+ * It's also generally simpler to manipulate a scalar return type than a struct.
+ *
+ * The 128-bit version offers additional strength, but it is slightly slower.
+ *
+ * Return values of XXH3 and XXH128 are officially finalized starting
+ * with v0.8.0 and will no longer change in future versions.
+ * Avoid storing values from before that release in long-term storage.
+ *
+ * Results produced by v0.7.x are not comparable with results from v0.7.y.
+ * However, the API is completely stable, and it can safely be used for
+ * ephemeral data (local sessions).
+ *
+ * The API supports one-shot hashing, streaming mode, and custom secrets.
  */
 
-struct XXH32_state_s {
-
-  XXH32_hash_t total_len_32;
-  XXH32_hash_t large_len;
-  XXH32_hash_t v1;
-  XXH32_hash_t v2;
-  XXH32_hash_t v3;
-  XXH32_hash_t v4;
-  XXH32_hash_t mem32[4];
-  XXH32_hash_t memsize;
-  XXH32_hash_t
-      reserved; /* never read nor write, might be removed in a future version */
-
-};                                            /* typedef'd to XXH32_state_t */
-
-  #ifndef XXH_NO_LONG_LONG       /* defined when there is no 64-bit support */
-
-struct XXH64_state_s {
-
-  XXH64_hash_t total_len;
-  XXH64_hash_t v1;
-  XXH64_hash_t v2;
-  XXH64_hash_t v3;
-  XXH64_hash_t v4;
-  XXH64_hash_t mem64[4];
-  XXH32_hash_t memsize;
-  XXH32_hash_t reserved32;                   /* required for padding anyway */
-  XXH64_hash_t reserved64; /* never read nor write, might be removed in a future
-                              version */
-
-};                                            /* typedef'd to XXH64_state_t */
-
-  /*-**********************************************************************
-   *  XXH3
-   *  New experimental hash
-   ************************************************************************/
-
-  /* ************************************************************************
-   * XXH3 is a new hash algorithm featuring:
-   *  - Improved speed for both small and large inputs
-   *  - True 64-bit and 128-bit outputs
-   *  - SIMD acceleration
-   *  - Improved 32-bit viability
-   *
-   * Speed analysis methodology is explained here:
-   *
-   *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
-   *
-   * In general, expect XXH3 to run about ~2x faster on large inputs and >3x
-   * faster on small ones compared to XXH64, though exact differences depend on
-   * the platform.
-   *
-   * The algorithm is portable: Like XXH32 and XXH64, it generates the same hash
-   * on all platforms.
-   *
-   * It benefits greatly from SIMD and 64-bit arithmetic, but does not require
-   * it.
-   *
-   * Almost all 32-bit and 64-bit targets that can run XXH32 smoothly can run
-   * XXH3 at competitive speeds, even if XXH64 runs slowly. Further details are
-   * explained in the implementation.
-   *
-   * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON,
-   * POWER8, ZVector and scalar targets. This can be controlled with the
-   * XXH_VECTOR macro.
-   *
-   * XXH3 offers 2 variants, _64bits and _128bits.
-   * When only 64 bits are needed, prefer calling the _64bits variant, as it
-   * reduces the amount of mixing, resulting in faster speed on small inputs.
-   *
-   * It's also generally simpler to manipulate a scalar return type than a
-   * struct.
-   *
-   * The 128-bit version adds additional strength, but it is slightly slower.
-   *
-   * The XXH3 algorithm is still in development.
-   * The results it produces may still change in future versions.
-   *
-   * Results produced by v0.7.x are not comparable with results from v0.7.y.
-   * However, the API is completely stable, and it can safely be used for
-   * ephemeral data (local sessions).
-   *
-   * Avoid storing values in long-term storage until the algorithm is finalized.
-   *
-   * Since v0.7.3, XXH3 has reached "release candidate" status, meaning that, if
-   * everything remains fine, its current format will be "frozen" and become the
-   * final one.
-   *
-   * After which, return values of XXH3 and XXH128 will no longer change in
-   * future versions.
-   *
-   * XXH3's return values will be officially finalized upon reaching v0.8.0.
-   *
-   * The API supports one-shot hashing, streaming mode, and custom secrets.
-   */
-
-    #ifdef XXH_NAMESPACE
-      #define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
-      #define XXH3_64bits_withSecret \
-        XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
-      #define XXH3_64bits_withSeed \
-        XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
-
-      #define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
-      #define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
-      #define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
-
-      #define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
-      #define XXH3_64bits_reset_withSeed \
-        XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
-      #define XXH3_64bits_reset_withSecret \
-        XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
-      #define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
-      #define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
-
-      #define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
-    #endif
-
 /* XXH3_64bits():
  * default 64-bit variant, using default secret and default seed of 0.
  * It's the fastest variant. */
@@ -547,8 +496,8 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void *data, size_t len);
 
 /*
  * XXH3_64bits_withSeed():
- * This variant generates a custom secret on the fly based on the default
- * secret, altered using the `seed` value.
+ * This variant generates a custom secret on the fly,
+ * based on the default secret altered using the `seed` value.
  * While this operation is decently fast, note that it's not completely free.
  * Note: seed==0 produces the same results as XXH3_64bits().
  */
@@ -559,74 +508,28 @@ XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void *data, size_t len,
      * XXH3_64bits_withSecret():
      * It's possible to provide any blob of bytes as a "secret" to generate the
      * hash. This makes it more difficult for an external actor to prepare an
-     * intentional collision. secretSize *must* be large enough (>=
-     * XXH3_SECRET_SIZE_MIN). The hash quality depends on the secret's high
-     * entropy, meaning that the secret should look like a bunch of random
-     * bytes. Avoid "trivial" sequences such as text or a bunch of repeated
-     * characters. If you are unsure of the "randonmess" of the blob of bytes,
-     * consider making it a "custom seed" instead,
-     * and use "XXH_generateSecret()" to generate a high quality secret.
+     * intentional collision. The main condition is that secretSize *must* be
+     * large enough (>= XXH3_SECRET_SIZE_MIN). However, the quality of produced
+     * hash values depends on secret's entropy. Technically, the secret must
+     * look like a bunch of random bytes. Avoid "trivial" or structured data
+     * such as repeated sequences or a text document. Whenever unsure about the
+     * "randomness" of the blob of bytes, consider relabelling it as a "custom
+     * seed" instead, and employ "XXH3_generateSecret()" (see below) to generate
+     * a high entropy secret derived from the custom seed.
      */
     #define XXH3_SECRET_SIZE_MIN 136
 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void *data, size_t len,
                                                    const void *secret,
                                                    size_t      secretSize);
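A sketch of the contract above; my_secret is a hypothetical key buffer holding at least XXH3_SECRET_SIZE_MIN (136) high-entropy bytes:

#include "xxhash.h"

extern const unsigned char my_secret[XXH3_SECRET_SIZE_MIN];

XXH64_hash_t hash_keyed(const void *data, size_t len) {
  return XXH3_64bits_withSecret(data, len, my_secret, XXH3_SECRET_SIZE_MIN);
}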
 
-  /* streaming 64-bit */
-
-    #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)  /* C11+ */
-      #include <stdalign.h>
-      #define XXH_ALIGN(n) alignas(n)
-    #elif defined(__GNUC__)
-      #define XXH_ALIGN(n) __attribute__((aligned(n)))
-    #elif defined(_MSC_VER)
-      #define XXH_ALIGN(n) __declspec(align(n))
-    #else
-      #define XXH_ALIGN(n)                                      /* disabled */
-    #endif
-
-    /* Old GCC versions only accept the attribute after the type in structures.
-     */
-    #if !(defined(__STDC_VERSION__) &&              \
-          (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
-        && defined(__GNUC__)
-      #define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
-    #else
-      #define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
-    #endif
-
-typedef struct XXH3_state_s XXH3_state_t;
-
-    #define XXH3_INTERNALBUFFER_SIZE 256
-    #define XXH3_SECRET_DEFAULT_SIZE 192
-struct XXH3_state_s {
-
-  XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
-  /* used to store a custom secret generated from a seed */
-  XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
-  XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
-  XXH32_hash_t         bufferedSize;
-  XXH32_hash_t         reserved32;
-  size_t               nbStripesPerBlock;
-  size_t               nbStripesSoFar;
-  size_t               secretLimit;
-  XXH64_hash_t         totalLen;
-  XXH64_hash_t         seed;
-  XXH64_hash_t         reserved64;
-  const unsigned char *extSecret; /* reference to external secret;
-                                   * if == NULL, use .customSecret instead */
-  /* note: there may be some padding at the end due to alignment on 64 bytes */
-
-};                                             /* typedef'd to XXH3_state_t */
-
-    #undef XXH_ALIGN_MEMBER
-
+/*******   Streaming   *******/
 /*
  * Streaming requires state maintenance.
  * This operation costs memory and CPU.
  * As a consequence, streaming is slower than one-shot hashing.
- * For better performance, prefer one-shot functions whenever possible.
+ * For better performance, prefer one-shot functions whenever applicable.
  */
+typedef struct XXH3_state_s XXH3_state_t;
 XXH_PUBLIC_API XXH3_state_t *XXH3_createState(void);
 XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t *statePtr);
 XXH_PUBLIC_API void          XXH3_copyState(XXH3_state_t *      dst_state,
@@ -634,8 +537,8 @@ XXH_PUBLIC_API void          XXH3_copyState(XXH3_state_t *      dst_state,
 
 /*
  * XXH3_64bits_reset():
- * Initialize with the default parameters.
- * The result will be equivalent to `XXH3_64bits()`.
+ * Initialize with default parameters.
+ * The digest will be equivalent to `XXH3_64bits()`.
  */
 XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t *statePtr);
 /*
@@ -647,9 +550,12 @@ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t *statePtr,
                                                         XXH64_hash_t  seed);
 /*
  * XXH3_64bits_reset_withSecret():
- * `secret` is referenced, and must outlive the hash streaming session, so
- * be careful when using stack arrays.
- * `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`.
+ * `secret` is referenced, not copied: it _must outlive_ the hash
+ * streaming session.
+ * As with the one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
+ * and the quality of produced hash values depends on the secret's entropy
+ * (the secret's content should look like a bunch of random bytes).
+ * When in doubt about the randomness of a candidate `secret`,
+ * consider employing `XXH3_generateSecret()` instead (see below).
  */
 XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(
     XXH3_state_t *statePtr, const void *secret, size_t secretSize);
@@ -659,31 +565,12 @@ XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update(XXH3_state_t *statePtr,
                                                 size_t        length);
 XXH_PUBLIC_API XXH64_hash_t  XXH3_64bits_digest(const XXH3_state_t *statePtr);
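+/*
+ * Illustrative sketch, not part of the library: a typical 64-bit streaming
+ * session. `read_chunk`, `buf`, `n` and `h` are hypothetical
+ * application-side names.
+ *
+ *   XXH3_state_t *state = XXH3_createState();
+ *   if (state == NULL) abort();        // allocation failure
+ *   XXH3_64bits_reset(state);          // same result as one-shot XXH3_64bits()
+ *   while ((n = read_chunk(buf, sizeof(buf))) > 0)
+ *     XXH3_64bits_update(state, buf, (size_t)n);
+ *   h = XXH3_64bits_digest(state);     // digest can be read at any point
+ *   XXH3_freeState(state);
+ */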
 
-  /* 128-bit */
-
-    #ifdef XXH_NAMESPACE
-      #define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
-      #define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
-      #define XXH3_128bits_withSeed \
-        XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
-      #define XXH3_128bits_withSecret \
-        XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
-
-      #define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
-      #define XXH3_128bits_reset_withSeed \
-        XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
-      #define XXH3_128bits_reset_withSecret \
-        XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
-      #define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
-      #define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
-
-      #define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
-      #define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
-      #define XXH128_canonicalFromHash \
-        XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
-      #define XXH128_hashFromCanonical \
-        XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
-    #endif
+/* note: the canonical representation of XXH3_64bits is the same as XXH64's,
+ * since they both produce XXH64_hash_t values */
+
+/*-**********************************************************************
+ *  XXH3 128-bit variant
+ ************************************************************************/
 
 typedef struct {
 
@@ -692,16 +579,28 @@ typedef struct {
 
 } XXH128_hash_t;
 
-XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
-                                    XXH64_hash_t seed);
 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void *data, size_t len);
-XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(
-    const void *data, size_t len, XXH64_hash_t seed);        /* == XXH128() */
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void *data, size_t len,
+                                                   XXH64_hash_t seed);
 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void *data,
                                                      size_t      len,
                                                      const void *secret,
                                                      size_t      secretSize);
 
+/*******   Streaming   *******/
+/*
+ * Streaming requires state maintenance.
+ * This operation costs memory and CPU.
+ * As a consequence, streaming is slower than one-shot hashing.
+ * For better performance, prefer one-shot functions whenever applicable.
+ *
+ * XXH3_128bits() uses the same XXH3_state_t as XXH3_64bits().
+ * Use the already declared XXH3_createState() and XXH3_freeState().
+ *
+ * All reset and streaming functions have the same meaning as their 64-bit
+ * counterparts.
+ */
+
 XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t *statePtr);
 XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t *statePtr,
                                                          XXH64_hash_t  seed);
@@ -713,7 +612,10 @@ XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update(XXH3_state_t *statePtr,
                                                  size_t        length);
 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest(const XXH3_state_t *statePtr);
 
-/* Note: For better performance, these functions can be inlined using
+/* The following helper functions make it possible to compare XXH128_hash_t
+ * values. Since XXH128_hash_t is a structure, this capability is not offered
+ * by the language.
+ * Note: For better performance, these functions can be inlined using
  * XXH_INLINE_ALL */
 
 /*!
@@ -745,6 +647,116 @@ XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t *dst,
 XXH_PUBLIC_API XXH128_hash_t
 XXH128_hashFromCanonical(const XXH128_canonical_t *src);
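+/*
+ * Illustrative sketch, not part of the library: comparing and serializing
+ * 128-bit hashes with the helpers above. `bufA`/`bufB` are hypothetical.
+ *
+ *   XXH128_hash_t h1 = XXH3_128bits(bufA, sizeA);
+ *   XXH128_hash_t h2 = XXH3_128bits(bufB, sizeB);
+ *   if (XXH128_isEqual(h1, h2)) { ... }   // structure-aware equality test
+ *
+ *   XXH128_canonical_t canon;             // big-endian form, fit for storage
+ *   XXH128_canonicalFromHash(&canon, h1);
+ *   XXH128_hash_t back = XXH128_hashFromCanonical(&canon);
+ */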
 
+  #endif                                                /* XXH_NO_LONG_LONG */
+
+#endif                                         /* XXHASH_H_5627135585666179 */
+
+#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
+  #define XXHASH_H_STATIC_13879238742
+/* ****************************************************************************
+ * This section contains declarations which are not guaranteed to remain stable.
+ * They may change in future versions, becoming incompatible with a different
+ * version of the library.
+ * These declarations should only be used with static linking.
+ * Never use them in association with dynamic linking!
+ *****************************************************************************
+ */
+
+/*
+ * These definitions are only present to allow static allocation
+ * of XXH states, on stack or in a struct, for example.
+ * Never **ever** access their members directly.
+ */
+
+struct XXH32_state_s {
+
+  XXH32_hash_t total_len_32;
+  XXH32_hash_t large_len;
+  XXH32_hash_t v1;
+  XXH32_hash_t v2;
+  XXH32_hash_t v3;
+  XXH32_hash_t v4;
+  XXH32_hash_t mem32[4];
+  XXH32_hash_t memsize;
+  XXH32_hash_t
+      reserved; /* never read nor written to; might be removed in a future version */
+
+};                                            /* typedef'd to XXH32_state_t */
+
+  #ifndef XXH_NO_LONG_LONG       /* defined when there is no 64-bit support */
+
+struct XXH64_state_s {
+
+  XXH64_hash_t total_len;
+  XXH64_hash_t v1;
+  XXH64_hash_t v2;
+  XXH64_hash_t v3;
+  XXH64_hash_t v4;
+  XXH64_hash_t mem64[4];
+  XXH32_hash_t memsize;
+  XXH32_hash_t reserved32;                   /* required for padding anyway */
+  XXH64_hash_t reserved64; /* never read nor written to; might be removed in a
+                              future version */
+
+};                                            /* typedef'd to XXH64_state_t */
+
+    #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)  /* C11+ */
+      #include <stdalign.h>
+      #define XXH_ALIGN(n) alignas(n)
+    #elif defined(__GNUC__)
+      #define XXH_ALIGN(n) __attribute__((aligned(n)))
+    #elif defined(_MSC_VER)
+      #define XXH_ALIGN(n) __declspec(align(n))
+    #else
+      #define XXH_ALIGN(n)                                      /* disabled */
+    #endif
+
+    /* Old GCC versions only accept the attribute after the type in structures.
+     */
+    #if !(defined(__STDC_VERSION__) &&              \
+          (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
+        && defined(__GNUC__)
+      #define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
+    #else
+      #define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
+    #endif
+
+    #define XXH3_INTERNALBUFFER_SIZE 256
+    #define XXH3_SECRET_DEFAULT_SIZE 192
+struct XXH3_state_s {
+
+  XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
+  /* used to store a custom secret generated from a seed */
+  XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
+  XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
+  XXH32_hash_t         bufferedSize;
+  XXH32_hash_t         reserved32;
+  size_t               nbStripesSoFar;
+  XXH64_hash_t         totalLen;
+  size_t               nbStripesPerBlock;
+  size_t               secretLimit;
+  XXH64_hash_t         seed;
+  XXH64_hash_t         reserved64;
+  const unsigned char *extSecret; /* reference to external secret;
+                                   * if == NULL, use .customSecret instead */
+  /* note: there may be some padding at the end due to alignment on 64 bytes */
+
+};                                             /* typedef'd to XXH3_state_t */
+
+    #undef XXH_ALIGN_MEMBER
+
+    /* When the XXH3_state_t structure is merely emplaced on the stack,
+     * it should be initialized with XXH3_INITSTATE() or a memset()
+     * in case its first reset uses XXH3_NNbits_reset_withSeed().
+     * This init can be omitted if the first reset uses the default or
+     * _withSecret mode. This operation isn't necessary when the state is
+     * created with XXH3_createState(). Note that this doesn't prepare the
+     * state for a streaming operation; it's still necessary to use
+     * XXH3_NNbits_reset*() afterwards.
+     */
+    #define XXH3_INITSTATE(XXH3_state_ptr) \
+      { (XXH3_state_ptr)->seed = 0; }
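+
+    /* Illustrative sketch, not part of the library: statically allocated
+     * state (static linking only). XXH3_INITSTATE() makes the first
+     * _withSeed() reset safe. `buf`, `bufSize` and `h` are hypothetical.
+     *
+     *   XXH3_state_t state;           // on the stack, no XXH3_createState()
+     *   XXH3_INITSTATE(&state);
+     *   XXH3_64bits_reset_withSeed(&state, (XXH64_hash_t)0x1234);
+     *   XXH3_64bits_update(&state, buf, bufSize);
+     *   h = XXH3_64bits_digest(&state);
+     */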
+
 /* ===   Experimental API   === */
 /* Symbols defined below must be considered tied to a specific library version.
  */
@@ -752,17 +764,19 @@ XXH128_hashFromCanonical(const XXH128_canonical_t *src);
 /*
  * XXH3_generateSecret():
  *
- * Derive a secret for use with `*_withSecret()` prototypes of XXH3.
- * Use this if you need a higher level of security than the one provided by
- * 64bit seed.
+ * Derive a high-entropy secret from any user-defined content, named customSeed.
+ * The generated secret can be used in combination with `*_withSecret()`
+ * functions. The `_withSecret()` variants are useful to provide a higher level
+ * of protection than a 64-bit seed, as it becomes much more difficult for an
+ * external actor to guess how to impact the calculation logic.
  *
- * Take as input a custom seed of any length and any content,
- * generate from it a high-entropy secret of length XXH3_SECRET_DEFAULT_SIZE
- * into already allocated buffer secretBuffer.
- * The generated secret ALWAYS is XXH_SECRET_DEFAULT_SIZE bytes long.
+ * The function accepts as input a custom seed of any length and any content,
+ * and derives from it a high-entropy secret of length XXH3_SECRET_DEFAULT_SIZE
+ * into an already allocated buffer secretBuffer.
+ * The generated secret is _always_ XXH3_SECRET_DEFAULT_SIZE (192) bytes long.
  *
  * The generated secret can then be used with any `*_withSecret()` variant.
- * The functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
+ * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
  * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
  * are part of this list. They all accept a `secret` parameter
  * which must be very long for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
@@ -771,8 +785,8 @@ XXH128_hashFromCanonical(const XXH128_canonical_t *src);
  * this function can be used to generate a secret of proper quality.
  *
  * customSeed can be anything. It can have any size, even small ones,
- * and its content can be anything, even some "low entropy" source such as a
- * bunch of zeroes. The resulting `secret` will nonetheless respect all expected
+ * and its content can be anything, even a stupidly "low entropy" source such as a
+ * bunch of zeroes. The resulting `secret` will nonetheless provide all expected
  * qualities.
  *
  * Supplying NULL as the customSeed copies the default secret into
@@ -783,6 +797,10 @@ XXH_PUBLIC_API void XXH3_generateSecret(void *      secretBuffer,
                                         const void *customSeed,
                                         size_t      customSeedSize);
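+
+/*
+ * Illustrative sketch, not part of the library: deriving a full-quality
+ * secret from a short, low-entropy seed string, then using it with a
+ * `*_withSecret()` variant. `buf` and `bufSize` are hypothetical.
+ *
+ *   unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
+ *   XXH3_generateSecret(secret, "my application key", 18);
+ *   XXH64_hash_t h = XXH3_64bits_withSecret(buf, bufSize,
+ *                                           secret, sizeof(secret));
+ */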
 
+/* simple shortcut to the pre-selected XXH3_128bits_withSeed() variant */
+XXH_PUBLIC_API XXH128_hash_t XXH128(const void *data, size_t len,
+                                    XXH64_hash_t seed);
+
   #endif                                                /* XXH_NO_LONG_LONG */
 
   #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
@@ -799,17 +817,23 @@ XXH_PUBLIC_API void XXH3_generateSecret(void *      secretBuffer,
 /*-**********************************************************************
  * xxHash implementation
  *-**********************************************************************
- * xxHash's implementation used to be found in xxhash.c.
+ * xxHash's implementation used to be hosted inside xxhash.c.
  *
- * However, code inlining requires the implementation to be visible to the
- * compiler, usually within the header.
+ * However, inlining requires the implementation to be visible to the compiler,
+ * hence to be included alongside the header.
+ * Previously, the implementation was hosted inside xxhash.c,
+ * which was then #included when inlining was activated.
+ * This construction created issues with a few build and install systems,
+ * as it required xxhash.c to be stored in the /include directory.
  *
- * As a workaround, xxhash.c used to be included within xxhash.h. This caused
- * some issues with some build systems, especially ones which treat .c files
- * as source files.
+ * The xxHash implementation is now directly integrated within xxhash.h.
+ * As a consequence, xxhash.c is no longer needed in /include.
  *
- * Therefore, the implementation is now directly integrated within xxhash.h.
- * Another small advantage is that xxhash.c is no longer needed in /include.
+ * xxhash.c is still available and is still useful.
+ * In a "normal" setup, when xxhash is not inlined,
+ * xxhash.h only exposes the prototypes and public symbols,
+ * while xxhash.c can be built into an object file xxhash.o
+ * which can then be linked into the final binary.
  ************************************************************************/
 
 #if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) || \
@@ -828,10 +852,10 @@ XXH_PUBLIC_API void XXH3_generateSecret(void *      secretBuffer,
    * Unfortunately, on some target/compiler combinations, the generated assembly
    * is sub-optimal.
    *
-   * The below switch allow to select a different access method for improved
-   * performance.
+   * The switch below allows selecting a different access method
+   * in the search for improved performance.
    * Method 0 (default):
-   *     Use `memcpy()`. Safe and portable.
+   *     Use `memcpy()`. Safe and portable. Default.
    * Method 1:
    *     `__attribute__((packed))` statement. It depends on compiler extensions
    *     and is therefore not portable.
@@ -843,7 +867,7 @@ XXH_PUBLIC_API void XXH3_generateSecret(void *      secretBuffer,
    *     It can generate buggy code on targets which do not support unaligned
    *     memory accesses.
    *     But in some circumstances, it's the only known way to get the most
-   *     performance (ie GCC + ARMv6)
+   *     performance (example: GCC + ARMv6)
    * Method 3:
    *     Byteshift. This can generate the best code on old compilers which don't
    *     inline small `memcpy()` calls, and it might also be faster on
@@ -924,7 +948,8 @@ XXH_PUBLIC_API void XXH3_generateSecret(void *      secretBuffer,
    * -fno-inline with GCC or Clang, this will automatically be defined.
    */
   #ifndef XXH_NO_INLINE_HINTS
-    #if defined(__OPTIMIZE_SIZE__) || defined(__NO_INLINE__)
+    #if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
+        || defined(__NO_INLINE__)                       /* -O0, -fno-inline */
       #define XXH_NO_INLINE_HINTS 1
     #else
       #define XXH_NO_INLINE_HINTS 0
@@ -950,8 +975,8 @@ XXH_PUBLIC_API void XXH3_generateSecret(void *      secretBuffer,
    *  Includes & Memory related functions
    ***************************************/
   /*!
-   * Modify the local functions below should you wish to use some other memory
-   * routines for malloc() and free()
+   * Modify the local functions below should you wish to use
+   * different memory routines for malloc() and free()
    */
   #include <stdlib.h>
 
@@ -1137,7 +1162,8 @@ typedef enum { XXH_bigEndian = 0, XXH_littleEndian = 1 } XXH_endianess;
      * Try to detect endianness automatically, to avoid the nonstandard behavior
      * in `XXH_isLittleEndian()`
      */
-    #if defined(_WIN32) || defined(__LITTLE_ENDIAN__) || \
+    #if defined(_WIN32) /* Windows is always little endian */ \
+        || defined(__LITTLE_ENDIAN__) ||                      \
         (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
       #define XXH_CPU_LITTLE_ENDIAN 1
     #elif defined(__BIG_ENDIAN__) || \
@@ -1778,13 +1804,16 @@ typedef XXH64_hash_t xxh_u64;
      * rerolled.
      */
     #ifndef XXH_REROLL_XXH64
-      #if (defined(__ILP32__) || defined(_ILP32)) ||                           \
-          !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) ||     \
-            defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) || \
-            defined(__PPC64__) || defined(__PPC64LE__) ||                      \
-            defined(__ppc64__) || defined(__powerpc64__) ||                    \
-            defined(__mips64__) || defined(__mips64)) ||                       \
-          (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX)
+      #if (defined(__ILP32__) ||                                              \
+           defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \
+          || !(defined(__x86_64__) || defined(_M_X64) ||                      \
+               defined(_M_AMD64) /* x86-64 */                                 \
+               || defined(_M_ARM64) || defined(__aarch64__) ||                \
+               defined(__arm64__) /* aarch64 */                               \
+               || defined(__PPC64__) || defined(__PPC64LE__) ||               \
+               defined(__ppc64__) || defined(__powerpc64__) /* ppc64 */       \
+               || defined(__mips64__) || defined(__mips64)) /* mips64 */      \
+          || (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX)  /* check limits */
         #define XXH_REROLL_XXH64 1
       #else
         #define XXH_REROLL_XXH64 0
@@ -2428,7 +2457,3134 @@ XXH64_hashFromCanonical(const XXH64_canonical_t *src) {
    *  New generation hash designed for speed on small keys and vectorization
    ************************************************************************ */
 
-    #include "xxh3.h"
+  /* ===   Compiler specifics   === */
+
+    #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L  /* >= C99 */
+      #define XXH_RESTRICT restrict
+    #else
+      /* Note: it might be useful to define __restrict or __restrict__ for some
+       * C++ compilers */
+      #define XXH_RESTRICT                                       /* disable */
+    #endif
+
+    #if (defined(__GNUC__) && (__GNUC__ >= 3)) ||                   \
+        (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || \
+        defined(__clang__)
+      #define XXH_likely(x) __builtin_expect(x, 1)
+      #define XXH_unlikely(x) __builtin_expect(x, 0)
+    #else
+      #define XXH_likely(x) (x)
+      #define XXH_unlikely(x) (x)
+    #endif
+
+    #if defined(__GNUC__)
+      #if defined(__AVX2__)
+        #include <immintrin.h>
+      #elif defined(__SSE2__)
+        #include <emmintrin.h>
+      #elif defined(__ARM_NEON__) || defined(__ARM_NEON)
+        #define inline __inline__                 /* circumvent a clang bug */
+        #include <arm_neon.h>
+        #undef inline
+      #endif
+    #elif defined(_MSC_VER)
+      #include <intrin.h>
+    #endif
+
+    /*
+     * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
+     * remaining a true 64-bit/128-bit hash function.
+     *
+     * This is done by prioritizing a subset of 64-bit operations that can be
+     * emulated without too many steps on the average 32-bit machine.
+     *
+     * For example, these two lines seem similar, and run equally fast on
+     * 64-bit:
+     *
+     *   xxh_u64 x;
+     *   x ^= (x >> 47); // good
+     *   x ^= (x >> 13); // bad
+     *
+     * However, to a 32-bit machine, there is a major difference.
+     *
+     * x ^= (x >> 47) looks like this:
+     *
+     *   x.lo ^= (x.hi >> (47 - 32));
+     *
+     * while x ^= (x >> 13) looks like this:
+     *
+     *   // note: funnel shifts are not usually cheap.
+     *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
+     *   x.hi ^= (x.hi >> 13);
+     *
+     * The first one is significantly faster than the second, simply because the
+     * shift is larger than 32. This means:
+     *  - All the bits we need are in the upper 32 bits, so we can ignore the
+     *    lower 32 bits in the shift.
+     *  - The shift result will always fit in the lower 32 bits, and therefore,
+     *    we can ignore the upper 32 bits in the xor.
+     *
+     * Thanks to this optimization, XXH3 only requires these features to be
+     * efficient:
+     *
+     *  - Usable unaligned access
+     *  - A 32-bit or 64-bit ALU
+     *      - If 32-bit, a decent ADC instruction
+     *  - A 32 or 64-bit multiply with a 64-bit result
+     *  - For the 128-bit variant, a decent byteswap helps short inputs.
+     *
+     * The first two are already required by XXH32, and almost all 32-bit and
+     * 64-bit platforms which can run XXH32 can run XXH3 efficiently.
+     *
+     * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
+     * notable exception.
+     *
+     * First of all, Thumb-1 lacks support for the UMULL instruction which
+     * performs the important long multiply. This means numerous __aeabi_lmul
+     * calls.
+     *
+     * Second of all, the 8 functional registers are just not enough.
+     * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic
+     * need Lo registers, and this shuffling results in thousands more MOVs than
+     * A32.
+     *
+     * A32 and T32 don't have this limitation. They can access all 14 registers,
+     * do a 32->64 multiply with UMULL, and the flexible operand allowing free
+     * shifts is helpful, too.
+     *
+     * Therefore, we do a quick sanity check.
+     *
+     * If compiling Thumb-1 for a target which supports ARM instructions, we
+     * will emit a warning, as it is not a "sane" platform to compile for.
+     *
+     * Usually, if this happens, it is because of an accident and you probably
+     * need to specify -march, as you likely meant to compile for a newer
+     * architecture.
+     *
+     * Credit: large sections of the vectorial and asm source code paths
+     *         have been contributed by @easyaspi314
+     */
+    #if defined(__thumb__) && !defined(__thumb2__) && \
+        defined(__ARM_ARCH_ISA_ARM)
+      #warning "XXH3 is highly inefficient without ARM or Thumb-2."
+    #endif
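+
+    /* Illustrative fix, not part of the library: if the warning above fires,
+     * selecting a Thumb-2 capable architecture usually resolves it, e.g.:
+     *
+     *   cc -O3 -march=armv7-a -mthumb -c xxhash.c
+     */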
+
+    /* ==========================================
+     * Vectorization detection
+     * ========================================== */
+    #define XXH_SCALAR 0                         /* Portable scalar version */
+    #define XXH_SSE2 1                 /* SSE2 for Pentium 4 and all x86_64 */
+    #define XXH_AVX2 2                    /* AVX2 for Haswell and Bulldozer */
+    #define XXH_AVX512 3                  /* AVX512 for Skylake and Icelake */
+    #define XXH_NEON 4             /* NEON for most ARMv7-A and all AArch64 */
+    #define XXH_VSX 5                     /* VSX and ZVector for POWER8/z13 */
+
+    #ifndef XXH_VECTOR                    /* can be defined on command line */
+      #if defined(__AVX512F__)
+        #define XXH_VECTOR XXH_AVX512
+      #elif defined(__AVX2__)
+        #define XXH_VECTOR XXH_AVX2
+      #elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || \
+          (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
+        #define XXH_VECTOR XXH_SSE2
+      #elif defined(__GNUC__) /* msvc support maybe later */                   \
+          && (defined(__ARM_NEON__) || defined(__ARM_NEON)) &&                 \
+          (defined(__LITTLE_ENDIAN__) /* We only support little endian NEON */ \
+           || (defined(__BYTE_ORDER__) &&                                      \
+               __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
+        #define XXH_VECTOR XXH_NEON
+      #elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) || \
+          (defined(__s390x__) && defined(__VEC__)) &&             \
+              defined(__GNUC__)                             /* TODO: IBM XL */
+        #define XXH_VECTOR XXH_VSX
+      #else
+        #define XXH_VECTOR XXH_SCALAR
+      #endif
+    #endif
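+
+    /* Illustrative override, not part of the library: since XXH_VECTOR can
+     * be defined on the command line, the detection above can be bypassed,
+     * e.g. to force the portable scalar code path:
+     *
+     *   cc -O3 -DXXH_VECTOR=0 -c xxhash.c        // 0 == XXH_SCALAR
+     */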
+
+    /*
+     * Controls the alignment of the accumulator,
+     * for compatibility with aligned vector loads, which are usually faster.
+     */
+    #ifndef XXH_ACC_ALIGN
+      #if defined(XXH_X86DISPATCH)
+        #define XXH_ACC_ALIGN 64           /* for compatibility with avx512 */
+      #elif XXH_VECTOR == XXH_SCALAR                              /* scalar */
+        #define XXH_ACC_ALIGN 8
+      #elif XXH_VECTOR == XXH_SSE2                                  /* sse2 */
+        #define XXH_ACC_ALIGN 16
+      #elif XXH_VECTOR == XXH_AVX2                                  /* avx2 */
+        #define XXH_ACC_ALIGN 32
+      #elif XXH_VECTOR == XXH_NEON                                  /* neon */
+        #define XXH_ACC_ALIGN 16
+      #elif XXH_VECTOR == XXH_VSX                                    /* vsx */
+        #define XXH_ACC_ALIGN 16
+      #elif XXH_VECTOR == XXH_AVX512                              /* avx512 */
+        #define XXH_ACC_ALIGN 64
+      #endif
+    #endif
+
+    #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 || \
+        XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
+      #define XXH_SEC_ALIGN XXH_ACC_ALIGN
+    #else
+      #define XXH_SEC_ALIGN 8
+    #endif
+
+    /*
+     * UGLY HACK:
+     * GCC usually generates the best code with -O3 for xxHash.
+     *
+     * However, when targeting AVX2, it is overzealous in its unrolling
+     * resulting in code roughly 3/4 the speed of Clang.
+     *
+     * There are other issues, such as GCC splitting _mm256_loadu_si256 into
+     * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
+     * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
+     *
+     * That is why when compiling the AVX2 version, it is recommended to use
+     * either -O2 -mavx2 -march=haswell or -O2 -mavx2
+     * -mno-avx256-split-unaligned-load for decent performance, or to use Clang
+     * instead.
+     *
+     * Fortunately, we can control the first one with a pragma that forces GCC
+     * into -O2, but the other one we can't control without "failed to inline
+     * always inline function due to target mismatch" warnings.
+     */
+    #if XXH_VECTOR == XXH_AVX2                      /* AVX2 */           \
+        && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+        && defined(__OPTIMIZE__) &&                                      \
+        !defined(__OPTIMIZE_SIZE__)                  /* respect -O0 and -Os */
+      #pragma GCC push_options
+      #pragma GCC optimize("-O2")
+    #endif
+
+    #if XXH_VECTOR == XXH_NEON
+      /*
+       * NEON's setup for vmlal_u32 is a little more complicated than it is on
+       * SSE2, AVX2, and VSX.
+       *
+       * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an
+       * upcast.
+       *
+       * To do the same operation, the 128-bit 'Q' register needs to be split
+       * into two 64-bit 'D' registers, performing this operation::
+       *
+       *   [                a                 |                 b                ]
+       *            |              '---------. .--------'                |
+       *            |                         x                          |
+       *            |              .---------' '--------.                |
+       *   [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[    a >> 32     |     b >> 32    ]
+       *
+       * Due to significant changes in aarch64, the fastest method for aarch64
+       * is completely different than the fastest method for ARMv7-A.
+       *
+       * ARMv7-A treats D registers as unions overlaying Q registers, so
+       * modifying D11 will modify the high half of Q5. This is similar to how
+       * modifying AH will only affect bits 8-15 of AX on x86.
+       *
+       * VZIP takes two registers, and puts even lanes in one register and odd
+       * lanes in the other.
+       *
+       * On ARMv7-A, this strangely modifies both parameters in place instead of
+       * taking the usual 3-operand form.
+       *
+       * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on
+       * the lower and upper halves of the Q register to end up with the high
+       * and low halves where we want - all in one instruction.
+       *
+       *   vzip.32   d10, d11
+       *   @ d10 = { d10[0], d11[0] };  d11 = { d10[1], d11[1] }
+       *
+       * Unfortunately, we need inline assembly for this: instructions that
+       * modify two registers at once are not expressible in GCC or Clang's
+       * IR, so they have to create a copy.
+       *
+       * aarch64 requires a different approach.
+       *
+       * In order to make it easier to write a decent compiler for aarch64, many
+       * quirks were removed, such as conditional execution.
+       *
+       * NEON was also affected by this.
+       *
+       * aarch64 cannot access the high bits of a Q-form register, and writes to
+       * a D-form register zero the high bits, similar to how writes to W-form
+       * scalar registers (or DWORD registers on x86_64) work.
+       *
+       * The formerly free vget_high intrinsics now require a vext (with a few
+       * exceptions).
+       *
+       * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the
+       * equivalent of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to
+       * only modify one operand.
+       *
+       * The equivalent of the VZIP.32 on the lower and upper halves would be
+       * this mess:
+       *
+       *   ext     v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
+       *   zip1    v1.2s, v0.2s, v2.2s     // v1 = { v0[0], v2[0] }
+       *   zip2    v0.2s, v0.2s, v2.2s     // v0 = { v0[1], v2[1] }
+       *
+       * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64
+       * (SHRN):
+       *
+       *   shrn    v1.2s, v0.2d, #32  // v1 = (uint32x2_t)(v0 >> 32);
+       *   xtn     v0.2s, v0.2d       // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
+       *
+       * This is available on ARMv7-A, but is less efficient than a single
+       * VZIP.32.
+       */
+
+      /*
+       * Function-like macro:
+       * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t
+       * &outHi)
+       * {
+       *     outLo = (uint32x2_t)(in & 0xFFFFFFFF);
+       *     outHi = (uint32x2_t)(in >> 32);
+       *     in = UNDEFINED;
+       * }
+       */
+      #if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
+          && defined(__GNUC__) && !defined(__aarch64__) && !defined(__arm64__)
+        #define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                                                   \
+          do {                                                                                         \
+                                                                                                       \
+            /* Undocumented GCC/Clang operand modifier: %e0 = lower D half,                            \
+             * %f0 = upper D half */                                                                   \
+            /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486             \
+             */                                                                                        \
+            /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 \
+             */                                                                                        \
+            __asm__("vzip.32  %e0, %f0" : "+w"(in));                                                   \
+            (outLo) = vget_low_u32(vreinterpretq_u32_u64(in));                                         \
+            (outHi) = vget_high_u32(vreinterpretq_u32_u64(in));                                        \
+                                                                                                       \
+          } while (0)
+
+      #else
+        #define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
+          do {                                       \
+                                                     \
+            (outLo) = vmovn_u64(in);                 \
+            (outHi) = vshrn_n_u64((in), 32);         \
+                                                     \
+          } while (0)
+
+      #endif
+    #endif                                        /* XXH_VECTOR == XXH_NEON */
+
+    /*
+     * VSX and Z Vector helpers.
+     *
+     * This is very messy, and any pull requests to clean this up are welcome.
+     *
+     * There are a lot of problems with supporting VSX and s390x, due to
+     * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
+     */
+    #if XXH_VECTOR == XXH_VSX
+      #if defined(__s390x__)
+        #include <s390intrin.h>
+      #else
+        /* gcc's altivec.h may unconditionally #define the bool, vector, and
+         * pixel keywords, breaking programs that already use these identifiers
+         * for other purposes. The paragraph defining these macros is skipped
+         * when __APPLE_ALTIVEC__ is defined.
+         * __APPLE_ALTIVEC__ is _generally_ defined automatically by the
+         * compiler, but it seems that, in some cases, it isn't. Force the build
+         * macro to be defined, so that keywords are not altered.
+         */
+        #if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
+          #define __APPLE_ALTIVEC__
+        #endif
+        #include <altivec.h>
+      #endif
+
+typedef __vector unsigned long long xxh_u64x2;
+typedef __vector unsigned char      xxh_u8x16;
+typedef __vector unsigned           xxh_u32x4;
+
+      #ifndef XXH_VSX_BE
+        #if defined(__BIG_ENDIAN__) ||  \
+            (defined(__BYTE_ORDER__) && \
+             __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+          #define XXH_VSX_BE 1
+        #elif defined(__VEC_ELEMENT_REG_ORDER__) && \
+            __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
+          #warning \
+              "-maltivec=be is not recommended. Please use native endianness."
+          #define XXH_VSX_BE 1
+        #else
+          #define XXH_VSX_BE 0
+        #endif
+      #endif                                        /* !defined(XXH_VSX_BE) */
+
+      #if XXH_VSX_BE
+        /* A wrapper for POWER9's vec_revb. */
+        #if defined(__POWER9_VECTOR__) || \
+            (defined(__clang__) && defined(__s390x__))
+          #define XXH_vec_revb vec_revb
+        #else
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) {
+
+  xxh_u8x16 const vByteSwap = {0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
+                               0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08};
+  return vec_perm(val, val, vByteSwap);
+
+}
+
+        #endif
+      #endif                                                  /* XXH_VSX_BE */
+
+/*
+ * Performs an unaligned load and byte swaps it on big endian.
+ */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) {
+
+  xxh_u64x2 ret;
+  memcpy(&ret, ptr, sizeof(xxh_u64x2));
+      #if XXH_VSX_BE
+  ret = XXH_vec_revb(ret);
+      #endif
+  return ret;
+
+}
+
+      /*
+       * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
+       *
+       * These intrinsics weren't added until GCC 8, despite existing for a
+       * while, and they are endian dependent. Also, their meanings swap
+       * depending on the version.
+       */
+      #if defined(__s390x__)
+      /* s390x is always big endian, no issue on this platform */
+        #define XXH_vec_mulo vec_mulo
+        #define XXH_vec_mule vec_mule
+      #elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
+        /* Clang has a better way to control this, we can just use the builtin
+         * which doesn't swap. */
+        #define XXH_vec_mulo __builtin_altivec_vmulouw
+        #define XXH_vec_mule __builtin_altivec_vmuleuw
+      #else
+/* gcc needs inline assembly */
+/* Adapted from
+ * https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b) {
+
+  xxh_u64x2 result;
+  __asm__("vmulouw %0, %1, %2" : "=v"(result) : "v"(a), "v"(b));
+  return result;
+
+}
+
+XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) {
+
+  xxh_u64x2 result;
+  __asm__("vmuleuw %0, %1, %2" : "=v"(result) : "v"(a), "v"(b));
+  return result;
+
+}
+
+      #endif                                  /* XXH_vec_mulo, XXH_vec_mule */
+    #endif                                         /* XXH_VECTOR == XXH_VSX */
+
+    /* prefetch
+     * can be disabled by defining the XXH_NO_PREFETCH build macro */
+    #if defined(XXH_NO_PREFETCH)
+      #define XXH_PREFETCH(ptr) (void)(ptr)                     /* disabled */
+    #else
+      #if defined(_MSC_VER) && \
+          (defined(_M_X64) ||  \
+           defined(            \
+               _M_IX86)) /* _mm_prefetch() is not defined outside of x86/x64 */
+        #include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+        #define XXH_PREFETCH(ptr) _mm_prefetch((const char *)(ptr), _MM_HINT_T0)
+      #elif defined(__GNUC__) && \
+          ((__GNUC__ >= 4) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)))
+        #define XXH_PREFETCH(ptr) \
+          __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
+      #else
+        #define XXH_PREFETCH(ptr) (void)(ptr)                   /* disabled */
+      #endif
+    #endif                                               /* XXH_NO_PREFETCH */
+
+  /* ==========================================
+   * XXH3 default settings
+   * ========================================== */
+
+    #define XXH_SECRET_DEFAULT_SIZE 192   /* must be >= XXH3_SECRET_SIZE_MIN */
+
+    #if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
+      #error "default keyset is not large enough"
+    #endif
+
+/* Pseudorandom secret taken directly from FARSH */
+XXH_ALIGN(64)
+static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
+
+    0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c,
+    0xf7, 0x21, 0xad, 0x1c, 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb,
+    0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, 0xcb, 0x79, 0xe6, 0x4e,
+    0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
+    0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6,
+    0x81, 0x3a, 0x26, 0x4c, 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb,
+    0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, 0x71, 0x64, 0x48, 0x97,
+    0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
+    0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7,
+    0xc7, 0x0b, 0x4f, 0x1d, 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31,
+    0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, 0xea, 0xc5, 0xac, 0x83,
+    0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
+    0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26,
+    0x29, 0xd4, 0x68, 0x9e, 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc,
+    0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, 0x45, 0xcb, 0x3a, 0x8f,
+    0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
+
+};
+
+    #ifdef XXH_OLD_NAMES
+      #define kSecret XXH3_kSecret
+    #endif
+
+    /*
+     * Calculates a 32-bit to 64-bit long multiply.
+     *
+     * Wraps __emulu on MSVC x86 because it tends to call __allmul when it
+     * doesn't need to (but it shouldn't need to anyways, it is about 7
+     * instructions to do a 64x64 multiply...). Since we know that this will
+     * _always_ emit MULL, we use that instead of the normal method.
+     *
+     * If you are compiling for platforms like Thumb-1 and don't have a better
+     * option, you may also want to write your own long multiply routine here.
+     *
+     * XXH_FORCE_INLINE xxh_u64 XXH_mult32to64(xxh_u64 x, xxh_u64 y)
+     * {
+     *    return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
+     * }
+     */
+    #if defined(_MSC_VER) && defined(_M_IX86)
+      #include <intrin.h>
+      #define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
+    #else
+      /*
+       * Downcast + upcast is usually better than masking on older compilers
+       * like GCC 4.2 (especially 32-bit ones), all without affecting newer
+       * compilers.
+       *
+       * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both
+       * operands and perform a full 64x64 multiply -- entirely redundant on
+       * 32-bit.
+       */
+      #define XXH_mult32to64(x, y) \
+        ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
+    #endif
+
+/*
+ * Calculates a 64->128-bit long multiply.
+ *
+ * Uses __uint128_t and _umul128 if available, otherwise uses a scalar version.
+ */
+static XXH128_hash_t XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) {
+
+    /*
+     * GCC/Clang __uint128_t method.
+     *
+     * On most 64-bit targets, GCC and Clang define a __uint128_t type.
+     * This is usually the best way as it usually uses a native long 64-bit
+     * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
+     *
+     * Usually.
+     *
+     * On wasm, despite it being a 32-bit platform, Clang (and Emscripten)
+     * still define this type without having the arithmetic for it. This
+     * results in a laggy compiler builtin call which calculates a full
+     * 128-bit multiply. In that case it is best to use the portable one.
+     * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
+     */
+    #if defined(__GNUC__) && !defined(__wasm__) && \
+            defined(__SIZEOF_INT128__) ||          \
+        (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+
+  __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
+  XXH128_hash_t     r128;
+  r128.low64 = (xxh_u64)(product);
+  r128.high64 = (xxh_u64)(product >> 64);
+  return r128;
+
+      /*
+       * MSVC for x64's _umul128 method.
+       *
+       * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64
+       * *HighProduct);
+       *
+       * This compiles to single operand MUL on x64.
+       */
+    #elif defined(_M_X64) || defined(_M_IA64)
+
+      #ifndef _MSC_VER
+        #pragma intrinsic(_umul128)
+      #endif
+  xxh_u64       product_high;
+  xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
+  XXH128_hash_t r128;
+  r128.low64 = product_low;
+  r128.high64 = product_high;
+  return r128;
+
+    #else
+  /*
+   * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
+   *
+   * This is a fast and simple grade school multiply, which is shown below
+   * with base 10 arithmetic instead of base 0x100000000.
+   *
+   *           9 3 // D2 lhs = 93
+   *         x 7 5 // D2 rhs = 75
+   *     ----------
+   *           1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
+   *         4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
+   *         2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
+   *     + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
+   *     ---------
+   *         2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
+   *     + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
+   *     ---------
+   *       6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
+   *
+   * The reasons for adding the products like this are:
+   *  1. It avoids manual carry tracking. Just like how
+   *     (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
+   *     This avoids a lot of complexity.
+   *
+   *  2. It hints for, and on Clang, compiles to, the powerful UMAAL
+   *     instruction available in ARM's Digital Signal Processing extension
+   *     in 32-bit ARMv6 and later, which is shown below:
+   *
+   *         void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
+   *         {
+   *             xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
+   *             *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
+   *             *RdHi = (xxh_u32)(product >> 32);
+   *         }
+   *
+   *     This instruction was designed for efficient long multiplication, and
+   *     allows this to be calculated in only 4 instructions at speeds
+   *     comparable to some 64-bit ALUs.
+   *
+   *  3. It isn't terrible on other platforms. Usually this will be a couple
+   *     of 32-bit ADD/ADCs.
+   */
+
+  /* First calculate all of the cross products. */
+  xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
+  xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
+  xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
+  xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
+
+  /* Now add the products together. These will never overflow. */
+  xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
+  xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
+  xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
+
+  XXH128_hash_t r128;
+  r128.low64 = lower;
+  r128.high64 = upper;
+  return r128;
+    #endif
+
+}
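+
+/*
+ * Illustrative check, not part of the library: a worked example of the
+ * scalar path above, using base-2^32 digits. For lhs = 0x100000002, rhs = 3:
+ *   lo_lo = 2*3 = 6,  hi_lo = 1*3 = 3,  lo_hi = 0,  hi_hi = 0
+ *   cross = 0 + 3 + 0 = 3,  upper = 0,  lower = (3 << 32) | 6
+ * so the 128-bit result is { .low64 = 0x0000000300000006, .high64 = 0 },
+ * which matches 0x100000002 * 3.
+ */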
+
+/*
+ * Does a 64-bit to 128-bit multiply, then XOR folds it.
+ *
+ * The reason for the separate function is to prevent passing too many structs
+ * around by value. This will hopefully inline the multiply, but we don't force
+ * it.
+ */
+static xxh_u64 XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) {
+
+  XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
+  return product.low64 ^ product.high64;
+
+}
+
+/* Seems to produce slightly better code on GCC for some reason. */
+XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) {
+
+  XXH_ASSERT(0 <= shift && shift < 64);
+  return v64 ^ (v64 >> shift);
+
+}
+
+/*
+ * This is a fast avalanche stage,
+ * suitable when input bits are already partially mixed
+ */
+static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) {
+
+  h64 = XXH_xorshift64(h64, 37);
+  h64 *= 0x165667919E3779F9ULL;
+  h64 = XXH_xorshift64(h64, 32);
+  return h64;
+
+}
+
+/*
+ * This is a stronger avalanche,
+ * inspired by Pelle Evensen's rrmxmx,
+ * preferable when input has not been previously mixed
+ */
+static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) {
+
+  /* this mix is inspired by Pelle Evensen's rrmxmx */
+  h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
+  h64 *= 0x9FB21C651E98DF25ULL;
+  h64 ^= (h64 >> 35) + len;
+  h64 *= 0x9FB21C651E98DF25ULL;
+  return XXH_xorshift64(h64, 28);
+
+}
+
+/* ==========================================
+ * Short keys
+ * ==========================================
+ * One of the shortcomings of XXH32 and XXH64 was that their performance was
+ * sub-optimal on short lengths. They used an iterative algorithm which strongly
+ * favored lengths that were a multiple of 4 or 8.
+ *
+ * Instead of iterating over individual inputs, we use a set of single shot
+ * functions which piece together a range of lengths and operate in constant
+ * time.
+ *
+ * Additionally, the number of multiplies has been significantly reduced. This
+ * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
+ *
+ * Depending on the platform, this may or may not be faster than XXH32, but it
+ * is almost guaranteed to be faster than XXH64.
+ */
+
+/*
+ * At very short lengths, there isn't enough input to fully hide secrets, or use
+ * the entire secret.
+ *
+ * There is also only a limited amount of mixing we can do before significantly
+ * impacting performance.
+ *
+ * Therefore, we use different sections of the secret and always mix two secret
+ * samples with an XOR. This should have no effect on performance on the
+ * seedless or withSeed variants because everything _should_ be constant folded
+ * by modern compilers.
+ *
+ * The XOR mixing hides individual parts of the secret and increases entropy.
+ *
+ * This adds an extra layer of strength for custom secrets.
+ */
+XXH_FORCE_INLINE XXH64_hash_t XXH3_len_1to3_64b(const xxh_u8 *input, size_t len,
+                                                const xxh_u8 *secret,
+                                                XXH64_hash_t  seed) {
+
+  XXH_ASSERT(input != NULL);
+  XXH_ASSERT(1 <= len && len <= 3);
+  XXH_ASSERT(secret != NULL);
+  /*
+   * len = 1: combined = { input[0], 0x01, input[0], input[0] }
+   * len = 2: combined = { input[1], 0x02, input[0], input[1] }
+   * len = 3: combined = { input[2], 0x03, input[0], input[1] }
+   */
+  {
+
+    xxh_u8 const  c1 = input[0];
+    xxh_u8 const  c2 = input[len >> 1];
+    xxh_u8 const  c3 = input[len - 1];
+    xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) |
+                             ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
+    xxh_u64 const bitflip =
+        (XXH_readLE32(secret) ^ XXH_readLE32(secret + 4)) + seed;
+    xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
+    return XXH64_avalanche(keyed);
+
+  }
+
+}
+
+XXH_FORCE_INLINE XXH64_hash_t XXH3_len_4to8_64b(const xxh_u8 *input, size_t len,
+                                                const xxh_u8 *secret,
+                                                XXH64_hash_t  seed) {
+
+  XXH_ASSERT(input != NULL);
+  XXH_ASSERT(secret != NULL);
+  XXH_ASSERT(4 <= len && len < 8);
+  seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
+  {
+
+    xxh_u32 const input1 = XXH_readLE32(input);
+    xxh_u32 const input2 = XXH_readLE32(input + len - 4);
+    xxh_u64 const bitflip =
+        (XXH_readLE64(secret + 8) ^ XXH_readLE64(secret + 16)) - seed;
+    xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
+    xxh_u64 const keyed = input64 ^ bitflip;
+    return XXH3_rrmxmx(keyed, len);
+
+  }
+
+}
+
+XXH_FORCE_INLINE XXH64_hash_t XXH3_len_9to16_64b(const xxh_u8 *input,
+                                                 size_t        len,
+                                                 const xxh_u8 *secret,
+                                                 XXH64_hash_t  seed) {
+
+  XXH_ASSERT(input != NULL);
+  XXH_ASSERT(secret != NULL);
+  XXH_ASSERT(8 <= len && len <= 16);
+  {
+
+    xxh_u64 const bitflip1 =
+        (XXH_readLE64(secret + 24) ^ XXH_readLE64(secret + 32)) + seed;
+    xxh_u64 const bitflip2 =
+        (XXH_readLE64(secret + 40) ^ XXH_readLE64(secret + 48)) - seed;
+    xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
+    xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
+    xxh_u64 const acc = len + XXH_swap64(input_lo) + input_hi +
+                        XXH3_mul128_fold64(input_lo, input_hi);
+    return XXH3_avalanche(acc);
+
+  }
+
+}
+
+XXH_FORCE_INLINE XXH64_hash_t XXH3_len_0to16_64b(const xxh_u8 *input,
+                                                 size_t        len,
+                                                 const xxh_u8 *secret,
+                                                 XXH64_hash_t  seed) {
+
+  XXH_ASSERT(len <= 16);
+  {
+
+    if (XXH_likely(len > 8))
+      return XXH3_len_9to16_64b(input, len, secret, seed);
+    if (XXH_likely(len >= 4))
+      return XXH3_len_4to8_64b(input, len, secret, seed);
+    if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
+    return XXH64_avalanche(
+        seed ^ (XXH_readLE64(secret + 56) ^ XXH_readLE64(secret + 64)));
+
+  }
+
+}
+
+/*
+ * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
+ * multiplication by zero, affecting hashes of lengths 17 to 240.
+ *
+ * However, they are very unlikely.
+ *
+ * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
+ * unseeded non-cryptographic hashes, it does not attempt to defend itself
+ * against specially crafted inputs, only random inputs.
+ *
+ * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes
+ * cancelling out the secret is taken an arbitrary number of times (addressed
+ * in XXH3_accumulate_512), this collision is very unlikely with random inputs
+ * and/or proper seeding:
+ *
+ * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
+ * function that is only called up to 16 times per hash with up to 240 bytes of
+ * input.
+ *
+ * This is not too bad for a non-cryptographic hash function, especially with
+ * only 64 bit outputs.
+ *
+ * The 128-bit variant (which trades some speed for strength) is NOT affected
+ * by this, although it is always a good idea to use a proper seed if you care
+ * about strength.
+ */
+XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8 *XXH_RESTRICT input,
+                                     const xxh_u8 *XXH_RESTRICT secret,
+                                     xxh_u64                    seed64) {
+
+    #if defined(__GNUC__) && !defined(__clang__)  /* GCC, not Clang */ \
+        && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */     \
+        &&                                                             \
+        !defined(                                                      \
+            XXH_ENABLE_AUTOVECTORIZE)  /* Define to disable like XXH32 hack */
+  /*
+   * UGLY HACK:
+   * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
+   * slower code.
+   *
+   * By forcing seed64 into a register, we disrupt the cost model and
+   * cause it to scalarize. See `XXH32_round()`
+   *
+   * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
+   * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
+   * GCC 9.2, despite both emitting scalar code.
+   *
+   * GCC generates much better scalar code than Clang for the rest of XXH3,
+   * which is why finding a more optimal codepath is an interest.
+   */
+  __asm__("" : "+r"(seed64));
+    #endif
+  {
+
+    xxh_u64 const input_lo = XXH_readLE64(input);
+    xxh_u64 const input_hi = XXH_readLE64(input + 8);
+    return XXH3_mul128_fold64(input_lo ^ (XXH_readLE64(secret) + seed64),
+                              input_hi ^ (XXH_readLE64(secret + 8) - seed64));
+
+  }
+
+}
+
+/* For mid-range keys, XXH3 uses a Mum-hash variant. */
+XXH_FORCE_INLINE XXH64_hash_t XXH3_len_17to128_64b(
+    const xxh_u8 *XXH_RESTRICT input, size_t len,
+    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) {
+
+  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+  (void)secretSize;
+  XXH_ASSERT(16 < len && len <= 128);
+
+  {
+
+    xxh_u64 acc = len * XXH_PRIME64_1;
+    if (len > 32) {
+
+      if (len > 64) {
+
+        if (len > 96) {
+
+          acc += XXH3_mix16B(input + 48, secret + 96, seed);
+          acc += XXH3_mix16B(input + len - 64, secret + 112, seed);
+
+        }
+
+        acc += XXH3_mix16B(input + 32, secret + 64, seed);
+        acc += XXH3_mix16B(input + len - 48, secret + 80, seed);
+
+      }
+
+      acc += XXH3_mix16B(input + 16, secret + 32, seed);
+      acc += XXH3_mix16B(input + len - 32, secret + 48, seed);
+
+    }
+
+    acc += XXH3_mix16B(input + 0, secret + 0, seed);
+    acc += XXH3_mix16B(input + len - 16, secret + 16, seed);
+
+    return XXH3_avalanche(acc);
+
+  }
+
+}
+
+    #define XXH3_MIDSIZE_MAX 240
+
+XXH_NO_INLINE XXH64_hash_t XXH3_len_129to240_64b(
+    const xxh_u8 *XXH_RESTRICT input, size_t len,
+    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) {
+
+  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+  (void)secretSize;
+  XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+    #define XXH3_MIDSIZE_STARTOFFSET 3
+    #define XXH3_MIDSIZE_LASTOFFSET 17
+
+  {
+
+    xxh_u64   acc = len * XXH_PRIME64_1;
+    int const nbRounds = (int)len / 16;
+    int       i;
+    for (i = 0; i < 8; i++) {
+
+      acc += XXH3_mix16B(input + (16 * i), secret + (16 * i), seed);
+
+    }
+
+    acc = XXH3_avalanche(acc);
+    XXH_ASSERT(nbRounds >= 8);
+    #if defined(__clang__)                                /* Clang */ \
+        && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
+        && !defined(XXH_ENABLE_AUTOVECTORIZE)          /* Define to disable */
+      /*
+       * UGLY HACK:
+       * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
+       * Everywhere else, it uses scalar code.
+       *
+       * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
+       * would still be slower than UMAAL (see XXH_mult64to128).
+       *
+       * Unfortunately, Clang doesn't handle the long multiplies properly and
+       * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
+       * scalarized into an ugly mess of VMOV.32 instructions.
+       *
+       * This mess is difficult to avoid without turning autovectorization
+       * off completely, but these cases are usually relatively minor and/or
+       * not worth fixing.
+       *
+       * This loop is the easiest to fix, as unlike XXH32, this pragma
+       * _actually works_ because it is a loop vectorization instead of an
+       * SLP vectorization.
+       */
+      #pragma clang loop vectorize(disable)
+    #endif
+    for (i = 8; i < nbRounds; i++) {
+
+      acc +=
+          XXH3_mix16B(input + (16 * i),
+                      secret + (16 * (i - 8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
+
+    }
+
+    /* last bytes */
+    acc += XXH3_mix16B(input + len - 16,
+                       secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET,
+                       seed);
+    return XXH3_avalanche(acc);
+
+  }
+
+}
+
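+/*
+ * Worked example (editor's note, not upstream): for len == 240,
+ * nbRounds == 240 / 16 == 15. The first 8 rounds above consume
+ * secret[0..127], XXH3_avalanche() then decorrelates the accumulator, and
+ * rounds 8..14 consume secret[3..114] (shifted by XXH3_MIDSIZE_STARTOFFSET).
+ * The final mix16B always covers the last 16 input bytes, so no tail byte
+ * is ever dropped.
+ */
+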
+  /* =======     Long Keys     ======= */
+
+    #define XXH_STRIPE_LEN 64
+    #define XXH_SECRET_CONSUME_RATE \
+      8                 /* nb of secret bytes consumed at each accumulation */
+    #define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
+
+    #ifdef XXH_OLD_NAMES
+      #define STRIPE_LEN XXH_STRIPE_LEN
+      #define ACC_NB XXH_ACC_NB
+    #endif
+
+XXH_FORCE_INLINE void XXH_writeLE64(void *dst, xxh_u64 v64) {
+
+  if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
+  memcpy(dst, &v64, sizeof(v64));
+
+}
+
+    /* Several intrinsic functions below are supposed to accept __int64 as
+     * argument, as documented in
+     * https://software.intel.com/sites/landingpage/IntrinsicsGuide/ . However,
+     * several environments do not define __int64 type, requiring a workaround.
+     */
+    #if !defined(__VMS) &&                                     \
+        (defined(__cplusplus) || (defined(__STDC_VERSION__) && \
+                                  (__STDC_VERSION__ >= 199901L) /* C99 */))
+typedef int64_t xxh_i64;
+    #else
+/* the following type must have a width of 64-bit */
+typedef long long xxh_i64;
+    #endif
+
+  /*
+   * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the
+   * most optimized.
+   *
+   * It is a hardened version of UMAC, based on FARSH's implementation.
+   *
+   * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
+   * implementations, and it is ridiculously fast.
+   *
+   * We harden it by mixing the original input to the accumulators as well as
+   * the product.
+   *
+   * This means that in the (relatively likely) case of a multiply by zero, the
+   * original input is preserved.
+   *
+   * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
+   * cross-pollination, as otherwise the upper and lower halves would be
+   * essentially independent.
+   *
+   * This doesn't matter on 64-bit hashes since they all get merged together in
+   * the end, so we skip the extra step.
+   *
+   * Both XXH3_64bits and XXH3_128bits use this subroutine.
+   */
+
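+  /*
+   * Editor's sketch (informative only; see XXH3_accumulate_512_scalar below
+   * for the reference code): one accumulation round, per 64-bit lane i, is
+   *
+   *   data_key    = input64[i] ^ secret64[i];
+   *   acc[i ^ 1] += input64[i];                 // preserve the raw input
+   *   acc[i]     += (u64)(u32)data_key * (u32)(data_key >> 32);
+   *
+   * The `i ^ 1` swap is the 64-bit-pair cross-pollination described above.
+   */
+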
+    #if (XXH_VECTOR == XXH_AVX512) || defined(XXH_X86DISPATCH)
+
+      #ifndef XXH_TARGET_AVX512
+        #define XXH_TARGET_AVX512               /* disable attribute target */
+      #endif
+
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_accumulate_512_avx512(
+    void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
+    const void *XXH_RESTRICT secret) {
+
+  XXH_ALIGN(64) __m512i *const xacc = (__m512i *)acc;
+  XXH_ASSERT((((size_t)acc) & 63) == 0);
+  XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
+
+  {
+
+    /* data_vec    = input[0]; */
+    __m512i const data_vec = _mm512_loadu_si512(input);
+    /* key_vec     = secret[0]; */
+    __m512i const key_vec = _mm512_loadu_si512(secret);
+    /* data_key    = data_vec ^ key_vec; */
+    __m512i const data_key = _mm512_xor_si512(data_vec, key_vec);
+    /* data_key_lo = data_key >> 32; */
+    __m512i const data_key_lo =
+        _mm512_shuffle_epi32(data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
+    /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+    __m512i const product = _mm512_mul_epu32(data_key, data_key_lo);
+    /* xacc[0] += swap(data_vec); */
+    __m512i const data_swap =
+        _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
+    __m512i const sum = _mm512_add_epi64(*xacc, data_swap);
+    /* xacc[0] += product; */
+    *xacc = _mm512_add_epi64(product, sum);
+
+  }
+
+}
+
+/*
+ * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
+ *
+ * Multiplication isn't perfect, as explained by Google in HighwayHash:
+ *
+ *  // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
+ *  // varying degrees. In descending order of goodness, bytes
+ *  // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
+ *  // As expected, the upper and lower bytes are much worse.
+ *
+ * Source:
+ * https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
+ *
+ * Since our algorithm uses a pseudorandom secret to add some variance into the
+ * mix, we don't need to (or want to) mix as often or as much as HighwayHash
+ * does.
+ *
+ * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
+ * extraction.
+ *
+ * Both XXH3_64bits and XXH3_128bits use this subroutine.
+ */
+
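+/*
+ * Editor's sketch (informative only; see XXH3_scrambleAcc_scalar below):
+ * per 64-bit lane, the scramble is an xorshift, a secret xor, and a
+ * multiply by an odd 32-bit prime:
+ *
+ *   acc[i] ^= acc[i] >> 47;
+ *   acc[i] ^= secret64[i];
+ *   acc[i] *= XXH_PRIME32_1;
+ */
+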
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_scrambleAcc_avx512(
+    void *XXH_RESTRICT acc, const void *XXH_RESTRICT secret) {
+
+  XXH_ASSERT((((size_t)acc) & 63) == 0);
+  XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
+  {
+
+    XXH_ALIGN(64) __m512i *const xacc = (__m512i *)acc;
+    const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
+
+    /* xacc[0] ^= (xacc[0] >> 47) */
+    __m512i const acc_vec = *xacc;
+    __m512i const shifted = _mm512_srli_epi64(acc_vec, 47);
+    __m512i const data_vec = _mm512_xor_si512(acc_vec, shifted);
+    /* xacc[0] ^= secret; */
+    __m512i const key_vec = _mm512_loadu_si512(secret);
+    __m512i const data_key = _mm512_xor_si512(data_vec, key_vec);
+
+    /* xacc[0] *= XXH_PRIME32_1; */
+    __m512i const data_key_hi =
+        _mm512_shuffle_epi32(data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
+    __m512i const prod_lo = _mm512_mul_epu32(data_key, prime32);
+    __m512i const prod_hi = _mm512_mul_epu32(data_key_hi, prime32);
+    *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
+
+  }
+
+}
+
+XXH_FORCE_INLINE XXH_TARGET_AVX512 void XXH3_initCustomSecret_avx512(
+    void *XXH_RESTRICT customSecret, xxh_u64 seed64) {
+
+  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
+  XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
+  XXH_ASSERT(((size_t)customSecret & 63) == 0);
+  (void)(&XXH_writeLE64);
+  {
+
+    int const     nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
+    __m512i const seed = _mm512_mask_set1_epi64(
+        _mm512_set1_epi64((xxh_i64)seed64), 0xAA, -(xxh_i64)seed64);
+
+    XXH_ALIGN(64) const __m512i *const src = (const __m512i *)XXH3_kSecret;
+    XXH_ALIGN(64) __m512i *const       dest = (__m512i *)customSecret;
+    int                                i;
+    for (i = 0; i < nbRounds; ++i) {
+
+      /* GCC has a bug: _mm512_stream_load_si512 accepts 'void *', not
+       * 'void const *', so passing a const pointer would warn "discards
+       * 'const' qualifier". */
+      union {
+
+        XXH_ALIGN(64) const __m512i *cp;
+        XXH_ALIGN(64) void *p;
+
+      } remote_const_void;
+
+      remote_const_void.cp = src + i;
+      dest[i] =
+          _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
+
+    }
+
+  }
+
+}
+
+    #endif
+
+    #if (XXH_VECTOR == XXH_AVX2) || defined(XXH_X86DISPATCH)
+
+      #ifndef XXH_TARGET_AVX2
+        #define XXH_TARGET_AVX2                 /* disable attribute target */
+      #endif
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_accumulate_512_avx2(
+    void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
+    const void *XXH_RESTRICT secret) {
+
+  XXH_ASSERT((((size_t)acc) & 31) == 0);
+  {
+
+    XXH_ALIGN(32) __m256i *const xacc = (__m256i *)acc;
+    /* Unaligned. This is mainly for pointer arithmetic, and because
+     * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+    const __m256i *const xinput = (const __m256i *)input;
+    /* Unaligned. This is mainly for pointer arithmetic, and because
+     * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+    const __m256i *const xsecret = (const __m256i *)secret;
+
+    size_t i;
+    for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m256i); i++) {
+
+      /* data_vec    = xinput[i]; */
+      __m256i const data_vec = _mm256_loadu_si256(xinput + i);
+      /* key_vec     = xsecret[i]; */
+      __m256i const key_vec = _mm256_loadu_si256(xsecret + i);
+      /* data_key    = data_vec ^ key_vec; */
+      __m256i const data_key = _mm256_xor_si256(data_vec, key_vec);
+      /* data_key_lo = data_key >> 32; */
+      __m256i const data_key_lo =
+          _mm256_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
+      /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+      __m256i const product = _mm256_mul_epu32(data_key, data_key_lo);
+      /* xacc[i] += swap(data_vec); */
+      __m256i const data_swap =
+          _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
+      __m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
+      /* xacc[i] += product; */
+      xacc[i] = _mm256_add_epi64(product, sum);
+
+    }
+
+  }
+
+}
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_scrambleAcc_avx2(
+    void *XXH_RESTRICT acc, const void *XXH_RESTRICT secret) {
+
+  XXH_ASSERT((((size_t)acc) & 31) == 0);
+  {
+
+    XXH_ALIGN(32) __m256i *const xacc = (__m256i *)acc;
+    /* Unaligned. This is mainly for pointer arithmetic, and because
+     * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
+    const __m256i *const xsecret = (const __m256i *)secret;
+    const __m256i        prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
+
+    size_t i;
+    for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m256i); i++) {
+
+      /* xacc[i] ^= (xacc[i] >> 47) */
+      __m256i const acc_vec = xacc[i];
+      __m256i const shifted = _mm256_srli_epi64(acc_vec, 47);
+      __m256i const data_vec = _mm256_xor_si256(acc_vec, shifted);
+      /* xacc[i] ^= xsecret; */
+      __m256i const key_vec = _mm256_loadu_si256(xsecret + i);
+      __m256i const data_key = _mm256_xor_si256(data_vec, key_vec);
+
+      /* xacc[i] *= XXH_PRIME32_1; */
+      __m256i const data_key_hi =
+          _mm256_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
+      __m256i const prod_lo = _mm256_mul_epu32(data_key, prime32);
+      __m256i const prod_hi = _mm256_mul_epu32(data_key_hi, prime32);
+      xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
+
+    }
+
+  }
+
+}
+
+XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(
+    void *XXH_RESTRICT customSecret, xxh_u64 seed64) {
+
+  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
+  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
+  XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
+  (void)(&XXH_writeLE64);
+  XXH_PREFETCH(customSecret);
+  {
+
+    __m256i const seed = _mm256_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64,
+                                           -(xxh_i64)seed64, (xxh_i64)seed64);
+
+    XXH_ALIGN(64) const __m256i *const src = (const __m256i *)XXH3_kSecret;
+    XXH_ALIGN(64) __m256i *            dest = (__m256i *)customSecret;
+
+      #if defined(__GNUC__) || defined(__clang__)
+    /*
+     * On GCC & Clang, marking 'dest' as modified causes the compiler to:
+     *   - not extract the secret from SSE registers in the internal loop
+     *   - use fewer common registers, and avoid pushing these registers
+     *     onto the stack
+     * The asm hack makes Clang assume that XXH3_kSecretPtr aliases with
+     * customSecret; on aarch64, this prevented LDP from merging two
+     * loads together for free. Putting the loads together before the stores
+     * properly generates LDP.
+     */
+    __asm__("" : "+r"(dest));
+      #endif
+
+    /* GCC at -O2 needs the loop unrolled manually */
+    dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src + 0), seed);
+    dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src + 1), seed);
+    dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src + 2), seed);
+    dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src + 3), seed);
+    dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src + 4), seed);
+    dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src + 5), seed);
+
+  }
+
+}
+
+    #endif
+
+    #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
+
+      #ifndef XXH_TARGET_SSE2
+        #define XXH_TARGET_SSE2                 /* disable attribute target */
+      #endif
+
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_accumulate_512_sse2(
+    void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
+    const void *XXH_RESTRICT secret) {
+
+  /* SSE2 is just a half-scale version of the AVX2 version. */
+  XXH_ASSERT((((size_t)acc) & 15) == 0);
+  {
+
+    XXH_ALIGN(16) __m128i *const xacc = (__m128i *)acc;
+    /* Unaligned. This is mainly for pointer arithmetic, and because
+     * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+    const __m128i *const xinput = (const __m128i *)input;
+    /* Unaligned. This is mainly for pointer arithmetic, and because
+     * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+    const __m128i *const xsecret = (const __m128i *)secret;
+
+    size_t i;
+    for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m128i); i++) {
+
+      /* data_vec    = xinput[i]; */
+      __m128i const data_vec = _mm_loadu_si128(xinput + i);
+      /* key_vec     = xsecret[i]; */
+      __m128i const key_vec = _mm_loadu_si128(xsecret + i);
+      /* data_key    = data_vec ^ key_vec; */
+      __m128i const data_key = _mm_xor_si128(data_vec, key_vec);
+      /* data_key_lo = data_key >> 32; */
+      __m128i const data_key_lo =
+          _mm_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
+      /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
+      __m128i const product = _mm_mul_epu32(data_key, data_key_lo);
+      /* xacc[i] += swap(data_vec); */
+      __m128i const data_swap =
+          _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
+      __m128i const sum = _mm_add_epi64(xacc[i], data_swap);
+      /* xacc[i] += product; */
+      xacc[i] = _mm_add_epi64(product, sum);
+
+    }
+
+  }
+
+}
+
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_scrambleAcc_sse2(
+    void *XXH_RESTRICT acc, const void *XXH_RESTRICT secret) {
+
+  XXH_ASSERT((((size_t)acc) & 15) == 0);
+  {
+
+    XXH_ALIGN(16) __m128i *const xacc = (__m128i *)acc;
+    /* Unaligned. This is mainly for pointer arithmetic, and because
+     * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
+    const __m128i *const xsecret = (const __m128i *)secret;
+    const __m128i        prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
+
+    size_t i;
+    for (i = 0; i < XXH_STRIPE_LEN / sizeof(__m128i); i++) {
+
+      /* xacc[i] ^= (xacc[i] >> 47) */
+      __m128i const acc_vec = xacc[i];
+      __m128i const shifted = _mm_srli_epi64(acc_vec, 47);
+      __m128i const data_vec = _mm_xor_si128(acc_vec, shifted);
+      /* xacc[i] ^= xsecret[i]; */
+      __m128i const key_vec = _mm_loadu_si128(xsecret + i);
+      __m128i const data_key = _mm_xor_si128(data_vec, key_vec);
+
+      /* xacc[i] *= XXH_PRIME32_1; */
+      __m128i const data_key_hi =
+          _mm_shuffle_epi32(data_key, _MM_SHUFFLE(0, 3, 0, 1));
+      __m128i const prod_lo = _mm_mul_epu32(data_key, prime32);
+      __m128i const prod_hi = _mm_mul_epu32(data_key_hi, prime32);
+      xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
+
+    }
+
+  }
+
+}
+
+XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(
+    void *XXH_RESTRICT customSecret, xxh_u64 seed64) {
+
+  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+  (void)(&XXH_writeLE64);
+  {
+
+    int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
+
+      #if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
+    // MSVC in 32-bit mode does not support _mm_set_epi64x before VS2015
+    XXH_ALIGN(16)
+    const xxh_i64 seed64x2[2] = {(xxh_i64)seed64, -(xxh_i64)seed64};
+    __m128i const seed = _mm_load_si128((__m128i const *)seed64x2);
+      #else
+    __m128i const seed = _mm_set_epi64x(-(xxh_i64)seed64, (xxh_i64)seed64);
+      #endif
+    int i;
+
+    XXH_ALIGN(64) const float *const  src = (float const *)XXH3_kSecret;
+    XXH_ALIGN(XXH_SEC_ALIGN) __m128i *dest = (__m128i *)customSecret;
+      #if defined(__GNUC__) || defined(__clang__)
+    /*
+     * On GCC & Clang, marking 'dest' as modified causes the compiler to:
+     *   - not extract the secret from SSE registers in the internal loop
+     *   - use fewer common registers, and avoid pushing these registers
+     *     onto the stack
+     */
+    __asm__("" : "+r"(dest));
+      #endif
+
+    for (i = 0; i < nbRounds; ++i) {
+
+      dest[i] = _mm_add_epi64(_mm_castps_si128(_mm_load_ps(src + i * 4)), seed);
+
+    }
+
+  }
+
+}
+
+    #endif
+
+    #if (XXH_VECTOR == XXH_NEON)
+
+XXH_FORCE_INLINE void XXH3_accumulate_512_neon(
+    void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
+    const void *XXH_RESTRICT secret) {
+
+  XXH_ASSERT((((size_t)acc) & 15) == 0);
+  {
+
+    XXH_ALIGN(16) uint64x2_t *const xacc = (uint64x2_t *)acc;
+    /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7.
+     */
+    uint8_t const *const xinput = (const uint8_t *)input;
+    uint8_t const *const xsecret = (const uint8_t *)secret;
+
+    size_t i;
+    for (i = 0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) {
+
+      /* data_vec = xinput[i]; */
+      uint8x16_t data_vec = vld1q_u8(xinput + (i * 16));
+      /* key_vec  = xsecret[i];  */
+      uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16));
+      uint64x2_t data_key;
+      uint32x2_t data_key_lo, data_key_hi;
+      /* xacc[i] += swap(data_vec); */
+      uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec);
+      uint64x2_t const swapped = vextq_u64(data64, data64, 1);
+      xacc[i] = vaddq_u64(xacc[i], swapped);
+      /* data_key = data_vec ^ key_vec; */
+      data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
+      /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
+       * data_key_hi = (uint32x2_t) (data_key >> 32);
+       * data_key = UNDEFINED; */
+      XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
+      /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
+      xacc[i] = vmlal_u32(xacc[i], data_key_lo, data_key_hi);
+
+    }
+
+  }
+
+}
+
+XXH_FORCE_INLINE void XXH3_scrambleAcc_neon(void *XXH_RESTRICT       acc,
+                                            const void *XXH_RESTRICT secret) {
+
+  XXH_ASSERT((((size_t)acc) & 15) == 0);
+
+  {
+
+    uint64x2_t *   xacc = (uint64x2_t *)acc;
+    uint8_t const *xsecret = (uint8_t const *)secret;
+    uint32x2_t     prime = vdup_n_u32(XXH_PRIME32_1);
+
+    size_t i;
+    for (i = 0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) {
+
+      /* xacc[i] ^= (xacc[i] >> 47); */
+      uint64x2_t acc_vec = xacc[i];
+      uint64x2_t shifted = vshrq_n_u64(acc_vec, 47);
+      uint64x2_t data_vec = veorq_u64(acc_vec, shifted);
+
+      /* xacc[i] ^= xsecret[i]; */
+      uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16));
+      uint64x2_t data_key = veorq_u64(data_vec, vreinterpretq_u64_u8(key_vec));
+
+      /* xacc[i] *= XXH_PRIME32_1 */
+      uint32x2_t data_key_lo, data_key_hi;
+      /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
+       * data_key_hi = (uint32x2_t) (xacc[i] >> 32);
+       * xacc[i] = UNDEFINED; */
+      XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
+      { /*
+         * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
+         *
+         * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
+         * incorrectly "optimize" this:
+         *   tmp     = vmul_u32(vmovn_u64(a), vmovn_u64(b));
+         *   shifted = vshll_n_u32(tmp, 32);
+         * to this:
+         *   tmp     = "vmulq_u64"(a, b); // no such thing!
+         *   shifted = vshlq_n_u64(tmp, 32);
+         *
+         * However, unlike SSE, Clang lacks a 64-bit multiply routine
+         * for NEON, and it scalarizes two 64-bit multiplies instead.
+         *
+         * vmull_u32 has the same timing as vmul_u32, and it avoids
+         * this bug completely.
+         * See https://bugs.llvm.org/show_bug.cgi?id=39967
+         */
+        uint64x2_t prod_hi = vmull_u32(data_key_hi, prime);
+        /* xacc[i] = prod_hi << 32; */
+        xacc[i] = vshlq_n_u64(prod_hi, 32);
+        /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */
+        xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
+
+      }
+
+    }
+
+  }
+
+}
+
+    #endif
+
+    #if (XXH_VECTOR == XXH_VSX)
+
+XXH_FORCE_INLINE void XXH3_accumulate_512_vsx(void *XXH_RESTRICT       acc,
+                                              const void *XXH_RESTRICT input,
+                                              const void *XXH_RESTRICT secret) {
+
+  xxh_u64x2 *const       xacc = (xxh_u64x2 *)acc;       /* presumed aligned */
+  xxh_u64x2 const *const xinput =
+      (xxh_u64x2 const *)input;                 /* no alignment restriction */
+  xxh_u64x2 const *const xsecret =
+      (xxh_u64x2 const *)secret;                /* no alignment restriction */
+  xxh_u64x2 const v32 = {32, 32};
+  size_t          i;
+  for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
+
+    /* data_vec = xinput[i]; */
+    xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
+    /* key_vec = xsecret[i]; */
+    xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
+    xxh_u64x2 const data_key = data_vec ^ key_vec;
+    /* shuffled = (data_key << 32) | (data_key >> 32); */
+    xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
+    /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled &
+     * 0xFFFFFFFF); */
+    xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
+    xacc[i] += product;
+
+        /* swap high and low halves */
+      #ifdef __s390x__
+    xacc[i] += vec_permi(data_vec, data_vec, 2);
+      #else
+    xacc[i] += vec_xxpermdi(data_vec, data_vec, 2);
+      #endif
+
+  }
+
+}
+
+XXH_FORCE_INLINE void XXH3_scrambleAcc_vsx(void *XXH_RESTRICT       acc,
+                                           const void *XXH_RESTRICT secret) {
+
+  XXH_ASSERT((((size_t)acc) & 15) == 0);
+
+  {
+
+    xxh_u64x2 *const       xacc = (xxh_u64x2 *)acc;
+    const xxh_u64x2 *const xsecret = (const xxh_u64x2 *)secret;
+    /* constants */
+    xxh_u64x2 const v32 = {32, 32};
+    xxh_u64x2 const v47 = {47, 47};
+    xxh_u32x4 const prime = {XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1,
+                             XXH_PRIME32_1};
+    size_t          i;
+    for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
+
+      /* xacc[i] ^= (xacc[i] >> 47); */
+      xxh_u64x2 const acc_vec = xacc[i];
+      xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
+
+      /* xacc[i] ^= xsecret[i]; */
+      xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
+      xxh_u64x2 const data_key = data_vec ^ key_vec;
+
+      /* xacc[i] *= XXH_PRIME32_1 */
+      /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime &
+       * 0xFFFFFFFF);  */
+      xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
+      /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32);  */
+      xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
+      xacc[i] = prod_odd + (prod_even << v32);
+
+    }
+
+  }
+
+}
+
+    #endif
+
+/* scalar variants - universal */
+
+XXH_FORCE_INLINE void XXH3_accumulate_512_scalar(
+    void *XXH_RESTRICT acc, const void *XXH_RESTRICT input,
+    const void *XXH_RESTRICT secret) {
+
+  XXH_ALIGN(XXH_ACC_ALIGN)
+  xxh_u64 *const      xacc = (xxh_u64 *)acc;            /* presumed aligned */
+  const xxh_u8 *const xinput =
+      (const xxh_u8 *)input;                    /* no alignment restriction */
+  const xxh_u8 *const xsecret =
+      (const xxh_u8 *)secret;                   /* no alignment restriction */
+  size_t i;
+  XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN - 1)) == 0);
+  for (i = 0; i < XXH_ACC_NB; i++) {
+
+    xxh_u64 const data_val = XXH_readLE64(xinput + 8 * i);
+    xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i * 8);
+    xacc[i ^ 1] += data_val;                         /* swap adjacent lanes */
+    xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
+
+  }
+
+}
+
+XXH_FORCE_INLINE void XXH3_scrambleAcc_scalar(void *XXH_RESTRICT       acc,
+                                              const void *XXH_RESTRICT secret) {
+
+  XXH_ALIGN(XXH_ACC_ALIGN)
+  xxh_u64 *const      xacc = (xxh_u64 *)acc;            /* presumed aligned */
+  const xxh_u8 *const xsecret =
+      (const xxh_u8 *)secret;                   /* no alignment restriction */
+  size_t i;
+  XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN - 1)) == 0);
+  for (i = 0; i < XXH_ACC_NB; i++) {
+
+    xxh_u64 const key64 = XXH_readLE64(xsecret + 8 * i);
+    xxh_u64       acc64 = xacc[i];
+    acc64 = XXH_xorshift64(acc64, 47);
+    acc64 ^= key64;
+    acc64 *= XXH_PRIME32_1;
+    xacc[i] = acc64;
+
+  }
+
+}
+
+XXH_FORCE_INLINE void XXH3_initCustomSecret_scalar(
+    void *XXH_RESTRICT customSecret, xxh_u64 seed64) {
+
+  /*
+   * We need a separate pointer for the hack below,
+   * which requires a non-const pointer.
+   * Any decent compiler will optimize this out otherwise.
+   */
+  const xxh_u8 *kSecretPtr = XXH3_kSecret;
+  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
+
+    #if defined(__clang__) && defined(__aarch64__)
+  /*
+   * UGLY HACK:
+   * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
+   * placed sequentially, in order, at the top of the unrolled loop.
+   *
+   * While MOVK is great for generating constants (2 cycles for a 64-bit
+   * constant compared to 4 cycles for LDR), long MOVK chains stall the
+   * integer pipelines:
+   *   I   L   S
+   * MOVK
+   * MOVK
+   * MOVK
+   * MOVK
+   * ADD
+   * SUB      STR
+   *          STR
+   * By forcing loads from memory (as the asm line causes Clang to assume
+   * that XXH3_kSecretPtr has been changed), the pipelines are used more
+   * efficiently:
+   *   I   L   S
+   *      LDR
+   *  ADD LDR
+   *  SUB     STR
+   *          STR
+   * XXH3_64bits_withSeed, len == 256, Snapdragon 835
+   *   without hack: 2654.4 MB/s
+   *   with hack:    3202.9 MB/s
+   */
+  __asm__("" : "+r"(kSecretPtr));
+    #endif
+  /*
+   * Note: in debug mode, this overrides the asm optimization
+   * and Clang will emit MOVK chains again.
+   */
+  XXH_ASSERT(kSecretPtr == XXH3_kSecret);
+
+  {
+
+    int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
+    int       i;
+    for (i = 0; i < nbRounds; i++) {
+
+      /*
+       * The asm hack causes Clang to assume that kSecretPtr aliases with
+       * customSecret, and on aarch64, this prevented LDP from merging two
+       * loads together for free. Putting the loads together before the stores
+       * properly generates LDP.
+       */
+      xxh_u64 lo = XXH_readLE64(kSecretPtr + 16 * i) + seed64;
+      xxh_u64 hi = XXH_readLE64(kSecretPtr + 16 * i + 8) - seed64;
+      XXH_writeLE64((xxh_u8 *)customSecret + 16 * i, lo);
+      XXH_writeLE64((xxh_u8 *)customSecret + 16 * i + 8, hi);
+
+    }
+
+  }
+
+}
+
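+/*
+ * Editor's note (not upstream): the derived secret is just the default
+ * secret with the seed folded into each 16-byte pair:
+ *
+ *   customSecret64[2 * i]     = kSecret64[2 * i]     + seed;
+ *   customSecret64[2 * i + 1] = kSecret64[2 * i + 1] - seed;
+ *
+ * With seed == 0 this reproduces XXH3_kSecret exactly, which is why the
+ * seed == 0 paths can skip secret generation entirely.
+ */
+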
+typedef void (*XXH3_f_accumulate_512)(void *XXH_RESTRICT, const void *,
+                                      const void *);
+typedef void (*XXH3_f_scrambleAcc)(void *XXH_RESTRICT, const void *);
+typedef void (*XXH3_f_initCustomSecret)(void *XXH_RESTRICT, xxh_u64);
+
+    #if (XXH_VECTOR == XXH_AVX512)
+
+      #define XXH3_accumulate_512 XXH3_accumulate_512_avx512
+      #define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
+      #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
+
+    #elif (XXH_VECTOR == XXH_AVX2)
+
+      #define XXH3_accumulate_512 XXH3_accumulate_512_avx2
+      #define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
+      #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
+
+    #elif (XXH_VECTOR == XXH_SSE2)
+
+      #define XXH3_accumulate_512 XXH3_accumulate_512_sse2
+      #define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
+      #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
+
+    #elif (XXH_VECTOR == XXH_NEON)
+
+      #define XXH3_accumulate_512 XXH3_accumulate_512_neon
+      #define XXH3_scrambleAcc XXH3_scrambleAcc_neon
+      #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+    #elif (XXH_VECTOR == XXH_VSX)
+
+      #define XXH3_accumulate_512 XXH3_accumulate_512_vsx
+      #define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
+      #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+    #else                                                         /* scalar */
+
+      #define XXH3_accumulate_512 XXH3_accumulate_512_scalar
+      #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
+      #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
+
+    #endif
+
+    #ifndef XXH_PREFETCH_DIST
+      #ifdef __clang__
+        #define XXH_PREFETCH_DIST 320
+      #else
+        #if (XXH_VECTOR == XXH_AVX512)
+          #define XXH_PREFETCH_DIST 512
+        #else
+          #define XXH_PREFETCH_DIST 384
+        #endif
+      #endif                                                   /* __clang__ */
+    #endif                                             /* XXH_PREFETCH_DIST */
+
+/*
+ * XXH3_accumulate()
+ * Loops over XXH3_accumulate_512().
+ * Assumption: nbStripes will not overflow the secret size
+ */
+XXH_FORCE_INLINE void XXH3_accumulate(xxh_u64 *XXH_RESTRICT      acc,
+                                      const xxh_u8 *XXH_RESTRICT input,
+                                      const xxh_u8 *XXH_RESTRICT secret,
+                                      size_t                     nbStripes,
+                                      XXH3_f_accumulate_512      f_acc512) {
+
+  size_t n;
+  for (n = 0; n < nbStripes; n++) {
+
+    const xxh_u8 *const in = input + n * XXH_STRIPE_LEN;
+    XXH_PREFETCH(in + XXH_PREFETCH_DIST);
+    f_acc512(acc, in, secret + n * XXH_SECRET_CONSUME_RATE);
+
+  }
+
+}
+
+XXH_FORCE_INLINE void XXH3_hashLong_internal_loop(
+    xxh_u64 *XXH_RESTRICT acc, const xxh_u8 *XXH_RESTRICT input, size_t len,
+    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize,
+    XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble) {
+
+  size_t const nbStripesPerBlock =
+      (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
+  size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
+  size_t const nb_blocks = (len - 1) / block_len;
+
+  size_t n;
+
+  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+
+  for (n = 0; n < nb_blocks; n++) {
+
+    XXH3_accumulate(acc, input + n * block_len, secret, nbStripesPerBlock,
+                    f_acc512);
+    f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
+
+  }
+
+  /* last partial block */
+  XXH_ASSERT(len > XXH_STRIPE_LEN);
+  {
+
+    size_t const nbStripes =
+        ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
+    XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
+    XXH3_accumulate(acc, input + nb_blocks * block_len, secret, nbStripes,
+                    f_acc512);
+
+    /* last stripe */
+    {
+
+      const xxh_u8 *const p = input + len - XXH_STRIPE_LEN;
+    #define XXH_SECRET_LASTACC_START \
+      7  /* not aligned on 8, last secret is different from acc & scrambler */
+      f_acc512(acc, p,
+               secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
+
+    }
+
+  }
+
+}
+
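+/*
+ * Worked numbers (editor's note): with the default 192-byte secret,
+ * nbStripesPerBlock = (192 - 64) / 8 = 16, so one block covers
+ * 16 * 64 = 1024 input bytes. Each block walks the secret at 8 bytes per
+ * stripe, then f_scramble re-mixes the accumulators with the last 64
+ * secret bytes before the next block restarts at secret[0].
+ */
+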
+XXH_FORCE_INLINE xxh_u64 XXH3_mix2Accs(const xxh_u64 *XXH_RESTRICT acc,
+                                       const xxh_u8 *XXH_RESTRICT  secret) {
+
+  return XXH3_mul128_fold64(acc[0] ^ XXH_readLE64(secret),
+                            acc[1] ^ XXH_readLE64(secret + 8));
+
+}
+
+static XXH64_hash_t XXH3_mergeAccs(const xxh_u64 *XXH_RESTRICT acc,
+                                   const xxh_u8 *XXH_RESTRICT  secret,
+                                   xxh_u64                     start) {
+
+  xxh_u64 result64 = start;
+  size_t  i = 0;
+
+  for (i = 0; i < 4; i++) {
+
+    result64 += XXH3_mix2Accs(acc + 2 * i, secret + 16 * i);
+    #if defined(__clang__)                                /* Clang */ \
+        && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
+        && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
+        && !defined(XXH_ENABLE_AUTOVECTORIZE)          /* Define to disable */
+    /*
+     * UGLY HACK:
+     * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
+     * the one in XXH3_len_129to240_64b. Speeds up the shorter keys just
+     * above 240 bytes.
+     * XXH3_64bits, len == 256, Snapdragon 835:
+     *   without hack: 2063.7 MB/s
+     *   with hack:    2560.7 MB/s
+     */
+    __asm__("" : "+r"(result64));
+    #endif
+
+  }
+
+  return XXH3_avalanche(result64);
+
+}
+
+    #define XXH3_INIT_ACC                                              \
+      {                                                                \
+                                                                       \
+        XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3,    \
+            XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 \
+                                                                       \
+      }
+
+XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_internal(
+    const void *XXH_RESTRICT input, size_t len, const void *XXH_RESTRICT secret,
+    size_t secretSize, XXH3_f_accumulate_512 f_acc512,
+    XXH3_f_scrambleAcc f_scramble) {
+
+  XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+  XXH3_hashLong_internal_loop(acc, (const xxh_u8 *)input, len,
+                              (const xxh_u8 *)secret, secretSize, f_acc512,
+                              f_scramble);
+
+  /* converge into final hash */
+  XXH_STATIC_ASSERT(sizeof(acc) == 64);
+    /* do not align on 8, so that the secret is different from the accumulator
+     */
+    #define XXH_SECRET_MERGEACCS_START 11
+  XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+  return XXH3_mergeAccs(acc,
+                        (const xxh_u8 *)secret + XXH_SECRET_MERGEACCS_START,
+                        (xxh_u64)len * XXH_PRIME64_1);
+
+}
+
+/*
+ * It's important for performance that XXH3_hashLong is not inlined.
+ */
+XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_withSecret(
+    const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
+    const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) {
+
+  (void)seed64;
+  return XXH3_hashLong_64b_internal(input, len, secret, secretLen,
+                                    XXH3_accumulate_512, XXH3_scrambleAcc);
+
+}
+
+/*
+ * It's important for performance that XXH3_hashLong is not inlined.
+ * Since the function is not inlined, the compiler may not be able to understand
+ * that, in some scenarios, its `secret` argument is actually a compile time
+ * constant. This variant enforces that the compiler can detect that, and uses
+ * this opportunity to streamline the generated code for better performance.
+ */
+XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_default(
+    const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
+    const xxh_u8 *XXH_RESTRICT secret, size_t secretLen) {
+
+  (void)seed64;
+  (void)secret;
+  (void)secretLen;
+  return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret,
+                                    sizeof(XXH3_kSecret), XXH3_accumulate_512,
+                                    XXH3_scrambleAcc);
+
+}
+
+/*
+ * XXH3_hashLong_64b_withSeed():
+ * Generate a custom key based on alteration of default XXH3_kSecret with the
+ * seed, and then use this key for long mode hashing.
+ *
+ * This operation is decently fast but nonetheless costs a little bit of time.
+ * Try to avoid it whenever possible (typically when seed==0).
+ *
+ * It's important for performance that XXH3_hashLong is not inlined. Not sure
+ * why (uop cache maybe?), but the difference is large and easily measurable.
+ */
+XXH_FORCE_INLINE XXH64_hash_t XXH3_hashLong_64b_withSeed_internal(
+    const void *input, size_t len, XXH64_hash_t seed,
+    XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble,
+    XXH3_f_initCustomSecret f_initSec) {
+
+  if (seed == 0)
+    return XXH3_hashLong_64b_internal(
+        input, len, XXH3_kSecret, sizeof(XXH3_kSecret), f_acc512, f_scramble);
+  {
+
+    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+    f_initSec(secret, seed);
+    return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
+                                      f_acc512, f_scramble);
+
+  }
+
+}
+
+/*
+ * It's important for performance that XXH3_hashLong is not inlined.
+ */
+XXH_NO_INLINE XXH64_hash_t XXH3_hashLong_64b_withSeed(const void *  input,
+                                                      size_t        len,
+                                                      XXH64_hash_t  seed,
+                                                      const xxh_u8 *secret,
+                                                      size_t        secretLen) {
+
+  (void)secret;
+  (void)secretLen;
+  return XXH3_hashLong_64b_withSeed_internal(
+      input, len, seed, XXH3_accumulate_512, XXH3_scrambleAcc,
+      XXH3_initCustomSecret);
+
+}
+
+typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void *XXH_RESTRICT, size_t,
+                                          XXH64_hash_t,
+                                          const xxh_u8 *XXH_RESTRICT, size_t);
+
+XXH_FORCE_INLINE XXH64_hash_t
+XXH3_64bits_internal(const void *XXH_RESTRICT input, size_t len,
+                     XXH64_hash_t seed64, const void *XXH_RESTRICT secret,
+                     size_t secretLen, XXH3_hashLong64_f f_hashLong) {
+
+  XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
+  /*
+   * If an action is to be taken if `secretLen` condition is not respected,
+   * it should be done here.
+   * For now, it's a contract pre-condition.
+   * Adding a check and a branch here would cost performance at every hash.
+   * Also, note that function signature doesn't offer room to return an error.
+   */
+  if (len <= 16)
+    return XXH3_len_0to16_64b((const xxh_u8 *)input, len,
+                              (const xxh_u8 *)secret, seed64);
+  if (len <= 128)
+    return XXH3_len_17to128_64b((const xxh_u8 *)input, len,
+                                (const xxh_u8 *)secret, secretLen, seed64);
+  if (len <= XXH3_MIDSIZE_MAX)
+    return XXH3_len_129to240_64b((const xxh_u8 *)input, len,
+                                 (const xxh_u8 *)secret, secretLen, seed64);
+  return f_hashLong(input, len, seed64, (const xxh_u8 *)secret, secretLen);
+
+}
+
+/* ===   Public entry point   === */
+
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void *input, size_t len) {
+
+  return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret),
+                              XXH3_hashLong_64b_default);
+
+}
+
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void *input,
+                                                   size_t      len,
+                                                   const void *secret,
+                                                   size_t      secretSize) {
+
+  return XXH3_64bits_internal(input, len, 0, secret, secretSize,
+                              XXH3_hashLong_64b_withSecret);
+
+}
+
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void *input, size_t len,
+                                                 XXH64_hash_t seed) {
+
+  return XXH3_64bits_internal(input, len, seed, XXH3_kSecret,
+                              sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
+
+}
+
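+/*
+ * Usage sketch (editor's note, illustrative only):
+ *
+ *   const char msg[] = "hello";
+ *   XXH64_hash_t h1 = XXH3_64bits(msg, sizeof(msg) - 1);
+ *   XXH64_hash_t h2 = XXH3_64bits_withSeed(msg, sizeof(msg) - 1, 42);
+ *
+ * The seedless form is fastest; as noted above, a nonzero seed on long
+ * inputs pays a one-off cost for custom secret generation.
+ */
+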
+/* ===   XXH3 streaming   === */
+
+/*
+ * Malloc's a pointer that is always aligned to align.
+ *
+ * This must be freed with `XXH_alignedFree()`.
+ *
+ * malloc typically guarantees 16-byte alignment on 64-bit systems and 8-byte
+ * alignment on 32-bit. This isn't enough for the 32-byte aligned loads in
+ * AVX2, nor, on 32-bit, for the 16-byte aligned loads in SSE2 and NEON.
+ *
+ * This underalignment previously caused a rather obvious crash which went
+ * completely unnoticed due to XXH3_createState() not actually being tested.
+ * Credit to RedSpah for noticing this bug.
+ *
+ * The alignment is done manually: functions like posix_memalign or _mm_malloc
+ * are avoided. To maintain portability, we would have to write a fallback
+ * like this anyway, and besides, testing for the existence of library
+ * functions without relying on external build tools is impossible.
+ *
+ * The method is simple: Overallocate, manually align, and store the offset
+ * to the original behind the returned pointer.
+ *
+ * Align must be a power of 2 and 8 <= align <= 128.
+ */
+static void *XXH_alignedMalloc(size_t s, size_t align) {
+
+  XXH_ASSERT(align <= 128 && align >= 8);                    /* range check */
+  XXH_ASSERT((align & (align - 1)) == 0);                     /* power of 2 */
+  XXH_ASSERT(s != 0 && s < (s + align));                  /* empty/overflow */
+  {  /* Overallocate to make room for manual realignment and an offset byte */
+    xxh_u8 *base = (xxh_u8 *)XXH_malloc(s + align);
+    if (base != NULL) {
+
+      /*
+       * Get the offset needed to align this pointer.
+       *
+       * Even if the returned pointer is aligned, there will always be
+       * at least one byte to store the offset to the original pointer.
+       */
+      size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
+      /* Add the offset for the now-aligned pointer */
+      xxh_u8 *ptr = base + offset;
+
+      XXH_ASSERT((size_t)ptr % align == 0);
+
+      /* Store the offset immediately before the returned pointer. */
+      ptr[-1] = (xxh_u8)offset;
+      return ptr;
+
+    }
+
+    return NULL;
+
+  }
+
+}
+
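+/*
+ * Worked example (editor's note): with align == 64 and a base pointer
+ * ending in 0x28, offset = 64 - 0x28 = 0x18, so the returned pointer ends
+ * in 0x40 (64-byte aligned) and ptr[-1] stores 0x18. If base is already
+ * aligned, offset == align and a full align-sized gap is left, so there is
+ * always at least one byte in front of the returned pointer for the offset.
+ */
+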
+/*
+ * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
+ * normal malloc'd pointers; XXH_alignedMalloc has a specific data layout.
+ */
+static void XXH_alignedFree(void *p) {
+
+  if (p != NULL) {
+
+    xxh_u8 *ptr = (xxh_u8 *)p;
+    /* Get the offset byte we added in XXH_alignedMalloc. */
+    xxh_u8 offset = ptr[-1];
+    /* Free the original malloc'd pointer */
+    xxh_u8 *base = ptr - offset;
+    XXH_free(base);
+
+  }
+
+}
+
+XXH_PUBLIC_API XXH3_state_t *XXH3_createState(void) {
+
+  XXH3_state_t *const state =
+      (XXH3_state_t *)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
+  if (state == NULL) return NULL;
+  XXH3_INITSTATE(state);
+  return state;
+
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t *statePtr) {
+
+  XXH_alignedFree(statePtr);
+  return XXH_OK;
+
+}
+
+XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t *      dst_state,
+                                   const XXH3_state_t *src_state) {
+
+  memcpy(dst_state, src_state, sizeof(*dst_state));
+
+}
+
+static void XXH3_64bits_reset_internal(XXH3_state_t *statePtr,
+                                       XXH64_hash_t seed, const void *secret,
+                                       size_t secretSize) {
+
+  size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
+  size_t const initLength =
+      offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
+  XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
+  XXH_ASSERT(statePtr != NULL);
+  /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
+  memset((char *)statePtr + initStart, 0, initLength);
+  statePtr->acc[0] = XXH_PRIME32_3;
+  statePtr->acc[1] = XXH_PRIME64_1;
+  statePtr->acc[2] = XXH_PRIME64_2;
+  statePtr->acc[3] = XXH_PRIME64_3;
+  statePtr->acc[4] = XXH_PRIME64_4;
+  statePtr->acc[5] = XXH_PRIME32_2;
+  statePtr->acc[6] = XXH_PRIME64_5;
+  statePtr->acc[7] = XXH_PRIME32_1;
+  statePtr->seed = seed;
+  statePtr->extSecret = (const unsigned char *)secret;
+  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+  statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
+  statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
+
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t *statePtr) {
+
+  if (statePtr == NULL) return XXH_ERROR;
+  XXH3_64bits_reset_internal(statePtr, 0, XXH3_kSecret,
+                             XXH_SECRET_DEFAULT_SIZE);
+  return XXH_OK;
+
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(
+    XXH3_state_t *statePtr, const void *secret, size_t secretSize) {
+
+  if (statePtr == NULL) return XXH_ERROR;
+  XXH3_64bits_reset_internal(statePtr, 0, secret, secretSize);
+  if (secret == NULL) return XXH_ERROR;
+  if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+  return XXH_OK;
+
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t *statePtr,
+                                                        XXH64_hash_t  seed) {
+
+  if (statePtr == NULL) return XXH_ERROR;
+  if (seed == 0) return XXH3_64bits_reset(statePtr);
+  if (seed != statePtr->seed)
+    XXH3_initCustomSecret(statePtr->customSecret, seed);
+  XXH3_64bits_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
+  return XXH_OK;
+
+}
+
+/* Note: when XXH3_consumeStripes() is invoked, there must be a guarantee
+ * that at least one more byte of input remains to be consumed afterwards,
+ * so that the function can blindly consume all stripes using the "normal"
+ * secret segment. */
+XXH_FORCE_INLINE void XXH3_consumeStripes(
+    xxh_u64 *XXH_RESTRICT acc, size_t *XXH_RESTRICT nbStripesSoFarPtr,
+    size_t nbStripesPerBlock, const xxh_u8 *XXH_RESTRICT input,
+    size_t nbStripes, const xxh_u8 *XXH_RESTRICT secret, size_t secretLimit,
+    XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble) {
+
+  XXH_ASSERT(nbStripes <=
+             nbStripesPerBlock); /* can handle max 1 scramble per invocation */
+  XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
+  if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
+
+    /* need a scrambling operation */
+    size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
+    size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
+    XXH3_accumulate(acc, input,
+                    secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE,
+                    nbStripesToEndofBlock, f_acc512);
+    f_scramble(acc, secret + secretLimit);
+    XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret,
+                    nbStripesAfterBlock, f_acc512);
+    *nbStripesSoFarPtr = nbStripesAfterBlock;
+
+  } else {
+
+    XXH3_accumulate(acc, input,
+                    secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE,
+                    nbStripes, f_acc512);
+    *nbStripesSoFarPtr += nbStripes;
+
+  }
+
+}
+
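+/*
+ * Worked example (editor's note): with nbStripesPerBlock == 16,
+ * *nbStripesSoFarPtr == 14 and nbStripes == 4, the first branch fires:
+ * 2 stripes finish the current block, the accumulators get scrambled, and
+ * the remaining 2 stripes restart at secret[0], leaving
+ * *nbStripesSoFarPtr == 2. At most one scramble happens per call, hence
+ * the nbStripes <= nbStripesPerBlock precondition.
+ */
+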
+/*
+ * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
+ */
+XXH_FORCE_INLINE XXH_errorcode XXH3_update(XXH3_state_t *state,
+                                           const xxh_u8 *input, size_t len,
+                                           XXH3_f_accumulate_512 f_acc512,
+                                           XXH3_f_scrambleAcc    f_scramble) {
+
+  if (input == NULL)
+    #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
+        (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
+    return XXH_OK;
+    #else
+    return XXH_ERROR;
+    #endif
+
+  {
+
+    const xxh_u8 *const        bEnd = input + len;
+    const unsigned char *const secret =
+        (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+
+    state->totalLen += len;
+
+    if (state->bufferedSize + len <=
+        XXH3_INTERNALBUFFER_SIZE) {                   /* fill in tmp buffer */
+      XXH_memcpy(state->buffer + state->bufferedSize, input, len);
+      state->bufferedSize += (XXH32_hash_t)len;
+      return XXH_OK;
+
+    }
+
+      /* total input is now > XXH3_INTERNALBUFFER_SIZE */
+
+    #define XXH3_INTERNALBUFFER_STRIPES \
+      (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
+    XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN ==
+                      0);                                 /* clean multiple */
+
+    /*
+     * Internal buffer is partially filled (always, except at beginning)
+     * Complete it, then consume it.
+     */
+    if (state->bufferedSize) {
+
+      size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
+      XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
+      input += loadSize;
+      XXH3_consumeStripes(state->acc, &state->nbStripesSoFar,
+                          state->nbStripesPerBlock, state->buffer,
+                          XXH3_INTERNALBUFFER_STRIPES, secret,
+                          state->secretLimit, f_acc512, f_scramble);
+      state->bufferedSize = 0;
+
+    }
+
+    XXH_ASSERT(input < bEnd);
+
+    /* Consume input by a multiple of internal buffer size */
+    if (input + XXH3_INTERNALBUFFER_SIZE < bEnd) {
+
+      const xxh_u8 *const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
+      do {
+
+        XXH3_consumeStripes(state->acc, &state->nbStripesSoFar,
+                            state->nbStripesPerBlock, input,
+                            XXH3_INTERNALBUFFER_STRIPES, secret,
+                            state->secretLimit, f_acc512, f_scramble);
+        input += XXH3_INTERNALBUFFER_SIZE;
+
+      } while (input < limit);
+
+      /* for last partial stripe */
+      memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN,
+             input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
+
+    }
+
+    XXH_ASSERT(input < bEnd);
+
+    /* Some remaining input (always): buffer it */
+    XXH_memcpy(state->buffer, input, (size_t)(bEnd - input));
+    state->bufferedSize = (XXH32_hash_t)(bEnd - input);
+
+  }
+
+  return XXH_OK;
+
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update(XXH3_state_t *state,
+                                                const void *input, size_t len) {
+
+  return XXH3_update(state, (const xxh_u8 *)input, len, XXH3_accumulate_512,
+                     XXH3_scrambleAcc);
+
+}
+
+XXH_FORCE_INLINE void XXH3_digest_long(XXH64_hash_t *       acc,
+                                       const XXH3_state_t * state,
+                                       const unsigned char *secret) {
+
+  /*
+   * Digest on a local copy. This way, the state remains unaltered, and it can
+   * continue ingesting more input afterwards.
+   */
+  memcpy(acc, state->acc, sizeof(state->acc));
+  if (state->bufferedSize >= XXH_STRIPE_LEN) {
+
+    size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
+    size_t       nbStripesSoFar = state->nbStripesSoFar;
+    XXH3_consumeStripes(acc, &nbStripesSoFar, state->nbStripesPerBlock,
+                        state->buffer, nbStripes, secret, state->secretLimit,
+                        XXH3_accumulate_512, XXH3_scrambleAcc);
+    /* last stripe */
+    XXH3_accumulate_512(acc,
+                        state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
+                        secret + state->secretLimit - XXH_SECRET_LASTACC_START);
+
+  } else {                                 /* bufferedSize < XXH_STRIPE_LEN */
+
+    xxh_u8       lastStripe[XXH_STRIPE_LEN];
+    size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
+    XXH_ASSERT(state->bufferedSize >
+               0);                   /* there is always some input buffered */
+    memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize,
+           catchupSize);
+    memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
+    XXH3_accumulate_512(acc, lastStripe,
+                        secret + state->secretLimit - XXH_SECRET_LASTACC_START);
+
+  }
+
+}
+
+XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest(const XXH3_state_t *state) {
+
+  const unsigned char *const secret =
+      (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+  if (state->totalLen > XXH3_MIDSIZE_MAX) {
+
+    XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+    XXH3_digest_long(acc, state, secret);
+    return XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START,
+                          (xxh_u64)state->totalLen * XXH_PRIME64_1);
+
+  }
+
+  /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
+  if (state->seed)
+    return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen,
+                                state->seed);
+  return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
+                                secret, state->secretLimit + XXH_STRIPE_LEN);
+
+}
+
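+/*
+ * Streaming usage sketch (editor's note, illustrative only; error handling
+ * omitted, buf1/buf2 and len1/len2 stand for the caller's data):
+ *
+ *   XXH3_state_t *st = XXH3_createState();
+ *   XXH3_64bits_reset(st);
+ *   XXH3_64bits_update(st, buf1, len1);
+ *   XXH3_64bits_update(st, buf2, len2);
+ *   XXH64_hash_t h = XXH3_64bits_digest(st);
+ *   XXH3_freeState(st);
+ *
+ * Since the digest works on a copy of the accumulators, further update()
+ * calls may follow a digest().
+ */
+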
+    #define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
+
+XXH_PUBLIC_API void XXH3_generateSecret(void *      secretBuffer,
+                                        const void *customSeed,
+                                        size_t      customSeedSize) {
+
+  XXH_ASSERT(secretBuffer != NULL);
+  if (customSeedSize == 0) {
+
+    memcpy(secretBuffer, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
+    return;
+
+  }
+
+  XXH_ASSERT(customSeed != NULL);
+
+  {
+
+    size_t const       segmentSize = sizeof(XXH128_hash_t);
+    size_t const       nbSegments = XXH_SECRET_DEFAULT_SIZE / segmentSize;
+    XXH128_canonical_t scrambler;
+    XXH64_hash_t       seeds[12];
+    size_t             segnb;
+    XXH_ASSERT(nbSegments == 12);
+    XXH_ASSERT(segmentSize * nbSegments ==
+               XXH_SECRET_DEFAULT_SIZE);                  /* exact multiple */
+    XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
+
+    /*
+     * Copy customSeed to seeds[], truncating or repeating as necessary.
+     */
+    {
+
+      size_t toFill = XXH_MIN(customSeedSize, sizeof(seeds));
+      size_t filled = toFill;
+      memcpy(seeds, customSeed, toFill);
+      while (filled < sizeof(seeds)) {
+
+        toFill = XXH_MIN(filled, sizeof(seeds) - filled);
+        memcpy((char *)seeds + filled, seeds, toFill);
+        filled += toFill;
+
+      }
+
+    }
+
+    /* generate secret */
+    memcpy(secretBuffer, &scrambler, sizeof(scrambler));
+    for (segnb = 1; segnb < nbSegments; segnb++) {
+
+      size_t const       segmentStart = segnb * segmentSize;
+      XXH128_canonical_t segment;
+      XXH128_canonicalFromHash(&segment,
+                               XXH128(&scrambler, sizeof(scrambler),
+                                      XXH_readLE64(seeds + segnb) + segnb));
+      memcpy((char *)secretBuffer + segmentStart, &segment, sizeof(segment));
+
+    }
+
+  }
+
+}
+
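+/*
+ * Usage sketch (editor's note, illustrative only; `data`/`size` stand for
+ * the caller's buffer): deriving a full-entropy secret from low-entropy
+ * seed material, then hashing with it:
+ *
+ *   unsigned char secret[192];     // == XXH_SECRET_DEFAULT_SIZE here
+ *   XXH3_generateSecret(secret, "my application key", 18);
+ *   XXH64_hash_t h = XXH3_64bits_withSecret(data, size, secret,
+ *                                           sizeof(secret));
+ */
+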
+/* ==========================================
+ * XXH3 128 bits (a.k.a XXH128)
+ * ==========================================
+ * XXH3's 128-bit variant has better mixing and strength than the 64-bit
+ * variant, even without counting the significantly larger output size.
+ *
+ * For example, extra steps are taken to avoid the seed-dependent collisions
+ * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
+ *
+ * This strength naturally comes at the cost of some speed, especially on short
+ * lengths. Note that longer hashes are about as fast as the 64-bit version
+ * due to it using only a slight modification of the 64-bit loop.
+ *
+ * XXH128 is also more oriented towards 64-bit machines. It is still extremely
+ * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
+ */
+
+XXH_FORCE_INLINE XXH128_hash_t XXH3_len_1to3_128b(const xxh_u8 *input,
+                                                  size_t        len,
+                                                  const xxh_u8 *secret,
+                                                  XXH64_hash_t  seed) {
+
+  /* A doubled version of 1to3_64b with different constants. */
+  XXH_ASSERT(input != NULL);
+  XXH_ASSERT(1 <= len && len <= 3);
+  XXH_ASSERT(secret != NULL);
+  /*
+   * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
+   * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
+   * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
+   */
+  {
+
+    xxh_u8 const  c1 = input[0];
+    xxh_u8 const  c2 = input[len >> 1];
+    xxh_u8 const  c3 = input[len - 1];
+    xxh_u32 const combinedl = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) |
+                              ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
+    xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
+    xxh_u64 const bitflipl =
+        (XXH_readLE32(secret) ^ XXH_readLE32(secret + 4)) + seed;
+    xxh_u64 const bitfliph =
+        (XXH_readLE32(secret + 8) ^ XXH_readLE32(secret + 12)) - seed;
+    xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
+    xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
+    XXH128_hash_t h128;
+    h128.low64 = XXH64_avalanche(keyed_lo);
+    h128.high64 = XXH64_avalanche(keyed_hi);
+    return h128;
+
+  }
+
+}
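+
+/*
+ * Worked example (illustrative): for input = "AB" (len == 2, 'A' == 0x41,
+ * 'B' == 0x42), the function above builds combinedl == 0x42410242, i.e. the
+ * bytes { 0x42, 0x02, 0x41, 0x42 } from least to most significant, matching
+ * the len = 2 row of the layout comment.
+ */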
+
+XXH_FORCE_INLINE XXH128_hash_t XXH3_len_4to8_128b(const xxh_u8 *input,
+                                                  size_t        len,
+                                                  const xxh_u8 *secret,
+                                                  XXH64_hash_t  seed) {
+
+  XXH_ASSERT(input != NULL);
+  XXH_ASSERT(secret != NULL);
+  XXH_ASSERT(4 <= len && len <= 8);
+  seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
+  {
+
+    xxh_u32 const input_lo = XXH_readLE32(input);
+    xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
+    xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
+    xxh_u64 const bitflip =
+        (XXH_readLE64(secret + 16) ^ XXH_readLE64(secret + 24)) + seed;
+    xxh_u64 const keyed = input_64 ^ bitflip;
+
+    /*
+     * Shift len left so that (len << 2) is always even: adding an even value
+     * to the odd constant XXH_PRIME64_1 keeps the multiplier odd, avoiding an
+     * even multiply, which would unconditionally zero the product's low bit.
+     */
+    XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
+
+    m128.high64 += (m128.low64 << 1);
+    m128.low64 ^= (m128.high64 >> 3);
+
+    m128.low64 = XXH_xorshift64(m128.low64, 35);
+    m128.low64 *= 0x9FB21C651E98DF25ULL;
+    m128.low64 = XXH_xorshift64(m128.low64, 28);
+    m128.high64 = XXH3_avalanche(m128.high64);
+    return m128;
+
+  }
+
+}
+
+XXH_FORCE_INLINE XXH128_hash_t XXH3_len_9to16_128b(const xxh_u8 *input,
+                                                   size_t        len,
+                                                   const xxh_u8 *secret,
+                                                   XXH64_hash_t  seed) {
+
+  XXH_ASSERT(input != NULL);
+  XXH_ASSERT(secret != NULL);
+  XXH_ASSERT(9 <= len && len <= 16);
+  {
+
+    xxh_u64 const bitflipl =
+        (XXH_readLE64(secret + 32) ^ XXH_readLE64(secret + 40)) - seed;
+    xxh_u64 const bitfliph =
+        (XXH_readLE64(secret + 48) ^ XXH_readLE64(secret + 56)) + seed;
+    xxh_u64 const input_lo = XXH_readLE64(input);
+    xxh_u64       input_hi = XXH_readLE64(input + len - 8);
+    XXH128_hash_t m128 =
+        XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
+    /*
+     * Put len in the middle of m128 to ensure that the length gets mixed to
+     * both the low and high bits in the 128x64 multiply below.
+     */
+    m128.low64 += (xxh_u64)(len - 1) << 54;
+    input_hi ^= bitfliph;
+    /*
+     * Add the high 32 bits of input_hi to the high 32 bits of m128, then
+     * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
+     * the high 64 bits of m128.
+     *
+     * The best approach to this operation is different on 32-bit and 64-bit.
+     */
+    if (sizeof(void *) < sizeof(xxh_u64)) {                       /* 32-bit */
+      /*
+       * 32-bit optimized version, which is more readable.
+       *
+       * On 32-bit, it removes an ADC and delays a dependency between the two
+       * halves of m128.high64, but it generates an extra mask on 64-bit.
+       */
+      m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) +
+                     XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
+
+    } else {
+
+      /*
+       * 64-bit optimized (albeit more confusing) version.
+       *
+       * Uses some properties of addition and multiplication to remove the mask:
+       *
+       * Let:
+       *    a = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
+       *    b = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
+       *    c = XXH_PRIME32_2
+       *
+       *    a + (b * c)
+       * Inverse Property: x + y - x == y
+       *    a + (b * (1 + c - 1))
+       * Distributive Property: x * (y + z) == (x * y) + (x * z)
+       *    a + (b * 1) + (b * (c - 1))
+       * Identity Property: x * 1 == x
+       *    a + b + (b * (c - 1))
+       *
+       * Substitute a, b, and c:
+       *    input_hi.hi + input_hi.lo +
+       *        ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
+       *
+       * Since input_hi.hi + input_hi.lo == input_hi, we get this:
+       *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
+       */
+      m128.high64 +=
+          input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
+
+    }
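+
+    /*
+     * Worked check of the maskless form with toy values (illustrative):
+     * let input_hi = 0x0000000200000003, so input_hi.lo = 3 and
+     * input_hi.hi = 0x0000000200000000, and pretend c = 5.
+     *   masked:   input_hi.hi + input_hi.lo * c    = 0x200000000 + 15
+     *   maskless: input_hi + input_hi.lo * (c - 1) = 0x200000003 + 12
+     * Both equal 0x20000000F, so dropping the mask is safe.
+     */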
+
+    /* m128 ^= XXH_swap64(m128 >> 64); */
+    m128.low64 ^= XXH_swap64(m128.high64);
+
+    {                      /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
+      XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
+      h128.high64 += m128.high64 * XXH_PRIME64_2;
+
+      h128.low64 = XXH3_avalanche(h128.low64);
+      h128.high64 = XXH3_avalanche(h128.high64);
+      return h128;
+
+    }
+
+  }
+
+}
+
+/*
+ * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
+ */
+XXH_FORCE_INLINE XXH128_hash_t XXH3_len_0to16_128b(const xxh_u8 *input,
+                                                   size_t        len,
+                                                   const xxh_u8 *secret,
+                                                   XXH64_hash_t  seed) {
+
+  XXH_ASSERT(len <= 16);
+  {
+
+    if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
+    if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
+    if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
+    {
+
+      XXH128_hash_t h128;
+      xxh_u64 const bitflipl =
+          XXH_readLE64(secret + 64) ^ XXH_readLE64(secret + 72);
+      xxh_u64 const bitfliph =
+          XXH_readLE64(secret + 80) ^ XXH_readLE64(secret + 88);
+      h128.low64 = XXH64_avalanche(seed ^ bitflipl);
+      h128.high64 = XXH64_avalanche(seed ^ bitfliph);
+      return h128;
+
+    }
+
+  }
+
+}
+
+/*
+ * A bit slower than XXH3_mix16B, but handles multiply by zero better.
+ */
+XXH_FORCE_INLINE XXH128_hash_t XXH128_mix32B(XXH128_hash_t acc,
+                                             const xxh_u8 *input_1,
+                                             const xxh_u8 *input_2,
+                                             const xxh_u8 *secret,
+                                             XXH64_hash_t  seed) {
+
+  acc.low64 += XXH3_mix16B(input_1, secret + 0, seed);
+  acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
+  acc.high64 += XXH3_mix16B(input_2, secret + 16, seed);
+  acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
+  return acc;
+
+}
+
+XXH_FORCE_INLINE XXH128_hash_t XXH3_len_17to128_128b(
+    const xxh_u8 *XXH_RESTRICT input, size_t len,
+    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) {
+
+  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+  (void)secretSize;
+  XXH_ASSERT(16 < len && len <= 128);
+
+  {
+
+    XXH128_hash_t acc;
+    acc.low64 = len * XXH_PRIME64_1;
+    acc.high64 = 0;
+    if (len > 32) {
+
+      if (len > 64) {
+
+        if (len > 96) {
+
+          acc = XXH128_mix32B(acc, input + 48, input + len - 64, secret + 96,
+                              seed);
+
+        }
+
+        acc =
+            XXH128_mix32B(acc, input + 32, input + len - 48, secret + 64, seed);
+
+      }
+
+      acc = XXH128_mix32B(acc, input + 16, input + len - 32, secret + 32, seed);
+
+    }
+
+    acc = XXH128_mix32B(acc, input, input + len - 16, secret, seed);
+    {
+
+      XXH128_hash_t h128;
+      h128.low64 = acc.low64 + acc.high64;
+      h128.high64 = (acc.low64 * XXH_PRIME64_1) + (acc.high64 * XXH_PRIME64_4) +
+                    ((len - seed) * XXH_PRIME64_2);
+      h128.low64 = XXH3_avalanche(h128.low64);
+      h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+      return h128;
+
+    }
+
+  }
+
+}
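+
+/*
+ * Coverage note (illustrative): for 96 < len <= 128, the ladder above runs
+ * four XXH128_mix32B rounds which read bytes [0, 64) from the front and
+ * [len - 64, len) from the back. For example, with len == 100 the two sets
+ * of windows overlap on [36, 64), so every input byte is mixed at least
+ * once.
+ */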
+
+XXH_NO_INLINE XXH128_hash_t XXH3_len_129to240_128b(
+    const xxh_u8 *XXH_RESTRICT input, size_t len,
+    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize, XXH64_hash_t seed) {
+
+  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
+  (void)secretSize;
+  XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
+
+  {
+
+    XXH128_hash_t acc;
+    int const     nbRounds = (int)len / 32;
+    int           i;
+    acc.low64 = len * XXH_PRIME64_1;
+    acc.high64 = 0;
+    for (i = 0; i < 4; i++) {
+
+      acc = XXH128_mix32B(acc, input + (32 * i), input + (32 * i) + 16,
+                          secret + (32 * i), seed);
+
+    }
+
+    acc.low64 = XXH3_avalanche(acc.low64);
+    acc.high64 = XXH3_avalanche(acc.high64);
+    XXH_ASSERT(nbRounds >= 4);
+    for (i = 4; i < nbRounds; i++) {
+
+      acc = XXH128_mix32B(acc, input + (32 * i), input + (32 * i) + 16,
+                          secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
+                          seed);
+
+    }
+
+    /* last bytes */
+    acc = XXH128_mix32B(
+        acc, input + len - 16, input + len - 32,
+        secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
+        0ULL - seed);
+
+    {
+
+      XXH128_hash_t h128;
+      h128.low64 = acc.low64 + acc.high64;
+      h128.high64 = (acc.low64 * XXH_PRIME64_1) + (acc.high64 * XXH_PRIME64_4) +
+                    ((len - seed) * XXH_PRIME64_2);
+      h128.low64 = XXH3_avalanche(h128.low64);
+      h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
+      return h128;
+
+    }
+
+  }
+
+}
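+
+/*
+ * Worked example (illustrative): for len == 200, nbRounds == 200 / 32 == 6.
+ * Rounds 0-3 mix input[0, 128) with secret[0, 128), rounds 4-5 mix
+ * input[128, 192) with the XXH3_MIDSIZE_STARTOFFSET region, and the final
+ * "last bytes" mix covers the (possibly overlapping) tail input[168, 200).
+ */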
+
+XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_internal(
+    const void *XXH_RESTRICT input, size_t len,
+    const xxh_u8 *XXH_RESTRICT secret, size_t secretSize,
+    XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble) {
+
+  XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
+
+  XXH3_hashLong_internal_loop(acc, (const xxh_u8 *)input, len, secret,
+                              secretSize, f_acc512, f_scramble);
+
+  /* converge into final hash */
+  XXH_STATIC_ASSERT(sizeof(acc) == 64);
+  XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+  {
+
+    XXH128_hash_t h128;
+    h128.low64 = XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START,
+                                (xxh_u64)len * XXH_PRIME64_1);
+    h128.high64 = XXH3_mergeAccs(
+        acc, secret + secretSize - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+        ~((xxh_u64)len * XXH_PRIME64_2));
+    return h128;
+
+  }
+
+}
+
+/*
+ * It's important for performance that XXH3_hashLong is not inlined.
+ */
+XXH_NO_INLINE XXH128_hash_t XXH3_hashLong_128b_default(
+    const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
+    const void *XXH_RESTRICT secret, size_t secretLen) {
+
+  (void)seed64;
+  (void)secret;
+  (void)secretLen;
+  return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret,
+                                     sizeof(XXH3_kSecret), XXH3_accumulate_512,
+                                     XXH3_scrambleAcc);
+
+}
+
+/*
+ * It's important for performance that XXH3_hashLong is not inlined.
+ */
+XXH_NO_INLINE XXH128_hash_t XXH3_hashLong_128b_withSecret(
+    const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
+    const void *XXH_RESTRICT secret, size_t secretLen) {
+
+  (void)seed64;
+  return XXH3_hashLong_128b_internal(input, len, (const xxh_u8 *)secret,
+                                     secretLen, XXH3_accumulate_512,
+                                     XXH3_scrambleAcc);
+
+}
+
+XXH_FORCE_INLINE XXH128_hash_t XXH3_hashLong_128b_withSeed_internal(
+    const void *XXH_RESTRICT input, size_t len, XXH64_hash_t seed64,
+    XXH3_f_accumulate_512 f_acc512, XXH3_f_scrambleAcc f_scramble,
+    XXH3_f_initCustomSecret f_initSec) {
+
+  if (seed64 == 0)
+    return XXH3_hashLong_128b_internal(
+        input, len, XXH3_kSecret, sizeof(XXH3_kSecret), f_acc512, f_scramble);
+  {
+
+    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
+    f_initSec(secret, seed64);
+    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8 *)secret,
+                                       sizeof(secret), f_acc512, f_scramble);
+
+  }
+
+}
+
+/*
+ * It's important for performance that XXH3_hashLong is not inlined.
+ */
+XXH_NO_INLINE XXH128_hash_t
+XXH3_hashLong_128b_withSeed(const void *input, size_t len, XXH64_hash_t seed64,
+                            const void *XXH_RESTRICT secret, size_t secretLen) {
+
+  (void)secret;
+  (void)secretLen;
+  return XXH3_hashLong_128b_withSeed_internal(
+      input, len, seed64, XXH3_accumulate_512, XXH3_scrambleAcc,
+      XXH3_initCustomSecret);
+
+}
+
+typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void *XXH_RESTRICT, size_t,
+                                            XXH64_hash_t,
+                                            const void *XXH_RESTRICT, size_t);
+
+XXH_FORCE_INLINE XXH128_hash_t
+XXH3_128bits_internal(const void *input, size_t len, XXH64_hash_t seed64,
+                      const void *XXH_RESTRICT secret, size_t secretLen,
+                      XXH3_hashLong128_f f_hl128) {
+
+  XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
+  /*
+   * If an action is to be taken if `secret` conditions are not respected,
+   * it should be done here.
+   * For now, it's a contract pre-condition.
+   * Adding a check and a branch here would cost performance at every hash.
+   */
+  if (len <= 16)
+    return XXH3_len_0to16_128b((const xxh_u8 *)input, len,
+                               (const xxh_u8 *)secret, seed64);
+  if (len <= 128)
+    return XXH3_len_17to128_128b((const xxh_u8 *)input, len,
+                                 (const xxh_u8 *)secret, secretLen, seed64);
+  if (len <= XXH3_MIDSIZE_MAX)
+    return XXH3_len_129to240_128b((const xxh_u8 *)input, len,
+                                  (const xxh_u8 *)secret, secretLen, seed64);
+  return f_hl128(input, len, seed64, secret, secretLen);
+
+}
+
+/* ===   Public XXH128 API   === */
+
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void *input, size_t len) {
+
+  return XXH3_128bits_internal(input, len, 0, XXH3_kSecret,
+                               sizeof(XXH3_kSecret),
+                               XXH3_hashLong_128b_default);
+
+}
+
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void *input,
+                                                     size_t      len,
+                                                     const void *secret,
+                                                     size_t      secretSize) {
+
+  return XXH3_128bits_internal(input, len, 0, (const xxh_u8 *)secret,
+                               secretSize, XXH3_hashLong_128b_withSecret);
+
+}
+
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void * input,
+                                                   size_t       len,
+                                                   XXH64_hash_t seed) {
+
+  return XXH3_128bits_internal(input, len, seed, XXH3_kSecret,
+                               sizeof(XXH3_kSecret),
+                               XXH3_hashLong_128b_withSeed);
+
+}
+
+XXH_PUBLIC_API XXH128_hash_t XXH128(const void *input, size_t len,
+                                    XXH64_hash_t seed) {
+
+  return XXH3_128bits_withSeed(input, len, seed);
+
+}
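+
+/*
+ * One-shot usage sketch (illustrative; `buf` and `bufLen` are placeholders):
+ *
+ *   XXH128_hash_t a = XXH3_128bits(buf, bufLen);              // seed 0
+ *   XXH128_hash_t b = XXH128(buf, bufLen, 0);                 // same result
+ *   XXH128_hash_t c = XXH3_128bits_withSeed(buf, bufLen, 42);
+ *
+ * XXH128() simply forwards to XXH3_128bits_withSeed(), and a zero seed
+ * selects the default secret, so `a` and `b` are identical.
+ */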
+
+/* ===   XXH3 128-bit streaming   === */
+
+/*
+ * All the functions are actually the same as for the 64-bit streaming
+ * variant. The only difference is the finalization routine.
+ */
+
+static void XXH3_128bits_reset_internal(XXH3_state_t *statePtr,
+                                        XXH64_hash_t seed, const void *secret,
+                                        size_t secretSize) {
+
+  XXH3_64bits_reset_internal(statePtr, seed, secret, secretSize);
+
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t *statePtr) {
+
+  if (statePtr == NULL) return XXH_ERROR;
+  XXH3_128bits_reset_internal(statePtr, 0, XXH3_kSecret,
+                              XXH_SECRET_DEFAULT_SIZE);
+  return XXH_OK;
+
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(
+    XXH3_state_t *statePtr, const void *secret, size_t secretSize) {
+
+  if (statePtr == NULL) return XXH_ERROR;
+  XXH3_128bits_reset_internal(statePtr, 0, secret, secretSize);
+  if (secret == NULL) return XXH_ERROR;
+  if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
+  return XXH_OK;
+
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t *statePtr,
+                                                         XXH64_hash_t  seed) {
+
+  if (statePtr == NULL) return XXH_ERROR;
+  if (seed == 0) return XXH3_128bits_reset(statePtr);
+  if (seed != statePtr->seed)
+    XXH3_initCustomSecret(statePtr->customSecret, seed);
+  XXH3_128bits_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
+  return XXH_OK;
+
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update(XXH3_state_t *state,
+                                                 const void *  input,
+                                                 size_t        len) {
+
+  return XXH3_update(state, (const xxh_u8 *)input, len, XXH3_accumulate_512,
+                     XXH3_scrambleAcc);
+
+}
+
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest(const XXH3_state_t *state) {
+
+  const unsigned char *const secret =
+      (state->extSecret == NULL) ? state->customSecret : state->extSecret;
+  if (state->totalLen > XXH3_MIDSIZE_MAX) {
+
+    XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
+    XXH3_digest_long(acc, state, secret);
+    XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >=
+               sizeof(acc) + XXH_SECRET_MERGEACCS_START);
+    {
+
+      XXH128_hash_t h128;
+      h128.low64 = XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START,
+                                  (xxh_u64)state->totalLen * XXH_PRIME64_1);
+      h128.high64 =
+          XXH3_mergeAccs(acc,
+                         secret + state->secretLimit + XXH_STRIPE_LEN -
+                             sizeof(acc) - XXH_SECRET_MERGEACCS_START,
+                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
+      return h128;
+
+    }
+
+  }
+
+  /* len <= XXH3_MIDSIZE_MAX : short code */
+  if (state->seed)
+    return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen,
+                                 state->seed);
+  return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
+                                 secret, state->secretLimit + XXH_STRIPE_LEN);
+
+}
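+
+/*
+ * Streaming usage sketch (illustrative; assumes the XXH3_createState() /
+ * XXH3_freeState() allocators declared in the public API):
+ *
+ *   XXH3_state_t *st = XXH3_createState();
+ *   if (st == NULL || XXH3_128bits_reset(st) != XXH_OK) abort();
+ *   while (read_chunk(&chunk, &chunkLen))            // placeholder reader
+ *     XXH3_128bits_update(st, chunk, chunkLen);
+ *   XXH128_hash_t h = XXH3_128bits_digest(st);
+ *   XXH3_freeState(st);
+ *
+ * digest() does not modify the state, so a hash may be taken mid-stream and
+ * updating can continue afterwards.
+ */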
+
+  /* 128-bit utility functions */
+
+    #include <string.h>                                   /* memcmp, memcpy */
+
+/* return : 1 if equal, 0 if different */
+XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) {
+
+  /* note : XXH128_hash_t is compact, it has no padding byte */
+  return !(memcmp(&h1, &h2, sizeof(h1)));
+
+}
+
+/* This prototype is compatible with stdlib's qsort().
+ * return : >0 if *h128_1  > *h128_2
+ *          <0 if *h128_1  < *h128_2
+ *          =0 if *h128_1 == *h128_2  */
+XXH_PUBLIC_API int XXH128_cmp(const void *h128_1, const void *h128_2) {
+
+  XXH128_hash_t const h1 = *(const XXH128_hash_t *)h128_1;
+  XXH128_hash_t const h2 = *(const XXH128_hash_t *)h128_2;
+  int const           hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
+  /* note : assumes that, in most cases, hash values are different */
+  if (hcmp) return hcmp;
+  return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
+
+}
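+
+/*
+ * Example (illustrative): since XXH128_cmp() matches the qsort() comparator
+ * contract, an array of hashes can be sorted directly (`n` is a
+ * placeholder):
+ *
+ *   XXH128_hash_t hashes[n];
+ *   qsort(hashes, n, sizeof(XXH128_hash_t), XXH128_cmp);
+ *
+ * The order is by high64 first, then low64, as implemented above.
+ */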
+
+/*======   Canonical representation   ======*/
+XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t *dst,
+                                             XXH128_hash_t       hash) {
+
+  XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
+  if (XXH_CPU_LITTLE_ENDIAN) {
+
+    hash.high64 = XXH_swap64(hash.high64);
+    hash.low64 = XXH_swap64(hash.low64);
+
+  }
+
+  memcpy(dst, &hash.high64, sizeof(hash.high64));
+  memcpy((char *)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
+
+}
+
+XXH_PUBLIC_API XXH128_hash_t
+XXH128_hashFromCanonical(const XXH128_canonical_t *src) {
+
+  XXH128_hash_t h;
+  h.high64 = XXH_readBE64(src);
+  h.low64 = XXH_readBE64(src->digest + 8);
+  return h;
+
+}
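+
+/*
+ * Round-trip sketch (illustrative): the canonical form is a fixed 16-byte
+ * big-endian encoding, suitable for storage or network transmission:
+ *
+ *   XXH128_canonical_t canon;
+ *   XXH128_canonicalFromHash(&canon, h);       // h is some XXH128_hash_t
+ *   XXH128_hash_t back = XXH128_hashFromCanonical(&canon);
+ *
+ * XXH128_isEqual(h, back) returns 1 on any platform, regardless of native
+ * endianness.
+ */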
+
+    /* Pop our optimization override from above */
+    #if XXH_VECTOR == XXH_AVX2                      /* AVX2 */           \
+        && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
+        && defined(__OPTIMIZE__) &&                                      \
+        !defined(__OPTIMIZE_SIZE__)                  /* respect -O0 and -Os */
+      #pragma GCC pop_options
+    #endif
 
   #endif                                                /* XXH_NO_LONG_LONG */