/*
xxHash - Extremely Fast Hash algorithm

Copyright (C) 2012-2018, Yann Collet.
Copyright (C) 2017-2018, Piotr Pliszka.

BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

You can contact the author at:
- xxHash source repository: https://github.com/Cyan4973/xxHash
- xxHash C++ port repository: https://github.com/RedSpah/xxhash_cpp
*/

/* *************************************
* Tuning parameters
***************************************/
/*!XXH_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : use the `__packed` attribute. This depends on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on the compiler, but violates the C standard.
 *            It can generate buggy code on targets which do not support unaligned memory accesses.
 *            But in some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
 * See http://stackoverflow.com/a/32095106/646947 for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
#if defined(__GNUC__) && (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || \
    defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__))
#define XXH_FORCE_MEMORY_ACCESS 2
#elif defined(__INTEL_COMPILER) || \
    (defined(__GNUC__) && (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || \
    defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__)))
#define XXH_FORCE_MEMORY_ACCESS 1
#endif
#endif
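
/* Illustrative override (an assumption about your build, not something this header requires):
 * the macro is normally supplied on the compiler command line, e.g.
 *   g++ -std=c++17 -DXXH_FORCE_MEMORY_ACCESS=2 -c file.cpp
 * which only makes sense on targets known to tolerate unaligned loads. */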

/*!XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
 * Results are therefore identical on little-endian and big-endian CPUs.
 * This comes at a performance cost on big-endian CPUs, since some byte swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance to your application, you may set the #define below to 1,
 * to improve speed on big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
#if !defined(XXH_FORCE_NATIVE_FORMAT) || (XXH_FORCE_NATIVE_FORMAT == 0) /* can be defined externally */
#define XXH_FORCE_NATIVE_FORMAT 0
#define XXH_CPU_LITTLE_ENDIAN 1
#endif

/*!XXH_FORCE_ALIGN_CHECK :
 * This is a minor performance trick, only useful with lots of very small keys.
 * It means: check whether the input is aligned before hashing it.
 * The check costs one initial branch per hash;
 * set it to 0 when the input is guaranteed to be aligned,
 * or when alignment doesn't matter for performance.
 */
#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#define XXH_FORCE_ALIGN_CHECK 0
#else
#define XXH_FORCE_ALIGN_CHECK 1
#endif
#endif

/*!XXH_CPU_LITTLE_ENDIAN :
 * This is a CPU endianness detection macro, which will be
 * automatically set to 1 (little-endian) if XXH_FORCE_NATIVE_FORMAT
 * is left undefined, XXH_FORCE_NATIVE_FORMAT is defined to 0, or if an x86/x86_64 compiler macro is defined.
 * If left undefined, endianness will be determined at runtime, at the cost of a slight one-time overhead
 * and a larger overhead due to get_endian() not being constexpr.
 */
#ifndef XXH_CPU_LITTLE_ENDIAN
#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#define XXH_CPU_LITTLE_ENDIAN 1
#endif
#endif

/* *************************************
* Compiler Specific Options
***************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

namespace xxh {
/* *************************************
* Version
***************************************/
constexpr int cpp_version_major = 0;
constexpr int cpp_version_minor = 6;
constexpr int cpp_version_release = 5;
constexpr uint32_t version_number()
{
    return cpp_version_major * 10000 + cpp_version_minor * 100 + cpp_version_release;
}
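
/* For example, with the constants above version_number() evaluates to
 * 0 * 10000 + 6 * 100 + 5 == 605. */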

namespace hash_t_impl {
/* *************************************
* Basic Types - Detail
***************************************/

using _hash32_underlying = uint32_t;
using _hash64_underlying = uint64_t;

template <size_t N> struct hash_type {
    using type = void;
};
template <> struct hash_type<32> {
    using type = _hash32_underlying;
};
template <> struct hash_type<64> {
    using type = _hash64_underlying;
};
} // namespace hash_t_impl

/* *************************************
* Basic Types - Public
***************************************/
template <size_t N> using hash_t = typename hash_t_impl::hash_type<N>::type;
using hash32_t = hash_t<32>;
using hash64_t = hash_t<64>;

/* *************************************
* Bit Functions - Public
***************************************/
namespace bit_ops {
/* ****************************************
* Intrinsics and Bit Operations
******************************************/
#if defined(_MSC_VER)
inline uint32_t rotl32(uint32_t x, int32_t r) { return _rotl(x, r); }
inline uint64_t rotl64(uint64_t x, int32_t r) { return _rotl64(x, r); }
#else
inline uint32_t rotl32(uint32_t x, int32_t r) { return ((x << r) | (x >> (32 - r))); }
inline uint64_t rotl64(uint64_t x, int32_t r) { return ((x << r) | (x >> (64 - r))); }
#endif

#if defined(_MSC_VER) /* Visual Studio */
inline uint32_t swap32(uint32_t x) { return _byteswap_ulong(x); }
inline uint64_t swap64(uint64_t x) { return _byteswap_uint64(x); }
#elif XXH_GCC_VERSION >= 403
inline uint32_t swap32(uint32_t x) { return __builtin_bswap32(x); }
inline uint64_t swap64(uint64_t x) { return __builtin_bswap64(x); }
#else
/* generic byte-swap fallback */
inline uint32_t swap32(uint32_t x)
{
    return ((x << 24) & 0xff000000) | ((x << 8) & 0x00ff0000) | ((x >> 8) & 0x0000ff00) | ((x >> 24) & 0x000000ff);
}
inline uint64_t swap64(uint64_t x)
{
    return ((x << 56) & 0xff00000000000000ULL) | ((x << 40) & 0x00ff000000000000ULL) |
        ((x << 24) & 0x0000ff0000000000ULL) | ((x << 8) & 0x000000ff00000000ULL) | ((x >> 8) & 0x00000000ff000000ULL) |
        ((x >> 24) & 0x0000000000ff0000ULL) | ((x >> 40) & 0x000000000000ff00ULL) |
        ((x >> 56) & 0x00000000000000ffULL);
}
#endif

/* width-dispatching wrappers used by the hash implementation; only the
 * 32 and 64 bit specializations are ever instantiated */
template <size_t N> inline hash_t<N> rotl(hash_t<N> n, int32_t r) {}
template <> inline hash_t<32> rotl<32>(hash_t<32> n, int32_t r) { return rotl32(n, r); }
template <> inline hash_t<64> rotl<64>(hash_t<64> n, int32_t r) { return rotl64(n, r); }

template <size_t N> inline hash_t<N> swap(hash_t<N> n) {}
template <> inline hash_t<32> swap<32>(hash_t<32> n) { return swap32(n); }
template <> inline hash_t<64> swap<64>(hash_t<64> n) { return swap64(n); }
} // namespace bit_ops
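
/* Quick illustrative checks (not part of the API):
 *   bit_ops::rotl<32>(0x00000001u, 8) == 0x00000100u
 *   bit_ops::swap<32>(0x11223344u)    == 0x44332211u
 */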

/* *************************************
* Memory Functions - Public
***************************************/
enum class alignment : uint8_t { aligned, unaligned };
enum class endianness : uint8_t { big_endian = 0, little_endian = 1, unspecified = 2 };

namespace mem_ops {
/* *************************************
* Memory Access
***************************************/
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware. */
template <size_t N> inline hash_t<N> read_unaligned(const void* memPtr) { return *(const hash_t<N>*)memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1))

/* The packed-union approach is safer, but compiler specific, hence potentially problematic for some compilers. */
/* Currently only defined for gcc and icc. */
template <size_t N> union __attribute__((packed)) unalign {
    hash_t<N> uval;
};

template <size_t N> inline hash_t<N> read_unaligned(const void* memPtr) { return ((const unalign<N>*)memPtr)->uval; }

#else

/* Portable and safe solution. Generally efficient.
 * see : http://stackoverflow.com/a/32095106/646947 */
template <size_t N> inline hash_t<N> read_unaligned(const void* memPtr)
{
    hash_t<N> val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif /* XXH_FORCE_MEMORY_ACCESS */

inline hash_t<32> read32(const void* memPtr) { return read_unaligned<32>(memPtr); }
inline hash_t<64> read64(const void* memPtr) { return read_unaligned<64>(memPtr); }

/* *************************************
* Architecture Macros
***************************************/

/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */

#ifndef XXH_CPU_LITTLE_ENDIAN

/* Runtime endianness detection: the lookup slot for 'unspecified' is filled in once by
 * inspecting the first byte of an int holding 1 (that byte is 1 on little-endian targets). */
inline endianness get_endian(endianness endian)
{
    static struct _dummy_t {
        std::array<endianness, 3> endian_lookup = {endianness::big_endian, endianness::little_endian,
                                                   endianness::unspecified};
        const int g_one = 1;
        _dummy_t() { endian_lookup[2] = static_cast<endianness>(*(const char*)(&g_one)); }
    } _dummy;

    return _dummy.endian_lookup[(uint8_t)endian];
}

inline bool is_little_endian() { return get_endian(endianness::unspecified) == endianness::little_endian; }

#else

constexpr endianness get_endian(endianness endian)
{
    constexpr std::array<endianness, 3> endian_lookup = {
        {endianness::big_endian, endianness::little_endian,
         (XXH_CPU_LITTLE_ENDIAN) ? endianness::little_endian : endianness::big_endian}};
    return endian_lookup[static_cast<uint8_t>(endian)];
}

constexpr bool is_little_endian() { return get_endian(endianness::unspecified) == endianness::little_endian; }

#endif

/* ***************************
* Memory reads
*****************************/

template <size_t N> inline hash_t<N> readLE_align(const void* ptr, endianness endian, alignment align)
{
    if (align == alignment::unaligned) {
        return endian == endianness::little_endian ? read_unaligned<N>(ptr) : bit_ops::swap<N>(read_unaligned<N>(ptr));
    }
    return endian == endianness::little_endian ? *reinterpret_cast<const hash_t<N>*>(ptr)
                                               : bit_ops::swap<N>(*reinterpret_cast<const hash_t<N>*>(ptr));
}

template <size_t N> inline hash_t<N> readLE(const void* ptr, endianness endian)
{
    return readLE_align<N>(ptr, endian, alignment::unaligned);
}

template <size_t N> inline hash_t<N> readBE(const void* ptr)
{
    return is_little_endian() ? bit_ops::swap<N>(read_unaligned<N>(ptr)) : read_unaligned<N>(ptr);
}

template <size_t N> inline alignment get_alignment(const void* input)
{
    return ((XXH_FORCE_ALIGN_CHECK) && ((reinterpret_cast<uintptr_t>(input) & ((N / 8) - 1)) == 0))
               ? xxh::alignment::aligned
               : xxh::alignment::unaligned;
}
} // namespace mem_ops
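
/* Illustrative behaviour (assuming the four bytes 0x01 0x02 0x03 0x04 at address p):
 *   mem_ops::readLE<32>(p, mem_ops::get_endian(endianness::unspecified)) == 0x04030201
 * on both little-endian and big-endian hosts, which is what keeps the hash output portable. */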

/* *******************************************************************
* Hash functions
*********************************************************************/
namespace detail {
/* *******************************************************************
* Hash functions - Implementation
*********************************************************************/
constexpr static std::array<hash32_t, 5> primes32 = {{2654435761U, 2246822519U, 3266489917U, 668265263U, 374761393U}};
constexpr static std::array<hash64_t, 5> primes64 = {{11400714785074694791ULL, 14029467366897019727ULL,
                                                      1609587929392839161ULL, 9650029242287828579ULL,
                                                      2870177450012600261ULL}};

/* PRIME<N>(n) returns the n-th (1-based) xxHash prime constant for the N-bit variant */
template <size_t N> constexpr hash_t<N> PRIME(int32_t n) {}
template <> constexpr hash32_t PRIME<32>(int32_t n) { return primes32[n - 1]; }
template <> constexpr hash64_t PRIME<64>(int32_t n) { return primes64[n - 1]; }

/* Core mixing step: one accumulator lane absorbs one word of input. */
template <size_t N> inline hash_t<N> round(hash_t<N> seed, hash_t<N> input)
{
    seed += input * PRIME<N>(2);
    seed = bit_ops::rotl<N>(seed, ((N == 32) ? 13 : 31));
    seed *= PRIME<N>(1);
    return seed;
}

inline hash64_t mergeRound64(hash64_t acc, hash64_t val)
{
    val = round<64>(0, val);
    acc ^= val;
    acc = acc * PRIME<64>(1) + PRIME<64>(4);
    return acc;
}

template <size_t N>
inline void endian_align_sub_mergeround(
#if __cplusplus < 201703L
    __attribute__((unused))
#else
    [[maybe_unused]]
#endif
    hash_t<N>& hash_ret,
    hash_t<N> v1, hash_t<N> v2, hash_t<N> v3, hash_t<N> v4) {}

template <>
inline void endian_align_sub_mergeround<64>(hash_t<64>& hash_ret, hash_t<64> v1, hash_t<64> v2, hash_t<64> v3,
                                            hash_t<64> v4)
{
    hash_ret = mergeRound64(hash_ret, v1);
    hash_ret = mergeRound64(hash_ret, v2);
    hash_ret = mergeRound64(hash_ret, v3);
    hash_ret = mergeRound64(hash_ret, v4);
}

template <size_t N>
inline hash_t<N> endian_align_sub_ending(hash_t<N> hash_ret, const uint8_t* p, const uint8_t* bEnd,
                                         xxh::endianness endian, xxh::alignment align) {}

/* 32-bit tail consumption and final avalanche */
template <>
inline hash_t<32> endian_align_sub_ending<32>(hash_t<32> hash_ret, const uint8_t* p, const uint8_t* bEnd,
                                              xxh::endianness endian, xxh::alignment align)
{
    while ((p + 4) <= bEnd) {
        hash_ret += mem_ops::readLE_align<32>(p, endian, align) * PRIME<32>(3);
        hash_ret = bit_ops::rotl<32>(hash_ret, 17) * PRIME<32>(4);
        p += 4;
    }

    while (p < bEnd) {
        hash_ret += (*p) * PRIME<32>(5);
        hash_ret = bit_ops::rotl<32>(hash_ret, 11) * PRIME<32>(1);
        p++;
    }

    hash_ret ^= hash_ret >> 15;
    hash_ret *= PRIME<32>(2);
    hash_ret ^= hash_ret >> 13;
    hash_ret *= PRIME<32>(3);
    hash_ret ^= hash_ret >> 16;

    return hash_ret;
}

/* 64-bit tail consumption and final avalanche */
template <>
inline hash_t<64> endian_align_sub_ending<64>(hash_t<64> hash_ret, const uint8_t* p, const uint8_t* bEnd,
                                              xxh::endianness endian, xxh::alignment align)
{
    while (p + 8 <= bEnd) {
        const hash64_t k1 = round<64>(0, mem_ops::readLE_align<64>(p, endian, align));
        hash_ret ^= k1;
        hash_ret = bit_ops::rotl<64>(hash_ret, 27) * PRIME<64>(1) + PRIME<64>(4);
        p += 8;
    }

    if ((p + 4) <= bEnd) {
        hash_ret ^= static_cast<hash64_t>(mem_ops::readLE_align<32>(p, endian, align)) * PRIME<64>(1);
        hash_ret = bit_ops::rotl<64>(hash_ret, 23) * PRIME<64>(2) + PRIME<64>(3);
        p += 4;
    }

    while (p < bEnd) {
        hash_ret ^= (*p) * PRIME<64>(5);
        hash_ret = bit_ops::rotl<64>(hash_ret, 11) * PRIME<64>(1);
        p++;
    }

    hash_ret ^= hash_ret >> 33;
    hash_ret *= PRIME<64>(2);
    hash_ret ^= hash_ret >> 29;
    hash_ret *= PRIME<64>(3);
    hash_ret ^= hash_ret >> 32;

    return hash_ret;
}

/* Single-shot implementation shared by all public xxhash() overloads */
template <size_t N>
inline hash_t<N> endian_align(const void* input, size_t len, hash_t<N> seed, xxh::endianness endian,
                              xxh::alignment align)
{
    static_assert(!(N != 32 && N != 64), "You can only call endian_align in 32 or 64 bit mode.");

    const uint8_t* p = static_cast<const uint8_t*>(input);
    const uint8_t* bEnd = p + len;
    hash_t<N> hash_ret;

    if (len >= (N / 2)) {
        const uint8_t* const limit = bEnd - (N / 2);
        hash_t<N> v1 = seed + PRIME<N>(1) + PRIME<N>(2);
        hash_t<N> v2 = seed + PRIME<N>(2);
        hash_t<N> v3 = seed + 0;
        hash_t<N> v4 = seed - PRIME<N>(1);

        do {
            v1 = round<N>(v1, mem_ops::readLE_align<N>(p, endian, align));
            p += (N / 8);
            v2 = round<N>(v2, mem_ops::readLE_align<N>(p, endian, align));
            p += (N / 8);
            v3 = round<N>(v3, mem_ops::readLE_align<N>(p, endian, align));
            p += (N / 8);
            v4 = round<N>(v4, mem_ops::readLE_align<N>(p, endian, align));
            p += (N / 8);
        } while (p <= limit);

        hash_ret = bit_ops::rotl<N>(v1, 1) + bit_ops::rotl<N>(v2, 7) + bit_ops::rotl<N>(v3, 12) + bit_ops::rotl<N>(v4, 18);

        endian_align_sub_mergeround<N>(hash_ret, v1, v2, v3, v4);
    } else {
        hash_ret = seed + PRIME<N>(5);
    }

    hash_ret += static_cast<hash_t<N>>(len);

    return endian_align_sub_ending<N>(hash_ret, p, bEnd, endian, align);
}
} // namespace detail

template <size_t N>
hash_t<N> xxhash(const void* input, size_t len, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
{
    static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
    return detail::endian_align<N>(input, len, seed, mem_ops::get_endian(endian), mem_ops::get_alignment<N>(input));
}

template <size_t N, typename T>
hash_t<N> xxhash(const std::basic_string<T>& input, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
{
    static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
    return detail::endian_align<N>(static_cast<const void*>(input.data()), input.length() * sizeof(T), seed,
                                   mem_ops::get_endian(endian),
                                   mem_ops::get_alignment<N>(static_cast<const void*>(input.data())));
}

template <size_t N, typename ContiguousIterator>
hash_t<N> xxhash(ContiguousIterator begin, ContiguousIterator end, hash_t<N> seed = 0,
                 endianness endian = endianness::unspecified)
{
    static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
    using T = typename std::decay_t<decltype(*end)>;
    return detail::endian_align<N>(static_cast<const void*>(&*begin), (end - begin) * sizeof(T), seed,
                                   mem_ops::get_endian(endian),
                                   mem_ops::get_alignment<N>(static_cast<const void*>(&*begin)));
}

template <size_t N, typename T>
hash_t<N> xxhash(const std::vector<T>& input, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
{
    static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
    return detail::endian_align<N>(static_cast<const void*>(input.data()), input.size() * sizeof(T), seed,
                                   mem_ops::get_endian(endian),
                                   mem_ops::get_alignment<N>(static_cast<const void*>(input.data())));
}

template <size_t N, typename T, size_t AN>
hash_t<N> xxhash(const std::array<T, AN>& input, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
{
    static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
    return detail::endian_align<N>(static_cast<const void*>(input.data()), AN * sizeof(T), seed,
                                   mem_ops::get_endian(endian),
                                   mem_ops::get_alignment<N>(static_cast<const void*>(input.data())));
}

template <size_t N, typename T>
hash_t<N> xxhash(const std::initializer_list<T>& input, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
{
    static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
    return detail::endian_align<N>(static_cast<const void*>(input.begin()), input.size() * sizeof(T), seed,
                                   mem_ops::get_endian(endian),
                                   mem_ops::get_alignment<N>(static_cast<const void*>(input.begin())));
}
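
/* Usage sketch (variable names are illustrative only):
 *   std::string msg = "xxhash";
 *   xxh::hash32_t h32 = xxh::xxhash<32>(msg);                     // string overload, default seed 0
 *   xxh::hash64_t h64 = xxh::xxhash<64>(msg.data(), msg.size());  // raw pointer + length
 *   std::vector<uint8_t> buf = {1, 2, 3, 4};
 *   xxh::hash64_t hv  = xxh::xxhash<64>(buf, 42);                 // container overload with custom seed
 */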

/* *******************************************************************
* Hash streaming
*********************************************************************/
enum class error_code : uint8_t { ok = 0, error };

template <size_t N> class hash_state_t {
    uint64_t total_len = 0;
    hash_t<N> v1 = 0, v2 = 0, v3 = 0, v4 = 0;
    std::array<hash_t<N>, 4> mem = {{0, 0, 0, 0}};
    uint32_t memsize = 0;

    inline error_code _update_impl(const void* input, size_t length, endianness endian)
    {
        const uint8_t* p = reinterpret_cast<const uint8_t*>(input);
        const uint8_t* const bEnd = p + length;

        if (!input) {
            return xxh::error_code::error;
        }

        total_len += length;

        if (memsize + length < (N / 2)) { /* fill in tmp buffer */
            memcpy(reinterpret_cast<uint8_t*>(mem.data()) + memsize, input, length);
            memsize += static_cast<uint32_t>(length);
            return error_code::ok;
        }

        if (memsize) { /* some data left from previous update */
            memcpy(reinterpret_cast<uint8_t*>(mem.data()) + memsize, input, (N / 2) - memsize);

            const hash_t<N>* ptr = mem.data();
            v1 = detail::round<N>(v1, mem_ops::readLE<N>(ptr, endian));
            ptr++;
            v2 = detail::round<N>(v2, mem_ops::readLE<N>(ptr, endian));
            ptr++;
            v3 = detail::round<N>(v3, mem_ops::readLE<N>(ptr, endian));
            ptr++;
            v4 = detail::round<N>(v4, mem_ops::readLE<N>(ptr, endian));

            p += (N / 2) - memsize;
            memsize = 0;
        }

        if (p <= bEnd - (N / 2)) {
            const uint8_t* const limit = bEnd - (N / 2);

            do {
                v1 = detail::round<N>(v1, mem_ops::readLE<N>(p, endian));
                p += (N / 8);
                v2 = detail::round<N>(v2, mem_ops::readLE<N>(p, endian));
                p += (N / 8);
                v3 = detail::round<N>(v3, mem_ops::readLE<N>(p, endian));
                p += (N / 8);
                v4 = detail::round<N>(v4, mem_ops::readLE<N>(p, endian));
                p += (N / 8);
            } while (p <= limit);
        }

        if (p < bEnd) {
            memcpy(mem.data(), p, static_cast<size_t>(bEnd - p));
            memsize = static_cast<uint32_t>(bEnd - p);
        }

        return error_code::ok;
    }

    inline hash_t<N> _digest_impl(endianness endian) const
    {
        const uint8_t* p = reinterpret_cast<const uint8_t*>(mem.data());
        const uint8_t* const bEnd = reinterpret_cast<const uint8_t*>(mem.data()) + memsize;
        hash_t<N> hash_ret;

        if (total_len > (N / 2)) {
            hash_ret =
                bit_ops::rotl<N>(v1, 1) + bit_ops::rotl<N>(v2, 7) + bit_ops::rotl<N>(v3, 12) + bit_ops::rotl<N>(v4, 18);

            detail::endian_align_sub_mergeround<N>(hash_ret, v1, v2, v3, v4);
        } else {
            hash_ret = v3 + detail::PRIME<N>(5);
        }

        hash_ret += static_cast<hash_t<N>>(total_len);

        return detail::endian_align_sub_ending<N>(hash_ret, p, bEnd, endian, alignment::unaligned);
    }

public:
    hash_state_t(hash_t<N> seed = 0)
    {
        static_assert(!(N != 32 && N != 64), "You can only stream hashing in 32 or 64 bit mode.");
        v1 = seed + detail::PRIME<N>(1) + detail::PRIME<N>(2);
        v2 = seed + detail::PRIME<N>(2);
        v3 = seed + 0;
        v4 = seed - detail::PRIME<N>(1);
    }

    hash_state_t& operator=(const hash_state_t<N>& other)
    {
        memcpy(this, &other, sizeof(hash_state_t<N>));
        return *this;
    }

    error_code reset(hash_t<N> seed = 0)
    {
        memset(this, 0, sizeof(hash_state_t<N>));
        v1 = seed + detail::PRIME<N>(1) + detail::PRIME<N>(2);
        v2 = seed + detail::PRIME<N>(2);
        v3 = seed + 0;
        v4 = seed - detail::PRIME<N>(1);
        return error_code::ok;
    }

    error_code update(const void* input, size_t length, endianness endian = endianness::unspecified)
    {
        return _update_impl(input, length, mem_ops::get_endian(endian));
    }

    template <typename T>
    error_code update(const std::basic_string<T>& input, endianness endian = endianness::unspecified)
    {
        return _update_impl(static_cast<const void*>(input.data()), input.length() * sizeof(T),
                            mem_ops::get_endian(endian));
    }

    template <typename ContiguousIterator>
    error_code update(ContiguousIterator begin, ContiguousIterator end, endianness endian = endianness::unspecified)
    {
        using T = typename std::decay_t<decltype(*end)>;
        return _update_impl(static_cast<const void*>(&*begin), (end - begin) * sizeof(T), mem_ops::get_endian(endian));
    }

    template <typename T> error_code update(const std::vector<T>& input, endianness endian = endianness::unspecified)
    {
        return _update_impl(static_cast<const void*>(input.data()), input.size() * sizeof(T), mem_ops::get_endian(endian));
    }

    template <typename T, size_t AN>
    error_code update(const std::array<T, AN>& input, endianness endian = endianness::unspecified)
    {
        return _update_impl(static_cast<const void*>(input.data()), AN * sizeof(T), mem_ops::get_endian(endian));
    }

    template <typename T>
    error_code update(const std::initializer_list<T>& input, endianness endian = endianness::unspecified)
    {
        return _update_impl(static_cast<const void*>(input.begin()), input.size() * sizeof(T), mem_ops::get_endian(endian));
    }

    hash_t<N> digest(endianness endian = endianness::unspecified) { return _digest_impl(mem_ops::get_endian(endian)); }
};

using hash_state32_t = hash_state_t<32>;
using hash_state64_t = hash_state_t<64>;
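
/* Streaming usage sketch (buffer names are illustrative only); by design the streamed digest
 * matches the single-shot xxhash() result for the concatenated input:
 *   xxh::hash_state64_t state(0);                  // seed 0
 *   state.update(header);                          // e.g. a std::vector<uint8_t>
 *   state.update(payload.data(), payload.size());  // raw pointer + length
 *   xxh::hash64_t h = state.digest();
 */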

/* *******************************************************************
* Canonical representation
*********************************************************************/
template <size_t N> struct canonical_t {
    std::array<uint8_t, N / 8> digest;

    canonical_t(hash_t<N> hash)
    {
        if (mem_ops::is_little_endian()) {
            hash = bit_ops::swap<N>(hash);
        }

        memcpy(digest.data(), &hash, sizeof(canonical_t<N>));
    }

    hash_t<N> get_hash() const { return mem_ops::readBE<N>(&digest); }
};

using canonical32_t = canonical_t<32>;
using canonical64_t = canonical_t<64>;
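
/* canonical_t stores the hash as a big-endian byte sequence, the usual portable layout for
 * storing or transmitting xxHash values. Illustrative round trip (`data`/`size` are placeholders):
 *   xxh::hash64_t h = xxh::xxhash<64>(data, size);
 *   xxh::canonical64_t canon(h);            // canon.digest holds the big-endian bytes
 *   xxh::hash64_t back = canon.get_hash();  // back == h
 */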