13 xxHash - Extremely Fast Hash algorithm
15 Copyright (C) 2012-2018, Yann Collet.
16 Copyright (C) 2017-2018, Piotr Pliszka.
19 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
20 Redistribution and use in source and binary forms, with or without
21 modification, are permitted provided that the following conditions are
23 * Redistributions of source code must retain the above copyright
24 notice, this list of conditions and the following disclaimer.
25 * Redistributions in binary form must reproduce the above
26 copyright notice, this list of conditions and the following disclaimer
27 in the documentation and/or other materials provided with the
29 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 You can contact the author at :
41 - xxHash source repository : https://github.com/Cyan4973/xxHash
42 - xxHash C++ port repository : https://github.com/RedSpah/xxhash_cpp
45 /* *************************************
47 ***************************************/
48 /*!XXH_FORCE_MEMORY_ACCESS :
49 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
50 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
* The switch below allows selecting a different access method for improved performance.
52 * Method 0 (default) : use `memcpy()`. Safe and portable.
53 * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
54 * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
* Method 2 : direct access. This method doesn't depend on the compiler, but violates the C standard.
56 * It can generate buggy code on targets which do not support unaligned memory accesses.
57 * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
58 * See http://stackoverflow.com/a/32095106/646947 for details.
59 * Prefer these methods in priority order (0 > 1 > 2)
61 #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
62 # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
63 # define XXH_FORCE_MEMORY_ACCESS 2
64 # elif defined(__INTEL_COMPILER) || (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
65 # define XXH_FORCE_MEMORY_ACCESS 1
70 /*!XXH_FORCE_NATIVE_FORMAT :
71 * By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
72 * Results are therefore identical for little-endian and big-endian CPU.
73 * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
74 * Should endian-independence be of no importance for your application, you may set the #define below to 1,
75 * to improve speed for Big-endian CPU.
* This option has no impact on little-endian CPUs.
78 #if !defined(XXH_FORCE_NATIVE_FORMAT) || (XXH_FORCE_NATIVE_FORMAT == 0) /* can be defined externally */
79 # define XXH_FORCE_NATIVE_FORMAT 0
80 # define XXH_CPU_LITTLE_ENDIAN 1
84 /*!XXH_FORCE_ALIGN_CHECK :
85 * This is a minor performance trick, only useful with lots of very small keys.
86 * It means : check for aligned/unaligned input.
87 * The check costs one initial branch per hash;
88 * set it to 0 when the input is guaranteed to be aligned,
89 * or when alignment doesn't matter for performance.
91 #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
92 # if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
93 # define XXH_FORCE_ALIGN_CHECK 0
95 # define XXH_FORCE_ALIGN_CHECK 1
99 /*!XXH_CPU_LITTLE_ENDIAN :
100 * This is a CPU endian detection macro, will be
101 * automatically set to 1 (little endian) if XXH_FORCE_NATIVE_FORMAT
102 * is left undefined, XXH_FORCE_NATIVE_FORMAT is defined to 0, or if an x86/x86_64 compiler macro is defined.
103 * If left undefined, endianness will be determined at runtime, at the cost of a slight one-time overhead
104 * and a larger overhead due to get_endian() not being constexpr.
106 #ifndef XXH_CPU_LITTLE_ENDIAN
107 # if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
108 # define XXH_CPU_LITTLE_ENDIAN 1
112 /* *************************************
113 * Compiler Specific Options
114 ***************************************/
115 #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
119 /* *************************************
121 ***************************************/
/* Version of this C++ port, mirroring xxHash's MMmmrr encoding. */
constexpr int cpp_version_major = 0;
constexpr int cpp_version_minor = 6;
constexpr int cpp_version_release = 5;

/// Returns the port version packed as major * 10000 + minor * 100 + release.
constexpr uint32_t version_number()
{
	return static_cast<uint32_t>(cpp_version_major * 10000 + cpp_version_minor * 100 + cpp_version_release);
}
127 namespace hash_t_impl
129 /* *************************************
130 * Basic Types - Detail
131 ***************************************/
/* Underlying unsigned integer types for the two supported hash widths. */
using _hash32_underlying = uint32_t;
using _hash64_underlying = uint64_t;

/* Maps a bit width N to its hash integer type; `void` for unsupported widths
 * so misuse fails to compile at the point of use. As shown in this chunk the
 * specializations lacked their `template` headers, which is ill-formed. */
template <size_t N>
struct hash_type { using type = void; };
template <>
struct hash_type<32> { using type = _hash32_underlying; };
template <>
struct hash_type<64> { using type = _hash64_underlying; };
144 /* *************************************
145 * Basic Types - Public
146 ***************************************/
149 using hash_t = typename hash_t_impl::hash_type<N>::type;
150 using hash32_t = hash_t<32>;
151 using hash64_t = hash_t<64>;
153 /* *************************************
154 * Bit Functions - Public
155 ***************************************/
159 /* ****************************************
160 * Intrinsics and Bit Operations
161 ******************************************/
#if defined(_MSC_VER)
/* MSVC: use the compiler's rotate intrinsics. */
inline uint32_t rotl32(uint32_t x, int32_t r) { return _rotl(x, r); }
inline uint64_t rotl64(uint64_t x, int32_t r) { return _rotl64(x, r); }
/* Portable fallback rotates for non-MSVC compilers.
 * The shift amounts are masked to the word size so that r == 0 (or a
 * full-width rotation) cannot produce a shift by 32/64 bits, which is
 * undefined behavior; the original `x >> (32 - r)` form is UB for r == 0.
 * For r in 1..31 (resp. 1..63) the result is unchanged. */
inline uint32_t rotl32(uint32_t x, int32_t r) { return ((x << (r & 31)) | (x >> ((32 - r) & 31))); }
inline uint64_t rotl64(uint64_t x, int32_t r) { return ((x << (r & 63)) | (x >> ((64 - r) & 63))); }
#if defined(_MSC_VER) /* Visual Studio */
/* Byte-swap via MSVC intrinsics. */
inline uint32_t swap32(uint32_t x) { return _byteswap_ulong(x); }
inline uint64_t swap64(uint64_t x) { return _byteswap_uint64(x); }
#elif XXH_GCC_VERSION >= 403
/* GCC >= 4.3 provides byte-swap builtins. */
inline uint32_t swap32(uint32_t x) { return __builtin_bswap32(x); }
inline uint64_t swap64(uint64_t x) { return __builtin_bswap64(x); }
/* Portable byte-swap fallbacks (no compiler intrinsic available). */
inline uint32_t swap32(uint32_t x)
{
	uint32_t swapped = (x >> 24) & 0x000000ff;
	swapped |= (x >> 8) & 0x0000ff00;
	swapped |= (x << 8) & 0x00ff0000;
	swapped |= (x << 24) & 0xff000000;
	return swapped;
}
inline uint64_t swap64(uint64_t x)
{
	/* Swap each 32-bit half, then exchange the halves. */
	const uint64_t hi = static_cast<uint64_t>(swap32(static_cast<uint32_t>(x)));
	const uint64_t lo = static_cast<uint64_t>(swap32(static_cast<uint32_t>(x >> 32)));
	return (hi << 32) | lo;
}
182 inline hash_t<N> rotl(hash_t<N> n, int32_t r) {};
185 inline hash_t<32> rotl<32>(hash_t<32> n, int32_t r)
191 inline hash_t<64> rotl<64>(hash_t<64> n, int32_t r)
197 inline hash_t<N> swap(hash_t<N> n) {};
200 inline hash_t<32> swap<32>(hash_t<32> n)
206 inline hash_t<64> swap<64>(hash_t<64> n)
212 /* *************************************
213 * Memory Functions - Public
214 ***************************************/
/* Whether a read may assume the source pointer is naturally aligned. */
enum class alignment : uint8_t { aligned, unaligned };
/* Byte-order selector; `unspecified` defers to detection via mem_ops::get_endian. */
enum class endianness : uint8_t { big_endian = 0, little_endian = 1, unspecified = 2 };
221 /* *************************************
223 ***************************************/
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
/* Method 2: reinterpret the source bytes in place. Fast, but strictly
 * speaking undefined behavior for unaligned pointers.
 * NOTE(review): appears to rely on a `template <size_t N>` header that is
 * not visible in this chunk - confirm against the full file. */
inline hash_t<N> read_unaligned(const void* memPtr) { return *(const hash_t<N>*)memPtr; }
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
/* Method 1: read through a union declared packed so the compiler emits an
 * unaligned-tolerant load (gcc/icc extension). */
using unalign = union { hash_t<N> uval; } __attribute((packed));
inline hash_t<N> read_unaligned(const void* memPtr) { return ((const unalign*)memPtr)->uval; }
241 /* portable and safe solution. Generally efficient.
242 * see : http://stackoverflow.com/a/32095106/646947
245 inline hash_t<N> read_unaligned(const void* memPtr)
248 memcpy(&val, memPtr, sizeof(val));
252 #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
/* Width-named convenience wrappers over read_unaligned. */
inline hash_t<32> read32(const void* memPtr) { return read_unaligned<32>(memPtr); }
inline hash_t<64> read64(const void* memPtr) { return read_unaligned<64>(memPtr); }
257 /* *************************************
258 * Architecture Macros
259 ***************************************/
261 /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
263 #ifndef XXH_CPU_LITTLE_ENDIAN
/* Runtime endianness resolution, used when XXH_CPU_LITTLE_ENDIAN is not
 * known at compile time: `unspecified` maps to the byte order probed once at
 * startup, explicit values pass through unchanged.
 * NOTE(review): several structural lines (braces, the probe variable feeding
 * &g_one) are not visible in this chunk - the fragment is incomplete as shown. */
inline endianness get_endian(endianness endian)
static struct _dummy_t
std::array<endianness, 3> endian_lookup = { endianness::big_endian, endianness::little_endian, endianness::unspecified };
/* Probe: the first byte of an int holding 1 is 1 only on little-endian CPUs. */
endian_lookup[2] = static_cast<endianness>(*(const char*)(&g_one));
return _dummy.endian_lookup[(uint8_t)endian];
/* True when the probed byte order is little-endian. */
inline bool is_little_endian()
return get_endian(endianness::unspecified) == endianness::little_endian;
/* Compile-time variant: XXH_CPU_LITTLE_ENDIAN is known, so the lookup table
 * and the query are both constexpr (no runtime probe). */
constexpr endianness get_endian(endianness endian)
constexpr std::array<endianness, 3> endian_lookup = { endianness::big_endian, endianness::little_endian, (XXH_CPU_LITTLE_ENDIAN) ? endianness::little_endian : endianness::big_endian };
return endian_lookup[static_cast<uint8_t>(endian)];
constexpr bool is_little_endian()
return get_endian(endianness::unspecified) == endianness::little_endian;
301 /* ***************************
303 *****************************/
/* Little-endian read with explicit alignment knowledge: aligned inputs are
 * dereferenced directly, unaligned inputs go through read_unaligned; on
 * big-endian hosts the result is byte-swapped.
 * NOTE(review): braces/else and the `template <size_t N>` headers for these
 * functions are not visible in this chunk - incomplete as shown. */
inline hash_t<N> readLE_align(const void* ptr, endianness endian, alignment align)
if (align == alignment::unaligned)
return endian == endianness::little_endian ? read_unaligned<N>(ptr) : bit_ops::swap<N>(read_unaligned<N>(ptr));
return endian == endianness::little_endian ? *reinterpret_cast<const hash_t<N>*>(ptr) : bit_ops::swap<N>(*reinterpret_cast<const hash_t<N>*>(ptr));
/* Little-endian read when alignment is unknown: assume unaligned. */
inline hash_t<N> readLE(const void* ptr, endianness endian)
return readLE_align<N>(ptr, endian, alignment::unaligned);
/* Big-endian read: swap on little-endian hosts. */
inline hash_t<N> readBE(const void* ptr)
return is_little_endian() ? bit_ops::swap<N>(read_unaligned<N>(ptr)) : read_unaligned<N>(ptr);
/* Reports whether `input` is aligned for an N-bit load; always `unaligned`
 * when XXH_FORCE_ALIGN_CHECK is disabled. */
inline alignment get_alignment(const void* input)
return ((XXH_FORCE_ALIGN_CHECK) && ((reinterpret_cast<uintptr_t>(input) & ((N / 8) - 1)) == 0)) ? xxh::alignment::aligned : xxh::alignment::unaligned;
338 /* *******************************************************************
340 *********************************************************************/
344 /* *******************************************************************
345 * Hash functions - Implementation
346 *********************************************************************/
/* The five 32-bit and 64-bit xxHash prime multipliers (PRIME32_1..5 / PRIME64_1..5). */
constexpr static std::array<hash32_t, 5> primes32 = { 2654435761U, 2246822519U, 3266489917U, 668265263U, 374761393U };
constexpr static std::array<hash64_t, 5> primes64 = { 11400714785074694791ULL, 14029467366897019727ULL, 1609587929392839161ULL, 9650029242287828579ULL, 2870177450012600261ULL };
352 constexpr hash_t<N> PRIME(int32_t n) {};
355 constexpr hash32_t PRIME<32>(int32_t n)
357 return primes32[n - 1];
361 constexpr hash64_t PRIME<64>(int32_t n)
363 return primes64[n - 1];
367 inline hash_t<N> round(hash_t<N> seed, hash_t<N> input)
369 seed += input * PRIME<N>(2);
370 seed = bit_ops::rotl<N>(seed, ((N == 32) ? 13 : 31));
375 inline hash64_t mergeRound64(hash64_t acc, hash64_t val)
377 val = round<64>(0, val);
379 acc = acc * PRIME<64>(1) + PRIME<64>(4);
384 inline void endian_align_sub_mergeround([[maybe_unused]] hash_t<N>& hash_ret, hash_t<N> v1, hash_t<N> v2, hash_t<N> v3, hash_t<N> v4) {};
387 inline void endian_align_sub_mergeround<64>(hash_t<64>& hash_ret, hash_t<64> v1, hash_t<64> v2, hash_t<64> v3, hash_t<64> v4)
389 hash_ret = mergeRound64(hash_ret, v1);
390 hash_ret = mergeRound64(hash_ret, v2);
391 hash_ret = mergeRound64(hash_ret, v3);
392 hash_ret = mergeRound64(hash_ret, v4);
/* Tail + avalanche stage: consumes the bytes left after the main stripe loop,
 * then applies the final avalanche mixer. Only the 32/64 specializations are
 * ever used.
 * NOTE(review): `template<>` headers, braces, pointer increments and return
 * statements are not visible in this chunk - the fragment is incomplete as
 * shown; confirm against the full file. */
inline hash_t<N> endian_align_sub_ending(hash_t<N> hash_ret, const uint8_t* p, const uint8_t* bEnd, xxh::endianness endian, xxh::alignment align) {};
inline hash_t<32> endian_align_sub_ending<32>(hash_t<32> hash_ret, const uint8_t* p, const uint8_t* bEnd, xxh::endianness endian, xxh::alignment align)
/* Consume remaining input in 4-byte chunks. */
while ((p + 4) <= bEnd)
hash_ret += mem_ops::readLE_align<32>(p, endian, align) * PRIME<32>(3);
hash_ret = bit_ops::rotl<32>(hash_ret, 17) * PRIME<32>(4);
/* Then byte by byte. */
hash_ret += (*p) * PRIME<32>(5);
hash_ret = bit_ops::rotl<32>(hash_ret, 11) * PRIME<32>(1);
/* XXH32 avalanche: shift-xor / multiply cascade spreads entropy to all bits. */
hash_ret ^= hash_ret >> 15;
hash_ret *= PRIME<32>(2);
hash_ret ^= hash_ret >> 13;
hash_ret *= PRIME<32>(3);
hash_ret ^= hash_ret >> 16;
inline hash_t<64> endian_align_sub_ending<64>(hash_t<64> hash_ret, const uint8_t* p, const uint8_t* bEnd, xxh::endianness endian, xxh::alignment align)
/* Consume remaining input in 8-byte chunks, each run through a full round. */
while (p + 8 <= bEnd)
const hash64_t k1 = round<64>(0, mem_ops::readLE_align<64>(p, endian, align));
hash_ret = bit_ops::rotl<64>(hash_ret, 27) * PRIME<64>(1) + PRIME<64>(4);
/* Then one 4-byte chunk if available... */
hash_ret ^= static_cast<hash64_t>(mem_ops::readLE_align<32>(p, endian, align)) * PRIME<64>(1);
hash_ret = bit_ops::rotl<64>(hash_ret, 23) * PRIME<64>(2) + PRIME<64>(3);
/* ...then byte by byte. */
hash_ret ^= (*p) * PRIME<64>(5);
hash_ret = bit_ops::rotl<64>(hash_ret, 11) * PRIME<64>(1);
/* XXH64 avalanche. */
hash_ret ^= hash_ret >> 33;
hash_ret *= PRIME<64>(2);
hash_ret ^= hash_ret >> 29;
hash_ret *= PRIME<64>(3);
hash_ret ^= hash_ret >> 32;
/* Core one-shot hash: processes the input in four parallel lanes of N/8 bytes
 * when enough data is present, converges the lanes, adds the length, and
 * finishes via endian_align_sub_ending.
 * NOTE(review): the long-input guard (presumably len >= N/2), hash_ret's
 * declaration, and several braces are not visible in this chunk - the
 * fragment is incomplete as shown; confirm against the full file. */
inline hash_t<N> endian_align(const void* input, size_t len, hash_t<N> seed, xxh::endianness endian, xxh::alignment align)
static_assert(!(N != 32 && N != 64), "You can only call endian_align in 32 or 64 bit mode.");
const uint8_t* p = static_cast<const uint8_t*>(input);
const uint8_t* bEnd = p + len;
const uint8_t* const limit = bEnd - (N / 2);
/* Lane seeds as prescribed by the xxHash algorithm. */
hash_t<N> v1 = seed + PRIME<N>(1) + PRIME<N>(2);
hash_t<N> v2 = seed + PRIME<N>(2);
hash_t<N> v3 = seed + 0;
hash_t<N> v4 = seed - PRIME<N>(1);
/* Main stripe loop: one round per lane per 4*(N/8)-byte stripe. */
v1 = round<N>(v1, mem_ops::readLE_align<N>(p, endian, align)); p += (N / 8);
v2 = round<N>(v2, mem_ops::readLE_align<N>(p, endian, align)); p += (N / 8);
v3 = round<N>(v3, mem_ops::readLE_align<N>(p, endian, align)); p += (N / 8);
v4 = round<N>(v4, mem_ops::readLE_align<N>(p, endian, align)); p += (N / 8);
} while (p <= limit);
/* Converge the four lanes by rotation, then (64-bit only) merge rounds. */
hash_ret = bit_ops::rotl<N>(v1, 1) + bit_ops::rotl<N>(v2, 7) + bit_ops::rotl<N>(v3, 12) + bit_ops::rotl<N>(v4, 18);
endian_align_sub_mergeround<N>(hash_ret, v1, v2, v3, v4);
/* Short input: skip the lane machinery entirely. */
else { hash_ret = seed + PRIME<N>(5); }
hash_ret += static_cast<hash_t<N>>(len);
return endian_align_sub_ending<N>(hash_ret, p, bEnd, endian, align);
/* Public one-shot API: every overload reduces its argument to a
 * (pointer, byte length) pair and forwards to detail::endian_align.
 * NOTE(review): the `template <size_t N>` header of the first overload and
 * the function braces are not visible in this chunk. */
hash_t<N> xxhash(const void* input, size_t len, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
return detail::endian_align<N>(input, len, seed, mem_ops::get_endian(endian), mem_ops::get_alignment<N>(input));
/* std::basic_string: hashes length()*sizeof(T) bytes of character data. */
template <size_t N, typename T>
hash_t<N> xxhash(const std::basic_string<T>& input, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
return detail::endian_align<N>(static_cast<const void*>(input.data()), input.length() * sizeof(T), seed, mem_ops::get_endian(endian), mem_ops::get_alignment<N>(static_cast<const void*>(input.data())));
/* Iterator pair: requires contiguous storage - the address of *begin is used
 * as the buffer base. */
template <size_t N, typename ContiguousIterator>
hash_t<N> xxhash(ContiguousIterator begin, ContiguousIterator end, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
using T = typename std::decay_t<decltype(*end)>;
return detail::endian_align<N>(static_cast<const void*>(&*begin), (end - begin) * sizeof(T), seed, mem_ops::get_endian(endian), mem_ops::get_alignment<N>(static_cast<const void*>(&*begin)));
/* std::vector: hashes size()*sizeof(T) bytes. */
template <size_t N, typename T>
hash_t<N> xxhash(const std::vector<T>& input, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
return detail::endian_align<N>(static_cast<const void*>(input.data()), input.size() * sizeof(T), seed, mem_ops::get_endian(endian), mem_ops::get_alignment<N>(static_cast<const void*>(input.data())));
/* std::array: element count known at compile time. */
template <size_t N, typename T, size_t AN>
hash_t<N> xxhash(const std::array<T, AN>& input, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
return detail::endian_align<N>(static_cast<const void*>(input.data()), AN * sizeof(T), seed, mem_ops::get_endian(endian), mem_ops::get_alignment<N>(static_cast<const void*>(input.data())));
/* std::initializer_list: hashes the backing array. */
template <size_t N, typename T>
hash_t<N> xxhash(const std::initializer_list<T>& input, hash_t<N> seed = 0, endianness endian = endianness::unspecified)
static_assert(!(N != 32 && N != 64), "You can only call xxhash in 32 or 64 bit mode.");
return detail::endian_align<N>(static_cast<const void*>(input.begin()), input.size() * sizeof(T), seed, mem_ops::get_endian(endian), mem_ops::get_alignment<N>(static_cast<const void*>(input.begin())));
539 /* *******************************************************************
541 *********************************************************************/
/* Result code for the streaming API. */
enum class error_code : uint8_t { ok = 0, error };
/* Streaming hash state fields. NOTE(review): the enclosing class declaration
 * is not visible in this chunk. */
/* Total bytes fed so far; drives the short-input path in digest. */
uint64_t total_len = 0;
/* The four lane accumulators. */
hash_t<N> v1 = 0, v2 = 0, v3 = 0, v4 = 0;
/* Holds a partial stripe (up to N/2 bytes) carried between update calls. */
std::array<hash_t<N>, 4> mem = {{ 0,0,0,0 }};
/* Number of valid bytes currently buffered in `mem`. */
uint32_t memsize = 0;
/* Core streaming update: buffers partial stripes in `mem` and runs the
 * four-lane round loop over whole stripes of N/2 bytes.
 * NOTE(review): several lines (braces, the total_len accumulation, parts of
 * the buffered-data branch) are not visible in this chunk - the fragment is
 * incomplete as shown; confirm against the full file. */
inline error_code _update_impl(const void* input, size_t length, endianness endian)
const uint8_t* p = reinterpret_cast<const uint8_t*>(input);
const uint8_t* const bEnd = p + length;
/* Null input is reported, never dereferenced. */
if (!input) { return xxh::error_code::error; }
/* Not enough data for a full stripe: stash it and return. */
if (memsize + length < (N / 2))
{ /* fill in tmp buffer */
memcpy(reinterpret_cast<uint8_t*>(mem.data()) + memsize, input, length);
memsize += static_cast<uint32_t>(length);
return error_code::ok;
/* Complete the previously buffered stripe before touching fresh input. */
{ /* some data left from previous update */
memcpy(reinterpret_cast<uint8_t*>(mem.data()) + memsize, input, (N / 2) - memsize);
const hash_t<N>* ptr = mem.data();
v1 = detail::round<N>(v1, mem_ops::readLE<N>(ptr, endian)); ptr++;
v2 = detail::round<N>(v2, mem_ops::readLE<N>(ptr, endian)); ptr++;
v3 = detail::round<N>(v3, mem_ops::readLE<N>(ptr, endian)); ptr++;
v4 = detail::round<N>(v4, mem_ops::readLE<N>(ptr, endian));
p += (N / 2) - memsize;
/* Bulk loop over whole stripes directly from the caller's buffer. */
if (p <= bEnd - (N / 2))
const uint8_t* const limit = bEnd - (N / 2);
v1 = detail::round<N>(v1, mem_ops::readLE<N>(p, endian)); p += (N / 8);
v2 = detail::round<N>(v2, mem_ops::readLE<N>(p, endian)); p += (N / 8);
v3 = detail::round<N>(v3, mem_ops::readLE<N>(p, endian)); p += (N / 8);
v4 = detail::round<N>(v4, mem_ops::readLE<N>(p, endian)); p += (N / 8);
} while (p <= limit);
/* Keep any tail bytes for the next update/digest. */
memcpy(mem.data(), p, static_cast<size_t>(bEnd - p));
memsize = static_cast<uint32_t>(bEnd - p);
return error_code::ok;
/* Finalization over the buffered tail. Declared const: it does not modify
 * the state, so digest may be taken repeatedly while streaming continues.
 * NOTE(review): hash_ret's declaration and some braces are not visible in
 * this chunk - confirm against the full file. */
inline hash_t<N> _digest_impl(endianness endian) const
const uint8_t* p = reinterpret_cast<const uint8_t*>(mem.data());
const uint8_t* const bEnd = reinterpret_cast<const uint8_t*>(mem.data()) + memsize;
/* Long input: the lane accumulators were used - converge and merge them. */
if (total_len > (N / 2))
hash_ret = bit_ops::rotl<N>(v1, 1) + bit_ops::rotl<N>(v2, 7) + bit_ops::rotl<N>(v3, 12) + bit_ops::rotl<N>(v4, 18);
detail::endian_align_sub_mergeround<N>(hash_ret, v1, v2, v3, v4);
/* Short input: v3 is expected to hold the plain seed - confirm against the
 * constructor, whose v3 initialization is not visible in this chunk. */
else { hash_ret = v3 + detail::PRIME<N>(5); }
hash_ret += static_cast<hash_t<N>>(total_len);
return detail::endian_align_sub_ending<N>(hash_ret, p, bEnd, endian, alignment::unaligned);
/* Seeds the four lane accumulators exactly like the one-shot path.
 * NOTE(review): v3's initialization (presumably v3 = seed) is not visible in
 * this chunk - confirm against the full file. */
hash_state_t(hash_t<N> seed = 0)
static_assert(!(N != 32 && N != 64), "You can only stream hashing in 32 or 64 bit mode.");
v1 = seed + detail::PRIME<N>(1) + detail::PRIME<N>(2);
v2 = seed + detail::PRIME<N>(2);
v4 = seed - detail::PRIME<N>(1);
633 hash_state_t operator=(hash_state_t<N>& other)
635 memcpy(this, other, sizeof(hash_state_t<N>));
/* Re-seeds the state for a fresh hash: zeroes all fields, then re-derives
 * the lane seeds. Always returns ok.
 * NOTE(review): v3's re-initialization is not visible in this chunk -
 * confirm against the full file. */
error_code reset(hash_t<N> seed = 0)
memset(this, 0, sizeof(hash_state_t<N>));
v1 = seed + detail::PRIME<N>(1) + detail::PRIME<N>(2);
v2 = seed + detail::PRIME<N>(2);
v4 = seed - detail::PRIME<N>(1);
return error_code::ok;
/* Streaming update overloads: each reduces its argument to a
 * (pointer, byte length) pair and forwards to _update_impl. */
error_code update(const void* input, size_t length, endianness endian = endianness::unspecified)
return _update_impl(input, length, mem_ops::get_endian(endian));
template <typename T>
error_code update(const std::basic_string<T>& input, endianness endian = endianness::unspecified)
return _update_impl(static_cast<const void*>(input.data()), input.length() * sizeof(T), mem_ops::get_endian(endian));
/* Contiguous iterator pair: &*begin is used as the buffer base. */
template <typename ContiguousIterator>
error_code update(ContiguousIterator begin, ContiguousIterator end, endianness endian = endianness::unspecified)
using T = typename std::decay_t<decltype(*end)>;
return _update_impl(static_cast<const void*>(&*begin), (end - begin) * sizeof(T), mem_ops::get_endian(endian));
template <typename T>
error_code update(const std::vector<T>& input, endianness endian = endianness::unspecified)
return _update_impl(static_cast<const void*>(input.data()), input.size() * sizeof(T), mem_ops::get_endian(endian));
template <typename T, size_t AN>
error_code update(const std::array<T, AN>& input, endianness endian = endianness::unspecified)
return _update_impl(static_cast<const void*>(input.data()), AN * sizeof(T), mem_ops::get_endian(endian));
template <typename T>
error_code update(const std::initializer_list<T>& input, endianness endian = endianness::unspecified)
return _update_impl(static_cast<const void*>(input.begin()), input.size() * sizeof(T), mem_ops::get_endian(endian));
/* Hash of everything fed so far; streaming may continue afterwards. */
hash_t<N> digest(endianness endian = endianness::unspecified)
return _digest_impl(mem_ops::get_endian(endian));
/* Convenience aliases for the two supported widths. */
using hash_state32_t = hash_state_t<32>;
using hash_state64_t = hash_state_t<64>;
694 /* *******************************************************************
696 *********************************************************************/
/* Canonical (big-endian) byte representation of a hash value.
 * NOTE(review): the struct declaration line is not visible in this chunk. */
std::array<uint8_t, N / 8> digest;\
/* Stores `hash` big-endian: the value is byte-swapped first on little-endian
 * hosts. NOTE(review): the memcpy sizes the copy with sizeof(canonical_t<N>),
 * which equals sizeof(hash) only while `digest` is the sole member -
 * sizeof(hash) would be the safer expression; confirm. */
canonical_t(hash_t<N> hash)
if (mem_ops::is_little_endian()) { hash = bit_ops::swap<N>(hash); }
memcpy(digest.data(), &hash, sizeof(canonical_t<N>));
/* Reads the canonical bytes back as a native hash value. */
hash_t<N> get_hash() const
return mem_ops::readBE<N>(&digest);
/* Convenience aliases for the two supported widths. */
using canonical32_t = canonical_t<32>;
using canonical64_t = canonical_t<64>;