diff options
| author | Peter Maydell | 2021-01-19 16:47:23 +0100 |
|---|---|---|
| committer | Peter Maydell | 2021-01-19 16:47:23 +0100 |
| commit | 48202c712412c803ddb56365c7bca322aa4e7506 (patch) | |
| tree | d619d954218fd9e75a193d1372e5c34dffe5ae72 /include | |
| parent | Merge remote-tracking branch 'remotes/huth-gitlab/tags/pull-request-2021-01-1... (diff) | |
| parent | docs: Build and install all the docs in a single manual (diff) | |
| download | qemu-48202c712412c803ddb56365c7bca322aa4e7506.tar.gz qemu-48202c712412c803ddb56365c7bca322aa4e7506.tar.xz qemu-48202c712412c803ddb56365c7bca322aa4e7506.zip | |
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20210119-1' into staging
target-arm queue:
* Implement IMPDEF pauth algorithm
* Support ARMv8.4-SEL2
* Fix bug where we were truncating predicate vector lengths in SVE insns
* npcm7xx_adc-test: Fix memleak in adc_qom_set
* target/arm/m_helper: Silence GCC 10 maybe-uninitialized error
* docs: Build and install all the docs in a single manual
# gpg: Signature made Tue 19 Jan 2021 15:46:34 GMT
# gpg: using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg: issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg: aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE
* remotes/pmaydell/tags/pull-target-arm-20210119-1: (29 commits)
docs: Build and install all the docs in a single manual
target/arm/m_helper: Silence GCC 10 maybe-uninitialized error
npcm7xx_adc-test: Fix memleak in adc_qom_set
target/arm: Update REV, PUNPK for pred_desc
target/arm: Update ZIP, UZP, TRN for pred_desc
target/arm: Update PFIRST, PNEXT for pred_desc
target/arm: Introduce PREDDESC field definitions
target/arm: refactor vae1_tlbmask()
target/arm: enable Secure EL2 in max CPU
target/arm: Implement SCR_EL2.EEL2
target/arm: revector to run-time pick target EL
target/arm: set HPFAR_EL2.NS on secure stage 2 faults
target/arm: secure stage 2 translation regime
target/arm: generalize 2-stage page-walk condition
target/arm: translate NS bit in page-walks
target/arm: do S1_ptw_translate() before address space lookup
target/arm: handle VMID change in secure state
target/arm: add ARMv8.4-SEL2 system registers
target/arm: add MMU stage 1 for Secure EL2
target/arm: add 64-bit S-EL2 to EL exception table
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'include')
| -rw-r--r-- | include/qemu/xxhash.h | 98 |
1 files changed, 98 insertions, 0 deletions
diff --git a/include/qemu/xxhash.h b/include/qemu/xxhash.h index 076f1f6054..c2dcccadbf 100644 --- a/include/qemu/xxhash.h +++ b/include/qemu/xxhash.h @@ -119,4 +119,102 @@ static inline uint32_t qemu_xxhash6(uint64_t ab, uint64_t cd, uint32_t e, return qemu_xxhash7(ab, cd, e, f, 0); } +/* + * Component parts of the XXH64 algorithm from + * https://github.com/Cyan4973/xxHash/blob/v0.8.0/xxhash.h + * + * The complete algorithm looks like + * + * i = 0; + * if (len >= 32) { + * v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; + * v2 = seed + XXH_PRIME64_2; + * v3 = seed + 0; + * v4 = seed - XXH_PRIME64_1; + * do { + * v1 = XXH64_round(v1, get64bits(input + i)); + * v2 = XXH64_round(v2, get64bits(input + i + 8)); + * v3 = XXH64_round(v3, get64bits(input + i + 16)); + * v4 = XXH64_round(v4, get64bits(input + i + 24)); + * } while ((i += 32) <= len); + * h64 = XXH64_mergerounds(v1, v2, v3, v4); + * } else { + * h64 = seed + XXH_PRIME64_5; + * } + * h64 += len; + * + * for (; i + 8 <= len; i += 8) { + * h64 ^= XXH64_round(0, get64bits(input + i)); + * h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4; + * } + * for (; i + 4 <= len; i += 4) { + * h64 ^= get32bits(input + i) * XXH_PRIME64_1; + * h64 = rol64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; + * } + * for (; i < len; i += 1) { + * h64 ^= get8bits(input + i) * XXH_PRIME64_5; + * h64 = rol64(h64, 11) * XXH_PRIME64_1; + * } + * + * return XXH64_avalanche(h64); + * + * Exposing the pieces instead allows for simplified usage when + * the length is a known constant and the inputs are in registers. 
+ */ +#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL +#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL +#define XXH_PRIME64_3 0x165667B19E3779F9ULL +#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL +#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL + +static inline uint64_t XXH64_round(uint64_t acc, uint64_t input) +{ + return rol64(acc + input * XXH_PRIME64_2, 31) * XXH_PRIME64_1; +} + +static inline uint64_t XXH64_mergeround(uint64_t acc, uint64_t val) +{ + return (acc ^ XXH64_round(0, val)) * XXH_PRIME64_1 + XXH_PRIME64_4; +} + +static inline uint64_t XXH64_mergerounds(uint64_t v1, uint64_t v2, + uint64_t v3, uint64_t v4) +{ + uint64_t h64; + + h64 = rol64(v1, 1) + rol64(v2, 7) + rol64(v3, 12) + rol64(v4, 18); + h64 = XXH64_mergeround(h64, v1); + h64 = XXH64_mergeround(h64, v2); + h64 = XXH64_mergeround(h64, v3); + h64 = XXH64_mergeround(h64, v4); + + return h64; +} + +static inline uint64_t XXH64_avalanche(uint64_t h64) +{ + h64 ^= h64 >> 33; + h64 *= XXH_PRIME64_2; + h64 ^= h64 >> 29; + h64 *= XXH_PRIME64_3; + h64 ^= h64 >> 32; + return h64; +} + +static inline uint64_t qemu_xxhash64_4(uint64_t a, uint64_t b, + uint64_t c, uint64_t d) +{ + uint64_t v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2; + uint64_t v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2; + uint64_t v3 = QEMU_XXHASH_SEED + 0; + uint64_t v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1; + + v1 = XXH64_round(v1, a); + v2 = XXH64_round(v2, b); + v3 = XXH64_round(v3, c); + v4 = XXH64_round(v4, d); + + return XXH64_avalanche(XXH64_mergerounds(v1, v2, v3, v4)); +} + #endif /* QEMU_XXHASH_H */ |
