author    Richard Henderson  2021-01-12 00:57:38 +0100
committer Peter Maydell      2021-01-19 15:38:51 +0100
commit    283fc52ade85eb50141f3b8b85f82b07d016cb17
tree      12369c759d3c8bcbdf62420794bce3f0c951e7eb
parent    Merge remote-tracking branch 'remotes/huth-gitlab/tags/pull-request-2021-01-1...
target/arm: Implement an IMPDEF pauth algorithm
Without hardware acceleration, a cryptographically strong
algorithm is too expensive for pauth_computepac.

Even with hardware accel, we are not currently expecting
to link the linux-user binaries to any crypto libraries,
and doing so would generally make the --static build fail.

So choose XXH64 as a reasonably quick and decent hash.

Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210111235740.462469-2-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r-- include/qemu/xxhash.h     | 98
-rw-r--r-- target/arm/cpu.h          | 15
-rw-r--r-- target/arm/pauth_helper.c | 27
3 files changed, 131 insertions(+), 9 deletions(-)
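To see why XXH64 is "reasonably quick" here: hashing the four 64-bit inputs (pointer, modifier, and the two halves of the 128-bit key) costs one multiply-rotate-multiply round per word plus a fixed merge and avalanche, entirely in general-purpose registers, with no tables and no library calls. Below is a minimal standalone sketch of the same computation as the patch's qemu_xxhash64_4(); the seed value 1 is an assumption matching QEMU_XXHASH_SEED in qemu/xxhash.h, and the input values are arbitrary.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define P1 0x9E3779B185EBCA87ULL
    #define P2 0xC2B2AE3D27D4EB4FULL
    #define P3 0x165667B19E3779F9ULL
    #define P4 0x85EBCA77C2B2AE63ULL
    #define SEED 1ULL  /* assumption: QEMU_XXHASH_SEED */

    static uint64_t rol64(uint64_t x, unsigned r)
    {
        return (x << r) | (x >> (64 - r));
    }

    static uint64_t round64(uint64_t acc, uint64_t in)
    {
        return rol64(acc + in * P2, 31) * P1;
    }

    static uint64_t merge64(uint64_t acc, uint64_t v)
    {
        return (acc ^ round64(0, v)) * P1 + P4;
    }

    /* Hash four 64-bit words; mirrors qemu_xxhash64_4() from the patch. */
    static uint64_t hash4(uint64_t a, uint64_t b, uint64_t c, uint64_t d)
    {
        uint64_t v1 = SEED + P1 + P2, v2 = SEED + P2, v3 = SEED, v4 = SEED - P1;
        uint64_t h;

        v1 = round64(v1, a);
        v2 = round64(v2, b);
        v3 = round64(v3, c);
        v4 = round64(v4, d);

        h = rol64(v1, 1) + rol64(v2, 7) + rol64(v3, 12) + rol64(v4, 18);
        h = merge64(h, v1);
        h = merge64(h, v2);
        h = merge64(h, v3);
        h = merge64(h, v4);

        h ^= h >> 33;          /* final avalanche */
        h *= P2;
        h ^= h >> 29;
        h *= P3;
        h ^= h >> 32;
        return h;
    }

    int main(void)
    {
        /* Arbitrary pointer, modifier and key values, for illustration only. */
        printf("%016" PRIx64 "\n",
               hash4(0x0000aaaabbbbccccULL, 0x42ULL,
                     0x0123456789abcdefULL, 0xfedcba9876543210ULL));
        return 0;
    }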
diff --git a/include/qemu/xxhash.h b/include/qemu/xxhash.h
index 076f1f6054..c2dcccadbf 100644
--- a/include/qemu/xxhash.h
+++ b/include/qemu/xxhash.h
@@ -119,4 +119,102 @@ static inline uint32_t qemu_xxhash6(uint64_t ab, uint64_t cd, uint32_t e,
    return qemu_xxhash7(ab, cd, e, f, 0);
}

+/*
+ * Component parts of the XXH64 algorithm from
+ * https://github.com/Cyan4973/xxHash/blob/v0.8.0/xxhash.h
+ *
+ * The complete algorithm looks like
+ *
+ *    i = 0;
+ *    if (len >= 32) {
+ *        v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
+ *        v2 = seed + XXH_PRIME64_2;
+ *        v3 = seed + 0;
+ *        v4 = seed - XXH_PRIME64_1;
+ *        do {
+ *            v1 = XXH64_round(v1, get64bits(input + i));
+ *            v2 = XXH64_round(v2, get64bits(input + i + 8));
+ *            v3 = XXH64_round(v3, get64bits(input + i + 16));
+ *            v4 = XXH64_round(v4, get64bits(input + i + 24));
+ *        } while ((i += 32) <= len);
+ *        h64 = XXH64_mergerounds(v1, v2, v3, v4);
+ *    } else {
+ *        h64 = seed + XXH_PRIME64_5;
+ *    }
+ *    h64 += len;
+ *
+ *    for (; i + 8 <= len; i += 8) {
+ *        h64 ^= XXH64_round(0, get64bits(input + i));
+ *        h64 = rol64(h64, 27) * XXH_PRIME64_1 + XXH_PRIME64_4;
+ *    }
+ *    for (; i + 4 <= len; i += 4) {
+ *        h64 ^= get32bits(input + i) * XXH_PRIME64_1;
+ *        h64 = rol64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
+ *    }
+ *    for (; i < len; i += 1) {
+ *        h64 ^= get8bits(input + i) * XXH_PRIME64_5;
+ *        h64 = rol64(h64, 11) * XXH_PRIME64_1;
+ *    }
+ *
+ *    return XXH64_avalanche(h64);
+ *
+ * Exposing the pieces instead allows for simplified usage when
+ * the length is a known constant and the inputs are in registers.
+ */
+#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL
+#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL
+#define XXH_PRIME64_3 0x165667B19E3779F9ULL
+#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL
+#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL
+
+static inline uint64_t XXH64_round(uint64_t acc, uint64_t input)
+{
+    return rol64(acc + input * XXH_PRIME64_2, 31) * XXH_PRIME64_1;
+}
+
+static inline uint64_t XXH64_mergeround(uint64_t acc, uint64_t val)
+{
+    return (acc ^ XXH64_round(0, val)) * XXH_PRIME64_1 + XXH_PRIME64_4;
+}
+
+static inline uint64_t XXH64_mergerounds(uint64_t v1, uint64_t v2,
+                                         uint64_t v3, uint64_t v4)
+{
+    uint64_t h64;
+
+    h64 = rol64(v1, 1) + rol64(v2, 7) + rol64(v3, 12) + rol64(v4, 18);
+    h64 = XXH64_mergeround(h64, v1);
+    h64 = XXH64_mergeround(h64, v2);
+    h64 = XXH64_mergeround(h64, v3);
+    h64 = XXH64_mergeround(h64, v4);
+
+    return h64;
+}
+
+static inline uint64_t XXH64_avalanche(uint64_t h64)
+{
+    h64 ^= h64 >> 33;
+    h64 *= XXH_PRIME64_2;
+    h64 ^= h64 >> 29;
+    h64 *= XXH_PRIME64_3;
+    h64 ^= h64 >> 32;
+    return h64;
+}
+
+static inline uint64_t qemu_xxhash64_4(uint64_t a, uint64_t b,
+                                       uint64_t c, uint64_t d)
+{
+    uint64_t v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
+    uint64_t v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
+    uint64_t v3 = QEMU_XXHASH_SEED + 0;
+    uint64_t v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;
+
+    v1 = XXH64_round(v1, a);
+    v2 = XXH64_round(v2, b);
+    v3 = XXH64_round(v3, c);
+    v4 = XXH64_round(v4, d);
+
+    return XXH64_avalanche(XXH64_mergerounds(v1, v2, v3, v4));
+}
+
#endif /* QEMU_XXHASH_H */
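A detail worth noting: for a 32-byte input the reference algorithm adds the length to the merged value before the avalanche (the "h64 += len" step in the comment above), a step qemu_xxhash64_4() omits since the length is a known constant. A sketch of the strict reference result for four little-endian 64-bit words, built from the exposed pieces; this is my reading of the comment, not part of the patch, and assumes qemu/xxhash.h is on the include path:

    #include "qemu/osdep.h"
    #include "qemu/xxhash.h"

    /* Reference XXH64 of exactly 32 bytes with seed QEMU_XXHASH_SEED:
     * identical to qemu_xxhash64_4() except for the "+ 32" length step.
     */
    static uint64_t xxh64_ref_4words(uint64_t a, uint64_t b,
                                     uint64_t c, uint64_t d)
    {
        uint64_t v1 = QEMU_XXHASH_SEED + XXH_PRIME64_1 + XXH_PRIME64_2;
        uint64_t v2 = QEMU_XXHASH_SEED + XXH_PRIME64_2;
        uint64_t v3 = QEMU_XXHASH_SEED + 0;
        uint64_t v4 = QEMU_XXHASH_SEED - XXH_PRIME64_1;

        v1 = XXH64_round(v1, a);
        v2 = XXH64_round(v2, b);
        v3 = XXH64_round(v3, c);
        v4 = XXH64_round(v4, d);

        /* The only divergence from qemu_xxhash64_4(): add len (32) here. */
        return XXH64_avalanche(XXH64_mergerounds(v1, v2, v3, v4) + 32);
    }

So qemu_xxhash64_4() is not bit-identical to XXH64 of the same bytes, which is harmless here: a PAC only needs the diffusion of the hash, not compatibility with other XXH64 implementations.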
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 84cc2de3b1..84784070a7 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3918,10 +3918,8 @@ static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id)
{
    /*
-     * Note that while QEMU will only implement the architected algorithm
-     * QARMA, and thus APA+GPA, the host cpu for kvm may use implementation
-     * defined algorithms, and thus API+GPI, and this predicate controls
-     * migration of the 128-bit keys.
+     * Return true if any form of pauth is enabled, as this
+     * predicate controls migration of the 128-bit keys.
     */
    return (id->id_aa64isar1 &
            (FIELD_DP64(0, ID_AA64ISAR1, APA, 0xf) |
@@ -3930,6 +3928,15 @@ static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id)
             FIELD_DP64(0, ID_AA64ISAR1, GPI, 0xf))) != 0;
}

+static inline bool isar_feature_aa64_pauth_arch(const ARMISARegisters *id)
+{
+    /*
+     * Return true if pauth is enabled with the architected QARMA algorithm.
+     * QEMU will always set APA+GPA to the same value.
+     */
+    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0;
+}
+
static inline bool isar_feature_aa64_sb(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0;
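Stripped of QEMU's registerfields macros, the two predicates are plain field tests on ID_AA64ISAR1. A standalone sketch of the bit arithmetic, using the field positions from the Arm ARM (APA at bits [7:4], API at [11:8], GPA at [27:24], GPI at [31:28]):

    #include <stdbool.h>
    #include <stdint.h>

    /* Any nonzero APA/API/GPA/GPI field means some form of pauth exists,
     * which is what gates migration of the 128-bit keys.
     */
    static bool pauth_any(uint64_t id_aa64isar1)
    {
        uint64_t mask = (0xfull << 4) |     /* APA */
                        (0xfull << 8) |     /* API */
                        (0xfull << 24) |    /* GPA */
                        (0xfull << 28);     /* GPI */
        return (id_aa64isar1 & mask) != 0;
    }

    /* Nonzero APA selects the architected QARMA algorithm; per the
     * comment above, QEMU sets APA and GPA together, so testing APA
     * alone suffices.
     */
    static bool pauth_arch(uint64_t id_aa64isar1)
    {
        return ((id_aa64isar1 >> 4) & 0xf) != 0;
    }

FIELD_DP64(0, ID_AA64ISAR1, APA, 0xf) builds exactly such a mask term, and FIELD_EX64 performs the shift-and-mask extraction.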
diff --git a/target/arm/pauth_helper.c b/target/arm/pauth_helper.c
index 564c48faa6..cd6df18150 100644
--- a/target/arm/pauth_helper.c
+++ b/target/arm/pauth_helper.c
@@ -24,6 +24,7 @@
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
+#include "qemu/xxhash.h"
static uint64_t pac_cell_shuffle(uint64_t i)
@@ -207,8 +208,8 @@ static uint64_t tweak_inv_shuffle(uint64_t i)
return o;
}
-static uint64_t pauth_computepac(uint64_t data, uint64_t modifier,
- ARMPACKey key)
+static uint64_t pauth_computepac_architected(uint64_t data, uint64_t modifier,
+ ARMPACKey key)
{
static const uint64_t RC[5] = {
0x0000000000000000ull,
@@ -272,6 +273,22 @@ static uint64_t pauth_computepac(uint64_t data, uint64_t modifier,
return workingval;
}
+static uint64_t pauth_computepac_impdef(uint64_t data, uint64_t modifier,
+ ARMPACKey key)
+{
+ return qemu_xxhash64_4(data, modifier, key.lo, key.hi);
+}
+
+static uint64_t pauth_computepac(CPUARMState *env, uint64_t data,
+ uint64_t modifier, ARMPACKey key)
+{
+ if (cpu_isar_feature(aa64_pauth_arch, env_archcpu(env))) {
+ return pauth_computepac_architected(data, modifier, key);
+ } else {
+ return pauth_computepac_impdef(data, modifier, key);
+ }
+}
+
static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
ARMPACKey *key, bool data)
{
@@ -292,7 +309,7 @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier,
bot_bit = 64 - param.tsz;
ext_ptr = deposit64(ptr, bot_bit, top_bit - bot_bit, ext);
- pac = pauth_computepac(ext_ptr, modifier, *key);
+ pac = pauth_computepac(env, ext_ptr, modifier, *key);
/*
* Check if the ptr has good extension bits and corrupt the
@@ -341,7 +358,7 @@ static uint64_t pauth_auth(CPUARMState *env, uint64_t ptr, uint64_t modifier,
uint64_t pac, orig_ptr, test;
orig_ptr = pauth_original_ptr(ptr, param);
- pac = pauth_computepac(orig_ptr, modifier, *key);
+ pac = pauth_computepac(env, orig_ptr, modifier, *key);
bot_bit = 64 - param.tsz;
top_bit = 64 - 8 * param.tbi;
@@ -442,7 +459,7 @@ uint64_t HELPER(pacga)(CPUARMState *env, uint64_t x, uint64_t y)
uint64_t pac;
pauth_check_trap(env, arm_current_el(env), GETPC());
- pac = pauth_computepac(x, y, env->keys.apga);
+ pac = pauth_computepac(env, x, y, env->keys.apga);
return pac & 0xffffffff00000000ull;
}
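Putting the pieces together, a hypothetical smoke test for the new impdef path. It assumes QEMU's build environment for the includes; pauth_computepac_impdef() itself is static to pauth_helper.c, so the underlying hash is called directly:

    #include "qemu/osdep.h"
    #include "qemu/xxhash.h"
    #include <assert.h>

    int main(void)
    {
        /* Arbitrary pointer, modifier and key values, for illustration only. */
        uint64_t key_lo = 0x0123456789abcdefULL;
        uint64_t key_hi = 0xfedcba9876543210ULL;
        uint64_t ptr    = 0x0000aaaabbbbccccULL;

        /* The impdef PAC is qemu_xxhash64_4(data, modifier, key.lo, key.hi);
         * changing the modifier must change the code with overwhelming
         * probability, or pauth could not catch substituted pointers.
         */
        uint64_t pac0 = qemu_xxhash64_4(ptr, 0, key_lo, key_hi);
        uint64_t pac1 = qemu_xxhash64_4(ptr, 1, key_lo, key_hi);
        assert(pac0 != pac1);

        return 0;
    }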