path: root/include/qemu/host-utils.h
author     Richard Henderson    2021-11-09 07:18:33 +0100
committer  Richard Henderson    2021-11-09 07:18:33 +0100
commit     f10e7b9f6fc18be390b3bc189e04b5147eb8dbf8 (patch)
tree       760292c2ddc22c5b75653c5d5dc7ae4ca6d8d729 /include/qemu/host-utils.h
parent     Merge remote-tracking branch 'remotes/philmd/tags/avocado-20211108' into staging (diff)
parent     spapr_numa.c: FORM2 table handle nodes with no distance info (diff)
Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-6.2-20211109' into staging
ppc patch queue for 2021-11-09

Here's the latest set of ppc related patches for qemu-6.2, which I hope
will squeeze in just barely before the hard freeze.

This set includes a change to MAINTAINERS moving maintainership of ppc
from myself and Greg Kurz to Cédric le Goater and Daniel Henrique
Barboza. So, I expect this to be my last pull request as ppc maintainer.
It's been great, but it's time I moved onto other things.

Apart from that, this patchset is mostly a lot of updates to TCG
implementations of ISA 3.1 (POWER10) instructions from the El Dorado
team. There are also a handful of other fixes.

# gpg: Signature made Tue 09 Nov 2021 05:14:33 AM CET
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]

* remotes/dgibson/tags/ppc-for-6.2-20211109: (54 commits)
  spapr_numa.c: FORM2 table handle nodes with no distance info
  target/ppc, hw/ppc: Change maintainers
  target/ppc: cntlzdm/cnttzdm implementation without brcond
  target/ppc: Implement lxvkq instruction
  target/ppc: Implement xxblendvb/xxblendvh/xxblendvw/xxblendvd instructions
  target/ppc: implemented XXSPLTIDP instruction
  target/ppc: Implemented XXSPLTIW using decodetree
  target/ppc: implemented XXSPLTI32DX
  target/ppc: moved XXSPLTIB to using decodetree
  target/ppc: moved XXSPLTW to using decodetree
  target/ppc: added the instructions PLXVP and PSTXVP
  target/ppc: added the instructions PLXV and PSTXV
  target/ppc: added the instructions LXVPX and STXVPX
  target/ppc: added the instructions LXVP and STXVP
  target/ppc: moved stxvx and lxvx from legacy to decodtree
  target/ppc: moved stxv and lxv from legacy to decodtree
  target/ppc: receive high/low as argument in get/set_cpu_vsr
  target/ppc: Introduce REQUIRE_VSX macro
  target/ppc: Implement Vector Extract Double to VSR using GPR index insns
  target/ppc: Move vinsertb/vinserth/vinsertw/vinsertd to decodetree
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'include/qemu/host-utils.h')
-rw-r--r--  include/qemu/host-utils.h  36
1 file changed, 36 insertions, 0 deletions
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h
index a3a7ced78d..ca979dc6cc 100644
--- a/include/qemu/host-utils.h
+++ b/include/qemu/host-utils.h
@@ -590,6 +590,42 @@ static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret)
#endif
}
+/*
+ * Unsigned 128x64 multiplication.
+ * Returns true if the result got truncated to 128 bits.
+ * Otherwise, returns false and the multiplication result via plow and phigh.
+ */
+static inline bool mulu128(uint64_t *plow, uint64_t *phigh, uint64_t factor)
+{
+#if defined(CONFIG_INT128) && \
+ (__has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5)
+ bool res;
+ __uint128_t r;
+ __uint128_t f = ((__uint128_t)*phigh << 64) | *plow;
+ res = __builtin_mul_overflow(f, factor, &r);
+
+ *plow = r;
+ *phigh = r >> 64;
+
+ return res;
+#else
+ uint64_t dhi = *phigh;
+ uint64_t dlo = *plow;
+ uint64_t ahi;
+ uint64_t blo, bhi;
+
+ if (dhi == 0) {
+ mulu64(plow, phigh, dlo, factor);
+ return false;
+ }
+
+ mulu64(plow, &ahi, dlo, factor);
+ mulu64(&blo, &bhi, dhi, factor);
+
+ return uadd64_overflow(ahi, blo, phigh) || bhi != 0;
+#endif
+}
+
/**
* uadd64_carry - addition with carry-in and carry-out
* @x, @y: addends
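
To illustrate how the new mulu128() helper behaves and why the portable fallback in the hunk above is correct, here is a standalone sketch (not part of the commit). It redoes the same split-and-recombine computation with plain uint64_t halves and cross-checks it against a compiler-provided __uint128_t. The helpers mul64()/add64_overflow() and the function mulu128_sketch() are local stand-ins for QEMU's mulu64()/uadd64_overflow()/mulu128(), and the example assumes a GCC/Clang-style compiler that offers __uint128_t on a 64-bit host.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <assert.h>

/* Local stand-ins for QEMU's mulu64()/uadd64_overflow(); the names are
 * specific to this sketch, not QEMU API. */
static void mul64(uint64_t *lo, uint64_t *hi, uint64_t a, uint64_t b)
{
    __uint128_t r = (__uint128_t)a * b;
    *lo = (uint64_t)r;
    *hi = (uint64_t)(r >> 64);
}

static bool add64_overflow(uint64_t x, uint64_t y, uint64_t *sum)
{
    *sum = x + y;
    return *sum < x;            /* carry out of bit 63 */
}

/*
 * Same decomposition as the fallback branch in the patch:
 *   (dhi:dlo) * f = dhi*f * 2^64 + dlo*f
 * The low 64 bits of the result come from dlo*f; the high 64 bits are the
 * sum of the upper half of dlo*f and the lower half of dhi*f.  The product
 * no longer fits in 128 bits iff that sum carries or the upper half of
 * dhi*f is non-zero.
 */
static bool mulu128_sketch(uint64_t *plow, uint64_t *phigh, uint64_t factor)
{
    uint64_t dhi = *phigh, dlo = *plow;
    uint64_t ahi, blo, bhi;

    if (dhi == 0) {
        mul64(plow, phigh, dlo, factor);
        return false;
    }
    mul64(plow, &ahi, dlo, factor);
    mul64(&blo, &bhi, dhi, factor);
    return add64_overflow(ahi, blo, phigh) || bhi != 0;
}

int main(void)
{
    /* (2^64 + 5) * 3 = 3*2^64 + 15: fits in 128 bits, no truncation. */
    uint64_t lo = 5, hi = 1;
    bool ovf = mulu128_sketch(&lo, &hi, 3);
    assert(!ovf && hi == 3 && lo == 15);

    /* ((2^64 - 1) * 2^64) * 2 exceeds 2^128: truncated, returns true. */
    lo = 0;
    hi = UINT64_MAX;
    ovf = mulu128_sketch(&lo, &hi, 2);
    assert(ovf);

    printf("decomposition checks passed\n");
    return 0;
}

Callers in QEMU would use mulu128() itself the same way: load the 128-bit multiplicand into *plow/*phigh, pass the 64-bit factor, and treat a true return as "product did not fit in 128 bits".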