author | Peter Maydell | 2019-02-18 17:20:13 +0100
committer | Peter Maydell | 2019-02-18 17:20:13 +0100
commit | 2e68b8620637a4ee8c79b5724144b726af1e261b (patch)
tree | e53cbfaf5cbdd0d0b0c326838634e92096540256 /target/ppc/translate/vsx-impl.inc.c
parent | Merge remote-tracking branch 'remotes/armbru/tags/pull-qapi-2019-02-18' into ... (diff)
parent | target/ppc: convert vmin* and vmax* to vector operations (diff)
Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.0-20190219' into staging
ppc patch queue 2019-02-19
Here's the next batch of ppc and spapr patches. Highlights are:
* A bunch of improvements to TCG handling of vector instructions from
Richard Henderson and Mark Cave-Ayland
* Cleanup to the XICS interrupt controller from Greg Kurz, removing
the special KVM subclasses which were a bad idea
* Some refinements to the XIVE interrupt controller from Cédric Le
Goater
* Fix from Fabiano Rosas for a really dumb buffer overflow in the
device tree code for memory hotplug
* Code for allowing access to SPRs from the gdb stub from Fabiano
Rosas
* Assorted minor fixes and cleanups
# gpg: Signature made Mon 18 Feb 2019 13:47:54 GMT
# gpg: using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg: aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg: aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg: aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392
* remotes/dgibson/tags/ppc-for-4.0-20190219: (43 commits)
target/ppc: convert vmin* and vmax* to vector operations
target/ppc: convert vadd*s and vsub*s to vector operations
target/ppc: Split out VSCR_SAT to a vector field
target/ppc: Add set_vscr_sat
target/ppc: Use mtvscr/mfvscr for vmstate
target/ppc: Add helper_mfvscr
target/ppc: Remove vscr_nj and vscr_sat
target/ppc: Use helper_mtvscr for reset and gdb
target/ppc: Pass integer to helper_mtvscr
target/ppc: convert xxsel to vector operations
target/ppc: convert xxspltw to vector operations
target/ppc: convert xxspltib to vector operations
target/ppc: convert VSX logical operations to vector operations
target/ppc: convert vsplt[bhw] to use vector operations
target/ppc: convert vspltis[bhw] to use vector operations
target/ppc: convert vaddu[b,h,w,d] and vsubu[b,h,w,d] over to use vector operations
target/ppc: convert VMX logical instructions to use vector operations
xics: Drop the KVM ICS class
spapr/irq: Use the "simple" ICS class for KVM
xics: Handle KVM interrupt presentation from "simple" ICS code
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target/ppc/translate/vsx-impl.inc.c')
-rw-r--r-- | target/ppc/translate/vsx-impl.inc.c | 147
1 file changed, 60 insertions(+), 87 deletions(-)
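The diff below converts several VSX helpers from per-doubleword TCG code to TCG's generic vector (gvec) API, which operates on the full 16-byte VSR in CPU state by offset. The new xxsel kernels compute a plain bitwise select: bits come from b where the mask c is set and from a elsewhere, i.e. t = (b & c) | (a & ~c). As a standalone illustration of that per-lane dataflow (plain C, not QEMU code, for illustration only):

```c
#include <assert.h>
#include <stdint.h>

/* Bitwise select over one 64-bit lane: take bits from b where the
 * mask c is 1 and from a where c is 0. This is the same dataflow as
 * the xxsel_i64/xxsel_vec kernels in the diff below. */
static uint64_t xxsel_lane(uint64_t a, uint64_t b, uint64_t c)
{
    return (b & c) | (a & ~c);
}

int main(void)
{
    /* The low byte of the mask selects from b, the rest from a. */
    assert(xxsel_lane(0x1111111111111111ULL,
                      0x2222222222222222ULL,
                      0xffULL) == 0x1111111111111122ULL);
    return 0;
}
```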
```diff
diff --git a/target/ppc/translate/vsx-impl.inc.c b/target/ppc/translate/vsx-impl.inc.c
index ed4fdceacf..e73197e717 100644
--- a/target/ppc/translate/vsx-impl.inc.c
+++ b/target/ppc/translate/vsx-impl.inc.c
@@ -10,6 +10,11 @@ static inline void set_vsr(int n, TCGv_i64 src)
     tcg_gen_st_i64(src, cpu_env, offsetof(CPUPPCState, vsr[n].u64[1]));
 }
 
+static inline int vsr_full_offset(int n)
+{
+    return offsetof(CPUPPCState, vsr[n].u64[0]);
+}
+
 static inline void get_cpu_vsrh(TCGv_i64 dst, int n)
 {
     if (n < 32) {
@@ -1255,40 +1260,26 @@ static void gen_xxbrw(DisasContext *ctx)
     tcg_temp_free_i64(xbl);
 }
 
-#define VSX_LOGICAL(name, tcg_op)                                    \
+#define VSX_LOGICAL(name, vece, tcg_op)                              \
 static void glue(gen_, name)(DisasContext * ctx)                     \
     {                                                                \
-        TCGv_i64 t0;                                                 \
-        TCGv_i64 t1;                                                 \
-        TCGv_i64 t2;                                                 \
         if (unlikely(!ctx->vsx_enabled)) {                           \
             gen_exception(ctx, POWERPC_EXCP_VSXU);                   \
             return;                                                  \
         }                                                            \
-        t0 = tcg_temp_new_i64();                                     \
-        t1 = tcg_temp_new_i64();                                     \
-        t2 = tcg_temp_new_i64();                                     \
-        get_cpu_vsrh(t0, xA(ctx->opcode));                           \
-        get_cpu_vsrh(t1, xB(ctx->opcode));                           \
-        tcg_op(t2, t0, t1);                                          \
-        set_cpu_vsrh(xT(ctx->opcode), t2);                           \
-        get_cpu_vsrl(t0, xA(ctx->opcode));                           \
-        get_cpu_vsrl(t1, xB(ctx->opcode));                           \
-        tcg_op(t2, t0, t1);                                          \
-        set_cpu_vsrl(xT(ctx->opcode), t2);                           \
-        tcg_temp_free_i64(t0);                                       \
-        tcg_temp_free_i64(t1);                                       \
-        tcg_temp_free_i64(t2);                                       \
+        tcg_op(vece, vsr_full_offset(xT(ctx->opcode)),               \
+               vsr_full_offset(xA(ctx->opcode)),                     \
+               vsr_full_offset(xB(ctx->opcode)), 16, 16);            \
     }
 
-VSX_LOGICAL(xxland, tcg_gen_and_i64)
-VSX_LOGICAL(xxlandc, tcg_gen_andc_i64)
-VSX_LOGICAL(xxlor, tcg_gen_or_i64)
-VSX_LOGICAL(xxlxor, tcg_gen_xor_i64)
-VSX_LOGICAL(xxlnor, tcg_gen_nor_i64)
-VSX_LOGICAL(xxleqv, tcg_gen_eqv_i64)
-VSX_LOGICAL(xxlnand, tcg_gen_nand_i64)
-VSX_LOGICAL(xxlorc, tcg_gen_orc_i64)
+VSX_LOGICAL(xxland, MO_64, tcg_gen_gvec_and)
+VSX_LOGICAL(xxlandc, MO_64, tcg_gen_gvec_andc)
+VSX_LOGICAL(xxlor, MO_64, tcg_gen_gvec_or)
+VSX_LOGICAL(xxlxor, MO_64, tcg_gen_gvec_xor)
+VSX_LOGICAL(xxlnor, MO_64, tcg_gen_gvec_nor)
+VSX_LOGICAL(xxleqv, MO_64, tcg_gen_gvec_eqv)
+VSX_LOGICAL(xxlnand, MO_64, tcg_gen_gvec_nand)
+VSX_LOGICAL(xxlorc, MO_64, tcg_gen_gvec_orc)
 
 #define VSX_XXMRG(name, high)                                        \
 static void glue(gen_, name)(DisasContext * ctx)                     \
@@ -1330,85 +1321,71 @@ static void glue(gen_, name)(DisasContext * ctx)        \
 VSX_XXMRG(xxmrghw, 1)
 VSX_XXMRG(xxmrglw, 0)
 
-static void gen_xxsel(DisasContext * ctx)
+static void xxsel_i64(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
 {
-    TCGv_i64 a, b, c, tmp;
-    if (unlikely(!ctx->vsx_enabled)) {
-        gen_exception(ctx, POWERPC_EXCP_VSXU);
-        return;
-    }
-    a = tcg_temp_new_i64();
-    b = tcg_temp_new_i64();
-    c = tcg_temp_new_i64();
-    tmp = tcg_temp_new_i64();
-
-    get_cpu_vsrh(a, xA(ctx->opcode));
-    get_cpu_vsrh(b, xB(ctx->opcode));
-    get_cpu_vsrh(c, xC(ctx->opcode));
-
     tcg_gen_and_i64(b, b, c);
     tcg_gen_andc_i64(a, a, c);
-    tcg_gen_or_i64(tmp, a, b);
-    set_cpu_vsrh(xT(ctx->opcode), tmp);
-
-    get_cpu_vsrl(a, xA(ctx->opcode));
-    get_cpu_vsrl(b, xB(ctx->opcode));
-    get_cpu_vsrl(c, xC(ctx->opcode));
-
-    tcg_gen_and_i64(b, b, c);
-    tcg_gen_andc_i64(a, a, c);
-    tcg_gen_or_i64(tmp, a, b);
-    set_cpu_vsrl(xT(ctx->opcode), tmp);
+    tcg_gen_or_i64(t, a, b);
+}
 
-    tcg_temp_free_i64(a);
-    tcg_temp_free_i64(b);
-    tcg_temp_free_i64(c);
-    tcg_temp_free_i64(tmp);
+static void xxsel_vec(unsigned vece, TCGv_vec t, TCGv_vec a,
+                      TCGv_vec b, TCGv_vec c)
+{
+    tcg_gen_and_vec(vece, b, b, c);
+    tcg_gen_andc_vec(vece, a, a, c);
+    tcg_gen_or_vec(vece, t, a, b);
 }
 
-static void gen_xxspltw(DisasContext *ctx)
+static void gen_xxsel(DisasContext *ctx)
 {
-    TCGv_i64 b, b2;
-    TCGv_i64 vsr;
+    static const GVecGen4 g = {
+        .fni8 = xxsel_i64,
+        .fniv = xxsel_vec,
+        .vece = MO_64,
+    };
+    int rt = xT(ctx->opcode);
+    int ra = xA(ctx->opcode);
+    int rb = xB(ctx->opcode);
+    int rc = xC(ctx->opcode);
 
     if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
     }
+    tcg_gen_gvec_4(vsr_full_offset(rt), vsr_full_offset(ra),
+                   vsr_full_offset(rb), vsr_full_offset(rc), 16, 16, &g);
+}
 
-    vsr = tcg_temp_new_i64();
-    if (UIM(ctx->opcode) & 2) {
-        get_cpu_vsrl(vsr, xB(ctx->opcode));
-    } else {
-        get_cpu_vsrh(vsr, xB(ctx->opcode));
-    }
-
-    b = tcg_temp_new_i64();
-    b2 = tcg_temp_new_i64();
+static void gen_xxspltw(DisasContext *ctx)
+{
+    int rt = xT(ctx->opcode);
+    int rb = xB(ctx->opcode);
+    int uim = UIM(ctx->opcode);
+    int tofs, bofs;
 
-    if (UIM(ctx->opcode) & 1) {
-        tcg_gen_ext32u_i64(b, vsr);
-    } else {
-        tcg_gen_shri_i64(b, vsr, 32);
+    if (unlikely(!ctx->vsx_enabled)) {
+        gen_exception(ctx, POWERPC_EXCP_VSXU);
+        return;
     }
 
-    tcg_gen_shli_i64(b2, b, 32);
-    tcg_gen_or_i64(vsr, b, b2);
-    set_cpu_vsrh(xT(ctx->opcode), vsr);
-    set_cpu_vsrl(xT(ctx->opcode), vsr);
+    tofs = vsr_full_offset(rt);
+    bofs = vsr_full_offset(rb);
+    bofs += uim << MO_32;
+#ifndef HOST_WORDS_BIG_ENDIAN
+    bofs ^= 8 | 4;
+#endif
 
-    tcg_temp_free_i64(vsr);
-    tcg_temp_free_i64(b);
-    tcg_temp_free_i64(b2);
+    tcg_gen_gvec_dup_mem(MO_32, tofs, bofs, 16, 16);
 }
 
 #define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))
 
 static void gen_xxspltib(DisasContext *ctx)
 {
-    unsigned char uim8 = IMM8(ctx->opcode);
-    TCGv_i64 vsr;
-    if (xS(ctx->opcode) < 32) {
+    uint8_t uim8 = IMM8(ctx->opcode);
+    int rt = xT(ctx->opcode);
+
+    if (rt < 32) {
         if (unlikely(!ctx->altivec_enabled)) {
             gen_exception(ctx, POWERPC_EXCP_VPU);
             return;
@@ -1419,11 +1396,7 @@ static void gen_xxspltib(DisasContext *ctx)
             return;
         }
     }
-    vsr = tcg_temp_new_i64();
-    tcg_gen_movi_i64(vsr, pattern(uim8));
-    set_cpu_vsrh(xT(ctx->opcode), vsr);
-    set_cpu_vsrl(xT(ctx->opcode), vsr);
-    tcg_temp_free_i64(vsr);
+    tcg_gen_gvec_dup8i(vsr_full_offset(rt), 16, 16, uim8);
 }
 
 static void gen_xxsldwi(DisasContext *ctx)
```
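One idiom worth noting from the gen_xxspltib hunk: the pattern() macro splats a byte across a 64-bit word with a single multiply, since ~(uint64_t)0 / 0xff is 0x0101010101010101; the new code instead hands the byte to tcg_gen_gvec_dup8i, which broadcasts it across the full 16-byte register. A standalone sketch of the multiply trick (plain C, outside QEMU, for illustration only):

```c
#include <assert.h>
#include <stdint.h>

/* Replicate the low byte of x into all eight byte lanes of a 64-bit
 * word: ~(uint64_t)0 / 0xff == 0x0101010101010101, so multiplying by
 * a byte value b places b in every lane. This is what the pattern()
 * macro in vsx-impl.inc.c computes. */
static uint64_t splat_byte(uint64_t x)
{
    return (x & 0xff) * (~(uint64_t)0 / 0xff);
}

int main(void)
{
    /* xxspltib with an immediate of 0x5a fills the target VSR with
     * 0x5a in every byte; the old code built this 64-bit pattern for
     * each half of the register, the new code broadcasts the byte
     * once via tcg_gen_gvec_dup8i. */
    assert(splat_byte(0x5a) == 0x5a5a5a5a5a5a5a5aULL);
    return 0;
}
```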