author      Richard Henderson       2016-07-14 21:43:06 +0200
committer   Richard Henderson       2016-09-16 17:12:06 +0200
commit      85aa80813dd9f5c1f581c743e45678a3bee220f8 (patch)
tree        2a140ecb81d60cf1a593a160c0d09f88ae5a3c7d /tcg/ia64
parent      Merge remote-tracking branch 'remotes/awilliam/tags/vfio-fixes-20160915.0' in... (diff)
tcg: Support arbitrary size + alignment
Previously we allowed fully unaligned operations, but not operations
that are aligned but with less alignment than the operation size.

In addition, arm32, ia64, mips, and sparc had been omitted from the
previous overalignment patch, which would have led to that alignment
being enforced.

Signed-off-by: Richard Henderson <rth@twiddle.net>
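As background for the hunks below, here is a minimal, self-contained C sketch of the size/alignment clamp this patch adds to tcg_out_qemu_tlb(). The DEMO_MO_* constants and helper names are illustrative stand-ins, not QEMU's exact TCGMemOp encoding or API; the sketch only demonstrates the rule "an alignment request smaller than the access size is raised to the access size".

#include <stdio.h>

/* Illustrative memory-op encoding: the low bits hold log2(size) and a
   separate field holds the requested log2(alignment).  These constants
   are stand-ins for the idea only, not QEMU's exact TCGMemOp layout. */
enum {
    DEMO_MO_SIZE   = 0x3,             /* log2 of access size: 0..3 -> 1..8 bytes */
    DEMO_MO_ASHIFT = 4,
    DEMO_MO_AMASK  = 0x7 << DEMO_MO_ASHIFT,
};

/* Hypothetical stand-in for get_alignment_bits(): extract log2(alignment). */
static unsigned demo_alignment_bits(unsigned opc)
{
    return (opc & DEMO_MO_AMASK) >> DEMO_MO_ASHIFT;
}

/* The clamp the patch adds: this backend does not emit unaligned accesses,
   so an alignment smaller than the access size is raised to the access
   size ("overalignment is easy"). */
static unsigned demo_effective_alignment_bits(unsigned opc)
{
    unsigned s_bits = opc & DEMO_MO_SIZE;
    unsigned a_bits = demo_alignment_bits(opc);
    return a_bits < s_bits ? s_bits : a_bits;
}

int main(void)
{
    /* An 8-byte access (s_bits = 3) that only requests 4-byte alignment
       (a_bits = 2) ends up checked against 8-byte alignment. */
    unsigned opc = 3 | (2 << DEMO_MO_ASHIFT);
    printf("effective alignment: %u bytes\n",
           1u << demo_effective_alignment_bits(opc));
    return 0;
}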
Diffstat (limited to 'tcg/ia64')
-rw-r--r--  tcg/ia64/tcg-target.inc.c  |  22
1 file changed, 15 insertions, 7 deletions
diff --git a/tcg/ia64/tcg-target.inc.c b/tcg/ia64/tcg-target.inc.c
index c91f39281b..7642390b4b 100644
--- a/tcg/ia64/tcg-target.inc.c
+++ b/tcg/ia64/tcg-target.inc.c
@@ -1496,10 +1496,18 @@ QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
R1, R3 are clobbered, leaving R56 free for...
BSWAP_1, BSWAP_2 and I-slot insns for swapping data for store. */
static inline void tcg_out_qemu_tlb(TCGContext *s, TCGReg addr_reg,
- TCGMemOp s_bits, int off_rw, int off_add,
+ TCGMemOp opc, int off_rw, int off_add,
uint64_t bswap1, uint64_t bswap2)
{
- /*
+ unsigned s_bits = opc & MO_SIZE;
+ unsigned a_bits = get_alignment_bits(opc);
+
+ /* We don't support unaligned accesses, but overalignment is easy. */
+ if (a_bits < s_bits) {
+ a_bits = s_bits;
+ }
+
+ /*
.mii
mov r2 = off_rw
extr.u r3 = addr_reg, ... # extract tlb page
@@ -1521,7 +1529,7 @@ static inline void tcg_out_qemu_tlb(TCGContext *s, TCGReg addr_reg,
cmp.eq p6, p7 = r3, r58
nop
;;
- */
+ */
tcg_out_bundle(s, miI,
tcg_opc_movi_a(TCG_REG_P0, TCG_REG_R2, off_rw),
tcg_opc_i11(TCG_REG_P0, OPC_EXTR_U_I11, TCG_REG_R3,
@@ -1536,8 +1544,8 @@ static inline void tcg_out_qemu_tlb(TCGContext *s, TCGReg addr_reg,
TCG_REG_R3, 63 - CPU_TLB_ENTRY_BITS,
63 - CPU_TLB_ENTRY_BITS),
tcg_opc_i14(TCG_REG_P0, OPC_DEP_I14, TCG_REG_R1, 0,
- TCG_REG_R57, 63 - s_bits,
- TARGET_PAGE_BITS - s_bits - 1));
+ TCG_REG_R57, 63 - a_bits,
+ TARGET_PAGE_BITS - a_bits - 1));
tcg_out_bundle(s, MmI,
tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1,
TCG_REG_R2, TCG_REG_R2, TCG_REG_R3),
@@ -1661,7 +1669,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
s_bits = opc & MO_SIZE;
/* Read the TLB entry */
- tcg_out_qemu_tlb(s, addr_reg, s_bits,
+ tcg_out_qemu_tlb(s, addr_reg, opc,
offsetof(CPUArchState, tlb_table[mem_index][0].addr_read),
offsetof(CPUArchState, tlb_table[mem_index][0].addend),
INSN_NOP_I, INSN_NOP_I);
@@ -1739,7 +1747,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
pre1 = tcg_opc_ext_i(TCG_REG_P0, opc, TCG_REG_R58, data_reg);
}
- tcg_out_qemu_tlb(s, addr_reg, s_bits,
+ tcg_out_qemu_tlb(s, addr_reg, opc,
offsetof(CPUArchState, tlb_table[mem_index][0].addr_write),
offsetof(CPUArchState, tlb_table[mem_index][0].addend),
pre1, pre2);
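The s_bits -> a_bits change in the DEP bundle above is what makes the alignment check take effect: the TLB comparator is built with the low a_bits of the guest address left in place, so any access that is not a_bits-aligned can never match the stored entry and falls through to the slow path. The architecture-neutral C sketch below shows that idea under assumed names (demo_tlb_hit_aligned, DEMO_PAGE_BITS); it is not the ia64 instruction sequence the backend actually emits.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed page size for the sketch; QEMU uses TARGET_PAGE_BITS here. */
#define DEMO_PAGE_BITS 12

/* Fast-path compare: keep the page number *and* the low a_bits of the
   address.  The stored TLB comparator has those low bits clear, so a
   misaligned address produces a non-matching value and forces the
   slow path (which can then handle or fault the access). */
static bool demo_tlb_hit_aligned(uint64_t addr, uint64_t tlb_addr,
                                 unsigned a_bits)
{
    uint64_t mask = ~((UINT64_C(1) << DEMO_PAGE_BITS) - 1)  /* page number    */
                  | ((UINT64_C(1) << a_bits) - 1);          /* low align bits */
    return (addr & mask) == tlb_addr;
}

int main(void)
{
    uint64_t page = 0x4000;  /* pretend the TLB holds an entry for this page */

    /* 8-byte alignment required (a_bits = 3). */
    printf("aligned access hits:    %d\n",
           demo_tlb_hit_aligned(page + 0x18, page, 3));
    printf("misaligned access hits: %d\n",
           demo_tlb_hit_aligned(page + 0x1c, page, 3));
    return 0;
}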