summaryrefslogtreecommitdiffstats
path: root/accel/tcg/cputlb.c
diff options
context:
space:
mode:
authorRichard Henderson2022-04-27 06:40:25 +0200
committerRichard Henderson2022-04-27 06:40:25 +0200
commit34723f59371f3fd02ea59b94674314b875504426 (patch)
tree92142e74bb6099c2fc6eb3a5b97eba67c8ae5c38 /accel/tcg/cputlb.c
parentMerge tag 'pull-nbd-2022-04-26' of https://repo.or.cz/qemu/ericb into staging (diff)
parentsoftfloat: Use FloatRelation for fracN_cmp (diff)
downloadqemu-34723f59371f3fd02ea59b94674314b875504426.tar.gz
qemu-34723f59371f3fd02ea59b94674314b875504426.tar.xz
qemu-34723f59371f3fd02ea59b94674314b875504426.zip
Merge tag 'pull-tcg-20220426' of https://gitlab.com/rth7680/qemu into staging
Fix s390x ICMH cc computation. Minor adjustments to satisfy Coverity. # -----BEGIN PGP SIGNATURE----- # # iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmJoyJcdHHJpY2hhcmQu # aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV8ZBQf+OWlDwqNOF+XzyLfb # pPFAwqNCDX+9rRP6eyouydoCe2n4djj6I4rF+ESdkzbXAxrDzhfBF496CWgFd/Ar # HRdssehq0V8UY6Blyhig9OXrcwtdJAZrZhQrl5541VqEak89Sii84F0RNt1QdhvE # HArSm5D78DJx7ZmAtDRZhc3uGOxJefKPTD/4FVnQZQRh9jHeuR9oClMm+1ksYkxo # 52SkalMlUXZNVvpud8AkuZxWtTeEdzgGPRX/zXdXLMrYI0ZdrqVS/DbuJBA3zwkL # r+VmPwDIwojn5cHnS8QzP545XdsQ3alWM1Blhi7lKrwS0LHjyD3BOSH1Dxen9IOc # /Ip5fA== # =ysOK # -----END PGP SIGNATURE----- # gpg: Signature made Tue 26 Apr 2022 09:37:43 PM PDT # gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F # gpg: issuer "richard.henderson@linaro.org" # gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate] * tag 'pull-tcg-20220426' of https://gitlab.com/rth7680/qemu: softfloat: Use FloatRelation for fracN_cmp softfloat: Use FloatRelation within partsN_compare softfloat: Fix declaration of partsN_compare target/i386: Suppress coverity warning on fsave/frstor target/s390x: Fix the accumulation of ccm in op_icm accel/tcg: Assert mmu_idx in range before use in cputlb Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'accel/tcg/cputlb.c')
-rw-r--r--accel/tcg/cputlb.c40
1 file changed, 27 insertions, 13 deletions
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index dd45e0467b..f90f4312ea 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1761,7 +1761,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
MemOpIdx oi, int size, int prot,
uintptr_t retaddr)
{
- size_t mmu_idx = get_mmuidx(oi);
+ uintptr_t mmu_idx = get_mmuidx(oi);
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
uintptr_t index;
@@ -1769,6 +1769,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
target_ulong tlb_addr;
void *hostaddr;
+ tcg_debug_assert(mmu_idx < NB_MMU_MODES);
+
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
@@ -1908,18 +1910,20 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
uintptr_t retaddr, MemOp op, bool code_read,
FullLoadHelper *full_load)
{
- uintptr_t mmu_idx = get_mmuidx(oi);
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
const size_t tlb_off = code_read ?
offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
const MMUAccessType access_type =
code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
- unsigned a_bits = get_alignment_bits(get_memop(oi));
+ const unsigned a_bits = get_alignment_bits(get_memop(oi));
+ const size_t size = memop_size(op);
+ uintptr_t mmu_idx = get_mmuidx(oi);
+ uintptr_t index;
+ CPUTLBEntry *entry;
+ target_ulong tlb_addr;
void *haddr;
uint64_t res;
- size_t size = memop_size(op);
+
+ tcg_debug_assert(mmu_idx < NB_MMU_MODES);
/* Handle CPU specific unaligned behaviour */
if (addr & ((1 << a_bits) - 1)) {
@@ -1927,6 +1931,10 @@ load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
mmu_idx, retaddr);
}
+ index = tlb_index(env, mmu_idx, addr);
+ entry = tlb_entry(env, mmu_idx, addr);
+ tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
@@ -2310,14 +2318,16 @@ static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr, MemOp op)
{
- uintptr_t mmu_idx = get_mmuidx(oi);
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = tlb_addr_write(entry);
const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
- unsigned a_bits = get_alignment_bits(get_memop(oi));
+ const unsigned a_bits = get_alignment_bits(get_memop(oi));
+ const size_t size = memop_size(op);
+ uintptr_t mmu_idx = get_mmuidx(oi);
+ uintptr_t index;
+ CPUTLBEntry *entry;
+ target_ulong tlb_addr;
void *haddr;
- size_t size = memop_size(op);
+
+ tcg_debug_assert(mmu_idx < NB_MMU_MODES);
/* Handle CPU specific unaligned behaviour */
if (addr & ((1 << a_bits) - 1)) {
@@ -2325,6 +2335,10 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
mmu_idx, retaddr);
}
+ index = tlb_index(env, mmu_idx, addr);
+ entry = tlb_entry(env, mmu_idx, addr);
+ tlb_addr = tlb_addr_write(entry);
+
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,