author     Alistair Francis        2017-08-15 16:57:14 +0200
committer  Peter Maydell           2017-08-15 18:38:44 +0200
commit     4a2fdb78e794c1ad93aa9e160235d6a61a2125de
tree       382eaaa32defefe5fb8dd7d9f3476b95b6837419
parent     target/arm: Correct load exclusive pair atomicity
target/arm: Require alignment for load exclusive
According to the ARM ARM, exclusive loads require the same alignment as
exclusive stores. Let's update the memops used for the load to match
those of the store. This adds the alignment requirement to the memops.

Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Signed-off-by: Alistair Francis <alistair.francis@xilinx.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20170815145714.17635-4-richard.henderson@linaro.org
[rth: Require 16-byte alignment for 64-bit LDXP.]
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
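For illustration, here is a minimal sketch of the alignment rule the patch
enforces; this is not QEMU code, and the helper name is hypothetical. An
exclusive load must be aligned to its access size, and an exclusive pair
to the size of the whole pair, so a 64-bit LDXP needs 16-byte alignment.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical predicate: "size" is log2 of the single-register access
 * size in bytes, as in gen_load_exclusive().  A pair accesses
 * 2 * (1 << size) bytes and must be aligned to the whole pair:
 * 8 bytes for a 32-bit LDXP, 16 bytes for a 64-bit LDXP. */
static bool exclusive_load_aligned(uint64_t addr, int size, bool is_pair)
{
    uint64_t align = is_pair ? 2ull * (1ull << size) : (1ull << size);
    return (addr & (align - 1)) == 0;
}

For example, exclusive_load_aligned(0x1008, 3, true) is false: with this
patch, a 64-bit LDXP from such an address faults instead of loading.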
Diffstat (limited to 'target')
-rw-r--r--  target/arm/translate-a64.c  | 11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index eac545e4f2..2200e25be0 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -1861,7 +1861,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
         g_assert(size >= 2);
         if (size == 2) {
             /* The pair must be single-copy atomic for the doubleword. */
-            memop |= MO_64;
+            memop |= MO_64 | MO_ALIGN;
             tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
             if (s->be_data == MO_LE) {
                 tcg_gen_extract_i64(cpu_reg(s, rt), cpu_exclusive_val, 0, 32);
@@ -1871,10 +1871,11 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                 tcg_gen_extract_i64(cpu_reg(s, rt2), cpu_exclusive_val, 0, 32);
             }
         } else {
-            /* The pair must be single-copy atomic for *each* doubleword,
-               but not the entire quadword. */
+            /* The pair must be single-copy atomic for *each* doubleword, not
+               the entire quadword, however it must be quadword aligned. */
             memop |= MO_64;
-            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
+            tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx,
+                                memop | MO_ALIGN_16);

             TCGv_i64 addr2 = tcg_temp_new_i64();
             tcg_gen_addi_i64(addr2, addr, 8);
@@ -1885,7 +1886,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
             tcg_gen_mov_i64(cpu_reg(s, rt2), cpu_exclusive_high);
         }
     } else {
-        memop |= size;
+        memop |= size | MO_ALIGN;
         tcg_gen_qemu_ld_i64(cpu_exclusive_val, addr, idx, memop);
         tcg_gen_mov_i64(cpu_reg(s, rt), cpu_exclusive_val);
     }
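As a usage illustration (a hypothetical test, not part of the patch):
after this change, an LDXP of two 64-bit registers from an address that
is 8-byte but not 16-byte aligned should take an alignment fault, which
linux-user emulation would be expected to deliver as SIGBUS.

#include <stdint.h>

int main(void)
{
    static uint64_t buf[4] __attribute__((aligned(16)));
    /* 8-byte aligned, but deliberately not 16-byte aligned. */
    uint64_t *misaligned = &buf[1];
    uint64_t lo, hi;

    /* With this patch applied, qemu raises an alignment fault here
     * rather than completing the exclusive load. */
    __asm__ volatile("ldxp %0, %1, [%2]"
                     : "=r"(lo), "=r"(hi)
                     : "r"(misaligned)
                     : "memory");
    return (int)(lo + hi);
}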