Diffstat (limited to 'target')
 target/riscv/cpu.h          | 10
 target/riscv/cpu_helper.c   | 50
 target/riscv/op_helper.c    |  7
 target/s390x/cpu_features.c |  5
 target/s390x/cpu_features.h |  4
 target/s390x/cpu_models.c   |  4
 target/s390x/kvm.c          |  3
 7 files changed, 62 insertions(+), 21 deletions(-)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index de275782e6..de4705bb57 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -82,9 +82,13 @@ enum {
#define VEXT_VERSION_0_07_1 0x00000701
-#define TRANSLATE_PMP_FAIL 2
-#define TRANSLATE_FAIL 1
-#define TRANSLATE_SUCCESS 0
+enum {
+ TRANSLATE_SUCCESS,
+ TRANSLATE_FAIL,
+ TRANSLATE_PMP_FAIL,
+ TRANSLATE_G_STAGE_FAIL
+};
+
#define MMU_USER_IDX 3
#define MAX_RISCV_PMPS (16)
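
For orientation, the switch from #defines to an enum adds TRANSLATE_G_STAGE_FAIL so callers can tell a failed second-stage (guest-physical) walk apart from an ordinary first-stage fault. Below is a minimal, self-contained sketch of how such a result code might be dispatched; the enum values mirror the patch, while the helper function and strings are purely illustrative and not part of QEMU.

    #include <stdio.h>

    enum {
        TRANSLATE_SUCCESS,
        TRANSLATE_FAIL,
        TRANSLATE_PMP_FAIL,
        TRANSLATE_G_STAGE_FAIL
    };

    /* Illustrative helper: map a translation result to a short description. */
    static const char *translate_result_name(int ret)
    {
        switch (ret) {
        case TRANSLATE_SUCCESS:      return "success";
        case TRANSLATE_FAIL:         return "page-table walk failure";
        case TRANSLATE_PMP_FAIL:     return "PMP violation";
        case TRANSLATE_G_STAGE_FAIL: return "second-stage (guest) fault";
        default:                     return "unknown";
        }
    }

    int main(void)
    {
        printf("%s\n", translate_result_name(TRANSLATE_G_STAGE_FAIL));
        return 0;
    }
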
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 904899054d..4652082df1 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -316,6 +316,9 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
* @physical: This will be set to the calculated physical address
* @prot: The returned protection attributes
* @addr: The virtual address to be translated
+ * @fault_pte_addr: If not NULL, this will be set to the faulting PTE address
+ *                  when an error occurs during PTE address translation.
+ *                  The value is already shifted to match htval.
* @access_type: The type of MMU access
* @mmu_idx: Indicates current privilege level
* @first_stage: Are we in first stage translation?
@@ -324,6 +327,7 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
*/
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
int *prot, target_ulong addr,
+ target_ulong *fault_pte_addr,
int access_type, int mmu_idx,
bool first_stage, bool two_stage)
{
@@ -447,11 +451,14 @@ restart:
/* Do the second stage translation on the base PTE address. */
int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
- base, MMU_DATA_LOAD,
+ base, NULL, MMU_DATA_LOAD,
mmu_idx, false, true);
if (vbase_ret != TRANSLATE_SUCCESS) {
- return vbase_ret;
+ if (fault_pte_addr) {
+ *fault_pte_addr = (base + idx * ptesize) >> 2;
+ }
+ return TRANSLATE_G_STAGE_FAIL;
}
pte_addr = vbase + idx * ptesize;
@@ -632,13 +639,13 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
int prot;
int mmu_idx = cpu_mmu_index(&cpu->env, false);
- if (get_physical_address(env, &phys_addr, &prot, addr, 0, mmu_idx,
+ if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
true, riscv_cpu_virt_enabled(env))) {
return -1;
}
if (riscv_cpu_virt_enabled(env)) {
- if (get_physical_address(env, &phys_addr, &prot, phys_addr,
+ if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
0, mmu_idx, false, true)) {
return -1;
}
@@ -727,19 +734,30 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (riscv_cpu_virt_enabled(env) ||
(riscv_cpu_two_stage_lookup(env) && access_type != MMU_INST_FETCH)) {
/* Two stage lookup */
- ret = get_physical_address(env, &pa, &prot, address, access_type,
+ ret = get_physical_address(env, &pa, &prot, address,
+ &env->guest_phys_fault_addr, access_type,
mmu_idx, true, true);
+ /*
+ * A G-stage exception may be triggered during two-stage lookup.
+ * In that case env->guest_phys_fault_addr has already been set in
+ * get_physical_address().
+ */
+ if (ret == TRANSLATE_G_STAGE_FAIL) {
+ first_stage_error = false;
+ access_type = MMU_DATA_LOAD;
+ }
+
qemu_log_mask(CPU_LOG_MMU,
"%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
TARGET_FMT_plx " prot %d\n",
__func__, address, ret, pa, prot);
- if (ret != TRANSLATE_FAIL) {
+ if (ret == TRANSLATE_SUCCESS) {
/* Second stage lookup */
im_address = pa;
- ret = get_physical_address(env, &pa, &prot2, im_address,
+ ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
access_type, mmu_idx, false, true);
qemu_log_mask(CPU_LOG_MMU,
@@ -768,8 +786,8 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
}
} else {
/* Single stage lookup */
- ret = get_physical_address(env, &pa, &prot, address, access_type,
- mmu_idx, true, false);
+ ret = get_physical_address(env, &pa, &prot, address, NULL,
+ access_type, mmu_idx, true, false);
qemu_log_mask(CPU_LOG_MMU,
"%s address=%" VADDR_PRIx " ret %d physical "
@@ -852,6 +870,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
target_ulong deleg = async ? env->mideleg : env->medeleg;
+ bool write_tval = false;
target_ulong tval = 0;
target_ulong htval = 0;
target_ulong mtval2 = 0;
@@ -873,6 +892,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
case RISCV_EXCP_INST_PAGE_FAULT:
case RISCV_EXCP_LOAD_PAGE_FAULT:
case RISCV_EXCP_STORE_PAGE_FAULT:
+ write_tval = true;
tval = env->badaddr;
break;
default:
@@ -895,7 +915,13 @@ void riscv_cpu_do_interrupt(CPUState *cs)
}
trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
- riscv_cpu_get_trap_name(cause, async));
+ riscv_cpu_get_trap_name(cause, async));
+
+ qemu_log_mask(CPU_LOG_INT,
+ "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
+ "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
+ __func__, env->mhartid, async, cause, env->pc, tval,
+ riscv_cpu_get_trap_name(cause, async));
if (env->priv <= PRV_S &&
cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
@@ -904,7 +930,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
if ((riscv_cpu_virt_enabled(env) ||
- riscv_cpu_two_stage_lookup(env)) && tval) {
+ riscv_cpu_two_stage_lookup(env)) && write_tval) {
/*
* If we are writing a guest virtual address to stval, set
* this to 1. If we are trapping to VS we will set this to 0
@@ -932,7 +958,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
/* Trap into HS mode, from virt */
riscv_cpu_swap_hypervisor_regs(env);
env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
- get_field(env->mstatus, SSTATUS_SPP));
+ env->priv);
env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
riscv_cpu_virt_enabled(env));
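
The ">> 2" applied when recording the faulting PTE address matches the doc comment above ("already shifted to match htval"): the RISC-V hypervisor extension defines htval and mtval2 as carrying a guest-physical address shifted right by two bits. A small standalone sketch of that encoding follows, using a made-up address purely for illustration.

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Encode a guest-physical fault address the way htval/mtval2 carry it. */
    static uint64_t encode_htval(uint64_t gpa)
    {
        return gpa >> 2;
    }

    /* Recover the (4-byte aligned) guest-physical address from htval. */
    static uint64_t decode_htval(uint64_t htval)
    {
        return htval << 2;
    }

    int main(void)
    {
        uint64_t pte_gpa = 0x80042ff8;   /* illustrative PTE address only */
        uint64_t htval = encode_htval(pte_gpa);

        printf("htval = 0x%" PRIx64 ", gpa = 0x%" PRIx64 "\n",
               htval, decode_htval(htval));
        return 0;
    }
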
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 9b9ada45a9..4ce73575a7 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -29,7 +29,6 @@ void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env,
uint32_t exception, uintptr_t pc)
{
CPUState *cs = env_cpu(env);
- qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception);
cs->exception_index = exception;
cpu_loop_exit_restore(cs, pc);
}
@@ -334,12 +333,12 @@ target_ulong helper_hyp_x_load(CPURISCVState *env, target_ulong address,
riscv_cpu_set_two_stage_lookup(env, true);
switch (memop) {
- case MO_TEUL:
- pte = cpu_ldub_data_ra(env, address, GETPC());
- break;
case MO_TEUW:
pte = cpu_lduw_data_ra(env, address, GETPC());
break;
+ case MO_TEUL:
+ pte = cpu_ldl_data_ra(env, address, GETPC());
+ break;
default:
g_assert_not_reached();
}
diff --git a/target/s390x/cpu_features.c b/target/s390x/cpu_features.c
index 31ea8df246..42fe0bf4ca 100644
--- a/target/s390x/cpu_features.c
+++ b/target/s390x/cpu_features.c
@@ -14,6 +14,7 @@
#include "qemu/osdep.h"
#include "qemu/module.h"
#include "cpu_features.h"
+#include "hw/s390x/pv.h"
#define DEF_FEAT(_FEAT, _NAME, _TYPE, _BIT, _DESC) \
[S390_FEAT_##_FEAT] = { \
@@ -105,6 +106,10 @@ void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type,
}
feat = find_next_bit(features, S390_FEAT_MAX, feat + 1);
}
+
+ if (type == S390_FEAT_TYPE_SCLP_FAC134 && s390_is_pv()) {
+ clear_be_bit(s390_feat_def(S390_FEAT_DIAG_318)->bit, data);
+ }
}
void s390_add_from_feat_block(S390FeatBitmap features, S390FeatType type,
diff --git a/target/s390x/cpu_features.h b/target/s390x/cpu_features.h
index ef52ffce83..87463f064d 100644
--- a/target/s390x/cpu_features.h
+++ b/target/s390x/cpu_features.h
@@ -81,6 +81,10 @@ const S390FeatGroupDef *s390_feat_group_def(S390FeatGroup group);
#define BE_BIT_NR(BIT) (BIT ^ (BITS_PER_LONG - 1))
+static inline void clear_be_bit(unsigned int bit_nr, uint8_t *array)
+{
+ array[bit_nr / 8] &= ~(0x80 >> (bit_nr % 8));
+}
static inline void set_be_bit(unsigned int bit_nr, uint8_t *array)
{
array[bit_nr / 8] |= 0x80 >> (bit_nr % 8);
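
The new clear_be_bit() is the inverse of the existing set_be_bit(): s390x facility blocks number bits MSB-first, so bit 0 is the top bit of byte 0. Below is a standalone sketch of the pair in use; the bit number is arbitrary and chosen only for illustration.

    #include <stdint.h>
    #include <stdio.h>

    static void set_be_bit(unsigned int bit_nr, uint8_t *array)
    {
        array[bit_nr / 8] |= 0x80 >> (bit_nr % 8);
    }

    static void clear_be_bit(unsigned int bit_nr, uint8_t *array)
    {
        array[bit_nr / 8] &= ~(0x80 >> (bit_nr % 8));
    }

    int main(void)
    {
        uint8_t fac_block[16] = { 0 };
        unsigned int bit = 9;                /* arbitrary example bit number */

        set_be_bit(bit, fac_block);          /* byte 1 becomes 0x40 */
        printf("after set:   byte[1] = 0x%02x\n", fac_block[1]);

        clear_be_bit(bit, fac_block);        /* byte 1 back to 0x00 */
        printf("after clear: byte[1] = 0x%02x\n", fac_block[1]);
        return 0;
    }
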
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
index ca484bfda7..461e0b8f4a 100644
--- a/target/s390x/cpu_models.c
+++ b/target/s390x/cpu_models.c
@@ -29,6 +29,7 @@
#include "hw/pci/pci.h"
#endif
#include "qapi/qapi-commands-machine-target.h"
+#include "hw/s390x/pv.h"
#define CPUDEF_INIT(_type, _gen, _ec_ga, _mha_pow, _hmfai, _name, _desc) \
{ \
@@ -238,6 +239,9 @@ bool s390_has_feat(S390Feat feat)
}
return 0;
}
+ if (feat == S390_FEAT_DIAG_318 && s390_is_pv()) {
+ return false;
+ }
return test_bit(feat, cpu->model->features);
}
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index f13eff688c..baa070fdf7 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -2498,8 +2498,7 @@ void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
*/
set_bit(S390_FEAT_EXTENDED_LENGTH_SCCB, model->features);
- /* DIAGNOSE 0x318 is not supported under protected virtualization */
- if (!s390_is_pv() && kvm_check_extension(kvm_state, KVM_CAP_S390_DIAG318)) {
+ if (kvm_check_extension(kvm_state, KVM_CAP_S390_DIAG318)) {
set_bit(S390_FEAT_DIAG_318, model->features);
}