path: root/arch/arm64/kvm/sys_regs.c
author		Shannon Zhao <shannon.zhao@linaro.org>	2015-09-08 09:15:56 +0200
committer	Marc Zyngier <marc.zyngier@arm.com>	2016-02-29 19:34:21 +0100
commit		d692b8ad6ec4814ddd9a37ce5c9c9d971e741088 (patch)
tree		1162196e1dde3244581e90b669681b062731921b /arch/arm64/kvm/sys_regs.c
parent		arm64: KVM: Add helper to handle PMCR register bits (diff)
arm64: KVM: Add access handler for PMUSERENR register
This register resets as UNKNOWN in 64-bit mode, while it resets to zero in 32-bit mode. Here we choose to reset it to zero for consistency.

PMUSERENR_EL0 holds bits which decide whether PMU registers can be accessed from EL0. Add some check helpers to handle accesses from EL0.

When these bits are zero, only a read of PMUSERENR traps to EL2; a write of PMUSERENR, or a read/write of any other PMU register, traps to EL1 rather than EL2 when HCR.TGE==0. With the current KVM configuration (HCR.TGE==0) there is thus no way to receive these traps at EL2. Here we write 0xf to the physical PMUSERENR register on VM entry, so that any PMU access from EL0 traps to EL2. Within the register access handler we then check the real value of the guest's PMUSERENR register to decide whether the access is allowed; if it is not, we return false to inject an UND into the guest.

Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
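To illustrate the VM-entry side described above: the hyp change itself lives outside sys_regs.c and is not part of the diff below, so what follows is only a minimal sketch, assuming the kernel's write_sysreg() helper and the ARMV8_PMU_USERENR_MASK (0xf) define; the function names are hypothetical. With all four enable bits set in the physical register, EL0 PMU accesses are no longer UNDEFed to EL1 and instead reach EL2 via KVM's existing MDCR_EL2-based PMU traps. A guest-side view of the resulting semantics is sketched after the diff.

	/*
	 * Illustrative sketch only; function names are hypothetical.
	 * On VM entry, set all four PMUSERENR enable bits (EN, SW, CR,
	 * ER == 0xf) in the physical register so that EL0 PMU accesses
	 * reach EL2 instead of UNDEFing to EL1; clear it on VM exit.
	 */
	static void __hyp_text __pmu_activate_traps(void)
	{
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	}

	static void __hyp_text __pmu_deactivate_traps(void)
	{
		write_sysreg(0, pmuserenr_el0);
	}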
Diffstat (limited to 'arch/arm64/kvm/sys_regs.c')
-rw-r--r--	arch/arm64/kvm/sys_regs.c	101
1 file changed, 96 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 12f36ef8caa0..fe15c2310a65 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -453,6 +453,37 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}
+static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
+{
+ u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+ return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
+{
+ u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+ return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN))
+ || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
+{
+ u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+ return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN))
+ || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
+{
+ u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+ return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN))
+ || vcpu_mode_priv(vcpu));
+}
+
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
@@ -461,6 +492,9 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (!kvm_arm_pmu_v3_ready(vcpu))
return trap_raz_wi(vcpu, p, r);
+ if (pmu_access_el0_disabled(vcpu))
+ return false;
+
if (p->is_write) {
/* Only update writeable bits of PMCR */
val = vcpu_sys_reg(vcpu, PMCR_EL0);
@@ -484,6 +518,9 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (!kvm_arm_pmu_v3_ready(vcpu))
return trap_raz_wi(vcpu, p, r);
+ if (pmu_access_event_counter_el0_disabled(vcpu))
+ return false;
+
if (p->is_write)
vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
else
@@ -504,6 +541,9 @@ static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
BUG_ON(p->is_write);
+ if (pmu_access_el0_disabled(vcpu))
+ return false;
+
if (!(p->Op2 & 1))
asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid));
else
@@ -538,16 +578,25 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
if (r->CRn == 9 && r->CRm == 13) {
if (r->Op2 == 2) {
/* PMXEVCNTR_EL0 */
+ if (pmu_access_event_counter_el0_disabled(vcpu))
+ return false;
+
idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
& ARMV8_PMU_COUNTER_MASK;
} else if (r->Op2 == 0) {
/* PMCCNTR_EL0 */
+ if (pmu_access_cycle_counter_el0_disabled(vcpu))
+ return false;
+
idx = ARMV8_PMU_CYCLE_IDX;
} else {
BUG();
}
} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
/* PMEVCNTRn_EL0 */
+ if (pmu_access_event_counter_el0_disabled(vcpu))
+ return false;
+
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
} else {
BUG();
@@ -556,10 +605,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
if (!pmu_counter_idx_valid(vcpu, idx))
return false;
- if (p->is_write)
+ if (p->is_write) {
+ if (pmu_access_el0_disabled(vcpu))
+ return false;
+
kvm_pmu_set_counter_value(vcpu, idx, p->regval);
- else
+ } else {
p->regval = kvm_pmu_get_counter_value(vcpu, idx);
+ }
return true;
}
@@ -572,6 +625,9 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (!kvm_arm_pmu_v3_ready(vcpu))
return trap_raz_wi(vcpu, p, r);
+ if (pmu_access_el0_disabled(vcpu))
+ return false;
+
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
/* PMXEVTYPER_EL0 */
idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
@@ -608,6 +664,9 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (!kvm_arm_pmu_v3_ready(vcpu))
return trap_raz_wi(vcpu, p, r);
+ if (pmu_access_el0_disabled(vcpu))
+ return false;
+
mask = kvm_pmu_valid_counter_mask(vcpu);
if (p->is_write) {
val = p->regval & mask;
@@ -635,6 +694,9 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (!kvm_arm_pmu_v3_ready(vcpu))
return trap_raz_wi(vcpu, p, r);
+ if (!vcpu_mode_priv(vcpu))
+ return false;
+
if (p->is_write) {
u64 val = p->regval & mask;
@@ -659,6 +721,9 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (!kvm_arm_pmu_v3_ready(vcpu))
return trap_raz_wi(vcpu, p, r);
+ if (pmu_access_el0_disabled(vcpu))
+ return false;
+
if (p->is_write) {
if (r->CRm & 0x2)
/* accessing PMOVSSET_EL0 */
@@ -681,6 +746,9 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (!kvm_arm_pmu_v3_ready(vcpu))
return trap_raz_wi(vcpu, p, r);
+ if (pmu_write_swinc_el0_disabled(vcpu))
+ return false;
+
if (p->is_write) {
mask = kvm_pmu_valid_counter_mask(vcpu);
kvm_pmu_software_increment(vcpu, p->regval & mask);
@@ -690,6 +758,26 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return false;
}
+static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (!kvm_arm_pmu_v3_ready(vcpu))
+ return trap_raz_wi(vcpu, p, r);
+
+ if (p->is_write) {
+ if (!vcpu_mode_priv(vcpu))
+ return false;
+
+ vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
+ & ARMV8_PMU_USERENR_MASK;
+ } else {
+ p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
+ & ARMV8_PMU_USERENR_MASK;
+ }
+
+ return true;
+}
+
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
/* DBGBVRn_EL1 */ \
@@ -919,9 +1007,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* PMXEVCNTR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
access_pmu_evcntr },
- /* PMUSERENR_EL0 */
+ /* PMUSERENR_EL0
+ * This register resets as unknown in 64bit mode while it resets as zero
+ * in 32bit mode. Here we choose to reset it as zero for consistency.
+ */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
- trap_raz_wi },
+ access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
/* PMOVSSET_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
access_pmovs, reset_unknown, PMOVSSET_EL0 },
@@ -1246,7 +1337,7 @@ static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
- { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
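For completeness, a guest-side view of the semantics the handlers above enforce; this is a minimal sketch, not part of the patch, and the helper name is hypothetical -- only the MRS instruction and the register name are architectural.

	/*
	 * Hypothetical guest EL0 helper. The read succeeds only if a
	 * privileged guest context has set the CR (or EN) bit in
	 * PMUSERENR_EL0. Otherwise the access traps to the host,
	 * pmu_access_cycle_counter_el0_disabled() returns true, the
	 * handler returns false, and KVM injects an UND into the guest.
	 */
	static inline unsigned long read_cycle_counter_el0(void)
	{
		unsigned long cval;

		asm volatile("mrs %0, pmccntr_el0" : "=r" (cval));
		return cval;
	}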