author    Andrew Murray  2019-06-17 21:01:04 +0200
committer Marc Zyngier   2019-07-05 14:56:18 +0200
commit    218907cbc2b82419c70180610163c987d4764b27 (patch)
tree      a22012749fc9a5bf7c6c28c2de69374d5dccf45b /virt
parent    KVM: arm/arm64: Re-create event when setting counter value (diff)
KVM: arm/arm64: Remove pmc->bitmask
We currently use pmc->bitmask to determine the width of the pmc - however, it is superfluous, as the pmc index already describes whether the pmc is a cycle counter or an event counter. The architecture clearly describes the widths of these counters. Let's remove the bitmask to simplify the code.

Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
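For context, here is a minimal stand-alone sketch of the idea outside the kernel: the counter width follows from the counter index alone (plus PMCR_EL0.LC for the cycle counter), so no per-counter bitmask has to be stored. The constants mirror the kernel definitions; the free-standing helpers are hypothetical simplifications, not the patched functions themselves.

#include <stdbool.h>
#include <stdint.h>

#define ARMV8_PMU_CYCLE_IDX	31		/* the cycle counter is always index 31 */
#define ARMV8_PMU_PMCR_LC	(1U << 6)	/* PMCR_EL0.LC: long (64-bit) cycle counter */

/* Only the cycle counter can be 64 bits wide, and only when PMCR.LC is set. */
static bool pmu_idx_is_64bit(uint32_t pmcr, uint64_t select_idx)
{
	return select_idx == ARMV8_PMU_CYCLE_IDX && (pmcr & ARMV8_PMU_PMCR_LC);
}

/* The effective mask is derived from the index; there is nothing per-counter to store. */
static uint64_t pmu_counter_mask(uint32_t pmcr, uint64_t select_idx)
{
	return pmu_idx_is_64bit(pmcr, select_idx) ? UINT64_MAX : UINT32_MAX;
}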
Diffstat (limited to 'virt')
-rw-r--r-- virt/kvm/arm/pmu.c | 30
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index f77643f4274c..24c6cf869a16 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -14,6 +14,18 @@
#include <kvm/arm_vgic.h>
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
+
+/**
+ * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ */
+static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
+{
+ return (select_idx == ARMV8_PMU_CYCLE_IDX &&
+ __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
+}
+
/**
* kvm_pmu_get_counter_value - get PMU counter value
* @vcpu: The vcpu pointer
@@ -36,7 +48,10 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
counter += perf_event_read_value(pmc->perf_event, &enabled,
&running);
- return counter & pmc->bitmask;
+ if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+ counter = lower_32_bits(counter);
+
+ return counter;
}
/**
@@ -102,7 +117,6 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
pmu->pmc[i].idx = i;
- pmu->pmc[i].bitmask = 0xffffffffUL;
}
}
@@ -337,8 +351,6 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
*/
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
- struct kvm_pmc *pmc;
u64 mask;
int i;
@@ -357,11 +369,6 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
kvm_pmu_set_counter_value(vcpu, i, 0);
}
-
- if (val & ARMV8_PMU_PMCR_LC) {
- pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
- pmc->bitmask = 0xffffffffffffffffUL;
- }
}
static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
@@ -409,7 +416,10 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
counter = kvm_pmu_get_counter_value(vcpu, select_idx);
/* The initial sample period (overflow count) of an event. */
- attr.sample_period = (-counter) & pmc->bitmask;
+ if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+ attr.sample_period = (-counter) & GENMASK(63, 0);
+ else
+ attr.sample_period = (-counter) & GENMASK(31, 0);
event = perf_event_create_kernel_counter(&attr, -1, current,
kvm_pmu_perf_overflow, pmc);
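A side note on the new sample_period computation above: (-counter) masked to the counter width is the number of increments remaining until the counter overflows. A tiny illustrative example (the concrete counter value is hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t counter = 0xfffffff0ULL;	/* a 32-bit counter close to overflow */

	/* (-counter) & GENMASK(31, 0): increments left before the 32-bit wrap-around */
	uint64_t sample_period = (-counter) & 0xffffffffULL;

	printf("overflows after %llu increments\n",
	       (unsigned long long)sample_period);	/* prints 16 */
	return 0;
}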