author		Marc Zyngier	2019-03-13 19:07:50 +0100
committer	Marc Zyngier	2019-03-19 18:56:34 +0100
commit		ca71228b42a96908eca7658861eafacd227856c9 (patch)
tree		b915c50bc72f0d568a31b392ccb89065ecbd77f1 /virt/kvm/arm/vgic
parent		KVM: arm64: Reset the PMU in preemptible context (diff)
arm64: KVM: Always set ICH_HCR_EL2.EN if GICv4 is enabled
The normal interrupt flow is not to enable the vgic when no virtual
interrupt is to be injected (i.e. the LRs are empty). But when a guest
is likely to use GICv4 for LPIs, we absolutely need to switch it on
at all times. Otherwise, VLPIs only get delivered when there is
something in the LRs, which doesn't happen very often.

Reported-by: Nianyao Tang <tangnianyao@huawei.com>
Tested-by: Shameerali Kolothum Thodi <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
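For context (not part of this patch): the new gate is the existing
vgic_supports_direct_msis() helper. A minimal sketch of what it checks,
as I read it from virt/kvm/arm/vgic/vgic-mmio-v3.c around this point in
the tree (paraphrased, so treat the exact location as an assumption):

	/*
	 * Existing helper used as the gate below; not introduced by this
	 * commit. Direct injection of MSIs as VLPIs is only possible when
	 * the host GIC is GICv4-capable and the guest actually has an ITS.
	 */
	bool vgic_supports_direct_msis(struct kvm *kvm)
	{
		return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
	}

With that gate, kvm_vgic_flush_hwstate() no longer bails out early on an
empty AP list when direct injection is possible, so the vgic stays enabled
(ICH_HCR_EL2.EN set) and VLPIs can be delivered even when no LR is in use.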
Diffstat (limited to 'virt/kvm/arm/vgic')
-rw-r--r--	virt/kvm/arm/vgic/vgic.c	14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index abd9c7352677..3af69f2a3866 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -867,15 +867,21 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * either observe the new interrupt before or after doing this check,
 	 * and introducing additional synchronization mechanism doesn't change
 	 * this.
+	 *
+	 * Note that we still need to go through the whole thing if anything
+	 * can be directly injected (GICv4).
 	 */
-	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
+	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
+	    !vgic_supports_direct_msis(vcpu->kvm))
 		return;
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-	raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
-	vgic_flush_lr_state(vcpu);
-	raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
+		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+		vgic_flush_lr_state(vcpu);
+		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	}
 
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);