-rw-r--r--  arch/arm64/kvm/hyp/switch.c  | 13
-rw-r--r--  virt/kvm/arm/arm.c           |  1
-rw-r--r--  virt/kvm/arm/vgic/vgic.c     | 21
3 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 31badf6e91e8..86abbee40d3f 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -192,13 +192,15 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
write_sysreg(0, vttbr_el2);
}
-static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
+/* Save VGICv3 state on non-VHE systems */
+static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
__vgic_v3_save_state(vcpu);
}
-static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
+/* Restore VGICv3 state on non-VHE systems */
+static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
__vgic_v3_restore_state(vcpu);
@@ -400,8 +402,6 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__activate_traps(vcpu);
__activate_vm(vcpu->kvm);
- __vgic_restore_state(vcpu);
-
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
@@ -415,7 +415,6 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
fp_enabled = fpsimd_enabled_vhe();
sysreg_save_guest_state_vhe(guest_ctxt);
- __vgic_save_state(vcpu);
__deactivate_traps(vcpu);
@@ -451,7 +450,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
__activate_traps(vcpu);
__activate_vm(kern_hyp_va(vcpu->kvm));
- __vgic_restore_state(vcpu);
+ __hyp_vgic_restore_state(vcpu);
__timer_enable_traps(vcpu);
/*
@@ -484,7 +483,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
__sysreg_save_state_nvhe(guest_ctxt);
__sysreg32_save_state(vcpu);
__timer_disable_traps(vcpu);
- __vgic_save_state(vcpu);
+ __hyp_vgic_save_state(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
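The net effect on the VHE path above is that the world switch no longer touches the VGIC at all; the GICv3 system-register state is saved and restored from the main VGIC code instead. A simplified sketch of the resulting call ordering (the function name below is hypothetical; kvm_vgic_flush_hwstate(), kvm_vcpu_run_vhe() and kvm_vgic_sync_hwstate() are the existing kernel entry points):

static int run_vcpu_vhe_sketch(struct kvm_vcpu *vcpu)
{
	int ret;

	kvm_vgic_flush_hwstate(vcpu);	/* may restore LRs/VMCR from the kernel */
	ret = kvm_vcpu_run_vhe(vcpu);	/* world switch: no VGIC accesses anymore */
	kvm_vgic_sync_hwstate(vcpu);	/* may save LRs/VMCR back from the kernel */

	return ret;
}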
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 09dbee56ed8f..dba629c5f8ac 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -717,6 +717,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
kvm_request_pending(vcpu)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
+ isb(); /* Ensure work in x_flush_hwstate is committed */
kvm_pmu_sync_hwstate(vcpu);
if (static_branch_unlikely(&userspace_irqchip_in_use))
kvm_timer_sync_hwstate(vcpu);
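The isb() added above ensures that any system-register updates performed by the preceding *_flush_hwstate calls have taken effect before state is read back on this abort path. Purely for illustration, this is the generic arm64 pattern it relies on (the helper name and the particular register are made up here):

static inline void sysreg_update_sketch(u64 val)
{
	/*
	 * A sysreg write only reliably affects later instructions after a
	 * context synchronization event; isb() provides one.
	 */
	write_sysreg(val, cntv_ctl_el0);
	isb();
}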
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 12e2a28f437e..eaab4a616ecf 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -19,6 +19,7 @@
#include <linux/list_sort.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <asm/kvm_hyp.h>
#include "vgic.h"
@@ -749,10 +750,22 @@ next:
vgic_clear_lr(vcpu, count);
}
+static inline bool can_access_vgic_from_kernel(void)
+{
+ /*
+ * GICv2 can always be accessed from the kernel because it is
+ * memory-mapped, and VHE systems can access GICv3 EL2 system
+ * registers.
+ */
+ return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
+}
+
static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_save_state(vcpu);
+ else
+ __vgic_v3_save_state(vcpu);
}
/* Sync back the hardware VGIC state into our emulation after a guest's run. */
@@ -760,7 +773,8 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- vgic_save_state(vcpu);
+ if (can_access_vgic_from_kernel())
+ vgic_save_state(vcpu);
WARN_ON(vgic_v4_sync_hwstate(vcpu));
@@ -777,6 +791,8 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
vgic_v2_restore_state(vcpu);
+ else
+ __vgic_v3_restore_state(vcpu);
}
/* Flush our emulation state into the GIC hardware before entering the guest. */
@@ -803,7 +819,8 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
out:
- vgic_restore_state(vcpu);
+ if (can_access_vgic_from_kernel())
+ vgic_restore_state(vcpu);
}
void kvm_vgic_load(struct kvm_vcpu *vcpu)
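Taken together, kvm_vgic_flush_hwstate() and kvm_vgic_sync_hwstate() now only touch the hardware when can_access_vgic_from_kernel() is true, leaving the GICv3-without-VHE case to the renamed __hyp_vgic_* helpers in the world switch. A hedged sketch of the one remaining in-hyp case (the helper below is hypothetical; its condition is simply the complement of can_access_vgic_from_kernel()):

static inline bool vgic_handled_in_hyp(void)
{
	/* GICv3 system-register CPU interface, but no VHE: save/restore at EL2 */
	return static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
	       !has_vhe();
}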