KVM: arm64: Save the host's PtrAuth keys in non-preemptible context
author     Marc Zyngier <maz@kernel.org>
           Wed, 3 Jun 2020 17:24:01 +0000 (18:24 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 17 Jun 2020 14:42:09 +0000 (16:42 +0200)
commit ef3e40a7ea8dbe2abd0a345032cd7d5023b9684f upstream.

When using the PtrAuth feature in a guest, we need to save the host's
keys before allowing the guest to program them. For that, we dump
them in a per-CPU data structure (the so-called host context).

But both call sites that do this are in preemptible context,
which may end up in disaster should the vcpu thread get preempted
before reentering the guest.

Instead, save the keys eagerly on each vcpu_load(). This has an
increased overhead, but is at least safe.

Cc: stable@vger.kernel.org
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
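
Why the new location is safe: kvm_arch_vcpu_load() is invoked from the generic
vcpu_load() in virt/kvm/kvm_main.c, which wraps the call in get_cpu()/put_cpu()
and therefore runs it with preemption disabled. A rough sketch of that wrapper
for kernels of this era (illustrative only, not part of this patch):

    void vcpu_load(struct kvm_vcpu *vcpu)
    {
            /* get_cpu() disables preemption until the matching put_cpu(). */
            int cpu = get_cpu();

            preempt_notifier_register(&vcpu->preempt_notifier);
            /*
             * With this patch, the host's PtrAuth keys are saved in here,
             * while the thread cannot be preempted or migrated.
             */
            kvm_arch_vcpu_load(vcpu, cpu);
            put_cpu();
    }

The save removed from kvm_arm_vcpu_ptrauth_trap() below, by contrast, ran from
the exit handlers with preemption enabled, so the vcpu thread could be preempted
between saving the keys and reentering the guest.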
arch/arm/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/kvm/handle_exit.c
virt/kvm/arm/arm.c

diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 3944305e81df6be1d5ebb6fc406acce08cdb1f55..b26c1aaf1e3cac2cdb6e1afacfb0d44f14c410e7 100644
@@ -367,6 +367,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
        }
 }
 
-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
+static inline bool vcpu_has_ptrauth(struct kvm_vcpu *vcpu) { return false; }
+static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) { }
 
 #endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f658dda123645f53462586cf9b4798e8349177cb..0ab02e5ff71218925a7a94eaf4eec1231bedcab0 100644
@@ -111,12 +111,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
        vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
 }
 
-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
-{
-       if (vcpu_has_ptrauth(vcpu))
-               vcpu_ptrauth_disable(vcpu);
-}
-
 static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.vsesr_el2;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index aacfc55de44cb90cc641186bbc1512f2652a6faf..e0a4bcdb94516f45fda483c7fe98044ace2f8ae6 100644
@@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
        return 1;
 }
 
-#define __ptrauth_save_key(regs, key)                                          \
-({                                                                             \
-       regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);       \
-       regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);       \
-})
-
 /*
  * Handle the guest trying to use a ptrauth instruction, or trying to access a
  * ptrauth register.
  */
 void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
 {
-       struct kvm_cpu_context *ctxt;
-
-       if (vcpu_has_ptrauth(vcpu)) {
+       if (vcpu_has_ptrauth(vcpu))
                vcpu_ptrauth_enable(vcpu);
-               ctxt = vcpu->arch.host_cpu_context;
-               __ptrauth_save_key(ctxt->sys_regs, APIA);
-               __ptrauth_save_key(ctxt->sys_regs, APIB);
-               __ptrauth_save_key(ctxt->sys_regs, APDA);
-               __ptrauth_save_key(ctxt->sys_regs, APDB);
-               __ptrauth_save_key(ctxt->sys_regs, APGA);
-       } else {
+       else
                kvm_inject_undefined(vcpu);
-       }
 }
 
 /*
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index eda7b624eab8c46250789267a2118c50affab453..0aca5514a58bd2ce4d454cd6f097629da1632a93 100644
@@ -332,6 +332,16 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
        preempt_enable();
 }
 
+#ifdef CONFIG_ARM64
+#define __ptrauth_save_key(regs, key)                                          \
+({                                                                             \
+       regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);       \
+       regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);       \
+})
+#else
+#define  __ptrauth_save_key(regs, key) do { } while (0)
+#endif
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        int *last_ran;
@@ -365,7 +375,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        else
                vcpu_set_wfx_traps(vcpu);
 
-       vcpu_ptrauth_setup_lazy(vcpu);
+       if (vcpu_has_ptrauth(vcpu)) {
+               struct kvm_cpu_context __maybe_unused *ctxt = vcpu->arch.host_cpu_context;
+
+               __ptrauth_save_key(ctxt->sys_regs, APIA);
+               __ptrauth_save_key(ctxt->sys_regs, APIB);
+               __ptrauth_save_key(ctxt->sys_regs, APDA);
+               __ptrauth_save_key(ctxt->sys_regs, APDB);
+               __ptrauth_save_key(ctxt->sys_regs, APGA);
+
+               vcpu_ptrauth_disable(vcpu);
+       }
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
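
For reference, the __ptrauth_save_key() helper added above token-pastes the key
name into both the context index and the system-register accessor, so a single
invocation expands roughly as follows (illustrative expansion for the APIA pair):

    /* __ptrauth_save_key(ctxt->sys_regs, APIA) becomes approximately: */
    ctxt->sys_regs[APIAKEYLO_EL1] = read_sysreg_s(SYS_APIAKEYLO_EL1);
    ctxt->sys_regs[APIAKEYHI_EL1] = read_sysreg_s(SYS_APIAKEYHI_EL1);

Five such pairs (APIA, APIB, APDA, APDB, APGA) are now snapshotted into the
per-CPU host context on every vcpu_load(), which is the increased overhead the
commit message accepts in exchange for safety.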