git.hungrycats.org Git - linux/commitdiff
arm64: entry: Add vectors that have the bhb mitigation sequences
author: James Morse <james.morse@arm.com>
Tue, 15 Mar 2022 18:24:08 +0000 (18:24 +0000)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 19 Mar 2022 12:40:15 +0000 (13:40 +0100)
commit ba2689234be92024e5635d30fe744f4853ad97db upstream.

Some CPUs affected by Spectre-BHB need a sequence of branches, or a
firmware call to be run before any indirect branch. This needs to go
in the vectors. No CPU needs both.

While this can be patched in, it would run on all CPUs as there is a
single set of vectors. If only one part of a big/little combination is
affected, the unaffected CPUs have to run the mitigation too.

Create extra vectors that include the sequence. Subsequent patches will
allow affected CPUs to select this set of vectors. Later patches will
modify the loop count to match what the CPU requires.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/vectors.h [new file with mode: 0644]
arch/arm64/kernel/entry.S
include/linux/arm-smccc.h

index 4a4258f17c868f8acde2d5cfc7cdcfe325d55e82..1279e4f5bd8fa100201a6cbe36aa671cc3bc4637 100644 (file)
@@ -757,4 +757,28 @@ USER(\label, ic    ivau, \tmp2)                    // invalidate I line PoU
 .Lyield_out_\@ :
        .endm
 
+       .macro __mitigate_spectre_bhb_loop      tmp
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+       mov     \tmp, #32
+.Lspectre_bhb_loop\@:
+       b       . + 4
+       subs    \tmp, \tmp, #1
+       b.ne    .Lspectre_bhb_loop\@
+       sb
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+       .endm
+
+       /* Save/restores x0-x3 to the stack */
+       .macro __mitigate_spectre_bhb_fw
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+       stp     x0, x1, [sp, #-16]!
+       stp     x2, x3, [sp, #-16]!
+       mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_3
+alternative_cb smccc_patch_fw_mitigation_conduit
+       nop                                     // Patched to SMC/HVC #0
+alternative_cb_end
+       ldp     x2, x3, [sp], #16
+       ldp     x0, x1, [sp], #16
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+       .endm
 #endif /* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h
new file mode 100644 (file)
index 0000000..16ca742
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 ARM Ltd.
+ */
+#ifndef __ASM_VECTORS_H
+#define __ASM_VECTORS_H
+
+/*
+ * Note: the order of this enum corresponds to two arrays in entry.S:
+ * tramp_vecs and __bp_harden_el1_vectors. By default the canonical
+ * 'full fat' vectors are used directly.
+ */
+enum arm64_bp_harden_el1_vectors {
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+       /*
+        * Perform the BHB loop mitigation, before branching to the canonical
+        * vectors.
+        */
+       EL1_VECTOR_BHB_LOOP,
+
+       /*
+        * Make the SMC call for firmware mitigation, before branching to the
+        * canonical vectors.
+        */
+       EL1_VECTOR_BHB_FW,
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+
+       /*
+        * Remap the kernel before branching to the canonical vectors.
+        */
+       EL1_VECTOR_KPTI,
+};
+
+#endif /* __ASM_VECTORS_H */
index 1bc33f506bb116b5800e353e1dc734558e77df0c..14351ee5e81282adcb68ca7afa6fe6fbe775df97 100644 (file)
@@ -1063,13 +1063,26 @@ alternative_else_nop_endif
        sub     \dst, \dst, PAGE_SIZE
        .endm
 
-       .macro tramp_ventry, vector_start, regsize, kpti
+
+#define BHB_MITIGATION_NONE    0
+#define BHB_MITIGATION_LOOP    1
+#define BHB_MITIGATION_FW      2
+
+       .macro tramp_ventry, vector_start, regsize, kpti, bhb
        .align  7
 1:
        .if     \regsize == 64
        msr     tpidrro_el0, x30        // Restored in kernel_ventry
        .endif
 
+       .if     \bhb == BHB_MITIGATION_LOOP
+       /*
+        * This sequence must appear before the first indirect branch. i.e. the
+        * ret out of tramp_ventry. It appears here because x30 is free.
+        */
+       __mitigate_spectre_bhb_loop     x30
+       .endif // \bhb == BHB_MITIGATION_LOOP
+
        .if     \kpti == 1
        /*
         * Defend against branch aliasing attacks by pushing a dummy
@@ -1097,6 +1110,15 @@ alternative_else_nop_endif
        ldr     x30, =vectors
        .endif // \kpti == 1
 
+       .if     \bhb == BHB_MITIGATION_FW
+       /*
+        * The firmware sequence must appear before the first indirect branch.
+        * i.e. the ret out of tramp_ventry. But it also needs the stack to be
+        * mapped to save/restore the registers the SMC clobbers.
+        */
+       __mitigate_spectre_bhb_fw
+       .endif // \bhb == BHB_MITIGATION_FW
+
        add     x30, x30, #(1b - \vector_start + 4)
        ret
 .org 1b + 128  // Did we overflow the ventry slot?
@@ -1104,6 +1126,9 @@ alternative_else_nop_endif
 
        .macro tramp_exit, regsize = 64
        adr     x30, tramp_vectors
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+       add     x30, x30, SZ_4K
+#endif
        msr     vbar_el1, x30
        ldr     lr, [sp, #S_LR]
        tramp_unmap_kernel      x29
@@ -1115,26 +1140,32 @@ alternative_else_nop_endif
        sb
        .endm
 
-       .macro  generate_tramp_vector,  kpti
+       .macro  generate_tramp_vector,  kpti, bhb
 .Lvector_start\@:
        .space  0x400
 
        .rept   4
-       tramp_ventry    .Lvector_start\@, 64, \kpti
+       tramp_ventry    .Lvector_start\@, 64, \kpti, \bhb
        .endr
        .rept   4
-       tramp_ventry    .Lvector_start\@, 32, \kpti
+       tramp_ventry    .Lvector_start\@, 32, \kpti, \bhb
        .endr
        .endm
 
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 /*
  * Exception vectors trampoline.
+ * The order must match __bp_harden_el1_vectors and the
+ * arm64_bp_harden_el1_vectors enum.
  */
        .pushsection ".entry.tramp.text", "ax"
        .align  11
 ENTRY(tramp_vectors)
-       generate_tramp_vector   kpti=1
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+       generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_LOOP
+       generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_FW
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
+       generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_NONE
 END(tramp_vectors)
 
 ENTRY(tramp_exit_native)
@@ -1161,7 +1192,7 @@ __entry_tramp_data_start:
  * Exception vectors for spectre mitigations on entry from EL1 when
  * kpti is not in use.
  */
-       .macro generate_el1_vector
+       .macro generate_el1_vector, bhb
 .Lvector_start\@:
        kernel_ventry   1, sync_invalid                 // Synchronous EL1t
        kernel_ventry   1, irq_invalid                  // IRQ EL1t
@@ -1174,17 +1205,21 @@ __entry_tramp_data_start:
        kernel_ventry   1, error                        // Error EL1h
 
        .rept   4
-       tramp_ventry    .Lvector_start\@, 64, kpti=0
+       tramp_ventry    .Lvector_start\@, 64, 0, \bhb
        .endr
        .rept 4
-       tramp_ventry    .Lvector_start\@, 32, kpti=0
+       tramp_ventry    .Lvector_start\@, 32, 0, \bhb
        .endr
        .endm
 
+/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
        .pushsection ".entry.text", "ax"
        .align  11
 SYM_CODE_START(__bp_harden_el1_vectors)
-       generate_el1_vector
+#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
+       generate_el1_vector     bhb=BHB_MITIGATION_LOOP
+       generate_el1_vector     bhb=BHB_MITIGATION_FW
+#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
 SYM_CODE_END(__bp_harden_el1_vectors)
        .popsection
 
index 4e97ba64dbb424afad8b01afea330cdd4ae80450..3e6ef64e74d3ddaa9c6de0515cec8b832401c8e5 100644 (file)
                           ARM_SMCCC_SMC_32,                            \
                           0, 0x7fff)
 
+#define ARM_SMCCC_ARCH_WORKAROUND_3                                    \
+       ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
+                          ARM_SMCCC_SMC_32,                            \
+                          0, 0x3fff)
+
 #define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED   1
 
 #ifndef __ASSEMBLY__