x86/cpufeatures: Disentangle SSBD enumeration
author Thomas Gleixner <tglx@linutronix.de>
Thu, 10 May 2018 18:21:36 +0000 (20:21 +0200)
committer Ben Hutchings <ben@decadent.org.uk>
Wed, 3 Oct 2018 03:09:46 +0000 (04:09 +0100)
commit 52817587e706686fcdb27f14c1b000c92f266c96 upstream.

The SSBD enumeration is, similar to the other bits, magically shared
between Intel and AMD, though the mechanisms are different.

Make X86_FEATURE_SSBD synthetic and set it depending on the vendor-specific
features or family-dependent setup.

Change the Intel bit to X86_FEATURE_SPEC_CTRL_SSBD to denote that SSBD is
controlled via MSR_SPEC_CTRL and fix up the usage sites.
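In short, the synthetic bit ends up being derived roughly as below (an
illustrative sketch condensed from the hunks that follow, not part of the
patch itself):

	/* CPUs that enumerate SSBD control via MSR_SPEC_CTRL (CPUID bit): */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);

	/* AMD families that use the non-architectural LS_CFG MSR instead: */
	if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
		setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
		setup_force_cpu_cap(X86_FEATURE_SSBD);
	}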

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
[bwh: Backported to 3.16:
 - Use the next available bit number in CPU feature word 7
 - Adjust filename, context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
arch/x86/include/asm/cpufeature.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/process.c

diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 15b08bb4596a7d7e4b5ca1303abe245589ed8fbb..e675956f5d65b22fbe469bb3be69f0fed55a113f 100644
 #define X86_FEATURE_USE_IBPB   (7*32+12) /* "" Indirect Branch Prediction Barrier enabled */
 #define X86_FEATURE_USE_IBRS_FW (7*32+13) /* "" Use IBRS during runtime firmware calls */
 #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE (7*32+14) /* "" Disable Speculative Store Bypass. */
-#define X86_FEATURE_AMD_SSBD   (7*32+15)  /* "" AMD SSBD implementation */
+#define X86_FEATURE_LS_CFG_SSBD        (7*32+15) /* "" AMD SSBD implementation */
 #define X86_FEATURE_IBRS       (7*32+16) /* Indirect Branch Restricted Speculation */
 #define X86_FEATURE_IBPB       (7*32+17) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP      (7*32+18) /* Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_MSR_SPEC_CTRL (7*32+19) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD       (7*32+20) /* Speculative Store Bypass Disable */
 
 #define X86_FEATURE_RETPOLINE  (7*32+29) /* "" Generic Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_RETPOLINE_AMD (7*32+30) /* "" AMD Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_SPEC_CTRL          (10*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP                (10*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES  (10*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
-#define X86_FEATURE_SSBD               (10*32+31) /* Speculative Store Bypass Disable */
+#define X86_FEATURE_SPEC_CTRL_SSBD     (10*32+31) /* "" Speculative Store Bypass Disable */
 
 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 11 */
 #define X86_FEATURE_AMD_IBPB           (11*32+12) /* Indirect Branch Prediction Barrier */
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 5864c706b062fab8f3378f0c9a356b871bef91ec..e6a095f41bc2b5c0505801c60100306e158aa0fc 100644
@@ -486,8 +486,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                 * avoid RMW. If that faults, do not enable SSBD.
                 */
                if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+                       setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
                        setup_force_cpu_cap(X86_FEATURE_SSBD);
-                       setup_force_cpu_cap(X86_FEATURE_AMD_SSBD);
                        x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
                }
        }
@@ -801,11 +801,6 @@ static void init_amd(struct cpuinfo_x86 *c)
                set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
 
        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
-
-       if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) {
-               set_cpu_cap(c, X86_FEATURE_SSBD);
-               set_cpu_cap(c, X86_FEATURE_AMD_SSBD);
-       }
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 1c67a79323840affaeb8630b185f3a3e4efaf8a7..43a084002e1f58e0dc1b0a94b046a712a2f3bc0d 100644
@@ -220,8 +220,8 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
        if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                return;
 
-       /* Intel controls SSB in MSR_SPEC_CTRL */
-       if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+       /* SSBD controlled in MSR_SPEC_CTRL */
+       if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
                host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
        if (host != guest_spec_ctrl)
@@ -237,8 +237,8 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
        if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                return;
 
-       /* Intel controls SSB in MSR_SPEC_CTRL */
-       if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
+       /* SSBD controlled in MSR_SPEC_CTRL */
+       if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
                host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
 
        if (host != guest_spec_ctrl)
@@ -250,7 +250,7 @@ static void x86_amd_ssb_disable(void)
 {
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
 
-       if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
+       if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 2664a68644705b002c1f88f0ce6dcaf2a3226983..aaebf24e494672cb919b681b21ba785ea446ccd1 100644
@@ -700,6 +700,9 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
        if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
                set_cpu_cap(c, X86_FEATURE_STIBP);
 
+       if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
+               set_cpu_cap(c, X86_FEATURE_SSBD);
+
        if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
                set_cpu_cap(c, X86_FEATURE_IBRS);
                set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 15df9da98c58a2e2c737d0dd2a84d5c5ad596afd..0d41977fb6ada06a8312c7a592ebfb8e78673a43 100644
@@ -122,6 +122,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
                setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
                setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
                setup_clear_cpu_cap(X86_FEATURE_SSBD);
+               setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
        }
 
        /*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 0b3adbbc2e2b393a305280d1f57aaec8354b3be7..bbc39a88c3c49cf03374c61203c644579664bac4 100644
@@ -221,7 +221,7 @@ static __always_inline void __speculative_store_bypass_update(unsigned long tifn
 {
        u64 msr;
 
-       if (static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+       if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
                msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
                wrmsrl(MSR_AMD64_LS_CFG, msr);
        } else {