www.infradead.org Git - users/hch/misc.git/commitdiff
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git
author Stephen Rothwell <sfr@canb.auug.org.au>
Tue, 13 May 2025 06:55:10 +0000 (16:55 +1000)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Tue, 13 May 2025 06:55:10 +0000 (16:55 +1000)
# Conflicts:
# Documentation/admin-guide/hw-vuln/index.rst
# arch/loongarch/kernel/entry.S
# arch/x86/Kconfig.assembler
# arch/x86/include/asm/cpufeatures.h
# arch/x86/kernel/alternative.c
# arch/x86/kernel/cpu/bugs.c
# arch/x86/kernel/cpu/common.c
# drivers/base/cpu.c
# drivers/cpufreq/amd-pstate.c
# drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
# include/linux/cpu.h

85 files changed:
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/hw-vuln/index.rst
Documentation/admin-guide/kernel-parameters.txt
MAINTAINERS
arch/arm/common/sa1111.c
arch/arm/plat-orion/gpio.c
arch/loongarch/kernel/entry.S
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
arch/powerpc/platforms/pseries/msi.c
arch/powerpc/sysdev/mpic.c
arch/riscv/kernel/entry.S
arch/x86/Kconfig
arch/x86/Kconfig.assembler
arch/x86/include/asm/alternative.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/setup.h
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/module.c
arch/x86/kernel/setup.c
arch/x86/kernel/static_call.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/svm/sev.c
arch/x86/kvm/x86.c
arch/x86/lib/retpoline.S
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/pat/set_memory.c
arch/x86/mm/pgtable.c
arch/x86/net/bpf_jit_comp.c
arch/x86/realmode/init.c
drivers/base/cpu.c
drivers/base/power/main.c
drivers/char/random.c
drivers/cpufreq/amd-pstate-ut.c
drivers/cpufreq/amd-pstate.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/ccp/sev-dev.c
drivers/firmware/efi/libstub/Makefile
drivers/gpu/drm/msm/msm_mdss.c
drivers/gpu/drm/ttm/tests/ttm_bo_test.c
drivers/hwmon/k10temp.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-piix4.c
drivers/idle/intel_idle.c
drivers/memory/omap-gpmc.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/microchip/ksz_ptp.c
drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
drivers/net/usb/lan78xx.c
drivers/pci/controller/dwc/pcie-dw-rockchip.c
drivers/pci/controller/pci-mvebu.c
drivers/pci/pci.h
drivers/platform/x86/amd/hsmp/acpi.c
drivers/platform/x86/amd/hsmp/plat.c
drivers/platform/x86/amd/pmc/pmc-quirks.c
drivers/soc/fsl/qe/qe_ic.c
drivers/soc/qcom/smp2p.c
drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
include/linux/cpu.h
include/linux/mm_types.h
include/linux/mmap_lock.h
include/linux/sched.h
include/linux/vmalloc.h
init/Kconfig
init/main.c
kernel/fork.c
kernel/sched/core.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/sched.h
kernel/sched/topology.c
mm/nommu.c
mm/vmalloc.c
net/bridge/br_multicast.c
sound/soc/sof/amd/acp.c
tools/objtool/arch/x86/decode.c
tools/perf/util/amd-sample-raw.c
tools/testing/selftests/bpf/bench.c

index ce296b8430fc987358329d7697cd1990e4d10892,cf1511145927901d1a0f57ea714119f21d82d382..c2cc1ceed3d61b85641d119132fe327520cb2ef7
@@@ -23,4 -23,4 +23,5 @@@ are configurable at compile, boot or run time.
     gather_data_sampling
     reg-file-data-sampling
     rsb
 +   indirect-target-selection
+    old_microcode
diff --cc MAINTAINERS
Simple merge
Simple merge
Simple merge
index 77f6fb9146a24875bc8d0d6bb5d32c6beeab748d,2abc29e573810e000f2fef4646ddca0dbb80eabe..47e1db9a1ce47b7835c3acfc9e253d806d349da4
@@@ -78,25 -77,21 +78,23 @@@ SYM_CODE_START(handle_syscall)
  SYM_CODE_END(handle_syscall)
  _ASM_NOKPROBE(handle_syscall)
  
- SYM_CODE_START(ret_from_fork)
+ SYM_CODE_START(ret_from_fork_asm)
        UNWIND_HINT_REGS
-       bl              schedule_tail           # a0 = struct task_struct *prev
-       move            a0, sp
-       bl              syscall_exit_to_user_mode
+       move            a1, sp
+       bl              ret_from_fork
 +      STACKLEAK_ERASE
        RESTORE_STATIC
        RESTORE_SOME
        RESTORE_SP_AND_RET
- SYM_CODE_END(ret_from_fork)
+ SYM_CODE_END(ret_from_fork_asm)
  
- SYM_CODE_START(ret_from_kernel_thread)
+ SYM_CODE_START(ret_from_kernel_thread_asm)
        UNWIND_HINT_REGS
-       bl              schedule_tail           # a0 = struct task_struct *prev
-       move            a0, s1
-       jirl            ra, s0, 0
-       move            a0, sp
-       bl              syscall_exit_to_user_mode
+       move            a1, sp
+       move            a2, s0
+       move            a3, s1
+       bl              ret_from_kernel_thread
 +      STACKLEAK_ERASE
        RESTORE_STATIC
        RESTORE_SOME
        RESTORE_SP_AND_RET
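
The register moves above give away the new C calling convention: a0 carries prev, a1 the saved pt_regs, and for kernel threads a2/a3 the thread function and its argument. A hedged reconstruction of the prototypes the bl targets resolve to (the exact qualifiers are an assumption, not shown in this hunk):

	/* sketch; attributes such as noinstr are assumed, not taken from this diff */
	asmlinkage void ret_from_fork(struct task_struct *prev, struct pt_regs *regs);
	asmlinkage void ret_from_kernel_thread(struct task_struct *prev, struct pt_regs *regs,
					       int (*fn)(void *), void *fn_arg);
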
Simple merge
Simple merge
Simple merge
Simple merge
index 4d06fd3c8dfedac069ece51275ecd46bf3fcad6e,fa8858546d5e68aaa8c4f126d4539a2612b9f7d4..c827f694fb72d72d785992e6606f87d59fcf2751
@@@ -6,11 -6,16 +6,6 @@@ config AS_AVX512
        help
          Supported by binutils >= 2.25 and LLVM integrated assembler
  
- config AS_TPAUSE
-       def_bool $(as-instr,tpause %ecx)
 -config AS_SHA1_NI
 -      def_bool $(as-instr,sha1msg1 %xmm0$(comma)%xmm1)
--      help
-         Supported by binutils >= 2.31.1 and LLVM integrated assembler >= V7
 -        Supported by binutils >= 2.24 and LLVM integrated assembler
 -
 -config AS_SHA256_NI
 -      def_bool $(as-instr,sha256msg1 %xmm0$(comma)%xmm1)
 -      help
 -        Supported by binutils >= 2.24 and LLVM integrated assembler
--
  config AS_GFNI
        def_bool $(as-instr,vgf2p8mulb %xmm0$(comma)%xmm1$(comma)%xmm2)
        help
Simple merge
index 39e61212ac9a916dbb2ceb1b0b95e8d71303c547,7642310276a8fdf905261534262d2ca8078a2d6c..2fda5f6f49daf55dcf16fdaba295f56324e1d049
  #define X86_FEATURE_CLEAR_BHB_LOOP    (21*32+ 1) /* Clear branch history at syscall entry using SW loop */
  #define X86_FEATURE_BHI_CTRL          (21*32+ 2) /* BHI_DIS_S HW control available */
  #define X86_FEATURE_CLEAR_BHB_HW      (21*32+ 3) /* BHI_DIS_S HW control enabled */
- #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
- #define X86_FEATURE_AMD_FAST_CPPC     (21*32 + 5) /* Fast CPPC */
- #define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32 + 6) /* Heterogeneous Core Topology */
- #define X86_FEATURE_AMD_WORKLOAD_CLASS        (21*32 + 7) /* Workload Classification */
- #define X86_FEATURE_PREFER_YMM                (21*32 + 8) /* Avoid ZMM registers due to downclocking */
- #define X86_FEATURE_INDIRECT_THUNK_ITS        (21*32 + 9) /* Use thunk for indirect branches in lower half of cacheline */
+ #define X86_FEATURE_CLEAR_BHB_VMEXIT  (21*32+ 4) /* Clear branch history at vmexit using SW loop */
+ #define X86_FEATURE_AMD_FAST_CPPC     (21*32+ 5) /* Fast CPPC */
+ #define X86_FEATURE_AMD_HTR_CORES     (21*32+ 6) /* Heterogeneous Core Topology */
+ #define X86_FEATURE_AMD_WORKLOAD_CLASS        (21*32+ 7) /* Workload Classification */
+ #define X86_FEATURE_PREFER_YMM                (21*32+ 8) /* Avoid ZMM registers due to downclocking */
 -#define X86_FEATURE_APX                       (21*32+ 9) /* Advanced Performance Extensions */
++#define X86_FEATURE_INDIRECT_THUNK_ITS        (21*32+ 9) /* Use thunk for indirect branches in lower half of cacheline */
++#define X86_FEATURE_APX                       (21*32+10) /* Advanced Performance Extensions */
  
  /*
   * BUG word(s)
  #define X86_BUG_TDX_PW_MCE            X86_BUG(31) /* "tdx_pw_mce" CPU may incur #MC if non-TD software does partial write to TDX private memory */
  
  /* BUG word 2 */
- #define X86_BUG_SRSO                  X86_BUG(1*32 + 0) /* "srso" AMD SRSO bug */
- #define X86_BUG_DIV0                  X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */
- #define X86_BUG_RFDS                  X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
- #define X86_BUG_BHI                   X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
- #define X86_BUG_IBPB_NO_RET           X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
- #define X86_BUG_SPECTRE_V2_USER               X86_BUG(1*32 + 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */
- #define X86_BUG_ITS                   X86_BUG(1*32 + 6) /* "its" CPU is affected by Indirect Target Selection */
- #define X86_BUG_ITS_NATIVE_ONLY               X86_BUG(1*32 + 7) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
+ #define X86_BUG_SRSO                  X86_BUG( 1*32+ 0) /* "srso" AMD SRSO bug */
+ #define X86_BUG_DIV0                  X86_BUG( 1*32+ 1) /* "div0" AMD DIV0 speculation bug */
+ #define X86_BUG_RFDS                  X86_BUG( 1*32+ 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
+ #define X86_BUG_BHI                   X86_BUG( 1*32+ 3) /* "bhi" CPU is affected by Branch History Injection */
+ #define X86_BUG_IBPB_NO_RET           X86_BUG( 1*32+ 4) /* "ibpb_no_ret" IBPB omits return target predictions */
+ #define X86_BUG_SPECTRE_V2_USER               X86_BUG( 1*32+ 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */
 -#define X86_BUG_OLD_MICROCODE         X86_BUG( 1*32+ 6) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */
++#define X86_BUG_ITS                   X86_BUG( 1*32+ 6) /* "its" CPU is affected by Indirect Target Selection */
++#define X86_BUG_ITS_NATIVE_ONLY               X86_BUG( 1*32+ 7) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
++#define X86_BUG_OLD_MICROCODE         X86_BUG( 1*32+ 8) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */
  #endif /* _ASM_X86_CPUFEATURES_H */
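
The conflict resolution above keeps the two new ITS bug bits at word 1, bits 6-7, moves old_microcode to bit 8, and in the feature word keeps INDIRECT_THUNK_ITS at bit 9 while APX shifts to bit 10. A brief usage sketch with existing helpers (the message is illustrative):

	/* X86_BUG_ITS_NATIVE_ONLY means VMX guests are unaffected; sketch only */
	if (boot_cpu_has_bug(X86_BUG_ITS) && !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY))
		pr_info("ITS: indirect branches around VM-exit are affected too\n");
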
Simple merge
Simple merge
Simple merge
Simple merge
index 48fd04e9011483cfa180197e0bbaed3c46455637,ddbc303e41e367afe59c5d159c748c541060a778..a9a32d991832f33a4f0bbd52a8131824cdde98ac
@@@ -1,39 -1,14 +1,17 @@@
  // SPDX-License-Identifier: GPL-2.0-only
  #define pr_fmt(fmt) "SMP alternatives: " fmt
  
- #include <linux/module.h>
- #include <linux/sched.h>
+ #include <linux/mmu_context.h>
  #include <linux/perf_event.h>
- #include <linux/mutex.h>
- #include <linux/list.h>
- #include <linux/stringify.h>
- #include <linux/highmem.h>
- #include <linux/mm.h>
  #include <linux/vmalloc.h>
  #include <linux/memory.h>
- #include <linux/stop_machine.h>
- #include <linux/slab.h>
- #include <linux/kdebug.h>
- #include <linux/kprobes.h>
- #include <linux/mmu_context.h>
- #include <linux/bsearch.h>
- #include <linux/sync_core.h>
 +#include <linux/execmem.h>
  #include <asm/text-patching.h>
- #include <asm/alternative.h>
- #include <asm/sections.h>
- #include <asm/mce.h>
- #include <asm/nmi.h>
- #include <asm/cacheflush.h>
- #include <asm/tlbflush.h>
  #include <asm/insn.h>
- #include <asm/io.h>
- #include <asm/fixmap.h>
- #include <asm/paravirt.h>
- #include <asm/asm-prototypes.h>
- #include <asm/cfi.h>
+ #include <asm/nmi.h>
 +#include <asm/ibt.h>
 +#include <asm/set_memory.h>
  
  int __read_mostly alternatives_patched;
  
index 8596ce85026c0dd8aeb79871350f71a1e2c9b113,a938fb4add658b98f9d2847ee962be19b0d9dc69..f0505c8de0586643e3de64f8267b1b0300c153e7
  
  #include "cpu.h"
  
+ /*
+  * Speculation Vulnerability Handling
+  *
+  * Each vulnerability is handled with the following functions:
+  *   <vuln>_select_mitigation() -- Selects a mitigation to use.  This should
+  *                               take into account all relevant command line
+  *                               options.
+  *   <vuln>_update_mitigation() -- This is called after all vulnerabilities have
+  *                               selected a mitigation, in case the selection
+  *                               may want to change based on other choices
+  *                               made.  This function is optional.
+  *   <vuln>_apply_mitigation() -- Enable the selected mitigation.
+  *
+  * The compile-time mitigation in all cases should be AUTO.  An explicit
+  * command-line option can override AUTO.  If no such option is
+  * provided, <vuln>_select_mitigation() will override AUTO to the best
+  * mitigation option.
+  */
  static void __init spectre_v1_select_mitigation(void);
+ static void __init spectre_v1_apply_mitigation(void);
  static void __init spectre_v2_select_mitigation(void);
+ static void __init spectre_v2_update_mitigation(void);
+ static void __init spectre_v2_apply_mitigation(void);
  static void __init retbleed_select_mitigation(void);
+ static void __init retbleed_update_mitigation(void);
+ static void __init retbleed_apply_mitigation(void);
  static void __init spectre_v2_user_select_mitigation(void);
+ static void __init spectre_v2_user_update_mitigation(void);
+ static void __init spectre_v2_user_apply_mitigation(void);
  static void __init ssb_select_mitigation(void);
+ static void __init ssb_apply_mitigation(void);
  static void __init l1tf_select_mitigation(void);
+ static void __init l1tf_apply_mitigation(void);
  static void __init mds_select_mitigation(void);
- static void __init md_clear_update_mitigation(void);
- static void __init md_clear_select_mitigation(void);
+ static void __init mds_update_mitigation(void);
+ static void __init mds_apply_mitigation(void);
  static void __init taa_select_mitigation(void);
+ static void __init taa_update_mitigation(void);
+ static void __init taa_apply_mitigation(void);
  static void __init mmio_select_mitigation(void);
+ static void __init mmio_update_mitigation(void);
+ static void __init mmio_apply_mitigation(void);
+ static void __init rfds_select_mitigation(void);
+ static void __init rfds_update_mitigation(void);
+ static void __init rfds_apply_mitigation(void);
  static void __init srbds_select_mitigation(void);
+ static void __init srbds_apply_mitigation(void);
  static void __init l1d_flush_select_mitigation(void);
  static void __init srso_select_mitigation(void);
+ static void __init srso_update_mitigation(void);
+ static void __init srso_apply_mitigation(void);
  static void __init gds_select_mitigation(void);
+ static void __init gds_apply_mitigation(void);
 +static void __init its_select_mitigation(void);
+ static void __init bhi_select_mitigation(void);
+ static void __init bhi_update_mitigation(void);
+ static void __init bhi_apply_mitigation(void);
  
  /* The base value of the SPEC_CTRL MSR without task-specific bits set */
  u64 x86_spec_ctrl_base;
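
The comment block above defines the select/update/apply contract that the rest of this file is converted to. A minimal sketch of the pattern for a made-up vulnerability "foo" (every foo-named identifier is illustrative; the helpers and spectre_v2_enabled are real symbols used elsewhere in this diff):

	enum foo_mitigation { FOO_MITIGATION_AUTO, FOO_MITIGATION_OFF, FOO_MITIGATION_FULL };
	static enum foo_mitigation foo_mitigation __ro_after_init = FOO_MITIGATION_AUTO;

	static void __init foo_select_mitigation(void)
	{
		/* Resolve AUTO unless a command line option already chose. */
		if (foo_mitigation == FOO_MITIGATION_AUTO)
			foo_mitigation = boot_cpu_has_bug(X86_BUG_FOO) ?
					 FOO_MITIGATION_FULL : FOO_MITIGATION_OFF;
	}

	static void __init foo_update_mitigation(void)
	{
		/* Optional pass: revisit once every vulnerability has selected. */
		if (spectre_v2_enabled == SPECTRE_V2_NONE)
			foo_mitigation = FOO_MITIGATION_OFF;
	}

	static void __init foo_apply_mitigation(void)
	{
		/* Only now flip the switches for the final choice. */
		if (foo_mitigation == FOO_MITIGATION_FULL)
			setup_force_cpu_cap(X86_FEATURE_FOO_FLUSH); /* illustrative cap */
	}
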
@@@ -172,22 -204,56 +213,57 @@@ void __init cpu_select_mitigations(void)
        spectre_v2_user_select_mitigation();
        ssb_select_mitigation();
        l1tf_select_mitigation();
-       md_clear_select_mitigation();
+       mds_select_mitigation();
+       taa_select_mitigation();
+       mmio_select_mitigation();
+       rfds_select_mitigation();
        srbds_select_mitigation();
        l1d_flush_select_mitigation();
-       /*
-        * srso_select_mitigation() depends and must run after
-        * retbleed_select_mitigation().
-        */
        srso_select_mitigation();
        gds_select_mitigation();
+       bhi_select_mitigation();
 +      its_select_mitigation();
+       /*
+        * After mitigations are selected, some may need to update their
+        * choices.
+        */
+       spectre_v2_update_mitigation();
+       /*
+        * retbleed_update_mitigation() relies on the state set by
+        * spectre_v2_update_mitigation(); specifically it wants to know about
+        * spectre_v2=ibrs.
+        */
+       retbleed_update_mitigation();
+       /*
+        * spectre_v2_user_update_mitigation() depends on
+        * retbleed_update_mitigation(), specifically the STIBP
+        * selection is forced for UNRET or IBPB.
+        */
+       spectre_v2_user_update_mitigation();
+       mds_update_mitigation();
+       taa_update_mitigation();
+       mmio_update_mitigation();
+       rfds_update_mitigation();
+       bhi_update_mitigation();
+       /* srso_update_mitigation() depends on retbleed_update_mitigation(). */
+       srso_update_mitigation();
+       spectre_v1_apply_mitigation();
+       spectre_v2_apply_mitigation();
+       retbleed_apply_mitigation();
+       spectre_v2_user_apply_mitigation();
+       ssb_apply_mitigation();
+       l1tf_apply_mitigation();
+       mds_apply_mitigation();
+       taa_apply_mitigation();
+       mmio_apply_mitigation();
+       rfds_apply_mitigation();
+       srbds_apply_mitigation();
+       srso_apply_mitigation();
+       gds_apply_mitigation();
+       bhi_apply_mitigation();
  }
  
  /*
@@@ -1173,169 -1323,8 +1333,147 @@@ static void __init retbleed_apply_mitigation(void)
        if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
            (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
                cpu_smt_disable(false);
-       /*
-        * Let IBRS trump all on Intel without affecting the effects of the
-        * retbleed= cmdline option except for call depth based stuffing
-        */
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
-               switch (spectre_v2_enabled) {
-               case SPECTRE_V2_IBRS:
-                       retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
-                       break;
-               case SPECTRE_V2_EIBRS:
-               case SPECTRE_V2_EIBRS_RETPOLINE:
-               case SPECTRE_V2_EIBRS_LFENCE:
-                       retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
-                       break;
-               default:
-                       if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
-                               pr_err(RETBLEED_INTEL_MSG);
-               }
-       }
-       pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
  }
  
 +#undef pr_fmt
 +#define pr_fmt(fmt)     "ITS: " fmt
 +
 +enum its_mitigation_cmd {
 +      ITS_CMD_OFF,
 +      ITS_CMD_ON,
 +      ITS_CMD_VMEXIT,
 +      ITS_CMD_RSB_STUFF,
 +};
 +
 +enum its_mitigation {
 +      ITS_MITIGATION_OFF,
 +      ITS_MITIGATION_VMEXIT_ONLY,
 +      ITS_MITIGATION_ALIGNED_THUNKS,
 +      ITS_MITIGATION_RETPOLINE_STUFF,
 +};
 +
 +static const char * const its_strings[] = {
 +      [ITS_MITIGATION_OFF]                    = "Vulnerable",
 +      [ITS_MITIGATION_VMEXIT_ONLY]            = "Mitigation: Vulnerable, KVM: Not affected",
 +      [ITS_MITIGATION_ALIGNED_THUNKS]         = "Mitigation: Aligned branch/return thunks",
 +      [ITS_MITIGATION_RETPOLINE_STUFF]        = "Mitigation: Retpolines, Stuffing RSB",
 +};
 +
 +static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS;
 +
 +static enum its_mitigation_cmd its_cmd __ro_after_init =
 +      IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_CMD_ON : ITS_CMD_OFF;
 +
 +static int __init its_parse_cmdline(char *str)
 +{
 +      if (!str)
 +              return -EINVAL;
 +
 +      if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
 +              pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
 +              return 0;
 +      }
 +
 +      if (!strcmp(str, "off")) {
 +              its_cmd = ITS_CMD_OFF;
 +      } else if (!strcmp(str, "on")) {
 +              its_cmd = ITS_CMD_ON;
 +      } else if (!strcmp(str, "force")) {
 +              its_cmd = ITS_CMD_ON;
 +              setup_force_cpu_bug(X86_BUG_ITS);
 +      } else if (!strcmp(str, "vmexit")) {
 +              its_cmd = ITS_CMD_VMEXIT;
 +      } else if (!strcmp(str, "stuff")) {
 +              its_cmd = ITS_CMD_RSB_STUFF;
 +      } else {
 +              pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
 +      }
 +
 +      return 0;
 +}
 +early_param("indirect_target_selection", its_parse_cmdline);
 +
 +static void __init its_select_mitigation(void)
 +{
 +      enum its_mitigation_cmd cmd = its_cmd;
 +
 +      if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
 +              its_mitigation = ITS_MITIGATION_OFF;
 +              return;
 +      }
 +
 +      /* Retpoline+CDT mitigates ITS, bail out */
 +      if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
 +          boot_cpu_has(X86_FEATURE_CALL_DEPTH)) {
 +              its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
 +              goto out;
 +      }
 +
 +      /* Exit early to avoid irrelevant warnings */
 +      if (cmd == ITS_CMD_OFF) {
 +              its_mitigation = ITS_MITIGATION_OFF;
 +              goto out;
 +      }
 +      if (spectre_v2_enabled == SPECTRE_V2_NONE) {
 +              pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
 +              its_mitigation = ITS_MITIGATION_OFF;
 +              goto out;
 +      }
 +      if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
 +          !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
 +              pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
 +              its_mitigation = ITS_MITIGATION_OFF;
 +              goto out;
 +      }
 +      if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
 +              pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
 +              its_mitigation = ITS_MITIGATION_OFF;
 +              goto out;
 +      }
 +      if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
 +              pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
 +              its_mitigation = ITS_MITIGATION_OFF;
 +              goto out;
 +      }
 +
 +      if (cmd == ITS_CMD_RSB_STUFF &&
 +          (!boot_cpu_has(X86_FEATURE_RETPOLINE) || !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))) {
 +              pr_err("RSB stuff mitigation not supported, using default\n");
 +              cmd = ITS_CMD_ON;
 +      }
 +
 +      switch (cmd) {
 +      case ITS_CMD_OFF:
 +              its_mitigation = ITS_MITIGATION_OFF;
 +              break;
 +      case ITS_CMD_VMEXIT:
 +              if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) {
 +                      its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
 +                      goto out;
 +              }
 +              fallthrough;
 +      case ITS_CMD_ON:
 +              its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
 +              if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
 +                      setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
 +              setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 +              set_return_thunk(its_return_thunk);
 +              break;
 +      case ITS_CMD_RSB_STUFF:
 +              its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
 +              setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 +              setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
 +              set_return_thunk(call_depth_return_thunk);
 +              if (retbleed_mitigation == RETBLEED_MITIGATION_NONE) {
 +                      retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
 +                      pr_info("Retbleed mitigation updated to stuffing\n");
 +              }
 +              break;
 +      }
 +out:
 +      pr_info("%s\n", its_strings[its_mitigation]);
 +}
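
Note that its_select_mitigation() installs its thunks through set_return_thunk() rather than assigning x86_return_thunk directly; the srso hunk further down makes the same conversion. A plausible minimal shape for that helper (the overwrite diagnostic is an assumption, not taken from this diff):

	static void __init set_return_thunk(void (*thunk)(void))
	{
		if (x86_return_thunk != __x86_return_thunk)
			pr_warn("x86/bugs: return thunk changed\n");	/* assumed */
		x86_return_thunk = thunk;
	}
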
 +
  #undef pr_fmt
  #define pr_fmt(fmt)     "Spectre V2 : " fmt
  
@@@ -2833,8 -2806,52 +2955,52 @@@ static void __init srso_apply_mitigation(void)
        if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
                setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
  
-       if (srso_mitigation != SRSO_MITIGATION_NONE)
-               pr_info("%s\n", srso_strings[srso_mitigation]);
+       if (srso_mitigation == SRSO_MITIGATION_NONE) {
+               if (boot_cpu_has(X86_FEATURE_SBPB))
+                       x86_pred_cmd = PRED_CMD_SBPB;
+               return;
+       }
+       switch (srso_mitigation) {
+       case SRSO_MITIGATION_SAFE_RET:
+       case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED:
+               /*
+                * Enable the return thunk for generated code
+                * like ftrace, static_call, etc.
+                */
+               setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+               setup_force_cpu_cap(X86_FEATURE_UNRET);
+               if (boot_cpu_data.x86 == 0x19) {
+                       setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
 -                      x86_return_thunk = srso_alias_return_thunk;
++                      set_return_thunk(srso_alias_return_thunk);
+               } else {
+                       setup_force_cpu_cap(X86_FEATURE_SRSO);
 -                      x86_return_thunk = srso_return_thunk;
++                      set_return_thunk(srso_return_thunk);
+               }
+               break;
+       case SRSO_MITIGATION_IBPB:
+               setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+               /*
+                * IBPB on entry already obviates the need for
+                * software-based untraining so clear those in case some
+                * other mitigation like Retbleed has selected them.
+                */
+               setup_clear_cpu_cap(X86_FEATURE_UNRET);
+               setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+               fallthrough;
+       case SRSO_MITIGATION_IBPB_ON_VMEXIT:
+               setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+               /*
+                * There is no need for RSB filling: entry_ibpb() ensures
+                * all predictions, including the RSB, are invalidated,
+                * regardless of IBPB implementation.
+                */
+               setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+               break;
+       default:
+               break;
+       }
  }
  
  #undef pr_fmt
@@@ -2949,11 -2963,14 +3112,19 @@@ static ssize_t rfds_show_state(char *buf)
        return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
  }
  
 +static ssize_t its_show_state(char *buf)
 +{
 +      return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
 +}
 +
+ static ssize_t old_microcode_show_state(char *buf)
+ {
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+               return sysfs_emit(buf, "Unknown: running under hypervisor\n");
+
+       return sysfs_emit(buf, "Vulnerable\n");
+ }
+
  static char *stibp_state(void)
  {
        if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
@@@ -3136,9 -3152,9 +3306,12 @@@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug)
        case X86_BUG_RFDS:
                return rfds_show_state(buf);
  
 +      case X86_BUG_ITS:
 +              return its_show_state(buf);
 +
+       case X86_BUG_OLD_MICROCODE:
+               return old_microcode_show_state(buf);
        default:
                break;
        }
@@@ -3219,10 -3232,10 +3389,15 @@@ ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
        return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
  }
  
 +ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
 +{
 +      return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
 +}
++
+ ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+       return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE);
+ }
  #endif
  
  void __warn_thunk(void)
index 0ff057ff11ce93acc08ffbccec2b9b9e8b42a3b8,34efb9d2519a91bbda6f66478df7aa8c6def6a37..114aaaf6ae8a36c3b14c44d169d8661045085637
@@@ -1325,32 -1320,42 +1327,68 @@@ static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
        return cpu_matches(cpu_vuln_blacklist, RFDS);
  }
  
 +static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
 +{
 +      /* The "immunity" bit trumps everything else: */
 +      if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
 +              return false;
 +      if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 +              return false;
 +
 +      /* None of the affected CPUs have BHI_CTRL */
 +      if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
 +              return false;
 +
 +      /*
 +       * If a VMM did not expose ITS_NO, assume that a guest could
 +       * be running on a vulnerable hardware or may migrate to such
 +       * hardware.
 +       */
 +      if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 +              return true;
 +
 +      if (cpu_matches(cpu_vuln_blacklist, ITS))
 +              return true;
 +
 +      return false;
 +}
 +
+ static struct x86_cpu_id cpu_latest_microcode[] = {
+ #include "microcode/intel-ucode-defs.h"
+       {}
+ };
+ static bool __init cpu_has_old_microcode(void)
+ {
+       const struct x86_cpu_id *m = x86_match_cpu(cpu_latest_microcode);
+       /* Give unknown CPUs a pass: */
+       if (!m) {
+               /* Intel CPUs should be in the list. Warn if not: */
+               if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+                       pr_info("x86/CPU: Model not found in latest microcode list\n");
+               return false;
+       }
+       /*
+        * Hosts usually lie to guests with a super high microcode
+        * version. Just ignore what hosts tell guests:
+        */
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+               return false;
+       /* Consider all debug microcode to be old: */
+       if (boot_cpu_data.microcode & BIT(31))
+               return true;
+       /* Give new microcode a pass: */
+       if (boot_cpu_data.microcode >= m->driver_data)
+               return false;
+       /* Uh oh, too old: */
+       return true;
+ }
  static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
  {
        u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
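
cpu_has_old_microcode() above compares boot_cpu_data.microcode against a generated per-model table of latest known revisions. One plausible shape for an entry in the included "microcode/intel-ucode-defs.h" header, assuming the generated file uses the stock x86_cpu_id matcher macros (the model and revision here are made up for illustration):

	X86_MATCH_VFM(INTEL_SKYLAKE_X, 0x02007108),	/* latest revision carried in driver_data */
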
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 50651435577c8f52fedd86e0b6c9edec84c2545d,1c4359366cd7602974c1b74519e3cca579af246a..381bf3b2163fac04e51ed7993046363f8b57be0b
@@@ -600,7 -600,7 +600,8 @@@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
  CPU_SHOW_VULN_FALLBACK(gds);
  CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
  CPU_SHOW_VULN_FALLBACK(ghostwrite);
 +CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
+ CPU_SHOW_VULN_FALLBACK(old_microcode);
  
  static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
  static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@@ -617,7 -617,7 +618,8 @@@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
  static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
  static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
  static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
 +static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
+ static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
  
  static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_meltdown.attr,
        &dev_attr_gather_data_sampling.attr,
        &dev_attr_reg_file_data_sampling.attr,
        &dev_attr_ghostwrite.attr,
 +      &dev_attr_indirect_target_selection.attr,
+       &dev_attr_old_microcode.attr,
        NULL
  };
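
With these attributes wired up, the two new entries appear next to the existing ones under /sys/devices/system/cpu/vulnerabilities/, returning the strings assembled in bugs.c, e.g. (actual values depend on the CPU):

	indirect_target_selection:	Mitigation: Aligned branch/return thunks
	old_microcode:			Vulnerable
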
  
Simple merge
Simple merge
Simple merge
index d96bb3e202eeff7af03ce744dcaff31390eef211,66fdc74f13ef48bcefce2fd2ddf1e4b8da801d81..0d4c0de89a0028a325893584717501ba371c9e1a
@@@ -389,10 -389,9 +389,10 @@@ static inline int amd_pstate_cppc_enabl
  static int msr_init_perf(struct amd_cpudata *cpudata)
  {
        union perf_cached perf = READ_ONCE(cpudata->perf);
 -      u64 cap1, numerator;
 +      u64 cap1, numerator, cppc_req;
 +      u8 min_perf;
  
-       int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
+       int ret = rdmsrq_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
                                     &cap1);
        if (ret)
                return ret;
        if (ret)
                return ret;
  
-       ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
++      ret = rdmsrq_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &cppc_req);
 +      if (ret)
 +              return ret;
 +
 +      WRITE_ONCE(cpudata->cppc_req_cached, cppc_req);
 +      min_perf = FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cppc_req);
 +
 +      /*
 +       * Clear out the min_perf part to check if the rest of the MSR is 0, if yes, this is an
 +       * indication that the min_perf value is the one specified through the BIOS option
 +       */
 +      cppc_req &= ~(AMD_CPPC_MIN_PERF_MASK);
 +
 +      if (!cppc_req)
 +              perf.bios_min_perf = min_perf;
 +
        perf.highest_perf = numerator;
        perf.max_limit_perf = numerator;
        perf.min_limit_perf = FIELD_GET(AMD_CPPC_LOWEST_PERF_MASK, cap1);
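
The BIOS-provided-minimum detection above works by masking: if clearing the min_perf field leaves the cached request MSR all zero, the only thing ever written to it was the BIOS-seeded minimum. A worked example with an illustrative value:

	u64 cppc_req = FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, 40);		/* only min_perf set */
	u8  min_perf = FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cppc_req);	/* = 40 */

	cppc_req &= ~(AMD_CPPC_MIN_PERF_MASK);
	if (!cppc_req)			/* nothing else set: 40 came from the BIOS option */
		perf.bios_min_perf = min_perf;
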
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 3b9e831cf0ef3264025b310c11c294788d7f0e9a,f2c2bd257e3905ff07d4a32951b40d795517e7ee..ad29538f5e70667231c36840235e87753cb078a9
@@@ -198,9 -183,9 +198,9 @@@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
        if (wx->mac.type == wx_mac_aml)
                goto skip_sp_irq;
  
 -      txgbe->misc.nirqs = 1;
 +      txgbe->misc.nirqs = TXGBE_IRQ_MAX;
-       txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
-                                                  &txgbe_misc_irq_domain_ops, txgbe);
+       txgbe->misc.domain = irq_domain_create_simple(NULL, txgbe->misc.nirqs, 0,
+                                                     &txgbe_misc_irq_domain_ops, txgbe);
        if (!txgbe->misc.domain)
                return -ENOMEM;
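
This hunk is the irq_domain API conversion: irq_domain_create_simple() takes a struct fwnode_handle * where irq_domain_add_simple() took a struct device_node *; since this caller passes NULL, only the call name changes. The new signature, for reference:

	struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode,
						    unsigned int size,
						    unsigned int first_irq,
						    const struct irq_domain_ops *ops,
						    void *host_data);
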
  
Simple merge
Simple merge
Simple merge
Simple merge
index 81931e808bbc812fcde5cee5489d6c117dcc2859,c9a1b1ed42244537ed508c8b650263c878f799fe..62bf9547631ed133629a23a25287af45f8780f50
@@@ -9,9 -9,8 +9,9 @@@
  
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  
- #include <asm/amd_hsmp.h>
+ #include <asm/amd/hsmp.h>
  
 +#include <linux/acpi.h>
  #include <linux/build_bug.h>
  #include <linux/device.h>
  #include <linux/module.h>
Simple merge
Simple merge
index 3aa955102b349a97d9f5776799dc57e74299cff0,1f5cfc4cc04f5273f063da15b2ce710b16c5b8a5..a3dca7f10628145ba10f601842ce4cd3e54f3c1f
@@@ -78,8 -78,8 +78,10 @@@ extern ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf);
  extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
                                               struct device_attribute *attr, char *buf);
  extern ssize_t cpu_show_ghostwrite(struct device *dev, struct device_attribute *attr, char *buf);
 +extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
 +                                                struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_old_microcode(struct device *dev,
+                                     struct device_attribute *attr, char *buf);
  
  extern __printf(4, 5)
  struct device *cpu_device_create(struct device *parent, void *drvdata,
index 15808cad2bc1a2de2a42845641d263a741968eb1,32ba5126e2214bdf2c6961941f00ac7e7ed3accc..168c42be93381e323102c77a1b546f4b16bcef05
  #endif
  #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
  
 -#define INIT_PASID    0
  
  struct address_space;
+ struct futex_private_hash;
  struct mem_cgroup;
  
  /*
Simple merge
Simple merge
Simple merge
diff --cc init/Kconfig
Simple merge
diff --cc init/main.c
Simple merge
diff --cc kernel/fork.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc mm/nommu.c
Simple merge
diff --cc mm/vmalloc.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge