* To prevent this from racing with the manipulation of the task's FPSIMD state
  * from task context and thereby corrupting the state, it is necessary to
  * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
- * flag with local_bh_disable() unless softirqs are already masked.
+ * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to
+ * run but prevent them from using FPSIMD.
  *
  * For a certain task, the sequence may look something like this:
  * - the task gets scheduled in; if both the task's fpsimd_cpu field
 
 #endif /* ! CONFIG_ARM64_SVE */
 
+DEFINE_PER_CPU(bool, fpsimd_context_busy);
+EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);
+
+static void __get_cpu_fpsimd_context(void)
+{
+       bool busy = __this_cpu_xchg(fpsimd_context_busy, true);
+
+       WARN_ON(busy);
+}
+
+/*
+ * Claim ownership of the CPU FPSIMD context for use by the calling context.
+ *
+ * The caller may freely manipulate the FPSIMD context metadata until
+ * put_cpu_fpsimd_context() is called.
+ *
+ * The double-underscore version must only be called if you know the task
+ * can't be preempted.
+ */
+static void get_cpu_fpsimd_context(void)
+{
+       preempt_disable();
+       __get_cpu_fpsimd_context();
+}
+
+static void __put_cpu_fpsimd_context(void)
+{
+       bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
+
+       WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
+}
+
+/*
+ * Release the CPU FPSIMD context.
+ *
+ * Must be called from a context in which get_cpu_fpsimd_context() was
+ * previously called, with no call to put_cpu_fpsimd_context() in the
+ * meantime.
+ */
+static void put_cpu_fpsimd_context(void)
+{
+       __put_cpu_fpsimd_context();
+       preempt_enable();
+}
+
+static bool have_cpu_fpsimd_context(void)
+{
+       return !preemptible() && __this_cpu_read(fpsimd_context_busy);
+}
+
 /*
  * Call __sve_free() directly only if you know task can't be scheduled
  * or preempted.
  * This function should be called only when the FPSIMD/SVE state in
  * thread_struct is known to be up to date, when preparing to enter
  * userspace.
- *
- * Softirqs (and preemption) must be disabled.
  */
 static void task_fpsimd_load(void)
 {
-       WARN_ON(!in_softirq() && !irqs_disabled());
+       WARN_ON(!have_cpu_fpsimd_context());
 
        if (system_supports_sve() && test_thread_flag(TIF_SVE))
                sve_load_state(sve_pffr(&current->thread),
 /*
  * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
  * date with respect to the CPU registers.
- *
- * Softirqs (and preemption) must be disabled.
  */
 static void fpsimd_save(void)
 {
                this_cpu_ptr(&fpsimd_last_state);
        /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
 
-       WARN_ON(!in_softirq() && !irqs_disabled());
+       WARN_ON(!have_cpu_fpsimd_context());
 
        if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
                if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
  * task->thread.sve_state.
  *
  * Task can be a non-runnable task, or current.  In the latter case,
- * softirqs (and preemption) must be disabled.
+ * the caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
  * task->thread.sve_state must point to at least sve_state_size(task)
  * bytes of allocated kernel memory.
  * task->thread.uw.fpsimd_state must be up to date before calling this
  * task->thread.uw.fpsimd_state.
  *
  * Task can be a non-runnable task, or current.  In the latter case,
- * softirqs (and preemption) must be disabled.
+ * the caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
  * task->thread.sve_state must point to at least sve_state_size(task)
  * bytes of allocated kernel memory.
  * task->thread.sve_state must be up to date before calling this function.
         * non-SVE thread.
         */
        if (task == current) {
-               local_bh_disable();
+               get_cpu_fpsimd_context();
 
                fpsimd_save();
        }
                sve_to_fpsimd(task);
 
        if (task == current)
-               local_bh_enable();
+               put_cpu_fpsimd_context();
 
        /*
         * Force reallocation of task SVE state to the correct size
 
        sve_alloc(current);
 
-       local_bh_disable();
+       get_cpu_fpsimd_context();
 
        fpsimd_save();
 
        if (test_and_set_thread_flag(TIF_SVE))
                WARN_ON(1); /* SVE access shouldn't have trapped */
 
-       local_bh_enable();
+       put_cpu_fpsimd_context();
 }
 
 /*
        if (!system_supports_fpsimd())
                return;
 
+       __get_cpu_fpsimd_context();
+
        /* Save unsaved fpsimd state, if any: */
        fpsimd_save();
 
 
        update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
                               wrong_task || wrong_cpu);
+
+       __put_cpu_fpsimd_context();
 }
 
 void fpsimd_flush_thread(void)
        if (!system_supports_fpsimd())
                return;
 
-       local_bh_disable();
+       get_cpu_fpsimd_context();
 
        fpsimd_flush_task_state(current);
        memset(&current->thread.uw.fpsimd_state, 0,
                        current->thread.sve_vl_onexec = 0;
        }
 
-       local_bh_enable();
+       put_cpu_fpsimd_context();
 }
 
 /*
        if (!system_supports_fpsimd())
                return;
 
-       local_bh_disable();
+       get_cpu_fpsimd_context();
        fpsimd_save();
-       local_bh_enable();
+       put_cpu_fpsimd_context();
 }
 
 /*
 
 /*
  * Associate current's FPSIMD context with this cpu
- * Preemption must be disabled when calling this function.
+ * The caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
  */
 void fpsimd_bind_task_to_cpu(void)
 {
        if (!system_supports_fpsimd())
                return;
 
-       local_bh_disable();
+       get_cpu_fpsimd_context();
 
        if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
                task_fpsimd_load();
                fpsimd_bind_task_to_cpu();
        }
 
-       local_bh_enable();
+       put_cpu_fpsimd_context();
 }
 
 /*
        if (!system_supports_fpsimd())
                return;
 
-       local_bh_disable();
+       get_cpu_fpsimd_context();
 
        current->thread.uw.fpsimd_state = *state;
        if (system_supports_sve() && test_thread_flag(TIF_SVE))
 
        clear_thread_flag(TIF_FOREIGN_FPSTATE);
 
-       local_bh_enable();
+       put_cpu_fpsimd_context();
 }
 
 /*
 
 /*
  * Invalidate any task's FPSIMD state that is present on this cpu.
- * This function must be called with softirqs disabled.
+ * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
+ * before calling this function.
  */
 static void fpsimd_flush_cpu_state(void)
 {
 
 /*
  * Save the FPSIMD state to memory and invalidate cpu view.
- * This function must be called with softirqs (and preemption) disabled.
+ * This function must be called with preemption disabled.
  */
 void fpsimd_save_and_flush_cpu_state(void)
 {
+       WARN_ON(preemptible());
+       __get_cpu_fpsimd_context();
        fpsimd_save();
        fpsimd_flush_cpu_state();
+       __put_cpu_fpsimd_context();
 }
 
 #ifdef CONFIG_KERNEL_MODE_NEON
 
-DEFINE_PER_CPU(bool, kernel_neon_busy);
-EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);
-
 /*
  * Kernel-side NEON support functions
  */
 
        BUG_ON(!may_use_simd());
 
-       local_bh_disable();
-
-       __this_cpu_write(kernel_neon_busy, true);
+       get_cpu_fpsimd_context();
 
        /* Save unsaved fpsimd state, if any: */
        fpsimd_save();
 
        /* Invalidate any task state remaining in the fpsimd regs: */
        fpsimd_flush_cpu_state();
-
-       preempt_disable();
-
-       local_bh_enable();
 }
 EXPORT_SYMBOL(kernel_neon_begin);
 
  */
 void kernel_neon_end(void)
 {
-       bool busy;
-
        if (!system_supports_fpsimd())
                return;
 
-       busy = __this_cpu_xchg(kernel_neon_busy, false);
-       WARN_ON(!busy); /* No matching kernel_neon_begin()? */
-
-       preempt_enable();
+       put_cpu_fpsimd_context();
 }
 EXPORT_SYMBOL(kernel_neon_end);