if (!system_supports_sve())
                return;
 
-       vq = sve_vq_from_vl(task_get_sve_vl(task));
+       vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
        __fpsimd_to_sve(sst, fst, vq);
 }
 
  */
 static void sve_to_fpsimd(struct task_struct *task)
 {
-       unsigned int vq;
+       unsigned int vq, vl;
        void const *sst = task->thread.sve_state;
        struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
        unsigned int i;
        __uint128_t const *p;

        if (!system_supports_sve())
                return;
 
-       vq = sve_vq_from_vl(task_get_sve_vl(task));
+       vl = thread_get_cur_vl(&task->thread);
+       vq = sve_vq_from_vl(vl);
        for (i = 0; i < SVE_NUM_ZREGS; ++i) {
                p = (__uint128_t const *)ZREG(sst, vq, i);
                fst->vregs[i] = arm64_le128_to_cpu(*p);
        }
 }
 
 
+/*
+ * Force the FPSIMD state shared with SVE to be updated in the SVE state
+ * even if the SVE state is the current active state.
+ *
+ * This should only be called by ptrace.  task must be non-runnable.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ */
+void fpsimd_force_sync_to_sve(struct task_struct *task)
+{
+       fpsimd_to_sve(task);
+}
+
 /*
  * Ensure that task->thread.sve_state is up to date with respect to
  * the user task, irrespective of whether SVE is in use or not.
  */
 void fpsimd_sync_to_sve(struct task_struct *task)
 {
-       if (!test_tsk_thread_flag(task, TIF_SVE))
+       if (!test_tsk_thread_flag(task, TIF_SVE) &&
+           !thread_sm_enabled(&task->thread))
                fpsimd_to_sve(task);
 }
 
  */
 void sve_sync_to_fpsimd(struct task_struct *task)
 {
-       if (test_tsk_thread_flag(task, TIF_SVE))
+       if (test_tsk_thread_flag(task, TIF_SVE) ||
+           thread_sm_enabled(&task->thread))
                sve_to_fpsimd(task);
 }
 
        if (!test_tsk_thread_flag(task, TIF_SVE))
                return;
 
-       vq = sve_vq_from_vl(task_get_sve_vl(task));
+       vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
 
        memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
        __fpsimd_to_sve(sst, fst, vq);
        /*
         * To ensure the FPSIMD bits of the SVE vector registers are preserved,
         * write any live register state back to task_struct, and convert to a
-        * regular FPSIMD thread.  Since the vector length can only be changed
-        * with a syscall we can't be in streaming mode while reconfiguring.
+        * regular FPSIMD thread.
         */
        if (task == current) {
                get_cpu_fpsimd_context();
 
 #ifdef CONFIG_ARM64_SVE
 
 static void sve_init_header_from_task(struct user_sve_header *header,
-                                     struct task_struct *target)
+                                     struct task_struct *target,
+                                     enum vec_type type)
 {
        unsigned int vq;
+       bool active;
+       bool fpsimd_only;
+       enum vec_type task_type;
 
        memset(header, 0, sizeof(*header));
 
-       header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
-               SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
-       if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
-               header->flags |= SVE_PT_VL_INHERIT;
+       /* Check if the requested registers are active for the task */
+       if (thread_sm_enabled(&target->thread))
+               task_type = ARM64_VEC_SME;
+       else
+               task_type = ARM64_VEC_SVE;
+       active = (task_type == type);
+
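+       /*
+        * Per-type handling: record VL inheritance and whether only the
+        * shared FPSIMD subset of state is stored for this vector type.
+        */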
+       switch (type) {
+       case ARM64_VEC_SVE:
+               if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
+                       header->flags |= SVE_PT_VL_INHERIT;
+               fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
+               break;
+       case ARM64_VEC_SME:
+               if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
+                       header->flags |= SVE_PT_VL_INHERIT;
+               fpsimd_only = false;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               return;
+       }
 
-       header->vl = task_get_sve_vl(target);
+       if (active) {
+               if (fpsimd_only) {
+                       header->flags |= SVE_PT_REGS_FPSIMD;
+               } else {
+                       header->flags |= SVE_PT_REGS_SVE;
+               }
+       }
+
+       header->vl = task_get_vl(target, type);
        vq = sve_vq_from_vl(header->vl);
 
-       header->max_vl = sve_max_vl();
+       header->max_vl = vec_max_vl(type);
        header->size = SVE_PT_SIZE(vq, header->flags);
        header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
                                      SVE_PT_REGS_SVE);
 }
 
 static unsigned int sve_size_from_header(struct user_sve_header const *header)
 {
        return ALIGN(header->size, SVE_VQ_BYTES);
 }
 
-static int sve_get(struct task_struct *target,
-                  const struct user_regset *regset,
-                  struct membuf to)
+static int sve_get_common(struct task_struct *target,
+                         const struct user_regset *regset,
+                         struct membuf to,
+                         enum vec_type type)
 {
        struct user_sve_header header;
        unsigned int vq;
        unsigned long start, end;
 
-       if (!system_supports_sve())
-               return -EINVAL;
-
        /* Header */
-       sve_init_header_from_task(&header, target);
+       sve_init_header_from_task(&header, target, type);
        vq = sve_vq_from_vl(header.vl);
 
        membuf_write(&to, &header, sizeof(header));
        if (target == current)
                fpsimd_preserve_current_state();
 
-       /* Registers: FPSIMD-only case */
-
        BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
-       if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
+       BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
+
+       switch ((header.flags & SVE_PT_REGS_MASK)) {
+       case SVE_PT_REGS_FPSIMD:
                return __fpr_get(target, regset, to);
 
-       /* Otherwise: full SVE case */
+       case SVE_PT_REGS_SVE:
+               start = SVE_PT_SVE_OFFSET;
+               end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
+               membuf_write(&to, target->thread.sve_state, end - start);
 
-       BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
-       start = SVE_PT_SVE_OFFSET;
-       end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
-       membuf_write(&to, target->thread.sve_state, end - start);
+               start = end;
+               end = SVE_PT_SVE_FPSR_OFFSET(vq);
+               membuf_zero(&to, end - start);
 
-       start = end;
-       end = SVE_PT_SVE_FPSR_OFFSET(vq);
-       membuf_zero(&to, end - start);
+               /*
+                * Copy fpsr, and fpcr which must follow contiguously in
+                * struct fpsimd_state:
+                */
+               start = end;
+               end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
+               membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
+                            end - start);
 
-       /*
-        * Copy fpsr, and fpcr which must follow contiguously in
-        * struct fpsimd_state:
-        */
-       start = end;
-       end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
-       membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, end - start);
+               start = end;
+               end = sve_size_from_header(&header);
+               return membuf_zero(&to, end - start);
 
-       start = end;
-       end = sve_size_from_header(&header);
-       return membuf_zero(&to, end - start);
+       default:
+               return 0;
+       }
 }
 
-static int sve_set(struct task_struct *target,
+static int sve_get(struct task_struct *target,
                   const struct user_regset *regset,
-                  unsigned int pos, unsigned int count,
-                  const void *kbuf, const void __user *ubuf)
+                  struct membuf to)
+{
+       if (!system_supports_sve())
+               return -EINVAL;
+
+       return sve_get_common(target, regset, to, ARM64_VEC_SVE);
+}
+
+static int sve_set_common(struct task_struct *target,
+                         const struct user_regset *regset,
+                         unsigned int pos, unsigned int count,
+                         const void *kbuf, const void __user *ubuf,
+                         enum vec_type type)
 {
        int ret;
        struct user_sve_header header;
        unsigned int vq;
        unsigned long start, end;
 
-       if (!system_supports_sve())
-               return -EINVAL;
-
        /* Header */
        if (count < sizeof(header))
                return -EINVAL;
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
                                 0, sizeof(header));
        if (ret)
                goto out;
 
        /*
         * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
         * vec_set_vector_length(), which will also validate them for us:
         */
-       ret = vec_set_vector_length(target, ARM64_VEC_SVE, header.vl,
+       ret = vec_set_vector_length(target, type, header.vl,
                ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
        if (ret)
                goto out;
 
        /* Actual VL set may be less than the user asked for: */
-       vq = sve_vq_from_vl(task_get_sve_vl(target));
+       vq = sve_vq_from_vl(task_get_vl(target, type));
+
+       /* Enter/exit streaming mode */
+       if (system_supports_sme()) {
+               u64 old_svcr = target->thread.svcr;
+
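+               /*
+                * Writing the SVE regset leaves streaming mode; writing
+                * the SSVE regset enters it.
+                */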
+               switch (type) {
+               case ARM64_VEC_SVE:
+                       target->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK;
+                       break;
+               case ARM64_VEC_SME:
+                       target->thread.svcr |= SYS_SVCR_EL0_SM_MASK;
+                       break;
+               default:
+                       WARN_ON_ONCE(1);
+                       return -EINVAL;
+               }
+
+               /*
+                * If we switched then invalidate any existing SVE
+                * state and ensure there's storage.
+                */
+               if (target->thread.svcr != old_svcr)
+                       sve_alloc(target);
+       }
 
        /* Registers: FPSIMD-only case */

        BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
        if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
                ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
                                SVE_PT_FPSIMD_OFFSET);
                clear_tsk_thread_flag(target, TIF_SVE);
+               if (type == ARM64_VEC_SME)
+                       fpsimd_force_sync_to_sve(target);
                goto out;
        }
 
-       /* Otherwise: full SVE case */
+       /*
+        * Otherwise: no registers or full SVE case.  For backwards
+        * compatibility reasons we treat empty flags as SVE registers.
+        */
 
        /*
         * If setting a different VL from the requested VL and there is
         * register data, the data layout will be wrong: don't even
         * try to set the registers in this case.
         */
 
        /*
         * Ensure target->thread.sve_state is up to date with target's
-        * FPSIMD regs, so that a short copyin leaves trailing registers
-        * unmodified.
+        * FPSIMD regs, so that a short copyin leaves trailing
+        * registers unmodified.  Always enable SVE even if going into
+        * streaming mode.
         */
        fpsimd_sync_to_sve(target);
        set_tsk_thread_flag(target, TIF_SVE);
        return ret;
 }
 
+static int sve_set(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  const void *kbuf, const void __user *ubuf)
+{
+       if (!system_supports_sve())
+               return -EINVAL;
+
+       return sve_set_common(target, regset, pos, count, kbuf, ubuf,
+                             ARM64_VEC_SVE);
+}
+
 #endif /* CONFIG_ARM64_SVE */
 
+#ifdef CONFIG_ARM64_SME
+
+static int ssve_get(struct task_struct *target,
+                  const struct user_regset *regset,
+                  struct membuf to)
+{
+       if (!system_supports_sme())
+               return -EINVAL;
+
+       return sve_get_common(target, regset, to, ARM64_VEC_SME);
+}
+
+static int ssve_set(struct task_struct *target,
+                   const struct user_regset *regset,
+                   unsigned int pos, unsigned int count,
+                   const void *kbuf, const void __user *ubuf)
+{
+       if (!system_supports_sme())
+               return -EINVAL;
+
+       return sve_set_common(target, regset, pos, count, kbuf, ubuf,
+                             ARM64_VEC_SME);
+}
+
+#endif /* CONFIG_ARM64_SME */
+
 #ifdef CONFIG_ARM64_PTR_AUTH
 static int pac_mask_get(struct task_struct *target,
                        const struct user_regset *regset,
 #ifdef CONFIG_ARM64_SVE
        REGSET_SVE,
 #endif
+#ifdef CONFIG_ARM64_SME
+       REGSET_SSVE,
+#endif
 #ifdef CONFIG_ARM64_PTR_AUTH
        REGSET_PAC_MASK,
        REGSET_PAC_ENABLED_KEYS,
                .set = sve_set,
        },
 #endif
+#ifdef CONFIG_ARM64_SME
+       [REGSET_SSVE] = { /* Streaming mode SVE */
+               .core_note_type = NT_ARM_SSVE,
+               .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
+                                 SVE_VQ_BYTES),
+               .size = SVE_VQ_BYTES,
+               .align = SVE_VQ_BYTES,
+               .regset_get = ssve_get,
+               .set = ssve_set,
+       },
+#endif
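
For illustration only (not part of the patch): a ptrace client could read the
new streaming-mode regset as sketched below.  NT_ARM_SSVE and the
user_sve_header layout come from the UAPI updates elsewhere in this series;
the helper name read_ssve is invented for the example.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>          /* NT_ARM_SSVE, added by this series */
#include <asm/ptrace.h>         /* struct user_sve_header, SVE_PT_* */

/* Hypothetical helper: read a stopped tracee's streaming-mode SVE state. */
static long read_ssve(pid_t pid, void *buf, size_t len)
{
        struct iovec iov = {
                .iov_base = buf,
                .iov_len  = len,
        };

        /*
         * On success the kernel fills a struct user_sve_header followed
         * by register data laid out according to header.flags
         * (SVE_PT_REGS_FPSIMD or SVE_PT_REGS_SVE).
         */
        return ptrace(PTRACE_GETREGSET, pid, NT_ARM_SSVE, &iov);
}

Writing the same regset with PTRACE_SETREGSET puts the target into streaming
mode (and writing NT_ARM_SVE takes it out), per sve_set_common() above.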
 #ifdef CONFIG_ARM64_PTR_AUTH
        [REGSET_PAC_MASK] = {
                .core_note_type = NT_ARM_PAC_MASK,