The combination of spinlock_t lock and seqcount_spinlock_t seq
in struct fs_struct is an open-coded seqlock_t (see linux/seqlock_types.h).
	Combine and switch to equivalent seqlock_t primitives.  AFAICS,
that does end up with the same sequence of underlying operations in all
cases.
	While we are at it, get_fs_pwd() is open-coded verbatim in
get_path_from_fd(); rather than applying the conversion to that copy,
replace it with a call to get_fs_pwd().  Not worth splitting the commit
for that, IMO...
	A bit of historical background - conversion of seqlock_t to
use of seqcount_spinlock_t happened several months after the same
had been done to struct fs_struct; switching fs_struct to seqlock_t
could've been done immediately after that, but it looks like nobody
had gotten around to that until now.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Link: https://lore.kernel.org/20250702053437.GC1880847@ZenIV
Acked-by: Ahmed S. Darwish <darwi@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
 
        unsigned seq;
 
        do {
-               seq = read_seqcount_begin(&fs->seq);
+               seq = read_seqbegin(&fs->seq);
                *root = fs->root;
-       } while (read_seqcount_retry(&fs->seq, seq));
+       } while (read_seqretry(&fs->seq, seq));
 }
 
 /**
        unsigned seq;
 
        do {
-               seq = read_seqcount_begin(&fs->seq);
+               seq = read_seqbegin(&fs->seq);
                *root = fs->root;
                *pwd = fs->pwd;
-       } while (read_seqcount_retry(&fs->seq, seq));
+       } while (read_seqretry(&fs->seq, seq));
 }
 
 /*
 
         * state is protected by cred_guard_mutex we hold.
         */
        n_fs = 1;
-       spin_lock(&p->fs->lock);
+       read_seqlock_excl(&p->fs->seq);
        rcu_read_lock();
        for_other_threads(p, t) {
                if (t->fs == p->fs)
                bprm->unsafe |= LSM_UNSAFE_SHARE;
        else
                p->fs->in_exec = 1;
-       spin_unlock(&p->fs->lock);
+       read_sequnlock_excl(&p->fs->seq);
 }
 
 static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
 
        }
 
        if (fd == AT_FDCWD) {
-               struct fs_struct *fs = current->fs;
-               spin_lock(&fs->lock);
-               *root = fs->pwd;
-               path_get(root);
-               spin_unlock(&fs->lock);
+               get_fs_pwd(current->fs, root);
                return 0;
        }
 
 
        struct path old_root;
 
        path_get(path);
-       spin_lock(&fs->lock);
-       write_seqcount_begin(&fs->seq);
+       write_seqlock(&fs->seq);
        old_root = fs->root;
        fs->root = *path;
-       write_seqcount_end(&fs->seq);
-       spin_unlock(&fs->lock);
+       write_sequnlock(&fs->seq);
        if (old_root.dentry)
                path_put(&old_root);
 }
        struct path old_pwd;
 
        path_get(path);
-       spin_lock(&fs->lock);
-       write_seqcount_begin(&fs->seq);
+       write_seqlock(&fs->seq);
        old_pwd = fs->pwd;
        fs->pwd = *path;
-       write_seqcount_end(&fs->seq);
-       spin_unlock(&fs->lock);
+       write_sequnlock(&fs->seq);
 
        if (old_pwd.dentry)
                path_put(&old_pwd);
                fs = p->fs;
                if (fs) {
                        int hits = 0;
-                       spin_lock(&fs->lock);
-                       write_seqcount_begin(&fs->seq);
+                       write_seqlock(&fs->seq);
                        hits += replace_path(&fs->root, old_root, new_root);
                        hits += replace_path(&fs->pwd, old_root, new_root);
-                       write_seqcount_end(&fs->seq);
                        while (hits--) {
                                count++;
                                path_get(new_root);
                        }
-                       spin_unlock(&fs->lock);
+                       write_sequnlock(&fs->seq);
                }
                task_unlock(p);
        }
        if (fs) {
                int kill;
                task_lock(tsk);
-               spin_lock(&fs->lock);
+               read_seqlock_excl(&fs->seq);
                tsk->fs = NULL;
                kill = !--fs->users;
-               spin_unlock(&fs->lock);
+               read_sequnlock_excl(&fs->seq);
                task_unlock(tsk);
                if (kill)
                        free_fs_struct(fs);
        if (fs) {
                fs->users = 1;
                fs->in_exec = 0;
-               spin_lock_init(&fs->lock);
-               seqcount_spinlock_init(&fs->seq, &fs->lock);
+               seqlock_init(&fs->seq);
                fs->umask = old->umask;
 
-               spin_lock(&old->lock);
+               read_seqlock_excl(&old->seq);
                fs->root = old->root;
                path_get(&fs->root);
                fs->pwd = old->pwd;
                path_get(&fs->pwd);
-               spin_unlock(&old->lock);
+               read_sequnlock_excl(&old->seq);
        }
        return fs;
 }
                return -ENOMEM;
 
        task_lock(current);
-       spin_lock(&fs->lock);
+       read_seqlock_excl(&fs->seq);
        kill = !--fs->users;
        current->fs = new_fs;
-       spin_unlock(&fs->lock);
+       read_sequnlock_excl(&fs->seq);
        task_unlock(current);
 
        if (kill)
 /* to be mentioned only in INIT_TASK */
 struct fs_struct init_fs = {
        .users          = 1,
-       .lock           = __SPIN_LOCK_UNLOCKED(init_fs.lock),
-       .seq            = SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock),
+       .seq            = __SEQLOCK_UNLOCKED(init_fs.seq),
        .umask          = 0022,
 };
 
                unsigned seq;
 
                do {
-                       seq = read_seqcount_begin(&fs->seq);
+                       seq = read_seqbegin(&fs->seq);
                        nd->root = fs->root;
                        nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
-               } while (read_seqcount_retry(&fs->seq, seq));
+               } while (read_seqretry(&fs->seq, seq));
        } else {
                get_fs_root(fs, &nd->root);
                nd->state |= ND_ROOT_GRABBED;
                        unsigned seq;
 
                        do {
-                               seq = read_seqcount_begin(&fs->seq);
+                               seq = read_seqbegin(&fs->seq);
                                nd->path = fs->pwd;
                                nd->inode = nd->path.dentry->d_inode;
                                nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
-                       } while (read_seqcount_retry(&fs->seq, seq));
+                       } while (read_seqretry(&fs->seq, seq));
                } else {
                        get_fs_pwd(current->fs, &nd->path);
                        nd->inode = nd->path.dentry->d_inode;
 
 
 struct fs_struct {
        int users;
-       spinlock_t lock;
-       seqcount_spinlock_t seq;
+       seqlock_t seq;
        int umask;
        int in_exec;
        struct path root, pwd;
 
 static inline void get_fs_root(struct fs_struct *fs, struct path *root)
 {
-       spin_lock(&fs->lock);
+       read_seqlock_excl(&fs->seq);
        *root = fs->root;
        path_get(root);
-       spin_unlock(&fs->lock);
+       read_sequnlock_excl(&fs->seq);
 }
 
 static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
 {
-       spin_lock(&fs->lock);
+       read_seqlock_excl(&fs->seq);
        *pwd = fs->pwd;
        path_get(pwd);
-       spin_unlock(&fs->lock);
+       read_sequnlock_excl(&fs->seq);
 }
 
 extern bool current_chrooted(void);
 
        struct fs_struct *fs = current->fs;
        if (clone_flags & CLONE_FS) {
                /* tsk->fs is already what we want */
-               spin_lock(&fs->lock);
+               read_seqlock_excl(&fs->seq);
                /* "users" and "in_exec" locked for check_unsafe_exec() */
                if (fs->in_exec) {
-                       spin_unlock(&fs->lock);
+                       read_sequnlock_excl(&fs->seq);
                        return -EAGAIN;
                }
                fs->users++;
-               spin_unlock(&fs->lock);
+               read_sequnlock_excl(&fs->seq);
                return 0;
        }
        tsk->fs = copy_fs_struct(fs);
 
                if (new_fs) {
                        fs = current->fs;
-                       spin_lock(&fs->lock);
+                       read_seqlock_excl(&fs->seq);
                        current->fs = new_fs;
                        if (--fs->users)
                                new_fs = NULL;
                        else
                                new_fs = fs;
-                       spin_unlock(&fs->lock);
+                       read_sequnlock_excl(&fs->seq);
                }
 
                if (new_fd)