        int ret = 1;
        down_read(&namespace_sem);
        br_write_lock(vfsmount_lock);
-       if (propagate_mount_busy(mnt, 2))
+       if (propagate_mount_busy(real_mount(mnt), 2))
                ret = 0;
        br_write_unlock(vfsmount_lock);
        up_read(&namespace_sem);
 
 static void shrink_submounts(struct mount *mnt, struct list_head *umounts);
 
-static int do_umount(struct vfsmount *mnt, int flags)
+static int do_umount(struct mount *mnt, int flags)
 {
-       struct super_block *sb = mnt->mnt_sb;
+       struct super_block *sb = mnt->mnt.mnt_sb;
        int retval;
        LIST_HEAD(umount_list);
 
-       retval = security_sb_umount(mnt, flags);
+       retval = security_sb_umount(&mnt->mnt, flags);
        if (retval)
                return retval;
 
         *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
         */
        if (flags & MNT_EXPIRE) {
-               if (mnt == current->fs->root.mnt ||
+               if (&mnt->mnt == current->fs->root.mnt ||
                    flags & (MNT_FORCE | MNT_DETACH))
                        return -EINVAL;
 
                 * all race cases, but it's a slowpath.
                 */
                br_write_lock(vfsmount_lock);
-               if (mnt_get_count(mnt) != 2) {
+               if (mnt_get_count(&mnt->mnt) != 2) {
                        br_write_unlock(vfsmount_lock);
                        return -EBUSY;
                }
                br_write_unlock(vfsmount_lock);
 
-               if (!xchg(&mnt->mnt_expiry_mark, 1))
+               if (!xchg(&mnt->mnt.mnt_expiry_mark, 1))
                        return -EAGAIN;
        }
 
         * /reboot - static binary that would close all descriptors and
         * call reboot(9). Then init(8) could umount root and exec /reboot.
         */
-       if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
+       if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
                /*
                 * Special case for "unmounting" root ...
                 * we just try to remount it readonly.
        event++;
 
        if (!(flags & MNT_DETACH))
-               shrink_submounts(real_mount(mnt), &umount_list);
+               shrink_submounts(mnt, &umount_list);
 
        retval = -EBUSY;
        if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
-               if (!list_empty(&mnt->mnt_list))
-                       umount_tree(real_mount(mnt), 1, &umount_list);
+               if (!list_empty(&mnt->mnt.mnt_list))
+                       umount_tree(mnt, 1, &umount_list);
                retval = 0;
        }
        br_write_unlock(vfsmount_lock);
        if (!capable(CAP_SYS_ADMIN))
                goto dput_and_out;
 
-       retval = do_umount(path.mnt, flags);
+       retval = do_umount(real_mount(path.mnt), flags);
 dput_and_out:
        /* we mustn't call path_put() as that would clear mnt_expiry_mark */
        dput(path.dentry);
         */
        list_for_each_entry_safe(mnt, next, mounts, mnt.mnt_expire) {
                if (!xchg(&mnt->mnt.mnt_expiry_mark, 1) ||
-                       propagate_mount_busy(&mnt->mnt, 1))
+                       propagate_mount_busy(mnt, 1))
                        continue;
                list_move(&mnt->mnt.mnt_expire, &graveyard);
        }
                        goto repeat;
                }
 
-               if (!propagate_mount_busy(&mnt->mnt, 1)) {
+               if (!propagate_mount_busy(mnt, 1)) {
                        list_move_tail(&mnt->mnt.mnt_expire, graveyard);
                        found++;
                }
 
 /*
  * return true if the refcount is greater than count
  */
-static inline int do_refcount_check(struct vfsmount *mnt, int count)
+static inline int do_refcount_check(struct mount *mnt, int count)
 {
-       int mycount = mnt_get_count(mnt) - mnt->mnt_ghosts;
+       int mycount = mnt_get_count(&mnt->mnt) - mnt->mnt.mnt_ghosts;
        return (mycount > count);
 }
 
  *
  * vfsmount lock must be held for write
  */
-int propagate_mount_busy(struct vfsmount *mnt, int refcnt)
+int propagate_mount_busy(struct mount *mnt, int refcnt)
 {
        struct vfsmount *m;
        struct mount *child;
-       struct vfsmount *parent = mnt->mnt_parent;
+       struct vfsmount *parent = mnt->mnt.mnt_parent;
        int ret = 0;
 
-       if (mnt == parent)
+       if (&mnt->mnt == parent)
                return do_refcount_check(mnt, refcnt);
 
        /*
         * If not, we don't have to go checking for all other
         * mounts
         */
-       if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
+       if (!list_empty(&mnt->mnt.mnt_mounts) || do_refcount_check(mnt, refcnt))
                return 1;
 
        for (m = propagation_next(parent, parent); m;
                        m = propagation_next(m, parent)) {
-               child = __lookup_mnt(m, mnt->mnt_mountpoint, 0);
+               child = __lookup_mnt(m, mnt->mnt.mnt_mountpoint, 0);
                if (child && list_empty(&child->mnt.mnt_mounts) &&
-                   (ret = do_refcount_check(&child->mnt, 1)))
+                   (ret = do_refcount_check(child, 1)))
                        break;
        }
        return ret;
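
For orientation only (not part of the patch): the hunks above keep passing struct vfsmount around at the API edges while the internals move to struct mount, relying on the fact that struct vfsmount is embedded in struct mount and that real_mount() recovers the container via container_of(). Below is a minimal user-space sketch of that relationship; the field names other than the embedded mnt member (mnt_flags, mnt_count) are illustrative stand-ins, not the kernel's full definitions, and container_of is re-declared locally so the sketch compiles on its own.

/* Sketch of the vfsmount-inside-mount layout assumed by this series. */
#include <stddef.h>
#include <stdio.h>

struct vfsmount {
        int mnt_flags;                  /* stand-in for the public half */
};

struct mount {
        int mnt_count;                  /* stand-in for the private half */
        struct vfsmount mnt;            /* embedded, as in fs/mount.h */
};

/* Local re-declaration for the sketch; the kernel has its own. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* real_mount(): map a struct vfsmount pointer back to its struct mount. */
static inline struct mount *real_mount(struct vfsmount *mnt)
{
        return container_of(mnt, struct mount, mnt);
}

int main(void)
{
        struct mount m = { .mnt_count = 2 };
        struct vfsmount *v = &m.mnt;    /* what legacy callers still hold */

        /* Round trip: real_mount(&m.mnt) gets back to m, so the patch can
         * freely switch between "mnt" and "&mnt->mnt" at call boundaries. */
        printf("%d\n", real_mount(v) == &m);    /* prints 1 */
        return 0;
}

This is why the converted call sites either wrap a vfsmount with real_mount() (as in may_umount() and sys_umount() above) or reach through the embedded member with &mnt->mnt / mnt->mnt.field when an old-style pointer or field is still needed.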