kref_put(&ioc->refcount, nfs_io_completion_release);
 }
 
+/*
+ * nfs_page_set_inode_ref - take an inode-level reference on a page request
+ * @req: the nfs_page to pin
+ * @inode: inode whose request count is being tracked
+ *
+ * Atomically sets PG_INODE_REF on @req; only the caller that actually
+ * transitions the bit from clear to set takes the extra kref on the
+ * request and bumps the inode's nrequests counter, so the reference is
+ * acquired at most once no matter how many times this is called.
+ *
+ * NOTE(review): callers appear to invoke this under nfs_page_group_lock()
+ * (see nfs_cancel_remove_inode) — confirm whether that lock is a
+ * requirement of this helper or only of its current caller.
+ */
+static void
+nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
+{
+       if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) {
+               kref_get(&req->wb_kref);
+               atomic_long_inc(&NFS_I(inode)->nrequests);
+       }
+}
+
+/*
+ * nfs_cancel_remove_inode - cancel a pending removal of @req from @inode
+ * @req: request that may have PG_REMOVE set
+ * @inode: inode the request should remain accounted to
+ *
+ * If @req was flagged for removal (PG_REMOVE), clear that flag under the
+ * page group lock and re-establish the inode's reference on the request
+ * via nfs_page_set_inode_ref().
+ *
+ * Returns 0 on success (including the no-op case where PG_REMOVE was not
+ * set), or the error from nfs_page_group_lock() if the lock could not be
+ * taken.
+ */
+static int
+nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
+{
+       int ret;
+
+       /* Unlocked fast path: nothing to do if no removal is pending. */
+       if (!test_bit(PG_REMOVE, &req->wb_flags))
+               return 0;
+       ret = nfs_page_group_lock(req);
+       if (ret)
+               return ret;
+       /*
+        * Recheck-and-clear under the group lock: only the caller that
+        * actually clears PG_REMOVE restores the inode reference.
+        */
+       if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
+               nfs_page_set_inode_ref(req, inode);
+       nfs_page_group_unlock(req);
+       return 0;
+}
+
 /**
  * nfs_folio_find_head_request - find head request associated with a folio
  * @folio: pointer to folio
        struct inode *inode = folio->mapping->host;
        struct nfs_page *head, *subreq;
        struct nfs_commit_info cinfo;
-       bool removed;
        int ret;
 
        /*
                goto retry;
        }
 
-       ret = nfs_page_group_lock(head);
+       ret = nfs_cancel_remove_inode(head, inode);
        if (ret < 0)
                goto out_unlock;
 
-       removed = test_bit(PG_REMOVE, &head->wb_flags);
+       ret = nfs_page_group_lock(head);
+       if (ret < 0)
+               goto out_unlock;
 
        /* lock each request in the page group */
        for (subreq = head->wb_this_page;
             subreq != head;
             subreq = subreq->wb_this_page) {
-               if (test_bit(PG_REMOVE, &subreq->wb_flags))
-                       removed = true;
                ret = nfs_page_group_lock_subreq(head, subreq);
                if (ret < 0)
                        goto out_unlock;
 
        nfs_page_group_unlock(head);
 
-       /*
-        * If PG_REMOVE is set on any request, I/O on that request has
-        * completed, but some requests were still under I/O at the time
-        * we locked the head request.
-        *
-        * In that case the above wait for all requests means that all I/O
-        * has now finished, and we can restart from a clean slate.  Let the
-        * old requests go away and start from scratch instead.
-        */
-       if (removed) {
-               nfs_unroll_locks(head, head);
-               nfs_unlock_and_release_request(head);
-               goto retry;
-       }
-
        nfs_init_cinfo_from_inode(&cinfo, inode);
        nfs_join_page_group(head, &cinfo, inode);
        return head;