Revert "nfs: don't reuse partially completed requests in nfs_lock_and_join_requests"
author     Trond Myklebust <trond.myklebust@hammerspace.com>
           Tue, 5 Nov 2024 02:09:21 +0000 (21:09 -0500)
committer  Trond Myklebust <trond.myklebust@hammerspace.com>
           Mon, 18 Nov 2024 14:42:03 +0000 (09:42 -0500)
This reverts commit b571cfcb9dcac187c6d967987792d37cb0688610.

The reverted patch appears to assume that if one request is complete,
then the others will all complete too before unlocking. That is not a
valid assumption: other requests could hit a non-fatal error or a short
write that would cause them not to complete.
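
A minimal userspace model of why that implication fails (a hypothetical
illustration only, not kernel code): one request in a page group can
have finished I/O, with PG_REMOVE set, while a sibling that took a
short write is still outstanding.

#include <stdbool.h>
#include <stdio.h>

struct req {
	bool removed;     /* models PG_REMOVE: this request finished I/O   */
	bool outstanding; /* still under I/O, e.g. a re-queued short write */
};

static bool group_all_complete(const struct req *group, int n)
{
	for (int i = 0; i < n; i++)
		if (group[i].outstanding)
			return false;
	return true;
}

int main(void)
{
	/* req 0 completed; req 1 hit a short write and was re-queued */
	struct req group[2] = {
		{ .removed = true,  .outstanding = false },
		{ .removed = false, .outstanding = true  },
	};
	bool any_removed = group[0].removed || group[1].removed;

	/* The reverted logic treated "any request removed" as "all I/O
	 * has now finished, restart from scratch"; as printed here,
	 * the implication does not hold.                              */
	printf("any_removed=%d all_complete=%d\n",
	       any_removed, group_all_complete(group, 2));
	return 0;
}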

Reported-by: Igor Raits <igor@gooddata.com>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=219508
Fixes: b571cfcb9dca ("nfs: don't reuse partially completed requests in nfs_lock_and_join_requests")
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
fs/nfs/write.c

index 2da00987d9ed4c8102ab5b4d5f4959425ff1e48a..50fa539611f5ecbc96a2ae7be90670001457dda4 100644
@@ -144,6 +144,31 @@ static void nfs_io_completion_put(struct nfs_io_completion *ioc)
                kref_put(&ioc->refcount, nfs_io_completion_release);
 }
 
+static void
+nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
+{
+       if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) {
+               kref_get(&req->wb_kref);
+               atomic_long_inc(&NFS_I(inode)->nrequests);
+       }
+}
+
+static int
+nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
+{
+       int ret;
+
+       if (!test_bit(PG_REMOVE, &req->wb_flags))
+               return 0;
+       ret = nfs_page_group_lock(req);
+       if (ret)
+               return ret;
+       if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
+               nfs_page_set_inode_ref(req, inode);
+       nfs_page_group_unlock(req);
+       return 0;
+}
+
 /**
  * nfs_folio_find_head_request - find head request associated with a folio
  * @folio: pointer to folio
@@ -540,7 +565,6 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
        struct inode *inode = folio->mapping->host;
        struct nfs_page *head, *subreq;
        struct nfs_commit_info cinfo;
-       bool removed;
        int ret;
 
        /*
@@ -565,18 +589,18 @@ retry:
                goto retry;
        }
 
-       ret = nfs_page_group_lock(head);
+       ret = nfs_cancel_remove_inode(head, inode);
        if (ret < 0)
                goto out_unlock;
 
-       removed = test_bit(PG_REMOVE, &head->wb_flags);
+       ret = nfs_page_group_lock(head);
+       if (ret < 0)
+               goto out_unlock;
 
        /* lock each request in the page group */
        for (subreq = head->wb_this_page;
             subreq != head;
             subreq = subreq->wb_this_page) {
-               if (test_bit(PG_REMOVE, &subreq->wb_flags))
-                       removed = true;
                ret = nfs_page_group_lock_subreq(head, subreq);
                if (ret < 0)
                        goto out_unlock;
@@ -584,21 +608,6 @@ retry:
 
        nfs_page_group_unlock(head);
 
-       /*
-        * If PG_REMOVE is set on any request, I/O on that request has
-        * completed, but some requests were still under I/O at the time
-        * we locked the head request.
-        *
-        * In that case the above wait for all requests means that all I/O
-        * has now finished, and we can restart from a clean slate.  Let the
-        * old requests go away and start from scratch instead.
-        */
-       if (removed) {
-               nfs_unroll_locks(head, head);
-               nfs_unlock_and_release_request(head);
-               goto retry;
-       }
-
        nfs_init_cinfo_from_inode(&cinfo, inode);
        nfs_join_page_group(head, &cinfo, inode);
        return head;
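
The restored nfs_page_set_inode_ref() and nfs_cancel_remove_inode()
rely on the kernel's atomic test_and_set_bit()/test_and_clear_bit()
bitops so that the extra inode reference is taken, and a PG_REMOVE'd
request re-attached, exactly once no matter how the callers race. A
rough userspace sketch of that pattern, modelling the kernel bitops
with GCC __atomic builtins (an illustrative assumption, not the
kernel API):

#include <stdio.h>

static unsigned long flags;
#define PG_INODE_REF 0
#define PG_REMOVE    1

/* Returns the bit's previous value after atomically setting it. */
static int test_and_set(int bit)
{
	unsigned long mask = 1UL << bit;
	return !!(__atomic_fetch_or(&flags, mask, __ATOMIC_SEQ_CST) & mask);
}

/* Returns the bit's previous value after atomically clearing it. */
static int test_and_clear(int bit)
{
	unsigned long mask = 1UL << bit;
	return !!(__atomic_fetch_and(&flags, ~mask, __ATOMIC_SEQ_CST) & mask);
}

int main(void)
{
	/* Only the first setter sees 0 and takes the inode reference. */
	printf("first set:  was %d -> take ref\n", test_and_set(PG_INODE_REF));
	printf("second set: was %d -> skip\n",     test_and_set(PG_INODE_REF));

	/* Likewise, only one clearer re-attaches a PG_REMOVE'd request. */
	test_and_set(PG_REMOVE);
	printf("first clear:  was %d -> re-attach\n", test_and_clear(PG_REMOVE));
	printf("second clear: was %d -> no-op\n",     test_and_clear(PG_REMOVE));
	return 0;
}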