        hdr->cred = hdr->req->wb_context->cred;
        hdr->io_start = req_offset(hdr->req);
        hdr->good_bytes = mirror->pg_count;
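+       /* the header inherits the descriptor's completion callback */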
+       hdr->io_completion = desc->pg_io_completion;
        hdr->dreq = desc->pg_dreq;
        hdr->release = release;
        hdr->completion_ops = desc->pg_completion_ops;
        desc->pg_ioflags = io_flags;
        desc->pg_error = 0;
        desc->pg_lseg = NULL;
+       desc->pg_io_completion = NULL;
        desc->pg_dreq = NULL;
        desc->pg_bsize = bsize;
 
 {
        LIST_HEAD(failed);
 
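+       /* preserve the completion callback across the resend */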
+       desc->pg_io_completion = hdr->io_completion;
        desc->pg_dreq = hdr->dreq;
        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 
 #define MIN_POOL_WRITE         (32)
 #define MIN_POOL_COMMIT        (4)
 
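+/*
+ * Refcounted wrapper around a writeback completion callback.  Every pgio
+ * header in the batch holds a reference, so the final
+ * nfs_io_completion_put() invokes ->complete(data) exactly once.
+ */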
+struct nfs_io_completion {
+       void (*complete)(void *data);
+       void *data;
+       struct kref refcount;
+};
+
 /*
  * Local function declarations
  */
        mempool_free(hdr, nfs_wdata_mempool);
 }
 
+static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
+{
+       return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
+}
+
+static void nfs_io_completion_init(struct nfs_io_completion *ioc,
+               void (*complete)(void *), void *data)
+{
+       ioc->complete = complete;
+       ioc->data = data;
+       kref_init(&ioc->refcount);
+}
+
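+/* kref release: invoke the callback, then free the wrapper */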
+static void nfs_io_completion_release(struct kref *kref)
+{
+       struct nfs_io_completion *ioc = container_of(kref,
+                       struct nfs_io_completion, refcount);
+       ioc->complete(ioc->data);
+       kfree(ioc);
+}
+
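+/* NULL-tolerant ref helpers: the wrapper may have failed to allocate */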
+static void nfs_io_completion_get(struct nfs_io_completion *ioc)
+{
+       if (ioc != NULL)
+               kref_get(&ioc->refcount);
+}
+
+static void nfs_io_completion_put(struct nfs_io_completion *ioc)
+{
+       if (ioc != NULL)
+               kref_put(&ioc->refcount, nfs_io_completion_release);
+}
+
 static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
 {
        ctx->error = error;
        return ret;
 }
 
+static void nfs_io_completion_commit(void *inode)
+{
+       nfs_commit_inode(inode, 0);
+}
+
 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
        struct inode *inode = mapping->host;
        struct nfs_pageio_descriptor pgio;
+       struct nfs_io_completion *ioc = nfs_io_completion_alloc(GFP_NOFS);
        int err;
 
        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
 
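+       /*
+        * Allocation failure is not fatal: with a NULL ioc the get/put
+        * helpers are no-ops, and writeback proceeds without the
+        * deferred commit.
+        */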
+       if (ioc)
+               nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);
+
        nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
                                &nfs_async_write_completion_ops);
+       pgio.pg_io_completion = ioc;
        err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
        nfs_pageio_complete(&pgio);
+       nfs_io_completion_put(ioc);
 
        if (err < 0)
                goto out_err;
        return hdr->verf.committed != NFS_FILE_SYNC;
 }
 
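+/* Take a reference per header; it is dropped in nfs_write_completion() */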
+static void nfs_async_write_init(struct nfs_pgio_header *hdr)
+{
+       nfs_io_completion_get(hdr->io_completion);
+}
+
 static void nfs_write_completion(struct nfs_pgio_header *hdr)
 {
        struct nfs_commit_info cinfo;
                nfs_release_request(req);
        }
 out:
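+       /* drop the reference taken in nfs_async_write_init() */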
+       nfs_io_completion_put(hdr->io_completion);
        hdr->release(hdr);
 }
 
 }
 
 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
+       .init_hdr = nfs_async_write_init,
        .error_cleanup = nfs_async_write_error,
        .completion = nfs_write_completion,
        .reschedule_io = nfs_async_write_reschedule_io,