/* test for nfs page cache coalescing */
        int (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
 
+       /* Returns true if the layout driver wants to divert this request to
+        * the driver's commit routine.
+        */
+       bool (*mark_pnfs_commit)(struct pnfs_layout_segment *lseg);
+       struct list_head * (*choose_commit_list)(struct nfs_page *req);
+       int (*commit_pagelist)(struct inode *inode, struct list_head *mds_pages, int how);
+
        /*
         * Return PNFS_ATTEMPTED to indicate the layout code has attempted
         * I/O, else return PNFS_NOT_ATTEMPTED to fall back to normal NFS
        return nfss->pnfs_curr_ld != NULL;
 }
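
For illustration only (not part of this patch): a layout driver that wants to take over COMMIT would wire up the new hooks declared above along these lines. Every example_* name and the single commit bucket are hypothetical; a real driver would keep one commit list per data server and apply its own test to the layout segment.

/* Hypothetical sketch, not from this patch: a single driver-private
 * commit bucket.  A real layout driver keeps one list per data server.
 */
static LIST_HEAD(example_commit_bucket);

static bool example_mark_pnfs_commit(struct pnfs_layout_segment *lseg)
{
        /* divert every request; a real driver would typically only
         * divert requests written through a read/write segment */
        return true;
}

static struct list_head *example_choose_commit_list(struct nfs_page *req)
{
        /* a real driver would pick the bucket matching the data server
         * this request was written to */
        return &example_commit_bucket;
}

static struct pnfs_layoutdriver_type example_layoutdriver = {
        /* id, name, alloc/free_lseg, the I/O hooks and commit_pagelist
         * are omitted from this sketch */
        .mark_pnfs_commit       = example_mark_pnfs_commit,
        .choose_commit_list     = example_choose_commit_list,
};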
 
+static inline void
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+{
+       if (lseg) {
+               struct pnfs_layoutdriver_type *ld;
+
+               ld = NFS_SERVER(req->wb_page->mapping->host)->pnfs_curr_ld;
+               if (ld->mark_pnfs_commit && ld->mark_pnfs_commit(lseg)) {
+                       set_bit(PG_PNFS_COMMIT, &req->wb_flags);
+                       req->wb_commit_lseg = get_lseg(lseg);
+               }
+       }
+}
+
+static inline int
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+{
+       if (!test_and_clear_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags))
+               return PNFS_NOT_ATTEMPTED;
+       return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how);
+}
+
+static inline struct list_head *
+pnfs_choose_commit_list(struct nfs_page *req, struct list_head *mds)
+{
+       struct list_head *rv;
+
+       if (test_and_clear_bit(PG_PNFS_COMMIT, &req->wb_flags)) {
+               struct inode *inode = req->wb_commit_lseg->pls_layout->plh_inode;
+
+               set_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags);
+               rv = NFS_SERVER(inode)->pnfs_curr_ld->choose_commit_list(req);
+               /* matched by ref taken when PG_PNFS_COMMIT is set */
+               put_lseg(req->wb_commit_lseg);
+       } else
+               rv = mds;
+       return rv;
+}
+
+static inline void pnfs_clear_request_commit(struct nfs_page *req)
+{
+       if (test_and_clear_bit(PG_PNFS_COMMIT, &req->wb_flags))
+               put_lseg(req->wb_commit_lseg);
+}
+
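
The call site that consumes pnfs_choose_commit_list() is not part of this excerpt. For orientation only, the commit scan is expected to steer each request it gathers either onto the caller's MDS list or onto the driver's chosen list, roughly as in the hypothetical helper below (the function name and the "tagged" list are invented for illustration; the pagelist helpers are the existing ones).

/* Illustrative sketch only: route requests gathered for COMMIT either
 * to the MDS list or to the list chosen by the layout driver.
 */
static void example_steer_commit_requests(struct list_head *tagged,
                                          struct list_head *mds_list)
{
        struct nfs_page *req, *tmp;

        list_for_each_entry_safe(req, tmp, tagged, wb_list) {
                nfs_list_remove_request(req);
                /* pnfs_choose_commit_list() drops the PG_PNFS_COMMIT
                 * reference and marks the inode for a pNFS commit when
                 * the driver claimed this request */
                nfs_list_add_request(req, pnfs_choose_commit_list(req, mds_list));
        }
}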
 #else  /* CONFIG_NFS_V4_1 */
 
 static inline void pnfs_destroy_all_layouts(struct nfs_client *clp)
        pgio->pg_test = NULL;
 }
 
+static inline void
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+{
+}
+
+static inline int
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+{
+       return PNFS_NOT_ATTEMPTED;
+}
+
+static inline struct list_head *
+pnfs_choose_commit_list(struct nfs_page *req, struct list_head *mds)
+{
+       return mds;
+}
+
+static inline void pnfs_clear_request_commit(struct nfs_page *req)
+{
+}
+
 #endif /* CONFIG_NFS_V4_1 */
 
 #endif /* FS_NFS_PNFS_H */
 
  * Add a request to the inode's commit list.
  */
 static void
-nfs_mark_request_commit(struct nfs_page *req)
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
 {
        struct inode *inode = req->wb_context->path.dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(inode);
                        NFS_PAGE_TAG_COMMIT);
        nfsi->ncommit++;
        spin_unlock(&inode->i_lock);
+       pnfs_mark_request_commit(req, lseg);
        inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
        inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
 
 static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req)
+int nfs_reschedule_unstable_write(struct nfs_page *req,
+                                 struct nfs_write_data *data)
 {
        if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
-               nfs_mark_request_commit(req);
+               nfs_mark_request_commit(req, data->lseg);
                return 1;
        }
        if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
 }
 #else
 static inline void
-nfs_mark_request_commit(struct nfs_page *req)
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
 {
 }
 
 }
 
 static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req)
+int nfs_reschedule_unstable_write(struct nfs_page *req,
+                                 struct nfs_write_data *data)
 {
        return 0;
 }
        }
 
        if (nfs_clear_request_commit(req) &&
-                       radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
-                               req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL)
+           radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
+                                req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL) {
                NFS_I(inode)->ncommit--;
+               pnfs_clear_request_commit(req);
+       }
 
        /* Okay, the request matches. Update the region */
        if (offset < req->wb_offset) {
        return status;
 }
 
-static void nfs_writepage_release(struct nfs_page *req)
+static void nfs_writepage_release(struct nfs_page *req,
+                                 struct nfs_write_data *data)
 {
        struct page *page = req->wb_page;
 
-       if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req))
+       if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
                nfs_inode_remove_request(req);
        nfs_clear_page_tag_locked(req);
        nfs_end_page_writeback(page);
 
 out:
        if (atomic_dec_and_test(&req->wb_complete))
-               nfs_writepage_release(req);
+               nfs_writepage_release(req, data);
        nfs_writedata_release(calldata);
 }
 
 
                if (nfs_write_need_commit(data)) {
                        memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
-                       nfs_mark_request_commit(req);
+                       nfs_mark_request_commit(req, data->lseg);
                        dprintk(" marked for commit\n");
                        goto next;
                }
        nfs_fattr_init(&data->fattr);
 }
 
-static void nfs_retry_commit(struct list_head *page_list)
+static void nfs_retry_commit(struct list_head *page_list,
+                             struct pnfs_layout_segment *lseg)
 {
        struct nfs_page *req;
 
        while (!list_empty(page_list)) {
                req = nfs_list_entry(page_list->next);
                nfs_list_remove_request(req);
-               nfs_mark_request_commit(req);
+               nfs_mark_request_commit(req, lseg);
                dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
                dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
                             BDI_RECLAIMABLE);
        nfs_init_commit(data, head);
        return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
  out_bad:
-       nfs_retry_commit(head);
+       nfs_retry_commit(head, NULL);
        nfs_commit_clear_lock(NFS_I(inode));
        return -ENOMEM;
 }
        res = nfs_scan_commit(inode, &head, 0, 0);
        spin_unlock(&inode->i_lock);
        if (res) {
-               int error = nfs_commit_list(inode, &head, how);
+               int error;
+
+               error = pnfs_commit_list(inode, &head, how);
+               if (error == PNFS_NOT_ATTEMPTED)
+                       error = nfs_commit_list(inode, &head, how);
                if (error < 0)
                        return error;
                if (!may_wait)
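
As the hunk above shows, a PNFS_NOT_ATTEMPTED return from pnfs_commit_list() (because NFS_INO_PNFS_COMMIT was never set or because the driver declined) sends the list down the regular nfs_commit_list() path, and a negative return is propagated as an error. Purely as an illustration of that contract, the simplest conforming commit_pagelist() hook just punts; the name is hypothetical and a real driver would send COMMITs to its data servers here instead.

/* Hypothetical driver hook, not from this patch: declining the commit
 * makes the caller above fall back to nfs_commit_list() on mds_pages.
 */
static int example_commit_pagelist(struct inode *inode,
                                   struct list_head *mds_pages, int how)
{
        return PNFS_NOT_ATTEMPTED;
}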