return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
 }
 
+/*
+ * Pick a data server for a read: try the mirror the descriptor currently
+ * points at first, and only if that yields nothing (and we were not already
+ * on mirror 0) retry the search from the first mirror.
+ */
+static struct nfs4_pnfs_ds *
+ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio, int *best_idx)
+{
+       struct pnfs_layout_segment *lseg = pgio->pg_lseg;
+       struct nfs4_pnfs_ds *ds;
+
+       ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
+                                              best_idx);
+       if (!ds && pgio->pg_mirror_idx)
+               ds = ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
+       return ds;
+}
+
 static void
 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
                      struct nfs_page *req,
                        goto out_nolseg;
        }
 
-       ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
+       ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
        if (!ds) {
                if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
                        goto out_mds;
        }
 }
 
+/*
+ * Resend a failed read through pNFS, failing over to the next mirror.
+ * If another data server is available we report a layout error and retry
+ * against it; otherwise we return the layout before resending (which will
+ * then fall back to mirror 0 / MDS as appropriate).
+ */
+static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
+{
+       u32 idx = hdr->pgio_mirror_idx + 1;
+       int new_idx = 0;
+
+       /*
+        * idx already points at the next mirror; passing idx + 1 here
+        * would skip a mirror during failover (off-by-one).
+        */
+       if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
+               ff_layout_send_layouterror(hdr->lseg);
+       else
+               pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
+       pnfs_read_resend_pnfs(hdr, new_idx);
+}
+
 static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
 {
        struct rpc_task *task = &hdr->task;
 
        pnfs_layoutcommit_inode(hdr->inode, false);
+       pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
 
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                dprintk("%s Reset task %5u for i/o through MDS "
                break;
        case NFS4ERR_NXIO:
                ff_layout_mark_ds_unreachable(lseg, idx);
+               /*
+                * Don't return the layout if this is a read and we still
+                * have layouts to try
+                */
+               if (opnum == OP_READ)
+                       break;
                /* Fallthrough */
        default:
                pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
 static int ff_layout_read_done_cb(struct rpc_task *task,
                                struct nfs_pgio_header *hdr)
 {
-       int new_idx = hdr->pgio_mirror_idx;
        int err;
 
        if (task->tk_status < 0) {
        clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
        switch (err) {
        case -NFS4ERR_RESET_TO_PNFS:
-               if (ff_layout_choose_best_ds_for_read(hdr->lseg,
-                                       hdr->pgio_mirror_idx + 1,
-                                       &new_idx))
-                       goto out_layouterror;
                set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
                return task->tk_status;
        case -NFS4ERR_RESET_TO_MDS:
        }
 
        return 0;
-out_layouterror:
-       ff_layout_read_record_layoutstats_done(task, hdr);
-       ff_layout_send_layouterror(hdr->lseg);
-       hdr->pgio_mirror_idx = new_idx;
 out_eagain:
        rpc_restart_call_prepare(task);
        return -EAGAIN;
        struct nfs_pgio_header *hdr = data;
 
        ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
-       if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
-               ff_layout_send_layouterror(hdr->lseg);
-               pnfs_read_resend_pnfs(hdr);
-       } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+       if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+               ff_layout_resend_pnfs_read(hdr);
+       else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
                ff_layout_reset_read(hdr);
        pnfs_generic_rw_release(data);
 }