obj_request_done_set(obj_request);
 }
 
-static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
-                               struct ceph_msg *msg)
+static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
 {
        struct rbd_obj_request *obj_request = osd_req->r_priv;
        u16 opcode;
 
-       dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
+       dout("%s: osd_req %p\n", __func__, osd_req);
        rbd_assert(osd_req == obj_request->osd_req);
        if (obj_request_img_data_test(obj_request)) {
                rbd_assert(obj_request->img_request);
 
 /*
  * Finish an async read(ahead) op.
  */
-static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
+static void finish_read(struct ceph_osd_request *req)
 {
        struct inode *inode = req->r_inode;
        struct ceph_osd_data *osd_data;
-       int rc = req->r_result;
-       int bytes = le32_to_cpu(msg->hdr.data_len);
+       int rc = req->r_result <= 0 ? req->r_result : 0;
+       int bytes = req->r_result >= 0 ? req->r_result : 0;
        int num_pages;
        int i;
 
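With the reply message no longer passed in, finish_read() cannot take the transferred byte count from msg->hdr.data_len; both pieces of information are folded into req->r_result, which is negative on error and the number of bytes transferred otherwise. A minimal sketch of that split, factored into a hypothetical helper purely for illustration:

static void example_split_result(struct ceph_osd_request *req,
                                 int *rc, int *bytes)
{
        /* r_result < 0: error code; r_result >= 0: bytes transferred */
        *rc = req->r_result <= 0 ? req->r_result : 0;
        *bytes = req->r_result >= 0 ? req->r_result : 0;
}
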
  * If we get an error, set the mapping error bit, but not the individual
  * page error bits.
  */
-static void writepages_finish(struct ceph_osd_request *req,
-                             struct ceph_msg *msg)
+static void writepages_finish(struct ceph_osd_request *req)
 {
        struct inode *inode = req->r_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
 
        kfree(aio_req);
 }
 
-static void ceph_aio_complete_req(struct ceph_osd_request *req,
-                                 struct ceph_msg *msg)
+static void ceph_aio_complete_req(struct ceph_osd_request *req)
 {
        int rc = req->r_result;
        struct inode *inode = req->r_inode;
 out:
        if (ret < 0) {
                req->r_result = ret;
-               ceph_aio_complete_req(req, NULL);
+               ceph_aio_complete_req(req);
        }
 
        ceph_put_snap_context(snapc);
                                                              req, false);
                        if (ret < 0) {
                                req->r_result = ret;
-                               ceph_aio_complete_req(req, NULL);
+                               ceph_aio_complete_req(req);
                        }
                }
                return -EIOCBQUEUED;
 
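The synchronous error paths above follow the same convention the OSD client itself uses after this change: record the outcome in r_result, then invoke the single-argument completion directly, with no reply message to hand over. A hedged sketch of that pattern (the helper name is hypothetical; ceph_aio_complete_req() is the callback from the hunks above):

static void example_fail_aio_req(struct ceph_osd_request *req, int err)
{
        req->r_result = err;            /* completion reads the outcome here */
        ceph_aio_complete_req(req);     /* no struct ceph_msg argument anymore */
}
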
 /*
  * completion callback for async writepages
  */
-typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *,
-                                    struct ceph_msg *);
+typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
 typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool);
 
 #define CEPH_HOMELESS_OSD      -1
 
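Every user of ceph_osdc_callback_t must now match the one-argument prototype. A sketch of what a caller-side callback can look like under the new typedef; the context structure and function name are illustrative, not part of the patch:

struct example_ctx {
        struct completion done;
        int result;
};

static void example_osd_req_callback(struct ceph_osd_request *req)
{
        struct example_ctx *ctx = req->r_priv;

        ctx->result = req->r_result;    /* result travels in r_result only */
        complete(&ctx->done);
}

Registration itself is unchanged by the patch: the function is assigned to req->r_callback (and the private data to req->r_priv) before the request is started.
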
                    result >= 0 && !(flags & CEPH_OSD_FLAG_ONDISK))
                        req->r_unsafe_callback(req, true);
                if (req->r_callback)
-                       req->r_callback(req, msg);
+                       req->r_callback(req);
                else
                        complete_all(&req->r_completion);
        }
        req->r_result = -EIO;
        __unregister_request(osdc, req);
        if (req->r_callback)
-               req->r_callback(req, msg);
+               req->r_callback(req);
        else
                complete_all(&req->r_completion);
        complete_request(req);