blk-mq: fix racy updates of rq->errors
author		Christoph Hellwig <hch@lst.de>
		Tue, 30 May 2017 17:36:32 +0000 (10:36 -0700)
committer	Chuck Anderson <chuck.anderson@oracle.com>
		Thu, 1 Jun 2017 20:40:57 +0000 (13:40 -0700)
blk_mq_complete_request may be a no-op if the request has already
been completed by other means (e.g. a timeout or cancellation), but
currently drivers have to set rq->errors before calling
blk_mq_complete_request, which might leave us with the wrong error value.

Add an error parameter to blk_mq_complete_request so that we can
defer setting rq->errors until we know we won the race to complete the
request.
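
A minimal sketch of the race from a driver's point of view (the
function and variable names below are illustrative, not taken from
this patch):

	#include <linux/blkdev.h>	/* struct request */
	#include <linux/blk-mq.h>	/* blk_mq_complete_request() */

	/* Before: rq->errors is stored unconditionally, so if the
	 * timeout/cancellation path has already completed the request,
	 * its error value is clobbered and the call below is a no-op. */
	static void my_driver_complete_old(struct request *rq, int my_err)
	{
		rq->errors = my_err;
		blk_mq_complete_request(rq);
	}

	/* After: the error is passed in and only written to rq->errors
	 * once blk_mark_rq_complete() confirms we won the race. */
	static void my_driver_complete_new(struct request *rq, int my_err)
	{
		blk_mq_complete_request(rq, my_err);
	}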

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
(cherry picked from commit f4829a9b7a61e159367350008a608b062c4f6840)

Orabug: 25130845

Signed-off-by: Ashok Vairavan <ashok.vairavan@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
block/blk-mq.c
drivers/block/loop.c
drivers/block/null_blk.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/md/dm.c
drivers/scsi/scsi_lib.c
include/linux/blk-mq.h

diff --git a/block/blk-mq.c b/block/blk-mq.c
index cc4956a368a75b8c56b745e5bcf07e7013a89a89..fbb886bd96ec5d11c42cc93898f119a38cb27b50 100644
@@ -377,14 +377,16 @@ void __blk_mq_complete_request(struct request *rq)
  *     Ends all I/O on a request. It does not handle partial completions.
  *     The actual completion happens out-of-order, through a IPI handler.
  **/
-void blk_mq_complete_request(struct request *rq)
+void blk_mq_complete_request(struct request *rq, int error)
 {
        struct request_queue *q = rq->q;
 
        if (unlikely(blk_should_fake_timeout(q)))
                return;
-       if (!blk_mark_rq_complete(rq))
+       if (!blk_mark_rq_complete(rq)) {
+               rq->errors = error;
                __blk_mq_complete_request(rq);
+       }
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
@@ -600,10 +602,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                 * If a request wasn't started before the queue was
                 * marked dying, kill it here or it'll go unnoticed.
                 */
-               if (unlikely(blk_queue_dying(rq->q))) {
-                       rq->errors = -EIO;
-                       blk_mq_complete_request(rq);
-               }
+               if (unlikely(blk_queue_dying(rq->q)))
+                       blk_mq_complete_request(rq, -EIO);
                return;
        }
        if (rq->cmd_flags & REQ_NO_TIMEOUT)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 062dfb6dded95f8e3a7d2359bb8b25578d1ed039..5d4572a9bc80e9fd6d69392f680d642e50d40e96 100644
@@ -455,8 +455,7 @@ static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
                ret = -EIO;
        }
 
-       rq->errors = ret;
-       blk_mq_complete_request(rq);
+       blk_mq_complete_request(rq, ret);
 }
 
 static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
@@ -1671,19 +1670,18 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
 {
        const bool write = cmd->rq->cmd_flags & REQ_WRITE;
        struct loop_device *lo = cmd->rq->q->queuedata;
-       int ret = -EIO;
+       int ret = 0;
 
-       if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+       if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+               ret = -EIO;
                goto failed;
+       }
 
        ret = do_req_filebacked(lo, cmd->rq);
-
  failed:
-       if (ret)
-               cmd->rq->errors = -EIO;
        /* complete non-aio request */
        if (!cmd->use_aio || ret)
-               blk_mq_complete_request(cmd->rq);
+               blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);
 }
 
 static void loop_queue_work(struct kthread_work *work)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 65cd61a4145ed2049944621c50b374cf742041ca..cd304851f646cdff50a0ef97429d5f8dc9fc197b 100644
@@ -278,7 +278,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
        case NULL_IRQ_SOFTIRQ:
                switch (queue_mode)  {
                case NULL_Q_MQ:
-                       blk_mq_complete_request(cmd->rq);
+                       blk_mq_complete_request(cmd->rq, cmd->rq->errors);
                        break;
                case NULL_Q_RQ:
                        blk_complete_request(cmd->rq);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index d4d05f064d390772a2f99acbf882eaa983788511..44a72107f93c3164afaf16c3812d984e67a00133 100644
@@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq)
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
-                       blk_mq_complete_request(vbr->req);
+                       blk_mq_complete_request(vbr->req, vbr->req->errors);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 89a9dfaade94a9637fe510fa03d56e21e0de482d..2edf895d3d0f8203826b4d8a15b2558694e91152 100644
@@ -1580,6 +1580,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
        unsigned long flags;
        struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
        struct blkfront_info *info = rinfo->dev_info;
+       int error;
 
        if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
                return IRQ_HANDLED;
@@ -1623,37 +1624,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                        continue;
                }
 
-               req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+               error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
                switch (bret->operation) {
                case BLKIF_OP_DISCARD:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
                                printk(KERN_WARNING "blkfront: %s: %s op failed\n",
                                           info->gd->disk_name, op_name(bret->operation));
-                               req->errors = -EOPNOTSUPP;
+                               error = -EOPNOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
                                queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
                                queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
                        }
-                       blk_mq_complete_request(req);
+                       blk_mq_complete_request(req, error);
                        break;
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                printk(KERN_WARNING "blkfront: %s: %s op failed\n",
                                       info->gd->disk_name, op_name(bret->operation));
-                               req->errors = -EOPNOTSUPP;
+                               error = -EOPNOTSUPP;
                        }
                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
                                     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
                                printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
                                       info->gd->disk_name, op_name(bret->operation));
-                               req->errors = -EOPNOTSUPP;
+                               error = -EOPNOTSUPP;
                        }
-                       if (unlikely(req->errors)) {
-                               if (req->errors == -EOPNOTSUPP)
-                                       req->errors = 0;
+                       if (unlikely(error)) {
+                               if (error == -EOPNOTSUPP)
+                                       error = 0;
                                info->feature_flush = 0;
                                xlvbd_flush(info);
                        }
@@ -1664,7 +1665,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                                dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
                                        "request: %x\n", bret->status);
 
-                       blk_mq_complete_request(req);
+                       blk_mq_complete_request(req, error);
                        break;
                default:
                        BUG();
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 87de9a0848b7cb7b09360b659797e7fffae0d01b..a852ab01d7453bacd6293b2a814e64443f286fe5 100644
@@ -1303,7 +1303,7 @@ static void dm_complete_request(struct request *rq, int error)
        if (!rq->q->mq_ops)
                blk_complete_request(rq);
        else
-               blk_mq_complete_request(rq);
+               blk_mq_complete_request(rq, error);
 }
 
 /*
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 448ebdaa3d694758dd899b1c06ce379d0676898a..39831e2744db4a064b6af6e904b0257ecbf4d060 100644
@@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req)
 static void scsi_mq_done(struct scsi_cmnd *cmd)
 {
        trace_scsi_dispatch_cmd_done(cmd);
-       blk_mq_complete_request(cmd->request);
+       blk_mq_complete_request(cmd->request, cmd->request->errors);
 }
 
 static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b1fbbdf3ad1c15ffdeed1d63a0b0cbc30bc2b174..94b640c92792f2dddafb7a52430d51d58521de1f 100644
@@ -214,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
 void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_abort_requeue_list(struct request_queue *q);
-void blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request(struct request *rq, int error);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);