block: check BLK_FEAT_POLL under q_usage_count
author	Christoph Hellwig <hch@lst.de>	Fri, 10 Jan 2025 05:47:11 +0000 (06:47 +0100)
committer	Jens Axboe <axboe@kernel.dk>	Fri, 10 Jan 2025 14:29:23 +0000 (07:29 -0700)
Otherwise feature reconfiguration can race with I/O submission.

Also drop the bio_clear_polled in the error path, as the flag does not
matter for instant error completions; it is a leftover from when we
allowed polled I/O to proceed unpolled in this case.
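
For context, the race exists because feature reconfiguration only becomes
visible to submitters once q_usage_counter has drained.  Below is a minimal
sketch of the reconfiguration side; it assumes the void-returning
blk_mq_freeze_queue()/blk_mq_unfreeze_queue() pair, the helper name is made
up, and q->limits.features is poked directly purely for illustration (real
code goes through the queue-limits update helpers):

static void example_disable_polling(struct request_queue *q)
{
        /*
         * Freezing waits for all q_usage_counter holders to drop their
         * reference, so the feature bits cannot change underneath a
         * submitter that holds the counter.
         */
        blk_mq_freeze_queue(q);
        q->limits.features &= ~BLK_FEAT_POLL;
        blk_mq_unfreeze_queue(q);
}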

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20250110054726.1499538-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
block/blk-mq.c

diff --git a/block/blk-core.c b/block/blk-core.c
index 666efe8fa202065fd417a0f18b367e95c209fc26..6309b3f5a89dc4b972e5722201f9646bc359465b 100644
@@ -629,8 +629,14 @@ static void __submit_bio(struct bio *bio)
                blk_mq_submit_bio(bio);
        } else if (likely(bio_queue_enter(bio) == 0)) {
                struct gendisk *disk = bio->bi_bdev->bd_disk;
-
-               disk->fops->submit_bio(bio);
+
+               if ((bio->bi_opf & REQ_POLLED) &&
+                   !(disk->queue->limits.features & BLK_FEAT_POLL)) {
+                       bio->bi_status = BLK_STS_NOTSUPP;
+                       bio_endio(bio);
+               } else {
+                       disk->fops->submit_bio(bio);
+               }
                blk_queue_exit(disk->queue);
        }
 
@@ -805,12 +811,6 @@ void submit_bio_noacct(struct bio *bio)
                }
        }
 
-       if (!(q->limits.features & BLK_FEAT_POLL) &&
-                       (bio->bi_opf & REQ_POLLED)) {
-               bio_clear_polled(bio);
-               goto not_supported;
-       }
-
        switch (bio_op(bio)) {
        case REQ_OP_READ:
                break;
@@ -935,7 +935,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
                return 0;
 
        q = bdev_get_queue(bdev);
-       if (cookie == BLK_QC_T_NONE || !(q->limits.features & BLK_FEAT_POLL))
+       if (cookie == BLK_QC_T_NONE)
                return 0;
 
        blk_flush_plug(current->plug, false);
@@ -951,7 +951,9 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
         */
        if (!percpu_ref_tryget(&q->q_usage_counter))
                return 0;
-       if (queue_is_mq(q)) {
+       if (!(q->limits.features & BLK_FEAT_POLL)) {
+               ret = 0;
+       } else if (queue_is_mq(q)) {
                ret = blk_mq_poll(q, cookie, iob, flags);
        } else {
                struct gendisk *disk = q->disk;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2e6132f778fd958aae3cad545e4b3dd623c9c304..02c9232a8fff9438990a86d8a0d112052827847a 100644
@@ -3096,14 +3096,22 @@ void blk_mq_submit_bio(struct bio *bio)
        }
 
        /*
-        * Device reconfiguration may change logical block size, so alignment
-        * check has to be done with queue usage counter held
+        * Device reconfiguration may change logical block size or reduce the
+        * number of poll queues, so the checks for alignment and poll support
+        * have to be done with queue usage counter held.
         */
        if (unlikely(bio_unaligned(bio, q))) {
                bio_io_error(bio);
                goto queue_exit;
        }
 
+       if ((bio->bi_opf & REQ_POLLED) &&
+           !(q->limits.features & BLK_FEAT_POLL)) {
+               bio->bi_status = BLK_STS_NOTSUPP;
+               bio_endio(bio);
+               goto queue_exit;
+       }
+
        bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
        if (!bio)
                goto queue_exit;
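
Taken together, the submit and poll paths now share the same reader-side
pattern: take a q_usage_counter reference first, then test BLK_FEAT_POLL.
The sketch below condenses the bio_poll() hunk for the blk-mq case only (the
!queue_is_mq branch and plug flushing are omitted, and the function name is
invented for illustration):

static int example_poll_if_supported(struct request_queue *q, blk_qc_t cookie,
                                     struct io_comp_batch *iob, unsigned int flags)
{
        int ret = 0;

        if (!percpu_ref_tryget(&q->q_usage_counter))
                return 0;
        /* The feature flag is stable for as long as the reference is held. */
        if (q->limits.features & BLK_FEAT_POLL)
                ret = blk_mq_poll(q, cookie, iob, flags);
        percpu_ref_put(&q->q_usage_counter);
        return ret;
}

Completing an unsupported polled bio with BLK_STS_NOTSUPP keeps the error
path trivial; clearing REQ_POLLED is unnecessary because the bio ends
immediately and the flag never matters for an instant error completion.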