block: fix queue limits checks in blk_rq_map_user_bvec for real
author Christoph Hellwig <hch@lst.de>
Mon, 28 Oct 2024 09:07:48 +0000 (10:07 +0100)
committer Jens Axboe <axboe@kernel.dk>
Mon, 28 Oct 2024 18:35:05 +0000 (12:35 -0600)
blk_rq_map_user_bvec currently only has ad-hoc checks for queue limits,
and the last fix to it enabled valid NVMe I/O to pass, but also allowed
invalid I/O for drivers that set a max_segment_size or seg_boundary
limit.

Fix it once and for all by using the bio_split_rw_at helper from the I/O
path, which indicates if and where a bio would have to be split to
adhere to the queue limits. If it returns a positive value, turn that
into -EREMOTEIO so the caller retries using the copy path (a sketch of
that fallback follows the tags below).

Fixes: 2ff949441802 ("block: fix sanity checks in blk_rq_map_user_bvec")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Garry <john.g.garry@oracle.com>
Link: https://lore.kernel.org/r/20241028090840.446180-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
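
For context, a minimal sketch of how the caller is expected to react to
the -EREMOTEIO return: when the zero-copy bvec path cannot satisfy the
queue limits, blk_rq_map_user_iov falls back to bounce-buffer copying.
This fragment is illustrative only and is not part of the patch below;
the exact control flow and the "copy" flag are assumptions based on the
existing mapping code, not something this commit changes.

        /* inside blk_rq_map_user_iov(): try the zero-copy bvec path first */
        if (iov_iter_is_bvec(iter)) {
                ret = blk_rq_map_user_bvec(rq, iter);
                if (!ret)
                        return 0;        /* bvecs fit the queue limits, done */
                if (ret != -EREMOTEIO)
                        goto fail;       /* hard error, give up */
                /* limits mismatch: fall back to copying the data */
                copy = true;
        }
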
block/blk-map.c

index 6ef2ec1f7d78bb4ffaa24934a364d321926273a5..b5fd1d8574615e28da206b6e12a1b81009279fff 100644 (file)
@@ -561,55 +561,33 @@ EXPORT_SYMBOL(blk_rq_append_bio);
 /* Prepare bio for passthrough IO given ITER_BVEC iter */
 static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
 {
-       struct request_queue *q = rq->q;
-       size_t nr_iter = iov_iter_count(iter);
-       size_t nr_segs = iter->nr_segs;
-       struct bio_vec *bvecs, *bvprvp = NULL;
-       const struct queue_limits *lim = &q->limits;
-       unsigned int nsegs = 0, bytes = 0;
+       const struct queue_limits *lim = &rq->q->limits;
+       unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
+       unsigned int nsegs;
        struct bio *bio;
-       size_t i;
+       int ret;
 
-       if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
-               return -EINVAL;
-       if (nr_segs > queue_max_segments(q))
+       if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
                return -EINVAL;
 
-       /* no iovecs to alloc, as we already have a BVEC iterator */
+       /* reuse the bvecs from the iterator instead of allocating new ones */
        bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
-       if (bio == NULL)
+       if (!bio)
                return -ENOMEM;
-
        bio_iov_bvec_set(bio, (struct iov_iter *)iter);
-       blk_rq_bio_prep(rq, bio, nr_segs);
-
-       /* loop to perform a bunch of sanity checks */
-       bvecs = (struct bio_vec *)iter->bvec;
-       for (i = 0; i < nr_segs; i++) {
-               struct bio_vec *bv = &bvecs[i];
-
-               /*
-                * If the queue doesn't support SG gaps and adding this
-                * offset would create a gap, fallback to copy.
-                */
-               if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
-                       blk_mq_map_bio_put(bio);
-                       return -EREMOTEIO;
-               }
-               /* check full condition */
-               if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
-                       goto put_bio;
-               if (bytes + bv->bv_len > nr_iter)
-                       break;
 
-               nsegs++;
-               bytes += bv->bv_len;
-               bvprvp = bv;
+       /* check that the data layout matches the hardware restrictions */
+       ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
+       if (ret) {
+               /* if we would have to split the bio, copy instead */
+               if (ret > 0)
+                       ret = -EREMOTEIO;
+               blk_mq_map_bio_put(bio);
+               return ret;
        }
+
+       blk_rq_bio_prep(rq, bio, nsegs);
        return 0;
-put_bio:
-       blk_mq_map_bio_put(bio);
-       return -EINVAL;
 }
 
 /**