www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
blk-integrity: improved sg segment mapping
authorKeith Busch <kbusch@kernel.org>
Fri, 13 Sep 2024 19:17:46 +0000 (12:17 -0700)
committerJens Axboe <axboe@kernel.dk>
Fri, 13 Sep 2024 19:22:09 +0000 (13:22 -0600)
Make the integrity mapping more like data mapping, blk_rq_map_sg. Use
the request to validate the segment count, and update the callers so
they don't have to.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Link: https://lore.kernel.org/r/20240913191746.2628196-1-kbusch@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-integrity.c
drivers/nvme/host/rdma.c
drivers/scsi/scsi_lib.c
include/linux/blk-integrity.h

index 1d82b18e06f8e8c137f1ab8ff8a1dbdd154ffb84..0a2b1c5d0ebf1281f7ab2101b3cc64736530348c 100644 (file)
@@ -62,19 +62,20 @@ new_segment:
  *
  * Description: Map the integrity vectors in request into a
  * scatterlist.  The scatterlist must be big enough to hold all
- * elements.  I.e. sized using blk_rq_count_integrity_sg().
+ * elements.  I.e. sized using blk_rq_count_integrity_sg() or
+ * rq->nr_integrity_segments.
  */
-int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
-                           struct scatterlist *sglist)
+int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
 {
        struct bio_vec iv, ivprv = { NULL };
+       struct request_queue *q = rq->q;
        struct scatterlist *sg = NULL;
+       struct bio *bio = rq->bio;
        unsigned int segments = 0;
        struct bvec_iter iter;
        int prev = 0;
 
        bio_for_each_integrity_vec(iv, bio, iter) {
-
                if (prev) {
                        if (!biovec_phys_mergeable(q, &ivprv, &iv))
                                goto new_segment;
@@ -102,6 +103,12 @@ new_segment:
        if (sg)
                sg_mark_end(sg);
 
+       /*
+        * Something must have gone wrong if the computed number of segments
+        * is bigger than the number of the request's physical integrity
+        * segments.
+        */
+       BUG_ON(segments > rq->nr_integrity_segments);
+       BUG_ON(segments > queue_max_integrity_segments(q));
        return segments;
 }
 EXPORT_SYMBOL(blk_rq_map_integrity_sg);
index 256466bdaee7cd75a9a65ffac033b288243dbfd2..c8fd0e8f023754f5403ca1f574373276b566a43a 100644 (file)
@@ -1504,8 +1504,8 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
                        goto out_unmap_sg;
                }
 
-               req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
-                               rq->bio, req->metadata_sgl->sg_table.sgl);
+               req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq,
+                               req->metadata_sgl->sg_table.sgl);
                *pi_count = ib_dma_map_sg(ibdev,
                                          req->metadata_sgl->sg_table.sgl,
                                          req->metadata_sgl->nents,
index c602b0af745caa482fbe2c9e0cdf7ae41be9bfbb..c2f6d0e1c03e776cfa66511dad018212d2ba0d82 100644 (file)
@@ -1163,7 +1163,6 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
 
        if (blk_integrity_rq(rq)) {
                struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
-               int ivecs;
 
                if (WARN_ON_ONCE(!prot_sdb)) {
                        /*
@@ -1175,19 +1174,15 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
                        goto out_free_sgtables;
                }
 
-               ivecs = rq->nr_integrity_segments;
-               if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
+               if (sg_alloc_table_chained(&prot_sdb->table,
+                               rq->nr_integrity_segments,
                                prot_sdb->table.sgl,
                                SCSI_INLINE_PROT_SG_CNT)) {
                        ret = BLK_STS_RESOURCE;
                        goto out_free_sgtables;
                }
 
-               count = blk_rq_map_integrity_sg(rq->q, rq->bio,
-                                               prot_sdb->table.sgl);
-               BUG_ON(count > ivecs);
-               BUG_ON(count > queue_max_integrity_segments(rq->q));
-
+               count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl);
                cmd->prot_sdb = prot_sdb;
                cmd->prot_sdb->table.nents = count;
        }
index 793dbb1e0672dc3dd15496caf489413e93de6fe3..676f8f860c4748660aa1bda0f809f982b527f61a 100644 (file)
@@ -25,8 +25,7 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
-int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
-                                  struct scatterlist *);
+int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
 int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
 int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
                              ssize_t bytes, u32 seed);
@@ -98,8 +97,7 @@ static inline int blk_rq_count_integrity_sg(struct request_queue *q,
 {
        return 0;
 }
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
-                                         struct bio *b,
+static inline int blk_rq_map_integrity_sg(struct request *q,
                                          struct scatterlist *s)
 {
        return 0;