From: Christoph Hellwig
Date: Sat, 5 Oct 2024 04:31:50 +0000 (+0200)
Subject: blk-mq: add a dma mapping iterator
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=8273e482f73d244c61a750e8b3757429c3d898df;p=users%2Fhch%2Fblock.git

blk-mq: add a dma mapping iterator

blk_rq_map_sg is a maze of nested loops.  Untangle it by creating an
iterator that returns [paddr,len] tuples for DMA mapping, and then
implement the DMA logic on top of this.  This not only removes code at
the source level, but also generates nicer binary code:

$ size block/blk-merge.o.*
   text	   data	    bss	    dec	    hex	filename
  10001	    432	      0	  10433	   28c1	block/blk-merge.o.new
  10317	    468	      0	  10785	   2a21	block/blk-merge.o.old

Last but not least it will be used as a building block for a new DMA
mapping helper that doesn't rely on struct scatterlist.

Signed-off-by: Christoph Hellwig
---

diff --git a/block/blk-merge.c b/block/blk-merge.c
index ad763ec313b6..d77d62815c0f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -451,137 +451,112 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
 	return nr_phys_segs;
 }
 
-static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
-		struct scatterlist *sglist)
-{
-	if (!*sg)
-		return sglist;
-
-	/*
-	 * If the driver previously mapped a shorter list, we could see a
-	 * termination bit prematurely unless it fully inits the sg table
-	 * on each mapping. We KNOW that there must be more entries here
-	 * or the driver would be buggy, so force clear the termination bit
-	 * to avoid doing a full sg_init_table() in drivers for each command.
-	 */
-	sg_unmark_end(*sg);
-	return sg_next(*sg);
-}
+struct phys_vec {
+	phys_addr_t paddr;
+	u32 len;
+};
+
+#define DEFINE_REQ_ITERATOR(_iter, _rq) \
+	struct req_iterator (_iter) = { \
+		.bio = (_rq)->bio, \
+		.iter = (_rq)->bio->bi_iter, \
+	}
 
-static unsigned blk_bvec_map_sg(struct request_queue *q,
-		struct bio_vec *bvec, struct scatterlist *sglist,
-		struct scatterlist **sg)
+static void blk_map_iter_next_bio(struct request *req,
+		struct req_iterator *iter, struct bio_vec *prev)
 {
-	unsigned nbytes = bvec->bv_len;
-	unsigned nsegs = 0, total = 0;
-
-	while (nbytes > 0) {
-		unsigned offset = bvec->bv_offset + total;
-		unsigned len = get_max_segment_size(&q->limits,
-				bvec_phys(bvec) + total, nbytes);
-		struct page *page = bvec->bv_page;
-
-		/*
-		 * Unfortunately a fair number of drivers barf on scatterlists
-		 * that have an offset larger than PAGE_SIZE, despite other
-		 * subsystems dealing with that invariant just fine. For now
-		 * stick to the legacy format where we never present those from
-		 * the block layer, but the code below should be removed once
-		 * these offenders (mostly MMC/SD drivers) are fixed.
-		 */
-		page += (offset >> PAGE_SHIFT);
-		offset &= ~PAGE_MASK;
+	struct bio_vec next;
 
-		*sg = blk_next_sg(sg, sglist);
-		sg_set_page(*sg, page, len, offset);
+	iter->bio = iter->bio->bi_next;
+	if (!iter->bio)
+		return;
+	iter->iter = iter->bio->bi_iter;
 
-		total += len;
-		nbytes -= len;
-		nsegs++;
+	/*
+	 * Check if the first bvec in the new bio can be merged into the last
+	 * one of the previous bio. All other merging is done when adding
+	 * data to the bio.
+	 */
+	next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+	if (prev->bv_len + next.bv_len <= queue_max_segment_size(req->q) &&
+	    biovec_phys_mergeable(req->q, prev, &next)) {
+		prev->bv_len += next.bv_len;
+		bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
 	}
-
-	return nsegs;
-}
-
-static inline int __blk_bvec_map_sg(struct bio_vec bv,
-		struct scatterlist *sglist, struct scatterlist **sg)
-{
-	*sg = blk_next_sg(sg, sglist);
-	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
-	return 1;
 }
 
-/* only try to merge bvecs into one sg if they are from two bios */
-static inline bool
-__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
-		struct bio_vec *bvprv, struct scatterlist **sg)
+static bool blk_map_iter_next(struct request *req,
+		struct req_iterator *iter, struct phys_vec *vec)
 {
+	struct bio_vec bv;
-	int nbytes = bvec->bv_len;
-
-	if (!*sg)
-		return false;
+	/*
+	 * For special payload requests there is only a single segment.  Return
+	 * it now and make sure the next call to blk_map_iter_next stops
+	 * iterating.
+	 */
+	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+		if (!iter->bio)
+			return false;
+		vec->paddr = bvec_phys(&req->special_vec);
+		vec->len = req->special_vec.bv_len;
+		iter->bio = NULL;
+		return true;
+	}
 
-	if ((*sg)->length + nbytes > queue_max_segment_size(q))
+	if (!iter->iter.bi_size)
 		return false;
 
-	if (!biovec_phys_mergeable(q, bvprv, bvec))
-		return false;
+	bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+	vec->paddr = bvec_phys(&bv);
+	if (bv.bv_offset + bv.bv_len > PAGE_SIZE) {
+		bv.bv_len = get_max_segment_size(&req->q->limits, vec->paddr,
+				bv.bv_len);
+	}
+	bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);
 
-	(*sg)->length += nbytes;
+	while (!iter->iter.bi_size && iter->bio)
+		blk_map_iter_next_bio(req, iter, &bv);
+	vec->len = bv.bv_len;
 	return true;
 }
 
-static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
-		struct scatterlist *sglist,
-		struct scatterlist **sg)
+static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
+		struct scatterlist *sglist)
 {
-	struct bio_vec bvec, bvprv = { NULL };
-	struct bvec_iter iter;
-	int nsegs = 0;
-	bool new_bio = false;
-
-	for_each_bio(bio) {
-		bio_for_each_bvec(bvec, bio, iter) {
-			/*
-			 * Only try to merge bvecs from two bios given we
-			 * have done bio internal merge when adding pages
-			 * to bio
-			 */
-			if (new_bio &&
-			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
-				goto next_bvec;
-
-			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
-				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
-			else
-				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
- next_bvec:
-			new_bio = false;
-		}
-		if (likely(bio->bi_iter.bi_size)) {
-			bvprv = bvec;
-			new_bio = true;
-		}
-	}
+	if (!*sg)
+		return sglist;
 
-	return nsegs;
+	/*
+	 * If the driver previously mapped a shorter list, we could see a
+	 * termination bit prematurely unless it fully inits the sg table
+	 * on each mapping.  We KNOW that there must be more entries here
+	 * or the driver would be buggy, so force clear the termination bit
+	 * to avoid doing a full sg_init_table() in drivers for each command.
+	 */
+	sg_unmark_end(*sg);
+	return sg_next(*sg);
 }
 
 /*
- * map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_phys_segments entries
+ * Map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries.
  */
 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		struct scatterlist *sglist, struct scatterlist **last_sg)
 {
+	DEFINE_REQ_ITERATOR(iter, rq);
+	struct phys_vec vec;
 	int nsegs = 0;
 
-	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
-		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
-	else if (rq->bio)
-		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
+	while (blk_map_iter_next(rq, &iter, &vec)) {
+		struct page *page = pfn_to_page(__phys_to_pfn(vec.paddr));
+		unsigned int offset = offset_in_page(vec.paddr);
+
+		*last_sg = blk_next_sg(last_sg, sglist);
+		sg_set_page(*last_sg, page, vec.len, offset);
+		nsegs++;
+	}
 
 	if (*last_sg)
 		sg_mark_end(*last_sg);