From: Christoph Hellwig Date: Wed, 11 Aug 2021 09:53:17 +0000 (+0200) Subject: bio: add a per-cpu bio cache X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=c103edab7b27019963f1d4d25e7a8b96e0a06487;p=users%2Fhch%2Fblock.git bio: add a per-cpu bio cache Add an optional percpu frontend that keeps constructed bios in the bio_set. Very simple - keeps a count of bios in the cache, and maintains a max of 512 with a slack of 64. If we get above max + slack, we drop slack number of bios. Based on an earlier patch from Jens Axboe. Signed-off-by: Jens Axboe --- diff --git a/block/bio.c b/block/bio.c index 116cdc4efb0d..100a2cb959e2 100644 --- a/block/bio.c +++ b/block/bio.c @@ -25,6 +25,11 @@ #include "blk.h" #include "blk-rq-qos.h" +struct bio_alloc_cache { + struct bio_list free_list; + unsigned int nr; +}; + static struct biovec_slab { int nr_vecs; char *name; @@ -494,6 +499,29 @@ err_free: } EXPORT_SYMBOL(bio_alloc_bioset); +struct bio *bio_alloc_iocb(struct kiocb *iocb, unsigned short nr_vecs, + struct bio_set *bs) +{ + struct bio_alloc_cache *cache = NULL; + struct bio *bio; + + if (!(iocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS) + return bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs); + + cache = per_cpu_ptr(bs->cache, get_cpu()); + bio = bio_list_pop(&cache->free_list); + if (bio) { + bio_init(bio, nr_vecs ? 
bio->bi_inline_vecs : NULL, nr_vecs); + cache->nr--; + } + put_cpu(); + + if (!bio) + bio = bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs); + bio_set_flag(bio, BIO_CACHEABLE); + return bio; +} + /** * bio_kmalloc - kmalloc a bio for I/O * @gfp_mask: the GFP_* mask given to the slab allocator @@ -612,6 +640,45 @@ void guard_bio_eod(struct bio *bio) bio_truncate(bio, maxsector << 9); } +#define ALLOC_CACHE_MAX 512 +#define ALLOC_CACHE_SLACK 64 + +static void bio_alloc_cache_prune(struct bio_alloc_cache *cache, + unsigned int nr) +{ + struct bio *bio; + unsigned int i; + + i = 0; + while ((bio = bio_list_pop(&cache->free_list)) != NULL) { + cache->nr--; + bio_free(bio); + if (++i == nr) + break; + } +} + +#if 0 +// XXX: add a cpu down notifier to call this +void bio_alloc_cache_destroy(struct bio_alloc_cache *cache) +{ + bio_alloc_cache_prune(cache, -1U); +} +#endif + +static void bio_add_to_cache(struct bio *bio) +{ + struct bio_alloc_cache *cache; + + bio_uninit(bio); + + cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu()); + bio_list_add_head(&cache->free_list, bio); + if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK) + bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK); + put_cpu(); +} + /** * bio_put - release a reference to a bio * @bio: bio to release reference to @@ -622,17 +689,16 @@ void guard_bio_eod(struct bio *bio) **/ void bio_put(struct bio *bio) { - if (!bio_flagged(bio, BIO_REFFED)) - bio_free(bio); - else { + if (bio_flagged(bio, BIO_REFFED)) { BIO_BUG_ON(!atomic_read(&bio->__bi_cnt)); - - /* - * last put frees it - */ - if (atomic_dec_and_test(&bio->__bi_cnt)) - bio_free(bio); + if (!atomic_dec_and_test(&bio->__bi_cnt)) + return; } + + if (bio_flagged(bio, BIO_CACHEABLE)) + bio_add_to_cache(bio); + else + bio_free(bio); } EXPORT_SYMBOL(bio_put); @@ -1511,6 +1577,7 @@ int biovec_init_pool(mempool_t *pool, int pool_entries) */ void bioset_exit(struct bio_set *bs) { + free_percpu(bs->cache); if (bs->rescue_workqueue) 
destroy_workqueue(bs->rescue_workqueue); bs->rescue_workqueue = NULL; @@ -1572,12 +1639,18 @@ int bioset_init(struct bio_set *bs, biovec_init_pool(&bs->bvec_pool, pool_size)) goto bad; - if (!(flags & BIOSET_NEED_RESCUER)) - return 0; - - bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0); - if (!bs->rescue_workqueue) - goto bad; + if (flags & BIOSET_NEED_RESCUER) { + bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, + 0); + if (!bs->rescue_workqueue) + goto bad; + } + + if (flags & BIOSET_PERCPU_CACHE) { + bs->cache = alloc_percpu(struct bio_alloc_cache); + if (!bs->cache) + goto bad; + } return 0; bad: diff --git a/include/linux/bio.h b/include/linux/bio.h index 7b5f65a81f2b..5c89336a1bc6 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -400,6 +400,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors, enum { BIOSET_NEED_BVECS = BIT(0), BIOSET_NEED_RESCUER = BIT(1), + BIOSET_PERCPU_CACHE = BIT(2), }; extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags); extern void bioset_exit(struct bio_set *); @@ -408,6 +409,8 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs, struct bio_set *bs); +struct bio *bio_alloc_iocb(struct kiocb *iocb, unsigned short nr_vecs, + struct bio_set *bs); struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs); extern void bio_put(struct bio *); @@ -656,7 +659,7 @@ static inline void bio_inc_remaining(struct bio *bio) struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; - + struct bio_alloc_cache __percpu *cache; mempool_t bio_pool; mempool_t bvec_pool; #if defined(CONFIG_BLK_DEV_INTEGRITY) diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 1335efa8a1db..244ded83f5cf 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -297,6 +297,7 @@ enum { BIO_TRACKED, /* set if bio goes through the rq_qos 
path */ BIO_REMAPPED, BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */ + BIO_CACHEABLE, /* can be added to the percpu cache */ BIO_FLAG_LAST }; diff --git a/include/linux/fs.h b/include/linux/fs.h index 640574294216..58e313b2a10c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -319,6 +319,7 @@ enum rw_hint { /* iocb->ki_waitq is valid */ #define IOCB_WAITQ (1 << 19) #define IOCB_NOIO (1 << 20) +#define IOCB_ALLOC_CACHE (1 << 21) struct kiocb { struct file *ki_filp;