From 15fb41c6e1a5d76fe5c284369c36702b03b7d0a8 Mon Sep 17 00:00:00 2001
From: Mikulas Patocka <mpatocka@redhat.com>
Date: Wed, 26 May 2021 10:29:45 -0400
Subject: [PATCH] dm writecache: don't split bios when overwriting contiguous cache content

If dm-writecache overwrites existing cached data, it splits the incoming
bio into many block-sized bios. The I/O scheduler does merge these bios
into one large request but this needless splitting and merging causes
performance degradation.

Fix this by avoiding bio splitting if the cache target area that is being
overwritten is contiguous.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
---
 drivers/md/dm-writecache.c | 38 ++++++++++++++++++++++++++++++--------
 1 file changed, 30 insertions(+), 8 deletions(-)

diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 30f5704ad65a..4a7891be2f0a 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1360,14 +1360,18 @@ read_next_block:
 	} else {
 		do {
 			bool found_entry = false;
+			bool search_used = false;
 			if (writecache_has_error(wc))
 				goto unlock_error;
 			e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
 			if (e) {
-				if (!writecache_entry_is_committed(wc, e))
+				if (!writecache_entry_is_committed(wc, e)) {
+					search_used = true;
 					goto bio_copy;
+				}
 				if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
 					wc->overwrote_committed = true;
+					search_used = true;
 					goto bio_copy;
 				}
 				found_entry = true;
@@ -1404,13 +1408,31 @@ bio_copy:
 			sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
 
 			while (bio_size < bio->bi_iter.bi_size) {
-				struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
-				if (!f)
-					break;
-				write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
-								(bio_size >> SECTOR_SHIFT), wc->seq_count);
-				writecache_insert_entry(wc, f);
-				wc->uncommitted_blocks++;
+				if (!search_used) {
+					struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
+					if (!f)
+						break;
+					write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
+									(bio_size >> SECTOR_SHIFT), wc->seq_count);
+					writecache_insert_entry(wc, f);
+					wc->uncommitted_blocks++;
+				} else {
+					struct wc_entry *f;
+					struct rb_node *next = rb_next(&e->rb_node);
+					if (!next)
+						break;
+					f = container_of(next, struct wc_entry, rb_node);
+					if (f != e + 1)
+						break;
+					if (read_original_sector(wc, f) !=
+					    read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
+						break;
+					if (unlikely(f->write_in_progress))
+						break;
+					if (writecache_entry_is_committed(wc, f))
+						wc->overwrote_committed = true;
+					e = f;
+				}
 				bio_size += wc->block_size;
 				current_cache_sec += wc->block_size >> SECTOR_SHIFT;
 			}
-- 
2.50.1
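
Note: the following is a minimal standalone sketch of the contiguity walk the patch adds, not the kernel code. It uses a plain sorted array instead of dm-writecache's rb-tree, and the names (struct entry, struct cache_state, contiguous_overwrite_blocks, BLOCK_SECTORS) are illustrative assumptions. It only shows the idea: starting from the cache entry the write hits, keep extending the copy region while the next cached entry is adjacent both in cache placement and in origin-device sectors, so one large copy replaces many block-sized ones.

/* Sketch only: simplified model of the "search_used" branch above. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BLOCK_SECTORS 8	/* 4KiB blocks, 512-byte sectors (illustrative) */

struct entry {
	unsigned long long original_sector;	/* where the block lives on the origin device */
	bool write_in_progress;
	bool committed;
};

struct cache_state {
	struct entry *entries;	/* sorted by original_sector; index doubles as cache slot */
	size_t n_entries;
	bool overwrote_committed;
};

/*
 * Count how many blocks, starting at index 'first', can be overwritten as one
 * contiguous region. Mirrors the new loop body: stop when there is no next
 * entry, when it maps a non-adjacent origin sector, or when it is busy.
 */
static size_t contiguous_overwrite_blocks(struct cache_state *cs, size_t first,
					  size_t blocks_wanted)
{
	size_t n = 1;

	while (n < blocks_wanted) {
		struct entry *e = &cs->entries[first + n - 1];
		struct entry *f;

		if (first + n >= cs->n_entries)
			break;			/* no next entry at all */
		f = &cs->entries[first + n];	/* the kernel's "f != e + 1" slot check is trivial in an array model */
		if (f->original_sector != e->original_sector + BLOCK_SECTORS)
			break;			/* not logically adjacent on the origin */
		if (f->write_in_progress)
			break;			/* block is busy, fall back to splitting */
		if (f->committed)
			cs->overwrote_committed = true;
		n++;
	}
	return n;
}

int main(void)
{
	struct entry cache[4] = {
		{ .original_sector = 0,  .committed = true },
		{ .original_sector = 8,  .committed = true },
		{ .original_sector = 16, .committed = false },
		{ .original_sector = 64, .committed = false },	/* not adjacent */
	};
	struct cache_state cs = { .entries = cache, .n_entries = 4 };

	/* A write hitting slot 0 can overwrite the first 3 blocks in one go. */
	printf("contiguous blocks: %zu\n", contiguous_overwrite_blocks(&cs, 0, 4));
	printf("overwrote committed: %d\n", cs.overwrote_committed);
	return 0;
}

Under these assumptions, a 16KiB write that lands on slot 0 is handled as one 12KiB overwrite plus the remainder, rather than four single-block bios; the real patch achieves the same effect by advancing 'e' along rb_next() instead of splitting the bio.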