        size_t metadata_sectors;
        size_t n_blocks;
        uint64_t seq_count;
+       sector_t data_device_sectors;
        void *block_start;
        struct wc_entry *entries;
        unsigned block_size;
 
        wc_lock(wc);
 
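+       /* record the current data device size; writeback must not write past it */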
+       wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+
        if (WC_MODE_PMEM(wc)) {
                persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
        } else {
        void *address = memory_data(wc, e);
 
        persistent_memory_flush_cache(address, block_size);
+
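+       /* skip blocks that would be written past the end of the data device */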
+       if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
+               return true;
+
        return bio_add_page(&wb->bio, persistent_memory_page(address),
                            block_size, persistent_memory_page_offset(address)) != 0;
 }
                if (writecache_has_error(wc)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(&wb->bio);
+               } else if (unlikely(!bio_sectors(&wb->bio))) {
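+                       /* every block was past the end of the data device; complete the empty bio as success */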
+                       bio->bi_status = BLK_STS_OK;
+                       bio_endio(&wb->bio);
                } else {
                        submit_bio(&wb->bio);
                }
                        e = f;
                }
 
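+               /* clamp the copy to the end of the data device; skip it entirely if the destination starts past the end */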
+               if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
+                       if (to.sector >= wc->data_device_sectors) {
+                               writecache_copy_endio(0, 0, c);
+                               continue;
+                       }
+                       from.count = to.count = wc->data_device_sectors - to.sector;
+               }
+
                dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
 
                __writeback_throttle(wc, wbl);