block: remove the block layer bounce buffering code block-remove-bounce
author Christoph Hellwig <hch@lst.de>
Fri, 30 Dec 2022 16:52:31 +0000 (06:52 -1000)
committer Christoph Hellwig <hch@lst.de>
Fri, 30 Dec 2022 17:15:09 +0000 (07:15 -1000)
Signed-off-by: Christoph Hellwig <hch@lst.de>
15 files changed:
block/Makefile
block/blk-map.c
block/blk-mq.c
block/blk-settings.c
block/blk.h
block/bounce.c [deleted file]
drivers/base/node.c
fs/proc/meminfo.c
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/mmzone.h
include/trace/events/block.h
kernel/trace/blktrace.c
mm/Kconfig
mm/page_alloc.c

index 4e01bb71ad6e07b7176caf277724a35b54fa4056..db7936a7ebdb4798e08d9162bda771eb3c8d0378 100644 (file)
@@ -11,7 +11,6 @@ obj-y         := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \
                        genhd.o ioprio.o badblocks.o partitions/ blk-rq-qos.o \
                        disk-events.o blk-ia-ranges.o
 
-obj-$(CONFIG_BOUNCE)           += bounce.o
 obj-$(CONFIG_BLK_DEV_BSG_COMMON) += bsg.o
 obj-$(CONFIG_BLK_DEV_BSGLIB)   += bsg-lib.o
 obj-$(CONFIG_BLK_CGROUP)       += blk-cgroup.o
index 19940c978c73bba2fb1bfb74e7a1371e83a23a96..3a2ece60041ee5f73dd9cabe77180af6b5a3fa54 100644 (file)
@@ -635,8 +635,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 
        if (map_data)
                copy = true;
-       else if (blk_queue_may_bounce(q))
-               copy = true;
        else if (iov_iter_alignment(iter) & align)
                copy = true;
        else if (iov_iter_is_bvec(iter))
@@ -787,8 +785,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        if (!len || !kbuf)
                return -EINVAL;
 
-       if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
-           blk_queue_may_bounce(q))
+       if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);
index c5cf0dbca1db8dcae3930451b0ba56161e91753c..195496b6c20a5feb7c3b992c32b9abc6f9be4bde 100644 (file)
@@ -2950,7 +2950,6 @@ void blk_mq_submit_bio(struct bio *bio)
        unsigned int nr_segs = 1;
        blk_status_t ret;
 
-       bio = blk_queue_bounce(bio, q);
        if (bio_may_exceed_limits(bio, &q->limits))
                bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
 
index 0477c4d527fee33f2eef567641cb36eb6be42c92..3ef2bcf3dbc47b0f75924ce2c3b7a774b738b363 100644 (file)
@@ -51,7 +51,6 @@ void blk_set_default_limits(struct queue_limits *lim)
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
-       lim->bounce = BLK_BOUNCE_NONE;
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
@@ -84,22 +83,6 @@ void blk_set_stacking_limits(struct queue_limits *lim)
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
 
-/**
- * blk_queue_bounce_limit - set bounce buffer limit for queue
- * @q: the request queue for the device
- * @bounce: bounce limit to enforce
- *
- * Description:
- *    Force bouncing for ISA DMA ranges or highmem.
- *
- *    DEPRECATED, don't use in new code.
- **/
-void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
-{
-       q->limits.bounce = bounce;
-}
-EXPORT_SYMBOL(blk_queue_bounce_limit);
-
 /**
  * blk_queue_max_hw_sectors - set max sectors for a request for this queue
  * @q:  the request queue for the device
@@ -556,7 +539,6 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                        b->max_write_zeroes_sectors);
        t->max_zone_append_sectors = min(t->max_zone_append_sectors,
                                        b->max_zone_append_sectors);
-       t->bounce = max(t->bounce, b->bounce);
 
        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
index 4c3b3325219a5b3424201d5e8897bc77bb5c3279..af406dc7f6f27a5b192cc9ca5582ce4ad2fadc32 100644 (file)
@@ -3,7 +3,6 @@
 #define BLK_INTERNAL_H
 
 #include <linux/blk-crypto.h>
-#include <linux/memblock.h>    /* for max_pfn/max_low_pfn */
 #include <xen/xen.h>
 #include "blk-crypto-internal.h"
 
@@ -375,23 +374,6 @@ static inline void blk_throtl_bio_endio(struct bio *bio) { }
 static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
 #endif
 
-struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);
-
-static inline bool blk_queue_may_bounce(struct request_queue *q)
-{
-       return IS_ENABLED(CONFIG_BOUNCE) &&
-               q->limits.bounce == BLK_BOUNCE_HIGH &&
-               max_low_pfn >= max_pfn;
-}
-
-static inline struct bio *blk_queue_bounce(struct bio *bio,
-               struct request_queue *q)
-{
-       if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
-               return __blk_queue_bounce(bio, q);
-       return bio;
-}
-
 #ifdef CONFIG_BLK_CGROUP_IOLATENCY
 int blk_iolatency_init(struct gendisk *disk);
 #else
diff --git a/block/bounce.c b/block/bounce.c
deleted file mode 100644 (file)
index 7cfcb24..0000000
+++ /dev/null
@@ -1,268 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* bounce buffer handling for block devices
- *
- * - Split from highmem.c
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/swap.h>
-#include <linux/gfp.h>
-#include <linux/bio.h>
-#include <linux/pagemap.h>
-#include <linux/mempool.h>
-#include <linux/blkdev.h>
-#include <linux/backing-dev.h>
-#include <linux/init.h>
-#include <linux/hash.h>
-#include <linux/highmem.h>
-#include <linux/printk.h>
-#include <asm/tlbflush.h>
-
-#include <trace/events/block.h>
-#include "blk.h"
-#include "blk-cgroup.h"
-
-#define POOL_SIZE      64
-#define ISA_POOL_SIZE  16
-
-static struct bio_set bounce_bio_set, bounce_bio_split;
-static mempool_t page_pool;
-
-static void init_bounce_bioset(void)
-{
-       static bool bounce_bs_setup;
-       int ret;
-
-       if (bounce_bs_setup)
-               return;
-
-       ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
-       BUG_ON(ret);
-       if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
-               BUG_ON(1);
-
-       ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
-       BUG_ON(ret);
-       bounce_bs_setup = true;
-}
-
-static __init int init_emergency_pool(void)
-{
-       int ret;
-
-#ifndef CONFIG_MEMORY_HOTPLUG
-       if (max_pfn <= max_low_pfn)
-               return 0;
-#endif
-
-       ret = mempool_init_page_pool(&page_pool, POOL_SIZE, 0);
-       BUG_ON(ret);
-       pr_info("pool size: %d pages\n", POOL_SIZE);
-
-       init_bounce_bioset();
-       return 0;
-}
-
-__initcall(init_emergency_pool);
-
-/*
- * Simple bounce buffer support for highmem pages. Depending on the
- * queue gfp mask set, *to may or may not be a highmem page. kmap it
- * always, it will do the Right Thing
- */
-static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
-{
-       struct bio_vec tovec, fromvec;
-       struct bvec_iter iter;
-       /*
-        * The bio of @from is created by bounce, so we can iterate
-        * its bvec from start to end, but the @from->bi_iter can't be
-        * trusted because it might be changed by splitting.
-        */
-       struct bvec_iter from_iter = BVEC_ITER_ALL_INIT;
-
-       bio_for_each_segment(tovec, to, iter) {
-               fromvec = bio_iter_iovec(from, from_iter);
-               if (tovec.bv_page != fromvec.bv_page) {
-                       /*
-                        * fromvec->bv_offset and fromvec->bv_len might have
-                        * been modified by the block layer, so use the original
-                        * copy, bounce_copy_vec already uses tovec->bv_len
-                        */
-                       memcpy_to_bvec(&tovec, page_address(fromvec.bv_page) +
-                                      tovec.bv_offset);
-               }
-               bio_advance_iter(from, &from_iter, tovec.bv_len);
-       }
-}
-
-static void bounce_end_io(struct bio *bio)
-{
-       struct bio *bio_orig = bio->bi_private;
-       struct bio_vec *bvec, orig_vec;
-       struct bvec_iter orig_iter = bio_orig->bi_iter;
-       struct bvec_iter_all iter_all;
-
-       /*
-        * free up bounce indirect pages used
-        */
-       bio_for_each_segment_all(bvec, bio, iter_all) {
-               orig_vec = bio_iter_iovec(bio_orig, orig_iter);
-               if (bvec->bv_page != orig_vec.bv_page) {
-                       dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
-                       mempool_free(bvec->bv_page, &page_pool);
-               }
-               bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
-       }
-
-       bio_orig->bi_status = bio->bi_status;
-       bio_endio(bio_orig);
-       bio_put(bio);
-}
-
-static void bounce_end_io_write(struct bio *bio)
-{
-       bounce_end_io(bio);
-}
-
-static void bounce_end_io_read(struct bio *bio)
-{
-       struct bio *bio_orig = bio->bi_private;
-
-       if (!bio->bi_status)
-               copy_to_high_bio_irq(bio_orig, bio);
-
-       bounce_end_io(bio);
-}
-
-static struct bio *bounce_clone_bio(struct bio *bio_src)
-{
-       struct bvec_iter iter;
-       struct bio_vec bv;
-       struct bio *bio;
-
-       /*
-        * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
-        * bio_src->bi_io_vec to bio->bi_io_vec.
-        *
-        * We can't do that anymore, because:
-        *
-        *  - The point of cloning the biovec is to produce a bio with a biovec
-        *    the caller can modify: bi_idx and bi_bvec_done should be 0.
-        *
-        *  - The original bio could've had more than BIO_MAX_VECS biovecs; if
-        *    we tried to clone the whole thing bio_alloc_bioset() would fail.
-        *    But the clone should succeed as long as the number of biovecs we
-        *    actually need to allocate is fewer than BIO_MAX_VECS.
-        *
-        *  - Lastly, bi_vcnt should not be looked at or relied upon by code
-        *    that does not own the bio - reason being drivers don't use it for
-        *    iterating over the biovec anymore, so expecting it to be kept up
-        *    to date (i.e. for clones that share the parent biovec) is just
-        *    asking for trouble and would force extra work.
-        */
-       bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src),
-                              bio_src->bi_opf, GFP_NOIO, &bounce_bio_set);
-       if (bio_flagged(bio_src, BIO_REMAPPED))
-               bio_set_flag(bio, BIO_REMAPPED);
-       bio->bi_ioprio          = bio_src->bi_ioprio;
-       bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
-       bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;
-
-       switch (bio_op(bio)) {
-       case REQ_OP_DISCARD:
-       case REQ_OP_SECURE_ERASE:
-       case REQ_OP_WRITE_ZEROES:
-               break;
-       default:
-               bio_for_each_segment(bv, bio_src, iter)
-                       bio->bi_io_vec[bio->bi_vcnt++] = bv;
-               break;
-       }
-
-       if (bio_crypt_clone(bio, bio_src, GFP_NOIO) < 0)
-               goto err_put;
-
-       if (bio_integrity(bio_src) &&
-           bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0)
-               goto err_put;
-
-       bio_clone_blkg_association(bio, bio_src);
-
-       return bio;
-
-err_put:
-       bio_put(bio);
-       return NULL;
-}
-
-struct bio *__blk_queue_bounce(struct bio *bio_orig, struct request_queue *q)
-{
-       struct bio *bio;
-       int rw = bio_data_dir(bio_orig);
-       struct bio_vec *to, from;
-       struct bvec_iter iter;
-       unsigned i = 0, bytes = 0;
-       bool bounce = false;
-       int sectors;
-
-       bio_for_each_segment(from, bio_orig, iter) {
-               if (i++ < BIO_MAX_VECS)
-                       bytes += from.bv_len;
-               if (PageHighMem(from.bv_page))
-                       bounce = true;
-       }
-       if (!bounce)
-               return bio_orig;
-
-       /*
-        * Individual bvecs might not be logical block aligned. Round down
-        * the split size so that each bio is properly block size aligned,
-        * even if we do not use the full hardware limits.
-        */
-       sectors = ALIGN_DOWN(bytes, queue_logical_block_size(q)) >>
-                       SECTOR_SHIFT;
-       if (sectors < bio_sectors(bio_orig)) {
-               bio = bio_split(bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
-               bio_chain(bio, bio_orig);
-               submit_bio_noacct(bio_orig);
-               bio_orig = bio;
-       }
-       bio = bounce_clone_bio(bio_orig);
-
-       /*
-        * Bvec table can't be updated by bio_for_each_segment_all(),
-        * so retrieve bvec from the table directly. This way is safe
-        * because the 'bio' is single-page bvec.
-        */
-       for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
-               struct page *bounce_page;
-
-               if (!PageHighMem(to->bv_page))
-                       continue;
-
-               bounce_page = mempool_alloc(&page_pool, GFP_NOIO);
-               inc_zone_page_state(bounce_page, NR_BOUNCE);
-
-               if (rw == WRITE) {
-                       flush_dcache_page(to->bv_page);
-                       memcpy_from_bvec(page_address(bounce_page), to);
-               }
-               to->bv_page = bounce_page;
-       }
-
-       trace_block_bio_bounce(bio_orig);
-
-       bio->bi_flags |= (1 << BIO_BOUNCED);
-
-       if (rw == READ)
-               bio->bi_end_io = bounce_end_io_read;
-       else
-               bio->bi_end_io = bounce_end_io_write;
-
-       bio->bi_private = bio_orig;
-       return bio;
-}
index faf3597a96da9d7b9122bda47687038254f2c793..a4f4c749544effa762fcbffff4f01b306f20ac7e 100644 (file)
@@ -463,7 +463,7 @@ static ssize_t node_read_meminfo(struct device *dev,
                             nid, K(node_page_state(pgdat, NR_PAGETABLE)),
                             nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
                             nid, 0UL,
-                            nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
+                            nid, 0UL,
                             nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
                             nid, K(sreclaimable +
                                    node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
index 440960110a4257dc0ed2d2c2c94b31887e413ca9..b4b2018bed5eb747f993f09d80ec0a03d43da3fd 100644 (file)
@@ -119,8 +119,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                    global_node_page_state(NR_SECONDARY_PAGETABLE));
 
        show_val_kb(m, "NFS_Unstable:   ", 0);
-       show_val_kb(m, "Bounce:         ",
-                   global_zone_page_state(NR_BOUNCE));
+       show_val_kb(m, "Bounce:         ", 0);
        show_val_kb(m, "WritebackTmp:   ",
                    global_node_page_state(NR_WRITEBACK_TEMP));
        show_val_kb(m, "CommitLimit:    ", vm_commit_limit());
index 99be590f952f6e69d47900b970f05f0195e98798..9440482ac1299102dd54328ff4294f27dda180eb 100644 (file)
@@ -320,7 +320,6 @@ struct bio {
 enum {
        BIO_NO_PAGE_REF,        /* don't put release vec pages */
        BIO_CLONED,             /* doesn't own data */
-       BIO_BOUNCED,            /* bio is a bounce bio */
        BIO_QUIET,              /* Make BIO Quiet */
        BIO_CHAIN,              /* chained bio, ->bi_remaining in effect */
        BIO_REFFED,             /* bio has elevated ->bi_cnt */
index 301cf1cf4f2facbe4ad3e44c6a166761437c25bd..204f5296f6a44a31307f6340862c9db3b0952891 100644 (file)
@@ -270,17 +270,7 @@ enum blk_zoned_model {
        BLK_ZONED_HM,           /* Host-managed zoned block device */
 };
 
-/*
- * BLK_BOUNCE_NONE:    never bounce (default)
- * BLK_BOUNCE_HIGH:    bounce all highmem pages
- */
-enum blk_bounce {
-       BLK_BOUNCE_NONE,
-       BLK_BOUNCE_HIGH,
-};
-
 struct queue_limits {
-       enum blk_bounce         bounce;
        unsigned long           seg_boundary_mask;
        unsigned long           virt_boundary_mask;
 
@@ -905,7 +895,6 @@ static inline unsigned int blk_chunk_sectors_left(sector_t offset,
 /*
  * Access functions for manipulating queue properties
  */
-void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
index cd28a100d9e4f7fd8cfe16068aab68a914d80729..0797af0ec14f1cee0b0c126619aea8cc88e06974 100644 (file)
@@ -147,7 +147,6 @@ enum zone_stat_item {
        NR_ZONE_WRITE_PENDING,  /* Count of dirty, writeback and unstable pages */
        NR_MLOCK,               /* mlock()ed pages found and moved off LRU */
        /* Second 128 byte cacheline */
-       NR_BOUNCE,
 #if IS_ENABLED(CONFIG_ZSMALLOC)
        NR_ZSPAGES,             /* allocated in zsmalloc */
 #endif
index 7f4dfbdf12a6f1c531c145e59f5311ea83f9e419..10bf986575857fa8f99bb912e4d7ac92cc1e2346 100644 (file)
@@ -309,21 +309,6 @@ DECLARE_EVENT_CLASS(block_bio,
                  __entry->nr_sector, __entry->comm)
 );
 
-/**
- * block_bio_bounce - used bounce buffer when processing block operation
- * @bio: block operation
- *
- * A bounce buffer was used to handle the block operation @bio in @q.
- * This occurs when hardware limitations prevent a direct transfer of
- * data between the @bio data memory area and the IO device.  Use of a
- * bounce buffer requires extra copying of data and decreases
- * performance.
- */
-DEFINE_EVENT(block_bio, block_bio_bounce,
-       TP_PROTO(struct bio *bio),
-       TP_ARGS(bio)
-);
-
 /**
  * block_bio_backmerge - merging block operation to the end of an existing operation
  * @bio: new block operation to merge
index 918a7d12df8ff6315fa1883b8197f800352891b6..31ec3a3c77b72d9d185e9af34db58b2350bb98db 100644 (file)
@@ -910,11 +910,6 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
        rcu_read_unlock();
 }
 
-static void blk_add_trace_bio_bounce(void *ignore, struct bio *bio)
-{
-       blk_add_trace_bio(bio->bi_bdev->bd_disk->queue, bio, BLK_TA_BOUNCE, 0);
-}
-
 static void blk_add_trace_bio_complete(void *ignore,
                                       struct request_queue *q, struct bio *bio)
 {
@@ -1106,8 +1101,6 @@ static void blk_register_tracepoints(void)
        WARN_ON(ret);
        ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
        WARN_ON(ret);
-       ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
-       WARN_ON(ret);
        ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
        WARN_ON(ret);
        ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
@@ -1142,7 +1135,6 @@ static void blk_unregister_tracepoints(void)
        unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
        unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
        unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
-       unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
        unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
        unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
        unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
index ff7b209dec05574e2af34147139fd678ea207d57..cae4d088b0c5d4698dad28bd1e48747c8c302877 100644 (file)
@@ -656,15 +656,6 @@ config CONTIG_ALLOC
 config PHYS_ADDR_T_64BIT
        def_bool 64BIT
 
-config BOUNCE
-       bool "Enable bounce buffers"
-       default y
-       depends on BLOCK && MMU && HIGHMEM
-       help
-         Enable bounce buffers for devices that cannot access the full range of
-         memory available to the CPU. Enabled by default when HIGHMEM is
-         selected, but you may say n to override this.
-
 config MMU_NOTIFIER
        bool
        select SRCU
index 0745aedebb37142ff44bfc24ec79d8f4543c4d7b..3c1d3e4f4ee757f601123b3f3a712969bcd6eab5 100644 (file)
@@ -6102,7 +6102,7 @@ void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_i
                " unevictable:%lu dirty:%lu writeback:%lu\n"
                " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
                " mapped:%lu shmem:%lu pagetables:%lu\n"
-               " sec_pagetables:%lu bounce:%lu\n"
+               " sec_pagetables:%lu bounce:0\n"
                " kernel_misc_reclaimable:%lu\n"
                " free:%lu free_pcp:%lu free_cma:%lu\n",
                global_node_page_state(NR_ACTIVE_ANON),
@@ -6120,7 +6120,6 @@ void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_i
                global_node_page_state(NR_SHMEM),
                global_node_page_state(NR_PAGETABLE),
                global_node_page_state(NR_SECONDARY_PAGETABLE),
-               global_zone_page_state(NR_BOUNCE),
                global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
                global_zone_page_state(NR_FREE_PAGES),
                free_pcp,
@@ -6216,7 +6215,7 @@ void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_i
                        " present:%lukB"
                        " managed:%lukB"
                        " mlocked:%lukB"
-                       " bounce:%lukB"
+                       " bounce:0kB"
                        " free_pcp:%lukB"
                        " local_pcp:%ukB"
                        " free_cma:%lukB"
@@ -6237,7 +6236,6 @@ void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_i
                        K(zone->present_pages),
                        K(zone_managed_pages(zone)),
                        K(zone_page_state(zone, NR_MLOCK)),
-                       K(zone_page_state(zone, NR_BOUNCE)),
                        K(free_pcp),
                        K(this_cpu_read(zone->per_cpu_pageset->count)),
                        K(zone_page_state(zone, NR_FREE_CMA_PAGES)));