There are many places where kmap/memset/kunmap patterns occur.
Use the newly lifted memzero_page() to eliminate direct uses of kmap and
leverage the new core functions' use of kmap_local_page().
The development of this patch was aided by the following coccinelle
script:
// <smpl>
// SPDX-License-Identifier: GPL-2.0-only
// Find kmap/memset/kunmap pattern and replace with memset*page calls
//
// NOTE: Offsets and other expressions may be more complex than what the script
// will automatically generate.  Therefore a catchall rule is provided to find
// the pattern which then must be evaluated by hand.
//
// Confidence: Low
// Copyright: (C) 2021 Intel Corporation
// URL: http://coccinelle.lip6.fr/
// Comments:
// Options:
//
// Then the memset pattern
//
@ memset_rule1 @
expression page, V, L, Off;
identifier ptr;
type VP;
@@
(
-VP ptr = kmap(page);
|
-ptr = kmap(page);
|
-VP ptr = kmap_atomic(page);
|
-ptr = kmap_atomic(page);
)
<+...
(
-memset(ptr, 0, L);
+memzero_page(page, 0, L);
|
-memset(ptr + Off, 0, L);
+memzero_page(page, Off, L);
|
-memset(ptr, V, L);
+memset_page(page, V, 0, L);
|
-memset(ptr + Off, V, L);
+memset_page(page, V, Off, L);
)
...+>
(
-kunmap(page);
|
-kunmap_atomic(ptr);
)
// Remove any pointers left unused
@
depends on memset_rule1
@
identifier memset_rule1.ptr;
type VP, VP1;
@@
-VP ptr;
	... when != ptr;
? VP1 ptr;
//
// Catch all
//
@ memset_rule2 @
expression page;
identifier ptr;
expression GenTo, GenSize, GenValue;
type VP;
@@
(
-VP ptr = kmap(page);
|
-ptr = kmap(page);
|
-VP ptr = kmap_atomic(page);
|
-ptr = kmap_atomic(page);
)
<+...
(
//
// Some call sites have complex expressions within the memset/memcpy.
// The following are catch-alls which need to be evaluated by hand.
//
-memset(GenTo, 0, GenSize);
+memzero_pageExtra(page, GenTo, GenSize);
|
-memset(GenTo, GenValue, GenSize);
+memset_pageExtra(page, GenValue, GenTo, GenSize);
)
...+>
(
-kunmap(page);
|
-kunmap_atomic(ptr);
)
// Remove any pointers left unused
@
depends on memset_rule2
@
identifier memset_rule2.ptr;
type VP, VP1;
@@
-VP ptr;
	... when != ptr;
? VP1 ptr;
// </smpl>
Link: https://lkml.kernel.org/r/20210309212137.2610186-4-ira.weiny@intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
                free_extent_map(em);
 
                if (page->index == end_index) {
-                       char *userpage;
                        size_t zero_offset = offset_in_page(isize);
 
                        if (zero_offset) {
                                int zeros;
                                zeros = PAGE_SIZE - zero_offset;
-                               userpage = kmap_atomic(page);
-                               memset(userpage + zero_offset, 0, zeros);
+                               memzero_page(page, zero_offset, zeros);
                                flush_dcache_page(page);
-                               kunmap_atomic(userpage);
                        }
                }
 
 
        }
 
        if (page->index == last_byte >> PAGE_SHIFT) {
-               char *userpage;
                size_t zero_offset = offset_in_page(last_byte);
 
                if (zero_offset) {
                        iosize = PAGE_SIZE - zero_offset;
-                       userpage = kmap_atomic(page);
-                       memset(userpage + zero_offset, 0, iosize);
+                       memzero_page(page, zero_offset, iosize);
                        flush_dcache_page(page);
-                       kunmap_atomic(userpage);
                }
        }
        begin_page_read(fs_info, page);
                u64 disk_bytenr;
 
                if (cur >= last_byte) {
-                       char *userpage;
                        struct extent_state *cached = NULL;
 
                        iosize = PAGE_SIZE - pg_offset;
-                       userpage = kmap_atomic(page);
-                       memset(userpage + pg_offset, 0, iosize);
+                       memzero_page(page, pg_offset, iosize);
                        flush_dcache_page(page);
-                       kunmap_atomic(userpage);
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
                        unlock_extent_cached(tree, cur,
 
                /* we've found a hole, just zero and go on */
                if (block_start == EXTENT_MAP_HOLE) {
-                       char *userpage;
                        struct extent_state *cached = NULL;
 
-                       userpage = kmap_atomic(page);
-                       memset(userpage + pg_offset, 0, iosize);
+                       memzero_page(page, pg_offset, iosize);
                        flush_dcache_page(page);
-                       kunmap_atomic(userpage);
 
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
                                            &cached, GFP_NOFS);
        }
 
        if (page->index == end_index) {
-               char *userpage;
-
-               userpage = kmap_atomic(page);
-               memset(userpage + pg_offset, 0,
-                      PAGE_SIZE - pg_offset);
-               kunmap_atomic(userpage);
+               memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
                flush_dcache_page(page);
        }
 
 
                if (!ret) {
                        unsigned long offset = offset_in_page(total_compressed);
                        struct page *page = pages[nr_pages - 1];
-                       char *kaddr;
 
                        /* zero the tail end of the last page, we might be
                         * sending it down to disk
                         */
-                       if (offset) {
-                               kaddr = kmap_atomic(page);
-                               memset(kaddr + offset, 0,
-                                      PAGE_SIZE - offset);
-                               kunmap_atomic(kaddr);
-                       }
+                       if (offset)
+                               memzero_page(page, offset, PAGE_SIZE - offset);
                        will_compress = 1;
                }
        }
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cached_state = NULL;
        struct extent_changeset *data_reserved = NULL;
-       char *kaddr;
        bool only_release_metadata = false;
        u32 blocksize = fs_info->sectorsize;
        pgoff_t index = from >> PAGE_SHIFT;
        if (offset != blocksize) {
                if (!len)
                        len = blocksize - offset;
-               kaddr = kmap(page);
                if (front)
-                       memset(kaddr + (block_start - page_offset(page)),
-                               0, offset);
+                       memzero_page(page, (block_start - page_offset(page)),
+                                    offset);
                else
-                       memset(kaddr + (block_start - page_offset(page)) +  offset,
-                               0, len);
+                       memzero_page(page, (block_start - page_offset(page)) + offset,
+                                    len);
                flush_dcache_page(page);
-               kunmap(page);
        }
        ClearPageChecked(page);
        set_page_dirty(page);
         * cover that region here.
         */
 
-       if (max_size + pg_offset < PAGE_SIZE) {
-               char *map = kmap(page);
-               memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
-               kunmap(page);
-       }
+       if (max_size + pg_offset < PAGE_SIZE)
+               memzero_page(page,  pg_offset + max_size,
+                            PAGE_SIZE - max_size - pg_offset);
        kfree(tmp);
        return ret;
 }
        struct btrfs_ordered_extent *ordered;
        struct extent_state *cached_state = NULL;
        struct extent_changeset *data_reserved = NULL;
-       char *kaddr;
        unsigned long zero_start;
        loff_t size;
        vm_fault_t ret;
                zero_start = PAGE_SIZE;
 
        if (zero_start != PAGE_SIZE) {
-               kaddr = kmap(page);
-               memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
+               memzero_page(page, zero_start, PAGE_SIZE - zero_start);
                flush_dcache_page(page);
-               kunmap(page);
        }
        ClearPageChecked(page);
        set_page_dirty(page);
 
         * So what's in the range [500, 4095] corresponds to zeroes.
         */
        if (datal < block_size) {
-               char *map;
-
-               map = kmap(page);
-               memset(map + datal, 0, block_size - datal);
+               memzero_page(page, datal, block_size - datal);
                flush_dcache_page(page);
-               kunmap(page);
        }
 
        SetPageUptodate(page);
 
        unsigned long bytes_left;
        unsigned long total_out = 0;
        unsigned long pg_offset = 0;
-       char *kaddr;
 
        destlen = min_t(unsigned long, destlen, PAGE_SIZE);
        bytes_left = destlen;
         * end of the inline extent (destlen) to the end of the page
         */
        if (pg_offset < destlen) {
-               kaddr = kmap_atomic(dest_page);
-               memset(kaddr + pg_offset, 0, destlen - pg_offset);
-               kunmap_atomic(kaddr);
+               memzero_page(dest_page, pg_offset, destlen - pg_offset);
        }
        return ret;
 }
 
        size_t ret2;
        unsigned long total_out = 0;
        unsigned long pg_offset = 0;
-       char *kaddr;
 
        stream = ZSTD_initDStream(
                        ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
        ret = 0;
 finish:
        if (pg_offset < destlen) {
-               kaddr = kmap_atomic(dest_page);
-               memset(kaddr + pg_offset, 0, destlen - pg_offset);
-               kunmap_atomic(kaddr);
+               memzero_page(dest_page, pg_offset, destlen - pg_offset);
        }
        return ret;
 }