iov_iter: fix copy_page_from_iter_atomic() if KMAP_LOCAL_FORCE_MAP
author Hugh Dickins <hughd@google.com>
Sun, 27 Oct 2024 22:23:23 +0000 (15:23 -0700)
committer Christian Brauner <brauner@kernel.org>
Mon, 28 Oct 2024 12:39:35 +0000 (13:39 +0100)
generic/077 on x86_32 CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP=y with highmem,
on huge=always tmpfs, issues a warning and then hangs (interruptibly):

WARNING: CPU: 5 PID: 3517 at mm/highmem.c:622 kunmap_local_indexed+0x62/0xc9
CPU: 5 UID: 0 PID: 3517 Comm: cp Not tainted 6.12.0-rc4 #2
...
copy_page_from_iter_atomic+0xa6/0x5ec
generic_perform_write+0xf6/0x1b4
shmem_file_write_iter+0x54/0x67

Fix copy_page_from_iter_atomic() by making it take the page-at-a-time
path in that case too (include/linux/skbuff.h's skb_frag_must_loop()
does something similar).
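
For context, the skb_frag_must_loop() helper referred to above reads
roughly as below (paraphrased from include/linux/skbuff.h; the exact
body may differ slightly between kernel versions). The point is that
CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP makes kmap_atomic()/kmap_local()
build a real single-page temporary mapping even for lowmem pages, so
any copy that might cross a page boundary inside a compound page has
to loop page by page:

	/* Must the copy loop over individual pages of this frag's page? */
	static inline bool skb_frag_must_loop(struct page *p)
	{
	#if defined(CONFIG_HIGHMEM)
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
			return true;
	#endif
		return false;
	}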

But going forward, perhaps CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is too
surprising, has outlived its usefulness, and should just be removed?

Fixes: 908a1ad89466 ("iov_iter: Handle compound highmem pages in copy_page_from_iter_atomic()")
Signed-off-by: Hugh Dickins <hughd@google.com>
Link: https://lore.kernel.org/r/dd5f0c89-186e-18e1-4f43-19a60f5a9774@google.com
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: stable@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
lib/iov_iter.c

index cc4b5541eef8bdbecf3d569512c81bf73e15be5f..908e75a28d90bda63cdb4dc83ad90059ee8ef5b7 100644
@@ -461,6 +461,8 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
                size_t bytes, struct iov_iter *i)
 {
        size_t n, copied = 0;
+       bool uses_kmap = IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) ||
+                        PageHighMem(page);
 
        if (!page_copy_sane(page, offset, bytes))
                return 0;
@@ -471,7 +473,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
                char *p;
 
                n = bytes - copied;
-               if (PageHighMem(page)) {
+               if (uses_kmap) {
                        page += offset / PAGE_SIZE;
                        offset %= PAGE_SIZE;
                        n = min_t(size_t, n, PAGE_SIZE - offset);
@@ -482,7 +484,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
                kunmap_atomic(p);
                copied += n;
                offset += n;
-       } while (PageHighMem(page) && copied != bytes && n > 0);
+       } while (uses_kmap && copied != bytes && n > 0);
 
        return copied;
 }
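
For readability, this is roughly how the function reads with the fix
applied. It is reconstructed from the hunks above plus the surrounding
lines of copy_page_from_iter_atomic() in lib/iov_iter.c at this commit;
the parts outside the hunks (the data_source check, the
__copy_from_iter() call) are recalled from that file and should be
treated as approximate rather than authoritative:

	size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
			size_t bytes, struct iov_iter *i)
	{
		size_t n, copied = 0;
		/* Take the page-at-a-time path whenever kmap gives only a
		 * single-page mapping: highmem pages, or any page when
		 * CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is enabled. */
		bool uses_kmap = IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) ||
				 PageHighMem(page);

		if (!page_copy_sane(page, offset, bytes))
			return 0;
		if (WARN_ON_ONCE(!i->data_source))
			return 0;

		do {
			char *p;

			n = bytes - copied;
			if (uses_kmap) {
				/* Step to the subpage holding 'offset' and copy at
				 * most to its end: the mapping below covers only
				 * one page. */
				page += offset / PAGE_SIZE;
				offset %= PAGE_SIZE;
				n = min_t(size_t, n, PAGE_SIZE - offset);
			}

			p = kmap_atomic(page) + offset;
			n = __copy_from_iter(p, n, i);
			kunmap_atomic(p);
			copied += n;
			offset += n;
		} while (uses_kmap && copied != bytes && n > 0);

		return copied;
	}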