fs/proc/kcore.c: allow translation of physical memory addresses
author Alexander Gordeev <agordeev@linux.ibm.com>
Mon, 30 Sep 2024 12:21:19 +0000 (14:21 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
Wed, 9 Oct 2024 19:47:19 +0000 (12:47 -0700)
When /proc/kcore is read, an attempt to read the first two pages results
in a HW-specific page swap on s390, and other (so-called prefix) pages
are accessed instead.  That leads to a wrong read.

Allow architecture-specific translation of memory addresses using the
kc_xlate_dev_mem_ptr() and kc_unxlate_dev_mem_ptr() callbacks, similarly
to the /dev/mem xlate_dev_mem_ptr() and unxlate_dev_mem_ptr() callbacks.
That way an architecture can deal with specific physical memory ranges.

Reuse the existing /dev/mem callback implementation on s390, which
handles the described prefix page swapping correctly.
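
For context, a rough sketch of what such a callback has to do for the
prefix pages (this is not the actual s390 implementation;
in_prefix_range() and copy_absolute() are hypothetical stand-ins):

	static void *prefix_xlate(phys_addr_t phys)
	{
		void *bounce;

		if (!in_prefix_range(phys))	/* hypothetical helper */
			return __va(phys);	/* linear map is fine */

		/* Prefix pages are swapped: read via a bounce buffer. */
		bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (bounce)
			copy_absolute(bounce, phys, PAGE_SIZE);	/* hypothetical */
		return bounce;	/* NULL makes the reader return -ENOMEM */
	}

	static void prefix_unxlate(phys_addr_t phys, void *virt)
	{
		if (virt != __va(phys))
			kfree(virt);	/* drop the bounce buffer */
	}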

For other architectures the default callbacks are effectively a no-op:
kc_xlate_dev_mem_ptr() is just __va() and kc_unxlate_dev_mem_ptr() does
nothing.  It is expected that the condition (vaddr == __va(__pa(vaddr)))
always holds true for the KCORE_RAM memory type.
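
With the defaults in place the KCORE_RAM path is behaviourally
unchanged; a minimal sketch of the identity being relied on
(check_identity() is a hypothetical helper for illustration):

	static void check_identity(void *vaddr)
	{
		/* vaddr taken from a KCORE_RAM range */
		phys_addr_t phys = __pa(vaddr);

		/* the default kc_xlate_dev_mem_ptr() is just __va() */
		WARN_ON(kc_xlate_dev_mem_ptr(phys) != vaddr);
	}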

Link: https://lkml.kernel.org/r/20240930122119.1651546-1-agordeev@linux.ibm.com
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Suggested-by: Heiko Carstens <hca@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/s390/include/asm/io.h
fs/proc/kcore.c

diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 0fbc992d7a5ea7e71fddc8576218237360900fd2..fc9933a743d692196c322f77538d35b9a826a8ee 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
 #include <asm/pci_io.h>
 
 #define xlate_dev_mem_ptr xlate_dev_mem_ptr
+#define kc_xlate_dev_mem_ptr xlate_dev_mem_ptr
 void *xlate_dev_mem_ptr(phys_addr_t phys);
 #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
+#define kc_unxlate_dev_mem_ptr unxlate_dev_mem_ptr
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
 
 #define IO_SPACE_LIMIT 0
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 7d0acdad74e2ffd916b3e3b8deb51918dfad67cb..51446c59388f10761bc95b4378a5d1c074b0556d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -50,6 +50,20 @@ static struct proc_dir_entry *proc_root_kcore;
 #define        kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
 #endif
 
+#ifndef kc_xlate_dev_mem_ptr
+#define kc_xlate_dev_mem_ptr kc_xlate_dev_mem_ptr
+static inline void *kc_xlate_dev_mem_ptr(phys_addr_t phys)
+{
+       return __va(phys);
+}
+#endif
+#ifndef kc_unxlate_dev_mem_ptr
+#define kc_unxlate_dev_mem_ptr kc_unxlate_dev_mem_ptr
+static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
+{
+}
+#endif
+
 static LIST_HEAD(kclist_head);
 static DECLARE_RWSEM(kclist_lock);
 static int kcore_need_update = 1;
@@ -471,6 +485,8 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
        while (buflen) {
                struct page *page;
                unsigned long pfn;
+               phys_addr_t phys;
+               void *__start;
 
                /*
                 * If this is the first iteration or the address is not within
@@ -537,7 +553,8 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
                        }
                        break;
                case KCORE_RAM:
-                       pfn = __pa(start) >> PAGE_SHIFT;
+                       phys = __pa(start);
+                       pfn =  phys >> PAGE_SHIFT;
                        page = pfn_to_online_page(pfn);
 
                        /*
@@ -557,13 +574,28 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
                        fallthrough;
                case KCORE_VMEMMAP:
                case KCORE_TEXT:
+                       if (m->type == KCORE_RAM) {
+                               __start = kc_xlate_dev_mem_ptr(phys);
+                               if (!__start) {
+                                       ret = -ENOMEM;
+                                       if (iov_iter_zero(tsz, iter) != tsz)
+                                               ret = -EFAULT;
+                                       goto out;
+                               }
+                       } else {
+                               __start = (void *)start;
+                       }
+
                        /*
                         * Sadly we must use a bounce buffer here to be able to
                         * make use of copy_from_kernel_nofault(), as these
                         * memory regions might not always be mapped on all
                         * architectures.
                         */
-                       if (copy_from_kernel_nofault(buf, (void *)start, tsz)) {
+                       ret = copy_from_kernel_nofault(buf, __start, tsz);
+                       if (m->type == KCORE_RAM)
+                               kc_unxlate_dev_mem_ptr(phys, __start);
+                       if (ret) {
                                if (iov_iter_zero(tsz, iter) != tsz) {
                                        ret = -EFAULT;
                                        goto out;