powerpc/64s: Fix __pte_needs_flush() false positive warning
author    Benjamin Gray <bgray@linux.ibm.com>
          Thu, 2 Mar 2023 22:59:47 +0000 (09:59 +1100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 6 Apr 2023 10:10:54 +0000 (12:10 +0200)
commit 1abce0580b89464546ae06abd5891ebec43c9470 upstream.

Userspace PROT_NONE ptes have _PAGE_PRIVILEGED set, which falsely
triggers the debug assertion in __pte_flags_need_flush() that it is
never called on a kernel mapping.

Detect a userspace PROT_NONE page by checking that all the required
PAGE_NONE bits are set and none of the RWX bits are set.
pte_protnone() is insufficient here because it always returns 0 when
CONFIG_NUMA_BALANCING=n.
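
As an aside, the mask-and-compare works because a userspace PROT_NONE
pte carries exactly the PAGE_NONE bits (including _PAGE_PRIVILEGED) and
no read/write/execute permission, whereas a kernel mapping has
_PAGE_PRIVILEGED plus at least one RWX bit. A minimal userspace sketch,
using made-up bit values (the real definitions live in
arch/powerpc/include/asm/book3s/64/pgtable.h):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in values, chosen only for this sketch. */
    #define _PAGE_READ       0x1UL
    #define _PAGE_WRITE      0x2UL
    #define _PAGE_EXEC       0x4UL
    #define _PAGE_RWX        (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
    #define _PAGE_PRIVILEGED 0x8UL
    #define _PAGE_PRESENT    0x10UL
    /* PROT_NONE: present but no access => privileged, no RWX. */
    #define PAGE_NONE_VAL    (_PAGE_PRESENT | _PAGE_PRIVILEGED)

    static bool pte_protnone_like(unsigned long pte)
    {
            /* True iff every PAGE_NONE bit is set and no RWX bit is. */
            return (pte & (PAGE_NONE_VAL | _PAGE_RWX)) == PAGE_NONE_VAL;
    }

    int main(void)
    {
            unsigned long user_protnone = PAGE_NONE_VAL;
            unsigned long kernel_rw = PAGE_NONE_VAL | _PAGE_READ | _PAGE_WRITE;
            unsigned long user_rw = _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE;

            printf("PROT_NONE pte: %d\n", pte_protnone_like(user_protnone)); /* 1 */
            printf("kernel RW pte: %d\n", pte_protnone_like(kernel_rw));     /* 0 */
            printf("user RW pte:   %d\n", pte_protnone_like(user_rw));       /* 0 */
            return 0;
    }

Because it tests the pte bits directly, this check keeps working when
CONFIG_NUMA_BALANCING=n, where pte_protnone() is compiled to return 0.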

Fixes: b11931e9adc1 ("powerpc/64s: add pte_needs_flush and huge_pmd_needs_flush")
Cc: stable@vger.kernel.org # v6.1+
Reported-by: Russell Currey <ruscur@russell.cc>
Signed-off-by: Benjamin Gray <bgray@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230302225947.81083-1-bgray@linux.ibm.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/powerpc/include/asm/book3s/64/tlbflush.h

index 67655cd6054563bbdcbd84396921a5f05ff419d4..985bd8e69bdc29f852dc130d1230d59ef3a74b71 100644 (file)
@@ -163,6 +163,11 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
         */
 }
 
+static inline bool __pte_protnone(unsigned long pte)
+{
+       return (pte & (pgprot_val(PAGE_NONE) | _PAGE_RWX)) == pgprot_val(PAGE_NONE);
+}
+
 static inline bool __pte_flags_need_flush(unsigned long oldval,
                                          unsigned long newval)
 {
@@ -179,8 +184,8 @@ static inline bool __pte_flags_need_flush(unsigned long oldval,
        /*
         * We do not expect kernel mappings or non-PTEs or not-present PTEs.
         */
-       VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
-       VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
+       VM_WARN_ON_ONCE(!__pte_protnone(oldval) && oldval & _PAGE_PRIVILEGED);
+       VM_WARN_ON_ONCE(!__pte_protnone(newval) && newval & _PAGE_PRIVILEGED);
        VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
        VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
        VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));