LoongArch: Add ARCH_HAS_PTE_DEVMAP support
author     Huacai Chen <chenhuacai@loongson.cn>
           Sat, 20 Jul 2024 14:40:59 +0000 (22:40 +0800)
committer  Huacai Chen <chenhuacai@loongson.cn>
           Sat, 20 Jul 2024 14:40:59 +0000 (22:40 +0800)
In order for things like get_user_pages() to work on ZONE_DEVICE memory,
we need a software PTE bit to identify device-backed PFNs.  Hook this up
along with the relevant helpers to join in with ARCH_HAS_PTE_DEVMAP.

Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
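
As context for the diff below, here is a minimal sketch of what the new bit enables on the get_user_pages() path. It assumes only the helpers added by this patch; example_is_device_pfn() is a hypothetical name for illustration, not code from the kernel.

/* Hypothetical helper, for illustration only. */
static bool example_is_device_pfn(pte_t pte)
{
	/* The DEVMAP bit is only meaningful on a present PTE. */
	if (!pte_present(pte))
		return false;

	/*
	 * pte_devmap() tells a GUP-style walker that this PFN is
	 * ZONE_DEVICE-backed, so it must be pinned through its
	 * dev_pagemap rather than treated as ordinary page memory.
	 */
	return pte_devmap(pte);
}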
arch/loongarch/Kconfig
arch/loongarch/include/asm/pgtable-bits.h
arch/loongarch/include/asm/pgtable.h

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index fcf6451b4e384841256169923e9d82014c0243b4..be5249ebd8fc31a3870b7514ee8ff1c3fe5b1dd5 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -22,6 +22,7 @@ config LOONGARCH
        select ARCH_HAS_KERNEL_FPU_SUPPORT if CPU_HAS_FPU
        select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
        select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+       select ARCH_HAS_PTE_DEVMAP
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_INLINE_READ_LOCK if !PREEMPTION
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
index 21319c1e045c216fffaaecc5370b0677cf48aada..82cd3a9f094b8a4fb382e04b85bdbee4623e4151 100644
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -22,6 +22,7 @@
 #define        _PAGE_PFN_SHIFT         12
 #define        _PAGE_SWP_EXCLUSIVE_SHIFT 23
 #define        _PAGE_PFN_END_SHIFT     48
+#define        _PAGE_DEVMAP_SHIFT      59
 #define        _PAGE_PRESENT_INVALID_SHIFT 60
 #define        _PAGE_NO_READ_SHIFT     61
 #define        _PAGE_NO_EXEC_SHIFT     62
@@ -35,6 +36,7 @@
 #define _PAGE_MODIFIED         (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
 #define _PAGE_PROTNONE         (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
 #define _PAGE_SPECIAL          (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)
+#define _PAGE_DEVMAP           (_ULCAST_(1) << _PAGE_DEVMAP_SHIFT)
 
 /* We borrow bit 23 to store the exclusive marker in swap PTEs. */
 #define _PAGE_SWP_EXCLUSIVE    (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT)
@@ -74,8 +76,8 @@
 #define __READABLE     (_PAGE_VALID)
 #define __WRITEABLE    (_PAGE_DIRTY | _PAGE_WRITE)
 
-#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
-#define _HPAGE_CHG_MASK        (_PAGE_MODIFIED | _PAGE_SPECIAL | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
+#define _PAGE_CHG_MASK (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV)
+#define _HPAGE_CHG_MASK        (_PAGE_MODIFIED | _PAGE_SPECIAL | _PAGE_DEVMAP | _PFN_MASK | _CACHE_MASK | _PAGE_PLV | _PAGE_HUGE)
 
 #define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_NO_READ | \
                                 _PAGE_USER | _CACHE_CC)
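
Note why _PAGE_DEVMAP is added to _PAGE_CHG_MASK and _HPAGE_CHG_MASK: pte_modify() keeps exactly the bits in that mask when protections are rebuilt, so the marker survives operations such as mprotect(). A sketch following the shape of LoongArch's pte_modify(), shown for illustration rather than as part of this hunk:

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/* Bits in _PAGE_CHG_MASK survive; the rest come from newprot. */
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}

/*
 * pte = pte_mkdevmap(pte);
 * pte = pte_modify(pte, PAGE_READONLY);
 * pte_devmap(pte) is still true, because bit 59 is in the mask.
 */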
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index af3acdf3481a6a74cb4583a42c12362d44204bf2..0e821be6326893111b4f7fd23054e13350e73b83 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -424,6 +424,9 @@ static inline int pte_special(pte_t pte)    { return pte_val(pte) & _PAGE_SPECIAL;
 static inline pte_t pte_mkspecial(pte_t pte)   { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
 
+static inline int pte_devmap(pte_t pte)                { return !!(pte_val(pte) & _PAGE_DEVMAP); }
+static inline pte_t pte_mkdevmap(pte_t pte)    { pte_val(pte) |= _PAGE_DEVMAP; return pte; }
+
 #define pte_accessible pte_accessible
 static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
 {
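
The pte_mkdevmap() side is consumed when a device mapping is first installed. A simplified sketch of the generic insert_pfn()-style pattern, not verbatim mm/memory.c code:

#include <linux/pfn_t.h>

/* Simplified illustration; assumes a devmap pfn_t from the driver. */
static pte_t example_make_devmap_pte(pfn_t pfn, pgprot_t prot)
{
	pte_t entry = pfn_t_pte(pfn, prot);

	/* Mark the mapping device-backed: sets _PAGE_DEVMAP (bit 59). */
	return pte_mkdevmap(entry);
}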
@@ -558,6 +561,17 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
        return pmd;
 }
 
+static inline int pmd_devmap(pmd_t pmd)
+{
+       return !!(pmd_val(pmd) & _PAGE_DEVMAP);
+}
+
+static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+{
+       pmd_val(pmd) |= _PAGE_DEVMAP;
+       return pmd;
+}
+
 static inline struct page *pmd_page(pmd_t pmd)
 {
        if (pmd_trans_huge(pmd))
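
pmd_devmap() and pmd_mkdevmap() mirror the PTE helpers at PMD granularity, so huge device mappings cooperate with THP-aware code. A hypothetical sketch, assuming the usual pfn_pmd()/pmd_mkhuge() helpers a THP-capable architecture provides:

/* Hypothetical wrapper, for illustration only. */
static pmd_t example_make_devmap_pmd(unsigned long pfn, pgprot_t prot)
{
	pmd_t entry = pfn_pmd(pfn, prot);

	entry = pmd_mkhuge(entry);	/* _PAGE_HUGE: this is a leaf PMD */
	entry = pmd_mkdevmap(entry);	/* _PAGE_DEVMAP: device-backed */
	return entry;
}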
@@ -613,6 +627,11 @@ static inline long pmd_protnone(pmd_t pmd)
 #define pmd_leaf(pmd)          ((pmd_val(pmd) & _PAGE_HUGE) != 0)
 #define pud_leaf(pud)          ((pud_val(pud) & _PAGE_HUGE) != 0)
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pud_devmap(pud)                (0)
+#define pgd_devmap(pgd)                (0)
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 /*
  * We provide our own get_unmapped area to cope with the virtual aliasing
  * constraints placed on us by the cache architecture.