_SET_MEMORY_NX_BIT,
        _SET_MEMORY_X_BIT,
        _SET_MEMORY_4K_BIT,
+       _SET_MEMORY_INV_BIT,
+       _SET_MEMORY_DEF_BIT,
 };
 
 #define SET_MEMORY_RO  BIT(_SET_MEMORY_RO_BIT)
 #define SET_MEMORY_NX  BIT(_SET_MEMORY_NX_BIT)
 #define SET_MEMORY_X   BIT(_SET_MEMORY_X_BIT)
 #define SET_MEMORY_4K  BIT(_SET_MEMORY_4K_BIT)
+#define SET_MEMORY_INV BIT(_SET_MEMORY_INV_BIT)
+#define SET_MEMORY_DEF BIT(_SET_MEMORY_DEF_BIT)
 
 int __set_memory(unsigned long addr, int numpages, unsigned long flags);
 
        return __set_memory(addr, numpages, SET_MEMORY_4K);
 }
 
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+
 #endif
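
For reference, the two new flags feed the same __set_memory() entry point as the existing wrappers such as set_memory_4k() above, so equivalent address-based helpers would be one-liners. A minimal sketch, assuming only the declarations in this header; the patch itself exposes the page-based set_direct_map_*() interface instead, so these helper names are illustrative:

static inline int set_memory_inv(unsigned long addr, int numpages)
{
	/* illustrative wrapper, not part of this patch */
	return __set_memory(addr, numpages, SET_MEMORY_INV);
}

static inline int set_memory_def(unsigned long addr, int numpages)
{
	/* illustrative wrapper, not part of this patch */
	return __set_memory(addr, numpages, SET_MEMORY_DEF);
}

Note that the page-table walkers below test SET_MEMORY_INV before SET_MEMORY_DEF, so INV takes precedence if both flags are ever passed together.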
 
  * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
  */
 #include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <asm/cacheflush.h>
 #include <asm/facility.h>
                        new = set_pte_bit(new, __pgprot(_PAGE_NOEXEC));
                else if (flags & SET_MEMORY_X)
                        new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
+               if (flags & SET_MEMORY_INV) {
+                       /* make the page inaccessible in the direct mapping */
+                       new = set_pte_bit(new, __pgprot(_PAGE_INVALID));
+               } else if (flags & SET_MEMORY_DEF) {
+                       /*
+                        * Rebuild the default kernel mapping: keep only the
+                        * page frame address, re-apply PAGE_KERNEL, and keep
+                        * NOEXEC clear on machines without the instruction-
+                        * execution-protection facility.
+                        */
+                       new = __pte(pte_val(new) & PAGE_MASK);
+                       new = set_pte_bit(new, PAGE_KERNEL);
+                       if (!MACHINE_HAS_NX)
+                               new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
+               }
                pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
                ptep++;
                addr += PAGE_SIZE;
                new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
        else if (flags & SET_MEMORY_X)
                new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
+       if (flags & SET_MEMORY_INV) {
+               new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
+       } else if (flags & SET_MEMORY_DEF) {
+               new = __pmd(pmd_val(new) & PMD_MASK);
+               new = set_pmd_bit(new, SEGMENT_KERNEL);
+               if (!MACHINE_HAS_NX)
+                       new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
+       }
        pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
 }
 
                new = set_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
        else if (flags & SET_MEMORY_X)
                new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
+       if (flags & SET_MEMORY_INV) {
+               new = set_pud_bit(new, __pgprot(_REGION_ENTRY_INVALID));
+       } else if (flags & SET_MEMORY_DEF) {
+               new = __pud(pud_val(new) & PUD_MASK);
+               new = set_pud_bit(new, REGION3_KERNEL);
+               if (!MACHINE_HAS_NX)
+                       new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
+       }
        pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
 }
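
Unlike the RO/NX/X cases, which toggle individual protection bits, the DEF path at each level rebuilds the entry from scratch: mask down to the frame address (PAGE_MASK, PMD_MASK or PUD_MASK), then re-apply the default kernel protection (PAGE_KERNEL, SEGMENT_KERNEL or REGION3_KERNEL). That also clears a previously set invalid bit or write protection. A minimal sketch of that recovery property, with an illustrative call sequence and error handling omitted:

#include <linux/mm.h>
#include <asm/set_memory.h>

/* illustrative only: SET_MEMORY_DEF recovers from any prior attribute state */
static void reset_page_to_default(struct page *page)
{
	unsigned long addr = (unsigned long)page_to_virt(page);

	__set_memory(addr, 1, SET_MEMORY_RO);	/* write-protect the page */
	__set_memory(addr, 1, SET_MEMORY_INV);	/* then drop it from the mapping */
	__set_memory(addr, 1, SET_MEMORY_DEF);	/* back to the default RW kernel mapping */
}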
 
        return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
 }
 
+/*
+ * Generic set_direct_map interface: make a single page of the kernel
+ * direct mapping inaccessible, or reset it to its default protections.
+ * As the _noflush suffix says, TLB flushing is left to the caller.
+ */
+int set_direct_map_invalid_noflush(struct page *page)
+{
+       return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_INV);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+       return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_DEF);
+}
+
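
To illustrate the contract the two implementations satisfy: after set_direct_map_invalid_noflush() the page's linear address is unmapped, and any kernel access through it would fault until the default mapping is restored. Neither function flushes the TLB (hence _noflush), so the sketch below makes the flush explicit with the generic flush_tlb_kernel_range(); the round-trip function name is invented for illustration:

#include <linux/mm.h>
#include <linux/set_memory.h>
#include <asm/tlbflush.h>

/* illustrative round trip, not part of this patch */
static void direct_map_round_trip(struct page *page)
{
	unsigned long addr = (unsigned long)page_to_virt(page);

	if (set_direct_map_invalid_noflush(page))
		return;
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	/* *(char *)addr would fault here: the entry is marked invalid */

	if (set_direct_map_default_noflush(page))
		return;
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	/* addr is mapped again with the default PAGE_KERNEL protections */
}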
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 
 static void ipte_range(pte_t *pte, unsigned long address, int nr)