pte_t *ptep, unsigned long sz)
 {
        if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
-               pte_val(*ptep) = _REGION3_ENTRY_EMPTY;
+               set_pte(ptep, __pte(_REGION3_ENTRY_EMPTY));
        else
-               pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
+               set_pte(ptep, __pte(_SEGMENT_ENTRY_EMPTY));
 }
 
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
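The conversion pattern is the same throughout the patch: instead of writing through pte_val()/pmd_val()/pud_val()/p4d_val()/pgd_val() used as an lvalue, the new value is built first and then published with one typed store via set_pte()/set_pmd()/set_pud()/set_p4d()/set_pgd(). A minimal user-space sketch of the idea, not the kernel's actual header (names mirror the kernel helpers; the volatile store stands in for what is roughly a single WRITE_ONCE() of the whole entry):

#include <stdio.h>

typedef struct { unsigned long val; } pte_t;

#define __pte(x)   ((pte_t){ .val = (x) })
#define pte_val(x) ((x).val)	/* rvalue-only in this sketch */

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*(volatile pte_t *)ptep = pte;	/* one whole-entry store */
}

int main(void)
{
	pte_t entry = __pte(0);

	set_pte(&entry, __pte(0x400UL));	/* illustrative flag value */
	printf("entry = %#lx\n", pte_val(entry));
	return 0;
}

Keeping pte_val() usable only as an rvalue is the point of the exercise: every update goes through a single store of the whole entry instead of a read-modify-write hidden behind a macro.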
 
 
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
 {
-       pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
+       set_pgd(pgd, __pgd(_REGION1_ENTRY | __pa(p4d)));
 }
 
 static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
 {
-       p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
+       set_p4d(p4d, __p4d(_REGION2_ENTRY | __pa(pud)));
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-       pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
+       set_pud(pud, __pud(_REGION3_ENTRY | __pa(pmd)));
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 static inline void pmd_populate(struct mm_struct *mm,
                                pmd_t *pmd, pgtable_t pte)
 {
-       pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
+       set_pmd(pmd, __pmd(_SEGMENT_ENTRY | __pa(pte)));
 }
 
 #define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
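Note that the pmd_populate() hunk is not purely mechanical: '+' becomes '|' when combining _SEGMENT_ENTRY with __pa(pte). The page-table origin is aligned well above the flag bits, so the two operators yield the same value; the bitwise form just matches the other populate helpers. A stand-alone check of that equivalence (the constants are illustrative, not the real s390 encodings):

#include <assert.h>

int main(void)
{
	unsigned long flags  = 0x100UL;		/* illustrative low flag bits */
	unsigned long origin = 0x20000UL;	/* illustrative aligned table origin */

	assert((flags | origin) == (flags + origin));	/* disjoint bits: OR == ADD */
	return 0;
}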
 
 static inline void pgd_clear(pgd_t *pgd)
 {
        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
-               pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
+               set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
 }
 
 static inline void p4d_clear(p4d_t *p4d)
 {
        if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
-               p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
+               set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
 }
 
 static inline void pud_clear(pud_t *pud)
 {
        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
-               pud_val(*pud) = _REGION3_ENTRY_EMPTY;
+               set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
-       pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+       set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-       pte_val(*ptep) = _PAGE_INVALID;
+       set_pte(ptep, __pte(_PAGE_INVALID));
 }
 
 /*
 
        if (full) {
                res = *ptep;
-               *ptep = __pte(_PAGE_INVALID);
+               set_pte(ptep, __pte(_PAGE_INVALID));
        } else {
                res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
        }
        if (mm_has_pgste(mm))
                ptep_set_pte_at(mm, addr, ptep, entry);
        else
-               *ptep = entry;
+               set_pte(ptep, entry);
 }
 
 /*
 {
        if (!MACHINE_HAS_NX)
                pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
-       *pmdp = entry;
+       set_pmd(pmdp, entry);
 }
 
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
        if (full) {
                pmd_t pmd = *pmdp;
-               *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
+               set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
                return pmd;
        }
        return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
 
        }
 
        if (bits & GMAP_NOTIFY_MPROT)
-               pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;
+               set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));
 
        /* Shadow GMAP protection needs split PMDs */
        if (bits & GMAP_NOTIFY_SHADOW)
                                address = pte_val(pte) & PAGE_MASK;
                                address += gaddr & ~PAGE_MASK;
                                *val = *(unsigned long *) address;
-                               pte_val(*ptep) |= _PAGE_YOUNG;
+                               set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_YOUNG)));
                                /* Do *NOT* clear the _PAGE_INVALID bit! */
                                rc = 0;
                        }
 static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
                             unsigned long gaddr)
 {
-       pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
+       set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_IN)));
        gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
 }
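The gmap hunks replace in-place |= and &= on *pmdp with a read, a pure value transform, and one set_pmd(). A sketch of what the bit helpers plausibly look like, modeled on the kernel's set_pte_bit()/clear_pte_bit() pattern rather than copied from the s390 header:

#include <stdio.h>

typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define __pmd(x)	((pmd_t){ .pmd = (x) })
#define __pgprot(x)	((pgprot_t){ .pgprot = (x) })
#define pmd_val(x)	((x).pmd)
#define pgprot_val(x)	((x).pgprot)

/* pure value transforms: OR a pgprot in, or mask it out */
static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(prot));
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
}

int main(void)
{
	pmd_t pmd = __pmd(0x20000UL);	/* illustrative entry */

	pmd = set_pmd_bit(pmd, __pgprot(0x1UL));
	pmd = clear_pmd_bit(pmd, __pgprot(0x1UL));
	printf("pmd = %#lx\n", pmd_val(pmd));
	return 0;
}

Combined with set_pmd(), the read-modify-write that |= used to hide inside a macro becomes explicit at the call site.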
 
                __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
        else
                __pmdp_csp(pmdp);
-       *pmdp = new;
+       set_pmd(pmdp, new);
 }
 
 static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
                                                   _SEGMENT_ENTRY_GMAP_UC));
                        if (purge)
                                __pmdp_csp(pmdp);
-                       pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+                       set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
                }
                spin_unlock(&gmap->guest_table_lock);
        }
                return false;
 
        /* Clear UC indication and reset protection */
-       pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
+       set_pmd(pmdp, clear_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_GMAP_UC)));
        gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
        return true;
 }
 
                rste |= _SEGMENT_ENTRY_LARGE;
 
        clear_huge_pte_skeys(mm, rste);
-       pte_val(*ptep) = rste;
+       set_pte(ptep, __pte(rste));
 }
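Worth noting in the hunk above: set_huge_pte_at() stores a segment- or region-table entry value (rste), not a pte-format value, which is why the raw value is simply wrapped with __pte() before the typed store rather than assembled from pte bits.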
 
 pte_t huge_ptep_get(pte_t *ptep)
 
                                                page = kasan_early_alloc_segment();
                                                memset(page, 0, _SEGMENT_SIZE);
                                        }
-                                       pmd_val(*pm_dir) = __pa(page) | sgt_prot;
+                                       set_pmd(pm_dir, __pmd(__pa(page) | sgt_prot));
                                        address = (address + PMD_SIZE) & PMD_MASK;
                                        continue;
                                }
                        switch (mode) {
                        case POPULATE_ONE2ONE:
                                page = (void *)address;
-                               pte_val(*pt_dir) = __pa(page) | pgt_prot;
+                               set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
                                break;
                        case POPULATE_MAP:
                                page = kasan_early_alloc_pages(0);
                                memset(page, 0, PAGE_SIZE);
-                               pte_val(*pt_dir) = __pa(page) | pgt_prot;
+                               set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
                                break;
                        case POPULATE_ZERO_SHADOW:
                                page = kasan_early_shadow_page;
-                               pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
+                               set_pte(pt_dir, __pte(__pa(page) | pgt_prot_zero));
                                break;
                        case POPULATE_SHALLOW:
                                /* should never happen */
 
                prot &= ~_PAGE_NOEXEC;
        ptep = pt_dir;
        for (i = 0; i < PTRS_PER_PTE; i++) {
-               pte_val(*ptep) = pte_addr | prot;
+               set_pte(ptep, __pte(pte_addr | prot));
                pte_addr += PAGE_SIZE;
                ptep++;
        }
                prot &= ~_SEGMENT_ENTRY_NOEXEC;
        pmdp = pm_dir;
        for (i = 0; i < PTRS_PER_PMD; i++) {
-               pmd_val(*pmdp) = pmd_addr | prot;
+               set_pmd(pmdp, __pmd(pmd_addr | prot));
                pmd_addr += PMD_SIZE;
                pmdp++;
        }
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
        unsigned long address;
+       pte_t *ptep, pte;
        int nr, i, j;
-       pte_t *pte;
 
        for (i = 0; i < numpages;) {
                address = (unsigned long)page_to_virt(page + i);
-               pte = virt_to_kpte(address);
-               nr = (unsigned long)pte >> ilog2(sizeof(long));
+               ptep = virt_to_kpte(address);
+               nr = (unsigned long)ptep >> ilog2(sizeof(long));
                nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
                nr = min(numpages - i, nr);
                if (enable) {
                        for (j = 0; j < nr; j++) {
-                               pte_val(*pte) &= ~_PAGE_INVALID;
+                               pte = clear_pte_bit(*ptep, __pgprot(_PAGE_INVALID));
+                               set_pte(ptep, pte);
                                address += PAGE_SIZE;
-                               pte++;
+                               ptep++;
                        }
                } else {
-                       ipte_range(pte, address, nr);
+                       ipte_range(ptep, address, nr);
                }
                i += nr;
        }
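In __kernel_map_pages() the pointer is renamed from pte to ptep so the name pte is free for the entry value, making the read-modify-write (clear_pte_bit() followed by set_pte()) explicit. The unchanged context above it derives, from the pte pointer alone, how many entries remain before the end of the current page table; a small stand-alone demo of that index trick (on s390, PTRS_PER_PTE is 256: 2 KB tables of eight-byte entries):

#include <stdio.h>

#define PTRS_PER_PTE 256UL	/* s390: 256 eight-byte entries per 2 KB table */

int main(void)
{
	unsigned long ptep = 0x12340ff8UL;	/* illustrative pte address */
	unsigned long idx = (ptep >> 3) & (PTRS_PER_PTE - 1);	/* >> 3 == >> ilog2(sizeof(long)) */

	printf("entries left in this table: %lu\n", PTRS_PER_PTE - idx);
	return 0;
}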
 
        atomic_inc(&mm->context.flush_count);
        if (cpumask_equal(&mm->context.cpu_attach_mask,
                          cpumask_of(smp_processor_id()))) {
-               pte_val(*ptep) |= _PAGE_INVALID;
+               set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_INVALID)));
                mm->context.flush_mm = 1;
        } else
                ptep_ipte_global(mm, addr, ptep, nodat);
                        pgste_val(pgste) |= PGSTE_UC_BIT;
        }
 #endif
-       *ptep = entry;
+       set_pte(ptep, entry);
        return pgste;
 }
 
                pgste = pgste_set_pte(ptep, pgste, new);
                pgste_set_unlock(ptep, pgste);
        } else {
-               *ptep = new;
+               set_pte(ptep, new);
        }
        return old;
 }
                pgste = pgste_set_pte(ptep, pgste, pte);
                pgste_set_unlock(ptep, pgste);
        } else {
-               *ptep = pte;
+               set_pte(ptep, pte);
        }
        preempt_enable();
 }
        atomic_inc(&mm->context.flush_count);
        if (cpumask_equal(&mm->context.cpu_attach_mask,
                          cpumask_of(smp_processor_id()))) {
-               pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
+               set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_INVALID)));
                mm->context.flush_mm = 1;
                if (mm_has_pgste(mm))
                        gmap_pmdp_invalidate(mm, addr);
 
        preempt_disable();
        old = pmdp_flush_direct(mm, addr, pmdp);
-       *pmdp = new;
+       set_pmd(pmdp, new);
        preempt_enable();
        return old;
 }
 
        preempt_disable();
        old = pmdp_flush_lazy(mm, addr, pmdp);
-       *pmdp = new;
+       set_pmd(pmdp, new);
        preempt_enable();
        return old;
 }
 
        preempt_disable();
        old = pudp_flush_direct(mm, addr, pudp);
-       *pudp = new;
+       set_pud(pudp, new);
        preempt_enable();
        return old;
 }
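The three exchange helpers above share one shape: with preemption disabled, flush the old entry (direct or lazy), publish the replacement with a single typed store, and return the old value to the caller.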
                list_del(lh);
        }
        ptep = (pte_t *) pgtable;
-       pte_val(*ptep) = _PAGE_INVALID;
+       set_pte(ptep, __pte(_PAGE_INVALID));
        ptep++;
-       pte_val(*ptep) = _PAGE_INVALID;
+       set_pte(ptep, __pte(_PAGE_INVALID));
        return pgtable;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
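In pgtable_trans_huge_withdraw() the deposited page table doubles as its own list node (lh points into the table), so after list_del() the first two entries, which held the list linkage, are reset to _PAGE_INVALID; the conversion turns those two raw assignments into set_pte() calls.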
                        pte_val(pte) |= _PAGE_PROTECT;
                else
                        pte_val(pte) |= _PAGE_INVALID;
-               *ptep = pte;
+               set_pte(ptep, pte);
        }
        pgste_set_unlock(ptep, pgste);
        return dirty;
 
 
                                if (!new_page)
                                        goto out;
-                               pte_val(*pte) = __pa(new_page) | prot;
+                               set_pte(pte, __pte(__pa(new_page) | prot));
                        } else {
-                               pte_val(*pte) = __pa(addr) | prot;
+                               set_pte(pte, __pte(__pa(addr) | prot));
                        }
                } else {
                        continue;
                            IS_ALIGNED(next, PMD_SIZE) &&
                            MACHINE_HAS_EDAT1 && addr && direct &&
                            !debug_pagealloc_enabled()) {
-                               pmd_val(*pmd) = __pa(addr) | prot;
+                               set_pmd(pmd, __pmd(__pa(addr) | prot));
                                pages++;
                                continue;
                        } else if (!direct && MACHINE_HAS_EDAT1) {
                                 */
                                new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
                                if (new_page) {
-                                       pmd_val(*pmd) = __pa(new_page) | prot;
+                                       set_pmd(pmd, __pmd(__pa(new_page) | prot));
                                        if (!IS_ALIGNED(addr, PMD_SIZE) ||
                                            !IS_ALIGNED(next, PMD_SIZE)) {
                                                vmemmap_use_new_sub_pmd(addr, next);
                            IS_ALIGNED(next, PUD_SIZE) &&
                            MACHINE_HAS_EDAT2 && addr && direct &&
                            !debug_pagealloc_enabled()) {
-                               pud_val(*pud) = __pa(addr) | prot;
+                               set_pud(pud, __pud(__pa(addr) | prot));
                                pages++;
                                continue;
                        }