if (!(native_read_cr4() & X86_CR4_LA57))
                return false;
 
-       RIP_REL_REF(__pgtable_l5_enabled)       = 1;
-       RIP_REL_REF(pgdir_shift)                = 48;
-       RIP_REL_REF(ptrs_per_p4d)               = 512;
-       RIP_REL_REF(page_offset_base)           = __PAGE_OFFSET_BASE_L5;
-       RIP_REL_REF(vmalloc_base)               = __VMALLOC_BASE_L5;
-       RIP_REL_REF(vmemmap_base)               = __VMEMMAP_BASE_L5;
+       __pgtable_l5_enabled    = 1;
+       pgdir_shift             = 48;
+       ptrs_per_p4d            = 512;
+       page_offset_base        = __PAGE_OFFSET_BASE_L5;
+       vmalloc_base            = __VMALLOC_BASE_L5;
+       vmemmap_base            = __VMEMMAP_BASE_L5;
 
        return true;
 }
        return sme_get_me_mask();
 }
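
For context, RIP_REL_REF() and its rip_rel_ptr() helper live in arch/x86/include/asm/asm.h and amount to forcing a RIP-relative LEA, so the access always resolves through the 1:1 mapping no matter what the compiler would otherwise emit. Roughly (a sketch of the definition in this era, not quoted verbatim):

	static __always_inline __pure void *rip_rel_ptr(void *p)
	{
		asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));

		return p;
	}

	#define RIP_REL_REF(var)	(*(typeof(&(var)))rip_rel_ptr(&(var)))

Since the file is now built with PIC codegen, the compiler emits RIP-relative accesses by itself and the wrapper is pure noise, which is why these hunks can simply drop it.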
 
-/*
- * Code in __startup_64() can be relocated during execution, but the compiler
- * doesn't have to generate PC-relative relocations when accessing globals from
- * that function. Clang actually does not generate them, which leads to
- * boot-time crashes. To work around this problem, every global pointer must
- * be accessed using RIP_REL_REF(). Kernel virtual addresses can be determined
- * by subtracting p2v_offset from the RIP-relative address.
+/*
+ * This code is compiled using PIC codegen because it will execute from the
+ * early 1:1 mapping of memory, which deviates from the mapping expected by the
+ * linker. Due to this deviation, taking the address of a global variable will
+ * produce an ambiguous result when using the plain & operator.  Instead,
+ * rip_rel_ptr() must be used, which will return the RIP-relative address in
+ * the 1:1 mapping of memory. Kernel virtual addresses can be determined by
+ * subtracting p2v_offset from the RIP-relative address.
  */
 unsigned long __head __startup_64(unsigned long p2v_offset,
                                  struct boot_params *bp)
         * Compute the delta between the address I am compiled to run at
         * and the address I am actually running at.
         */
-       load_delta = __START_KERNEL_map + p2v_offset;
-       RIP_REL_REF(phys_base) = load_delta;
+       phys_base = load_delta = __START_KERNEL_map + p2v_offset;
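
To make the arithmetic concrete, a hypothetical example assuming the default CONFIG_PHYSICAL_START of 0x1000000 and a kernel loaded 2 MiB above it:

	/*
	 * p2v_offset = phys(_text) - virt(_text)
	 *            = 0x1200000 - 0xffffffff81000000
	 *            = 0x0000000080200000			(mod 2^64)
	 *
	 * load_delta = __START_KERNEL_map + p2v_offset
	 *            = 0xffffffff80000000 + 0x80200000
	 *            = 0x200000				(mod 2^64)
	 */

Initializing phys_base with the same value keeps __pa() consistent: for kernel image addresses, __pa(v) == v - __START_KERNEL_map + phys_base == v + p2v_offset, i.e. exactly the physical address.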
 
        /* Is the address not 2M aligned? */
        if (load_delta & ~PMD_MASK)
                pgd[pgd_index(__START_KERNEL_map)] = (pgdval_t)p4d | _PAGE_TABLE;
        }
 
-       RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 2].pud += load_delta;
-       RIP_REL_REF(level3_kernel_pgt)[PTRS_PER_PUD - 1].pud += load_delta;
+       level3_kernel_pgt[PTRS_PER_PUD - 2].pud += load_delta;
+       level3_kernel_pgt[PTRS_PER_PUD - 1].pud += load_delta;
 
        for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
-               RIP_REL_REF(level2_fixmap_pgt)[i].pmd += load_delta;
+               level2_fixmap_pgt[i].pmd += load_delta;
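
These in-place += fix-ups are safe because a page table entry packs the physical address and the flag bits into a single word, and load_delta is known to be 2M aligned thanks to the ~PMD_MASK check above, so the addition can never spill into the flags. A minimal sketch with made-up values:

	pudval_t entry = 0x2000000 | _PAGE_TABLE;	/* build-time phys + flags */

	entry += 0x200000;				/* relocate by load_delta  */
	/* entry now encodes phys 0x2200000 with _PAGE_TABLE untouched */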
 
        /*
         * Set up the identity mapping for the switchover.  These
 
        pud = &early_pgts[0]->pmd;
        pmd = &early_pgts[1]->pmd;
-       RIP_REL_REF(next_early_pgt) = 2;
+       next_early_pgt = 2;
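
next_early_pgt is just a bump index into the static pool of page-sized tables that head_64.S reserves (early_dynamic_pgts, EARLY_DYNAMIC_PAGE_TABLES entries): slots 0 and 1 are claimed for the pud and pmd here, and every later consumer increments the index. The allocation pattern, sketched with simplified types and a hypothetical helper name:

	extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
	static unsigned int next_early_pgt;

	/* hand out the next page-sized table: no freeing, no bounds check */
	static void *alloc_early_pgt(void)
	{
		return early_dynamic_pgts[next_early_pgt++];
	}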
 
        pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
 
        if (la57) {
-               p4d = &early_pgts[RIP_REL_REF(next_early_pgt)++]->pmd;
+               p4d = &early_pgts[next_early_pgt++]->pmd;
 
                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
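
With LA57 an extra translation level sits between the pgd and the pud, so a fresh p4d table is taken from the pool and wired in. Two consecutive pgd slots receive it (the pgd[i + 1] assignment that follows in the full function) so the identity mapping stays valid even if the kernel image happens to straddle a pgd boundary. Worked index arithmetic for a hypothetical physaddr of 0x1200000, with pgdir_shift set to 48 above:

	/* i = (0x1200000 >> 48) % 512 == 0, so pgd[0] and pgd[1] both point at p4d */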
 
        pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
        /* Filter out unsupported __PAGE_KERNEL_* bits: */
-       pmd_entry &= RIP_REL_REF(__supported_pte_mask);
+       pmd_entry &= __supported_pte_mask;
        pmd_entry += sme_get_me_mask();
        pmd_entry +=  physaddr;
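
The template entry is assembled step by step: kernel 2M-page permissions minus _PAGE_GLOBAL, masked down to what the CPU actually supports (NX is filtered out on hardware without it), plus the SME encryption mask (zero when SME is inactive), plus the physical base address. Further down, outside this hunk, the template is stamped into the consecutive 2M slots covering the image, roughly along these lines:

	/* sketch of the follow-on mapping loop, not part of this hunk */
	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
		int idx = i + (physaddr >> PMD_SHIFT);

		pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
	}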