 #define gic_write_lpir(v, c)           writeq_relaxed(v, c)
 
 #define gic_flush_dcache_to_poc(a,l)   \
-       __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l))
+       dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
 
 #define gits_read_baser(c)             readq_relaxed(c)
 #define gits_write_baser(v, c)         writeq_relaxed(v, c)
 
  *             - start  - virtual start address (inclusive)
  *             - end    - virtual end address (exclusive)
  *
- *     __flush_icache_range(start, end)
+ *     caches_clean_inval_pou(start, end)
  *
  *             Ensure coherency between the I-cache and the D-cache region to
  *             the Point of Unification.
  *
- *     __flush_cache_user_range(start, end)
+ *     caches_clean_inval_user_pou(start, end)
  *
  *             Ensure coherency between the I-cache and the D-cache region to
  *             the Point of Unification.
  *             Use only if the region might access user memory.
  *
- *     invalidate_icache_range(start, end)
+ *     icache_inval_pou(start, end)
  *
  *             Invalidate I-cache region to the Point of Unification.
  *
- *     __flush_dcache_area(start, end)
+ *     dcache_clean_inval_poc(start, end)
  *
  *             Clean and invalidate D-cache region to the Point of Coherency.
  *
- *     __inval_dcache_area(start, end)
+ *     dcache_inval_poc(start, end)
  *
  *             Invalidate D-cache region to the Point of Coherency.
  *
- *     __clean_dcache_area_poc(start, end)
+ *     dcache_clean_poc(start, end)
  *
  *             Clean D-cache region to the Point of Coherency.
  *
- *     __clean_dcache_area_pop(start, end)
+ *     dcache_clean_pop(start, end)
  *
  *             Clean D-cache region to the Point of Persistence.
  *
- *     __clean_dcache_area_pou(start, end)
+ *     dcache_clean_pou(start, end)
  *
  *             Clean D-cache region to the Point of Unification.
  */
-extern void __flush_icache_range(unsigned long start, unsigned long end);
-extern void invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __flush_dcache_area(unsigned long start, unsigned long end);
-extern void __inval_dcache_area(unsigned long start, unsigned long end);
-extern void __clean_dcache_area_poc(unsigned long start, unsigned long end);
-extern void __clean_dcache_area_pop(unsigned long start, unsigned long end);
-extern void __clean_dcache_area_pou(unsigned long start, unsigned long end);
-extern long __flush_cache_user_range(unsigned long start, unsigned long end);
+extern void caches_clean_inval_pou(unsigned long start, unsigned long end);
+extern void icache_inval_pou(unsigned long start, unsigned long end);
+extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_pop(unsigned long start, unsigned long end);
+extern void dcache_clean_pou(unsigned long start, unsigned long end);
+extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
 extern void sync_icache_aliases(unsigned long start, unsigned long end);
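
A minimal usage sketch of the renamed interface (illustration only, not part of the patch; the structure and function names below are hypothetical). Most conversions below follow the same pattern: data written by the kernel is cleaned and invalidated to the PoC before an observer running with the MMU or caches off reads it:

    #include <asm/cacheflush.h>

    /* Hypothetical descriptor handed to a CPU that starts with its caches off. */
    struct boot_info {
            unsigned long entry;
            unsigned long stack;
    };

    static void publish_boot_info(struct boot_info *bi)
    {
            /*
             * Push the cache lines covering *bi out to the Point of Coherency
             * so an uncached reader sees the values just written, not stale RAM.
             */
            dcache_clean_inval_poc((unsigned long)bi,
                                   (unsigned long)bi + sizeof(*bi));
    }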
 
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
-       __flush_icache_range(start, end);
+       caches_clean_inval_pou(start, end);
 
        /*
         * IPI all online CPUs so that they undergo a context synchronization
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static __always_inline void __flush_icache_all(void)
+static __always_inline void icache_inval_all_pou(void)
 {
        if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
                return;
 
 
 static inline void efi_capsule_flush_cache_range(void *addr, int size)
 {
-       __flush_dcache_area((unsigned long)addr, (unsigned long)addr + size);
+       dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
 }
 
 #endif /* _ASM_EFI_H */
 
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)   \
-       __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l))
+       dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
 
 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
 {
        if (icache_is_aliasing()) {
                /* any kind of VIPT cache */
-               __flush_icache_all();
+               icache_inval_all_pou();
        } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
                /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
                void *va = page_address(pfn_to_page(pfn));
 
-               invalidate_icache_range((unsigned long)va,
+               icache_inval_pou((unsigned long)va,
                                        (unsigned long)va + size);
        }
 }
 
         */
        if (!is_module) {
                dsb(ish);
-               __flush_icache_all();
+               icache_inval_all_pou();
                isb();
 
                /* Ignore ARM64_CB bit from feature mask */
 
         */
        ldr     w1, =kernel_size
        add     x1, x0, x1
-       bl      __clean_dcache_area_poc
+       bl      dcache_clean_poc
        ic      ialluis
 
        /*
         */
        adr     x0, 0f
        adr     x1, 3f
-       bl      __clean_dcache_area_poc
+       bl      dcache_clean_poc
 0:
        /* Turn off Dcache and MMU */
        mrs     x0, CurrentEL
 
                                                // MMU off
 
        add     x1, x0, #0x20                   // 4 x 8 bytes
-       b       __inval_dcache_area             // tail call
+       b       dcache_inval_poc                // tail call
 SYM_CODE_END(preserve_boot_args)
 
 /*
         */
        adrp    x0, init_pg_dir
        adrp    x1, init_pg_end
-       bl      __inval_dcache_area
+       bl      dcache_inval_poc
 
        /*
         * Clear the init page tables.
 
        adrp    x0, idmap_pg_dir
        adrp    x1, idmap_pg_end
-       bl      __inval_dcache_area
+       bl      dcache_inval_poc
 
        adrp    x0, init_pg_dir
        adrp    x1, init_pg_end
-       bl      __inval_dcache_area
+       bl      dcache_inval_poc
 
        ret     x28
 SYM_FUNC_END(__create_page_tables)
 
  * Because this code has to be copied to a 'safe' page, it can't call out to
  * other functions by PC-relative address. Also remember that it may be
  * mid-way through over-writing other functions. For this reason it contains
- * code from __flush_icache_range() and uses the copy_page() macro.
+ * code from caches_clean_inval_pou() and uses the copy_page() macro.
  *
  * This 'safe' page is mapped via ttbr0, and executed from there. This function
  * switches to a copy of the linear map in ttbr1, performs the restore, then
        copy_page       x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 
        add     x1, x10, #PAGE_SIZE
-       /* Clean the copied page to PoU - based on __flush_icache_range() */
+       /* Clean the copied page to PoU - based on caches_clean_inval_pou() */
        raw_dcache_line_size x2, x3
        sub     x3, x2, #1
        bic     x4, x10, x3
 
                return -ENOMEM;
 
        memcpy(page, src_start, length);
-       __flush_icache_range((unsigned long)page, (unsigned long)page + length);
+       caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
        rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
        if (rc)
                return rc;
                ret = swsusp_save();
        } else {
                /* Clean kernel core startup/idle code to PoC */
-               __flush_dcache_area((unsigned long)__mmuoff_data_start,
+               dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
                                    (unsigned long)__mmuoff_data_end);
-               __flush_dcache_area((unsigned long)__idmap_text_start,
+               dcache_clean_inval_poc((unsigned long)__idmap_text_start,
                                    (unsigned long)__idmap_text_end);
 
                /* Clean kvm setup code to PoC? */
                if (el2_reset_needed()) {
-                       __flush_dcache_area(
+                       dcache_clean_inval_poc(
                                (unsigned long)__hyp_idmap_text_start,
                                (unsigned long)__hyp_idmap_text_end);
-                       __flush_dcache_area((unsigned long)__hyp_text_start,
+                       dcache_clean_inval_poc((unsigned long)__hyp_text_start,
                                            (unsigned long)__hyp_text_end);
                }
 
         * The hibernate exit text contains a set of el2 vectors, that will
         * be executed at el2 with the mmu off in order to reload hyp-stub.
         */
-       __flush_dcache_area((unsigned long)hibernate_exit,
+       dcache_clean_inval_poc((unsigned long)hibernate_exit,
                            (unsigned long)hibernate_exit + exit_size);
 
        /*
 
 
        for (i = 0; i < ARRAY_SIZE(regs); i++) {
                if (regs[i]->override)
-                       __flush_dcache_area((unsigned long)regs[i]->override,
+                       dcache_clean_inval_poc((unsigned long)regs[i]->override,
                                            (unsigned long)regs[i]->override +
                                            sizeof(*regs[i]->override));
        }
 
 __efistub_strcmp               = __pi_strcmp;
 __efistub_strncmp              = __pi_strncmp;
 __efistub_strrchr              = __pi_strrchr;
-__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
+__efistub_dcache_clean_poc = __pi_dcache_clean_poc;
 
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 __efistub___memcpy             = __pi_memcpy;
 
 
        ret = aarch64_insn_write(tp, insn);
        if (ret == 0)
-               __flush_icache_range((uintptr_t)tp,
+               caches_clean_inval_pou((uintptr_t)tp,
                                     (uintptr_t)tp + AARCH64_INSN_SIZE);
 
        return ret;
 
         * we end up running with module randomization disabled.
         */
        module_alloc_base = (u64)_etext - MODULES_VSIZE;
-       __flush_dcache_area((unsigned long)&module_alloc_base,
+       dcache_clean_inval_poc((unsigned long)&module_alloc_base,
                            (unsigned long)&module_alloc_base +
                                    sizeof(module_alloc_base));
 
        module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
        module_alloc_base &= PAGE_MASK;
 
-       __flush_dcache_area((unsigned long)&module_alloc_base,
+       dcache_clean_inval_poc((unsigned long)&module_alloc_base,
                            (unsigned long)&module_alloc_base +
                                    sizeof(module_alloc_base));
-       __flush_dcache_area((unsigned long)&memstart_offset_seed,
+       dcache_clean_inval_poc((unsigned long)&memstart_offset_seed,
                            (unsigned long)&memstart_offset_seed +
                                    sizeof(memstart_offset_seed));
 
 
         * For execution with the MMU off, reloc_code needs to be cleaned to the
         * PoC and invalidated from the I-cache.
         */
-       __flush_dcache_area((unsigned long)reloc_code,
+       dcache_clean_inval_poc((unsigned long)reloc_code,
                            (unsigned long)reloc_code +
                                    arm64_relocate_new_kernel_size);
-       invalidate_icache_range((uintptr_t)reloc_code,
+       icache_inval_pou((uintptr_t)reloc_code,
                                (uintptr_t)reloc_code +
                                        arm64_relocate_new_kernel_size);
 
                unsigned long addr;
 
                /* flush the list entries. */
-               __flush_dcache_area((unsigned long)entry,
+               dcache_clean_inval_poc((unsigned long)entry,
                                    (unsigned long)entry +
                                            sizeof(kimage_entry_t));
 
                        break;
                case IND_SOURCE:
                        /* flush the source pages. */
-                       __flush_dcache_area(addr, addr + PAGE_SIZE);
+                       dcache_clean_inval_poc(addr, addr + PAGE_SIZE);
                        break;
                case IND_DESTINATION:
                        break;
                        kimage->segment[i].memsz,
                        kimage->segment[i].memsz /  PAGE_SIZE);
 
-               __flush_dcache_area(
+               dcache_clean_inval_poc(
                        (unsigned long)phys_to_virt(kimage->segment[i].mem),
                        (unsigned long)phys_to_virt(kimage->segment[i].mem) +
                                kimage->segment[i].memsz);
 
        secondary_data.task = idle;
        secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
        update_cpu_boot_status(CPU_MMU_OFF);
-       __flush_dcache_area((unsigned long)&secondary_data,
+       dcache_clean_inval_poc((unsigned long)&secondary_data,
                            (unsigned long)&secondary_data +
                                    sizeof(secondary_data));
 
        pr_crit("CPU%u: failed to come online\n", cpu);
        secondary_data.task = NULL;
        secondary_data.stack = NULL;
-       __flush_dcache_area((unsigned long)&secondary_data,
+       dcache_clean_inval_poc((unsigned long)&secondary_data,
                            (unsigned long)&secondary_data +
                                    sizeof(secondary_data));
        status = READ_ONCE(secondary_data.status);
 
        unsigned long size = sizeof(secondary_holding_pen_release);
 
        secondary_holding_pen_release = val;
-       __flush_dcache_area((unsigned long)start, (unsigned long)start + size);
+       dcache_clean_inval_poc((unsigned long)start, (unsigned long)start + size);
 }
 
 
         * the boot protocol.
         */
        writeq_relaxed(pa_holding_pen, release_addr);
-       __flush_dcache_area((__force unsigned long)release_addr,
+       dcache_clean_inval_poc((__force unsigned long)release_addr,
                            (__force unsigned long)release_addr +
                                    sizeof(*release_addr));
 
 
                        dsb(ish);
                }
 
-               ret = __flush_cache_user_range(start, start + chunk);
+               ret = caches_clean_inval_user_pou(start, start + chunk);
                if (ret)
                        return ret;
 
 
                if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
                        stage2_unmap_vm(vcpu->kvm);
                else
-                       __flush_icache_all();
+                       icache_inval_all_pou();
        }
 
        vcpu_reset_hcr(vcpu);
 
 #include <asm/assembler.h>
 #include <asm/alternative.h>
 
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
 
        for (i = 0; i < hyp_nr_cpus; i++) {
                params = per_cpu_ptr(&kvm_init_params, i);
                params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
-               __flush_dcache_area((unsigned long)params,
+               dcache_clean_inval_poc((unsigned long)params,
                                    (unsigned long)params + sizeof(*params));
        }
 }
 
         * you should be running with VHE enabled.
         */
        if (icache_is_vpipt())
-               __flush_icache_all();
+               icache_inval_all_pou();
 
        __tlb_switch_to_host(&cxt);
 }
 
        if (need_flush) {
                kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
 
-               __flush_dcache_area((unsigned long)pte_follow,
+               dcache_clean_inval_poc((unsigned long)pte_follow,
                                    (unsigned long)pte_follow +
                                            kvm_granule_size(level));
        }
                return 0;
 
        pte_follow = kvm_pte_follow(pte, mm_ops);
-       __flush_dcache_area((unsigned long)pte_follow,
+       dcache_clean_inval_poc((unsigned long)pte_follow,
                            (unsigned long)pte_follow +
                                    kvm_granule_size(level));
        return 0;
 
         * barrier to order the cache maintenance against the memcpy.
         */
        memcpy(dst, src, cnt);
-       __clean_dcache_area_pop((unsigned long)dst, (unsigned long)dst + cnt);
+       dcache_clean_pop((unsigned long)dst, (unsigned long)dst + cnt);
 }
 EXPORT_SYMBOL_GPL(memcpy_flushcache);
 
        rc = raw_copy_from_user(to, from, n);
 
        /* See above */
-       __clean_dcache_area_pop((unsigned long)to, (unsigned long)to + n - rc);
+       dcache_clean_pop((unsigned long)to, (unsigned long)to + n - rc);
        return rc;
 }
 
 #include <asm/asm-uaccess.h>
 
 /*
- *     __flush_cache_range(start,end) [fixup]
+ *     caches_clean_inval_pou_macro(start,end) [fixup]
  *
  *     Ensure that the I and D caches are coherent within specified region.
  *     This is typically used when code has been written to a memory region,
  *     - end     - virtual end address of region
  *     - fixup   - optional label to branch to on user fault
  */
-.macro __flush_cache_range, fixup
+.macro caches_clean_inval_pou_macro, fixup
 alternative_if ARM64_HAS_CACHE_IDC
        dsb     ishst
        b       .Ldc_skip_\@
 .endm
 
 /*
- *     __flush_icache_range(start,end)
+ *     caches_clean_inval_pou(start,end)
  *
  *     Ensure that the I and D caches are coherent within specified region.
  *     This is typically used when code has been written to a memory region,
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START(__flush_icache_range)
-       __flush_cache_range
+SYM_FUNC_START(caches_clean_inval_pou)
+       caches_clean_inval_pou_macro
        ret
-SYM_FUNC_END(__flush_icache_range)
+SYM_FUNC_END(caches_clean_inval_pou)
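
For reference, the C-level pattern that lands in this routine (e.g. via aarch64_insn_write() earlier in the patch) is: write the new instructions through the D-side, then make the I-side coherent to the PoU. A rough sketch with a hypothetical helper; callers still need a context synchronization event (an ISB locally, or the IPI issued by flush_icache_range()):

    #include <linux/types.h>
    #include <asm/cacheflush.h>

    /* Hypothetical helper: install one 32-bit instruction at *site. */
    static void patch_insn(u32 *site, u32 insn)
    {
            *site = insn;
            /* D-side clean and I-side invalidate to the PoU for that line. */
            caches_clean_inval_pou((unsigned long)site,
                                   (unsigned long)site + sizeof(insn));
    }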
 
 /*
- *     __flush_cache_user_range(start,end)
+ *     caches_clean_inval_user_pou(start,end)
  *
  *     Ensure that the I and D caches are coherent within specified region.
  *     This is typically used when code has been written to a memory region,
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START(__flush_cache_user_range)
+SYM_FUNC_START(caches_clean_inval_user_pou)
        uaccess_ttbr0_enable x2, x3, x4
 
-       __flush_cache_range 2f
+       caches_clean_inval_pou_macro 2f
        mov     x0, xzr
 1:
        uaccess_ttbr0_disable x1, x2
 2:
        mov     x0, #-EFAULT
        b       1b
-SYM_FUNC_END(__flush_cache_user_range)
+SYM_FUNC_END(caches_clean_inval_user_pou)
 
 /*
- *     invalidate_icache_range(start,end)
+ *     icache_inval_pou(start,end)
  *
  *     Ensure that the I cache is invalid within specified region.
  *
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START(invalidate_icache_range)
+SYM_FUNC_START(icache_inval_pou)
 alternative_if ARM64_HAS_CACHE_DIC
        isb
        ret
 
        invalidate_icache_by_line x0, x1, x2, x3
        ret
-SYM_FUNC_END(invalidate_icache_range)
+SYM_FUNC_END(icache_inval_pou)
 
 /*
- *     __flush_dcache_area(start, end)
+ *     dcache_clean_inval_poc(start, end)
  *
  *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned and invalidated to the PoC.
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
        dcache_by_line_op civac, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
 
 /*
- *     __clean_dcache_area_pou(start, end)
+ *     dcache_clean_pou(start, end)
  *
  *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned to the PoU.
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START(__clean_dcache_area_pou)
+SYM_FUNC_START(dcache_clean_pou)
 alternative_if ARM64_HAS_CACHE_IDC
        dsb     ishst
        ret
 alternative_else_nop_endif
        dcache_by_line_op cvau, ish, x0, x1, x2, x3
        ret
-SYM_FUNC_END(__clean_dcache_area_pou)
+SYM_FUNC_END(dcache_clean_pou)
 
 /*
- *     __inval_dcache_area(start, end)
+ *     dcache_inval_poc(start, end)
  *
  *     Ensure that any D-cache lines for the interval [start, end)
  *     are invalidated. Any partial lines at the ends of the interval are
  *     - end     - kernel end address of region
  */
 SYM_FUNC_START_LOCAL(__dma_inv_area)
-SYM_FUNC_START_PI(__inval_dcache_area)
+SYM_FUNC_START_PI(dcache_inval_poc)
        /* FALLTHROUGH */
 
 /*
        b.lo    2b
        dsb     sy
        ret
-SYM_FUNC_END_PI(__inval_dcache_area)
+SYM_FUNC_END_PI(dcache_inval_poc)
 SYM_FUNC_END(__dma_inv_area)
 
 /*
- *     __clean_dcache_area_poc(start, end)
+ *     dcache_clean_poc(start, end)
  *
  *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned to the PoC.
  *     - end     - virtual end address of region
  */
 SYM_FUNC_START_LOCAL(__dma_clean_area)
-SYM_FUNC_START_PI(__clean_dcache_area_poc)
+SYM_FUNC_START_PI(dcache_clean_poc)
        /* FALLTHROUGH */
 
 /*
  */
        dcache_by_line_op cvac, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__clean_dcache_area_poc)
+SYM_FUNC_END_PI(dcache_clean_poc)
 SYM_FUNC_END(__dma_clean_area)
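
dcache_inval_poc() and dcache_clean_poc() above also provide the bodies of the local __dma_inv_area/__dma_clean_area aliases, so the non-coherent DMA direction rules apply to them directly. A hedged sketch of that usage (normally performed by the DMA mapping API, not by drivers calling the arch helpers; names are made up for illustration):

    #include <linux/types.h>
    #include <asm/cacheflush.h>

    /* Hypothetical buffer handling for a non-cache-coherent device. */
    static void cpu_to_device(void *buf, size_t len)
    {
            /* CPU filled the buffer: clean it to the PoC so the device reads fresh data. */
            dcache_clean_poc((unsigned long)buf, (unsigned long)buf + len);
    }

    static void device_to_cpu(void *buf, size_t len)
    {
            /* Device filled the buffer: discard stale CPU lines before reading it. */
            dcache_inval_poc((unsigned long)buf, (unsigned long)buf + len);
    }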
 
 /*
- *     __clean_dcache_area_pop(start, end)
+ *     dcache_clean_pop(start, end)
  *
  *     Ensure that any D-cache lines for the interval [start, end)
  *     are cleaned to the PoP.
  *     - start   - virtual start address of region
  *     - end     - virtual end address of region
  */
-SYM_FUNC_START_PI(__clean_dcache_area_pop)
+SYM_FUNC_START_PI(dcache_clean_pop)
        alternative_if_not ARM64_HAS_DCPOP
-       b       __clean_dcache_area_poc
+       b       dcache_clean_poc
        alternative_else_nop_endif
        dcache_by_line_op cvap, sy, x0, x1, x2, x3
        ret
-SYM_FUNC_END_PI(__clean_dcache_area_pop)
+SYM_FUNC_END_PI(dcache_clean_pop)
 
 /*
  *     __dma_flush_area(start, size)
 
 void sync_icache_aliases(unsigned long start, unsigned long end)
 {
        if (icache_is_aliasing()) {
-               __clean_dcache_area_pou(start, end);
-               __flush_icache_all();
+               dcache_clean_pou(start, end);
+               icache_inval_all_pou();
        } else {
                /*
                 * Don't issue kick_all_cpus_sync() after I-cache invalidation
                 * for user mappings.
                 */
-               __flush_icache_range(start, end);
+               caches_clean_inval_pou(start, end);
        }
 }
 
 /*
  * Additional functions defined in assembly.
  */
-EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(caches_clean_inval_pou);
 
 #ifdef CONFIG_ARCH_HAS_PMEM_API
 void arch_wb_cache_pmem(void *addr, size_t size)
 {
        /* Ensure order against any prior non-cacheable writes */
        dmb(osh);
-       __clean_dcache_area_pop((unsigned long)addr, (unsigned long)addr + size);
+       dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size);
 }
 EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 
 void arch_invalidate_pmem(void *addr, size_t size)
 {
-       __inval_dcache_area((unsigned long)addr, (unsigned long)addr + size);
+       dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size);
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
 #endif