regular load/store instructions if the cpu does not implement the
          feature.
 
+config ARM64_PMEM
+       bool "Enable support for persistent memory"
+       select ARCH_HAS_PMEM_API
+       help
+         Say Y to enable support for the persistent memory API based on the
+         ARMv8.2 DCPoP feature.
+
+         The feature is detected at runtime, and the kernel will use DC CVAC
+         operations if DC CVAP is not supported (following the behaviour of
+         DC CVAP itself if the system does not define a point of persistence).
+
 endmenu
 
 config ARM64_MODULE_CMODEL_LARGE
 
        dc      \op, \kaddr
 alternative_else
        dc      civac, \kaddr
+alternative_endif
+       .elseif (\op == cvap)
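+       // With ARM64_HAS_DCPOP the alternatives framework patches in DC CVAP
+       // (clean to the Point of Persistence); otherwise DC CVAC is executed.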
+alternative_if ARM64_HAS_DCPOP
+       sys 3, c7, c12, 1, \kaddr       // dc cvap
+alternative_else
+       dc      cvac, \kaddr
 alternative_endif
        .else
        dc      \op, \kaddr
 
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __inval_dcache_area(void *addr, size_t len);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
+extern void __clean_dcache_area_pop(void *addr, size_t len);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
 extern void sync_icache_aliases(void *kaddr, unsigned long len);
 
 #define ARM64_WORKAROUND_QCOM_FALKOR_E1003     18
 #define ARM64_WORKAROUND_858921                        19
 #define ARM64_WORKAROUND_CAVIUM_30115          20
+#define ARM64_HAS_DCPOP                                21
 
-#define ARM64_NCAPS                            21
+#define ARM64_NCAPS                            22
 
 #endif /* __ASM_CPUCAPS_H */
 
                .min_field_value = 0,
                .matches = has_no_fpsimd,
        },
+#ifdef CONFIG_ARM64_PMEM
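+       /* DC CVAP support is advertised in ID_AA64ISAR1_EL1.DPB (value >= 1) */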
+       {
+               .desc = "Data cache clean to Point of Persistence",
+               .capability = ARM64_HAS_DCPOP,
+               .def_scope = SCOPE_SYSTEM,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64ISAR1_EL1,
+               .field_pos = ID_AA64ISAR1_DPB_SHIFT,
+               .min_field_value = 1,
+       },
+#endif
        {},
 };
 
 
 ENDPIPROC(__clean_dcache_area_poc)
 ENDPROC(__dma_clean_area)
 
+/*
+ *     __clean_dcache_area_pop(kaddr, size)
+ *
+ *     Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *     are cleaned to the PoP.
+ *
+ *     - kaddr   - kernel address
+ *     - size    - size in question
+ */
+ENTRY(__clean_dcache_area_pop)
+       dcache_by_line_op cvap, sy, x0, x1, x2, x3
+       ret
+ENDPIPROC(__clean_dcache_area_pop)
+
 /*
  *     __dma_flush_area(start, size)
  *
 
  * Additional functions defined in assembly.
  */
 EXPORT_SYMBOL(flush_icache_range);
+
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+void arch_wb_cache_pmem(void *addr, size_t size)
+{
+       /* Ensure order against any prior non-cacheable writes */
+       dmb(osh);
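+       /* Clean to the PoP; patched to DC CVAC at boot if DCPoP is absent */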
+       __clean_dcache_area_pop(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
+
+void arch_invalidate_pmem(void *addr, size_t size)
+{
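+       /* Drop cached lines so subsequent reads are fetched from the pmem media */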
+       __inval_dcache_area(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
+#endif
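
For illustration only (not part of the patch): a minimal sketch of how a caller
that has mapped persistent memory cacheably could use this hook, assuming the
declarations made available via <linux/libnvdimm.h> under
CONFIG_ARCH_HAS_PMEM_API. The helper name example_persist() is hypothetical;
in-tree users reach arch_wb_cache_pmem() through the generic pmem/dax code
rather than calling it directly.

/* Hypothetical illustration, not part of this patch */
#include <linux/string.h>
#include <linux/libnvdimm.h>

static void example_persist(void *pmem_dst, const void *src, size_t len)
{
        memcpy(pmem_dst, src, len);             /* ordinary cacheable stores */
        arch_wb_cache_pmem(pmem_dst, len);      /* clean to the PoP (DC CVAP, or DC CVAC fallback) */
}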