Although cxl_trace_hpa() is used to populate TRACE EVENTs with HPA
addresses, the work it performs is a DPA to HPA translation, not a
trace. Tidy up this naming by moving the minimal work done in
cxl_trace_hpa() into cxl_dpa_to_hpa() and use cxl_dpa_to_hpa()
for trace event callbacks.
Suggested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Alison Schofield <alison.schofield@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Robert Richter <rrichter@amd.com>
Link: https://patch.msgid.link/452a9b0c525b774c72d9d5851515ffa928750132.1719980933.git.alison.schofield@intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
 
 void cxl_region_exit(void);
 int cxl_get_poison_by_endpoint(struct cxl_port *port);
 struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
-u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
-                 u64 dpa);
+u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
+                  u64 dpa);
 
 #else
-static inline u64
-cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, u64 dpa)
+static inline u64 cxl_dpa_to_hpa(struct cxl_region *cxlr,
+                                const struct cxl_memdev *cxlmd, u64 dpa)
 {
        return ULLONG_MAX;
 }
 
                dpa = le64_to_cpu(evt->common.phys_addr) & CXL_DPA_MASK;
                cxlr = cxl_dpa_to_region(cxlmd, dpa);
                if (cxlr)
-                       hpa = cxl_trace_hpa(cxlr, cxlmd, dpa);
+                       hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
 
                if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
                        trace_cxl_general_media(cxlmd, type, cxlr, hpa,
 
        return false;
 }
 
-static u64 cxl_dpa_to_hpa(u64 dpa,  struct cxl_region *cxlr,
-                         struct cxl_endpoint_decoder *cxled)
+u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
+                  u64 dpa)
 {
        u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
        struct cxl_region_params *p = &cxlr->params;
-       int pos = cxled->pos;
+       struct cxl_endpoint_decoder *cxled = NULL;
        u16 eig = 0;
        u8 eiw = 0;
+       int pos;
+
+       for (int i = 0; i < p->nr_targets; i++) {
+               cxled = p->targets[i];
+               if (cxlmd == cxled_to_memdev(cxled))
+                       break;
+       }
+       if (!cxled || cxlmd != cxled_to_memdev(cxled))
+               return ULLONG_MAX;
 
+       pos = cxled->pos;
        ways_to_eiw(p->interleave_ways, &eiw);
        granularity_to_eig(p->interleave_granularity, &eig);
 
        return hpa;
 }
 
-u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
-                 u64 dpa)
-{
-       struct cxl_region_params *p = &cxlr->params;
-       struct cxl_endpoint_decoder *cxled = NULL;
-
-       for (int i = 0; i <  p->nr_targets; i++) {
-               cxled = p->targets[i];
-               if (cxlmd == cxled_to_memdev(cxled))
-                       break;
-       }
-       if (!cxled || cxlmd != cxled_to_memdev(cxled))
-               return ULLONG_MAX;
-
-       return cxl_dpa_to_hpa(dpa, cxlr, cxled);
-}
-
 static struct lock_class_key cxl_pmem_region_key;
 
 static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
 
                if (cxlr) {
                        __assign_str(region);
                        memcpy(__entry->uuid, &cxlr->params.uuid, 16);
-                       __entry->hpa = cxl_trace_hpa(cxlr, cxlmd,
-                                                    __entry->dpa);
+                       __entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd,
+                                                     __entry->dpa);
                } else {
                        __assign_str(region);
                        memset(__entry->uuid, 0, 16);