From: Dave Jiang
Date: Fri, 14 Mar 2025 23:22:34 +0000 (-0700)
Subject: Merge branch 'for-6.15/extended-linear-cache' into cxl-for-next2
X-Git-Tag: v6.15-rc1~45^2~7
X-Git-Url: https://www.infradead.org/git/?a=commitdiff_plain;h=763e15d04740ad2984bf009d9a5f70c099c8e6fd;p=linux.git

Merge branch 'for-6.15/extended-linear-cache' into cxl-for-next2

Add support for Extended Linear Cache for CXL. Add enumeration support
of the cache. Add MCE notification of the aliased memory address.
---

763e15d04740ad2984bf009d9a5f70c099c8e6fd
diff --cc drivers/cxl/core/Makefile
index ba5f0916d379,61c9332b3582..c390f7edc377
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@@ -14,6 -14,7 +14,8 @@@ cxl_core-y += pci.
  cxl_core-y += hdm.o
  cxl_core-y += pmu.o
  cxl_core-y += cdat.o
 +cxl_core-y += ras.o
+ cxl_core-y += acpi.o
  cxl_core-$(CONFIG_TRACING) += trace.o
  cxl_core-$(CONFIG_CXL_REGION) += region.o
+ cxl_core-$(CONFIG_CXL_MCE) += mce.o
diff --cc drivers/cxl/core/core.h
index 4d8316f97ed8,0fb779b612d1..e35f6e08ddb5
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@@ -115,8 -115,7 +115,10 @@@ bool cxl_need_node_perf_attrs_update(in
  int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
                                          struct access_coordinate *c);
  
 +int cxl_ras_init(void);
 +void cxl_ras_exit(void);
 +int cxl_gpf_port_setup(struct device *dport_dev, struct cxl_port *port);
+ int cxl_acpi_get_extended_linear_cache_size(struct resource *backing_res,
+                                             int nid, resource_size_t *size);
  
  #endif /* __CXL_CORE_H__ */
diff --cc drivers/cxl/core/mbox.c
index 85a1c1860a03,c06f19a729e8..e088c6ba1705
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@@ -1463,7 -1458,13 +1471,11 @@@ struct cxl_memdev_state *cxl_memdev_sta
  	mds->cxlds.reg_map.host = dev;
  	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
  	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
 -	mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
 -	mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
  
+ 	rc = devm_cxl_register_mce_notifier(dev, &mds->mce_notifier);
+ 	if (rc)
+ 		return ERR_PTR(rc);
+ 
  	return mds;
  }
  EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, "CXL");
diff --cc drivers/cxl/core/region.c
index 824d356d9d23,c2b4162aee42..80ba19cf3094
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@@ -3208,20 -3230,69 +3222,66 @@@ static int match_region_by_range(struc
  	cxlr = to_cxl_region(dev);
  	p = &cxlr->params;
  
- 	down_read(&cxl_region_rwsem);
+ 	guard(rwsem_read)(&cxl_region_rwsem);
  	if (p->res && p->res->start == r->start && p->res->end == r->end)
- 		rc = 1;
- 	up_read(&cxl_region_rwsem);
+ 		return 1;
  
- 	return rc;
+ 	return 0;
  }
  
+ static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
+ 					    struct resource *res)
+ {
+ 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ 	struct cxl_region_params *p = &cxlr->params;
+ 	int nid = phys_to_target_node(res->start);
+ 	resource_size_t size, cache_size, start;
+ 	int rc;
+ 
+ 	size = resource_size(res);
+ 	if (!size)
+ 		return -EINVAL;
+ 
+ 	rc = cxl_acpi_get_extended_linear_cache_size(res, nid, &cache_size);
+ 	if (rc)
+ 		return rc;
+ 
+ 	if (!cache_size)
+ 		return 0;
+ 
+ 	if (size != cache_size) {
+ 		dev_warn(&cxlr->dev,
+ 			 "Extended Linear Cache size %lld != CXL size %lld. No Support!",
+ 			 cache_size, size);
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+ 	/*
+ 	 * Move the start of the range to where the cache range starts. The
+ 	 * implementation assumes that the cache range is in front of the
+ 	 * CXL range. This is not dictated by the HMAT spec but is how the
+ 	 * current known implementation is configured.
+ 	 *
+ 	 * The cache range is expected to be within the CFMWS. The adjusted
+ 	 * res->start should not be less than cxlrd->res->start.
+ 	 */
+ 	start = res->start - cache_size;
+ 	if (start < cxlrd->res->start)
+ 		return -ENXIO;
+ 
+ 	res->start = start;
+ 	p->cache_size = cache_size;
+ 
+ 	return 0;
+ }
+ 
 -/* Establish an empty region covering the given HPA range */
 -static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
 -					   struct cxl_endpoint_decoder *cxled)
 +static int __construct_region(struct cxl_region *cxlr,
 +			      struct cxl_root_decoder *cxlrd,
 +			      struct cxl_endpoint_decoder *cxled)
  {
  	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
 -	struct cxl_port *port = cxlrd_to_port(cxlrd);
  	struct range *hpa = &cxled->cxld.hpa_range;
  	struct cxl_region_params *p;
 -	struct cxl_region *cxlr;
  	struct resource *res;
  	int rc;
diff --cc drivers/cxl/cxl.h
index 8bdfa536262e,4785cff5209f..be8a7dc77719
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@@ -478,13 -505,9 +479,14 @@@ struct cxl_region_params
  	struct resource *res;
  	struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE];
  	int nr_targets;
+ 	resource_size_t cache_size;
  };
  
 +enum cxl_partition_mode {
 +	CXL_PARTMODE_RAM,
 +	CXL_PARTMODE_PMEM,
 +};
 +
  /*
   * Indicate whether this region has been assembled by autodetection or
   * userspace assembly. Prevent endpoint decoders outside of automatic
diff --cc tools/testing/cxl/Kbuild
index 3d71447c0bd8,f625eb2d2dc5..a7ec67d4a0f2
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@@ -61,9 -61,10 +61,11 @@@ cxl_core-y += $(CXL_CORE_SRC)/pci.
  cxl_core-y += $(CXL_CORE_SRC)/hdm.o
  cxl_core-y += $(CXL_CORE_SRC)/pmu.o
  cxl_core-y += $(CXL_CORE_SRC)/cdat.o
 +cxl_core-y += $(CXL_CORE_SRC)/ras.o
+ cxl_core-y += $(CXL_CORE_SRC)/acpi.o
  cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
  cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
+ cxl_core-$(CONFIG_CXL_MCE) += $(CXL_CORE_SRC)/mce.o
  cxl_core-y += config_check.o
  cxl_core-y += cxl_core_test.o
  cxl_core-y += cxl_core_exports.o
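
Note on the address math in the region.c hunk: cxl_extended_linear_cache_resize() only accepts a layout where the DRAM cache range and the CXL range are the same size and the cache sits immediately in front of the CXL range, so an address in either half of the combined region aliases to the same offset in the other half, cache_size bytes away. The kernel's actual alias reporting lives in the new ras.o/mce.o code that is not shown in full here; the snippet below is only a standalone userspace sketch of that arithmetic under those assumptions, and the names elc_region and elc_alias are illustrative, not kernel symbols.

/*
 * Standalone illustration (not kernel code) of extended linear cache
 * aliasing: the DRAM cache range [start, start + cache_size) is
 * followed by an equally sized CXL range, and an address in one half
 * aliases to the same offset in the other half.
 */
#include <stdint.h>
#include <stdio.h>

struct elc_region {
	uint64_t start;		/* start of the DRAM cache range */
	uint64_t cache_size;	/* equals the CXL range size */
};

/* Return the alias of @addr in the other half, or 0 if outside the region. */
static uint64_t elc_alias(const struct elc_region *r, uint64_t addr)
{
	uint64_t end = r->start + 2 * r->cache_size;

	if (addr < r->start || addr >= end)
		return 0;

	if (addr < r->start + r->cache_size)
		return addr + r->cache_size;	/* cache half -> CXL half */
	return addr - r->cache_size;		/* CXL half -> cache half */
}

int main(void)
{
	/* 2 GiB of DRAM cache in front of 2 GiB of CXL memory. */
	struct elc_region r = {
		.start = 0x100000000ULL,
		.cache_size = 0x80000000ULL,
	};

	printf("alias of 0x110000000: 0x%llx\n",
	       (unsigned long long)elc_alias(&r, 0x110000000ULL));
	return 0;
}

Running the sketch prints 0x190000000 for input 0x110000000, i.e. the same offset mirrored into the other half of the combined range, which is the relationship the resize helper's "cache range in front of the CXL range" comment describes.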