www.infradead.org Git - linux.git/commitdiff
Merge branch 'for-6.15/guard_cleanups' into cxl-for-next2
author Dave Jiang <dave.jiang@intel.com>
Fri, 14 Mar 2025 22:26:23 +0000 (15:26 -0700)
committer Dave Jiang <dave.jiang@intel.com>
Fri, 14 Mar 2025 23:11:06 +0000 (16:11 -0700)
A series of CXL refactorings that use scope-based resource management to
remove goto patterns from the cleanup paths.
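
For context: the conversions rely on the guard() helpers from
<linux/cleanup.h>, which release a lock automatically when the guard
variable goes out of scope, so error paths can simply return. Below is a
minimal sketch of the before/after shape, assuming a kernel build
environment; op_before()/op_after(), busy() and do_work() are
illustrative names, not from this series.

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_lock);

static bool busy(void) { return false; }	/* placeholder */
static void do_work(void) { }			/* placeholder */

/* Before: every exit path funnels through the unlock label */
static int op_before(void)
{
	int rc = 0;

	down_write(&example_lock);
	if (busy()) {
		rc = -EBUSY;
		goto out;
	}
	do_work();
out:
	up_write(&example_lock);
	return rc;
}

/* After: guard(rwsem_write) calls up_write() when it leaves scope */
static int op_after(void)
{
	guard(rwsem_write)(&example_lock);
	if (busy())
		return -EBUSY;
	do_work();
	return 0;
}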

drivers/cxl/core/hdm.c
drivers/cxl/core/mbox.c
drivers/cxl/core/memdev.c
drivers/cxl/core/port.c
drivers/cxl/core/region.c

index d705dec1471e9849c7ae2a97a2c9eb831f6a261e,1edbf787347199401200224ae45b8d05dadbb5ad..70cae4ebf8a4bd1b2d5e05d5030a660625f1a20d
@@@ -576,18 -398,15 +566,15 @@@ int cxl_dpa_free(struct cxl_endpoint_de
        if (cxled->cxld.id != port->hdm_end) {
                dev_dbg(dev, "expected decoder%d.%d\n", port->id,
                        port->hdm_end);
-               rc = -EBUSY;
-               goto out;
+               return -EBUSY;
        }
        devm_cxl_dpa_release(cxled);
-       rc = 0;
- out:
-       up_write(&cxl_dpa_rwsem);
-       return rc;
+       return 0;
  }
  
 -int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
 -                   enum cxl_decoder_mode mode)
 +int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
 +                   enum cxl_partition_mode mode)
  {
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        return 0;
  }
  
- int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+ static int __cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
  {
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
-       struct cxl_port *port = cxled_to_port(cxled);
 -      resource_size_t free_ram_start, free_pmem_start;
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxled->cxld.dev;
 -      resource_size_t start, avail, skip;
 +      struct resource *res, *prev = NULL;
 +      resource_size_t start, avail, skip, skip_start;
        struct resource *p, *last;
-       int part, rc;
++      int part;
  
-       down_write(&cxl_dpa_rwsem);
+       guard(rwsem_write)(&cxl_dpa_rwsem);
        if (cxled->cxld.region) {
                dev_dbg(dev, "decoder attached to %s\n",
                        dev_name(&cxled->cxld.region->dev));
-               rc = -EBUSY;
-               goto out;
+               return -EBUSY;
        }
  
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                dev_dbg(dev, "decoder enabled\n");
-               rc = -EBUSY;
-               goto out;
+               return -EBUSY;
        }
  
 -      for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
 -              last = p;
 -      if (last)
 -              free_ram_start = last->end + 1;
 -      else
 -              free_ram_start = cxlds->ram_res.start;
 +      part = cxled->part;
 +      if (part < 0) {
 +              dev_dbg(dev, "partition not set\n");
-               rc = -EBUSY;
-               goto out;
++              return -EBUSY;
 +      }
  
 -      for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
 +      res = &cxlds->part[part].res;
 +      for (p = res->child, last = NULL; p; p = p->sibling)
                last = p;
        if (last)
 -              free_pmem_start = last->end + 1;
 +              start = last->end + 1;
        else
 -              free_pmem_start = cxlds->pmem_res.start;
 +              start = res->start;
  
 -      if (cxled->mode == CXL_DECODER_RAM) {
 -              start = free_ram_start;
 -              avail = cxlds->ram_res.end - start + 1;
 -              skip = 0;
 -      } else if (cxled->mode == CXL_DECODER_PMEM) {
 -              resource_size_t skip_start, skip_end;
 -
 -              start = free_pmem_start;
 -              avail = cxlds->pmem_res.end - start + 1;
 -              skip_start = free_ram_start;
 -
 -              /*
 -               * If some pmem is already allocated, then that allocation
 -               * already handled the skip.
 -               */
 -              if (cxlds->pmem_res.child &&
 -                  skip_start == cxlds->pmem_res.child->start)
 -                      skip_end = skip_start - 1;
 -              else
 -                      skip_end = start - 1;
 -              skip = skip_end - skip_start + 1;
 -      } else {
 -              dev_dbg(dev, "mode not set\n");
 -              return -EINVAL;
 +      /*
 +       * To allocate at partition N, a skip needs to be calculated for all
 +       * unallocated space at lower partitions indices.
 +       *
 +       * If a partition has any allocations, the search can end because a
 +       * previous cxl_dpa_alloc() invocation is assumed to have accounted for
 +       * all previous partitions.
 +       */
 +      skip_start = CXL_RESOURCE_NONE;
 +      for (int i = part; i; i--) {
 +              prev = &cxlds->part[i - 1].res;
 +              for (p = prev->child, last = NULL; p; p = p->sibling)
 +                      last = p;
 +              if (last) {
 +                      skip_start = last->end + 1;
 +                      break;
 +              }
 +              skip_start = prev->start;
        }
  
 +      avail = res->end - start + 1;
 +      if (skip_start == CXL_RESOURCE_NONE)
 +              skip = 0;
 +      else
 +              skip = res->start - skip_start;
 +
        if (size > avail) {
                dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
 -                      cxl_decoder_mode_name(cxled->mode), &avail);
 +                      res->name, &avail);
-               rc = -ENOSPC;
-               goto out;
+               return -ENOSPC;
        }
  
-       rc = __cxl_dpa_reserve(cxled, start, size, skip);
- out:
-       up_write(&cxl_dpa_rwsem);
+       return __cxl_dpa_reserve(cxled, start, size, skip);
+ }
+ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
+ {
+       struct cxl_port *port = cxled_to_port(cxled);
+       int rc;
  
+       rc = __cxl_dpa_alloc(cxled, size);
        if (rc)
                return rc;
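
The skip loop added above walks the partitions below the target and
accounts for any DPA that no prior allocation covered. Here is a
standalone model of that arithmetic, assuming a simplified partition
table; struct dpa_part, last_alloc_end and DPA_NONE are illustrative
stand-ins for the kernel's resource tree and CXL_RESOURCE_NONE.

#define DPA_NONE ((unsigned long long)-1)	/* stand-in for CXL_RESOURCE_NONE */

struct dpa_part {
	unsigned long long start;		/* partition base DPA */
	unsigned long long last_alloc_end;	/* end of last allocation, 0 if none */
};

static unsigned long long calc_skip(const struct dpa_part *parts, int part)
{
	unsigned long long skip_start = DPA_NONE;

	/*
	 * As in __cxl_dpa_alloc(): stop at the first lower partition that
	 * already has allocations; its allocation accounted for the rest.
	 */
	for (int i = part; i; i--) {
		const struct dpa_part *prev = &parts[i - 1];

		if (prev->last_alloc_end) {
			skip_start = prev->last_alloc_end + 1;
			break;
		}
		skip_start = prev->start;
	}

	if (skip_start == DPA_NONE)
		return 0;
	return parts[part].start - skip_start;
}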
  
index 998e1df36db673c47c4e87b957df9c29bf3f291a,0601297af0c9bb2fbf5d754b23ae0b78345168b5..7299cd3a01551c12f5575600acd7fbd33ad6cdcf
@@@ -1229,30 -1232,38 +1228,27 @@@ int cxl_mem_sanitize(struct cxl_memdev 
         * be sure that the device is unmapped.
         */
        if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
-               rc = __cxl_mem_sanitize(mds, cmd);
-       else
-               rc = -EBUSY;
-       up_read(&cxl_region_rwsem);
+               return __cxl_mem_sanitize(mds, cmd);
  
-       return rc;
+       return -EBUSY;
  }
  
 -static int add_dpa_res(struct device *dev, struct resource *parent,
 -                     struct resource *res, resource_size_t start,
 -                     resource_size_t size, const char *type)
 +static void add_part(struct cxl_dpa_info *info, u64 start, u64 size, enum cxl_partition_mode mode)
  {
 -      int rc;
 +      int i = info->nr_partitions;
  
 -      res->name = type;
 -      res->start = start;
 -      res->end = start + size - 1;
 -      res->flags = IORESOURCE_MEM;
 -      if (resource_size(res) == 0) {
 -              dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
 -              return 0;
 -      }
 -      rc = request_resource(parent, res);
 -      if (rc) {
 -              dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
 -                      res, rc);
 -              return rc;
 -      }
 -
 -      dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
 +      if (size == 0)
 +              return;
  
 -      return 0;
 +      info->part[i].range = (struct range) {
 +              .start = start,
 +              .end = start + size - 1,
 +      };
 +      info->part[i].mode = mode;
 +      info->nr_partitions++;
  }
  
 -int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
 +int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info)
  {
        struct cxl_dev_state *cxlds = &mds->cxlds;
        struct device *dev = cxlds->dev;
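
The add_part() helper above records a partition only when it has
capacity, so info->part[] stays dense. A hedged sketch of how a fetch
path might populate it follows, assuming the CXL_PARTMODE_RAM and
CXL_PARTMODE_PMEM enumerators from this series; the size parameters are
illustrative, and the real cxl_mem_dpa_fetch() derives them from device
state.

static void fill_dpa_info(struct cxl_dpa_info *info, u64 ram_size,
			  u64 pmem_size)
{
	/* volatile capacity starts at DPA 0; persistent capacity follows */
	add_part(info, 0, ram_size, CXL_PARTMODE_RAM);
	add_part(info, ram_size, pmem_size, CXL_PARTMODE_PMEM);

	/* either call is a no-op when the corresponding size is zero */
}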
Simple merge (drivers/cxl/core/memdev.c)
Simple merge (drivers/cxl/core/port.c)
index 84ce625b859152e3f01aa77b4a78ddccdb7db172,a750107a3bfffaf549b4cb12423d9d94d7d6a387..824d356d9d23ea00157eda5c9be7e17a7f0bdd59
@@@ -3301,14 -3272,39 +3271,40 @@@ static int __construct_region(struct cx
  
        /* ...to match put_device() in cxl_add_to_region() */
        get_device(&cxlr->dev);
-       up_write(&cxl_region_rwsem);
  
-       return cxlr;
+       return 0;
+ }
  
- err:
-       up_write(&cxl_region_rwsem);
-       devm_release_action(port->uport_dev, unregister_region, cxlr);
-       return ERR_PTR(rc);
+ /* Establish an empty region covering the given HPA range */
+ static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
+                                          struct cxl_endpoint_decoder *cxled)
+ {
+       struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+       struct cxl_port *port = cxlrd_to_port(cxlrd);
++      struct cxl_dev_state *cxlds = cxlmd->cxlds;
++      int rc, part = READ_ONCE(cxled->part);
+       struct cxl_region *cxlr;
 -      int rc;
+       do {
 -              cxlr = __create_region(cxlrd, cxled->mode,
++              cxlr = __create_region(cxlrd, cxlds->part[part].mode,
+                                      atomic_read(&cxlrd->region_id));
+       } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);
+       if (IS_ERR(cxlr)) {
+               dev_err(cxlmd->dev.parent,
+                       "%s:%s: %s failed assign region: %ld\n",
+                       dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
+                       __func__, PTR_ERR(cxlr));
+               return cxlr;
+       }
+       rc = __construct_region(cxlr, cxlrd, cxled);
+       if (rc) {
+               devm_release_action(port->uport_dev, unregister_region, cxlr);
+               return ERR_PTR(rc);
+       }
+       return cxlr;
  }
  
  int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
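
Two details in construct_region() replace the old err: unwind: the
do/while retries __create_region() while the cached region_id loses a
race (-EBUSY), and on failure devm_release_action() runs the registered
unregister_region() immediately rather than waiting for unbind. A
generic sketch of that locked-helper/devm-unwind split, with
illustrative setup()/teardown() helpers and lock name:

#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(big_lock);

struct thing;
static int setup(struct thing *t);	/* illustrative locked work */
static void teardown(void *data);	/* illustrative devm action */

/* Locked phase: guard() drops big_lock on every return path */
static int __do_create(struct thing *t)
{
	guard(rwsem_write)(&big_lock);
	return setup(t);
}

static struct thing *do_create(struct device *dev, struct thing *t)
{
	int rc = __do_create(t);

	if (rc) {
		/* run the devm-registered teardown now, outside the lock */
		devm_release_action(dev, teardown, t);
		return ERR_PTR(rc);
	}
	return t;
}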