return NULL;
        cxl_rr->port = port;
        cxl_rr->region = cxlr;
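+       /* start with a single target, more are added as new dports attach */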
+       cxl_rr->nr_targets = 1;
        xa_init(&cxl_rr->endpoints);
 
        rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
        struct cxl_decoder *cxld = cxl_rr->decoder;
        struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));
 
-       rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
-                        GFP_KERNEL);
-       if (rc)
-               return rc;
+       if (ep) {
+               rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
+                              GFP_KERNEL);
+               if (rc)
+                       return rc;
+       }
        cxl_rr->nr_eps++;
 
        if (!cxld->region)
                goto out_erase;
 
+       dev_dbg(&cxlr->dev,
+               "%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
+               dev_name(port->uport), dev_name(&port->dev),
+               dev_name(&cxld->dev), dev_name(&cxlmd->dev),
+               dev_name(&cxled->cxld.dev), pos,
+               ep ? ep->next ? dev_name(ep->next->uport) :
+                               dev_name(&cxlmd->dev) : "none",
+               cxl_rr->nr_eps, cxl_rr->nr_targets);
+
        return 0;
 out_erase:
        if (cxl_rr->nr_eps == 0)
                                   struct cxl_endpoint_decoder *cxled)
 {
        struct cxl_region_ref *cxl_rr;
-       struct cxl_ep *ep;
+       struct cxl_ep *ep = NULL;
 
        lockdep_assert_held_write(&cxl_region_rwsem);
 
        if (!cxl_rr)
                return;
 
-       ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
+       /*
+        * Endpoint ports do not carry cxl_ep references, and they
+        * never target more than one endpoint by definition
+        */
+       if (cxl_rr->decoder == &cxled->cxld)
+               cxl_rr->nr_eps--;
+       else
+               ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
        if (ep) {
                struct cxl_ep *ep_iter;
                unsigned long index;
                free_region_ref(cxl_rr);
 }
 
+static int check_last_peer(struct cxl_endpoint_decoder *cxled,
+                          struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
+                          int distance)
+{
+       struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+       struct cxl_region *cxlr = cxl_rr->region;
+       struct cxl_region_params *p = &cxlr->params;
+       struct cxl_endpoint_decoder *cxled_peer;
+       struct cxl_port *port = cxl_rr->port;
+       struct cxl_memdev *cxlmd_peer;
+       struct cxl_ep *ep_peer;
+       int pos = cxled->pos;
+
+       /*
+        * If this position wants to share a dport with the last endpoint mapped
+        * then that endpoint, at index 'position - distance', must also be
+        * mapped by this dport.
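+        *
+        * For example, four endpoints interleaved through two dports
+        * gives distance = 4 / 2 = 2, so position 3 can only share a
+        * dport with the endpoint at position 1.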
+        */
+       if (pos < distance) {
+               dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
+                       dev_name(port->uport), dev_name(&port->dev),
+                       dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+               return -ENXIO;
+       }
+       cxled_peer = p->targets[pos - distance];
+       cxlmd_peer = cxled_to_memdev(cxled_peer);
+       ep_peer = cxl_ep_load(port, cxlmd_peer);
+       if (ep->dport != ep_peer->dport) {
+               dev_dbg(&cxlr->dev,
+                       "%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
+                       dev_name(port->uport), dev_name(&port->dev),
+                       dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
+                       dev_name(&cxlmd_peer->dev),
+                       dev_name(&cxled_peer->cxld.dev));
+               return -ENXIO;
+       }
+
+       return 0;
+}
+
+static int cxl_port_setup_targets(struct cxl_port *port,
+                                 struct cxl_region *cxlr,
+                                 struct cxl_endpoint_decoder *cxled)
+{
+       struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+       int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
+       struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
+       struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
+       struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+       struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
+       struct cxl_region_params *p = &cxlr->params;
+       struct cxl_decoder *cxld = cxl_rr->decoder;
+       struct cxl_switch_decoder *cxlsd;
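+       /* eig/eiw: this port's CXL encoded values, peig/peiw: the parent's */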
+       u16 eig, peig;
+       u8 eiw, peiw;
+
+       /*
+        * While root level decoders support x3, x6, x12, switch level
+        * decoders only support powers of 2 up to x16.
+        */
+       if (!is_power_of_2(cxl_rr->nr_targets)) {
+               dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
+                       dev_name(port->uport), dev_name(&port->dev),
+                       cxl_rr->nr_targets);
+               return -EINVAL;
+       }
+
+       cxlsd = to_cxl_switch_decoder(&cxld->dev);
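+       /*
+        * If targets have already been recorded for this decoder, check
+        * whether this endpoint routes through one of the known dports
+        * and, if so, validate it against its peer before reusing the
+        * existing target slot.
+        */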
+       if (cxl_rr->nr_targets_set) {
+               int i, distance;
+
+               distance = p->nr_targets / cxl_rr->nr_targets;
+               for (i = 0; i < cxl_rr->nr_targets_set; i++)
+                       if (ep->dport == cxlsd->target[i]) {
+                               rc = check_last_peer(cxled, ep, cxl_rr,
+                                                    distance);
+                               if (rc)
+                                       return rc;
+                               goto out_target_set;
+                       }
+               goto add_target;
+       }
+
+       if (is_cxl_root(parent_port)) {
+               parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
+               parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
+               /*
+                * For purposes of address bit routing, use power-of-2 math for
+                * switch ports.
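+                * A x3 decode is treated as x1 and a x6 as x2; the
+                * factor-of-3 component is resolved by the root
+                * decoder's modulo decode, not by address bits.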
+                */
+               if (!is_power_of_2(parent_iw))
+                       parent_iw /= 3;
+       } else {
+               struct cxl_region_ref *parent_rr;
+               struct cxl_decoder *parent_cxld;
+
+               parent_rr = cxl_rr_load(parent_port, cxlr);
+               parent_cxld = parent_rr->decoder;
+               parent_ig = parent_cxld->interleave_granularity;
+               parent_iw = parent_cxld->interleave_ways;
+       }
+
+       rc = granularity_to_cxl(parent_ig, &peig);
+       if (rc)
+               return rc;
+       rc = ways_to_cxl(parent_iw, &peiw);
+       if (rc)
+               return rc;
+
+       iw = cxl_rr->nr_targets;
+       rc = ways_to_cxl(iw, &eiw);
+       if (rc)
+               return rc;
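+
+       /*
+        * If @parent_port is masking address bits, pick the next unused
+        * address bit to route @port's targets.
+        */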
+       if (cxl_rr->nr_targets > 1) {
+               u32 address_bit = max(peig + peiw, eiw + peig);
+
+               eig = address_bit - eiw + 1;
+       } else {
+               eiw = peiw;
+               eig = peig;
+       }
+
+       rc = cxl_to_granularity(eig, &ig);
+       if (rc) {
+               dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
+                       dev_name(port->uport), dev_name(&port->dev),
+                       256 << eig);
+               return rc;
+       }
+
+       cxld->interleave_ways = iw;
+       cxld->interleave_granularity = ig;
+       dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport),
+               dev_name(&port->dev), iw, ig);
+add_target:
+       if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
+               dev_dbg(&cxlr->dev,
+                       "%s:%s: targets full trying to add %s:%s at %d\n",
+                       dev_name(port->uport), dev_name(&port->dev),
+                       dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+               return -ENXIO;
+       }
+       cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
+       inc = 1;
+out_target_set:
+       cxl_rr->nr_targets_set += inc;
+       dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
+               dev_name(port->uport), dev_name(&port->dev),
+               cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport),
+               dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
+
+       return 0;
+}
+
+static void cxl_port_reset_targets(struct cxl_port *port,
+                                  struct cxl_region *cxlr)
+{
+       struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
+
+       /*
+        * After the last endpoint has been detached the entire cxl_rr may now
+        * be gone.
+        */
+       if (cxl_rr)
+               cxl_rr->nr_targets_set = 0;
+}
+
+static void cxl_region_teardown_targets(struct cxl_region *cxlr)
+{
+       struct cxl_region_params *p = &cxlr->params;
+       struct cxl_endpoint_decoder *cxled;
+       struct cxl_memdev *cxlmd;
+       struct cxl_port *iter;
+       struct cxl_ep *ep;
+       int i;
+
+       for (i = 0; i < p->nr_targets; i++) {
+               cxled = p->targets[i];
+               cxlmd = cxled_to_memdev(cxled);
+
+               iter = cxled_to_port(cxled);
+               while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+                       iter = to_cxl_port(iter->dev.parent);
+
+               for (ep = cxl_ep_load(iter, cxlmd); iter;
+                    iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
+                       cxl_port_reset_targets(iter, cxlr);
+       }
+}
+
+static int cxl_region_setup_targets(struct cxl_region *cxlr)
+{
+       struct cxl_region_params *p = &cxlr->params;
+       struct cxl_endpoint_decoder *cxled;
+       struct cxl_memdev *cxlmd;
+       struct cxl_port *iter;
+       struct cxl_ep *ep;
+       int i, rc;
+
+       for (i = 0; i < p->nr_targets; i++) {
+               cxled = p->targets[i];
+               cxlmd = cxled_to_memdev(cxled);
+
+               iter = cxled_to_port(cxled);
+               while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
+                       iter = to_cxl_port(iter->dev.parent);
+
+               /*
+                * Descend the topology tree programming targets while
+                * looking for conflicts.
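+                * The walk starts at the port just below the root
+                * (found above) and follows each cxl_ep's @next pointer
+                * toward the endpoint; @next is NULL at the port whose
+                * dport connects the endpoint directly, terminating the
+                * loop.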
+                */
+               for (ep = cxl_ep_load(iter, cxlmd); iter;
+                    iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
+                       rc = cxl_port_setup_targets(iter, cxlr, cxled);
+                       if (rc) {
+                               cxl_region_teardown_targets(cxlr);
+                               return rc;
+                       }
+               }
+       }
+
+       return 0;
+}
+
 static int cxl_region_attach(struct cxl_region *cxlr,
                             struct cxl_endpoint_decoder *cxled, int pos)
 {
        cxled->pos = pos;
        p->nr_targets++;
 
-       if (p->nr_targets == p->interleave_ways)
+       if (p->nr_targets == p->interleave_ways) {
+               rc = cxl_region_setup_targets(cxlr);
+               if (rc)
+                       goto err;
                p->state = CXL_CONFIG_ACTIVE;
+       }
 
        return 0;
 
                goto out;
        }
 
-       if (p->state == CXL_CONFIG_ACTIVE)
+       if (p->state == CXL_CONFIG_ACTIVE) {
                p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
+               cxl_region_teardown_targets(cxlr);
+       }
        p->targets[cxled->pos] = NULL;
        p->nr_targets--;