		}
	}

-	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
+	ret = xa_err(xa_store(&ctrl->p2p_ns_map, ns->nsid, p2p_dev,
+			GFP_KERNEL));
	if (ret < 0)
		pci_dev_put(p2p_dev);

	return ret;
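A semantic note on this hunk: radix_tree_insert() failed with -EEXIST if the slot was already occupied, while xa_store() unconditionally replaces any existing entry. On success xa_store() returns the previous entry (NULL if none); on allocation failure it returns an xa_err()-encoded pointer, and xa_err() folds both cases into 0 or a negative errno. A minimal sketch of the idiom, outside the patch, with hypothetical names:

	#include <linux/xarray.h>

	/* Store @entry at @index; 0 on success, -ENOMEM etc. on failure. */
	static int example_store(struct xarray *xa, unsigned long index,
				 void *entry)
	{
		return xa_err(xa_store(xa, index, entry, GFP_KERNEL));
	}

If the old -EEXIST behaviour mattered, xa_insert() would preserve it by returning -EBUSY for an occupied index; here an nsid is presumably mapped at most once per controller while the namespace is enabled, so a plain xa_store() looks safe.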
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
-		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+		pci_dev_put(xa_erase(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
	subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
-		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+		pci_dev_put(xa_erase(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);
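Both removal paths above — the enable-error unwind and the namespace-disable path — rely on the same pair of properties: xa_erase() removes the entry at an index and returns it (NULL if nothing was stored), and pci_dev_put() is NULL-safe, so delete-and-put collapses into a single call. Spelled out as a sketch, with hypothetical variable names:

	struct pci_dev *old;

	old = xa_erase(&ctrl->p2p_ns_map, ns->nsid);	/* NULL if absent */
	pci_dev_put(old);				/* NULL-safe put */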
	if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
		if (req->sq->ctrl && req->ns)
-			p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
+			p2p_dev = xa_load(&req->sq->ctrl->p2p_ns_map,
				req->ns->nsid);

		req->p2p_dev = NULL;
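On the I/O side the lookup becomes xa_load(), which returns the stored pointer or NULL and needs no external locking: it takes the RCU read lock internally for the walk. The returned device stays valid here because the map itself holds a reference, taken at store time and dropped by the xa_erase() paths above. A hypothetical wrapper, just to show the shape of the call:

	/* Hypothetical helper: find the P2P device chosen for @nsid. */
	static struct pci_dev *example_lookup(struct nvmet_ctrl *ctrl, u32 nsid)
	{
		return xa_load(&ctrl->p2p_ns_map, nsid);	/* NULL if unmapped */
	}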
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}

-/*
- * Note: ctrl->subsys->lock should be held when calling this function
- */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
-	struct radix_tree_iter iter;
-	void __rcu **slot;
+	struct pci_dev *pdev;
+	unsigned long index;

-	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
-		pci_dev_put(radix_tree_deref_slot(slot));
+	xa_for_each(&ctrl->p2p_ns_map, index, pdev)
+		pci_dev_put(pdev);

	put_device(ctrl->p2p_client);
}
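The iteration in nvmet_release_p2p_ns_map() is where the conversion pays off most visibly: the iterator struct and the __rcu slot double-dereference are gone, and xa_for_each() hands back each present entry together with its index. One hedged API observation, since it is not something this hunk does: xa_for_each() only visits entries, it does not remove them, so a teardown like this would normally be followed by xa_destroy() to free the XArray's internal nodes if any entries could still be present:

	/* Sketch of a full teardown; the xa_destroy() is the addition. */
	xa_for_each(&ctrl->p2p_ns_map, index, pdev)
		pci_dev_put(pdev);		/* drop the map's reference */
	xa_destroy(&ctrl->p2p_ns_map);		/* free internal nodes */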
	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
-	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
+	xa_init(&ctrl->p2p_ns_map);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
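Initialization gets simpler too: xa_init() takes no GFP mask, because the XArray asks for allocation flags at each modifying call instead (hence the GFP_KERNEL passed to xa_store() earlier). For a file-scope map the static form would be, with a hypothetical name:

	/* Static equivalent of xa_init() for a file-scope map: */
	static DEFINE_XARRAY(example_map);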
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
	char			hostnqn[NVMF_NQN_FIELD_LEN];
	struct device		*p2p_client;
-	struct radix_tree_root	p2p_ns_map;
+	struct xarray		p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
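struct xarray is a small, self-contained object that embeds its own spinlock, so stores and erases need no separate lock from the caller. Pulling the pieces together, a hedged end-to-end sketch of the lifecycle this patch sets up — abbreviated, hypothetical names, not literal nvmet code:

	#include <linux/pci.h>
	#include <linux/xarray.h>

	static int lifecycle_example(struct xarray *map, unsigned long nsid,
				     struct pci_dev *p2p_dev)
	{
		struct pci_dev *pdev;
		unsigned long idx;
		int ret;

		xa_init(map);				/* no GFP flags at init */

		/* Map nsid -> device; the map now owns the reference. */
		ret = xa_err(xa_store(map, nsid, p2p_dev, GFP_KERNEL));
		if (ret < 0)
			return ret;			/* e.g. -ENOMEM */

		pdev = xa_load(map, nsid);		/* lockless lookup */

		pci_dev_put(xa_erase(map, nsid));	/* unmap + drop ref */

		xa_for_each(map, idx, pdev)		/* final sweep */
			pci_dev_put(pdev);
		xa_destroy(map);
		return 0;
	}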