static void seq_print_resource_pending_meta_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
struct drbd_device *device;
- unsigned int i;
+ unsigned long i;
seq_puts(m, "minor\tvnr\tstart\tsubmit\tintent\n");
rcu_read_lock();
- idr_for_each_entry(&resource->devices, device, i) {
+ xa_for_each(&resource->devices, i, device) {
struct drbd_md_io tmp;
/* In theory this is racy,
* in the sense that there could have been a
static void seq_print_waiting_for_AL(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
struct drbd_device *device;
- unsigned int i;
+ unsigned long i;
seq_puts(m, "minor\tvnr\tage\t#waiting\n");
rcu_read_lock();
- idr_for_each_entry(&resource->devices, device, i) {
+ xa_for_each(&resource->devices, i, device) {
unsigned long jif;
struct drbd_request *req;
int n = atomic_read(&device->ap_actlog_cnt);
static void seq_print_resource_pending_bitmap_io(struct seq_file *m, struct drbd_resource *resource, unsigned long now)
{
struct drbd_device *device;
- unsigned int i;
+ unsigned long i;
seq_puts(m, "minor\tvnr\trw\tage\t#in-flight\n");
rcu_read_lock();
- idr_for_each_entry(&resource->devices, device, i) {
+ xa_for_each(&resource->devices, i, device) {
seq_print_device_bitmap_io(m, device, now);
}
rcu_read_unlock();
struct drbd_resource *resource, unsigned long now)
{
struct drbd_device *device;
- unsigned int i;
+ unsigned long i;
rcu_read_lock();
- idr_for_each_entry(&resource->devices, device, i) {
+ xa_for_each(&resource->devices, i, device) {
seq_print_device_peer_requests(m, device, now);
}
rcu_read_unlock();
#define div_floor(A, B) ((A)/(B))
extern struct ratelimit_state drbd_ratelimit_state;
-extern struct idr drbd_devices; /* RCU, updates: genl_lock() */
+extern struct xarray drbd_devices; /* RCU, updates: genl_lock() */
extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */
extern const char *cmdname(enum drbd_packet cmd);
struct dentry *debugfs_res_in_flight_summary;
#endif
struct kref kref;
- struct idr devices; /* volume number to device mapping */
+ struct xarray devices; /* volume number to device mapping */
struct list_head connections;
struct list_head resources;
struct res_opts res_opts;
static inline struct drbd_device *minor_to_device(unsigned int minor)
{
- return (struct drbd_device *)idr_find(&drbd_devices, minor);
+ return xa_load(&drbd_devices, minor);
}
static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
* as member "struct gendisk *vdisk;"
*/
-struct idr drbd_devices;
+DEFINE_XARRAY(drbd_devices);
struct list_head drbd_resources;
struct mutex resources_mutex;
struct drbd_resource *resource =
container_of(kref, struct drbd_resource, kref);
- idr_destroy(&resource->devices);
+ xa_destroy(&resource->devices);
free_cpumask_var(resource->cpu_mask);
kfree(resource->name);
memset(resource, 0xf2, sizeof(*resource));
static void drbd_cleanup(void)
{
- unsigned int i;
+ unsigned long i;
struct drbd_device *device;
struct drbd_resource *resource, *tmp;
drbd_genl_unregister();
- idr_for_each_entry(&drbd_devices, device, i)
+ xa_for_each(&drbd_devices, i, device)
drbd_delete_device(device);
+ xa_destroy(&drbd_devices);
/* not _rcu since, no other updater anymore. Genl already unregistered */
for_each_resource_safe(resource, tmp, &drbd_resources) {
drbd_destroy_mempools();
unregister_blkdev(DRBD_MAJOR, "drbd");
- idr_destroy(&drbd_devices);
-
pr_info("module cleanup done.\n");
}
if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
goto fail_free_name;
kref_init(&resource->kref);
- idr_init(&resource->devices);
+ xa_init(&resource->devices);
INIT_LIST_HEAD(&resource->connections);
resource->write_ordering = WO_BDEV_FLUSH;
list_add_tail_rcu(&resource->resources, &drbd_resources);
struct drbd_peer_device *peer_device, *tmp_peer_device;
struct gendisk *disk;
struct request_queue *q;
- int id;
+ int ret;
int vnr = adm_ctx->volume;
enum drbd_ret_code err = ERR_NOMEM;
device->read_requests = RB_ROOT;
device->write_requests = RB_ROOT;
- id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
- if (id < 0) {
- if (id == -ENOSPC)
+ ret = xa_insert(&drbd_devices, minor, device, GFP_KERNEL);
+ if (ret < 0) {
+ if (ret == -EBUSY)
err = ERR_MINOR_OR_VOLUME_EXISTS;
- goto out_no_minor_idr;
+ goto out_no_minor;
}
kref_get(&device->kref);
- id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
- if (id < 0) {
- if (id == -ENOSPC)
+ ret = xa_insert(&resource->devices, vnr, device, GFP_KERNEL);
+ if (ret < 0) {
+ if (ret == -EBUSY)
err = ERR_MINOR_OR_VOLUME_EXISTS;
- goto out_idr_remove_minor;
+ goto out_remove_minor;
}
kref_get(&device->kref);
for_each_connection(connection, resource) {
peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
if (!peer_device)
- goto out_idr_remove_from_resource;
+ goto out_remove_from_resource;
peer_device->connection = connection;
peer_device->device = device;
list_add(&peer_device->peer_devices, &device->peer_devices);
kref_get(&device->kref);
- id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
- if (id < 0) {
- if (id == -ENOSPC)
+ ret = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
+ if (ret < 0) {
+ if (ret == -ENOSPC)
err = ERR_INVALID_REQUEST;
- goto out_idr_remove_from_resource;
+ goto out_remove_from_resource;
}
kref_get(&connection->kref);
INIT_WORK(&peer_device->send_acks_work, drbd_send_acks_wf);
out_idr_remove_vol:
idr_remove(&connection->peer_devices, vnr);
-out_idr_remove_from_resource:
+out_remove_from_resource:
for_each_connection(connection, resource) {
peer_device = idr_remove(&connection->peer_devices, vnr);
if (peer_device)
list_del(&peer_device->peer_devices);
kfree(peer_device);
}
- idr_remove(&resource->devices, vnr);
-out_idr_remove_minor:
- idr_remove(&drbd_devices, minor);
+ xa_erase(&resource->devices, vnr);
+out_remove_minor:
+ xa_erase(&drbd_devices, minor);
synchronize_rcu();
-out_no_minor_idr:
+out_no_minor:
drbd_bm_cleanup(device);
out_no_bitmap:
__free_page(device->md_io.page);
idr_remove(&connection->peer_devices, device->vnr);
kref_put(&device->kref, drbd_destroy_device);
}
- idr_remove(&resource->devices, device->vnr);
+ xa_erase(&resource->devices, device->vnr);
kref_put(&device->kref, drbd_destroy_device);
- idr_remove(&drbd_devices, device_to_minor(device));
+ xa_erase(&drbd_devices, device_to_minor(device));
kref_put(&device->kref, drbd_destroy_device);
del_gendisk(device->vdisk);
synchronize_rcu();
init_waitqueue_head(&drbd_pp_wait);
drbd_proc = NULL; /* play safe for drbd_cleanup */
- idr_init(&drbd_devices);
mutex_init(&resources_mutex);
INIT_LIST_HEAD(&drbd_resources);
struct nlattr *resource_filter;
struct drbd_resource *resource;
struct drbd_device *uninitialized_var(device);
- int minor, err, retcode;
+ int err, retcode;
+ unsigned long minor;
struct drbd_genlmsghdr *dh;
struct device_info device_info;
struct device_statistics device_statistics;
- struct idr *idr_to_search;
+ struct xarray *devices;
resource = (struct drbd_resource *)cb->args[0];
if (!cb->args[0] && !cb->args[1]) {
rcu_read_lock();
minor = cb->args[1];
- idr_to_search = resource ? &resource->devices : &drbd_devices;
- device = idr_get_next(idr_to_search, &minor);
+ devices = resource ? &resource->devices : &drbd_devices;
+ /* idr_get_next() is inclusive (finds index >= minor), so the XArray
+  * equivalent is xa_find(), not xa_find_after(), which would skip the
+  * entry stored in cb->args[1] on every dump pass. */
+ device = xa_find(devices, &minor, ULONG_MAX, XA_PRESENT);
if (!device) {
err = 0;
goto out;
}
- idr_for_each_entry_continue(idr_to_search, device, minor) {
- retcode = NO_ERROR;
- goto put_result; /* only one iteration */
- }
- err = 0;
- goto out; /* no more devices */
+ retcode = NO_ERROR;
put_result:
dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
struct drbd_resource *resource;
struct drbd_device *uninitialized_var(device);
struct drbd_peer_device *peer_device = NULL;
- int minor, err, retcode;
+ int err, retcode;
+ unsigned long minor;
struct drbd_genlmsghdr *dh;
- struct idr *idr_to_search;
+ struct xarray *devices;
resource = (struct drbd_resource *)cb->args[0];
if (!cb->args[0] && !cb->args[1]) {
rcu_read_lock();
minor = cb->args[1];
- idr_to_search = resource ? &resource->devices : &drbd_devices;
- device = idr_find(idr_to_search, minor);
+ devices = resource ? &resource->devices : &drbd_devices;
+ device = xa_load(devices, minor);
if (!device) {
next_device:
- minor++;
cb->args[2] = 0;
- device = idr_get_next(idr_to_search, &minor);
+ device = xa_find_after(devices, &minor, ULONG_MAX, XA_PRESENT);
if (!device) {
err = 0;
goto out;
struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
struct drbd_resource *resource = NULL;
struct drbd_resource *tmp;
- unsigned volume = cb->args[1];
+ unsigned long volume = cb->args[1];
/* Open coded, deferred, iteration:
* for_each_resource_safe(resource, tmp, &drbd_resources) {
* connection = "first connection of resource or undefined";
- * idr_for_each_entry(&resource->devices, device, i) {
+ * xa_for_each(&resource->devices, i, device) {
* ...
* }
* }
}
if (resource) {
next_resource:
- device = idr_get_next(&resource->devices, &volume);
+ device = xa_find(&resource->devices, &volume,
+ ULONG_MAX, XA_PRESENT);
if (!device) {
/* No more volumes to dump on this resource.
* Advance resource iterator. */
if (connection->cstate > C_STANDALONE)
return ERR_NET_CONFIGURED;
}
- if (!idr_is_empty(&resource->devices))
+ if (!xa_empty(&resource->devices))
return ERR_RES_IN_USE;
/* The state engine has stopped the sender thread, so we don't
struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
unsigned i;
+ unsigned long index;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
}
/* detach */
- idr_for_each_entry(&resource->devices, device, i) {
+ xa_for_each(&resource->devices, index, device) {
retcode = adm_detach(device, 0);
if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
}
/* delete volumes */
- idr_for_each_entry(&resource->devices, device, i) {
+ xa_for_each(&resource->devices, index, device) {
retcode = adm_del_minor(device);
if (retcode != NO_ERROR) {
/* "can not happen" */
int drbd_seq_show(struct seq_file *seq, void *v)
{
- int i, prev_i = -1;
+ /* -1 wraps to ULONG_MAX, preserving the "no previous entry" sentinel:
+  * for i == 0, i - 1 also wraps to ULONG_MAX, so no leading newline. */
+ unsigned long i, prev_i = -1;
const char *sn;
struct drbd_device *device;
struct net_conf *nc;
*/
rcu_read_lock();
- idr_for_each_entry(&drbd_devices, device, i) {
+ xa_for_each(&drbd_devices, i, device) {
if (prev_i != i - 1)
seq_putc(seq, '\n');
prev_i = i;
if (state.conn == C_STANDALONE &&
state.disk == D_DISKLESS &&
state.role == R_SECONDARY) {
- seq_printf(seq, "%2d: cs:Unconfigured\n", i);
+ seq_printf(seq, "%2lu: cs:Unconfigured\n", i);
} else {
/* reset device->congestion_reason */
bdi_rw_congested(device->rq_queue->backing_dev_info);
nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
seq_printf(seq,
- "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
+ "%2lu: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
" ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
"lo:%d pe:%d ua:%d ap:%d ep:%d wo:%c",
i, sn,
{
struct drbd_device *device;
enum write_ordering_e pwo;
- int vnr;
+ unsigned long vnr;
static char *write_ordering_str[] = {
[WO_NONE] = "none",
[WO_DRAIN_IO] = "drain",
if (wo != WO_BDEV_FLUSH)
wo = min(pwo, wo);
rcu_read_lock();
- idr_for_each_entry(&resource->devices, device, vnr) {
+ xa_for_each(&resource->devices, vnr, device) {
if (get_ldev(device)) {
wo = max_allowed_wo(device->ldev, wo);
if (device->ldev == bdev)
{
struct drbd_device *device;
struct drbd_connection *connection;
- int vnr;
+ unsigned long vnr;
*n_devices = 0;
*n_connections = 0;
- idr_for_each_entry(&resource->devices, device, vnr)
+ xa_for_each(&resource->devices, vnr, device)
(*n_devices)++;
for_each_connection(connection, resource)
(*n_connections)++;
unsigned int n_devices;
struct drbd_connection *connection;
unsigned int n_connections;
- int vnr;
+ unsigned long vnr;
struct drbd_device_state_change *device_state_change;
struct drbd_peer_device_state_change *peer_device_state_change;
device_state_change = state_change->devices;
peer_device_state_change = state_change->peer_devices;
- idr_for_each_entry(&resource->devices, device, vnr) {
+ xa_for_each(&resource->devices, vnr, device) {
kref_get(&device->kref);
device_state_change->device = device;
device_state_change->disk_state[OLD] = device->state.disk;
{
bool changed = false;
struct drbd_device *odev;
- int i;
+ unsigned long i;
rcu_read_lock();
- idr_for_each_entry(&drbd_devices, odev, i) {
+ xa_for_each(&drbd_devices, i, odev) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (!_drbd_may_sync_now(odev) &&
{
bool changed = false;
struct drbd_device *odev;
- int i;
+ unsigned long i;
rcu_read_lock();
- idr_for_each_entry(&drbd_devices, odev, i) {
+ xa_for_each(&drbd_devices, i, odev) {
if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
continue;
if (odev->state.aftr_isp) {