struct dentry *debugfs_conn_oldest_requests;
#endif
struct kref kref;
- struct idr peer_devices; /* volume number to peer device mapping */
+ struct xarray peer_devices; /* volume number to peer device mapping */
enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
struct mutex cstate_mutex; /* Protects graceful disconnects */
unsigned int connect_cnt; /* Inc each time a connection is established */
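The volume-number to peer-device map moves from an IDR to an XArray. Unlike the IDR, the XArray embeds its own spinlock, so stores and erases on peer_devices need no external lock, and lookups are RCU-safe. A minimal lifecycle sketch of the replacement structure (illustration only, not drbd code; struct my_obj and lifecycle_demo() are hypothetical stand-ins):

    #include <linux/xarray.h>

    struct my_obj;                      /* hypothetical stand-in type */
    static DEFINE_XARRAY(map);          /* static form of xa_init() */

    static int lifecycle_demo(unsigned long vnr, struct my_obj *obj)
    {
            struct my_obj *old;
            int err;

            /* Store at an explicit index; the xarray's internal spinlock
             * serializes concurrent stores, so the map itself needs no
             * external lock. */
            err = xa_insert(&map, vnr, obj, GFP_KERNEL);
            if (err)
                    return err;

            /* Lockless lookup: xa_load() takes and releases the RCU read
             * lock internally, so it is safe from any context. */
            old = xa_load(&map, vnr);

            /* Remove, getting the previous entry back, like idr_remove(). */
            old = xa_erase(&map, vnr);

            /* Free the xarray's internal nodes; the entries themselves
             * remain the caller's responsibility. */
            xa_destroy(&map);

            return old ? 0 : -ENOENT;
    }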
static inline struct drbd_peer_device *
conn_peer_device(struct drbd_connection *connection, int volume_number)
{
- return idr_find(&connection->peer_devices, volume_number);
+ return xa_load(&connection->peer_devices, volume_number);
}
#define for_each_resource(resource, _resources) \
int conn_lowest_minor(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
- int vnr = 0, minor = -1;
+ unsigned long vnr = 0;
+ int minor = -1;
rcu_read_lock();
- peer_device = idr_get_next(&connection->peer_devices, &vnr);
+ peer_device = xa_find(&connection->peer_devices, &vnr,
+ ULONG_MAX, XA_PRESENT);
if (peer_device)
minor = device_to_minor(peer_device->device);
rcu_read_unlock();
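xa_find() differs from idr_get_next() in two ways that ripple through the rest of the patch: the index is an unsigned long passed by reference (in: where to start; out: where the entry was found), and the search is bounded by an explicit maximum. This is also why every loop counter below changes from int to unsigned long: xa_find() and the xa_for_each() macro take an unsigned long *, so an int index would not compile. A sketch of the lowest-index lookup, reusing the hypothetical map from the sketch above:

    unsigned long idx = 0;      /* in: start of search; out: index found */
    struct my_obj *obj;

    obj = xa_find(&map, &idx, ULONG_MAX, XA_PRESENT);
    if (obj)
            pr_info("lowest occupied index: %lu\n", idx);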
connection->cstate = C_STANDALONE;
mutex_init(&connection->cstate_mutex);
init_waitqueue_head(&connection->ping_wait);
- idr_init(&connection->peer_devices);
+ xa_init(&connection->peer_devices);
drbd_init_workqueue(&connection->sender_work);
mutex_init(&connection->data.mutex);
drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
kfree(connection->current_epoch);
- idr_destroy(&connection->peer_devices);
+ xa_destroy(&connection->peer_devices);
drbd_free_socket(&connection->meta);
drbd_free_socket(&connection->data);
list_add(&peer_device->peer_devices, &device->peer_devices);
kref_get(&device->kref);
- ret = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
+ ret = xa_insert(&connection->peer_devices, vnr, peer_device,
+ GFP_KERNEL);
if (ret < 0) {
- if (ret == -ENOSPC)
+ if (ret == -EBUSY)
err = ERR_INVALID_REQUEST;
goto out_remove_from_resource;
}
return NO_ERROR;
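The error mapping changes with the API: idr_alloc() with the one-slot range [vnr, vnr + 1) reported an already-used volume number as -ENOSPC, while xa_insert() reports an occupied index as -EBUSY (and allocation failure as -ENOMEM). Plain xa_init() suffices here because drbd always stores at an explicit volume number; xa_alloc() and XA_FLAGS_ALLOC would only be needed if the kernel were choosing the ID. A sketch of the mapping, not the patch's actual code (ERR_INVALID_REQUEST, ERR_NOMEM and NO_ERROR are drbd's existing return codes; store_peer_device() is a made-up helper):

    static enum drbd_ret_code store_peer_device(struct drbd_connection *c,
                                                unsigned long vnr,
                                                struct drbd_peer_device *pd)
    {
            int err = xa_insert(&c->peer_devices, vnr, pd, GFP_KERNEL);

            if (err == -EBUSY)  /* index occupied; idr_alloc() said -ENOSPC */
                    return ERR_INVALID_REQUEST;
            if (err)            /* -ENOMEM */
                    return ERR_NOMEM;
            return NO_ERROR;
    }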
out_idr_remove_vol:
- idr_remove(&connection->peer_devices, vnr);
+ xa_erase(&connection->peer_devices, vnr);
out_remove_from_resource:
for_each_connection(connection, resource) {
- peer_device = idr_remove(&connection->peer_devices, vnr);
+ peer_device = xa_erase(&connection->peer_devices, vnr);
if (peer_device)
kref_put(&connection->kref, drbd_destroy_connection);
}
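xa_erase(), like idr_remove(), returns the entry that was stored at the index (or NULL), so the existing "drop the reference only if something was actually removed" pattern carries over unchanged:

    struct drbd_peer_device *pd;

    pd = xa_erase(&connection->peer_devices, vnr);
    if (pd)             /* an entry existed; release the reference it held */
            kref_put(&connection->kref, drbd_destroy_connection);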
drbd_debugfs_peer_device_cleanup(peer_device);
drbd_debugfs_device_cleanup(device);
for_each_connection(connection, resource) {
- idr_remove(&connection->peer_devices, device->vnr);
+ xa_erase(&connection->peer_devices, device->vnr);
kref_put(&device->kref, drbd_destroy_device);
}
xa_erase(&resource->devices, device->vnr);
void conn_md_sync(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
kref_get(&device->kref);
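This is the iteration pattern used throughout the patch. xa_for_each() expands to xa_find()/xa_find_after(), each of which revalidates from the stored index, so the walk tolerates the RCU read lock being dropped and retaken between iterations, exactly as the old idr_for_each_entry() loops did. A sketch of the full pin-and-drop pattern (do_sleeping_work() is hypothetical; entries are assumed to stay valid for an RCU grace period after removal, as drbd arranges):

    struct drbd_peer_device *pd;
    unsigned long vnr;

    rcu_read_lock();
    xa_for_each(&connection->peer_devices, vnr, pd) {
            struct drbd_device *device = pd->device;

            kref_get(&device->kref);        /* pin beyond the RCU section */
            rcu_read_unlock();

            do_sleeping_work(device);       /* may block */

            kref_put(&device->kref, drbd_destroy_device);
            rcu_read_lock();                /* resume the walk */
    }
    rcu_read_unlock();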
{
enum drbd_fencing_p fp = FP_NOT_AVAIL;
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
if (get_ldev_if_state(device, D_CONSISTENT)) {
struct disk_conf *disk_conf =
{
struct drbd_peer_device *peer_device;
bool rv = false;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
if (device->state.conn == C_SYNC_SOURCE ||
device->state.conn == C_SYNC_TARGET ||
{
struct drbd_peer_device *peer_device;
bool rv = false;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
if (device->state.conn == C_VERIFY_S ||
device->state.conn == C_VERIFY_T) {
_check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
{
struct drbd_peer_device *peer_device;
- int i;
+ unsigned long i;
if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
(new_net_conf->wire_protocol != DRBD_PROT_C))
return ERR_NOT_PROTO_C;
- idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ xa_for_each(&connection->peer_devices, i, peer_device) {
struct drbd_device *device = peer_device->device;
if (get_ldev(device)) {
enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
{
enum drbd_ret_code rv;
struct drbd_peer_device *peer_device;
- int i;
+ unsigned long i;
rcu_read_lock();
rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
rcu_read_unlock();
/* connection->peer_devices protected by genl_lock() here */
- idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ xa_for_each(&connection->peer_devices, i, peer_device) {
struct drbd_device *device = peer_device->device;
if (!device->bitmap) {
if (drbd_bm_init(device))
if (connection->cstate >= C_WF_REPORT_PARAMS) {
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ xa_for_each(&connection->peer_devices, vnr, peer_device)
drbd_send_sync_param(peer_device);
}
struct drbd_resource *resource;
struct drbd_connection *connection;
enum drbd_ret_code retcode;
- int i;
+ unsigned long i;
int err;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
- idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ xa_for_each(&connection->peer_devices, i, peer_device) {
peer_devices++;
}
flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
mutex_lock(&notification_mutex);
notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
- idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ xa_for_each(&connection->peer_devices, i, peer_device) {
struct peer_device_info peer_device_info;
peer_device_to_info(&peer_device_info, peer_device);
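The count-then-notify structure survives the conversion because xa_for_each() visits entries in ascending index order, so both passes see the same sequence; the count lets every notification except the last carry NOTIFY_CONTINUES. A condensed sketch (the notification call is abbreviated):

    unsigned long i;
    unsigned int remaining = 0, flags;
    struct drbd_peer_device *pd;

    xa_for_each(&connection->peer_devices, i, pd)
            remaining++;

    xa_for_each(&connection->peer_devices, i, pd) {
            flags = (--remaining) ? NOTIFY_CONTINUES : 0;
            /* notify_peer_device_state(..., flags); */
    }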
mutex_unlock(&adm_ctx.resource->conf_update);
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ xa_for_each(&connection->peer_devices, i, peer_device) {
struct drbd_device *device = peer_device->device;
device->send_cnt = 0;
device->recv_cnt = 0;
struct drbd_connection *connection;
struct drbd_device *device;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
- unsigned i;
unsigned long index;
retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
for_each_connection(connection, resource) {
struct drbd_peer_device *peer_device;
- idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+ xa_for_each(&connection->peer_devices, index, peer_device) {
retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
if (retcode < SS_SUCCESS) {
drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
if (!atomic_read(&device->pp_in_use_by_net))
continue;
struct drbd_socket sock, msock;
struct drbd_peer_device *peer_device;
struct net_conf *nc;
- int vnr, timeout, h;
+ int timeout, h;
+ unsigned long vnr;
bool discard_my_data, ok;
enum drbd_state_rv rv;
struct accept_wait_data ad = {
* drbd_set_role() is finished, and any incoming drbd_set_role
* will see the STATE_SENT flag, and wait for it to be cleared.
*/
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ xa_for_each(&connection->peer_devices, vnr, peer_device)
mutex_lock(peer_device->device->state_mutex);
/* avoid a race with conn_request_state( C_DISCONNECTING ) */
set_bit(STATE_SENT, &connection->flags);
spin_unlock_irq(&connection->resource->req_lock);
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ xa_for_each(&connection->peer_devices, vnr, peer_device)
mutex_unlock(peer_device->device->state_mutex);
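Taking a mutex inside the walk is legal: xa_for_each() is not one long RCU critical section; each step takes and releases the RCU read lock internally, so the caller may sleep between iterations provided something else keeps the entries alive (here drbd's configuration locking). The bracketing above, sketched:

    unsigned long vnr;
    struct drbd_peer_device *pd;

    /* Take every per-device state mutex, in ascending volume order... */
    xa_for_each(&connection->peer_devices, vnr, pd)
            mutex_lock(pd->device->state_mutex);

    /* ...perform the state handshake... */

    /* ...then release them again. */
    xa_for_each(&connection->peer_devices, vnr, pd)
            mutex_unlock(pd->device->state_mutex);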
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
kref_get(&device->kref);
rcu_read_unlock();
if (connection->resource->write_ordering >= WO_BDEV_FLUSH) {
struct drbd_peer_device *peer_device;
struct issue_flush_context ctx;
- int vnr;
+ unsigned long vnr;
atomic_set(&ctx.pending, 1);
ctx.error = 0;
init_completion(&ctx.done);
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
if (!get_ldev(device))
static void conn_wait_active_ee_empty(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
kref_get(&device->kref);
{
struct drbd_peer_device *peer_device;
enum drbd_conns oc;
- int vnr;
+ unsigned long vnr;
if (connection->cstate == C_STANDALONE)
return;
drbd_free_sock(connection);
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
kref_get(&device->kref);
rcu_read_unlock();
{
struct p_barrier_ack *p = pi->data;
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
tl_release(connection, p->barrier, be32_to_cpu(p->set_size));
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
if (device->state.conn == C_AHEAD &&
{
struct drbd_peer_device *peer_device;
bool rv = true;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
if (device->state.disk != D_DISKLESS ||
device->state.conn != C_STANDALONE ||
{
enum drbd_role role = R_SECONDARY;
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
role = max_role(role, device->state.role);
}
{
enum drbd_role peer = R_UNKNOWN;
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
peer = max_role(peer, device->state.peer);
}
{
enum drbd_disk_state disk_state = D_DISKLESS;
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
disk_state = max_t(enum drbd_disk_state, disk_state, device->state.disk);
}
{
enum drbd_disk_state disk_state = D_MASK;
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
}
{
enum drbd_disk_state disk_state = D_DISKLESS;
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
disk_state = max_t(enum drbd_disk_state, disk_state, device->state.pdsk);
}
{
enum drbd_conns conn = C_MASK;
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
conn = min_t(enum drbd_conns, conn, device->state.conn);
}
static bool no_peer_wf_report_params(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
bool rv = true;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ xa_for_each(&connection->peer_devices, vnr, peer_device)
if (peer_device->device->state.conn == C_WF_REPORT_PARAMS) {
rv = false;
break;
static void wake_up_all_devices(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ xa_for_each(&connection->peer_devices, vnr, peer_device)
wake_up(&peer_device->device->state_wait);
rcu_read_unlock();
if (resource->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
/* case2: The connection was established again: */
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ xa_for_each(&connection->peer_devices, vnr, peer_device)
clear_bit(NEW_CUR_UUID, &peer_device->device->flags);
rcu_read_unlock();
enum drbd_conns oc = acscw->oc;
union drbd_state ns_max = acscw->ns_max;
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
broadcast_state_change(acscw->state_change);
forget_state_change(acscw->state_change);
struct net_conf *old_conf;
mutex_lock(&notification_mutex);
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
+ xa_for_each(&connection->peer_devices, vnr, peer_device)
notify_peer_device_state(NULL, 0, peer_device, NULL,
NOTIFY_DESTROY | NOTIFY_CONTINUES);
notify_connection_state(NULL, 0, connection, NULL, NOTIFY_DESTROY);
/* case1: The outdate peer handler is successful: */
if (ns_max.pdsk <= D_OUTDATED) {
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
if (test_bit(NEW_CUR_UUID, &device->flags)) {
drbd_uuid_new_current(device);
{
enum chg_state_flags flags = ~0;
struct drbd_peer_device *peer_device;
- int vnr, first_vol = 1;
+ unsigned long vnr;
+ bool first_vol = true;
union drbd_dev_state os, cs = {
{ .role = R_SECONDARY,
.peer = R_UNKNOWN,
} };
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
os = device->state;
enum drbd_state_rv rv = SS_SUCCESS;
union drbd_state ns, os;
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
os = drbd_read_state(device);
ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
} };
struct drbd_peer_device *peer_device;
enum drbd_state_rv rv;
- int vnr, number_of_volumes = 0;
+ unsigned long vnr;
+ int number_of_volumes = 0;
if (mask.conn == C_MASK) {
/* remember last connect time so request_timer_fn() won't
}
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
number_of_volumes++;
os = drbd_read_state(device);
fp = rcu_dereference(device->ldev->disk_conf)->fencing;
if (fp != FP_DONT_CARE) {
struct drbd_peer_device *peer_device;
- int vnr;
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ unsigned long vnr;
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
pdsk_state = min_t(enum drbd_disk_state, pdsk_state, device->state.pdsk);
static void do_unqueued_work(struct drbd_connection *connection)
{
struct drbd_peer_device *peer_device;
- int vnr;
+ unsigned long vnr;
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
unsigned long todo = get_work_bits(&device->flags);
if (!todo)
struct drbd_work *w = NULL;
struct drbd_peer_device *peer_device;
LIST_HEAD(work_list);
- int vnr;
+ unsigned long vnr;
while (get_t_state(thi) == RUNNING) {
drbd_thread_current_set_cpu(thi);
} while (!list_empty(&work_list) || test_bit(DEVICE_WORK_PENDING, &connection->flags));
rcu_read_lock();
- idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+ xa_for_each(&connection->peer_devices, vnr, peer_device) {
struct drbd_device *device = peer_device->device;
D_ASSERT(device, device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE);
kref_get(&device->kref);