lockdep_assert_held(&ioc->lock);
xa_erase(&ioc->icq_array, icq->q->id);
- hlist_del_init(&icq->ioc_node);
list_del_init(&icq->q_node);
/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
struct io_context *ioc = container_of(work, struct io_context,
release_work);
unsigned long flags;
+ unsigned long index;
+ struct io_cq *icq;
/*
 * Exiting icq may call into put_io_context() through elevator
 * which will trigger lockdep warning.  The ioc's are guaranteed to
 * be different, use a different locking subclass here.  Use
 * irqsave variant as there's no spin_lock_irq_nested().
 */
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
- while (!hlist_empty(&ioc->icq_list)) {
- struct io_cq *icq = hlist_entry(ioc->icq_list.first,
- struct io_cq, ioc_node);
+ xa_for_each(&ioc->icq_array, index, icq) {
struct request_queue *q = icq->q;
if (spin_trylock(&q->queue_lock)) {
*/
if (atomic_long_dec_and_test(&ioc->refcount)) {
spin_lock_irqsave(&ioc->lock, flags);
- if (!hlist_empty(&ioc->icq_list))
+ if (!xa_empty(&ioc->icq_array))
queue_work(system_power_efficient_wq,
&ioc->release_work);
else
void put_io_context_active(struct io_context *ioc)
{
unsigned long flags;
+ unsigned long index;
struct io_cq *icq;
if (!atomic_dec_and_test(&ioc->active_ref)) {
* explanation on the nested locking annotation.
*/
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
- hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
+ xa_for_each(&ioc->icq_array, index, icq) {
if (icq->flags & ICQ_EXITED)
continue;
atomic_set(&ioc->active_ref, 1);
spin_lock_init(&ioc->lock);
xa_init_flags(&ioc->icq_array, XA_FLAGS_LOCK_IRQ);
- INIT_HLIST_HEAD(&ioc->icq_list);
INIT_WORK(&ioc->release_work, ioc_release_fn);
/*
icq->ioc = ioc;
icq->q = q;
INIT_LIST_HEAD(&icq->q_node);
- INIT_HLIST_NODE(&icq->ioc_node);
/* lock both q and ioc and try to link @icq */
spin_lock_irq(&q->queue_lock);
curr = xa_cmpxchg(&ioc->icq_array, q->id, XA_ZERO_ENTRY, icq,
GFP_ATOMIC);
if (likely(!curr)) {
- hlist_add_head(&icq->ioc_node, &ioc->icq_list);
list_add(&icq->q_node, &q->icq_list);
if (et->ops.init_icq)
et->ops.init_icq(icq);
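The xa_cmpxchg() above only succeeds if the q->id slot currently holds a reserved entry, so the slot is presumably reserved earlier in the function (e.g. with xa_reserve(), while the allocation may still sleep) and the icq is then installed under the two spinlocks with GFP_ATOMIC, where no further allocation should be needed. A minimal sketch of that reserve-then-install pattern, with illustrative names that are not taken from the patch:

#include <linux/errno.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(slots);	/* stand-in for ioc->icq_array */

static int install_entry(unsigned long id, void *ptr)
{
	void *curr;

	/* Reserve the slot while we may still sleep; readers see it as NULL. */
	if (xa_reserve(&slots, id, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Later, possibly under spinlocks: replace the reserved entry with
	 * the real pointer.  GFP_ATOMIC mirrors the hunk above; the slot
	 * already exists, so no allocation should be required.
	 */
	curr = xa_cmpxchg(&slots, id, XA_ZERO_ENTRY, ptr, GFP_ATOMIC);
	if (xa_is_err(curr))
		return xa_err(curr);

	/*
	 * NULL means the reserved slot was filled with @ptr (nothing drops
	 * the reservation in this sketch); anything else is an entry that
	 * someone installed first.
	 */
	return curr ? -EBUSY : 0;
}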
*
* - ioc lock nests inside q lock.
*
- * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
- * q->icq_list and icq->q_node by q lock.
+ * - q->icq_list and icq->q_node are protected by q lock.
*
* - ioc->icq_array and ioc->icq_hint are protected by ioc lock, while icq
* itself is protected by q lock. However, both the indexes and icq
struct io_context *ioc;
/*
- * q_node and ioc_node link io_cq through icq_list of q and ioc
- * respectively. Both fields are unused once ioc_exit_icq() is
- * called and shared with __rcu_icq_cache and __rcu_head which are
- * used for RCU free of io_cq.
+ * q_node links io_cq into q->icq_list.  The field is unused once
+ * ioc_exit_icq() has been called, so it shares storage with
+ * __rcu_icq_cache, which is used for the RCU free of io_cq (see
+ * icq_free_icq_rcu(), sketched after this struct).
*/
union {
struct list_head q_node;
struct kmem_cache *__rcu_icq_cache;
};
- union {
- struct hlist_node ioc_node;
- struct rcu_head __rcu_head;
- };
+ struct rcu_head __rcu_head;
unsigned int flags;
};
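For reference, the RCU free callback that the union comment refers to already exists in blk-ioc.c and is unchanged by this patch; it looks roughly like this:

static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

The destroy path records et->icq_cache in __rcu_icq_cache (overlaying the by-then unlinked q_node) before call_rcu(), which is why the two fields can safely share storage.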
struct xarray icq_array;
struct io_cq __rcu *icq_hint;
- struct hlist_head icq_list;
struct work_struct release_work;
};
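Both loops converted above, ioc_release_fn() and put_io_context_active(), now walk the icq's with xa_for_each() instead of popping entries off the removed icq_list. xa_for_each() is documented as safe against the array being modified during iteration, so erasing the entry currently being visited (as the destroy path does with xa_erase()) is fine. A minimal sketch of the iterate-and-erase shape only, leaving out the lock juggling, with illustrative names:

#include <linux/xarray.h>

static DEFINE_XARRAY(items);

static void release_all(void)
{
	unsigned long index;
	void *entry;

	/*
	 * xa_for_each() looks up the next present entry under the RCU read
	 * lock on every iteration, so the loop body is free to erase the
	 * entry it is currently looking at.
	 */
	xa_for_each(&items, index, entry) {
		xa_erase(&items, index);
		/* ... tear down / free @entry here ... */
	}
}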