hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))];
- rcu_read_lock();
- e = io_napi_hash_find(hash_list, napi_id);
- if (e) {
- WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
- rcu_read_unlock();
- return -EEXIST;
+ scoped_guard(rcu) {
+ e = io_napi_hash_find(hash_list, napi_id);
+ if (e) {
+ WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
+ return -EEXIST;
+ }
}
- rcu_read_unlock();
e = kmalloc(sizeof(*e), GFP_NOWAIT);
	if (!e)
		return -ENOMEM;

	e->napi_id = napi_id;
e->timeout = jiffies + NAPI_TIMEOUT;
+ /*
+ * guard(spinlock) is not used here: the lock must be released
+ * manually before calling kfree() on the duplicate entry.
+ */
spin_lock(&ctx->napi_lock);
	if (unlikely(io_napi_hash_find(hash_list, napi_id))) {
		spin_unlock(&ctx->napi_lock);
		kfree(e);
		return -EEXIST;
	}
	hlist_add_tail_rcu(&e->node, hash_list);
	list_add_tail_rcu(&e->list, &ctx->napi_list);
	spin_unlock(&ctx->napi_lock);
	return 0;
}

static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
{
struct io_napi_entry *e;
- spin_lock(&ctx->napi_lock);
+ guard(spinlock)(&ctx->napi_lock);
	/*
	 * list_for_each_entry_safe() is not required as long as:
	 * 1. list_del_rcu() does not reset the deleted node next pointer
	 * 2. kfree_rcu() delays the memory freeing until the next quiescent state
	 */
	list_for_each_entry(e, &ctx->napi_list, list) {
		if (time_after(jiffies, READ_ONCE(e->timeout))) {
			list_del_rcu(&e->list);
			hash_del_rcu(&e->node);
			kfree_rcu(e, rcu);
		}
	}
- spin_unlock(&ctx->napi_lock);
}
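/*
 * For illustration only, not part of the patch: a minimal sketch of the
 * guard(spinlock) semantics relied on above, per linux/cleanup.h. The
 * guard takes the lock immediately and registers a cleanup that releases
 * it when the variable goes out of scope, so every return path is
 * covered without an explicit spin_unlock(). Names below (demo_lock,
 * demo_count, demo_increment) are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/cleanup.h>
#include <linux/errno.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_count;

static int demo_increment(void)
{
	guard(spinlock)(&demo_lock);	/* spin_lock(&demo_lock) */
	if (demo_count < 0)
		return -EINVAL;		/* unlock runs before the return */
	demo_count++;
	return demo_count;
}					/* spin_unlock(&demo_lock) at scope exit */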
static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
{
	if (is_stale)
		__io_napi_remove_stale(ctx);
}

static void io_napi_blocking_busy_loop(struct io_ring_ctx *ctx,
				       struct io_wait_queue *iowq)
{
	unsigned long start_time = busy_loop_current_time();
	void *loop_end_arg = NULL;
	bool is_stale = false;

	if (list_is_singular(&ctx->napi_list))
		loop_end_arg = iowq;
- rcu_read_lock();
- do {
- is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
- } while (!io_napi_busy_loop_should_end(iowq, start_time) && !loop_end_arg);
- rcu_read_unlock();
+ scoped_guard(rcu) {
+ do {
+ is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
+ } while (!io_napi_busy_loop_should_end(iowq, start_time) &&
+ !loop_end_arg);
+ }
io_napi_remove_stale(ctx, is_stale);
}
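/*
 * For illustration only, not part of the patch: scoped_guard(rcu) from
 * linux/cleanup.h wraps exactly the braced block in rcu_read_lock()/
 * rcu_read_unlock(), which is why the explicit calls above can be
 * dropped and why io_napi_remove_stale() runs outside the read-side
 * section. A hedged sketch; demo_list, demo_entry and demo_find are
 * hypothetical.
 */
#include <linux/cleanup.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>

struct demo_entry {
	int id;
	struct list_head list;
};

static LIST_HEAD(demo_list);

static bool demo_find(int id)
{
	struct demo_entry *e;

	scoped_guard(rcu) {			/* rcu_read_lock() */
		list_for_each_entry_rcu(e, &demo_list, list) {
			if (e->id == id)
				return true;	/* rcu_read_unlock() runs first */
		}
	}					/* rcu_read_unlock() */
	return false;
}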
void io_napi_free(struct io_ring_ctx *ctx)
{
struct io_napi_entry *e;
- spin_lock(&ctx->napi_lock);
+ guard(spinlock)(&ctx->napi_lock);
list_for_each_entry(e, &ctx->napi_list, list) {
hash_del_rcu(&e->node);
kfree_rcu(e, rcu);
}
INIT_LIST_HEAD_RCU(&ctx->napi_list);
- spin_unlock(&ctx->napi_lock);
}
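/*
 * For illustration only, not part of the patch: the hash_del_rcu()/
 * kfree_rcu() pairing above is safe under the guard because RCU defers
 * the actual free until all pre-existing readers have finished. A hedged
 * sketch of the pattern with hypothetical names (demo_node, demo_remove).
 */
#include <linux/hashtable.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_node {
	int key;
	struct hlist_node node;
	struct rcu_head rcu;
};

/* Caller holds the write-side lock, e.g. via guard(spinlock) as above. */
static void demo_remove(struct demo_node *n)
{
	hash_del_rcu(&n->node);	/* unlink; existing readers may still see n */
	kfree_rcu(n, rcu);	/* actual kfree() after a grace period */
}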
/*
 * io_napi_sqpoll_busy_poll() - busy poll loop for sqpoll
 * @ctx: pointer to io-uring context structure
 */
int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
{
	bool is_stale = false;

if (list_empty_careful(&ctx->napi_list))
return 0;
- rcu_read_lock();
- is_stale = __io_napi_do_busy_loop(ctx, NULL);
- rcu_read_unlock();
+ scoped_guard(rcu) {
+ is_stale = __io_napi_do_busy_loop(ctx, NULL);
+ }
io_napi_remove_stale(ctx, is_stale);
	return 1;
}
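/*
 * For illustration only, not part of the patch: scoped_guard() expands to
 * a for-loop construct, so it also accepts a single statement without
 * braces; the braced form used above is simply the prevailing style.
 * Hedged sketch with hypothetical names (demo_flag, demo_check).
 */
#include <linux/cleanup.h>
#include <linux/rcupdate.h>

static bool demo_flag;

static bool demo_check(void)
{
	return false;	/* placeholder */
}

static void demo_single_stmt(void)
{
	/* same effect as rcu_read_lock(); demo_flag = demo_check(); rcu_read_unlock(); */
	scoped_guard(rcu)
		demo_flag = demo_check();
}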