www.infradead.org Git - users/hch/misc.git/commitdiff
io_uring/napi: Use lock guards
author: Olivier Langlois <olivier@trillion01.com>
Sun, 13 Oct 2024 18:29:02 +0000 (14:29 -0400)
committer: Jens Axboe <axboe@kernel.dk>
Wed, 6 Nov 2024 20:55:38 +0000 (13:55 -0700)
Convert napi locks to use the shiny new Scope-Based Resource Management
machinery.

Signed-off-by: Olivier Langlois <olivier@trillion01.com>
Link: https://lore.kernel.org/r/2680ca47ee183cfdb89d1a40c84d349edeb620ab.1728828877.git.olivier@trillion01.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/napi.c

index 5e2299e7ff8ee2ad0340c4a7182988eb5c0f7ac9..6d5fdd397f2f4139304f02c3b0a2bdd69ace6853 100644 (file)
@@ -49,14 +49,13 @@ int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id)
 
        hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))];
 
-       rcu_read_lock();
-       e = io_napi_hash_find(hash_list, napi_id);
-       if (e) {
-               WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
-               rcu_read_unlock();
-               return -EEXIST;
+       scoped_guard(rcu) {
+               e = io_napi_hash_find(hash_list, napi_id);
+               if (e) {
+                       WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
+                       return -EEXIST;
+               }
        }
-       rcu_read_unlock();
 
        e = kmalloc(sizeof(*e), GFP_NOWAIT);
        if (!e)
@@ -65,6 +64,10 @@ int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id)
        e->napi_id = napi_id;
        e->timeout = jiffies + NAPI_TIMEOUT;
 
+       /*
+        * guard(spinlock) is not used to manually unlock it before calling
+        * kfree()
+        */
        spin_lock(&ctx->napi_lock);
        if (unlikely(io_napi_hash_find(hash_list, napi_id))) {
                spin_unlock(&ctx->napi_lock);
@@ -82,7 +85,7 @@ static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
 {
        struct io_napi_entry *e;
 
-       spin_lock(&ctx->napi_lock);
+       guard(spinlock)(&ctx->napi_lock);
        /*
         * list_for_each_entry_safe() is not required as long as:
         * 1. list_del_rcu() does not reset the deleted node next pointer
@@ -96,7 +99,6 @@ static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
                        kfree_rcu(e, rcu);
                }
        }
-       spin_unlock(&ctx->napi_lock);
 }
 
 static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
@@ -168,11 +170,12 @@ static void io_napi_blocking_busy_loop(struct io_ring_ctx *ctx,
        if (list_is_singular(&ctx->napi_list))
                loop_end_arg = iowq;
 
-       rcu_read_lock();
-       do {
-               is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
-       } while (!io_napi_busy_loop_should_end(iowq, start_time) && !loop_end_arg);
-       rcu_read_unlock();
+       scoped_guard(rcu) {
+               do {
+                       is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
+               } while (!io_napi_busy_loop_should_end(iowq, start_time) &&
+                        !loop_end_arg);
+       }
 
        io_napi_remove_stale(ctx, is_stale);
 }
@@ -203,13 +206,12 @@ void io_napi_free(struct io_ring_ctx *ctx)
 {
        struct io_napi_entry *e;
 
-       spin_lock(&ctx->napi_lock);
+       guard(spinlock)(&ctx->napi_lock);
        list_for_each_entry(e, &ctx->napi_list, list) {
                hash_del_rcu(&e->node);
                kfree_rcu(e, rcu);
        }
        INIT_LIST_HEAD_RCU(&ctx->napi_list);
-       spin_unlock(&ctx->napi_lock);
 }
 
 /*
@@ -305,9 +307,9 @@ int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
        if (list_empty_careful(&ctx->napi_list))
                return 0;
 
-       rcu_read_lock();
-       is_stale = __io_napi_do_busy_loop(ctx, NULL);
-       rcu_read_unlock();
+       scoped_guard(rcu) {
+               is_stale = __io_napi_do_busy_loop(ctx, NULL);
+       }
 
        io_napi_remove_stale(ctx, is_stale);
        return 1;