closures: closure_get_not_zero(), closure_return_sync()
author     Kent Overstreet <kent.overstreet@linux.dev>
           Sun, 23 Jun 2024 01:38:58 +0000 (21:38 -0400)
committer  Kent Overstreet <kent.overstreet@linux.dev>
           Sun, 23 Jun 2024 04:57:21 +0000 (00:57 -0400)
Provide new primitives for solving a lifetime issue with bcachefs
btree_trans objects.

closure_return_sync(): like closure_sync(), waits synchronously for any
outstanding gets. Like closure_return(), the closure is considered
"finished" and the ref is left at 0.

closure_get_not_zero(): get a ref on a closure if it's alive, i.e. the
ref is not zero.
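And the corresponding (again hypothetical) reader side, which only touches
the object if it is still alive:

    static bool my_obj_use(struct my_obj *obj)
    {
            if (!closure_get_not_zero(&obj->cl))
                    return false;   /* already dying, ref was 0 */

            /* ... safely use obj ... */

            closure_put(&obj->cl);  /* drop the ref we took */
            return true;
    }

The sketch assumes some external synchronization (e.g. RCU) keeps the obj
pointer itself safe to dereference across the closure_get_not_zero() call.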

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
include/linux/closure.h
lib/closure.c

index 99155df162d03ca4369fe4ada58c371cbc1b7813..59b8c06b11ff332e361e38212c4efd43e1c9b15e 100644 (file)
@@ -284,6 +284,21 @@ static inline void closure_get(struct closure *cl)
 #endif
 }
 
+/**
+ * closure_get_not_zero - get a ref on a closure if it's alive (ref nonzero)
+ */
+static inline bool closure_get_not_zero(struct closure *cl)
+{
+       unsigned old = atomic_read(&cl->remaining);
+       do {
+               if (!(old & CLOSURE_REMAINING_MASK))
+                       return false;
+
+       } while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));
+
+       return true;
+}
+
 /**
  * closure_init - Initialize a closure, setting the refcount to 1
  * @cl:                closure to initialize
@@ -310,6 +325,12 @@ static inline void closure_init_stack(struct closure *cl)
        atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
 }
 
+static inline void closure_init_stack_release(struct closure *cl)
+{
+       memset(cl, 0, sizeof(struct closure));
+       atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+}
+
 /**
  * closure_wake_up - wake up all closures on a wait list,
  *                  with memory barrier
@@ -355,6 +376,8 @@ do {                                                                        \
  */
 #define closure_return(_cl)    continue_at((_cl), NULL, NULL)
 
+void closure_return_sync(struct closure *cl);
+
 /**
  * continue_at_nobarrier - jump to another function without barrier
  *
index 2e1ee9fdec081b09f468e06cfe8c310219a0cb38..c971216d9d7742231258e51c1e822084513f07d9 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/seq_file.h>
 #include <linux/sched/debug.h>
 
-static inline void closure_put_after_sub(struct closure *cl, int flags)
+static inline void closure_put_after_sub_checks(int flags)
 {
        int r = flags & CLOSURE_REMAINING_MASK;
 
@@ -22,12 +22,17 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
                 flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
                r &= ~CLOSURE_GUARD_MASK;
 
-       if (!r) {
-               smp_acquire__after_ctrl_dep();
+       WARN(!r && (flags & ~CLOSURE_DESTRUCTOR),
+            "closure ref hit 0 with incorrect flags set: %x (%u)",
+            flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+}
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+       closure_put_after_sub_checks(flags);
 
-               WARN(flags & ~CLOSURE_DESTRUCTOR,
-                    "closure ref hit 0 with incorrect flags set: %x (%u)",
-                    flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+       if (!(flags & CLOSURE_REMAINING_MASK)) {
+               smp_acquire__after_ctrl_dep();
 
                cl->closure_get_happened = false;
 
@@ -145,6 +150,41 @@ void __sched __closure_sync(struct closure *cl)
 }
 EXPORT_SYMBOL(__closure_sync);
 
+/*
+ * closure_return_sync - finish running a closure, synchronously (i.e. waiting
+ * for outstanding get()s to finish) and returning once closure refcount is 0.
+ *
+ * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
+ * closure_get_not_zero() calls will fail.
+ */
+void __sched closure_return_sync(struct closure *cl)
+{
+       struct closure_syncer s = { .task = current };
+
+       cl->s = &s;
+       set_closure_fn(cl, closure_sync_fn, NULL);
+
+       unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR,
+                                                  &cl->remaining);
+
+       closure_put_after_sub_checks(flags);
+
+       if (unlikely(flags & CLOSURE_REMAINING_MASK)) {
+               while (1) {
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       if (s.done)
+                               break;
+                       schedule();
+               }
+
+               __set_current_state(TASK_RUNNING);
+       }
+
+       if (cl->parent)
+               closure_put(cl->parent);
+}
+EXPORT_SYMBOL(closure_return_sync);
+
 int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout)
 {
        struct closure_syncer s = { .task = current };