www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
bcachefs: Fix lost wake up
author Alan Huang <mmpgouride@gmail.com>
Tue, 27 Aug 2024 15:14:48 +0000 (23:14 +0800)
committer Kent Overstreet <kent.overstreet@linux.dev>
Sat, 28 Sep 2024 02:32:23 +0000 (22:32 -0400)
If a reader acquires the read lock and a writer then enters the slow path
while the reader proceeds to the unlock path, the following interleaving
can occur without this change:

writer: pcpu_read_count(lock) returns 1 (so __do_six_trylock will return 0)
reader: this_cpu_dec(*lock->readers)
reader: smp_mb()
reader: state = atomic_read(&lock->state) (there is no waiting flag set)
writer: six_set_bitmask()

then the writer will sleep forever.
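
The ordering problem is an instance of the store-buffering pattern: each side
publishes its own flag and then reads the other side's, and without a full
barrier between those two steps on each side, both reads can miss. A minimal
user-space sketch of the required pairing (hypothetical names; it is not the
six.c code itself, and atomic_thread_fence(memory_order_seq_cst) stands in
for smp_mb()):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int  reader_count   = 1;     /* reader currently holds the lock */
	static atomic_bool writer_waiting = false;

	/* Writer slow path: publish intent to wait, then check for readers. */
	static bool writer_must_sleep(void)
	{
		atomic_store_explicit(&writer_waiting, true, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);	/* the writer's "smp_mb()" */
		return atomic_load_explicit(&reader_count, memory_order_relaxed) != 0;
	}

	/* Reader unlock path: drop the count, then check whether to wake the writer. */
	static bool reader_must_wake(void)
	{
		atomic_fetch_sub_explicit(&reader_count, 1, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);	/* the reader's "smp_mb()" */
		return atomic_load_explicit(&writer_waiting, memory_order_relaxed);
	}

With both fences in place, writer_must_sleep() returning true while
reader_must_wake() returns false is impossible: at least one side observes the
other's store. Drop the writer's fence (the situation before this patch, where
only a conditional smp_mb__after_atomic() was present) and both sides can miss
each other, leaving the writer asleep with no one left to wake it.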

Signed-off-by: Alan Huang <mmpgouride@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/six.c

index 3a494c5d12478595c76bebc89fd15b517c5ed6d0..b3583c50ef04ece7beb6d661bad6a2f52c4de8bf 100644 (file)
@@ -169,11 +169,17 @@ static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
                                ret = -1 - SIX_LOCK_write;
                }
        } else if (type == SIX_LOCK_write && lock->readers) {
-               if (try) {
+               if (try)
                        atomic_add(SIX_LOCK_HELD_write, &lock->state);
-                       smp_mb__after_atomic();
-               }
 
+               /*
+                * Make sure atomic_add happens before pcpu_read_count and
+                * six_set_bitmask in slow path happens before pcpu_read_count.
+                *
+                * Paired with the smp_mb() in read lock fast path (per-cpu mode)
+                * and the one before atomic_read in read unlock path.
+                */
+               smp_mb();
                ret = !pcpu_read_count(lock);
 
                if (try && !ret) {
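
For context, the read-unlock ordering that the new comment pairs with is the
sequence already shown in the scenario above; a condensed sketch (the
waiting-bit test and wakeup helper below are placeholders, not the exact
six.c identifiers):

	/* read unlock, per-cpu mode */
	this_cpu_dec(*lock->readers);
	smp_mb();	/* pairs with the smp_mb() added before pcpu_read_count() */
	state = atomic_read(&lock->state);
	if (state & write_waiting_bit)		/* placeholder for the waiting flag */
		wake_up_write_waiter(lock);	/* placeholder wakeup helper */

A writer that set its waiting bit before the reader's smp_mb() is now
guaranteed to be visible at the atomic_read(), so the reader issues the
wakeup instead of both sides missing each other.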