bcache: fix a lost wake-up problem caused by mca_cannibalize_lock
author    Guoju Fang <fangguoju@gmail.com>
          Wed, 13 Nov 2019 08:03:16 +0000 (16:03 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 1 Oct 2020 11:12:29 +0000 (13:12 +0200)
[ Upstream commit 34cf78bf34d48dddddfeeadb44f9841d7864997a ]

This patch fixes a lost wake-up problem caused by the race between
mca_cannibalize_lock and bch_cannibalize_unlock.

Consider two processes, A and B. Process A is executing
mca_cannibalize_lock, while process B holds c->btree_cache_alloc_lock
and is executing bch_cannibalize_unlock. The problem occurs after
process A has executed cmpxchg but before it calls prepare_to_wait.
In this timeslice process B executes wake_up; only afterwards does
process A call prepare_to_wait and set its state to
TASK_UNINTERRUPTIBLE. Process A then goes to sleep, but nothing will
ever wake it up. This problem may cause the bcache device to hang.
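
For illustration, below is a simplified sketch of the pre-patch locking
path (the lines removed in the diff further down), with comments added
to mark one possible schedule that loses the wake-up. The timeline
comments are an illustrative assumption, not a trace:

    /* Process A: pre-patch mca_cannibalize_lock() */
    old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
    if (old && old != current) {
            /*
             * Race window: process B may run bch_cannibalize_unlock()
             * right here, clearing btree_cache_alloc_lock and calling
             * wake_up(&c->btree_cache_wait) while A is not yet on the
             * wait queue.
             */
            if (op)
                    prepare_to_wait(&c->btree_cache_wait, &op->wait,
                                    TASK_UNINTERRUPTIBLE);
            /*
             * A later goes to sleep on btree_cache_wait, but the
             * wake_up() it needed has already happened, so it sleeps
             * forever.
             */
            return -EINTR;
    }

The fix below serializes the lock check, prepare_to_wait() and
bch_cannibalize_unlock() under a new spinlock, btree_cannibalize_lock,
so a wake_up() can no longer slip into that window.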

Signed-off-by: Guoju Fang <fangguoju@gmail.com>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/super.c

index e4a3f692057b84a5eb94be03b031cf48a54b4d02..a3763d664a67a66fa1d173ddec7508478fc0637a 100644 (file)
@@ -548,6 +548,7 @@ struct cache_set {
         */
        wait_queue_head_t       btree_cache_wait;
        struct task_struct      *btree_cache_alloc_lock;
+       spinlock_t              btree_cannibalize_lock;
 
        /*
         * When we free a btree node, we increment the gen of the bucket the
index 9fca837d0b41ead104c75e40abd85ed316b6abc1..fba0fff8040d60f8b8131bd51fc62498654cc7a9 100644 (file)
@@ -840,15 +840,17 @@ out:
 
 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
 {
-       struct task_struct *old;
-
-       old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
-       if (old && old != current) {
+       spin_lock(&c->btree_cannibalize_lock);
+       if (likely(c->btree_cache_alloc_lock == NULL)) {
+               c->btree_cache_alloc_lock = current;
+       } else if (c->btree_cache_alloc_lock != current) {
                if (op)
                        prepare_to_wait(&c->btree_cache_wait, &op->wait,
                                        TASK_UNINTERRUPTIBLE);
+               spin_unlock(&c->btree_cannibalize_lock);
                return -EINTR;
        }
+       spin_unlock(&c->btree_cannibalize_lock);
 
        return 0;
 }
@@ -883,10 +885,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
  */
 static void bch_cannibalize_unlock(struct cache_set *c)
 {
+       spin_lock(&c->btree_cannibalize_lock);
        if (c->btree_cache_alloc_lock == current) {
                c->btree_cache_alloc_lock = NULL;
                wake_up(&c->btree_cache_wait);
        }
+       spin_unlock(&c->btree_cannibalize_lock);
 }
 
 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
index 7fcc1ba12bc0185d7b5af7822e7740036ea45e67..6bf1559a1f0db67536c163a27ba23d55e9a70bf0 100644 (file)
@@ -1510,6 +1510,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
        sema_init(&c->sb_write_mutex, 1);
        mutex_init(&c->bucket_lock);
        init_waitqueue_head(&c->btree_cache_wait);
+       spin_lock_init(&c->btree_cannibalize_lock);
        init_waitqueue_head(&c->bucket_wait);
        init_waitqueue_head(&c->gc_wait);
        sema_init(&c->uuid_write_mutex, 1);