www.infradead.org Git - nvme.git/commitdiff
rbd: rename RBD_LOCK_STATE_RELEASING and releasing_wait
author: Ilya Dryomov <idryomov@gmail.com>
Tue, 23 Jul 2024 15:54:39 +0000 (17:54 +0200)
committer: Ilya Dryomov <idryomov@gmail.com>
Thu, 25 Jul 2024 10:18:01 +0000 (12:18 +0200)
... to RBD_LOCK_STATE_QUIESCING and quiescing_wait to recognize that
this state and the associated completion are backing rbd_quiesce_lock(),
which isn't specific to releasing the lock.

While exclusive lock does get quiesced before it's released, it also
gets quiesced before an attempt to update the cookie is made and there
the lock is not released as long as ceph_cls_set_cookie() succeeds.

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
drivers/block/rbd.c

index 26ff5cd2bf0abc118d5c83cdf733554a3be97e0c..c30d227753d72736cf1f1715d29292af6528d45d 100644 (file)
@@ -362,7 +362,7 @@ enum rbd_watch_state {
 enum rbd_lock_state {
        RBD_LOCK_STATE_UNLOCKED,
        RBD_LOCK_STATE_LOCKED,
-       RBD_LOCK_STATE_RELEASING,
+       RBD_LOCK_STATE_QUIESCING,
 };
 
 /* WatchNotify::ClientId */
@@ -422,7 +422,7 @@ struct rbd_device {
        struct list_head        running_list;
        struct completion       acquire_wait;
        int                     acquire_err;
-       struct completion       releasing_wait;
+       struct completion       quiescing_wait;
 
        spinlock_t              object_map_lock;
        u8                      *object_map;
@@ -525,7 +525,7 @@ static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
        lockdep_assert_held(&rbd_dev->lock_rwsem);
 
        return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
-              rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
+              rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING;
 }
 
 static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
@@ -3458,12 +3458,12 @@ static void rbd_lock_del_request(struct rbd_img_request *img_req)
        spin_lock(&rbd_dev->lock_lists_lock);
        if (!list_empty(&img_req->lock_item)) {
                list_del_init(&img_req->lock_item);
-               need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
+               need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_QUIESCING &&
                               list_empty(&rbd_dev->running_list));
        }
        spin_unlock(&rbd_dev->lock_lists_lock);
        if (need_wakeup)
-               complete(&rbd_dev->releasing_wait);
+               complete(&rbd_dev->quiescing_wait);
 }
 
 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
@@ -4181,16 +4181,16 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
        /*
         * Ensure that all in-flight IO is flushed.
         */
-       rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
-       rbd_assert(!completion_done(&rbd_dev->releasing_wait));
+       rbd_dev->lock_state = RBD_LOCK_STATE_QUIESCING;
+       rbd_assert(!completion_done(&rbd_dev->quiescing_wait));
        if (list_empty(&rbd_dev->running_list))
                return true;
 
        up_write(&rbd_dev->lock_rwsem);
-       wait_for_completion(&rbd_dev->releasing_wait);
+       wait_for_completion(&rbd_dev->quiescing_wait);
 
        down_write(&rbd_dev->lock_rwsem);
-       if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
+       if (rbd_dev->lock_state != RBD_LOCK_STATE_QUIESCING)
                return false;
 
        rbd_assert(list_empty(&rbd_dev->running_list));
@@ -5383,7 +5383,7 @@ static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
        INIT_LIST_HEAD(&rbd_dev->acquiring_list);
        INIT_LIST_HEAD(&rbd_dev->running_list);
        init_completion(&rbd_dev->acquire_wait);
-       init_completion(&rbd_dev->releasing_wait);
+       init_completion(&rbd_dev->quiescing_wait);
 
        spin_lock_init(&rbd_dev->object_map_lock);