www.infradead.org Git - users/willy/pagecache.git/commitdiff
gfs2: Free quota data objects synchronously
author: Andreas Gruenbacher <agruenba@redhat.com>
Wed, 23 Aug 2023 15:15:47 +0000 (17:15 +0200)
committer: Andreas Gruenbacher <agruenba@redhat.com>
Tue, 5 Sep 2023 13:58:17 +0000 (15:58 +0200)
In gfs2_quota_cleanup(), wait for the quota data objects to be freed
before returning.  Otherwise, there is no guarantee that the quota data
objects will be gone when their kmem cache is destroyed.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
fs/gfs2/quota.c

index 97fdf64148ba43b9515424076643e0442728d2e2..75be0d8e243f8bffbb4c674cb98685866d5b36d2 100644 (file)
@@ -109,7 +109,11 @@ static inline void spin_unlock_bucket(unsigned int hash)
 static void gfs2_qd_dealloc(struct rcu_head *rcu)
 {
        struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
+       struct gfs2_sbd *sdp = qd->qd_sbd;
+
        kmem_cache_free(gfs2_quotad_cachep, qd);
+       if (atomic_dec_and_test(&sdp->sd_quota_count))
+               wake_up(&sdp->sd_kill_wait);
 }
 
 static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
@@ -143,7 +147,6 @@ static void gfs2_qd_list_dispose(struct list_head *list)
                list_del(&qd->qd_lru);
 
                gfs2_qd_dispose(qd);
-               atomic_dec(&sdp->sd_quota_count);
        }
 }
 
@@ -317,13 +320,24 @@ static void qd_hold(struct gfs2_quota_data *qd)
 
 static void qd_put(struct gfs2_quota_data *qd)
 {
+       struct gfs2_sbd *sdp;
+
        if (lockref_put_or_lock(&qd->qd_lockref))
                return;
 
+       BUG_ON(__lockref_is_dead(&qd->qd_lockref));
+       sdp = qd->qd_sbd;
+       if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
+               lockref_mark_dead(&qd->qd_lockref);
+               spin_unlock(&qd->qd_lockref.lock);
+
+               gfs2_qd_dispose(qd);
+               return;
+       }
+
        qd->qd_lockref.count = 0;
        list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
        spin_unlock(&qd->qd_lockref.lock);
-
 }
 
 static int slot_get(struct gfs2_quota_data *qd)
@@ -1465,16 +1479,33 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 {
        struct gfs2_quota_data *qd;
        LIST_HEAD(dispose);
+       int count;
+
+       BUG_ON(test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
 
        spin_lock(&qd_lock);
        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
+               spin_lock(&qd->qd_lockref.lock);
+               if (qd->qd_lockref.count != 0) {
+                       spin_unlock(&qd->qd_lockref.lock);
+                       continue;
+               }
+               lockref_mark_dead(&qd->qd_lockref);
+               spin_unlock(&qd->qd_lockref.lock);
+
                list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
                list_add(&qd->qd_lru, &dispose);
        }
        spin_unlock(&qd_lock);
 
        gfs2_qd_list_dispose(&dispose);
-       gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
+
+       wait_event_timeout(sdp->sd_kill_wait,
+               (count = atomic_read(&sdp->sd_quota_count)) == 0,
+               HZ * 60);
+
+       if (count != 0)
+               fs_err(sdp, "%d left-over quota data objects\n", count);
 
        kvfree(sdp->sd_quota_bitmap);
        sdp->sd_quota_bitmap = NULL;