xfs: don't create one lockdep class per RTG
author Christoph Hellwig <hch@lst.de>
Wed, 17 Jul 2024 06:46:18 +0000 (08:46 +0200)
committer Christoph Hellwig <hch@lst.de>
Wed, 17 Jul 2024 12:31:20 +0000 (14:31 +0200)
Source kernel commit: c10ebb4d5097a01ff185c206dfdf984e83f3ef22

lockdep_unregister_key is rather expensive because it does a
synchronize_rcu.  To avoid slowdowns with lots of RTGs, stop allocating
one lockdep class key per RTG: use a single static lock class for the
RTG metadata inode locks and describe the inter-group locking order to
lockdep with a cmp_fn instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
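
As background (not part of this commit): the expense comes from the per-RTG
lock_class_key that had to be registered at setup and unregistered at
teardown, and lockdep_unregister_key() performs a synchronize_rcu() each
time.  The sketch below, with made-up demo_* names, contrasts that pattern
with the single static key the commit switches to; it is an illustration
under those assumptions, not the patch's code.

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_group {
	spinlock_t		lock;
	struct lock_class_key	key;	/* per-object class: expensive to tear down */
};

/* Shared class: defined once, never needs unregistering. */
static struct lock_class_key demo_shared_key;

static struct demo_group *demo_alloc_per_object_class(void)
{
	struct demo_group *g = kzalloc(sizeof(*g), GFP_KERNEL);

	if (!g)
		return NULL;
	spin_lock_init(&g->lock);
	lockdep_register_key(&g->key);
	lockdep_set_class(&g->lock, &g->key);
	return g;
}

static void demo_free_per_object_class(struct demo_group *g)
{
	/* This is the synchronize_rcu() the commit wants to avoid. */
	lockdep_unregister_key(&g->key);
	kfree(g);
}

static struct demo_group *demo_alloc_shared_class(void)
{
	struct demo_group *g = kzalloc(sizeof(*g), GFP_KERNEL);

	if (!g)
		return NULL;
	spin_lock_init(&g->lock);
	lockdep_set_class(&g->lock, &demo_shared_key);
	return g;
}

static void demo_free_shared_class(struct demo_group *g)
{
	kfree(g);		/* nothing to unregister, no RCU wait */
}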
libxfs/xfs_rtgroup.c
libxfs/xfs_rtgroup.h

index ad35e11127dd1c3ff589a9f951c4d9af2ea20a69..f724889096794cbd018c57a00132662012ea68cb 100644 (file)
@@ -163,8 +163,6 @@ xfs_initialize_rtgroups(
                /* Place kernel structure only init below this point. */
                spin_lock_init(&rtg->rtg_state_lock);
                init_waitqueue_head(&rtg->rtg_active_wq);
-               memset(&rtg->lock_class, 0, sizeof(rtg->lock_class));
-               lockdep_register_key(&rtg->lock_class);
                xfs_defer_drain_init(&rtg->rtg_intents_drain);
                xfs_hooks_init(&rtg->rtg_rmap_update_hooks);
 #endif /* __KERNEL__ */
@@ -207,7 +205,6 @@ xfs_free_unused_rtgroup_range(
                if (!rtg)
                        break;
 #ifdef __KERNEL__
-               lockdep_unregister_key(&rtg->lock_class);
                xfs_defer_drain_free(&rtg->rtg_intents_drain);
 #endif
                kfree(rtg);
@@ -244,7 +241,6 @@ xfs_free_rtgroups(
                ASSERT(rtg);
                XFS_IS_CORRUPT(mp, atomic_read(&rtg->rtg_ref) != 0);
 #ifdef __KERNEL__
-               lockdep_unregister_key(&rtg->lock_class);
                xfs_defer_drain_free(&rtg->rtg_intents_drain);
 #endif
 
@@ -522,6 +518,46 @@ static const struct xfs_rtginode_ops xfs_rtginode_ops[] = {
        },
 };
 
+#ifdef CONFIG_PROVE_LOCKING
+static struct lock_class_key xfs_rtg_lock_class;
+
+static int
+xfs_rtg_ilock_cmp_fn(
+       const struct lockdep_map        *m1,
+       const struct lockdep_map        *m2)
+{
+       const struct xfs_inode *ip1 =
+               container_of(m1, struct xfs_inode, i_lock.dep_map);
+       const struct xfs_inode *ip2 =
+               container_of(m2, struct xfs_inode, i_lock.dep_map);
+
+       if (ip1->i_rgno < ip2->i_rgno)
+               return -1;
+       if (ip1->i_rgno > ip2->i_rgno)
+               return 1;
+       return 0;
+}
+
+/*
+ * Most of the time each of the RTG inode locks is only taken one at a time.
+ * But when committing deferred ops, more than one of a kind can be taken.
+ * As the RTGs will be committed in rgno order there is no potential for
+ * deadlocks, and the code here is needed to tell lockdep about this order.
+ */
+static inline void
+xfs_rtg_ilock_lockdep_setup(
+       struct xfs_inode        *ip,
+       xfs_rgnumber_t          rgno,
+       enum xfs_rtg_inodes     type)
+{
+       ip->i_rgno = rgno;
+       lockdep_set_class_and_subclass(&ip->i_lock, &xfs_rtg_lock_class, type);
+       lock_set_cmp_fn(&ip->i_lock, xfs_rtg_ilock_cmp_fn, NULL);
+}
+#else
+#define xfs_rtg_ilock_lockdep_setup(ip, rgno, type)    do { } while (0)
+#endif /* CONFIG_PROVE_LOCKING */
+
 const char *
 xfs_rtginode_name(
        enum xfs_rtg_inodes     type)
@@ -573,11 +609,7 @@ xfs_rtginode_load(
                return -EFSCORRUPTED;
        }
 
-       /*
-        * Each realtime allocation group has a lockdep class key for the metadata
-        * inodes.  Each metadata inode in a group gets its own subclass.
-        */
-       lockdep_set_class_and_subclass(&ip->i_lock, &rtg->lock_class, type);
+       xfs_rtg_ilock_lockdep_setup(ip, rtg->rtg_rgno, type);
        rtg->rtg_inodes[type] = ip;
        return 0;
 }
@@ -610,7 +642,7 @@ xfs_rtginode_create(
        if (error)
                return error;
 
-       lockdep_set_class_and_subclass(&upd.ip->i_lock, &rtg->lock_class, type);
+       xfs_rtg_ilock_lockdep_setup(upd.ip, rtg->rtg_rgno, type);
 
        error = ops->create(rtg, upd.ip, upd.tp, init);
        if (error)
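
The comment above xfs_rtg_ilock_lockdep_setup() is what makes a single
shared class workable: when several RTG inode locks of the same kind are
held at once they are always taken in ascending rgno order, and
lock_set_cmp_fn() lets lockdep verify that order instead of reporting the
nested same-class acquisition as a possible deadlock.  The sketch below
shows the same mechanism on a plain mutex; it is illustrative only and the
demo_* names are invented.

#include <linux/container_of.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

struct demo_obj {
	int		index;		/* stands in for the rtgroup number */
	struct mutex	lock;
};

#ifdef CONFIG_PROVE_LOCKING
static struct lock_class_key demo_obj_lock_class;

static int
demo_obj_lock_cmp_fn(
	const struct lockdep_map	*m1,
	const struct lockdep_map	*m2)
{
	const struct demo_obj *o1 =
		container_of(m1, struct demo_obj, lock.dep_map);
	const struct demo_obj *o2 =
		container_of(m2, struct demo_obj, lock.dep_map);

	if (o1->index < o2->index)
		return -1;
	if (o1->index > o2->index)
		return 1;
	return 0;
}
#endif /* CONFIG_PROVE_LOCKING */

static void demo_obj_init(struct demo_obj *o, int index)
{
	o->index = index;
	mutex_init(&o->lock);
#ifdef CONFIG_PROVE_LOCKING
	lockdep_set_class(&o->lock, &demo_obj_lock_class);
	lock_set_cmp_fn(&o->lock, demo_obj_lock_cmp_fn, NULL);
#endif
}

/*
 * Both locks share one class, so without the cmp_fn lockdep would warn
 * about possible recursive locking here.  Because the locks are taken in
 * ascending index order the cmp_fn returns -1 and lockdep accepts it;
 * taking them in descending order would be flagged as an inversion.
 */
static void demo_lock_pair(struct demo_obj *lo, struct demo_obj *hi)
{
	mutex_lock(&lo->lock);
	mutex_lock(&hi->lock);
}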
index e8d76a818e07db3b3cdb2e4a0b1032c7f123a957..ab44b9ccf1d812e28d7d4d09279245d55aed21bc 100644 (file)
@@ -50,8 +50,6 @@ struct xfs_rtgroup {
        /* -- kernel only structures below this line -- */
        spinlock_t              rtg_state_lock;
 
-       struct lock_class_key   lock_class;
-
        /*
         * We use xfs_drain to track the number of deferred log intent items
         * that have been queued (but not yet processed) so that waiters (e.g.