/* Place kernel structure only init below this point. */
spin_lock_init(&rtg->rtg_state_lock);
init_waitqueue_head(&rtg->rtg_active_wq);
- memset(&rtg->lock_class, 0, sizeof(rtg->lock_class));
- lockdep_register_key(&rtg->lock_class);
xfs_defer_drain_init(&rtg->rtg_intents_drain);
xfs_hooks_init(&rtg->rtg_rmap_update_hooks);
#endif /* __KERNEL__ */
if (!rtg)
break;
#ifdef __KERNEL__
- lockdep_unregister_key(&rtg->lock_class);
xfs_defer_drain_free(&rtg->rtg_intents_drain);
#endif
kfree(rtg);
ASSERT(rtg);
XFS_IS_CORRUPT(mp, atomic_read(&rtg->rtg_ref) != 0);
#ifdef __KERNEL__
- lockdep_unregister_key(&rtg->lock_class);
xfs_defer_drain_free(&rtg->rtg_intents_drain);
#endif
},
};
+#ifdef CONFIG_PROVE_LOCKING
+/* Single lockdep class shared by the i_lock of every RTG metadata inode. */
+static struct lock_class_key xfs_rtg_lock_class;
+
+/*
+ * Lockdep lock-order comparison: order two RTG inode i_locks by the
+ * realtime group number of the inode that owns each lock, so lockdep
+ * accepts acquisitions made in ascending rgno order as deadlock-free.
+ */
+static int
+xfs_rtg_ilock_cmp_fn(
+	const struct lockdep_map	*m1,
+	const struct lockdep_map	*m2)
+{
+	const struct xfs_inode		*ip1 =
+		container_of(m1, struct xfs_inode, i_lock.dep_map);
+	const struct xfs_inode		*ip2 =
+		container_of(m2, struct xfs_inode, i_lock.dep_map);
+
+	if (ip1->i_rgno < ip2->i_rgno)
+		return -1;
+	if (ip1->i_rgno > ip2->i_rgno)
+		return 1;
+	return 0;
+}
+
+/*
+ * Most of the time each of the RTG inode locks is only taken one at a time.
+ * But when committing deferred ops more than one of a kind can be taken.
+ * As the RTGs will be committed in rgno order there is no potential for
+ * deadlocks, and the code here is needed to tell lockdep about this order.
+ */
+static inline void
+xfs_rtg_ilock_lockdep_setup(
+	struct xfs_inode	*ip,
+	xfs_rgnumber_t		rgno,
+	enum xfs_rtg_inodes	type)
+{
+	/* Stash the group number so the cmp_fn above can order the locks. */
+	ip->i_rgno = rgno;
+	/* One subclass per metadata inode type, all in the shared class. */
+	lockdep_set_class_and_subclass(&ip->i_lock, &xfs_rtg_lock_class, type);
+	lock_set_cmp_fn(&ip->i_lock, xfs_rtg_ilock_cmp_fn, NULL);
+}
+#else
+#define xfs_rtg_ilock_lockdep_setup(ip, rgno, type) do { } while (0)
+#endif /* CONFIG_PROVE_LOCKING */
+
const char *
xfs_rtginode_name(
enum xfs_rtg_inodes type)
return -EFSCORRUPTED;
}
- /*
- * Each realtime allocation group has a lockdep class key for the metadata
- * inodes. Each metadata inode in a group gets its own subclass.
- */
- lockdep_set_class_and_subclass(&ip->i_lock, &rtg->lock_class, type);
+ xfs_rtg_ilock_lockdep_setup(ip, rtg->rtg_rgno, type);
rtg->rtg_inodes[type] = ip;
return 0;
}
if (error)
return error;
- lockdep_set_class_and_subclass(&upd.ip->i_lock, &rtg->lock_class, type);
+ xfs_rtg_ilock_lockdep_setup(upd.ip, rtg->rtg_rgno, type);
error = ops->create(rtg, upd.ip, upd.tp, init);
if (error)