# xfs_rtbitmap is shared with libxfs
xfs-$(CONFIG_XFS_RT) += $(addprefix libxfs/, \
xfs_rtbitmap.o \
+ xfs_rtgroup.o \
)
# highlevel code
xfs_ino_t sb_metadirino; /* metadata directory tree root */
+ xfs_rgnumber_t sb_rgcount; /* number of realtime groups */
+ xfs_rtxlen_t sb_rgextents; /* size of a realtime group in rtx */
+
/* must be padded to 64 bit alignment */
} xfs_sb_t;
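The two new superblock fields fix the realtime group geometry: sb_rgextents is the nominal group size in rt extents, and sb_rgcount is how many groups cover sb_rextents, with the final group absorbing the remainder (see xfs_rtgroup_extents() in the new file below). A minimal standalone sketch of that arithmetic, using hypothetical geometry values rather than anything taken from the patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical geometry: 1000 rt extents, nominal group size 256. */
#define SB_REXTENTS	1000ULL
#define SB_RGEXTENTS	256ULL
#define SB_RGCOUNT	4ULL	/* DIV_ROUND_UP(1000, 256) */

/* Mirrors xfs_rtgroup_extents(): the last group takes the remainder. */
static uint64_t rtgroup_extents(uint64_t rgno)
{
	assert(rgno < SB_RGCOUNT);
	if (rgno == SB_RGCOUNT - 1)
		return SB_REXTENTS - rgno * SB_RGEXTENTS;
	return SB_RGEXTENTS;
}

int main(void)
{
	for (uint64_t rgno = 0; rgno < SB_RGCOUNT; rgno++)
		printf("rtgroup %llu: %llu rt extents\n",
		       (unsigned long long)rgno,
		       (unsigned long long)rtgroup_extents(rgno));
	/* Prints 256, 256, 256, then 232 for the short tail group. */
	return 0;
}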
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2022-2024 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
+#include "xfs_rmap.h"
+#include "xfs_ag.h"
+#include "xfs_ag_resv.h"
+#include "xfs_health.h"
+#include "xfs_error.h"
+#include "xfs_bmap.h"
+#include "xfs_defer.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_trace.h"
+#include "xfs_inode.h"
+#include "xfs_icache.h"
+#include "xfs_rtgroup.h"
+#include "xfs_rtbitmap.h"
+
+/*
+ * Passive reference counting access wrappers to the rtgroup structures. If
+ * the rtgroup structure is to be freed, the freeing code is responsible for
+ * cleaning up objects with passive references before freeing the structure.
+ */
+struct xfs_rtgroup *
+xfs_rtgroup_get(
+ struct xfs_mount *mp,
+ xfs_rgnumber_t rgno)
+{
+ struct xfs_rtgroup *rtg;
+
+ rcu_read_lock();
+ rtg = radix_tree_lookup(&mp->m_rtgroup_tree, rgno);
+ if (rtg) {
+ trace_xfs_rtgroup_get(rtg, _RET_IP_);
+ ASSERT(atomic_read(&rtg->rtg_ref) >= 0);
+ atomic_inc(&rtg->rtg_ref);
+ }
+ rcu_read_unlock();
+ return rtg;
+}
+
+/* Get a passive reference to the given rtgroup. */
+struct xfs_rtgroup *
+xfs_rtgroup_hold(
+ struct xfs_rtgroup *rtg)
+{
+ ASSERT(atomic_read(&rtg->rtg_ref) > 0 ||
+ atomic_read(&rtg->rtg_active_ref) > 0);
+
+ trace_xfs_rtgroup_hold(rtg, _RET_IP_);
+ atomic_inc(&rtg->rtg_ref);
+ return rtg;
+}
+
+void
+xfs_rtgroup_put(
+ struct xfs_rtgroup *rtg)
+{
+ trace_xfs_rtgroup_put(rtg, _RET_IP_);
+ ASSERT(atomic_read(&rtg->rtg_ref) > 0);
+ atomic_dec(&rtg->rtg_ref);
+}
+
+/*
+ * Active references for rtgroup structures. This is for short term access to
+ * the rtgroup structures for walking trees or accessing state. If an rtgroup
+ * is being shrunk or is offline, then this will fail to find that group and
+ * return NULL instead.
+ */
+struct xfs_rtgroup *
+xfs_rtgroup_grab(
+ struct xfs_mount *mp,
+	xfs_rgnumber_t		rgno)
+{
+ struct xfs_rtgroup *rtg;
+
+ rcu_read_lock();
+	rtg = radix_tree_lookup(&mp->m_rtgroup_tree, rgno);
+ if (rtg) {
+ trace_xfs_rtgroup_grab(rtg, _RET_IP_);
+ if (!atomic_inc_not_zero(&rtg->rtg_active_ref))
+ rtg = NULL;
+ }
+ rcu_read_unlock();
+ return rtg;
+}
+
+void
+xfs_rtgroup_rele(
+ struct xfs_rtgroup *rtg)
+{
+ trace_xfs_rtgroup_rele(rtg, _RET_IP_);
+ if (atomic_dec_and_test(&rtg->rtg_active_ref))
+ wake_up(&rtg->rtg_active_wq);
+}
+
+int
+xfs_rtgroup_alloc(
+ struct xfs_mount *mp,
+ xfs_rgnumber_t rgno)
+{
+ struct xfs_rtgroup *rtg;
+ int error;
+
+ rtg = kzalloc(sizeof(struct xfs_rtgroup), GFP_KERNEL);
+ if (!rtg)
+ return -ENOMEM;
+ rtg->rtg_rgno = rgno;
+ rtg->rtg_mount = mp;
+
+ error = radix_tree_preload(GFP_NOFS);
+ if (error)
+ goto out_free_rtg;
+
+ spin_lock(&mp->m_rtgroup_lock);
+ if (radix_tree_insert(&mp->m_rtgroup_tree, rgno, rtg)) {
+ spin_unlock(&mp->m_rtgroup_lock);
+ radix_tree_preload_end();
+ error = -EEXIST;
+ goto out_free_rtg;
+ }
+ spin_unlock(&mp->m_rtgroup_lock);
+ radix_tree_preload_end();
+
+#ifdef __KERNEL__
+ /* Place kernel structure only init below this point. */
+ spin_lock_init(&rtg->rtg_state_lock);
+ init_waitqueue_head(&rtg->rtg_active_wq);
+#endif /* __KERNEL__ */
+
+ /* Active ref owned by mount indicates rtgroup is online. */
+ atomic_set(&rtg->rtg_active_ref, 1);
+ return 0;
+
+out_free_rtg:
+ kfree(rtg);
+ return error;
+}
+
+void
+xfs_rtgroup_free(
+ struct xfs_mount *mp,
+ xfs_rgnumber_t rgno)
+{
+ struct xfs_rtgroup *rtg;
+
+ spin_lock(&mp->m_rtgroup_lock);
+ rtg = radix_tree_delete(&mp->m_rtgroup_tree, rgno);
+ spin_unlock(&mp->m_rtgroup_lock);
+
+ if (!rtg) /* can happen when growfs fails */
+ return;
+
+ XFS_IS_CORRUPT(mp, atomic_read(&rtg->rtg_ref) != 0);
+
+ /* drop the mount's active reference */
+ xfs_rtgroup_rele(rtg);
+ XFS_IS_CORRUPT(mp, atomic_read(&rtg->rtg_active_ref) != 0);
+
+ kfree_rcu_mightsleep(rtg);
+}
+
+/*
+ * Free up the rtgroup resources associated with the mount structure.
+ */
+void
+xfs_free_rtgroups(
+ struct xfs_mount *mp,
+ xfs_rgnumber_t rgcount)
+{
+ xfs_rgnumber_t rgno;
+
+ for (rgno = 0; rgno < rgcount; rgno++)
+ xfs_rtgroup_free(mp, rgno);
+}
+
+/* Compute the number of rt extents in this realtime group. */
+xfs_rtxnum_t
+xfs_rtgroup_extents(
+ struct xfs_mount *mp,
+ xfs_rgnumber_t rgno)
+{
+ xfs_rgnumber_t rgcount = mp->m_sb.sb_rgcount;
+
+ ASSERT(rgno < rgcount);
+ if (rgno == rgcount - 1)
+ return mp->m_sb.sb_rextents -
+ ((xfs_rtxnum_t)rgno * mp->m_sb.sb_rgextents);
+
+ ASSERT(xfs_has_rtgroups(mp));
+ return mp->m_sb.sb_rgextents;
+}
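To make the two reference types concrete: passive references (get/hold/put) keep the structure itself alive for long-lived cache objects, while active references (grab/rele) pin a group online for short walks and fail once the group is offlined. A sketch of a walker under that convention — xfs_rtgroup_count_online() is hypothetical, not part of the patch; the header below wraps this same loop shape in for_each_rtgroup():

/*
 * Hypothetical walker: count the rt groups that are currently online.
 * xfs_rtgroup_grab() returns NULL for a group that is offline or being
 * shrunk, so the walker simply skips it; every successful grab must be
 * paired with xfs_rtgroup_rele() before moving on.
 */
static xfs_rgnumber_t
xfs_rtgroup_count_online(
	struct xfs_mount	*mp)
{
	struct xfs_rtgroup	*rtg;
	xfs_rgnumber_t		rgno;
	xfs_rgnumber_t		online = 0;

	for (rgno = 0; rgno < mp->m_sb.sb_rgcount; rgno++) {
		rtg = xfs_rtgroup_grab(mp, rgno);
		if (!rtg)
			continue;
		online++;
		xfs_rtgroup_rele(rtg);
	}
	return online;
}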
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2022-2024 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
+ */
+#ifndef __LIBXFS_RTGROUP_H
+#define __LIBXFS_RTGROUP_H 1
+
+struct xfs_mount;
+struct xfs_trans;
+
+/*
+ * Realtime group incore structure, similar to the per-AG structure.
+ */
+struct xfs_rtgroup {
+ struct xfs_mount *rtg_mount;
+ xfs_rgnumber_t rtg_rgno;
+ atomic_t rtg_ref; /* passive reference count */
+ atomic_t rtg_active_ref; /* active reference count */
+	wait_queue_head_t	rtg_active_wq;/* woken when active_ref falls to zero */
+
+	/* Number of rt extents in this group */
+ xfs_rtxnum_t rtg_extents;
+
+#ifdef __KERNEL__
+ /* -- kernel only structures below this line -- */
+ spinlock_t rtg_state_lock;
+#endif /* __KERNEL__ */
+};
+
+#ifdef CONFIG_XFS_RT
+/* Passive rtgroup references */
+struct xfs_rtgroup *xfs_rtgroup_get(struct xfs_mount *mp, xfs_rgnumber_t rgno);
+struct xfs_rtgroup *xfs_rtgroup_hold(struct xfs_rtgroup *rtg);
+void xfs_rtgroup_put(struct xfs_rtgroup *rtg);
+
+/* Active rtgroup references */
+struct xfs_rtgroup *xfs_rtgroup_grab(struct xfs_mount *mp, xfs_rgnumber_t rgno);
+void xfs_rtgroup_rele(struct xfs_rtgroup *rtg);
+
+int xfs_rtgroup_alloc(struct xfs_mount *mp, xfs_rgnumber_t rgno);
+void xfs_rtgroup_free(struct xfs_mount *mp, xfs_rgnumber_t rgno);
+void xfs_free_rtgroups(struct xfs_mount *mp, xfs_rgnumber_t rgcount);
+#else /* CONFIG_XFS_RT */
+static inline struct xfs_rtgroup *xfs_rtgroup_get(struct xfs_mount *mp,
+ xfs_rgnumber_t rgno)
+{
+ return NULL;
+}
+static inline struct xfs_rtgroup *xfs_rtgroup_hold(struct xfs_rtgroup *rtg)
+{
+ ASSERT(rtg == NULL);
+ return NULL;
+}
+static inline void xfs_rtgroup_put(struct xfs_rtgroup *rtg)
+{
+}
+static inline int xfs_rtgroup_alloc(struct xfs_mount *mp,
+ xfs_rgnumber_t rgno)
+{
+ return 0;
+}
+static inline void xfs_free_rtgroups(struct xfs_mount *mp,
+ xfs_rgnumber_t rgcount)
+{
+}
+#define xfs_rtgroup_grab xfs_rtgroup_get
+#define xfs_rtgroup_rele xfs_rtgroup_put
+#endif /* CONFIG_XFS_RT */
+
+/*
+ * rt group iteration APIs
+ */
+static inline struct xfs_rtgroup *
+xfs_rtgroup_next(
+ struct xfs_rtgroup *rtg,
+ xfs_rgnumber_t *rgno,
+ xfs_rgnumber_t end_rgno)
+{
+ struct xfs_mount *mp = rtg->rtg_mount;
+
+ *rgno = rtg->rtg_rgno + 1;
+ xfs_rtgroup_rele(rtg);
+ if (*rgno > end_rgno)
+ return NULL;
+ return xfs_rtgroup_grab(mp, *rgno);
+}
+
+#define for_each_rtgroup_range(mp, rgno, end_rgno, rtg) \
+ for ((rtg) = xfs_rtgroup_grab((mp), (rgno)); \
+ (rtg) != NULL; \
+ (rtg) = xfs_rtgroup_next((rtg), &(rgno), (end_rgno)))
+
+#define for_each_rtgroup_from(mp, rgno, rtg) \
+ for_each_rtgroup_range((mp), (rgno), (mp)->m_sb.sb_rgcount - 1, (rtg))
+
+#define for_each_rtgroup(mp, rgno, rtg) \
+ (rgno) = 0; \
+ for_each_rtgroup_from((mp), (rgno), (rtg))
+
+static inline bool
+xfs_verify_rgbno(
+ struct xfs_rtgroup *rtg,
+ xfs_rgblock_t rgbno)
+{
+ struct xfs_mount *mp = rtg->rtg_mount;
+
+ if (rgbno >= rtg->rtg_extents * mp->m_sb.sb_rextsize)
+ return false;
+ if (xfs_has_rtsb(mp) && rtg->rtg_rgno == 0 &&
+ rgbno < mp->m_sb.sb_rextsize)
+ return false;
+ return true;
+}
+
+static inline bool
+xfs_verify_rgbext(
+ struct xfs_rtgroup *rtg,
+ xfs_rgblock_t rgbno,
+ xfs_rgblock_t len)
+{
+ if (rgbno + len <= rgbno)
+ return false;
+
+ if (!xfs_verify_rgbno(rtg, rgbno))
+ return false;
+
+ return xfs_verify_rgbno(rtg, rgbno + len - 1);
+}
+
+static inline xfs_rtblock_t
+xfs_rgno_start_rtb(
+ struct xfs_mount *mp,
+ xfs_rgnumber_t rgno)
+{
+ if (mp->m_rgblklog >= 0)
+ return ((xfs_rtblock_t)rgno << mp->m_rgblklog);
+ return ((xfs_rtblock_t)rgno * mp->m_rgblocks);
+}
+
+static inline xfs_rtblock_t
+xfs_rgbno_to_rtb(
+ struct xfs_mount *mp,
+ xfs_rgnumber_t rgno,
+ xfs_rgblock_t rgbno)
+{
+ return xfs_rgno_start_rtb(mp, rgno) + rgbno;
+}
+
+static inline xfs_rgnumber_t
+xfs_rtb_to_rgno(
+ struct xfs_mount *mp,
+ xfs_rtblock_t rtbno)
+{
+ if (!xfs_has_rtgroups(mp))
+ return 0;
+
+ if (mp->m_rgblklog >= 0)
+ return rtbno >> mp->m_rgblklog;
+
+ return div_u64(rtbno, mp->m_rgblocks);
+}
+
+static inline uint64_t
+__xfs_rtb_to_rgbno(
+ struct xfs_mount *mp,
+ xfs_rtblock_t rtbno)
+{
+ uint32_t rem;
+
+ if (!xfs_has_rtgroups(mp))
+ return rtbno;
+
+ if (mp->m_rgblklog >= 0)
+ return rtbno & mp->m_rgblkmask;
+
+ div_u64_rem(rtbno, mp->m_rgblocks, &rem);
+ return rem;
+}
+
+static inline xfs_rgblock_t
+xfs_rtb_to_rgbno(
+ struct xfs_mount *mp,
+ xfs_rtblock_t rtbno)
+{
+ return __xfs_rtb_to_rgbno(mp, rtbno);
+}
+
+static inline xfs_daddr_t
+xfs_rtb_to_daddr(
+ struct xfs_mount *mp,
+ xfs_rtblock_t rtbno)
+{
+ return rtbno << mp->m_blkbb_log;
+}
+
+static inline xfs_rtblock_t
+xfs_daddr_to_rtb(
+ struct xfs_mount *mp,
+ xfs_daddr_t daddr)
+{
+ return daddr >> mp->m_blkbb_log;
+}
+
+#ifdef CONFIG_XFS_RT
+xfs_rtxnum_t xfs_rtgroup_extents(struct xfs_mount *mp, xfs_rgnumber_t rgno);
+#else
+# define xfs_rtgroup_extents(mp, rgno) (0)
+#endif /* CONFIG_XFS_RT */
+
+#endif /* __LIBXFS_RTGROUP_H */
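As a sanity check on the conversion helpers: when the group size in blocks is a power of two, m_rgblklog holds its log2 and the rtblock-to-group split is a shift and a mask; otherwise m_rgblklog is negative and the helpers fall back to 64-bit division. A standalone sketch with hypothetical sizes, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors xfs_rtb_to_rgno()/xfs_rtb_to_rgbno(): split a global rt
 * block number into a (group, block-within-group) pair.  rgblklog is
 * log2 of the group size if that size is a power of two, else -1.
 */
static void rtb_split(uint64_t rtbno, uint32_t rgblocks, int8_t rgblklog,
		      uint32_t *rgno, uint32_t *rgbno)
{
	if (rgblklog >= 0) {
		*rgno = rtbno >> rgblklog;
		*rgbno = rtbno & ((1ULL << rgblklog) - 1);
	} else {
		*rgno = rtbno / rgblocks;
		*rgbno = rtbno % rgblocks;
	}
}

int main(void)
{
	uint32_t rgno, rgbno;

	/* Power-of-two group size: 4096 blocks, so rgblklog = 12. */
	rtb_split(10000, 4096, 12, &rgno, &rgbno);
	printf("shift/mask: rtb 10000 -> rg %u rgbno %u\n", rgno, rgbno);

	/* 3000-block groups are not a power of two, so rgblklog = -1. */
	rtb_split(10000, 3000, -1, &rgno, &rgbno);
	printf("division:   rtb 10000 -> rg %u rgbno %u\n", rgno, rgbno);
	return 0;
}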
to->sb_metadirino = be64_to_cpu(from->sb_metadirino);
else
to->sb_metadirino = NULLFSINO;
+
+ to->sb_rgcount = 1;
+ to->sb_rgextents = 0;
}
void
{
mp->m_rtxblklog = log2_if_power2(sbp->sb_rextsize);
mp->m_rtxblkmask = mask64_if_power2(sbp->sb_rextsize);
+
+ mp->m_rgblocks = 0;
+ mp->m_rgblklog = 0;
+ mp->m_rgblkmask = 0;
}
/*
typedef uint32_t prid_t; /* project ID */
typedef uint32_t xfs_agblock_t; /* blockno in alloc. group */
+typedef uint32_t xfs_rgblock_t; /* blockno in realtime group */
typedef uint32_t xfs_agino_t; /* inode # within allocation grp */
typedef uint32_t xfs_extlen_t; /* extent length in blocks */
typedef uint32_t xfs_rtxlen_t; /* file extent length in rtextents */
typedef uint32_t xfs_agnumber_t; /* allocation group number */
+typedef uint32_t xfs_rgnumber_t; /* realtime group number */
typedef uint64_t xfs_extnum_t; /* # of extents in a file */
typedef uint32_t xfs_aextnum_t; /* # extents in an attribute fork */
typedef int64_t xfs_fsize_t; /* bytes in a file */
#define NULLFILEOFF ((xfs_fileoff_t)-1)
#define NULLAGBLOCK ((xfs_agblock_t)-1)
+#define NULLRGBLOCK ((xfs_rgblock_t)-1)
#define NULLAGNUMBER ((xfs_agnumber_t)-1)
+#define NULLRGNUMBER ((xfs_rgnumber_t)-1)
#define NULLCOMMITLSN ((xfs_lsn_t)-1)
#include "xfs_ag.h"
#include "xfs_quota.h"
#include "xfs_reflink.h"
+#include "xfs_rtgroup.h"
#define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
struct xfs_mount *mp = log->l_mp;
struct xfs_buf *bp = mp->m_sb_bp;
struct xfs_sb *sbp = &mp->m_sb;
+ xfs_rgnumber_t old_rgcount = sbp->sb_rgcount;
int error;
trace_xfs_log_recover(log, head_blk, tail_blk);
xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
return error;
}
+
+ if (sbp->sb_rgcount < old_rgcount) {
+ xfs_warn(mp, "rgcount shrink not supported");
+ return -EINVAL;
+ }
+ if (sbp->sb_rgcount > old_rgcount) {
+ xfs_rgnumber_t rgno;
+
+ for (rgno = old_rgcount; rgno < sbp->sb_rgcount; rgno++) {
+ error = xfs_rtgroup_alloc(mp, rgno);
+ if (error) {
+ xfs_warn(mp,
+ "Failed post-recovery rtgroup init: %d",
+ error);
+ return error;
+ }
+ }
+ }
mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
/* Normal transactions can now occur */
#include "xfs_ag.h"
#include "xfs_rtbitmap.h"
#include "xfs_metafile.h"
+#include "xfs_rtgroup.h"
#include "scrub/stats.h"
static DEFINE_MUTEX(xfs_uuid_table_mutex);
struct xfs_ino_geometry *igeo = M_IGEO(mp);
uint quotamount = 0;
uint quotaflags = 0;
+ xfs_rgnumber_t rgno;
int error = 0;
xfs_sb_mount_common(mp, sbp);
goto out_free_dir;
}
+ for (rgno = 0; rgno < mp->m_sb.sb_rgcount; rgno++) {
+ error = xfs_rtgroup_alloc(mp, rgno);
+ if (error) {
+ xfs_warn(mp, "Failed rtgroup init: %d", error);
+ goto out_free_rtgroup;
+ }
+ }
+
if (XFS_IS_CORRUPT(mp, !sbp->sb_logblocks)) {
xfs_warn(mp, "no log defined");
error = -EFSCORRUPTED;
- goto out_free_perag;
+ goto out_free_rtgroup;
}
error = xfs_inodegc_register_shrinker(mp);
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
xfs_buftarg_drain(mp->m_logdev_targp);
xfs_buftarg_drain(mp->m_ddev_targp);
- out_free_perag:
+ out_free_rtgroup:
+ xfs_free_rtgroups(mp, rgno);
xfs_free_perag(mp);
out_free_dir:
xfs_da_unmount(mp);
xfs_errortag_clearall(mp);
#endif
shrinker_free(mp->m_inodegc_shrinker);
+ xfs_free_rtgroups(mp, mp->m_sb.sb_rgcount);
xfs_free_perag(mp);
xfs_errortag_del(mp);
uint8_t m_agno_log; /* log #ag's */
uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
int8_t m_rtxblklog; /* log2 of rextsize, if possible */
+ int8_t m_rgblklog; /* log2 of rt group sz if possible */
uint m_blockmask; /* sb_blocksize-1 */
uint m_blockwsize; /* sb_blocksize in words */
uint m_blockwmask; /* blockwsize-1 */
int m_logbsize; /* size of each log buffer */
uint m_rsumlevels; /* rt summary levels */
xfs_filblks_t m_rsumblocks; /* size of rt summary, FSBs */
+ uint32_t m_rgblocks; /* size of rtgroup in rtblocks */
int m_fixedfsid[2]; /* unchanged for life of FS */
uint m_qflags; /* quota status flags */
uint64_t m_features; /* active filesystem features */
uint64_t m_low_space[XFS_LOWSP_MAX];
uint64_t m_low_rtexts[XFS_LOWSP_MAX];
uint64_t m_rtxblkmask; /* rt extent block mask */
+ uint64_t m_rgblkmask; /* rt group block mask */
struct xfs_ino_geometry m_ino_geo; /* inode geometry */
struct xfs_trans_resv m_resv; /* precomputed res values */
/* low free space thresholds */
*/
atomic64_t m_allocbt_blks;
+ struct radix_tree_root m_rtgroup_tree; /* per-rt group info */
+ spinlock_t m_rtgroup_lock; /* lock for m_rtgroup_tree */
struct radix_tree_root m_perag_tree; /* per-ag accounting info */
spinlock_t m_perag_lock; /* lock for m_perag_tree */
uint64_t m_resblks; /* total reserved blocks */
#define XFS_FEAT_NREXT64 (1ULL << 26) /* large extent counters */
#define XFS_FEAT_EXCHANGE_RANGE (1ULL << 27) /* exchange range */
#define XFS_FEAT_METADIR (1ULL << 28) /* metadata directory tree */
+#define XFS_FEAT_RTGROUPS (1ULL << 29) /* realtime groups */
+#define XFS_FEAT_RTSB (1ULL << 30) /* realtime superblock */
/* Mount features */
#define XFS_FEAT_NOATTR2 (1ULL << 48) /* disable attr2 creation */
__XFS_HAS_FEAT(large_extent_counts, NREXT64)
__XFS_HAS_FEAT(exchange_range, EXCHANGE_RANGE)
__XFS_HAS_FEAT(metadir, METADIR)
+__XFS_HAS_FEAT(rtgroups, RTGROUPS)
+__XFS_HAS_FEAT(rtsb, RTSB)
/*
* Some features are always on for v5 file systems, allow the compiler to
#include "xfs_health.h"
#include "xfs_da_format.h"
#include "xfs_metafile.h"
+#include "xfs_rtgroup.h"
/*
* Return whether there are any free extents in the size range given
{
struct xfs_trans *tp;
struct xfs_sb *sbp = &mp->m_sb;
+ struct xfs_rtgroup *rtg;
+ xfs_rgnumber_t rgno;
int error;
error = xfs_trans_alloc_empty(mp, &tp);
if (error)
goto out_rele_summary;
+ for_each_rtgroup(mp, rgno, rtg)
+ rtg->rtg_extents = xfs_rtgroup_extents(mp, rtg->rtg_rgno);
+
error = xfs_alloc_rsum_cache(mp, sbp->sb_rbmblocks);
if (error)
goto out_rele_summary;
spin_lock_init(&mp->m_sb_lock);
INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
spin_lock_init(&mp->m_perag_lock);
+ INIT_RADIX_TREE(&mp->m_rtgroup_tree, GFP_ATOMIC);
+ spin_lock_init(&mp->m_rtgroup_lock);
mutex_init(&mp->m_growlock);
INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
#include "xfs_refcount.h"
#include "xfs_metafile.h"
#include "xfs_metadir.h"
+#include "xfs_rtgroup.h"
/*
* We include this last to have the helpers above available for the trace
struct xfs_rmap_intent;
struct xfs_refcount_intent;
struct xfs_metadir_update;
+struct xfs_rtgroup;
#define XFS_ATTR_FILTER_FLAGS \
{ XFS_ATTR_ROOT, "ROOT" }, \
DEFINE_PERAG_REF_EVENT(xfs_perag_set_inode_tag);
DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag);
+#ifdef CONFIG_XFS_RT
+DECLARE_EVENT_CLASS(xfs_rtgroup_class,
+ TP_PROTO(struct xfs_rtgroup *rtg, unsigned long caller_ip),
+ TP_ARGS(rtg, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_rgnumber_t, rgno)
+ __field(int, refcount)
+ __field(int, active_refcount)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = rtg->rtg_mount->m_super->s_dev;
+ __entry->rgno = rtg->rtg_rgno;
+ __entry->refcount = atomic_read(&rtg->rtg_ref);
+ __entry->active_refcount = atomic_read(&rtg->rtg_active_ref);
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d rgno 0x%x passive refs %d active refs %d caller %pS",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rgno,
+ __entry->refcount,
+ __entry->active_refcount,
+ (char *)__entry->caller_ip)
+);
+
+#define DEFINE_RTGROUP_REF_EVENT(name) \
+DEFINE_EVENT(xfs_rtgroup_class, name, \
+ TP_PROTO(struct xfs_rtgroup *rtg, unsigned long caller_ip), \
+ TP_ARGS(rtg, caller_ip))
+DEFINE_RTGROUP_REF_EVENT(xfs_rtgroup_get);
+DEFINE_RTGROUP_REF_EVENT(xfs_rtgroup_hold);
+DEFINE_RTGROUP_REF_EVENT(xfs_rtgroup_put);
+DEFINE_RTGROUP_REF_EVENT(xfs_rtgroup_grab);
+DEFINE_RTGROUP_REF_EVENT(xfs_rtgroup_rele);
+#endif /* CONFIG_XFS_RT */
+
TRACE_EVENT(xfs_inodegc_worker,
TP_PROTO(struct xfs_mount *mp, unsigned int shrinker_hits),
TP_ARGS(mp, shrinker_hits),