--- /dev/null
+++ b/fs/btrfs/lru_cache.c
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mm.h>
+#include "lru_cache.h"
+#include "messages.h"
+
+/*
+ * Initialize a cache object.
+ *
+ * @cache:      The cache.
+ * @max_size:   Maximum size (number of entries) for the cache.
+ */
+void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size)
+{
+       INIT_LIST_HEAD(&cache->lru_list);
+       mt_init(&cache->entries);
+       cache->size = 0;
+       cache->max_size = max_size;
+}
+
+/*
+ * Look up an entry in the cache.
+ *
+ * @cache:      The cache.
+ * @key:        The key of the entry we are looking for.
+ *
+ * Returns the entry associated with the key, or NULL if none was found. On a
+ * hit, the entry becomes the most recently used one, i.e. it is moved to the
+ * tail of the cache's LRU list.
+ */
+struct btrfs_lru_cache_entry *btrfs_lru_cache_lookup(struct btrfs_lru_cache *cache,
+                                                    u64 key)
+{
+       struct btrfs_lru_cache_entry *entry;
+
+       entry = mtree_load(&cache->entries, key);
+       if (entry)
+               list_move_tail(&entry->lru_list, &cache->lru_list);
+
+       return entry;
+}
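+
+/*
+ * Callers get their containing structure back from a returned entry with
+ * container_of(), since entries are embedded at offset 0 (see lru_cache.h).
+ * A sketch, using a hypothetical user structure "struct my_entry" whose
+ * first member is named "entry":
+ *
+ *	raw_entry = btrfs_lru_cache_lookup(cache, key);
+ *	if (raw_entry)
+ *		me = container_of(raw_entry, struct my_entry, entry);
+ */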
+
+/*
+ * Store an entry in the cache.
+ *
+ * @cache:      The cache.
+ * @new_entry:  The entry to store, with its key already set.
+ * @gfp:        GFP flags to use for the maple tree insertion.
+ *
+ * Returns 0 on success and < 0 on error. On success the cache takes ownership
+ * of the entry, and will free it with kfree() when it is evicted or the cache
+ * is cleared.
+ */
+int btrfs_lru_cache_store(struct btrfs_lru_cache *cache,
+                         struct btrfs_lru_cache_entry *new_entry,
+                         gfp_t gfp)
+{
+       int ret;
+
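+       /* If the cache is full, evict the least recently used entry. */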
+       if (cache->size == cache->max_size) {
+               struct btrfs_lru_cache_entry *lru_entry;
+               struct btrfs_lru_cache_entry *mt_entry;
+
+               lru_entry = list_first_entry(&cache->lru_list,
+                                            struct btrfs_lru_cache_entry,
+                                            lru_list);
+               mt_entry = mtree_erase(&cache->entries, lru_entry->key);
+               ASSERT(mt_entry == lru_entry);
+               list_del(&mt_entry->lru_list);
+               kfree(mt_entry);
+               cache->size--;
+       }
+
+       ret = mtree_insert(&cache->entries, new_entry->key, new_entry, gfp);
+       if (ret < 0)
+               return ret;
+
+       list_add_tail(&new_entry->lru_list, &cache->lru_list);
+       cache->size++;
+
+       return 0;
+}
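+
+/*
+ * Usage sketch (illustrative only; "my_entry" and GFP_KERNEL are assumptions
+ * of the example, not requirements of the API). Entries must be allocated
+ * with kmalloc(), since the cache frees them with kfree():
+ *
+ *	me = kmalloc(sizeof(*me), GFP_KERNEL);
+ *	if (!me)
+ *		return -ENOMEM;
+ *	me->entry.key = key;
+ *	ret = btrfs_lru_cache_store(cache, &me->entry, GFP_KERNEL);
+ *	if (ret < 0)
+ *		kfree(me);
+ */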
+
+/*
+ * Empty a cache.
+ *
+ * @cache:     The cache to empty.
+ *
+ * Removes all entries from the cache, freeing each one with kfree().
+ */
+void btrfs_lru_cache_clear(struct btrfs_lru_cache *cache)
+{
+       struct btrfs_lru_cache_entry *entry;
+       struct btrfs_lru_cache_entry *tmp;
+
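+       /*
+        * Freeing the entry pointers directly is safe because each entry is
+        * at offset 0 of a structure allocated with kmalloc() (see the
+        * comment for struct btrfs_lru_cache_entry in lru_cache.h).
+        */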
+       list_for_each_entry_safe(entry, tmp, &cache->lru_list, lru_list)
+               kfree(entry);
+
+       INIT_LIST_HEAD(&cache->lru_list);
+       mtree_destroy(&cache->entries);
+       cache->size = 0;
+}
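+
+/*
+ * Note: a cleared cache can be reused right away, without calling
+ * btrfs_lru_cache_init() again. The send code relies on this: it clears the
+ * backref cache when a relocation is detected and keeps storing entries into
+ * it afterwards.
+ */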
 
--- /dev/null
+++ b/fs/btrfs/lru_cache.h
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_LRU_CACHE_H
+#define BTRFS_LRU_CACHE_H
+
+#include <linux/maple_tree.h>
+#include <linux/list.h>
+
+/*
+ * A cache entry. This is meant to be embedded in a structure of a user of
+ * this module, similar to how struct list_head and struct rb_node are used.
+ *
+ * Note: it must be embedded as the first element of a struct (offset 0),
+ * because this module assumes the whole containing structure was allocated
+ * with kmalloc(), and calls kfree() on the entry pointer when it needs to
+ * free an entry.
+ */
+struct btrfs_lru_cache_entry {
+       struct list_head lru_list;
+       u64 key;
+};
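+
+/*
+ * Embedding sketch (the structure name is hypothetical; the real user in
+ * this patch is struct backref_cache_entry in send.c):
+ *
+ *	struct my_entry {
+ *		struct btrfs_lru_cache_entry entry;	(must be the first member)
+ *		u64 payload;
+ *	};
+ */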
+
+struct btrfs_lru_cache {
+       struct list_head lru_list;
+       struct maple_tree entries;
+       /* Number of entries stored in the cache. */
+       unsigned int size;
+       /* Maximum number of entries the cache can have. */
+       unsigned int max_size;
+};
+
+static inline unsigned int btrfs_lru_cache_size(const struct btrfs_lru_cache *cache)
+{
+       return cache->size;
+}
+
+void btrfs_lru_cache_init(struct btrfs_lru_cache *cache, unsigned int max_size);
+struct btrfs_lru_cache_entry *btrfs_lru_cache_lookup(struct btrfs_lru_cache *cache,
+                                                    u64 key);
+int btrfs_lru_cache_store(struct btrfs_lru_cache *cache,
+                         struct btrfs_lru_cache_entry *new_entry,
+                         gfp_t gfp);
+void btrfs_lru_cache_clear(struct btrfs_lru_cache *cache);
+
+#endif

--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
 #include "file-item.h"
 #include "ioctl.h"
 #include "verity.h"
+#include "lru_cache.h"
 
 /*
  * Maximum number of references an extent can have in order for us to attempt to
  * x86_64).
  */
 struct backref_cache_entry {
-       /* List to link to the cache's lru list. */
-       struct list_head list;
-       /* The key for this entry in the cache. */
-       u64 key;
+       struct btrfs_lru_cache_entry entry;
        u64 root_ids[SEND_MAX_BACKREF_CACHE_ROOTS];
        /* Number of valid elements in the root_ids array. */
        int num_roots;
 };
 
+/* See the comment in lru_cache.h about struct btrfs_lru_cache_entry. */
+static_assert(offsetof(struct backref_cache_entry, entry) == 0);
+
 struct send_ctx {
        struct file *send_filp;
        loff_t send_off;
        struct rb_root rbtree_new_refs;
        struct rb_root rbtree_deleted_refs;
 
-       struct {
-               u64 last_reloc_trans;
-               struct list_head lru_list;
-               struct maple_tree entries;
-               /* Number of entries stored in the cache. */
-               int size;
-       } backref_cache;
+       struct btrfs_lru_cache backref_cache;
+       u64 backref_cache_last_reloc_trans;
 };
 
 struct pending_dir_move {
        return 0;
 }
 
-static void empty_backref_cache(struct send_ctx *sctx)
-{
-       struct backref_cache_entry *entry;
-       struct backref_cache_entry *tmp;
-
-       list_for_each_entry_safe(entry, tmp, &sctx->backref_cache.lru_list, list)
-               kfree(entry);
-
-       INIT_LIST_HEAD(&sctx->backref_cache.lru_list);
-       mtree_destroy(&sctx->backref_cache.entries);
-       sctx->backref_cache.size = 0;
-}
-
 static bool lookup_backref_cache(u64 leaf_bytenr, void *ctx,
                                 const u64 **root_ids_ret, int *root_count_ret)
 {
        struct backref_ctx *bctx = ctx;
        struct send_ctx *sctx = bctx->sctx;
        struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
        const u64 key = leaf_bytenr >> fs_info->sectorsize_bits;
+       struct btrfs_lru_cache_entry *raw_entry;
        struct backref_cache_entry *entry;
 
-       if (sctx->backref_cache.size == 0)
+       if (btrfs_lru_cache_size(&sctx->backref_cache) == 0)
                return false;
 
        /*
         * transaction handle or holding fs_info->commit_root_sem, so no need
         * to take any lock here.
         */
-       if (fs_info->last_reloc_trans > sctx->backref_cache.last_reloc_trans) {
-               empty_backref_cache(sctx);
+       if (fs_info->last_reloc_trans > sctx->backref_cache_last_reloc_trans) {
+               btrfs_lru_cache_clear(&sctx->backref_cache);
                return false;
        }
 
-       entry = mtree_load(&sctx->backref_cache.entries, key);
-       if (!entry)
+       raw_entry = btrfs_lru_cache_lookup(&sctx->backref_cache, key);
+       if (!raw_entry)
                return false;
 
+       entry = container_of(raw_entry, struct backref_cache_entry, entry);
        *root_ids_ret = entry->root_ids;
        *root_count_ret = entry->num_roots;
-       list_move_tail(&entry->list, &sctx->backref_cache.lru_list);
 
        return true;
 }
        if (!new_entry)
                return;
 
-       new_entry->key = leaf_bytenr >> fs_info->sectorsize_bits;
+       new_entry->entry.key = leaf_bytenr >> fs_info->sectorsize_bits;
        new_entry->num_roots = 0;
        ULIST_ITER_INIT(&uiter);
        while ((node = ulist_next(root_ids, &uiter)) != NULL) {
         * none of the roots is part of the list of roots from which we are
         * allowed to clone. Cache the new entry as it's still useful to avoid
         * backref walking to determine which roots have a path to the leaf.
+        *
+        * Also use GFP_NOFS because we're called while holding a transaction
+        * handle or while holding fs_info->commit_root_sem.
         */
-
-       if (sctx->backref_cache.size >= SEND_MAX_BACKREF_CACHE_SIZE) {
-               struct backref_cache_entry *lru_entry;
-               struct backref_cache_entry *mt_entry;
-
-               lru_entry = list_first_entry(&sctx->backref_cache.lru_list,
-                                            struct backref_cache_entry, list);
-               mt_entry = mtree_erase(&sctx->backref_cache.entries, lru_entry->key);
-               ASSERT(mt_entry == lru_entry);
-               list_del(&mt_entry->list);
-               kfree(mt_entry);
-               sctx->backref_cache.size--;
-       }
-
-       ret = mtree_insert(&sctx->backref_cache.entries, new_entry->key,
-                          new_entry, GFP_NOFS);
+       ret = btrfs_lru_cache_store(&sctx->backref_cache, &new_entry->entry,
+                                   GFP_NOFS);
        ASSERT(ret == 0 || ret == -ENOMEM);
        if (ret) {
                /* Caching is optional, no worries. */
                kfree(new_entry);
                return;
        }
 
-       list_add_tail(&new_entry->list, &sctx->backref_cache.lru_list);
-
        /*
         * We are called from iterate_extent_inodes() while either holding a
         * transaction handle or holding fs_info->commit_root_sem, so no need
         * to take any lock here.
         */
-       if (sctx->backref_cache.size == 0)
-               sctx->backref_cache.last_reloc_trans = fs_info->last_reloc_trans;
-
-       sctx->backref_cache.size++;
+       if (btrfs_lru_cache_size(&sctx->backref_cache) == 1)
+               sctx->backref_cache_last_reloc_trans = fs_info->last_reloc_trans;
 }
 
 static int check_extent_item(u64 bytenr, const struct btrfs_extent_item *ei,
        INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
        INIT_LIST_HEAD(&sctx->name_cache_list);
 
-       INIT_LIST_HEAD(&sctx->backref_cache.lru_list);
-       mt_init(&sctx->backref_cache.entries);
+       btrfs_lru_cache_init(&sctx->backref_cache, SEND_MAX_BACKREF_CACHE_SIZE);
 
        sctx->pending_dir_moves = RB_ROOT;
        sctx->waiting_dir_moves = RB_ROOT;
 
                close_current_inode(sctx);
 
-               empty_backref_cache(sctx);
+               btrfs_lru_cache_clear(&sctx->backref_cache);
 
                kfree(sctx);
        }