/* linux/include/linux/swap.h */

/* Only track the nodes of mappings with shadow entries */
 void workingset_update_node(struct xa_node *node);
+extern struct list_lru shadow_nodes;
 #define mapping_set_update(xas, mapping) do {                          \
-       if (!dax_mapping(mapping) && !shmem_mapping(mapping))           \
+       if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {         \
                xas_set_update(xas, workingset_update_node);            \
+               xas_set_lru(xas, &shadow_nodes);                        \
+       }                                                               \
 } while (0)
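
A minimal sketch of a consumer, modeled on __filemap_add_folio() in
mm/filemap.c (the function name and error handling are simplified here
and are not part of the patch): any page-cache insertion that goes
through mapping_set_update() now has its xa_node allocations routed to
the shadow_nodes list_lru.

static int example_add_folio(struct address_space *mapping,
			     struct folio *folio, pgoff_t index)
{
	XA_STATE(xas, &mapping->i_pages, index);

	/* Installs workingset_update_node() and, with this patch,
	 * points xa_lru at shadow_nodes (unless DAX or shmem). */
	mapping_set_update(&xas, mapping);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, folio);		/* may need new xa_nodes */
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));	/* allocates via xa_lru */

	return xas_error(&xas);
}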
 
/* linux/include/linux/xarray.h */
 
        struct xa_node *xa_node;
        struct xa_node *xa_alloc;
        xa_update_node_t xa_update;
+       struct list_lru *xa_lru;
 };
 
 /*
        .xa_pad = 0,                                    \
        .xa_node = XAS_RESTART,                         \
        .xa_alloc = NULL,                               \
-       .xa_update = NULL                               \
+       .xa_update = NULL,                              \
+       .xa_lru = NULL,                                 \
 }
 
 /**
        xas->xa_update = update;
 }
 
+static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru)
+{
+       xas->xa_lru = lru;
+}
+
 /**
  * xas_next_entry() - Advance iterator to next present entry.
  * @xas: XArray operation state.
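
Note that __XA_STATE() above now initializes xa_lru to NULL, so untouched
XArray users keep the old allocation behaviour; only callers that opt in
through xas_set_lru() are affected. A sketch of a hypothetical opt-in
user outside the page cache (my_lru, my_update_node and my_init_state
are made-up names, not kernel symbols):

static struct list_lru my_lru;	/* list_lru_init(&my_lru) at init time */

static void my_update_node(struct xa_node *node)
{
	/* Move @node on/off my_lru as it fills and empties, the way
	 * workingset_update_node() manages shadow_nodes. */
}

static void my_init_state(struct xa_state *xas)
{
	xas_set_update(xas, my_update_node);
	xas_set_lru(xas, &my_lru);	/* future xa_node allocations use it */
}

The remaining hunks convert the four xa_node allocation sites in
lib/xarray.c: xas_nomem(), __xas_nomem(), xas_alloc() and
xas_split_alloc().

/* linux/lib/xarray.c */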
 
        }
        if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
                gfp |= __GFP_ACCOUNT;
-       xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+       xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
        if (!xas->xa_alloc)
                return false;
        xas->xa_alloc->parent = NULL;
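
kmem_cache_alloc_lru() is added by an earlier patch in this series and
declared in include/linux/slab.h. As I read that patch: with a NULL @lru
it behaves exactly like kmem_cache_alloc(); with a non-NULL @lru under
memcg accounting it also allocates the per-memcg list_lru structures for
the object's cgroup on demand, so a later list_lru_add() of the object
cannot fail for lack of memory.

void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
			   gfp_t gfpflags);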
                gfp |= __GFP_ACCOUNT;
        if (gfpflags_allow_blocking(gfp)) {
                xas_unlock_type(xas, lock_type);
-               xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+               xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
                xas_lock_type(xas, lock_type);
        } else {
-               xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+               xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
        }
        if (!xas->xa_alloc)
                return false;
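
The split in __xas_nomem() is unchanged by the patch: when the gfp mask
allows blocking, the xa_lock is dropped around the allocation so it can
sleep and is retaken afterwards; otherwise the allocation is attempted
atomically under the lock. Both paths now pass xas->xa_lru.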
                if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
                        gfp |= __GFP_ACCOUNT;
 
-               node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+               node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
                if (!node) {
                        xas_set_err(xas, -ENOMEM);
                        return NULL;
                void *sibling = NULL;
                struct xa_node *node;
 
-               node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+               node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
                if (!node)
                        goto nomem;
                node->array = xas->xa;
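
For context, roughly what the callback installed by mapping_set_update()
does with these LRU-tracked nodes (a simplified paraphrase of
mm/workingset.c; the real function also maintains the WORKINGSET_NODES
counter). Allocating each node against shadow_nodes up front is what
guarantees that this list_lru_add() finds its per-memcg structures
already in place:

void workingset_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		/* Only shadow entries left: park the node on the LRU
		 * where the workingset shrinker can reclaim it. */
		if (list_empty(&node->private_list))
			list_lru_add(&shadow_nodes, &node->private_list);
	} else {
		/* Node regained real entries (or emptied entirely). */
		if (!list_empty(&node->private_list))
			list_lru_del(&shadow_nodes, &node->private_list);
	}
}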