/* linux/mm/workingset.c */
  void *workingset_eviction(struct address_space *mapping, struct page *page);
 -bool workingset_refault(void *shadow);
 +void workingset_refault(struct page *page, void *shadow);
  void workingset_activation(struct page *page);
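/*
 * Illustration, not part of the patch: workingset_refault() now takes the
 * refaulting page and returns void, which suggests the activation decision
 * moved inside it. A hedged sketch of the new call-site shape
 * (example_finish_refault() is hypothetical):
 */
static void example_finish_refault(struct page *page, void *shadow)
{
        if (shadow)
                workingset_refault(page, shadow);
        /* old shape: if (shadow && workingset_refault(shadow))
         *                    SetPageActive(page);
         */
}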
  
- /* Do not use directly, use workingset_lookup_update */
- void workingset_update_node(struct radix_tree_node *node);
- 
- /* Returns workingset_update_node() if the mapping has shadow entries. */
- #define workingset_lookup_update(mapping)                             \
- ({                                                                    \
-       radix_tree_update_node_t __helper = workingset_update_node;     \
-       if (dax_mapping(mapping) || shmem_mapping(mapping))             \
-               __helper = NULL;                                        \
-       __helper;                                                       \
- })
+ /* Only track the nodes of mappings with shadow entries */
+ void workingset_update_node(struct xa_node *node);
+ #define mapping_set_update(xas, mapping) do {                         \
+       if (!dax_mapping(mapping) && !shmem_mapping(mapping))           \
+               xas_set_update(xas, workingset_update_node);            \
+ } while (0)
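/*
 * Illustration, not part of the patch: a minimal sketch of how a page
 * cache path would arm the tracking callback before modifying the tree.
 * example_delete_shadow() is hypothetical; XA_STATE(), xas_lock_irq(),
 * xas_store() and xas_unlock_irq() are the real XArray API.
 */
static void example_delete_shadow(struct address_space *mapping, pgoff_t index)
{
        XA_STATE(xas, &mapping->i_pages, index);

        mapping_set_update(&xas, mapping);      /* no-op for DAX and shmem */
        xas_lock_irq(&xas);
        xas_store(&xas, NULL);          /* may fire workingset_update_node() */
        xas_unlock_irq(&xas);
}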
  
  /* linux/mm/page_alloc.c */
  extern unsigned long totalram_pages;
 
   * refault distance will immediately activate the refaulting page.
   */
  
-#define EVICTION_SHIFT (RADIX_TREE_EXCEPTIONAL_ENTRY +        \
-                        NODES_SHIFT +                         \
-                        MEM_CGROUP_ID_SHIFT)
+#define EVICTION_SHIFT ((BITS_PER_LONG - BITS_PER_XA_VALUE) + \
+                        1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
  #define EVICTION_MASK (~0UL >> EVICTION_SHIFT)
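/*
 * Worked example, not part of the patch: on a 64-bit kernel,
 * BITS_PER_XA_VALUE is BITS_PER_LONG - 1 = 63, so the xa_value tag bit
 * costs one bit. With MEM_CGROUP_ID_SHIFT = 16 and assuming a config
 * where NODES_SHIFT = 10:
 *
 *      EVICTION_SHIFT = 1 + 1 + 10 + 16 = 28
 *
 * leaving 64 - 28 = 36 bits of eviction timestamp, which EVICTION_MASK
 * then selects. The extra "1" is the new workingset bit packed below.
 */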
  
  /*
   */
  static unsigned int bucket_order __read_mostly;
  
 -static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction)
 +static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
 +                       bool workingset)
  {
        eviction >>= bucket_order;
+       eviction &= EVICTION_MASK;
        eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
        eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
-       eviction = (eviction << RADIX_TREE_EXCEPTIONAL_SHIFT);
 +      eviction = (eviction << 1) | workingset;
  
-       return (void *)(eviction | RADIX_TREE_EXCEPTIONAL_ENTRY);
+       return xa_mk_value(eviction);
  }
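/*
 * Not part of the patch: the resulting shadow entry layout, high bits on
 * the left:
 *
 *      [ eviction >> bucket_order | memcg id | node id | workingset ]
 *             (timestamp)           16 bits   NODES_SHIFT   1 bit
 *
 * xa_mk_value() then shifts the whole word left by one and sets the low
 * tag bit, which is the (BITS_PER_LONG - BITS_PER_XA_VALUE) bit charged
 * in EVICTION_SHIFT above.
 */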
  
  static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
 -                        unsigned long *evictionp)
 +                        unsigned long *evictionp, bool *workingsetp)
  {
-       unsigned long entry = (unsigned long)shadow;
+       unsigned long entry = xa_to_value(shadow);
        int memcgid, nid;
 +      bool workingset;
  
-       entry >>= RADIX_TREE_EXCEPTIONAL_SHIFT;
 +      workingset = entry & 1;
 +      entry >>= 1;
        nid = entry & ((1UL << NODES_SHIFT) - 1);
        entry >>= NODES_SHIFT;
        memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
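/*
 * Illustration, not part of the patch: pack_shadow() and unpack_shadow()
 * are inverses up to the bucket_order granularity dropped from the
 * eviction counter. A hedged self-check sketch (example_roundtrip() is
 * hypothetical, and it assumes the tail of unpack_shadow(), elided above,
 * re-applies bucket_order as in the full function):
 */
static void example_roundtrip(pg_data_t *pgdat)
{
        unsigned long eviction = 12345UL << bucket_order;
        void *shadow = pack_shadow(7, pgdat, eviction, true);
        unsigned long ev;
        pg_data_t *out;
        int memcgid;
        bool ws;

        VM_WARN_ON_ONCE(!xa_is_value(shadow));
        unpack_shadow(shadow, &memcgid, &out, &ev, &ws);
        VM_WARN_ON_ONCE(memcgid != 7 || out != pgdat || ev != eviction || !ws);
}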
         * already where they should be. The list_empty() test is safe
         * as node->private_list is protected by the i_pages lock.
         */
+       VM_WARN_ON_ONCE(!irqs_disabled());  /* For __inc_lruvec_page_state */
+
-       if (node->count && node->count == node->exceptional) {
-               if (list_empty(&node->private_list))
+       if (node->count && node->count == node->nr_values) {
+               if (list_empty(&node->private_list)) {
                        list_lru_add(&shadow_nodes, &node->private_list);
 +                      __inc_lruvec_page_state(virt_to_page(node),
 +                                              WORKINGSET_NODES);
 +              }
        } else {
 -              if (!list_empty(&node->private_list))
 +              if (!list_empty(&node->private_list)) {
                        list_lru_del(&shadow_nodes, &node->private_list);
 +                      __dec_lruvec_page_state(virt_to_page(node),
 +                                              WORKINGSET_NODES);
 +              }
        }
  }
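/*
 * Illustration, not part of the patch: the invariant maintained above is
 * "a node sits on shadow_nodes iff it is non-empty and holds nothing but
 * shadow (value) entries". A hedged restatement of the test:
 */
static bool example_node_is_tracked(const struct xa_node *node)
{
        /* every live slot is an xa_value shadow entry */
        return node->count && node->count == node->nr_values;
}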
  
         * each, this will reclaim shadow entries when they consume
         * ~1.8% of available memory:
         *
-        * PAGE_SIZE / radix_tree_nodes / node_entries * 8 / PAGE_SIZE
+        * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
         */
 +#ifdef CONFIG_MEMCG
        if (sc->memcg) {
 -              cache = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
 -                                                   LRU_ALL_FILE);
 -      } else {
 -              cache = node_page_state(NODE_DATA(sc->nid), NR_ACTIVE_FILE) +
 -                      node_page_state(NODE_DATA(sc->nid), NR_INACTIVE_FILE);
 -      }
-       max_nodes = cache >> (RADIX_TREE_MAP_SHIFT - 3);
 +              struct lruvec *lruvec;
 +
 +              pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
 +                                                   LRU_ALL);
 +              lruvec = mem_cgroup_lruvec(NODE_DATA(sc->nid), sc->memcg);
 +              pages += lruvec_page_state(lruvec, NR_SLAB_RECLAIMABLE);
 +              pages += lruvec_page_state(lruvec, NR_SLAB_UNRECLAIMABLE);
 +      } else
 +#endif
 +              pages = node_present_pages(sc->nid);
 +
+       max_nodes = pages >> (XA_CHUNK_SHIFT - 3);
  
        if (!nodes)
                return SHRINK_EMPTY;
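/*
 * Worked example, not part of the patch: with XA_CHUNK_SHIFT = 6 (64-slot
 * nodes) and a node holding 4GiB of 4KiB pages (pages = 1048576):
 *
 *      max_nodes = 1048576 >> (6 - 3) = 131072
 *
 * i.e. one tracked node per 8 pages. Per the comment above, 7 xa_nodes
 * fit in a page on 64-bit, so a node costs ~585 bytes against the
 * 8 * PAGE_SIZE those entries describe, giving the quoted ~1.8% bound.
 */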
         * no pages, so we expect to be able to remove them all and
         * delete and free the empty node afterwards.
         */
-       if (WARN_ON_ONCE(!node->exceptional))
+       if (WARN_ON_ONCE(!node->nr_values))
                goto out_invalid;
-       if (WARN_ON_ONCE(node->count != node->exceptional))
-               goto out_invalid;
-       for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
-               if (node->slots[i]) {
-                       if (WARN_ON_ONCE(!radix_tree_exceptional_entry(node->slots[i])))
-                               goto out_invalid;
-                       if (WARN_ON_ONCE(!node->exceptional))
-                               goto out_invalid;
-                       if (WARN_ON_ONCE(!mapping->nrexceptional))
-                               goto out_invalid;
-                       node->slots[i] = NULL;
-                       node->exceptional--;
-                       node->count--;
-                       mapping->nrexceptional--;
-               }
-       }
-       if (WARN_ON_ONCE(node->exceptional))
+       if (WARN_ON_ONCE(node->count != node->nr_values))
                goto out_invalid;
-       inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
-       __radix_tree_delete_node(&mapping->i_pages, node,
-                                workingset_lookup_update(mapping));
+       mapping->nrexceptional -= node->nr_values;
+       xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
+       xas.xa_offset = node->offset;
+       xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
+       xas_set_update(&xas, workingset_update_node);
+       /*
+        * We could store a shadow entry here which was the minimum of the
+        * shadow entries we were tracking ...
+        */
+       xas_store(&xas, NULL);
+       __inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
  
  out_invalid:
        xa_unlock_irq(&mapping->i_pages);
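/*
 * Worked example, not part of the patch: the xas.xa_* assignments above
 * park the cursor on this node's slot in its parent. For a leaf node
 * (node->shift == 0) that gives xa_shift = XA_CHUNK_SHIFT = 6 and
 * xa_offset = node->offset, so the single xas_store(&xas, NULL) clears
 * the one parent slot covering all 64 leaf slots, freeing the node and
 * its shadow entries in one step.
 */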