MEMCG_PERCPU_B,
        MEMCG_VMALLOC,
        MEMCG_KMEM,
+       MEMCG_ZSWAP_B,
+       MEMCG_ZSWAPPED,
        MEMCG_NR_STAT,
 };
 
        /* Range enforcement for interrupt charges */
        struct work_struct high_work;
 
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
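+       /* Hierarchical limit on zswap backend memory, in pages */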
+       unsigned long zswap_max;
+#endif
+
        unsigned long soft_limit;
 
        /* vmpressure notifications */
        return NULL;
 }
 
+static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+{
+}
+
 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
 {
 }
 void __memcg_kmem_uncharge_page(struct page *page, int order);
 
 struct obj_cgroup *get_obj_cgroup_from_current(void);
+struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
 
 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
 
 struct mem_cgroup *mem_cgroup_from_obj(void *p);
 
+static inline void count_objcg_event(struct obj_cgroup *objcg,
+                                    enum vm_event_item idx)
+{
+       struct mem_cgroup *memcg;
+
+       if (mem_cgroup_kmem_disabled())
+               return;
+
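+       /* objcg->memcg is reassigned on reparenting; pin it under RCU */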
+       rcu_read_lock();
+       memcg = obj_cgroup_memcg(objcg);
+       count_memcg_events(memcg, idx, 1);
+       rcu_read_unlock();
+}
+
 #else
 static inline bool mem_cgroup_kmem_disabled(void)
 {
 {
 }
 
+static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
+{
+       return NULL;
+}
+
 static inline bool memcg_kmem_enabled(void)
 {
        return false;
        return NULL;
 }
 
+static inline void count_objcg_event(struct obj_cgroup *objcg,
+                                    enum vm_event_item idx)
+{
+}
+
 #endif /* CONFIG_MEMCG_KMEM */
 
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
+void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
+void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
+#else
+static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
+{
+       return true;
+}
+static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
+                                          size_t size)
+{
+}
+static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
+                                            size_t size)
+{
+}
+#endif
+
 #endif /* _LINUX_MEMCONTROL_H */
 
        { "sock",                       MEMCG_SOCK                      },
        { "vmalloc",                    MEMCG_VMALLOC                   },
        { "shmem",                      NR_SHMEM                        },
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+       { "zswap",                      MEMCG_ZSWAP_B                   },
+       { "zswapped",                   MEMCG_ZSWAPPED                  },
+#endif
        { "file_mapped",                NR_FILE_MAPPED                  },
        { "file_dirty",                 NR_FILE_DIRTY                   },
        { "file_writeback",             NR_WRITEBACK                    },
 {
        switch (item) {
        case MEMCG_PERCPU_B:
+       case MEMCG_ZSWAP_B:
        case NR_SLAB_RECLAIMABLE_B:
        case NR_SLAB_UNRECLAIMABLE_B:
        case WORKINGSET_REFAULT_ANON:
        seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
                       memcg_events(memcg, PGLAZYFREED));
 
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+       seq_buf_printf(&s, "%s %lu\n", vm_event_name(ZSWPIN),
+                      memcg_events(memcg, ZSWPIN));
+       seq_buf_printf(&s, "%s %lu\n", vm_event_name(ZSWPOUT),
+                      memcg_events(memcg, ZSWPOUT));
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
                       memcg_events(memcg, THP_FAULT_ALLOC));
        return page_memcg_check(folio_page(folio, 0));
 }
 
+static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
+{
+       struct obj_cgroup *objcg = NULL;
+
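+       /* Walk up the tree; take a ref on the first live objcg found */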
+       for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
+               objcg = rcu_dereference(memcg->objcg);
+               if (objcg && obj_cgroup_tryget(objcg))
+                       break;
+               objcg = NULL;
+       }
+       return objcg;
+}
+
 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
 {
        struct obj_cgroup *objcg = NULL;
                memcg = active_memcg();
        else
                memcg = mem_cgroup_from_task(current);
-
-       for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
-               objcg = rcu_dereference(memcg->objcg);
-               if (objcg && obj_cgroup_tryget(objcg))
-                       break;
-               objcg = NULL;
-       }
+       objcg = __get_obj_cgroup_from_memcg(memcg);
        rcu_read_unlock();
+       return objcg;
+}
+
+struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
+{
+       struct obj_cgroup *objcg;
+
+       if (!memcg_kmem_enabled() || memcg_kmem_bypass())
+               return NULL;
+
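+       /* Kmem pages carry an objcg directly; others resolve it via the memcg */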
+       if (PageMemcgKmem(page)) {
+               objcg = __folio_objcg(page_folio(page));
+               obj_cgroup_get(objcg);
+       } else {
+               struct mem_cgroup *memcg;
 
+               rcu_read_lock();
+               memcg = __folio_memcg(page_folio(page));
+               if (memcg)
+                       objcg = __get_obj_cgroup_from_memcg(memcg);
+               else
+                       objcg = NULL;
+               rcu_read_unlock();
+       }
        return objcg;
 }
 
 
        page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
        memcg->soft_limit = PAGE_COUNTER_MAX;
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+       memcg->zswap_max = PAGE_COUNTER_MAX;
+#endif
        page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
        if (parent) {
                memcg->swappiness = mem_cgroup_swappiness(parent);
        { },    /* terminate */
 };
 
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+/**
+ * obj_cgroup_may_zswap - check if this cgroup can zswap
+ * @objcg: the object cgroup
+ *
+ * Check if the hierarchical zswap limit has been reached.
+ *
+ * This doesn't check for specific headroom, and it is not atomic
+ * either. But with zswap, the size of the allocation is only known
+ * once compression has occurred, and this optimistic pre-check avoids
+ * spending cycles on compression when there is already no room left
+ * or zswap is disabled altogether somewhere in the hierarchy.
+ */
+bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
+{
+       struct mem_cgroup *memcg, *original_memcg;
+       bool ret = true;
+
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+               return true;
+
+       original_memcg = get_mem_cgroup_from_objcg(objcg);
+       for (memcg = original_memcg; memcg != root_mem_cgroup;
+            memcg = parent_mem_cgroup(memcg)) {
+               unsigned long max = READ_ONCE(memcg->zswap_max);
+               unsigned long pages;
+
+               if (max == PAGE_COUNTER_MAX)
+                       continue;
+               if (max == 0) {
+                       ret = false;
+                       break;
+               }
+
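+               /* Stats flushing is costly; only pay for it where a limit is set */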
+               cgroup_rstat_flush(memcg->css.cgroup);
+               pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
+               if (pages < max)
+                       continue;
+               ret = false;
+               break;
+       }
+       mem_cgroup_put(original_memcg);
+       return ret;
+}
+
+/**
+ * obj_cgroup_charge_zswap - charge compression backend memory
+ * @objcg: the object cgroup
+ * @size: size of compressed object
+ *
+ * This forces the charge after obj_cgroup_may_zswap() allowed
+ * compression and storage in zswap for this cgroup to go ahead.
+ */
+void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
+{
+       struct mem_cgroup *memcg;
+
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+               return;
+
+       VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
+
+       /* PF_MEMALLOC context, charging must succeed */
+       if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
+               VM_WARN_ON_ONCE(1);
+
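+       /* Account the compressed bytes and the stored page to the memcg */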
+       rcu_read_lock();
+       memcg = obj_cgroup_memcg(objcg);
+       mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
+       mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
+       rcu_read_unlock();
+}
+
+/**
+ * obj_cgroup_uncharge_zswap - uncharge compression backend memory
+ * @objcg: the object cgroup
+ * @size: size of compressed object
+ *
+ * Uncharges zswap memory on page in.
+ */
+void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
+{
+       struct mem_cgroup *memcg;
+
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+               return;
+
+       obj_cgroup_uncharge(objcg, size);
+
+       rcu_read_lock();
+       memcg = obj_cgroup_memcg(objcg);
+       mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
+       mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
+       rcu_read_unlock();
+}
+
+static u64 zswap_current_read(struct cgroup_subsys_state *css,
+                             struct cftype *cft)
+{
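+       /* Flush per-CPU stat deltas so userspace reads a current value */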
+       cgroup_rstat_flush(css->cgroup);
+       return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B);
+}
+
+static int zswap_max_show(struct seq_file *m, void *v)
+{
+       return seq_puts_memcg_tunable(m,
+               READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
+}
+
+static ssize_t zswap_max_write(struct kernfs_open_file *of,
+                              char *buf, size_t nbytes, loff_t off)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+       unsigned long max;
+       int err;
+
+       buf = strstrip(buf);
+       err = page_counter_memparse(buf, "max", &max);
+       if (err)
+               return err;
+
+       xchg(&memcg->zswap_max, max);
+
+       return nbytes;
+}
+
+static struct cftype zswap_files[] = {
+       {
+               .name = "zswap.current",
+               .flags = CFTYPE_NOT_ON_ROOT,
+               .read_u64 = zswap_current_read,
+       },
+       {
+               .name = "zswap.max",
+               .flags = CFTYPE_NOT_ON_ROOT,
+               .seq_show = zswap_max_show,
+               .write = zswap_max_write,
+       },
+       { }     /* terminate */
+};
+#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
+
 /*
  * If mem_cgroup_swap_init() is implemented as a subsys_initcall()
  * instead of a core_initcall(), this could mean cgroup_memory_noswap still
 
        WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
        WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
-
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+       WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
+#endif
        return 0;
 }
 core_initcall(mem_cgroup_swap_init);
 
                unsigned long handle;
                unsigned long value;
        };
+       struct obj_cgroup *objcg;
 };
 
 struct zswap_header {
  */
 static void zswap_free_entry(struct zswap_entry *entry)
 {
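+       /* Return the zswap charge and drop the objcg ref taken at store time */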
+       if (entry->objcg) {
+               obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
+               obj_cgroup_put(entry->objcg);
+       }
        if (!entry->length)
                atomic_dec(&zswap_same_filled_pages);
        else {
        struct zswap_entry *entry, *dupentry;
        struct scatterlist input, output;
        struct crypto_acomp_ctx *acomp_ctx;
+       struct obj_cgroup *objcg = NULL;
+       struct zswap_pool *pool;
        int ret;
        unsigned int hlen, dlen = PAGE_SIZE;
        unsigned long handle, value;
                goto reject;
        }
 
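+       /* Don't spend cycles compressing for a cgroup over its zswap limit */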
+       objcg = get_obj_cgroup_from_page(page);
+       if (objcg && !obj_cgroup_may_zswap(objcg))
+               goto shrink;
+
        /* reclaim space if needed */
        if (zswap_is_full()) {
-               struct zswap_pool *pool;
-
                zswap_pool_limit_hit++;
                zswap_pool_reached_full = true;
-               pool = zswap_pool_last_get();
-               if (pool)
-                       queue_work(shrink_wq, &pool->shrink_work);
-               ret = -ENOMEM;
-               goto reject;
+               goto shrink;
        }
 
        if (zswap_pool_reached_full) {
        entry->length = dlen;
 
 insert_entry:
+       entry->objcg = objcg;
+       if (objcg) {
+               obj_cgroup_charge_zswap(objcg, entry->length);
+               /* Account before objcg ref is moved to tree */
+               count_objcg_event(objcg, ZSWPOUT);
+       }
+
        /* map */
        spin_lock(&tree->lock);
        do {
 freepage:
        zswap_entry_cache_free(entry);
 reject:
+       if (objcg)
+               obj_cgroup_put(objcg);
        return ret;
+
+shrink:
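+       /* Kick off asynchronous pool shrinking, then fail the store with -ENOMEM */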
+       pool = zswap_pool_last_get();
+       if (pool)
+               queue_work(shrink_wq, &pool->shrink_work);
+       ret = -ENOMEM;
+       goto reject;
 }
 
 /*
        BUG_ON(ret);
 stats:
        count_vm_event(ZSWPIN);
+       if (entry->objcg)
+               count_objcg_event(entry->objcg, ZSWPIN);
 freeentry:
        spin_lock(&tree->lock);
        zswap_entry_put(tree, entry);