hugetlbfs page which is *not* counted in "RSS" or "PSS" field for historical
 reasons. And these are not included in {Shared,Private}_{Clean,Dirty} field.
 "Swap" shows how much would-be-anonymous memory is also used, but out on swap.
-"SwapPss" shows proportional swap share of this mapping.
+For shmem mappings, "Swap" also includes the size of the mapped (and not
+replaced by copy-on-write) part of the underlying shmem object that is out
+on swap.
+"SwapPss" shows proportional swap share of this mapping. Unlike "Swap", this
+does not take into account swapped-out pages of underlying shmem objects.
 "Locked" indicates whether the mapping is locked in memory or not.
 
 "VmFlags" field deserves a separate description. This member represents the kernel
 
        unsigned long private_hugetlb;
        u64 pss;
        u64 swap_pss;
+       bool check_shmem_swap;
 };
 
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
        }
 }
 
+#ifdef CONFIG_SHMEM
+/*
+ * Return PAGE_SIZE if the shmem page cache entry backing @addr in @vma
+ * is currently out on swap, 0 otherwise.  Callers only reach this for
+ * file-backed vmas whose mapping passed shmem_mapping() — see the
+ * check_shmem_swap setup in the caller.
+ */
+static unsigned long smaps_shmem_swap(struct vm_area_struct *vma,
+               unsigned long addr)
+{
+       struct page *page;
+
+       /* May return a real page or an exceptional (swap) entry */
+       page = find_get_entry(vma->vm_file->f_mapping,
+                                       linear_page_index(vma, addr));
+       if (!page)
+               return 0;
+
+       /*
+        * Exceptional entries are not refcounted, so no
+        * page_cache_release() is needed on this path.
+        */
+       if (radix_tree_exceptional_entry(page))
+               return PAGE_SIZE;
+
+       /* A resident page: drop the reference taken by find_get_entry() */
+       page_cache_release(page);
+       return 0;
+
+}
+
+/*
+ * Page-walk hole callback: for ranges with no page tables at all,
+ * account swapped-out shmem pages into mss->swap one page at a time.
+ * Installed only for shmem-backed vmas (see the caller).
+ */
+static int smaps_pte_hole(unsigned long addr, unsigned long end,
+               struct mm_walk *walk)
+{
+       struct mem_size_stats *mss = walk->private;
+
+       while (addr < end) {
+               mss->swap += smaps_shmem_swap(walk->vma, addr);
+               addr += PAGE_SIZE;
+       }
+
+       return 0;
+}
+#else
+/* !CONFIG_SHMEM: shmem objects cannot be swapped, nothing to account */
+static unsigned long smaps_shmem_swap(struct vm_area_struct *vma,
+               unsigned long addr)
+{
+       return 0;
+}
+#endif
+
 static void smaps_pte_entry(pte_t *pte, unsigned long addr,
                struct mm_walk *walk)
 {
                        }
                } else if (is_migration_entry(swpent))
                        page = migration_entry_to_page(swpent);
+       } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
+                                                       && pte_none(*pte))) {
+               mss->swap += smaps_shmem_swap(vma, addr);
        }
 
        if (!page)
        };
 
        memset(&mss, 0, sizeof mss);
+
+#ifdef CONFIG_SHMEM
+       if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
+               mss.check_shmem_swap = true;
+               smaps_walk.pte_hole = smaps_pte_hole;
+       }
+#endif
+
        /* mmap_sem is held in m_start */
        walk_page_vma(vma, &smaps_walk);