/* The number of page slots additionally sharing those nodes */
 static unsigned long ksm_pages_sharing;
 
+/* The number of nodes in the unstable tree */
+static unsigned long ksm_pages_unshared;
+
+/* The number of rmap_items in use: to calculate pages_volatile */
+static unsigned long ksm_rmap_items;
+
 /* Limit on the number of unswappable pages used */
 static unsigned long ksm_max_kernel_pages;
 
 
 static inline struct rmap_item *alloc_rmap_item(void)
 {
-       return kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
+       struct rmap_item *rmap_item;
+
+       rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
+       if (rmap_item)
+               ksm_rmap_items++;
+       return rmap_item;
 }
 
 static inline void free_rmap_item(struct rmap_item *rmap_item)
 {
+       ksm_rmap_items--;
        rmap_item->mm = NULL;   /* debug safety */
        kmem_cache_free(rmap_item_cache, rmap_item);
 }
                BUG_ON(age > 1);
                if (!age)
                        rb_erase(&rmap_item->node, &root_unstable_tree);
+               ksm_pages_unshared--;
        }
 
        rmap_item->address &= PAGE_MASK;
        rb_link_node(&rmap_item->node, parent, new);
        rb_insert_color(&rmap_item->node, &root_unstable_tree);
 
+       ksm_pages_unshared++;
        return NULL;
 }
 
                if (!err) {
                        rb_erase(&tree_rmap_item->node, &root_unstable_tree);
                        tree_rmap_item->address &= ~NODE_FLAG;
+                       ksm_pages_unshared--;
+
                        /*
                         * If we fail to insert the page into the stable tree,
                         * we will have 2 virtual addresses that are pointing
 }
 KSM_ATTR_RO(pages_sharing);
 
+static ssize_t pages_unshared_show(struct kobject *kobj,
+                                  struct kobj_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%lu\n", ksm_pages_unshared);
+}
+KSM_ATTR_RO(pages_unshared);
+
+static ssize_t pages_volatile_show(struct kobject *kobj,
+                                  struct kobj_attribute *attr, char *buf)
+{
+       long ksm_pages_volatile;
+
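+       /* rmap_items whose pages sit in neither tree: typically changing too fast to merge */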
+       ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
+                               - ksm_pages_sharing - ksm_pages_unshared;
+       /*
+        * It was not worth any locking to calculate that statistic,
+        * but it might therefore sometimes be negative: conceal that.
+        */
+       if (ksm_pages_volatile < 0)
+               ksm_pages_volatile = 0;
+       return sprintf(buf, "%ld\n", ksm_pages_volatile);
+}
+KSM_ATTR_RO(pages_volatile);
+
+static ssize_t full_scans_show(struct kobject *kobj,
+                              struct kobj_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%lu\n", ksm_scan.seqnr);
+}
+KSM_ATTR_RO(full_scans);
+
 static struct attribute *ksm_attrs[] = {
        &sleep_millisecs_attr.attr,
        &pages_to_scan_attr.attr,
        &max_kernel_pages_attr.attr,
        &pages_shared_attr.attr,
        &pages_sharing_attr.attr,
+       &pages_unshared_attr.attr,
+       &pages_volatile_attr.attr,
+       &full_scans_attr.attr,
        NULL,
 };