        atomic_t                 cl_lru_in_list;
        struct list_head         cl_lru_list; /* lru page list */
        spinlock_t               cl_lru_list_lock; /* page list protector */
+       atomic_t                 cl_unstable_count; /* # of unstable pages */
 
        /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
        atomic_t             cl_destroy_in_flight;
 
        atomic_set(&cli->cl_lru_in_list, 0);
        INIT_LIST_HEAD(&cli->cl_lru_list);
        spin_lock_init(&cli->cl_lru_list_lock);
+       atomic_set(&cli->cl_unstable_count, 0);
 
        init_waitqueue_head(&cli->cl_destroy_waitq);
        atomic_set(&cli->cl_destroy_in_flight, 0);
 
 }
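The new counter sits beside the existing LRU bookkeeping in struct client_obd and is zeroed in the client setup path before any I/O can be issued, so a plain atomic_set() with no locking suffices. A natural companion, sketched here with hypothetical placement since it is not part of these hunks, is a teardown-time check that every increment was matched by a decrement:

        /* Hypothetical sanity check for the client cleanup path: all
         * unstable pages must have been committed (and un-accounted)
         * before the client is torn down.
         */
        LASSERT(atomic_read(&cli->cl_unstable_count) == 0);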
 LUSTRE_RW_ATTR(max_pages_per_rpc);
 
+static ssize_t unstable_stats_show(struct kobject *kobj,
+                                  struct attribute *attr,
+                                  char *buf)
+{
+       struct obd_device *dev = container_of(kobj, struct obd_device,
+                                             obd_kobj);
+       struct client_obd *cli = &dev->u.cli;
+       int pages, mb;
+
+       pages = atomic_read(&cli->cl_unstable_count);
+       mb = (pages * PAGE_SIZE) >> 20;
+
+       return sprintf(buf, "unstable_pages: %8d\n"
+                      "unstable_mb:    %8d\n", pages, mb);
+}
+LUSTRE_RO_ATTR(unstable_stats);
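unstable_stats_show() reads the counter locklessly, which is acceptable for a statistics file, and derives the megabyte figure by shifting the byte count down by 20. Once wired into the attribute list below, the file is read-only and renders two aligned fields; with nothing outstanding the output would look like this (the file lives under the OSC device's sysfs directory, /sys/fs/lustre/osc/<target>/unstable_stats in the staging layout):

        unstable_pages:        0
        unstable_mb:           0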
+
 LPROC_SEQ_FOPS_RO_TYPE(osc, connect_flags);
 LPROC_SEQ_FOPS_RO_TYPE(osc, server_uuid);
 LPROC_SEQ_FOPS_RO_TYPE(osc, conn_uuid);
        &lustre_attr_max_pages_per_rpc.attr,
        &lustre_attr_max_rpcs_in_flight.attr,
        &lustre_attr_resend_count.attr,
+       &lustre_attr_unstable_stats.attr,
        NULL,
 };
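For reference, LUSTRE_RO_ATTR() is what ties the show function above to the lustre_attr_unstable_stats symbol named in this list: it emits a static struct lustre_attr bound to <name>_show with read-only permissions. A sketch of the macro's shape, assuming the staging-tree definition:

        #define LUSTRE_ATTR(name, mode, show, store)                    \
        static struct lustre_attr lustre_attr_##name =                  \
                __ATTR(name, mode, show, store)

        #define LUSTRE_RO_ATTR(name) LUSTRE_ATTR(name, 0444, name##_show, NULL)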
 
 
        atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
        LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
 
+       atomic_sub(page_count, &cli->cl_unstable_count);
+       LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
+
        atomic_sub(page_count, &obd_unstable_pages);
        LASSERT(atomic_read(&obd_unstable_pages) >= 0);
 
        LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
        atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
 
+       LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
+       atomic_add(page_count, &cli->cl_unstable_count);
+
        LASSERT(atomic_read(&obd_unstable_pages) >= 0);
        atomic_add(page_count, &obd_unstable_pages);
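Note the asymmetry between the two paths: the decrement side asserts after atomic_sub(), while the increment side asserts before atomic_add(). In both cases the follow-up atomic_read() is racy under concurrent updates, so these LASSERTs can only catch gross accounting bugs. A race-free variant for the decrement side, sketched here rather than taken from the patch, would assert on the value the operation itself produced:

        /* atomic_sub_return() checks the result of this exact update,
         * so a concurrent increment cannot mask an underflow the way
         * it can with a separate atomic_read().
         */
        LASSERT(atomic_sub_return(page_count, &cli->cl_unstable_count) >= 0);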