        unsigned                devices_max_used;
        struct list_head        cached_devs;
        uint64_t                cached_dev_sectors;
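+       /* total dirty sectors on all flash-only volumes in this cache set */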
+       atomic_long_t           flash_dev_dirty_sectors;
        struct closure          caching;
 
        struct closure          sb_write;
 
 {
        struct bcache_device *d = container_of(cl, struct bcache_device, cl);
        mutex_lock(&bch_register_lock);
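+       /* drop this volume's dirty sectors from the cache-set-wide counter */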
+       atomic_long_sub(bcache_dev_sectors_dirty(d),
+                       &d->c->flash_dev_dirty_sectors);
        bcache_device_free(d);
        mutex_unlock(&bch_register_lock);
        kobject_put(&d->kobj);
 
         * flash-only devices
         */
        uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
-                               bcache_flash_devs_sectors_dirty(c);
+                               atomic_long_read(&c->flash_dev_dirty_sectors);
 
        /*
         * Unfortunately there is no control of global dirty data.  If the
        if (!d)
                return;
 
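+       /* flash-only volumes account their dirty sector changes at the set level */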
+       if (UUID_FLASH_ONLY(&c->uuids[inode]))
+               atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
+
        stripe = offset_to_stripe(d, offset);
        stripe_offset = offset & (d->stripe_size - 1);
 
 
        return ret;
 }
 
-static inline uint64_t  bcache_flash_devs_sectors_dirty(struct cache_set *c)
-{
-       uint64_t i, ret = 0;
-
-       mutex_lock(&bch_register_lock);
-
-       for (i = 0; i < c->devices_max_used; i++) {
-               struct bcache_device *d = c->devices[i];
-
-               if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
-                       continue;
-               ret += bcache_dev_sectors_dirty(d);
-       }
-
-       mutex_unlock(&bch_register_lock);
-
-       return ret;
-}
-
 static inline unsigned offset_to_stripe(struct bcache_device *d,
                                        uint64_t offset)
 {