 #include <linux/mutex.h>
 #include <linux/rbtree.h>
 #include <linux/rwsem.h>
+#include <linux/refcount.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
        struct semaphore        sb_write_mutex;
 
        /* Refcount on the cache set. Always nonzero when we're caching. */
-       atomic_t                count;
+       refcount_t              count;
        struct work_struct      detach;
 
        /*
 
 static inline void cached_dev_put(struct cached_dev *dc)
 {
-       if (atomic_dec_and_test(&dc->count))
+       if (refcount_dec_and_test(&dc->count))
                schedule_work(&dc->detach);
 }
 
 static inline bool cached_dev_get(struct cached_dev *dc)
 {
-       if (!atomic_inc_not_zero(&dc->count))
+       if (!refcount_inc_not_zero(&dc->count))
                return false;
 
        /* Paired with the mb in cached_dev_attach */
 
        closure_init_stack(&cl);
 
        BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
-       BUG_ON(atomic_read(&dc->count));
+       BUG_ON(refcount_read(&dc->count));
 
        mutex_lock(&bch_register_lock);
 
         * dc->c must be set before dc->count != 0 - paired with the mb in
         * cached_dev_get()
         */
-       atomic_set(&dc->count, 1);
+       refcount_set(&dc->count, 1);
 
        /* Block writeback thread, but spawn it */
        down_write(&dc->writeback_lock);
        if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
                bch_sectors_dirty_init(&dc->disk);
                atomic_set(&dc->has_dirty, 1);
-               atomic_inc(&dc->count);
+               refcount_inc(&dc->count);
                bch_writeback_queue(dc);
        }
 
 
 {
        if (!atomic_read(&dc->has_dirty) &&
            !atomic_xchg(&dc->has_dirty, 1)) {
-               atomic_inc(&dc->count);
+               refcount_inc(&dc->count);
 
                if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
                        SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);