@@ ... @@ __btrfs_add_ordered_extent
                set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
 
        /* one ref for the tree */
-       atomic_set(&entry->refs, 1);
+       refcount_set(&entry->refs, 1);
        init_waitqueue_head(&entry->wait);
        INIT_LIST_HEAD(&entry->list);
        INIT_LIST_HEAD(&entry->root_extent_list);
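
For reference, refcount_set() here replaces atomic_set() for the object's
initial reference (the "one ref for the tree" noted in the comment). Unlike
atomic_t, refcount_t saturates instead of wrapping on overflow and WARNs on
increment-from-zero, turning refcounting bugs from silent use-after-free into
loud warnings. A minimal sketch of the same lifecycle on a hypothetical "foo"
object (not btrfs code):

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refs;
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	/* one ref for whoever holds f in a tree/list, mirroring the
	 * "one ref for the tree" comment in the hunk above */
	refcount_set(&f->refs, 1);
	return f;
}
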
@@ ... @@ btrfs_dec_test_first_ordered_pending
 out:
        if (!ret && cached && entry) {
                *cached = entry;
-               atomic_inc(&entry->refs);
+               refcount_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
@@ ... @@ btrfs_dec_test_ordered_pending
 out:
        if (!ret && cached && entry) {
                *cached = entry;
-               atomic_inc(&entry->refs);
+               refcount_inc(&entry->refs);
        }
        spin_unlock_irqrestore(&tree->lock, flags);
        return ret == 0;
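
Both dec_test helpers above hand a cached pointer back to the caller and take
an extra reference for it while still holding tree->lock, which is what keeps
the count from being zero at that point. refcount_inc() behaves like
atomic_inc() on the hot path but WARNs (and saturates) if it would resurrect a
zero count. Continuing the hypothetical foo sketch:

static struct foo *foo_grab_locked(struct foo *f)
{
	/* caller holds the lock that keeps f findable, so the count
	 * cannot already be zero; refcount_inc() WARNs if that
	 * invariant is ever violated */
	refcount_inc(&f->refs);
	return f;
}
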
@@ ... @@ btrfs_get_logged_extents
                if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
                        continue;
                list_add(&ordered->log_list, logged_list);
-               atomic_inc(&ordered->refs);
+               refcount_inc(&ordered->refs);
        }
        spin_unlock_irq(&tree->lock);
 }
@@ ... @@ btrfs_put_ordered_extent
 
        trace_btrfs_ordered_extent_put(entry->inode, entry);
 
-       if (atomic_dec_and_test(&entry->refs)) {
+       if (refcount_dec_and_test(&entry->refs)) {
                ASSERT(list_empty(&entry->log_list));
                ASSERT(list_empty(&entry->trans_list));
                ASSERT(list_empty(&entry->root_extent_list));
 
@@ ... @@ btrfs_wait_ordered_extents
                list_move_tail(&ordered->root_extent_list,
                               &root->ordered_extents);
-               atomic_inc(&ordered->refs);
+               refcount_inc(&ordered->refs);
                spin_unlock(&root->ordered_extent_lock);
 
                btrfs_init_work(&ordered->flush_work,
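
The hunk above pins each ordered extent before queueing its flush work, so the
extent cannot be freed while the async worker still owns it; the worker drops
that reference when it finishes. Sketched on foo (the foo_flush wrapper and
its field names are hypothetical; INIT_WORK/schedule_work are the generic
workqueue API):

#include <linux/workqueue.h>

struct foo_flush {
	struct foo *f;
	struct work_struct work;
};

static void foo_flush_fn(struct work_struct *work)
{
	struct foo_flush *fl = container_of(work, struct foo_flush, work);

	/* ... flush fl->f ... */
	foo_put(fl->f);		/* drop the ref taken before queueing */
	kfree(fl);
}

static void foo_flush_async(struct foo *f, struct foo_flush *fl)
{
	refcount_inc(&f->refs);	/* this ref is now owned by the worker */
	fl->f = f;
	INIT_WORK(&fl->work, foo_flush_fn);
	schedule_work(&fl->work);
}
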
@@ ... @@ btrfs_lookup_ordered_extent
        if (!offset_in_entry(entry, file_offset))
                entry = NULL;
        if (entry)
-               atomic_inc(&entry->refs);
+               refcount_inc(&entry->refs);
 out:
        spin_unlock_irq(&tree->lock);
        return entry;
@@ ... @@ btrfs_lookup_ordered_range
        }
 out:
        if (entry)
-               atomic_inc(&entry->refs);
+               refcount_inc(&entry->refs);
        spin_unlock_irq(&tree->lock);
        return entry;
 }
@@ ... @@ btrfs_lookup_first_ordered_extent
                goto out;
 
        entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-       atomic_inc(&entry->refs);
+       refcount_inc(&entry->refs);
 out:
        spin_unlock_irq(&tree->lock);
        return entry;
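
All of the lookup helpers above take their extra reference while holding
tree->lock, which is why plain refcount_inc() is safe. If a reference ever had
to be taken without such a lock, the usual kernel pattern is
refcount_inc_not_zero(), which fails instead of WARNing when the object is
already on its way out. For completeness, on the foo sketch:

static struct foo *foo_tryget(struct foo *f)
{
	/* fails, rather than WARNs, when the object is already
	 * being torn down */
	if (!f || !refcount_inc_not_zero(&f->refs))
		return NULL;
	return f;
}
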