fsnotify_get_mark(fsn_mark);
                /* One ref for being in the idr, one ref we just took */
-               BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
+               BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
        }
 
        return i_mark;
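For context, the idr lookup above hands back the mark with one extra reference taken while the idr still holds its own, hence the `>= 2` assertion. A minimal sketch of that lookup-and-get pattern, using a hypothetical struct obj rather than the real inotify types:

#include <linux/idr.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

struct obj {
	refcount_t refcnt;	/* one reference is owned by the idr */
};

/* Look up an object by id and return it with an extra reference held. */
static struct obj *obj_find_get(struct idr *idr, spinlock_t *idr_lock, int id)
{
	struct obj *o;

	spin_lock(idr_lock);
	o = idr_find(idr, id);
	if (o) {
		refcount_inc(&o->refcnt);
		/* the idr's reference plus the one we just took */
		WARN_ON_ONCE(refcount_read(&o->refcnt) < 2);
	}
	spin_unlock(idr_lock);
	return o;
}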
         * One ref for being in the idr
         * one ref grabbed by inotify_idr_find
         */
-       if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 2)) {
+       if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
                printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
                         __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
                /* we can't really recover with bad ref cnting.. */
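Note that refcount_read(), like atomic_read() before it, is a plain load of the counter and is only good for diagnostics such as the error report above; lifetime decisions must still go through refcount_dec_and_test(). A sketch of the safe use, with hypothetical names:

#include <linux/printk.h>
#include <linux/refcount.h>

struct obj {
	refcount_t refcnt;
};

/* Fine: purely informational, the value may be stale by the time it prints. */
static void obj_report(struct obj *o)
{
	pr_err("%s: obj=%p refcnt=%u\n", __func__, o, refcount_read(&o->refcnt));
}

/*
 * Wrong: freeing based on a read races with concurrent puts; only the
 * caller that sees refcount_dec_and_test() return true may free.
 */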
 
 
 void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
-       WARN_ON_ONCE(!atomic_read(&mark->refcnt));
-       atomic_inc(&mark->refcnt);
+       WARN_ON_ONCE(!refcount_read(&mark->refcnt));
+       refcount_inc(&mark->refcnt);
 }
 
 static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
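The explicit WARN_ON_ONCE preserves the old check for a get on a mark whose count has already reached zero; refcount_inc() itself also warns and saturates in that case instead of silently wrapping, which is the hardening this conversion buys. A minimal get helper over a hypothetical object:

#include <linux/refcount.h>

struct obj {
	refcount_t refcnt;
};

static void obj_get(struct obj *o)
{
	/*
	 * Unlike atomic_inc(), refcount_inc() WARNs and saturates when the
	 * count is 0 (a likely use-after-free) or at its ceiling, so the
	 * error is reported instead of turning into a wrapped counter.
	 */
	refcount_inc(&o->refcnt);
}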
 
        /* Catch marks that were actually never attached to object */
        if (!mark->connector) {
-               if (atomic_dec_and_test(&mark->refcnt))
+               if (refcount_dec_and_test(&mark->refcnt))
                        fsnotify_final_mark_destroy(mark);
                return;
        }
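A mark that was never attached has no list presence to worry about, so this branch is the plain put pattern: drop the reference and let the final put destroy the object. Sketched with a hypothetical object:

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t refcnt;
};

static void obj_put(struct obj *o)
{
	/* only the caller that drops the last reference sees true */
	if (refcount_dec_and_test(&o->refcnt))
		kfree(o);
}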
         * We have to be careful so that traversals of obj_list under lock can
         * safely grab mark reference.
         */
-       if (!atomic_dec_and_lock(&mark->refcnt, &mark->connector->lock))
+       if (!refcount_dec_and_lock(&mark->refcnt, &mark->connector->lock))
                return;
 
        conn = mark->connector;
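refcount_dec_and_lock() keeps the semantics of atomic_dec_and_lock(): the connector lock is taken only when the count drops to zero, and it is held on return, so a list walker that already holds the lock can still safely take a reference on any mark whose count is non-zero. The generic unlink-on-final-put shape, with hypothetical types:

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct bucket {
	spinlock_t lock;
	struct list_head items;
};

struct item {
	refcount_t refcnt;
	struct list_head list;
	struct bucket *bucket;
};

static void item_put(struct item *it)
{
	struct bucket *b = it->bucket;

	/* Only the final put acquires the lock; the lock is held on return. */
	if (!refcount_dec_and_lock(&it->refcnt, &b->lock))
		return;

	list_del_init(&it->list);
	spin_unlock(&b->lock);
	kfree(it);
}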
        if (!mark)
                return true;
 
-       if (atomic_inc_not_zero(&mark->refcnt)) {
+       if (refcount_inc_not_zero(&mark->refcnt)) {
                spin_lock(&mark->lock);
                if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
                        /* mark is attached, group is still alive then */
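This is the lookup-side counterpart: under SRCU the mark may already be on its way out, so a reference is taken only if the count is still non-zero. A minimal sketch of the same pattern under plain RCU, with a hypothetical lookup helper:

#include <linux/rcupdate.h>
#include <linux/refcount.h>

struct obj {
	refcount_t refcnt;
};

/* Hypothetical lookup that may return an object already being torn down. */
struct obj *table_lookup_rcu(int key);

static struct obj *obj_lookup_get(int key)
{
	struct obj *o;

	rcu_read_lock();
	o = table_lookup_rcu(key);
	/* Take a reference only if the object is not already dying. */
	if (o && !refcount_inc_not_zero(&o->refcnt))
		o = NULL;
	rcu_read_unlock();
	return o;
}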
 
        WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
        WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
-                    atomic_read(&mark->refcnt) < 1 +
+                    refcount_read(&mark->refcnt) < 1 +
                        !!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));
 
        spin_lock(&mark->lock);
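The lower bound here encodes the detach precondition: outside an SRCU read section the caller must hold its own reference, and the object list still holds one while FSNOTIFY_MARK_FLAG_ATTACHED is set, hence `1 + !!(attached)`. A throwaway sketch of that arithmetic, purely illustrative:

#include <linux/refcount.h>

/*
 * Hypothetical helper: minimum references a detaching caller must be able
 * to rely on when it is not covered by an SRCU read section.
 */
static bool detach_refs_ok(const refcount_t *refcnt, bool attached)
{
	return refcount_read(refcnt) >= 1U + (attached ? 1U : 0U);
}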
 {
        memset(mark, 0, sizeof(*mark));
        spin_lock_init(&mark->lock);
-       atomic_set(&mark->refcnt, 1);
+       refcount_set(&mark->refcnt, 1);
        fsnotify_get_group(group);
        mark->group = group;
 }
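Initialisation is the one place a raw store of the counter is appropriate: a freshly created object starts at 1, owned by the creator (REFCOUNT_INIT(1) covers the static case). A sketch with a hypothetical allocator:

#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	spinlock_t lock;
	refcount_t refcnt;
};

static struct obj *obj_alloc(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;
	spin_lock_init(&o->lock);
	refcount_set(&o->refcnt, 1);	/* the creator owns the first reference */
	return o;
}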