static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
 {
-       atomic_set(&l_ctx->count, 1);
+       refcount_set(&l_ctx->count, 1);
        l_ctx->lockowner = current->files;
        INIT_LIST_HEAD(&l_ctx->list);
        atomic_set(&l_ctx->io_count, 0);
@@ ... @@ static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
        do {
                if (pos->lockowner != current->files)
                        continue;
-               atomic_inc(&pos->count);
+               refcount_inc(&pos->count);
                return pos;
        } while ((pos = list_entry(pos->list.next, typeof(*pos), list)) != head);
        return NULL;
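
For context, a minimal illustrative sketch of the same init-and-lookup pattern with the refcount_t API, using a hypothetical "foo" object in place of nfs_lock_context (none of this is part of the patch). refcount_set() seeds the count at 1 for the creator's reference, and refcount_inc() takes an additional reference; unlike atomic_inc() it saturates instead of overflowing and WARNs if it ever sees a zero count, which is the hardening gain of the conversion.

#include <linux/list.h>
#include <linux/refcount.h>

/* Hypothetical object; nfs_lock_context plays this role in the patch. */
struct foo {
        refcount_t count;               /* replaces an atomic_t use count */
        void *owner;                    /* analogue of lockowner (current->files) */
        struct list_head list;          /* protected by the caller's lock */
};

static void foo_init(struct foo *f, void *owner)
{
        refcount_set(&f->count, 1);     /* one reference for the creator */
        f->owner = owner;
        INIT_LIST_HEAD(&f->list);
}

/* Caller is assumed to hold whatever lock protects @head. */
static struct foo *foo_find_get(struct list_head *head, void *owner)
{
        struct foo *pos;

        list_for_each_entry(pos, head, list) {
                if (pos->owner != owner)
                        continue;
                refcount_inc(&pos->count);      /* hand a new reference to the caller */
                return pos;
        }
        return NULL;
}
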
@@ ... @@ void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
        struct nfs_open_context *ctx = l_ctx->open_context;
        struct inode *inode = d_inode(ctx->dentry);
 
-       if (!atomic_dec_and_lock(&l_ctx->count, &inode->i_lock))
+       if (!refcount_dec_and_lock(&l_ctx->count, &inode->i_lock))
                return;
        list_del(&l_ctx->list);
        spin_unlock(&inode->i_lock);
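
Continuing the hypothetical foo sketch, the put side relies on refcount_dec_and_lock(), which keeps the contract of the atomic_dec_and_lock() it replaces: it drops one reference and returns false while other references remain; only for the final reference does it return true with the spinlock already held, so unlinking the object is atomic with the last put.

#include <linux/slab.h>
#include <linux/spinlock.h>

static void foo_put(struct foo *f, spinlock_t *lock)
{
        if (!refcount_dec_and_lock(&f->count, lock))
                return;                 /* other references remain */
        list_del(&f->list);             /* safe: @lock is held */
        spin_unlock(lock);
        kfree(f);
}
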
@@ ... @@
 struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
 {
        if (ctx != NULL)
-               atomic_inc(&ctx->lock_context.count);
+               refcount_inc(&ctx->lock_context.count);
        return ctx;
 }
 EXPORT_SYMBOL_GPL(get_nfs_open_context);
@@ ... @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
        struct super_block *sb = ctx->dentry->d_sb;
 
        if (!list_empty(&ctx->list)) {
-               if (!atomic_dec_and_lock(&ctx->lock_context.count, &inode->i_lock))
+               if (!refcount_dec_and_lock(&ctx->lock_context.count, &inode->i_lock))
                        return;
                list_del(&ctx->list);
                spin_unlock(&inode->i_lock);
-       } else if (!atomic_dec_and_test(&ctx->lock_context.count))
+       } else if (!refcount_dec_and_test(&ctx->lock_context.count))
                return;
        if (inode != NULL)
                NFS_PROTO(inode)->close_context(ctx, is_sync);
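
The open-context put above has two paths, mirrored below with the same hypothetical foo object (illustrative only): a context still linked on the per-inode list must take the inode's i_lock for the final put so list_del() is serialised, while one that was never added to the list can drop its last reference with plain refcount_dec_and_test(), which just reports whether the count reached zero and needs no lock.

static void foo_put_maybe_linked(struct foo *f, spinlock_t *lock)
{
        if (!list_empty(&f->list)) {
                /* Linked: the final put must hold @lock across list_del(). */
                if (!refcount_dec_and_lock(&f->count, lock))
                        return;
                list_del(&f->list);
                spin_unlock(lock);
        } else if (!refcount_dec_and_test(&f->count))
                return;                 /* never linked and not the last reference */

        kfree(f);
}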