* This is called from xv_malloc/xv_free path, so it
  * needs to be fast.
  */
-static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type)
+static void *get_ptr_atomic(struct page *page, u16 offset)
 {
        unsigned char *base;
 
-       base = kmap_atomic(page, type);
+       base = kmap_atomic(page);
        return base + offset;
 }
 
-static void put_ptr_atomic(void *ptr, enum km_type type)
+static void put_ptr_atomic(void *ptr)
 {
-       kunmap_atomic(ptr, type);
+       kunmap_atomic(ptr);
 }
 
 static u32 get_blockprev(struct block_header *block)
 
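For reference, a minimal caller-side sketch of the calling-convention change this patch applies throughout: the enum km_type slot argument is dropped and kmap_atomic()/kunmap_atomic() manage the per-CPU slot implicitly. The function name and includes below are illustrative only, not part of the driver.

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: a single atomic mapping, before vs. after this change. */
static void zero_page_region(struct page *page, unsigned int offset,
			     unsigned int len)
{
	void *mem = kmap_atomic(page);	/* was: kmap_atomic(page, KM_USER0) */

	memset(mem + offset, 0, len);
	kunmap_atomic(mem);		/* was: kunmap_atomic(mem, KM_USER0) */
}
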
        if (block->link.next_page) {
                nextblock = get_ptr_atomic(block->link.next_page,
-                                       block->link.next_offset, KM_USER1);
+                                       block->link.next_offset);
                nextblock->link.prev_page = page;
                nextblock->link.prev_offset = offset;
-               put_ptr_atomic(nextblock, KM_USER1);
+               put_ptr_atomic(nextblock);
                /* If there was a next page then the free bits are set. */
                return;
        }
 
        if (block->link.prev_page) {
                tmpblock = get_ptr_atomic(block->link.prev_page,
-                               block->link.prev_offset, KM_USER1);
+                               block->link.prev_offset);
                tmpblock->link.next_page = block->link.next_page;
                tmpblock->link.next_offset = block->link.next_offset;
-               put_ptr_atomic(tmpblock, KM_USER1);
+               put_ptr_atomic(tmpblock);
        }
 
        if (block->link.next_page) {
                tmpblock = get_ptr_atomic(block->link.next_page,
-                               block->link.next_offset, KM_USER1);
+                               block->link.next_offset);
                tmpblock->link.prev_page = block->link.prev_page;
                tmpblock->link.prev_offset = block->link.prev_offset;
-               put_ptr_atomic(tmpblock, KM_USER1);
+               put_ptr_atomic(tmpblock);
        }
 
        /* Is this block at the head of the freelist? */
                if (pool->freelist[slindex].page) {
                        struct block_header *tmpblock;
                        tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
-                                       pool->freelist[slindex].offset,
-                                       KM_USER1);
+                                       pool->freelist[slindex].offset);
                        tmpblock->link.prev_page = NULL;
                        tmpblock->link.prev_offset = 0;
-                       put_ptr_atomic(tmpblock, KM_USER1);
+                       put_ptr_atomic(tmpblock);
                } else {
                        /* This freelist bucket is empty */
                        __clear_bit(slindex % BITS_PER_LONG,
        stat_inc(&pool->total_pages);
 
        spin_lock(&pool->lock);
-       block = get_ptr_atomic(page, 0, KM_USER0);
+       block = get_ptr_atomic(page, 0);
 
        block->size = PAGE_SIZE - XV_ALIGN;
        set_flag(block, BLOCK_FREE);
 
        insert_block(pool, page, 0, block);
 
-       put_ptr_atomic(block, KM_USER0);
+       put_ptr_atomic(block);
        spin_unlock(&pool->lock);
 
        return 0;
                return -ENOMEM;
        }
 
-       block = get_ptr_atomic(*page, *offset, KM_USER0);
+       block = get_ptr_atomic(*page, *offset);
 
        remove_block(pool, *page, *offset, block, index);
 
        block->size = origsize;
        clear_flag(block, BLOCK_FREE);
 
-       put_ptr_atomic(block, KM_USER0);
+       put_ptr_atomic(block);
        spin_unlock(&pool->lock);
 
        *offset += XV_ALIGN;
 
        spin_lock(&pool->lock);
 
-       page_start = get_ptr_atomic(page, 0, KM_USER0);
+       page_start = get_ptr_atomic(page, 0);
        block = (struct block_header *)((char *)page_start + offset);
 
        /* Catch double free bugs */
 
        /* No used objects in this page. Free it. */
        if (block->size == PAGE_SIZE - XV_ALIGN) {
-               put_ptr_atomic(page_start, KM_USER0);
+               put_ptr_atomic(page_start);
                spin_unlock(&pool->lock);
 
                __free_page(page);
                set_blockprev(tmpblock, offset);
        }
 
-       put_ptr_atomic(page_start, KM_USER0);
+       put_ptr_atomic(page_start);
        spin_unlock(&pool->lock);
 }
 EXPORT_SYMBOL_GPL(xv_free);
 
                goto out;
        }
 
-       obj = kmap_atomic(page, KM_USER0) + offset;
+       obj = kmap_atomic(page) + offset;
        clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
-       kunmap_atomic(obj, KM_USER0);
+       kunmap_atomic(obj);
 
        xv_free(zram->mem_pool, page, offset);
        if (clen <= PAGE_SIZE / 2)
        struct page *page = bvec->bv_page;
        void *user_mem;
 
-       user_mem = kmap_atomic(page, KM_USER0);
+       user_mem = kmap_atomic(page);
        memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-       kunmap_atomic(user_mem, KM_USER0);
+       kunmap_atomic(user_mem);
 
        flush_dcache_page(page);
 }
        struct page *page = bvec->bv_page;
        unsigned char *user_mem, *cmem;
 
-       user_mem = kmap_atomic(page, KM_USER0);
-       cmem = kmap_atomic(zram->table[index].page, KM_USER1);
+       user_mem = kmap_atomic(page);
+       cmem = kmap_atomic(zram->table[index].page);
 
        memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
-       kunmap_atomic(cmem, KM_USER1);
-       kunmap_atomic(user_mem, KM_USER0);
+       kunmap_atomic(cmem);
+       kunmap_atomic(user_mem);
 
        flush_dcache_page(page);
 }
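
The hunk above maps two pages at once (the bvec page and the zram table page). With the implicit-slot API, atomic mappings nest like a small per-CPU stack, which is why cmem is released before user_mem. A minimal sketch of that nesting, assuming only the generic kmap_atomic()/kunmap_atomic() calls; the function name is illustrative:

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: nested atomic mappings are released in reverse order. */
static void copy_between_pages(struct page *dst, struct page *src,
			       unsigned int offset, unsigned int len)
{
	void *d = kmap_atomic(dst);	/* was: kmap_atomic(dst, KM_USER0) */
	void *s = kmap_atomic(src);	/* was: kmap_atomic(src, KM_USER1) */

	memcpy(d + offset, s + offset, len);

	kunmap_atomic(s);	/* most recent mapping first */
	kunmap_atomic(d);
}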
                }
        }
 
-       user_mem = kmap_atomic(page, KM_USER0);
+       user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;
        clen = PAGE_SIZE;
 
-       cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+       cmem = kmap_atomic(zram->table[index].page) +
                zram->table[index].offset;
 
        ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
                kfree(uncmem);
        }
 
-       kunmap_atomic(cmem, KM_USER1);
-       kunmap_atomic(user_mem, KM_USER0);
+       kunmap_atomic(cmem);
+       kunmap_atomic(user_mem);
 
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
                return 0;
        }
 
-       cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
+       cmem = kmap_atomic(zram->table[index].page) +
                zram->table[index].offset;
 
        /* Page is stored uncompressed since it's incompressible */
        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
                memcpy(mem, cmem, PAGE_SIZE);
-               kunmap_atomic(cmem, KM_USER0);
+               kunmap_atomic(cmem);
                return 0;
        }
 
        ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
                                    xv_get_object_size(cmem) - sizeof(*zheader),
                                    mem, &clen);
-       kunmap_atomic(cmem, KM_USER0);
+       kunmap_atomic(cmem);
 
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
            zram_test_flag(zram, index, ZRAM_ZERO))
                zram_free_page(zram, index);
 
-       user_mem = kmap_atomic(page, KM_USER0);
+       user_mem = kmap_atomic(page);
 
        if (is_partial_io(bvec))
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                uncmem = user_mem;
 
        if (page_zero_filled(uncmem)) {
-               kunmap_atomic(user_mem, KM_USER0);
+               kunmap_atomic(user_mem);
                if (is_partial_io(bvec))
                        kfree(uncmem);
                zram_stat_inc(&zram->stats.pages_zero);
        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                               zram->compress_workmem);
 
-       kunmap_atomic(user_mem, KM_USER0);
+       kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                        kfree(uncmem);
 
                zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
                zram_stat_inc(&zram->stats.pages_expand);
                zram->table[index].page = page_store;
-               src = kmap_atomic(page, KM_USER0);
+               src = kmap_atomic(page);
                goto memstore;
        }
 
 memstore:
        zram->table[index].offset = store_offset;
 
-       cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+       cmem = kmap_atomic(zram->table[index].page) +
                zram->table[index].offset;
 
 #if 0
 
        memcpy(cmem, src, clen);
 
-       kunmap_atomic(cmem, KM_USER1);
+       kunmap_atomic(cmem);
        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
-               kunmap_atomic(src, KM_USER0);
+               kunmap_atomic(src);
 
        /* Update stats */
        zram_stat64_add(zram, &zram->stats.compr_size, clen);