#ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(const void *ptr);
 #endif
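
With the km_type argument gone, nesting is tracked by a per-CPU stack
(see kmap_atomic_idx_push/pop below); callers pass only the page and
must unmap in LIFO order.  A minimal sketch of a converted caller
(copy_one_page() is hypothetical, not part of this patch):

static void copy_one_page(struct page *dst, struct page *src)
{
        char *vfrom = kmap_atomic(src);         /* pushes stack depth 0 */
        char *vto   = kmap_atomic(dst);         /* nests at depth 1 */

        memcpy(vto, vfrom, PAGE_SIZE);

        kunmap_atomic(vto);                     /* unmaps must be LIFO */
        kunmap_atomic(vfrom);
}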
 
 
 }
 EXPORT_SYMBOL(kunmap);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
        unsigned int idx;
        unsigned long vaddr;
        void *kmap;
+       int type;
 
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic(type);
-
 #ifdef CONFIG_DEBUG_HIGHMEM
        /*
         * There is no cache coherency issue when non-VIVT, so force the
        if (kmap)
                return kmap;
 
+       type = kmap_atomic_idx_push();
+
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 
        return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+       int idx, type;
 
        if (kvaddr >= (void *)FIXADDR_START) {
+               type = kmap_atomic_idx_pop();
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
                if (cache_is_vivt())
                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
        }
        pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
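
Both paths above share the same per-CPU fixmap slot arithmetic.  An
illustrative helper (not in this patch; assumes the arch's KM_TYPE_NR
and FIX_KMAP_BEGIN, and a caller already pinned to its CPU the way
kmap_atomic's pagefault_disable() guarantees):

static unsigned long kmap_slot_vaddr(int depth)
{
        int idx = depth + KM_TYPE_NR * smp_processor_id();

        /* each CPU owns KM_TYPE_NR consecutive fixmap slots */
        return __fix_to_virt(FIX_KMAP_BEGIN + idx);
}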
 
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-       unsigned int idx;
        unsigned long vaddr;
+       int idx, type;
 
        pagefault_disable();
 
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 
        (void *) damlr;                                                                           \
 })
 
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
 {
        unsigned long paddr;
 
        pagefault_disable();
-       debug_kmap_atomic(type);
        paddr = page_to_phys(page);
 
        switch (type) {
         case 1:                return __kmap_atomic_primary(1, paddr, 3);
         case 2:                return __kmap_atomic_primary(2, paddr, 4);
         case 3:                return __kmap_atomic_primary(3, paddr, 5);
-        case 4:                return __kmap_atomic_primary(4, paddr, 6);
-        case 5:                return __kmap_atomic_primary(5, paddr, 7);
-        case 6:                return __kmap_atomic_primary(6, paddr, 8);
-        case 7:                return __kmap_atomic_primary(7, paddr, 9);
-        case 8:                return __kmap_atomic_primary(8, paddr, 10);
-
-       case 9 ... 9 + NR_TLB_LINES - 1:
-               return __kmap_atomic_secondary(type - 9, paddr);
 
        default:
                BUG();
        asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory");   \
 } while(0)
 
-static inline void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
 {
        switch (type) {
         case 0:                __kunmap_atomic_primary(0, 2);  break;
         case 1:                __kunmap_atomic_primary(1, 3);  break;
         case 2:                __kunmap_atomic_primary(2, 4);  break;
         case 3:                __kunmap_atomic_primary(3, 5);  break;
-        case 4:                __kunmap_atomic_primary(4, 6);  break;
-        case 5:                __kunmap_atomic_primary(5, 7);  break;
-        case 6:                __kunmap_atomic_primary(6, 8);  break;
-        case 7:                __kunmap_atomic_primary(7, 9);  break;
-        case 8:                __kunmap_atomic_primary(8, 10); break;
-
-       case 9 ... 9 + NR_TLB_LINES - 1:
-               __kunmap_atomic_secondary(type - 9, kvaddr);
-               break;
 
        default:
                BUG();
        pagefault_enable();
 }
 
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
 
        dampr2 = __get_DAMPR(2);
 
        for (i = 0; i < nents; i++) {
-               vaddr = kmap_atomic(sg_page(&sg[i]), __KM_CACHE);
+               vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);
 
                frv_dcache_writeback((unsigned long) vaddr,
                                     (unsigned long) vaddr + PAGE_SIZE);
 
        }
 
-       kunmap_atomic(vaddr, __KM_CACHE);
+       kunmap_atomic_primary(vaddr, __KM_CACHE);
        if (dampr2) {
                __set_DAMPR(2, dampr2);
                __set_IAMPR(2, dampr2);
 
 
        dampr2 = __get_DAMPR(2);
 
-       vaddr = kmap_atomic(page, __KM_CACHE);
+       vaddr = kmap_atomic_primary(page, __KM_CACHE);
 
        frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
 
-       kunmap_atomic(vaddr, __KM_CACHE);
+       kunmap_atomic_primary(vaddr, __KM_CACHE);
 
        if (dampr2) {
                __set_DAMPR(2, dampr2);
 
        dampr2 = __get_DAMPR(2);
 
-       vaddr = kmap_atomic(page, __KM_CACHE);
+       vaddr = kmap_atomic_primary(page, __KM_CACHE);
 
        start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
        frv_cache_wback_inv(start, start + len);
 
-       kunmap_atomic(vaddr, __KM_CACHE);
+       kunmap_atomic_primary(vaddr, __KM_CACHE);
 
        if (dampr2) {
                __set_DAMPR(2, dampr2);
 
 {
        return virt_to_page(ptr);
 }
+
+void *__kmap_atomic(struct page *page)
+{
+       unsigned long paddr;
+       int type;
+
+       pagefault_disable();
+       type = kmap_atomic_idx_push();
+       paddr = page_to_phys(page);
+
+       switch (type) {
+       /*
+        * The first 4 primary maps are reserved for architecture code
+        */
+       case 0:         return __kmap_atomic_primary(4, paddr, 6);
+       case 1:         return __kmap_atomic_primary(5, paddr, 7);
+       case 2:         return __kmap_atomic_primary(6, paddr, 8);
+       case 3:         return __kmap_atomic_primary(7, paddr, 9);
+       case 4:         return __kmap_atomic_primary(8, paddr, 10);
+
+       case 5 ... 5 + NR_TLB_LINES - 1:
+               return __kmap_atomic_secondary(type - 5, paddr);
+
+       default:
+               BUG();
+               return NULL;
+       }
+}
+EXPORT_SYMBOL(__kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+       int type = kmap_atomic_idx_pop();
+       switch (type) {
+       case 0:         __kunmap_atomic_primary(4, 6);  break;
+       case 1:         __kunmap_atomic_primary(5, 7);  break;
+       case 2:         __kunmap_atomic_primary(6, 8);  break;
+       case 3:         __kunmap_atomic_primary(7, 9);  break;
+       case 4:         __kunmap_atomic_primary(8, 10); break;
+
+       case 5 ... 5 + NR_TLB_LINES - 1:
+               __kunmap_atomic_secondary(type - 5, kvaddr);
+               break;
+
+       default:
+               BUG();
+       }
+       pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
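
On frv the generic stack is layered on top of the arch-reserved
primary maps; a summary of the layout the two switches above assume
(descriptive only, not new code):

/*
 *      primary maps 0..3               arch-reserved (e.g. __KM_CACHE)
 *      stack depth 0..4                primary maps 4..8 (DAMPR 6..10)
 *      depth 5..5+NR_TLB_LINES-1       secondary TLB lines 0..NR_TLB_LINES-1
 */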
 
 extern void * kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *__kmap(struct page *page);
-extern void __kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page, enum km_type type);
-extern void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-extern struct page *__kmap_atomic_to_page(void *ptr);
-
-#define kmap                   __kmap
-#define kunmap                 __kunmap
-#define kmap_atomic            __kmap_atomic
-#define kunmap_atomic_notypecheck              __kunmap_atomic_notypecheck
-#define kmap_atomic_to_page    __kmap_atomic_to_page
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
+extern void *kmap_atomic_pfn(unsigned long pfn);
+extern struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()    flush_cache_all()
 
 
 
 unsigned long highstart_pfn, highend_pfn;
 
-void *__kmap(struct page *page)
+void *kmap(struct page *page)
 {
        void *addr;
 
 
        return addr;
 }
-EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(kmap);
 
-void __kunmap(struct page *page)
+void kunmap(struct page *page)
 {
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
 }
-EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-       enum fixed_addresses idx;
        unsigned long vaddr;
+       int idx, type;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic(type);
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 }
 EXPORT_SYMBOL(__kmap_atomic);
 
-void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+       int type;
 
        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                return;
        }
 
-       BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+       type = kmap_atomic_idx_pop();
+#ifdef CONFIG_DEBUG_HIGHMEM
+       {
+               int idx = type + KM_TYPE_NR * smp_processor_id();
 
-       /*
-        * force other mappings to Oops if they'll try to access
-        * this pte without first remap it
-        */
-       pte_clear(&init_mm, vaddr, kmap_pte-idx);
-       local_flush_tlb_one(vaddr);
-#endif
+               BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 
+               /*
+                * force other mappings to Oops if they try to access
+                * this pte without first remapping it
+                */
+               pte_clear(&init_mm, vaddr, kmap_pte-idx);
+               local_flush_tlb_one(vaddr);
+       }
+#endif
        pagefault_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-       enum fixed_addresses idx;
        unsigned long vaddr;
+       int idx, type;
 
        pagefault_disable();
 
-       debug_kmap_atomic(type);
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
        return (void*) vaddr;
 }
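
kmap_atomic_pfn() keeps its pfn-based contract, minus the slot
argument.  A sketch of a caller reading through a raw pfn, in the
style of the crash-dump hunk further down (read_from_pfn() is
hypothetical; pfn, offset and csize are assumed valid):

static void read_from_pfn(unsigned long pfn, unsigned long offset,
                          char *buf, size_t csize)
{
        char *vaddr = kmap_atomic_pfn(pfn);     /* no struct page needed */

        memcpy(buf, vaddr + offset, csize);
        kunmap_atomic(vaddr);
}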
 
-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
 {
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;
 
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
+static inline unsigned long __kmap_atomic(struct page *page)
 {
-       enum fixed_addresses idx;
        unsigned long vaddr;
+       int idx, type;
 
+       pagefault_disable();
        if (page < highmem_start_page)
                return page_address(page);
 
-       debug_kmap_atomic(type);
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #if HIGHMEM_DEBUG
        return vaddr;
 }
 
-static inline void kunmap_atomic_notypecheck(unsigned long vaddr, enum km_type type)
+static inline void __kunmap_atomic(unsigned long vaddr)
 {
-#if HIGHMEM_DEBUG
-       enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
+       int type;
 
-       if (vaddr < FIXADDR_START) /* FIXME */
+       if (vaddr < FIXADDR_START) { /* FIXME */
+               pagefault_enable();
                return;
+       }
 
-       if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
-               BUG();
+       type = kmap_atomic_idx_pop();
 
-       /*
-        * force other mappings to Oops if they'll try to access
-        * this pte without first remap it
-        */
-       pte_clear(kmap_pte - idx);
-       __flush_tlb_one(vaddr);
+#if HIGHMEM_DEBUG
+       {
+               unsigned int idx;
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
+               if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
+                       BUG();
+
+               /*
+                * force other mappings to Oops if they try to access
+                * this pte without first remapping it
+                */
+               pte_clear(kmap_pte - idx);
+               __flush_tlb_one(vaddr);
+       }
 #endif
+       pagefault_enable();
 }
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_HIGHMEM_H */
 
 
 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, enum km_type type,
-                             pgprot_t prot);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void __kunmap_atomic(void *kvaddr);
 
 static inline void *kmap(struct page *page)
 {
        kunmap_high(page);
 }
 
-static inline void *kmap_atomic(struct page *page, enum km_type type)
+static inline void *__kmap_atomic(struct page *page)
 {
-       return kmap_atomic_prot(page, type, kmap_prot);
+       return kmap_atomic_prot(page, kmap_prot);
 }
 
 static inline struct page *kmap_atomic_to_page(void *ptr)
 
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-       unsigned int idx;
        unsigned long vaddr;
+       int idx, type;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic(type);
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+       int type;
 
        if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
                pagefault_enable();
                return;
        }
 
-       BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+       type = kmap_atomic_idx_pop();
 
-       /*
-        * force other mappings to Oops if they'll try to access
-        * this pte without first remap it
-        */
-       pte_clear(&init_mm, vaddr, kmap_pte-idx);
-       local_flush_tlb_page(NULL, vaddr);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       {
+               unsigned int idx;
+
+               idx = type + KM_TYPE_NR * smp_processor_id();
+               BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+               /*
+                * force other mappings to Oops if they try to access
+                * this pte without first remapping it
+                */
+               pte_clear(&init_mm, vaddr, kmap_pte-idx);
+               local_flush_tlb_page(NULL, vaddr);
+       }
 #endif
        pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
        kunmap_high(page);
 }
 
-extern void *kmap_atomic(struct page *page, enum km_type type);
-extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
+extern void *__kmap_atomic(struct page *page);
+extern void __kunmap_atomic(void *kvaddr);
 extern struct page *kmap_atomic_to_page(void *vaddr);
 
 #define flush_cache_kmaps()    flush_cache_all()
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-       unsigned long idx;
        unsigned long vaddr;
+       long idx, type;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic(type);
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 
 
        return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
+       int type;
 
        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                return;
        }
 
-       BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+       type = kmap_atomic_idx_pop();
 
-/* XXX Fix - Anton */
+#ifdef CONFIG_DEBUG_HIGHMEM
+       {
+               unsigned long idx;
+
+               idx = type + KM_TYPE_NR * smp_processor_id();
+               BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+
+               /* XXX Fix - Anton */
 #if 0
-       __flush_cache_one(vaddr);
+               __flush_cache_one(vaddr);
 #else
-       flush_cache_all();
+               flush_cache_all();
 #endif
 
-       /*
-        * force other mappings to Oops if they'll try to access
-        * this pte without first remap it
-        */
-       pte_clear(&init_mm, vaddr, kmap_pte-idx);
-/* XXX Fix - Anton */
+               /*
+                * force other mappings to Oops if they try to access
+                * this pte without first remapping it
+                */
+               pte_clear(&init_mm, vaddr, kmap_pte-idx);
+               /* XXX Fix - Anton */
 #if 0
-       __flush_tlb_one(vaddr);
+               __flush_tlb_one(vaddr);
 #else
-       flush_tlb_all();
+               flush_tlb_all();
 #endif
+       }
 #endif
-
        pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /* We may be fed a pagetable here by ptep_to_xxx and others. */
 struct page *kmap_atomic_to_page(void *ptr)
 
 /* This macro is used only in map_new_virtual() to map "page". */
 #define kmap_prot page_to_kpgprot(page)
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
 void kmap_atomic_fix_kpte(struct page *page, int finished);
 
 #define flush_cache_kmaps()    do { } while (0)
 
 }
 EXPORT_SYMBOL(kunmap);
 
-static void debug_kmap_atomic_prot(enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-       static unsigned warn_count = 10;
-
-       if (unlikely(warn_count == 0))
-               return;
-
-       if (unlikely(in_interrupt())) {
-               if (in_irq()) {
-                       if (type != KM_IRQ0 && type != KM_IRQ1 &&
-                           type != KM_BIO_SRC_IRQ &&
-                           /* type != KM_BIO_DST_IRQ && */
-                           type != KM_BOUNCE_READ) {
-                               WARN_ON(1);
-                               warn_count--;
-                       }
-               } else if (!irqs_disabled()) {  /* softirq */
-                       if (type != KM_IRQ0 && type != KM_IRQ1 &&
-                           type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-                           type != KM_SKB_SUNRPC_DATA &&
-                           type != KM_SKB_DATA_SOFTIRQ &&
-                           type != KM_BOUNCE_READ) {
-                               WARN_ON(1);
-                               warn_count--;
-                       }
-               }
-       }
-
-       if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-           type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
-               if (!irqs_disabled()) {
-                       WARN_ON(1);
-                       warn_count--;
-               }
-       } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-               if (irq_count() == 0 && !irqs_disabled()) {
-                       WARN_ON(1);
-                       warn_count--;
-               }
-       }
-#endif
-}
-
 /*
  * Describe a single atomic mapping of a page on a given cpu at a
  * given address, and allow it to be linked into a list.
  * When holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-       enum fixed_addresses idx;
        unsigned long vaddr;
+       int idx, type;
        pte_t *pte;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic_prot(type);
-
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        pte = kmap_get_pte(vaddr);
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
        /* PAGE_NONE is a magic value that tells us to check immutability. */
-       return kmap_atomic_prot(page, type, PAGE_NONE);
+       return kmap_atomic_prot(page, PAGE_NONE);
 }
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(__kmap_atomic);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-       /*
-        * Force other mappings to Oops if they try to access this pte without
-        * first remapping it.  Keeping stale mappings around is a bad idea.
-        */
-       if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
+       if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+           vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
                pte_t *pte = kmap_get_pte(vaddr);
                pte_t pteval = *pte;
+               int idx, type;
+
+               type = kmap_atomic_idx_pop();
+               idx = type + KM_TYPE_NR*smp_processor_id();
+
+               /*
+                * Force other mappings to Oops if they try to access this pte
+                * without first remapping it.  Keeping stale mappings around
+                * is a bad idea.
+                */
                BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
                kmap_atomic_unregister(pte_page(pteval), vaddr);
                kpte_clear_flush(pte, vaddr);
        arch_flush_lazy_mmu_mode();
        pagefault_enable();
 }
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This API is supposed to allow us to map memory without a "struct page".
  * Currently we don't support this, though this may change in the future.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-       return kmap_atomic(pfn_to_page(pfn), type);
+       return kmap_atomic(pfn_to_page(pfn));
 }
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-       return kmap_atomic_prot(pfn_to_page(pfn), type, prot);
+       return kmap_atomic_prot(pfn_to_page(pfn), prot);
 }
 
 struct page *kmap_atomic_to_page(void *ptr)
 
 
 void *kmap(struct page *page);
 void kunmap(struct page *page);
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
-void *kmap_atomic(struct page *page, enum km_type type);
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type);
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+
+void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+void *__kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic_pfn(unsigned long pfn);
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()    do { } while (0)
 
 #include <asm/tlbflush.h>
 
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
 
 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type);
+iounmap_atomic(void __iomem *kvaddr);
 
 int
 iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
 
        if (!is_crashed_pfn_valid(pfn))
                return -EFAULT;
 
-       vaddr = kmap_atomic_pfn(pfn, KM_PTE0);
+       vaddr = kmap_atomic_pfn(pfn);
 
        if (!userbuf) {
                memcpy(buf, (vaddr + offset), csize);
 
                return page_address(page);
        return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);
 
 void kunmap(struct page *page)
 {
                return;
        kunmap_high(page);
 }
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
  * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-       enum fixed_addresses idx;
        unsigned long vaddr;
+       int idx, type;
 
        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
 
-       debug_kmap_atomic(type);
-
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
 
        return (void *)vaddr;
 }
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void *__kmap_atomic(struct page *page)
+{
+       return kmap_atomic_prot(page, kmap_prot);
+}
+EXPORT_SYMBOL(__kmap_atomic);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-       return kmap_atomic_prot(page, type, kmap_prot);
+       return kmap_atomic_prot_pfn(pfn, kmap_prot);
 }
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
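
kmap_atomic_prot() likewise drops the slot while keeping the pgprot,
which the vmwgfx hunks below rely on.  A sketch (copy_with_prot() is
hypothetical; the caller supplies the protection):

static void copy_with_prot(struct page *d, const void *src, pgprot_t prot)
{
        void *dst = kmap_atomic_prot(d, prot);  /* protections, no slot */

        memcpy(dst, src, PAGE_SIZE);
        kunmap_atomic(dst);
}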
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-       /*
-        * Force other mappings to Oops if they'll try to access this pte
-        * without first remap it.  Keeping stale mappings around is a bad idea
-        * also, in case the page changes cacheability attributes or becomes
-        * a protected page in a hypervisor.
-        */
-       if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+
+       if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+           vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+               int idx, type;
+
+               type = kmap_atomic_idx_pop();
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+               WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+               /*
+                * Force other mappings to Oops if they try to access this
+                * pte without first remapping it.  Keeping stale mappings
+                * around is also a bad idea, in case the page changes
+                * cacheability attributes or becomes a protected page in a
+                * hypervisor.
+                */
                kpte_clear_flush(kmap_pte-idx, vaddr);
-       else {
+       }
 #ifdef CONFIG_DEBUG_HIGHMEM
+       else {
                BUG_ON(vaddr < PAGE_OFFSET);
                BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
        }
+#endif
 
        pagefault_enable();
 }
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-{
-       return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
+EXPORT_SYMBOL(__kunmap_atomic);
 
 struct page *kmap_atomic_to_page(void *ptr)
 {
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
 }
-
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
-EXPORT_SYMBOL(kmap_atomic_prot);
 EXPORT_SYMBOL(kmap_atomic_to_page);
 
 void __init set_highmem_pages_init(void)
 
 }
 EXPORT_SYMBOL_GPL(iomap_create_wc);
 
-void
-iomap_free(resource_size_t base, unsigned long size)
+void iomap_free(resource_size_t base, unsigned long size)
 {
        io_free_memtype(base, base + size);
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-       enum fixed_addresses idx;
        unsigned long vaddr;
+       int idx, type;
 
        pagefault_disable();
 
-       debug_kmap_atomic(type);
+       type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
 }
 
 /*
- * Map 'pfn' using fixed map 'type' and protections 'prot'
+ * Map 'pfn' using protections 'prot'
  */
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
        /*
         * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
        if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
                prot = PAGE_KERNEL_UC_MINUS;
 
-       return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
+       return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr)
 {
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-       enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-       /*
-        * Force other mappings to Oops if they'll try to access this pte
-        * without first remap it.  Keeping stale mappings around is a bad idea
-        * also, in case the page changes cacheability attributes or becomes
-        * a protected page in a hypervisor.
-        */
-       if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+       if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+           vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+               int idx, type;
+
+               type = kmap_atomic_idx_pop();
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+               WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+               /*
+                * Force other mappings to Oops if they try to access this
+                * pte without first remapping it.  Keeping stale mappings
+                * around is also a bad idea, in case the page changes
+                * cacheability attributes or becomes a protected page in a
+                * hypervisor.
+                */
                kpte_clear_flush(kmap_pte-idx, vaddr);
+       }
 
        pagefault_enable();
 }
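
A sketch of the new calling convention for this pair (poke_io_word()
is hypothetical; assumes a valid pfn and that PAGE_KERNEL_WC is the
wanted protection):

static void poke_io_word(unsigned long pfn, unsigned long off, u32 val)
{
        u8 __iomem *p = iomap_atomic_prot_pfn(pfn, PAGE_KERNEL_WC);

        iowrite32(val, p + (off & ~PAGE_MASK));
        iounmap_atomic(p);              /* pops the same per-CPU stack */
}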
 
        char __iomem *vaddr;
        int unwritten;
 
-       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
        if (vaddr == NULL)
                return -ENOMEM;
        unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
-       kunmap_atomic(vaddr, KM_USER0);
+       kunmap_atomic(vaddr);
 
        if (unwritten)
                return -EFAULT;
        char *vaddr_atomic;
        unsigned long unwritten;
 
-       vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
+       vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                      user_data, length);
-       io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
+       io_mapping_unmap_atomic(vaddr_atomic);
        if (unwritten)
                return -EFAULT;
        return 0;
        char __iomem *vaddr;
        unsigned long unwritten;
 
-       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
        if (vaddr == NULL)
                return -ENOMEM;
        unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
-       kunmap_atomic(vaddr, KM_USER0);
+       kunmap_atomic(vaddr);
 
        if (unwritten)
                return -EFAULT;
                reloc_offset = obj_priv->gtt_offset + reloc->offset;
                reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                                                      (reloc_offset &
-                                                      ~(PAGE_SIZE - 1)),
-                                                     KM_USER0);
+                                                      ~(PAGE_SIZE - 1)));
                reloc_entry = (uint32_t __iomem *)(reloc_page +
                                                   (reloc_offset & (PAGE_SIZE - 1)));
                reloc_val = target_obj_priv->gtt_offset + reloc->delta;
                          readl(reloc_entry), reloc_val);
 #endif
                writel(reloc_val, reloc_entry);
-               io_mapping_unmap_atomic(reloc_page, KM_USER0);
+               io_mapping_unmap_atomic(reloc_page);
 
                /* The updated presumed offset for this entry will be
                 * copied back out to the user.
        page_count = obj->size / PAGE_SIZE;
 
        for (i = 0; i < page_count; i++) {
-               char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
+               char *dst = kmap_atomic(obj_priv->pages[i]);
                char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
                memcpy(dst, src, PAGE_SIZE);
-               kunmap_atomic(dst, KM_USER0);
+               kunmap_atomic(dst);
        }
        drm_clflush_pages(obj_priv->pages, page_count);
        drm_agp_chipset_flush(dev);
        page_count = obj->size / PAGE_SIZE;
 
        for (i = 0; i < page_count; i++) {
-               char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
+               char *src = kmap_atomic(obj_priv->pages[i]);
                char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 
                memcpy(dst, src, PAGE_SIZE);
-               kunmap_atomic(src, KM_USER0);
+               kunmap_atomic(src);
        }
 
        i915_gem_object_put_pages(obj);
 
 
                local_irq_save(flags);
                s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                            reloc_offset,
-                                            KM_IRQ0);
+                                            reloc_offset);
                memcpy_fromio(d, s, PAGE_SIZE);
-               io_mapping_unmap_atomic(s, KM_IRQ0);
+               io_mapping_unmap_atomic(s);
                local_irq_restore(flags);
 
                dst->pages[page] = d;
 
 
        if (OVERLAY_NONPHYSICAL(overlay->dev)) {
                regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                               overlay->reg_bo->gtt_offset,
-                                               KM_USER0);
+                                               overlay->reg_bo->gtt_offset);
 
                if (!regs) {
                        DRM_ERROR("failed to map overlay regs in GTT\n");
 static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
 {
        if (OVERLAY_NONPHYSICAL(overlay->dev))
-               io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
+               io_mapping_unmap_atomic(overlay->virt_addr);
 
        overlay->virt_addr = NULL;
 
 
 
        if (off < pci_resource_len(dev->pdev, 1)) {
                uint8_t __iomem *p =
-                       io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+                       io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
 
                val = ioread32(p + (off & ~PAGE_MASK));
 
-               io_mapping_unmap_atomic(p, KM_USER0);
+               io_mapping_unmap_atomic(p);
        }
 
        return val;
 {
        if (off < pci_resource_len(dev->pdev, 1)) {
                uint8_t __iomem *p =
-                       io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+                       io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
 
                iowrite32(val, p + (off & ~PAGE_MASK));
                wmb();
 
-               io_mapping_unmap_atomic(p, KM_USER0);
+               io_mapping_unmap_atomic(p);
        }
 }
 
 
        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
 
 #ifdef CONFIG_X86
-       dst = kmap_atomic_prot(d, KM_USER0, prot);
+       dst = kmap_atomic_prot(d, prot);
 #else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                dst = vmap(&d, 1, 0, prot);
        memcpy_fromio(dst, src, PAGE_SIZE);
 
 #ifdef CONFIG_X86
-       kunmap_atomic(dst, KM_USER0);
+       kunmap_atomic(dst);
 #else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(dst);
 
        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
 #ifdef CONFIG_X86
-       src = kmap_atomic_prot(s, KM_USER0, prot);
+       src = kmap_atomic_prot(s, prot);
 #else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                src = vmap(&s, 1, 0, prot);
        memcpy_toio(dst, src, PAGE_SIZE);
 
 #ifdef CONFIG_X86
-       kunmap_atomic(src, KM_USER0);
+       kunmap_atomic(src);
 #else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(src);
 
 
 #include <asm/kmap_types.h>
 
-#ifdef CONFIG_DEBUG_HIGHMEM
-
-void debug_kmap_atomic(enum km_type type);
-
-#else
-
-static inline void debug_kmap_atomic(enum km_type type)
-{
-}
-
-#endif
-
 #ifdef CONFIG_HIGHMEM
 #include <asm/highmem.h>
 
 
 void kmap_flush_unused(void);
 
+DECLARE_PER_CPU(int, __kmap_atomic_idx);
+
+static inline int kmap_atomic_idx_push(void)
+{
+       int idx = __get_cpu_var(__kmap_atomic_idx)++;
+#ifdef CONFIG_DEBUG_HIGHMEM
+       WARN_ON_ONCE(in_irq() && !irqs_disabled());
+       BUG_ON(idx >= KM_TYPE_NR);
+#endif
+       return idx;
+}
+
+static inline int kmap_atomic_idx_pop(void)
+{
+       int idx = --__get_cpu_var(__kmap_atomic_idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       BUG_ON(idx < 0);
+#endif
+       return idx;
+}
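
Because every map pushes one entry on the owning CPU's counter,
interrupt-context code simply stacks above whatever the interrupted
task had mapped; no dedicated KM_IRQ* slot is needed.  A sketch
(my_irq_handler() is hypothetical):

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
        struct page *page = dev_id;
        void *vaddr = kmap_atomic(page);        /* stacks above task-level maps */

        clear_page(vaddr);
        kunmap_atomic(vaddr);
        return IRQ_HANDLED;
}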
+
 #else /* CONFIG_HIGHMEM */
 
 static inline unsigned int nr_free_highpages(void) { return 0; }
 {
 }
 
-static inline void *kmap_atomic(struct page *page, enum km_type idx)
+static inline void *__kmap_atomic(struct page *page)
 {
        pagefault_disable();
        return page_address(page);
 }
-#define kmap_atomic_prot(page, idx, prot)      kmap_atomic(page, idx)
+#define kmap_atomic_prot(page, prot)   __kmap_atomic(page)
 
-static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
+static inline void __kunmap_atomic(void *addr)
 {
        pagefault_enable();
 }
 
-#define kmap_atomic_pfn(pfn, idx)      kmap_atomic(pfn_to_page(pfn), (idx))
+#define kmap_atomic_pfn(pfn)   kmap_atomic(pfn_to_page(pfn))
 #define kmap_atomic_to_page(ptr)       virt_to_page(ptr)
 
 #define kmap_flush_unused()    do {} while(0)
 
 #endif /* CONFIG_HIGHMEM */
 
-/* Prevent people trying to call kunmap_atomic() as if it were kunmap() */
-/* kunmap_atomic() should get the return value of kmap_atomic, not the page. */
-#define kunmap_atomic(addr, idx) do { \
-               BUILD_BUG_ON(__same_type((addr), struct page *)); \
-               kunmap_atomic_notypecheck((addr), (idx)); \
-       } while (0)
+/*
+ * Make both kmap_atomic(page, idx) and kmap_atomic(page) work.
+ */
+#define kmap_atomic(page, args...) __kmap_atomic(page)
+
+/*
+ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+ */
+#define kunmap_atomic(addr, args...)                           \
+do {                                                           \
+       BUILD_BUG_ON(__same_type((addr), struct page *));       \
+       __kunmap_atomic(addr);                                  \
+} while (0)
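
During the conversion both spellings build, since the extra argument
is swallowed and discarded (both_forms_build() is hypothetical;
KM_USER0 is one of the legacy km_type values):

static void both_forms_build(struct page *page)
{
        void *vaddr;

        vaddr = kmap_atomic(page, KM_USER0);    /* legacy call, arg dropped */
        kunmap_atomic(vaddr, KM_USER0);

        vaddr = kmap_atomic(page);              /* new call */
        kunmap_atomic(vaddr);
}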
 
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
 
 /* Atomic map/unmap */
 static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
-                        unsigned long offset,
-                        int slot)
+                        unsigned long offset)
 {
        resource_size_t phys_addr;
        unsigned long pfn;
        BUG_ON(offset >= mapping->size);
        phys_addr = mapping->base + offset;
        pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
-       return iomap_atomic_prot_pfn(pfn, slot, mapping->prot);
+       return iomap_atomic_prot_pfn(pfn, mapping->prot);
 }
 
 static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr)
 {
-       iounmap_atomic(vaddr, slot);
+       iounmap_atomic(vaddr);
 }
 
 static inline void __iomem *
 /* Atomic map/unmap */
 static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
-                        unsigned long offset,
-                        int slot)
+                        unsigned long offset)
 {
        return ((char __force __iomem *) mapping) + offset;
 }
 
 static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void __iomem *vaddr)
 {
 }
 
 
 unsigned long totalhigh_pages __read_mostly;
 EXPORT_SYMBOL(totalhigh_pages);
 
+
+DEFINE_PER_CPU(int, __kmap_atomic_idx);
+EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
+
 unsigned int nr_free_highpages (void)
 {
        pg_data_t *pgdat;
 }
 
 #endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-
-void debug_kmap_atomic(enum km_type type)
-{
-       static int warn_count = 10;
-
-       if (unlikely(warn_count < 0))
-               return;
-
-       if (unlikely(in_interrupt())) {
-               if (in_nmi()) {
-                       if (type != KM_NMI && type != KM_NMI_PTE) {
-                               WARN_ON(1);
-                               warn_count--;
-                       }
-               } else if (in_irq()) {
-                       if (type != KM_IRQ0 && type != KM_IRQ1 &&
-                           type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
-                           type != KM_BOUNCE_READ && type != KM_IRQ_PTE) {
-                               WARN_ON(1);
-                               warn_count--;
-                       }
-               } else if (!irqs_disabled()) {  /* softirq */
-                       if (type != KM_IRQ0 && type != KM_IRQ1 &&
-                           type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
-                           type != KM_SKB_SUNRPC_DATA &&
-                           type != KM_SKB_DATA_SOFTIRQ &&
-                           type != KM_BOUNCE_READ) {
-                               WARN_ON(1);
-                               warn_count--;
-                       }
-               }
-       }
-
-       if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
-                       type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ ||
-                       type == KM_IRQ_PTE || type == KM_NMI ||
-                       type == KM_NMI_PTE ) {
-               if (!irqs_disabled()) {
-                       WARN_ON(1);
-                       warn_count--;
-               }
-       } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
-               if (irq_count() == 0 && !irqs_disabled()) {
-                       WARN_ON(1);
-                       warn_count--;
-               }
-       }
-#ifdef CONFIG_KGDB_KDB
-       if (unlikely(type == KM_KDB && atomic_read(&kgdb_active) == -1)) {
-               WARN_ON(1);
-               warn_count--;
-       }
-#endif /* CONFIG_KGDB_KDB */
-}
-
-#endif