 #DEFINES += -DUSE_INTERRUPTS
 #DEFINES += -DUSE_SSSE3
 #DEFINES += -DPUNIT_CAMERA_BUSY
-DEFINES += -DUSE_KMEM_CACHE
+#DEFINES += -DUSE_KMEM_CACHE
 
 DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0
 DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400
 
 
                        page_obj[i].page = hmm_page->page;
                        page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC;
-#ifdef USE_KMEM_CACHE
                        kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
-#else
-                       atomisp_kernel_free(hmm_page);
-#endif
 
                        if (i == size)
                                return i;
                }
                return;
        }
-#ifdef USE_KMEM_CACHE
        hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
                                                GFP_KERNEL);
-#else
-       hmm_page = atomisp_kernel_malloc(sizeof(struct hmm_page));
-#endif
        if (!hmm_page) {
                dev_err(atomisp_dev, "out of memory for hmm_page.\n");
 
                return -ENOMEM;
        }
 
-#ifdef USE_KMEM_CACHE
        dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
                                                sizeof(struct hmm_page), 0,
                                                SLAB_HWCACHE_ALIGN, NULL);
                atomisp_kernel_free(dypool_info);
                return -ENOMEM;
        }
-#endif
 
        INIT_LIST_HEAD(&dypool_info->pages_list);
        spin_lock_init(&dypool_info->list_lock);
                        hmm_mem_stat.dyc_size--;
                        hmm_mem_stat.sys_size--;
                }
-#ifdef USE_KMEM_CACHE
                kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
-#else
-               atomisp_kernel_free(hmm_page);
-#endif
                spin_lock_irqsave(&dypool_info->list_lock, flags);
        }
 
        spin_unlock_irqrestore(&dypool_info->list_lock, flags);
 
-#ifdef USE_KMEM_CACHE
        kmem_cache_destroy(dypool_info->pgptr_cache);
-#endif
 
        atomisp_kernel_free(dypool_info);
 
 
        /* list lock is used to protect the free pages block lists */
        spinlock_t              list_lock;
 
-#ifdef USE_KMEM_CACHE
        struct kmem_cache       *pgptr_cache;
-#endif
        bool                    initialized;
 
        unsigned int            pool_size;
 
        phys_addr_t base_address;
 
        struct mutex pt_mutex;
-#ifdef USE_KMEM_CACHE
        struct kmem_cache *tbl_cache;
-#endif
 };
 
 /* flags for PDE and PTE */
 
         * The slab allocator(kmem_cache and kmalloc family) doesn't handle
         * GFP_DMA32 flag, so we have to use buddy allocator.
         */
-#ifdef USE_KMEM_CACHE
        if (totalram_pages > (unsigned long)NR_PAGES_2GB)
                virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
        else
                virt = kmem_cache_zalloc(mmu->tbl_cache, GFP_KERNEL);
-#else
-       virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
-#endif
        if (!virt)
                return (phys_addr_t)NULL_PAGE;
 
        set_memory_wb((unsigned long)virt, 1);
 #endif
 
-#ifdef USE_KMEM_CACHE
        kmem_cache_free(mmu->tbl_cache, virt);
-#else
-       free_page((unsigned long)virt);
-#endif
 }
 
 static void mmu_remap_error(struct isp_mmu *mmu,
 
        mutex_init(&mmu->pt_mutex);
 
-#ifdef USE_KMEM_CACHE
        mmu->tbl_cache = kmem_cache_create("iopte_cache", ISP_PAGE_SIZE,
                                           ISP_PAGE_SIZE, SLAB_HWCACHE_ALIGN,
                                           NULL);
        if (!mmu->tbl_cache)
                return -ENOMEM;
-#endif
 
        return 0;
 }
 
        free_page_table(mmu, l1_pt);
 
-#ifdef USE_KMEM_CACHE
        kmem_cache_destroy(mmu->tbl_cache);
-#endif
 }