return rxe_type_info[pool->type].name;
 }
 
-static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
-{
-       return rxe_type_info[pool->type].cache;
-}
-
-static void rxe_cache_clean(size_t cnt)
-{
-       int i;
-       struct rxe_type_info *type;
-
-       for (i = 0; i < cnt; i++) {
-               type = &rxe_type_info[i];
-               if (!(type->flags & RXE_POOL_NO_ALLOC)) {
-                       kmem_cache_destroy(type->cache);
-                       type->cache = NULL;
-               }
-       }
-}
-
-int rxe_cache_init(void)
-{
-       int err;
-       int i;
-       size_t size;
-       struct rxe_type_info *type;
-
-       for (i = 0; i < RXE_NUM_TYPES; i++) {
-               type = &rxe_type_info[i];
-               size = ALIGN(type->size, RXE_POOL_ALIGN);
-               if (!(type->flags & RXE_POOL_NO_ALLOC)) {
-                       type->cache =
-                               kmem_cache_create(type->name, size,
-                                                 RXE_POOL_ALIGN,
-                                                 RXE_POOL_CACHE_FLAGS, NULL);
-                       if (!type->cache) {
-                               pr_err("Unable to init kmem cache for %s\n",
-                                      type->name);
-                               err = -ENOMEM;
-                               goto err1;
-                       }
-               }
-       }
-
-       return 0;
-
-err1:
-       rxe_cache_clean(i);
-
-       return err;
-}
-
-void rxe_cache_exit(void)
-{
-       rxe_cache_clean(RXE_NUM_TYPES);
-}
-
 static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
 {
        int err = 0;
        if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
                goto out_cnt;
 
-       elem = kmem_cache_zalloc(pool_cache(pool),
-                                (pool->flags & RXE_POOL_ATOMIC) ?
-                                GFP_ATOMIC : GFP_KERNEL);
+       elem = kzalloc(rxe_type_info[pool->type].size,
+                      (pool->flags & RXE_POOL_ATOMIC) ?
+                      GFP_ATOMIC : GFP_KERNEL);
        if (!elem)
                pool->cleanup(elem);
 
        if (!(pool->flags & RXE_POOL_NO_ALLOC))
-               kmem_cache_free(pool_cache(pool), elem);
+               kfree(elem);
        atomic_dec(&pool->num_elem);
        ib_device_put(&pool->rxe->ib_dev);
        rxe_pool_put(pool);
 
        u32                     min_index;
        size_t                  key_offset;
        size_t                  key_size;
-       struct kmem_cache       *cache;
 };
 
 extern struct rxe_type_info rxe_type_info[];
        size_t                  key_size;
 };
 
-/* initialize slab caches for managed objects */
-int rxe_cache_init(void);
-
-/* cleanup slab caches for managed objects */
-void rxe_cache_exit(void);
-
 /* initialize a pool of objects with given limit on
  * number of elements. gets parameters from rxe_type_info
  * pool elements will be allocated out of a slab cache