local_irq_enable();
 }
 
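+/*
+ * Allocate a single object for the cache: from its kmem_cache when one is
+ * provided, otherwise as a whole page.
+ */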
+static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
+                                              gfp_t gfp_flags)
+{
+       if (mc->kmem_cache)
+               return kmem_cache_zalloc(mc->kmem_cache, gfp_flags);
+       else
+               return (void *)__get_free_page(gfp_flags);
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
 {
        void *obj;
        if (mc->nobjs >= min)
                return 0;
        while (mc->nobjs < ARRAY_SIZE(mc->objects)) {
-               if (mc->kmem_cache)
-                       obj = kmem_cache_zalloc(mc->kmem_cache, GFP_KERNEL_ACCOUNT);
-               else
-                       obj = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
+               obj = mmu_memory_cache_alloc_obj(mc, GFP_KERNEL_ACCOUNT);
                if (!obj)
                        return mc->nobjs >= min ? 0 : -ENOMEM;
                mc->objects[mc->nobjs++] = obj;
 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 {
        void *p;
 
-       BUG_ON(!mc->nobjs);
-       p = mc->objects[--mc->nobjs];
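+       /*
+        * The cache is supposed to be topped up before it is consumed; if
+        * it is unexpectedly empty, warn and fall back to an atomic
+        * allocation, and BUG only if that also fails.
+        */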
+       if (WARN_ON(!mc->nobjs))
+               p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
+       else
+               p = mc->objects[--mc->nobjs];
+       BUG_ON(!p);
        return p;
 }