if (r < 0)
                goto out_vmcs02;
 
-       vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
+       vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
        if (!vmx->nested.cached_vmcs12)
                goto out_cached_vmcs12;
 
-       vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
+       vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
        if (!vmx->nested.cached_shadow_vmcs12)
                goto out_cached_shadow_vmcs12;
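
For reference: GFP_KERNEL_ACCOUNT is (GFP_KERNEL | __GFP_ACCOUNT), so both
cached-VMCS allocations above are now charged to the memory cgroup of the
task that triggered them, i.e. the userspace VMM. A minimal illustrative
sketch of the distinction (not part of the patch; the helper names are
hypothetical):

	#include <linux/slab.h>

	/* Charged: counted against the calling task's memcg limit. */
	static void *vm_state_alloc(size_t size)
	{
		return kzalloc(size, GFP_KERNEL_ACCOUNT);
	}

	/* Uncharged: invisible to memcg, as all of these calls were before. */
	static void *module_state_alloc(size_t size)
	{
		return kzalloc(size, GFP_KERNEL);
	}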
 
        if (!cpu_has_vmx_shadow_vmcs())
                enable_shadow_vmcs = 0;
        if (enable_shadow_vmcs) {
                for (i = 0; i < VMX_BITMAP_NR; i++) {
+                       /*
+                        * The vmx_bitmap is not tied to a VM and so should
+                        * not be charged to a memcg.
+                        */
                        vmx_bitmap[i] = (unsigned long *)
                                __get_free_page(GFP_KERNEL);
                        if (!vmx_bitmap[i]) {
 
 
        if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
            !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+               /*
+                * This allocation for vmx_l1d_flush_pages is not tied to a VM
+                * lifetime and so should not be charged to a memcg.
+                */
                page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
                if (!page)
                        return -ENOMEM;
        return 0;
 }
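
Both this hunk and the vmx_bitmap hunk above deliberately stay on plain
GFP_KERNEL: the pages are tied to the kvm_intel module, not to any VM, so
charging them to whichever task happened to trigger the allocation would
misattribute the memory. A hypothetical module-scoped allocation follows
the same pattern:

	/* Hypothetical module-lifetime buffer: freed only at module
	 * unload, so deliberately not charged to a memcg. */
	static unsigned long *global_scratch;

	static int __init example_setup(void)
	{
		global_scratch = (unsigned long *)__get_free_page(GFP_KERNEL);
		return global_scratch ? 0 : -ENOMEM;
	}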
 
-struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu)
+struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
 {
        int node = cpu_to_node(cpu);
        struct page *pages;
        struct vmcs *vmcs;
 
-       pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
+       pages = __alloc_pages_node(node, flags, vmcs_config.order);
        if (!pages)
                return NULL;
        vmcs = page_address(pages);
        loaded_vmcs_init(loaded_vmcs);
 
        if (cpu_has_vmx_msr_bitmap()) {
-               loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+               loaded_vmcs->msr_bitmap = (unsigned long *)
+                               __get_free_page(GFP_KERNEL_ACCOUNT);
                if (!loaded_vmcs->msr_bitmap)
                        goto out_vmcs;
                memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
        for_each_possible_cpu(cpu) {
                struct vmcs *vmcs;
 
-               vmcs = alloc_vmcs_cpu(false, cpu);
+               vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);
                if (!vmcs) {
                        free_kvm_area();
                        return -ENOMEM;
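
With a gfp_t threaded through alloc_vmcs_cpu(), each call site picks its
own accounting: the per-CPU VMXON regions allocated here live for the
module's lifetime and stay uncharged, while VMCS pages allocated on behalf
of a VM are charged via alloc_vmcs() (see the vmx.h hunk below). The two
intended call patterns, side by side (comments are mine, not the patch's):

	/* Module lifetime, one per possible CPU: not charged. */
	vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);

	/* VM lifetime, allocated on behalf of a specific VM: charged. */
	vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL_ACCOUNT);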
 
 static struct kvm *vmx_vm_alloc(void)
 {
-       struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
+       struct kvm_vmx *kvm_vmx = __vmalloc(sizeof(struct kvm_vmx),
+                                           GFP_KERNEL_ACCOUNT | __GFP_ZERO,
+                                           PAGE_KERNEL);
        return &kvm_vmx->kvm;
 }
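
vzalloc() takes no gfp flags, so the accounted variant has to open-code
__vmalloc(). With the three-argument __vmalloc() of this kernel series,
the call above is equivalent to vzalloc() plus __GFP_ACCOUNT:

	/* Equivalent to vzalloc(size), but charged to the current memcg. */
	void *p = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);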
 
 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 {
        int err;
-       struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+       struct vcpu_vmx *vmx;
        unsigned long *msr_bitmap;
        int cpu;
 
+       vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
        if (!vmx)
                return ERR_PTR(-ENOMEM);
 
-       vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL);
+       vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
+                       GFP_KERNEL_ACCOUNT);
        if (!vmx->vcpu.arch.guest_fpu) {
                printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
                err = -ENOMEM;
         * for the guest, etc.
         */
        if (enable_pml) {
-               vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
                if (!vmx->pml_pg)
                        goto uninit_vcpu;
        }
 
-       vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
        BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
                     > PAGE_SIZE);
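
Every per-vCPU allocation in vmx_create_vcpu() (the vcpu_vmx structure,
the guest FPU state, the PML page, and the guest_msrs page) is now
charged. Note that the kmem_cache_zalloc() calls rely on per-allocation
accounting: __GFP_ACCOUNT, folded into GFP_KERNEL_ACCOUNT, charges a
single slab object, as an alternative to marking the whole cache at
creation time. Sketch of the cache-wide alternative ("example_cache" is
hypothetical):

	/* Cache-wide accounting: every object from this cache is charged,
	 * with no need for __GFP_ACCOUNT at each allocation site. */
	struct kmem_cache *example_cache =
		kmem_cache_create("example_cache", sizeof(struct vcpu_vmx),
				  __alignof__(struct vcpu_vmx), SLAB_ACCOUNT,
				  NULL);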
 
 
        return &(to_vmx(vcpu)->pi_desc);
 }
 
-struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu);
+struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
 void free_vmcs(struct vmcs *vmcs);
 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
 void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
 
 static inline struct vmcs *alloc_vmcs(bool shadow)
 {
-       return alloc_vmcs_cpu(shadow, raw_smp_processor_id());
+       return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
+                             GFP_KERNEL_ACCOUNT);
 }
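
alloc_vmcs() can safely default to GFP_KERNEL_ACCOUNT because every caller
of the wrapper allocates a VMCS on behalf of a specific VM; the only
uncharged user of alloc_vmcs_cpu() left is the per-CPU setup in
alloc_kvm_area(). Illustrative caller (assuming loaded_vmcs usage as
elsewhere in this file):

	/* A vCPU's working VMCS ends up charged to the VM's memcg. */
	loaded_vmcs->vmcs = alloc_vmcs(false);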
 
 u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);