kvm: vmx: Add memcg accounting to KVM allocations
Author:     Ben Gardon <bgardon@google.com>
AuthorDate: Mon, 11 Feb 2019 19:02:52 +0000 (11:02 -0800)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Wed, 20 Feb 2019 21:48:31 +0000 (22:48 +0100)
There are many KVM kernel memory allocations which are tied to the life of
the VM process and should be charged to the VM process's cgroup. If these
allocations aren't charged to the process's cgroup, the OOM killer will not
know that killing the process will free the associated kernel memory.
Add the __GFP_ACCOUNT flag to many of the allocations that are not yet
being charged to the VM process's cgroup.
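
The GFP_KERNEL_ACCOUNT flag used throughout the diff below is GFP_KERNEL
with __GFP_ACCOUNT OR'd in, as defined in include/linux/gfp.h, so every
allocation made with it is charged to the current task's memory cgroup:

        #define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)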

Tested:
Ran all kvm-unit-tests on a 64-bit Haswell machine; the patch
introduced no new failures.
Ran a kernel memory accounting test which creates a VM to touch
memory and then checks that the kernel memory allocated for the
process is within certain bounds.
With this patch we account for much more of the vmalloc and slab memory
allocated for the VM.
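
As a sketch of what such a check can look like (the test itself is not
part of this commit, and the cgroup path below is an assumption based on
the cgroup v1 memory controller layout):

        /*
         * Hypothetical helper: read how much kernel memory is charged to
         * the VM's memory cgroup. A test can sample this before and after
         * creating the VM and assert the delta is within expected bounds.
         */
        #include <stdio.h>

        static long read_kmem_usage(const char *cgroup)
        {
                char path[256];
                long usage;
                FILE *f;

                snprintf(path, sizeof(path),
                         "/sys/fs/cgroup/memory/%s/memory.kmem.usage_in_bytes",
                         cgroup);
                f = fopen(path, "r");
                if (!f)
                        return -1;
                if (fscanf(f, "%ld", &usage) != 1)
                        usage = -1;
                fclose(f);
                return usage;
        }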

Signed-off-by: Ben Gardon <bgardon@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 11431bb4c7edc2ab620c92dfd36ca5aaac53765c..a4b1b5a50489e2b5597c628c243fdd7d77a26a25 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4166,11 +4166,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
        if (r < 0)
                goto out_vmcs02;
 
-       vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
+       vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
        if (!vmx->nested.cached_vmcs12)
                goto out_cached_vmcs12;
 
-       vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL);
+       vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
        if (!vmx->nested.cached_shadow_vmcs12)
                goto out_cached_shadow_vmcs12;
 
@@ -5715,6 +5715,10 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
                enable_shadow_vmcs = 0;
        if (enable_shadow_vmcs) {
                for (i = 0; i < VMX_BITMAP_NR; i++) {
+                       /*
+                        * The vmx_bitmap is not tied to a VM and so should
+                        * not be charged to a memcg.
+                        */
                        vmx_bitmap[i] = (unsigned long *)
                                __get_free_page(GFP_KERNEL);
                        if (!vmx_bitmap[i]) {
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index de6a9e7c647198e6b9f640498f82eea2042fb545..4950bb20e06ab09cc77a0e4688f299b5c692a972 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -246,6 +246,10 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 
        if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
            !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+               /*
+                * This allocation for vmx_l1d_flush_pages is not tied to a VM
+                * lifetime and so should not be charged to a memcg.
+                */
                page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
                if (!page)
                        return -ENOMEM;
@@ -2386,13 +2390,13 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
        return 0;
 }
 
-struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu)
+struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
 {
        int node = cpu_to_node(cpu);
        struct page *pages;
        struct vmcs *vmcs;
 
-       pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
+       pages = __alloc_pages_node(node, flags, vmcs_config.order);
        if (!pages)
                return NULL;
        vmcs = page_address(pages);
@@ -2439,7 +2443,8 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
        loaded_vmcs_init(loaded_vmcs);
 
        if (cpu_has_vmx_msr_bitmap()) {
-               loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+               loaded_vmcs->msr_bitmap = (unsigned long *)
+                               __get_free_page(GFP_KERNEL_ACCOUNT);
                if (!loaded_vmcs->msr_bitmap)
                        goto out_vmcs;
                memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
@@ -2480,7 +2485,7 @@ static __init int alloc_kvm_area(void)
        for_each_possible_cpu(cpu) {
                struct vmcs *vmcs;
 
-               vmcs = alloc_vmcs_cpu(false, cpu);
+               vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);
                if (!vmcs) {
                        free_kvm_area();
                        return -ENOMEM;
@@ -6530,7 +6535,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 static struct kvm *vmx_vm_alloc(void)
 {
-       struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
+       struct kvm_vmx *kvm_vmx = __vmalloc(sizeof(struct kvm_vmx),
+                                           GFP_KERNEL_ACCOUNT | __GFP_ZERO,
+                                           PAGE_KERNEL);
        return &kvm_vmx->kvm;
 }
 
@@ -6557,14 +6564,16 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 {
        int err;
-       struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+       struct vcpu_vmx *vmx;
        unsigned long *msr_bitmap;
        int cpu;
 
+       vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
        if (!vmx)
                return ERR_PTR(-ENOMEM);
 
-       vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL);
+       vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
+                       GFP_KERNEL_ACCOUNT);
        if (!vmx->vcpu.arch.guest_fpu) {
                printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
                err = -ENOMEM;
@@ -6586,12 +6595,12 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
         * for the guest, etc.
         */
        if (enable_pml) {
-               vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
                if (!vmx->pml_pg)
                        goto uninit_vcpu;
        }
 
-       vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
        BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
                     > PAGE_SIZE);
 
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index d7d1048d221b76b80fce48d8a7644a8482a8bc1b..1554cb45b3931a6de9d91b0092de1ff85d4c993f 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -479,7 +479,7 @@ static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
        return &(to_vmx(vcpu)->pi_desc);
 }
 
-struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu);
+struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
 void free_vmcs(struct vmcs *vmcs);
 int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
 void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
@@ -488,7 +488,8 @@ void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);
 
 static inline struct vmcs *alloc_vmcs(bool shadow)
 {
-       return alloc_vmcs_cpu(shadow, raw_smp_processor_id());
+       return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
+                             GFP_KERNEL_ACCOUNT);
 }
 
 u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);