www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
mm: keep kabi compatibility of may_expand_vm() etc
authorTong Chen <tong.c.chen@oracle.com>
Wed, 14 Aug 2019 06:18:17 +0000 (14:18 +0800)
committerBrian Maly <brian.maly@oracle.com>
Tue, 10 Sep 2019 14:53:06 +0000 (10:53 -0400)
One of the previous patches:

  mm: rework virtual memory accounting

modifies the prototypes of functions such as may_expand_vm() and renames the
shared_vm field of struct mm_struct, which breaks kABI compatibility.

To keep kABI compatibility intact, this patch changes the related function
prototypes back and renames the data_vm field back to shared_vm.

Orabug: 30145754
Signed-off-by: Tong Chen <tong.c.chen@oracle.com>
Reviewed-by: Junxiao Bi <junxiao.bi@oracle.com>
Signed-off-by: Brian Maly <brian.maly@oracle.com>
arch/ia64/kernel/perfmon.c
fs/proc/task_mmu.c
include/linux/mm.h
include/linux/mm_types.h
kernel/fork.c
mm/debug.c
mm/mmap.c
mm/mprotect.c
mm/mremap.c

index 9cd607b06964522859028d65fd920aacb1e61b2c..d2bcb315f3227b767bd4bd9b8ae7674f223b5d67 100644 (file)
@@ -2332,7 +2332,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
         */
        insert_vm_struct(mm, vma);
 
-       vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
+       vm_stat_account(vma->vm_mm, vma->vm_flags, NULL, vma_pages(vma));
        up_write(&task->mm->mmap_sem);
 
        /*
index 7c317bb6f11b024a0ae3b729b88db6f5b6be9bea..e8174b223712a621aa761c813ea7e87589913784 100644 (file)
@@ -63,7 +63,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
                mm->pinned_vm << (PAGE_SHIFT-10),
                hiwater_rss << (PAGE_SHIFT-10),
                total_rss << (PAGE_SHIFT-10),
-               mm->data_vm << (PAGE_SHIFT-10),
+               mm->shared_vm << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                ptes >> 10,
                pmds >> 10,
@@ -82,7 +82,7 @@ unsigned long task_statm(struct mm_struct *mm,
        *shared = get_mm_counter(mm, MM_FILEPAGES);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
-       *data = mm->data_vm + mm->stack_vm;
+       *data = mm->shared_vm + mm->stack_vm;
        *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
        return mm->total_vm;
 }
index 700fca8e9d241dc62000c5434c9eb56b6d196375..d5bea2b4ef8d92fd6dedad0c3aa0979cadac2070 100644 (file)
@@ -1898,8 +1898,18 @@ extern void mm_drop_all_locks(struct mm_struct *mm);
 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
 extern struct file *get_mm_exe_file(struct mm_struct *mm);
 
-extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
-extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
+/*
+ * Keep kABI compatibility of may_expand_vm()
+ */
+#define KABI_PATTERN_VAL       0xFAF1F2FB
+struct mm_bind_flags {
+       unsigned long pat_val;
+       struct mm_struct *mm;
+       vm_flags_t vm_flags;
+};
+
+extern int may_expand_vm(struct mm_struct *, unsigned long npages);
+extern void vm_stat_account(struct mm_struct *, vm_flags_t, struct file *file, long npages);
 
 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
                                   unsigned long addr, unsigned long len,
index 2a9c77be880b2b630891b68fedfe56f4e382fa5e..71582d94352922c9e62020db7afb05d5d0595f4a 100644 (file)
@@ -413,7 +413,7 @@ struct mm_struct {
        unsigned long total_vm;         /* Total pages mapped */
        unsigned long locked_vm;        /* Pages that have PG_mlocked set */
        unsigned long pinned_vm;        /* Refcount permanently increased */
-       unsigned long data_vm;          /* VM_WRITE & ~VM_SHARED/GROWSDOWN */
+       unsigned long shared_vm;        /* VM_WRITE & ~VM_SHARED/GROWSDOWN */
        unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE */
        unsigned long stack_vm;         /* VM_GROWSUP/DOWN */
        unsigned long def_flags;
index 6766b8bb930700b915f83e827950cefe05bf34a9..5dc74262c41c5cc40b41d3163b68a04a135765ff 100644 (file)
@@ -426,7 +426,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
        RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
 
        mm->total_vm = oldmm->total_vm;
-       mm->data_vm = oldmm->data_vm;
+       mm->shared_vm = oldmm->shared_vm;
        mm->exec_vm = oldmm->exec_vm;
        mm->stack_vm = oldmm->stack_vm;
 
@@ -445,7 +445,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                struct file *file;
 
                if (mpnt->vm_flags & VM_DONTCOPY) {
-                       vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
+                       vm_stat_account(mm, mpnt->vm_flags, NULL, -vma_pages(mpnt));
                        continue;
                }
                charge = 0;
index 264fca21f3c1bfe232a111945523c3ff963b47ad..335d697c7168ef2ecb99c7274ffc9b46d0a908be 100644 (file)
@@ -175,7 +175,7 @@ void dump_mm(const struct mm_struct *mm)
                "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
                "pgd %p mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d\n"
                "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
-               "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
+               "pinned_vm %lx shared_vm %lx exec_vm %lx stack_vm %lx\n"
                "start_code %lx end_code %lx start_data %lx end_data %lx\n"
                "start_brk %lx brk %lx start_stack %lx\n"
                "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
@@ -209,7 +209,7 @@ void dump_mm(const struct mm_struct *mm)
                mm_nr_pmds((struct mm_struct *)mm),
                mm->map_count,
                mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
-               mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
+               mm->pinned_vm, mm->shared_vm, mm->exec_vm, mm->stack_vm,
                mm->start_code, mm->end_code, mm->start_data, mm->end_data,
                mm->start_brk, mm->brk, mm->start_stack,
                mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
index 3ba2e790395bc80af5df531bb214ae82b75ef2dd..7cf883c6319b9f87efeb5180cd5ee978ffb96743 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1580,9 +1580,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        int error;
        struct rb_node **rb_link, *rb_parent;
        unsigned long charged = 0;
+       struct mm_bind_flags mbf = {KABI_PATTERN_VAL, mm, vm_flags};
 
        /* Check against address space limit. */
-       if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
+       if (!may_expand_vm((struct mm_struct *)&mbf, len >> PAGE_SHIFT)) {
                unsigned long nr_pages;
 
                /*
@@ -1594,7 +1595,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 
                nr_pages = count_vma_pages_range(mm, addr, addr + len);
 
-               if (!may_expand_vm(mm, vm_flags,
+               if (!may_expand_vm((struct mm_struct *)&mbf,
                                        (len >> PAGE_SHIFT) - nr_pages))
                        return -ENOMEM;
        }
@@ -1695,7 +1696,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 out:
        perf_event_mmap(vma);
 
-       vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
+       vm_stat_account(mm, vm_flags, NULL, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
                if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
                                        vma == get_gate_vma(current->mm)))
@@ -2146,9 +2147,10 @@ static int acct_stack_growth(struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
        unsigned long new_start;
+       struct mm_bind_flags mbf = {KABI_PATTERN_VAL, mm, vma->vm_flags};
 
        /* address space limit tests */
-       if (!may_expand_vm(mm, vma->vm_flags, grow))
+       if (!may_expand_vm((struct mm_struct *)&mbf, grow))
                return -ENOMEM;
 
        /* Stack limit test */
@@ -2250,7 +2252,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                                spin_lock(&mm->page_table_lock);
                                if (vma->vm_flags & VM_LOCKED)
                                        mm->locked_vm += grow;
-                               vm_stat_account(mm, vma->vm_flags, grow);
+                               vm_stat_account(mm, vma->vm_flags, NULL, grow);
                                anon_vma_interval_tree_pre_update_vma(vma);
                                vma->vm_end = address;
                                anon_vma_interval_tree_post_update_vma(vma);
@@ -2334,7 +2336,7 @@ int expand_downwards(struct vm_area_struct *vma,
                                spin_lock(&mm->page_table_lock);
                                if (vma->vm_flags & VM_LOCKED)
                                        mm->locked_vm += grow;
-                               vm_stat_account(mm, vma->vm_flags, grow);
+                               vm_stat_account(mm, vma->vm_flags, NULL, grow);
                                anon_vma_interval_tree_pre_update_vma(vma);
                                vma->vm_start = address;
                                vma->vm_pgoff -= grow;
@@ -2437,7 +2439,7 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 
                if (vma->vm_flags & VM_ACCOUNT)
                        nr_accounted += nrpages;
-               vm_stat_account(mm, vma->vm_flags, -nrpages);
+               vm_stat_account(mm, vma->vm_flags, NULL, -nrpages);
                vma = remove_vma(vma);
        } while (vma);
        vm_unacct_memory(nr_accounted);
@@ -2801,6 +2803,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
        struct rb_node **rb_link, *rb_parent;
        pgoff_t pgoff = addr >> PAGE_SHIFT;
        int error;
+       struct mm_bind_flags mbf = {KABI_PATTERN_VAL, mm, 0};
 
        len = PAGE_ALIGN(len);
        if (!len)
@@ -2832,7 +2835,8 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
        }
 
        /* Check against address space limits *after* clearing old maps... */
-       if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
+       mbf.vm_flags = flags;
+       if (!may_expand_vm((struct mm_struct *)&mbf, len >> PAGE_SHIFT))
                return -ENOMEM;
 
        if (mm->map_count > sysctl_max_map_count)
@@ -2867,7 +2871,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
 out:
        perf_event_mmap(vma);
        mm->total_vm += len >> PAGE_SHIFT;
-       mm->data_vm += len >> PAGE_SHIFT;
+       mm->shared_vm += len >> PAGE_SHIFT;
        if (flags & VM_LOCKED)
                mm->locked_vm += (len >> PAGE_SHIFT);
        vma->vm_flags |= VM_SOFTDIRTY;
@@ -3057,21 +3061,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  * Return true if the calling process may expand its vm space by the passed
  * number of pages
  */
-bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
+int may_expand_vm(struct mm_struct *mm, unsigned long npages)
 {
+       vm_flags_t flags = 0;
+       struct mm_bind_flags *pmbf = (struct mm_bind_flags *)mm;
+
+       if (pmbf->pat_val == KABI_PATTERN_VAL) {
+               mm = pmbf->mm;
+               flags = pmbf->vm_flags;
+       }
+
+       /* If a third-party module uses the old interface (no wrapper struct,
+        * i.e. mm points directly to an mm_struct instance), flags stays
+        * initialized to 0, so is_data_mapping() fails and the whole of
+        * may_expand_vm() is effectively equivalent to:
+        *
+        * return mm->total_vm + npages <= rlimit(RLIMIT_AS) >> PAGE_SHIFT;
+        *
+        * This matches the behavior before these patches were introduced.
+        */
+
        if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
                return false;
 
        if (is_data_mapping(flags) &&
-           mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
+           mm->shared_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
                /* Workaround for Valgrind */
                if (rlimit(RLIMIT_DATA) == 0 &&
-                   mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
+                   mm->shared_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
                        return true;
 
                pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
                             current->comm, current->pid,
-                            (mm->data_vm + npages) << PAGE_SHIFT,
+                            (mm->shared_vm + npages) << PAGE_SHIFT,
                             rlimit(RLIMIT_DATA),
                             ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
 
@@ -3082,7 +3104,7 @@ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
        return true;
 }
 
-void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
+void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, struct file *file, long npages)
 {
        mm->total_vm += npages;
 
@@ -3091,7 +3113,7 @@ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
        else if (is_stack_mapping(flags))
                mm->stack_vm += npages;
        else if (is_data_mapping(flags))
-               mm->data_vm += npages;
+               mm->shared_vm += npages;
 }
 
 static int special_mapping_fault(struct vm_area_struct *vma,
@@ -3181,7 +3203,7 @@ static struct vm_area_struct *__install_special_mapping(
        if (ret)
                goto out;
 
-       vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
+       vm_stat_account(mm, vma->vm_flags, NULL, len >> PAGE_SHIFT);
 
        perf_event_mmap(vma);
 
index 7b17c6d724ccc198e877201de07dad49488fd1d4..6320561eb172f4b7cecf05dae0bff5f5db00b05e 100644 (file)
@@ -326,6 +326,8 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
        pgoff_t pgoff;
        int error;
        int dirty_accountable = 0;
+       struct mm_bind_flags mbf_new = {KABI_PATTERN_VAL, mm, 0};
+       struct mm_bind_flags mbf_old = {KABI_PATTERN_VAL, mm, 0};
 
        if (newflags == oldflags) {
                *pprev = vma;
@@ -353,8 +355,10 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
         */
        if (newflags & VM_WRITE) {
                /* Check space limits when area turns into data. */
-               if (!may_expand_vm(mm, newflags, nrpages) &&
-                               may_expand_vm(mm, oldflags, nrpages))
+               mbf_new.vm_flags = newflags;
+               mbf_old.vm_flags = oldflags;
+               if (!may_expand_vm((struct mm_struct *)&mbf_new, nrpages) &&
+                               may_expand_vm((struct mm_struct *)&mbf_old, nrpages))
                        return -ENOMEM;
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                                                VM_SHARED|VM_NORESERVE))) {
@@ -403,8 +407,8 @@ success:
        change_protection(vma, start, end, vma->vm_page_prot,
                          dirty_accountable, 0);
 
-       vm_stat_account(mm, oldflags, -nrpages);
-       vm_stat_account(mm, newflags, nrpages);
+       vm_stat_account(mm, oldflags, NULL, -nrpages);
+       vm_stat_account(mm, newflags, NULL, nrpages);
        perf_event_mmap(vma);
        return 0;
 
index 4539cfce4b8f525e054c5c661f2b00b9871db2f1..624dccdf9fe3593e9fc73e5d9140aa5c016cb4d4 100644 (file)
@@ -323,7 +323,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
         * If this were a serious issue, we'd add a flag to do_munmap().
         */
        hiwater_vm = mm->hiwater_vm;
-       vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
+       vm_stat_account(mm, vma->vm_flags, NULL, new_len >> PAGE_SHIFT);
 
        if (do_munmap(mm, old_addr, old_len) < 0) {
                /* OOM: unable to split vma, just get accounts right */
@@ -352,6 +352,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = find_vma(mm, addr);
+       struct mm_bind_flags mbf = {KABI_PATTERN_VAL, mm, vma->vm_flags};
 
        if (!vma || vma->vm_start > addr)
                return ERR_PTR(-EFAULT);
@@ -384,7 +385,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
                        return ERR_PTR(-EAGAIN);
        }
 
-       if (!may_expand_vm(mm, vma->vm_flags,
+       if (!may_expand_vm((struct mm_struct *)&mbf,
                                (new_len - old_len) >> PAGE_SHIFT))
                return ERR_PTR(-ENOMEM);
 
@@ -552,7 +553,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
                                goto out;
                        }
 
-                       vm_stat_account(mm, vma->vm_flags, pages);
+                       vm_stat_account(mm, vma->vm_flags, NULL, pages);
                        if (vma->vm_flags & VM_LOCKED) {
                                mm->locked_vm += pages;
                                locked = true;