*/
insert_vm_struct(mm, vma);
- vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
+ vm_stat_account(vma->vm_mm, vma->vm_flags, NULL, vma_pages(vma));
up_write(&task->mm->mmap_sem);
/*
mm->pinned_vm << (PAGE_SHIFT-10),
hiwater_rss << (PAGE_SHIFT-10),
total_rss << (PAGE_SHIFT-10),
- mm->data_vm << (PAGE_SHIFT-10),
+ mm->shared_vm << (PAGE_SHIFT-10),
mm->stack_vm << (PAGE_SHIFT-10), text, lib,
ptes >> 10,
pmds >> 10,
*shared = get_mm_counter(mm, MM_FILEPAGES);
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
- *data = mm->data_vm + mm->stack_vm;
+ *data = mm->shared_vm + mm->stack_vm;
*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
return mm->total_vm;
}
extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
-extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
-extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
+/*
+ * To preserve KABI compatibility of may_expand_vm()
+ */
+#define KABI_PATTERN_VAL 0xFAF1F2FB
+struct mm_bind_flags {
+ unsigned long pat_val;
+ struct mm_struct *mm;
+ vm_flags_t vm_flags;
+};
+
+extern int may_expand_vm(struct mm_struct *, unsigned long npages);
+extern void vm_stat_account(struct mm_struct *, vm_flags_t, struct file *file, long npages);
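
A minimal sketch of the two calling conventions this wrapper enables (illustrative only, not part of the patch; mm, vm_flags and len are assumed to come from the surrounding caller, as in the mmap_region() hunk further down):

	struct mm_bind_flags mbf = {KABI_PATTERN_VAL, mm, vm_flags};

	/* Updated in-tree caller: vm_flags travel through the wrapper. */
	if (!may_expand_vm((struct mm_struct *)&mbf, len >> PAGE_SHIFT))
		return -ENOMEM;

	/* Unmodified legacy caller (e.g. a module built against the old KABI):
	 * no pattern value is found, so only the RLIMIT_AS check applies. */
	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;
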
extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long total_vm; /* Total pages mapped */
unsigned long locked_vm; /* Pages that have PG_mlocked set */
unsigned long pinned_vm; /* Refcount permanently increased */
- unsigned long data_vm; /* VM_WRITE & ~VM_SHARED/GROWSDOWN */
+ unsigned long shared_vm; /* VM_WRITE & ~VM_SHARED/GROWSDOWN */
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */
unsigned long stack_vm; /* VM_GROWSUP/DOWN */
unsigned long def_flags;
RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
mm->total_vm = oldmm->total_vm;
- mm->data_vm = oldmm->data_vm;
+ mm->shared_vm = oldmm->shared_vm;
mm->exec_vm = oldmm->exec_vm;
mm->stack_vm = oldmm->stack_vm;
struct file *file;
if (mpnt->vm_flags & VM_DONTCOPY) {
- vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
+ vm_stat_account(mm, mpnt->vm_flags, NULL, -vma_pages(mpnt));
continue;
}
charge = 0;
"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
"pgd %p mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d\n"
"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
- "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
+ "pinned_vm %lx shared_vm %lx exec_vm %lx stack_vm %lx\n"
"start_code %lx end_code %lx start_data %lx end_data %lx\n"
"start_brk %lx brk %lx start_stack %lx\n"
"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
mm_nr_pmds((struct mm_struct *)mm),
mm->map_count,
mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
- mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
+ mm->pinned_vm, mm->shared_vm, mm->exec_vm, mm->stack_vm,
mm->start_code, mm->end_code, mm->start_data, mm->end_data,
mm->start_brk, mm->brk, mm->start_stack,
mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
int error;
struct rb_node **rb_link, *rb_parent;
unsigned long charged = 0;
+ struct mm_bind_flags mbf = {KABI_PATTERN_VAL, mm, vm_flags};
/* Check against address space limit. */
- if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
+ if (!may_expand_vm((struct mm_struct *)&mbf, len >> PAGE_SHIFT)) {
unsigned long nr_pages;
/*
nr_pages = count_vma_pages_range(mm, addr, addr + len);
- if (!may_expand_vm(mm, vm_flags,
+ if (!may_expand_vm((struct mm_struct *)&mbf,
(len >> PAGE_SHIFT) - nr_pages))
return -ENOMEM;
}
out:
perf_event_mmap(vma);
- vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
+ vm_stat_account(mm, vm_flags, NULL, len >> PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
vma == get_gate_vma(current->mm)))
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
unsigned long new_start;
+ struct mm_bind_flags mbf = {KABI_PATTERN_VAL, mm, vma->vm_flags};
/* address space limit tests */
- if (!may_expand_vm(mm, vma->vm_flags, grow))
+ if (!may_expand_vm((struct mm_struct *)&mbf, grow))
return -ENOMEM;
/* Stack limit test */
spin_lock(&mm->page_table_lock);
if (vma->vm_flags & VM_LOCKED)
mm->locked_vm += grow;
- vm_stat_account(mm, vma->vm_flags, grow);
+ vm_stat_account(mm, vma->vm_flags, NULL, grow);
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_end = address;
anon_vma_interval_tree_post_update_vma(vma);
spin_lock(&mm->page_table_lock);
if (vma->vm_flags & VM_LOCKED)
mm->locked_vm += grow;
- vm_stat_account(mm, vma->vm_flags, grow);
+ vm_stat_account(mm, vma->vm_flags, NULL, grow);
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_start = address;
vma->vm_pgoff -= grow;
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
- vm_stat_account(mm, vma->vm_flags, -nrpages);
+ vm_stat_account(mm, vma->vm_flags, NULL, -nrpages);
vma = remove_vma(vma);
} while (vma);
vm_unacct_memory(nr_accounted);
struct rb_node **rb_link, *rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
+ struct mm_bind_flags mbf = {KABI_PATTERN_VAL, mm, 0};
len = PAGE_ALIGN(len);
if (!len)
}
/* Check against address space limits *after* clearing old maps... */
- if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
+ mbf.vm_flags = flags;
+ if (!may_expand_vm((struct mm_struct *)&mbf, len >> PAGE_SHIFT))
return -ENOMEM;
if (mm->map_count > sysctl_max_map_count)
out:
perf_event_mmap(vma);
mm->total_vm += len >> PAGE_SHIFT;
- mm->data_vm += len >> PAGE_SHIFT;
+ mm->shared_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED)
mm->locked_vm += (len >> PAGE_SHIFT);
vma->vm_flags |= VM_SOFTDIRTY;
* Return true if the calling process may expand its vm space by the passed
* number of pages
*/
-bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
+int may_expand_vm(struct mm_struct *mm, unsigned long npages)
{
+ vm_flags_t flags = 0;
+ struct mm_bind_flags *pmbf = (struct mm_bind_flags *)mm;
+
+ if (pmbf->pat_val == KABI_PATTERN_VAL) {
+ mm = pmbf->mm;
+ flags = pmbf->vm_flags;
+ }
+
+ /* If a third-party module calls the old interface (no flags; mm points
+ * directly to an mm_struct instance), flags stays initialized to 0, so
+ * is_data_mapping() returns false and the whole of may_expand_vm() is
+ * effectively equivalent to:
+ *
+ * return mm->total_vm + npages <= rlimit(RLIMIT_AS) >> PAGE_SHIFT;
+ *
+ * which matches the behaviour before these patches were introduced.
+ */
+
if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
return false;
if (is_data_mapping(flags) &&
- mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
+ mm->shared_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
/* Workaround for Valgrind */
if (rlimit(RLIMIT_DATA) == 0 &&
- mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
+ mm->shared_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
return true;
pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
current->comm, current->pid,
- (mm->data_vm + npages) << PAGE_SHIFT,
+ (mm->shared_vm + npages) << PAGE_SHIFT,
rlimit(RLIMIT_DATA),
ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
return true;
}
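
To make the detection step easier to reason about in isolation, here is a minimal self-contained userspace sketch of the same pattern-value trick (illustrative only, not part of the patch; the struct definitions are stand-ins for the kernel ones, and like the kernel it should be built with -fno-strict-aliasing since it type-puns through a cast):

	#include <stdio.h>

	#define KABI_PATTERN_VAL 0xFAF1F2FBUL

	struct mm_struct {
		unsigned long total_vm;	/* stand-in for the real first members */
	};

	struct mm_bind_flags {
		unsigned long pat_val;	/* must hold KABI_PATTERN_VAL */
		struct mm_struct *mm;
		unsigned long vm_flags;
	};

	/* Same unwrapping logic as the patched may_expand_vm() above. */
	static int may_expand_vm(struct mm_struct *mm, unsigned long npages)
	{
		unsigned long flags = 0;
		struct mm_bind_flags *pmbf = (struct mm_bind_flags *)mm;

		if (pmbf->pat_val == KABI_PATTERN_VAL) {
			mm = pmbf->mm;		/* wrapped caller: unwrap */
			flags = pmbf->vm_flags;
		}
		printf("mm=%p flags=%#lx npages=%lu\n", (void *)mm, flags, npages);
		return 1;
	}

	int main(void)
	{
		struct mm_struct mm = { .total_vm = 0 };
		struct mm_bind_flags mbf = { KABI_PATTERN_VAL, &mm, 0x2 /* VM_WRITE */ };

		may_expand_vm(&mm, 16);				/* legacy call: flags stay 0 */
		may_expand_vm((struct mm_struct *)&mbf, 16);	/* wrapped call: flags recovered */
		return 0;
	}

The trick relies on a real mm_struct never beginning with the magic value; the patch accepts that small aliasing risk in exchange for keeping the exported function signature unchanged.
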
-void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
+void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, struct file *file, long npages)
{
mm->total_vm += npages;
else if (is_stack_mapping(flags))
mm->stack_vm += npages;
else if (is_data_mapping(flags))
- mm->data_vm += npages;
+ mm->shared_vm += npages;
}
static int special_mapping_fault(struct vm_area_struct *vma,
if (ret)
goto out;
- vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
+ vm_stat_account(mm, vma->vm_flags, NULL, len >> PAGE_SHIFT);
perf_event_mmap(vma);
pgoff_t pgoff;
int error;
int dirty_accountable = 0;
+ struct mm_bind_flags mbf_new = {KABI_PATTERN_VAL, mm, 0};
+ struct mm_bind_flags mbf_old = {KABI_PATTERN_VAL, mm, 0};
if (newflags == oldflags) {
*pprev = vma;
*/
if (newflags & VM_WRITE) {
/* Check space limits when area turns into data. */
- if (!may_expand_vm(mm, newflags, nrpages) &&
- may_expand_vm(mm, oldflags, nrpages))
+ mbf_new.vm_flags = newflags;
+ mbf_old.vm_flags = oldflags;
+ if (!may_expand_vm((struct mm_struct *)&mbf_new, nrpages) &&
+ may_expand_vm((struct mm_struct *)&mbf_old, nrpages))
return -ENOMEM;
if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
VM_SHARED|VM_NORESERVE))) {
change_protection(vma, start, end, vma->vm_page_prot,
dirty_accountable, 0);
- vm_stat_account(mm, oldflags, -nrpages);
- vm_stat_account(mm, newflags, nrpages);
+ vm_stat_account(mm, oldflags, NULL, -nrpages);
+ vm_stat_account(mm, newflags, NULL, nrpages);
perf_event_mmap(vma);
return 0;
* If this were a serious issue, we'd add a flag to do_munmap().
*/
hiwater_vm = mm->hiwater_vm;
- vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
+ vm_stat_account(mm, vma->vm_flags, NULL, new_len >> PAGE_SHIFT);
if (do_munmap(mm, old_addr, old_len) < 0) {
/* OOM: unable to split vma, just get accounts right */
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = find_vma(mm, addr);
+ struct mm_bind_flags mbf = {KABI_PATTERN_VAL, mm, 0};
if (!vma || vma->vm_start > addr)
return ERR_PTR(-EFAULT);
return ERR_PTR(-EAGAIN);
}
+ /* vma is known to be non-NULL here, so its flags can be bound safely */
+ mbf.vm_flags = vma->vm_flags;
- if (!may_expand_vm(mm, vma->vm_flags,
+ if (!may_expand_vm((struct mm_struct *)&mbf,
(new_len - old_len) >> PAGE_SHIFT))
return ERR_PTR(-ENOMEM);
goto out;
}
- vm_stat_account(mm, vma->vm_flags, pages);
+ vm_stat_account(mm, vma->vm_flags, NULL, pages);
if (vma->vm_flags & VM_LOCKED) {
mm->locked_vm += pages;
locked = true;