*/
        insert_vm_struct(mm, vma);
 
-       vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
-                                                       vma_pages(vma));
+       vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
        up_write(&task->mm->mmap_sem);
 
        /*
 
 
 void task_mem(struct seq_file *m, struct mm_struct *mm)
 {
-       unsigned long data, text, lib, swap, ptes, pmds, anon, file, shmem;
+       unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
        unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
 
        anon = get_mm_counter(mm, MM_ANONPAGES);
        if (hiwater_rss < mm->hiwater_rss)
                hiwater_rss = mm->hiwater_rss;
 
-       data = mm->total_vm - mm->shared_vm - mm->stack_vm;
        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        swap = get_mm_counter(mm, MM_SWAPENTS);
                anon << (PAGE_SHIFT-10),
                file << (PAGE_SHIFT-10),
                shmem << (PAGE_SHIFT-10),
-               data << (PAGE_SHIFT-10),
+               mm->data_vm << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                ptes >> 10,
                pmds >> 10,
                        get_mm_counter(mm, MM_SHMEMPAGES);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
-       *data = mm->total_vm - mm->shared_vm;
+       *data = mm->data_vm + mm->stack_vm;
        *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
        return mm->total_vm;
 }
 
 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
 extern struct file *get_mm_exe_file(struct mm_struct *mm);
 
-extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
+extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
+extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
+
 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
                                   unsigned long addr, unsigned long len,
                                   unsigned long flags,
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
                               unsigned long size, pte_fn_t fn, void *data);
 
-#ifdef CONFIG_PROC_FS
-void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
-#else
-static inline void vm_stat_account(struct mm_struct *mm,
-                       unsigned long flags, struct file *file, long pages)
-{
-       mm->total_vm += pages;
-}
-#endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern bool _debug_pagealloc_enabled;
 
        unsigned long total_vm;         /* Total pages mapped */
        unsigned long locked_vm;        /* Pages that have PG_mlocked set */
        unsigned long pinned_vm;        /* Refcount permanently increased */
-       unsigned long shared_vm;        /* Shared pages (files) */
+       unsigned long data_vm;          /* VM_WRITE & ~VM_SHARED/GROWSDOWN */
        unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE */
        unsigned long stack_vm;         /* VM_GROWSUP/DOWN */
        unsigned long def_flags;
 
        RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
 
        mm->total_vm = oldmm->total_vm;
-       mm->shared_vm = oldmm->shared_vm;
+       mm->data_vm = oldmm->data_vm;
        mm->exec_vm = oldmm->exec_vm;
        mm->stack_vm = oldmm->stack_vm;
 
                struct file *file;
 
                if (mpnt->vm_flags & VM_DONTCOPY) {
-                       vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
-                                                       -vma_pages(mpnt));
+                       vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
                        continue;
                }
                charge = 0;
 
                "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
                "pgd %p mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d\n"
                "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
-               "pinned_vm %lx shared_vm %lx exec_vm %lx stack_vm %lx\n"
+               "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
                "start_code %lx end_code %lx start_data %lx end_data %lx\n"
                "start_brk %lx brk %lx start_stack %lx\n"
                "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
                mm_nr_pmds((struct mm_struct *)mm),
                mm->map_count,
                mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
-               mm->pinned_vm, mm->shared_vm, mm->exec_vm, mm->stack_vm,
+               mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
                mm->start_code, mm->end_code, mm->start_data, mm->end_data,
                mm->start_brk, mm->brk, mm->start_stack,
                mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
 
        return NULL;
 }
 
-#ifdef CONFIG_PROC_FS
-void vm_stat_account(struct mm_struct *mm, unsigned long flags,
-                                               struct file *file, long pages)
-{
-       const unsigned long stack_flags
-               = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
-
-       mm->total_vm += pages;
-
-       if (file) {
-               mm->shared_vm += pages;
-               if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
-                       mm->exec_vm += pages;
-       } else if (flags & stack_flags)
-               mm->stack_vm += pages;
-}
-#endif /* CONFIG_PROC_FS */
-
 /*
  * If a hint addr is less than mmap_min_addr change hint to be as
  * low as possible but still greater than mmap_min_addr
        unsigned long charged = 0;
 
        /* Check against address space limit. */
-       if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
+       if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
                unsigned long nr_pages;
 
                /*
                 */
                nr_pages = count_vma_pages_range(mm, addr, addr + len);
 
-               if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages))
+               if (!may_expand_vm(mm, vm_flags,
+                                       (len >> PAGE_SHIFT) - nr_pages))
                        return -ENOMEM;
        }
 
 out:
        perf_event_mmap(vma);
 
-       vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
+       vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
                if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
                                        vma == get_gate_vma(current->mm)))
        unsigned long new_start, actual_size;
 
        /* address space limit tests */
-       if (!may_expand_vm(mm, grow))
+       if (!may_expand_vm(mm, vma->vm_flags, grow))
                return -ENOMEM;
 
        /* Stack limit test */
                                spin_lock(&mm->page_table_lock);
                                if (vma->vm_flags & VM_LOCKED)
                                        mm->locked_vm += grow;
-                               vm_stat_account(mm, vma->vm_flags,
-                                               vma->vm_file, grow);
+                               vm_stat_account(mm, vma->vm_flags, grow);
                                anon_vma_interval_tree_pre_update_vma(vma);
                                vma->vm_end = address;
                                anon_vma_interval_tree_post_update_vma(vma);
                                spin_lock(&mm->page_table_lock);
                                if (vma->vm_flags & VM_LOCKED)
                                        mm->locked_vm += grow;
-                               vm_stat_account(mm, vma->vm_flags,
-                                               vma->vm_file, grow);
+                               vm_stat_account(mm, vma->vm_flags, grow);
                                anon_vma_interval_tree_pre_update_vma(vma);
                                vma->vm_start = address;
                                vma->vm_pgoff -= grow;
 
                if (vma->vm_flags & VM_ACCOUNT)
                        nr_accounted += nrpages;
-               vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
+               vm_stat_account(mm, vma->vm_flags, -nrpages);
                vma = remove_vma(vma);
        } while (vma);
        vm_unacct_memory(nr_accounted);
        }
 
        /* Check against address space limits *after* clearing old maps... */
-       if (!may_expand_vm(mm, len >> PAGE_SHIFT))
+       if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
                return -ENOMEM;
 
        if (mm->map_count > sysctl_max_map_count)
 out:
        perf_event_mmap(vma);
        mm->total_vm += len >> PAGE_SHIFT;
+       mm->data_vm += len >> PAGE_SHIFT;
        if (flags & VM_LOCKED)
                mm->locked_vm += (len >> PAGE_SHIFT);
        vma->vm_flags |= VM_SOFTDIRTY;
  * Return true if the calling process may expand its vm space by the passed
  * number of pages
  */
-int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
 {
-       return mm->total_vm + npages <= rlimit(RLIMIT_AS) >> PAGE_SHIFT;
+       if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
+               return false;
+
+       if ((flags & (VM_WRITE | VM_SHARED | (VM_STACK_FLAGS &
+                               (VM_GROWSUP | VM_GROWSDOWN)))) == VM_WRITE)
+               return mm->data_vm + npages <= rlimit(RLIMIT_DATA) >> PAGE_SHIFT;
+
+       return true;
+}
+
+void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
+{
+       mm->total_vm += npages;
+
+       if ((flags & (VM_EXEC | VM_WRITE)) == VM_EXEC)
+               mm->exec_vm += npages;
+       else if (flags & (VM_STACK_FLAGS & (VM_GROWSUP | VM_GROWSDOWN)))
+               mm->stack_vm += npages;
+       else if ((flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
+               mm->data_vm += npages;
 }
 
 static int special_mapping_fault(struct vm_area_struct *vma,
        if (ret)
                goto out;
 
-       mm->total_vm += len >> PAGE_SHIFT;
+       vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
 
        perf_event_mmap(vma);
 
 
         * even if read-only so there is no need to account for them here
         */
        if (newflags & VM_WRITE) {
+               /* Check space limits when area turns into data. */
+               if (!may_expand_vm(mm, newflags, nrpages) &&
+                               may_expand_vm(mm, oldflags, nrpages))
+                       return -ENOMEM;
                if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
                                                VM_SHARED|VM_NORESERVE))) {
                        charged = nrpages;
                populate_vma_page_range(vma, start, end, NULL);
        }
 
-       vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
-       vm_stat_account(mm, newflags, vma->vm_file, nrpages);
+       vm_stat_account(mm, oldflags, -nrpages);
+       vm_stat_account(mm, newflags, nrpages);
        perf_event_mmap(vma);
        return 0;
 
 
         * If this were a serious issue, we'd add a flag to do_munmap().
         */
        hiwater_vm = mm->hiwater_vm;
-       vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
+       vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
 
        /* Tell pfnmap has moved from this vma */
        if (unlikely(vma->vm_flags & VM_PFNMAP))
                        return ERR_PTR(-EAGAIN);
        }
 
-       if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
+       if (!may_expand_vm(mm, vma->vm_flags,
+                               (new_len - old_len) >> PAGE_SHIFT))
                return ERR_PTR(-ENOMEM);
 
        if (vma->vm_flags & VM_ACCOUNT) {
                                goto out;
                        }
 
-                       vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
+                       vm_stat_account(mm, vma->vm_flags, pages);
                        if (vma->vm_flags & VM_LOCKED) {
                                mm->locked_vm += pages;
                                locked = true;