mlock_fixup() will call make_pages_present() in the hugetlbfs VMA range to
    allocate the huge pages and populate the ptes.
 
-3) VMAs with VM_DONTEXPAND or VM_RESERVED are generally userspace mappings of
-   kernel pages, such as the VDSO page, relay channel pages, etc.  These pages
+3) VMAs with VM_DONTEXPAND are generally userspace mappings of kernel pages,
+   such as the VDSO page, relay channel pages, etc.  These pages
    are inherently unevictable and are not managed on the LRU lists.
    mlock_fixup() treats these VMAs the same as hugetlbfs VMAs.  It calls
    make_pages_present() to populate the ptes.
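
A minimal sketch of the driver-side pattern these VMAs converge on after
this change (a hypothetical driver; example_mmap/example_vm_ops are
illustrative names, not code from this patch):

	#include <linux/fs.h>
	#include <linux/mm.h>

	static const struct vm_operations_struct example_vm_ops; /* ->fault elided */

	static int example_mmap(struct file *filp, struct vm_area_struct *vma)
	{
		/* was: vma->vm_flags |= VM_RESERVED; */
		vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_ops = &example_vm_ops;	/* ->fault fills the ptes on demand */
		vma->vm_private_data = filp->private_data;
		return 0;
	}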
 
                base = sparse ? hose->sparse_io_base : hose->dense_io_base;
 
        vma->vm_pgoff += base >> PAGE_SHIFT;
-       vma->vm_flags |= (VM_IO | VM_RESERVED);
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 
        return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                  vma->vm_end - vma->vm_start,
 
         */
        vma->vm_mm           = mm;
        vma->vm_file         = get_file(filp);
-       vma->vm_flags        = VM_READ| VM_MAYREAD |VM_RESERVED;
+       vma->vm_flags        = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
        vma->vm_page_prot    = PAGE_READONLY; /* XXX may need to change */
 
        /*
 
                        vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
-                       vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
+                       vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
+                                       VM_DONTEXPAND | VM_DONTDUMP;
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
 
 
 static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
 {
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &kvm_rma_vm_ops;
        return 0;
 }
 
 static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
                                            enum pci_mmap_state mmap_state)
 {
-       vma->vm_flags |= (VM_IO | VM_RESERVED);
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 }
 
 /* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 
        return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYEXEC |
-                                      VM_RESERVED,
+                                      VM_DONTEXPAND | VM_DONTDUMP,
                                       NULL);
 }
 
 
 
        prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
 
-       BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
-                               (VM_PFNMAP | VM_RESERVED | VM_IO)));
+       BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
        rmd.mfn = mfn;
        rmd.prot = prot;
 
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-       /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
+       /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            __pa(soft->gscr_addr) >> PAGE_SHIFT,
 
 
        vma->vm_ops = &mmap_mem_ops;
 
-       /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
+       /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
 
        atomic_set(&vdata->refcnt, 1);
        vma->vm_private_data = vdata;
 
-       vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
+       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &mspec_vm_ops;
 
                goto out_unlock;
        }
 
-       vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = obj->dev->driver->gem_vm_ops;
        vma->vm_private_data = map->handle;
        vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 
 
        vma->vm_ops = &drm_vm_dma_ops;
 
-       vma->vm_flags |= VM_RESERVED;   /* Don't swap */
-       vma->vm_flags |= VM_DONTEXPAND;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 
        drm_vm_open_locked(dev, vma);
        return 0;
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
-               /* Don't let this area swap.  Change when
-                  DRM_KERNEL advisory is supported. */
-               vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
-               vma->vm_flags |= VM_RESERVED;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
-       vma->vm_flags |= VM_RESERVED;   /* Don't swap */
-       vma->vm_flags |= VM_DONTEXPAND;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 
        drm_vm_open_locked(dev, vma);
        return 0;
 
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       vma->vm_flags |= (VM_IO | VM_RESERVED);
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 
        update_vm_cache_attr(exynos_gem_obj, vma);
 
 
         */
        vma->vm_ops = &psbfb_vm_ops;
        vma->vm_private_data = (void *)psbfb;
-       vma->vm_flags |= VM_RESERVED | VM_IO |
-                                       VM_MIXEDMAP | VM_DONTEXPAND;
+       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
 }
 
 
         */
 
        vma->vm_private_data = bo;
-       vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
 out_unref:
        ttm_bo_unref(&bo);
 
        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
-       vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+       vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
        return 0;
 }
 EXPORT_SYMBOL(ttm_fbdev_mmap);
 
                        size = 0;
        }
 
-       vma->vm_flags |= VM_RESERVED;   /* avoid to swap out this VMA */
+       /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
        return 0;
 }
 
 
        physical = galpas->user.fw_handle;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
-       /* VM_IO | VM_RESERVED are set by remap_pfn_range() */
+       /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
        ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
                           vma->vm_page_prot);
        if (unlikely(ret)) {
        u64 start, ofs;
        struct page *page;
 
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        start = vma->vm_start;
        for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
                u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
 
 
        vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
        vma->vm_ops = &ipath_file_vm_ops;
-       vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        ret = 1;
 
 bail:
 
 
        vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
        vma->vm_ops = &qib_file_vm_ops;
-       vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        ret = 1;
 
 bail:
 
 
        vma->vm_ops = &meye_vm_ops;
        vma->vm_flags &= ~VM_IO;        /* not I/O memory */
-       vma->vm_flags |= VM_RESERVED;   /* avoid to swap out this VMA */
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = (void *) (offset / gbufsize);
        meye_vm_open(vma);
 
 
 
        q->bufs[i]->baddr = vma->vm_start;
 
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        vma->vm_ops = &omap_vout_vm_ops;
        vma->vm_private_data = (void *) vout;
 
 
        fb->map_count = 1;
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_flags &= ~VM_IO;
        vma->vm_private_data = fb;
        vma->vm_file = file;
 
                return -EINVAL;
        }
 
-       vma->vm_flags |= VM_IO;
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 
        pos = cam->frame[i].bufmem;
        while (size > 0) { /* size is page-aligned */
 
        }
 
        /* VM_IO is eventually going to replace PageReserved altogether */
-       vma->vm_flags |= VM_IO;
-       vma->vm_flags |= VM_RESERVED;   /* avoid to swap out this VMA */
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 
        pos = usbvision->frame[i].data;
        while (size > 0) {
 
        map->count    = 1;
        map->q        = q;
        vma->vm_ops   = &videobuf_vm_ops;
-       vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
        vma->vm_private_data = map;
        dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
 
        }
 
        vma->vm_ops          = &videobuf_vm_ops;
-       vma->vm_flags       |= VM_DONTEXPAND | VM_RESERVED;
+       vma->vm_flags       |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = map;
 
        dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
 
                return ret;
        }
 
-       vma->vm_flags           |= VM_DONTEXPAND | VM_RESERVED;
+       vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data    = priv;
        vma->vm_ops             = vm_ops;
 
 
                return -EINVAL;
        }
 
-       /* IO memory (stop cacheing) */
-       vma->vm_flags |= VM_IO | VM_RESERVED;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        return io_remap_pfn_range(vma, vma->vm_start, addr, vsize,
 
                                vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
                return -EINVAL;
 
-       vma->vm_flags |=
-           (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND | VM_PFNMAP |
-                       VM_RESERVED);
+       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
+                        VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_page_prot = PAGE_SHARED;
        vma->vm_ops = &gru_vm_ops;
 
 
                        return -EINVAL;
                if (set_vm_offset(vma, off) < 0)
                        return -EINVAL;
-               vma->vm_flags |= VM_IO | VM_RESERVED;
+               vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 
 #ifdef pgprot_noncached
                if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
 
        }
 
        sfp->mmap_called = 1;
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = sfp;
        vma->vm_ops = &sg_mmap_vm_ops;
        return 0;
 
                goto out_unlock;
        }
 
-       vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = obj->dev->driver->gem_vm_ops;
        vma->vm_private_data = obj;
        vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 
 {
        u32 status;
 
-       vma->vm_flags |= VM_RESERVED | VM_IO;
+       /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        dev_dbg(bridge, "%s: vm filp %p start %lx end %lx page_prot %ulx "
 
        if (mi < 0)
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_RESERVED;
-
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        return remap_pfn_range(vma,
 
 static int uio_mmap_logical(struct vm_area_struct *vma)
 {
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &uio_vm_ops;
        uio_vma_open(vma);
        return 0;
 
 {
        /* don't do anything here: "fault" will set up page table entries */
        vma->vm_ops = &mon_bin_vm_ops;
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = filp->private_data;
        mon_bin_vma_open(vma);
        return 0;
 
 #ifndef MMU
        /* this is uClinux (no MMU) specific code */
 
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_start = videomemory;
 
        return 0;
 
        off = vma->vm_pgoff << PAGE_SHIFT;
        size = vma->vm_end - vma->vm_start;
 
-       /* To stop the swapper from even considering these pages. */
-       vma->vm_flags |= (VM_IO | VM_RESERVED);
+       /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
 
        if (((vma->vm_pgoff == 0) && (size == info->fix.smem_len)) ||
            ((off == info->fix.smem_len) && (size == PAGE_SIZE)))
 
                                vma->vm_page_prot))
                return -EAGAIN;
 
-       vma->vm_flags |= VM_RESERVED;   /* avoid to swap out this VMA */
+       /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
        return 0;
-
 }
 
 static struct fb_ops unifb_ops = {
 
 static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
        vma->vm_ops = &fb_deferred_io_vm_ops;
-       vma->vm_flags |= ( VM_RESERVED | VM_DONTEXPAND );
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        if (!(info->flags & FBINFO_VIRTFB))
                vma->vm_flags |= VM_IO;
        vma->vm_private_data = info;
 
                return -EINVAL;
        off += start;
        vma->vm_pgoff = off >> PAGE_SHIFT;
-       /* This is an IO map - tell maydump to skip this VMA */
-       vma->vm_flags |= VM_IO | VM_RESERVED;
+       /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        fb_pgprotect(file, vma, off);
        if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
 
        pgprot_val(vma->vm_page_prot) =
                pgprot_fb(pgprot_val(vma->vm_page_prot));
 
-       vma->vm_flags |= VM_IO | VM_RESERVED;
+       /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
 
        /* look for the starting tile */
        tile = &gbe_tiles.cpu[offset >> TILE_SHIFT];
 
        DBG("user mmap region start %lx, len %d, off %lx\n", start, len, off);
 
        vma->vm_pgoff = off >> PAGE_SHIFT;
-       vma->vm_flags |= VM_IO | VM_RESERVED;
+       /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        vma->vm_ops = &mmap_user_ops;
        vma->vm_private_data = rg;
 
 
        off = vma->vm_pgoff << PAGE_SHIFT;
 
-       /* To stop the swapper from even considering these pages */
-       vma->vm_flags |= (VM_IO | VM_RESERVED);
-       
+       /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
+
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
        /* Each page, see which map applies */
 
                        size = 0;
        }
 
-       vma->vm_flags |= VM_RESERVED;   /* avoid to swap out this VMA */
        return 0;
 }
 
 
                        size = 0;
        }
 
-       vma->vm_flags |= VM_RESERVED;   /* avoid to swap out this VMA */
        return 0;
 }
 
 
        offset += vinfo->vram_start;
        pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
        pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
-       vma->vm_flags |= VM_RESERVED | VM_IO;
        if (remap_pfn_range(vma, vma->vm_start, offset >> PAGE_SHIFT,
                                                size, vma->vm_page_prot))
                return -EAGAIN;
 
                        size = 0;
        }
 
-       vma->vm_flags |= VM_RESERVED;   /* avoid to swap out this VMA */
        return 0;
 
 }
 
 
        vma->vm_private_data = vm_priv;
 
-       vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 
        vma->vm_ops = &gntalloc_vmops;
 
 
 
        vma->vm_ops = &gntdev_vmops;
 
-       vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 
        if (use_ptemod)
                vma->vm_flags |= VM_DONTCOPY;
 
 {
        /* DONTCOPY is essential for Xen because copy_page_range doesn't know
         * how to recreate these mappings */
-       vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
+       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
+                        VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &privcmd_vm_ops;
        vma->vm_private_data = NULL;
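
VM_DONTCOPY, mentioned in the comment above, has a direct userspace
analogue: madvise(MADV_DONTFORK) sets the same flag, so the mapping is
simply absent in children created with fork(). Illustrative only:

	#include <sys/mman.h>
	#include <stdio.h>

	static void dont_fork(void *buf, size_t len)
	{
		if (madvise(buf, len, MADV_DONTFORK) != 0)
			perror("madvise(MADV_DONTFORK)");
	}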
 
 
        }
 
        /* Do not dump I/O mapped devices or special mappings */
-       if (vma->vm_flags & (VM_IO | VM_RESERVED))
+       if (vma->vm_flags & VM_IO)
                return 0;
 
        /* By default, dump shared memory if mapped from an anonymous file. */
 
        int dump_ok;
 
        /* Do not dump I/O mapped devices or special mappings */
-       if (vma->vm_flags & (VM_IO | VM_RESERVED)) {
+       if (vma->vm_flags & VM_IO) {
                kdcore("%08lx: %08lx: no (IO)", vma->vm_start, vma->vm_flags);
                return 0;
        }
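
The "skip this vma" role that VM_RESERVED used to play in core dumps now
belongs to VM_DONTDUMP, which the dump code tests separately and which
userspace can also set directly. An illustrative userspace sketch
(assumes the MADV_DONTDUMP/MADV_DODUMP pair, available since v3.4):

	#include <sys/mman.h>
	#include <stdio.h>

	/* Exclude a buffer from core dumps; MADV_DODUMP undoes it. */
	static void exclude_from_core(void *buf, size_t len)
	{
		if (madvise(buf, len, MADV_DONTDUMP) != 0)
			perror("madvise(MADV_DONTDUMP)");
	}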
 
         * way when do_mmap_pgoff unwinds (may be important on powerpc
         * and ia64).
         */
-       vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
+       vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
        vma->vm_ops = &hugetlb_vm_ops;
 
        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
 
                "VmPTE:\t%8lu kB\n"
                "VmSwap:\t%8lu kB\n",
                hiwater_vm << (PAGE_SHIFT-10),
-               (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
+               total_vm << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                mm->pinned_vm << (PAGE_SHIFT-10),
                hiwater_rss << (PAGE_SHIFT-10),
 
 /* Check if a vma is migratable */
 static inline int vma_migratable(struct vm_area_struct *vma)
 {
-       if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
+       if (vma->vm_flags & (VM_IO | VM_HUGETLB | VM_PFNMAP))
                return 0;
        /*
         * Migration allocates pages in the highest zone. If we cannot
 
 
 #define VM_DONTCOPY    0x00020000      /* Do not copy this vma on fork */
 #define VM_DONTEXPAND  0x00040000      /* Cannot expand with mremap() */
-#define VM_RESERVED    0x00080000      /* Count as reserved_vm like IO */
 #define VM_ACCOUNT     0x00100000      /* Is a VM accounted object */
 #define VM_NORESERVE   0x00200000      /* should the VM suppress accounting */
 #define VM_HUGETLB     0x00400000      /* Huge TLB Page VM */
  * Special vmas that are non-mergable, non-mlock()able.
  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
 
 /*
  * mapping from the currently active vm_flags protection bits (the
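
For reference, the main consumer of VM_SPECIAL is the merge path:
vma_merge() in mm/mmap.c bails out early with a test along these lines,
so the former VM_RESERVED users stay unmergeable via VM_IO, VM_PFNMAP or
VM_DONTEXPAND:

	if (vm_flags & VM_SPECIAL)
		return NULL;	/* never merge special mappings */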
 
        unsigned long shared_vm;        /* Shared pages (files) */
        unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE */
        unsigned long stack_vm;         /* VM_GROWSUP/DOWN */
-       unsigned long reserved_vm;      /* VM_RESERVED|VM_IO pages */
        unsigned long def_flags;
        unsigned long nr_ptes;          /* Page table pages */
        unsigned long start_code, end_code, start_data, end_data;
 
                atomic_inc(&event->mmap_count);
        mutex_unlock(&event->mmap_mutex);
 
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &perf_mmap_vmops;
 
        return ret;
 
                 */
                if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   |
                                 VM_PFNMAP    | VM_IO      | VM_DONTEXPAND |
-                                VM_RESERVED  | VM_HUGETLB |
-                                VM_NONLINEAR | VM_MIXEDMAP))
+                                VM_HUGETLB | VM_NONLINEAR | VM_MIXEDMAP))
                        return 0;               /* just ignore the advice */
 
 #ifdef VM_SAO
 
         * rest of the world about it:
         *   VM_IO tells people not to look at these pages
         *      (accesses can have side effects).
-        *   VM_RESERVED is specified all over the place, because
-        *      in 2.4 it kept swapout's vma scan off this vma; but
-        *      in 2.6 the LRU scan won't even find its pages, so this
-        *      flag means no more than count its pages in reserved_vm,
-        *      and omit it from core dump, even when VM_IO turned off.
         *   VM_PFNMAP tells the core MM that the base pages are just
         *      raw PFN mappings, and do not have a "struct page" associated
         *      with them.
+        *   VM_DONTEXPAND
+        *      Disable vma merging and expanding with mremap().
+        *   VM_DONTDUMP
+        *      Omit vma from core dump, even when VM_IO turned off.
         *
         * There's a horrible special case to handle copy-on-write
         * behaviour that some programs depend on. We mark the "original"
        if (err)
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
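
Since remap_pfn_range() and io_remap_pfn_range() now raise VM_IO |
VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP themselves, a typical caller
reduces to protection setup plus the call. Roughly (a hypothetical
handler, illustrative names only):

	#include <linux/fs.h>
	#include <linux/mm.h>

	static int example_io_mmap(struct file *filp, struct vm_area_struct *vma)
	{
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		/* the flags are applied inside remap_pfn_range() */
		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}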
 
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                goto no_mlock;
 
-       if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
+       if (!((vma->vm_flags & VM_DONTEXPAND) ||
                        is_vm_hugetlb_page(vma) ||
                        vma == get_gate_vma(current->mm))) {
 
 
                        mm->exec_vm += pages;
        } else if (flags & stack_flags)
                mm->stack_vm += pages;
-       if (flags & (VM_RESERVED|VM_IO))
-               mm->reserved_vm += pages;
 }
 #endif /* CONFIG_PROC_FS */
 
 
        if (addr != (pfn << PAGE_SHIFT))
                return -EINVAL;
 
-       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
                usize -= PAGE_SIZE;
        } while (usize > 0);
 
-       /* Prevent "things" like memory migration? VM_flags need a cleanup... */
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 
        return 0;
 }
 
                        return -EACCES;
        }
 
-       vma->vm_flags |= VM_RESERVED;
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &sel_mmap_policy_ops;
 
        return 0;
 
                return -EINVAL;
        area->vm_ops = &snd_pcm_vm_ops_status;
        area->vm_private_data = substream;
-       area->vm_flags |= VM_RESERVED;
+       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
 }
 
                return -EINVAL;
        area->vm_ops = &snd_pcm_vm_ops_control;
        area->vm_private_data = substream;
-       area->vm_flags |= VM_RESERVED;
+       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
 }
 #else /* ! coherent mmap */
 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
                             struct vm_area_struct *area)
 {
-       area->vm_flags |= VM_RESERVED;
+       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 #ifdef ARCH_HAS_DMA_MMAP_COHERENT
        if (!substream->ops->page &&
            substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
 
        }
 
        area->vm_ops = &usb_stream_hwdep_vm_ops;
-       area->vm_flags |= VM_RESERVED;
+       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        area->vm_private_data = us122l;
        atomic_inc(&us122l->mmap_count);
 out:
 
                us428->us428ctls_sharedmem->CtlSnapShotLast = -2;
        }
        area->vm_ops = &us428ctls_vm_ops;
-       area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        area->vm_private_data = hw->private_data;
        return 0;
 }
 
                return -ENODEV;
        }
        area->vm_ops = &snd_usX2Y_hwdep_pcm_vm_ops;
-       area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        area->vm_private_data = hw->private_data;
        return 0;
 }