unsigned nr_events = ctx->max_reqs;
        unsigned long size;
        int nr_pages;
+       bool populate;
 
        /* Compensate for the ring buffer's head/tail overlap entry */
        nr_events += 2; /* 1 is required, 2 for good luck */
        down_write(&ctx->mm->mmap_sem);
        info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size, 
                                        PROT_READ|PROT_WRITE,
-                                       MAP_ANONYMOUS|MAP_PRIVATE, 0);
+                                       MAP_ANONYMOUS|MAP_PRIVATE, 0,
+                                       &populate);
        if (IS_ERR((void *)info->mmap_base)) {
                up_write(&ctx->mm->mmap_sem);
                info->mmap_size = 0;
                aio_free_ring(ctx);
                return -EAGAIN;
        }
+       if (populate)
+               mm_populate(info->mmap_base, info->mmap_size);
 
        ctx->user_id = info->mmap_base;
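
Note on the aio hunk: do_mmap_pgoff() no longer populates the mapping itself; it reports through the new `populate' out-parameter whether the caller should, and aio then pre-faults the ring pages with mm_populate().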
 
 
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
        unsigned long len, unsigned long flags,
        vm_flags_t vm_flags, unsigned long pgoff);
-extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
-        unsigned long, unsigned long,
-        unsigned long, unsigned long);
+extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+       unsigned long len, unsigned long prot, unsigned long flags,
+       unsigned long pgoff, bool *populate);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
 
+#ifdef CONFIG_MMU
+extern int __mm_populate(unsigned long addr, unsigned long len,
+                        int ignore_errors);
+static inline void mm_populate(unsigned long addr, unsigned long len)
+{
+       /* Ignore errors */
+       (void) __mm_populate(addr, len, 1);
+}
+#else
+static inline void mm_populate(unsigned long addr, unsigned long len) {}
+#endif
+
 /* These take the mm semaphore themselves */
 extern unsigned long vm_brk(unsigned long, unsigned long);
 extern int vm_munmap(unsigned long, size_t);
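
For reference, a minimal sketch of the calling convention the new prototypes imply (it mirrors the mm/util.c hunk at the end of this series; illustrative only, not part of the diff):

	bool populate;
	unsigned long addr;

	down_write(&mm->mmap_sem);
	addr = do_mmap_pgoff(file, 0, len, prot, flags, pgoff, &populate);
	up_write(&mm->mmap_sem);
	if (!IS_ERR_VALUE(addr) && populate)
		mm_populate(addr, len);	/* best effort, errors ignored */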
 
        unsigned long flags;
        unsigned long prot;
        int acc_mode;
-       unsigned long user_addr;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        struct path path;
        fmode_t f_mode;
+       bool populate = false;
 
        err = -EINVAL;
        if (shmid < 0)
                        goto invalid;
        }
                
-       user_addr = do_mmap_pgoff(file, addr, size, prot, flags, 0);
-       *raddr = user_addr;
+       addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
+       *raddr = addr;
        err = 0;
-       if (IS_ERR_VALUE(user_addr))
-               err = (long)user_addr;
+       if (IS_ERR_VALUE(addr))
+               err = (long)addr;
 invalid:
        up_write(&current->mm->mmap_sem);
+       if (populate)
+               mm_populate(addr, size);
 
 out_fput:
        fput(file);
 
        return error;
 }
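
Note that the shm path needs no IS_ERR_VALUE() check before calling mm_populate(): do_mmap_pgoff() initializes *populate to false and sets it only when the mapping succeeded, so a failed mapping never gets populated.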
 
-static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
+/*
+ * __mm_populate - populate and/or mlock pages within a range of address space.
+ *
+ * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
+ * flags. VMAs must be already marked with the desired vm_flags, and
+ * mmap_sem must not be held.
+ */
+int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 {
        struct mm_struct *mm = current->mm;
        unsigned long end, nstart, nend;
                error = do_mlock(start, len, 1);
        up_write(&current->mm->mmap_sem);
        if (!error)
-               error = do_mlock_pages(start, len, 0);
+               error = __mm_populate(start, len, 0);
        return error;
 }
 
            capable(CAP_IPC_LOCK))
                ret = do_mlockall(flags);
        up_write(&current->mm->mmap_sem);
-       if (!ret && (flags & MCL_CURRENT)) {
-               /* Ignore errors */
-               do_mlock_pages(0, TASK_SIZE, 1);
-       }
+       if (!ret && (flags & MCL_CURRENT))
+               mm_populate(0, TASK_SIZE);
 out:
        return ret;
 }
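
The two mlock entry points now differ only in error handling: mlock() wants the fault errors propagated, so it calls __mm_populate(start, len, 0) directly, while mlockall(MCL_CURRENT) uses the mm_populate() wrapper, which passes ignore_errors=1 and discards the result.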
 
 
 unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
-                       unsigned long flags, unsigned long pgoff)
+                       unsigned long flags, unsigned long pgoff,
+                       bool *populate)
 {
        struct mm_struct * mm = current->mm;
        struct inode *inode;
        vm_flags_t vm_flags;
 
+       *populate = false;
+
        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         *
                }
        }
 
-       return mmap_region(file, addr, len, flags, vm_flags, pgoff);
+       addr = mmap_region(file, addr, len, flags, vm_flags, pgoff);
+       if (!IS_ERR_VALUE(addr) &&
+           ((vm_flags & VM_LOCKED) ||
+            (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
+               *populate = true;
+       return addr;
 }
 
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 
        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
-               if (!mlock_vma_pages_range(vma, addr, addr + len))
+               if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
+                                       vma == get_gate_vma(current->mm)))
                        mm->locked_vm += (len >> PAGE_SHIFT);
-       } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
-               make_pages_present(addr, addr + len);
+               else
+                       vma->vm_flags &= ~VM_LOCKED;
+       }
 
        if (file)
                uprobe_mmap(vma);
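
mmap_region() is thus left with only the locked_vm accounting (clearing VM_LOCKED on the special, hugetlb and gate VMAs it declines to account); the actual faulting moves out to do_mmap_pgoff()'s callers. From userspace, either of the following requests the deferred population (an illustrative pairing, not part of the patch):

	/* pre-fault via MAP_POPULATE (only honoured without MAP_NONBLOCK) */
	p = mmap(NULL, len, PROT_READ|PROT_WRITE,
		 MAP_PRIVATE|MAP_ANONYMOUS|MAP_POPULATE, -1, 0);
	/* pre-fault and lock via MAP_LOCKED (sets VM_LOCKED) */
	q = mmap(NULL, len, PROT_READ|PROT_WRITE,
		 MAP_PRIVATE|MAP_ANONYMOUS|MAP_LOCKED, -1, 0);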
 
                            unsigned long len,
                            unsigned long prot,
                            unsigned long flags,
-                           unsigned long pgoff)
+                           unsigned long pgoff,
+                           bool *populate)
 {
        struct vm_area_struct *vma;
        struct vm_region *region;
 
        kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
 
+       *populate = false;
+
        /* decide whether we should attempt the mapping, and if so what sort of
         * mapping */
        ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
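
On !MMU kernels the new out-parameter is simply initialized to false and never set: nommu mappings are backed by real memory as soon as they are created, so there is nothing to pre-fault, which matches the empty mm_populate() stub in the mm.h hunk above.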
 
 {
        unsigned long ret;
        struct mm_struct *mm = current->mm;
+       bool populate;
 
        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                down_write(&mm->mmap_sem);
-               ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
+               ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
+                                   &populate);
                up_write(&mm->mmap_sem);
+               if (!IS_ERR_VALUE(ret) && populate)
+                       mm_populate(ret, len);
        }
        return ret;
 }
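
With this, vm_mmap_pgoff() only pre-faults after releasing mmap_sem, so the write lock is no longer held while the new mapping's pages are faulted in, which is presumably the motivation for splitting population out of do_mmap_pgoff().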