mutex_unlock(&dev->struct_mutex);
 
                if (!prefaulted) {
-                       ret = fault_in_pages_writeable(user_data, remain);
+                       ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
                         * data up to the first fault. Hence ignore any errors
                       args->size))
                return -EFAULT;
 
-       ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
-                                     args->size);
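+       /*
+        * Prefault the entire source buffer up front: the copy paths that
+        * follow may run under dev->struct_mutex with pagefaults disabled,
+        * where a page fault cannot be handled.
+        */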
+       ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
+                                          args->size);
        if (ret)
                return -EFAULT;
 
 
                 */
                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK))
                        ret = __put_user(0, end);
        }
        return ret;
 }
 
                if (((unsigned long)uaddr & PAGE_MASK) !=
                                ((unsigned long)end & PAGE_MASK)) {
                        ret = __get_user(c, end);
                        (void)c;
                }
        }
        return ret;
 }
 
+/*
+ * Multipage variants of the above prefault helpers, useful if more than
+ * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
+ * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
+ * filemap.c hotpaths.
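+ *
+ * A typical caller (a sketch modelled on the i915 paths in this patch, with
+ * user_data and remain standing for the caller's buffer pointer and length)
+ * prefaults the whole buffer before taking locks under which a page fault
+ * cannot safely be handled:
+ *
+ *        if (fault_in_multipages_readable(user_data, remain))
+ *                return -EFAULT;
+ *        mutex_lock(&dev->struct_mutex);
+ *        ... copy from user_data with pagefaults disabled ...
+ *        mutex_unlock(&dev->struct_mutex);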
+ */
+static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
+{
+       int ret = 0;
+       const char __user *end = uaddr + size - 1;
+
+       if (unlikely(size == 0))
+               return 0;
+
+       /*
+        * Writing zeroes into userspace here is OK, because we know that if
+        * the zero gets there, we'll be overwriting it.
+        */
+       while (uaddr <= end) {
+               ret = __put_user(0, uaddr);
+               if (ret != 0)
+                       return ret;
+               uaddr += PAGE_SIZE;
+       }
+
+       /* Check whether the range spilled into the next page. */
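+       /*
+        * The loop above exits with uaddr one PAGE_SIZE past the last
+        * address it touched, i.e. in the page after it.  If end lands in
+        * that same page it has not been faulted in yet, hence the '=='
+        * test here (the single-page helper above tests '!=' instead).
+        */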
+       if (((unsigned long)uaddr & PAGE_MASK) ==
+                       ((unsigned long)end & PAGE_MASK))
+               ret = __put_user(0, end);
+
+       return ret;
+}
+
+static inline int fault_in_multipages_readable(const char __user *uaddr,
+                                              int size)
+{
+       volatile char c;
+       int ret = 0;
+       const char __user *end = uaddr + size - 1;
+
+       if (unlikely(size == 0))
+               return 0;
+
+       while (uaddr <= end) {
+               ret = __get_user(c, uaddr);
+               if (ret != 0)
+                       return ret;
+               uaddr += PAGE_SIZE;
+       }
+
+       /* Check whether the range spilled into the next page. */
+       if (((unsigned long)uaddr & PAGE_MASK) ==
+                       ((unsigned long)end & PAGE_MASK)) {
+               ret = __get_user(c, end);
+               (void)c;
+       }
+
+       return ret;
+}
+
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,