on_each_cpu(kvm_map_magic_page, &features, 1);
 
        /* Quick self-test to see if the mapping works */
-       if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
+       if (fault_in_readable((const char __user *)KVM_MAGIC_PAGE,
+                             sizeof(u32))) {
                kvm_patching_worked = false;
                return;
        }
 
        if (new_ctx == NULL)
                return 0;
        if (!access_ok(new_ctx, ctx_size) ||
-           fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
+           fault_in_readable((char __user *)new_ctx, ctx_size))
                return -EFAULT;
 
        /*
 #endif
 
        if (!access_ok(ctx, sizeof(*ctx)) ||
-           fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
+           fault_in_readable((char __user *)ctx, sizeof(*ctx)))
                return -EFAULT;
 
        /*
 
        if (new_ctx == NULL)
                return 0;
        if (!access_ok(new_ctx, ctx_size) ||
-           fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
+           fault_in_readable((char __user *)new_ctx, ctx_size))
                return -EFAULT;
 
        /*
 
        fpregs_unlock();
 
        if (ret) {
-               if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
+               if (!fault_in_writeable(buf_fx, fpu_user_xstate_size))
                        goto retry;
                return -EFAULT;
        }
                if (ret != -EFAULT)
                        return -EINVAL;
 
-               ret = fault_in_pages_readable(buf, size);
-               if (!ret)
+               if (!fault_in_readable(buf, size))
                        goto retry;
-               return ret;
+               return -EFAULT;
        }
 
        /*
 
        struct drm_armada_gem_pwrite *args = data;
        struct armada_gem_object *dobj;
        char __user *ptr;
-       int ret;
+       int ret = 0;
 
        DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
                args->handle, args->offset, args->size, args->ptr);
        if (!access_ok(ptr, args->size))
                return -EFAULT;
 
-       ret = fault_in_pages_readable(ptr, args->size);
-       if (ret)
-               return ret;
+       if (fault_in_readable(ptr, args->size))
+               return -EFAULT;
 
        dobj = armada_gem_object_lookup(file, args->handle);
        if (dobj == NULL)
 
        key.offset = sk->min_offset;
 
        while (1) {
-               ret = fault_in_pages_writeable(ubuf + sk_offset,
-                                              *buf_size - sk_offset);
-               if (ret)
+               ret = -EFAULT;
+               if (fault_in_writeable(ubuf + sk_offset, *buf_size - sk_offset))
                        break;
 
                ret = btrfs_search_forward(root, &key, path, sk->min_transid);
 
 extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
 
 /*
- * Fault everything in given userspace address range in.
+ * Fault in a userspace address range.
  */
-static inline int fault_in_pages_writeable(char __user *uaddr, size_t size)
-{
-       char __user *end = uaddr + size - 1;
-
-       if (unlikely(size == 0))
-               return 0;
-
-       if (unlikely(uaddr > end))
-               return -EFAULT;
-       /*
-        * Writing zeroes into userspace here is OK, because we know that if
-        * the zero gets there, we'll be overwriting it.
-        */
-       do {
-               if (unlikely(__put_user(0, uaddr) != 0))
-                       return -EFAULT;
-               uaddr += PAGE_SIZE;
-       } while (uaddr <= end);
-
-       /* Check whether the range spilled into the next page. */
-       if (((unsigned long)uaddr & PAGE_MASK) ==
-                       ((unsigned long)end & PAGE_MASK))
-               return __put_user(0, end);
-
-       return 0;
-}
-
-static inline int fault_in_pages_readable(const char __user *uaddr, size_t size)
-{
-       volatile char c;
-       const char __user *end = uaddr + size - 1;
-
-       if (unlikely(size == 0))
-               return 0;
-
-       if (unlikely(uaddr > end))
-               return -EFAULT;
-
-       do {
-               if (unlikely(__get_user(c, uaddr) != 0))
-                       return -EFAULT;
-               uaddr += PAGE_SIZE;
-       } while (uaddr <= end);
-
-       /* Check whether the range spilled into the next page. */
-       if (((unsigned long)uaddr & PAGE_MASK) ==
-                       ((unsigned long)end & PAGE_MASK)) {
-               return __get_user(c, end);
-       }
-
-       (void)c;
-       return 0;
-}
+size_t fault_in_writeable(char __user *uaddr, size_t size);
+size_t fault_in_readable(const char __user *uaddr, size_t size);
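+
+/*
+ * Minimal usage sketch (hypothetical caller; ubuf and len are placeholders):
+ *
+ *     if (fault_in_readable(ubuf, len))
+ *             return -EFAULT;
+ *
+ * A nonzero return means part of the range could not be faulted in.
+ */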
 
 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
 
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);
 
-       if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
+       if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;
 
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);
 
-       if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
+       if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;
 
                        bytes = i->count;
                for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
                        size_t len = min(bytes, p->iov_len - skip);
-                       int err;
 
                        if (unlikely(!len))
                                continue;
-                       err = fault_in_pages_readable(p->iov_base + skip, len);
-                       if (unlikely(err))
-                               return err;
+                       if (fault_in_readable(p->iov_base + skip, len))
+                               return -EFAULT;
                        bytes -= len;
                }
        }
 
  *      ->lock_page            (filemap_fault, access_process_vm)
  *
  *  ->i_rwsem                  (generic_perform_write)
- *    ->mmap_lock              (fault_in_pages_readable->do_page_fault)
+ *    ->mmap_lock              (fault_in_readable->do_page_fault)
  *
  *  bdi->wb.list_lock
  *    sb_lock                  (fs/fs-writeback.c)
 
 }
 #endif /* !CONFIG_MMU */
 
+/**
+ * fault_in_writeable - fault in userspace address range for writing
+ * @uaddr: start of user address range
+ * @size: size of user address range
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
+ */
+size_t fault_in_writeable(char __user *uaddr, size_t size)
+{
+       char __user *start = uaddr, *end;
+
+       if (unlikely(size == 0))
+               return 0;
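+       /*
+        * Writing zeroes into userspace is OK here: if a zero actually
+        * lands there, the caller is about to overwrite it.  Fault in an
+        * unaligned first page by hand, then walk whole pages below.
+        */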
+       if (!PAGE_ALIGNED(uaddr)) {
+               if (unlikely(__put_user(0, uaddr) != 0))
+                       return size;
+               uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
+       }
+       end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
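+       /*
+        * If start + size wraps past the top of the address space, fault
+        * in up to the very last page: uaddr wrapping around to NULL then
+        * terminates the loop.
+        */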
+       if (unlikely(end < start))
+               end = NULL;
+       while (uaddr != end) {
+               if (unlikely(__put_user(0, uaddr) != 0))
+                       goto out;
+               uaddr += PAGE_SIZE;
+       }
+
+out:
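+       /*
+        * Pages are faulted in whole, so uaddr may have advanced past
+        * start + size; in that case the entire range was faulted in.
+        */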
+       if (size > uaddr - start)
+               return size - (uaddr - start);
+       return 0;
+}
+EXPORT_SYMBOL(fault_in_writeable);
+
+/**
+ * fault_in_readable - fault in userspace address range for reading
+ * @uaddr: start of user address range
+ * @size: size of user address range
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
+ */
+size_t fault_in_readable(const char __user *uaddr, size_t size)
+{
+       const char __user *start = uaddr, *end;
+       volatile char c;
+
+       if (unlikely(size == 0))
+               return 0;
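+       /* Fault in an unaligned first page by hand, as above. */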
+       if (!PAGE_ALIGNED(uaddr)) {
+               if (unlikely(__get_user(c, uaddr) != 0))
+                       return size;
+               uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
+       }
+       end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
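+       /* A range that wraps is walked until uaddr itself wraps to NULL. */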
+       if (unlikely(end < start))
+               end = NULL;
+       while (uaddr != end) {
+               if (unlikely(__get_user(c, uaddr) != 0))
+                       goto out;
+               uaddr += PAGE_SIZE;
+       }
+
+out:
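+       /* c is only read to force the faults; keep it from looking unused. */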
+       (void)c;
+       if (size > uaddr - start)
+               return size - (uaddr - start);
+       return 0;
+}
+EXPORT_SYMBOL(fault_in_readable);
+
 /**
  * get_dump_page() - pin user page in memory while writing it to core dump
  * @addr: user address