                left = len;
                for (n = 0; n < num_pages; n++) {
                        size_t plen = min_t(size_t, left, PAGE_SIZE);
-                       ret = iov_iter_copy_from_user(pages[n], &i, 0, plen);
+                       ret = copy_page_from_iter(pages[n], 0, plen, &i);
                        if (ret != plen) {
                                ret = -EFAULT;
                                break;
                        }
                        left -= ret;
-                       iov_iter_advance(&i, ret);
                }
 
                if (ret < 0) {
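
The new helper folds the copy and the iterator bookkeeping into one call, which is why the explicit iov_iter_advance() disappears from the loop above. As a sketch only (expressed in terms of the two old calls it replaces; the real mm/iov_iter.c implementation is open-coded), the expected behavior is:

	/*
	 * Sketch, not the actual implementation: behave as if the request
	 * were clamped to what is left in the iterator, copied into the
	 * page, and the iterator advanced by the amount actually copied.
	 */
	static size_t copy_page_from_iter_sketch(struct page *page, size_t offset,
						 size_t bytes, struct iov_iter *i)
	{
		size_t copied;

		if (bytes > iov_iter_count(i))	/* don't overrun the iterator */
			bytes = iov_iter_count(i);
		copied = iov_iter_copy_from_user(page, i, offset, bytes);
		iov_iter_advance(i, copied);	/* callers no longer do this */
		return copied;
	}

The clamp against iov_iter_count() matters for the last hunk below, where the caller's own clamp is dropped.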
 
 
                save_len = cur_len;
                for (i = 0; i < nr_pages; i++) {
-                       bytes = min_t(const size_t, cur_len, PAGE_SIZE);
-                       copied = iov_iter_copy_from_user(wdata->pages[i], &it,
-                                                        0, bytes);
+                       bytes = min_t(size_t, cur_len, PAGE_SIZE);
+                       copied = copy_page_from_iter(wdata->pages[i], 0, bytes,
+                                                    &it);
                        cur_len -= copied;
-                       iov_iter_advance(&it, copied);
                        /*
                         * If we didn't copy as much as we expected, then that
                         * may mean we trod into an unmapped area. Stop copying
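
Unlike the ceph loop, which treats any short copy as -EFAULT, the cifs loop tolerates it and relies on the iterator having been advanced by exactly the amount copied, so a later pass can pick up where the fault occurred. A hypothetical caller following the same pattern (the function name here is illustrative, not from the patch) might look like:

	/*
	 * Illustrative only: fill an array of pages from an iov_iter,
	 * stopping cleanly on a short copy (e.g. an unmapped source area).
	 */
	static size_t fill_pages_from_iter(struct page **pages, unsigned int nr_pages,
					   size_t len, struct iov_iter *it)
	{
		size_t total = 0;
		unsigned int n;

		for (n = 0; n < nr_pages && len; n++) {
			size_t bytes = min_t(size_t, len, PAGE_SIZE);
			size_t copied = copy_page_from_iter(pages[n], 0, bytes, it);

			total += copied;
			len -= copied;
			if (copied < bytes)	/* short copy: stop, caller retries */
				break;
		}
		return total;
	}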
 
 
 size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_from_user(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
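
The declaration that replaces the removed one is not visible in this excerpt; inferred from the converted call sites, and mirroring copy_page_to_iter() in the last hunk, it should read:

	/* Inferred from the call sites above; mirrors copy_page_to_iter(). */
	size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
				   struct iov_iter *i);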
 
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-       char *kaddr;
-       size_t copied;
-
-       kaddr = kmap(page);
-       if (likely(i->nr_segs == 1)) {
-               int left;
-               char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user(kaddr + offset, buf, bytes);
-               copied = bytes - left;
-       } else {
-               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-                                               i->iov, i->iov_offset, bytes);
-       }
-       kunmap(page);
-       return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
 void iov_iter_advance(struct iov_iter *i, size_t bytes)
 {
        BUG_ON(i->count < bytes);
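
The deleted function's multi-segment path leaned on __iovec_copy_from_user_inatomic(). Roughly, that helper walks the iovec array one segment at a time until the request is satisfied or a fault cuts it short; a reconstruction (a sketch, not the verbatim mm/filemap.c code):

	/* Sketch of the segment walk, not the exact helper. */
	static size_t iovec_copy_from_user_sketch(char *to, const struct iovec *iov,
						  size_t base, size_t bytes)
	{
		size_t copied = 0, left = 0;

		while (bytes) {
			char __user *buf = iov->iov_base + base;
			size_t copy = min(bytes, iov->iov_len - base);

			base = 0;	/* only the first segment has an offset */
			left = __copy_from_user_inatomic(to, buf, copy);
			copied += copy;
			bytes -= copy;
			to += copy;
			iov++;
			if (left)	/* fault: report the shortfall */
				break;
		}
		return copied - left;
	}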
 
                        copy = len;
 
                if (vm_write) {
-                       if (copy > iov_iter_count(iter))
-                               copy = iov_iter_count(iter);
-                       copied = iov_iter_copy_from_user(page, iter,
-                                       offset, copy);
-                       iov_iter_advance(iter, copied);
+                       copied = copy_page_from_iter(page, offset, copy, iter);
                        set_page_dirty_lock(page);
                } else {
                        copied = copy_page_to_iter(page, offset, copy, iter);
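
The dropped clamp against iov_iter_count(iter) is assumed to be subsumed by the helper (see the sketch after the first hunk); in other words, the deleted lines amounted to:

	/* Sketch: what the deleted clamp did, now folded into the helper. */
	copy = min_t(size_t, copy, iov_iter_count(iter));
	copied = copy_page_from_iter(page, offset, copy, iter); /* also advances */

With the write and read sides now symmetric one-liners, the only asymmetry left is the set_page_dirty_lock() needed after writing into the page.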