extern void copy_user_huge_page(struct page *dst, struct page *src,
                                unsigned long addr, struct vm_area_struct *vma,
                                unsigned int pages_per_huge_page);
+/*
+ * Copy pages_per_huge_page * PAGE_SIZE bytes from user space into the
+ * contiguous pages starting at dst_page.  Returns the number of bytes
+ * that could NOT be copied (0 on full success).
+ */
+extern long copy_huge_page_from_user(struct page *dst_page,
+                               const void __user *usr_src,
+                               unsigned int pages_per_huge_page);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 extern struct page_ext_operations debug_guardpage_ops;
 
                copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
        }
 }
+
+/*
+ * copy_huge_page_from_user - fill a huge page with data from user space.
+ * @dst_page:            first of @pages_per_huge_page contiguous struct pages
+ * @usr_src:             user-space source buffer
+ * @pages_per_huge_page: number of base pages making up the huge page
+ *
+ * Copies PAGE_SIZE bytes at a time, one base page per iteration.
+ *
+ * Return: number of bytes left uncopied; 0 means the whole huge page
+ * was copied successfully (mirrors the copy_from_user() convention).
+ *
+ * NOTE(review): copy_from_user() runs here inside a kmap_atomic()
+ * section, i.e. in atomic context with pagefaults disabled.  If the
+ * user source page is not resident, the copy cannot fault it in and
+ * returns a nonzero remainder -- callers must be prepared to retry
+ * outside atomic context.  TODO: confirm callers handle this.
+ */
+long copy_huge_page_from_user(struct page *dst_page,
+                               const void __user *usr_src,
+                               unsigned int pages_per_huge_page)
+{
+       void *src = (void *)usr_src;
+       void *page_kaddr;
+       unsigned long i, rc = 0;
+       /* Total byte count; bytes actually copied are subtracted below. */
+       unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
+
+       for (i = 0; i < pages_per_huge_page; i++) {
+               page_kaddr = kmap_atomic(dst_page + i);
+               rc = copy_from_user(page_kaddr,
+                               (const void __user *)(src + i * PAGE_SIZE),
+                               PAGE_SIZE);
+               kunmap_atomic(page_kaddr);
+
+               /* rc = bytes NOT copied for this base page. */
+               ret_val -= (PAGE_SIZE - rc);
+               /* Partial copy: stop and report the remainder. */
+               if (rc)
+                       break;
+
+               /* Safe to reschedule: the atomic mapping is released. */
+               cond_resched();
+       }
+       return ret_val;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS