vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
+ssize_t hugetlb_mfill_prepare(struct vm_area_struct *dst_vma, unsigned long dst_addr,
+ unsigned long increment, pte_t **dst_pte, u32 *hash, uffd_flags_t flags);
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
}
#ifdef CONFIG_USERFAULTFD
+
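+/*
+ * hugetlb_mfill_prepare() - take the hugetlb locks and allocate the
+ * destination pte for a userfaultfd mfill operation.
+ *
+ * On success, returns 0 with the hugetlb fault mutex (selected by *@hash)
+ * and the vma lock held; the caller must drop both once it is done with
+ * *@dst_pte.  On failure, both locks are released and -ENOMEM (pte
+ * allocation failed) or -EEXIST (destination already populated outside of
+ * MFILL_ATOMIC_CONTINUE) is returned.
+ */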
+ssize_t hugetlb_mfill_prepare(struct vm_area_struct *dst_vma, unsigned long dst_addr,
+ unsigned long increment, pte_t **dst_pte, u32 *hash, uffd_flags_t flags)
+{
+ struct mm_struct *dst_mm = dst_vma->vm_mm;
+ struct address_space *mapping;
+ pgoff_t idx;
+ /*
+ * Serialize via vma_lock and hugetlb_fault_mutex.
+ * The vma_lock ensures the dst_pte remains valid even
+ * in the case of shared PMDs; the fault mutex prevents
+ * races with other faulting threads.
+ */
+ idx = linear_page_index(dst_vma, dst_addr);
+ mapping = dst_vma->vm_file->f_mapping;
+ *hash = hugetlb_fault_mutex_hash(mapping, idx);
+ mutex_lock(&hugetlb_fault_mutex_table[*hash]);
+ hugetlb_vma_lock_read(dst_vma);
+
+ *dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, increment);
+ if (!*dst_pte) {
+ hugetlb_vma_unlock_read(dst_vma);
+ mutex_unlock(&hugetlb_fault_mutex_table[*hash]);
+ return -ENOMEM;
+ }
+
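+ /*
+ * Outside of MFILL_ATOMIC_CONTINUE, finding anything other than an
+ * empty pte (or a pte marker) at dst_addr means the page was already
+ * faulted in or filled; report -EEXIST to the caller.
+ */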
+ if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
+ !huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, *dst_pte))) {
+ hugetlb_vma_unlock_read(dst_vma);
+ mutex_unlock(&hugetlb_fault_mutex_table[*hash]);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
/*
* Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
*/
unsigned long src_addr, dst_addr;
long copied;
struct folio *folio;
- pgoff_t idx;
u32 hash;
- struct address_space *mapping;
const struct vm_uffd_ops *uffd_ops;
unsigned long increment;
while (src_addr < src_start + len) {
VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
- /*
- * Serialize via vma_lock and hugetlb_fault_mutex.
- * vma_lock ensures the dst_pte remains valid even
- * in the case of shared pmds. fault mutex prevents
- * races with other faulting threads.
- */
- idx = linear_page_index(dst_vma, dst_addr);
- mapping = dst_vma->vm_file->f_mapping;
- hash = hugetlb_fault_mutex_hash(mapping, idx);
- mutex_lock(&hugetlb_fault_mutex_table[hash]);
- hugetlb_vma_lock_read(dst_vma);
-
- err = -ENOMEM;
- dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, increment);
- if (!dst_pte) {
- hugetlb_vma_unlock_read(dst_vma);
- mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- goto out_unlock;
- }
-
- if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
- !huge_pte_none_mostly(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
- err = -EEXIST;
- hugetlb_vma_unlock_read(dst_vma);
- mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ err = hugetlb_mfill_prepare(dst_vma, dst_addr, increment,
+ &dst_pte, &hash, flags);
+ if (err)
goto out_unlock;
- }
err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
src_addr, flags, &folio, hash);