/* Required features below */
ssize_t (*is_dst_valid)(struct vm_area_struct *dst_vma,
unsigned long dst_start, unsigned long len);
+ unsigned long (*increment)(struct vm_area_struct *vma);
};
#define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1)
return vma->vm_flags & __VM_UFFD_FLAGS;
}
+/*
+ * Default ->increment for anonymous/shmem VMAs: mfill operates one
+ * base page at a time.
+ */
+static inline unsigned long mfill_size(struct vm_area_struct *vma)
+{
+	return PAGE_SIZE;
+}
+
static inline bool vma_can_userfault(struct vm_area_struct *vma,
vm_flags_t vm_flags,
bool wp_async)
}
#ifdef CONFIG_USERFAULTFD
+
+/*
+ * hugetlb ->increment: step by the VMA's huge page size
+ * (vma_kernel_pagesize()) rather than PAGE_SIZE.
+ */
+static inline unsigned long hugetlb_mfill_size(struct vm_area_struct *vma)
+{
+	return vma_kernel_pagesize(vma);
+}
+
static ssize_t hugetlb_is_dst_valid(struct vm_area_struct *dst_vma,
unsigned long dst_start, unsigned long len);
+
+/* uffd ops for hugetlb VMAs: validate dst and step by huge page size. */
 static const struct vm_uffd_ops hugetlb_uffd_ops = {
 	.is_dst_valid = hugetlb_is_dst_valid,
+	.increment = hugetlb_mfill_size,
 };
#endif
/* Anon vma ops */
+/* Default uffd ops (anon/shmem): PAGE_SIZE steps via mfill_size(). */
 static const struct vm_uffd_ops default_uffd_ops = {
 	.is_dst_valid = uffd_def_is_dst_valid,
+	.increment = mfill_size,
 };
static inline const struct vm_uffd_ops *vma_get_uffd_ops(struct vm_area_struct *vma)
unsigned long src_addr, dst_addr;
long copied;
struct folio *folio;
- unsigned long vma_hpagesize;
pgoff_t idx;
u32 hash;
struct address_space *mapping;
+ const struct vm_uffd_ops *uffd_ops;
+ unsigned long increment;
/*
* There is no default zero huge page for all huge page sizes as
dst_addr = dst_start;
copied = 0;
folio = NULL;
- vma_hpagesize = vma_kernel_pagesize(dst_vma);
retry:
/*
goto out_unlock;
}
+	uffd_ops = vma_get_uffd_ops(dst_vma);
+	/*
+	 * 'increment' is used unconditionally below (huge_pte_alloc() and
+	 * the dst/src/copied advancement), so it must always be assigned:
+	 * fall back to vma_kernel_pagesize() — the value the removed
+	 * vma_hpagesize local used to carry — when no ->increment hook
+	 * is registered.  The bare "if" with no else left 'increment'
+	 * uninitialized on that path, which is undefined behavior.
+	 */
+	if (uffd_ops && uffd_ops->increment)
+		increment = uffd_ops->increment(dst_vma);
+	else
+		increment = vma_kernel_pagesize(dst_vma);
+
while (src_addr < src_start + len) {
VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
hugetlb_vma_lock_read(dst_vma);
err = -ENOMEM;
- dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
+ dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, increment);
if (!dst_pte) {
hugetlb_vma_unlock_read(dst_vma);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
VM_WARN_ON_ONCE(folio);
if (!err) {
- dst_addr += vma_hpagesize;
- src_addr += vma_hpagesize;
- copied += vma_hpagesize;
+ dst_addr += increment;
+ src_addr += increment;
+ copied += increment;
if (fatal_signal_pending(current))
err = -EINTR;
unsigned long src_addr, dst_addr;
long copied;
struct folio *folio;
+ const struct vm_uffd_ops *uffd_ops;
+ unsigned long increment;
/*
* Sanitize the command parameters:
dst_addr = dst_start;
copied = 0;
folio = NULL;
+
retry:
/*
* Make sure the vma is not shared, that the dst range is
err = PTR_ERR(dst_vma);
goto out;
}
+	uffd_ops = vma_get_uffd_ops(dst_vma);
+	/*
+	 * Every vm_uffd_ops is expected to provide ->increment; warn once
+	 * and fall back to a single base page if one does not.
+	 */
+	if (WARN_ON_ONCE(!uffd_ops->increment))
+		increment = PAGE_SIZE;
+	else
+		increment = uffd_ops->increment(dst_vma);
err = uffd_ctx_lock_and_validate_dst(ctx, dst_vma, dst_start, len);
if (err)
}
if (!err) {
- dst_addr += PAGE_SIZE;
- src_addr += PAGE_SIZE;
- copied += PAGE_SIZE;
+ dst_addr += increment;
+ src_addr += increment;
+ copied += increment;
if (fatal_signal_pending(current))
err = -EINTR;