unsigned long src_addr);
};
+struct uffd_info {
+ unsigned long dst_addr; /* Target address */
+ unsigned long src_addr; /* Source address */
+ unsigned long len; /* Total length to copy */
+
+ struct vm_area_struct *dst_vma; /* Target vma */
+ unsigned long increment; /* Size of each operation */
+ bool wp; /* Operation is requesting write protection */
+ struct folio **foliop; /* folio pointer for retry */
+ const struct vm_uffd_ops *uffd_ops; /* The vma uffd_ops pointer */
+};
+
+#define UFFD_STRUCT_INIT(__dst_addr, __src_addr, __len, __wp) { \
+ .dst_addr = (__dst_addr), \
+ .src_addr = (__src_addr), \
+ .len = (__len), \
+ .wp = (__wp), \
+}
+
#define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1)
#define MFILL_ATOMIC_BIT(nr) BIT(MFILL_ATOMIC_MODE_BITS + (nr))
#define MFILL_ATOMIC_FLAG(nr) ((__force uffd_flags_t) MFILL_ATOMIC_BIT(nr))
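
A minimal usage sketch of the new struct and initializer (illustrative only,
assuming the names introduced above; the lock/unlock helpers are converted
later in this patch):

	struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len,
						 flags & MFILL_ATOMIC_WP);
	int err;

	/* Lock and validate the destination range; fills info.dst_vma. */
	err = uffd_mfill_lock(dst_mm, &info);
	if (err)
		return err;

	/* ... perform the copy in info.increment-sized steps ... */

	uffd_mfill_unlock(info.dst_vma);
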
return vma;
}
-static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
- unsigned long dst_start,
- unsigned long len)
+static int uffd_mfill_lock(struct mm_struct *dst_mm, struct uffd_info *us)
{
struct vm_area_struct *dst_vma;
- dst_vma = uffd_lock_vma(dst_mm, dst_start);
- if (IS_ERR(dst_vma) || validate_dst_vma(dst_vma, dst_start + len))
- return dst_vma;
+ dst_vma = uffd_lock_vma(dst_mm, us->dst_addr);
+ if (IS_ERR(dst_vma))
+ return PTR_ERR(dst_vma);
+
+ if (validate_dst_vma(dst_vma, us->dst_addr + us->len)) {
+ us->dst_vma = dst_vma;
+ return 0;
+ }
vma_end_read(dst_vma);
- return ERR_PTR(-ENOENT);
+ return -ENOENT;
}
static void uffd_mfill_unlock(struct vm_area_struct *vma)
#else
-static struct vm_area_struct *uffd_mfill_lock(struct mm_struct *dst_mm,
- unsigned long dst_start,
- unsigned long len)
+static int uffd_mfill_lock(struct mm_struct *dst_mm, struct uffd_info *us)
{
struct vm_area_struct *dst_vma;
mmap_read_lock(dst_mm);
- dst_vma = find_vma_and_prepare_anon(dst_mm, dst_start);
+ dst_vma = find_vma_and_prepare_anon(dst_mm, us->dst_addr);
if (IS_ERR(dst_vma))
goto out_unlock;
- if (validate_dst_vma(dst_vma, dst_start + len))
- return dst_vma;
+ if (validate_dst_vma(dst_vma, us->dst_addr + us->len)) {
+ us->dst_vma = dst_vma;
+ return 0;
+ }
 dst_vma = ERR_PTR(-ENOENT);
out_unlock:
mmap_read_unlock(dst_mm);
- return dst_vma;
+ return PTR_ERR(dst_vma);
}
static void uffd_mfill_unlock(struct vm_area_struct *vma)
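
The #else fallback above keeps the ERR_PTR() encoding internally and converts
to an int on return, which keeps the diff small. An equivalent int-native
shape, purely as an illustrative sketch (not what the patch does):

	static int uffd_mfill_lock(struct mm_struct *dst_mm, struct uffd_info *us)
	{
		struct vm_area_struct *dst_vma;
		int err = -ENOENT;

		mmap_read_lock(dst_mm);
		dst_vma = find_vma_and_prepare_anon(dst_mm, us->dst_addr);
		if (IS_ERR(dst_vma)) {
			err = PTR_ERR(dst_vma);
			goto out_unlock;
		}

		if (validate_dst_vma(dst_vma, us->dst_addr + us->len)) {
			/* Success: the mmap read lock stays held for the caller. */
			us->dst_vma = dst_vma;
			return 0;
		}

	out_unlock:
		mmap_read_unlock(dst_mm);
		return err;
	}
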
struct folio *folio;
const struct vm_uffd_ops *uffd_ops;
unsigned long increment;
+ struct uffd_info info = UFFD_STRUCT_INIT(dst_start, src_start, len,
+ flags & MFILL_ATOMIC_WP);
/*
* Sanitize the command parameters:
* Make sure the vma is not shared, that the dst range is
* both valid and fully within a single existing vma.
*/
- dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
- if (IS_ERR(dst_vma)) {
- err = PTR_ERR(dst_vma);
+ err = uffd_mfill_lock(dst_mm, &info);
+ if (err)
goto out;
- }
+
+ dst_vma = info.dst_vma;
uffd_ops = vma_get_uffd_ops(dst_vma);
if (WARN_ON_ONCE(!uffd_ops->increment)) {
increment = PAGE_SIZE;
uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
goto out_unlock;
+ info.increment = increment;
while (src_addr < src_start + len) {
VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
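
With info.increment now populated, the copy loop is expected to advance both
addresses by that step rather than a hard-coded PAGE_SIZE. A condensed sketch
of that shape (do_one_mfill_op is a hypothetical placeholder for the
per-increment operation, illustrative only):

	while (src_addr < src_start + len) {
		VM_WARN_ON_ONCE(dst_addr >= dst_start + len);

		/* One increment-sized operation (hypothetical helper). */
		err = do_one_mfill_op(&info);
		if (err)
			break;

		dst_addr += info.increment;
		src_addr += info.increment;
	}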