return ret;
}
+/*
+ * Apply (or remove) userfaultfd write-protection on the part of one VMA
+ * that overlaps the [info->src_addr, info->src_addr + info->len) range.
+ *
+ * Returns 0 on success, -ENOENT if the VMA is not registered in
+ * userfaultfd write-protect mode, -EINVAL for a misaligned hugetlb
+ * range, or a negative error propagated from uffd_wp_range().
+ */
+static int uffd_writeprotect(struct uffd_info *info)
+{
+ long err;
+ struct vm_area_struct *dst_vma;
+ unsigned long end = info->src_addr + info->len;
+ unsigned long _start, _end;
+
+ dst_vma = info->dst_vma;
+ /* Only VMAs registered for WP mode can be (un)write-protected. */
+ if (!userfaultfd_wp(dst_vma))
+ return -ENOENT;
+
+ /* hugetlb mappings require huge-page-aligned start and length. */
+ if (is_vm_hugetlb_page(dst_vma)) {
+ unsigned long page_mask;
+ page_mask = vma_kernel_pagesize(dst_vma) - 1;
+ if ((info->src_addr & page_mask) || (info->len & page_mask))
+ return -EINVAL;
+ }
+
+ /* Clamp the requested range to this VMA's boundaries. */
+ _start = max(dst_vma->vm_start, info->src_addr);
+ _end = min(dst_vma->vm_end, end);
+
+ err = uffd_wp_range(dst_vma, _start, _end - _start, info->wp);
+ /* uffd_wp_range() returns a count on success; callers want 0. */
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
int mwriteprotect_range(struct userfaultfd_ctx *ctx, unsigned long start,
unsigned long len, bool enable_wp)
{
struct mm_struct *dst_mm = ctx->mm;
- unsigned long end = start + len;
- unsigned long _start, _end;
- struct vm_area_struct *dst_vma;
- unsigned long page_mask;
long err;
VMA_ITERATOR(vmi, dst_mm, start);
+ struct uffd_info info = UFFD_STRUCT_INIT(0, start, len, ctx, enable_wp);
/*
* Sanitize the command parameters:
goto out_unlock;
err = -ENOENT;
- for_each_vma_range(vmi, dst_vma, end) {
-
- if (!userfaultfd_wp(dst_vma)) {
- err = -ENOENT;
- break;
- }
-
- if (is_vm_hugetlb_page(dst_vma)) {
- err = -EINVAL;
- page_mask = vma_kernel_pagesize(dst_vma) - 1;
- if ((start & page_mask) || (len & page_mask))
- break;
- }
-
- _start = max(dst_vma->vm_start, start);
- _end = min(dst_vma->vm_end, end);
-
- err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);
-
- /* Return 0 on success, <0 on failures */
- if (err < 0)
+ for_each_vma_range(vmi, info.dst_vma, start + len) {
+ err = uffd_writeprotect(&info);
+ if (err)
break;
- err = 0;
}
+
out_unlock:
up_read(&ctx->map_changing_lock);
mmap_read_unlock(dst_mm);