hugetlb locks the context before validating the hugetlb vma (which is
already stable). Reversing the order better aligns hugetlb with the
other memory types; there is no need for it to be different.
This is important for following the logic of later patches, which
combine the memory types into the same code.
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
goto out;
}
- err = -ENOENT;
- if (!is_vm_hugetlb_page(dst_vma))
- goto out_unlock_vma;
-
- err = -EINVAL;
- if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
- goto out_unlock_vma;
-
/*
* If memory mappings are changing because of non-cooperative
* operation (e.g. mremap) running in parallel, bail out and
err = -EAGAIN;
if (atomic_read(&ctx->mmap_changing))
goto out_unlock;
+
+ err = -ENOENT;
+ if (!is_vm_hugetlb_page(dst_vma))
+ goto out_unlock;
+
+ err = -EINVAL;
+ if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
+ goto out_unlock;
}
while (src_addr < src_start + len) {
out_unlock:
up_read(&ctx->map_changing_lock);
-out_unlock_vma:
uffd_mfill_unlock(dst_vma);
out:
if (folio)