struct address_space *mapping = inode->i_mapping;
gfp_t gfp = mapping_gfp_mask(mapping);
pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
- void *page_kaddr;
struct folio *folio;
int ret;
pmd_t *dst_pmd;
if (!folio)
goto out_unacct_blocks;
- page_kaddr = kmap_local_folio(folio, 0);
- /*
- * The read mmap_lock is held here. Despite the
- * mmap_lock being read recursive a deadlock is still
- * possible if a writer has taken a lock. For example:
- *
- * process A thread 1 takes read lock on own mmap_lock
- * process A thread 2 calls mmap, blocks taking write lock
- * process B thread 1 takes page fault, read lock on own mmap lock
- * process B thread 2 calls mmap, blocks taking write lock
- * process A thread 1 blocks taking read lock on process B
- * process B thread 1 blocks taking read lock on process A
- *
- * Disable page faults to prevent potential deadlock
- * and retry the copy outside the mmap_lock.
- */
- pagefault_disable();
- ret = copy_from_user(page_kaddr, (const void __user *)src_addr,
- PAGE_SIZE);
- pagefault_enable();
- kunmap_local(page_kaddr);
-
- /* fallback to copy_from_user outside mmap_lock */
- if (unlikely(ret)) {
+ ret = uffd_atomic_pte_copy(folio, src_addr);
+ if (ret) {
*foliop = folio;
- ret = -ENOENT;
- /* don't free the page */
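+ /* don't free the folio: the copy is retried outside the mmap_lock */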
goto out_unacct_blocks;
}
- flush_dcache_folio(folio);
} else {
folio = *foliop;
VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
*foliop = NULL;
}
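+/**
+ * uffd_atomic_pte_copy() - copy one page of data from userspace into a folio
+ * @folio:    destination folio allocated by the caller
+ * @src_addr: userspace source address to copy PAGE_SIZE bytes from
+ *
+ * Called with the read mmap_lock held, so the copy runs with page faults
+ * disabled. Shared by the shmem and anonymous userfaultfd copy paths.
+ *
+ * Return: 0 on success, -ENOENT if the copy faulted and must be retried
+ * outside the mmap_lock; the folio is not freed in that case.
+ */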
+int uffd_atomic_pte_copy(struct folio *folio, unsigned long src_addr)
+{
+ void *kaddr;
+ int ret;
+
+ kaddr = kmap_local_folio(folio, 0);
+ /*
+ * The read mmap_lock is held here. Despite the
+ * mmap_lock being read recursive a deadlock is still
+ * possible if a writer has taken a lock. For example:
+ *
+ * process A thread 1 takes read lock on own mmap_lock
+ * process A thread 2 calls mmap, blocks taking write lock
+ * process B thread 1 takes page fault, read lock on own mmap lock
+ * process B thread 2 calls mmap, blocks taking write lock
+ * process A thread 1 blocks taking read lock on process B
+ * process B thread 1 blocks taking read lock on process A
+ *
+ * Disable page faults to prevent potential deadlock
+ * and retry the copy outside the mmap_lock.
+ */
+ pagefault_disable();
+ ret = copy_from_user(kaddr, (const void __user *) src_addr,
+ PAGE_SIZE);
+ pagefault_enable();
+ kunmap_local(kaddr);
+
+ /* fallback to copy_from_user outside mmap_lock */
+ if (unlikely(ret))
+ return -ENOENT; /* don't free the page */
+
+ flush_dcache_folio(folio);
+ return 0;
+}
+
int mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
unsigned long dst_addr, unsigned long src_addr,
uffd_flags_t flags, struct folio **foliop,
unsigned long increment)
{
- void *kaddr;
int ret;
struct folio *folio;
pmd_t *dst_pmd;
if (!folio)
goto out;
- kaddr = kmap_local_folio(folio, 0);
- /*
- * The read mmap_lock is held here. Despite the
- * mmap_lock being read recursive a deadlock is still
- * possible if a writer has taken a lock. For example:
- *
- * process A thread 1 takes read lock on own mmap_lock
- * process A thread 2 calls mmap, blocks taking write lock
- * process B thread 1 takes page fault, read lock on own mmap lock
- * process B thread 2 calls mmap, blocks taking write lock
- * process A thread 1 blocks taking read lock on process B
- * process B thread 1 blocks taking read lock on process A
- *
- * Disable page faults to prevent potential deadlock
- * and retry the copy outside the mmap_lock.
- */
- pagefault_disable();
- ret = copy_from_user(kaddr, (const void __user *) src_addr,
- PAGE_SIZE);
- pagefault_enable();
- kunmap_local(kaddr);
-
- /* fallback to copy_from_user outside mmap_lock */
- if (unlikely(ret)) {
- ret = -ENOENT;
+ ret = uffd_atomic_pte_copy(folio, src_addr);
+ if (ret) {
*foliop = folio;
- /* don't free the page */
goto out;
}
- flush_dcache_folio(folio);
} else {
folio = *foliop;
*foliop = NULL;