return 0;
}
-
-static int shmem_mfill_atomic_pte_zeropage(struct vm_area_struct *dst_vma,
- unsigned long dst_addr)
+static inline int shmem_mfill_complete(struct vm_area_struct *dst_vma,
+ unsigned long dst_addr, pmd_t *dst_pmd, pgoff_t pgoff,
+ struct folio *folio, struct inode *inode, gfp_t gfp)
{
- struct inode *inode = file_inode(dst_vma->vm_file);
- struct shmem_inode_info *info = SHMEM_I(inode);
- struct address_space *mapping = inode->i_mapping;
- gfp_t gfp = mapping_gfp_mask(mapping);
- pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
- struct folio *folio;
- int ret;
pgoff_t max_off;
- pmd_t *dst_pmd;
-
- ret = uffd_get_dst_pmd(dst_vma, dst_addr, &dst_pmd);
- if (ret)
- return ret;
-
- if (shmem_inode_acct_blocks(inode, 1))
- return -ENOMEM;
-
- ret = -ENOMEM;
- folio = shmem_alloc_folio(gfp, 0, info, pgoff);
- if (!folio)
- goto out_unacct_blocks;
-
- clear_user_highpage(&folio->page, dst_addr);
+ int ret;
VM_BUG_ON(folio_test_locked(folio));
VM_BUG_ON(folio_test_swapbacked(folio));
+ ret = -EFAULT;
+ max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ if (unlikely(pgoff >= max_off))
+ goto out_release;
ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
if (ret)
goto out_release;
- ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
+ ret = shmem_add_to_page_cache(folio, inode->i_mapping, pgoff, NULL,
+ gfp);
if (ret)
goto out_release;
out_release:
folio_unlock(folio);
folio_put(folio);
-out_unacct_blocks:
shmem_inode_unacct_blocks(inode, 1);
return ret;
}
+
+static int shmem_mfill_atomic_pte_zeropage(struct vm_area_struct *dst_vma,
+ unsigned long dst_addr)
+{
+ struct inode *inode = file_inode(dst_vma->vm_file);
+ struct shmem_inode_info *info = SHMEM_I(inode);
+ gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
+ pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
+ struct folio *folio;
+ int ret;
+ pmd_t *dst_pmd;
+
+ ret = uffd_get_dst_pmd(dst_vma, dst_addr, &dst_pmd);
+ if (ret)
+ return ret;
+
+ if (shmem_inode_acct_blocks(inode, 1))
+ return -ENOMEM;
+
+ ret = -ENOMEM;
+ folio = shmem_alloc_folio(gfp, 0, info, pgoff);
+ if (!folio) {
+ shmem_inode_unacct_blocks(inode, 1);
+ return -ENOMEM;
+ }
+
+ clear_user_highpage(&folio->page, dst_addr);
+
+ return shmem_mfill_complete(dst_vma, dst_addr, dst_pmd, pgoff, folio,
+ inode, gfp);
+}
+
static int shmem_mfill_atomic_pte_copy(struct vm_area_struct *dst_vma,
unsigned long dst_addr, unsigned long src_addr,
uffd_flags_t flags, struct folio **foliop,
void *page_kaddr;
struct folio *folio;
int ret;
- pgoff_t max_off;
pmd_t *dst_pmd;
ret = uffd_get_dst_pmd(dst_vma, dst_addr, &dst_pmd);
*foliop = NULL;
}
- VM_BUG_ON(folio_test_locked(folio));
- VM_BUG_ON(folio_test_swapbacked(folio));
- __folio_set_locked(folio);
- __folio_set_swapbacked(folio);
- __folio_mark_uptodate(folio);
-
- ret = -EFAULT;
- max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
- if (unlikely(pgoff >= max_off))
- goto out_release;
-
- ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
- if (ret)
- goto out_release;
- ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
- if (ret)
- goto out_release;
-
- ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
- &folio->page, true, flags);
- if (ret)
- goto out_delete_from_cache;
+ return shmem_mfill_complete(dst_vma, dst_addr, dst_pmd, pgoff, folio,
+ inode, gfp);
- shmem_recalc_inode(inode, 1, 0);
- folio_unlock(folio);
- return 0;
-out_delete_from_cache:
- filemap_remove_folio(folio);
-out_release:
- folio_unlock(folio);
- folio_put(folio);
out_unacct_blocks:
shmem_inode_unacct_blocks(inode, 1);
return ret;