extern bool shmem_charge(struct inode *inode, long pages);
extern void shmem_uncharge(struct inode *inode, long pages);
-#ifdef CONFIG_USERFAULTFD
-#ifdef CONFIG_SHMEM
-extern int shmem_mfill_atomic_pte(struct vm_area_struct *dst_vma,
- unsigned long dst_addr,
- unsigned long src_addr,
- uffd_flags_t flags,
- struct folio **foliop);
-#else /* !CONFIG_SHMEM */
-#define shmem_mfill_atomic_pte(dst_vma, dst_addr, \
- src_addr, flags, foliop) ({ BUG(); 0; })
-#endif /* CONFIG_SHMEM */
-#endif /* CONFIG_USERFAULTFD */
-
/*
* Used space is stored as unsigned 64-bit value in bytes but
* quota core supports only signed 64-bit values so use that
return PAGE_SIZE;
}
int mfill_atomic_pte_poison(struct uffd_info *info);
-int mfill_atomic_pte_continue(struct uffd_info *info);
int mfill_atomic_pte_copy(struct uffd_info *info);
int mfill_atomic_pte_zeropage(struct uffd_info *info);
return ret;
}
+/*
+ * Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private).
+ *
+ * Looks up the folio already present in the shmem mapping at
+ * @info->dst_addr (no allocation is performed) and installs a PTE for it.
+ *
+ * Returns 0 on success, or a negative errno:
+ *   -EFAULT  no folio exists at the offset (including -ENOENT from
+ *            shmem_get_folio(), translated for our caller),
+ *   -EIO     the backing page is hardware-poisoned,
+ *   or the error from uffd_get_dst_pmd() / mfill_atomic_install_pte().
+ */
+static int shmem_mfill_atomic_pte_continue(struct uffd_info *info)
+{
+ struct vm_area_struct *dst_vma = info->dst_vma;
+ struct inode *inode = file_inode(dst_vma->vm_file);
+ pgoff_t pgoff = linear_page_index(dst_vma, info->dst_addr);
+ pmd_t *dst_pmd;
+ struct folio *folio;
+ struct page *page;
+ int ret;
+
+ ret = uffd_get_dst_pmd(dst_vma, info->dst_addr, &dst_pmd);
+ if (ret)
+ return ret;
+
+ /* SGP_NOALLOC: CONTINUE maps an existing folio, never allocates one. */
+ ret = shmem_get_folio(inode, pgoff, 0, &folio, SGP_NOALLOC);
+ /* Our caller expects us to return -EFAULT if we failed to find folio */
+ if (ret == -ENOENT)
+ ret = -EFAULT;
+ if (ret)
+ goto out;
+ if (!folio) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ page = folio_file_page(folio, pgoff);
+ if (PageHWPoison(page)) {
+ ret = -EIO;
+ goto out_release;
+ }
+
+ ret = mfill_atomic_install_pte(dst_pmd, dst_vma, info->dst_addr,
+ page, false, info->wp);
+ if (ret)
+ goto out_release;
+
+ /*
+ * Success path unlocks but deliberately does not folio_put(): the
+ * reference from shmem_get_folio() is kept — presumably consumed by
+ * the PTE installed above.  NOTE(review): confirm against
+ * mfill_atomic_install_pte()'s refcount contract.
+ */
+ folio_unlock(folio);
+ ret = 0;
+out:
+ return ret;
+out_release:
+ folio_unlock(folio);
+ folio_put(folio);
+ goto out;
+}
+
static int shmem_mfill_atomic_pte_copy(struct uffd_info *u_info)
{
struct vm_area_struct *dst_vma = u_info->dst_vma;
static const struct vm_uffd_ops shmem_uffd_ops = {
.copy = shmem_mfill_atomic_pte_copy,
.zeropage = shmem_mfill_atomic_pte_zeropage,
- .cont = mfill_atomic_pte_continue,
+ .cont = shmem_mfill_atomic_pte_continue,
.poison = mfill_atomic_pte_poison,
.is_dst_valid = shmem_is_dst_valid,
.increment = mfill_size,
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
-#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"
return ret;
}
-/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
-int mfill_atomic_pte_continue(struct uffd_info *info)
-{
- struct vm_area_struct *dst_vma = info->dst_vma;
- struct inode *inode = file_inode(dst_vma->vm_file);
- pgoff_t pgoff = linear_page_index(dst_vma, info->dst_addr);
- pmd_t *dst_pmd;
- struct folio *folio;
- struct page *page;
- int ret;
-
- ret = uffd_get_dst_pmd(dst_vma, info->dst_addr, &dst_pmd);
- if (ret)
- return ret;
-
- ret = shmem_get_folio(inode, pgoff, 0, &folio, SGP_NOALLOC);
- /* Our caller expects us to return -EFAULT if we failed to find folio */
- if (ret == -ENOENT)
- ret = -EFAULT;
- if (ret)
- goto out;
- if (!folio) {
- ret = -EFAULT;
- goto out;
- }
-
- page = folio_file_page(folio, pgoff);
- if (PageHWPoison(page)) {
- ret = -EIO;
- goto out_release;
- }
-
- ret = mfill_atomic_install_pte(dst_pmd, dst_vma, info->dst_addr,
- page, false, info->wp);
- if (ret)
- goto out_release;
-
- folio_unlock(folio);
- ret = 0;
-out:
- return ret;
-out_release:
- folio_unlock(folio);
- folio_put(folio);
- goto out;
-}
-
/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
int mfill_atomic_pte_poison(struct uffd_info *info)
{