 	return hmm_vma_fault(addr, end, required_fault, walk);
 }
 
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static int hmm_vma_handle_absent_pmd(struct mm_walk *walk, unsigned long start,
+				     unsigned long end, unsigned long *hmm_pfns,
+				     pmd_t pmd)
+{
+	struct hmm_vma_walk *hmm_vma_walk = walk->private;
+	struct hmm_range *range = hmm_vma_walk->range;
+	unsigned long npages = (end - start) >> PAGE_SHIFT;
+	unsigned long addr = start;
+	swp_entry_t entry = pmd_to_swp_entry(pmd);
+	unsigned int required_fault;
+
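+	/*
+	 * Only device private entries whose pgmap is owned by the caller
+	 * can be translated in place; all other non-present entries are
+	 * handled below.
+	 */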
+	if (is_device_private_entry(entry) &&
+	    pfn_swap_entry_folio(entry)->pgmap->owner ==
+	    range->dev_private_owner) {
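+		/*
+		 * Report the range as valid device private memory, marked
+		 * with the PMD order so folio-aware callers can recognize
+		 * the huge mapping.
+		 */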
+		unsigned long cpu_flags = HMM_PFN_VALID |
+			hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
+		unsigned long pfn = swp_offset_pfn(entry);
+		unsigned long i;
+
+		if (is_writable_device_private_entry(entry))
+			cpu_flags |= HMM_PFN_WRITE;
+
+		/*
+		 * Fully populate the PFN list even though subsequent PFNs
+		 * could be inferred, because drivers that are not yet aware
+		 * of large folios probably do not support sparsely populated
+		 * PFN lists.
+		 */
+		for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
+			hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
+			hmm_pfns[i] |= pfn | cpu_flags;
+		}
+
+		return 0;
+	}
+
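+	/*
+	 * Device private entries owned by someone else are faulted so
+	 * their data migrates back to system memory. Any other absent
+	 * entry cannot be resolved here: return -EFAULT if a fault is
+	 * required, otherwise report the range as HMM_PFN_ERROR.
+	 */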
+	required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
+					      npages, 0);
+	if (required_fault) {
+		if (is_device_private_entry(entry))
+			return hmm_vma_fault(addr, end, required_fault, walk);
+		else
+			return -EFAULT;
+	}
+
+	return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
+}
+#else
+static int hmm_vma_handle_absent_pmd(struct mm_walk *walk, unsigned long start,
+				     unsigned long end, unsigned long *hmm_pfns,
+				     pmd_t pmd)
+{
+	struct hmm_vma_walk *hmm_vma_walk = walk->private;
+	struct hmm_range *range = hmm_vma_walk->range;
+	unsigned long npages = (end - start) >> PAGE_SHIFT;
+
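+	/*
+	 * Without CONFIG_ARCH_ENABLE_THP_MIGRATION, PMD swap entries
+	 * cannot exist, so preserve the old behavior: fault if required,
+	 * otherwise report the range as an error.
+	 */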
+	if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
+		return -EFAULT;
+	return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
+}
+#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
 static int hmm_vma_walk_pmd(pmd_t *pmdp,
 			    unsigned long start,
 			    unsigned long end,
@@ ... @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 		return hmm_pfns_fill(start, end, range, 0);
 	}
 
-	if (!pmd_present(pmd)) {
-		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
-			return -EFAULT;
-		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
-	}
+	if (!pmd_present(pmd))
+		return hmm_vma_handle_absent_pmd(walk, start, end, hmm_pfns,
+						 pmd);
 
 	if (pmd_trans_huge(pmd)) {
 		/*