return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = phys_to_pfn_t(phys, 0);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+ return vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn),
+ vmf->flags & FAULT_FLAG_WRITE);
}
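
For context, vmf_insert_page_mkwrite() takes a refcounted struct page rather than a raw pfn, so the PTE fault path now goes through normal page refcounting. A minimal sketch of a .fault handler built on it, assuming a made-up driver (struct my_dev, its pages[] array and nr_pages are illustrative names, not part of this patch):

#include <linux/mm.h>
#include <linux/mm_types.h>

struct my_dev {				/* hypothetical driver state */
	struct page **pages;
	unsigned long nr_pages;
};

static vm_fault_t my_dev_fault(struct vm_fault *vmf)
{
	struct my_dev *dev = vmf->vma->vm_private_data;

	if (vmf->pgoff >= dev->nr_pages)
		return VM_FAULT_SIGBUS;

	/* Insert a refcounted page; map it writable on write faults. */
	return vmf_insert_page_mkwrite(vmf, dev->pages[vmf->pgoff],
				       vmf->flags & FAULT_FLAG_WRITE);
}
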
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = phys_to_pfn_t(phys, 0);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
+ return vmf_insert_folio_pmd(vmf, page_folio(pfn_t_to_page(pfn)),
+ vmf->flags & FAULT_FLAG_WRITE);
}
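
The PMD hunk above and the PUD hunk below follow the same pattern: turn the device page into its folio and use the folio-based insert helpers. A hedged sketch of the dispatch on fault order, where my_dev_insert_huge() is an illustrative name and folio is the PMD/PUD-aligned device folio the driver has already resolved for vmf->pgoff (neither is part of this patch):

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/pgtable.h>

static vm_fault_t my_dev_insert_huge(struct vm_fault *vmf,
				     struct folio *folio, unsigned int order)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (order == PMD_ORDER)
		return vmf_insert_folio_pmd(vmf, folio, write);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	if (order == PUD_ORDER)
		return vmf_insert_folio_pud(vmf, folio, write);
#endif
	return VM_FAULT_FALLBACK;
}
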
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
return VM_FAULT_SIGBUS;
}
- pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = phys_to_pfn_t(phys, 0);
dax_set_mapping(vmf, pfn, fault_size);
- return vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
+ return vmf_insert_folio_pud(vmf, page_folio(pfn_t_to_page(pfn)),
+ vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
{
struct dev_pagemap *pgmap = folio->pgmap;
- if (WARN_ON_ONCE(!pgmap->ops))
- return;
-
- if (WARN_ON_ONCE(pgmap->type != MEMORY_DEVICE_FS_DAX &&
- !pgmap->ops->page_free))
+ if (WARN_ON_ONCE((!pgmap->ops &&
+ pgmap->type != MEMORY_DEVICE_GENERIC) ||
+ (pgmap->ops && !pgmap->ops->page_free &&
+ pgmap->type != MEMORY_DEVICE_FS_DAX)))
return;
mem_cgroup_uncharge(folio);
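
The reworked WARN_ON_ONCE() in free_zone_device_folio() folds two rules into one expression: a NULL ops is tolerated only for MEMORY_DEVICE_GENERIC, and a present ops without ->page_free is tolerated only for MEMORY_DEVICE_FS_DAX. Restated as a hypothetical helper with equivalent logic (not part of the patch):

#include <linux/memremap.h>

/* Hypothetical restatement of the sanity check above. */
static bool pgmap_free_is_misconfigured(const struct dev_pagemap *pgmap)
{
	/* Only MEMORY_DEVICE_GENERIC may register no ops at all. */
	if (!pgmap->ops)
		return pgmap->type != MEMORY_DEVICE_GENERIC;

	/* With ops present, only FS DAX may omit ->page_free. */
	return !pgmap->ops->page_free &&
	       pgmap->type != MEMORY_DEVICE_FS_DAX;
}
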
* zero which indicates the page has been removed from the file
* system mapping.
*/
- if (pgmap->type != MEMORY_DEVICE_FS_DAX)
+ if (pgmap->type != MEMORY_DEVICE_FS_DAX &&
+ pgmap->type != MEMORY_DEVICE_GENERIC)
folio->mapping = NULL;
switch (pgmap->type) {
* Reset the refcount to 1 to prepare for handing out the page
* again.
*/
- pgmap->ops->page_free(folio_page(folio, 0));
folio_set_count(folio, 1);
break;
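
The MEMORY_DEVICE_GENERIC changes above line up with how device-dax style users register their memory: typically no dev_pagemap_ops at all, with freeing reduced to resetting the folio refcount to 1. A hedged sketch of such a registration (my_pgmap and my_map() are illustrative names and the resource wiring is a placeholder, not taken from this patch):

#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/memremap.h>

static struct dev_pagemap my_pgmap = {
	.type = MEMORY_DEVICE_GENERIC,
	/* No .ops: on the last put, free_zone_device_folio() just resets
	 * the folio refcount to 1 so the page can be handed out again. */
};

static void *my_map(struct device *dev, struct resource *res)
{
	my_pgmap.range.start = res->start;
	my_pgmap.range.end = res->end;
	my_pgmap.nr_range = 1;

	return devm_memremap_pages(dev, &my_pgmap);
}
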