struct xarray pt;
struct mmu_interval_notifier notifier;
struct mutex mutex;
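+ /* behavior flag bits set via the HMM_DMIRROR_FLAGS ioctl, e.g. HMM_DMIRROR_FLAG_FAIL_ALLOC */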
+ __u64 flags;
};
page_to_pfn(spage)))
goto next;
- dpage = dmirror_devmem_alloc_page(dmirror, is_large);
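+ /* One-shot failure injection: the flag is consumed, so only this single allocation attempt fails */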
+ if (dmirror->flags & HMM_DMIRROR_FLAG_FAIL_ALLOC) {
+ dmirror->flags &= ~HMM_DMIRROR_FLAG_FAIL_ALLOC;
+ dpage = NULL;
+ } else {
+ dpage = dmirror_devmem_alloc_page(dmirror, is_large);
+ }
+
if (!dpage) {
struct folio *folio;
unsigned long i;
spage = BACKING_PAGE(spage);
order = folio_order(page_folio(spage));
-
if (order)
+ *dst = MIGRATE_PFN_COMPOUND;
+ if (*src & MIGRATE_PFN_WRITE)
+ *dst |= MIGRATE_PFN_WRITE;
+
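+ /* A forced failure must also drop the compound hint so the per-page fallback below is taken */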
+ if (dmirror->flags & HMM_DMIRROR_FLAG_FAIL_ALLOC) {
+ dmirror->flags &= ~HMM_DMIRROR_FLAG_FAIL_ALLOC;
+ *dst &= ~MIGRATE_PFN_COMPOUND;
+ dpage = NULL;
+ } else if (order) {
dpage = folio_page(vma_alloc_folio(GFP_HIGHUSER_MOVABLE,
order, args->vma, addr), 0);
- else
- dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
-
- /* Try with smaller pages if large allocation fails */
- if (!dpage && order) {
+ } else {
dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
- if (!dpage)
- return VM_FAULT_OOM;
- order = 0;
}
+ if (!dpage && !order)
+ return VM_FAULT_OOM;
+
pr_debug("migrating from sys to dev pfn src: 0x%lx pfn dst: 0x%lx\n",
page_to_pfn(spage), page_to_pfn(dpage));
- lock_page(dpage);
- xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
- copy_highpage(dpage, spage);
- *dst = migrate_pfn(page_to_pfn(dpage));
- if (*src & MIGRATE_PFN_WRITE)
- *dst |= MIGRATE_PFN_WRITE;
- if (order)
- *dst |= MIGRATE_PFN_COMPOUND;
+
+ if (dpage) {
+ lock_page(dpage);
+ *dst |= migrate_pfn(page_to_pfn(dpage));
+ }
for (i = 0; i < (1 << order); i++) {
struct page *src_page;
struct page *dst_page;
+ /* Try with smaller pages if the large allocation failed */
+ if (!dpage && order) {
+ dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr);
+ if (!dpage)
+ return VM_FAULT_OOM;
+ lock_page(dpage);
+ dst[i] = migrate_pfn(page_to_pfn(dpage));
+ if (*src & MIGRATE_PFN_WRITE)
+ dst[i] |= MIGRATE_PFN_WRITE;
+ dst_page = dpage;
+ dpage = NULL; /* force a fresh allocation on the next iteration */
+ } else {
+ dst_page = pfn_to_page(page_to_pfn(dpage) + i);
+ }
+
src_page = pfn_to_page(page_to_pfn(spage) + i);
- dst_page = pfn_to_page(page_to_pfn(dpage) + i);
xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
+ addr += PAGE_SIZE;
copy_highpage(dst_page, src_page);
}
next:
- addr += PAGE_SIZE << order;
src += 1 << order;
dst += 1 << order;
}
dmirror_device_remove_chunks(dmirror->mdevice);
ret = 0;
break;
+ case HMM_DMIRROR_FLAGS:
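+ /* The flag bits are carried in cmd.npages */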
+ dmirror->flags = cmd.npages;
+ ret = 0;
+ break;
default:
return -EINVAL;
#define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x06, struct hmm_dmirror_cmd)
#define HMM_DMIRROR_RELEASE _IOWR('H', 0x07, struct hmm_dmirror_cmd)
+#define HMM_DMIRROR_FLAGS _IOWR('H', 0x08, struct hmm_dmirror_cmd)
+
+#define HMM_DMIRROR_FLAG_FAIL_ALLOC (1ULL << 0)
/*
* Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.
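
For reference, a minimal userspace sketch of the new knob. This is hypothetical test code, not part of the patch: it assumes the existing struct hmm_dmirror_cmd layout and the HMM_DMIRROR_MIGRATE_TO_DEV ioctl already defined in this header, and it passes the flag bits in cmd.npages exactly as the HMM_DMIRROR_FLAGS handler above reads them.

#include <err.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "test_hmm_uapi.h"

/* Hypothetical helper: arm a single allocation failure, then migrate
 * npages starting at buf to device memory so the driver's fallback
 * path runs. The driver clears HMM_DMIRROR_FLAG_FAIL_ALLOC after
 * consuming it, so only the first allocation attempt fails. mirror
 * must be npages pages of scratch space; the migrate ioctl copies the
 * migrated contents back through cmd.ptr for verification. */
static void migrate_with_one_alloc_failure(int fd, void *buf, void *mirror,
					   unsigned long npages)
{
	struct hmm_dmirror_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.npages = HMM_DMIRROR_FLAG_FAIL_ALLOC;	/* flag bits ride in npages */
	if (ioctl(fd, HMM_DMIRROR_FLAGS, &cmd) < 0)
		err(1, "HMM_DMIRROR_FLAGS");

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr = (uintptr_t)buf;
	cmd.ptr = (uintptr_t)mirror;
	cmd.npages = npages;
	if (ioctl(fd, HMM_DMIRROR_MIGRATE_TO_DEV, &cmd) < 0)
		err(1, "HMM_DMIRROR_MIGRATE_TO_DEV");
}

A test for the new path would call this on a THP-backed range and then check that the range still migrated, just as split small pages.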