put_page(page);
}
-static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
-{
- unsigned long upages = 0;
- unsigned long i;
-
- for (i = 0; i < migrate->npages; i++) {
- if (migrate->src[i] & MIGRATE_PFN_VALID &&
- !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
- upages++;
- }
- return upages;
-}
-
-static int
+static long
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t ttm_res_offset)
struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
- uint64_t mpages = 0;
+ long mpages;
dma_addr_t *src;
uint64_t *dst;
uint64_t i, j;
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
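+ /* mpages counts the pages successfully staged for the copy to VRAM */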
+ mpages = 0;
for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) {
struct page *spage;
out_free_vram_pages:
if (r) {
pr_debug("failed %d to copy memory to vram\n", r);
for (i = 0; i < npages && mpages; i++) {
if (!dst[i])
continue;
svm_migrate_put_vram_page(adev, dst[i]);
migrate->dst[i] = 0;
mpages--;
}
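+ /* report the error code to the caller instead of a partial page count */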
+ mpages = r;
}
#ifdef DEBUG_FORCE_MIXED_DOMAINS
}
#endif
- return r;
+ return mpages;
}
static long
struct dma_fence *mfence = NULL;
struct migrate_vma migrate = { 0 };
unsigned long cpages = 0;
- unsigned long mpages = 0;
+ long mpages = 0;
dma_addr_t *scratch;
void *buf;
int r = -ENOMEM;
else
pr_debug("0x%lx pages collected\n", cpages);
- r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
+ mpages = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+ if (mpages >= 0)
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
mpages, cpages, migrate.npages);
+ else
+ r = mpages;
svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
0, node->id, trigger, r);
out:
- if (!r && mpages) {
+ if (!r && mpages > 0) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
-
- return mpages;
}
- return r;
+
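+ /* return the first error seen, otherwise the number of migrated pages */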
+ return r ? r : mpages;
}
/**
}
}
-static int
+static long
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t npages)
uint64_t *src;
dma_addr_t *dst;
struct page *dpage;
+ long mpages;
uint64_t i = 0, j;
uint64_t addr;
int r = 0;
src = (uint64_t *)(scratch + npages);
dst = scratch;
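+ /* mpages counts the pages successfully staged for the copy back to system memory */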
+ mpages = 0;
for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
struct page *spage;
dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
+ mpages++;
j++;
}
out_oom:
if (r) {
pr_debug("failed %d copy to ram\n", r);
- while (i--) {
+ while (i-- && mpages) {
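+ /* entries that were never staged for migration have no system page to release */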
+ if (!migrate->dst[i])
+ continue;
svm_migrate_put_sys_page(dst[i]);
migrate->dst[i] = 0;
+ mpages--;
}
+ mpages = r;
}
- return r;
+ return mpages;
}
/**
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
- unsigned long upages = npages;
unsigned long cpages = 0;
- unsigned long mpages = 0;
+ long mpages = 0;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
else
pr_debug("0x%lx pages collected\n", cpages);
- r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
+ mpages = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
scratch, npages);
migrate_vma_pages(&migrate);
- upages = svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- upages, cpages, migrate.npages);
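+ /* a negative value from svm_migrate_copy_to_ram() is an error code, not a page count */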
+ if (mpages >= 0)
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
+ else
+ r = mpages;
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
node->id, 0, trigger, r);
out:
- if (!r && cpages) {
- mpages = cpages - upages;
+ if (!r && mpages > 0) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
}
if (r >= 0) {
+ WARN_ONCE(prange->vram_pages < mpages,
+ "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
+ prange->vram_pages, mpages);
prange->vram_pages -= mpages;
/* prange does not have vram page set its actual_loc to system