start >> PAGE_SHIFT, end >> PAGE_SHIFT,
0, node->id, trigger);
- svm_range_dma_unmap(adev->dev, scratch, 0, npages);
+ svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
if (cpages) {
prange->actual_loc = best_loc;
- svm_range_free_dma_mappings(prange, true);
+ svm_range_dma_unmap(prange);
} else {
svm_range_vram_node_free(prange);
}
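
For reference, the effect of the rename at this call site: after a successful RAM-to-VRAM migration (cpages != 0) the system-memory DMA mappings are stale, so they are dropped with the new range-level helper, while the dma_addr arrays themselves now stay allocated until svm_range_free(). A commented sketch of the branch (the comments are annotation, not part of the patch):

        if (cpages) {
                prange->actual_loc = best_loc;
                /* pages moved to VRAM: drop the stale system-memory DMA
                 * mappings, but keep the dma_addr arrays for later free
                 */
                svm_range_dma_unmap(prange);
        } else {
                /* nothing migrated: release the VRAM node instead */
                svm_range_vram_node_free(prange);
        }
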
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
node->id, 0, trigger);
- svm_range_dma_unmap(adev->dev, scratch, 0, npages);
+ svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
return r;
}
-void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
+void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages)
{
enum dma_data_direction dir = DMA_BIDIRECTIONAL;
}
}
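
The hunk above shows only the renamed signature and the DMA direction local; the unmap loop itself is elided. A minimal sketch of what the body presumably does, assuming it walks [offset, offset + npages) and skips never-mapped entries (the validity check here is a simplification):

        void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
                                     unsigned long offset, unsigned long npages)
        {
                enum dma_data_direction dir = DMA_BIDIRECTIONAL;
                unsigned long i;

                if (!dma_addr)
                        return;

                for (i = offset; i < offset + npages; i++) {
                        if (!dma_addr[i])
                                continue;       /* never mapped (simplified check) */
                        dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
                        dma_addr[i] = 0;
                }
        }
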
-void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma)
+void svm_range_dma_unmap(struct svm_range *prange)
{
struct kfd_process_device *pdd;
dma_addr_t *dma_addr;
continue;
}
dev = &pdd->dev->adev->pdev->dev;
- if (unmap_dma)
- svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
- kvfree(dma_addr);
- prange->dma_addr[gpuidx] = NULL;
+
+ svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
}
}
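
After the rename, the range-level helper only unmaps; freeing the per-GPU dma_addr arrays moves into svm_range_free() below. Filling in the lines elided around the visible fragments, the loop plausibly reads as follows (kfd_process_device_from_gpuidx() and the exact locals are assumptions based on the fragments shown):

        void svm_range_dma_unmap(struct svm_range *prange)
        {
                struct kfd_process *p = container_of(prange->svms,
                                                     struct kfd_process, svms);
                struct kfd_process_device *pdd;
                dma_addr_t *dma_addr;
                struct device *dev;
                uint32_t gpuidx;

                for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
                        dma_addr = prange->dma_addr[gpuidx];
                        if (!dma_addr)
                                continue;

                        pdd = kfd_process_device_from_gpuidx(p, gpuidx);
                        if (!pdd)
                                continue;
                        dev = &pdd->dev->adev->pdev->dev;

                        /* unmap only; the array is freed in svm_range_free() */
                        svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
                }
        }
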
{
uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
+ uint32_t gpuidx;
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
prange->start, prange->last);
svm_range_vram_node_free(prange);
- svm_range_free_dma_mappings(prange, do_unmap);
+ if (do_unmap)
+ svm_range_dma_unmap(prange);
if (do_unmap && !p->xnack_enabled) {
pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
}
+
+ /* free dma_addr array for each gpu */
+ for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
+ if (prange->dma_addr[gpuidx]) {
+ kvfree(prange->dma_addr[gpuidx]);
+ prange->dma_addr[gpuidx] = NULL;
+ }
+ }
+
mutex_destroy(&prange->lock);
mutex_destroy(&prange->migrate_mutex);
kfree(prange);
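
Two properties of the resulting svm_range_free() are worth noting: the unmap (when requested) happens before the arrays are released, and the kvfree loop runs unconditionally, so the arrays are reclaimed even on the do_unmap == false path, where the old svm_range_free_dma_mappings(prange, false) freed them without unmapping. Consolidated, the function now has roughly this shape (elided lines abbreviated as comments):

        static void svm_range_free(struct svm_range *prange, bool do_unmap)
        {
                uint32_t gpuidx;

                svm_range_vram_node_free(prange);
                if (do_unmap)
                        svm_range_dma_unmap(prange);    /* unmap only */

                /* ... unreserve mem limit when do_unmap && !p->xnack_enabled ... */

                /* always free the per-GPU dma_addr arrays */
                for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
                        if (prange->dma_addr[gpuidx]) {
                                kvfree(prange->dma_addr[gpuidx]);
                                prange->dma_addr[gpuidx] = NULL;
                        }
                }

                mutex_destroy(&prange->lock);
                mutex_destroy(&prange->migrate_mutex);
                kfree(prange);
        }
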
struct svm_range *prange, struct mm_struct *mm,
enum svm_work_list_ops op);
void schedule_deferred_list_work(struct svm_range_list *svms);
-void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
+void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
-void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma);
+void svm_range_dma_unmap(struct svm_range *prange);
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,
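
For callers, the split leaves a two-level interface: a device-scoped unmap of a single dma_addr array (used for the migration scratch buffers above) and a range-scoped wrapper covering every GPU mapping of a prange. Illustrative call sites matching the hunks earlier in the patch:

        /* one device, one array: migration scratch buffer */
        svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);

        /* whole range, all GPUs; arrays are freed later by svm_range_free() */
        svm_range_dma_unmap(prange);
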