mmu_interval_notifier_remove(&prange->notifier);
 }
 
+/*
+ * svm_is_valid_dma_mapping_addr - check whether @dma_addr is a live,
+ * system-memory DMA mapping that may be passed to dma_unmap_page().
+ *
+ * Returns true only when the address is non-zero, is not a DMA mapping
+ * error for @dev, and does not carry the SVM_RANGE_VRAM_DOMAIN flag bit
+ * (addresses flagged as VRAM-domain encode a VRAM location, not a DMA
+ * mapping, and must not be unmapped through the DMA API).
+ */
+static bool
+svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
+{
+       return dma_addr && !dma_mapping_error(dev, dma_addr) &&
+              !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
+}
+
 static int
 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
                      unsigned long offset, unsigned long npages,
 
        addr += offset;
        for (i = 0; i < npages; i++) {
-               if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
-                             "leaking dma mapping\n"))
+               if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
                        dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
 
                page = hmm_pfn_to_page(hmm_pfns[i]);
                return;
 
        for (i = offset; i < offset + npages; i++) {
-               if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
+               if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
                        continue;
                pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
                dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
        unsigned long last_start;
        int last_domain;
        int r = 0;
-       int64_t i;
+       int64_t i, j;
 
        last_start = prange->start + offset;
 
                                                NULL, dma_addr,
                                                &vm->last_update,
                                                &table_freed);
+
+               for (j = last_start - prange->start; j <= i; j++)
+                       dma_addr[j] |= last_domain;
+
                if (r) {
                        pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
                        goto out;