if (!range)
                return ERR_PTR(-ENOMEM);
 
+       INIT_LIST_HEAD(&range->garbage_collector_link);
        xe_vm_get(gpusvm_to_vm(gpusvm));
 
        return &range->base;
        return container_of(r, struct xe_svm_range, base);
 }
 
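+/*
+ * Mark @range as unmapped, add it to the VM's garbage collector list (only
+ * once, hence the list_empty() check), and queue the collector work on the
+ * root tile's primary GT page-fault workqueue.
+ */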
+static void
+xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
+                                  const struct mmu_notifier_range *mmu_range)
+{
+       struct xe_device *xe = vm->xe;
+
+       drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
+
+       spin_lock(&vm->svm.garbage_collector.lock);
+       if (list_empty(&range->garbage_collector_link))
+               list_add_tail(&range->garbage_collector_link,
+                             &vm->svm.garbage_collector.range_list);
+       spin_unlock(&vm->svm.garbage_collector.lock);
+
+       queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
+                  &vm->svm.garbage_collector.work);
+}
+
 static u8
 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
                                  const struct mmu_notifier_range *mmu_range,
        xe_svm_assert_in_notifier(vm);
 
        drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
-       /* TODO: Add range to garbage collector if VM is not closed */
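+       /*
+        * The range cannot be destroyed here as the locks required for that
+        * cannot be taken in notifier context, so UNMAPed ranges are handed
+        * off to the garbage collector instead.
+        */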
+       if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
+               xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
+                                                  mmu_range);
 }
 
 static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
                xe_svm_range_notifier_event_end(vm, r, mmu_range);
 }
 
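+/*
+ * Collect a single range: unbind it from the GPU (TODO) and remove it from
+ * GPU SVM. Called with vm->lock held for write.
+ */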
+static int __xe_svm_garbage_collector(struct xe_vm *vm,
+                                     struct xe_svm_range *range)
+{
+       /* TODO: Do unbind */
+
+       drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
+
+       return 0;
+}
+
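+/*
+ * Drain the VM's garbage collector list, dropping the list lock around each
+ * range collected. Requires vm->lock held for write; the VM is killed if
+ * collecting a range fails.
+ */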
+static int xe_svm_garbage_collector(struct xe_vm *vm)
+{
+       struct xe_svm_range *range;
+       int err;
+
+       lockdep_assert_held_write(&vm->lock);
+
+       if (xe_vm_is_closed_or_banned(vm))
+               return -ENOENT;
+
+       spin_lock(&vm->svm.garbage_collector.lock);
+       for (;;) {
+               range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
+                                                typeof(*range),
+                                                garbage_collector_link);
+               if (!range)
+                       break;
+
+               list_del(&range->garbage_collector_link);
+               spin_unlock(&vm->svm.garbage_collector.lock);
+
+               err = __xe_svm_garbage_collector(vm, range);
+               if (err) {
+                       drm_warn(&vm->xe->drm,
+                                "Garbage collection failed: %pe\n",
+                                ERR_PTR(err));
+                       xe_vm_kill(vm, true);
+                       return err;
+               }
+
+               spin_lock(&vm->svm.garbage_collector.lock);
+       }
+       spin_unlock(&vm->svm.garbage_collector.lock);
+
+       return 0;
+}
+
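+/* Worker that takes vm->lock for write and runs the garbage collector */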
+static void xe_svm_garbage_collector_work_func(struct work_struct *w)
+{
+       struct xe_vm *vm = container_of(w, struct xe_vm,
+                                       svm.garbage_collector.work);
+
+       down_write(&vm->lock);
+       xe_svm_garbage_collector(vm);
+       up_write(&vm->lock);
+}
+
 static const struct drm_gpusvm_ops gpusvm_ops = {
        .range_alloc = xe_svm_range_alloc,
        .range_free = xe_svm_range_free,
 {
        int err;
 
+       spin_lock_init(&vm->svm.garbage_collector.lock);
+       INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
+       INIT_WORK(&vm->svm.garbage_collector.work,
+                 xe_svm_garbage_collector_work_func);
+
        err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
                              current->mm, NULL, 0, vm->size,
                              SZ_512M, &gpusvm_ops, fault_chunk_sizes,
 void xe_svm_close(struct xe_vm *vm)
 {
        xe_assert(vm->xe, xe_vm_is_closed(vm));
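+       /* Ensure the garbage collector worker is not running during teardown */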
+       flush_work(&vm->svm.garbage_collector.work);
 }
 
 /**
        xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
 
 retry:
-       /* TODO: Run garbage collector */
+       /* Always process UNMAPs first so the view of SVM ranges is current */
+       err = xe_svm_garbage_collector(vm);
+       if (err)
+               return err;
 
        r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
                                            xe_vma_start(vma), xe_vma_end(vma),