dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
PCI_DMA_BIDIRECTIONAL);
- radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
+ xa_erase(&spt->vgpu->gtt.spts, spt->shadow_page.mfn);
if (spt->guest_page.gfn) {
if (spt->guest_page.oos_page)
static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
- struct intel_vgpu_ppgtt_spt *spt, *spn;
- struct radix_tree_iter iter;
- LIST_HEAD(all_spt);
- void __rcu **slot;
-
- rcu_read_lock();
- radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
- spt = radix_tree_deref_slot(slot);
- list_move(&spt->post_shadow_list, &all_spt);
- }
- rcu_read_unlock();
+ struct intel_vgpu_ppgtt_spt *spt;
+ unsigned long index;
- list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
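+ /*
+  * xa_for_each() tolerates entries being erased while we iterate,
+  * so ppgtt_free_spt() can drop each spt from the xarray directly
+  * and the intermediate all_spt list is no longer needed.
+  */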
+ xa_for_each(&vgpu->gtt.spts, index, spt)
ppgtt_free_spt(spt);
}
static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
struct intel_vgpu *vgpu, unsigned long mfn)
{
- return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
+ return xa_load(&vgpu->gtt.spts, mfn);
}
static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
- ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
+ ret = xa_insert(&vgpu->gtt.spts, spt->shadow_page.mfn, spt, GFP_KERNEL);
if (ret)
goto err_unmap_dma;
return spt;
err_unmap_dma:
dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:
free_spt(spt);
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
- INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);
+ xa_init(&gtt->spts);
INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
INIT_LIST_HEAD(&gtt->oos_page_list_head);
if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
gvt_err("vgpu ppgtt mm is not fully destroyed\n");
- if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
+ if (GEM_WARN_ON(!xa_empty(&vgpu->gtt.spts))) {
gvt_err("Why we still has spt not freed?\n");
ppgtt_free_all_spt(vgpu);
}