	struct page *page;
	int nr_vcpus;

-	rcu_read_lock();
-	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
-	rcu_read_unlock();
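+	/*
+	 * xa_load() takes and releases the RCU read lock internally, so
+	 * the explicit rcu_read_lock()/rcu_read_unlock() pair is no
+	 * longer needed around the lookup.
+	 */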
+	page = xa_load(&kvm->arch.vsie.addr_to_page, addr >> 9);
	if (page) {
		if (page_ref_inc_return(page) == 2)
			return page_to_virt(page);
		page_ref_dec(page);
	}
[...]
			kvm->arch.vsie.next++;
			kvm->arch.vsie.next %= nr_vcpus;
		}
-		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
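+		/*
+		 * Unlike radix_tree_delete(), xa_erase() takes and releases
+		 * the xa_lock internally; the vsie mutex held here still
+		 * serializes against concurrent inserts.
+		 */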
+		xa_erase(&kvm->arch.vsie.addr_to_page, page->index >> 9);
	}
	page->index = addr;
	/* double use of the same address */
-	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
+	if (xa_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page,
+		      GFP_KERNEL)) {
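+		/*
+		 * xa_insert() returns -EBUSY if the index is already in use
+		 * and -ENOMEM on allocation failure; either way, drop the
+		 * reference taken above.
+		 */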
		page_ref_dec(page);
		mutex_unlock(&kvm->arch.vsie.mutex);
		return NULL;
	}
[...]

/* Init the vsie data structures. To be called when a vm is initialized. */
void kvm_s390_vsie_init(struct kvm *kvm)
{
	mutex_init(&kvm->arch.vsie.mutex);
-	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
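+	/*
+	 * xa_init() takes no GFP flags; the XArray expects allocation
+	 * flags to be passed per operation instead (see xa_insert()
+	 * above).
+	 */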
+	xa_init(&kvm->arch.vsie.addr_to_page);
}

/* Destroy the vsie data structures. To be called when a vm is destroyed. */
void kvm_s390_vsie_destroy(struct kvm *kvm)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int i;

	mutex_lock(&kvm->arch.vsie.mutex);
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = kvm->arch.vsie.pages[i];
		kvm->arch.vsie.pages[i] = NULL;
		vsie_page = page_to_virt(page);
		release_gmap_shadow(vsie_page);
-		/* free the radix tree entry */
-		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
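+		/* drop the xarray entry before freeing the backing page */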
+		xa_erase(&kvm->arch.vsie.addr_to_page, page->index >> 9);
		__free_page(page);
	}
	kvm->arch.vsie.page_count = 0;