INIT_LIST_HEAD(&gmap->crst_list);
INIT_LIST_HEAD(&gmap->children);
INIT_LIST_HEAD(&gmap->pt_list);
- INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
- INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
- INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
+ xa_init(&gmap->guest_to_host);
+ xa_init(&gmap->host_to_guest);
+ xa_init(&gmap->host_to_rmap);
spin_lock_init(&gmap->guest_table_lock);
spin_lock_init(&gmap->shadow_lock);
atomic_set(&gmap->ref_count, 1);
__tlb_flush_global();
}
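/*
 * Editorial aside, not part of the patch: unlike INIT_RADIX_TREE(),
 * xa_init() takes no gfp_t. An XArray carries its own internal spinlock,
 * and the allocation flags move to the individual operations, which is
 * why GFP_KERNEL/GFP_ATOMIC disappear here and reappear at the store
 * sites below, e.g. (sketch):
 *
 *	xa_store(&gmap->guest_to_host, idx, entry, GFP_KERNEL);
 *	xa_store(&gmap->host_to_guest, idx, entry, GFP_ATOMIC);
 */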
-static void gmap_radix_tree_free(struct radix_tree_root *root)
-{
- struct radix_tree_iter iter;
- unsigned long indices[16];
- unsigned long index;
- void __rcu **slot;
- int i, nr;
-
- /* A radix tree is freed by deleting all of its entries */
- index = 0;
- do {
- nr = 0;
- radix_tree_for_each_slot(slot, root, &iter, index) {
- indices[nr] = iter.index;
- if (++nr == 16)
- break;
- }
- for (i = 0; i < nr; i++) {
- index = indices[i];
- radix_tree_delete(root, index);
- }
- } while (nr > 0);
-}
-
-static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
+static void gmap_rmap_free(struct xarray *xa)
{
struct gmap_rmap *rmap, *rnext, *head;
- struct radix_tree_iter iter;
- unsigned long indices[16];
unsigned long index;
- void __rcu **slot;
- int i, nr;
-
- /* A radix tree is freed by deleting all of its entries */
- index = 0;
- do {
- nr = 0;
- radix_tree_for_each_slot(slot, root, &iter, index) {
- indices[nr] = iter.index;
- if (++nr == 16)
- break;
- }
- for (i = 0; i < nr; i++) {
- index = indices[i];
- head = radix_tree_delete(root, index);
- gmap_for_each_rmap_safe(rmap, rnext, head)
- kfree(rmap);
- }
- } while (nr > 0);
+
+ xa_for_each(xa, index, head) {
+ gmap_for_each_rmap_safe(rmap, rnext, head)
+ kfree(rmap);
+ }
+ xa_destroy(xa);
}
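/*
 * Editorial aside, not part of the patch: the chunked
 * radix_tree_for_each_slot()/radix_tree_delete() teardown is no longer
 * needed, because xa_for_each() visits every present entry directly and
 * xa_destroy() then frees all internal nodes in one call. The canonical
 * teardown for an XArray holding kmalloc'd payloads looks like (sketch):
 *
 *	unsigned long index;
 *	void *entry;
 *
 *	xa_for_each(&xa, index, entry)
 *		kfree(entry);
 *	xa_destroy(&xa);
 */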
/* Free all segment & region tables. */
list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
__free_pages(page, CRST_ALLOC_ORDER);
- gmap_radix_tree_free(&gmap->guest_to_host);
- gmap_radix_tree_free(&gmap->host_to_guest);
+ xa_destroy(&gmap->guest_to_host);
+ xa_destroy(&gmap->host_to_guest);
/* Free additional data for a shadow gmap */
if (gmap_is_shadow(gmap)) {
/* Free all page tables. */
list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
page_table_free_pgste(page);
- gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
+ gmap_rmap_free(&gmap->host_to_rmap);
/* Release reference to the parent */
gmap_put(gmap->parent);
}
BUG_ON(gmap_is_shadow(gmap));
spin_lock(&gmap->guest_table_lock);
- entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
+ entry = xa_erase(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
if (entry) {
flush = (*entry != _SEGMENT_ENTRY_EMPTY);
*entry = _SEGMENT_ENTRY_EMPTY;
{
unsigned long vmaddr;
- vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
+ vmaddr = (unsigned long) xa_erase(&gmap->guest_to_host,
gaddr >> PMD_SHIFT);
return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
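/*
 * Editorial aside, not part of the patch: xa_erase() removes the entry
 * at the given index and returns the old value (NULL if the slot was
 * empty), mirroring radix_tree_delete(), so callers can keep testing
 * and using the return value unchanged.
 */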
/* Remove old translation */
flush |= __gmap_unmap_by_gaddr(gmap, to + off);
/* Store new translation */
- if (radix_tree_insert(&gmap->guest_to_host,
+ if (xa_is_err(xa_store(&gmap->guest_to_host,
(to + off) >> PMD_SHIFT,
- (void *) from + off))
+ (void *) from + off, GFP_KERNEL)))
break;
}
up_write(&gmap->mm->mmap_sem);
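/*
 * Editorial aside, not part of the patch: xa_store() returns the
 * previous entry on success and an encoded error pointer on failure;
 * errors are therefore detected with xa_is_err(), or unpacked into an
 * errno with xa_err(), e.g. (sketch):
 *
 *	old = xa_store(&xa, idx, ptr, GFP_KERNEL);
 *	if (xa_is_err(old))
 *		return xa_err(old);
 */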
unsigned long vmaddr;
vmaddr = (unsigned long)
- radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
+ xa_load(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
/* Note: guest_to_host is empty for a shadow gmap */
return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
/* Are we allowed to use huge pages? */
if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
return -EFAULT;
- /* Link gmap segment table entry location to page table. */
- rc = radix_tree_preload(GFP_KERNEL);
- if (rc)
+ /* Reserve an entry for this address before we acquire locks */
+ rc = xa_insert(&gmap->host_to_guest, vmaddr >> PMD_SHIFT, NULL,
+ GFP_KERNEL);
+ if (rc == -ENOMEM)
return rc;
+ /* Link gmap segment table entry location to page table. */
ptl = pmd_lock(mm, pmd);
spin_lock(&gmap->guest_table_lock);
if (*table == _SEGMENT_ENTRY_EMPTY) {
- rc = radix_tree_insert(&gmap->host_to_guest,
- vmaddr >> PMD_SHIFT, table);
+ rc = xa_err(xa_store(&gmap->host_to_guest, vmaddr >> PMD_SHIFT,
+ table, GFP_ATOMIC));
if (!rc) {
if (pmd_large(*pmd)) {
*table = (pmd_val(*pmd) &
}
spin_unlock(&gmap->guest_table_lock);
spin_unlock(ptl);
- radix_tree_preload_end();
return rc;
}
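/*
 * Editorial aside, not part of the patch: the xa_insert(..., NULL,
 * GFP_KERNEL) above replaces radix_tree_preload(): it allocates the
 * node for this index before any spinlock is taken, so the xa_store()
 * with GFP_ATOMIC under guest_table_lock finds the slot already present
 * and cannot fail for lack of memory. An -EBUSY return (an entry or
 * reservation already exists at that index) is deliberately not treated
 * as an error here; the subsequent store replaces whatever occupies the
 * slot.
 */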
pte_t *ptep;
/* Find the vm address for the guest address */
- vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
+ vmaddr = (unsigned long) xa_load(&gmap->guest_to_host,
gaddr >> PMD_SHIFT);
if (vmaddr) {
vmaddr |= gaddr & ~PMD_MASK;
for (gaddr = from; gaddr < to;
gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
/* Find the vm address for the guest address */
- vmaddr = (unsigned long)
- radix_tree_lookup(&gmap->guest_to_host,
+ vmaddr = (unsigned long) xa_load(&gmap->guest_to_host,
gaddr >> PMD_SHIFT);
if (!vmaddr)
continue;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
+/*
+ * If there is an entry for this address already, returns -EBUSY. If
+ * we run out of memory, returns -ENOMEM. If we inserted a reserved
+ * entry, returns 0.
+ */
+static inline int gmap_reserve_rmap(struct gmap *sg, unsigned long vmaddr)
+{
+ return xa_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT, NULL,
+ GFP_KERNEL);
+}
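/*
 * Editorial aside, not part of the patch: inserting NULL stores a
 * "zero" entry, i.e. a reservation. The slot reads back as NULL via
 * xa_load() but counts as occupied, and the node is allocated here
 * with GFP_KERNEL so a later store at this index under a spinlock
 * needs no further memory, e.g. (sketch):
 *
 *	xa_insert(&xa, idx, NULL, GFP_KERNEL);	  reserves the slot
 *	entry = xa_load(&xa, idx);		  yields entry == NULL
 *	rc = xa_insert(&xa, idx, p, GFP_KERNEL);  rc == -EBUSY
 */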
+
+/*
+ * If there is still a reserved entry at this address, remove it (which
+ * may also free node memory in the XArray); an entry stored at this
+ * index in the meantime is left untouched.
+ */
+static inline void gmap_release_rmap(struct gmap *sg, unsigned long vmaddr)
+{
+ xa_cmpxchg(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT, XA_ZERO_ENTRY,
+ NULL, GFP_KERNEL);
+}
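/*
 * Editorial aside, not part of the patch: this matches the stock helper
 * xa_release(). The compare-and-exchange from XA_ZERO_ENTRY to NULL
 * clears the slot only if it still holds the reservation; a real rmap
 * stored at this index in the meantime is left alone.
 */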
+
/**
- * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
+ * gmap_insert_rmap - add a rmap to the host_to_rmap xarray
* @sg: pointer to the shadow guest address space structure
* @vmaddr: vm address associated with the rmap
* @rmap: pointer to the rmap structure
*
- * Called with the sg->guest_table_lock
+ * Called with the sg->guest_table_lock and the page table lock held
*/
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
struct gmap_rmap *rmap)
{
- void __rcu **slot;
+ XA_STATE(xas, &sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
+ void *entry;
BUG_ON(!gmap_is_shadow(sg));
- slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
- if (slot) {
- rmap->next = radix_tree_deref_slot_protected(slot,
- &sg->guest_table_lock);
- radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
- } else {
- rmap->next = NULL;
- radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
- rmap);
- }
+
+ xas_lock(&xas);
+ entry = xas_load(&xas);
+ if (entry == XA_ZERO_ENTRY)
+ entry = NULL;
+ rmap->next = entry;
+ xas_store(&xas, rmap);
+ xas_unlock(&xas);
}
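/*
 * Editorial aside, not part of the patch: the advanced xas_* API is used
 * here because the slot was reserved by gmap_reserve_rmap(), so
 * xas_store() under xas_lock() needs no allocation. Unlike xa_load(),
 * xas_load() returns the raw XA_ZERO_ENTRY for a reserved slot, hence
 * the explicit conversion to NULL before the new rmap is pushed onto
 * the singly linked list whose head lives in the slot.
 */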
/**
if (!rmap)
return -ENOMEM;
rmap->raddr = raddr;
- rc = radix_tree_preload(GFP_KERNEL);
- if (rc) {
+ rc = gmap_reserve_rmap(sg, vmaddr);
+ if (rc == -ENOMEM) {
kfree(rmap);
return rc;
}
spin_unlock(&sg->guest_table_lock);
gmap_pte_op_end(ptl);
}
- radix_tree_preload_end();
if (rc) {
+ gmap_release_rmap(sg, vmaddr);
kfree(rmap);
rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
if (rc)
rc = vmaddr;
break;
}
- rc = radix_tree_preload(GFP_KERNEL);
- if (rc)
+ rc = gmap_reserve_rmap(sg, vmaddr);
+ if (rc == -ENOMEM)
break;
rc = -EAGAIN;
sptep = gmap_pte_op_walk(parent, paddr, &ptl);
if (!tptep) {
spin_unlock(&sg->guest_table_lock);
gmap_pte_op_end(ptl);
- radix_tree_preload_end();
+ gmap_release_rmap(sg, vmaddr);
break;
}
rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
}
gmap_pte_op_end(ptl);
spin_unlock(&sg->guest_table_lock);
+ if (rc < 0)
+ gmap_release_rmap(sg, vmaddr);
}
- radix_tree_preload_end();
if (!rc)
break;
rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
return;
}
/* Remove the page table tree from one specific entry */
- head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
+ head = xa_erase(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
gmap_for_each_rmap_safe(rmap, rnext, head) {
bits = rmap->raddr & _SHADOW_RMAP_MASK;
raddr = rmap->raddr ^ bits;
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
- table = radix_tree_lookup(&gmap->host_to_guest,
- vmaddr >> PMD_SHIFT);
+ table = xa_load(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
if (table)
gaddr = __gmap_segment_gaddr(table) + offset;
spin_unlock(&gmap->guest_table_lock);
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
- pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
+ pmdp = (pmd_t *)xa_erase(&gmap->host_to_guest,
vmaddr >> PMD_SHIFT);
if (pmdp) {
gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
- entry = radix_tree_delete(&gmap->host_to_guest,
- vmaddr >> PMD_SHIFT);
+ entry = xa_erase(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
if (entry) {
pmdp = (pmd_t *)entry;
gaddr = __gmap_segment_gaddr(entry);
rcu_read_lock();
list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
spin_lock(&gmap->guest_table_lock);
- entry = radix_tree_delete(&gmap->host_to_guest,
- vmaddr >> PMD_SHIFT);
+ entry = xa_erase(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
if (entry) {
pmdp = (pmd_t *)entry;
gaddr = __gmap_segment_gaddr(entry);