static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
- struct radix_tree_iter iter;
- void __rcu **slot;
-
- rcu_read_lock();
- radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
- radix_tree_delete(&obj->mm.get_page.radix, iter.index);
- rcu_read_unlock();
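+ /*
+ * xa_destroy() erases every entry and frees the xarray's internal
+ * nodes in a single call, which is why the RCU-protected
+ * slot-by-slot deletion above can go away. It does not free what
+ * the entries point at, but here they are only borrowed sg
+ * pointers and value entries, so there is nothing else to free.
+ */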
+ xa_destroy(&obj->mm.get_page.xa);
}
struct sg_table *
GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- /* As we iterate forward through the sg, we record each entry in a
- * radixtree for quick repeated (backwards) lookups. If we have seen
+ /* As we iterate forward through the sg, we record each entry in an
+ * xarray for quick repeated (backwards) lookups. If we have seen
* this index previously, we will have an entry for it.
*
* Initial lookup is O(N), but this is amortized to O(1) for
/* We prefer to reuse the last sg so that repeated lookup of this
* (or the subsequent) sg are fast - comparing against the last
- * sg is faster than going through the radixtree.
+ * sg is faster than going through the xarray.
*/
sg = iter->sg_pos;
while (idx + count <= n) {
void *entry;
unsigned long i;
- int ret;
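+ /*
+ * __GFP_NOWARN: failing to cache an entry is harmless (we just
+ * fall back to a linear scan below), so stay quiet on allocation
+ * failure.
+ */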
+ const gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
/* If we cannot allocate and insert this entry, or the
* individual pages from this range, cancel updating the
* sg_idx so that on this lookup we are forced to linearly
* scan onwards, but on future lookups we will try the
- * insertion again (in which case we need to be careful of
- * the error return reporting that we have already inserted
- * this index).
+ * insertion again.
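+ * Note that xa_store(), unlike radix_tree_insert(), replaces any
+ * existing entry rather than returning -EEXIST, so a racing
+ * thread that already populated this index is harmless and only
+ * allocation failure (-ENOMEM) needs to be checked.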
*/
- ret = radix_tree_insert(&iter->radix, idx, sg);
- if (ret && ret != -EEXIST)
+ if (xa_store(&iter->xa, idx, sg, gfp) == XA_ERROR(-ENOMEM))
goto scan;
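+ /*
+ * Tail pages of a multi-page sg entry are recorded as value
+ * entries holding the index of the head page, so a lookup
+ * landing in the middle of the range can be redirected to the
+ * real sg pointer (see the xa_is_value() handling below).
+ */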
entry = xa_mk_value(idx);
for (i = 1; i < count; i++) {
- ret = radix_tree_insert(&iter->radix, idx + i, entry);
- if (ret && ret != -EEXIST)
+ if (xa_store(&iter->xa, idx + i, entry, gfp) ==
+ XA_ERROR(-ENOMEM))
goto scan;
}
if (unlikely(n < idx)) /* insertion completed by another thread */
goto lookup;
- /* In case we failed to insert the entry into the radixtree, we need
+ /* In case we failed to insert the entry into the xarray, we need
* to look beyond the current sg.
*/
while (idx + count <= n) {
lookup:
rcu_read_lock();
- sg = radix_tree_lookup(&iter->radix, n);
+ sg = xa_load(&iter->xa, n);
GEM_BUG_ON(!sg);
/* If this index is in the middle of multi-page sg entry,
- * the radix tree will contain a value entry that points
+ * the xarray will contain a value entry that points
* to the start of that range. We will return the pointer to
* the base page and the offset of this page within the
* sg entry's range.
if (unlikely(xa_is_value(sg))) {
unsigned long base = xa_to_value(sg);
- sg = radix_tree_lookup(&iter->radix, base);
+ sg = xa_load(&iter->xa, base);
GEM_BUG_ON(!sg);
*offset = n - base;
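+ /*
+ * Sketch of the value-entry round trip used above (xa_mk_value()
+ * requires the value to fit in LONG_MAX, which page indices do):
+ *
+ *	entry = xa_mk_value(idx);	encode head index as non-pointer
+ *	xa_is_value(entry)		true, i.e. not a real sg pointer
+ *	xa_to_value(entry)		idx, the head page's index
+ */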