spans_range = true;
if (!overwrite) {
- if (spans_range || ma_get_rcu_slot(mas->node, slot)) {
- mas_set_err(mas, -EBUSY);
- return 0;
- }
+ void *entry;
+
+ if (spans_range)
+ goto busy;
+
+ entry = ma_get_rcu_slot(mas->node, slot);
+ if (entry && !xa_is_retry(entry))
+ goto busy;
}
if (last_piv == mas->max)
ma_update_gap(mas);
return ret;
+
+busy:
+ mas_set_err(mas, -EBUSY);
+ return 0;
}
static inline int _ma_insert(struct ma_state *mas, void *entry,
unsigned char slot)
*
*
*/
-static inline int mas_coalesce(struct ma_state *mas, unsigned char s_slot)
+static inline int mas_coalesce(struct ma_state *mas)
{
int ret = 0;
unsigned char slot_cnt;
unsigned long pivot, last = 0;
+ struct maple_enode *m_en = mas->node;
+ unsigned char s_slot = ma_get_slot(mas);
+ unsigned char slot;
slot_cnt = mt_slot_count(mas->node);
- for (; s_slot < slot_cnt; s_slot++) {
- pivot = ma_get_safe_pivot(mas, s_slot);
- if (s_slot && !pivot)
+ for (slot = 0; slot < slot_cnt; slot++) {
+ pivot = ma_get_safe_pivot(mas, slot);
+ if (slot && !pivot)
break;
- if (s_slot && last == pivot) {
+ if (slot && last == pivot) {
mas_partial_copy(mas, mt_slot_count(mas->node));
- if (mas_is_err(mas))
+ if (mas_is_err(mas)) {
+ mas->node = m_en;
goto mas_error;
+ }
ret = 1;
goto done;
}
done:
+	// Restore the slot saved on entry so the caller's position is unchanged.
if (ret)
mt_replace(mas);
if (mt_is_alloc(mas->tree))
ma_update_gap(mas);
+ ma_set_slot(mas, s_slot);
return ret;
}
leaf = _mas_walk(mas);
slot = ma_get_slot(mas);
// Clean this node
- if (mas_coalesce(mas, 0)) {
+ if (mas_coalesce(mas)) {
if (mas_is_err(mas))
return 0;
goto retry;
}
if (leaf == true && slot != MAPLE_NODE_SLOTS) {
- if (!overwrite && ma_get_rcu_slot(mas->node, slot))
- goto exists;
+ if (!overwrite) {
+ void *entry = ma_get_rcu_slot(mas->node, slot);
+
+ if (entry && !xa_is_retry(entry))
+ goto exists;
+ }
}
/* Do the add */
}
/* The error on allocation failure can be ignored */
- mas_coalesce(mas, ++slot);
+ mas_coalesce(mas);
return ret;
}
rcu_read_lock();
entry = mas_walk(&mas);
rcu_read_unlock();
- if (xa_is_zero(entry))
- entry = NULL;
+
+ if (xa_is_zero(entry) || xa_is_retry(entry))
+ return NULL;
return entry;
}
EXPORT_SYMBOL(mtree_load);
}
+static noinline void check_erase_testset(struct maple_tree *mt)
+{
+ unsigned long set[] = {5015, 5014, 5017, 25, 1000,
+ 1001, 1002, 1003, 1005, 0,
+ 5003, 5002};
+ void *ptr = &set;
+ void *root_node;
+
+ check_insert(mt, set[0], ptr); // 5015
+ check_insert(mt, set[1], mt); // 5014
+ check_insert(mt, set[2], ptr); // 5017
+ check_insert(mt, set[3], mt); // 25
+ check_load(mt, set[0], ptr);
+ check_load(mt, set[1], mt);
+ check_load(mt, set[2], ptr);
+ check_load(mt, set[3], mt);
+ mt_set_non_kernel(1);
+ check_erase(mt, set[1]);
+ check_load(mt, set[0], ptr);
+ check_load(mt, set[1], NULL);
+ check_load(mt, set[2], ptr);
+ check_load(mt, set[3], mt);
+
+ check_insert(mt, set[1], mt);
+ // Check erase and load without an allocation.
+ check_erase(mt, set[1]);
+ check_load(mt, set[0], ptr);
+ check_load(mt, set[1], NULL);
+ check_load(mt, set[2], ptr);
+ check_load(mt, set[3], mt);
+
+	// Store at the newly erased index.  This must allocate a different
+	// node rather than write over the busy (retry) slot in place.
+ root_node = mt->ma_root;
+ check_insert(mt, set[1], mt);
+
+ // The root node should be replaced to avoid writing a busy slot.
+ MT_BUG_ON(mt, root_node == mt->ma_root);
+
+ check_load(mt, set[0], ptr);
+ check_load(mt, 5016, NULL);
+ check_load(mt, set[1], mt);
+ check_load(mt, 5013, NULL);
+ check_load(mt, set[2], ptr);
+ check_load(mt, 5018, NULL);
+ check_load(mt, set[3], mt);
+
+ check_erase(mt, set[2]); // erase 5017 to check append
+ check_load(mt, set[0], ptr);
+ check_load(mt, 5016, NULL);
+ check_load(mt, set[1], mt);
+ check_load(mt, 5013, NULL);
+ check_load(mt, set[2], NULL);
+ check_load(mt, 5018, NULL);
+ check_load(mt, set[3], mt);
+
+ root_node = mt->ma_root;
+ check_insert(mt, set[2], ptr);
+ // The root node should be replaced to avoid writing a busy slot.
+ MT_BUG_ON(mt, root_node == mt->ma_root);
+
+ mtree_destroy(mt);
+}
+
static noinline void check_alloc_rev_range(struct maple_tree *mt)
{
/* Generated by:
/* Clear out the tree */
mtree_destroy(&tree);
+ mtree_init(&tree, 0);
+ check_erase_testset(&tree);
mtree_init(&tree, 0);
/*
- * set[] = {15, 14, 17, 25, 1000,
+ * set[] = {5015, 5014, 5017, 25, 1000,
* 1001, 1002, 1003, 1005, 0,
- * 3};
+ * 5003, 5002};
*/
- check_insert(&tree, set[0], ptr); // 15
- check_insert(&tree, set[1], &tree); // 14
- check_insert(&tree, set[2], ptr); // 17
+ check_insert(&tree, set[0], ptr); // 5015
+ check_insert(&tree, set[1], &tree); // 5014
+ check_insert(&tree, set[2], ptr); // 5017
check_insert(&tree, set[3], &tree); // 25
check_load(&tree, set[0], ptr);
check_load(&tree, set[1], &tree);
check_load(&tree, set[2], ptr);
check_load(&tree, set[3], &tree);
- mt_set_non_kernel(1);
- check_erase(&tree, set[1]);
- check_load(&tree, set[0], ptr);
- check_load(&tree, set[1], NULL);
- check_load(&tree, set[2], ptr);
- check_load(&tree, set[3], &tree);
- check_insert(&tree, set[1], &tree);
check_insert(&tree, set[4], ptr); // 1000 < Should split.
check_load(&tree, set[0], ptr);
check_load(&tree, set[1], &tree);