* @coalesce: The number of slots that would be removed if copied/coalesced.
*
*/
-static inline unsigned char ma_hard_data(unsigned long end,
+static inline int ma_hard_data(unsigned long end,
unsigned long coalesce)
{
return end - coalesce;
}
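+/*
+ * For example, a node with end == 6 and coalesce == 2 keeps 4 slots of
+ * hard (non-removable) data after a copy/coalesce.
+ */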
/* Private
- * ma_erase() - Find the range in which index resides and erase the entire
+ * mas_erase() - Find the range in which index resides and erase the entire
* range.
*
* Any previous pivots with no value will be set to the same pivot value.
- * Return: the number of concurrent slots that are NULL or XA_DELETED_ENTRY.
+ * Return: the entry that was erased
*/
-static inline int ma_erase(struct ma_state *mas)
+static inline void *mas_erase(struct ma_state *mas)
{
- enum maple_type type = mte_node_type(mas->node);
- unsigned char slot_cnt = mt_slots[type];
- unsigned char pivot_cnt = mt_pivots[type];
+ enum maple_type type;
+ unsigned char pivot_cnt;
unsigned long piv_val;
int slot, ret = 1;
+ void *entry = NULL;
+
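+ // Walk the tree to the node containing mas->index.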
+ _mas_walk(mas);
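+ // A tree holding a single entry at index 0 stores it directly in ma_root.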
+ if (mas_is_ptr(mas)) {
+ entry = mas->tree->ma_root;
+ mas->tree->ma_root = NULL;
+ return entry;
+ }
slot = mas_get_slot(mas);
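+ // The walk found no occupied slot for this index; nothing to erase.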
+ if (slot == MAPLE_NODE_SLOTS)
+ return NULL;
+
+ type = mte_node_type(mas->node);
+ pivot_cnt = mt_pivots[type];
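+ // Save the old entry so it can be returned to the caller.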
+ entry = mte_get_rcu_slot(mas->node, slot);
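+ // Mark the slot deleted; mas_coalesce() below reclaims the dead slots.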
mte_update_rcu_slot(mas->node, slot, XA_DELETED_ENTRY);
// dense nodes only need to set a single value.
if (!pivot_cnt)
- return ret;
+ goto done;
- if ((slot >= slot_cnt - 1))
- piv_val = mas->max;
- else
- piv_val = mte_get_pivot(mas->node, slot);
+ piv_val = _mas_get_safe_pivot(mas, slot, type);
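+ // The safe pivot is the stored pivot, or mas->max for the last slot.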
while ((slot < pivot_cnt - 1)) {
unsigned long this_pivot = mte_get_pivot(mas->node, slot + 1);
if (!this_pivot) // Past the end of this node's data.
break;
// There is data for this pivot.
- if (mte_get_rcu_slot(mas->node, slot + 1))
+ if (!mt_is_empty(mte_get_rcu_slot(mas->node, slot + 1)))
break;
// empty slot above the erase.
slot++;
ret++;
}
+done:
mas_coalesce(mas);
- return ret;
+ return entry;
}
return ret;
}
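+/*
+ * Like xa_erase(), mtree_erase() returns the entry that was stored at
+ * @index, or NULL if the index was empty.  A hypothetical caller:
+ *
+ *	old = mtree_erase(mt, index);
+ *	if (old)
+ *		release_entry(old); // illustrative caller-side cleanup
+ */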
-int mtree_erase(struct maple_tree *mt, unsigned long index)
+void *mtree_erase(struct maple_tree *mt, unsigned long index)
{
- int ret = -EINVAL;
- int slot;
+ void *entry = NULL;
MA_STATE(mas, mt, index, index);
mtree_lock(mt);
- _mas_walk(&mas);
- slot = mas_get_slot(&mas);
- if (slot != MAPLE_NODE_SLOTS)
- ret = ma_erase(&mas);
-
+ entry = mas_erase(&mas);
mtree_unlock(mt);
- return ret;
+
+ return entry;
}
EXPORT_SYMBOL(mtree_erase);
return mtree_load(mt, index);
}
-static int mtree_test_erase(struct maple_tree *mt, unsigned long index)
+static void *mtree_test_erase(struct maple_tree *mt, unsigned long index)
{
return mtree_erase(mt, index);
}
MT_BUG_ON(mt, ret != 0);
}
-static noinline void check_erase(struct maple_tree *mt, unsigned long index)
+static noinline void check_erase(struct maple_tree *mt, unsigned long index,
+ void *ptr)
{
- int ret = -EINVAL;
-
- ret = mtree_test_erase(mt, index);
- MT_BUG_ON(mt, ret < 0);
+ MT_BUG_ON(mt, mtree_test_erase(mt, index) != ptr);
}
static noinline void check_dup_insert(struct maple_tree *mt,
unsigned long index, void *ptr)
#define erase_ptr(i) entry[i%2]
#define erase_check_load(mt, i) check_load(mt, set[i], entry[i%2])
#define erase_check_insert(mt, i) check_insert(mt, set[i], entry[i%2])
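+// Erase set[i] and check that the pointer previously stored there is returned.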
+#define erase_check_erase(mt, i) check_erase(mt, set[i], entry[i%2])
static noinline void check_erase_testset(struct maple_tree *mt)
{
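+ // Interleave inserts, erases, and loads over the preset index array,
+ // verifying that neighbouring entries survive each erase.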
erase_check_load(mt, i);
mt_set_non_kernel(2);
- check_erase(mt, set[1]);
+ erase_check_erase(mt, 1);
erase_check_load(mt, 0);
check_load(mt, set[1], NULL);
for (int i = 2; i < 4; i++)
erase_check_load(mt, i);
- check_erase(mt, set[2]);
+ erase_check_erase(mt, 2);
erase_check_load(mt, 0);
check_load(mt, set[1], NULL);
check_load(mt, set[2], NULL);
erase_check_load(mt, i);
// Check erase and load without an allocation.
- check_erase(mt, set[1]);
+ erase_check_erase(mt, 1);
erase_check_load(mt, 0);
check_load(mt, set[1], NULL);
for (int i = 2; i < 4; i++)
root_node = mt->ma_root;
erase_check_insert(mt, 1);
- // The root node should be replaced to avoid writing a busy slot.
- MT_BUG_ON(mt, root_node == mt->ma_root);
-
erase_check_load(mt, 0);
check_load(mt, 5016, NULL);
erase_check_load(mt, 1);
check_load(mt, 5018, NULL);
erase_check_load(mt, 3);
- check_erase(mt, set[2]); // erase 5017 to check append
+ erase_check_erase(mt, 2); // erase 5017 to check append
erase_check_load(mt, 0);
check_load(mt, 5016, NULL);
erase_check_load(mt, 1);
erase_check_load(mt, 3);
root_node = mt->ma_root;
- mt_dump(mt);
erase_check_insert(mt, 2);
- // The root node should be replaced to avoid writing a busy slot.
- MT_BUG_ON(mt, root_node == mt->ma_root);
erase_check_load(mt, 0);
check_load(mt, 5016, NULL);
check_load(mt, 5018, NULL);
erase_check_load(mt, 3);
- check_erase(mt, set[2]); // erase 5017 to check append
- check_erase(mt, set[0]); // erase 5015 to check append
+ erase_check_erase(mt, 2); // erase 5017 to check append
+ erase_check_erase(mt, 0); // erase 5015 to check append
erase_check_insert(mt, 4); // 1000; should not split.
check_load(mt, set[0], NULL);
check_load(mt, 5016, NULL);
// Coalesce testing
erase_check_insert(mt, 0);
erase_check_insert(mt, 2);
- mt_dump(mt);
for (int i = 5; i < 25; i++) {
erase_check_insert(mt, i);
}
}
- check_erase(mt, set[14]); //6015
+ erase_check_erase(mt, 14); //6015
for (int i = 0; i < 25; i++) {
if (i == 14)
check_load(mt, set[i], NULL);
else
erase_check_load(mt, i);
}
- check_erase(mt, set[16]); //7002
+ erase_check_erase(mt, 16); //7002
for (int i = 0; i < 25; i++) {
if (i == 16 || i == 14)
check_load(mt, set[i], NULL);
}
mt_set_non_kernel(1);
- check_erase(mt, set[13]); //6012
+ erase_check_erase(mt, 13); //6012
for (int i = 0; i < 25; i++) {
if (i == 16 || i == 14 || i == 13)
check_load(mt, set[i], NULL);
erase_check_load(mt, i);
}
- check_erase(mt, set[15]); //7003
+ erase_check_erase(mt, 15); //7003
for (int i = 0; i < 25; i++) {
if (i <= 16 && i >= 13)
check_load(mt, set[i], NULL);
}
mt_set_non_kernel(2);
- check_erase(mt, set[17]); //7008 *should* cause coalesce.
+ erase_check_erase(mt, 17); //7008 *should* cause coalesce.
for (int i = 0; i < 25; i++) {
if (i <= 17 && i >= 13)
check_load(mt, set[i], NULL);
}
mt_set_non_kernel(1);
- check_erase(mt, set[18]); //7012
- mt_dump(mt);
+ erase_check_erase(mt, 18); //7012
for (int i = 0; i < 25; i++) {
if (i <= 18 && i >= 13)
check_load(mt, set[i], NULL);
erase_check_load(mt, i);
}
- check_erase(mt, set[19]); //7015
+ erase_check_erase(mt, 19); //7015
for (int i = 0; i < 25; i++) {
if (i <= 19 && i >= 13)
check_load(mt, set[i], NULL);
}
- check_erase(mt, set[20]); //8003
+ erase_check_erase(mt, 20); //8003
for (int i = 0; i < 25; i++) {
if (i <= 20 && i >= 13)
check_load(mt, set[i], NULL);
}
mt_set_non_kernel(2);
- check_erase(mt, set[21]); //8002
+ erase_check_erase(mt, 21); //8002
for (int i = 0; i < 25; i++) {
if (i <= 21 && i >= 13)
check_load(mt, set[i], NULL);
mt_set_non_kernel(1);
- check_erase(mt, set[22]); //8008
+ erase_check_erase(mt, 22); //8008
for (int i = 0; i < 25; i++) {
if (i <= 22 && i >= 13)
check_load(mt, set[i], NULL);
erase_check_load(mt, i);
}
for (int i = 23; i < 25; i++) {
- check_erase(mt, set[i]);
- mt_dump(mt);
+ erase_check_erase(mt, i);
}
for (int i = 0; i < 25; i++) {
if (i <= 25 && i >= 13)
mt_set_non_kernel(99);
for (int i = 18; i < ARRAY_SIZE(set); i++) {
- check_erase(mt, set[i]);
+ erase_check_erase(mt, i);
for (int j = 0; j < ARRAY_SIZE(set); j++) {
if (j < 18 || j > i)
erase_check_load(mt, j);
}
mt_set_non_kernel(30);
for (int i = 0; i < 18; i++) {
- check_erase(mt, set[i]);
+ erase_check_erase(mt, i);
for (int j = 0; j < ARRAY_SIZE(set); j++) {
if (j < 18 && j > i)
erase_check_load(mt, j);
}
erase_check_insert(mt, 8);
erase_check_insert(mt, 9);
- check_erase(mt, set[8]);
- mt_dump(mt);
+ erase_check_erase(mt, 8);
}
static noinline void check_alloc_rev_range(struct maple_tree *mt)
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12), 0);
}
- mt_dump(mt);
for (i = 0; i < req_range_cnt; i += 5) {
check_mtree_alloc_rrange(mt,
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12), 0);
}
- mt_dump(mt);
for (i = 0; i < req_range_cnt; i += 5) {
- printk("%s: insert %i\n", __func__, i);
check_mtree_alloc_range(mt,
req_range[i] >> 12, // start
req_range[i+1] >> 12, // end