return data_end;
}
-static inline unsigned char ma_calc_split(struct ma_state *mas,
+/* Route ma_calc_split() to the non-dense implementation by default. */
+#define ma_calc_split ma_no_dense_calc_split
+
+/*
+ * ma_no_dense_calc_split() - Compute the slot at which to split a full node.
+ * @mas: the maple state (holds the node, its range, and pre-allocated nodes)
+ * @left: out parameter; set to a node taken from the allocation list
+ * @right: out parameter; set likewise when a second node is needed
+ *
+ * Scans the pivots for a span wider than 15 so that tightly-packed runs of
+ * ranges stay together in one node; falls back to an even split at half the
+ * slot count.  Returns the slot index of the last entry that stays in @left.
+ *
+ * NOTE(review): the magic threshold 15 presumably matches the capacity of a
+ * dense node -- confirm against mt_max[maple_dense] and consider naming it.
+ */
+static inline unsigned char ma_no_dense_calc_split(struct ma_state *mas,
+ struct maple_enode **left, struct maple_enode **right)
+{
+ char i, j;
+ unsigned long min = mas->min;
+ unsigned long max = min;
+ enum maple_type type = mt_node_type(mas->node);
+ unsigned long pivot_cnt = mt_pivots[type];
+ unsigned long half = mt_slots[type] / 2;
+ unsigned char data_end = ma_data_end(mas->node, type);
+
+ /* The root is always split evenly into two new nodes. */
+ if (ma_is_root(mas->node)) {
+ i = half;
+ *left = (struct maple_enode*)ma_next_alloc(mas);
+ *right = (struct maple_enode*)ma_next_alloc(mas);
+ goto even_split;
+ }
+
+ *left = (struct maple_enode*)ma_next_alloc(mas);
+ /*
+ * Scan forward for the first range wider than the threshold; @i ends
+ * on the last slot of the leading tightly-packed run.  Note @min stays
+ * fixed at mas->min here, so the width test is cumulative from the
+ * start of the node -- TODO confirm that is intended.
+ */
+ for (i = 0; i < data_end; i++) {
+ max = ma_get_pivot(mas->node, i);
+ if ((max - min) > 15) {
+ if (i)
+ i--;
+ break;
+ }
+ }
+
+ /*
+ * The entire node is tightly packed: keep everything in @left and
+ * only allocate @right when the pending insert extends past this
+ * node's maximum capacity.
+ */
+ if (i >= data_end) {
+ *left = mt_mk_node(ma_mnode_ptr(*left), type);
+ if (mas->last >= mas->min + mt_max[type]) {
+ *right = (struct maple_enode*)ma_next_alloc(mas);
+ *right = mt_mk_node(ma_mnode_ptr(*right), type);
+ }
+ return i;
+ }
+
+ *right = (struct maple_enode*)ma_next_alloc(mas);
+ /* Leading packed run covers at least half the node: split after it. */
+ if (i >= half) {
+ *left = mt_mk_node(ma_mnode_ptr(*left), type);
+ *right = mt_mk_node(ma_mnode_ptr(*right), type);
+ return i;
+ }
+
+ /* Upper bound for the backward scan: next pivot, or this node's max. */
+ if (data_end < pivot_cnt)
+ max = ma_get_pivot(mas->node, data_end);
+ else
+ max = mas->max;
+
+ /*
+ * Scan backward for a trailing tightly-packed run; @j ends on the
+ * first slot of that run.
+ */
+ j = data_end;
+ do {
+ j--;
+ min = ma_get_pivot(mas->node, j);
+ if ((max - min) > 15) {
+ j++;
+ break;
+ }
+ } while (j > 0);
+
+ /* Trailing packed run covers at least half the node: split before it. */
+ if (data_end - j >= half) {
+ *left = mt_mk_node(ma_mnode_ptr(*left), type);
+ *right = mt_mk_node(ma_mnode_ptr(*right), type);
+ return j;
+ }
+even_split:
+ *left = mt_mk_node(ma_mnode_ptr(*left), type);
+ *right = mt_mk_node(ma_mnode_ptr(*right), type);
+
+ /* No dominant run found: split evenly (root arrives with i == half). */
+ return i > 2 ? i : half - 1;
+}
+static inline unsigned char ma_dense_calc_split(struct ma_state *mas,
struct maple_enode **left, struct maple_enode **right)
{
char i, j;
if (i >= data_end) {
*left = mt_mk_node(ma_mnode_ptr(*left), maple_dense);
+ if (mas->last >= mas->min + mt_max[type]) {
+ *right = (struct maple_enode*)ma_next_alloc(mas);
+ *right = mt_mk_node(ma_mnode_ptr(*right), type);
+ }
+ if (!i)
+ i = mt_max[type];
return i;
}
ma_copy(mas, &cp);
cp.src_start = split + 1;
+
+ if (cp.src_start > mt_node_max(mas->node))
+ return split;
+
cp.src_end = mt_slot_count(mas->node) - 1;
if (right) {
cp.dst = right;
ma_split_data(mas, left, right, split);
if (right) {
pivot = ma_get_pivot(left, split);
+ if (!pivot)
+ pivot = mas->min + split - 1;
} else {
pivot = mt_node_max(left);
- if (p_slot)
- pivot += ma_get_pivot(new_parent, p_slot - 1);
- else
+
+ if (!p_slot || mt_node_type(left) == maple_dense)
pivot += mas->min;
+ else
+ pivot += ma_get_pivot(new_parent, p_slot - 1);
}
ma_link(left, new_parent, p_slot, pivot, ptype);
- if (right)
- ma_link(right, new_parent, p_slot + 1, mas->max, ptype);
+ if (right) {
+ unsigned long r_max = mas->max;
+ if (mt_node_max(right) < mas->max)
+ r_max = pivot + mt_node_max(right);
+ ma_link(right, new_parent, p_slot + 1, r_max, ptype);
+ }
if (mt_is_alloc(mas->tree)) {
if (right) {
} else {
mas->node = right;
p_slot += 2;
- mas->min = pivot;
+ mas->min = pivot + 1;
}
if (mas->index > mt_node_max(mas->node) + mas->min) {
if (old_parent != full)
mt_free(mt_to_node(full));
-
return split;
}
* 4. Write the entry.
*
*/
+static inline void ma_insert(struct ma_state *mas, void *entry);
static inline int _ma_insert(struct ma_state *mas, void *entry,
unsigned char slot)
{
unsigned char slot_cnt = mt_slots[type];
unsigned char pivot_cnt = mt_pivots[type];
- /* Linear node type */
+ /* Dense node type */
if (!pivot_cnt) {
min = mas->index - mas->min;
max = mas->last - mas->min;
+ if (max >= mt_max[type])
+ max = mt_max[type] - 1;
+
do
ma_update_rcu_slot(mas->node, min++, entry);
- while(min < max);
+ while(min <= max);
+
+ if (max != mas->last - mas->min) {
+ mas->index = mas->min + max + 1;
+ ma_insert(mas, entry);
+ }
+
if (mt_is_alloc(mas->tree))
ma_update_gap(mas);
+
return max - min + 1;
}
n_end++;
if (n_end > slot_cnt - 1 ||
- (n_end == slot_cnt - 1 && mas->max != mas->last)){
+ (n_end == slot_cnt - 1 && mas->max != mas->last)){
unsigned char split = ma_split(mas, slot);
if (mas_is_err(mas))
return 0;
if (mt_is_alloc(mas->tree))
ma_update_gap(mas);
-
+
return ret;
}
goto retry;
mtree_unlock(mas.tree);
-
return ret;
}
0x0, // Min
0x7fff58791000, // Max
0x2000, // Size
- 0x0, // First hole in our data of size 1000.
+ 0x0, // First hole in our data of size 2000.
0, // Return value success.
#if 0
- 0x, // Min
- 0x, // Max
- 0x, // Size
- 0x, // First hole in our data of size 1000.
+ // Test ascend.
+ 0x7fff5879F000, // Min
+ 0x, // Max
+ 0x3000, // Size
+ 0x, // First hole in our data of size 1000.
0, // Return value success.
+
+ // Test filling entire gap.
#endif
};
int i, range_cnt = sizeof(range)/sizeof(range[0]);
int req_range_cnt = sizeof(req_range)/sizeof(req_range[0]);
- for (i = 0; i < range_cnt/2; i+=2) {
- printk("i = %d, start = %ld\n", i, range[i] >> 12);
+ printk("range cnt = %d\n", range_cnt);
+ for (i = 0; i < range_cnt; i+=2) {
+ printk("Decode table %d: %lx -> %ld\n", i, range[i], range[i] >> 12);
+ }
+ for (i = 0; i < range_cnt; i+=2) {
+ printk("i = %d, %ld-%ld\n", i, range[i] >> 12, (range[i + 1] >> 12) - 1);
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12));
mt_dump(mt);
}
for (i = 0; i < req_range_cnt; i+=5) {
+ printk("alloc range i = %d, start = %ld size %ld\n", i, req_range[i+3] >> 12,
+ req_range[i+2] >> 12);
check_mtree_alloc_range(mt,
req_range[i] >> 12, // start
req_range[i+1] >> 12, // end
req_range[i+3] >> 12, // expected address
req_range[i+4] >> 12, // expected return
xa_mk_value(req_range[i] >> 12)); // pointer
+ mt_dump(mt);
+ printk("Done\n\n");
}
mtree_destroy(mt);