return (mt->ma_flags & MAPLE_ALLOC_RANGE);
}
+
static inline unsigned int mt_parent_shift(unsigned long parent)
{
if (!(parent & 2))
}
static inline unsigned char ma_data_end(const struct maple_enode *mn,
- const enum maple_type type)
+ const enum maple_type type, unsigned long *last)
{
unsigned char data_end = 0;
- unsigned long last;
for (data_end = 0; data_end < mt_slot_count(mn) - 1; data_end++) {
- last = _ma_get_pivot(mn, data_end, type);
- if (last == 0 && data_end > 0)
- return data_end - 1;
- if (last == mt_max[type])
+ *last = _ma_get_pivot(mn, data_end, type);
+ if (*last == 0 && data_end > 0)
return data_end - 1;
}
enum maple_type type = mt_node_type(mas->node);
unsigned long pivot_cnt = mt_pivots[type];
unsigned long half = mt_slots[type] / 2;
- unsigned char data_end = ma_data_end(mas->node, type);
-
- if (ma_is_root(mas->node)) {
- i = half;
- *left = (struct maple_enode*)ma_next_alloc(mas);
- *right = (struct maple_enode*)ma_next_alloc(mas);
- goto even_split;
- }
+ unsigned long last_pivot;
+ unsigned char data_end = ma_data_end(mas->node, type, &last_pivot);
*left = (struct maple_enode*)ma_next_alloc(mas);
+ *left = mt_mk_node(ma_mnode_ptr(*left), type);
+ *right = (struct maple_enode*)ma_next_alloc(mas);
+ *right = mt_mk_node(ma_mnode_ptr(*right), type);
+
+ if (ma_is_root(mas->node))
+ return half;
+
for (i = 0; i < data_end; i++) {
max = ma_get_pivot(mas->node, i);
- if ((max - min) > 15) {
- if (i)
- i--;
+
+ if (max == mt_max[type] && i)
+ return i - 1;
+
+ if ((max - min) > 15)
break;
- }
}
+ if (i)
+ i--;
- if (i >= data_end) {
- *left = mt_mk_node(ma_mnode_ptr(*left), type);
- if (mas->last >= mas->min + mt_max[type]) {
- *right = (struct maple_enode*)ma_next_alloc(mas);
- *right = mt_mk_node(ma_mnode_ptr(*right), type);
- }
+ if (i >= data_end)
return i;
- }
- *right = (struct maple_enode*)ma_next_alloc(mas);
- if (i >= half) {
- *left = mt_mk_node(ma_mnode_ptr(*left), type);
- *right = mt_mk_node(ma_mnode_ptr(*right), type);
+ if (i >= half)
return i;
- }
if (data_end < pivot_cnt)
max = ma_get_pivot(mas->node, data_end);
}
} while (j > 0);
- if (data_end - j >= half) {
- *left = mt_mk_node(ma_mnode_ptr(*left), type);
- *right = mt_mk_node(ma_mnode_ptr(*right), type);
+ if (data_end - j >= half)
return j;
- }
-even_split:
- *left = mt_mk_node(ma_mnode_ptr(*left), type);
- *right = mt_mk_node(ma_mnode_ptr(*right), type);
return i > 2 ? i : half - 1;
}
enum maple_type type = mt_node_type(mas->node);
unsigned long pivot_cnt = mt_pivots[type];
unsigned long half = mt_slots[type] / 2;
- unsigned char data_end = ma_data_end(mas->node, type);
+ unsigned long last_pivot;
+ unsigned char data_end = ma_data_end(mas->node, type, &last_pivot);
if (ma_is_root(mas->node)) {
i = half;
pstart = mas->min;
for (int i = 0; i < mt_slots[mt]; i++) {
- if (i < mt_pivots[mt]) {
+ bool empty = true;
+ if (i < mt_pivots[mt])
pend = ma_get_pivot(mas->node, i);
- if (!pend)
- break;
- } else {
+ else
pend = mas->max;
+
+ if (pend) {
+ gap = pend - pstart;
+ if (ma_get_rcu_slot(mas->node, i))
+ empty = false;
+ } else {
+ gap = mas->max - pstart;
}
- gap = (pend ? pend : mas->max) - pstart;
- if ((gap > max_gap) && !ma_get_rcu_slot(mas->node, i))
+ if (!pstart)
+ gap++;
+
+ if (empty && (gap > max_gap))
max_gap = gap;
- if (!pend)
+ if (pend == mas->max)
break;
- pstart = pend;
+ pstart = pend + 1;
}
done:
return max_gap;
}
-static inline unsigned long ma_max_gap(struct ma_state *mas)
+static inline unsigned long ma_max_gap(struct ma_state *mas, unsigned char *slot)
{
unsigned long max_gap = 0;
- for (int i = 0; i < mt_slot_count(mas->node); i++) {
+ unsigned char i;
+ for (i = 0; i < mt_slot_count(mas->node); i++) {
unsigned long gap;
gap = ma_get_gap(mas->node, i);
- if ( gap > max_gap)
+ if ( gap > max_gap) {
+ *slot = i;
max_gap = gap;
+ }
}
return max_gap;
}
unsigned long new)
{
unsigned long max_gap = 0;
+ unsigned char max_slot = 0;
+
+ ma_encoded_parent(mas);
+ ma_set_gap(mas->node, slot, new);
if (ma_is_root(mas->node))
return;
- ma_encoded_parent(mas);
- max_gap = ma_max_gap(mas);
- ma_set_gap(mas->node, slot, new);
+ max_gap = ma_max_gap(mas, &max_slot);
- if(max_gap < new) {
+ if (slot == max_slot || max_gap < new) {
unsigned char pslot = mt_parent_slot(mas->node);
ma_parent_gap(mas, pslot, new);
}
pslot = mt_parent_slot(mas->node);
p_gap = _ma_get_gap(mt_parent(mas->node), pslot,
mt_parent_enum(mas, mas->node));
+
if (p_gap != max_gap)
ma_parent_gap(mas, pslot, max_gap);
}
unsigned char sloc = cp->src_start; // src location
unsigned char dloc = cp->dst_start; // dst location
enum maple_type type = mt_node_type(cp->src);
+ enum maple_type dtype;
unsigned char pivot_cnt = mt_pivots[type];
if (!cp->dst) {
ma_dense_cp(mas, cp);
return;
}
+ dtype = mt_node_type(cp->dst);
while (sloc <= cp->src_end &&
dloc <= cp->dst_end) {
ma_set_pivot(cp->dst, dloc, mas->max);
}
- if (mt_node_type(cp->dst) == maple_arange_64)
+ if (dtype == maple_arange_64)
ma_cp_gap(cp->dst, dloc, cp->src, sloc);
ma_cp_rcu_slot(cp->dst, dloc++, cp->src, sloc++);
}
unsigned char slot, unsigned long pivot)
{
unsigned long gap;
+ unsigned char max_slot;
if (slot)
mas->min = ma_get_pivot(parent, slot - 1) + 1;
mas->max = pivot;
if (!mt_is_leaf(mas->node))
- gap = ma_max_gap(mas);
+ gap = ma_max_gap(mas, &max_slot);
else
gap = ma_leaf_max_gap(mas);
p_slot = 0;
mas->max = mt_max[ptype];
} else {
+ unsigned long last_pivot;
p_slot = mt_parent_slot(mas->node);
ma_encoded_parent(mas);
old_parent = mas->node;
ptype = mt_node_type(mas->node);
- p_end = ma_data_end(mas->node, ptype);
+ p_end = ma_data_end(mas->node, ptype, &last_pivot);
if (p_end >= mt_slots[ptype] - 1) {
/* Must split the parent */
split = ma_split(mas, p_slot);
}
ptype = mt_node_type(mas->node);
mas_update_limits(mas, p_slot, ptype);
+ p_end = ma_data_end(mas->node, ptype, &last_pivot);
mas->node = ma_get_rcu_slot(old_parent, p_slot);
}
// Allocations.
new_parent = mt_mk_node(ma_next_alloc(mas), ptype);
-
// Copy the parents information
if (!ma_is_root(full)) {
- // FIXME: Optimize this to avoid setting a slot/pivot twice.
- // Copy the parent data and leave a hole.
- MA_CP(cp, old_parent, new_parent, 0, p_slot);
- ma_copy(mas, &cp);
- if (p_slot < mt_slots[ptype] - 1) {
- cp.dst_start += 1;
+ // Copy the parent data except p_slot, which will later be
+ // replaced.
+ MA_CP(cp, old_parent, new_parent, 0, p_slot - 1);
+ if (p_slot)
+ ma_copy(mas, &cp);
+
+ if (p_slot < mt_slots[ptype] - 2) {
cp.src_start = p_slot + 1;
- cp.src_end = p_end + 1;
+ cp.dst_start += 2;
+ cp.src_end = p_end;
ma_copy(mas, &cp);
}
}
ma_link(left, new_parent, p_slot, pivot, ptype);
if (right) {
unsigned long r_max = mas->max;
+
if (mt_node_max(right) < mas->max)
r_max = pivot + mt_node_max(right);
ma_link(right, new_parent, p_slot + 1, r_max, ptype);
unsigned char slot)
{
struct maple_enode *p_mn;
- int o_end = ma_data_end(mas->node, mt_node_type(mas->node)); // Old end
+ unsigned long last_pivot;
+ // Old end
+ int o_end = ma_data_end(mas->node, mt_node_type(mas->node),
+ &last_pivot);
int n_end = o_end; // New end
unsigned long max = mas->max;
unsigned long min = mas->min;
ma_insert(mas, entry);
}
- if (mt_is_alloc(mas->tree))
- ma_update_gap(mas);
+ ret = max - min + 1;
+ goto update_gap;
+ }
+
+ // If this is an append, set the slot to one beyond the end.
+ if (slot == MAPLE_NODE_SLOTS) {
+ slot = o_end;
+ if (last_pivot != mt_max[type])
+ slot++;
+ }
+
+ /* Calculate the range of this slot */
+ // Set the max
+ if (slot < pivot_cnt && ma_get_pivot(mas->node, slot) != 0)
+ max = ma_get_pivot(mas->node, slot);
+
+ // Set the min.
+ if (slot > 0)
+ min = ma_get_pivot(mas->node, slot - 1) + 1;
+
+ /* Figure out how many slots are needed for the entry. */
+ // A null for the end.
+ if (max > mas->last)
+ n_end++;
+
+ // A new entry for the start.
+ if (mas->index && min != mas->index)
+ n_end++;
- return max - min + 1;
+ // Check if we need to split.
+ if (n_end > slot_cnt - 1 ||
+ (n_end == slot_cnt - 1 && mas->max != mas->last)){
+ unsigned char split = ma_split(mas, slot);
+ if (mas_is_err(mas))
+ return 0;
+
+ if (split <= slot)
+ slot -= split;
+
+ return _ma_insert(mas, entry, slot);
+ }
+
+ p_mn = mas->node;
+
+ if (!mt_is_leaf(mas->node)) {
+ struct maple_enode *leaf;
+ enum maple_type mt;
+ // Old limits.
+ unsigned long o_min = mas->min;
+ unsigned long o_max = mas->max;
+
+ // Create a new node.
+ mas_node_cnt(mas, 1);
+ if (mas_is_err(mas))
+ return 0;
+
+ // Set the new leaf parent and type.
+ mt = ma_determine_type(mas, min, slot);
+ leaf = mt_mk_node(ma_next_alloc(mas), mt);
+ mt_set_parent(leaf, mas->node, slot);
+ // Put the new range in the new leaf.
+ mas->node = leaf;
+ mas->min = mas->index;
+ mas->max = max;
+ _ma_insert(mas, entry, 0);
+ // Restore old values and continue inserting.
+ mas->min = o_min;
+ mas->max = o_max;
+ mas->node = p_mn;
+ entry = leaf;
+ if (mt == maple_dense)
+ mas->last = mas->index + mt_max[mt] - 1;
+ }
+
+ /* Figure out if this is an append or not.
+ * Appending does not need to create a new node. */
+ if (o_end == 0 || slot - 1 == o_end) {
+ o_end = n_end; /* Appending */
+ } else {
+ /* Not appending */
+ /* Creates a new node and copies until slot (inclusively) */
+ mas_partial_copy(mas, slot);
+
+ if (mas_is_err(mas))
+ return 0;
+
+ o_end = slot;
+ }
+
+ if (mas->index && min != mas->index) {
+ /* When writing a NULL entry, the order must be reversed to
+ * ensure readers don't get incorrect data on appends
+ */
+ /* Write the entry */
+ ma_set_rcu_slot(mas->node, ++slot, entry);
+ if (slot < pivot_cnt)
+ ma_set_pivot(mas->node, slot, mas->last);
+ /* Write NULL entry */
+ ma_set_rcu_slot(mas->node, --slot, NULL);
+ if (o_end == n_end + 1) // Append.
+ wmb();
+
+ ma_set_pivot(mas->node, slot, mas->index - 1);
+ if (mt_is_alloc(mas->tree)) {
+ unsigned long gap = mas->min;
+ if (slot > 0)
+ gap = ma_get_pivot(mas->node, slot - 1);
+ gap = (mas->index - 1) - gap;
+ ma_set_gap(mas->node, slot, gap);
+ }
+ slot += 2;
+ ret = 2;
+ } else {
+ /* Write the entry */
+ ma_set_rcu_slot(mas->node, slot, entry);
+ if (o_end == n_end + 1) // Append.
+ wmb();
+
+ if (slot < pivot_cnt)
+ ma_set_pivot(mas->node, slot++, mas->last);
+ }
+
+ /* Skip possible duplicate entry that contains a NULL */
+ if (o_end != n_end && ma_get_pivot(p_mn, o_end) <= mas->last)
+ o_end++;
+
+ /* Copy remainder of node if this isn't an append */
+ MA_CP(cp, p_mn, mas->node, o_end, slot_cnt - 1);
+ cp.dst_start = slot;
+ cp.dst_end = n_end;
+ ma_copy(mas, &cp);
+
+ //mas->max = mas->last;
+
+ if (p_mn != mas->node)
+ mt_replace(mas);
+
+update_gap:
+
+ if (mt_is_alloc(mas->tree))
+ ma_update_gap(mas);
+
+ return ret;
+}
+static inline int old_ma_insert(struct ma_state *mas, void *entry,
+ unsigned char slot)
+{
+ struct maple_enode *p_mn;
+ unsigned long last_pivot;
+ // Old end
+ int o_end = ma_data_end(mas->node, mt_node_type(mas->node),
+ &last_pivot);
+ int n_end = o_end; // New end
+ unsigned long max = mas->max;
+ unsigned long min = mas->min;
+ int ret = 1;
+ enum maple_type type = mt_node_type(mas->node);
+ unsigned char slot_cnt = mt_slots[type];
+ unsigned char pivot_cnt = mt_pivots[type];
+
+ /* Dense node type */
+ if (!pivot_cnt) {
+ min = mas->index - mas->min;
+ max = mas->last - mas->min;
+ if (max >= mt_max[type])
+ max = mt_max[type] - 1;
+
+ do
+ ma_update_rcu_slot(mas->node, min++, entry);
+ while(min <= max);
+
+ if (max != mas->last - mas->min) {
+ mas->index = mas->min + max + 1;
+ ma_insert(mas, entry);
+ }
+
+ ret = max - min + 1;
+ goto update_gap;
}
/* Calculate the range of the slot */
if (p_mn != mas->node)
mt_replace(mas);
+update_gap:
+
if (mt_is_alloc(mas->tree))
ma_update_gap(mas);
{
void *r_entry = rcu_dereference(ms->tree->ma_root); // root entry
struct maple_node *mn;
- enum maple_type mt = maple_leaf_64;
+ enum maple_type mt = ma_ptype_leaf(ms);
mas_node_cnt(ms, 1);
if (mas_is_err(ms))
_ma_insert(ms, entry, 1);
if (mas_is_err(ms))
return;
+
+ if (mt_is_alloc(ms->tree)) {
+// ms->index = TASK_SIZE / PAGE_SIZE - 1;
+ ms->index = 0x2000000000000UL;
+ ms->last = mt_max[mt];
+ _ma_insert(ms, XA_ZERO_ENTRY, 2);
+ if (mas_is_err(ms))
+ return;
+ }
+
/* swap the new root into the tree */
rcu_assign_pointer(ms->tree->ma_root, mt_mk_root(ms->node));
}
for (i = 0; i <= pivot_cnt; i++) {
unsigned long this_gap = 0;
void *entry = NULL;
+
if (i == pivot_cnt - 1)
pivot = max;
else
pivot = _ma_get_pivot(mas->node, i, type);
+
+ /* End of data in this leaf */
+ if (i && !pivot)
+ break;
+
+ /* out of upper bounds */
+ if (mas->last < pivot) {
+ mas_set_err(mas, -EBUSY);
+ return true;
+ }
+
+ /* Not within lower bounds */
if (mas->index > pivot)
- continue;
+ goto next;
+
entry = _ma_get_rcu_slot(mas->node, i, type);
- if (!entry)
- this_gap = pivot - min;
- min = pivot;
- if (!this_gap)
- continue;
+ if (!entry) {
+ this_gap = pivot - mas->index;
+ if (!this_gap) // No entry, pivot = index.
+ this_gap = 1;
+ }
+
+ /* Does not fit in this gap or node */
if (mas->last < pivot - this_gap)
goto ascend;
+
if (this_gap >= size) {
found = true;
break;
}
+next:
+ min = pivot + 1;
}
break;
default:
for (; i < pivot_cnt; i++) {
unsigned long this_gap;
pivot = _ma_get_pivot(mas->node, i, type);
- if (i != 0 && pivot == 0)
+ if (i && !pivot)
goto ascend;
this_gap = ma_get_gap(mas->node, i);
- if (mas->last < pivot - this_gap)
- goto ascend;
-
if (size <= this_gap) {
if (mas->index <= pivot) {
max = pivot;
break;
}
}
+
min = pivot + 1;
+ if (mas->last < min) {
+ mas_set_err(mas, -EBUSY);
+ return true;
+ }
}
// Final slot.
if (!leaf) { // descend.
- mas->node = _ma_get_rcu_slot(mas->node, i, type);
+ struct maple_enode *next;
+ next = _ma_get_rcu_slot(mas->node, i, type);
mas->min = min;
mas->max = max;
- if (!mas->node)
+ if (next) {
+ mas->node = next;
+ i = 0;
+ } else {
found = true; // this is a non-leaf hole.
+ }
}
+
ma_set_slot(mas, i);
return found;
-
ascend:
- return false;
-
-
+ if (ma_is_root(mas->node)) {
+ found = true;
+ }
+ ma_set_slot(mas, i);
+ return found;
}
/*
* Private: Returns true if mas->node is a leaf
ma_set_slot(mas, i);
return ret;
}
-static inline void mas_walk_next(struct ma_state *mas);
+static inline bool mas_skip_node(struct ma_state *mas);
static inline void mas_awalk(struct ma_state *mas, unsigned long size)
{
* no gap found. (return, slot == MAPLE_NODE_SLOTS)
* found the gap. (return, slot != MAPLE_NODE_SLOTS)
*/
- while (!_mas_awalk(mas, size))
- {
- if (last == mas->node) // Ascend.
- mas_walk_next(mas);
-
- last = mas->node;
+ while (!_mas_awalk(mas, size) && !mas_is_err(mas)) {
+ if (last == mas->node)
+ mas_skip_node(mas);
+ else
+ last = mas->node;
}
return;
}
//mas->index is the start address for the search, which may be done with?
//mas->last is the end address for the search
- //mas->index = ma_get_pivot(mas->node, slot);
unsigned long min = mas->min;
if (slot)
min = ma_get_pivot(mas->node, slot - 1) + 1;
}
mas_awalk(mas, size);
+ if (mas_is_err(mas))
+ return xa_err(mas->node);
+
slot = ma_get_slot(mas);
if (slot == MAPLE_NODE_SLOTS)
goto no_gap;
return entry;
}
-static inline void mas_walk_next(struct ma_state *mas)
+/* Skip this slot in the parent. */
+static inline bool mas_skip_node(struct ma_state *mas)
{
unsigned char slot;
do {
- slot = mt_parent_slot(mas->node);
- ma_encoded_parent(mas);
- } while (slot > mt_slot_count(mas->node) - 1);
+ if (ma_is_root(mas->node)) {
+ slot = ma_get_slot(mas);
+ if (slot >= mt_slot_count(mas->node) - 1) {
+ mas_set_err(mas, -EBUSY);
+ return false;
+ }
+ } else {
+ slot = mt_parent_slot(mas->node);
+ ma_encoded_parent(mas);
+ }
+ } while (slot >= mt_slot_count(mas->node) - 1);
- slot++;
- ma_set_slot(mas, slot);
+ ma_set_slot(mas, ++slot);
mas_update_limits(mas, slot, mt_node_type(mas->node));
- mas->node = ma_get_rcu_slot(mas->node, slot);
- return;
+ return true;
}
/* Private
* Find an entry and erase the entire range
rcu_read_lock();
entry = mas_walk(&mas);
rcu_read_unlock();
+ if (xa_is_zero(entry))
+ entry = NULL;
return entry;
}
EXPORT_SYMBOL(mtree_load);
mas.index = min;
mas.last = max - size;
mas.min = 0;
- mas.max = ULONG_MAX;
+ mas.max = mt_max[maple_arange_64];
ret = ma_alloc(&mas, entry, size, startp);
if (mas_nomem(&mas, gfp))
goto retry;
int ret = -ENOENT;
MA_STATE(mas, mt, index, index);
rcu_read_lock();
- mas_walk_next(&mas);
+ //mas_walk_next(&mas);
rcu_read_unlock();
if (mas.node)
} \
} while (0)
#endif
-
+#define MTREE_ALLOC_MAX 0x2000000000000Ul
static
int mtree_insert_index(struct maple_tree *mt, unsigned long index, gfp_t gfp)
{
unsigned long result = expected + 1;
int ret;
+
ret = mtree_alloc_range(mt, &result, ptr, size, start, end,
GFP_KERNEL);
MT_BUG_ON(mt, ret != eret);
+ if (ret)
+ return;
+
MT_BUG_ON(mt, result != expected);
}
static noinline void check_load(struct maple_tree *mt, unsigned long index,
{
// cat /proc/self/maps|awk '{print $1}'| awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
unsigned long range[] = {
+ // Inclusive start, exclusive end.
0x565234af2000, 0x565234af4000,
0x565234af4000, 0x565234af9000,
0x565234af9000, 0x565234afb000,
0x7fff5876b000, 0x7fff5878d000,
0x7fff5878e000, 0x7fff58791000,
0x7fff58791000, 0x7fff58793000,
- 0x7fff58793000, 0x7fff58794000, // Added for ascend testing.
- 0x7fff58794000, 0x7fff58796000, // Added for ascend testing.
- 0x7fff58796000, 0x7fff58798000, // Added for ascend testing.
- 0x7fff58798000, 0x7fff5879A000, // Added for ascend testing.
- 0x7fff5879A000, 0x7fff5879C000, // Added for ascend testing.
- 0x7fff5879C000, 0x7fff5879D000, // Added for ascend testing.
- 0x7fff5879E000, 0x7fff5879F000, // Added for ascend testing.
- 0x7fff5879F000, 0x7fff587A0000, // Added for ascend testing.
- 0x7fff587A0000, 0x7fff587A3000,
- 0x7fff587A3000, 0x7fff587A4000, // Added for ascend testing.
- 0x7fff587A4000, 0x7fff587A6000, // Added for ascend testing.
- 0x7fff587A6000, 0x7fff587A8000, // Added for ascend testing.
- 0x7fff587A8000, 0x7fff587AA000, // Added for ascend testing.
- 0x7fff587AA000, 0x7fff587AC000, // Added for ascend testing.
- 0x7fff587AC000, 0x7fff587AD000, // Added for ascend testing.
- 0x7fff587AE000, 0x7fff587AF000, // Added for ascend testing.
};
/* req_range consists of 4 values.
0x0, // Min
0x7fff58791000, // Max
- 0x2000, // Size
+ 0x1F00, // Size
0x0, // First hole in our data of size 2000.
0, // Return value success.
-#if 0
// Test ascend.
- 0x7fff5879F000, // Min
- 0x, // Max
+ 34148797436 << 12, // Min
+ 0x7fff587AF000, // Max
0x3000, // Size
- 0x, // First hole in our data of size 1000.
+ 34148798629 << 12, // Expected location
0, // Return value success.
+ // Test failing.
+ 34148798623 << 12, // Min
+ 34148798683 << 12, // Max
+ 0x15000, // Size
+ 0, // Expected location
+ -EBUSY, // Return value failed.
+
// Test filling entire gap.
-#endif
+ 34148798623<< 12, // Min
+ 0x7fff587AF000, // Max
+ 0x10000, // Size
+ 34148798632 << 12, // Expected location
+ 0, // Return value success.
+
+ // Test walking off the end of root.
+ 0, // Min
+ -1, // Max
+ -1, // Size
+ 0, // Expected location
+ -EBUSY, // Return value failure.
+
+ // Test looking for too large a hole across entire range.
+ 0, // Min
+ -1, // Max
+ 4503599618982063UL << 12, // Size
+ 34359052178 << 12, // Expected location
+ -EBUSY, // Return failure.
};
int i, range_cnt = sizeof(range)/sizeof(range[0]);
int req_range_cnt = sizeof(req_range)/sizeof(req_range[0]);
+ unsigned long last = 0;
printk("range cnt = %d\n", range_cnt);
for (i = 0; i < range_cnt; i+=2) {
- printk("Decode table %d: %lx -> %ld\n", i, range[i], range[i] >> 12);
+ printk("hole: %lu\n", (range[i] >> 12) - last);
+ last = range[i+1] >> 12;
+ printk("Decode table %d: %lx %lx -> %lu - %lu ", i,
+ range[i], range[i+1],
+ range[i] >> 12, range[i+1] >> 12);
}
+ printk("last hole: %lu\n", (MTREE_ALLOC_MAX>>12) - last);
+ printk("i = count, inclusive start - inclusive end\n");
for (i = 0; i < range_cnt; i+=2) {
printk("i = %d, %ld-%ld\n", i, range[i] >> 12, (range[i + 1] >> 12) - 1);
+ // Inclusive start, inclusive end (with the -1).
check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
xa_mk_value(range[i] >> 12));
mt_dump(mt);
}
for (i = 0; i < req_range_cnt; i+=5) {
- printk("alloc range i = %d, start = %ld size %ld\n", i, req_range[i+3] >> 12,
- req_range[i+2] >> 12);
+ mt_dump(mt);
+ printk("%d: alloc %ld in range %ld - %ld\n", i,
+ req_range[i+2] >> 12,
+ req_range[i] >> 12,
+ req_range[i+1] >> 12);
+ if (req_range[i+4])
+ printk("\t Should fail\n");
+ else
+ printk ("\tShould be starting at %ld\n",
+ req_range[i+3] >> 12);
check_mtree_alloc_range(mt,
req_range[i] >> 12, // start
req_range[i+1] >> 12, // end
req_range[i+2] >> 12, // size
req_range[i+3] >> 12, // expected address
- req_range[i+4] >> 12, // expected return
+ req_range[i+4], // expected return
xa_mk_value(req_range[i] >> 12)); // pointer
- mt_dump(mt);
- printk("Done\n\n");
+ mt_dump(mt);
+ printk("Done\n\n");
}
mtree_destroy(mt);
check_nomem(&tree);
check_seq(&tree, 16, false);
- printk("start\n");
check_seq(&tree, 1000, true);
- printk("end\n");
check_new_node(&tree);
check_lower_bound_split(&tree);