[maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2),
#endif
};
-#define mt_min_slot_cnt(x) mt_min_slots[mte_node_type(x)]
+#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
* @mas: The maple state.
*
 * The first allocated node may be used to account for many other nodes.
- * Please see mas_alloc_cnt() and mas_next_alloc() for complete use.
+ * Please see mas_alloc_count() and mas_next_alloc() for complete use.
*
* Returns: The first allocated node.
*
}
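/*
 * A rough sketch of the layout described above (illustrative, not part
 * of this patch): the node at mas->alloc is both a spare node and the
 * accounting root.  Its slots hold further spare nodes, and each of
 * those can in turn hold a full set of spares in its own slots, giving
 * a shallow two-level overflow tree:
 *
 *   mas->alloc -> node              1 node
 *                  slot[0..n]       up to MAPLE_NODE_SLOTS nodes
 *                   slot[0..n]      overflow spares, counted by
 *                                   ma_node_alloc_count()
 */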
/*
- * ma_node_alloc_cnt() - Get the number of allocations stored in this node.
+ * ma_node_alloc_count() - Get the number of allocations stored in this node.
* @node: The maple node
*
 * Used to calculate the total number of allocated nodes in a maple state. See
- * mas_alloc_cnt().
+ * mas_alloc_count().
*
 * Returns: The count of the allocated nodes stored in this node's slots.
*
*/
-static inline int ma_node_alloc_cnt(const struct maple_node *node)
+static inline int ma_node_alloc_count(const struct maple_node *node)
{
int slot = 0;
}
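/*
 * The body above is elided in this excerpt.  A minimal sketch of the
 * idea, assuming slots are filled from slot 0 and the first NULL slot
 * terminates the run (the _sketch suffix marks this as hypothetical):
 */
static inline int ma_node_alloc_count_sketch(const struct maple_node *node)
{
	int slot = 0;

	/* Count leading non-NULL slots; a NULL slot ends the run. */
	while (slot < MAPLE_NODE_SLOTS && node->slot[slot])
		slot++;
	return slot;
}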
/*
- * mas_alloc_cnt() - Get the number of nodes allocated in a maple state.
+ * mas_alloc_count() - Get the number of nodes allocated in a maple state.
* @mas: The maple state
*
* Walks through the allocated nodes and returns the number allocated.
 * Returns: The total number of nodes allocated.
*
*/
-static inline int mas_alloc_cnt(const struct ma_state *mas)
+static inline int mas_alloc_count(const struct ma_state *mas)
{
struct maple_node *node = mas_get_alloc(mas);
int ret = 1;
if (!node)
return 0;
- slot = ma_node_alloc_cnt(node);
+ slot = ma_node_alloc_count(node);
ret += slot;
while (--slot >= 0) {
if (ma_mnode_ptr(node->slot[slot])->slot[0])
- ret += ma_node_alloc_cnt(node->slot[slot]);
+ ret += ma_node_alloc_count(node->slot[slot]);
}
return ret;
}
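/*
 * Worked example, assuming MAPLE_NODE_SLOTS == 31 (the typical 64-bit
 * value): if the top node has two filled slots and each of those child
 * nodes holds five spares of its own, the total is
 * 1 (top) + 2 (children) + 5 + 5 == 13.
 */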
static inline struct maple_node *mas_next_alloc(struct ma_state *ms)
{
- int cnt;
+ int count;
struct maple_node *mn, *smn;
if (!ms->alloc)
return NULL;
- cnt = mas_alloc_cnt(ms);
+ count = mas_alloc_count(ms);
mn = mas_get_alloc(ms);
- if (cnt == 1) {
+ if (count == 1) {
ms->alloc = NULL;
- } else if (cnt <= MAPLE_NODE_SLOTS + 1) {
- cnt -= 2;
- smn = mn->slot[cnt];
- mn->slot[cnt] = NULL;
+ } else if (count <= MAPLE_NODE_SLOTS + 1) {
+ count -= 2;
+ smn = mn->slot[count];
+ mn->slot[count] = NULL;
mn = smn;
- } else if (cnt > MAPLE_NODE_SLOTS + 1) {
- cnt -= 2;
- smn = mn->slot[(cnt / MAPLE_NODE_SLOTS) - 1];
- mn = smn->slot[(cnt % MAPLE_NODE_SLOTS)];
- smn->slot[cnt % MAPLE_NODE_SLOTS] = NULL;
+ } else if (count > MAPLE_NODE_SLOTS + 1) {
+ count -= 2;
+ smn = mn->slot[(count / MAPLE_NODE_SLOTS) - 1];
+ mn = smn->slot[(count % MAPLE_NODE_SLOTS)];
+ smn->slot[count % MAPLE_NODE_SLOTS] = NULL;
}
return mn;
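/*
 * Worked example for the two-level branch, again assuming
 * MAPLE_NODE_SLOTS == 31: with count == 40, count -= 2 leaves 38, so the
 * spare is taken from mn->slot[38 / 31 - 1] == mn->slot[0], at that
 * child's slot[38 % 31] == slot[7], which is then cleared.
 */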
{
struct maple_node *reuse = mte_to_node(used);
struct maple_node *node = mas_get_alloc(mas);
- int cnt;
+ int count;
memset(reuse, 0, sizeof(*reuse));
- cnt = mas_alloc_cnt(mas);
- if (cnt == 0) {
+ count = mas_alloc_count(mas);
+ if (count == 0) {
mas->alloc = reuse;
- } else if (cnt <= MAPLE_NODE_SLOTS) {
- cnt--;
- node->slot[cnt] = reuse;
+ } else if (count <= MAPLE_NODE_SLOTS) {
+ count--;
+ node->slot[count] = reuse;
} else {
struct maple_node *smn;
- cnt--;
- smn = node->slot[(cnt/MAPLE_NODE_SLOTS) - 1];
- smn->slot[cnt % MAPLE_NODE_SLOTS] = reuse;
+ count--;
+ smn = node->slot[(count / MAPLE_NODE_SLOTS) - 1];
+ smn->slot[count % MAPLE_NODE_SLOTS] = reuse;
}
- cnt = mas_alloc_cnt(mas);
+ count = mas_alloc_count(mas);
- BUG_ON(!mas_alloc_cnt(mas));
+ BUG_ON(!count);
}
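/*
 * mas_push_node() is the inverse of mas_next_alloc().  Worked example
 * (MAPLE_NODE_SLOTS == 31 again): with count == 40 before the push,
 * count-- leaves 39, so the reused node is stored at
 * node->slot[39 / 31 - 1]->slot[39 % 31], i.e. slot[0]->slot[8],
 * exactly where the next mas_next_alloc() call will look for it.
 */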
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
struct maple_node *mn, *smn;
int req = mas_alloc_req(ms);
- int allocated = mas_alloc_cnt(ms);
+ int allocated = mas_alloc_count(ms);
int slot;
if (!req)
return true;
}
-static inline struct maple_node *mas_node_cnt(struct ma_state *mas, int count)
+static inline struct maple_node *mas_node_count(struct ma_state *mas, int count)
{
- int allocated = mas_alloc_cnt(mas);
+ int allocated = mas_alloc_count(mas);
//BUG_ON(count > 127);
if (allocated < count) {
nr_nodes = DIV_ROUND_UP(nr_entries, MAPLE_RANGE64_SLOTS); // leaves
nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
- mas_node_cnt(mas, min(nr_nodes, (int)MAPLE_NODE_MAX));
+ mas_node_count(mas, min(nr_nodes, (int)MAPLE_NODE_MAX));
if (!mas_is_err(mas))
return 0;
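/*
 * Worked example of the estimate above, assuming MAPLE_RANGE64_SLOTS ==
 * 16 (the typical 64-bit value) and, hypothetically, a nonleaf_cap of
 * 15: 1000 entries need DIV_ROUND_UP(1000, 16) == 63 leaves plus
 * DIV_ROUND_UP(63, 15) == 5 internal nodes, so 68 nodes are requested,
 * capped at MAPLE_NODE_MAX.
 */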
* @b_node: the maple_big_node that contains the data.
* @size: the amount of data in the b_node
* @split: the potential split location
- * @slot_cnt: the size that can be stored in a single node being considered.
+ * @slot_count: the number of slots in the node type being considered.
* Returns: true if a middle node is required.
*/
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
- unsigned char slot_cnt)
+ unsigned char slot_count)
{
unsigned char size = b_node->b_end;
- if (size >= 2 * slot_cnt)
+ if (size >= 2 * slot_count)
return true;
- if (!b_node->slot[split] && (size >= 2 * slot_cnt - 1))
+ if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
return true;
return false;
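/*
 * Worked example, assuming a slot_count of 16: 32 or more entries cannot
 * split into two nodes of 16, so a middle node is required; at exactly
 * 31 entries one is still required if the proposed split lands on a
 * NULL, since a node may not end on a NULL entry.
 */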
*
* @b_node: the maple_big_node with the data
* @split: the suggested split location
- * @slot_cnt: the number of slots in the node being considered.
+ * @slot_count: the number of slots in the node being considered.
* Returns the split location.
*/
static inline int mab_no_null_split(struct maple_big_node *b_node,
- unsigned char split, unsigned char slot_cnt)
+ unsigned char split, unsigned char slot_count)
{
if (!b_node->slot[split]) {
/* If the split is less than the max slot and the right side will
* still be sufficient, then advance the split past the NULL entry.
*/
- if ((split < slot_cnt - 1) &&
+ if ((split < slot_count - 1) &&
(b_node->b_end - split) > (mt_min_slots[b_node->type]))
split++;
else
unsigned char *mid_split)
{
int split = b_node->b_end / 2; // Assume equal split.
- unsigned char slot_cnt = mt_slots[b_node->type];
+ unsigned char slot_count = mt_slots[b_node->type];
- if (mab_middle_node(b_node, split, slot_cnt)) {
+ if (mab_middle_node(b_node, split, slot_count)) {
split = b_node->b_end / 3;
*mid_split = split * 2;
} else {
* causes one node to be deficient.
* NOTE: mt_min_slots is 1 based; b_end and split are zero based.
*/
- while (((b_node->pivot[split] - b_node->min) < slot_cnt - 1) &&
- (split < slot_cnt - 1) &&
+ while (((b_node->pivot[split] - b_node->min) < slot_count - 1) &&
+ (split < slot_count - 1) &&
(b_node->b_end - split > mt_min_slots[b_node->type] - 1))
split++;
}
/* Avoid ending a node on a NULL entry */
- split = mab_no_null_split(b_node, split, slot_cnt);
+ split = mab_no_null_split(b_node, split, slot_count);
if (!(*mid_split))
return split;
- *mid_split = mab_no_null_split(b_node, *mid_split, slot_cnt);
+ *mid_split = mab_no_null_split(b_node, *mid_split, slot_count);
return split;
}
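/*
 * Worked example (slot_count == 16): with b_end == 34, mab_middle_node()
 * returns true, so the split becomes three-way: split = 34 / 3 == 11 and
 * *mid_split == 22.  With b_end == 24 the halved split of 12 is used,
 * advanced by the loop above while the left range is still too small and
 * the right side remains sufficient, then moved off any NULL entry by
 * mab_no_null_split().
 */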
unsigned char *mid_split)
{
unsigned char split = 0;
- unsigned char slot_cnt = mt_slots[b_node->type];
+ unsigned char slot_count = mt_slots[b_node->type];
*left = mas_new_ma_node(mas, b_node);
*right = NULL;
*middle = NULL;
*mid_split = 0;
- if (b_node->b_end < slot_cnt) {
+ if (b_node->b_end < slot_count) {
split = b_node->b_end;
} else {
split = mab_calc_split(b_node, mid_split);
*/
static inline bool mast_sufficient(struct maple_subtree_state *mast)
{
- if (mast->bn->b_end > mt_min_slot_cnt(mast->orig_l->node))
+ if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
return true;
return false;
static inline int mas_rebalance(struct ma_state *mas,
struct maple_big_node *b_node)
{
- char empty_cnt = mas_mt_height(mas);
+ char empty_count = mas_mt_height(mas);
struct maple_subtree_state mast;
unsigned char shift, b_end = ++b_node->b_end;
trace_mas_rebalance(mas);
- mas_node_cnt(mas, 1 + empty_cnt * 3);
+ mas_node_count(mas, 1 + empty_count * 3);
if (mas_is_err(mas))
return 0;
l_mas.index = l_mas.last = l_mas.min;
}
- return mas_spanning_rebalance(mas, &mast, empty_cnt);
+ return mas_spanning_rebalance(mas, &mast, empty_count);
}
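/*
 * The 1 + empty_count * 3 request above covers the worst case of the
 * spanning rebalance: up to three new nodes (left, middle, and right)
 * per level of the tree, plus one node for a new root.
 */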
static inline bool _mas_split_final_node(struct maple_subtree_state *mast,
trace_mas_split(mas);
mas->depth = mas_mt_height(mas);
// Allocation failures will happen early.
- mas_node_cnt(mas, 1 + mas->depth * 2);
+ mas_node_count(mas, 1 + mas->depth * 2);
if (mas_is_err(mas))
return 0;
if (mas_reuse_node(mas, b_node, end))
goto reused_node;
- mas_node_cnt(mas, 1);
+ mas_node_count(mas, 1);
if (mas_is_err(mas))
return 0;
int slot = 0;
- mas_node_cnt(mas, 1);
+ mas_node_count(mas, 1);
if (mas_is_err(mas))
return 0;
struct maple_big_node b_node;
struct maple_subtree_state mast;
unsigned char height = mas_mt_height(mas);
- int node_cnt = 1 + height * 3;
+ int node_count = 1 + height * 3;
// Holds new left and right sub-tree
MA_STATE(l_mas, mas->tree, mas->index, mas->index);
/* Node rebalancing may occur due to this store, so there may be up to
* three new nodes per level (left, middle, and right) plus a new root.
*/
- mas_node_cnt(mas, node_cnt);
+ mas_node_count(mas, node_count);
if (mas_is_err(mas))
return 0;
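/* For example, at a height of 4 this requests 1 + 4 * 3 == 13 nodes. */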
{
enum maple_type type = mte_node_type(mas->node);
unsigned long pivot, min, gap = 0;
- unsigned char offset = 0, pivot_cnt = mt_pivots[type];
+ unsigned char offset = 0, pivot_count = mt_pivots[type];
unsigned long *gaps = NULL, *pivots = ma_pivots(mas_mn(mas), type);
void **slots = ma_slots(mas_mn(mas), type);
bool found = false;
}
min = mas_safe_min(mas, pivots, offset);
- for (; offset <= pivot_cnt; offset++) {
+ for (; offset <= pivot_count; offset++) {
pivot = _mas_safe_pivot(mas, pivots, offset, type);
if (offset && !pivot)
break;
while (!mas_is_none(&mas)) {
if (!mte_is_root(mas.node)) {
end = mas_data_end(&mas);
- if ((end < mt_min_slot_cnt(mas.node)) &&
+ if ((end < mt_min_slot_count(mas.node)) &&
(mas.max != ULONG_MAX)) {
pr_err("Invalid size %u of "MA_PTR"\n", end,
mas_mn(&mas));