bool alloc;
};
-static inline
+static __always_inline
void mns_node_part_leaf_init(struct ma_node_part *ma_part,
struct ma_wr_state *wr_mas, struct ma_node_state *src)
{
ma_part->skip = 1;
}
-static inline
+static __always_inline
void mns_insert_part(struct ma_node_part *part,
struct ma_node_state *dst)
{
* @len: The number of offsets to copy
*
*/
-static inline void mns_cp(struct ma_node_state *src, struct ma_node_state *dst,
+static __always_inline
+void mns_cp(struct ma_node_state *src, struct ma_node_state *dst,
unsigned char len)
{
unsigned long max;
if (src->gaps)
memcpy(dst->gaps + dst->offset, src->gaps + src->offset, size);
- BUG_ON(src->offset + len > mt_slots[src->type]);
if (src->offset + len > mt_pivots[src->type]) {
size = mt_pivots[src->type] - src->offset;
max = src->max;
* Zero any area that needs to be zeroed and set the metadata.
* metadata needs the largest gap for non-leaves.
*/
-static inline void mns_finalise(struct ma_node_state *p)
+static __always_inline
+void mns_finalise(struct ma_node_state *p)
{
unsigned long max_gap;
unsigned char len;
unsigned char offset;
unsigned char i;
unsigned long gap, pstart;
+ unsigned long *pivs;
+ void **slots;
if (!p->alloc)
goto finalise_leaf;
goto finalise_leaf;
}
+ slots = p->slots;
+ pivs = p->pivots;
/* Special case the first slot before the loop */
- if (likely(!p->slots[0])) {
+ if (likely(!slots[0])) {
- gap = p->pivots[0] - p->min + 1;
+ gap = pivs[0] - p->min + 1;
if (gap > max_gap)
max_gap = gap;
for (; i <= offset; i++) {
/* data == no gap. */
- if (likely(p->slots[i]))
+ if (slots[i])
continue;
- pstart = p->pivots[i - 1];
- gap = p->pivots[i] - pstart;
+ pstart = pivs[i - 1];
+ gap = pivs[i] - pstart;
if (gap > max_gap)
max_gap = gap;
trace_ma_write(__func__, mas, 0, wr_mas->entry);
mas_update_gap(mas);
mas->end = new_end;
return;
}
#endif
/* #define BENCH_SLOT_STORE */
/* #define BENCH_NODE_STORE */
/* #define BENCH_AWALK */
/* #define BENCH_WALK */
/* #define BENCH_LOAD */
#if defined(BENCH_NODE_STORE)
static noinline void __init bench_node_store(struct maple_tree *mt)
{
int i, overwrite = 76, max = 240, count = 20000000;
for (i = 0; i < max; i += 10)
mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);