/* Need to do corresponding calculations for 32-bit kernels */
#endif
typedef struct maple_pnode *maple_pnode;	/* encoded parent-node pointer */
/*
* We can be more cache-efficient if we interleave pivots and slots.
* Code will be more complex, though.
*/
struct maple_range_64 {
- struct maple_node *parent;
+ struct maple_pnode *parent;
void __rcu *slot[MAPLE_RANGE64_SLOTS];
u64 pivot[MAPLE_RANGE64_SLOTS - 1];
};
struct maple_arange_64 {
- struct maple_node *parent;
+ struct maple_pnode *parent;
u64 gap[MAPLE_ARANGE64_SLOTS];
void __rcu *slot[MAPLE_ARANGE64_SLOTS];
u64 pivot[MAPLE_ARANGE64_SLOTS - 1];
};
struct maple_range_32 {
- struct maple_node *parent;
+ struct maple_pnode *parent;
void __rcu *slot[MAPLE_RANGE32_SLOTS];
u32 pivot[MAPLE_RANGE32_SLOTS - 1];
};
struct maple_range_16 {
- struct maple_node *parent;
+ struct maple_pnode *parent;
void __rcu *slot[MAPLE_RANGE16_SLOTS];
u16 pivot[MAPLE_RANGE16_SLOTS - 1];
};
struct maple_sparse_64 {
- struct maple_node *parent;
+ struct maple_pnode *parent;
void __rcu *slot[MAPLE_SPARSE64_SLOTS];
u64 pivot[MAPLE_SPARSE64_SLOTS];
};
struct maple_sparse_32 {
- struct maple_node *parent;
+ struct maple_pnode *parent;
void __rcu *slot[MAPLE_SPARSE32_SLOTS];
u32 pivot[MAPLE_SPARSE32_SLOTS];
};
struct maple_sparse_21 {
- struct maple_node *parent;
+ struct maple_pnode *parent;
void __rcu *slot[MAPLE_SPARSE21_SLOTS];
u64 pivot[(MAPLE_SPARSE21_SLOTS + 2) / 3];
};
struct maple_sparse_16 {
- struct maple_node *parent;
+ struct maple_pnode *parent;
void __rcu *slot[MAPLE_SPARSE16_SLOTS];
u16 pivot[MAPLE_SPARSE16_SLOTS];
};
struct maple_sparse_9 {
- struct maple_node *parent;
+ struct maple_pnode *parent;
void __rcu *slot[MAPLE_SPARSE9_SLOTS];
u64 pivot[(MAPLE_SPARSE9_SLOTS + 6) / 7];
};
struct maple_sparse_6 {
- struct maple_node *parent;
+ struct maple_pnode *parent;
void __rcu *slot[MAPLE_SPARSE6_SLOTS];
u64 pivot; /* Use a bitmap for pivots */
};
struct maple_node {
union {
struct {
- struct maple_node *parent;
+ struct maple_pnode *parent;
void __rcu *slot[MAPLE_NODE_SLOTS];
};
struct {
#include <asm/barrier.h>
#define MA_ROOT_PARENT 1
/* Cast an encoded parent value to the opaque parent-pointer type. */
#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
static struct kmem_cache *maple_node_cache;
unsigned long mt_max[] = {
/*
 * mt_free() - Queue a maple node for freeing after an RCU grace period.
 * @node: the node being released.
 */
static void mt_free(struct maple_node *node)
{
	/*
	 * NOTE(review): the parent is pointed back at the node itself before
	 * the grace period — presumably this marks the node as dead for
	 * concurrent readers; confirm against the walk code.
	 */
	node->parent = ma_parent_ptr(node);
	call_rcu(&node->rcu, mt_free_rcu);
}
val &= ~bitmask; // Remove any old slot number.
val |= (slot << slot_shift); // Set the slot.
val |= type;
- mt_to_node(node)->parent = (struct maple_node *)val;
+ mt_to_node(node)->parent = ma_parent_ptr(val);
}
static inline unsigned int mt_parent_slot(struct maple_node *node)
}
static inline void ma_encoded_parent(struct ma_state *mas)
{
- struct maple_node *parent, *gparent;
+ void *parent, *gparent;
unsigned char slot;
- parent = mt_to_node(mas->node)->parent;
- if (ma_is_root(mt_to_node(mas->node)->parent)) {
+ if (ma_is_root(mt_parent(mas->node))) {
mas->node = mt_safe_root(rcu_dereference(mas->tree->ma_root));
+ mas->min = 0;
+ mas->max = mt_node_max(mas->node);
return;
}
+ /* Go up 2 levels */
+ parent = mt_parent(mt_to_node(mas->node));
gparent = mt_parent(parent);
+ /* Get the parents slot in the grand parent */
slot = mt_parent_slot(parent);
mas->node = mt_mk_node(gparent, mt_parent_enum(mas, parent));
ma_set_slot(mas, slot);
ma_adopt_children(mas->node);
if (ma_is_root(mas->node)) {
- mn->parent = (struct maple_node *)
- ((unsigned long)mas->tree | MA_ROOT_PARENT);
+ mn->parent = ma_parent_ptr(
+ ((unsigned long)mas->tree | MA_ROOT_PARENT));
rcu_assign_pointer(mas->tree->ma_root, mt_mk_root(mas->node));
} else {
rcu_assign_pointer(parent->slot[slot], mas->node);
if (mt_is_alloc(ms->tree))
mt = maple_aleaf_64;
ms->node = mt_mk_node(mn, mt);
- mn->parent = (struct maple_node*)
- ((unsigned long)ms->tree | MA_ROOT_PARENT);
+ mn->parent = ma_parent_ptr(
+ ((unsigned long)ms->tree | MA_ROOT_PARENT));
/* Assign the old entry to slot 0, or set it to null. */
ma_set_rcu_slot(mn, 0, r_entry);