#define MAPLE_NODE_SLOTS 31 /* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS 16 /* 256 bytes */
#define MAPLE_ARANGE64_SLOTS 10 /* 240 bytes */
+#define MAPLE_NODE_MASK 255UL
#else
#define MAPLE_NODE_SLOTS 15 /* 128 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS 8 /* 128 bytes */
#define MAPLE_ARANGE64_SLOTS 5 /* 120 bytes */
+#define MAPLE_NODE_MASK 127UL
#endif /* NODE256 */
-#define MA_MAX_ALLOC 127
#else
/* TODO: work out the corresponding values for 32-bit kernels */
#endif
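
The two mask values follow from the node sizes above: a node occupies either 256 or 128 bytes and is allocated at that alignment, so MAPLE_NODE_MASK is the node size minus one, i.e. exactly the pointer bits guaranteed to be zero. A minimal compile-time sketch of that relationship (MAPLE_NODE_SIZE is an illustrative name, not part of the patch):

#include <assert.h>

/* Illustrative only: derive the mask from an assumed node size. */
#define MAPLE_NODE_SIZE 256UL
#define MAPLE_NODE_MASK (MAPLE_NODE_SIZE - 1)

/*
 * The mask covers exactly the alignment bits of a node pointer, which
 * only works if the node size is a power of two.
 */
static_assert((MAPLE_NODE_SIZE & MAPLE_NODE_MASK) == 0,
	      "node size must be a power of two");
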
/*
 * mte_parent() - Get the parent of @enode with the low metadata bits
 * cleared.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
- unsigned long bitmask = 0x7F;
-
- return (void *)((unsigned long)(mte_to_node(enode)->parent) & ~bitmask);
+ return (void *)((unsigned long)
+ (mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}
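
mte_parent() relies on the tagging scheme used throughout the tree: because nodes are size-aligned, the low bits of a stored node pointer are free to carry metadata, and masking with ~MAPLE_NODE_MASK recovers the clean pointer. A minimal user-space sketch of the round trip, assuming 256-byte nodes and the kernel's pointer-sized unsigned long (everything here is illustrative, not the kernel API):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define MAPLE_NODE_MASK 255UL	/* assuming 256-byte, 256-byte-aligned nodes */

int main(void)
{
	/* A node allocated at its size boundary: the low 8 bits are zero. */
	void *node = aligned_alloc(256, 256);
	uintptr_t tagged = (uintptr_t)node | 0x5;	/* stash metadata in spare bits */
	void *clean = (void *)(tagged & ~MAPLE_NODE_MASK);

	assert(clean == node);				/* the mask recovers the pointer */
	assert((tagged & MAPLE_NODE_MASK) == 0x5);	/* and the metadata reads back */
	free(node);
	return 0;
}

The same invariant is what lets mas->alloc below double as both a node pointer and a request counter.
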
/*
 * mas_get_alloc() - Get the first allocated node from the maple state.
 */
static inline struct maple_node *mas_get_alloc(const struct ma_state *mas)
{
- return (struct maple_node *)((unsigned long)mas->alloc & ~0x7F);
+ return (struct maple_node *)
+ ((unsigned long)mas->alloc & ~MAPLE_NODE_MASK);
}
/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, int count)
{
- mas->alloc = (struct maple_node *)((unsigned long)mas->alloc & ~0x7F);
- mas->alloc = (struct maple_node *)((unsigned long)mas->alloc | count);
+ mas->alloc = (struct maple_node *)
+ (((unsigned long)mas->alloc & ~MAPLE_NODE_MASK) | count);
}
/*
 * mas_alloc_req() - Get the requested number of allocations.
 */
static inline int mas_alloc_req(const struct ma_state *mas)
{
- return (int)(((unsigned long)mas->alloc & 0x7F));
+ return (int)((unsigned long)mas->alloc & MAPLE_NODE_MASK);
}
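
Taken together, mas_set_alloc_req() and mas_alloc_req() treat the low bits of mas->alloc as a small counter: the count is ORed into bits the node pointer cannot use, and read back with the same mask. A minimal user-space sketch of that encode/decode pair (the pared-down ma_state and the helper names are illustrative):

#include <assert.h>
#include <stdint.h>

#define MAPLE_NODE_MASK 127UL	/* assuming 128-byte nodes */

struct ma_state { void *alloc; };	/* pared down for illustration */

static void set_req(struct ma_state *mas, int count)
{
	/* Clear the old count, then OR the new one into the spare low bits. */
	mas->alloc = (void *)(((uintptr_t)mas->alloc & ~MAPLE_NODE_MASK) | count);
}

static int get_req(const struct ma_state *mas)
{
	/* The masked-off low bits are the pending request count. */
	return (int)((uintptr_t)mas->alloc & MAPLE_NODE_MASK);
}

int main(void)
{
	struct ma_state mas = { .alloc = 0 };

	set_req(&mas, 42);
	assert(get_req(&mas) == 42);	/* round trip preserves the count */
	set_req(&mas, 3);		/* a new request replaces the old one */
	assert(get_req(&mas) == 3);
	return 0;
}
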
/*
 * mas_node_cnt() - Ensure @mas has at least @count nodes allocated,
 * requesting more if needed.
 */
static inline bool mas_node_cnt(struct ma_state *mas, int count)
{
int allocated = mas_alloc_cnt(mas);
- BUG_ON(count > 127);
+ //BUG_ON(count > 127);
if (allocated < count) {
mas_set_alloc_req(mas, count - allocated);
mas_node_node(mas, GFP_NOWAIT | __GFP_NOWARN);
MT_BUG_ON(mt, mas_alloc_cnt(&mas) != 0);
}
- for (i = 1; i < 128; i++) {
+ for (i = 1; i < MAPLE_NODE_MASK + 1; i++) {
MA_STATE(mas2, mt, 0, 0);
mas_node_cnt(&mas, i); // Request
mas_nomem(&mas, GFP_KERNEL); // Fill request