/* Outside this node's range, the entry doesn't exist. */
if (ms->min > ms->index ||
ms->max < ms->index)
- return entry;
+ return entry; /* FIXME: Retry? */
if (ms->node == NULL)
return entry;
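/*
 * Count the entries in a node_64: pivots are packed from slot 0, so the
 * first zero pivot marks the end of the used slots.
 */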
static inline int _count_node_64(struct maple_state *ms)
{
int i;
struct maple_node_64 mn64 = _maple_to_node(ms->node)->map64;
+
for (i = 0; i < MAPLE_NODE64_MAX_PIVOT; i++) {
if (mn64.pivot[i] == 0)
break;
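/* Place the new entry in its slot; the pivot is set just past ms->end. */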
idx = p_here + shift;
mn64->pivot[idx] = ms->end + 1;
rcu_assign_pointer(mn64->slot[idx], entry);
- idx--;
if (shift > 0) {
+ idx--;
mn64->pivot[idx] = ms->index;
rcu_assign_pointer(mn64->slot[idx], NULL);
}
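/* Descend into the child node and update the walk limits for this level. */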
ms->node = entry;
_maple_update_limits(ms);
if (_maple_is_node_4(ms)) {
+ /* FIXME: What about range inserts on a node4? */
entry = _maple_walk_4(ms);
/* end of the line.. */
continue;
}
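/* Both -ENOMEM and, for now, -EINVAL force a retry of the operation. */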
if (ret == -ENOMEM)
return maple_retry(ms);
- if (ret == -EINVAL) // FIXME..
+ if (ret == -EINVAL) /* FIXME: root expand on split may return an invalid insert. */
return maple_retry(ms);
return NULL;
}
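/*
 * If the slot or the node changed during the walk, return non-NULL to
 * fail the insert.
 */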
struct maple_node *mn = ms->node;
entry = _maple_walk_64(ms, ms->end);
if (s_idx != ms->slot_idx || mn != ms->node)
- return mn;
+ return mn; /* Return non-NULL for an insert fail */
}
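/* Keep walking while the entry is an internal node pointer. */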
} while (xa_is_internal(entry));