All users of the IDR have been converted to the XArray; remove the IDR
and the parts of the radix tree which existed only to support it.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
- 'ide_host_for_each_port'
- 'ide_port_for_each_dev'
- 'ide_port_for_each_present_dev'
- - 'idr_for_each_entry'
- - 'idr_for_each_entry_continue'
- 'inet_bind_bucket_for_each'
- 'inet_lhash2_for_each_icsk_rcu'
- 'key_for_each'
- 'protocol_for_each_card'
- 'protocol_for_each_dev'
- 'queue_for_each_hw_ctx'
- - 'radix_tree_for_each_slot'
- 'rbtree_postorder_for_each_entry_safe'
- 'rdma_for_each_port'
- 'resource_list_for_each_entry'
A common problem to solve is allocating identifiers (IDs); generally
small numbers which identify a thing. Examples include file descriptors,
process IDs, packet identifiers in networking protocols, SCSI tags
-and device instance numbers. The IDR and the IDA provide a reasonable
-solution to the problem to avoid everybody inventing their own. The IDR
-provides the ability to map an ID to a pointer, while the IDA provides
-only ID allocation, and as a result is much more memory-efficient.
-
-IDR usage
-=========
-
-Start by initialising an IDR, either with :c:func:`DEFINE_IDR`
-for statically allocated IDRs or :c:func:`idr_init` for dynamically
-allocated IDRs.
-
-You can call :c:func:`idr_alloc` to allocate an unused ID. Look up
-the pointer you associated with the ID by calling :c:func:`idr_find`
-and free the ID by calling :c:func:`idr_remove`.
-
-To perform an action on all pointers used by the IDR, you can
-use :c:func:`idr_for_each_entry`. You may need to use
-:c:func:`idr_for_each_entry_continue` to continue an iteration. You can
-also use :c:func:`idr_get_next` if the iterator doesn't fit your needs.
-
-When you have finished using an IDR, you can call :c:func:`idr_destroy`
-to release the memory used by the IDR. This will not free the objects
-pointed to from the IDR; if you want to do that, use one of the iterators
-to do it.
-
-You can use :c:func:`idr_is_empty` to find out whether there are any
-IDs currently allocated.
-
-.. kernel-doc:: include/linux/idr.h
- :doc: idr sync
+and device instance numbers. The IDA provides a simple API to allocate
+and free IDs in a memory-efficient way. If you need to map IDs to
+pointers, use the XArray; if you need CPU efficiency more than memory
+efficiency, use the sbitmap.
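+
+For example, a driver might hand out small instance numbers like this
+(a minimal sketch; ``instance_ida`` is an illustrative name, not an
+existing symbol)::
+
+	static DEFINE_IDA(instance_ida);
+
+	int get_instance(void)
+	{
+		/* Returns a new ID, or a negative errno on failure */
+		return ida_alloc(&instance_ida, GFP_KERNEL);
+	}
+
+	void put_instance(int instance)
+	{
+		ida_free(&instance_ida, instance);
+	}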
IDA usage
=========
#ifndef __IDR_H__
#define __IDR_H__
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
-struct idr {
- struct radix_tree_root idr_rt;
- unsigned int idr_base;
-};
-
-/*
- * The IDR API does not expose the tagging functionality of the radix tree
- * to users. Use tag 0 to track whether a node has free space below it.
- */
-#define IDR_FREE 0
-
-/* Set the IDR flag and the IDR_FREE tag */
-#define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \
- (1 << (ROOT_TAG_SHIFT + IDR_FREE)))
-
-#define IDR_INIT_BASE(name, base) { \
- .idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER), \
- .idr_base = (base), \
-}
-
-/**
- * IDR_INIT() - Initialise an IDR.
- * @name: Name of IDR.
- *
- * A freshly-initialised IDR contains no IDs.
- */
-#define IDR_INIT(name) IDR_INIT_BASE(name, 0)
-
-/**
- * DEFINE_IDR() - Define a statically-allocated IDR.
- * @name: Name of IDR.
- *
- * An IDR defined using this macro is ready for use with no additional
- * initialisation required. It contains no IDs.
- */
-#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
-
-/**
- * DOC: idr sync
- * idr synchronization (stolen from radix-tree.h)
- *
- * idr_find() is able to be called locklessly, using RCU. The caller must
- * ensure calls to this function are made within rcu_read_lock() regions.
- * Other readers (lock-free or otherwise) and modifications may be running
- * concurrently.
- *
- * It is still required that the caller manage the synchronization and
- * lifetimes of the items. So if RCU lock-free lookups are used, typically
- * this would mean that the items have their own locks, or are amenable to
- * lock-free access; and that the items are freed by RCU (or only freed after
- * having been deleted from the idr tree *and* a synchronize_rcu() grace
- * period).
- */
-
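For converted code, the XArray preserves the lockless-lookup guarantee
described above: xa_load() may be called under the RCU read lock, and the
caller still manages item lifetimes. A rough sketch, where "things",
"struct thing" and "get_thing()" are illustrative names, not from this
patch:

	struct thing *thing;

	rcu_read_lock();
	thing = xa_load(&things, id);	/* like idr_find() under RCU */
	if (thing)
		get_thing(thing);	/* caller's own lifetime rule */
	rcu_read_unlock();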
-#define idr_lock(idr) xa_lock(&(idr)->idr_rt)
-#define idr_unlock(idr) xa_unlock(&(idr)->idr_rt)
-#define idr_lock_bh(idr) xa_lock_bh(&(idr)->idr_rt)
-#define idr_unlock_bh(idr) xa_unlock_bh(&(idr)->idr_rt)
-#define idr_lock_irq(idr) xa_lock_irq(&(idr)->idr_rt)
-#define idr_unlock_irq(idr) xa_unlock_irq(&(idr)->idr_rt)
-#define idr_lock_irqsave(idr, flags) \
- xa_lock_irqsave(&(idr)->idr_rt, flags)
-#define idr_unlock_irqrestore(idr, flags) \
- xa_unlock_irqrestore(&(idr)->idr_rt, flags)
-
-int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
-void *idr_remove(struct idr *, unsigned long id);
-void *idr_find(const struct idr *, unsigned long id);
-void *idr_get_next(struct idr *, int *nextid);
-void idr_destroy(struct idr *);
-
-/**
- * idr_init_base() - Initialise an IDR.
- * @idr: IDR handle.
- * @base: The base value for the IDR.
- *
- * This variation of idr_init() creates an IDR which will allocate IDs
- * starting at %base.
- */
-static inline void idr_init_base(struct idr *idr, int base)
-{
- INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
- idr->idr_base = base;
-}
-
-/**
- * idr_init() - Initialise an IDR.
- * @idr: IDR handle.
- *
- * Initialise a dynamically allocated IDR. To initialise a
- * statically allocated IDR, use DEFINE_IDR().
- */
-static inline void idr_init(struct idr *idr)
-{
- idr_init_base(idr, 0);
-}
-
-/**
- * idr_is_empty() - Are there any IDs allocated?
- * @idr: IDR handle.
- *
- * Return: %true if no IDs are currently allocated from this IDR.
- */
-static inline bool idr_is_empty(const struct idr *idr)
-{
- return radix_tree_empty(&idr->idr_rt) &&
- radix_tree_tagged(&idr->idr_rt, IDR_FREE);
-}
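The XArray provides rough equivalents for the definitions above, for code
being converted (a sketch, not part of this patch; "things" is an
illustrative name):

	static DEFINE_XARRAY_ALLOC(things);	/* instead of DEFINE_IDR() */

	/* instead of idr_init(): */
	xa_init_flags(&things, XA_FLAGS_ALLOC);
	/* instead of idr_is_empty(): */
	if (xa_empty(&things))
		pr_debug("no IDs allocated\n");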
-
-/**
- * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
- * @idr: IDR handle.
- * @entry: The type * to use as cursor
- * @id: Entry ID.
- *
- * @entry and @id do not need to be initialized before the loop, and
- * after normal termination @entry is left with the value NULL. This
- * is convenient for a "not found" value.
- */
-#define idr_for_each_entry(idr, entry, id) \
- for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)
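Iteration in converted code uses the XArray's xa_for_each(), which reads
much the same way (a sketch; "things" is an illustrative xarray):

	void *entry;
	unsigned long index;

	xa_for_each(&things, index, entry) {
		/* visits every present entry, as idr_for_each_entry() did */
	}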
-
-/**
- * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
- * @idr: IDR handle.
- * @entry: The type * to use as a cursor.
- * @id: Entry ID.
- *
- * Continue to iterate over entries, continuing after the current position.
- */
-#define idr_for_each_entry_continue(idr, entry, id) \
- for ((entry) = idr_get_next((idr), &(id)); \
- entry; \
- ++id, (entry) = idr_get_next((idr), &(id)))
-
-/**
- * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
- * @idr: IDR handle.
- * @entry: The type * to use as a cursor.
- * @tmp: A temporary placeholder for ID.
- * @id: Entry ID.
- *
- * Continue to iterate over entries, continuing after the current position.
- */
-#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
- for (tmp = id; \
- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
- tmp = id, ++id)
-
/*
* IDA - ID Allocator, use when translation from id to pointer isn't necessary.
*/
#include <linux/types.h>
#include <linux/xarray.h>
-/* Keep unconverted code working */
-#define radix_tree_node xa_node
-
-struct radix_tree_root {
- spinlock_t xa_lock;
- gfp_t xa_flags;
- void __rcu * xa_head;
-};
-
-/*
- * The bottom two bits of the slot determine how the remaining bits in the
- * slot are interpreted:
- *
- * 00 - data pointer
- * 10 - internal entry
- * x1 - value entry
- *
- * The internal entry may be a pointer to the next level in the tree, a
- * sibling entry, or an indicator that the entry in this slot has been moved
- * to another location in the tree and the lookup should be restarted. While
- * NULL fits the 'data pointer' pattern, it means that there is no entry in
- * the tree for this index (no matter what level of the tree it is found at).
- * This means that storing a NULL entry in the tree is the same as deleting
- * the entry from the tree.
- */
-#define RADIX_TREE_ENTRY_MASK 3UL
-#define RADIX_TREE_INTERNAL_NODE 2UL
-
-static inline bool radix_tree_is_internal_node(void *ptr)
-{
- return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
- RADIX_TREE_INTERNAL_NODE;
-}
-
/*** radix-tree API starts here ***/
-#define RADIX_TREE_MAP_SHIFT XA_CHUNK_SHIFT
-#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
-#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
-
-#define RADIX_TREE_MAX_TAGS XA_MAX_MARKS
-#define RADIX_TREE_TAG_LONGS XA_MARK_LONGS
-
-#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
-#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
- RADIX_TREE_MAP_SHIFT))
-
-/* The IDR tag is stored in the low bits of xa_flags */
-#define ROOT_IS_IDR ((__force gfp_t)4)
-/* The top bits of xa_flags are used to store the root tags */
-#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT)
-
-#define RADIX_TREE_INIT(name, mask) XARRAY_INIT(name, mask)
-
-#define RADIX_TREE(name, mask) \
- struct radix_tree_root name = RADIX_TREE_INIT(name, mask)
-
-#define INIT_RADIX_TREE(root, mask) do { \
- spin_lock_init(&(root)->xa_lock); \
- (root)->xa_flags = mask; \
- (root)->xa_head = NULL; \
-} while (0)
-
-static inline bool radix_tree_empty(const struct radix_tree_root *root)
-{
- return root->xa_head == NULL;
-}
-
-/**
- * struct radix_tree_iter - radix tree iterator state
- *
- * @index: index of current slot
- * @next_index: one beyond the last index for this chunk
- * @node: node that contains current slot
- *
- * This radix tree iterator works in terms of "chunks" of slots. A chunk is a
- * subinterval of slots contained within one radix tree leaf node. It is
- * described by a pointer to its first slot and a struct radix_tree_iter
- * which holds the chunk's position in the tree and its size. For tagged
- * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
- * radix tree tag.
- */
-struct radix_tree_iter {
- unsigned long index;
- unsigned long next_index;
- struct radix_tree_node *node;
-};
-
-/**
- * Radix-tree synchronization
- *
- * The radix-tree API requires that users provide all synchronisation (with
- * specific exceptions, noted below).
- *
- * Synchronization of access to the data items being stored in the tree, and
- * management of their lifetimes must be completely managed by API users.
- *
- * For API usage, in general,
- * - any function _modifying_ the tree or tags (inserting or deleting
- * items, setting or clearing tags) must exclude other modifications, and
- * exclude any functions reading the tree.
- * - any function _reading_ the tree or tags (looking up items or tags)
- * must exclude modifications to the tree, but may occur
- * concurrently with other readers.
- *
- * The notable exceptions to this rule are the following functions:
- * __radix_tree_lookup
- * radix_tree_lookup
- * radix_tree_tag_get
- * radix_tree_tagged
- *
- * The first 3 functions are able to be called locklessly, using RCU. The
- * caller must ensure calls to these functions are made within rcu_read_lock()
- * regions. Other readers (lock-free or otherwise) and modifications may be
- * running concurrently.
- *
- * It is still required that the caller manage the synchronization and lifetimes
- * of the items. So if RCU lock-free lookups are used, typically this would mean
- * that the items have their own locks, or are amenable to lock-free access; and
- * that the items are freed by RCU (or only freed after having been deleted from
- * the radix tree *and* a synchronize_rcu() grace period).
- *
- * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
- * access to data items when inserting into or looking up from the radix tree)
- *
- * Note that the value returned by radix_tree_tag_get() may not be relied upon
- * if only the RCU read lock is held. Functions to set/clear tags and to
- * delete nodes running concurrently with it may affect its result such that
- * two consecutive reads in the same locked section may return different
- * values. If reliability is required, modification functions must also be
- * excluded from concurrency.
- *
- * radix_tree_tagged is able to be called without locking or RCU.
- */
-
-void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
- struct radix_tree_node **nodep, void __rcu ***slotp);
-void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
-void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
- void __rcu **slot, void *entry);
-void radix_tree_iter_replace(struct radix_tree_root *,
- const struct radix_tree_iter *, void __rcu **slot, void *entry);
-void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void radix_tree_init(void);
-int radix_tree_tag_get(const struct radix_tree_root *,
- unsigned long index, unsigned int tag);
-void radix_tree_iter_tag_clear(struct radix_tree_root *,
- const struct radix_tree_iter *iter, unsigned int tag);
-int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
-
-void __rcu **idr_get_free(struct radix_tree_root *root,
- struct radix_tree_iter *iter, gfp_t gfp,
- unsigned long max);
-
-/**
- * radix_tree_iter_init - initialize radix tree iterator
- *
- * @iter: pointer to iterator state
- * @start: iteration starting index
- * Returns: NULL
- */
-static __always_inline void __rcu **
-radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
-{
- /*
- * Set index to zero to bypass next_index overflow protection.
- * See the comment in radix_tree_next_chunk() for details.
- */
- iter->index = 0;
- iter->next_index = start;
- return NULL;
-}
-
-/**
- * radix_tree_next_chunk - find next chunk of slots for iteration
- *
- * @root: radix tree root
- * @iter: iterator state
- * @flags: RADIX_TREE_ITER_* flags and tag index
- * Returns: pointer to chunk first slot, or NULL if there are no more left
- *
- * This function looks up the next chunk in the radix tree starting from
- * @iter->next_index. It returns a pointer to the chunk's first slot.
- * It also fills @iter with data about the chunk: its position in the
- * tree (index) and its end (next_index).
- */
-void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
- struct radix_tree_iter *iter, unsigned flags);
-
-/**
- * radix_tree_iter_find - find a present entry
- * @root: radix tree root
- * @iter: iterator state
- * @index: start location
- *
- * This function returns the slot containing the entry with the lowest index
- * which is at least @index. If @index is larger than any present entry, this
- * function returns NULL. The @iter is updated to describe the entry found.
- */
-static inline void __rcu **
-radix_tree_iter_find(const struct radix_tree_root *root,
- struct radix_tree_iter *iter, unsigned long index)
-{
- radix_tree_iter_init(iter, index);
- return radix_tree_next_chunk(root, iter, 0);
-}
-
-static inline unsigned long
-__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
-{
- return iter->index + slots;
-}
-
-/**
- * radix_tree_chunk_size - get current chunk size
- *
- * @iter: pointer to radix tree iterator
- * Returns: current chunk size
- */
-static __always_inline long
-radix_tree_chunk_size(struct radix_tree_iter *iter)
-{
- return iter->next_index - iter->index;
-}
-
-/**
- * radix_tree_next_slot - find next slot in chunk
- *
- * @slot: pointer to current slot
- * @iter: pointer to iterator state
- * @flags: RADIX_TREE_ITER_*, should be constant
- * Returns: pointer to next slot, or NULL if there are no more left
- *
- * This function updates @iter->index in the case of a successful lookup.
- */
-static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
- struct radix_tree_iter *iter, unsigned flags)
-{
- long count = radix_tree_chunk_size(iter);
-
- while (--count > 0) {
- slot++;
- iter->index = __radix_tree_iter_add(iter, 1);
-
- if (likely(*slot))
- goto found;
- }
- return NULL;
-
- found:
- return slot;
-}
-
-/**
- * radix_tree_for_each_slot - iterate over non-empty slots
- *
- * @slot: the void** variable for pointer to slot
- * @root: the struct radix_tree_root pointer
- * @iter: the struct radix_tree_iter pointer
- * @start: iteration starting index
- *
- * @slot points to radix tree slot, @iter->index contains its index.
- */
-#define radix_tree_for_each_slot(slot, root, iter, start) \
- for (slot = radix_tree_iter_init(iter, start) ; \
- slot || (slot = radix_tree_next_chunk(root, iter, 0)) ; \
- slot = radix_tree_next_slot(slot, iter, 0))
#endif /* _LINUX_RADIX_TREE_H */
#include <linux/spinlock.h>
#include <linux/xarray.h>
-static int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
- unsigned long max, gfp_t gfp)
-{
- struct radix_tree_iter iter;
- void __rcu **slot;
- unsigned int base = idr->idr_base;
- unsigned int id = *nextid;
-
- if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
- idr->idr_rt.xa_flags |= IDR_RT_MARKER;
-
- id = (id < base) ? 0 : id - base;
- radix_tree_iter_init(&iter, id);
- slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
- if (IS_ERR(slot))
- return PTR_ERR(slot);
-
- *nextid = iter.index + base;
- /* there is a memory barrier inside radix_tree_iter_replace() */
- radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
- radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
-
- return 0;
-}
-
-/**
- * idr_alloc() - Allocate an ID.
- * @idr: IDR handle.
- * @ptr: Pointer to be associated with the new ID.
- * @start: The minimum ID (inclusive).
- * @end: The maximum ID (exclusive).
- * @gfp: Memory allocation flags.
- *
- * Allocates an unused ID in the range specified by @start and @end. If
- * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
- * callers to use @start + N as @end as long as N is within integer range.
- *
- * The caller should provide their own locking to ensure that two
- * concurrent modifications to the IDR are not possible. Read-only
- * accesses to the IDR may be done under the RCU read lock or may
- * exclude simultaneous writers.
- *
- * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
- * or -ENOSPC if no free IDs could be found.
- */
-int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
-{
- u32 id = start;
- int ret;
-
- if (WARN_ON_ONCE(start < 0))
- return -EINVAL;
-
- ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
- if (ret)
- return ret;
-
- return id;
-}
-EXPORT_SYMBOL_GPL(idr_alloc);
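The counterpart for converted callers is xa_alloc(); the mapping is
roughly as follows (a sketch; "things" and "ptr" are illustrative names,
and the error values are those documented for the XArray):

	u32 id;
	int err;

	/* Allocate an unused ID in [start, end - 1] and bind ptr to it.
	 * xa_limit_31b would match idr_alloc()'s INT_MAX ceiling. */
	err = xa_alloc(&things, &id, ptr, XA_LIMIT(start, end - 1), GFP_KERNEL);
	if (err < 0)
		return err;	/* -EBUSY if no IDs free, -ENOMEM on OOM */

	xa_erase(&things, id);	/* the idr_remove() counterpart */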
-
-/**
- * idr_remove() - Remove an ID from the IDR.
- * @idr: IDR handle.
- * @id: Pointer ID.
- *
- * Removes this ID from the IDR. If the ID was not previously in the IDR,
- * this function returns %NULL.
- *
- * Since this function modifies the IDR, the caller should provide their
- * own locking to ensure that concurrent modification of the same IDR is
- * not possible.
- *
- * Return: The pointer formerly associated with this ID.
- */
-void *idr_remove(struct idr *idr, unsigned long id)
-{
- return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
-}
-EXPORT_SYMBOL_GPL(idr_remove);
-
-/**
- * idr_find() - Return pointer for given ID.
- * @idr: IDR handle.
- * @id: Pointer ID.
- *
- * Looks up the pointer associated with this ID. A %NULL pointer may
- * indicate that @id is not allocated or that the %NULL pointer was
- * associated with this ID.
- *
- * This function can be called under rcu_read_lock(), given that the leaf
- * pointers' lifetimes are correctly managed.
- *
- * Return: The pointer associated with this ID.
- */
-void *idr_find(const struct idr *idr, unsigned long id)
-{
- return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
-}
-EXPORT_SYMBOL_GPL(idr_find);
-
-/**
- * idr_get_next() - Find next populated entry.
- * @idr: IDR handle.
- * @nextid: Pointer to an ID.
- *
- * Returns the next populated entry in the tree with an ID greater than
- * or equal to the value pointed to by @nextid. On exit, @nextid is updated
- * to the ID of the found value. To use in a loop, the value pointed to by
- * nextid must be incremented by the user.
- */
-void *idr_get_next(struct idr *idr, int *nextid)
-{
- struct radix_tree_iter iter;
- void __rcu **slot;
- void *entry = NULL;
- unsigned long base = idr->idr_base;
- unsigned long id = *nextid;
-
- id = (id < base) ? 0 : id - base;
- radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
- entry = rcu_dereference_raw(*slot);
- if (!entry)
- continue;
- if (!xa_is_internal(entry))
- break;
- if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
- break;
- slot = radix_tree_iter_retry(&iter);
- }
- if (!slot)
- return NULL;
- id = iter.index + base;
-
- if (WARN_ON_ONCE(id > INT_MAX))
- return NULL;
-
- *nextid = id;
- return entry;
-}
-EXPORT_SYMBOL(idr_get_next);
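Converted code that cannot use a loop macro uses xa_find() and
xa_find_after(), the rough counterparts of idr_get_next() (a sketch):

	unsigned long index = 0;
	void *entry;

	/* first present entry at or above index, like idr_get_next() */
	entry = xa_find(&things, &index, ULONG_MAX, XA_PRESENT);
	/* strictly after index, like bumping nextid and calling again */
	entry = xa_find_after(&things, &index, ULONG_MAX, XA_PRESENT);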
-
/**
* DOC: IDA description
*
#include <linux/kmemleak.h>
#include <linux/percpu.h>
#include <linux/preempt.h> /* in_interrupt() */
-#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/xarray.h>
-
/*
* Radix tree node cache.
*/
struct kmem_cache *radix_tree_node_cachep;
-static inline struct radix_tree_node *entry_to_node(void *ptr)
-{
- return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
-}
-
-static inline void *node_to_entry(void *ptr)
-{
- return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
-}
-
-#define RADIX_TREE_RETRY XA_RETRY_ENTRY
-
-static inline unsigned long
-get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
-{
- return parent ? slot - parent->slots : 0;
-}
-
-static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
- struct radix_tree_node **nodep, unsigned long index)
-{
- unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
- void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
-
- *nodep = (void *)entry;
- return offset;
-}
-
-static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
- int offset)
-{
- __set_bit(offset, node->tags[tag]);
-}
-
-static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
- int offset)
-{
- __clear_bit(offset, node->tags[tag]);
-}
-
-static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
- int offset)
-{
- return test_bit(offset, node->tags[tag]);
-}
-
-static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
-{
- root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
-}
-
-static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
-{
- root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
-}
-
-static inline void root_tag_clear_all(struct radix_tree_root *root)
-{
- root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
-}
-
-static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
-{
- return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
-}
-
-static inline unsigned root_tags_get(const struct radix_tree_root *root)
-{
- return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
-}
-
-static inline bool is_idr(const struct radix_tree_root *root)
-{
- return !!(root->xa_flags & ROOT_IS_IDR);
-}
-
-/*
- * Returns 1 if any slot in the node has this tag set.
- * Otherwise returns 0.
- */
-static inline int any_tag_set(const struct radix_tree_node *node,
- unsigned int tag)
-{
- unsigned idx;
- for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
- if (node->tags[tag][idx])
- return 1;
- }
- return 0;
-}
-
-static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
-{
- bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
-}
-
-/**
- * radix_tree_find_next_bit - find the next set bit in a memory region
- *
- * @node: radix tree node whose @tag bitmap to search
- * @tag: the tag index whose bitmap to search
- * @offset: the bit number to start searching at
- *
- * Unrollable variant of find_next_bit() for constant size arrays.
- * Tail bits starting from RADIX_TREE_MAP_SIZE up to
- * roundup(RADIX_TREE_MAP_SIZE, BITS_PER_LONG) must be zero.
- * Returns the next bit offset, or RADIX_TREE_MAP_SIZE if nothing found.
- */
-static __always_inline unsigned long
-radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
- unsigned long offset)
-{
- const unsigned long *addr = node->tags[tag];
-
- if (offset < RADIX_TREE_MAP_SIZE) {
- unsigned long tmp;
-
- addr += offset / BITS_PER_LONG;
- tmp = *addr >> (offset % BITS_PER_LONG);
- if (tmp)
- return __ffs(tmp) + offset;
- offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
- while (offset < RADIX_TREE_MAP_SIZE) {
- tmp = *++addr;
- if (tmp)
- return __ffs(tmp) + offset;
- offset += BITS_PER_LONG;
- }
- }
- return RADIX_TREE_MAP_SIZE;
-}
-
-static unsigned int iter_offset(const struct radix_tree_iter *iter)
-{
- return iter->index & RADIX_TREE_MAP_MASK;
-}
-
-/*
- * The maximum index which can be stored in a radix tree
- */
-static inline unsigned long shift_maxindex(unsigned int shift)
-{
- return (RADIX_TREE_MAP_SIZE << shift) - 1;
-}
-
-static inline unsigned long node_maxindex(const struct radix_tree_node *node)
-{
- return shift_maxindex(node->shift);
-}
-
-static unsigned long next_index(unsigned long index,
- const struct radix_tree_node *node,
- unsigned long offset)
-{
- return (index & ~node_maxindex(node)) + (offset << node->shift);
-}
-
-static struct radix_tree_node *
-radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
- struct radix_tree_root *root,
- unsigned int shift, unsigned int offset,
- unsigned int count, unsigned int nr_values)
-{
- struct radix_tree_node *ret;
-
- ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
- BUG_ON(radix_tree_is_internal_node(ret));
- if (ret) {
- ret->shift = shift;
- ret->offset = offset;
- ret->count = count;
- ret->nr_values = nr_values;
- ret->parent = parent;
- ret->tree = root;
- }
- return ret;
-}
-
void radix_tree_node_rcu_free(struct rcu_head *head)
{
- struct radix_tree_node *node =
- container_of(head, struct radix_tree_node, rcu_head);
+ struct xa_node *node = container_of(head, struct xa_node, rcu_head);
/*
* Must only free zeroed nodes into the slab. We can be left with
kmem_cache_free(radix_tree_node_cachep, node);
}
-static inline void
-radix_tree_node_free(struct radix_tree_node *node)
-{
- call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
-}
-
-static unsigned radix_tree_load_root(const struct radix_tree_root *root,
- struct radix_tree_node **nodep, unsigned long *maxindex)
-{
- struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
-
- *nodep = node;
-
- if (likely(radix_tree_is_internal_node(node))) {
- node = entry_to_node(node);
- *maxindex = node_maxindex(node);
- return node->shift + RADIX_TREE_MAP_SHIFT;
- }
-
- *maxindex = 0;
- return 0;
-}
-
-/*
- * Extend a radix tree so it can store key @index.
- */
-static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
- unsigned long index, unsigned int shift)
-{
- void *entry;
- unsigned int maxshift;
- int tag;
-
- /* Figure out what the shift should be. */
- maxshift = shift;
- while (index > shift_maxindex(maxshift))
- maxshift += RADIX_TREE_MAP_SHIFT;
-
- entry = rcu_dereference_raw(root->xa_head);
- if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
- goto out;
-
- do {
- struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
- root, shift, 0, 1, 0);
- if (!node)
- return -ENOMEM;
-
- if (is_idr(root)) {
- all_tag_set(node, IDR_FREE);
- if (!root_tag_get(root, IDR_FREE)) {
- tag_clear(node, IDR_FREE, 0);
- root_tag_set(root, IDR_FREE);
- }
- } else {
- /* Propagate the aggregated tag info to the new child */
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
- if (root_tag_get(root, tag))
- tag_set(node, tag, 0);
- }
- }
-
- BUG_ON(shift > BITS_PER_LONG);
- if (radix_tree_is_internal_node(entry)) {
- entry_to_node(entry)->parent = node;
- } else if (xa_is_value(entry)) {
- /* Moving a value entry root->xa_head to a node */
- node->nr_values = 1;
- }
- /*
- * entry was already in the radix tree, so we do not need
- * rcu_assign_pointer here
- */
- node->slots[0] = (void __rcu *)entry;
- entry = node_to_entry(node);
- rcu_assign_pointer(root->xa_head, entry);
- shift += RADIX_TREE_MAP_SHIFT;
- } while (shift <= maxshift);
-out:
- return maxshift + RADIX_TREE_MAP_SHIFT;
-}
-
-/**
- * radix_tree_shrink - shrink radix tree to minimum height
- * @root: radix tree root
- */
-static inline bool radix_tree_shrink(struct radix_tree_root *root)
-{
- bool shrunk = false;
-
- for (;;) {
- struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
- struct radix_tree_node *child;
-
- if (!radix_tree_is_internal_node(node))
- break;
- node = entry_to_node(node);
-
- /*
-	 * If the candidate node has more than one child, or its child
-	 * is not at the leftmost slot, we cannot shrink.
- */
- if (node->count != 1)
- break;
- child = rcu_dereference_raw(node->slots[0]);
- if (!child)
- break;
-
- /*
- * For an IDR, we must not shrink entry 0 into the root in
- * case somebody calls idr_replace() with a pointer that
- * appears to be an internal entry
- */
- if (!node->shift && is_idr(root))
- break;
-
- if (radix_tree_is_internal_node(child))
- entry_to_node(child)->parent = NULL;
-
- /*
- * We don't need rcu_assign_pointer(), since we are simply
- * moving the node from one part of the tree to another: if it
- * was safe to dereference the old pointer to it
- * (node->slots[0]), it will be safe to dereference the new
- * one (root->xa_head) as far as dependent read barriers go.
- */
- root->xa_head = (void __rcu *)child;
- if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
- root_tag_clear(root, IDR_FREE);
-
- /*
- * We have a dilemma here. The node's slot[0] must not be
- * NULLed in case there are concurrent lookups expecting to
- * find the item. However if this was a bottom-level node,
- * then it may be subject to the slot pointer being visible
-	 * to callers dereferencing it. If the item corresponding to
- * slot[0] is subsequently deleted, these callers would expect
- * their slot to become empty sooner or later.
- *
- * For example, lockless pagecache will look up a slot, deref
- * the page pointer, and if the page has 0 refcount it means it
- * was concurrently deleted from pagecache so try the deref
- * again. Fortunately there is already a requirement for logic
- * to retry the entire slot lookup -- the indirect pointer
- * problem (replacing direct root node with an indirect pointer
- * also results in a stale slot). So tag the slot as indirect
- * to force callers to retry.
- */
- node->count = 0;
- if (!radix_tree_is_internal_node(child)) {
- node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
- }
-
- WARN_ON_ONCE(!list_empty(&node->private_list));
- radix_tree_node_free(node);
- shrunk = true;
- }
-
- return shrunk;
-}
-
-static bool delete_node(struct radix_tree_root *root,
- struct radix_tree_node *node)
-{
- bool deleted = false;
-
- do {
- struct radix_tree_node *parent;
-
- if (node->count) {
- if (node_to_entry(node) ==
- rcu_dereference_raw(root->xa_head))
- deleted |= radix_tree_shrink(root);
- return deleted;
- }
-
- parent = node->parent;
- if (parent) {
- parent->slots[node->offset] = NULL;
- parent->count--;
- } else {
- /*
- * Shouldn't the tags already have all been cleared
- * by the caller?
- */
- if (!is_idr(root))
- root_tag_clear_all(root);
- root->xa_head = NULL;
- }
-
- WARN_ON_ONCE(!list_empty(&node->private_list));
- radix_tree_node_free(node);
- deleted = true;
-
- node = parent;
- } while (node);
-
- return deleted;
-}
-
-/*
- * Free any nodes below this node. The tree is presumed to not need
- * shrinking, and any user data in the tree is presumed to not need a
- * destructor called on it. If we need to add a destructor, we can
- * add that functionality later. Note that we may not clear tags or
- * slots from the tree as an RCU walker may still have a pointer into
- * this subtree. We could replace the entries with RADIX_TREE_RETRY,
- * but we'll still have to clear those in rcu_free.
- */
-static void radix_tree_free_nodes(struct radix_tree_node *node)
-{
- unsigned offset = 0;
- struct radix_tree_node *child = entry_to_node(node);
-
- for (;;) {
- void *entry = rcu_dereference_raw(child->slots[offset]);
- if (xa_is_node(entry) && child->shift) {
- child = entry_to_node(entry);
- offset = 0;
- continue;
- }
- offset++;
- while (offset == RADIX_TREE_MAP_SIZE) {
- struct radix_tree_node *old = child;
- offset = child->offset + 1;
- child = child->parent;
- WARN_ON_ONCE(!list_empty(&old->private_list));
- radix_tree_node_free(old);
- if (old == entry_to_node(node))
- return;
- }
- }
-}
-
-static inline int insert_entries(struct radix_tree_node *node,
- void __rcu **slot, void *item, bool replace)
-{
- if (*slot)
- return -EEXIST;
- rcu_assign_pointer(*slot, item);
- if (node) {
- node->count++;
- if (xa_is_value(item))
- node->nr_values++;
- }
- return 1;
-}
-
-/**
- * __radix_tree_lookup - lookup an item in a radix tree
- * @root: radix tree root
- * @index: index key
- * @nodep: returns node
- * @slotp: returns slot
- *
- * Lookup and return the item at position @index in the radix
- * tree @root.
- *
- * Until there is more than one item in the tree, no nodes are
- * allocated and @root->xa_head is used as a direct slot instead of
- * pointing to a node, in which case *@nodep will be NULL.
- */
-void *__radix_tree_lookup(const struct radix_tree_root *root,
- unsigned long index, struct radix_tree_node **nodep,
- void __rcu ***slotp)
-{
- struct radix_tree_node *node, *parent;
- unsigned long maxindex;
- void __rcu **slot;
-
- restart:
- parent = NULL;
- slot = (void __rcu **)&root->xa_head;
- radix_tree_load_root(root, &node, &maxindex);
- if (index > maxindex)
- return NULL;
-
- while (radix_tree_is_internal_node(node)) {
- unsigned offset;
-
- parent = entry_to_node(node);
- offset = radix_tree_descend(parent, &node, index);
- slot = parent->slots + offset;
- if (node == RADIX_TREE_RETRY)
- goto restart;
- if (parent->shift == 0)
- break;
- }
-
- if (nodep)
- *nodep = parent;
- if (slotp)
- *slotp = slot;
- return node;
-}
-
-/**
- * radix_tree_lookup - perform lookup operation on a radix tree
- * @root: radix tree root
- * @index: index key
- *
- * Lookup the item at the position @index in the radix tree @root.
- *
- * This function can be called under rcu_read_lock, however the caller
- * must manage lifetimes of leaf nodes (eg. RCU may also be used to free
- * them safely). No RCU barriers are required to access or modify the
- * returned item, however.
- */
-void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
-{
- return __radix_tree_lookup(root, index, NULL, NULL);
-}
-
-static void replace_slot(void __rcu **slot, void *item,
- struct radix_tree_node *node, int count, int values)
-{
- if (node && (count || values)) {
- node->count += count;
- node->nr_values += values;
- }
-
- rcu_assign_pointer(*slot, item);
-}
-
-static bool node_tag_get(const struct radix_tree_root *root,
- const struct radix_tree_node *node,
- unsigned int tag, unsigned int offset)
-{
- if (node)
- return tag_get(node, tag, offset);
- return root_tag_get(root, tag);
-}
-
-/*
- * IDR users want to be able to store NULL in the tree, so if the slot isn't
- * free, don't adjust the count, even if it's transitioning between NULL and
- * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still
- * have empty bits, but it only stores NULL in slots when they're being
- * deleted.
- */
-static int calculate_count(struct radix_tree_root *root,
- struct radix_tree_node *node, void __rcu **slot,
- void *item, void *old)
-{
- if (is_idr(root)) {
- unsigned offset = get_slot_offset(node, slot);
- bool free = node_tag_get(root, node, IDR_FREE, offset);
- if (!free)
- return 0;
- if (!old)
- return 1;
- }
- return !!item - !!old;
-}
-
-/**
- * __radix_tree_replace - replace item in a slot
- * @root: radix tree root
- * @node: pointer to tree node
- * @slot: pointer to slot in @node
- * @item: new item to store in the slot.
- *
- * For use with __radix_tree_lookup(). Caller must hold tree write locked
- * across slot lookup and replacement.
- */
-void __radix_tree_replace(struct radix_tree_root *root,
- struct radix_tree_node *node,
- void __rcu **slot, void *item)
-{
- void *old = rcu_dereference_raw(*slot);
- int values = !!xa_is_value(item) - !!xa_is_value(old);
- int count = calculate_count(root, node, slot, item, old);
-
- /*
- * This function supports replacing value entries and
- * deleting entries, but that needs accounting against the
- * node unless the slot is root->xa_head.
- */
- WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
- (count || values));
- replace_slot(slot, item, node, count, values);
-
- if (!node)
- return;
-
- delete_node(root, node);
-}
-
-/**
- * radix_tree_iter_replace - replace item in a slot
- * @root: radix tree root
- * @slot: pointer to slot
- * @item: new item to store in the slot.
- *
- * For use with radix_tree_for_each_slot().
- * Caller must hold tree write locked.
- */
-void radix_tree_iter_replace(struct radix_tree_root *root,
- const struct radix_tree_iter *iter,
- void __rcu **slot, void *item)
-{
- __radix_tree_replace(root, iter->node, slot, item);
-}
-
-static void node_tag_set(struct radix_tree_root *root,
- struct radix_tree_node *node,
- unsigned int tag, unsigned int offset)
-{
- while (node) {
- if (tag_get(node, tag, offset))
- return;
- tag_set(node, tag, offset);
- offset = node->offset;
- node = node->parent;
- }
-
- if (!root_tag_get(root, tag))
- root_tag_set(root, tag);
-}
-
-static void node_tag_clear(struct radix_tree_root *root,
- struct radix_tree_node *node,
- unsigned int tag, unsigned int offset)
-{
- while (node) {
- if (!tag_get(node, tag, offset))
- return;
- tag_clear(node, tag, offset);
- if (any_tag_set(node, tag))
- return;
-
- offset = node->offset;
- node = node->parent;
- }
-
- /* clear the root's tag bit */
- if (root_tag_get(root, tag))
- root_tag_clear(root, tag);
-}
-
-/**
- * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
- * @root: radix tree root
- * @iter: iterator state
- * @tag: tag to clear
- */
-void radix_tree_iter_tag_clear(struct radix_tree_root *root,
- const struct radix_tree_iter *iter, unsigned int tag)
-{
- node_tag_clear(root, iter->node, tag, iter_offset(iter));
-}
-
-/**
- * radix_tree_tag_get - get a tag on a radix tree node
- * @root: radix tree root
- * @index: index key
- * @tag: tag index (< RADIX_TREE_MAX_TAGS)
- *
- * Return values:
- *
- * 0: tag not present or not set
- * 1: tag set
- *
- * Note that the return value of this function may not be relied on, even if
- * the RCU lock is held, unless tag modification and node deletion are excluded
- * from concurrency.
- */
-int radix_tree_tag_get(const struct radix_tree_root *root,
- unsigned long index, unsigned int tag)
-{
- struct radix_tree_node *node, *parent;
- unsigned long maxindex;
-
- if (!root_tag_get(root, tag))
- return 0;
-
- radix_tree_load_root(root, &node, &maxindex);
- if (index > maxindex)
- return 0;
-
- while (radix_tree_is_internal_node(node)) {
- unsigned offset;
-
- parent = entry_to_node(node);
- offset = radix_tree_descend(parent, &node, index);
-
- if (!tag_get(parent, tag, offset))
- return 0;
- if (node == RADIX_TREE_RETRY)
- break;
- }
-
- return 1;
-}
-
-/**
- * radix_tree_next_chunk - find next chunk of slots for iteration
- *
- * @root: radix tree root
- * @iter: iterator state
- * @flags: RADIX_TREE_ITER_* flags and tag index
- * Returns: pointer to chunk first slot, or NULL if iteration is over
- */
-void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
- struct radix_tree_iter *iter, unsigned flags)
-{
- struct radix_tree_node *node, *child;
- unsigned long index, offset, maxindex;
-
- /*
- * Catch next_index overflow after ~0UL. iter->index never overflows
- * during iterating; it can be zero only at the beginning.
- * And we cannot overflow iter->next_index in a single step,
- * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
- *
-	 * This condition is also used by radix_tree_next_slot() to stop
-	 * contiguous iterating, and to forbid switching to the next chunk.
- */
- index = iter->next_index;
- if (!index && iter->index)
- return NULL;
-
- restart:
- radix_tree_load_root(root, &child, &maxindex);
- if (index > maxindex)
- return NULL;
- if (!child)
- return NULL;
-
- if (!radix_tree_is_internal_node(child)) {
- /* Single-slot tree */
- iter->index = index;
- iter->next_index = maxindex + 1;
- iter->node = NULL;
- return (void __rcu **)&root->xa_head;
- }
-
- do {
- node = entry_to_node(child);
- offset = radix_tree_descend(node, &child, index);
-
- if (!child) {
- while (++offset < RADIX_TREE_MAP_SIZE) {
- void *slot = rcu_dereference_raw(
- node->slots[offset]);
- if (slot)
- break;
- }
- index &= ~node_maxindex(node);
- index += offset << node->shift;
- /* Overflow after ~0UL */
- if (!index)
- return NULL;
- if (offset == RADIX_TREE_MAP_SIZE)
- goto restart;
- child = rcu_dereference_raw(node->slots[offset]);
- }
-
- if (!child)
- goto restart;
- if (child == RADIX_TREE_RETRY)
- break;
- } while (node->shift && radix_tree_is_internal_node(child));
-
- /* Update the iterator state */
- iter->index = (index &~ node_maxindex(node)) | offset;
- iter->next_index = (index | node_maxindex(node)) + 1;
- iter->node = node;
-
- return node->slots + offset;
-}
-EXPORT_SYMBOL(radix_tree_next_chunk);
-
-static bool __radix_tree_delete(struct radix_tree_root *root,
- struct radix_tree_node *node, void __rcu **slot)
-{
- void *old = rcu_dereference_raw(*slot);
- int values = xa_is_value(old) ? -1 : 0;
- unsigned offset = get_slot_offset(node, slot);
- int tag;
-
- if (is_idr(root))
- node_tag_set(root, node, IDR_FREE, offset);
- else
- for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
- node_tag_clear(root, node, tag, offset);
-
- replace_slot(slot, NULL, node, -1, values);
- return node && delete_node(root, node);
-}
-
-/**
- * radix_tree_delete_item - delete an item from a radix tree
- * @root: radix tree root
- * @index: index key
- * @item: expected item
- *
- * Remove @item at @index from the radix tree rooted at @root.
- *
- * Return: the deleted entry, or %NULL if it was not present
- * or the entry at the given @index was not @item.
- */
-void *radix_tree_delete_item(struct radix_tree_root *root,
- unsigned long index, void *item)
-{
- struct radix_tree_node *node = NULL;
- void __rcu **slot = NULL;
- void *entry;
-
- entry = __radix_tree_lookup(root, index, &node, &slot);
- if (!slot)
- return NULL;
- if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
- get_slot_offset(node, slot))))
- return NULL;
-
- if (item && entry != item)
- return NULL;
-
- __radix_tree_delete(root, node, slot);
-
- return entry;
-}
-
-/**
- * radix_tree_tagged - test whether any items in the tree are tagged
- * @root: radix tree root
- * @tag: tag to test
- */
-int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
-{
- return root_tag_get(root, tag);
-}
-EXPORT_SYMBOL(radix_tree_tagged);
-
-void __rcu **idr_get_free(struct radix_tree_root *root,
- struct radix_tree_iter *iter, gfp_t gfp,
- unsigned long max)
-{
- struct radix_tree_node *node = NULL, *child;
- void __rcu **slot = (void __rcu **)&root->xa_head;
- unsigned long maxindex, start = iter->next_index;
- unsigned int shift, offset = 0;
-
- grow:
- shift = radix_tree_load_root(root, &child, &maxindex);
- if (!radix_tree_tagged(root, IDR_FREE))
- start = max(start, maxindex + 1);
- if (start > max)
- return ERR_PTR(-ENOSPC);
-
- if (start > maxindex) {
- int error = radix_tree_extend(root, gfp, start, shift);
- if (error < 0)
- return ERR_PTR(error);
- shift = error;
- child = rcu_dereference_raw(root->xa_head);
- }
- if (start == 0 && shift == 0)
- shift = RADIX_TREE_MAP_SHIFT;
-
- while (shift) {
- shift -= RADIX_TREE_MAP_SHIFT;
- if (child == NULL) {
- /* Have to add a child node. */
- child = radix_tree_node_alloc(gfp, node, root, shift,
- offset, 0, 0);
- if (!child)
- return ERR_PTR(-ENOMEM);
- all_tag_set(child, IDR_FREE);
- rcu_assign_pointer(*slot, node_to_entry(child));
- if (node)
- node->count++;
- } else if (!radix_tree_is_internal_node(child))
- break;
-
- node = entry_to_node(child);
- offset = radix_tree_descend(node, &child, start);
- if (!tag_get(node, IDR_FREE, offset)) {
- offset = radix_tree_find_next_bit(node, IDR_FREE,
- offset + 1);
- start = next_index(start, node, offset);
- if (start > max)
- return ERR_PTR(-ENOSPC);
- while (offset == RADIX_TREE_MAP_SIZE) {
- offset = node->offset + 1;
- node = node->parent;
- if (!node)
- goto grow;
- shift = node->shift;
- }
- child = rcu_dereference_raw(node->slots[offset]);
- }
- slot = &node->slots[offset];
- }
-
- iter->index = start;
- if (node)
- iter->next_index = 1 + min(max, (start | node_maxindex(node)));
- else
- iter->next_index = 1;
- iter->node = node;
-
- return slot;
-}
-
-/**
- * idr_destroy - release all internal memory from an IDR
- * @idr: idr handle
- *
- * After this function is called, the IDR is empty, and may be reused or
- * the data structure containing it may be freed.
- *
- * A typical clean-up sequence for objects stored in an idr tree will use
- * idr_for_each_entry() to free all objects, if necessary, then idr_destroy() to
- * free the memory used to keep track of those objects.
- */
-void idr_destroy(struct idr *idr)
-{
- struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
- if (radix_tree_is_internal_node(node))
- radix_tree_free_nodes(node);
- idr->idr_rt.xa_head = NULL;
- root_tag_set(&idr->idr_rt, IDR_FREE);
-}
-EXPORT_SYMBOL(idr_destroy);
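The equivalent for converted code is xa_destroy(), which has the same
contract: the internal nodes are released, but the objects pointed to are
not freed (a sketch; "things" is an illustrative name):

	void *entry;
	unsigned long index;

	xa_for_each(&things, index, entry)
		kfree(entry);		/* free the objects first, if needed */
	xa_destroy(&things);		/* then release the internal memory */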
-
static void
radix_tree_node_ctor(void *arg)
{
- struct radix_tree_node *node = arg;
+ struct xa_node *node = arg;
memset(node, 0, sizeof(*node));
INIT_LIST_HEAD(&node->private_list);
void __init radix_tree_init(void)
{
- BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
- BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
- sizeof(struct radix_tree_node), 0,
+ sizeof(struct xa_node), 0,
SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
radix_tree_node_ctor);
}
return 0;
}
-void item_idr_remove(struct idr *idr, int id)
-{
- struct item *item = idr_find(idr, id);
- assert(item->index == id);
- idr_remove(idr, id);
- free(item);
-}
-
/*
* Unlike the radix tree, you can put a NULL pointer -- with care -- into
- * the IDR. Some interfaces, like idr_find() do not distinguish between
* "present, value is NULL" and "not present", but that's exactly what some
* users want.
*/
void idr_null_test(void)
{
int i;
- DEFINE_IDR(idr);
- assert(idr_is_empty(&idr));
- assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
- assert(!idr_is_empty(&idr));
- idr_remove(&idr, 0);
- assert(idr_is_empty(&idr));
- assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
- assert(!idr_is_empty(&idr));
- idr_destroy(&idr);
- assert(idr_is_empty(&idr));
for (i = 0; i < 10; i++) {
- assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
}
- idr_remove(&idr, 5);
- assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
- idr_remove(&idr, 5);
for (i = 0; i < 9; i++) {
- idr_remove(&idr, i);
- assert(!idr_is_empty(&idr));
}
- idr_remove(&idr, 8);
- assert(!idr_is_empty(&idr));
- idr_remove(&idr, 9);
- assert(idr_is_empty(&idr));
- assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
- idr_destroy(&idr);
- assert(idr_is_empty(&idr));
for (i = 1; i < 10; i++) {
- assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
}
- idr_destroy(&idr);
- assert(idr_is_empty(&idr));
}
-void idr_get_next_test(int base)
{
unsigned long i;
int nextid;
struct item *item;
- DEFINE_IDR(idr);
- idr_init_base(&idr, base);
int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};
for(i = 0; indices[i]; i++) {
item = item_create(indices[i], 0);
- assert(idr_alloc(&idr, item, indices[i], indices[i+1],
- GFP_KERNEL) == indices[i]);
}
for(i = 0, nextid = 0; indices[i]; i++) {
- idr_get_next(&idr, &nextid);
assert(nextid == indices[i]);
nextid++;
}
- idr_for_each_entry(&idr, item, nextid)
item_idr_free(nextid, item, &idr);
- idr_destroy(&idr);
}
static void idr_align_test(struct idr *idr)
void *entry;
for (i = 0; i < 9; i++) {
- BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i);
- idr_for_each_entry(idr, entry, id);
}
- idr_destroy(idr);
for (i = 1; i < 10; i++) {
- BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 1);
- idr_for_each_entry(idr, entry, id);
}
- idr_destroy(idr);
for (i = 2; i < 11; i++) {
- BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 2);
- idr_for_each_entry(idr, entry, id);
}
- idr_destroy(idr);
for (i = 3; i < 12; i++) {
- BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 3);
- idr_for_each_entry(idr, entry, id);
}
- idr_destroy(idr);
for (i = 0; i < 8; i++) {
- BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
- BUG_ON(idr_alloc(idr, &name[i + 1], 0, 0, GFP_KERNEL) != 1);
- idr_for_each_entry(idr, entry, id);
- idr_remove(idr, 1);
- idr_for_each_entry(idr, entry, id);
- idr_remove(idr, 0);
- BUG_ON(!idr_is_empty(idr));
}
for (i = 0; i < 8; i++) {
- BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
- BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 1);
- idr_remove(idr, 1);
- idr_for_each_entry(idr, entry, id);
- idr_remove(idr, 0);
}
}
unsigned long i;
int id;
struct item *item;
- DEFINE_IDR(idr);
for (i = 0; i < 10000; i++) {
item = item_create(i, 0);
- assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
}
- assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);
for (i = 0; i < 5000; i++)
- item_idr_remove(&idr, i);
-
- idr_remove(&idr, 3);
- idr_for_each_entry(&idr, item, id)
item_idr_free(id, item, &idr);
- idr_destroy(&idr);
- assert(idr_is_empty(&idr));
- idr_remove(&idr, 3);
- idr_remove(&idr, 0);
- assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
- idr_remove(&idr, 1);
for (i = 1; i < XA_CHUNK_SIZE; i++)
- assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
- idr_remove(&idr, 1 << 30);
- idr_destroy(&idr);
for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
item = item_create(i, 0);
- assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
}
- assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
- assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC);
- idr_for_each_entry(&idr, item, id)
item_idr_free(id, item, &idr);
- idr_destroy(&idr);
- idr_destroy(&idr);
- assert(idr_is_empty(&idr));
for (i = 1; i < 10000; i++) {
item = item_create(i, 0);
- assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
}
- idr_for_each_entry(&idr, item, id)
item_idr_free(id, item, &idr);
- idr_destroy(&idr);
idr_null_test();
- idr_get_next_test(0);
- idr_get_next_test(1);
- idr_get_next_test(4);
idr_align_test(&idr);
idr_find_test();
}