With all users now converted to the XArray, remove idr_alloc_cyclic(), the idr_next cursor with its idr_get_cursor()/idr_set_cursor() accessors, and the associated test code.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
:c:func:`idr_alloc_u32`. If you need IDs that will not fit in a u32,
we will work with you to address your needs.
-If you need to allocate IDs sequentially, you can use
-:c:func:`idr_alloc_cyclic`. The IDR becomes less efficient when dealing
-with larger IDs, so using this function comes at a slight cost.
-
To perform an action on all pointers used by the IDR, you can
either use the callback-based :c:func:`idr_for_each` or the
iterator-style :c:func:`idr_for_each_entry`. You may need to use
struct idr {
struct radix_tree_root idr_rt;
unsigned int idr_base;
- unsigned int idr_next;
};
/*
#define IDR_INIT_BASE(name, base) { \
.idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER), \
.idr_base = (base), \
- .idr_next = 0, \
}
/**
*/
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
-/**
- * idr_get_cursor - Return the current position of the cyclic allocator
- * @idr: idr handle
- *
- * The value returned is the value that will be next returned from
- * idr_alloc_cyclic() if it is free (otherwise the search will start from
- * this position).
- */
-static inline unsigned int idr_get_cursor(const struct idr *idr)
-{
- return READ_ONCE(idr->idr_next);
-}
-
-/**
- * idr_set_cursor - Set the current position of the cyclic allocator
- * @idr: idr handle
- * @val: new position
- *
- * The next call to idr_alloc_cyclic() will return @val if it is free
- * (otherwise the search will start from this position).
- */
-static inline void idr_set_cursor(struct idr *idr, unsigned int val)
-{
- WRITE_ONCE(idr->idr_next, val);
-}
-
/**
* DOC: idr sync
* idr synchronization (stolen from radix-tree.h)
int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
unsigned long max, gfp_t);
-int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
void *idr_remove(struct idr *, unsigned long id);
void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
{
INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
idr->idr_base = base;
- idr->idr_next = 0;
}
/**
}
EXPORT_SYMBOL_GPL(idr_alloc);
-/**
- * idr_alloc_cyclic() - Allocate an ID cyclically.
- * @idr: IDR handle.
- * @ptr: Pointer to be associated with the new ID.
- * @start: The minimum ID (inclusive).
- * @end: The maximum ID (exclusive).
- * @gfp: Memory allocation flags.
- *
- * Allocates an unused ID in the range specified by @nextid and @end. If
- * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
- * callers to use @start + N as @end as long as N is within integer range.
- * The search for an unused ID will start at the last ID allocated and will
- * wrap around to @start if no free IDs are found before reaching @end.
- *
- * The caller should provide their own locking to ensure that two
- * concurrent modifications to the IDR are not possible. Read-only
- * accesses to the IDR may be done under the RCU read lock or may
- * exclude simultaneous writers.
- *
- * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
- * or -ENOSPC if no free IDs could be found.
- */
-int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
-{
- u32 id = idr->idr_next;
- int err, max = end > 0 ? end - 1 : INT_MAX;
-
- if ((int)id < start)
- id = start;
-
- err = idr_alloc_u32(idr, ptr, &id, max, gfp);
- if ((err == -ENOSPC) && (id > start)) {
- id = start;
- err = idr_alloc_u32(idr, ptr, &id, max, gfp);
- }
- if (err)
- return err;
-
- idr->idr_next = id + 1;
- return id;
-}
-EXPORT_SYMBOL(idr_alloc_cyclic);
-
/**
* idr_remove() - Remove an ID from the IDR.
* @idr: IDR handle.
free(item);
}
-void idr_alloc_test(void)
-{
- unsigned long i;
- DEFINE_IDR(idr);
-
- assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
- assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
- idr_remove(&idr, 0x3ffd);
- idr_remove(&idr, 0);
-
- for (i = 0x3ffe; i < 0x4003; i++) {
- int id;
- struct item *item;
-
- if (i < 0x4000)
- item = item_create(i, 0);
- else
- item = item_create(i - 0x3fff, 0);
-
- id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
- assert(id == item->index);
- }
-
- idr_for_each(&idr, item_idr_free, &idr);
- idr_destroy(&idr);
-}
-
void idr_replace_test(void)
{
DEFINE_IDR(idr);
assert(idr_is_empty(&idr));
- idr_set_cursor(&idr, INT_MAX - 3UL);
- for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
- struct item *item;
- unsigned int id;
- if (i <= INT_MAX)
- item = item_create(i, 0);
- else
- item = item_create(i - INT_MAX - 1, 0);
-
- id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
- assert(id == item->index);
- }
-
- idr_for_each(&idr, item_idr_free, &idr);
- idr_destroy(&idr);
- assert(idr_is_empty(&idr));
-
for (i = 1; i < 10000; i++) {
struct item *item = item_create(i, 0);
assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
idr_destroy(&idr);
idr_replace_test();
- idr_alloc_test();
idr_null_test();
idr_nowait_test();
idr_get_next_test(0);