#include <linux/static_key.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/idr.h>
#include <linux/irq.h>
#include <linux/math64.h>
#include <linux/log2.h>
+#include <linux/xarray.h>
#include <trace/events/irq.h>
DEFINE_PER_CPU(struct irq_timings, irq_timings);
-static DEFINE_IDR(irqt_stats);
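+/*
+ * An XArray replaces the IDR as the irq number -> per-cpu stats map.
+ * DEFINE_XARRAY() initialises it statically, including its internal
+ * spinlock, so no separate init call is needed.
+ */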
+static DEFINE_XARRAY(irqt_stats);
u64 irq_timings_next_event(u64 now)
{
struct irq_timings *irqts = this_cpu_ptr(&irq_timings);
struct irqt_stat *irqs;
struct irqt_stat __percpu *s;
u64 ts, next_evt = U64_MAX;
- int i, irq = 0;
+ unsigned long i;
+ int irq = 0;
/*
* This function must be called with the local irq disabled in
* order to prevent the timings circular buffer from being
* updated while we are reading it.
*/
for_each_irqts(i, irqts) {
irq = irq_timing_decode(irqts->values[i], &ts);
- s = idr_find(&irqt_stats, irq);
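+ /* xa_load() takes the RCU read lock internally, so the lookup needs no extra locking here */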
+ s = xa_load(&irqt_stats, irq);
if (s)
irq_timings_store(irq, this_cpu_ptr(s), ts);
}
/*
* Look in the list of interrupts' statistics for the earliest
* next event.
*/
- idr_for_each_entry(&irqt_stats, s, i) {
-
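+ /*
+  * xa_for_each() walks every entry present in the XArray; its index
+  * variable must be an unsigned long, hence the change to 'i' above.
+  */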
+ xa_for_each(&irqt_stats, i, s) {
irqs = this_cpu_ptr(s);
ts = __irq_timings_next_event(irqs, i, now);
void irq_timings_free(int irq)
{
struct irqt_stat __percpu *s;
- s = idr_find(&irqt_stats, irq);
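+ /*
+  * xa_erase() removes the entry and returns what was stored there,
+  * replacing the previous idr_find() + idr_remove() pair.
+  */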
+ s = xa_erase(&irqt_stats, irq);
if (s) {
free_percpu(s);
- idr_remove(&irqt_stats, irq);
}
}
int irq_timings_alloc(int irq)
{
struct irqt_stat __percpu *s;
int id;

/*
* Some platforms can have the same private interrupt per cpu,
* so this function may be called several times with the
* same interrupt number. Just bail out in case the per cpu
* stat structure is already allocated.
*/
- s = idr_find(&irqt_stats, irq);
+ s = xa_load(&irqt_stats, irq);
if (s)
return 0;
s = alloc_percpu(*s);
if (!s)
return -ENOMEM;
- idr_preload(GFP_KERNEL);
- id = idr_alloc(&irqt_stats, s, irq, irq + 1, GFP_NOWAIT);
- idr_preload_end();
-
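+ /*
+  * xa_insert() allocates internally with the given GFP flags, so the
+  * idr_preload() dance is no longer needed. It returns 0 on success,
+  * -EBUSY if an entry already exists at this index, or -ENOMEM.
+  */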
+ id = xa_insert(&irqt_stats, irq, s, GFP_KERNEL);
if (id < 0) {
free_percpu(s);
return id;
}
return ret;
}
- s = idr_find(&irqt_stats, irq);
+ s = xa_load(&irqt_stats, irq);
if (!s) {
ret = -EIDRM;
goto out;