irq: Convert timings to XArray
author: Matthew Wilcox <willy@infradead.org>
Tue, 12 Feb 2019 14:36:36 +0000 (09:36 -0500)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 9 Aug 2019 01:38:16 +0000 (21:38 -0400)
Signed-off-by: Matthew Wilcox <willy@infradead.org>
kernel/irq/timings.c

index e960d7ce7bcced7c03f9c1dca69c3e1e516ed062..dc275f3a8be0717c055d4c7bc6c44a516a67c20f 100644 (file)
@@ -8,10 +8,10 @@
 #include <linux/static_key.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/idr.h>
 #include <linux/irq.h>
 #include <linux/math64.h>
 #include <linux/log2.h>
+#include <linux/xarray.h>
 
 #include <trace/events/irq.h>
 
@@ -21,7 +21,7 @@ DEFINE_STATIC_KEY_FALSE(irq_timing_enabled);
 
 DEFINE_PER_CPU(struct irq_timings, irq_timings);
 
-static DEFINE_IDR(irqt_stats);
+static DEFINE_XARRAY(irqt_stats);
 
 void irq_timings_enable(void)
 {
@@ -534,7 +534,8 @@ u64 irq_timings_next_event(u64 now)
        struct irqt_stat *irqs;
        struct irqt_stat __percpu *s;
        u64 ts, next_evt = U64_MAX;
-       int i, irq = 0;
+       unsigned long i;
+       int irq = 0;
 
        /*
         * This function must be called with the local irq disabled in
@@ -562,7 +563,7 @@ u64 irq_timings_next_event(u64 now)
         */
        for_each_irqts(i, irqts) {
                irq = irq_timing_decode(irqts->values[i], &ts);
-               s = idr_find(&irqt_stats, irq);
+               s = xa_load(&irqt_stats, irq);
                if (s)
                        irq_timings_store(irq, this_cpu_ptr(s), ts);
        }
@@ -571,8 +572,7 @@ u64 irq_timings_next_event(u64 now)
         * Look in the list of interrupts' statistics, the earliest
         * next event.
         */
-       idr_for_each_entry(&irqt_stats, s, i) {
-
+       xa_for_each(&irqt_stats, i, s) {
                irqs = this_cpu_ptr(s);
 
                ts = __irq_timings_next_event(irqs, i, now);
@@ -590,10 +590,9 @@ void irq_timings_free(int irq)
 {
        struct irqt_stat __percpu *s;
 
-       s = idr_find(&irqt_stats, irq);
+       s = xa_erase(&irqt_stats, irq);
        if (s) {
                free_percpu(s);
-               idr_remove(&irqt_stats, irq);
        }
 }
 
@@ -608,7 +607,7 @@ int irq_timings_alloc(int irq)
         * same interrupt number. Just bail out in case the per cpu
         * stat structure is already allocated.
         */
-       s = idr_find(&irqt_stats, irq);
+       s = xa_load(&irqt_stats, irq);
        if (s)
                return 0;
 
@@ -616,10 +615,7 @@ int irq_timings_alloc(int irq)
        if (!s)
                return -ENOMEM;
 
-       idr_preload(GFP_KERNEL);
-       id = idr_alloc(&irqt_stats, s, irq, irq + 1, GFP_NOWAIT);
-       idr_preload_end();
-
+       id = xa_insert(&irqt_stats, irq, s, GFP_KERNEL);
        if (id < 0) {
                free_percpu(s);
                return id;
@@ -778,7 +774,7 @@ static int __init irq_timings_test_irqs(struct timings_intervals *ti)
                return ret;
        }
 
-       s = idr_find(&irqt_stats, irq);
+       s = xa_load(&irqt_stats, irq);
        if (!s) {
                ret = -EIDRM;
                goto out;