www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
dtrace: get rid of dtrace_gethrtime
author: Kris Van Hees <kris.van.hees@oracle.com>
Wed, 8 Mar 2017 20:23:51 +0000 (15:23 -0500)
committer: Kris Van Hees <kris.van.hees@oracle.com>
Wed, 8 Mar 2017 20:24:29 +0000 (15:24 -0500)
Remove the need for dtrace_gethrtime() and dtrace_getwalltime() because
the current implementations are not deadlock safe.

Signed-off-by: Kris Van Hees <kris.van.hees@oracle.com>
Acked-by: Nick Alcock <nick.alcock@oracle.com>
include/linux/cyclic.h
include/linux/dtrace_os.h
kernel/dtrace/cyclic.c
kernel/dtrace/dtrace_os.c

index 8c06046f3f86e00faa2a63f1def2e8dba74a6c73..e314bfcf666ed6b133f895b0b57d78cf68b06e94 100644 (file)
@@ -16,7 +16,7 @@
 
 typedef uintptr_t      cyclic_id_t;
 typedef uint16_t       cyc_level_t;
-typedef void           (*cyc_func_t)(uintptr_t);
+typedef void           (*cyc_func_t)(uintptr_t, ktime_t);
 
 #define CYCLIC_NONE    ((cyclic_id_t)0)
 
index f5daa4832f36fc5dfe0610671052272f2c465304..90952301a0131bfb14ea04bc982fd3b3937d4816 100644 (file)
@@ -29,9 +29,6 @@ extern void dtrace_os_exit(void);
 extern void dtrace_enable(void);
 extern void dtrace_disable(void);
 
-extern ktime_t dtrace_gethrtime(void);
-extern ktime_t dtrace_getwalltime(void);
-
 typedef enum dtrace_vtime_state {
        DTRACE_VTIME_INACTIVE = 0,
        DTRACE_VTIME_ACTIVE
@@ -42,6 +39,9 @@ extern dtrace_vtime_state_t dtrace_vtime_active;
 extern void dtrace_vtime_enable(void);
 extern void dtrace_vtime_disable(void);
 extern void dtrace_vtime_switch(struct task_struct *, struct task_struct *);
+extern void dtrace_vtime_suspend(void);
+extern void dtrace_vtime_resume(void);
+extern void dtrace_chill(ktime_t, ktime_t, ktime_t);
 
 extern void dtrace_skip_instruction(struct pt_regs *);
 
index 6fc2fffb84bc314cece25246a6246d37d780d513..1c90421a9c10c26a3cd4c972e57a6f4bf9857743 100644 (file)
@@ -52,7 +52,8 @@ static void cyclic_fire(uintptr_t arg)
                 * We know that the 'pend' counter for the cyclic is non-zero.
                 * So, we can start with calling the handler at least once.
                 */
-               (*cyc->cyc.hdlr.cyh_func)(cyc->cyc.hdlr.cyh_arg);
+               (*cyc->cyc.hdlr.cyh_func)(cyc->cyc.hdlr.cyh_arg,
+                                         ns_to_ktime(ktime_get_raw_fast_ns()));
 
 again:
                /*
@@ -107,7 +108,8 @@ static enum hrtimer_restart cyclic_expire(struct hrtimer *timr)
         * interrupt context.
         */
        if (cyc->cyc.hdlr.cyh_level == CY_HIGH_LEVEL) {
-               (*cyc->cyc.hdlr.cyh_func)(cyc->cyc.hdlr.cyh_arg);
+               (*cyc->cyc.hdlr.cyh_func)(cyc->cyc.hdlr.cyh_arg,
+                                         ns_to_ktime(ktime_get_raw_fast_ns()));
                goto done;
        }
 
@@ -181,6 +183,11 @@ cyclic_id_t cyclic_add(cyc_handler_t *hdlr, cyc_time_t *when)
                hrtimer_start(&cyc->cyc.timr, cyc->cyc.when.cyt_when,
                              HRTIMER_MODE_ABS_PINNED);
 
+       /*
+        * Let the caller know when the cyclic was added.
+        */
+       when->cyt_when = ns_to_ktime(ktime_get_raw_fast_ns());
+
        return (cyclic_id_t)cyc;
 }
 EXPORT_SYMBOL(cyclic_add);
@@ -224,6 +231,11 @@ static void cyclic_omni_start(cyclic_t *omni, int cpu)
        cyc_time_t      when;
        cyc_handler_t   hdlr;
 
+       /*
+        * Let the caller know when the cyclic is being started.
+        */
+       when.cyt_when = ns_to_ktime(ktime_get_raw_fast_ns());
+
        omni->omni.hdlr.cyo_online(omni->omni.hdlr.cyo_arg, cpu, &hdlr, &when);
        cyclic_add_pinned(cpu, omni, &hdlr, &when);
 }
index a1257d7c836b3427674049050d5411a29f731b4c..2a6941772fba5121b07595ac1545254b29e8d0a6 100644 (file)
@@ -335,30 +335,6 @@ void dtrace_psinfo_free(struct task_struct *tsk)
 \*---------------------------------------------------------------------------*/
 dtrace_vtime_state_t   dtrace_vtime_active = 0;
 
-/*
- * Return a high resolution timer value that is guaranteed to always increase.
- */
-ktime_t dtrace_gethrtime(void)
-{
-       struct timespec ts;
-
-       getrawmonotonic(&ts);
-       return timespec_to_ktime(ts);
-}
-EXPORT_SYMBOL(dtrace_gethrtime);
-
-/*
- * Return the current wall-clock time, in nanoseconds since the epoch.
- */
-ktime_t dtrace_getwalltime(void)
-{
-       struct timespec ts;
-
-       getnstimeofday(&ts);
-       return timespec_to_ktime(ts);
-}
-EXPORT_SYMBOL(dtrace_getwalltime);
-
 void dtrace_vtime_enable(void)
 {
        dtrace_vtime_state_t    old, new;
@@ -393,7 +369,7 @@ EXPORT_SYMBOL(dtrace_vtime_disable);
 
 void dtrace_vtime_switch(struct task_struct *prev, struct task_struct *next)
 {
-       ktime_t now = dtrace_gethrtime();
+       ktime_t now = ns_to_ktime(ktime_get_raw_fast_ns());
 
        if (ktime_nz(prev->dtrace_start)) {
                prev->dtrace_vtime = ktime_add(prev->dtrace_vtime,
@@ -405,6 +381,53 @@ void dtrace_vtime_switch(struct task_struct *prev, struct task_struct *next)
        next->dtrace_start = now;
 }
 
+void dtrace_vtime_suspend(void)
+{
+       ktime_t now = ns_to_ktime(ktime_get_raw_fast_ns());
+
+       current->dtrace_vtime = ktime_add(current->dtrace_vtime,
+                                 ktime_sub(now, current->dtrace_start));
+       current->dtrace_start = now;
+}
+EXPORT_SYMBOL(dtrace_vtime_suspend);
+
+void dtrace_vtime_resume(void)
+{
+       current->dtrace_start = ns_to_ktime(ktime_get_raw_fast_ns());
+}
+EXPORT_SYMBOL(dtrace_vtime_resume);
+
+#define ktime_lt(t0, t1)       ((t0).tv64 < (t1).tv64)
+#define ktime_gt(t0, t1)       ((t0).tv64 > (t1).tv64)
+
+void dtrace_chill(ktime_t val, ktime_t interval, ktime_t int_max)
+{
+       ktime_t                 now = ns_to_ktime(ktime_get_raw_fast_ns());
+       cpu_core_t              *cpu = this_cpu_core;
+       volatile uint16_t       *flags;
+
+       flags = (volatile uint16_t *)&cpu->cpuc_dtrace_flags;
+
+       if (ktime_gt(ktime_sub(now, cpu->cpu_dtrace_chillmark), interval)) {
+               cpu->cpu_dtrace_chillmark = now;
+               cpu->cpu_dtrace_chilled = ktime_set(0, 0);
+       }
+
+       if (ktime_gt(ktime_add(cpu->cpu_dtrace_chilled, val), int_max) ||
+           ktime_lt(ktime_add(cpu->cpu_dtrace_chilled, val),
+                    cpu->cpu_dtrace_chilled)) {
+               *flags |= CPU_DTRACE_ILLOP;
+               return;
+       }
+
+       while (ktime_lt(ktime_sub(ns_to_ktime(ktime_get_raw_fast_ns()), now),
+                       val))
+               continue;
+
+       cpu->cpu_dtrace_chilled = ktime_add(cpu->cpu_dtrace_chilled, val);
+}
+EXPORT_SYMBOL(dtrace_chill);
+
 void dtrace_stacktrace(stacktrace_state_t *st)
 {
        struct stack_trace      trace;