extern void dtrace_enable(void);
extern void dtrace_disable(void);
-extern ktime_t dtrace_gethrtime(void);
-extern ktime_t dtrace_getwalltime(void);
-
typedef enum dtrace_vtime_state {
DTRACE_VTIME_INACTIVE = 0,
	DTRACE_VTIME_ACTIVE
} dtrace_vtime_state_t;

extern void dtrace_vtime_enable(void);
extern void dtrace_vtime_disable(void);
extern void dtrace_vtime_switch(struct task_struct *, struct task_struct *);
+extern void dtrace_vtime_suspend(void);
+extern void dtrace_vtime_resume(void);
+extern void dtrace_chill(ktime_t, ktime_t, ktime_t);
extern void dtrace_skip_instruction(struct pt_regs *);
	/*
	 * We know that the 'pend' counter for the cyclic is non-zero,
	 * so we can start by calling the handler at least once.
	 */
- (*cyc->cyc.hdlr.cyh_func)(cyc->cyc.hdlr.cyh_arg);
+ (*cyc->cyc.hdlr.cyh_func)(cyc->cyc.hdlr.cyh_arg,
+ ns_to_ktime(ktime_get_raw_fast_ns()));
again:
	/*
	 * High level cyclic handlers are called directly, since we are
	 * already executing in interrupt context.
	 */
if (cyc->cyc.hdlr.cyh_level == CY_HIGH_LEVEL) {
- (*cyc->cyc.hdlr.cyh_func)(cyc->cyc.hdlr.cyh_arg);
+ (*cyc->cyc.hdlr.cyh_func)(cyc->cyc.hdlr.cyh_arg,
+ ns_to_ktime(ktime_get_raw_fast_ns()));
goto done;
}
hrtimer_start(&cyc->cyc.timr, cyc->cyc.when.cyt_when,
HRTIMER_MODE_ABS_PINNED);
+ /*
+ * Let the caller know when the cyclic was added.
+ */
+ when->cyt_when = ns_to_ktime(ktime_get_raw_fast_ns());
+
return (cyclic_id_t)cyc;
}
EXPORT_SYMBOL(cyclic_add);
cyc_time_t when;
cyc_handler_t hdlr;
+ /*
+ * Let the caller know when the cyclic is being started.
+ */
+ when.cyt_when = ns_to_ktime(ktime_get_raw_fast_ns());
+
omni->omni.hdlr.cyo_online(omni->omni.hdlr.cyo_arg, cpu, &hdlr, &when);
cyclic_add_pinned(cpu, omni, &hdlr, &when);
}
\*---------------------------------------------------------------------------*/
dtrace_vtime_state_t dtrace_vtime_active = 0;
-/*
- * Return a high resolution timer value that is guaranteed to always increase.
- */
-ktime_t dtrace_gethrtime(void)
-{
- struct timespec ts;
-
- getrawmonotonic(&ts);
- return timespec_to_ktime(ts);
-}
-EXPORT_SYMBOL(dtrace_gethrtime);
-
-/*
- * Return the current wall-clock time, in nanoseconds since the epoch.
- */
-ktime_t dtrace_getwalltime(void)
-{
- struct timespec ts;
-
- getnstimeofday(&ts);
- return timespec_to_ktime(ts);
-}
-EXPORT_SYMBOL(dtrace_getwalltime);
-
void dtrace_vtime_enable(void)
{
dtrace_vtime_state_t old, new;
void dtrace_vtime_switch(struct task_struct *prev, struct task_struct *next)
{
- ktime_t now = dtrace_gethrtime();
+ ktime_t now = ns_to_ktime(ktime_get_raw_fast_ns());
if (ktime_nz(prev->dtrace_start)) {
		prev->dtrace_vtime = ktime_add(prev->dtrace_vtime,
						ktime_sub(now, prev->dtrace_start));
	}

next->dtrace_start = now;
}
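+
+/*
+ * Credit the time elapsed since the last update to the current task's
+ * DTrace virtual time, and restart the accounting interval from now.
+ */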
+void dtrace_vtime_suspend(void)
+{
+ ktime_t now = ns_to_ktime(ktime_get_raw_fast_ns());
+
+ current->dtrace_vtime = ktime_add(current->dtrace_vtime,
+ ktime_sub(now, current->dtrace_start));
+ current->dtrace_start = now;
+}
+EXPORT_SYMBOL(dtrace_vtime_suspend);
+
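+/*
+ * Restart virtual time accounting for the current task from the current
+ * raw monotonic time.
+ */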
+void dtrace_vtime_resume(void)
+{
+ current->dtrace_start = ns_to_ktime(ktime_get_raw_fast_ns());
+}
+EXPORT_SYMBOL(dtrace_vtime_resume);
+
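+/*
+ * Signed comparison helpers for ktime_t values.
+ */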
+#define ktime_lt(t0, t1) ((t0).tv64 < (t1).tv64)
+#define ktime_gt(t0, t1) ((t0).tv64 > (t1).tv64)
+
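+/*
+ * Implementation of the chill() action: busy-wait on the current CPU for
+ * 'val'.  Chill time is accounted per CPU in windows of length 'interval';
+ * if this request would push the window total past 'int_max' (or overflow
+ * the accumulator), CPU_DTRACE_ILLOP is set instead of spinning.
+ */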
+void dtrace_chill(ktime_t val, ktime_t interval, ktime_t int_max)
+{
+ ktime_t now = ns_to_ktime(ktime_get_raw_fast_ns());
+ cpu_core_t *cpu = this_cpu_core;
+ volatile uint16_t *flags;
+
+ flags = (volatile uint16_t *)&cpu->cpuc_dtrace_flags;
+
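+	/* Start a new chill accounting window if 'interval' has elapsed. */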
+ if (ktime_gt(ktime_sub(now, cpu->cpu_dtrace_chillmark), interval)) {
+ cpu->cpu_dtrace_chillmark = now;
+ cpu->cpu_dtrace_chilled = ktime_set(0, 0);
+ }
+
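+	/* Reject requests that would exceed 'int_max' or overflow the total. */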
+ if (ktime_gt(ktime_add(cpu->cpu_dtrace_chilled, val), int_max) ||
+ ktime_lt(ktime_add(cpu->cpu_dtrace_chilled, val),
+ cpu->cpu_dtrace_chilled)) {
+ *flags |= CPU_DTRACE_ILLOP;
+ return;
+ }
+
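+	/* Spin until 'val' has elapsed since entry. */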
+ while (ktime_lt(ktime_sub(ns_to_ktime(ktime_get_raw_fast_ns()), now),
+ val))
+ continue;
+
+ cpu->cpu_dtrace_chilled = ktime_add(cpu->cpu_dtrace_chilled, val);
+}
+EXPORT_SYMBOL(dtrace_chill);
+
void dtrace_stacktrace(stacktrace_state_t *st)
{
struct stack_trace trace;