obj-$(CONFIG_DT_SDT) += sdt.o
obj-$(CONFIG_DT_SYSTRACE) += systrace.o
obj-$(CONFIG_DT_DT_TEST) += dt_test.o
-obj-$(CONFIG_DT_DT_PERF) += dt_perf.o
dtrace-y := dtrace_mod.o dtrace_dev.o \
dtrace_asm_$(UTS_MACHINE).o \
sdt-y := sdt_mod.o sdt_dev.o sdt_$(UTS_MACHINE).o
systrace-y := systrace_mod.o systrace_dev.o
dt_test-y := dt_test_mod.o dt_test_dev.o
-dt_perf-y := dt_perf_mod.o dt_perf_dev.o
/*
* See the comment in dtrace_state_deadman() for the reason
- * for setting dts_laststatus to INT64_MAX before setting
+ * for setting dts_laststatus to UINT64_MAX before setting
* it to the correct value.
*/
- state->dts_laststatus = ns_to_ktime(INT64_MAX);
+ state->dts_laststatus = UINT64_MAX;
dtrace_membar_producer();
- state->dts_laststatus = dtrace_gethrtime();
+ state->dts_laststatus = jiffies;
memset(&stat, 0, sizeof(stat));
#include <linux/hardirq.h>
#include <linux/in6.h>
#include <linux/inet.h>
+#include <linux/jiffies.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
#include <linux/socket.h>
return (uint64_t)(uintptr_t)current;
case DIF_VAR_TIMESTAMP:
+ case DIF_VAR_WALLTIMESTAMP:
if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
- mstate->dtms_timestamp = dtrace_gethrtime();
+ mstate->dtms_timestamp = current->dtrace_start;
mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
}
return ktime_to_ns(current->dtrace_vtime);
- case DIF_VAR_WALLTIMESTAMP:
- return ktime_to_ns(dtrace_getwalltime());
-
case DIF_VAR_IPL:
if (!dtrace_priv_kernel(state))
return 0;
switch (subr) {
case DIF_SUBR_RAND:
- regs[rd] = ktime_to_ns(dtrace_gethrtime()) * 2416 + 374441;
+ regs[rd] = jiffies * 2416 + 374441;
regs[rd] = do_div(regs[rd], 1771875);
break;
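/*
 * Note on the rand() change above: jiffies now seeds the same linear
 * congruential step that previously used dtrace_gethrtime().  do_div()
 * divides regs[rd] in place and evaluates to the remainder, so the second
 * statement leaves regs[rd] holding (jiffies * 2416 + 374441) % 1771875;
 * apart from the coarser time source, the behaviour is unchanged.
 */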
dtrace_copyinstr_arch(uaddr, kaddr, size, flags);
}
-ktime_t dtrace_gethrestime(void)
-{
- return dtrace_gethrtime();
-}
-
void dtrace_getpcstack(uint64_t *pcstack, int pcstack_limit, int aframes,
uint32_t *intrpc)
{
static void dtrace_action_chill(dtrace_mstate_t *mstate, ktime_t val)
{
- ktime_t now;
- volatile uint16_t *flags;
- cpu_core_t *cpu = this_cpu_core;
-
if (dtrace_destructive_disallow)
return;
- flags = (volatile uint16_t *)&cpu->cpuc_dtrace_flags;
-
- now = dtrace_gethrtime();
-
- if (ktime_gt(ktime_sub(now, cpu->cpu_dtrace_chillmark),
- dtrace_chill_interval)) {
- /*
- * We need to advance the mark to the current time.
- */
- cpu->cpu_dtrace_chillmark = now;
- cpu->cpu_dtrace_chilled = ktime_set(0, 0);
- }
+ dtrace_chill(val, dtrace_chill_interval, dtrace_chill_max);
- /*
- * Now check to see if the requested chill time would take us over
- * the maximum amount of time allowed in the chill interval. (Or
- * worse, if the calculation itself induces overflow.)
- */
- if (ktime_gt(ktime_add(cpu->cpu_dtrace_chilled, val),
- dtrace_chill_max) ||
- ktime_lt(ktime_add(cpu->cpu_dtrace_chilled, val),
- cpu->cpu_dtrace_chilled)) {
- *flags |= CPU_DTRACE_ILLOP;
- return;
- }
-
- while (ktime_lt(ktime_sub(dtrace_gethrtime(), now), val))
- continue;
-
- /*
- * Normally, we assure that the value of the variable "timestamp" does
- * not change within an ECB. The presence of chill() represents an
- * exception to this rule, however.
- */
mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
- cpu->cpu_dtrace_chilled = ktime_add(cpu->cpu_dtrace_chilled, val);
}
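/*
 * The dtrace_chill() helper called above is not part of this excerpt.  A
 * plausible sketch, assuming it simply absorbs the per-CPU bookkeeping and
 * busy-wait deleted from dtrace_action_chill() (cpu_core_t, this_cpu_core
 * and the cpu_dtrace_* fields are taken from the removed lines; the actual
 * helper may live elsewhere and differ in detail):
 */
void dtrace_chill(ktime_t val, ktime_t interval, ktime_t max)
{
	ktime_t			now = dtrace_gethrtime();
	cpu_core_t		*cpu = this_cpu_core;
	volatile uint16_t	*flags =
			(volatile uint16_t *)&cpu->cpuc_dtrace_flags;

	/*
	 * Start a new chill interval if the previous one has expired.
	 */
	if (ktime_gt(ktime_sub(now, cpu->cpu_dtrace_chillmark), interval)) {
		cpu->cpu_dtrace_chillmark = now;
		cpu->cpu_dtrace_chilled = ktime_set(0, 0);
	}

	/*
	 * Refuse requests that would exceed the per-interval budget, or
	 * that make the addition itself overflow.
	 */
	if (ktime_gt(ktime_add(cpu->cpu_dtrace_chilled, val), max) ||
	    ktime_lt(ktime_add(cpu->cpu_dtrace_chilled, val),
		     cpu->cpu_dtrace_chilled)) {
		*flags |= CPU_DTRACE_ILLOP;
		return;
	}

	/*
	 * Spin for the requested duration, then account for it.
	 */
	while (ktime_lt(ktime_sub(dtrace_gethrtime(), now), val))
		continue;

	cpu->cpu_dtrace_chilled = ktime_add(cpu->cpu_dtrace_chilled, val);
}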
static void dtrace_action_ustack(dtrace_mstate_t *mstate,
dtrace_action_t *act;
intptr_t offs;
size_t size;
- int vtime, onintr;
+ int onintr;
volatile uint16_t *flags;
- ktime_t now;
int pflag = 0;
#ifdef FIXME
*flags |= CPU_DTRACE_PROBE_CTX;
this_cpu_core->cpu_dtrace_caller = id;
- now = dtrace_gethrtime();
- vtime = (dtrace_vtime_references > 0);
-
- if (vtime && ktime_nz(current->dtrace_start))
- current->dtrace_vtime =
- ktime_add(current->dtrace_vtime,
- ktime_sub(now, current->dtrace_start));
+ if (id != dtrace_probeid_error)
+ dtrace_vtime_suspend();
mstate.dtms_difo = NULL;
mstate.dtms_probe = probe;
}
}
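/*
 * The dtrace_vtime_suspend()/dtrace_vtime_resume() pair used in
 * dtrace_probe() is not defined in this excerpt.  A minimal sketch,
 * assuming the pair packages the vtime accounting that dtrace_probe()
 * used to do inline (dtrace_vtime_references, current->dtrace_start and
 * current->dtrace_vtime come from the deleted lines); the real helpers may
 * differ, not least because dtrace_start now also serves as the
 * probe-entry timestamp read elsewhere in this patch:
 */
void dtrace_vtime_suspend(void)
{
	ktime_t	now = dtrace_gethrtime();

	/*
	 * Fold the time spent outside of probe context (since the last
	 * dtrace_vtime_resume()) into the thread's virtual time.
	 */
	if (dtrace_vtime_references > 0 && ktime_nz(current->dtrace_start))
		current->dtrace_vtime =
			ktime_add(current->dtrace_vtime,
				  ktime_sub(now, current->dtrace_start));

	/*
	 * From here on, dtrace_start marks probe entry; the rest of the
	 * patch reads it as the probe timestamp.
	 */
	current->dtrace_start = now;
}

void dtrace_vtime_resume(void)
{
	/*
	 * Mark probe exit; vtime accumulation restarts from here.
	 */
	current->dtrace_start = dtrace_gethrtime();
}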
- if (ktime_gt(ktime_sub(now, state->dts_alive),
+ if (ktime_gt(ktime_sub(current->dtrace_start, state->dts_alive),
dtrace_deadman_timeout)) {
/*
* We seem to be dead. Unless we (a) have kernel
continue;
}
- if (vtime)
- /*
- * Before recursing on dtrace_probe(), we
- * need to explicitly clear out our start
- * time to prevent it from being accumulated
- * into t_dtrace_vtime.
- */
- current->dtrace_start = ktime_set(0, 0);
-
/*
* Iterate over the actions to figure out which action
* we were processing when we experienced the error.
id, ecb->dte_epid);
}
- if (vtime)
- current->dtrace_start = dtrace_gethrtime();
+ dtrace_vtime_resume();
/*
* Only clear the flag if this is not the ERROR probe. We know that
#if 1
ktime_t dtrace_deadman_interval = KTIME_INIT(10, 0);
ktime_t dtrace_deadman_timeout = KTIME_INIT(120, 0);
-ktime_t dtrace_deadman_user = KTIME_INIT(120, 0);
+uint64_t dtrace_deadman_user = SECS_TO_JIFFIES(120);
#else
ktime_t dtrace_deadman_interval = KTIME_INIT(1, 0);
ktime_t dtrace_deadman_timeout = KTIME_INIT(10, 0);
-ktime_t dtrace_deadman_user = KTIME_INIT(30, 0);
+uint64_t dtrace_deadman_user = SECS_TO_JIFFIES(30);
#endif
dtrace_id_t dtrace_probeid_begin;
vfree(vstate->dtvs_locals);
}
-static void dtrace_state_clean(dtrace_state_t *state)
+static void dtrace_state_clean(dtrace_state_t *state, ktime_t when)
{
if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
state->dts_activity != DTRACE_ACTIVITY_DRAINING)
dtrace_speculation_clean(state);
}
-static void dtrace_state_deadman(dtrace_state_t *state)
+static void dtrace_state_deadman(dtrace_state_t *state, ktime_t when)
{
- ktime_t now;
-
#ifdef FIXME
/*
* This may not be needed for Linux - we'll see.
dtrace_sync();
#endif
- now = dtrace_gethrtime();
-
if (state != dtrace_anon.dta_state &&
- ktime_ge(ktime_sub(now, state->dts_laststatus),
- dtrace_deadman_user))
+ time_after_eq(jiffies, state->dts_laststatus + dtrace_deadman_user))
return;
/*
*/
state->dts_alive = ktime_set(KTIME_SEC_MAX, 0);
dtrace_membar_producer();
- state->dts_alive = now;
+ state->dts_alive = when;
}
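/*
 * The jiffies-based test above relies on time_after_eq() from
 * <linux/jiffies.h>, which remains correct across jiffies wraparound.
 * Rough equivalence with the ktime_t arithmetic it replaces (illustration
 * only):
 *
 *	old:	ktime_ge(ktime_sub(now, dts_laststatus), dtrace_deadman_user)
 *	new:	time_after_eq(jiffies, dts_laststatus + dtrace_deadman_user)
 *
 * Both ask whether at least dtrace_deadman_user has elapsed since the
 * consumer last reported status; only the clock and the units differ.
 */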
dtrace_state_t *dtrace_state_create(struct file *file)
when.cyt_when = ktime_set(0, 0);
when.cyt_interval = dtrace_deadman_interval;
- state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
state->dts_deadman = cyclic_add(&hdlr, &when);
+ state->dts_alive = when.cyt_when;
+ state->dts_laststatus = jiffies;
state->dts_activity = DTRACE_ACTIVITY_WARMUP;
uint32_t dts_stkstroverflows;
uint32_t dts_dblerrors;
uint32_t dts_reserve;
- ktime_t dts_laststatus;
cyclic_id_t dts_cleaner;
cyclic_id_t dts_deadman;
+ uint64_t dts_laststatus;
ktime_t dts_alive;
char dts_speculates;
char dts_destructive;
extern void dtrace_vpanic(const char *, va_list);
extern int dtrace_getipl(void);
-extern ktime_t dtrace_gethrestime(void);
-
extern dtrace_icookie_t dtrace_interrupt_disable(void);
extern void dtrace_interrupt_enable(dtrace_icookie_t);
*
* CDDL HEADER END
*
- * Copyright 2009 -- 2013 Oracle, Inc. All rights reserved.
+ * Copyright 2009-2017 Oracle, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <asm/bitsperlong.h>
#include <linux/dtrace_os.h>
+#include <linux/jiffies.h>
typedef unsigned char uchar_t;
typedef unsigned int uint_t;
#define ktime_gt(t0, t1) ((t0).tv64 > (t1).tv64)
#define ktime_cp(t0, t1) ((t0).tv64 = (t1).tv64)
+#define SECS_TO_JIFFIES(s) (((s) * SEC_CONVERSION) >> SEC_JIFFIE_SC)
+
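/*
 * SECS_TO_JIFFIES() mirrors the seconds half of the kernel's
 * __timespec_to_jiffies(): SEC_CONVERSION is a fixed-point encoding of
 * ticks per second and SEC_JIFFIE_SC the matching shift, so the result is
 * roughly s * HZ, rounded up.  As a rough worked example, assuming
 * HZ == 1000, SECS_TO_JIFFIES(120) comes to about 120000 jiffies, the
 * value assigned to dtrace_deadman_user earlier in this patch.
 */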
/*
* Translate between kernel config options and userspace-compatible definitions.
*/
static int profile_max; /* maximum number of profile probes */
static atomic_t profile_total; /* current number of profile probes */
-static void profile_tick_fn(uintptr_t arg)
+static void profile_tick_fn(uintptr_t arg, ktime_t when)
{
profile_probe_t *prof = (profile_probe_t *)arg;
unsigned long pc = 0, upc = 0;
dtrace_probe(prof->prof_id, pc, upc, 0, 0, 0);
}
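/*
 * Both profile handlers now take the cyclic firing time instead of calling
 * dtrace_gethrtime() themselves.  This presumes the cyclic handler type
 * gained a ktime_t parameter, roughly as below (a sketch; the exact
 * typedef name and the cyclic header are not shown in this patch):
 *
 *	typedef void (*cyc_func_t)(uintptr_t arg, ktime_t when);
 *
 * profile_prof_fn() below uses the passed-in time to compute how late the
 * cyclic fired relative to profc_expected.
 */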
-static void profile_prof_fn(uintptr_t arg)
+static void profile_prof_fn(uintptr_t arg, ktime_t when)
{
profile_probe_percpu_t *pcpu = (profile_probe_percpu_t *)arg;
profile_probe_t *prof = pcpu->profc_probe;
struct pt_regs *regs = get_irq_regs();
unsigned long pc = 0, upc = 0;
- late = ktime_sub(dtrace_gethrtime(), pcpu->profc_expected);
+ late = ktime_sub(when, pcpu->profc_expected);
pcpu->profc_expected = ktime_add(pcpu->profc_expected,
pcpu->profc_interval);
hdlr->cyh_level = CY_HIGH_LEVEL;
when->cyt_interval = prof->prof_interval;
- when->cyt_when = ktime_add(dtrace_gethrtime(), when->cyt_interval);
+ when->cyt_when = ktime_add(when->cyt_when, when->cyt_interval);
pcpu->profc_expected = when->cyt_when;
pcpu->profc_interval = when->cyt_interval;