191eaf3e1SJohn Birrell /*
291eaf3e1SJohn Birrell * CDDL HEADER START
391eaf3e1SJohn Birrell *
491eaf3e1SJohn Birrell * The contents of this file are subject to the terms of the
591eaf3e1SJohn Birrell * Common Development and Distribution License, Version 1.0 only
691eaf3e1SJohn Birrell * (the "License"). You may not use this file except in compliance
791eaf3e1SJohn Birrell * with the License.
891eaf3e1SJohn Birrell *
991eaf3e1SJohn Birrell * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
1091eaf3e1SJohn Birrell * or http://www.opensolaris.org/os/licensing.
1191eaf3e1SJohn Birrell * See the License for the specific language governing permissions
1291eaf3e1SJohn Birrell * and limitations under the License.
1391eaf3e1SJohn Birrell *
1491eaf3e1SJohn Birrell * When distributing Covered Code, include this CDDL HEADER in each
1591eaf3e1SJohn Birrell * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
1691eaf3e1SJohn Birrell * If applicable, add the following below this CDDL HEADER, with the
1791eaf3e1SJohn Birrell * fields enclosed by brackets "[]" replaced with your own identifying
1891eaf3e1SJohn Birrell * information: Portions Copyright [yyyy] [name of copyright owner]
1991eaf3e1SJohn Birrell *
2091eaf3e1SJohn Birrell * CDDL HEADER END
2191eaf3e1SJohn Birrell *
2291eaf3e1SJohn Birrell */
2391eaf3e1SJohn Birrell /*
2491eaf3e1SJohn Birrell * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
2591eaf3e1SJohn Birrell * Use is subject to license terms.
2691eaf3e1SJohn Birrell */
2791eaf3e1SJohn Birrell
284737d389SGeorge V. Neville-Neil /*
294737d389SGeorge V. Neville-Neil * Copyright (c) 2011, Joyent, Inc. All rights reserved.
304737d389SGeorge V. Neville-Neil */
314737d389SGeorge V. Neville-Neil
3291eaf3e1SJohn Birrell #include <sys/param.h>
3391eaf3e1SJohn Birrell #include <sys/systm.h>
3471a19bdcSAttilio Rao #include <sys/cpuset.h>
3591eaf3e1SJohn Birrell #include <sys/kernel.h>
3691eaf3e1SJohn Birrell #include <sys/malloc.h>
3791eaf3e1SJohn Birrell #include <sys/kmem.h>
38bdd101c4SMark Johnston #include <sys/proc.h>
3991eaf3e1SJohn Birrell #include <sys/smp.h>
4091eaf3e1SJohn Birrell #include <sys/dtrace_impl.h>
4191eaf3e1SJohn Birrell #include <sys/dtrace_bsd.h>
42bdd101c4SMark Johnston #include <cddl/dev/dtrace/dtrace_cddl.h>
4391eaf3e1SJohn Birrell #include <machine/clock.h>
447174af79SMark Johnston #include <machine/cpufunc.h>
4591eaf3e1SJohn Birrell #include <machine/frame.h>
467174af79SMark Johnston #include <machine/psl.h>
47d41e41f9SJohn Baldwin #include <machine/trap.h>
4891eaf3e1SJohn Birrell #include <vm/pmap.h>
4991eaf3e1SJohn Birrell
/* First virtual address mapped by the kernel; see dtrace_toxic_ranges(). */
extern uintptr_t kernelbase;

/* Defined elsewhere in the DTrace code; returns a cached timestamp. */
extern void dtrace_getnanotime(struct timespec *tsp);
/* Trap-handler hook; armed/disarmed by dtrace_invop_init()/_uninit(). */
extern int (*dtrace_invop_jump_addr)(struct trapframe *);

int	dtrace_invop(uintptr_t, struct trapframe *, uintptr_t);
int	dtrace_invop_start(struct trapframe *frame);
void	dtrace_invop_init(void);
void	dtrace_invop_uninit(void);

/*
 * Node in the singly-linked chain of invalid-opcode handlers registered
 * via dtrace_invop_add().
 */
typedef struct dtrace_invop_hdlr {
	int (*dtih_func)(uintptr_t, struct trapframe *, uintptr_t);
	struct dtrace_invop_hdlr *dtih_next;
} dtrace_invop_hdlr_t;

/* Head of the registered invop handler chain. */
dtrace_invop_hdlr_t *dtrace_invop_hdlr;
6691eaf3e1SJohn Birrell
6791eaf3e1SJohn Birrell int
dtrace_invop(uintptr_t addr,struct trapframe * frame,uintptr_t eax)686c280659SMark Johnston dtrace_invop(uintptr_t addr, struct trapframe *frame, uintptr_t eax)
6991eaf3e1SJohn Birrell {
70bdd101c4SMark Johnston struct thread *td;
7191eaf3e1SJohn Birrell dtrace_invop_hdlr_t *hdlr;
7291eaf3e1SJohn Birrell int rval;
7391eaf3e1SJohn Birrell
74bdd101c4SMark Johnston rval = 0;
75bdd101c4SMark Johnston td = curthread;
76bdd101c4SMark Johnston td->t_dtrace_trapframe = frame;
7791eaf3e1SJohn Birrell for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
786c280659SMark Johnston if ((rval = hdlr->dtih_func(addr, frame, eax)) != 0)
79bdd101c4SMark Johnston break;
80bdd101c4SMark Johnston td->t_dtrace_trapframe = NULL;
8191eaf3e1SJohn Birrell return (rval);
8291eaf3e1SJohn Birrell }
8391eaf3e1SJohn Birrell
8491eaf3e1SJohn Birrell void
dtrace_invop_add(int (* func)(uintptr_t,struct trapframe *,uintptr_t))856c280659SMark Johnston dtrace_invop_add(int (*func)(uintptr_t, struct trapframe *, uintptr_t))
8691eaf3e1SJohn Birrell {
8791eaf3e1SJohn Birrell dtrace_invop_hdlr_t *hdlr;
8891eaf3e1SJohn Birrell
8991eaf3e1SJohn Birrell hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
9091eaf3e1SJohn Birrell hdlr->dtih_func = func;
9191eaf3e1SJohn Birrell hdlr->dtih_next = dtrace_invop_hdlr;
9291eaf3e1SJohn Birrell dtrace_invop_hdlr = hdlr;
9391eaf3e1SJohn Birrell }
9491eaf3e1SJohn Birrell
9591eaf3e1SJohn Birrell void
dtrace_invop_remove(int (* func)(uintptr_t,struct trapframe *,uintptr_t))966c280659SMark Johnston dtrace_invop_remove(int (*func)(uintptr_t, struct trapframe *, uintptr_t))
9791eaf3e1SJohn Birrell {
9891eaf3e1SJohn Birrell dtrace_invop_hdlr_t *hdlr = dtrace_invop_hdlr, *prev = NULL;
9991eaf3e1SJohn Birrell
10091eaf3e1SJohn Birrell for (;;) {
10191eaf3e1SJohn Birrell if (hdlr == NULL)
10291eaf3e1SJohn Birrell panic("attempt to remove non-existent invop handler");
10391eaf3e1SJohn Birrell
10491eaf3e1SJohn Birrell if (hdlr->dtih_func == func)
10591eaf3e1SJohn Birrell break;
10691eaf3e1SJohn Birrell
10791eaf3e1SJohn Birrell prev = hdlr;
10891eaf3e1SJohn Birrell hdlr = hdlr->dtih_next;
10991eaf3e1SJohn Birrell }
11091eaf3e1SJohn Birrell
11191eaf3e1SJohn Birrell if (prev == NULL) {
11291eaf3e1SJohn Birrell ASSERT(dtrace_invop_hdlr == hdlr);
11391eaf3e1SJohn Birrell dtrace_invop_hdlr = hdlr->dtih_next;
11491eaf3e1SJohn Birrell } else {
11591eaf3e1SJohn Birrell ASSERT(dtrace_invop_hdlr != hdlr);
11691eaf3e1SJohn Birrell prev->dtih_next = hdlr->dtih_next;
11791eaf3e1SJohn Birrell }
11891eaf3e1SJohn Birrell
11991eaf3e1SJohn Birrell kmem_free(hdlr, 0);
12091eaf3e1SJohn Birrell }
12191eaf3e1SJohn Birrell
12291eaf3e1SJohn Birrell void
dtrace_invop_init(void)1239093dd9aSMark Johnston dtrace_invop_init(void)
1249093dd9aSMark Johnston {
1259093dd9aSMark Johnston
1269093dd9aSMark Johnston dtrace_invop_jump_addr = dtrace_invop_start;
1279093dd9aSMark Johnston }
1289093dd9aSMark Johnston
1299093dd9aSMark Johnston void
dtrace_invop_uninit(void)1309093dd9aSMark Johnston dtrace_invop_uninit(void)
1319093dd9aSMark Johnston {
1329093dd9aSMark Johnston
1339093dd9aSMark Johnston dtrace_invop_jump_addr = NULL;
1349093dd9aSMark Johnston }
1359093dd9aSMark Johnston
1369093dd9aSMark Johnston void
dtrace_toxic_ranges(void (* func)(uintptr_t base,uintptr_t limit))13791eaf3e1SJohn Birrell dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
13891eaf3e1SJohn Birrell {
13991eaf3e1SJohn Birrell (*func)(0, kernelbase);
14091eaf3e1SJohn Birrell }
14191eaf3e1SJohn Birrell
14291eaf3e1SJohn Birrell void
dtrace_xcall(processorid_t cpu,dtrace_xcall_t func,void * arg)14391eaf3e1SJohn Birrell dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
14491eaf3e1SJohn Birrell {
14571a19bdcSAttilio Rao cpuset_t cpus;
14691eaf3e1SJohn Birrell
14791eaf3e1SJohn Birrell if (cpu == DTRACE_CPUALL)
14891eaf3e1SJohn Birrell cpus = all_cpus;
14991eaf3e1SJohn Birrell else
15071a19bdcSAttilio Rao CPU_SETOF(cpu, &cpus);
15191eaf3e1SJohn Birrell
15267d955aaSPatrick Kelsey smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
15367d955aaSPatrick Kelsey smp_no_rendezvous_barrier, arg);
15491eaf3e1SJohn Birrell }
15591eaf3e1SJohn Birrell
/*
 * No-op rendezvous callback; the synchronization comes from the
 * rendezvous in dtrace_xcall() itself.
 */
static void
dtrace_sync_func(void)
{
}

/*
 * Rendezvous with every CPU by running a no-op on each of them.
 */
void
dtrace_sync(void)
{
	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}
16691eaf3e1SJohn Birrell
16791eaf3e1SJohn Birrell #ifdef notyet
/*
 * Solaris pid-provider support, compiled out under "notyet": abort
 * single-step emulation when a synchronous signal arrives while a
 * traced user instruction is being executed out of the scratch area.
 * NOTE(review): uses Solaris types (kthread_t, struct regs) that have
 * no FreeBSD equivalents wired up here — confirm before enabling.
 */
void
dtrace_safe_synchronous_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	/* Size of the instruction copied out to the scratch area. */
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions so turn off the flags. If the instruction
	 * we copied out caused a synchronous trap, reset the pc back to its
	 * original value and turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
	} else if (rp->r_pc == t->t_dtrace_scrpc ||
	    rp->r_pc == t->t_dtrace_astpc) {
		rp->r_pc = t->t_dtrace_pc;
		t->t_dtrace_ft = 0;
	}
}
19291eaf3e1SJohn Birrell
19391eaf3e1SJohn Birrell int
dtrace_safe_defer_signal(void)19491eaf3e1SJohn Birrell dtrace_safe_defer_signal(void)
19591eaf3e1SJohn Birrell {
19691eaf3e1SJohn Birrell kthread_t *t = curthread;
19791eaf3e1SJohn Birrell struct regs *rp = lwptoregs(ttolwp(t));
19891eaf3e1SJohn Birrell size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;
19991eaf3e1SJohn Birrell
20091eaf3e1SJohn Birrell ASSERT(t->t_dtrace_on);
20191eaf3e1SJohn Birrell
20291eaf3e1SJohn Birrell /*
20391eaf3e1SJohn Birrell * If we're not in the range of scratch addresses, we're not actually
20491eaf3e1SJohn Birrell * tracing user instructions so turn off the flags.
20591eaf3e1SJohn Birrell */
20691eaf3e1SJohn Birrell if (rp->r_pc < t->t_dtrace_scrpc ||
20791eaf3e1SJohn Birrell rp->r_pc > t->t_dtrace_astpc + isz) {
20891eaf3e1SJohn Birrell t->t_dtrace_ft = 0;
20991eaf3e1SJohn Birrell return (0);
21091eaf3e1SJohn Birrell }
21191eaf3e1SJohn Birrell
21291eaf3e1SJohn Birrell /*
2134737d389SGeorge V. Neville-Neil * If we have executed the original instruction, but we have performed
2144737d389SGeorge V. Neville-Neil * neither the jmp back to t->t_dtrace_npc nor the clean up of any
2154737d389SGeorge V. Neville-Neil * registers used to emulate %rip-relative instructions in 64-bit mode,
2164737d389SGeorge V. Neville-Neil * we'll save ourselves some effort by doing that here and taking the
2174737d389SGeorge V. Neville-Neil * signal right away. We detect this condition by seeing if the program
2184737d389SGeorge V. Neville-Neil * counter is the range [scrpc + isz, astpc).
21991eaf3e1SJohn Birrell */
2204737d389SGeorge V. Neville-Neil if (rp->r_pc >= t->t_dtrace_scrpc + isz &&
2214737d389SGeorge V. Neville-Neil rp->r_pc < t->t_dtrace_astpc) {
22291eaf3e1SJohn Birrell #ifdef __amd64
22391eaf3e1SJohn Birrell /*
22491eaf3e1SJohn Birrell * If there is a scratch register and we're on the
22591eaf3e1SJohn Birrell * instruction immediately after the modified instruction,
22691eaf3e1SJohn Birrell * restore the value of that scratch register.
22791eaf3e1SJohn Birrell */
22891eaf3e1SJohn Birrell if (t->t_dtrace_reg != 0 &&
22991eaf3e1SJohn Birrell rp->r_pc == t->t_dtrace_scrpc + isz) {
23091eaf3e1SJohn Birrell switch (t->t_dtrace_reg) {
23191eaf3e1SJohn Birrell case REG_RAX:
23291eaf3e1SJohn Birrell rp->r_rax = t->t_dtrace_regv;
23391eaf3e1SJohn Birrell break;
23491eaf3e1SJohn Birrell case REG_RCX:
23591eaf3e1SJohn Birrell rp->r_rcx = t->t_dtrace_regv;
23691eaf3e1SJohn Birrell break;
23791eaf3e1SJohn Birrell case REG_R8:
23891eaf3e1SJohn Birrell rp->r_r8 = t->t_dtrace_regv;
23991eaf3e1SJohn Birrell break;
24091eaf3e1SJohn Birrell case REG_R9:
24191eaf3e1SJohn Birrell rp->r_r9 = t->t_dtrace_regv;
24291eaf3e1SJohn Birrell break;
24391eaf3e1SJohn Birrell }
24491eaf3e1SJohn Birrell }
24591eaf3e1SJohn Birrell #endif
24691eaf3e1SJohn Birrell rp->r_pc = t->t_dtrace_npc;
24791eaf3e1SJohn Birrell t->t_dtrace_ft = 0;
24891eaf3e1SJohn Birrell return (0);
24991eaf3e1SJohn Birrell }
25091eaf3e1SJohn Birrell
25191eaf3e1SJohn Birrell /*
25291eaf3e1SJohn Birrell * Otherwise, make sure we'll return to the kernel after executing
25391eaf3e1SJohn Birrell * the copied out instruction and defer the signal.
25491eaf3e1SJohn Birrell */
25591eaf3e1SJohn Birrell if (!t->t_dtrace_step) {
25691eaf3e1SJohn Birrell ASSERT(rp->r_pc < t->t_dtrace_astpc);
25791eaf3e1SJohn Birrell rp->r_pc += t->t_dtrace_astpc - t->t_dtrace_scrpc;
25891eaf3e1SJohn Birrell t->t_dtrace_step = 1;
25991eaf3e1SJohn Birrell }
26091eaf3e1SJohn Birrell
26191eaf3e1SJohn Birrell t->t_dtrace_ast = 1;
26291eaf3e1SJohn Birrell
26391eaf3e1SJohn Birrell return (1);
26491eaf3e1SJohn Birrell }
26591eaf3e1SJohn Birrell #endif
26691eaf3e1SJohn Birrell
/* TSC sample taken on the CPU whose skew is being measured. */
static int64_t	tgt_cpu_tsc;
/* TSC sample taken on the reference (calibrating) CPU. */
static int64_t	hst_cpu_tsc;
/* Per-CPU TSC offset relative to the reference CPU, in TSC ticks. */
static int64_t	tsc_skew[MAXCPU];
/* Fixed-point NANOSEC/tsc_freq ratio; see dtrace_gethrtime_init(). */
static uint64_t	nsec_scale;

/* See below for the explanation of this macro. */
#define SCALE_SHIFT	28
27491eaf3e1SJohn Birrell
275fdce57a0SJohn Baldwin static void
dtrace_gethrtime_init_cpu(void * arg)276fdce57a0SJohn Baldwin dtrace_gethrtime_init_cpu(void *arg)
277fdce57a0SJohn Baldwin {
278fdce57a0SJohn Baldwin uintptr_t cpu = (uintptr_t) arg;
279fdce57a0SJohn Baldwin
280fdce57a0SJohn Baldwin if (cpu == curcpu)
281fdce57a0SJohn Baldwin tgt_cpu_tsc = rdtsc();
282fdce57a0SJohn Baldwin else
283fdce57a0SJohn Baldwin hst_cpu_tsc = rdtsc();
284fdce57a0SJohn Baldwin }
285fdce57a0SJohn Baldwin
/*
 * Compute nsec_scale from the current TSC frequency and, on bare metal,
 * measure each CPU's TSC skew relative to the boot-time current CPU.
 * Runs once at SI_SUB_DTRACE time via the SYSINIT below.
 */
static void
dtrace_gethrtime_init(void *arg)
{
	struct pcpu *pc;
	uint64_t tsc_f;
	cpuset_t map;
	int i;

	/*
	 * Get TSC frequency known at this moment.
	 * This should be constant if TSC is invariant.
	 * Otherwise tick->time conversion will be inaccurate, but
	 * will preserve monotonic property of TSC.
	 */
	tsc_f = atomic_load_acq_64(&tsc_freq);

	/*
	 * The following line checks that nsec_scale calculated below
	 * doesn't overflow 32-bit unsigned integer, so that it can multiply
	 * another 32-bit integer without overflowing 64-bit.
	 * Thus minimum supported TSC frequency is 62.5MHz.
	 */
	KASSERT(tsc_f > (NANOSEC >> (32 - SCALE_SHIFT)),
	    ("TSC frequency is too low"));

	/*
	 * We scale up NANOSEC/tsc_f ratio to preserve as much precision
	 * as possible.
	 * 2^28 factor was chosen quite arbitrarily from practical
	 * considerations:
	 * - it supports TSC frequencies as low as 62.5MHz (see above);
	 * - it provides quite good precision (e < 0.01%) up to THz
	 *   (terahertz) values;
	 */
	nsec_scale = ((uint64_t)NANOSEC << SCALE_SHIFT) / tsc_f;

	/* Skip skew calibration under a hypervisor; tsc_skew[] stays zero. */
	if (vm_guest != VM_GUEST_NO)
		return;

	/* The current CPU is the reference one. */
	sched_pin();
	tsc_skew[curcpu] = 0;
	CPU_FOREACH(i) {
		if (i == curcpu)
			continue;

		/*
		 * Rendezvous with CPU i only: both CPUs sample their TSC in
		 * dtrace_gethrtime_init_cpu() and the difference becomes
		 * CPU i's skew relative to the reference CPU.
		 */
		pc = pcpu_find(i);
		CPU_SETOF(PCPU_GET(cpuid), &map);
		CPU_SET(pc->pc_cpuid, &map);

		smp_rendezvous_cpus(map, NULL,
		    dtrace_gethrtime_init_cpu,
		    smp_no_rendezvous_barrier, (void *)(uintptr_t) i);

		tsc_skew[i] = tgt_cpu_tsc - hst_cpu_tsc;
	}
	sched_unpin();
}
SYSINIT(dtrace_gethrtime_init, SI_SUB_DTRACE, SI_ORDER_ANY,
    dtrace_gethrtime_init, NULL);
34691eaf3e1SJohn Birrell
34791eaf3e1SJohn Birrell /*
34891eaf3e1SJohn Birrell * DTrace needs a high resolution time function which can
34991eaf3e1SJohn Birrell * be called from a probe context and guaranteed not to have
35091eaf3e1SJohn Birrell * instrumented with probes itself.
35191eaf3e1SJohn Birrell *
35291eaf3e1SJohn Birrell * Returns nanoseconds since boot.
35391eaf3e1SJohn Birrell */
35491eaf3e1SJohn Birrell uint64_t
dtrace_gethrtime(void)3556c7828a2SMark Johnston dtrace_gethrtime(void)
35691eaf3e1SJohn Birrell {
357b064b6d1SAndriy Gapon uint64_t tsc;
3586c7828a2SMark Johnston uint32_t lo, hi;
3596c7828a2SMark Johnston register_t eflags;
360b064b6d1SAndriy Gapon
361b064b6d1SAndriy Gapon /*
362b064b6d1SAndriy Gapon * We split TSC value into lower and higher 32-bit halves and separately
363b064b6d1SAndriy Gapon * scale them with nsec_scale, then we scale them down by 2^28
364b064b6d1SAndriy Gapon * (see nsec_scale calculations) taking into account 32-bit shift of
365b064b6d1SAndriy Gapon * the higher half and finally add.
366b064b6d1SAndriy Gapon */
3676c7828a2SMark Johnston eflags = intr_disable();
368db5c7d36SZachary Loafman tsc = rdtsc() - tsc_skew[curcpu];
3696c7828a2SMark Johnston intr_restore(eflags);
3706c7828a2SMark Johnston
371b064b6d1SAndriy Gapon lo = tsc;
372b064b6d1SAndriy Gapon hi = tsc >> 32;
373b064b6d1SAndriy Gapon return (((lo * nsec_scale) >> SCALE_SHIFT) +
374b064b6d1SAndriy Gapon ((hi * nsec_scale) << (32 - SCALE_SHIFT)));
37591eaf3e1SJohn Birrell }
37691eaf3e1SJohn Birrell
37791eaf3e1SJohn Birrell uint64_t
dtrace_gethrestime(void)37891eaf3e1SJohn Birrell dtrace_gethrestime(void)
37991eaf3e1SJohn Birrell {
38057d025c3SGeorge V. Neville-Neil struct timespec current_time;
38157d025c3SGeorge V. Neville-Neil
38257d025c3SGeorge V. Neville-Neil dtrace_getnanotime(¤t_time);
38357d025c3SGeorge V. Neville-Neil
384d638e8dcSGeorge V. Neville-Neil return (current_time.tv_sec * 1000000000ULL + current_time.tv_nsec);
38591eaf3e1SJohn Birrell }
38691eaf3e1SJohn Birrell
/*
 * Function to handle DTrace traps during probes. See i386/i386/trap.c.
 * Returns 1 when the trap was consumed on behalf of a no-fault probe
 * (the caller must resume), 0 when it should be handled normally.
 */
int
dtrace_trap(struct trapframe *frame, u_int type)
{
	uint16_t nofault;

	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * Check if DTrace has enabled 'no-fault' mode:
	 */
	sched_pin();
	nofault = cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT;
	sched_unpin();
	if (nofault) {
		/* No-fault mode implies probe context: interrupts are off. */
		KASSERT((read_eflags() & PSL_I) == 0, ("interrupts enabled"));

		/*
		 * There are only a couple of trap types that are expected.
		 * All the rest will be handled in the usual way.
		 */
		switch (type) {
		/* General protection fault. */
		case T_PROTFLT:
			/* Flag an illegal operation. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_eip += dtrace_instr_size((uint8_t *) frame->tf_eip);
			return (1);
		/* Page fault. */
		case T_PAGEFLT:
			/* Flag a bad address. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
			cpu_core[curcpu].cpuc_dtrace_illval = rcr2();

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_eip += dtrace_instr_size((uint8_t *) frame->tf_eip);
			return (1);
		default:
			/* Handle all other traps in the usual way. */
			break;
		}
	}

	/* Handle the trap in the usual way. */
	return (0);
}
445