xref: /freebsd/sys/cddl/dev/dtrace/i386/dtrace_subr.c (revision 884a2a699669ec61e2366e3e358342dbc94be24a)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 *
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/smp.h>
#include <sys/dtrace_impl.h>
#include <sys/dtrace_bsd.h>
#include <machine/clock.h>
#include <machine/frame.h>
#include <vm/pmap.h>

extern uintptr_t	kernelbase;
extern uintptr_t	dtrace_in_probe_addr;
extern int		dtrace_in_probe;

int dtrace_invop(uintptr_t, uintptr_t *, uintptr_t);

typedef struct dtrace_invop_hdlr {
	int (*dtih_func)(uintptr_t, uintptr_t *, uintptr_t);
	struct dtrace_invop_hdlr *dtih_next;
} dtrace_invop_hdlr_t;

dtrace_invop_hdlr_t *dtrace_invop_hdlr;

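/*
 * Called from the kernel trap handler when a trap is taken on an
 * instruction that a DTrace provider has patched.  Each registered
 * handler is offered the faulting address in turn; the first handler
 * that recognizes it returns a non-zero code telling the trap code how
 * to emulate the displaced instruction.
 */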
int
dtrace_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
{
	dtrace_invop_hdlr_t *hdlr;
	int rval;

	for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
		if ((rval = hdlr->dtih_func(addr, stack, eax)) != 0)
			return (rval);

	return (0);
}

void
dtrace_invop_add(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr;

	hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
	hdlr->dtih_func = func;
	hdlr->dtih_next = dtrace_invop_hdlr;
	dtrace_invop_hdlr = hdlr;
}
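/*
 * For example (illustrative), an invop consumer such as the fbt provider
 * registers its handler once at load time and drops it again on unload:
 *
 *	dtrace_invop_add(fbt_invop);
 *	...
 *	dtrace_invop_remove(fbt_invop);
 */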

void
dtrace_invop_remove(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr = dtrace_invop_hdlr, *prev = NULL;

	for (;;) {
		if (hdlr == NULL)
			panic("attempt to remove non-existent invop handler");

		if (hdlr->dtih_func == func)
			break;

		prev = hdlr;
		hdlr = hdlr->dtih_next;
	}

	if (prev == NULL) {
		ASSERT(dtrace_invop_hdlr == hdlr);
		dtrace_invop_hdlr = hdlr->dtih_next;
	} else {
		ASSERT(dtrace_invop_hdlr != hdlr);
		prev->dtih_next = hdlr->dtih_next;
	}

	kmem_free(hdlr, 0);
}

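/*
 * Report the address ranges that probe context must never dereference.
 * On i386 this is the entire user portion of the address space,
 * [0, kernelbase).
 */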
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
	(*func)(0, kernelbase);
}

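/*
 * Run func(arg) on the given CPU, or on every CPU when cpu is
 * DTRACE_CPUALL, using an SMP rendezvous with no setup or teardown
 * barriers.
 */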
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
{
	cpumask_t cpus;

	if (cpu == DTRACE_CPUALL)
		cpus = all_cpus;
	else
		cpus = (cpumask_t)1 << cpu;

	smp_rendezvous_cpus(cpus, smp_no_rendevous_barrier, func,
	    smp_no_rendevous_barrier, arg);
}

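/*
 * dtrace_sync() cross-calls a no-op on every CPU; once the rendezvous
 * completes, any probe that was in flight when the call began has
 * finished executing.
 */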
static void
dtrace_sync_func(void)
{
}

void
dtrace_sync(void)
{
	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}

#ifdef notyet
int (*dtrace_fasttrap_probe_ptr)(struct regs *);
int (*dtrace_pid_probe_ptr)(struct regs *);
int (*dtrace_return_probe_ptr)(struct regs *);

void
dtrace_user_probe(struct regs *rp, caddr_t addr, processorid_t cpuid)
{
	krwlock_t *rwp;
	proc_t *p = curproc;
	extern void trap(struct regs *, caddr_t, processorid_t);

	if (USERMODE(rp->r_cs) || (rp->r_ps & PS_VM)) {
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
	}

	if (rp->r_trapno == T_DTRACE_RET) {
		uint8_t step = curthread->t_dtrace_step;
		uint8_t ret = curthread->t_dtrace_ret;
		uintptr_t npc = curthread->t_dtrace_npc;

		if (curthread->t_dtrace_ast) {
			aston(curthread);
			curthread->t_sig_check = 1;
		}

		/*
		 * Clear all user tracing flags.
		 */
		curthread->t_dtrace_ft = 0;

		/*
		 * If we weren't expecting to take a return probe trap, kill
		 * the process as though it had just executed an unassigned
		 * trap instruction.
		 */
		if (step == 0) {
			tsignal(curthread, SIGILL);
			return;
		}

		/*
		 * If we hit this trap unrelated to a return probe, we're
		 * just here to reset the AST flag since we deferred a signal
		 * until after we logically single-stepped the instruction we
		 * copied out.
		 */
		if (ret == 0) {
			rp->r_pc = npc;
			return;
		}

		/*
		 * We need to wait until after we've called the
		 * dtrace_return_probe_ptr function pointer to set %pc.
		 */
		rwp = &CPU->cpu_ft_lock;
		rw_enter(rwp, RW_READER);
		if (dtrace_return_probe_ptr != NULL)
			(void) (*dtrace_return_probe_ptr)(rp);
		rw_exit(rwp);
		rp->r_pc = npc;

	} else if (rp->r_trapno == T_DTRACE_PROBE) {
		rwp = &CPU->cpu_ft_lock;
		rw_enter(rwp, RW_READER);
		if (dtrace_fasttrap_probe_ptr != NULL)
			(void) (*dtrace_fasttrap_probe_ptr)(rp);
		rw_exit(rwp);

	} else if (rp->r_trapno == T_BPTFLT) {
		uint8_t instr;
		rwp = &CPU->cpu_ft_lock;

		/*
		 * The DTrace fasttrap provider uses the breakpoint trap
		 * (int 3). We let DTrace take the first crack at handling
		 * this trap; if it's not a probe that DTrace knows about,
		 * we call into the trap() routine to handle it like a
		 * breakpoint placed by a conventional debugger.
		 */
		rw_enter(rwp, RW_READER);
		if (dtrace_pid_probe_ptr != NULL &&
		    (*dtrace_pid_probe_ptr)(rp) == 0) {
			rw_exit(rwp);
			return;
		}
		rw_exit(rwp);

		/*
		 * If the instruction that caused the breakpoint trap doesn't
		 * look like an int 3 anymore, it may be that this tracepoint
		 * was removed just after the user thread executed it. In
		 * that case, return to user land to retry the instruction.
		 */
		if (fuword8((void *)(rp->r_pc - 1), &instr) == 0 &&
		    instr != FASTTRAP_INSTR) {
			rp->r_pc--;
			return;
		}

		trap(rp, addr, cpuid);

	} else {
		trap(rp, addr, cpuid);
	}
}

void
dtrace_safe_synchronous_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions, so turn off the flags. If the instruction
	 * we copied out caused a synchronous trap, reset the pc back to its
	 * original value and turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
	} else if (rp->r_pc == t->t_dtrace_scrpc ||
	    rp->r_pc == t->t_dtrace_astpc) {
		rp->r_pc = t->t_dtrace_pc;
		t->t_dtrace_ft = 0;
	}
}

int
dtrace_safe_defer_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions, so turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * If we've executed the original instruction, but haven't performed
	 * the jmp back to t->t_dtrace_npc or the cleanup of any registers
	 * used to emulate %rip-relative instructions in 64-bit mode, do that
	 * here and take the signal right away. We detect this condition by
	 * seeing if the program counter is in the range [scrpc + isz, astpc).
	 */
	if (t->t_dtrace_astpc - rp->r_pc <
	    t->t_dtrace_astpc - t->t_dtrace_scrpc - isz) {
#ifdef __amd64
		/*
		 * If there is a scratch register and we're on the
		 * instruction immediately after the modified instruction,
		 * restore the value of that scratch register.
		 */
		if (t->t_dtrace_reg != 0 &&
		    rp->r_pc == t->t_dtrace_scrpc + isz) {
			switch (t->t_dtrace_reg) {
			case REG_RAX:
				rp->r_rax = t->t_dtrace_regv;
				break;
			case REG_RCX:
				rp->r_rcx = t->t_dtrace_regv;
				break;
			case REG_R8:
				rp->r_r8 = t->t_dtrace_regv;
				break;
			case REG_R9:
				rp->r_r9 = t->t_dtrace_regv;
				break;
			}
		}
#endif
		rp->r_pc = t->t_dtrace_npc;
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * Otherwise, make sure we'll return to the kernel after executing
	 * the copied out instruction and defer the signal.
	 */
	if (!t->t_dtrace_step) {
		ASSERT(rp->r_pc < t->t_dtrace_astpc);
		rp->r_pc += t->t_dtrace_astpc - t->t_dtrace_scrpc;
		t->t_dtrace_step = 1;
	}

	t->t_dtrace_ast = 1;

	return (1);
}
#endif

static int64_t	tgt_cpu_tsc;
static int64_t	hst_cpu_tsc;
static int64_t	tsc_skew[MAXCPU];
static uint64_t	nsec_scale;

/* See below for the explanation of this macro. */
#define SCALE_SHIFT	28

static void
dtrace_gethrtime_init_cpu(void *arg)
{
	uintptr_t cpu = (uintptr_t) arg;

	if (cpu == curcpu)
		tgt_cpu_tsc = rdtsc();
	else
		hst_cpu_tsc = rdtsc();
}

static void
dtrace_gethrtime_init(void *arg)
{
	struct pcpu *pc;
	uint64_t tsc_f;
	cpumask_t map;
	int i;

	/*
	 * Get the TSC frequency known at this moment.
	 * It should be constant if the TSC is invariant.
	 * Otherwise the tick->time conversion will be inaccurate, but
	 * it will preserve the monotonic property of the TSC.
	 */
	tsc_f = atomic_load_acq_64(&tsc_freq);

	/*
	 * The following line checks that nsec_scale, calculated below,
	 * doesn't overflow a 32-bit unsigned integer, so that it can be
	 * multiplied by another 32-bit integer without overflowing 64 bits.
	 * Thus the minimum supported TSC frequency is 62.5MHz.
	 */
	KASSERT(tsc_f > (NANOSEC >> (32 - SCALE_SHIFT)), ("TSC frequency is too low"));

	/*
	 * We scale up the NANOSEC/tsc_f ratio to preserve as much precision
	 * as possible.
	 * The 2^28 factor was chosen quite arbitrarily from practical
	 * considerations:
	 * - it supports TSC frequencies as low as 62.5MHz (see above);
	 * - it provides quite good precision (e < 0.01%) up to THz
	 *   (terahertz) values.
	 */
	nsec_scale = ((uint64_t)NANOSEC << SCALE_SHIFT) / tsc_f;
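	/*
	 * Worked example (illustrative): with tsc_f = 2GHz,
	 * nsec_scale = (10^9 << 28) / (2 * 10^9) = 2^27, i.e. 0.5ns per
	 * TSC tick expressed in fixed point with SCALE_SHIFT fractional
	 * bits.
	 */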

	/* The current CPU is the reference one. */
	sched_pin();
	tsc_skew[curcpu] = 0;
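	/*
	 * Rendezvous with each of the other CPUs in turn so that the
	 * reference CPU and that CPU read their TSCs at (nearly) the same
	 * instant; the difference is recorded as the other CPU's skew
	 * relative to the reference.
	 */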
	CPU_FOREACH(i) {
		if (i == curcpu)
			continue;

		pc = pcpu_find(i);
		map = PCPU_GET(cpumask) | pc->pc_cpumask;

		smp_rendezvous_cpus(map, NULL,
		    dtrace_gethrtime_init_cpu,
		    smp_no_rendevous_barrier, (void *)(uintptr_t) i);

		tsc_skew[i] = tgt_cpu_tsc - hst_cpu_tsc;
	}
	sched_unpin();
}

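/*
 * Run the calibration late in boot (SI_SUB_SMP) so that the other CPUs
 * are available for the skew measurement above.
 */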
SYSINIT(dtrace_gethrtime_init, SI_SUB_SMP, SI_ORDER_ANY, dtrace_gethrtime_init, NULL);

/*
 * DTrace needs a high resolution time function which can
 * be called from a probe context and is guaranteed not to be
 * instrumented with probes itself.
 *
 * Returns nanoseconds since boot.
 */
uint64_t
dtrace_gethrtime()
{
	uint64_t tsc;
	uint32_t lo;
	uint32_t hi;

	/*
	 * We split the TSC value into its lower and higher 32-bit halves and
	 * scale each half with nsec_scale separately, then scale the results
	 * down by 2^28 (see the nsec_scale calculation), taking the 32-bit
	 * shift of the higher half into account, and finally add them.
	 */
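	/*
	 * Put differently, the expression below computes
	 * (tsc * nsec_scale) >> SCALE_SHIFT without needing a 96-bit
	 * intermediate: tsc = (hi << 32) + lo, and
	 * ((hi << 32) * nsec_scale) >> 28 == (hi * nsec_scale) << 4.
	 */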
	tsc = rdtsc() + tsc_skew[curcpu];
	lo = tsc;
	hi = tsc >> 32;
	return (((lo * nsec_scale) >> SCALE_SHIFT) +
	    ((hi * nsec_scale) << (32 - SCALE_SHIFT)));
}

uint64_t
dtrace_gethrestime(void)
{
	printf("%s(%d): XXX\n", __func__, __LINE__);
	return (0);
}

/* Function to handle DTrace traps during probes. See i386/i386/trap.c */
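/*
 * For example, a D script that chases a bad pointer will fault while the
 * probe runs with CPU_DTRACE_NOFAULT set; dtrace_trap() converts that
 * fault into an error flag on the CPU instead of letting the kernel panic.
 */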
int
dtrace_trap(struct trapframe *frame, u_int type)
{
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * Check if DTrace has enabled 'no-fault' mode:
	 */
	if ((cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) != 0) {
		/*
		 * There are only a couple of trap types that are expected.
		 * All the rest will be handled in the usual way.
		 */
		switch (type) {
		/* General protection fault. */
		case T_PROTFLT:
			/* Flag an illegal operation. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_eip += dtrace_instr_size((u_char *) frame->tf_eip);
			return (1);
		/* Page fault. */
		case T_PAGEFLT:
			/* Flag a bad address. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
			cpu_core[curcpu].cpuc_dtrace_illval = rcr2();

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_eip += dtrace_instr_size((u_char *) frame->tf_eip);
			return (1);
		default:
			/* Handle all other traps in the usual way. */
			break;
		}
	}

	/* Handle the trap in the usual way. */
	return (0);
}
514