xref: /freebsd/sys/cddl/dev/dtrace/amd64/dtrace_subr.c (revision b3aaa0cc21c63d388230c7ef2a80abd631ff20d5)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 *
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/smp.h>
#include <sys/dtrace_impl.h>
#include <sys/dtrace_bsd.h>
#include <machine/clock.h>
#include <machine/frame.h>
#include <vm/pmap.h>

extern uintptr_t	kernelbase;
extern uintptr_t	dtrace_in_probe_addr;
extern int		dtrace_in_probe;

int dtrace_invop(uintptr_t, uintptr_t *, uintptr_t);

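/*
 * Providers that plant trap instructions at their probe sites (e.g. fbt)
 * register a handler on this singly linked list.
 */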
typedef struct dtrace_invop_hdlr {
	int (*dtih_func)(uintptr_t, uintptr_t *, uintptr_t);
	struct dtrace_invop_hdlr *dtih_next;
} dtrace_invop_hdlr_t;

dtrace_invop_hdlr_t *dtrace_invop_hdlr;

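/*
 * Walk the handler chain and give each handler a chance to claim the trap;
 * the first non-zero return value is passed back to the trap handler.
 */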
int
dtrace_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
{
	dtrace_invop_hdlr_t *hdlr;
	int rval;

	for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
		if ((rval = hdlr->dtih_func(addr, stack, eax)) != 0)
			return (rval);

	return (0);
}

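/*
 * Register an invalid-opcode handler by linking it at the head of the list.
 */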
void
dtrace_invop_add(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr;

	hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
	hdlr->dtih_func = func;
	hdlr->dtih_next = dtrace_invop_hdlr;
	dtrace_invop_hdlr = hdlr;
}

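/*
 * Unlink and free a previously registered handler; attempting to remove a
 * handler that was never added is a fatal error.
 */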
void
dtrace_invop_remove(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr = dtrace_invop_hdlr, *prev = NULL;

	for (;;) {
		if (hdlr == NULL)
			panic("attempt to remove non-existent invop handler");

		if (hdlr->dtih_func == func)
			break;

		prev = hdlr;
		hdlr = hdlr->dtih_next;
	}

	if (prev == NULL) {
		ASSERT(dtrace_invop_hdlr == hdlr);
		dtrace_invop_hdlr = hdlr->dtih_next;
	} else {
		ASSERT(dtrace_invop_hdlr != hdlr);
		prev->dtih_next = hdlr->dtih_next;
	}

	kmem_free(hdlr, 0);
}

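/*
 * Report the address ranges that probes must never dereference: everything
 * from address zero up to the base of the recursive page table mapping
 * (addr_PTmap) is treated as toxic.
 */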
/*ARGSUSED*/
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
	(*func)(0, (uintptr_t) addr_PTmap);
}

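/*
 * Run func(arg) on the CPU identified by 'cpu', or on every CPU if 'cpu' is
 * DTRACE_CPUALL.  The current CPU is handled with a direct call; any other
 * CPUs in the set are reached with an SMP rendezvous.
 */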
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
{
	cpumask_t cpus;

	critical_enter();

	if (cpu == DTRACE_CPUALL)
		cpus = all_cpus;
	else
		cpus = (cpumask_t) (1 << cpu);

	/* If the current CPU is in the set, call the function directly: */
	if ((cpus & (1 << curcpu)) != 0) {
		(*func)(arg);

		/* Mask the current CPU from the set */
		cpus &= ~(1 << curcpu);
	}

	/* If there are any CPUs in the set, cross-call to those CPUs */
	if (cpus != 0)
		smp_rendezvous_cpus(cpus, NULL, func, smp_no_rendevous_barrier, arg);

	critical_exit();
}

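/*
 * dtrace_sync() rendezvouses every CPU on an empty function.  Because probes
 * run with interrupts disabled, a CPU can only answer the cross-call once it
 * has left probe context, so this acts as a barrier after state changes.
 */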
static void
dtrace_sync_func(void)
{
}

void
dtrace_sync(void)
{
	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}

#ifdef notyet
int (*dtrace_fasttrap_probe_ptr)(struct regs *);
int (*dtrace_pid_probe_ptr)(struct regs *);
int (*dtrace_return_probe_ptr)(struct regs *);

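/*
 * Handle user-mode traps that may belong to the fasttrap provider: return
 * probe and pid probe traps are dispatched to the registered handlers above,
 * and anything DTrace does not claim is passed on to trap().
 */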
void
dtrace_user_probe(struct regs *rp, caddr_t addr, processorid_t cpuid)
{
	krwlock_t *rwp;
	proc_t *p = curproc;
	extern void trap(struct regs *, caddr_t, processorid_t);

	if (USERMODE(rp->r_cs) || (rp->r_ps & PS_VM)) {
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
	}

	if (rp->r_trapno == T_DTRACE_RET) {
		uint8_t step = curthread->t_dtrace_step;
		uint8_t ret = curthread->t_dtrace_ret;
		uintptr_t npc = curthread->t_dtrace_npc;

		if (curthread->t_dtrace_ast) {
			aston(curthread);
			curthread->t_sig_check = 1;
		}

		/*
		 * Clear all user tracing flags.
		 */
		curthread->t_dtrace_ft = 0;

		/*
		 * If we weren't expecting to take a return probe trap, kill
		 * the process as though it had just executed an unassigned
		 * trap instruction.
		 */
		if (step == 0) {
			tsignal(curthread, SIGILL);
			return;
		}

		/*
		 * If we hit this trap unrelated to a return probe, we're
		 * just here to reset the AST flag since we deferred a signal
		 * until after we logically single-stepped the instruction we
		 * copied out.
		 */
		if (ret == 0) {
			rp->r_pc = npc;
			return;
		}

		/*
		 * We need to wait until after we've called the
		 * dtrace_return_probe_ptr function pointer to set %pc.
		 */
		rwp = &CPU->cpu_ft_lock;
		rw_enter(rwp, RW_READER);
		if (dtrace_return_probe_ptr != NULL)
			(void) (*dtrace_return_probe_ptr)(rp);
		rw_exit(rwp);
		rp->r_pc = npc;

	} else if (rp->r_trapno == T_DTRACE_PROBE) {
		rwp = &CPU->cpu_ft_lock;
		rw_enter(rwp, RW_READER);
		if (dtrace_fasttrap_probe_ptr != NULL)
			(void) (*dtrace_fasttrap_probe_ptr)(rp);
		rw_exit(rwp);

	} else if (rp->r_trapno == T_BPTFLT) {
		uint8_t instr;
		rwp = &CPU->cpu_ft_lock;

		/*
		 * The DTrace fasttrap provider uses the breakpoint trap
		 * (int 3). We let DTrace take the first crack at handling
		 * this trap; if it's not a probe that DTrace knows about,
		 * we call into the trap() routine to handle it like a
		 * breakpoint placed by a conventional debugger.
		 */
		rw_enter(rwp, RW_READER);
		if (dtrace_pid_probe_ptr != NULL &&
		    (*dtrace_pid_probe_ptr)(rp) == 0) {
			rw_exit(rwp);
			return;
		}
		rw_exit(rwp);

		/*
		 * If the instruction that caused the breakpoint trap doesn't
		 * look like an int 3 anymore, it may be that this tracepoint
		 * was removed just after the user thread executed it. In
		 * that case, return to user land to retry the instruction.
		 */
		if (fuword8((void *)(rp->r_pc - 1), &instr) == 0 &&
		    instr != FASTTRAP_INSTR) {
			rp->r_pc--;
			return;
		}

		trap(rp, addr, cpuid);

	} else {
		trap(rp, addr, cpuid);
	}
}

void
dtrace_safe_synchronous_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions so turn off the flags. If the instruction
	 * we copied out caused a synchronous trap, reset the pc back to its
	 * original value and turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
	} else if (rp->r_pc == t->t_dtrace_scrpc ||
	    rp->r_pc == t->t_dtrace_astpc) {
		rp->r_pc = t->t_dtrace_pc;
		t->t_dtrace_ft = 0;
	}
}

int
dtrace_safe_defer_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions so turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * If we've executed the original instruction, but haven't performed
	 * the jmp back to t->t_dtrace_npc or the cleanup of any registers
	 * used to emulate %rip-relative instructions in 64-bit mode, do that
	 * here and take the signal right away. We detect this condition by
	 * seeing if the program counter is in the range [scrpc + isz, astpc).
	 */
	if (t->t_dtrace_astpc - rp->r_pc <
	    t->t_dtrace_astpc - t->t_dtrace_scrpc - isz) {
#ifdef __amd64
		/*
		 * If there is a scratch register and we're on the
		 * instruction immediately after the modified instruction,
		 * restore the value of that scratch register.
		 */
		if (t->t_dtrace_reg != 0 &&
		    rp->r_pc == t->t_dtrace_scrpc + isz) {
			switch (t->t_dtrace_reg) {
			case REG_RAX:
				rp->r_rax = t->t_dtrace_regv;
				break;
			case REG_RCX:
				rp->r_rcx = t->t_dtrace_regv;
				break;
			case REG_R8:
				rp->r_r8 = t->t_dtrace_regv;
				break;
			case REG_R9:
				rp->r_r9 = t->t_dtrace_regv;
				break;
			}
		}
#endif
		rp->r_pc = t->t_dtrace_npc;
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * Otherwise, make sure we'll return to the kernel after executing
	 * the copied out instruction and defer the signal.
	 */
	if (!t->t_dtrace_step) {
		ASSERT(rp->r_pc < t->t_dtrace_astpc);
		rp->r_pc += t->t_dtrace_astpc - t->t_dtrace_scrpc;
		t->t_dtrace_step = 1;
	}

	t->t_dtrace_ast = 1;

	return (1);
}
#endif

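/*
 * Per-CPU offsets used to adjust the raw TSC so that dtrace_gethrtime()
 * yields comparable values on every CPU.  tgt_cpu_tsc and hst_cpu_tsc are
 * scratch values shared between the rendezvous callbacks below.
 */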
static int64_t	tgt_cpu_tsc;
static int64_t	hst_cpu_tsc;
static int64_t	tsc_skew[MAXCPU];

static void
dtrace_gethrtime_init_sync(void *arg)
{
#ifdef CHECK_SYNC
	/*
	 * Delay this function from returning on one
	 * of the CPUs to check that the synchronisation
	 * works.
	 */
	uintptr_t cpu = (uintptr_t) arg;

	if (cpu == curcpu) {
		int i;
		for (i = 0; i < 1000000000; i++)
			tgt_cpu_tsc = rdtsc();
		tgt_cpu_tsc = 0;
	}
#endif
}

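/*
 * Rendezvous action: each of the two participating CPUs samples its TSC,
 * the target CPU into tgt_cpu_tsc and the reference CPU into hst_cpu_tsc.
 */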
static void
dtrace_gethrtime_init_cpu(void *arg)
{
	uintptr_t cpu = (uintptr_t) arg;

	if (cpu == curcpu)
		tgt_cpu_tsc = rdtsc();
	else
		hst_cpu_tsc = rdtsc();
}

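/*
 * Measure, for every other CPU, the TSC offset relative to the CPU this
 * routine runs on by rendezvousing with each CPU in turn and comparing
 * the two TSC samples taken during the rendezvous.
 */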
static void
dtrace_gethrtime_init(void *arg)
{
	cpumask_t map;
	int i;
	struct pcpu *cp;

	/* The current CPU is the reference one. */
	tsc_skew[curcpu] = 0;

	for (i = 0; i <= mp_maxid; i++) {
		if (i == curcpu)
			continue;

		if ((cp = pcpu_find(i)) == NULL)
			continue;

		map = 0;
		map |= (1 << curcpu);
		map |= (1 << i);

		smp_rendezvous_cpus(map, dtrace_gethrtime_init_sync,
		    dtrace_gethrtime_init_cpu,
		    smp_no_rendevous_barrier, (void *)(uintptr_t) i);

		tsc_skew[i] = tgt_cpu_tsc - hst_cpu_tsc;
	}
}

SYSINIT(dtrace_gethrtime_init, SI_SUB_SMP, SI_ORDER_ANY, dtrace_gethrtime_init, NULL);

/*
 * DTrace needs a high resolution time function that can be called from
 * probe context and is guaranteed not to be instrumented with probes
 * itself.
 *
 * Returns nanoseconds since boot.
 */
uint64_t
dtrace_gethrtime(void)
{
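	/*
	 * Adjust the local TSC reading by this CPU's measured skew and
	 * convert the result from TSC ticks to nanoseconds.
	 */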
	return ((rdtsc() + tsc_skew[curcpu]) * (int64_t) 1000000000 / tsc_freq);
}

uint64_t
dtrace_gethrestime(void)
{
	printf("%s(%d): XXX\n", __func__, __LINE__);
	return (0);
}

/* Function to handle DTrace traps during probes. See amd64/amd64/trap.c */
int
dtrace_trap(struct trapframe *frame, u_int type)
{
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * Check if DTrace has enabled 'no-fault' mode:
	 */
	if ((cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) != 0) {
		/*
		 * There are only a couple of trap types that are expected.
		 * All the rest will be handled in the usual way.
		 */
		switch (type) {
		/* Privileged instruction fault. */
		case T_PRIVINFLT:
			break;
		/* General protection fault. */
		case T_PROTFLT:
			/* Flag an illegal operation. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_rip += dtrace_instr_size((u_char *) frame->tf_rip);
			return (1);
		/* Page fault. */
		case T_PAGEFLT:
			/* Flag a bad address. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
			cpu_core[curcpu].cpuc_dtrace_illval = frame->tf_addr;

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_rip += dtrace_instr_size((u_char *) frame->tf_rip);
			return (1);
		default:
			/* Handle all other traps in the usual way. */
			break;
		}
	}

	/* Handle the trap in the usual way. */
	return (0);
}