xref: /titanic_52/usr/src/uts/sparc/dtrace/fasttrap_isa.c (revision 84ab085a13f931bc78e7415e7ce921dbaa14fcb3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/fasttrap_isa.h>
30 #include <sys/fasttrap_impl.h>
31 #include <sys/dtrace.h>
32 #include <sys/dtrace_impl.h>
33 #include <sys/cmn_err.h>
34 #include <sys/frame.h>
35 #include <sys/stack.h>
36 #include <sys/sysmacros.h>
37 #include <sys/trap.h>
38 
39 #include <v9/sys/machpcb.h>
40 #include <v9/sys/privregs.h>
41 
42 /*
43  * Lossless User-Land Tracing on SPARC
44  * -----------------------------------
45  *
46  * The Basic Idea
47  *
48  * The most important design constraint is, of course, correct execution of
49  * the user thread above all else. The next most important goal is rapid
50  * execution. We combine execution of instructions in user-land with
51  * emulation of certain instructions in the kernel to aim for complete
52  * correctness and maximal performance.
53  *
54  * We take advantage of the split PC/NPC architecture to speed up logical
55  * single-stepping; when we copy an instruction out to the scratch space in
56  * the ulwp_t structure (held in the %g7 register on SPARC), we can
57  * effectively single step by setting the PC to our scratch space and leaving
58  * the NPC alone. This executes the replaced instruction and then continues
 * on without having to reenter the kernel as with single-stepping. The
 * obvious caveat is for instructions whose execution is PC dependent --
61  * branches, call and link instructions (call and jmpl), and the rdpc
62  * instruction. These instructions cannot be executed in the manner described
63  * so they must be emulated in the kernel.
64  *
 * Emulation for this small set of instructions is fairly simple; the most
66  * difficult part being emulating branch conditions.
67  *
68  *
69  * A Cache Heavy Portfolio
70  *
71  * It's important to note at this time that copying an instruction out to the
72  * ulwp_t scratch space in user-land is rather complicated. SPARC has
73  * separate data and instruction caches so any writes to the D$ (using a
74  * store instruction for example) aren't necessarily reflected in the I$.
75  * The flush instruction can be used to synchronize the two and must be used
76  * for any self-modifying code, but the flush instruction only applies to the
77  * primary address space (the absence of a flusha analogue to the flush
78  * instruction that accepts an ASI argument is an obvious omission from SPARC
79  * v9 where the notion of the alternate address space was introduced on
80  * SPARC). To correctly copy out the instruction we must use a block store
81  * that doesn't allocate in the D$ and ensures synchronization with the I$;
82  * see dtrace_blksuword32() for the implementation  (this function uses
83  * ASI_BLK_COMMIT_S to write a block through the secondary ASI in the manner
84  * described). Refer to the UltraSPARC I/II manual for details on the
85  * ASI_BLK_COMMIT_S ASI.
86  *
87  *
88  * Return Subtleties
89  *
90  * When we're firing a return probe we need to expose the value returned by
91  * the function being traced. Since the function can set the return value
92  * in its last instruction, we need to fire the return probe only _after_
93  * the effects of the instruction are apparent. For instructions that we
94  * emulate, we can call dtrace_probe() after we've performed the emulation;
95  * for instructions that we execute after we return to user-land, we set
96  * %pc to the instruction we copied out (as described above) and set %npc
97  * to a trap instruction stashed in the ulwp_t structure. After the traced
98  * instruction is executed, the trap instruction returns control to the
99  * kernel where we can fire the return probe.
100  *
101  * This need for a second trap in cases where we execute the traced
102  * instruction makes it all the more important to emulate the most common
103  * instructions to avoid the second trip in and out of the kernel.
104  *
105  *
106  * Making it Fast
107  *
108  * Since copying out an instruction is neither simple nor inexpensive for the
109  * CPU, we should attempt to avoid doing it in as many cases as possible.
110  * Since function entry and return are usually the most interesting probe
111  * sites, we attempt to tune the performance of the fasttrap provider around
112  * instructions typically in those places.
113  *
114  * Looking at a bunch of functions in libraries and executables reveals that
115  * most functions begin with either a save or a sethi (to setup a larger
116  * argument to the save) and end with a restore or an or (in the case of leaf
117  * functions). To try to improve performance, we emulate all of these
118  * instructions in the kernel.
119  *
120  * The save and restore instructions are a little tricky since they perform
 * register window manipulation. Rather than trying to tinker with the
122  * register windows from the kernel, we emulate the implicit add that takes
123  * place as part of those instructions and set the %pc to point to a simple
 * save or restore we've hidden in the ulwp_t structure. If we're in a return
 * probe and want to make it seem as though the tracepoint has been completely
126  * executed we need to remember that we've pulled this trick with restore and
127  * pull registers from the previous window (the one that we'll switch to once
128  * the simple store instruction is executed) rather than the current one. This
129  * is why in the case of emulating a restore we set the DTrace CPU flag
130  * CPU_DTRACE_FAKERESTORE before calling dtrace_probe() for the return probes
131  * (see fasttrap_return_common()).
132  */
133 
134 #define	OP(x)		((x) >> 30)
135 #define	OP2(x)		(((x) >> 22) & 0x07)
136 #define	OP3(x)		(((x) >> 19) & 0x3f)
137 #define	RCOND(x)	(((x) >> 25) & 0x07)
138 #define	COND(x)		(((x) >> 25) & 0x0f)
139 #define	A(x)		(((x) >> 29) & 0x01)
140 #define	I(x)		(((x) >> 13) & 0x01)
141 #define	RD(x)		(((x) >> 25) & 0x1f)
142 #define	RS1(x)		(((x) >> 14) & 0x1f)
143 #define	RS2(x)		(((x) >> 0) & 0x1f)
144 #define	CC(x)		(((x) >> 20) & 0x03)
145 #define	DISP16(x)	((((x) >> 6) & 0xc000) | ((x) & 0x3fff))
146 #define	DISP22(x)	((x) & 0x3fffff)
147 #define	DISP19(x)	((x) & 0x7ffff)
148 #define	DISP30(x)	((x) & 0x3fffffff)
149 #define	SW_TRAP(x)	((x) & 0x7f)
150 
151 #define	OP3_OR		0x02
152 #define	OP3_RD		0x28
153 #define	OP3_JMPL	0x38
154 #define	OP3_RETURN	0x39
155 #define	OP3_TCC		0x3a
156 #define	OP3_SAVE	0x3c
157 #define	OP3_RESTORE	0x3d
158 
159 #define	OP3_PREFETCH	0x2d
160 #define	OP3_CASA	0x3c
161 #define	OP3_PREFETCHA	0x3d
162 #define	OP3_CASXA	0x3e
163 
164 #define	OP2_ILLTRAP	0x0
165 #define	OP2_BPcc	0x1
166 #define	OP2_Bicc	0x2
167 #define	OP2_BPr		0x3
168 #define	OP2_SETHI	0x4
169 #define	OP2_FBPfcc	0x5
170 #define	OP2_FBfcc	0x6
171 
172 #define	R_G0		0
173 #define	R_O0		8
174 #define	R_SP		14
175 #define	R_I0		24
176 #define	R_I1		25
177 #define	R_I2		26
178 #define	R_I3		27
179 
180 /*
181  * Check the comment in fasttrap.h when changing these offsets or adding
182  * new instructions.
183  */
184 #define	FASTTRAP_OFF_SAVE	64
185 #define	FASTTRAP_OFF_RESTORE	68
186 #define	FASTTRAP_OFF_FTRET	72
187 #define	FASTTRAP_OFF_RETURN	76
188 
189 #define	BREAKPOINT_INSTR	0x91d02001	/* ta 1 */
190 
191 /*
192  * Tunable to let users turn off the fancy save instruction optimization.
193  * If a program is non-ABI compliant, there's a possibility that the save
194  * instruction optimization could cause an error.
195  */
196 int fasttrap_optimize_save = 1;
197 
198 static uint64_t
199 fasttrap_anarg(struct regs *rp, int argno)
200 {
201 	uint64_t value;
202 
203 	if (argno < 6)
204 		return ((&rp->r_o0)[argno]);
205 
206 	if (curproc->p_model == DATAMODEL_NATIVE) {
207 		struct frame *fr = (struct frame *)(rp->r_sp + STACK_BIAS);
208 
209 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
210 		value = dtrace_fulword(&fr->fr_argd[argno]);
211 		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR |
212 		    CPU_DTRACE_BADALIGN);
213 	} else {
214 		struct frame32 *fr = (struct frame32 *)rp->r_sp;
215 
216 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
217 		value = dtrace_fuword32(&fr->fr_argd[argno]);
218 		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR |
219 		    CPU_DTRACE_BADALIGN);
220 	}
221 
222 	return (value);
223 }
224 
225 static ulong_t fasttrap_getreg(struct regs *, uint_t);
226 static void fasttrap_putreg(struct regs *, uint_t, ulong_t);
227 
228 int
229 fasttrap_probe(struct regs *rp)
230 {
231 	dtrace_probe(fasttrap_probe_id,
232 	    rp->r_o0, rp->r_o1, rp->r_o2, rp->r_o3, rp->r_o4);
233 
234 	rp->r_pc = rp->r_npc;
235 	rp->r_npc = rp->r_pc + 4;
236 
237 	return (0);
238 }
239 
240 static void
241 fasttrap_usdt_args(fasttrap_probe_t *probe, struct regs *rp, int argc,
242     uintptr_t *argv)
243 {
244 	int i, x, cap = MIN(argc, probe->ftp_nargs);
245 
246 	if (curproc->p_model == DATAMODEL_NATIVE) {
247 		struct frame *fr = (struct frame *)(rp->r_sp + STACK_BIAS);
248 		uintptr_t v;
249 
250 		for (i = 0; i < cap; i++) {
251 			x = probe->ftp_argmap[i];
252 
253 			if (x < 6)
254 				argv[i] = (&rp->r_o0)[x];
255 			else if (fasttrap_fulword(&fr->fr_argd[x], &v) != 0)
256 				argv[i] = 0;
257 		}
258 
259 	} else {
260 		struct frame32 *fr = (struct frame32 *)rp->r_sp;
261 		uint32_t v;
262 
263 		for (i = 0; i < cap; i++) {
264 			x = probe->ftp_argmap[i];
265 
266 			if (x < 6)
267 				argv[i] = (&rp->r_o0)[x];
268 			else if (fasttrap_fuword32(&fr->fr_argd[x], &v) != 0)
269 				argv[i] = 0;
270 		}
271 	}
272 
273 	for (; i < argc; i++) {
274 		argv[i] = 0;
275 	}
276 }
277 
/*
 * Fire any return probes associated with the tracepoint at 'pc' in process
 * 'pid'. Called either from fasttrap_pid_probe() (when the traced
 * instruction was emulated in the kernel) or from fasttrap_return_probe()
 * (after the traced instruction was executed in user-land). If
 * fake_restore is set, the emulated restore/return has not actually shifted
 * the register window yet, so return values are read from the %i registers
 * rather than the %o registers (see the block comment at the top of this
 * file).
 */
static void
fasttrap_return_common(struct regs *rp, uintptr_t pc, pid_t pid,
    uint_t fake_restore)
{
	fasttrap_tracepoint_t *tp;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	kmutex_t *pid_mtx;
	dtrace_icookie_t cookie;

	/* Per-CPU lock protecting this CPU's view of the tracepoint table. */
	pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
	mutex_enter(pid_mtx);
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/* Re-find the tracepoint in the hash table; it may have gone away. */
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    !tp->ftt_prov->ftp_defunct)
			break;
	}

	/*
	 * Don't sweat it if we can't find the tracepoint again; unlike
	 * when we're in fasttrap_pid_probe(), finding the tracepoint here
	 * is not essential to the correct execution of the process.
	 */
	if (tp == NULL || tp->ftt_retids == NULL) {
		mutex_exit(pid_mtx);
		return;
	}

	for (id = tp->ftt_retids; id != NULL; id = id->fti_next) {
		fasttrap_probe_t *probe = id->fti_probe;

		/*
		 * Offset (USDT-style) probes marshal their arguments via the
		 * probe's argument map rather than using the return-value
		 * convention below.
		 */
		if (probe->ftp_type == DTFTP_POST_OFFSETS) {
			if (probe->ftp_argmap == NULL) {
				dtrace_probe(probe->ftp_id, rp->r_o0, rp->r_o1,
				    rp->r_o2, rp->r_o3, rp->r_o4);
			} else {
				uintptr_t t[5];

				fasttrap_usdt_args(probe, rp,
				    sizeof (t) / sizeof (t[0]), t);

				dtrace_probe(probe->ftp_id, t[0], t[1],
				    t[2], t[3], t[4]);
			}
			continue;
		}

		/*
		 * If this is only a possible return point, we must
		 * be looking at a potential tail call in leaf context.
		 * If the %npc is still within this function, then we
		 * must have misidentified a jmpl as a tail-call when it
		 * is, in fact, part of a jump table. It would be nice to
		 * remove this tracepoint, but this is neither the time
		 * nor the place.
		 */
		if ((tp->ftt_flags & FASTTRAP_F_RETMAYBE) &&
		    rp->r_npc - probe->ftp_faddr < probe->ftp_fsize)
			continue;

		/*
		 * It's possible for a function to branch to the delay slot
		 * of an instruction that we've identified as a return site.
		 * We can detect this spurious return probe activation by
		 * observing that in this case %npc will be %pc + 4 and %npc
		 * will be inside the current function (unless the user is
		 * doing _crazy_ instruction picking in which case there's
		 * very little we can do). The second check is important
		 * in case the last instructions of a function make a tail-
		 * call to the function located immediately subsequent.
		 */
		if (rp->r_npc == rp->r_pc + 4 &&
		    rp->r_npc - probe->ftp_faddr < probe->ftp_fsize)
			continue;

		/*
		 * The first argument is the offset of return tracepoint
		 * in the function; the remaining arguments are the return
		 * values.
		 *
		 * If fake_restore is set, we need to pull the return values
		 * out of the %i's rather than the %o's -- a little trickier.
		 */
		if (!fake_restore) {
			dtrace_probe(probe->ftp_id, pc - probe->ftp_faddr,
			    rp->r_o0, rp->r_o1, rp->r_o2, rp->r_o3);
		} else {
			uintptr_t arg0 = fasttrap_getreg(rp, R_I0);
			uintptr_t arg1 = fasttrap_getreg(rp, R_I1);
			uintptr_t arg2 = fasttrap_getreg(rp, R_I2);
			uintptr_t arg3 = fasttrap_getreg(rp, R_I3);

			/*
			 * CPU_DTRACE_FAKERESTORE signals that the register
			 * window shift implied by the restore hasn't
			 * happened yet (see the file's block comment);
			 * interrupts are disabled while the flag is set.
			 */
			cookie = dtrace_interrupt_disable();
			DTRACE_CPUFLAG_SET(CPU_DTRACE_FAKERESTORE);
			dtrace_probe(probe->ftp_id, pc - probe->ftp_faddr,
			    arg0, arg1, arg2, arg3);
			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_FAKERESTORE);
			dtrace_interrupt_enable(cookie);
		}
	}

	mutex_exit(pid_mtx);
}
383 
384 int
385 fasttrap_pid_probe(struct regs *rp)
386 {
387 	proc_t *p = curproc;
388 	fasttrap_tracepoint_t *tp, tp_local;
389 	fasttrap_id_t *id;
390 	pid_t pid;
391 	uintptr_t pc = rp->r_pc;
392 	uintptr_t npc = rp->r_npc;
393 	uintptr_t orig_pc = pc;
394 	fasttrap_bucket_t *bucket;
395 	kmutex_t *pid_mtx;
396 	uint_t fake_restore = 0;
397 	dtrace_icookie_t cookie;
398 
399 	/*
400 	 * It's possible that a user (in a veritable orgy of bad planning)
401 	 * could redirect this thread's flow of control before it reached the
402 	 * return probe fasttrap. In this case we need to kill the process
403 	 * since it's in a unrecoverable state.
404 	 */
405 	if (curthread->t_dtrace_step) {
406 		ASSERT(curthread->t_dtrace_on);
407 		fasttrap_sigtrap(p, curthread, pc);
408 		return (0);
409 	}
410 
411 	/*
412 	 * Clear all user tracing flags.
413 	 */
414 	curthread->t_dtrace_ft = 0;
415 	curthread->t_dtrace_pc = 0;
416 	curthread->t_dtrace_npc = 0;
417 	curthread->t_dtrace_scrpc = 0;
418 	curthread->t_dtrace_astpc = 0;
419 
420 	/*
421 	 * Treat a child created by a call to vfork(2) as if it were its
422 	 * parent. We know that there's only one thread of control in such a
423 	 * process: this one.
424 	 */
425 	while (p->p_flag & SVFORK) {
426 		p = p->p_parent;
427 	}
428 
429 	pid = p->p_pid;
430 	pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
431 	mutex_enter(pid_mtx);
432 	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
433 
434 	/*
435 	 * Lookup the tracepoint that the process just hit.
436 	 */
437 	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
438 		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
439 		    !tp->ftt_prov->ftp_defunct)
440 			break;
441 	}
442 
443 	/*
444 	 * If we couldn't find a matching tracepoint, either a tracepoint has
445 	 * been inserted without using the pid<pid> ioctl interface (see
446 	 * fasttrap_ioctl), or somehow we have mislaid this tracepoint.
447 	 */
448 	if (tp == NULL) {
449 		mutex_exit(pid_mtx);
450 		return (-1);
451 	}
452 
453 	for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
454 		fasttrap_probe_t *probe = id->fti_probe;
455 		int isentry;
456 		/*
457 		 * We note that this was an entry probe to help ustack() find
458 		 * the first caller.
459 		 */
460 		if ((isentry = (probe->ftp_type == DTFTP_ENTRY)) != 0) {
461 			cookie = dtrace_interrupt_disable();
462 			DTRACE_CPUFLAG_SET(CPU_DTRACE_ENTRY);
463 		}
464 		dtrace_probe(probe->ftp_id, rp->r_o0, rp->r_o1, rp->r_o2,
465 		    rp->r_o3, rp->r_o4);
466 		if (isentry) {
467 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_ENTRY);
468 			dtrace_interrupt_enable(cookie);
469 		}
470 	}
471 
472 	/*
473 	 * We're about to do a bunch of work so we cache a local copy of
474 	 * the tracepoint to emulate the instruction, and then find the
475 	 * tracepoint again later if we need to light up any return probes.
476 	 */
477 	tp_local = *tp;
478 	mutex_exit(pid_mtx);
479 	tp = &tp_local;
480 
481 	/*
482 	 * We emulate certain types of instructions do ensure correctness
483 	 * (in the case of position dependent instructions) or optimize
484 	 * common cases. The rest we have the thread execute back in user-
485 	 * land.
486 	 */
487 	switch (tp->ftt_type) {
488 	case FASTTRAP_T_SAVE:
489 	{
490 		int32_t imm;
491 
492 		/*
493 		 * This an optimization to let us handle function entry
494 		 * probes more efficiently. Many functions begin with a save
495 		 * instruction that follows the pattern:
496 		 *	save	%sp, <imm>, %sp
497 		 *
498 		 * Meanwhile, we've stashed the instruction:
499 		 *	save	%g1, %g0, %sp
500 		 *
501 		 * off of %g7, so all we have to do is stick the right value
502 		 * into %g1 and reset %pc to point to the instruction we've
503 		 * cleverly hidden (%npc should not be touched).
504 		 */
505 
506 		imm = tp->ftt_instr << 19;
507 		imm >>= 19;
508 		rp->r_g1 = rp->r_sp + imm;
509 		pc = rp->r_g7 + FASTTRAP_OFF_SAVE;
510 		break;
511 	}
512 
513 	case FASTTRAP_T_RESTORE:
514 	{
515 		ulong_t value;
516 		uint_t rd;
517 
518 		/*
519 		 * This is an optimization to let us handle function
520 		 * return probes more efficiently. Most non-leaf functions
521 		 * end with the sequence:
522 		 *	ret
523 		 *	restore	<reg>, <reg_or_imm>, %oX
524 		 *
525 		 * We've stashed the instruction:
526 		 *	restore	%g0, %g0, %g0
527 		 *
528 		 * off of %g7 so we just need to place the correct value
529 		 * in the right %i register (since after our fake-o
530 		 * restore, the %i's will become the %o's) and set the %pc
531 		 * to point to our hidden restore. We also set fake_restore to
532 		 * let fasttrap_return_common() know that it will find the
533 		 * return values in the %i's rather than the %o's.
534 		 */
535 
536 		if (I(tp->ftt_instr)) {
537 			int32_t imm;
538 
539 			imm = tp->ftt_instr << 19;
540 			imm >>= 19;
541 			value = fasttrap_getreg(rp, RS1(tp->ftt_instr)) + imm;
542 		} else {
543 			value = fasttrap_getreg(rp, RS1(tp->ftt_instr)) +
544 			    fasttrap_getreg(rp, RS2(tp->ftt_instr));
545 		}
546 
547 		/*
548 		 * Convert %o's to %i's; leave %g's as they are.
549 		 */
550 		rd = RD(tp->ftt_instr);
551 		fasttrap_putreg(rp, ((rd & 0x18) == 0x8) ? rd + 16 : rd, value);
552 
553 		pc = rp->r_g7 + FASTTRAP_OFF_RESTORE;
554 		fake_restore = 1;
555 		break;
556 	}
557 
558 	case FASTTRAP_T_RETURN:
559 	{
560 		uintptr_t target;
561 
562 		/*
563 		 * A return instruction is like a jmpl (without the link
564 		 * part) that executes an implicit restore. We've stashed
565 		 * the instruction:
566 		 *	return %o0
567 		 *
568 		 * off of %g7 so we just need to place the target in %o0
569 		 * and set the %pc to point to the stashed return instruction.
570 		 * We use %o0 since that register disappears after the return
571 		 * executes, erasing any evidence of this tampering.
572 		 */
573 		if (I(tp->ftt_instr)) {
574 			int32_t imm;
575 
576 			imm = tp->ftt_instr << 19;
577 			imm >>= 19;
578 			target = fasttrap_getreg(rp, RS1(tp->ftt_instr)) + imm;
579 		} else {
580 			target = fasttrap_getreg(rp, RS1(tp->ftt_instr)) +
581 			    fasttrap_getreg(rp, RS2(tp->ftt_instr));
582 		}
583 
584 		fasttrap_putreg(rp, R_O0, target);
585 
586 		pc = rp->r_g7 + FASTTRAP_OFF_RETURN;
587 		fake_restore = 1;
588 		break;
589 	}
590 
591 	case FASTTRAP_T_OR:
592 	{
593 		ulong_t value;
594 
595 		if (I(tp->ftt_instr)) {
596 			int32_t imm;
597 
598 			imm = tp->ftt_instr << 19;
599 			imm >>= 19;
600 			value = fasttrap_getreg(rp, RS1(tp->ftt_instr)) | imm;
601 		} else {
602 			value = fasttrap_getreg(rp, RS1(tp->ftt_instr)) |
603 			    fasttrap_getreg(rp, RS2(tp->ftt_instr));
604 		}
605 
606 		fasttrap_putreg(rp, RD(tp->ftt_instr), value);
607 		pc = rp->r_npc;
608 		npc = pc + 4;
609 		break;
610 	}
611 
612 	case FASTTRAP_T_SETHI:
613 		if (RD(tp->ftt_instr) != R_G0) {
614 			uint32_t imm32 = tp->ftt_instr << 10;
615 			fasttrap_putreg(rp, RD(tp->ftt_instr), (ulong_t)imm32);
616 		}
617 		pc = rp->r_npc;
618 		npc = pc + 4;
619 		break;
620 
621 	case FASTTRAP_T_CCR:
622 	{
623 		uint_t c, v, z, n, taken;
624 		uint_t ccr = rp->r_tstate >> TSTATE_CCR_SHIFT;
625 
626 		if (tp->ftt_cc != 0)
627 			ccr >>= 4;
628 
629 		c = (ccr >> 0) & 1;
630 		v = (ccr >> 1) & 1;
631 		z = (ccr >> 2) & 1;
632 		n = (ccr >> 3) & 1;
633 
634 		switch (tp->ftt_code) {
635 		case 0x0:	/* BN */
636 			taken = 0;		break;
637 		case 0x1:	/* BE */
638 			taken = z;		break;
639 		case 0x2:	/* BLE */
640 			taken = z | (n ^ v);	break;
641 		case 0x3:	/* BL */
642 			taken = n ^ v;		break;
643 		case 0x4:	/* BLEU */
644 			taken = c | z;		break;
645 		case 0x5:	/* BCS (BLU) */
646 			taken = c;		break;
647 		case 0x6:	/* BNEG */
648 			taken = n;		break;
649 		case 0x7:	/* BVS */
650 			taken = v;		break;
651 		case 0x8:	/* BA */
652 			/*
653 			 * We handle the BA case differently since the annul
654 			 * bit means something slightly different.
655 			 */
656 			panic("fasttrap: mishandled a branch");
657 			taken = 1;		break;
658 		case 0x9:	/* BNE */
659 			taken = ~z;		break;
660 		case 0xa:	/* BG */
661 			taken = ~(z | (n ^ v));	break;
662 		case 0xb:	/* BGE */
663 			taken = ~(n ^ v);	break;
664 		case 0xc:	/* BGU */
665 			taken = ~(c | z);	break;
666 		case 0xd:	/* BCC (BGEU) */
667 			taken = ~c;		break;
668 		case 0xe:	/* BPOS */
669 			taken = ~n;		break;
670 		case 0xf:	/* BVC */
671 			taken = ~v;		break;
672 		}
673 
674 		if (taken & 1) {
675 			pc = rp->r_npc;
676 			npc = tp->ftt_dest;
677 		} else if (tp->ftt_flags & FASTTRAP_F_ANNUL) {
678 			/*
679 			 * Untaken annulled branches don't execute the
680 			 * instruction in the delay slot.
681 			 */
682 			pc = rp->r_npc + 4;
683 			npc = pc + 4;
684 		} else {
685 			pc = rp->r_npc;
686 			npc = pc + 4;
687 		}
688 		break;
689 	}
690 
691 	case FASTTRAP_T_FCC:
692 	{
693 		uint_t fcc;
694 		uint_t taken;
695 		uint64_t fsr;
696 
697 		dtrace_getfsr(&fsr);
698 
699 		if (tp->ftt_cc == 0) {
700 			fcc = (fsr >> 10) & 0x3;
701 		} else {
702 			uint_t shift;
703 			ASSERT(tp->ftt_cc <= 3);
704 			shift = 30 + tp->ftt_cc * 2;
705 			fcc = (fsr >> shift) & 0x3;
706 		}
707 
708 		switch (tp->ftt_code) {
709 		case 0x0:	/* FBN */
710 			taken = (1 << fcc) & (0|0|0|0);	break;
711 		case 0x1:	/* FBNE */
712 			taken = (1 << fcc) & (8|4|2|0);	break;
713 		case 0x2:	/* FBLG */
714 			taken = (1 << fcc) & (0|4|2|0);	break;
715 		case 0x3:	/* FBUL */
716 			taken = (1 << fcc) & (8|0|2|0);	break;
717 		case 0x4:	/* FBL */
718 			taken = (1 << fcc) & (0|0|2|0);	break;
719 		case 0x5:	/* FBUG */
720 			taken = (1 << fcc) & (8|4|0|0);	break;
721 		case 0x6:	/* FBG */
722 			taken = (1 << fcc) & (0|4|0|0);	break;
723 		case 0x7:	/* FBU */
724 			taken = (1 << fcc) & (8|0|0|0);	break;
725 		case 0x8:	/* FBA */
726 			/*
727 			 * We handle the FBA case differently since the annul
728 			 * bit means something slightly different.
729 			 */
730 			panic("fasttrap: mishandled a branch");
731 			taken = (1 << fcc) & (8|4|2|1);	break;
732 		case 0x9:	/* FBE */
733 			taken = (1 << fcc) & (0|0|0|1);	break;
734 		case 0xa:	/* FBUE */
735 			taken = (1 << fcc) & (8|0|0|1);	break;
736 		case 0xb:	/* FBGE */
737 			taken = (1 << fcc) & (0|4|0|1);	break;
738 		case 0xc:	/* FBUGE */
739 			taken = (1 << fcc) & (8|4|0|1);	break;
740 		case 0xd:	/* FBLE */
741 			taken = (1 << fcc) & (0|0|2|1);	break;
742 		case 0xe:	/* FBULE */
743 			taken = (1 << fcc) & (8|0|2|1);	break;
744 		case 0xf:	/* FBO */
745 			taken = (1 << fcc) & (0|4|2|1);	break;
746 		}
747 
748 		if (taken) {
749 			pc = rp->r_npc;
750 			npc = tp->ftt_dest;
751 		} else if (tp->ftt_flags & FASTTRAP_F_ANNUL) {
752 			/*
753 			 * Untaken annulled branches don't execute the
754 			 * instruction in the delay slot.
755 			 */
756 			pc = rp->r_npc + 4;
757 			npc = pc + 4;
758 		} else {
759 			pc = rp->r_npc;
760 			npc = pc + 4;
761 		}
762 		break;
763 	}
764 
765 	case FASTTRAP_T_REG:
766 	{
767 		uint64_t value;
768 		uint_t taken;
769 		uint_t reg = RS1(tp->ftt_instr);
770 
771 		/*
772 		 * An ILP32 process shouldn't be using a branch predicated on
773 		 * an %i or an %l since it would violate the ABI. It's a
774 		 * violation of the ABI because we can't ensure deterministic
775 		 * behavior. We should have identified this case when we
776 		 * enabled the probe.
777 		 */
778 		ASSERT(p->p_model == DATAMODEL_LP64 || reg < 16);
779 
780 		value = fasttrap_getreg(rp, reg);
781 
782 		switch (tp->ftt_code) {
783 		case 0x1:	/* BRZ */
784 			taken = (value == 0);	break;
785 		case 0x2:	/* BRLEZ */
786 			taken = (value <= 0);	break;
787 		case 0x3:	/* BRLZ */
788 			taken = (value < 0);	break;
789 		case 0x5:	/* BRNZ */
790 			taken = (value != 0);	break;
791 		case 0x6:	/* BRGZ */
792 			taken = (value > 0);	break;
793 		case 0x7:	/* BRGEZ */
794 			taken = (value <= 0);	break;
795 		default:
796 		case 0x0:
797 		case 0x4:
798 			panic("fasttrap: mishandled a branch");
799 		}
800 
801 		if (taken) {
802 			pc = rp->r_npc;
803 			npc = tp->ftt_dest;
804 		} else if (tp->ftt_flags & FASTTRAP_F_ANNUL) {
805 			/*
806 			 * Untaken annulled branches don't execute the
807 			 * instruction in the delay slot.
808 			 */
809 			pc = rp->r_npc + 4;
810 			npc = pc + 4;
811 		} else {
812 			pc = rp->r_npc;
813 			npc = pc + 4;
814 		}
815 		break;
816 	}
817 
818 	case FASTTRAP_T_ALWAYS:
819 		/*
820 		 * BAs, BA,As...
821 		 */
822 
823 		if (tp->ftt_flags & FASTTRAP_F_ANNUL) {
824 			/*
825 			 * Annulled branch always instructions never execute
826 			 * the instruction in the delay slot.
827 			 */
828 			pc = tp->ftt_dest;
829 			npc = tp->ftt_dest + 4;
830 		} else {
831 			pc = rp->r_npc;
832 			npc = tp->ftt_dest;
833 		}
834 		break;
835 
836 	case FASTTRAP_T_RDPC:
837 		fasttrap_putreg(rp, RD(tp->ftt_instr), rp->r_pc);
838 		pc = rp->r_npc;
839 		npc = pc + 4;
840 		break;
841 
842 	case FASTTRAP_T_CALL:
843 		/*
844 		 * It's a call _and_ link remember...
845 		 */
846 		rp->r_o7 = rp->r_pc;
847 		pc = rp->r_npc;
848 		npc = tp->ftt_dest;
849 		break;
850 
851 	case FASTTRAP_T_JMPL:
852 		pc = rp->r_npc;
853 
854 		if (I(tp->ftt_instr)) {
855 			uint_t rs1 = RS1(tp->ftt_instr);
856 			int32_t imm;
857 
858 			imm = tp->ftt_instr << 19;
859 			imm >>= 19;
860 			npc = fasttrap_getreg(rp, rs1) + imm;
861 		} else {
862 			uint_t rs1 = RS1(tp->ftt_instr);
863 			uint_t rs2 = RS2(tp->ftt_instr);
864 
865 			npc = fasttrap_getreg(rp, rs1) +
866 			    fasttrap_getreg(rp, rs2);
867 		}
868 
869 		/*
870 		 * Do the link part of the jump-and-link instruction.
871 		 */
872 		fasttrap_putreg(rp, RD(tp->ftt_instr), rp->r_pc);
873 
874 		break;
875 
876 	case FASTTRAP_T_COMMON:
877 	{
878 		curthread->t_dtrace_scrpc = rp->r_g7;
879 		curthread->t_dtrace_astpc = rp->r_g7 + FASTTRAP_OFF_FTRET;
880 
881 		/*
882 		 * Copy the instruction to a reserved location in the
883 		 * user-land thread structure, then set the PC to that
884 		 * location and leave the NPC alone. We take pains to ensure
885 		 * consistency in the instruction stream (See SPARC
886 		 * Architecture Manual Version 9, sections 8.4.7, A.20, and
887 		 * H.1.6; UltraSPARC I/II User's Manual, sections 3.1.1.1,
888 		 * and 13.6.4) by using the ASI ASI_BLK_COMMIT_S to copy the
889 		 * instruction into the user's address space without
890 		 * bypassing the I$. There's no AS_USER version of this ASI
891 		 * (as exist for other ASIs) so we use the lofault
892 		 * mechanism to catch faults.
893 		 */
894 		if (dtrace_blksuword32(rp->r_g7, &tp->ftt_instr, 1) == -1) {
895 			/*
896 			 * If the copyout fails, then the process's state
897 			 * is not consistent (the effects of the traced
898 			 * instruction will never be seen). This process
899 			 * cannot be allowed to continue execution.
900 			 */
901 			fasttrap_sigtrap(curproc, curthread, pc);
902 			return (0);
903 		}
904 
905 		curthread->t_dtrace_pc = pc;
906 		curthread->t_dtrace_npc = npc;
907 		curthread->t_dtrace_on = 1;
908 
909 		pc = curthread->t_dtrace_scrpc;
910 
911 		if (tp->ftt_retids != NULL) {
912 			curthread->t_dtrace_step = 1;
913 			curthread->t_dtrace_ret = 1;
914 			npc = curthread->t_dtrace_astpc;
915 		}
916 		break;
917 	}
918 
919 	default:
920 		panic("fasttrap: mishandled an instruction");
921 	}
922 
923 	/*
924 	 * This bit me in the ass a couple of times, so lets toss this
925 	 * in as a cursory sanity check.
926 	 */
927 	ASSERT(pc != rp->r_g7 + 4);
928 	ASSERT(pc != rp->r_g7 + 8);
929 
930 	/*
931 	 * If there were no return probes when we first found the tracepoint,
932 	 * we should feel no obligation to honor any return probes that were
933 	 * subsequently enabled -- they'll just have to wait until the next
934 	 * time around.
935 	 */
936 	if (tp->ftt_retids != NULL) {
937 		/*
938 		 * We need to wait until the results of the instruction are
939 		 * apparent before invoking any return probes. If this
940 		 * instruction was emulated we can just call
941 		 * fasttrap_return_common(); if it needs to be executed, we
942 		 * need to wait until we return to the kernel.
943 		 */
944 		if (tp->ftt_type != FASTTRAP_T_COMMON) {
945 			fasttrap_return_common(rp, orig_pc, pid, fake_restore);
946 		} else {
947 			ASSERT(curthread->t_dtrace_ret != 0);
948 			ASSERT(curthread->t_dtrace_pc == orig_pc);
949 			ASSERT(curthread->t_dtrace_scrpc == rp->r_g7);
950 			ASSERT(npc == curthread->t_dtrace_astpc);
951 		}
952 	}
953 
954 	ASSERT(pc != 0);
955 	rp->r_pc = pc;
956 	rp->r_npc = npc;
957 
958 	return (0);
959 }
960 
961 int
962 fasttrap_return_probe(struct regs *rp)
963 {
964 	proc_t *p = ttoproc(curthread);
965 	pid_t pid;
966 	uintptr_t pc = curthread->t_dtrace_pc;
967 	uintptr_t npc = curthread->t_dtrace_npc;
968 
969 	curthread->t_dtrace_pc = 0;
970 	curthread->t_dtrace_npc = 0;
971 	curthread->t_dtrace_scrpc = 0;
972 	curthread->t_dtrace_astpc = 0;
973 
974 	/*
975 	 * Treat a child created by a call to vfork(2) as if it were its
976 	 * parent. We know there's only one thread of control in such a
977 	 * process: this one.
978 	 */
979 	while (p->p_flag & SVFORK) {
980 		p = p->p_parent;
981 	}
982 
983 	/*
984 	 * We set the %pc and %npc to their values when the traced
985 	 * instruction was initially executed so that it appears to
986 	 * dtrace_probe() that we're on the original instruction, and so that
987 	 * the user can't easily detect our complex web of lies.
988 	 * dtrace_return_probe() (our caller) will correctly set %pc and %npc
989 	 * after we return.
990 	 */
991 	rp->r_pc = pc;
992 	rp->r_npc = npc;
993 
994 	pid = p->p_pid;
995 	fasttrap_return_common(rp, pc, pid, 0);
996 
997 	return (0);
998 }
999 
1000 int
1001 fasttrap_tracepoint_install(proc_t *p, fasttrap_tracepoint_t *tp)
1002 {
1003 	fasttrap_instr_t instr = FASTTRAP_INSTR;
1004 
1005 	if (uwrite(p, &instr, 4, tp->ftt_pc) != 0)
1006 		return (-1);
1007 
1008 	return (0);
1009 }
1010 
1011 int
1012 fasttrap_tracepoint_remove(proc_t *p, fasttrap_tracepoint_t *tp)
1013 {
1014 	fasttrap_instr_t instr;
1015 
1016 	/*
1017 	 * Distinguish between read or write failures and a changed
1018 	 * instruction.
1019 	 */
1020 	if (uread(p, &instr, 4, tp->ftt_pc) != 0)
1021 		return (0);
1022 	if (instr != FASTTRAP_INSTR && instr != BREAKPOINT_INSTR)
1023 		return (0);
1024 	if (uwrite(p, &tp->ftt_instr, 4, tp->ftt_pc) != 0)
1025 		return (-1);
1026 
1027 	return (0);
1028 }
1029 
/*
 * Initialize tracepoint tp for the instruction at address pc in process p.
 * We read and decode the instruction to decide how it will be handled
 * when the tracepoint fires: most instructions are executed on the
 * process's behalf via a pc/npc trick, but pc-relative control transfers
 * (and a few common instructions handled for speed) are emulated in the
 * kernel. Returns 0 on success; -1 if the instruction can't be read or
 * is one we refuse to trace (illtraps, breakpoint traps, illegal opcodes,
 * and ABI-violating register-predicated branches).
 */
int
fasttrap_tracepoint_init(proc_t *p, fasttrap_probe_t *probe,
    fasttrap_tracepoint_t *tp, uintptr_t pc)
{
	uint32_t instr;
	int32_t disp;

	/*
	 * Read the instruction at the given address out of the process's
	 * address space. We don't have to worry about a debugger
	 * changing this instruction before we overwrite it with our trap
	 * instruction since P_PR_LOCK is set.
	 */
	if (uread(p, &instr, 4, pc) != 0)
		return (-1);

	/*
	 * Decode the instruction to fill in the probe flags. We can have
	 * the process execute most instructions on its own using a pc/npc
	 * trick, but pc-relative control transfer present a problem since
	 * we're relocating the instruction. We emulate these instructions
	 * in the kernel. We assume a default type and over-write that as
	 * needed.
	 *
	 * pc-relative instructions must be emulated for correctness;
	 * other instructions (which represent a large set of commonly traced
	 * instructions) are emulated or otherwise optimized for performance.
	 */
	tp->ftt_type = FASTTRAP_T_COMMON;
	if (OP(instr) == 1) {
		/*
		 * Call instructions.
		 */
		tp->ftt_type = FASTTRAP_T_CALL;
		/*
		 * The 30-bit word displacement is scaled to a byte offset.
		 */
		disp = DISP30(instr) << 2;
		tp->ftt_dest = pc + (intptr_t)disp;

	} else if (OP(instr) == 0) {
		/*
		 * Branch instructions.
		 *
		 * Unconditional branches need careful attention when they're
		 * annulled: annulled unconditional branches never execute
		 * the instruction in the delay slot.
		 */
		switch (OP2(instr)) {
		case OP2_ILLTRAP:
		case 0x7:
			/*
			 * The compiler may place an illtrap after a call to
			 * a function that returns a structure. In the case of
			 * a returned structure, the compiler places an illtrap
			 * whose const22 field is the size of the returned
			 * structure immediately following the delay slot of
			 * the call. To stay out of the way, we refuse to
			 * place tracepoints on top of illtrap instructions.
			 *
			 * This is one of the dumbest architectural decisions
			 * I've ever had to work around.
			 *
			 * We also identify the only illegal op2 value (See
			 * SPARC Architecture Manual Version 9, E.2 table 31).
			 */
			return (-1);

		case OP2_BPcc:
			/* cond field of 8 is "branch always" */
			if (COND(instr) == 8) {
				tp->ftt_type = FASTTRAP_T_ALWAYS;
			} else {
				/*
				 * Check for an illegal instruction.
				 */
				if (CC(instr) & 1)
					return (-1);
				tp->ftt_type = FASTTRAP_T_CCR;
				tp->ftt_cc = CC(instr);
				tp->ftt_code = COND(instr);
			}

			if (A(instr) != 0)
				tp->ftt_flags |= FASTTRAP_F_ANNUL;

			/*
			 * Sign-extend the 19-bit word displacement and scale
			 * it to a byte offset (a net shift left of 2).
			 */
			disp = DISP19(instr);
			disp <<= 13;
			disp >>= 11;
			tp->ftt_dest = pc + (intptr_t)disp;
			break;

		case OP2_Bicc:
			/* cond field of 8 is "branch always" */
			if (COND(instr) == 8) {
				tp->ftt_type = FASTTRAP_T_ALWAYS;
			} else {
				tp->ftt_type = FASTTRAP_T_CCR;
				tp->ftt_cc = 0;
				tp->ftt_code = COND(instr);
			}

			if (A(instr) != 0)
				tp->ftt_flags |= FASTTRAP_F_ANNUL;

			/*
			 * Sign-extend the 22-bit word displacement and scale
			 * it to a byte offset (a net shift left of 2).
			 */
			disp = DISP22(instr);
			disp <<= 10;
			disp >>= 8;
			tp->ftt_dest = pc + (intptr_t)disp;
			break;

		case OP2_BPr:
			/*
			 * Check for an illegal instruction.
			 */
			if ((RCOND(instr) & 3) == 0)
				return (-1);

			/*
			 * It's a violation of the v8plus ABI to use a
			 * register-predicated branch in a 32-bit app if
			 * the register used is an %l or an %i (%gs and %os
			 * are legit because they're not saved to the stack
			 * in 32-bit words when we take a trap).
			 */
			if (p->p_model == DATAMODEL_ILP32 && RS1(instr) >= 16)
				return (-1);

			tp->ftt_type = FASTTRAP_T_REG;
			if (A(instr) != 0)
				tp->ftt_flags |= FASTTRAP_F_ANNUL;
			/*
			 * Sign-extend the 16-bit word displacement and scale
			 * it to a byte offset (a net shift left of 2).
			 */
			disp = DISP16(instr);
			disp <<= 16;
			disp >>= 14;
			tp->ftt_dest = pc + (intptr_t)disp;
			tp->ftt_code = RCOND(instr);
			break;

		case OP2_SETHI:
			tp->ftt_type = FASTTRAP_T_SETHI;
			break;

		case OP2_FBPfcc:
			/* cond field of 8 is "branch always" */
			if (COND(instr) == 8) {
				tp->ftt_type = FASTTRAP_T_ALWAYS;
			} else {
				tp->ftt_type = FASTTRAP_T_FCC;
				tp->ftt_cc = CC(instr);
				tp->ftt_code = COND(instr);
			}

			if (A(instr) != 0)
				tp->ftt_flags |= FASTTRAP_F_ANNUL;

			/*
			 * Sign-extend the 19-bit word displacement and scale
			 * it to a byte offset (a net shift left of 2).
			 */
			disp = DISP19(instr);
			disp <<= 13;
			disp >>= 11;
			tp->ftt_dest = pc + (intptr_t)disp;
			break;

		case OP2_FBfcc:
			/* cond field of 8 is "branch always" */
			if (COND(instr) == 8) {
				tp->ftt_type = FASTTRAP_T_ALWAYS;
			} else {
				tp->ftt_type = FASTTRAP_T_FCC;
				tp->ftt_cc = 0;
				tp->ftt_code = COND(instr);
			}

			if (A(instr) != 0)
				tp->ftt_flags |= FASTTRAP_F_ANNUL;

			/*
			 * Sign-extend the 22-bit word displacement and scale
			 * it to a byte offset (a net shift left of 2).
			 */
			disp = DISP22(instr);
			disp <<= 10;
			disp >>= 8;
			tp->ftt_dest = pc + (intptr_t)disp;
			break;
		}

	} else if (OP(instr) == 2) {
		switch (OP3(instr)) {
		case OP3_RETURN:
			tp->ftt_type = FASTTRAP_T_RETURN;
			break;

		case OP3_JMPL:
			tp->ftt_type = FASTTRAP_T_JMPL;
			break;

		case OP3_RD:
			/* rs1 of 5 reads the %pc */
			if (RS1(instr) == 5)
				tp->ftt_type = FASTTRAP_T_RDPC;
			break;

		case OP3_SAVE:
			/*
			 * We optimize for save instructions at function
			 * entry; see the comment in fasttrap_pid_probe()
			 * (near FASTTRAP_T_SAVE) for details.
			 */
			if (fasttrap_optimize_save != 0 &&
			    probe->ftp_type == DTFTP_ENTRY &&
			    I(instr) == 1 && RD(instr) == R_SP)
				tp->ftt_type = FASTTRAP_T_SAVE;
			break;

		case OP3_RESTORE:
			/*
			 * We optimize restore instructions at function
			 * return; see the comment in fasttrap_pid_probe()
			 * (near FASTTRAP_T_RESTORE) for details.
			 *
			 * rd must be an %o or %g register.
			 */
			if ((RD(instr) & 0x10) == 0)
				tp->ftt_type = FASTTRAP_T_RESTORE;
			break;

		case OP3_OR:
			/*
			 * A large proportion of instructions in the delay
			 * slot of retl instructions are or's so we emulate
			 * these downstairs as an optimization.
			 */
			tp->ftt_type = FASTTRAP_T_OR;
			break;

		case OP3_TCC:
			/*
			 * Breakpoint instructions are effectively position-
			 * dependent since the debugger uses the %pc value
			 * to lookup which breakpoint was executed. As a
			 * result, we can't actually instrument breakpoints.
			 */
			if (SW_TRAP(instr) == ST_BREAKPOINT)
				return (-1);
			break;

		case 0x19:
		case 0x1d:
		case 0x29:
		case 0x33:
		case 0x3f:
			/*
			 * Identify illegal instructions (See SPARC
			 * Architecture Manual Version 9, E.2 table 32).
			 */
			return (-1);
		}
	} else if (OP(instr) == 3) {
		uint32_t op3 = OP3(instr);

		/*
		 * Identify illegal instructions (See SPARC Architecture
		 * Manual Version 9, E.2 table 33).
		 */
		if ((op3 & 0x28) == 0x28) {
			if (op3 != OP3_PREFETCH && op3 != OP3_CASA &&
			    op3 != OP3_PREFETCHA && op3 != OP3_CASXA)
				return (-1);
		} else {
			if ((op3 & 0x0f) == 0x0c || (op3 & 0x3b) == 0x31)
				return (-1);
		}
	}

	tp->ftt_instr = instr;

	/*
	 * We don't know how this tracepoint is going to be used, but in case
	 * it's used as part of a function return probe, we need to indicate
	 * whether it's always a return site or only potentially a return
	 * site. If it's part of a return probe, it's always going to be a
	 * return from that function if it's a restore instruction or if
	 * the previous instruction was a return. If we could reliably
	 * distinguish jump tables from return sites, this wouldn't be
	 * necessary.
	 */
	if (tp->ftt_type != FASTTRAP_T_RESTORE &&
	    (uread(p, &instr, 4, pc - sizeof (instr)) != 0 ||
	    !(OP(instr) == 2 && OP3(instr) == OP3_RETURN)))
		tp->ftt_flags |= FASTTRAP_F_RETMAYBE;

	return (0);
}
1310 
1311 /*ARGSUSED*/
1312 uint64_t
1313 fasttrap_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes)
1314 {
1315 	return (fasttrap_anarg(ttolwp(curthread)->lwp_regs, argno));
1316 }
1317 
1318 /*ARGSUSED*/
1319 uint64_t
1320 fasttrap_usdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno,
1321     int aframes)
1322 {
1323 	return (fasttrap_anarg(ttolwp(curthread)->lwp_regs, argno));
1324 }
1325 
/*
 * Counters recording which path fasttrap_getreg() used to satisfy a
 * register lookup (see that function below).
 */
static uint64_t fasttrap_getreg_fast_cnt;	/* found in live register windows */
static uint64_t fasttrap_getreg_mpcb_cnt;	/* found in the machpcb save area */
static uint64_t fasttrap_getreg_slow_cnt;	/* read from the user's stack */
1329 
/*
 * Fetch the value of user register `reg' (0-31) for the thread whose saved
 * user state is `rp'. %g and %o registers (reg < 16) come straight out of
 * the struct regs; %l and %i registers live in a register window, which
 * may still be in the register file, cached in the machpcb window-save
 * area, or flushed out to the user's stack. If the user's stack can't be
 * read, the process is in an irrecoverable state and is killed with
 * SIGILL.
 */
static ulong_t
fasttrap_getreg(struct regs *rp, uint_t reg)
{
	ulong_t value;
	dtrace_icookie_t cookie;
	struct machpcb *mpcb;
	extern ulong_t dtrace_getreg_win(uint_t, uint_t);

	/*
	 * We have the %os and %gs in our struct regs, but if we need to
	 * snag a %l or %i we need to go scrounging around in the process's
	 * address space.
	 */
	if (reg == 0)
		return (0);

	if (reg < 16)
		return ((&rp->r_g1)[reg - 1]);

	/*
	 * Before we look at the user's stack, we'll check the register
	 * windows to see if the information we want is in there.
	 */
	cookie = dtrace_interrupt_disable();
	if (dtrace_getotherwin() > 0) {
		value = dtrace_getreg_win(reg, 1);
		dtrace_interrupt_enable(cookie);

		atomic_add_64(&fasttrap_getreg_fast_cnt, 1);

		return (value);
	}
	dtrace_interrupt_enable(cookie);

	/*
	 * First check the machpcb structure to see if we've already read
	 * in the register window we're looking for; if we haven't, (and
	 * we probably haven't) try to copy in the value of the register.
	 */
	/* The struct regs sits at offset REGOFF within the machpcb. */
	mpcb = (struct machpcb *)((caddr_t)rp - REGOFF);

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		struct frame *fr = (struct frame *)(rp->r_sp + STACK_BIAS);

		if (mpcb->mpcb_wbcnt > 0) {
			struct rwindow *rwin = (void *)mpcb->mpcb_wbuf;
			int i = mpcb->mpcb_wbcnt;
			do {
				i--;
				/* Match the saved window to this frame's %sp. */
				if ((long)mpcb->mpcb_spbuf[i] != rp->r_sp)
					continue;

				atomic_add_64(&fasttrap_getreg_mpcb_cnt, 1);
				return (rwin[i].rw_local[reg - 16]);
			} while (i > 0);
		}

		if (fasttrap_fulword(&fr->fr_local[reg - 16], &value) != 0)
			goto err;
	} else {
		struct frame32 *fr = (struct frame32 *)(caddr32_t)rp->r_sp;
		uint32_t *v32 = (uint32_t *)&value;

		if (mpcb->mpcb_wbcnt > 0) {
			struct rwindow32 *rwin = (void *)mpcb->mpcb_wbuf;
			int i = mpcb->mpcb_wbcnt;
			do {
				i--;
				/* Match the saved window to this frame's %sp. */
				if ((long)mpcb->mpcb_spbuf[i] != rp->r_sp)
					continue;

				atomic_add_64(&fasttrap_getreg_mpcb_cnt, 1);
				return (rwin[i].rw_local[reg - 16]);
			} while (i > 0);
		}

		/*
		 * SPARC is big-endian: the 32-bit value read from the user
		 * stack is the low-order word of the 64-bit result; the
		 * high-order word is zeroed.
		 */
		if (fasttrap_fuword32(&fr->fr_local[reg - 16], &v32[1]) != 0)
			goto err;

		v32[0] = 0;
	}

	atomic_add_64(&fasttrap_getreg_slow_cnt, 1);
	return (value);

err:
	/*
	 * If the copy in failed, the process will be in a irrecoverable
	 * state, and we have no choice but to kill it.
	 */
	psignal(ttoproc(curthread), SIGILL);
	return (0);
}
1423 
/*
 * Counters recording which path fasttrap_putreg() used to record a
 * register value (see that function below).
 */
static uint64_t fasttrap_putreg_fast_cnt;	/* stored in live register windows */
static uint64_t fasttrap_putreg_mpcb_cnt;	/* stored in the machpcb save area */
static uint64_t fasttrap_putreg_slow_cnt;	/* written to the user's stack */
1427 
/*
 * Store `value' into user register `reg' (0-31) for the thread whose
 * saved user state is `rp'. %g and %o registers (reg < 16) are written
 * directly into the struct regs; %l and %i registers go into the live
 * register window if one is still present, into the machpcb window-save
 * area if a matching saved window exists, or out to the user's stack --
 * creating a new machpcb window entry if the stack write fails. If the
 * value can be recorded nowhere, the process is killed with SIGILL.
 */
static void
fasttrap_putreg(struct regs *rp, uint_t reg, ulong_t value)
{
	dtrace_icookie_t cookie;
	struct machpcb *mpcb;
	extern void dtrace_putreg_win(uint_t, ulong_t);

	if (reg == 0)
		return;

	if (reg < 16) {
		(&rp->r_g1)[reg - 1] = value;
		return;
	}

	/*
	 * If the user process is still using some register windows, we
	 * can just place the value in the correct window.
	 */
	cookie = dtrace_interrupt_disable();
	if (dtrace_getotherwin() > 0) {
		dtrace_putreg_win(reg, value);
		dtrace_interrupt_enable(cookie);
		atomic_add_64(&fasttrap_putreg_fast_cnt, 1);
		return;
	}
	dtrace_interrupt_enable(cookie);

	/*
	 * First see if there's a copy of the register window in the
	 * machpcb structure that we can modify; if there isn't try to
	 * copy out the value. If that fails, we try to create a new
	 * register window in the machpcb structure. While this isn't
	 * _precisely_ the intended use of the machpcb structure, it
	 * can't cause any problems since we know at this point in the
	 * code that all of the user's data have been flushed out of the
	 * register file (since %otherwin is 0).
	 */
	/* The struct regs sits at offset REGOFF within the machpcb. */
	mpcb = (struct machpcb *)((caddr_t)rp - REGOFF);

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		struct frame *fr = (struct frame *)(rp->r_sp + STACK_BIAS);
		struct rwindow *rwin = (struct rwindow *)mpcb->mpcb_wbuf;

		if (mpcb->mpcb_wbcnt > 0) {
			int i = mpcb->mpcb_wbcnt;
			do {
				i--;
				/* Match the saved window to this frame's %sp. */
				if ((long)mpcb->mpcb_spbuf[i] != rp->r_sp)
					continue;

				rwin[i].rw_local[reg - 16] = value;
				atomic_add_64(&fasttrap_putreg_mpcb_cnt, 1);
				return;
			} while (i > 0);
		}

		if (fasttrap_sulword(&fr->fr_local[reg - 16], value) != 0) {
			/*
			 * The stack write failed; fabricate a new machpcb
			 * window entry by copying in the user's frame and
			 * patching the register there instead.
			 */
			if (mpcb->mpcb_wbcnt >= MAXWIN || copyin(fr,
			    &rwin[mpcb->mpcb_wbcnt], sizeof (*rwin)) != 0)
				goto err;

			rwin[mpcb->mpcb_wbcnt].rw_local[reg - 16] = value;
			mpcb->mpcb_spbuf[mpcb->mpcb_wbcnt] = (caddr_t)rp->r_sp;
			mpcb->mpcb_wbcnt++;
			atomic_add_64(&fasttrap_putreg_mpcb_cnt, 1);
			return;
		}
	} else {
		struct frame32 *fr = (struct frame32 *)(caddr32_t)rp->r_sp;
		struct rwindow32 *rwin = (struct rwindow32 *)mpcb->mpcb_wbuf;
		/* 32-bit process: only the low-order word is stored. */
		uint32_t v32 = (uint32_t)value;

		if (mpcb->mpcb_wbcnt > 0) {
			int i = mpcb->mpcb_wbcnt;
			do {
				i--;
				/* Match the saved window to this frame's %sp. */
				if ((long)mpcb->mpcb_spbuf[i] != rp->r_sp)
					continue;

				rwin[i].rw_local[reg - 16] = v32;
				atomic_add_64(&fasttrap_putreg_mpcb_cnt, 1);
				return;
			} while (i > 0);
		}

		if (fasttrap_suword32(&fr->fr_local[reg - 16], v32) != 0) {
			/*
			 * The stack write failed; fabricate a new machpcb
			 * window entry by copying in the user's frame and
			 * patching the register there instead.
			 */
			if (mpcb->mpcb_wbcnt >= MAXWIN || copyin(fr,
			    &rwin[mpcb->mpcb_wbcnt], sizeof (*rwin)) != 0)
				goto err;

			rwin[mpcb->mpcb_wbcnt].rw_local[reg - 16] = v32;
			mpcb->mpcb_spbuf[mpcb->mpcb_wbcnt] = (caddr_t)rp->r_sp;
			mpcb->mpcb_wbcnt++;
			atomic_add_64(&fasttrap_putreg_mpcb_cnt, 1);
			return;
		}
	}

	atomic_add_64(&fasttrap_putreg_slow_cnt, 1);
	return;

err:
	/*
	 * If we couldn't record this register's value, the process is in an
	 * irrecoverable state and we have no choice but to euthanize it.
	 */
	psignal(ttoproc(curthread), SIGILL);
}
1537