1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/atomic.h>
30 #include <sys/errno.h>
31 #include <sys/stat.h>
32 #include <sys/modctl.h>
33 #include <sys/conf.h>
34 #include <sys/systm.h>
35 #include <sys/ddi.h>
36 #include <sys/sunddi.h>
37 #include <sys/cpuvar.h>
38 #include <sys/kmem.h>
39 #include <sys/strsubr.h>
40 #include <sys/fasttrap.h>
41 #include <sys/fasttrap_impl.h>
42 #include <sys/fasttrap_isa.h>
43 #include <sys/dtrace.h>
44 #include <sys/dtrace_impl.h>
45 #include <sys/sysmacros.h>
46 #include <sys/frame.h>
47 #include <sys/stack.h>
48 #include <sys/proc.h>
49 #include <sys/priv.h>
50 #include <sys/policy.h>
51 #include <sys/ontrap.h>
52 #include <sys/vmsystm.h>
53 #include <sys/prsystm.h>
54 
55 #include <vm/as.h>
56 #include <vm/seg.h>
57 #include <vm/seg_dev.h>
58 #include <vm/seg_vn.h>
59 #include <vm/seg_spt.h>
60 #include <vm/seg_kmem.h>
61 
62 /*
63  * User-Land Trap-Based Tracing
64  * ----------------------------
65  *
66  * The fasttrap provider allows DTrace consumers to instrument any user-level
67  * instruction to gather data; this includes probes with semantic
68  * significance like entry and return as well as simple offsets into the
69  * function. While the specific techniques used are very ISA specific, the
70  * methodology is generalizable to any architecture.
71  *
72  *
73  * The General Methodology
74  * -----------------------
75  *
76  * With the primary goal of tracing every user-land instruction and the
77  * limitation that we can't trust user space (and so don't want to rely on much
78  * information there), we begin by replacing the instructions we want to trace
79  * with trap instructions. Each instruction we overwrite is saved into a hash
80  * table keyed by process ID and pc address. When we enter the kernel due to
81  * this trap instruction, we need the effects of the replaced instruction to
82  * appear to have occurred before we proceed with the user thread's
83  * execution.
84  *
85  * Each user level thread is represented by a ulwp_t structure which is
86  * always easily accessible through a register. The most basic way to produce
87  * the effects of the instruction we replaced is to copy that instruction out
88  * to a bit of scratch space reserved in the user thread's ulwp_t structure
89  * (a sort of kernel-private thread local storage), set the PC to that
90  * scratch space and single step. When we reenter the kernel after single
91  * stepping the instruction we must then adjust the PC to point to what would
92  * normally be the next instruction. Of course, special care must be taken
93  * for branches and jumps, but these represent such a small fraction of any
94  * instruction set that writing the code to emulate these in the kernel is
95  * not too difficult.
96  *
97  * Return probes may require several tracepoints to trace every return site,
98  * and, conversely, each tracepoint may activate several probes (the entry
99  * and offset 0 probes, for example). To solve this multiplexing problem,
100  * tracepoints contain lists of probes to activate and probes contain lists
101  * of tracepoints to enable. If a probe is activated, it adds its ID to
102  * existing tracepoints or creates new ones as necessary.
103  *
104  * Most probes are activated _before_ the instruction is executed, but return
105  * probes are activated _after_ the effects of the last instruction of the
106  * function are visible. Return probes must be fired _after_ we have
107  * single-stepped the instruction whereas all other probes are fired
108  * beforehand.
109  */
110 
111 static dev_info_t *fasttrap_devi;
112 static dtrace_provider_id_t fasttrap_id;
113 static dtrace_meta_provider_id_t fasttrap_meta_id;
114 
115 static timeout_id_t fasttrap_timeout;
116 static kmutex_t fasttrap_cleanup_mtx;
117 static uint_t fasttrap_cleanup_work;
118 
119 /*
120  * Generation count on modifications to the global tracepoint lookup table.
121  */
122 static volatile uint64_t fasttrap_mod_gen;
123 
124 /*
125  * When the fasttrap provider is loaded, fasttrap_max is set to either
126  * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the
127  * fasttrap.conf file. Each time a probe is created, fasttrap_total is
128  * incremented by the number of tracepoints that may be associated with that
129  * probe; fasttrap_total is capped at fasttrap_max.
130  */
131 #define	FASTTRAP_MAX_DEFAULT		250000
132 static uint32_t fasttrap_max;
133 static uint32_t fasttrap_total;
134 
135 
136 #define	FASTTRAP_TPOINTS_DEFAULT_SIZE	0x4000
137 #define	FASTTRAP_PROVIDERS_DEFAULT_SIZE	0x100
138 #define	FASTTRAP_PROCS_DEFAULT_SIZE	0x100
139 
140 #define	FASTTRAP_PID_NAME		"pid"
141 
142 fasttrap_hash_t			fasttrap_tpoints;
143 static fasttrap_hash_t		fasttrap_provs;
144 static fasttrap_hash_t		fasttrap_procs;
145 
146 dtrace_id_t			fasttrap_probe_id;
147 static int			fasttrap_count;		/* ref count */
148 static int			fasttrap_pid_count;	/* pid ref count */
149 static kmutex_t			fasttrap_count_mtx;	/* lock on ref count */
150 
151 #define	FASTTRAP_ENABLE_FAIL	1
152 #define	FASTTRAP_ENABLE_PARTIAL	2
153 
154 static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
155 static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
156 
157 static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, const char *,
158     const dtrace_pattr_t *);
159 static void fasttrap_provider_retire(pid_t, const char *, int);
160 static void fasttrap_provider_free(fasttrap_provider_t *);
161 
162 static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
163 static void fasttrap_proc_release(fasttrap_proc_t *);
164 
165 #define	FASTTRAP_PROVS_INDEX(pid, name) \
166 	((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
167 
168 #define	FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
169 
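/*
 * Return the position (1-based) of the highest bit set in i, or 0 if i is
 * zero.
 */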
170 static int
171 fasttrap_highbit(ulong_t i)
172 {
173 	int h = 1;
174 
175 	if (i == 0)
176 		return (0);
177 #ifdef _LP64
178 	if (i & 0xffffffff00000000ul) {
179 		h += 32; i >>= 32;
180 	}
181 #endif
182 	if (i & 0xffff0000) {
183 		h += 16; i >>= 16;
184 	}
185 	if (i & 0xff00) {
186 		h += 8; i >>= 8;
187 	}
188 	if (i & 0xf0) {
189 		h += 4; i >>= 4;
190 	}
191 	if (i & 0xc) {
192 		h += 2; i >>= 2;
193 	}
194 	if (i & 0x2) {
195 		h += 1;
196 	}
197 	return (h);
198 }
199 
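/*
 * Classic ELF-style string hash, used via FASTTRAP_PROVS_INDEX() to pick a
 * provider hash bucket from a provider name and pid.
 */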
200 static uint_t
201 fasttrap_hash_str(const char *p)
202 {
203 	unsigned int g;
204 	uint_t hval = 0;
205 
206 	while (*p) {
207 		hval = (hval << 4) + *p++;
208 		if ((g = (hval & 0xf0000000)) != 0)
209 			hval ^= g >> 24;
210 		hval &= ~g;
211 	}
212 	return (hval);
213 }
214 
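/*
 * Post a SIGTRAP with si_code TRAP_DTRACE and si_addr set to the given pc
 * against the process, directed at thread t if t is non-NULL.
 */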
215 void
216 fasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc)
217 {
218 	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
219 
220 	sqp->sq_info.si_signo = SIGTRAP;
221 	sqp->sq_info.si_code = TRAP_DTRACE;
222 	sqp->sq_info.si_addr = (caddr_t)pc;
223 
224 	mutex_enter(&p->p_lock);
225 	sigaddqa(p, t, sqp);
226 	mutex_exit(&p->p_lock);
227 
228 	if (t != NULL)
229 		aston(t);
230 }
231 
232 /*
233  * This function ensures that no threads are actively using the memory
234  * associated with probes that were formerly live.
235  */
236 static void
237 fasttrap_mod_barrier(uint64_t gen)
238 {
239 	int i;
240 
241 	if (gen < fasttrap_mod_gen)
242 		return;
243 
244 	fasttrap_mod_gen++;
245 
246 	for (i = 0; i < NCPU; i++) {
247 		mutex_enter(&cpu_core[i].cpuc_pid_lock);
248 		mutex_exit(&cpu_core[i].cpuc_pid_lock);
249 	}
250 }
251 
252 /*
253  * This is the timeout's callback for cleaning up the providers and their
254  * probes.
255  */
256 /*ARGSUSED*/
257 static void
258 fasttrap_pid_cleanup_cb(void *data)
259 {
260 	fasttrap_provider_t **fpp, *fp;
261 	fasttrap_bucket_t *bucket;
262 	dtrace_provider_id_t provid;
263 	int i, later;
264 
265 	static volatile int in = 0;
266 	ASSERT(in == 0);
267 	in = 1;
268 
269 	mutex_enter(&fasttrap_cleanup_mtx);
270 	while (fasttrap_cleanup_work) {
271 		fasttrap_cleanup_work = 0;
272 		mutex_exit(&fasttrap_cleanup_mtx);
273 
274 		later = 0;
275 
276 		/*
277 		 * Iterate over all the providers trying to remove the marked
278 		 * ones. If a provider is marked but not retired, we just
279 		 * have to take a crack at removing it -- it's no big deal if
280 		 * we can't.
281 		 */
282 		for (i = 0; i < fasttrap_provs.fth_nent; i++) {
283 			bucket = &fasttrap_provs.fth_table[i];
284 			mutex_enter(&bucket->ftb_mtx);
285 			fpp = (fasttrap_provider_t **)&bucket->ftb_data;
286 
287 			while ((fp = *fpp) != NULL) {
288 				if (!fp->ftp_marked) {
289 					fpp = &fp->ftp_next;
290 					continue;
291 				}
292 
293 				mutex_enter(&fp->ftp_mtx);
294 
295 				/*
296 				 * If this provider is referenced either
297 				 * because it is a USDT provider or is being
298 				 * modified, we can't unregister or even
299 				 * condense.
300 				 */
301 				if (fp->ftp_ccount != 0 ||
302 				    fp->ftp_mcount != 0) {
303 					mutex_exit(&fp->ftp_mtx);
304 					fp->ftp_marked = 0;
305 					continue;
306 				}
307 
308 				if (!fp->ftp_retired || fp->ftp_rcount != 0)
309 					fp->ftp_marked = 0;
310 
311 				mutex_exit(&fp->ftp_mtx);
312 
313 				/*
314 				 * If we successfully unregister this
315 				 * provider we can remove it from the hash
316 				 * chain and free the memory. If our attempt
317 				 * to unregister fails and this is a retired
318 				 * provider, increment our flag to try again
319 				 * pretty soon. If we've consumed more than
320 				 * half of our total permitted number of
321 				 * probes, call dtrace_condense() to try to
322 				 * clean out the unenabled probes.
323 				 */
324 				provid = fp->ftp_provid;
325 				if (dtrace_unregister(provid) != 0) {
326 					if (fasttrap_total > fasttrap_max / 2)
327 						(void) dtrace_condense(provid);
328 					later += fp->ftp_marked;
329 					fpp = &fp->ftp_next;
330 				} else {
331 					*fpp = fp->ftp_next;
332 					fasttrap_provider_free(fp);
333 				}
334 			}
335 			mutex_exit(&bucket->ftb_mtx);
336 		}
337 
338 		mutex_enter(&fasttrap_cleanup_mtx);
339 	}
340 
341 	ASSERT(fasttrap_timeout != 0);
342 
343 	/*
344 	 * If we were unable to remove a retired provider, try again after
345 	 * a second. This situation can occur in certain circumstances where
346 	 * providers cannot be unregistered even though they have no probes
347 	 * enabled because of an execution of dtrace -l or something similar.
348 	 * If the timeout has been disabled (set to 1 because we're trying
349 	 * to detach), we set fasttrap_cleanup_work to ensure that we'll
350 	 * get a chance to do that work if and when the timeout is reenabled
351 	 * (if detach fails).
352 	 */
353 	if (later > 0 && fasttrap_timeout != (timeout_id_t)1)
354 		fasttrap_timeout = timeout(&fasttrap_pid_cleanup_cb, NULL, hz);
355 	else if (later > 0)
356 		fasttrap_cleanup_work = 1;
357 	else
358 		fasttrap_timeout = 0;
359 
360 	mutex_exit(&fasttrap_cleanup_mtx);
361 	in = 0;
362 }
363 
364 /*
365  * Activates the asynchronous cleanup mechanism.
366  */
367 static void
368 fasttrap_pid_cleanup(void)
369 {
370 	mutex_enter(&fasttrap_cleanup_mtx);
371 	fasttrap_cleanup_work = 1;
372 	if (fasttrap_timeout == 0)
373 		fasttrap_timeout = timeout(&fasttrap_pid_cleanup_cb, NULL, 1);
374 	mutex_exit(&fasttrap_cleanup_mtx);
375 }
376 
377 /*
378  * This is called from cfork() via dtrace_fasttrap_fork(). The child
379  * process's address space is (roughly) a copy of the parent process's, so
380  * we have to remove all the instrumentation we had previously enabled in the
381  * parent.
382  */
383 static void
384 fasttrap_fork(proc_t *p, proc_t *cp)
385 {
386 	pid_t ppid = p->p_pid;
387 	int i;
388 
389 	ASSERT(curproc == p);
390 	ASSERT(p->p_proc_flag & P_PR_LOCK);
391 	ASSERT(p->p_dtrace_count > 0);
392 	ASSERT(cp->p_dtrace_count == 0);
393 
394 	/*
395 	 * This would be simpler and faster if we maintained per-process
396 	 * hash tables of enabled tracepoints. It could, however, potentially
397 	 * slow down execution of a tracepoint since we'd need to go
398 	 * through two levels of indirection. In the future, we should
399 	 * consider either maintaining per-process ancillary lists of
400 	 * enabled tracepoints or hanging a pointer to a per-process hash
401 	 * table of enabled tracepoints off the proc structure.
402 	 */
403 
404 	/*
405 	 * We don't have to worry about the child process disappearing
406 	 * because we're in fork().
407 	 */
408 	mutex_enter(&cp->p_lock);
409 	sprlock_proc(cp);
410 	mutex_exit(&cp->p_lock);
411 
412 	/*
413 	 * Iterate over every tracepoint looking for ones that belong to the
414 	 * parent process, and remove each from the child process.
415 	 */
416 	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
417 		fasttrap_tracepoint_t *tp;
418 		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];
419 
420 		mutex_enter(&bucket->ftb_mtx);
421 		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
422 			if (tp->ftt_pid == ppid &&
423 			    !tp->ftt_proc->ftpc_defunct) {
424 				int ret = fasttrap_tracepoint_remove(cp, tp);
425 				ASSERT(ret == 0);
426 			}
427 		}
428 		mutex_exit(&bucket->ftb_mtx);
429 	}
430 
431 	mutex_enter(&cp->p_lock);
432 	sprunlock(cp);
433 }
434 
435 /*
436  * This is called from proc_exit() or from exec_common() if p_dtrace_probes
437  * is set on the proc structure to indicate that there is a pid provider
438  * associated with this process.
439  */
440 static void
441 fasttrap_exec_exit(proc_t *p)
442 {
443 	ASSERT(p == curproc);
444 	ASSERT(MUTEX_HELD(&p->p_lock));
445 
446 	mutex_exit(&p->p_lock);
447 
448 	/*
449 	 * We clean up the pid provider for this process here; user-land
450 	 * static probes are handled by the meta-provider remove entry point.
451 	 */
452 	fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
453 
454 	mutex_enter(&p->p_lock);
455 }
456 
457 
458 /*ARGSUSED*/
459 static void
460 fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
461 {
462 	/*
463 	 * There are no "default" pid probes.
464 	 */
465 }
466 
467 /*ARGSUSED*/
468 static void
469 fasttrap_provide(void *arg, const dtrace_probedesc_t *desc)
470 {
471 	if (dtrace_probe_lookup(fasttrap_id, NULL, "fasttrap", "fasttrap") == 0)
472 		fasttrap_probe_id = dtrace_probe_create(fasttrap_id, NULL,
473 		    "fasttrap", "fasttrap", FASTTRAP_AFRAMES, NULL);
474 }
475 
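/*
 * Enable the index'th tracepoint of the given probe in process p, installing
 * the probe's preallocated tracepoint if no matching one is already active.
 * Returns 0 on success, FASTTRAP_ENABLE_FAIL if the ISA-specific
 * initialization fails, or FASTTRAP_ENABLE_PARTIAL if the tracepoint was
 * added to the lookup table but couldn't be written into the process's text.
 */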
476 static int
477 fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
478 {
479 	fasttrap_tracepoint_t *tp, *new_tp = NULL;
480 	fasttrap_bucket_t *bucket;
481 	fasttrap_id_t *id;
482 	pid_t pid;
483 	uintptr_t pc;
484 
485 	ASSERT(index < probe->ftp_ntps);
486 
487 	pid = probe->ftp_pid;
488 	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
489 	id = &probe->ftp_tps[index].fit_id;
490 
491 	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
492 
493 	ASSERT(!(p->p_flag & SVFORK));
494 
495 	/*
496 	 * Before we make any modifications, make sure we've imposed a barrier
497 	 * on the generation in which this probe was last modified.
498 	 */
499 	fasttrap_mod_barrier(probe->ftp_gen);
500 
501 	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
502 
503 	/*
504 	 * If the tracepoint has already been enabled, just add our id to the
505 	 * list of interested probes. This may be our second time through
506 	 * this path in which case we'll have constructed the tracepoint we'd
507 	 * like to install. If we can't find a match, and have an allocated
508 	 * tracepoint ready to go, enable that one now.
509 	 *
510 	 * A tracepoint whose process is defunct is also considered defunct.
511 	 */
512 again:
513 	mutex_enter(&bucket->ftb_mtx);
514 	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
515 		if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
516 		    tp->ftt_proc->ftpc_defunct)
517 			continue;
518 
519 		/*
520 		 * Now that we've found a matching tracepoint, it would be
521 		 * a decent idea to confirm that the tracepoint is still
522 		 * enabled and the trap instruction hasn't been overwritten.
523 		 * Since this is a little hairy, we'll punt for now.
524 		 */
525 
526 		/*
527 		 * This can't be the first interested probe. We don't have
528 		 * to worry about another thread being in the midst of
529 		 * deleting this tracepoint (which would be the only valid
530 		 * reason for a tracepoint to have no interested probes)
531 		 * since we're holding P_PR_LOCK for this process.
532 		 */
533 		ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);
534 
535 		switch (id->fti_ptype) {
536 		case DTFTP_ENTRY:
537 		case DTFTP_OFFSETS:
538 		case DTFTP_IS_ENABLED:
539 			id->fti_next = tp->ftt_ids;
540 			membar_producer();
541 			tp->ftt_ids = id;
542 			membar_producer();
543 			break;
544 
545 		case DTFTP_RETURN:
546 		case DTFTP_POST_OFFSETS:
547 			id->fti_next = tp->ftt_retids;
548 			membar_producer();
549 			tp->ftt_retids = id;
550 			membar_producer();
551 			break;
552 
553 		default:
554 			ASSERT(0);
555 		}
556 
557 		mutex_exit(&bucket->ftb_mtx);
558 
559 		if (new_tp != NULL) {
560 			new_tp->ftt_ids = NULL;
561 			new_tp->ftt_retids = NULL;
562 		}
563 
564 		return (0);
565 	}
566 
567 	/*
568 	 * If we have a good tracepoint ready to go, install it now while
569 	 * we have the lock held and no one can screw with us.
570 	 */
571 	if (new_tp != NULL) {
572 		int rc = 0;
573 
574 		new_tp->ftt_next = bucket->ftb_data;
575 		membar_producer();
576 		bucket->ftb_data = new_tp;
577 		membar_producer();
578 		mutex_exit(&bucket->ftb_mtx);
579 
580 		/*
581 		 * Activate the tracepoint in the ISA-specific manner.
582 		 * If this fails, we need to report the failure, but
583 		 * indicate that this tracepoint must still be disabled
584 		 * by calling fasttrap_tracepoint_disable().
585 		 */
586 		if (fasttrap_tracepoint_install(p, new_tp) != 0)
587 			rc = FASTTRAP_ENABLE_PARTIAL;
588 
589 		/*
590 		 * Increment the count of the number of tracepoints active in
591 		 * the victim process.
592 		 */
593 		ASSERT(p->p_proc_flag & P_PR_LOCK);
594 		p->p_dtrace_count++;
595 
596 		return (rc);
597 	}
598 
599 	mutex_exit(&bucket->ftb_mtx);
600 
601 	/*
602 	 * Initialize the tracepoint that's been preallocated with the probe.
603 	 */
604 	new_tp = probe->ftp_tps[index].fit_tp;
605 
606 	ASSERT(new_tp->ftt_pid == pid);
607 	ASSERT(new_tp->ftt_pc == pc);
608 	ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
609 	ASSERT(new_tp->ftt_ids == NULL);
610 	ASSERT(new_tp->ftt_retids == NULL);
611 
612 	switch (id->fti_ptype) {
613 	case DTFTP_ENTRY:
614 	case DTFTP_OFFSETS:
615 	case DTFTP_IS_ENABLED:
616 		id->fti_next = NULL;
617 		new_tp->ftt_ids = id;
618 		break;
619 
620 	case DTFTP_RETURN:
621 	case DTFTP_POST_OFFSETS:
622 		id->fti_next = NULL;
623 		new_tp->ftt_retids = id;
624 		break;
625 
626 	default:
627 		ASSERT(0);
628 	}
629 
630 	/*
631 	 * If the ISA-dependent initialization goes to plan, go back to the
632 	 * beginning and try to install this freshly made tracepoint.
633 	 */
634 	if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
635 		goto again;
636 
637 	new_tp->ftt_ids = NULL;
638 	new_tp->ftt_retids = NULL;
639 
640 	return (FASTTRAP_ENABLE_FAIL);
641 }
642 
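/*
 * Remove this probe's interest in the index'th tracepoint. If no other probe
 * is still registered with the tracepoint, restore the original instruction
 * in the process's text (if the process still exists) and remove the
 * tracepoint from the table of active tracepoints.
 */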
643 static void
644 fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
645 {
646 	fasttrap_bucket_t *bucket;
647 	fasttrap_provider_t *provider = probe->ftp_prov;
648 	fasttrap_tracepoint_t **pp, *tp;
649 	fasttrap_id_t *id, **idp;
650 	pid_t pid;
651 	uintptr_t pc;
652 
653 	ASSERT(index < probe->ftp_ntps);
654 
655 	pid = probe->ftp_pid;
656 	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
657 	id = &probe->ftp_tps[index].fit_id;
658 
659 	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
660 
661 	/*
662 	 * Find the tracepoint and make sure that our id is one of the
663 	 * ones registered with it.
664 	 */
665 	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
666 	mutex_enter(&bucket->ftb_mtx);
667 	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
668 		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
669 		    tp->ftt_proc == provider->ftp_proc)
670 			break;
671 	}
672 
673 	/*
674 	 * If we somehow lost this tracepoint, we're in a world of hurt.
675 	 */
676 	ASSERT(tp != NULL);
677 
678 	switch (id->fti_ptype) {
679 	case DTFTP_ENTRY:
680 	case DTFTP_OFFSETS:
681 	case DTFTP_IS_ENABLED:
682 		ASSERT(tp->ftt_ids != NULL);
683 		idp = &tp->ftt_ids;
684 		break;
685 
686 	case DTFTP_RETURN:
687 	case DTFTP_POST_OFFSETS:
688 		ASSERT(tp->ftt_retids != NULL);
689 		idp = &tp->ftt_retids;
690 		break;
691 
692 	default:
693 		ASSERT(0);
694 	}
695 
696 	while ((*idp)->fti_probe != probe) {
697 		idp = &(*idp)->fti_next;
698 		ASSERT(*idp != NULL);
699 	}
700 
701 	id = *idp;
702 	*idp = id->fti_next;
703 	membar_producer();
704 
705 	ASSERT(id->fti_probe == probe);
706 
707 	/*
708 	 * If there are other registered enablings of this tracepoint, we're
709 	 * all done, but if this was the last probe associated with this
710 	 * tracepoint, we need to remove and free it.
711 	 */
712 	if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {
713 
714 		/*
715 		 * If the current probe's tracepoint is in use, swap it
716 		 * for an unused tracepoint.
717 		 */
718 		if (tp == probe->ftp_tps[index].fit_tp) {
719 			fasttrap_probe_t *tmp_probe;
720 			fasttrap_tracepoint_t **tmp_tp;
721 			uint_t tmp_index;
722 
723 			if (tp->ftt_ids != NULL) {
724 				tmp_probe = tp->ftt_ids->fti_probe;
725 				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
726 				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
727 			} else {
728 				tmp_probe = tp->ftt_retids->fti_probe;
729 				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
730 				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
731 			}
732 
733 			ASSERT(*tmp_tp != NULL);
734 			ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
735 			ASSERT((*tmp_tp)->ftt_ids == NULL);
736 			ASSERT((*tmp_tp)->ftt_retids == NULL);
737 
738 			probe->ftp_tps[index].fit_tp = *tmp_tp;
739 			*tmp_tp = tp;
740 
741 		}
742 
743 		mutex_exit(&bucket->ftb_mtx);
744 
745 		/*
746 		 * Tag the modified probe with the generation in which it was
747 		 * changed.
748 		 */
749 		probe->ftp_gen = fasttrap_mod_gen;
750 		return;
751 	}
752 
753 	mutex_exit(&bucket->ftb_mtx);
754 
755 	/*
756 	 * We can't safely remove the tracepoint from the set of active
757 	 * tracepoints until we've actually removed the fasttrap instruction
758 	 * from the process's text. We can, however, operate on this
759 	 * tracepoint secure in the knowledge that no other thread is going to
760 	 * be looking at it since we hold P_PR_LOCK on the process if it's
761 	 * live or we hold the provider lock on the process if it's dead and
762 	 * gone.
763 	 */
764 
765 	/*
766 	 * We only need to remove the actual instruction if we're looking
767 	 * at an existing process.
768 	 */
769 	if (p != NULL) {
770 		/*
771 		 * If we fail to restore the instruction we need to kill
772 		 * this process since it's in a completely unrecoverable
773 		 * state.
774 		 */
775 		if (fasttrap_tracepoint_remove(p, tp) != 0)
776 			fasttrap_sigtrap(p, NULL, pc);
777 
778 		/*
779 		 * Decrement the count of the number of tracepoints active
780 		 * in the victim process.
781 		 */
782 		ASSERT(p->p_proc_flag & P_PR_LOCK);
783 		p->p_dtrace_count--;
784 	}
785 
786 	/*
787 	 * Remove the probe from the hash table of active tracepoints.
788 	 */
789 	mutex_enter(&bucket->ftb_mtx);
790 	pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
791 	ASSERT(*pp != NULL);
792 	while (*pp != tp) {
793 		pp = &(*pp)->ftt_next;
794 		ASSERT(*pp != NULL);
795 	}
796 
797 	*pp = tp->ftt_next;
798 	membar_producer();
799 
800 	mutex_exit(&bucket->ftb_mtx);
801 
802 	/*
803 	 * Tag the modified probe with the generation in which it was changed.
804 	 */
805 	probe->ftp_gen = fasttrap_mod_gen;
806 }
807 
808 typedef int fasttrap_probe_f(struct regs *);
809 
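/*
 * Install the given trap handler function pointer(s), bumping the associated
 * reference count; the pointers are only written when the count goes from
 * zero to one.
 */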
810 static void
811 fasttrap_enable_common(int *count, fasttrap_probe_f **fptr, fasttrap_probe_f *f,
812     fasttrap_probe_f **fptr2, fasttrap_probe_f *f2)
813 {
814 	/*
815 	 * We don't have to play the rw lock game here because we're
816 	 * providing something rather than taking something away --
817 	 * we can be sure that no threads have tried to follow this
818 	 * function pointer yet.
819 	 */
820 	mutex_enter(&fasttrap_count_mtx);
821 	if (*count == 0) {
822 		ASSERT(*fptr == NULL);
823 		*fptr = f;
824 		if (fptr2 != NULL)
825 			*fptr2 = f2;
826 	}
827 	ASSERT(*fptr == f);
828 	ASSERT(fptr2 == NULL || *fptr2 == f2);
829 	(*count)++;
830 	mutex_exit(&fasttrap_count_mtx);
831 }
832 
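/*
 * Drop a reference on the given trap handler function pointer(s) and clear
 * them when the count reaches zero, taking each other online CPU's
 * cpu_ft_lock as writer so that any handler still running on another CPU
 * finishes before the pointers go away.
 */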
833 static void
834 fasttrap_disable_common(int *count, fasttrap_probe_f **fptr,
835     fasttrap_probe_f **fptr2)
836 {
837 	ASSERT(MUTEX_HELD(&cpu_lock));
838 
839 	mutex_enter(&fasttrap_count_mtx);
840 	(*count)--;
841 	ASSERT(*count >= 0);
842 	if (*count == 0) {
843 		cpu_t *cur, *cpu = CPU;
844 
845 		for (cur = cpu->cpu_next_onln; cur != cpu;
846 			cur = cur->cpu_next_onln) {
847 			rw_enter(&cur->cpu_ft_lock, RW_WRITER);
848 		}
849 
850 		*fptr = NULL;
851 		if (fptr2 != NULL)
852 			*fptr2 = NULL;
853 
854 		for (cur = cpu->cpu_next_onln; cur != cpu;
855 			cur = cur->cpu_next_onln) {
856 			rw_exit(&cur->cpu_ft_lock);
857 		}
858 	}
859 	mutex_exit(&fasttrap_count_mtx);
860 }
861 
862 /*ARGSUSED*/
863 static void
864 fasttrap_enable(void *arg, dtrace_id_t id, void *parg)
865 {
866 	/*
867 	 * Enable the probe that corresponds to statically placed trace
868 	 * points which have not explicitly been placed in the process's text
869 	 * by the fasttrap provider.
870 	 */
871 	ASSERT(arg == NULL);
872 	ASSERT(id == fasttrap_probe_id);
873 
874 	fasttrap_enable_common(&fasttrap_count,
875 	    &dtrace_fasttrap_probe_ptr, fasttrap_probe, NULL, NULL);
876 }
877 
878 
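/*
 * Enable every tracepoint associated with this pid probe, backing out any
 * tracepoints already enabled if one of them fails.
 */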
879 /*ARGSUSED*/
880 static void
881 fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
882 {
883 	fasttrap_probe_t *probe = parg;
884 	proc_t *p;
885 	int i, rc;
886 
887 	ASSERT(probe != NULL);
888 	ASSERT(!probe->ftp_enabled);
889 	ASSERT(id == probe->ftp_id);
890 	ASSERT(MUTEX_HELD(&cpu_lock));
891 
892 	/*
893 	 * Increment the count of enabled probes on this probe's provider;
894 	 * the provider can't go away while the probe still exists. We
895 	 * must increment this even if we aren't able to properly enable
896 	 * this probe.
897 	 */
898 	mutex_enter(&probe->ftp_prov->ftp_mtx);
899 	probe->ftp_prov->ftp_rcount++;
900 	mutex_exit(&probe->ftp_prov->ftp_mtx);
901 
902 	/*
903 	 * If this probe's provider is retired (meaning it was valid in a
904 	 * previously exec'ed incarnation of this address space), bail out. The
905 	 * provider can't go away while we're in this code path.
906 	 */
907 	if (probe->ftp_prov->ftp_retired)
908 		return;
909 
910 	/*
911 	 * If we can't find the process, it may be that we're in the context of
912 	 * a fork in which the traced process is being born and we're copying
913 	 * USDT probes. Otherwise, the process is gone so bail.
914 	 */
915 	if ((p = sprlock(probe->ftp_pid)) == NULL) {
916 		if ((curproc->p_flag & SFORKING) == 0)
917 			return;
918 
919 		mutex_enter(&pidlock);
920 		p = prfind(probe->ftp_pid);
921 
922 		/*
923 		 * Confirm that curproc is indeed forking the process in which
924 		 * we're trying to enable probes.
925 		 */
926 		ASSERT(p != NULL);
927 		ASSERT(p->p_parent == curproc);
928 		ASSERT(p->p_stat == SIDL);
929 
930 		mutex_enter(&p->p_lock);
931 		mutex_exit(&pidlock);
932 
933 		sprlock_proc(p);
934 	}
935 
936 	ASSERT(!(p->p_flag & SVFORK));
937 	mutex_exit(&p->p_lock);
938 
939 	/*
940 	 * We have to enable the trap entry before any user threads have
941 	 * the chance to execute the trap instruction we're about to place
942 	 * in their process's text.
943 	 */
944 	fasttrap_enable_common(&fasttrap_pid_count,
945 	    &dtrace_pid_probe_ptr, fasttrap_pid_probe,
946 	    &dtrace_return_probe_ptr, fasttrap_return_probe);
947 
948 	/*
949 	 * Enable all the tracepoints and add this probe's id to each
950 	 * tracepoint's list of active probes.
951 	 */
952 	for (i = 0; i < probe->ftp_ntps; i++) {
953 		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
954 			/*
955 			 * If enabling the tracepoint failed completely,
956 			 * we don't have to disable it; if the failure
957 			 * was only partial we must disable it.
958 			 */
959 			if (rc == FASTTRAP_ENABLE_FAIL)
960 				i--;
961 			else
962 				ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);
963 
964 			/*
965 			 * Back up and pull out all the tracepoints we've
966 			 * created so far for this probe.
967 			 */
968 			while (i >= 0) {
969 				fasttrap_tracepoint_disable(p, probe, i);
970 				i--;
971 			}
972 
973 			mutex_enter(&p->p_lock);
974 			sprunlock(p);
975 
976 			/*
977 			 * Since we're not actually enabling this probe,
978 			 * drop our reference on the trap table entry.
979 			 */
980 			fasttrap_disable_common(&fasttrap_pid_count,
981 			    &dtrace_pid_probe_ptr, &dtrace_return_probe_ptr);
982 			return;
983 		}
984 	}
985 
986 	mutex_enter(&p->p_lock);
987 	sprunlock(p);
988 
989 	probe->ftp_enabled = 1;
990 }
991 
992 
993 /*ARGSUSED*/
994 static void
995 fasttrap_disable(void *arg, dtrace_id_t id, void *parg)
996 {
997 	/*
998 	 * Disable the probe that corresponds to statically placed trace
999 	 * points.
1000 	 */
1001 	ASSERT(arg == NULL);
1002 	ASSERT(id == fasttrap_probe_id);
1003 	ASSERT(MUTEX_HELD(&cpu_lock));
1004 	fasttrap_disable_common(&fasttrap_count, &dtrace_fasttrap_probe_ptr,
1005 	    NULL);
1006 }
1007 
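/*
 * Disable every tracepoint associated with this pid probe, and mark the
 * provider for the asynchronous cleanup if it's retired (or its process is
 * gone) and can potentially be reaped.
 */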
1008 /*ARGSUSED*/
1009 static void
1010 fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
1011 {
1012 	fasttrap_probe_t *probe = parg;
1013 	fasttrap_provider_t *provider = probe->ftp_prov;
1014 	proc_t *p;
1015 	int i, whack = 0;
1016 
1017 	if (!probe->ftp_enabled) {
1018 		mutex_enter(&provider->ftp_mtx);
1019 		provider->ftp_rcount--;
1020 		ASSERT(provider->ftp_rcount >= 0);
1021 		mutex_exit(&provider->ftp_mtx);
1022 		return;
1023 	}
1024 
1025 	ASSERT(id == probe->ftp_id);
1026 
1027 	/*
1028 	 * We won't be able to acquire a /proc-esque lock on the process
1029 	 * iff the process is dead and gone. In this case, we rely on the
1030 	 * provider lock as a point of mutual exclusion to prevent other
1031 	 * DTrace consumers from disabling this probe.
1032 	 */
1033 	if ((p = sprlock(probe->ftp_pid)) != NULL) {
1034 		ASSERT(!(p->p_flag & SVFORK));
1035 		mutex_exit(&p->p_lock);
1036 	}
1037 
1038 	mutex_enter(&provider->ftp_mtx);
1039 
1040 	/*
1041 	 * Disable all the associated tracepoints.
1042 	 */
1043 	for (i = 0; i < probe->ftp_ntps; i++) {
1044 		fasttrap_tracepoint_disable(p, probe, i);
1045 	}
1046 
1047 	ASSERT(provider->ftp_rcount > 0);
1048 	provider->ftp_rcount--;
1049 
1050 	if (p != NULL) {
1051 		/*
1052 		 * Even though we may not be able to remove it entirely, we
1053 		 * mark this retired provider to get a chance to remove some
1054 		 * of the associated probes.
1055 		 */
1056 		if (provider->ftp_retired && !provider->ftp_marked)
1057 			whack = provider->ftp_marked = 1;
1058 		mutex_exit(&provider->ftp_mtx);
1059 
1060 		mutex_enter(&p->p_lock);
1061 		sprunlock(p);
1062 	} else {
1063 		/*
1064 		 * If the process is dead, we're just waiting for the
1065 		 * last probe to be disabled to be able to free it.
1066 		 */
1067 		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
1068 			whack = provider->ftp_marked = 1;
1069 		mutex_exit(&provider->ftp_mtx);
1070 	}
1071 
1072 	if (whack)
1073 		fasttrap_pid_cleanup();
1074 
1075 	probe->ftp_enabled = 0;
1076 
1077 	ASSERT(MUTEX_HELD(&cpu_lock));
1078 	fasttrap_disable_common(&fasttrap_pid_count, &dtrace_pid_probe_ptr,
1079 	    &dtrace_return_probe_ptr);
1080 }
1081 
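/*
 * Report the native and translated type strings for the given argument,
 * applying the probe's argument remapping table if one is present.
 */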
1082 /*ARGSUSED*/
1083 static void
1084 fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
1085     dtrace_argdesc_t *desc)
1086 {
1087 	fasttrap_probe_t *probe = parg;
1088 	char *str;
1089 	int i;
1090 
1091 	desc->dtargd_native[0] = '\0';
1092 	desc->dtargd_xlate[0] = '\0';
1093 
1094 	if (probe->ftp_prov->ftp_retired != 0 ||
1095 	    desc->dtargd_ndx >= probe->ftp_nargs) {
1096 		desc->dtargd_ndx = DTRACE_ARGNONE;
1097 		return;
1098 	}
1099 
1100 	/*
1101 	 * We only need to set this member if the argument is remapped.
1102 	 */
1103 	if (probe->ftp_argmap != NULL)
1104 		desc->dtargd_mapping = probe->ftp_argmap[desc->dtargd_ndx];
1105 
1106 	str = probe->ftp_ntypes;
1107 	for (i = 0; i < desc->dtargd_mapping; i++) {
1108 		str += strlen(str) + 1;
1109 	}
1110 
1111 	ASSERT(strlen(str) < sizeof (desc->dtargd_native));
1112 	(void) strcpy(desc->dtargd_native, str);
1113 
1114 	if (probe->ftp_xtypes == NULL)
1115 		return;
1116 
1117 	str = probe->ftp_xtypes;
1118 	for (i = 0; i < desc->dtargd_ndx; i++) {
1119 		str += strlen(str) + 1;
1120 	}
1121 
1122 	ASSERT(strlen(str) < sizeof (desc->dtargd_xlate));
1123 	(void) strcpy(desc->dtargd_xlate, str);
1124 }
1125 
1126 /*ARGSUSED*/
1127 static void
1128 fasttrap_destroy(void *arg, dtrace_id_t id, void *parg)
1129 {
1130 	ASSERT(arg == NULL);
1131 	ASSERT(id == fasttrap_probe_id);
1132 }
1133 
1134 /*ARGSUSED*/
1135 static void
1136 fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
1137 {
1138 	fasttrap_probe_t *probe = parg;
1139 	int i;
1140 	size_t size;
1141 
1142 	ASSERT(probe != NULL);
1143 	ASSERT(!probe->ftp_enabled);
1144 	ASSERT(fasttrap_total >= probe->ftp_ntps);
1145 
1146 	atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
1147 	size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
1148 
1149 	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
1150 		fasttrap_mod_barrier(probe->ftp_gen);
1151 
1152 	for (i = 0; i < probe->ftp_ntps; i++) {
1153 		kmem_free(probe->ftp_tps[i].fit_tp,
1154 		    sizeof (fasttrap_tracepoint_t));
1155 	}
1156 
1157 	kmem_free(probe, size);
1158 }
1159 
1160 
1161 static const dtrace_pattr_t fasttrap_attr = {
1162 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1163 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1164 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1165 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1166 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1167 };
1168 
1169 static dtrace_pops_t fasttrap_pops = {
1170 	fasttrap_provide,
1171 	NULL,
1172 	fasttrap_enable,
1173 	fasttrap_disable,
1174 	NULL,
1175 	NULL,
1176 	NULL,
1177 	fasttrap_getarg,
1178 	NULL,
1179 	fasttrap_destroy
1180 };
1181 
1182 static const dtrace_pattr_t pid_attr = {
1183 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1184 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1185 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1186 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
1187 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
1188 };
1189 
1190 static dtrace_pops_t pid_pops = {
1191 	fasttrap_pid_provide,
1192 	NULL,
1193 	fasttrap_pid_enable,
1194 	fasttrap_pid_disable,
1195 	NULL,
1196 	NULL,
1197 	fasttrap_pid_getargdesc,
1198 	fasttrap_getarg,
1199 	NULL,
1200 	fasttrap_pid_destroy
1201 };
1202 
1203 static dtrace_pops_t usdt_pops = {
1204 	fasttrap_pid_provide,
1205 	NULL,
1206 	fasttrap_pid_enable,
1207 	fasttrap_pid_disable,
1208 	NULL,
1209 	NULL,
1210 	fasttrap_pid_getargdesc,
1211 	fasttrap_usdt_getarg,
1212 	NULL,
1213 	fasttrap_pid_destroy
1214 };
1215 
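/*
 * Find the fasttrap_proc_t corresponding to the given pid, creating it if it
 * doesn't already exist, and return it with its reference count bumped. The
 * reference is dropped with fasttrap_proc_release().
 */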
1216 static fasttrap_proc_t *
1217 fasttrap_proc_lookup(pid_t pid)
1218 {
1219 	fasttrap_bucket_t *bucket;
1220 	fasttrap_proc_t *fprc, *new_fprc;
1221 
1222 	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1223 	mutex_enter(&bucket->ftb_mtx);
1224 
1225 	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1226 		if (fprc->ftpc_pid == pid && !fprc->ftpc_defunct) {
1227 			mutex_enter(&fprc->ftpc_mtx);
1228 			mutex_exit(&bucket->ftb_mtx);
1229 			fprc->ftpc_count++;
1230 			mutex_exit(&fprc->ftpc_mtx);
1231 
1232 			return (fprc);
1233 		}
1234 	}
1235 
1236 	/*
1237 	 * Drop the bucket lock so we don't try to perform a sleeping
1238 	 * allocation under it.
1239 	 */
1240 	mutex_exit(&bucket->ftb_mtx);
1241 
1242 	new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
1243 	new_fprc->ftpc_pid = pid;
1244 	new_fprc->ftpc_count = 1;
1245 
1246 	mutex_enter(&bucket->ftb_mtx);
1247 
1248 	/*
1249 	 * Take another lap through the list to make sure a proc hasn't
1250 	 * been created for this pid while we weren't under the bucket lock.
1251 	 */
1252 	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1253 		if (fprc->ftpc_pid == pid && !fprc->ftpc_defunct) {
1254 			mutex_enter(&fprc->ftpc_mtx);
1255 			mutex_exit(&bucket->ftb_mtx);
1256 			fprc->ftpc_count++;
1257 			mutex_exit(&fprc->ftpc_mtx);
1258 
1259 			kmem_free(new_fprc, sizeof (fasttrap_proc_t));
1260 
1261 			return (fprc);
1262 		}
1263 	}
1264 
1265 	new_fprc->ftpc_next = bucket->ftb_data;
1266 	bucket->ftb_data = new_fprc;
1267 
1268 	mutex_exit(&bucket->ftb_mtx);
1269 
1270 	return (new_fprc);
1271 }
1272 
1273 static void
1274 fasttrap_proc_release(fasttrap_proc_t *proc)
1275 {
1276 	fasttrap_bucket_t *bucket;
1277 	fasttrap_proc_t *fprc, **fprcp;
1278 	pid_t pid = proc->ftpc_pid;
1279 
1280 	mutex_enter(&proc->ftpc_mtx);
1281 
1282 	ASSERT(proc->ftpc_count != 0);
1283 
1284 	if (--proc->ftpc_count != 0) {
1285 		mutex_exit(&proc->ftpc_mtx);
1286 		return;
1287 	}
1288 
1289 	mutex_exit(&proc->ftpc_mtx);
1290 
1291 	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1292 	mutex_enter(&bucket->ftb_mtx);
1293 
1294 	fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
1295 	while ((fprc = *fprcp) != NULL) {
1296 		if (fprc == proc)
1297 			break;
1298 
1299 		fprcp = &fprc->ftpc_next;
1300 	}
1301 
1302 	/*
1303 	 * Something strange has happened if we can't find the proc.
1304 	 */
1305 	ASSERT(fprc != NULL);
1306 
1307 	*fprcp = fprc->ftpc_next;
1308 
1309 	mutex_exit(&bucket->ftb_mtx);
1310 
1311 	kmem_free(fprc, sizeof (fasttrap_proc_t));
1312 }
1313 
1314 /*
1315  * Lookup a fasttrap-managed provider based on its name and associated pid.
1316  * If the pattr argument is non-NULL, this function instantiates the provider
1317  * if it doesn't exist; otherwise it returns NULL. The provider is returned
1318  * with its lock held.
1319  */
1320 static fasttrap_provider_t *
1321 fasttrap_provider_lookup(pid_t pid, const char *name,
1322     const dtrace_pattr_t *pattr)
1323 {
1324 	fasttrap_provider_t *fp, *new_fp = NULL;
1325 	fasttrap_bucket_t *bucket;
1326 	char provname[DTRACE_PROVNAMELEN];
1327 	proc_t *p;
1328 	cred_t *cred;
1329 
1330 	ASSERT(strlen(name) < sizeof (fp->ftp_name));
1331 	ASSERT(pattr != NULL);
1332 
1333 	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1334 	mutex_enter(&bucket->ftb_mtx);
1335 
1336 	/*
1337 	 * Take a lap through the list and return the match if we find it.
1338 	 */
1339 	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1340 		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1341 		    !fp->ftp_retired) {
1342 			mutex_enter(&fp->ftp_mtx);
1343 			mutex_exit(&bucket->ftb_mtx);
1344 			return (fp);
1345 		}
1346 	}
1347 
1348 	/*
1349 	 * Drop the bucket lock so we don't try to perform a sleeping
1350 	 * allocation under it.
1351 	 */
1352 	mutex_exit(&bucket->ftb_mtx);
1353 
1354 	/*
1355 	 * Make sure the process exists, isn't a child created as the result
1356 	 * of a vfork(2), and isn't a zombie (but may be in fork).
1357 	 */
1358 	mutex_enter(&pidlock);
1359 	if ((p = prfind(pid)) == NULL) {
1360 		mutex_exit(&pidlock);
1361 		return (NULL);
1362 	}
1363 	mutex_enter(&p->p_lock);
1364 	mutex_exit(&pidlock);
1365 	if (p->p_flag & (SVFORK | SEXITING)) {
1366 		mutex_exit(&p->p_lock);
1367 		return (NULL);
1368 	}
1369 
1370 	/*
1371 	 * Increment p_dtrace_probes so that the process knows to inform us
1372 	 * when it exits or execs. fasttrap_provider_free() decrements this
1373 	 * when we're done with this provider.
1374 	 */
1375 	p->p_dtrace_probes++;
1376 
1377 	/*
1378 	 * Grab the credentials for this process so we have
1379 	 * something to pass to dtrace_register().
1380 	 */
1381 	mutex_enter(&p->p_crlock);
1382 	crhold(p->p_cred);
1383 	cred = p->p_cred;
1384 	mutex_exit(&p->p_crlock);
1385 	mutex_exit(&p->p_lock);
1386 
1387 	new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
1388 	new_fp->ftp_pid = pid;
1389 	new_fp->ftp_proc = fasttrap_proc_lookup(pid);
1390 
1391 	ASSERT(new_fp->ftp_proc != NULL);
1392 
1393 	mutex_enter(&bucket->ftb_mtx);
1394 
1395 	/*
1396 	 * Take another lap through the list to make sure a provider hasn't
1397 	 * been created for this pid while we weren't under the bucket lock.
1398 	 */
1399 	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1400 		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1401 		    !fp->ftp_retired) {
1402 			mutex_enter(&fp->ftp_mtx);
1403 			mutex_exit(&bucket->ftb_mtx);
1404 			fasttrap_provider_free(new_fp);
1405 			crfree(cred);
1406 			return (fp);
1407 		}
1408 	}
1409 
1410 	(void) strcpy(new_fp->ftp_name, name);
1411 
1412 	/*
1413 	 * Fail and return NULL if either the provider name is too long
1414 	 * or we fail to register this new provider with the DTrace
1415 	 * framework. Note that this is the only place we ever construct
1416 	 * the full provider name -- we keep it in pieces in the provider
1417 	 * structure.
1418 	 */
1419 	if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
1420 	    sizeof (provname) ||
1421 	    dtrace_register(provname, pattr,
1422 	    DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
1423 	    pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
1424 	    &new_fp->ftp_provid) != 0) {
1425 		mutex_exit(&bucket->ftb_mtx);
1426 		fasttrap_provider_free(new_fp);
1427 		crfree(cred);
1428 		return (NULL);
1429 	}
1430 
1431 	new_fp->ftp_next = bucket->ftb_data;
1432 	bucket->ftb_data = new_fp;
1433 
1434 	mutex_enter(&new_fp->ftp_mtx);
1435 	mutex_exit(&bucket->ftb_mtx);
1436 
1437 	crfree(cred);
1438 	return (new_fp);
1439 }
1440 
1441 static void
1442 fasttrap_provider_free(fasttrap_provider_t *provider)
1443 {
1444 	pid_t pid = provider->ftp_pid;
1445 	proc_t *p;
1446 
1447 	/*
1448 	 * There need to be no associated enabled probes, no consumers
1449 	 * creating probes, and no meta providers referencing this provider.
1450 	 */
1451 	ASSERT(provider->ftp_rcount == 0);
1452 	ASSERT(provider->ftp_ccount == 0);
1453 	ASSERT(provider->ftp_mcount == 0);
1454 
1455 	fasttrap_proc_release(provider->ftp_proc);
1456 
1457 	kmem_free(provider, sizeof (fasttrap_provider_t));
1458 
1459 	/*
1460 	 * Decrement p_dtrace_probes on the process whose provider we're
1461 	 * freeing. We don't have to worry about clobbering someone else's
1462 	 * modifications to it because we have locked the bucket that
1463 	 * corresponds to this process's hash chain in the provider hash
1464 	 * table. Don't sweat it if we can't find the process.
1465 	 */
1466 	mutex_enter(&pidlock);
1467 	if ((p = prfind(pid)) == NULL) {
1468 		mutex_exit(&pidlock);
1469 		return;
1470 	}
1471 
1472 	mutex_enter(&p->p_lock);
1473 	mutex_exit(&pidlock);
1474 
1475 	p->p_dtrace_probes--;
1476 	mutex_exit(&p->p_lock);
1477 }
1478 
1479 static void
1480 fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
1481 {
1482 	fasttrap_provider_t *fp;
1483 	fasttrap_bucket_t *bucket;
1484 	dtrace_provider_id_t provid;
1485 
1486 	ASSERT(strlen(name) < sizeof (fp->ftp_name));
1487 
1488 	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1489 	mutex_enter(&bucket->ftb_mtx);
1490 
1491 	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1492 		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
1493 		    !fp->ftp_retired)
1494 			break;
1495 	}
1496 
1497 	if (fp == NULL) {
1498 		mutex_exit(&bucket->ftb_mtx);
1499 		return;
1500 	}
1501 
1502 	mutex_enter(&fp->ftp_mtx);
1503 	ASSERT(!mprov || fp->ftp_mcount > 0);
1504 	if (mprov && --fp->ftp_mcount != 0)  {
1505 		mutex_exit(&fp->ftp_mtx);
1506 		mutex_exit(&bucket->ftb_mtx);
1507 		return;
1508 	}
1509 
1510 	/*
1511 	 * Mark the provider to be removed in our post-processing step,
1512 	 * mark it retired, and mark its proc as defunct (though it may
1513 	 * already be marked defunct by another provider that shares the
1514 	 * same proc). Marking it indicates that we should try to remove it;
1515 	 * setting the retired flag indicates that we're done with this
1516 	 * provider; setting the proc to be defunct indicates that all
1517 	 * tracepoints associated with the traced process should be ignored.
1518 	 *
1519 	 * We obviously need to take the bucket lock before the provider lock
1520 	 * to perform the lookup, but we need to drop the provider lock
1521 	 * before calling into the DTrace framework since we acquire the
1522 	 * provider lock in callbacks invoked from the DTrace framework. The
1523 	 * bucket lock therefore protects the integrity of the provider hash
1524 	 * table.
1525 	 */
1526 	fp->ftp_proc->ftpc_defunct = 1;
1527 	fp->ftp_retired = 1;
1528 	fp->ftp_marked = 1;
1529 	provid = fp->ftp_provid;
1530 	mutex_exit(&fp->ftp_mtx);
1531 
1532 	/*
1533 	 * We don't have to worry about invalidating the same provider twice
1534 	 * since fasttrap_provider_lookup() will ignore providers that have
1535 	 * been marked as retired.
1536 	 */
1537 	dtrace_invalidate(provid);
1538 
1539 	mutex_exit(&bucket->ftb_mtx);
1540 
1541 	fasttrap_pid_cleanup();
1542 }
1543 
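/*
 * Create pid provider probes for the process described by pdata: a single
 * entry or return probe spanning the given offsets, or one probe per offset
 * named by its hex offset. The pid provider is created on demand. Called
 * from the FASTTRAPIOC_MAKEPROBE ioctl below.
 */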
1544 static int
1545 fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
1546 {
1547 	fasttrap_provider_t *provider;
1548 	fasttrap_probe_t *pp;
1549 	fasttrap_tracepoint_t *tp;
1550 	char *name;
1551 	int i, aframes, whack;
1552 
1553 	switch (pdata->ftps_type) {
1554 	case DTFTP_ENTRY:
1555 		name = "entry";
1556 		aframes = FASTTRAP_ENTRY_AFRAMES;
1557 		break;
1558 	case DTFTP_RETURN:
1559 		name = "return";
1560 		aframes = FASTTRAP_RETURN_AFRAMES;
1561 		break;
1562 	case DTFTP_OFFSETS:
1563 		name = NULL;
1564 		break;
1565 	default:
1566 		return (EINVAL);
1567 	}
1568 
1569 	if ((provider = fasttrap_provider_lookup(pdata->ftps_pid,
1570 	    FASTTRAP_PID_NAME, &pid_attr)) == NULL)
1571 		return (ESRCH);
1572 
1573 	/*
1574 	 * Increment this reference count to indicate that a consumer is
1575 	 * actively adding a new probe associated with this provider.
1576 	 */
1577 	provider->ftp_ccount++;
1578 	mutex_exit(&provider->ftp_mtx);
1579 
1580 	if (name != NULL) {
1581 		if (dtrace_probe_lookup(provider->ftp_provid,
1582 		    pdata->ftps_mod, pdata->ftps_func, name) != 0)
1583 			goto done;
1584 
1585 		atomic_add_32(&fasttrap_total, pdata->ftps_noffs);
1586 
1587 		if (fasttrap_total > fasttrap_max) {
1588 			atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
1589 			goto no_mem;
1590 		}
1591 
1592 		ASSERT(pdata->ftps_noffs > 0);
1593 		pp = kmem_zalloc(offsetof(fasttrap_probe_t,
1594 		    ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
1595 
1596 		pp->ftp_prov = provider;
1597 		pp->ftp_faddr = pdata->ftps_pc;
1598 		pp->ftp_fsize = pdata->ftps_size;
1599 		pp->ftp_pid = pdata->ftps_pid;
1600 		pp->ftp_ntps = pdata->ftps_noffs;
1601 
1602 		for (i = 0; i < pdata->ftps_noffs; i++) {
1603 			tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
1604 			    KM_SLEEP);
1605 
1606 			tp->ftt_proc = provider->ftp_proc;
1607 			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1608 			tp->ftt_pid = pdata->ftps_pid;
1609 
1610 			pp->ftp_tps[i].fit_tp = tp;
1611 			pp->ftp_tps[i].fit_id.fti_probe = pp;
1612 			pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_type;
1613 		}
1614 
1615 		pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1616 		    pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
1617 	} else {
1618 		for (i = 0; i < pdata->ftps_noffs; i++) {
1619 			char name_str[17];
1620 
1621 			(void) sprintf(name_str, "%llx",
1622 			    (unsigned long long)pdata->ftps_offs[i]);
1623 
1624 			if (dtrace_probe_lookup(provider->ftp_provid,
1625 			    pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
1626 				continue;
1627 
1628 			atomic_add_32(&fasttrap_total, 1);
1629 
1630 			if (fasttrap_total > fasttrap_max) {
1631 				atomic_add_32(&fasttrap_total, -1);
1632 				goto no_mem;
1633 			}
1634 
1635 			pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);
1636 
1637 			pp->ftp_prov = provider;
1638 			pp->ftp_faddr = pdata->ftps_pc;
1639 			pp->ftp_fsize = pdata->ftps_size;
1640 			pp->ftp_pid = pdata->ftps_pid;
1641 			pp->ftp_ntps = 1;
1642 
1643 			tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
1644 			    KM_SLEEP);
1645 
1646 			tp->ftt_proc = provider->ftp_proc;
1647 			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
1648 			tp->ftt_pid = pdata->ftps_pid;
1649 
1650 			pp->ftp_tps[0].fit_tp = tp;
1651 			pp->ftp_tps[0].fit_id.fti_probe = pp;
1652 			pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_type;
1653 
1654 			pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
1655 			    pdata->ftps_mod, pdata->ftps_func, name_str,
1656 			    FASTTRAP_OFFSET_AFRAMES, pp);
1657 		}
1658 	}
1659 
1660 done:
1661 	/*
1662 	 * We know that the provider is still valid since we incremented the
1663 	 * reference count. If someone tried to free this provider while we
1664 	 * were using it (e.g. because the process called exec(2) or exit(2)),
1665 	 * take note of that and try to free it now.
1666 	 */
1667 	mutex_enter(&provider->ftp_mtx);
1668 	provider->ftp_ccount--;
1669 	whack = provider->ftp_retired;
1670 	mutex_exit(&provider->ftp_mtx);
1671 
1672 	if (whack)
1673 		fasttrap_pid_cleanup();
1674 
1675 	return (0);
1676 
1677 no_mem:
1678 	/*
1679 	 * If we've exhausted the allowable resources, we'll try to remove
1680 	 * this provider to free some up. This is to cover the case where
1681 	 * the user has accidentally created many more probes than was
1682 	 * intended (e.g. pid123:::).
1683 	 */
1684 	mutex_enter(&provider->ftp_mtx);
1685 	provider->ftp_ccount--;
1686 	provider->ftp_marked = 1;
1687 	mutex_exit(&provider->ftp_mtx);
1688 
1689 	fasttrap_pid_cleanup();
1690 
1691 	return (ENOMEM);
1692 }
1693 
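/*
 * Meta-provider entry point used to create (or look up) the USDT provider
 * <provname><pid> for the given process, bumping its meta-provider
 * reference count so it isn't removed until fasttrap_meta_remove() is called.
 */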
1694 /*ARGSUSED*/
1695 static void *
1696 fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
1697 {
1698 	fasttrap_provider_t *provider;
1699 
1700 	/*
1701 	 * A 32-bit unsigned integer (like a pid for example) can be
1702 	 * expressed in 10 or fewer decimal digits. Make sure that we'll
1703 	 * have enough space for the provider name.
1704 	 */
1705 	if (strlen(dhpv->dthpv_provname) + 10 >=
1706 	    sizeof (provider->ftp_name)) {
1707 		cmn_err(CE_WARN, "failed to instantiate provider %s: "
1708 		    "name too long to accommodate pid", dhpv->dthpv_provname);
1709 		return (NULL);
1710 	}
1711 
1712 	/*
1713 	 * Don't let folks spoof the true pid provider.
1714 	 */
1715 	if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) {
1716 		cmn_err(CE_WARN, "failed to instantiate provider %s: "
1717 		    "%s is an invalid name", dhpv->dthpv_provname,
1718 		    FASTTRAP_PID_NAME);
1719 		return (NULL);
1720 	}
1721 
1722 	/*
1723 	 * The highest stability class that fasttrap supports is ISA; cap
1724 	 * the stability of the new provider accordingly.
1725 	 */
1726 	if (dhpv->dthpv_pattr.dtpa_provider.dtat_class >= DTRACE_CLASS_COMMON)
1727 		dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
1728 	if (dhpv->dthpv_pattr.dtpa_mod.dtat_class >= DTRACE_CLASS_COMMON)
1729 		dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
1730 	if (dhpv->dthpv_pattr.dtpa_func.dtat_class >= DTRACE_CLASS_COMMON)
1731 		dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
1732 	if (dhpv->dthpv_pattr.dtpa_name.dtat_class >= DTRACE_CLASS_COMMON)
1733 		dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
1734 	if (dhpv->dthpv_pattr.dtpa_args.dtat_class >= DTRACE_CLASS_COMMON)
1735 		dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
1736 
1737 	if ((provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname,
1738 	    &dhpv->dthpv_pattr)) == NULL) {
1739 		cmn_err(CE_WARN, "failed to instantiate provider %s for "
1740 		    "process %u",  dhpv->dthpv_provname, (uint_t)pid);
1741 		return (NULL);
1742 	}
1743 
1744 	/*
1745 	 * Up the meta provider count so this provider isn't removed until
1746 	 * the meta provider has been told to remove it.
1747 	 */
1748 	provider->ftp_mcount++;
1749 
1750 	mutex_exit(&provider->ftp_mtx);
1751 
1752 	return (provider);
1753 }
1754 
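/*
 * Create a USDT probe from the helper probe description: one tracepoint for
 * each offset and each is-enabled offset, all registered with DTrace as a
 * single probe.
 */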
1755 /*ARGSUSED*/
1756 static void
1757 fasttrap_meta_create_probe(void *arg, void *parg,
1758     dtrace_helper_probedesc_t *dhpb)
1759 {
1760 	fasttrap_provider_t *provider = parg;
1761 	fasttrap_probe_t *pp;
1762 	fasttrap_tracepoint_t *tp;
1763 	int i, j;
1764 	uint32_t ntps;
1765 
1766 	mutex_enter(&provider->ftp_mtx);
1767 
1768 	if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
1769 	    dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
1770 		mutex_exit(&provider->ftp_mtx);
1771 		return;
1772 	}
1773 
1774 	ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
1775 	ASSERT(ntps > 0);
1776 
1777 	atomic_add_32(&fasttrap_total, ntps);
1778 
1779 	if (fasttrap_total > fasttrap_max) {
1780 		atomic_add_32(&fasttrap_total, -ntps);
1781 		mutex_exit(&provider->ftp_mtx);
1782 		return;
1783 	}
1784 
1785 	pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
1786 
1787 	pp->ftp_prov = provider;
1788 	pp->ftp_pid = provider->ftp_pid;
1789 	pp->ftp_ntps = ntps;
1790 	pp->ftp_nargs = dhpb->dthpb_xargc;
1791 	pp->ftp_xtypes = dhpb->dthpb_xtypes;
1792 	pp->ftp_ntypes = dhpb->dthpb_ntypes;
1793 
1794 	/*
1795 	 * First create a tracepoint for each actual point of interest.
1796 	 */
1797 	for (i = 0; i < dhpb->dthpb_noffs; i++) {
1798 		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
1799 
1800 		tp->ftt_proc = provider->ftp_proc;
1801 		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
1802 		tp->ftt_pid = provider->ftp_pid;
1803 
1804 		pp->ftp_tps[i].fit_tp = tp;
1805 		pp->ftp_tps[i].fit_id.fti_probe = pp;
1806 #ifdef __sparc
1807 		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
1808 #else
1809 		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
1810 #endif
1811 	}
1812 
1813 	/*
1814 	 * Then create a tracepoint for each is-enabled point.
1815 	 */
1816 	for (j = 0; i < ntps; i++, j++) {
1817 		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);
1818 
1819 		tp->ftt_proc = provider->ftp_proc;
1820 		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
1821 		tp->ftt_pid = provider->ftp_pid;
1822 
1823 		pp->ftp_tps[i].fit_tp = tp;
1824 		pp->ftp_tps[i].fit_id.fti_probe = pp;
1825 		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
1826 	}
1827 
1828 	/*
1829 	 * If the arguments are shuffled around we set the argument remapping
1830 	 * table. Later, when the probe fires, we only remap the arguments
1831 	 * if the table is non-NULL.
1832 	 */
1833 	for (i = 0; i < dhpb->dthpb_xargc; i++) {
1834 		if (dhpb->dthpb_args[i] != i) {
1835 			pp->ftp_argmap = dhpb->dthpb_args;
1836 			break;
1837 		}
1838 	}
1839 
1840 	/*
1841 	 * The probe is fully constructed -- register it with DTrace.
1842 	 */
1843 	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
1844 	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);
1845 
1846 	mutex_exit(&provider->ftp_mtx);
1847 }
1848 
1849 /*ARGSUSED*/
1850 static void
1851 fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
1852 {
1853 	/*
1854 	 * Clean up the USDT provider. There may be active consumers of the
1855 	 * provider busy adding probes, but no damage will actually befall
1856 	 * the provider until that count has dropped to zero. This just puts
1857 	 * the provider on death row.
1858 	 */
1859 	fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
1860 }
1861 
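/*
 * Meta-provider operations vector; this is handed to dtrace_meta_register()
 * in fasttrap_attach() below.
 */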
1862 static dtrace_mops_t fasttrap_mops = {
1863 	fasttrap_meta_create_probe,
1864 	fasttrap_meta_provide,
1865 	fasttrap_meta_remove
1866 };
1867 
1868 /*ARGSUSED*/
1869 static int
1870 fasttrap_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
1871 {
1872 	return (0);
1873 }
1874 
1875 /*ARGSUSED*/
1876 static int
1877 fasttrap_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
1878 {
1879 	if (!dtrace_attached())
1880 		return (EAGAIN);
1881 
1882 	if (cmd == FASTTRAPIOC_MAKEPROBE) {
1883 		fasttrap_probe_spec_t *uprobe = (void *)arg;
1884 		fasttrap_probe_spec_t *probe;
1885 		uint64_t noffs;
1886 		size_t size;
1887 		int ret;
1888 		char *c;
1889 
1890 		if (copyin(&uprobe->ftps_noffs, &noffs,
1891 		    sizeof (uprobe->ftps_noffs)))
1892 			return (EFAULT);
1893 
1894 		/*
1895 		 * Probes must have at least one tracepoint.
1896 		 */
1897 		if (noffs == 0)
1898 			return (EINVAL);
1899 
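		/*
		 * sizeof (fasttrap_probe_spec_t) already accounts for one
		 * offset, so we only add space for the remaining noffs - 1.
		 */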
1900 		size = sizeof (fasttrap_probe_spec_t) +
1901 		    sizeof (probe->ftps_offs[0]) * (noffs - 1);
1902 
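		/*
		 * Cap the allocation at 1MB so that a bogus ftps_noffs from
		 * user space can't tie up an arbitrary amount of kernel
		 * memory.
		 */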
1903 		if (size > 1024 * 1024)
1904 			return (ENOMEM);
1905 
1906 		probe = kmem_alloc(size, KM_SLEEP);
1907 
1908 		if (copyin(uprobe, probe, size) != 0) {
1909 			kmem_free(probe, size);
1910 			return (EFAULT);
1911 		}
1912 
1913 		/*
1914 		 * Verify that the function and module strings contain no
1915 		 * funny characters.
1916 		 */
1917 		for (c = &probe->ftps_func[0]; *c != '\0'; c++) {
1918 			if (*c < 0x20 || 0x7f <= *c) {
1919 				ret = EINVAL;
1920 				goto err;
1921 			}
1922 		}
1923 
1924 		for (c = &probe->ftps_mod[0]; *c != '\0'; c++) {
1925 			if (*c < 0x20 || 0x7f <= *c) {
1926 				ret = EINVAL;
1927 				goto err;
1928 			}
1929 		}
1930 
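		/*
		 * Unless the caller has all privileges, require that it be
		 * able to both read and write the target process.
		 */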
1931 		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
1932 			proc_t *p;
1933 			pid_t pid = probe->ftps_pid;
1934 
1935 			mutex_enter(&pidlock);
1936 			/*
1937 			 * Report an error if the process doesn't exist
1938 			 * or is actively being birthed.
1939 			 */
1940 			if ((p = prfind(pid)) == NULL || p->p_stat == SIDL) {
1941 				mutex_exit(&pidlock);
1942 				ret = ESRCH;
				goto err;
1943 			}
1944 			mutex_enter(&p->p_lock);
1945 			mutex_exit(&pidlock);
1946 
1947 			if ((ret = priv_proc_cred_perm(cr, p, NULL,
1948 			    VREAD | VWRITE)) != 0) {
1949 				mutex_exit(&p->p_lock);
1950 				goto err;
1951 			}
1952 
1953 			mutex_exit(&p->p_lock);
1954 		}
1955 
1956 		ret = fasttrap_add_probe(probe);
1957 err:
1958 		kmem_free(probe, size);
1959 
1960 		return (ret);
1961 
1962 	} else if (cmd == FASTTRAPIOC_GETINSTR) {
1963 		fasttrap_instr_query_t instr;
1964 		fasttrap_tracepoint_t *tp;
1965 		uint_t index;
1966 		int ret;
1967 
1968 		if (copyin((void *)arg, &instr, sizeof (instr)) != 0)
1969 			return (EFAULT);
1970 
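		/*
		 * As above, but fetching the original instruction only
		 * requires read access to the target process.
		 */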
1971 		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
1972 			proc_t *p;
1973 			pid_t pid = instr.ftiq_pid;
1974 
1975 			mutex_enter(&pidlock);
1976 			/*
1977 			 * Report an error if the process doesn't exist
1978 			 * or is actively being birthed.
1979 			 */
1980 			if ((p = prfind(pid)) == NULL || p->p_stat == SIDL) {
1981 				mutex_exit(&pidlock);
1982 				return (ESRCH);
1983 			}
1984 			mutex_enter(&p->p_lock);
1985 			mutex_exit(&pidlock);
1986 
1987 			if ((ret = priv_proc_cred_perm(cr, p, NULL,
1988 			    VREAD)) != 0) {
1989 				mutex_exit(&p->p_lock);
1990 				return (ret);
1991 			}
1992 
1993 			mutex_exit(&p->p_lock);
1994 		}
1995 
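		/*
		 * Hash on (pid, pc) and walk the bucket looking for a
		 * matching tracepoint that hasn't been marked defunct.
		 */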
1996 		index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
1997 
1998 		mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx);
1999 		tp = fasttrap_tpoints.fth_table[index].ftb_data;
2000 		while (tp != NULL) {
2001 			if (instr.ftiq_pid == tp->ftt_pid &&
2002 			    instr.ftiq_pc == tp->ftt_pc &&
2003 			    !tp->ftt_proc->ftpc_defunct)
2004 				break;
2005 
2006 			tp = tp->ftt_next;
2007 		}
2008 
2009 		if (tp == NULL) {
2010 			mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2011 			return (ENOENT);
2012 		}
2013 
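		/*
		 * Copy the saved original instruction into the query while
		 * we still hold the bucket lock.
		 */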
2014 		bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2015 		    sizeof (instr.ftiq_instr));
2016 		mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2017 
2018 		if (copyout(&instr, (void *)arg, sizeof (instr)) != 0)
2019 			return (EFAULT);
2020 
2021 		return (0);
2022 	}
2023 
2024 	return (EINVAL);
2025 }
2026 
2027 static struct cb_ops fasttrap_cb_ops = {
2028 	fasttrap_open,		/* open */
2029 	nodev,			/* close */
2030 	nulldev,		/* strategy */
2031 	nulldev,		/* print */
2032 	nodev,			/* dump */
2033 	nodev,			/* read */
2034 	nodev,			/* write */
2035 	fasttrap_ioctl,		/* ioctl */
2036 	nodev,			/* devmap */
2037 	nodev,			/* mmap */
2038 	nodev,			/* segmap */
2039 	nochpoll,		/* poll */
2040 	ddi_prop_op,		/* cb_prop_op */
2041 	0,			/* streamtab  */
2042 	D_NEW | D_MP		/* Driver compatibility flag */
2043 };
2044 
2045 /*ARGSUSED*/
2046 static int
2047 fasttrap_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
2048 {
2049 	int error;
2050 
2051 	switch (infocmd) {
2052 	case DDI_INFO_DEVT2DEVINFO:
2053 		*result = (void *)fasttrap_devi;
2054 		error = DDI_SUCCESS;
2055 		break;
2056 	case DDI_INFO_DEVT2INSTANCE:
2057 		*result = (void *)0;
2058 		error = DDI_SUCCESS;
2059 		break;
2060 	default:
2061 		error = DDI_FAILURE;
2062 	}
2063 	return (error);
2064 }
2065 
2066 static int
2067 fasttrap_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
2068 {
2069 	ulong_t nent;
2070 
2071 	switch (cmd) {
2072 	case DDI_ATTACH:
2073 		break;
2074 	case DDI_RESUME:
2075 		return (DDI_SUCCESS);
2076 	default:
2077 		return (DDI_FAILURE);
2078 	}
2079 
2080 	if (ddi_create_minor_node(devi, "fasttrap", S_IFCHR, 0,
2081 	    DDI_PSEUDO, NULL) == DDI_FAILURE ||
2082 	    dtrace_register("fasttrap", &fasttrap_attr, DTRACE_PRIV_USER, NULL,
2083 	    &fasttrap_pops, NULL, &fasttrap_id) != 0) {
2084 		ddi_remove_minor_node(devi, NULL);
2085 		return (DDI_FAILURE);
2086 	}
2087 
2088 	ddi_report_dev(devi);
2089 	fasttrap_devi = devi;
2090 
2091 	/*
2092 	 * Install our hooks into fork(2), exec(2), and exit(2).
2093 	 */
2094 	dtrace_fasttrap_fork_ptr = &fasttrap_fork;
2095 	dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
2096 	dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;
2097 
2098 	fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2099 	    "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
2100 	fasttrap_total = 0;
2101 
2102 	/*
2103 	 * Conjure up the tracepoints hashtable...
2104 	 */
2105 	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
2106 	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
2107 
2108 	if (nent <= 0 || nent > 0x1000000)
2109 		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
2110 
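	/*
	 * Round the table size to a power of two so that fth_mask can be
	 * used to turn a hash value into a bucket index.
	 */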
2111 	if ((nent & (nent - 1)) == 0)
2112 		fasttrap_tpoints.fth_nent = nent;
2113 	else
2114 		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
2115 	ASSERT(fasttrap_tpoints.fth_nent > 0);
2116 	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
2117 	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
2118 	    sizeof (fasttrap_bucket_t), KM_SLEEP);
2119 
2120 	/*
2121 	 * ... and the providers hash table...
2122 	 */
2123 	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
2124 	if ((nent & (nent - 1)) == 0)
2125 		fasttrap_provs.fth_nent = nent;
2126 	else
2127 		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
2128 	ASSERT(fasttrap_provs.fth_nent > 0);
2129 	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
2130 	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
2131 	    sizeof (fasttrap_bucket_t), KM_SLEEP);
2132 
2133 	/*
2134 	 * ... and the procs hash table.
2135 	 */
2136 	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
2137 	if ((nent & (nent - 1)) == 0)
2138 		fasttrap_procs.fth_nent = nent;
2139 	else
2140 		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
2141 	ASSERT(fasttrap_procs.fth_nent > 0);
2142 	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
2143 	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
2144 	    sizeof (fasttrap_bucket_t), KM_SLEEP);
2145 
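	/*
	 * Finally, register with the DTrace framework as a meta-provider so
	 * that USDT providers can be created on behalf of user processes.
	 */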
2146 	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2147 	    &fasttrap_meta_id);
2148 
2149 	return (DDI_SUCCESS);
2150 }
2151 
2152 static int
2153 fasttrap_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
2154 {
2155 	int i, fail = 0;
2156 	timeout_id_t tmp;
2157 
2158 	switch (cmd) {
2159 	case DDI_DETACH:
2160 		break;
2161 	case DDI_SUSPEND:
2162 		return (DDI_SUCCESS);
2163 	default:
2164 		return (DDI_FAILURE);
2165 	}
2166 
2167 	/*
2168 	 * Unregister the meta-provider to make sure no new fasttrap-
2169 	 * managed providers come along while we're trying to close up
2170 	 * shop. If we fail to detach, we'll need to re-register as a
2171 	 * meta-provider. We can fail to unregister as a meta-provider
2172 	 * if providers we manage still exist.
2173 	 */
2174 	if (fasttrap_meta_id != DTRACE_METAPROVNONE &&
2175 	    dtrace_meta_unregister(fasttrap_meta_id) != 0)
2176 		return (DDI_FAILURE);
2177 
2178 	/*
2179 	 * Prevent any new timeouts from running by setting fasttrap_timeout
2180 	 * to a non-zero value, and wait for the current timeout to complete.
2181 	 */
2182 	mutex_enter(&fasttrap_cleanup_mtx);
2183 	fasttrap_cleanup_work = 0;
2184 
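	/*
	 * (timeout_id_t)1 serves as the "detach in progress" sentinel; since
	 * we have to drop the lock to call untimeout(), loop until the
	 * sentinel value sticks.
	 */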
2185 	while (fasttrap_timeout != (timeout_id_t)1) {
2186 		tmp = fasttrap_timeout;
2187 		fasttrap_timeout = (timeout_id_t)1;
2188 
2189 		if (tmp != 0) {
2190 			mutex_exit(&fasttrap_cleanup_mtx);
2191 			(void) untimeout(tmp);
2192 			mutex_enter(&fasttrap_cleanup_mtx);
2193 		}
2194 	}
2195 
2196 	fasttrap_cleanup_work = 0;
2197 	mutex_exit(&fasttrap_cleanup_mtx);
2198 
2199 	/*
2200 	 * Iterate over all of our providers. If we fail to unregister any
2201 	 * one of them from DTrace, we must fail to detach.
2202 	 */
2203 	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
2204 		fasttrap_provider_t **fpp, *fp;
2205 		fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i];
2206 
2207 		mutex_enter(&bucket->ftb_mtx);
2208 		fpp = (fasttrap_provider_t **)&bucket->ftb_data;
2209 		while ((fp = *fpp) != NULL) {
2210 			/*
2211 			 * Acquire and release the lock as a simple way of
2212 			 * waiting for any other consumer to finish with
2213 			 * this provider. A thread must first acquire the
2214 			 * bucket lock so there's no chance of another thread
2215 			 * blocking on the provider's lock.
2216 			 */
2217 			mutex_enter(&fp->ftp_mtx);
2218 			mutex_exit(&fp->ftp_mtx);
2219 
2220 			if (dtrace_unregister(fp->ftp_provid) != 0) {
2221 				fail = 1;
2222 				fpp = &fp->ftp_next;
2223 			} else {
2224 				*fpp = fp->ftp_next;
2225 				fasttrap_provider_free(fp);
2226 			}
2227 		}
2228 
2229 		mutex_exit(&bucket->ftb_mtx);
2230 	}
2231 
2232 	if (fail || dtrace_unregister(fasttrap_id) != 0) {
2233 		uint_t work;
2234 		/*
2235 		 * If we're failing to detach, we need to unblock timeouts
2236 		 * and start a new timeout if any work has accumulated while
2237 		 * we've been unsuccessfully trying to detach.
2238 		 */
2239 		mutex_enter(&fasttrap_cleanup_mtx);
2240 		fasttrap_timeout = 0;
2241 		work = fasttrap_cleanup_work;
2242 		mutex_exit(&fasttrap_cleanup_mtx);
2243 
2244 		if (work)
2245 			fasttrap_pid_cleanup();
2246 
2247 		(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
2248 		    &fasttrap_meta_id);
2249 
2250 		return (DDI_FAILURE);
2251 	}
2252 
2253 #ifdef DEBUG
2254 	mutex_enter(&fasttrap_count_mtx);
2255 	ASSERT(fasttrap_count == 0);
2256 	mutex_exit(&fasttrap_count_mtx);
2257 #endif
2258 
2259 	kmem_free(fasttrap_tpoints.fth_table,
2260 	    fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
2261 	fasttrap_tpoints.fth_nent = 0;
2262 
2263 	kmem_free(fasttrap_provs.fth_table,
2264 	    fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t));
2265 	fasttrap_provs.fth_nent = 0;
2266 
2267 	kmem_free(fasttrap_procs.fth_table,
2268 	    fasttrap_procs.fth_nent * sizeof (fasttrap_bucket_t));
2269 	fasttrap_procs.fth_nent = 0;
2270 
2271 	/*
2272 	 * We know there are no tracepoints in any process anywhere in
2273 	 * the system, so there is no process whose p_dtrace_count is
2274 	 * greater than zero; therefore no thread can actively be executing
2275 	 * code in fasttrap_fork(). The same reasoning applies to
2276 	 * p_dtrace_probes and fasttrap_exec() and fasttrap_exit().
2277 	 */
2278 	ASSERT(dtrace_fasttrap_fork_ptr == &fasttrap_fork);
2279 	dtrace_fasttrap_fork_ptr = NULL;
2280 
2281 	ASSERT(dtrace_fasttrap_exec_ptr == &fasttrap_exec_exit);
2282 	dtrace_fasttrap_exec_ptr = NULL;
2283 
2284 	ASSERT(dtrace_fasttrap_exit_ptr == &fasttrap_exec_exit);
2285 	dtrace_fasttrap_exit_ptr = NULL;
2286 
2287 	ddi_remove_minor_node(devi, NULL);
2288 
2289 	return (DDI_SUCCESS);
2290 }
2291 
2292 static struct dev_ops fasttrap_ops = {
2293 	DEVO_REV,		/* devo_rev */
2294 	0,			/* refcnt */
2295 	fasttrap_info,		/* get_dev_info */
2296 	nulldev,		/* identify */
2297 	nulldev,		/* probe */
2298 	fasttrap_attach,	/* attach */
2299 	fasttrap_detach,	/* detach */
2300 	nodev,			/* reset */
2301 	&fasttrap_cb_ops,	/* driver operations */
2302 	NULL,			/* bus operations */
2303 	nodev			/* dev power */
2304 };
2305 
2306 /*
2307  * Module linkage information for the kernel.
2308  */
2309 static struct modldrv modldrv = {
2310 	&mod_driverops,		/* module type (this is a pseudo driver) */
2311 	"Fasttrap Tracing",	/* name of module */
2312 	&fasttrap_ops,		/* driver ops */
2313 };
2314 
2315 static struct modlinkage modlinkage = {
2316 	MODREV_1,
2317 	(void *)&modldrv,
2318 	NULL
2319 };
2320 
2321 int
2322 _init(void)
2323 {
2324 	return (mod_install(&modlinkage));
2325 }
2326 
2327 int
2328 _info(struct modinfo *modinfop)
2329 {
2330 	return (mod_info(&modlinkage, modinfop));
2331 }
2332 
2333 int
2334 _fini(void)
2335 {
2336 	return (mod_remove(&modlinkage));
2337 }
2338