xref: /titanic_50/usr/src/lib/libdtrace/common/dt_proc.c (revision 53548f91e84cd97a638c23b5b295cc69089a5030)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2012 by Delphix. All rights reserved.
29  */
30 
31 /*
32  * DTrace Process Control
33  *
34  * This file provides a set of routines that permit libdtrace and its clients
35  * to create and grab process handles using libproc, and to share these handles
36  * between library mechanisms that need libproc access, such as ustack(), and
37  * client mechanisms that need libproc access, such as dtrace(1M) -c and -p.
38  * The library provides several mechanisms in the libproc control layer:
39  *
40  * Reference Counting: The library code and client code can independently grab
41  * the same process handles without interfering with one another.  Only when
42  * the reference count drops to zero and the handle is not being cached (see
43  * below for more information on caching) will Prelease() be called on it.
44  *
45  * Handle Caching: If a handle is grabbed PGRAB_RDONLY (e.g. by ustack()) and
46  * the reference count drops to zero, the handle is not immediately released.
47  * Instead, libproc handles are maintained on dph_lrulist in order from most-
48  * recently accessed to least-recently accessed.  Idle handles are maintained
49  * until a pre-defined LRU cache limit is exceeded, permitting repeated calls
50  * to ustack() to avoid the overhead of releasing and re-grabbing processes.
51  *
52  * Process Control: For processes that are grabbed for control (~PGRAB_RDONLY)
53  * or created by dt_proc_create(), a control thread is created to provide
54  * callbacks on process exit and symbol table caching on dlopen()s.
55  *
56  * MT-Safety: Libproc is not MT-Safe, so dt_proc_lock() and dt_proc_unlock()
57  * are provided to synchronize access to the libproc handle between libdtrace
58  * code and client code and the control thread's use of the ps_prochandle.
59  *
60  * NOTE: MT-Safety is NOT provided for libdtrace itself, or for use of the
61  * dtrace_proc_grab/dtrace_proc_create mechanisms.  Like all exported libdtrace
62  * calls, these are assumed to be MT-Unsafe.  MT-Safety is ONLY provided for
63  * synchronization between libdtrace control threads and the client thread.
64  *
65  * The ps_prochandles themselves are maintained along with a dt_proc_t struct
66  * in a hash table indexed by PID.  This provides basic locking and reference
67  * counting.  The dt_proc_t is also maintained in LRU order on dph_lrulist.
68  * The dph_lrucnt and dph_lrulim count the number of cacheable processes and
69  * the current limit on the number of actively cached entries.
70  *
71  * The control thread for a process establishes breakpoints at the rtld_db
72  * locations of interest, updates mappings and symbol tables at these points,
73  * and handles exec and fork (by always following the parent).  The control
74  * thread automatically exits when the process dies or control is lost.
75  *
76  * A simple notification mechanism is provided for libdtrace clients using
77  * dtrace_handle_proc() for notification of PS_UNDEAD or PS_LOST events.  If
78  * such an event occurs, the dt_proc_t itself is enqueued on a notification
79  * list and the control thread broadcasts to dph_cv.  dtrace_sleep() will wake
80  * up using this condition and will then call the client handler as necessary.
81  */
82 
83 #include <sys/wait.h>
84 #include <sys/lwp.h>
85 #include <strings.h>
86 #include <signal.h>
87 #include <assert.h>
88 #include <errno.h>
89 
90 #include <dt_proc.h>
91 #include <dt_pid.h>
92 #include <dt_impl.h>
93 
/*
 * Tests applied to the pr_what value of a stopped LWP to recognize system
 * calls from the exec(2) and fork(2) families, respectively.
 */
#define	IS_SYS_EXEC(w)	(w == SYS_execve)
#define	IS_SYS_FORK(w)	(w == SYS_vfork || w == SYS_forksys)
96 
97 static dt_bkpt_t *
98 dt_proc_bpcreate(dt_proc_t *dpr, uintptr_t addr, dt_bkpt_f *func, void *data)
99 {
100 	struct ps_prochandle *P = dpr->dpr_proc;
101 	dt_bkpt_t *dbp;
102 
103 	assert(MUTEX_HELD(&dpr->dpr_lock));
104 
105 	if ((dbp = dt_zalloc(dpr->dpr_hdl, sizeof (dt_bkpt_t))) != NULL) {
106 		dbp->dbp_func = func;
107 		dbp->dbp_data = data;
108 		dbp->dbp_addr = addr;
109 
110 		if (Psetbkpt(P, dbp->dbp_addr, &dbp->dbp_instr) == 0)
111 			dbp->dbp_active = B_TRUE;
112 
113 		dt_list_append(&dpr->dpr_bps, dbp);
114 	}
115 
116 	return (dbp);
117 }
118 
119 static void
120 dt_proc_bpdestroy(dt_proc_t *dpr, int delbkpts)
121 {
122 	int state = Pstate(dpr->dpr_proc);
123 	dt_bkpt_t *dbp, *nbp;
124 
125 	assert(MUTEX_HELD(&dpr->dpr_lock));
126 
127 	for (dbp = dt_list_next(&dpr->dpr_bps); dbp != NULL; dbp = nbp) {
128 		if (delbkpts && dbp->dbp_active &&
129 		    state != PS_LOST && state != PS_UNDEAD) {
130 			(void) Pdelbkpt(dpr->dpr_proc,
131 			    dbp->dbp_addr, dbp->dbp_instr);
132 		}
133 		nbp = dt_list_next(dbp);
134 		dt_list_delete(&dpr->dpr_bps, dbp);
135 		dt_free(dpr->dpr_hdl, dbp);
136 	}
137 }
138 
139 static void
140 dt_proc_bpmatch(dtrace_hdl_t *dtp, dt_proc_t *dpr)
141 {
142 	const lwpstatus_t *psp = &Pstatus(dpr->dpr_proc)->pr_lwp;
143 	dt_bkpt_t *dbp;
144 
145 	assert(MUTEX_HELD(&dpr->dpr_lock));
146 
147 	for (dbp = dt_list_next(&dpr->dpr_bps);
148 	    dbp != NULL; dbp = dt_list_next(dbp)) {
149 		if (psp->pr_reg[R_PC] == dbp->dbp_addr)
150 			break;
151 	}
152 
153 	if (dbp == NULL) {
154 		dt_dprintf("pid %d: spurious breakpoint wakeup for %lx\n",
155 		    (int)dpr->dpr_pid, (ulong_t)psp->pr_reg[R_PC]);
156 		return;
157 	}
158 
159 	dt_dprintf("pid %d: hit breakpoint at %lx (%lu)\n",
160 	    (int)dpr->dpr_pid, (ulong_t)dbp->dbp_addr, ++dbp->dbp_hits);
161 
162 	dbp->dbp_func(dtp, dpr, dbp->dbp_data);
163 	(void) Pxecbkpt(dpr->dpr_proc, dbp->dbp_instr);
164 }
165 
166 static void
167 dt_proc_bpenable(dt_proc_t *dpr)
168 {
169 	dt_bkpt_t *dbp;
170 
171 	assert(MUTEX_HELD(&dpr->dpr_lock));
172 
173 	for (dbp = dt_list_next(&dpr->dpr_bps);
174 	    dbp != NULL; dbp = dt_list_next(dbp)) {
175 		if (!dbp->dbp_active && Psetbkpt(dpr->dpr_proc,
176 		    dbp->dbp_addr, &dbp->dbp_instr) == 0)
177 			dbp->dbp_active = B_TRUE;
178 	}
179 
180 	dt_dprintf("breakpoints enabled\n");
181 }
182 
183 static void
184 dt_proc_bpdisable(dt_proc_t *dpr)
185 {
186 	dt_bkpt_t *dbp;
187 
188 	assert(MUTEX_HELD(&dpr->dpr_lock));
189 
190 	for (dbp = dt_list_next(&dpr->dpr_bps);
191 	    dbp != NULL; dbp = dt_list_next(dbp)) {
192 		if (dbp->dbp_active && Pdelbkpt(dpr->dpr_proc,
193 		    dbp->dbp_addr, dbp->dbp_instr) == 0)
194 			dbp->dbp_active = B_FALSE;
195 	}
196 
197 	dt_dprintf("breakpoints disabled\n");
198 }
199 
200 static void
201 dt_proc_notify(dtrace_hdl_t *dtp, dt_proc_hash_t *dph, dt_proc_t *dpr,
202     const char *msg)
203 {
204 	dt_proc_notify_t *dprn = dt_alloc(dtp, sizeof (dt_proc_notify_t));
205 
206 	if (dprn == NULL) {
207 		dt_dprintf("failed to allocate notification for %d %s\n",
208 		    (int)dpr->dpr_pid, msg);
209 	} else {
210 		dprn->dprn_dpr = dpr;
211 		if (msg == NULL)
212 			dprn->dprn_errmsg[0] = '\0';
213 		else
214 			(void) strlcpy(dprn->dprn_errmsg, msg,
215 			    sizeof (dprn->dprn_errmsg));
216 
217 		(void) pthread_mutex_lock(&dph->dph_lock);
218 
219 		dprn->dprn_next = dph->dph_notify;
220 		dph->dph_notify = dprn;
221 
222 		(void) pthread_cond_broadcast(&dph->dph_cv);
223 		(void) pthread_mutex_unlock(&dph->dph_lock);
224 	}
225 }
226 
227 /*
228  * Check to see if the control thread was requested to stop when the victim
229  * process reached a particular event (why) rather than continuing the victim.
230  * If 'why' is set in the stop mask, we wait on dpr_cv for dt_proc_continue().
231  * If 'why' is not set, this function returns immediately and does nothing.
232  */
233 static void
234 dt_proc_stop(dt_proc_t *dpr, uint8_t why)
235 {
236 	assert(MUTEX_HELD(&dpr->dpr_lock));
237 	assert(why != DT_PROC_STOP_IDLE);
238 
239 	if (dpr->dpr_stop & why) {
240 		dpr->dpr_stop |= DT_PROC_STOP_IDLE;
241 		dpr->dpr_stop &= ~why;
242 
243 		(void) pthread_cond_broadcast(&dpr->dpr_cv);
244 
245 		/*
246 		 * We disable breakpoints while stopped to preserve the
247 		 * integrity of the program text for both our own disassembly
248 		 * and that of the kernel.
249 		 */
250 		dt_proc_bpdisable(dpr);
251 
252 		while (dpr->dpr_stop & DT_PROC_STOP_IDLE)
253 			(void) pthread_cond_wait(&dpr->dpr_cv, &dpr->dpr_lock);
254 
255 		dt_proc_bpenable(dpr);
256 	}
257 }
258 
/*
 * Breakpoint callback for the "a.out`main" breakpoint: log the event and
 * then rendezvous in dt_proc_stop() if DT_PROC_STOP_MAIN is set in the
 * process's stop mask.
 */
/*ARGSUSED*/
static void
dt_proc_bpmain(dtrace_hdl_t *dtp, dt_proc_t *dpr, const char *fname)
{
	dt_dprintf("pid %d: breakpoint at %s()\n", (int)dpr->dpr_pid, fname);
	dt_proc_stop(dpr, DT_PROC_STOP_MAIN);
}
266 
267 static void
268 dt_proc_rdevent(dtrace_hdl_t *dtp, dt_proc_t *dpr, const char *evname)
269 {
270 	rd_event_msg_t rdm;
271 	rd_err_e err;
272 
273 	if ((err = rd_event_getmsg(dpr->dpr_rtld, &rdm)) != RD_OK) {
274 		dt_dprintf("pid %d: failed to get %s event message: %s\n",
275 		    (int)dpr->dpr_pid, evname, rd_errstr(err));
276 		return;
277 	}
278 
279 	dt_dprintf("pid %d: rtld event %s type=%d state %d\n",
280 	    (int)dpr->dpr_pid, evname, rdm.type, rdm.u.state);
281 
282 	switch (rdm.type) {
283 	case RD_DLACTIVITY:
284 		if (rdm.u.state != RD_CONSISTENT)
285 			break;
286 
287 		Pupdate_syms(dpr->dpr_proc);
288 		if (dt_pid_create_probes_module(dtp, dpr) != 0)
289 			dt_proc_notify(dtp, dtp->dt_procs, dpr,
290 			    dpr->dpr_errmsg);
291 
292 		break;
293 	case RD_PREINIT:
294 		Pupdate_syms(dpr->dpr_proc);
295 		dt_proc_stop(dpr, DT_PROC_STOP_PREINIT);
296 		break;
297 	case RD_POSTINIT:
298 		Pupdate_syms(dpr->dpr_proc);
299 		dt_proc_stop(dpr, DT_PROC_STOP_POSTINIT);
300 		break;
301 	}
302 }
303 
304 static void
305 dt_proc_rdwatch(dt_proc_t *dpr, rd_event_e event, const char *evname)
306 {
307 	rd_notify_t rdn;
308 	rd_err_e err;
309 
310 	if ((err = rd_event_addr(dpr->dpr_rtld, event, &rdn)) != RD_OK) {
311 		dt_dprintf("pid %d: failed to get event address for %s: %s\n",
312 		    (int)dpr->dpr_pid, evname, rd_errstr(err));
313 		return;
314 	}
315 
316 	if (rdn.type != RD_NOTIFY_BPT) {
317 		dt_dprintf("pid %d: event %s has unexpected type %d\n",
318 		    (int)dpr->dpr_pid, evname, rdn.type);
319 		return;
320 	}
321 
322 	(void) dt_proc_bpcreate(dpr, rdn.u.bptaddr,
323 	    (dt_bkpt_f *)dt_proc_rdevent, (void *)evname);
324 }
325 
326 /*
327  * Common code for enabling events associated with the run-time linker after
328  * attaching to a process or after a victim process completes an exec(2).
329  */
330 static void
331 dt_proc_attach(dt_proc_t *dpr, int exec)
332 {
333 	const pstatus_t *psp = Pstatus(dpr->dpr_proc);
334 	rd_err_e err;
335 	GElf_Sym sym;
336 
337 	assert(MUTEX_HELD(&dpr->dpr_lock));
338 
339 	if (exec) {
340 		if (psp->pr_lwp.pr_errno != 0)
341 			return; /* exec failed: nothing needs to be done */
342 
343 		dt_proc_bpdestroy(dpr, B_FALSE);
344 		Preset_maps(dpr->dpr_proc);
345 	}
346 
347 	if ((dpr->dpr_rtld = Prd_agent(dpr->dpr_proc)) != NULL &&
348 	    (err = rd_event_enable(dpr->dpr_rtld, B_TRUE)) == RD_OK) {
349 		dt_proc_rdwatch(dpr, RD_PREINIT, "RD_PREINIT");
350 		dt_proc_rdwatch(dpr, RD_POSTINIT, "RD_POSTINIT");
351 		dt_proc_rdwatch(dpr, RD_DLACTIVITY, "RD_DLACTIVITY");
352 	} else {
353 		dt_dprintf("pid %d: failed to enable rtld events: %s\n",
354 		    (int)dpr->dpr_pid, dpr->dpr_rtld ? rd_errstr(err) :
355 		    "rtld_db agent initialization failed");
356 	}
357 
358 	Pupdate_maps(dpr->dpr_proc);
359 
360 	if (Pxlookup_by_name(dpr->dpr_proc, LM_ID_BASE,
361 	    "a.out", "main", &sym, NULL) == 0) {
362 		(void) dt_proc_bpcreate(dpr, (uintptr_t)sym.st_value,
363 		    (dt_bkpt_f *)dt_proc_bpmain, "a.out`main");
364 	} else {
365 		dt_dprintf("pid %d: failed to find a.out`main: %s\n",
366 		    (int)dpr->dpr_pid, strerror(errno));
367 	}
368 }
369 
370 /*
371  * Wait for a stopped process to be set running again by some other debugger.
372  * This is typically not required by /proc-based debuggers, since the usual
373  * model is that one debugger controls one victim.  But DTrace, as usual, has
374  * its own needs: the stop() action assumes that prun(1) or some other tool
375  * will be applied to resume the victim process.  This could be solved by
376  * adding a PCWRUN directive to /proc, but that seems like overkill unless
377  * other debuggers end up needing this functionality, so we implement a cheap
378  * equivalent to PCWRUN using the set of existing kernel mechanisms.
379  *
380  * Our intent is really not just to wait for the victim to run, but rather to
381  * wait for it to run and then stop again for a reason other than the current
382  * PR_REQUESTED stop.  Since PCWSTOP/Pstopstatus() can be applied repeatedly
383  * to a stopped process and will return the same result without affecting the
384  * victim, we can just perform these operations repeatedly until Pstate()
385  * changes, the representative LWP ID changes, or the stop timestamp advances.
386  * dt_proc_control() will then rediscover the new state and continue as usual.
387  * When the process is still stopped in the same exact state, we sleep for a
388  * brief interval before waiting again so as not to spin consuming CPU cycles.
389  */
static void
dt_proc_waitrun(dt_proc_t *dpr)
{
	struct ps_prochandle *P = dpr->dpr_proc;
	const lwpstatus_t *psp = &Pstatus(P)->pr_lwp;

	/* Snapshot the current stop so we can detect any change below. */
	int krflag = psp->pr_flags & (PR_KLC | PR_RLC);
	timestruc_t tstamp = psp->pr_tstamp;
	lwpid_t lwpid = psp->pr_lwpid;

	/* /proc control message: wait for the victim to stop again. */
	const long wstop = PCWSTOP;
	int pfd = Pctlfd(P);

	assert(MUTEX_HELD(&dpr->dpr_lock));
	assert(psp->pr_flags & PR_STOPPED);
	assert(Pstate(P) == PS_STOP);

	/*
	 * While we are waiting for the victim to run, clear PR_KLC and PR_RLC
	 * so that if the libdtrace client is killed, the victim stays stopped.
	 * dt_proc_destroy() will also observe this and perform PRELEASE_HANG.
	 */
	(void) Punsetflags(P, krflag);
	Psync(P);

	/* Drop dpr_lock while blocked so other threads can use the handle. */
	(void) pthread_mutex_unlock(&dpr->dpr_lock);

	while (!dpr->dpr_quit) {
		if (write(pfd, &wstop, sizeof (wstop)) == -1 && errno == EINTR)
			continue; /* check dpr_quit and continue waiting */

		(void) pthread_mutex_lock(&dpr->dpr_lock);
		(void) Pstopstatus(P, PCNULL, 0);
		psp = &Pstatus(P)->pr_lwp;

		/*
		 * If we've reached a new state, found a new representative, or
		 * the stop timestamp has changed, restore PR_KLC/PR_RLC to its
		 * original setting and then return with dpr_lock held.
		 */
		if (Pstate(P) != PS_STOP || psp->pr_lwpid != lwpid ||
		    bcmp(&psp->pr_tstamp, &tstamp, sizeof (tstamp)) != 0) {
			(void) Psetflags(P, krflag);
			Psync(P);
			return;
		}

		/* Same stop as before: sleep briefly rather than spin. */
		(void) pthread_mutex_unlock(&dpr->dpr_lock);
		(void) poll(NULL, 0, MILLISEC / 2);
	}

	/* Told to quit: reacquire dpr_lock as the caller expects. */
	(void) pthread_mutex_lock(&dpr->dpr_lock);
}
443 
/*
 * Argument block handed to each control thread by dt_proc_create_thread().
 */
typedef struct dt_proc_control_data {
	dtrace_hdl_t *dpcd_hdl;			/* DTrace handle */
	dt_proc_t *dpcd_proc;			/* process to control */
} dt_proc_control_data_t;
448 
449 /*
450  * Main loop for all victim process control threads.  We initialize all the
451  * appropriate /proc control mechanisms, and then enter a loop waiting for
452  * the process to stop on an event or die.  We process any events by calling
453  * appropriate subroutines, and exit when the victim dies or we lose control.
454  *
455  * The control thread synchronizes the use of dpr_proc with other libdtrace
456  * threads using dpr_lock.  We hold the lock for all of our operations except
457  * waiting while the process is running: this is accomplished by writing a
458  * PCWSTOP directive directly to the underlying /proc/<pid>/ctl file.  If the
459  * libdtrace client wishes to exit or abort our wait, SIGCANCEL can be used.
460  */
static void *
dt_proc_control(void *arg)
{
	dt_proc_control_data_t *datap = arg;
	dtrace_hdl_t *dtp = datap->dpcd_hdl;
	dt_proc_t *dpr = datap->dpcd_proc;
	dt_proc_hash_t *dph = dtp->dt_procs;
	struct ps_prochandle *P = dpr->dpr_proc;

	int pfd = Pctlfd(P);
	int pid = dpr->dpr_pid;

	const long wstop = PCWSTOP;
	/* set if the client must be notified of PS_LOST or PS_UNDEAD below */
	int notify = B_FALSE;

	/*
	 * We disable the POSIX thread cancellation mechanism so that the
	 * client program using libdtrace can't accidentally cancel our thread.
	 * dt_proc_destroy() uses SIGCANCEL explicitly to simply poke us out
	 * of PCWSTOP with EINTR, at which point we will see dpr_quit and exit.
	 */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);

	/*
	 * Set up the corresponding process for tracing by libdtrace.  We want
	 * to be able to catch breakpoints and efficiently single-step over
	 * them, and we need to enable librtld_db to watch libdl activity.
	 */
	(void) pthread_mutex_lock(&dpr->dpr_lock);

	(void) Punsetflags(P, PR_ASYNC);	/* require synchronous mode */
	(void) Psetflags(P, PR_BPTADJ);		/* always adjust eip on x86 */
	(void) Punsetflags(P, PR_FORK);		/* do not inherit on fork */

	(void) Pfault(P, FLTBPT, B_TRUE);	/* always trace breakpoints */
	(void) Pfault(P, FLTTRACE, B_TRUE);	/* always trace single-step */

	/*
	 * We must trace exit from exec() system calls so that if the exec is
	 * successful, we can reset our breakpoints and re-initialize libproc.
	 */
	(void) Psysexit(P, SYS_execve, B_TRUE);

	/*
	 * We must trace entry and exit for fork() system calls in order to
	 * disable our breakpoints temporarily during the fork.  We do not set
	 * the PR_FORK flag, so if fork succeeds the child begins executing and
	 * does not inherit any other tracing behaviors or a control thread.
	 */
	(void) Psysentry(P, SYS_vfork, B_TRUE);
	(void) Psysexit(P, SYS_vfork, B_TRUE);
	(void) Psysentry(P, SYS_forksys, B_TRUE);
	(void) Psysexit(P, SYS_forksys, B_TRUE);

	Psync(P);				/* enable all /proc changes */
	dt_proc_attach(dpr, B_FALSE);		/* enable rtld breakpoints */

	/*
	 * If PR_KLC is set, we created the process; otherwise we grabbed it.
	 * Check for an appropriate stop request and wait for dt_proc_continue.
	 */
	if (Pstatus(P)->pr_flags & PR_KLC)
		dt_proc_stop(dpr, DT_PROC_STOP_CREATE);
	else
		dt_proc_stop(dpr, DT_PROC_STOP_GRAB);

	if (Psetrun(P, 0, 0) == -1) {
		dt_dprintf("pid %d: failed to set running: %s\n",
		    (int)dpr->dpr_pid, strerror(errno));
	}

	(void) pthread_mutex_unlock(&dpr->dpr_lock);

	/*
	 * Wait for the process corresponding to this control thread to stop,
	 * process the event, and then set it running again.  We want to sleep
	 * with dpr_lock *unheld* so that other parts of libdtrace can use the
	 * ps_prochandle in the meantime (e.g. ustack()).  To do this, we write
	 * a PCWSTOP directive directly to the underlying /proc/<pid>/ctl file.
	 * Once the process stops, we wake up, grab dpr_lock, and then call
	 * Pwait() (which will return immediately) and do our processing.
	 */
	while (!dpr->dpr_quit) {
		const lwpstatus_t *psp;

		if (write(pfd, &wstop, sizeof (wstop)) == -1 && errno == EINTR)
			continue; /* check dpr_quit and continue waiting */

		(void) pthread_mutex_lock(&dpr->dpr_lock);

		/* entry point when dpr_lock is already held (PS_LOST reopen) */
pwait_locked:
		if (Pstopstatus(P, PCNULL, 0) == -1 && errno == EINTR) {
			(void) pthread_mutex_unlock(&dpr->dpr_lock);
			continue; /* check dpr_quit and continue waiting */
		}

		switch (Pstate(P)) {
		case PS_STOP:
			psp = &Pstatus(P)->pr_lwp;

			dt_dprintf("pid %d: proc stopped showing %d/%d\n",
			    pid, psp->pr_why, psp->pr_what);

			/*
			 * If the process stops showing PR_REQUESTED, then the
			 * DTrace stop() action was applied to it or another
			 * debugging utility (e.g. pstop(1)) asked it to stop.
			 * In either case, the user's intention is for the
			 * process to remain stopped until another external
			 * mechanism (e.g. prun(1)) is applied.  So instead of
			 * setting the process running ourself, we wait for
			 * someone else to do so.  Once that happens, we return
			 * to our normal loop waiting for an event of interest.
			 */
			if (psp->pr_why == PR_REQUESTED) {
				dt_proc_waitrun(dpr);
				(void) pthread_mutex_unlock(&dpr->dpr_lock);
				continue;
			}

			/*
			 * If the process stops showing one of the events that
			 * we are tracing, perform the appropriate response.
			 * Note that we ignore PR_SUSPENDED, PR_CHECKPOINT, and
			 * PR_JOBCONTROL by design: if one of these conditions
			 * occurs, we will fall through to Psetrun() but the
			 * process will remain stopped in the kernel by the
			 * corresponding mechanism (e.g. job control stop).
			 */
			if (psp->pr_why == PR_FAULTED && psp->pr_what == FLTBPT)
				dt_proc_bpmatch(dtp, dpr);
			else if (psp->pr_why == PR_SYSENTRY &&
			    IS_SYS_FORK(psp->pr_what))
				dt_proc_bpdisable(dpr);
			else if (psp->pr_why == PR_SYSEXIT &&
			    IS_SYS_FORK(psp->pr_what))
				dt_proc_bpenable(dpr);
			else if (psp->pr_why == PR_SYSEXIT &&
			    IS_SYS_EXEC(psp->pr_what))
				dt_proc_attach(dpr, B_TRUE);
			break;

		case PS_LOST:
			/* try to regain control before giving up */
			if (Preopen(P) == 0)
				goto pwait_locked;

			dt_dprintf("pid %d: proc lost: %s\n",
			    pid, strerror(errno));

			dpr->dpr_quit = B_TRUE;
			notify = B_TRUE;
			break;

		case PS_UNDEAD:
			dt_dprintf("pid %d: proc died\n", pid);
			dpr->dpr_quit = B_TRUE;
			notify = B_TRUE;
			break;
		}

		if (Pstate(P) != PS_UNDEAD && Psetrun(P, 0, 0) == -1) {
			dt_dprintf("pid %d: failed to set running: %s\n",
			    (int)dpr->dpr_pid, strerror(errno));
		}

		(void) pthread_mutex_unlock(&dpr->dpr_lock);
	}

	/*
	 * If the control thread detected PS_UNDEAD or PS_LOST, then enqueue
	 * the dt_proc_t structure on the dt_proc_hash_t notification list.
	 */
	if (notify)
		dt_proc_notify(dtp, dph, dpr, NULL);

	/*
	 * Destroy and remove any remaining breakpoints, set dpr_done and clear
	 * dpr_tid to indicate the control thread has exited, and notify any
	 * waiting thread in dt_proc_destroy() that we have successfully exited.
	 */
	(void) pthread_mutex_lock(&dpr->dpr_lock);

	dt_proc_bpdestroy(dpr, B_TRUE);
	dpr->dpr_done = B_TRUE;
	dpr->dpr_tid = 0;

	(void) pthread_cond_broadcast(&dpr->dpr_cv);
	(void) pthread_mutex_unlock(&dpr->dpr_lock);

	return (NULL);
}
651 
/*
 * Record a formatted error message on the dtrace handle, release the
 * libproc handle (if one was acquired), free the dt_proc_t, and set the
 * error to EDT_COMPILER.  Always returns NULL so callers can return our
 * result directly from a failed grab/create.
 */
/*PRINTFLIKE3*/
static struct ps_prochandle *
dt_proc_error(dtrace_hdl_t *dtp, dt_proc_t *dpr, const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	dt_set_errmsg(dtp, NULL, NULL, NULL, 0, format, ap);
	va_end(ap);

	if (dpr->dpr_proc != NULL)
		Prelease(dpr->dpr_proc, 0);

	dt_free(dtp, dpr);
	(void) dt_set_errno(dtp, EDT_COMPILER);
	return (NULL);
}
669 
670 dt_proc_t *
671 dt_proc_lookup(dtrace_hdl_t *dtp, struct ps_prochandle *P, int remove)
672 {
673 	dt_proc_hash_t *dph = dtp->dt_procs;
674 	pid_t pid = Pstatus(P)->pr_pid;
675 	dt_proc_t *dpr, **dpp = &dph->dph_hash[pid & (dph->dph_hashlen - 1)];
676 
677 	for (dpr = *dpp; dpr != NULL; dpr = dpr->dpr_hash) {
678 		if (dpr->dpr_pid == pid)
679 			break;
680 		else
681 			dpp = &dpr->dpr_hash;
682 	}
683 
684 	assert(dpr != NULL);
685 	assert(dpr->dpr_proc == P);
686 
687 	if (remove)
688 		*dpp = dpr->dpr_hash; /* remove from pid hash chain */
689 
690 	return (dpr);
691 }
692 
/*
 * Tear down a victim process: stop its control thread (if any), unlink the
 * dt_proc_t from the pid hash and the notification list, and release the
 * libproc handle with the appropriate last-close disposition (hang, kill,
 * or run), then free the dt_proc_t.
 */
static void
dt_proc_destroy(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
	dt_proc_hash_t *dph = dtp->dt_procs;
	dt_proc_notify_t *npr, **npp;
	int rflag;

	assert(dpr != NULL);

	/*
	 * If neither PR_KLC nor PR_RLC is set, then the process is stopped by
	 * an external debugger and we were waiting in dt_proc_waitrun().
	 * Leave the process in this condition using PRELEASE_HANG.
	 */
	if (!(Pstatus(dpr->dpr_proc)->pr_flags & (PR_KLC | PR_RLC))) {
		dt_dprintf("abandoning pid %d\n", (int)dpr->dpr_pid);
		rflag = PRELEASE_HANG;
	} else if (Pstatus(dpr->dpr_proc)->pr_flags & PR_KLC) {
		dt_dprintf("killing pid %d\n", (int)dpr->dpr_pid);
		rflag = PRELEASE_KILL; /* apply kill-on-last-close */
	} else {
		dt_dprintf("releasing pid %d\n", (int)dpr->dpr_pid);
		rflag = 0; /* apply run-on-last-close */
	}

	if (dpr->dpr_tid) {
		/*
		 * Set the dpr_quit flag to tell the daemon thread to exit.  We
		 * send it a SIGCANCEL to poke it out of PCWSTOP or any other
		 * long-term /proc system call.  Our daemon threads have POSIX
		 * cancellation disabled, so EINTR will be the only effect.  We
		 * then wait for dpr_done to indicate the thread has exited.
		 *
		 * We can't use pthread_kill() to send SIGCANCEL because the
		 * interface forbids it and we can't use pthread_cancel()
		 * because with cancellation disabled it won't actually
		 * send SIGCANCEL to the target thread, so we use _lwp_kill()
		 * to do the job.  This is all built on evil knowledge of
		 * the details of the cancellation mechanism in libc.
		 */
		(void) pthread_mutex_lock(&dpr->dpr_lock);
		dpr->dpr_quit = B_TRUE;
		(void) _lwp_kill(dpr->dpr_tid, SIGCANCEL);

		/*
		 * If the process is currently idling in dt_proc_stop(), re-
		 * enable breakpoints and poke it into running again.
		 */
		if (dpr->dpr_stop & DT_PROC_STOP_IDLE) {
			dt_proc_bpenable(dpr);
			dpr->dpr_stop &= ~DT_PROC_STOP_IDLE;
			(void) pthread_cond_broadcast(&dpr->dpr_cv);
		}

		/* wait for the control thread to signal its exit */
		while (!dpr->dpr_done)
			(void) pthread_cond_wait(&dpr->dpr_cv, &dpr->dpr_lock);

		(void) pthread_mutex_unlock(&dpr->dpr_lock);
	}

	/*
	 * Before we free the process structure, remove this dt_proc_t from the
	 * lookup hash, and then walk the dt_proc_hash_t's notification list
	 * and remove this dt_proc_t if it is enqueued.
	 */
	(void) pthread_mutex_lock(&dph->dph_lock);
	(void) dt_proc_lookup(dtp, P, B_TRUE);
	npp = &dph->dph_notify;

	while ((npr = *npp) != NULL) {
		if (npr->dprn_dpr == dpr) {
			*npp = npr->dprn_next;
			dt_free(dtp, npr);
		} else {
			npp = &npr->dprn_next;
		}
	}

	(void) pthread_mutex_unlock(&dph->dph_lock);

	/*
	 * Remove the dt_proc_list from the LRU list, release the underlying
	 * libproc handle, and free our dt_proc_t data structure.
	 */
	if (dpr->dpr_cacheable) {
		assert(dph->dph_lrucnt != 0);
		dph->dph_lrucnt--;
	}

	dt_list_delete(&dph->dph_lrulist, dpr);
	Prelease(dpr->dpr_proc, rflag);
	dt_free(dtp, dpr);
}
787 
/*
 * Create the control thread for a newly created or grabbed process and
 * block until it reaches the requested rendezvous ('stop' bit) or fails.
 * Returns 0 on success or an errno value on failure; on failure,
 * dt_proc_error() has already been called and the dt_proc_t freed.
 */
static int
dt_proc_create_thread(dtrace_hdl_t *dtp, dt_proc_t *dpr, uint_t stop)
{
	dt_proc_control_data_t data;
	sigset_t nset, oset;
	pthread_attr_t a;
	int err;

	(void) pthread_mutex_lock(&dpr->dpr_lock);
	dpr->dpr_stop |= stop; /* set bit for initial rendezvous */

	(void) pthread_attr_init(&a);
	(void) pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);

	/* block all signals in the control thread except the two we need */
	(void) sigfillset(&nset);
	(void) sigdelset(&nset, SIGABRT);	/* unblocked for assert() */
	(void) sigdelset(&nset, SIGCANCEL);	/* see dt_proc_destroy() */

	data.dpcd_hdl = dtp;
	data.dpcd_proc = dpr;

	(void) pthread_sigmask(SIG_SETMASK, &nset, &oset);
	err = pthread_create(&dpr->dpr_tid, &a, dt_proc_control, &data);
	(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);

	/*
	 * If the control thread was created, then wait on dpr_cv for either
	 * dpr_done to be set (the victim died or the control thread failed)
	 * or DT_PROC_STOP_IDLE to be set, indicating that the victim is now
	 * stopped by /proc and the control thread is at the rendezvous event.
	 * On success, we return with the process and control thread stopped:
	 * the caller can then apply dt_proc_continue() to resume both.
	 */
	if (err == 0) {
		while (!dpr->dpr_done && !(dpr->dpr_stop & DT_PROC_STOP_IDLE))
			(void) pthread_cond_wait(&dpr->dpr_cv, &dpr->dpr_lock);

		/*
		 * If dpr_done is set, the control thread aborted before it
		 * reached the rendezvous event.  This is either due to PS_LOST
		 * or PS_UNDEAD (i.e. the process died).  We try to provide a
		 * small amount of useful information to help figure it out.
		 */
		if (dpr->dpr_done) {
			const psinfo_t *prp = Ppsinfo(dpr->dpr_proc);
			int stat = prp ? prp->pr_wstat : 0;
			int pid = dpr->dpr_pid;

			if (Pstate(dpr->dpr_proc) == PS_LOST) {
				(void) dt_proc_error(dpr->dpr_hdl, dpr,
				    "failed to control pid %d: process exec'd "
				    "set-id or unobservable program\n", pid);
			} else if (WIFSIGNALED(stat)) {
				(void) dt_proc_error(dpr->dpr_hdl, dpr,
				    "failed to control pid %d: process died "
				    "from signal %d\n", pid, WTERMSIG(stat));
			} else {
				(void) dt_proc_error(dpr->dpr_hdl, dpr,
				    "failed to control pid %d: process exited "
				    "with status %d\n", pid, WEXITSTATUS(stat));
			}

			err = ESRCH; /* cause grab() or create() to fail */
		}
	} else {
		(void) dt_proc_error(dpr->dpr_hdl, dpr,
		    "failed to create control thread for process-id %d: %s\n",
		    (int)dpr->dpr_pid, strerror(err));
	}

	(void) pthread_mutex_unlock(&dpr->dpr_lock);
	(void) pthread_attr_destroy(&a);

	return (err);
}
863 
864 struct ps_prochandle *
865 dt_proc_create(dtrace_hdl_t *dtp, const char *file, char *const *argv)
866 {
867 	dt_proc_hash_t *dph = dtp->dt_procs;
868 	dt_proc_t *dpr;
869 	int err;
870 
871 	if ((dpr = dt_zalloc(dtp, sizeof (dt_proc_t))) == NULL)
872 		return (NULL); /* errno is set for us */
873 
874 	(void) pthread_mutex_init(&dpr->dpr_lock, NULL);
875 	(void) pthread_cond_init(&dpr->dpr_cv, NULL);
876 
877 	dpr->dpr_proc = Pxcreate(file, argv, dtp->dt_proc_env, &err, NULL, 0);
878 	if (dpr->dpr_proc == NULL) {
879 		return (dt_proc_error(dtp, dpr,
880 		    "failed to execute %s: %s\n", file, Pcreate_error(err)));
881 	}
882 
883 	dpr->dpr_hdl = dtp;
884 	dpr->dpr_pid = Pstatus(dpr->dpr_proc)->pr_pid;
885 
886 	(void) Punsetflags(dpr->dpr_proc, PR_RLC);
887 	(void) Psetflags(dpr->dpr_proc, PR_KLC);
888 
889 	if (dt_proc_create_thread(dtp, dpr, dtp->dt_prcmode) != 0)
890 		return (NULL); /* dt_proc_error() has been called for us */
891 
892 	dpr->dpr_hash = dph->dph_hash[dpr->dpr_pid & (dph->dph_hashlen - 1)];
893 	dph->dph_hash[dpr->dpr_pid & (dph->dph_hashlen - 1)] = dpr;
894 	dt_list_prepend(&dph->dph_lrulist, dpr);
895 
896 	dt_dprintf("created pid %d\n", (int)dpr->dpr_pid);
897 	dpr->dpr_refs++;
898 
899 	return (dpr->dpr_proc);
900 }
901 
struct ps_prochandle *
dt_proc_grab(dtrace_hdl_t *dtp, pid_t pid, int flags, int nomonitor)
{
	dt_proc_hash_t *dph = dtp->dt_procs;
	uint_t h = pid & (dph->dph_hashlen - 1);
	dt_proc_t *dpr, *opr;
	int err;

	/*
	 * Search the hash table for the pid.  If it is already grabbed or
	 * created, move the handle to the front of the lrulist, increment
	 * the reference count, and return the existing ps_prochandle.
	 */
	for (dpr = dph->dph_hash[h]; dpr != NULL; dpr = dpr->dpr_hash) {
		if (dpr->dpr_pid == pid && !dpr->dpr_stale) {
			/*
			 * If the cached handle was opened read-only and
			 * this request is for a writeable handle, mark
			 * the cached handle as stale and open a new handle.
			 * Since it's stale, unmark it as cacheable.
			 */
			if (dpr->dpr_rdonly && !(flags & PGRAB_RDONLY)) {
				dt_dprintf("upgrading pid %d\n", (int)pid);
				dpr->dpr_stale = B_TRUE;
				dpr->dpr_cacheable = B_FALSE;
				/* stale handle no longer counts against the cache */
				dph->dph_lrucnt--;
				break;
			}

			dt_dprintf("grabbed pid %d (cached)\n", (int)pid);
			/* move to the front of the LRU list and re-reference */
			dt_list_delete(&dph->dph_lrulist, dpr);
			dt_list_prepend(&dph->dph_lrulist, dpr);
			dpr->dpr_refs++;
			return (dpr->dpr_proc);
		}
	}

	if ((dpr = dt_zalloc(dtp, sizeof (dt_proc_t))) == NULL)
		return (NULL); /* errno is set for us */

	(void) pthread_mutex_init(&dpr->dpr_lock, NULL);
	(void) pthread_cond_init(&dpr->dpr_cv, NULL);

	/* on failure, dt_proc_error() reports the error and returns NULL */
	if ((dpr->dpr_proc = Pgrab(pid, flags, &err)) == NULL) {
		return (dt_proc_error(dtp, dpr,
		    "failed to grab pid %d: %s\n", (int)pid, Pgrab_error(err)));
	}

	dpr->dpr_hdl = dtp;
	dpr->dpr_pid = pid;

	/*
	 * A grabbed process (unlike a created one) should be set running
	 * again, not killed, when the last libproc reference is dropped:
	 * swap kill-on-last-close (PR_KLC) for run-on-last-close (PR_RLC).
	 */
	(void) Punsetflags(dpr->dpr_proc, PR_KLC);
	(void) Psetflags(dpr->dpr_proc, PR_RLC);

	/*
	 * If we are attempting to grab the process without a monitor
	 * thread, then mark the process cacheable only if it's being
	 * grabbed read-only.  If we're currently caching more process
	 * handles than dph_lrulim permits, attempt to find the
	 * least-recently-used handle that is currently unreferenced and
	 * release it from the cache.  Otherwise we are grabbing the process
	 * for control: create a control thread for this process and store
	 * its ID in dpr->dpr_tid.
	 */
	if (nomonitor || (flags & PGRAB_RDONLY)) {
		if (dph->dph_lrucnt >= dph->dph_lrulim) {
			/* walk the LRU list back-to-front to evict one handle */
			for (opr = dt_list_prev(&dph->dph_lrulist);
			    opr != NULL; opr = dt_list_prev(opr)) {
				if (opr->dpr_cacheable && opr->dpr_refs == 0) {
					dt_proc_destroy(dtp, opr->dpr_proc);
					break;
				}
			}
		}

		if (flags & PGRAB_RDONLY) {
			dpr->dpr_cacheable = B_TRUE;
			dpr->dpr_rdonly = B_TRUE;
			dph->dph_lrucnt++;
		}

	} else if (dt_proc_create_thread(dtp, dpr, DT_PROC_STOP_GRAB) != 0)
		return (NULL); /* dt_proc_error() has been called for us */

	/* link the new handle into the hash chain and LRU list */
	dpr->dpr_hash = dph->dph_hash[h];
	dph->dph_hash[h] = dpr;
	dt_list_prepend(&dph->dph_lrulist, dpr);

	dt_dprintf("grabbed pid %d\n", (int)pid);
	dpr->dpr_refs++;

	return (dpr->dpr_proc);
}
995 
996 void
997 dt_proc_release(dtrace_hdl_t *dtp, struct ps_prochandle *P)
998 {
999 	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
1000 	dt_proc_hash_t *dph = dtp->dt_procs;
1001 
1002 	assert(dpr != NULL);
1003 	assert(dpr->dpr_refs != 0);
1004 
1005 	if (--dpr->dpr_refs == 0 &&
1006 	    (!dpr->dpr_cacheable || dph->dph_lrucnt > dph->dph_lrulim))
1007 		dt_proc_destroy(dtp, P);
1008 }
1009 
1010 void
1011 dt_proc_continue(dtrace_hdl_t *dtp, struct ps_prochandle *P)
1012 {
1013 	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
1014 
1015 	(void) pthread_mutex_lock(&dpr->dpr_lock);
1016 
1017 	if (dpr->dpr_stop & DT_PROC_STOP_IDLE) {
1018 		dpr->dpr_stop &= ~DT_PROC_STOP_IDLE;
1019 		(void) pthread_cond_broadcast(&dpr->dpr_cv);
1020 	}
1021 
1022 	(void) pthread_mutex_unlock(&dpr->dpr_lock);
1023 }
1024 
1025 void
1026 dt_proc_lock(dtrace_hdl_t *dtp, struct ps_prochandle *P)
1027 {
1028 	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
1029 	int err = pthread_mutex_lock(&dpr->dpr_lock);
1030 	assert(err == 0); /* check for recursion */
1031 }
1032 
1033 void
1034 dt_proc_unlock(dtrace_hdl_t *dtp, struct ps_prochandle *P)
1035 {
1036 	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
1037 	int err = pthread_mutex_unlock(&dpr->dpr_lock);
1038 	assert(err == 0); /* check for unheld lock */
1039 }
1040 
1041 void
1042 dt_proc_init(dtrace_hdl_t *dtp)
1043 {
1044 	extern char **environ;
1045 	static char *envdef[] = {
1046 		"LD_NOLAZYLOAD=1",	/* linker lazy loading hides funcs */
1047 		NULL
1048 	};
1049 	char **p;
1050 	int i;
1051 
1052 	if ((dtp->dt_procs = dt_zalloc(dtp, sizeof (dt_proc_hash_t) +
1053 	    sizeof (dt_proc_t *) * _dtrace_pidbuckets - 1)) == NULL)
1054 		return;
1055 
1056 	(void) pthread_mutex_init(&dtp->dt_procs->dph_lock, NULL);
1057 	(void) pthread_cond_init(&dtp->dt_procs->dph_cv, NULL);
1058 
1059 	dtp->dt_procs->dph_hashlen = _dtrace_pidbuckets;
1060 	dtp->dt_procs->dph_lrulim = _dtrace_pidlrulim;
1061 
1062 
1063 	/*
1064 	 * Count how big our environment needs to be.
1065 	 */
1066 	for (i = 1, p = environ; *p != NULL; i++, p++)
1067 		continue;
1068 	for (p = envdef; *p != NULL; i++, p++)
1069 		continue;
1070 
1071 	if ((dtp->dt_proc_env = dt_zalloc(dtp, sizeof (char *) * i)) == NULL)
1072 		return;
1073 
1074 	for (i = 0, p = environ; *p != NULL; i++, p++) {
1075 		if ((dtp->dt_proc_env[i] = strdup(*p)) == NULL)
1076 			goto err;
1077 	}
1078 	for (p = envdef; *p != NULL; i++, p++) {
1079 		if ((dtp->dt_proc_env[i] = strdup(*p)) == NULL)
1080 			goto err;
1081 	}
1082 
1083 	return;
1084 
1085 err:
1086 	while (--i != 0) {
1087 		dt_free(dtp, dtp->dt_proc_env[i]);
1088 	}
1089 	dt_free(dtp, dtp->dt_proc_env);
1090 	dtp->dt_proc_env = NULL;
1091 }
1092 
1093 void
1094 dt_proc_fini(dtrace_hdl_t *dtp)
1095 {
1096 	dt_proc_hash_t *dph = dtp->dt_procs;
1097 	dt_proc_t *dpr;
1098 	char **p;
1099 
1100 	while ((dpr = dt_list_next(&dph->dph_lrulist)) != NULL)
1101 		dt_proc_destroy(dtp, dpr->dpr_proc);
1102 
1103 	dtp->dt_procs = NULL;
1104 	dt_free(dtp, dph);
1105 
1106 	for (p = dtp->dt_proc_env; *p != NULL; p++)
1107 		dt_free(dtp, *p);
1108 
1109 	dt_free(dtp, dtp->dt_proc_env);
1110 	dtp->dt_proc_env = NULL;
1111 }
1112 
1113 struct ps_prochandle *
1114 dtrace_proc_create(dtrace_hdl_t *dtp, const char *file, char *const *argv)
1115 {
1116 	dt_ident_t *idp = dt_idhash_lookup(dtp->dt_macros, "target");
1117 	struct ps_prochandle *P = dt_proc_create(dtp, file, argv);
1118 
1119 	if (P != NULL && idp != NULL && idp->di_id == 0)
1120 		idp->di_id = Pstatus(P)->pr_pid; /* $target = created pid */
1121 
1122 	return (P);
1123 }
1124 
1125 struct ps_prochandle *
1126 dtrace_proc_grab(dtrace_hdl_t *dtp, pid_t pid, int flags)
1127 {
1128 	dt_ident_t *idp = dt_idhash_lookup(dtp->dt_macros, "target");
1129 	struct ps_prochandle *P = dt_proc_grab(dtp, pid, flags, 0);
1130 
1131 	if (P != NULL && idp != NULL && idp->di_id == 0)
1132 		idp->di_id = pid; /* $target = grabbed pid */
1133 
1134 	return (P);
1135 }
1136 
/*
 * Public entry point: drop the caller's reference on the process handle,
 * delegating reference counting and teardown to dt_proc_release().
 */
void
dtrace_proc_release(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_release(dtp, P);
}
1142 
/*
 * Public entry point: resume a process left stopped at creation or grab
 * time, delegating to dt_proc_continue().
 */
void
dtrace_proc_continue(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_continue(dtp, P);
}
1148