1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * Copyright (c) 2012 by Delphix. All rights reserved. 29 */ 30 31 /* 32 * DTrace Process Control 33 * 34 * This file provides a set of routines that permit libdtrace and its clients 35 * to create and grab process handles using libproc, and to share these handles 36 * between library mechanisms that need libproc access, such as ustack(), and 37 * client mechanisms that need libproc access, such as dtrace(1M) -c and -p. 38 * The library provides several mechanisms in the libproc control layer: 39 * 40 * Reference Counting: The library code and client code can independently grab 41 * the same process handles without interfering with one another. Only when 42 * the reference count drops to zero and the handle is not being cached (see 43 * below for more information on caching) will Prelease() be called on it. 44 * 45 * Handle Caching: If a handle is grabbed PGRAB_RDONLY (e.g. by ustack()) and 46 * the reference count drops to zero, the handle is not immediately released. 
47 * Instead, libproc handles are maintained on dph_lrulist in order from most- 48 * recently accessed to least-recently accessed. Idle handles are maintained 49 * until a pre-defined LRU cache limit is exceeded, permitting repeated calls 50 * to ustack() to avoid the overhead of releasing and re-grabbing processes. 51 * 52 * Process Control: For processes that are grabbed for control (~PGRAB_RDONLY) 53 * or created by dt_proc_create(), a control thread is created to provide 54 * callbacks on process exit and symbol table caching on dlopen()s. 55 * 56 * MT-Safety: Libproc is not MT-Safe, so dt_proc_lock() and dt_proc_unlock() 57 * are provided to synchronize access to the libproc handle between libdtrace 58 * code and client code and the control thread's use of the ps_prochandle. 59 * 60 * NOTE: MT-Safety is NOT provided for libdtrace itself, or for use of the 61 * dtrace_proc_grab/dtrace_proc_create mechanisms. Like all exported libdtrace 62 * calls, these are assumed to be MT-Unsafe. MT-Safety is ONLY provided for 63 * synchronization between libdtrace control threads and the client thread. 64 * 65 * The ps_prochandles themselves are maintained along with a dt_proc_t struct 66 * in a hash table indexed by PID. This provides basic locking and reference 67 * counting. The dt_proc_t is also maintained in LRU order on dph_lrulist. 68 * The dph_lrucnt and dph_lrulim count the number of cacheable processes and 69 * the current limit on the number of actively cached entries. 70 * 71 * The control thread for a process establishes breakpoints at the rtld_db 72 * locations of interest, updates mappings and symbol tables at these points, 73 * and handles exec and fork (by always following the parent). The control 74 * thread automatically exits when the process dies or control is lost. 75 * 76 * A simple notification mechanism is provided for libdtrace clients using 77 * dtrace_handle_proc() for notification of PS_UNDEAD or PS_LOST events. 
 * If such an event occurs, the dt_proc_t itself is enqueued on a notification
 * list and the control thread broadcasts to dph_cv.  dtrace_sleep() will wake
 * up using this condition and will then call the client handler as necessary.
 */

#include <sys/wait.h>
#ifdef illumos
#include <sys/lwp.h>
#endif
#include <strings.h>
#include <signal.h>
#include <assert.h>
#include <errno.h>

#include <dt_proc.h>
#include <dt_pid.h>
#include <dt_impl.h>

#ifndef illumos
#include <sys/syscall.h>
#include <libproc_compat.h>
#define	SYS_forksys SYS_fork
#endif

/*
 * Classify the system call number reported by a PR_SYSENTRY/PR_SYSEXIT stop
 * (see the event dispatch in dt_proc_control()).
 */
#define	IS_SYS_EXEC(w)	(w == SYS_execve)
#define	IS_SYS_FORK(w)	(w == SYS_vfork || w == SYS_forksys)

/*
 * Allocate a breakpoint descriptor for 'addr', attempt to plant the
 * breakpoint in the victim process, and append the descriptor to dpr_bps.
 * 'func' is invoked with 'data' when the victim hits the breakpoint (see
 * dt_proc_bpmatch()).  Returns NULL if the descriptor cannot be allocated.
 * If Psetbkpt() fails, the descriptor is still listed but remains inactive
 * (dbp_active == B_FALSE) so dt_proc_bpenable() can retry it later.
 * The caller must hold dpr_lock.
 */
static dt_bkpt_t *
dt_proc_bpcreate(dt_proc_t *dpr, uintptr_t addr, dt_bkpt_f *func, void *data)
{
	struct ps_prochandle *P = dpr->dpr_proc;
	dt_bkpt_t *dbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	if ((dbp = dt_zalloc(dpr->dpr_hdl, sizeof (dt_bkpt_t))) != NULL) {
		dbp->dbp_func = func;
		dbp->dbp_data = data;
		dbp->dbp_addr = addr;

		if (Psetbkpt(P, dbp->dbp_addr, &dbp->dbp_instr) == 0)
			dbp->dbp_active = B_TRUE;

		dt_list_append(&dpr->dpr_bps, dbp);
	}

	return (dbp);
}

/*
 * Tear down the entire breakpoint list for 'dpr', freeing each descriptor.
 * If 'delbkpts' is set, also restore the original instructions in the
 * victim's text -- unless the process is already lost or dead, in which
 * case its address space is no longer writable anyway.
 * The caller must hold dpr_lock.
 */
static void
dt_proc_bpdestroy(dt_proc_t *dpr, int delbkpts)
{
	int state = Pstate(dpr->dpr_proc);
	dt_bkpt_t *dbp, *nbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	for (dbp = dt_list_next(&dpr->dpr_bps); dbp != NULL; dbp = nbp) {
		if (delbkpts && dbp->dbp_active &&
		    state != PS_LOST && state != PS_UNDEAD) {
			(void) Pdelbkpt(dpr->dpr_proc,
			    dbp->dbp_addr, dbp->dbp_instr);
		}
		nbp = dt_list_next(dbp);
		dt_list_delete(&dpr->dpr_bps, dbp);
		dt_free(dpr->dpr_hdl, dbp);
	}
}

/*
 * The victim stopped on a breakpoint fault (FLTBPT): locate the dt_bkpt_t
 * whose address matches the stopped LWP's PC, fire its callback, and then
 * execute the saved original instruction (Pxecbkpt) to step over the trap.
 * A wakeup with no matching breakpoint is logged as spurious and ignored.
 * The caller must hold dpr_lock.
 */
static void
dt_proc_bpmatch(dtrace_hdl_t *dtp, dt_proc_t *dpr)
{
#ifdef illumos
	const lwpstatus_t *psp = &Pstatus(dpr->dpr_proc)->pr_lwp;
#else
	unsigned long pc;
#endif
	dt_bkpt_t *dbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

#ifndef illumos
	/* Fetch the PC and adjust it back to the trap address if needed. */
	proc_regget(dpr->dpr_proc, REG_PC, &pc);
	proc_bkptregadj(&pc);
#endif

	for (dbp = dt_list_next(&dpr->dpr_bps);
	    dbp != NULL; dbp = dt_list_next(dbp)) {
#ifdef illumos
		if (psp->pr_reg[R_PC] == dbp->dbp_addr)
			break;
#else
		if (pc == dbp->dbp_addr)
			break;
#endif
	}

	if (dbp == NULL) {
		dt_dprintf("pid %d: spurious breakpoint wakeup for %lx\n",
#ifdef illumos
		    (int)dpr->dpr_pid, (ulong_t)psp->pr_reg[R_PC]);
#else
		    (int)dpr->dpr_pid, pc);
#endif
		return;
	}

	dt_dprintf("pid %d: hit breakpoint at %lx (%lu)\n",
	    (int)dpr->dpr_pid, (ulong_t)dbp->dbp_addr, ++dbp->dbp_hits);

	dbp->dbp_func(dtp, dpr, dbp->dbp_data);
	(void) Pxecbkpt(dpr->dpr_proc, dbp->dbp_instr);
}

/*
 * Re-plant every inactive breakpoint on dpr_bps (used after a fork window or
 * after an idle rendezvous ends).  The caller must hold dpr_lock.
 */
static void
dt_proc_bpenable(dt_proc_t *dpr)
{
	dt_bkpt_t *dbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	for (dbp = dt_list_next(&dpr->dpr_bps);
	    dbp != NULL; dbp = dt_list_next(dbp)) {
		if (!dbp->dbp_active && Psetbkpt(dpr->dpr_proc,
		    dbp->dbp_addr, &dbp->dbp_instr) == 0)
			dbp->dbp_active = B_TRUE;
	}

	dt_dprintf("breakpoints enabled\n");
}

/*
 * Remove every active breakpoint from the victim's text without freeing the
 * descriptors, so dt_proc_bpenable() can restore them later.  The caller
 * must hold dpr_lock.
 */
static void
dt_proc_bpdisable(dt_proc_t *dpr)
{
	dt_bkpt_t *dbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	for (dbp = dt_list_next(&dpr->dpr_bps);
	    dbp != NULL; dbp = dt_list_next(dbp)) {
		if (dbp->dbp_active && Pdelbkpt(dpr->dpr_proc,
		    dbp->dbp_addr, dbp->dbp_instr) == 0)
			dbp->dbp_active = B_FALSE;
	}

	dt_dprintf("breakpoints disabled\n");
}

/*
 * Enqueue a notification (with an optional error message) for this process
 * on the hash table's dph_notify list and broadcast dph_cv so that a client
 * sleeping in dtrace_sleep() wakes up and runs its proc handler.  On
 * allocation failure the event is dropped with only a debug message.
 */
static void
dt_proc_notify(dtrace_hdl_t *dtp, dt_proc_hash_t *dph, dt_proc_t *dpr,
    const char *msg)
{
	dt_proc_notify_t *dprn = dt_alloc(dtp, sizeof (dt_proc_notify_t));

	if (dprn == NULL) {
		/*
		 * NOTE(review): 'msg' may be NULL here and is passed to a
		 * %s conversion; many printf implementations print "(null)"
		 * but this is not guaranteed -- confirm dt_dprintf behavior.
		 */
		dt_dprintf("failed to allocate notification for %d %s\n",
		    (int)dpr->dpr_pid, msg);
	} else {
		dprn->dprn_dpr = dpr;
		if (msg == NULL)
			dprn->dprn_errmsg[0] = '\0';
		else
			(void) strlcpy(dprn->dprn_errmsg, msg,
			    sizeof (dprn->dprn_errmsg));

		(void) pthread_mutex_lock(&dph->dph_lock);

		dprn->dprn_next = dph->dph_notify;
		dph->dph_notify = dprn;

		(void) pthread_cond_broadcast(&dph->dph_cv);
		(void) pthread_mutex_unlock(&dph->dph_lock);
	}
}

/*
 * Check to see if the control thread was requested to stop when the victim
 * process reached a particular event (why) rather than continuing the victim.
 * If 'why' is set in the stop mask, we wait on dpr_cv for dt_proc_continue().
 * If 'why' is not set, this function returns immediately and does nothing.
 */
static void
dt_proc_stop(dt_proc_t *dpr, uint8_t why)
{
	assert(DT_MUTEX_HELD(&dpr->dpr_lock));
	assert(why != DT_PROC_STOP_IDLE);

	if (dpr->dpr_stop & why) {
		dpr->dpr_stop |= DT_PROC_STOP_IDLE;
		dpr->dpr_stop &= ~why;

		(void) pthread_cond_broadcast(&dpr->dpr_cv);

		/*
		 * We disable breakpoints while stopped to preserve the
		 * integrity of the program text for both our own disassembly
		 * and that of the kernel.
		 */
		dt_proc_bpdisable(dpr);

		while (dpr->dpr_stop & DT_PROC_STOP_IDLE)
			(void) pthread_cond_wait(&dpr->dpr_cv, &dpr->dpr_lock);

		dt_proc_bpenable(dpr);
	}
}

/*
 * Breakpoint callback for a.out`main: implements the "stop at main"
 * rendezvous (DT_PROC_STOP_MAIN).
 */
/*ARGSUSED*/
static void
dt_proc_bpmain(dtrace_hdl_t *dtp, dt_proc_t *dpr, const char *fname)
{
	dt_dprintf("pid %d: breakpoint at %s()\n", (int)dpr->dpr_pid, fname);
	dt_proc_stop(dpr, DT_PROC_STOP_MAIN);
}

/*
 * Breakpoint callback for rtld_db events: fetch the pending event message
 * and dispatch on its type.  RD_DLACTIVITY (only once the link maps reach
 * RD_CONSISTENT) refreshes symbols and creates probes for newly loaded
 * modules, notifying the client on failure; RD_PREINIT and RD_POSTINIT
 * refresh symbols and honor any requested rendezvous stop.
 */
static void
dt_proc_rdevent(dtrace_hdl_t *dtp, dt_proc_t *dpr, const char *evname)
{
	rd_event_msg_t rdm;
	rd_err_e err;

	if ((err = rd_event_getmsg(dpr->dpr_rtld, &rdm)) != RD_OK) {
		dt_dprintf("pid %d: failed to get %s event message: %s\n",
		    (int)dpr->dpr_pid, evname, rd_errstr(err));
		return;
	}

	dt_dprintf("pid %d: rtld event %s type=%d state %d\n",
	    (int)dpr->dpr_pid, evname, rdm.type, rdm.u.state);

	switch (rdm.type) {
	case RD_DLACTIVITY:
		if (rdm.u.state != RD_CONSISTENT)
			break;

		Pupdate_syms(dpr->dpr_proc);
		if (dt_pid_create_probes_module(dtp, dpr) != 0)
			dt_proc_notify(dtp, dtp->dt_procs, dpr,
			    dpr->dpr_errmsg);

		break;
	case RD_PREINIT:
		Pupdate_syms(dpr->dpr_proc);
		dt_proc_stop(dpr, DT_PROC_STOP_PREINIT);
		break;
	case RD_POSTINIT:
		Pupdate_syms(dpr->dpr_proc);
		dt_proc_stop(dpr, DT_PROC_STOP_POSTINIT);
		break;
	}
}

/*
 * Plant a breakpoint at the rtld_db notification address for 'event' so
 * that dt_proc_rdevent() runs when the run-time linker reports it.  Events
 * whose notification is not breakpoint-based (rdn.type != RD_NOTIFY_BPT)
 * are not supported and are skipped with a debug message.
 */
static void
dt_proc_rdwatch(dt_proc_t *dpr, rd_event_e event, const char *evname)
{
	rd_notify_t rdn;
	rd_err_e err;

	if ((err = rd_event_addr(dpr->dpr_rtld, event, &rdn)) != RD_OK) {
		dt_dprintf("pid %d: failed to get event address for %s: %s\n",
		    (int)dpr->dpr_pid, evname, rd_errstr(err));
		return;
	}

	if (rdn.type != RD_NOTIFY_BPT) {
		dt_dprintf("pid %d: event %s has unexpected type %d\n",
		    (int)dpr->dpr_pid, evname, rdn.type);
		return;
	}

	(void) dt_proc_bpcreate(dpr, rdn.u.bptaddr,
#ifdef illumos
	    (dt_bkpt_f *)dt_proc_rdevent, (void *)evname);
#else
	    /* XXX ugly */
	    (dt_bkpt_f *)dt_proc_rdevent, __DECONST(void *, evname));
#endif
}

/*
 * Common code for enabling events associated with the run-time linker after
 * attaching to a process or after a victim process completes an exec(2).
 */
static void
dt_proc_attach(dt_proc_t *dpr, int exec)
{
#ifdef illumos
	const pstatus_t *psp = Pstatus(dpr->dpr_proc);
#endif
	rd_err_e err;
	GElf_Sym sym;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	if (exec) {
#ifdef illumos
		if (psp->pr_lwp.pr_errno != 0)
			return; /* exec failed: nothing needs to be done */
#endif

		/*
		 * The old address space is gone: drop our descriptors without
		 * touching text (B_FALSE) and reset the mapping cache.
		 */
		dt_proc_bpdestroy(dpr, B_FALSE);
#ifdef illumos
		Preset_maps(dpr->dpr_proc);
#endif
	}
	if ((dpr->dpr_rtld = Prd_agent(dpr->dpr_proc)) != NULL &&
	    (err = rd_event_enable(dpr->dpr_rtld, B_TRUE)) == RD_OK) {
#ifdef illumos
		dt_proc_rdwatch(dpr, RD_PREINIT, "RD_PREINIT");
#endif
		dt_proc_rdwatch(dpr, RD_POSTINIT, "RD_POSTINIT");
#ifdef illumos
		dt_proc_rdwatch(dpr, RD_DLACTIVITY, "RD_DLACTIVITY");
#endif
	} else {
		dt_dprintf("pid %d: failed to enable rtld events: %s\n",
		    (int)dpr->dpr_pid, dpr->dpr_rtld ? rd_errstr(err) :
		    "rtld_db agent initialization failed");
	}

	Pupdate_maps(dpr->dpr_proc);

	if (Pxlookup_by_name(dpr->dpr_proc, LM_ID_BASE,
	    "a.out", "main", &sym, NULL) == 0) {
		(void) dt_proc_bpcreate(dpr, (uintptr_t)sym.st_value,
		    (dt_bkpt_f *)dt_proc_bpmain, "a.out`main");
	} else {
		dt_dprintf("pid %d: failed to find a.out`main: %s\n",
		    (int)dpr->dpr_pid, strerror(errno));
	}
}

/*
 * Wait for a stopped process to be set running again by some other debugger.
 * This is typically not required by /proc-based debuggers, since the usual
 * model is that one debugger controls one victim.
 * But DTrace, as usual, has
 * its own needs: the stop() action assumes that prun(1) or some other tool
 * will be applied to resume the victim process.  This could be solved by
 * adding a PCWRUN directive to /proc, but that seems like overkill unless
 * other debuggers end up needing this functionality, so we implement a cheap
 * equivalent to PCWRUN using the set of existing kernel mechanisms.
 *
 * Our intent is really not just to wait for the victim to run, but rather to
 * wait for it to run and then stop again for a reason other than the current
 * PR_REQUESTED stop.  Since PCWSTOP/Pstopstatus() can be applied repeatedly
 * to a stopped process and will return the same result without affecting the
 * victim, we can just perform these operations repeatedly until Pstate()
 * changes, the representative LWP ID changes, or the stop timestamp advances.
 * dt_proc_control() will then rediscover the new state and continue as usual.
 * When the process is still stopped in the same exact state, we sleep for a
 * brief interval before waiting again so as not to spin consuming CPU cycles.
 */
static void
dt_proc_waitrun(dt_proc_t *dpr)
{
	/* Not yet ported: the illumos implementation below is compiled out. */
	printf("%s:%s(%d): not implemented\n", __FUNCTION__, __FILE__,
	    __LINE__);
#ifdef DOODAD
	struct ps_prochandle *P = dpr->dpr_proc;
	const lwpstatus_t *psp = &Pstatus(P)->pr_lwp;

	int krflag = psp->pr_flags & (PR_KLC | PR_RLC);
	timestruc_t tstamp = psp->pr_tstamp;
	lwpid_t lwpid = psp->pr_lwpid;

	const long wstop = PCWSTOP;
	int pfd = Pctlfd(P);

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));
	assert(psp->pr_flags & PR_STOPPED);
	assert(Pstate(P) == PS_STOP);

	/*
	 * While we are waiting for the victim to run, clear PR_KLC and PR_RLC
	 * so that if the libdtrace client is killed, the victim stays stopped.
	 * dt_proc_destroy() will also observe this and perform PRELEASE_HANG.
	 */
	(void) Punsetflags(P, krflag);
	Psync(P);

	(void) pthread_mutex_unlock(&dpr->dpr_lock);

	while (!dpr->dpr_quit) {
		if (write(pfd, &wstop, sizeof (wstop)) == -1 && errno == EINTR)
			continue; /* check dpr_quit and continue waiting */

		(void) pthread_mutex_lock(&dpr->dpr_lock);
		(void) Pstopstatus(P, PCNULL, 0);
		psp = &Pstatus(P)->pr_lwp;

		/*
		 * If we've reached a new state, found a new representative, or
		 * the stop timestamp has changed, restore PR_KLC/PR_RLC to its
		 * original setting and then return with dpr_lock held.
		 */
		if (Pstate(P) != PS_STOP || psp->pr_lwpid != lwpid ||
		    bcmp(&psp->pr_tstamp, &tstamp, sizeof (tstamp)) != 0) {
			(void) Psetflags(P, krflag);
			Psync(P);
			return;
		}

		(void) pthread_mutex_unlock(&dpr->dpr_lock);
		(void) poll(NULL, 0, MILLISEC / 2);
	}

	(void) pthread_mutex_lock(&dpr->dpr_lock);
#endif
}

/* Argument bundle handed to each control thread (see dt_proc_control()). */
typedef struct dt_proc_control_data {
	dtrace_hdl_t *dpcd_hdl;		/* DTrace handle */
	dt_proc_t *dpcd_proc;		/* process to control */
} dt_proc_control_data_t;

/*
 * Main loop for all victim process control threads.  We initialize all the
 * appropriate /proc control mechanisms, and then enter a loop waiting for
 * the process to stop on an event or die.  We process any events by calling
 * appropriate subroutines, and exit when the victim dies or we lose control.
 *
 * The control thread synchronizes the use of dpr_proc with other libdtrace
 * threads using dpr_lock.  We hold the lock for all of our operations except
 * waiting while the process is running: this is accomplished by writing a
 * PCWSTOP directive directly to the underlying /proc/<pid>/ctl file.  If the
 * libdtrace client wishes to exit or abort our wait, SIGCANCEL can be used.
 */
static void *
dt_proc_control(void *arg)
{
	dt_proc_control_data_t *datap = arg;
	dtrace_hdl_t *dtp = datap->dpcd_hdl;
	dt_proc_t *dpr = datap->dpcd_proc;
	dt_proc_hash_t *dph = dtp->dt_procs;
	struct ps_prochandle *P = dpr->dpr_proc;
	int pid = dpr->dpr_pid;

#ifdef illumos
	int pfd = Pctlfd(P);

	const long wstop = PCWSTOP;
#endif
	int notify = B_FALSE;

	/*
	 * We disable the POSIX thread cancellation mechanism so that the
	 * client program using libdtrace can't accidentally cancel our thread.
	 * dt_proc_destroy() uses SIGCANCEL explicitly to simply poke us out
	 * of PCWSTOP with EINTR, at which point we will see dpr_quit and exit.
	 */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);

	/*
	 * Set up the corresponding process for tracing by libdtrace.  We want
	 * to be able to catch breakpoints and efficiently single-step over
	 * them, and we need to enable librtld_db to watch libdl activity.
	 */
	(void) pthread_mutex_lock(&dpr->dpr_lock);

#ifdef illumos
	(void) Punsetflags(P, PR_ASYNC);	/* require synchronous mode */
	(void) Psetflags(P, PR_BPTADJ);		/* always adjust eip on x86 */
	(void) Punsetflags(P, PR_FORK);		/* do not inherit on fork */

	(void) Pfault(P, FLTBPT, B_TRUE);	/* always trace breakpoints */
	(void) Pfault(P, FLTTRACE, B_TRUE);	/* always trace single-step */

	/*
	 * We must trace exit from exec() system calls so that if the exec is
	 * successful, we can reset our breakpoints and re-initialize libproc.
	 */
	(void) Psysexit(P, SYS_execve, B_TRUE);

	/*
	 * We must trace entry and exit for fork() system calls in order to
	 * disable our breakpoints temporarily during the fork.  We do not set
	 * the PR_FORK flag, so if fork succeeds the child begins executing and
	 * does not inherit any other tracing behaviors or a control thread.
	 */
	(void) Psysentry(P, SYS_vfork, B_TRUE);
	(void) Psysexit(P, SYS_vfork, B_TRUE);
	(void) Psysentry(P, SYS_forksys, B_TRUE);
	(void) Psysexit(P, SYS_forksys, B_TRUE);

	Psync(P);				/* enable all /proc changes */
#endif
	dt_proc_attach(dpr, B_FALSE);		/* enable rtld breakpoints */

	/*
	 * If PR_KLC is set, we created the process; otherwise we grabbed it.
	 * Check for an appropriate stop request and wait for dt_proc_continue.
	 */
#ifdef illumos
	if (Pstatus(P)->pr_flags & PR_KLC)
#else
	if (proc_getflags(P) & PR_KLC)
#endif
		dt_proc_stop(dpr, DT_PROC_STOP_CREATE);
	else
		dt_proc_stop(dpr, DT_PROC_STOP_GRAB);

	if (Psetrun(P, 0, 0) == -1) {
		dt_dprintf("pid %d: failed to set running: %s\n",
		    (int)dpr->dpr_pid, strerror(errno));
	}

	(void) pthread_mutex_unlock(&dpr->dpr_lock);

	/*
	 * Wait for the process corresponding to this control thread to stop,
	 * process the event, and then set it running again.  We want to sleep
	 * with dpr_lock *unheld* so that other parts of libdtrace can use the
	 * ps_prochandle in the meantime (e.g. ustack()).  To do this, we write
	 * a PCWSTOP directive directly to the underlying /proc/<pid>/ctl file.
	 * Once the process stops, we wake up, grab dpr_lock, and then call
	 * Pwait() (which will return immediately) and do our processing.
	 */
	while (!dpr->dpr_quit) {
		const lwpstatus_t *psp;

#ifdef illumos
		if (write(pfd, &wstop, sizeof (wstop)) == -1 && errno == EINTR)
			continue; /* check dpr_quit and continue waiting */
#else
		/*
		 * Wait for the process to report status.
		 * NOTE(review): errno is tested here without being cleared
		 * first and without checking proc_wstatus()'s return value,
		 * so a stale EINTR could cause a spurious retry -- confirm
		 * proc_wstatus()'s error contract.
		 */
		proc_wstatus(P);
		if (errno == EINTR)
			continue; /* check dpr_quit and continue waiting */
#endif

		(void) pthread_mutex_lock(&dpr->dpr_lock);

#ifdef illumos
pwait_locked:
		if (Pstopstatus(P, PCNULL, 0) == -1 && errno == EINTR) {
			(void) pthread_mutex_unlock(&dpr->dpr_lock);
			continue; /* check dpr_quit and continue waiting */
		}
#endif

		switch (Pstate(P)) {
		case PS_STOP:
#ifdef illumos
			psp = &Pstatus(P)->pr_lwp;
#else
			psp = proc_getlwpstatus(P);
#endif

			dt_dprintf("pid %d: proc stopped showing %d/%d\n",
			    pid, psp->pr_why, psp->pr_what);

			/*
			 * If the process stops showing PR_REQUESTED, then the
			 * DTrace stop() action was applied to it or another
			 * debugging utility (e.g. pstop(1)) asked it to stop.
			 * In either case, the user's intention is for the
			 * process to remain stopped until another external
			 * mechanism (e.g. prun(1)) is applied.  So instead of
			 * setting the process running ourselves, we wait for
			 * someone else to do so.  Once that happens, we return
			 * to our normal loop waiting for an event of interest.
			 */
			if (psp->pr_why == PR_REQUESTED) {
				dt_proc_waitrun(dpr);
				(void) pthread_mutex_unlock(&dpr->dpr_lock);
				continue;
			}

			/*
			 * If the process stops showing one of the events that
			 * we are tracing, perform the appropriate response.
			 * Note that we ignore PR_SUSPENDED, PR_CHECKPOINT, and
			 * PR_JOBCONTROL by design: if one of these conditions
			 * occurs, we will fall through to Psetrun() but the
			 * process will remain stopped in the kernel by the
			 * corresponding mechanism (e.g. job control stop).
			 */
			if (psp->pr_why == PR_FAULTED && psp->pr_what == FLTBPT)
				dt_proc_bpmatch(dtp, dpr);
			else if (psp->pr_why == PR_SYSENTRY &&
			    IS_SYS_FORK(psp->pr_what))
				dt_proc_bpdisable(dpr);
			else if (psp->pr_why == PR_SYSEXIT &&
			    IS_SYS_FORK(psp->pr_what))
				dt_proc_bpenable(dpr);
			else if (psp->pr_why == PR_SYSEXIT &&
			    IS_SYS_EXEC(psp->pr_what))
				dt_proc_attach(dpr, B_TRUE);
			break;

		case PS_LOST:
#ifdef illumos
			/* Try to regain control before giving up. */
			if (Preopen(P) == 0)
				goto pwait_locked;
#endif

			dt_dprintf("pid %d: proc lost: %s\n",
			    pid, strerror(errno));

			dpr->dpr_quit = B_TRUE;
			notify = B_TRUE;
			break;

		case PS_UNDEAD:
			dt_dprintf("pid %d: proc died\n", pid);
			dpr->dpr_quit = B_TRUE;
			notify = B_TRUE;
			break;
		}

		if (Pstate(P) != PS_UNDEAD && Psetrun(P, 0, 0) == -1) {
			dt_dprintf("pid %d: failed to set running: %s\n",
			    (int)dpr->dpr_pid, strerror(errno));
		}

		(void) pthread_mutex_unlock(&dpr->dpr_lock);
	}

	/*
	 * If the control thread detected PS_UNDEAD or PS_LOST, then enqueue
	 * the dt_proc_t structure on the dt_proc_hash_t notification list.
	 */
	if (notify)
		dt_proc_notify(dtp, dph, dpr, NULL);

	/*
	 * Destroy and remove any remaining breakpoints, set dpr_done and clear
	 * dpr_tid to indicate the control thread has exited, and notify any
	 * waiting thread in dt_proc_destroy() that we have successfully
	 * exited.
	 */
	(void) pthread_mutex_lock(&dpr->dpr_lock);

	dt_proc_bpdestroy(dpr, B_TRUE);
	dpr->dpr_done = B_TRUE;
	dpr->dpr_tid = 0;

	(void) pthread_cond_broadcast(&dpr->dpr_cv);
	(void) pthread_mutex_unlock(&dpr->dpr_lock);

	return (NULL);
}

/*
 * Common error path for dt_proc_grab()/dt_proc_create(): record the
 * formatted error message, release any partially-initialized libproc
 * handle, free the dt_proc_t, set the libdtrace errno, and return NULL
 * for the caller to propagate.  The dpr pointer must not be used after
 * this call.
 */
/*PRINTFLIKE3*/
static struct ps_prochandle *
dt_proc_error(dtrace_hdl_t *dtp, dt_proc_t *dpr, const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	dt_set_errmsg(dtp, NULL, NULL, NULL, 0, format, ap);
	va_end(ap);

	if (dpr->dpr_proc != NULL)
		Prelease(dpr->dpr_proc, 0);

	dt_free(dtp, dpr);
	(void) dt_set_errno(dtp, EDT_COMPILER);
	return (NULL);
}

/*
 * Look up the dt_proc_t for libproc handle 'P' in the pid hash.  The entry
 * must exist (asserted).  If 'remove' is set, unlink it from its hash chain;
 * the dt_proc_t itself is not freed and remains owned by the caller.
 */
dt_proc_t *
dt_proc_lookup(dtrace_hdl_t *dtp, struct ps_prochandle *P, int remove)
{
	dt_proc_hash_t *dph = dtp->dt_procs;
#ifdef illumos
	pid_t pid = Pstatus(P)->pr_pid;
#else
	pid_t pid = proc_getpid(P);
#endif
	dt_proc_t *dpr, **dpp = &dph->dph_hash[pid & (dph->dph_hashlen - 1)];

	for (dpr = *dpp; dpr != NULL; dpr = dpr->dpr_hash) {
		if (dpr->dpr_pid == pid)
			break;
		else
			dpp = &dpr->dpr_hash;
	}

	assert(dpr != NULL);
	assert(dpr->dpr_proc == P);

	if (remove)
		*dpp = dpr->dpr_hash; /* remove from pid hash chain */

	return (dpr);
}

/*
 * Fully release a process handle: pick the Prelease() disposition from the
 * PR_KLC/PR_RLC flags, shut down the control thread if one exists, unhook
 * the dt_proc_t from the pid hash and any pending notifications, and free
 * everything.
 */
static void
dt_proc_destroy(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
	dt_proc_hash_t *dph = dtp->dt_procs;
	dt_proc_notify_t *npr, **npp;
	int rflag;

	assert(dpr != NULL);

	/*
	 * If neither PR_KLC nor PR_RLC is set, then the process is stopped by
	 * an external debugger and we were waiting in dt_proc_waitrun().
	 * Leave the process in this condition using PRELEASE_HANG.
	 */
#ifdef illumos
	if (!(Pstatus(dpr->dpr_proc)->pr_flags & (PR_KLC | PR_RLC))) {
#else
	if (!(proc_getflags(dpr->dpr_proc) & (PR_KLC | PR_RLC))) {
#endif
		dt_dprintf("abandoning pid %d\n", (int)dpr->dpr_pid);
		rflag = PRELEASE_HANG;
#ifdef illumos
	} else if (Pstatus(dpr->dpr_proc)->pr_flags & PR_KLC) {
#else
	} else if (proc_getflags(dpr->dpr_proc) & PR_KLC) {
#endif
		dt_dprintf("killing pid %d\n", (int)dpr->dpr_pid);
		rflag = PRELEASE_KILL; /* apply kill-on-last-close */
	} else {
		dt_dprintf("releasing pid %d\n", (int)dpr->dpr_pid);
		rflag = 0; /* apply run-on-last-close */
	}

	if (dpr->dpr_tid) {
		/*
		 * Set the dpr_quit flag to tell the daemon thread to exit.  We
		 * send it a SIGCANCEL to poke it out of PCWSTOP or any other
		 * long-term /proc system call.  Our daemon threads have POSIX
		 * cancellation disabled, so EINTR will be the only effect.  We
		 * then wait for dpr_done to indicate the thread has exited.
		 *
		 * We can't use pthread_kill() to send SIGCANCEL because the
		 * interface forbids it and we can't use pthread_cancel()
		 * because with cancellation disabled it won't actually
		 * send SIGCANCEL to the target thread, so we use _lwp_kill()
		 * to do the job.  This is all built on evil knowledge of
		 * the details of the cancellation mechanism in libc.
		 */
		(void) pthread_mutex_lock(&dpr->dpr_lock);
		dpr->dpr_quit = B_TRUE;
#ifdef illumos
		(void) _lwp_kill(dpr->dpr_tid, SIGCANCEL);
#else
		pthread_kill(dpr->dpr_tid, SIGTHR);
#endif

		/*
		 * If the process is currently idling in dt_proc_stop(), re-
		 * enable breakpoints and poke it into running again.
		 */
		if (dpr->dpr_stop & DT_PROC_STOP_IDLE) {
			dt_proc_bpenable(dpr);
			dpr->dpr_stop &= ~DT_PROC_STOP_IDLE;
			(void) pthread_cond_broadcast(&dpr->dpr_cv);
		}

		while (!dpr->dpr_done)
			(void) pthread_cond_wait(&dpr->dpr_cv, &dpr->dpr_lock);

		(void) pthread_mutex_unlock(&dpr->dpr_lock);
	}

	/*
	 * Before we free the process structure, remove this dt_proc_t from the
	 * lookup hash, and then walk the dt_proc_hash_t's notification list
	 * and remove this dt_proc_t if it is enqueued.
	 */
	(void) pthread_mutex_lock(&dph->dph_lock);
	(void) dt_proc_lookup(dtp, P, B_TRUE);
	npp = &dph->dph_notify;

	while ((npr = *npp) != NULL) {
		if (npr->dprn_dpr == dpr) {
			*npp = npr->dprn_next;
			dt_free(dtp, npr);
		} else {
			npp = &npr->dprn_next;
		}
	}

	(void) pthread_mutex_unlock(&dph->dph_lock);

	/*
	 * Remove the dt_proc_list from the LRU list, release the underlying
	 * libproc handle, and free our dt_proc_t data structure.
	 */
	if (dpr->dpr_cacheable) {
		assert(dph->dph_lrucnt != 0);
		dph->dph_lrucnt--;
	}

	dt_list_delete(&dph->dph_lrulist, dpr);
	Prelease(dpr->dpr_proc, rflag);
	dt_free(dtp, dpr);
}

/*
 * Spawn the control thread for 'dpr' and wait for it to reach its initial
 * rendezvous or fail.  'stop' is the DT_PROC_STOP_* bit naming the event at
 * which the control thread should idle until dt_proc_continue().  Returns 0
 * with dpr_lock released on success; on failure dt_proc_error() has freed
 * dpr and a non-zero error is returned for grab()/create() to propagate.
 */
static int
dt_proc_create_thread(dtrace_hdl_t *dtp, dt_proc_t *dpr, uint_t stop)
{
	dt_proc_control_data_t data;
	sigset_t nset, oset;
	pthread_attr_t a;
	int err;

	(void) pthread_mutex_lock(&dpr->dpr_lock);
	dpr->dpr_stop |= stop; /* set bit for initial rendezvous */

	(void) pthread_attr_init(&a);
	(void) pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);

	/*
	 * Block nearly all signals in the new thread; it is poked out of
	 * long /proc waits with the one signal left unblocked below.
	 */
	(void) sigfillset(&nset);
	(void) sigdelset(&nset, SIGABRT);	/* unblocked for assert() */
#ifdef illumos
	(void) sigdelset(&nset, SIGCANCEL);	/* see dt_proc_destroy() */
#else
	(void) sigdelset(&nset, SIGUSR1);	/* see dt_proc_destroy() */
#endif

	data.dpcd_hdl = dtp;
	data.dpcd_proc = dpr;

	(void) pthread_sigmask(SIG_SETMASK, &nset, &oset);
	err = pthread_create(&dpr->dpr_tid, &a, dt_proc_control, &data);
	(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);

	/*
	 * If the control thread was created, then wait on dpr_cv for either
	 * dpr_done to be set (the victim died or the control thread failed)
	 * or DT_PROC_STOP_IDLE to be set, indicating that the victim is now
	 * stopped by /proc and the control thread is at the rendezvous event.
	 * On success, we return with the process and control thread stopped:
	 * the caller can then apply dt_proc_continue() to resume both.
	 */
	if (err == 0) {
		while (!dpr->dpr_done && !(dpr->dpr_stop & DT_PROC_STOP_IDLE))
			(void) pthread_cond_wait(&dpr->dpr_cv, &dpr->dpr_lock);

		/*
		 * If dpr_done is set, the control thread aborted before it
		 * reached the rendezvous event.  This is either due to PS_LOST
		 * or PS_UNDEAD (i.e. the process died).  We try to provide a
		 * small amount of useful information to help figure it out.
		 */
		if (dpr->dpr_done) {
#ifdef illumos
			const psinfo_t *prp = Ppsinfo(dpr->dpr_proc);
			int stat = prp ? prp->pr_wstat : 0;
			int pid = dpr->dpr_pid;
#else
			int stat = proc_getwstat(dpr->dpr_proc);
			int pid = proc_getpid(dpr->dpr_proc);
#endif
			if (proc_state(dpr->dpr_proc) == PS_LOST) {
				(void) dt_proc_error(dpr->dpr_hdl, dpr,
				    "failed to control pid %d: process exec'd "
				    "set-id or unobservable program\n", pid);
			} else if (WIFSIGNALED(stat)) {
				(void) dt_proc_error(dpr->dpr_hdl, dpr,
				    "failed to control pid %d: process died "
				    "from signal %d\n", pid, WTERMSIG(stat));
			} else {
				(void) dt_proc_error(dpr->dpr_hdl, dpr,
				    "failed to control pid %d: process exited "
				    "with status %d\n", pid, WEXITSTATUS(stat));
			}

			err = ESRCH; /* cause grab() or create() to fail */
		}
	} else {
		(void) dt_proc_error(dpr->dpr_hdl, dpr,
		    "failed to create control thread for process-id %d: %s\n",
		    (int)dpr->dpr_pid, strerror(err));
	}

	/*
	 * Only unlock on success: on every error path above dt_proc_error()
	 * has already freed dpr, so dpr_lock no longer exists.
	 */
	if (err == 0)
		(void) pthread_mutex_unlock(&dpr->dpr_lock);
	(void) pthread_attr_destroy(&a);

	return (err);
}

/*
 * Create a new victim process by executing 'file' with 'argv' (and the
 * handle's dt_proc_env), set kill-on-last-close so the child dies with us,
 * start a control thread, and register the handle in the pid hash and LRU
 * list.  On the non-illumos path 'pcf'/'child_arg' are passed through to
 * proc_create() for use in the child.  Returns the libproc handle, or NULL
 * with the libdtrace errno set (via dt_proc_error()).
 */
struct ps_prochandle *
dt_proc_create(dtrace_hdl_t *dtp, const char *file, char *const *argv,
    proc_child_func *pcf, void *child_arg)
{
	dt_proc_hash_t *dph = dtp->dt_procs;
	dt_proc_t *dpr;
	int err;

	if ((dpr = dt_zalloc(dtp, sizeof (dt_proc_t))) == NULL)
		return (NULL); /* errno is set for us */

	(void) pthread_mutex_init(&dpr->dpr_lock, NULL);
	(void) pthread_cond_init(&dpr->dpr_cv, NULL);

#ifdef illumos
	dpr->dpr_proc = Pxcreate(file, argv, dtp->dt_proc_env, &err, NULL, 0);
	if (dpr->dpr_proc == NULL) {
		return (dt_proc_error(dtp, dpr,
		    "failed to execute %s: %s\n", file, Pcreate_error(err)));
	}
#else
	if ((err = proc_create(file, argv, dtp->dt_proc_env, pcf, child_arg,
	    &dpr->dpr_proc)) != 0) {
		return (dt_proc_error(dtp, dpr,
		    "failed to execute %s: %s\n", file, Pcreate_error(err)));
	}
#endif

	dpr->dpr_hdl = dtp;
#ifdef illumos
	dpr->dpr_pid = Pstatus(dpr->dpr_proc)->pr_pid;
#else
	dpr->dpr_pid = proc_getpid(dpr->dpr_proc);
#endif

	/* We created this process, so it should die when we close it. */
	(void) Punsetflags(dpr->dpr_proc, PR_RLC);
	(void) Psetflags(dpr->dpr_proc, PR_KLC);

	if (dt_proc_create_thread(dtp, dpr, dtp->dt_prcmode) != 0)
		return (NULL); /* dt_proc_error() has been called for us */

	dpr->dpr_hash = dph->dph_hash[dpr->dpr_pid & (dph->dph_hashlen - 1)];
	dph->dph_hash[dpr->dpr_pid & (dph->dph_hashlen - 1)] = dpr;
	dt_list_prepend(&dph->dph_lrulist, dpr);

	dt_dprintf("created pid %d\n", (int)dpr->dpr_pid);
	dpr->dpr_refs++;

	return (dpr->dpr_proc);
}

struct ps_prochandle *
dt_proc_grab(dtrace_hdl_t *dtp, pid_t pid, int flags, int nomonitor)
{
	dt_proc_hash_t *dph = dtp->dt_procs;
	uint_t h = pid & (dph->dph_hashlen - 1);
	dt_proc_t *dpr, *opr;
	int err;

	/*
	 * Search the hash table for the pid.  If it is already grabbed or
	 * created, move the handle to the front of the lrulist, increment
	 * the reference count, and return the existing ps_prochandle.
	 */
	for (dpr = dph->dph_hash[h]; dpr != NULL; dpr = dpr->dpr_hash) {
		if (dpr->dpr_pid == pid && !dpr->dpr_stale) {
			/*
			 * If the cached handle was opened read-only and
			 * this request is for a writeable handle, mark
			 * the cached handle as stale and open a new handle.
			 * Since it's stale, unmark it as cacheable.
1028 */ 1029 if (dpr->dpr_rdonly && !(flags & PGRAB_RDONLY)) { 1030 dt_dprintf("upgrading pid %d\n", (int)pid); 1031 dpr->dpr_stale = B_TRUE; 1032 dpr->dpr_cacheable = B_FALSE; 1033 dph->dph_lrucnt--; 1034 break; 1035 } 1036 1037 dt_dprintf("grabbed pid %d (cached)\n", (int)pid); 1038 dt_list_delete(&dph->dph_lrulist, dpr); 1039 dt_list_prepend(&dph->dph_lrulist, dpr); 1040 dpr->dpr_refs++; 1041 return (dpr->dpr_proc); 1042 } 1043 } 1044 1045 if ((dpr = dt_zalloc(dtp, sizeof (dt_proc_t))) == NULL) 1046 return (NULL); /* errno is set for us */ 1047 1048 (void) pthread_mutex_init(&dpr->dpr_lock, NULL); 1049 (void) pthread_cond_init(&dpr->dpr_cv, NULL); 1050 1051 #ifdef illumos 1052 if ((dpr->dpr_proc = Pgrab(pid, flags, &err)) == NULL) { 1053 #else 1054 if ((err = proc_attach(pid, flags, &dpr->dpr_proc)) != 0) { 1055 #endif 1056 return (dt_proc_error(dtp, dpr, 1057 "failed to grab pid %d: %s\n", (int)pid, Pgrab_error(err))); 1058 } 1059 1060 dpr->dpr_hdl = dtp; 1061 dpr->dpr_pid = pid; 1062 1063 (void) Punsetflags(dpr->dpr_proc, PR_KLC); 1064 (void) Psetflags(dpr->dpr_proc, PR_RLC); 1065 1066 /* 1067 * If we are attempting to grab the process without a monitor 1068 * thread, then mark the process cacheable only if it's being 1069 * grabbed read-only. If we're currently caching more process 1070 * handles than dph_lrulim permits, attempt to find the 1071 * least-recently-used handle that is currently unreferenced and 1072 * release it from the cache. Otherwise we are grabbing the process 1073 * for control: create a control thread for this process and store 1074 * its ID in dpr->dpr_tid. 
1075 */ 1076 if (nomonitor || (flags & PGRAB_RDONLY)) { 1077 if (dph->dph_lrucnt >= dph->dph_lrulim) { 1078 for (opr = dt_list_prev(&dph->dph_lrulist); 1079 opr != NULL; opr = dt_list_prev(opr)) { 1080 if (opr->dpr_cacheable && opr->dpr_refs == 0) { 1081 dt_proc_destroy(dtp, opr->dpr_proc); 1082 break; 1083 } 1084 } 1085 } 1086 1087 if (flags & PGRAB_RDONLY) { 1088 dpr->dpr_cacheable = B_TRUE; 1089 dpr->dpr_rdonly = B_TRUE; 1090 dph->dph_lrucnt++; 1091 } 1092 1093 } else if (dt_proc_create_thread(dtp, dpr, DT_PROC_STOP_GRAB) != 0) 1094 return (NULL); /* dt_proc_error() has been called for us */ 1095 1096 dpr->dpr_hash = dph->dph_hash[h]; 1097 dph->dph_hash[h] = dpr; 1098 dt_list_prepend(&dph->dph_lrulist, dpr); 1099 1100 dt_dprintf("grabbed pid %d\n", (int)pid); 1101 dpr->dpr_refs++; 1102 1103 return (dpr->dpr_proc); 1104 } 1105 1106 void 1107 dt_proc_release(dtrace_hdl_t *dtp, struct ps_prochandle *P) 1108 { 1109 dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE); 1110 dt_proc_hash_t *dph = dtp->dt_procs; 1111 1112 assert(dpr != NULL); 1113 assert(dpr->dpr_refs != 0); 1114 1115 if (--dpr->dpr_refs == 0 && 1116 (!dpr->dpr_cacheable || dph->dph_lrucnt > dph->dph_lrulim)) 1117 dt_proc_destroy(dtp, P); 1118 } 1119 1120 void 1121 dt_proc_continue(dtrace_hdl_t *dtp, struct ps_prochandle *P) 1122 { 1123 dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE); 1124 1125 (void) pthread_mutex_lock(&dpr->dpr_lock); 1126 1127 if (dpr->dpr_stop & DT_PROC_STOP_IDLE) { 1128 dpr->dpr_stop &= ~DT_PROC_STOP_IDLE; 1129 (void) pthread_cond_broadcast(&dpr->dpr_cv); 1130 } 1131 1132 (void) pthread_mutex_unlock(&dpr->dpr_lock); 1133 } 1134 1135 void 1136 dt_proc_lock(dtrace_hdl_t *dtp, struct ps_prochandle *P) 1137 { 1138 dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE); 1139 int err = pthread_mutex_lock(&dpr->dpr_lock); 1140 assert(err == 0); /* check for recursion */ 1141 } 1142 1143 void 1144 dt_proc_unlock(dtrace_hdl_t *dtp, struct ps_prochandle *P) 1145 { 1146 dt_proc_t *dpr = 
dt_proc_lookup(dtp, P, B_FALSE); 1147 int err = pthread_mutex_unlock(&dpr->dpr_lock); 1148 assert(err == 0); /* check for unheld lock */ 1149 } 1150 1151 void 1152 dt_proc_init(dtrace_hdl_t *dtp) 1153 { 1154 extern char **environ; 1155 static char *envdef[] = { 1156 "LD_NOLAZYLOAD=1", /* linker lazy loading hides funcs */ 1157 NULL 1158 }; 1159 char **p; 1160 int i; 1161 1162 if ((dtp->dt_procs = dt_zalloc(dtp, sizeof (dt_proc_hash_t) + 1163 sizeof (dt_proc_t *) * _dtrace_pidbuckets - 1)) == NULL) 1164 return; 1165 1166 (void) pthread_mutex_init(&dtp->dt_procs->dph_lock, NULL); 1167 (void) pthread_cond_init(&dtp->dt_procs->dph_cv, NULL); 1168 1169 dtp->dt_procs->dph_hashlen = _dtrace_pidbuckets; 1170 dtp->dt_procs->dph_lrulim = _dtrace_pidlrulim; 1171 1172 /* 1173 * Count how big our environment needs to be. 1174 */ 1175 for (i = 1, p = environ; *p != NULL; i++, p++) 1176 continue; 1177 for (p = envdef; *p != NULL; i++, p++) 1178 continue; 1179 1180 if ((dtp->dt_proc_env = dt_zalloc(dtp, sizeof (char *) * i)) == NULL) 1181 return; 1182 1183 for (i = 0, p = environ; *p != NULL; i++, p++) { 1184 if ((dtp->dt_proc_env[i] = strdup(*p)) == NULL) 1185 goto err; 1186 } 1187 for (p = envdef; *p != NULL; i++, p++) { 1188 if ((dtp->dt_proc_env[i] = strdup(*p)) == NULL) 1189 goto err; 1190 } 1191 1192 return; 1193 1194 err: 1195 while (--i != 0) { 1196 dt_free(dtp, dtp->dt_proc_env[i]); 1197 } 1198 dt_free(dtp, dtp->dt_proc_env); 1199 dtp->dt_proc_env = NULL; 1200 } 1201 1202 void 1203 dt_proc_fini(dtrace_hdl_t *dtp) 1204 { 1205 dt_proc_hash_t *dph = dtp->dt_procs; 1206 dt_proc_t *dpr; 1207 char **p; 1208 1209 while ((dpr = dt_list_next(&dph->dph_lrulist)) != NULL) 1210 dt_proc_destroy(dtp, dpr->dpr_proc); 1211 1212 dtp->dt_procs = NULL; 1213 dt_free(dtp, dph); 1214 1215 for (p = dtp->dt_proc_env; *p != NULL; p++) 1216 dt_free(dtp, *p); 1217 1218 dt_free(dtp, dtp->dt_proc_env); 1219 dtp->dt_proc_env = NULL; 1220 } 1221 1222 struct ps_prochandle * 1223 
dtrace_proc_create(dtrace_hdl_t *dtp, const char *file, char *const *argv, 1224 proc_child_func *pcf, void *child_arg) 1225 { 1226 dt_ident_t *idp = dt_idhash_lookup(dtp->dt_macros, "target"); 1227 struct ps_prochandle *P = dt_proc_create(dtp, file, argv, pcf, child_arg); 1228 1229 if (P != NULL && idp != NULL && idp->di_id == 0) { 1230 #ifdef illumos 1231 idp->di_id = Pstatus(P)->pr_pid; /* $target = created pid */ 1232 #else 1233 idp->di_id = proc_getpid(P); /* $target = created pid */ 1234 #endif 1235 } 1236 1237 return (P); 1238 } 1239 1240 struct ps_prochandle * 1241 dtrace_proc_grab(dtrace_hdl_t *dtp, pid_t pid, int flags) 1242 { 1243 dt_ident_t *idp = dt_idhash_lookup(dtp->dt_macros, "target"); 1244 struct ps_prochandle *P = dt_proc_grab(dtp, pid, flags, 0); 1245 1246 if (P != NULL && idp != NULL && idp->di_id == 0) 1247 idp->di_id = pid; /* $target = grabbed pid */ 1248 1249 return (P); 1250 } 1251 1252 void 1253 dtrace_proc_release(dtrace_hdl_t *dtp, struct ps_prochandle *P) 1254 { 1255 dt_proc_release(dtp, P); 1256 } 1257 1258 void 1259 dtrace_proc_continue(dtrace_hdl_t *dtp, struct ps_prochandle *P) 1260 { 1261 dt_proc_continue(dtp, P); 1262 } 1263