/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * DTrace Process Control
 *
 * This file provides a set of routines that permit libdtrace and its clients
 * to create and grab process handles using libproc, and to share these handles
 * between library mechanisms that need libproc access, such as ustack(), and
 * client mechanisms that need libproc access, such as dtrace(1M) -c and -p.
 * The library provides several mechanisms in the libproc control layer:
 *
 * Reference Counting: The library code and client code can independently grab
 * the same process handles without interfering with one another. Only when
 * the reference count drops to zero and the handle is not being cached (see
 * below for more information on caching) will Prelease() be called on it.
 *
 * Handle Caching: If a handle is grabbed PGRAB_RDONLY (e.g. by ustack()) and
 * the reference count drops to zero, the handle is not immediately released.
 * Instead, libproc handles are maintained on dph_lrulist in order from most-
 * recently accessed to least-recently accessed. Idle handles are maintained
 * until a pre-defined LRU cache limit is exceeded, permitting repeated calls
 * to ustack() to avoid the overhead of releasing and re-grabbing processes.
 *
 * Process Control: For processes that are grabbed for control (~PGRAB_RDONLY)
 * or created by dt_proc_create(), a control thread is created to provide
 * callbacks on process exit and symbol table caching on dlopen()s.
 *
 * MT-Safety: Libproc is not MT-Safe, so dt_proc_lock() and dt_proc_unlock()
 * are provided to synchronize access to the libproc handle between libdtrace
 * code and client code and the control thread's use of the ps_prochandle.
 *
 * NOTE: MT-Safety is NOT provided for libdtrace itself, or for use of the
 * dtrace_proc_grab/dtrace_proc_create mechanisms. Like all exported libdtrace
 * calls, these are assumed to be MT-Unsafe. MT-Safety is ONLY provided for
 * synchronization between libdtrace control threads and the client thread.
 *
 * The ps_prochandles themselves are maintained along with a dt_proc_t struct
 * in a hash table indexed by PID. This provides basic locking and reference
 * counting. The dt_proc_t is also maintained in LRU order on dph_lrulist.
 * The dph_lrucnt and dph_lrulim count the number of cacheable processes and
 * the current limit on the number of actively cached entries.
 *
 * The control thread for a process establishes breakpoints at the rtld_db
 * locations of interest, updates mappings and symbol tables at these points,
 * and handles exec and fork (by always following the parent). The control
 * thread automatically exits when the process dies or control is lost.
 *
 * A simple notification mechanism is provided for libdtrace clients using
 * dtrace_handle_proc() for notification of PS_UNDEAD or PS_LOST events. If
 * such an event occurs, the dt_proc_t itself is enqueued on a notification
 * list and the control thread broadcasts to dph_cv. dtrace_sleep() will wake
 * up using this condition and will then call the client handler as necessary.
 */
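
/*
 * Illustrative usage sketch (an editorial example, not part of the library):
 * a read-only consumer such as the ustack() symbol lookup path grabs a handle
 * with PGRAB_RDONLY and releases it when done; because such handles are
 * cacheable, the release typically leaves the ps_prochandle on dph_lrulist
 * for the next lookup rather than calling Prelease() immediately:
 *
 *	struct ps_prochandle *P = dt_proc_grab(dtp, pid, PGRAB_RDONLY, 0);
 *
 *	if (P != NULL) {
 *		// ... libproc queries under dt_proc_lock()/dt_proc_unlock() ...
 *		dt_proc_release(dtp, P);	// handle may remain cached
 *	}
 */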

#include <sys/wait.h>
#ifdef illumos
#include <sys/lwp.h>
#endif
#include <strings.h>
#include <signal.h>
#include <assert.h>
#include <errno.h>

#include <dt_proc.h>
#include <dt_pid.h>
#include <dt_impl.h>

#ifndef illumos
#include <sys/syscall.h>
#include <libproc_compat.h>
#define	SYS_forksys SYS_fork
#endif

#define	IS_SYS_EXEC(w)	(w == SYS_execve)
#define	IS_SYS_FORK(w)	(w == SYS_vfork || w == SYS_forksys)

static dt_bkpt_t *
dt_proc_bpcreate(dt_proc_t *dpr, uintptr_t addr, dt_bkpt_f *func, void *data)
{
	struct ps_prochandle *P = dpr->dpr_proc;
	dt_bkpt_t *dbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	if ((dbp = dt_zalloc(dpr->dpr_hdl, sizeof (dt_bkpt_t))) != NULL) {
		dbp->dbp_func = func;
		dbp->dbp_data = data;
		dbp->dbp_addr = addr;

		if (Psetbkpt(P, dbp->dbp_addr, &dbp->dbp_instr) == 0)
			dbp->dbp_active = B_TRUE;

		dt_list_append(&dpr->dpr_bps, dbp);
	}

	return (dbp);
}

static void
dt_proc_bpdestroy(dt_proc_t *dpr, int delbkpts)
{
	int state = Pstate(dpr->dpr_proc);
	dt_bkpt_t *dbp, *nbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	for (dbp = dt_list_next(&dpr->dpr_bps); dbp != NULL; dbp = nbp) {
		if (delbkpts && dbp->dbp_active &&
		    state != PS_LOST && state != PS_UNDEAD) {
			(void) Pdelbkpt(dpr->dpr_proc,
			    dbp->dbp_addr, dbp->dbp_instr);
		}
		nbp = dt_list_next(dbp);
		dt_list_delete(&dpr->dpr_bps, dbp);
		dt_free(dpr->dpr_hdl, dbp);
	}
}

static void
dt_proc_bpmatch(dtrace_hdl_t *dtp, dt_proc_t *dpr)
{
#ifdef illumos
	const lwpstatus_t *psp = &Pstatus(dpr->dpr_proc)->pr_lwp;
#else
	unsigned long pc;
#endif
	dt_bkpt_t *dbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

#ifndef illumos
	proc_regget(dpr->dpr_proc, REG_PC, &pc);
	proc_bkptregadj(&pc);
#endif

	for (dbp = dt_list_next(&dpr->dpr_bps);
	    dbp != NULL; dbp = dt_list_next(dbp)) {
#ifdef illumos
		if (psp->pr_reg[R_PC] == dbp->dbp_addr)
			break;
#else
		if (pc == dbp->dbp_addr)
			break;
#endif
	}

	if (dbp == NULL) {
		dt_dprintf("pid %d: spurious breakpoint wakeup for %lx\n",
#ifdef illumos
		    (int)dpr->dpr_pid, (ulong_t)psp->pr_reg[R_PC]);
#else
		    (int)dpr->dpr_pid, pc);
#endif
		return;
	}

	dt_dprintf("pid %d: hit breakpoint at %lx (%lu)\n",
	    (int)dpr->dpr_pid, (ulong_t)dbp->dbp_addr, ++dbp->dbp_hits);

	dbp->dbp_func(dtp, dpr, dbp->dbp_data);
	(void) Pxecbkpt(dpr->dpr_proc, dbp->dbp_instr);
}
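
/*
 * Sketch of a breakpoint consumer (editorial illustration only; the callback
 * name is hypothetical and the dt_bkpt_f shape is inferred from the dbp_func
 * invocation in dt_proc_bpmatch() above): a callback registered with
 * dt_proc_bpcreate() is invoked with the handle, the dt_proc_t, and the
 * opaque data pointer while dpr_lock is held, e.g.:
 *
 *	static void
 *	example_bkpt_cb(dtrace_hdl_t *dtp, dt_proc_t *dpr, void *data)
 *	{
 *		dt_dprintf("pid %d: reached %s\n",
 *		    (int)dpr->dpr_pid, (const char *)data);
 *	}
 *
 *	(void) dt_proc_bpcreate(dpr, addr, example_bkpt_cb, (void *)"label");
 *
 * where addr is the traced text address of interest.
 */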

static void
dt_proc_bpenable(dt_proc_t *dpr)
{
	dt_bkpt_t *dbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	for (dbp = dt_list_next(&dpr->dpr_bps);
	    dbp != NULL; dbp = dt_list_next(dbp)) {
		if (!dbp->dbp_active && Psetbkpt(dpr->dpr_proc,
		    dbp->dbp_addr, &dbp->dbp_instr) == 0)
			dbp->dbp_active = B_TRUE;
	}

	dt_dprintf("breakpoints enabled\n");
}

static void
dt_proc_bpdisable(dt_proc_t *dpr)
{
	dt_bkpt_t *dbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	for (dbp = dt_list_next(&dpr->dpr_bps);
	    dbp != NULL; dbp = dt_list_next(dbp)) {
		if (dbp->dbp_active && Pdelbkpt(dpr->dpr_proc,
		    dbp->dbp_addr, dbp->dbp_instr) == 0)
			dbp->dbp_active = B_FALSE;
	}

	dt_dprintf("breakpoints disabled\n");
}

static void
dt_proc_notify(dtrace_hdl_t *dtp, dt_proc_hash_t *dph, dt_proc_t *dpr,
    const char *msg)
{
	dt_proc_notify_t *dprn = dt_alloc(dtp, sizeof (dt_proc_notify_t));

	if (dprn == NULL) {
		dt_dprintf("failed to allocate notification for %d %s\n",
		    (int)dpr->dpr_pid, msg);
	} else {
		dprn->dprn_dpr = dpr;
		if (msg == NULL)
			dprn->dprn_errmsg[0] = '\0';
		else
			(void) strlcpy(dprn->dprn_errmsg, msg,
			    sizeof (dprn->dprn_errmsg));

		(void) pthread_mutex_lock(&dph->dph_lock);

		dprn->dprn_next = dph->dph_notify;
		dph->dph_notify = dprn;

		(void) pthread_cond_broadcast(&dph->dph_cv);
		(void) pthread_mutex_unlock(&dph->dph_lock);
	}
}

/*
 * Check to see if the control thread was requested to stop when the victim
 * process reached a particular event (why) rather than continuing the victim.
 * If 'why' is set in the stop mask, we wait on dpr_cv for dt_proc_continue().
 * If 'why' is not set, this function returns immediately and does nothing.
 */
static void
dt_proc_stop(dt_proc_t *dpr, uint8_t why)
{
	assert(DT_MUTEX_HELD(&dpr->dpr_lock));
	assert(why != DT_PROC_STOP_IDLE);

	if (dpr->dpr_stop & why) {
		dpr->dpr_stop |= DT_PROC_STOP_IDLE;
		dpr->dpr_stop &= ~why;

		(void) pthread_cond_broadcast(&dpr->dpr_cv);

		/*
		 * We disable breakpoints while stopped to preserve the
		 * integrity of the program text for both our own disassembly
		 * and that of the kernel.
		 */
		dt_proc_bpdisable(dpr);

		while (dpr->dpr_stop & DT_PROC_STOP_IDLE)
			(void) pthread_cond_wait(&dpr->dpr_cv, &dpr->dpr_lock);

		dt_proc_bpenable(dpr);
	}
}
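
/*
 * Rendezvous sketch (illustrative only, not additional code): dt_proc_continue(),
 * defined later in this file, is the other half of dt_proc_stop(). It clears
 * DT_PROC_STOP_IDLE under dpr_lock and broadcasts dpr_cv, which is what
 * releases the wait loop above:
 *
 *	(void) pthread_mutex_lock(&dpr->dpr_lock);
 *	if (dpr->dpr_stop & DT_PROC_STOP_IDLE) {
 *		dpr->dpr_stop &= ~DT_PROC_STOP_IDLE;
 *		(void) pthread_cond_broadcast(&dpr->dpr_cv);
 *	}
 *	(void) pthread_mutex_unlock(&dpr->dpr_lock);
 */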

/*ARGSUSED*/
static void
dt_proc_bpmain(dtrace_hdl_t *dtp, dt_proc_t *dpr, const char *fname)
{
	dt_dprintf("pid %d: breakpoint at %s()\n", (int)dpr->dpr_pid, fname);
	dt_proc_stop(dpr, DT_PROC_STOP_MAIN);
}

static void
dt_proc_rdevent(dtrace_hdl_t *dtp, dt_proc_t *dpr, const char *evname)
{
	rd_event_msg_t rdm;
	rd_err_e err;

	if ((err = rd_event_getmsg(dpr->dpr_rtld, &rdm)) != RD_OK) {
		dt_dprintf("pid %d: failed to get %s event message: %s\n",
		    (int)dpr->dpr_pid, evname, rd_errstr(err));
		return;
	}

	dt_dprintf("pid %d: rtld event %s type=%d state %d\n",
	    (int)dpr->dpr_pid, evname, rdm.type, rdm.u.state);

	switch (rdm.type) {
	case RD_DLACTIVITY:
		if (rdm.u.state != RD_CONSISTENT)
			break;

		Pupdate_syms(dpr->dpr_proc);
		if (dt_pid_create_probes_module(dtp, dpr) != 0)
			dt_proc_notify(dtp, dtp->dt_procs, dpr,
			    dpr->dpr_errmsg);

		break;
	case RD_PREINIT:
		Pupdate_syms(dpr->dpr_proc);
		dt_proc_stop(dpr, DT_PROC_STOP_PREINIT);
		break;
	case RD_POSTINIT:
		Pupdate_syms(dpr->dpr_proc);
		dt_proc_stop(dpr, DT_PROC_STOP_POSTINIT);
		break;
	}
}

static void
dt_proc_rdwatch(dt_proc_t *dpr, rd_event_e event, const char *evname)
{
	rd_notify_t rdn;
	rd_err_e err;

	if ((err = rd_event_addr(dpr->dpr_rtld, event, &rdn)) != RD_OK) {
		dt_dprintf("pid %d: failed to get event address for %s: %s\n",
		    (int)dpr->dpr_pid, evname, rd_errstr(err));
		return;
	}

	if (rdn.type != RD_NOTIFY_BPT) {
		dt_dprintf("pid %d: event %s has unexpected type %d\n",
		    (int)dpr->dpr_pid, evname, rdn.type);
		return;
	}

	(void) dt_proc_bpcreate(dpr, rdn.u.bptaddr,
#ifdef illumos
	    (dt_bkpt_f *)dt_proc_rdevent, (void *)evname);
#else
	    /* XXX ugly */
	    (dt_bkpt_f *)dt_proc_rdevent, __DECONST(void *, evname));
#endif
}

/*
 * Common code for enabling events associated with the run-time linker after
 * attaching to a process or after a victim process completes an exec(2).
 */
static void
dt_proc_attach(dt_proc_t *dpr, int exec)
{
#ifdef illumos
	const pstatus_t *psp = Pstatus(dpr->dpr_proc);
#endif
	rd_err_e err;
	GElf_Sym sym;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	if (exec) {
#ifdef illumos
		if (psp->pr_lwp.pr_errno != 0)
			return; /* exec failed: nothing needs to be done */
#endif

		dt_proc_bpdestroy(dpr, B_FALSE);
#ifdef illumos
		Preset_maps(dpr->dpr_proc);
#endif
	}
	if ((dpr->dpr_rtld = Prd_agent(dpr->dpr_proc)) != NULL &&
	    (err = rd_event_enable(dpr->dpr_rtld, B_TRUE)) == RD_OK) {
#ifdef illumos
		dt_proc_rdwatch(dpr, RD_PREINIT, "RD_PREINIT");
#endif
		dt_proc_rdwatch(dpr, RD_POSTINIT, "RD_POSTINIT");
#ifdef illumos
		dt_proc_rdwatch(dpr, RD_DLACTIVITY, "RD_DLACTIVITY");
#endif
	} else {
		dt_dprintf("pid %d: failed to enable rtld events: %s\n",
		    (int)dpr->dpr_pid, dpr->dpr_rtld ? rd_errstr(err) :
		    "rtld_db agent initialization failed");
	}

	Pupdate_maps(dpr->dpr_proc);

	if (Pxlookup_by_name(dpr->dpr_proc, LM_ID_BASE,
	    "a.out", "main", &sym, NULL) == 0) {
		(void) dt_proc_bpcreate(dpr, (uintptr_t)sym.st_value,
		    (dt_bkpt_f *)dt_proc_bpmain, "a.out`main");
	} else {
		dt_dprintf("pid %d: failed to find a.out`main: %s\n",
		    (int)dpr->dpr_pid, strerror(errno));
	}
}

/*
 * Wait for a stopped process to be set running again by some other debugger.
 * This is typically not required by /proc-based debuggers, since the usual
 * model is that one debugger controls one victim. But DTrace, as usual, has
 * its own needs: the stop() action assumes that prun(1) or some other tool
 * will be applied to resume the victim process. This could be solved by
 * adding a PCWRUN directive to /proc, but that seems like overkill unless
 * other debuggers end up needing this functionality, so we implement a cheap
 * equivalent to PCWRUN using the set of existing kernel mechanisms.
 *
 * Our intent is really not just to wait for the victim to run, but rather to
 * wait for it to run and then stop again for a reason other than the current
 * PR_REQUESTED stop. Since PCWSTOP/Pstopstatus() can be applied repeatedly
 * to a stopped process and will return the same result without affecting the
 * victim, we can just perform these operations repeatedly until Pstate()
 * changes, the representative LWP ID changes, or the stop timestamp advances.
 * dt_proc_control() will then rediscover the new state and continue as usual.
 * When the process is still stopped in the same exact state, we sleep for a
 * brief interval before waiting again so as not to spin consuming CPU cycles.
 */
static void
dt_proc_waitrun(dt_proc_t *dpr)
{
	printf("%s:%s(%d): not implemented\n", __FUNCTION__, __FILE__,
	    __LINE__);
#ifdef DOODAD
	struct ps_prochandle *P = dpr->dpr_proc;
	const lwpstatus_t *psp = &Pstatus(P)->pr_lwp;

	int krflag = psp->pr_flags & (PR_KLC | PR_RLC);
	timestruc_t tstamp = psp->pr_tstamp;
	lwpid_t lwpid = psp->pr_lwpid;

	const long wstop = PCWSTOP;
	int pfd = Pctlfd(P);

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));
	assert(psp->pr_flags & PR_STOPPED);
	assert(Pstate(P) == PS_STOP);

	/*
	 * While we are waiting for the victim to run, clear PR_KLC and PR_RLC
	 * so that if the libdtrace client is killed, the victim stays stopped.
	 * dt_proc_destroy() will also observe this and perform PRELEASE_HANG.
	 */
	(void) Punsetflags(P, krflag);
	Psync(P);

	(void) pthread_mutex_unlock(&dpr->dpr_lock);

	while (!dpr->dpr_quit) {
		if (write(pfd, &wstop, sizeof (wstop)) == -1 && errno == EINTR)
			continue; /* check dpr_quit and continue waiting */

		(void) pthread_mutex_lock(&dpr->dpr_lock);
		(void) Pstopstatus(P, PCNULL, 0);
		psp = &Pstatus(P)->pr_lwp;

		/*
		 * If we've reached a new state, found a new representative, or
		 * the stop timestamp has changed, restore PR_KLC/PR_RLC to its
		 * original setting and then return with dpr_lock held.
		 */
		if (Pstate(P) != PS_STOP || psp->pr_lwpid != lwpid ||
		    bcmp(&psp->pr_tstamp, &tstamp, sizeof (tstamp)) != 0) {
			(void) Psetflags(P, krflag);
			Psync(P);
			return;
		}

		(void) pthread_mutex_unlock(&dpr->dpr_lock);
		(void) poll(NULL, 0, MILLISEC / 2);
	}

	(void) pthread_mutex_lock(&dpr->dpr_lock);
#endif
}

typedef struct dt_proc_control_data {
	dtrace_hdl_t *dpcd_hdl;		/* DTrace handle */
	dt_proc_t *dpcd_proc;		/* process to control */
} dt_proc_control_data_t;

/*
 * Main loop for all victim process control threads. We initialize all the
 * appropriate /proc control mechanisms, and then enter a loop waiting for
 * the process to stop on an event or die. We process any events by calling
 * appropriate subroutines, and exit when the victim dies or we lose control.
 *
 * The control thread synchronizes the use of dpr_proc with other libdtrace
 * threads using dpr_lock. We hold the lock for all of our operations except
 * waiting while the process is running: this is accomplished by writing a
 * PCWSTOP directive directly to the underlying /proc/<pid>/ctl file. If the
 * libdtrace client wishes to exit or abort our wait, SIGCANCEL can be used.
 */
static void *
dt_proc_control(void *arg)
{
	dt_proc_control_data_t *datap = arg;
	dtrace_hdl_t *dtp = datap->dpcd_hdl;
	dt_proc_t *dpr = datap->dpcd_proc;
	dt_proc_hash_t *dph = dpr->dpr_hdl->dt_procs;
	struct ps_prochandle *P = dpr->dpr_proc;
	int pid = dpr->dpr_pid;

#ifdef illumos
	int pfd = Pctlfd(P);

	const long wstop = PCWSTOP;
#endif
	int notify = B_FALSE;

	/*
	 * We disable the POSIX thread cancellation mechanism so that the
	 * client program using libdtrace can't accidentally cancel our thread.
	 * dt_proc_destroy() uses SIGCANCEL explicitly to simply poke us out
	 * of PCWSTOP with EINTR, at which point we will see dpr_quit and exit.
	 */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);

	/*
	 * Set up the corresponding process for tracing by libdtrace. We want
	 * to be able to catch breakpoints and efficiently single-step over
	 * them, and we need to enable librtld_db to watch libdl activity.
	 */
	(void) pthread_mutex_lock(&dpr->dpr_lock);

#ifdef illumos
	(void) Punsetflags(P, PR_ASYNC);	/* require synchronous mode */
	(void) Psetflags(P, PR_BPTADJ);		/* always adjust eip on x86 */
	(void) Punsetflags(P, PR_FORK);		/* do not inherit on fork */

	(void) Pfault(P, FLTBPT, B_TRUE);	/* always trace breakpoints */
	(void) Pfault(P, FLTTRACE, B_TRUE);	/* always trace single-step */

	/*
	 * We must trace exit from exec() system calls so that if the exec is
	 * successful, we can reset our breakpoints and re-initialize libproc.
	 */
	(void) Psysexit(P, SYS_execve, B_TRUE);

	/*
	 * We must trace entry and exit for fork() system calls in order to
	 * disable our breakpoints temporarily during the fork. We do not set
	 * the PR_FORK flag, so if fork succeeds the child begins executing and
	 * does not inherit any other tracing behaviors or a control thread.
	 */
	(void) Psysentry(P, SYS_vfork, B_TRUE);
	(void) Psysexit(P, SYS_vfork, B_TRUE);
	(void) Psysentry(P, SYS_forksys, B_TRUE);
	(void) Psysexit(P, SYS_forksys, B_TRUE);

	Psync(P);				/* enable all /proc changes */
#endif
	dt_proc_attach(dpr, B_FALSE);		/* enable rtld breakpoints */

	/*
	 * If PR_KLC is set, we created the process; otherwise we grabbed it.
	 * Check for an appropriate stop request and wait for dt_proc_continue.
	 */
#ifdef illumos
	if (Pstatus(P)->pr_flags & PR_KLC)
#else
	if (proc_getflags(P) & PR_KLC)
#endif
		dt_proc_stop(dpr, DT_PROC_STOP_CREATE);
	else
		dt_proc_stop(dpr, DT_PROC_STOP_GRAB);

	if (Psetrun(P, 0, 0) == -1) {
		dt_dprintf("pid %d: failed to set running: %s\n",
		    (int)dpr->dpr_pid, strerror(errno));
	}

	(void) pthread_mutex_unlock(&dpr->dpr_lock);

	/*
	 * Wait for the process corresponding to this control thread to stop,
	 * process the event, and then set it running again. We want to sleep
	 * with dpr_lock *unheld* so that other parts of libdtrace can use the
	 * ps_prochandle in the meantime (e.g. ustack()). To do this, we write
	 * a PCWSTOP directive directly to the underlying /proc/<pid>/ctl file.
	 * Once the process stops, we wake up, grab dpr_lock, and then call
	 * Pwait() (which will return immediately) and do our processing.
	 */
	while (!dpr->dpr_quit) {
		const lwpstatus_t *psp;

#ifdef illumos
		if (write(pfd, &wstop, sizeof (wstop)) == -1 && errno == EINTR)
			continue; /* check dpr_quit and continue waiting */
#else
		/* Wait for the process to report status. */
		proc_wstatus(P);
		if (errno == EINTR)
			continue; /* check dpr_quit and continue waiting */
#endif

		(void) pthread_mutex_lock(&dpr->dpr_lock);

#ifdef illumos
pwait_locked:
		if (Pstopstatus(P, PCNULL, 0) == -1 && errno == EINTR) {
			(void) pthread_mutex_unlock(&dpr->dpr_lock);
			continue; /* check dpr_quit and continue waiting */
		}
#endif

		switch (Pstate(P)) {
		case PS_STOP:
#ifdef illumos
			psp = &Pstatus(P)->pr_lwp;
#else
			psp = proc_getlwpstatus(P);
#endif

			dt_dprintf("pid %d: proc stopped showing %d/%d\n",
			    pid, psp->pr_why, psp->pr_what);

			/*
			 * If the process stops showing PR_REQUESTED, then the
			 * DTrace stop() action was applied to it or another
			 * debugging utility (e.g. pstop(1)) asked it to stop.
			 * In either case, the user's intention is for the
			 * process to remain stopped until another external
			 * mechanism (e.g. prun(1)) is applied. So instead of
			 * setting the process running ourselves, we wait for
			 * someone else to do so. Once that happens, we return
			 * to our normal loop waiting for an event of interest.
			 */
			if (psp->pr_why == PR_REQUESTED) {
				dt_proc_waitrun(dpr);
				(void) pthread_mutex_unlock(&dpr->dpr_lock);
				continue;
			}

			/*
			 * If the process stops showing one of the events that
			 * we are tracing, perform the appropriate response.
			 * Note that we ignore PR_SUSPENDED, PR_CHECKPOINT, and
			 * PR_JOBCONTROL by design: if one of these conditions
			 * occurs, we will fall through to Psetrun() but the
			 * process will remain stopped in the kernel by the
			 * corresponding mechanism (e.g. job control stop).
			 */
			if (psp->pr_why == PR_FAULTED && psp->pr_what == FLTBPT)
				dt_proc_bpmatch(dtp, dpr);
			else if (psp->pr_why == PR_SYSENTRY &&
			    IS_SYS_FORK(psp->pr_what))
				dt_proc_bpdisable(dpr);
			else if (psp->pr_why == PR_SYSEXIT &&
			    IS_SYS_FORK(psp->pr_what))
				dt_proc_bpenable(dpr);
			else if (psp->pr_why == PR_SYSEXIT &&
			    IS_SYS_EXEC(psp->pr_what))
				dt_proc_attach(dpr, B_TRUE);
			break;

		case PS_LOST:
#ifdef illumos
			if (Preopen(P) == 0)
				goto pwait_locked;
#endif

			dt_dprintf("pid %d: proc lost: %s\n",
			    pid, strerror(errno));

			dpr->dpr_quit = B_TRUE;
			notify = B_TRUE;
			break;

		case PS_UNDEAD:
			dt_dprintf("pid %d: proc died\n", pid);
			dpr->dpr_quit = B_TRUE;
			notify = B_TRUE;
			break;
		}

		if (Pstate(P) != PS_UNDEAD && Psetrun(P, 0, 0) == -1) {
			dt_dprintf("pid %d: failed to set running: %s\n",
			    (int)dpr->dpr_pid, strerror(errno));
		}

		(void) pthread_mutex_unlock(&dpr->dpr_lock);
	}

	/*
	 * If the control thread detected PS_UNDEAD or PS_LOST, then enqueue
	 * the dt_proc_t structure on the dt_proc_hash_t notification list.
	 */
	if (notify)
		dt_proc_notify(dtp, dph, dpr, NULL);

	/*
	 * Destroy and remove any remaining breakpoints, set dpr_done and clear
	 * dpr_tid to indicate the control thread has exited, and notify any
	 * waiting thread in dt_proc_destroy() that we have successfully exited.
	 */
	(void) pthread_mutex_lock(&dpr->dpr_lock);

	dt_proc_bpdestroy(dpr, B_TRUE);
	dpr->dpr_done = B_TRUE;
	dpr->dpr_tid = 0;

	(void) pthread_cond_broadcast(&dpr->dpr_cv);
	(void) pthread_mutex_unlock(&dpr->dpr_lock);

	return (NULL);
}

/*PRINTFLIKE3*/
static struct ps_prochandle *
dt_proc_error(dtrace_hdl_t *dtp, dt_proc_t *dpr, const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	dt_set_errmsg(dtp, NULL, NULL, NULL, 0, format, ap);
	va_end(ap);

	if (dpr->dpr_proc != NULL)
		Prelease(dpr->dpr_proc, 0);

	dt_free(dtp, dpr);
	(void) dt_set_errno(dtp, EDT_COMPILER);
	return (NULL);
}

dt_proc_t *
dt_proc_lookup(dtrace_hdl_t *dtp, struct ps_prochandle *P, int remove)
{
	dt_proc_hash_t *dph = dtp->dt_procs;
#ifdef illumos
	pid_t pid = Pstatus(P)->pr_pid;
#else
	pid_t pid = proc_getpid(P);
#endif
	dt_proc_t *dpr, **dpp = &dph->dph_hash[pid & (dph->dph_hashlen - 1)];

	for (dpr = *dpp; dpr != NULL; dpr = dpr->dpr_hash) {
		if (dpr->dpr_pid == pid)
			break;
		else
			dpp = &dpr->dpr_hash;
	}

	assert(dpr != NULL);
	assert(dpr->dpr_proc == P);

	if (remove)
		*dpp = dpr->dpr_hash; /* remove from pid hash chain */

	return (dpr);
}

static void
dt_proc_destroy(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
	dt_proc_hash_t *dph = dtp->dt_procs;
	dt_proc_notify_t *npr, **npp;
	int rflag;

	assert(dpr != NULL);

	/*
	 * If neither PR_KLC nor PR_RLC is set, then the process is stopped by
	 * an external debugger and we were waiting in dt_proc_waitrun().
	 * Leave the process in this condition using PRELEASE_HANG.
	 */
#ifdef illumos
	if (!(Pstatus(dpr->dpr_proc)->pr_flags & (PR_KLC | PR_RLC))) {
#else
	if (!(proc_getflags(dpr->dpr_proc) & (PR_KLC | PR_RLC))) {
#endif
		dt_dprintf("abandoning pid %d\n", (int)dpr->dpr_pid);
		rflag = PRELEASE_HANG;
#ifdef illumos
	} else if (Pstatus(dpr->dpr_proc)->pr_flags & PR_KLC) {
#else
	} else if (proc_getflags(dpr->dpr_proc) & PR_KLC) {
#endif
		dt_dprintf("killing pid %d\n", (int)dpr->dpr_pid);
		rflag = PRELEASE_KILL; /* apply kill-on-last-close */
	} else {
		dt_dprintf("releasing pid %d\n", (int)dpr->dpr_pid);
		rflag = 0; /* apply run-on-last-close */
	}

	if (dpr->dpr_tid) {
		/*
		 * Set the dpr_quit flag to tell the daemon thread to exit. We
		 * send it a SIGCANCEL to poke it out of PCWSTOP or any other
		 * long-term /proc system call. Our daemon threads have POSIX
		 * cancellation disabled, so EINTR will be the only effect. We
		 * then wait for dpr_done to indicate the thread has exited.
		 *
		 * We can't use pthread_kill() to send SIGCANCEL because the
		 * interface forbids it and we can't use pthread_cancel()
		 * because with cancellation disabled it won't actually
		 * send SIGCANCEL to the target thread, so we use _lwp_kill()
		 * to do the job. This is all built on evil knowledge of
		 * the details of the cancellation mechanism in libc.
		 */
		(void) pthread_mutex_lock(&dpr->dpr_lock);
		dpr->dpr_quit = B_TRUE;
#ifdef illumos
		(void) _lwp_kill(dpr->dpr_tid, SIGCANCEL);
#else
		pthread_kill(dpr->dpr_tid, SIGTHR);
#endif

		/*
		 * If the process is currently idling in dt_proc_stop(), re-
		 * enable breakpoints and poke it into running again.
		 */
		if (dpr->dpr_stop & DT_PROC_STOP_IDLE) {
			dt_proc_bpenable(dpr);
			dpr->dpr_stop &= ~DT_PROC_STOP_IDLE;
			(void) pthread_cond_broadcast(&dpr->dpr_cv);
		}

		while (!dpr->dpr_done)
			(void) pthread_cond_wait(&dpr->dpr_cv, &dpr->dpr_lock);

		(void) pthread_mutex_unlock(&dpr->dpr_lock);
	}

	/*
	 * Before we free the process structure, remove this dt_proc_t from the
	 * lookup hash, and then walk the dt_proc_hash_t's notification list
	 * and remove this dt_proc_t if it is enqueued.
	 */
	(void) pthread_mutex_lock(&dph->dph_lock);
	(void) dt_proc_lookup(dtp, P, B_TRUE);
	npp = &dph->dph_notify;

	while ((npr = *npp) != NULL) {
		if (npr->dprn_dpr == dpr) {
			*npp = npr->dprn_next;
			dt_free(dtp, npr);
		} else {
			npp = &npr->dprn_next;
		}
	}

	(void) pthread_mutex_unlock(&dph->dph_lock);

	/*
	 * Remove the dt_proc_t from the LRU list, release the underlying
	 * libproc handle, and free our dt_proc_t data structure.
	 */
	if (dpr->dpr_cacheable) {
		assert(dph->dph_lrucnt != 0);
		dph->dph_lrucnt--;
	}

	dt_list_delete(&dph->dph_lrulist, dpr);
	Prelease(dpr->dpr_proc, rflag);
	dt_free(dtp, dpr);
}

static int
dt_proc_create_thread(dtrace_hdl_t *dtp, dt_proc_t *dpr, uint_t stop)
{
	dt_proc_control_data_t data;
	sigset_t nset, oset;
	pthread_attr_t a;
	int err;

	(void) pthread_mutex_lock(&dpr->dpr_lock);
	dpr->dpr_stop |= stop; /* set bit for initial rendezvous */

	(void) pthread_attr_init(&a);
	(void) pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);

	(void) sigfillset(&nset);
	(void) sigdelset(&nset, SIGABRT);	/* unblocked for assert() */
#ifdef illumos
	(void) sigdelset(&nset, SIGCANCEL);	/* see dt_proc_destroy() */
#else
	(void) sigdelset(&nset, SIGUSR1);	/* see dt_proc_destroy() */
#endif

	data.dpcd_hdl = dtp;
	data.dpcd_proc = dpr;

	(void) pthread_sigmask(SIG_SETMASK, &nset, &oset);
	err = pthread_create(&dpr->dpr_tid, &a, dt_proc_control, &data);
	(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);

	/*
	 * If the control thread was created, then wait on dpr_cv for either
	 * dpr_done to be set (the victim died or the control thread failed)
	 * or DT_PROC_STOP_IDLE to be set, indicating that the victim is now
	 * stopped by /proc and the control thread is at the rendezvous event.
	 * On success, we return with the process and control thread stopped:
	 * the caller can then apply dt_proc_continue() to resume both.
	 */
	if (err == 0) {
		while (!dpr->dpr_done && !(dpr->dpr_stop & DT_PROC_STOP_IDLE))
			(void) pthread_cond_wait(&dpr->dpr_cv, &dpr->dpr_lock);

		/*
		 * If dpr_done is set, the control thread aborted before it
		 * reached the rendezvous event. This is either due to PS_LOST
		 * or PS_UNDEAD (i.e. the process died). We try to provide a
		 * small amount of useful information to help figure it out.
		 */
		if (dpr->dpr_done) {
#ifdef illumos
			const psinfo_t *prp = Ppsinfo(dpr->dpr_proc);
			int stat = prp ? prp->pr_wstat : 0;
			int pid = dpr->dpr_pid;
#else
			int stat = proc_getwstat(dpr->dpr_proc);
			int pid = proc_getpid(dpr->dpr_proc);
#endif
			if (proc_state(dpr->dpr_proc) == PS_LOST) {
				(void) dt_proc_error(dpr->dpr_hdl, dpr,
				    "failed to control pid %d: process exec'd "
				    "set-id or unobservable program\n", pid);
			} else if (WIFSIGNALED(stat)) {
				(void) dt_proc_error(dpr->dpr_hdl, dpr,
				    "failed to control pid %d: process died "
				    "from signal %d\n", pid, WTERMSIG(stat));
			} else {
				(void) dt_proc_error(dpr->dpr_hdl, dpr,
				    "failed to control pid %d: process exited "
				    "with status %d\n", pid, WEXITSTATUS(stat));
			}

			err = ESRCH; /* cause grab() or create() to fail */
		}
	} else {
		(void) dt_proc_error(dpr->dpr_hdl, dpr,
		    "failed to create control thread for process-id %d: %s\n",
		    (int)dpr->dpr_pid, strerror(err));
	}

	if (err == 0)
		(void) pthread_mutex_unlock(&dpr->dpr_lock);
	(void) pthread_attr_destroy(&a);

	return (err);
}

struct ps_prochandle *
dt_proc_create(dtrace_hdl_t *dtp, const char *file, char *const *argv,
    proc_child_func *pcf, void *child_arg)
{
	dt_proc_hash_t *dph = dtp->dt_procs;
	dt_proc_t *dpr;
	int err;

	if ((dpr = dt_zalloc(dtp, sizeof (dt_proc_t))) == NULL)
		return (NULL); /* errno is set for us */

	(void) pthread_mutex_init(&dpr->dpr_lock, NULL);
	(void) pthread_cond_init(&dpr->dpr_cv, NULL);

#ifdef illumos
	if ((dpr->dpr_proc = Pcreate(file, argv, &err, NULL, 0)) == NULL) {
#else
	if ((err = proc_create(file, argv, pcf, child_arg,
	    &dpr->dpr_proc)) != 0) {
#endif
		return (dt_proc_error(dtp, dpr,
		    "failed to execute %s: %s\n", file, Pcreate_error(err)));
	}

	dpr->dpr_hdl = dtp;
#ifdef illumos
	dpr->dpr_pid = Pstatus(dpr->dpr_proc)->pr_pid;
#else
	dpr->dpr_pid = proc_getpid(dpr->dpr_proc);
#endif

	(void) Punsetflags(dpr->dpr_proc, PR_RLC);
	(void) Psetflags(dpr->dpr_proc, PR_KLC);

	if (dt_proc_create_thread(dtp, dpr, dtp->dt_prcmode) != 0)
		return (NULL); /* dt_proc_error() has been called for us */

	dpr->dpr_hash = dph->dph_hash[dpr->dpr_pid & (dph->dph_hashlen - 1)];
	dph->dph_hash[dpr->dpr_pid & (dph->dph_hashlen - 1)] = dpr;
	dt_list_prepend(&dph->dph_lrulist, dpr);

	dt_dprintf("created pid %d\n", (int)dpr->dpr_pid);
	dpr->dpr_refs++;

	return (dpr->dpr_proc);
}

struct ps_prochandle *
dt_proc_grab(dtrace_hdl_t *dtp, pid_t pid, int flags, int nomonitor)
{
	dt_proc_hash_t *dph = dtp->dt_procs;
	uint_t h = pid & (dph->dph_hashlen - 1);
	dt_proc_t *dpr, *opr;
	int err;

	/*
	 * Search the hash table for the pid. If it is already grabbed or
	 * created, move the handle to the front of the lrulist, increment
	 * the reference count, and return the existing ps_prochandle.
	 */
	for (dpr = dph->dph_hash[h]; dpr != NULL; dpr = dpr->dpr_hash) {
		if (dpr->dpr_pid == pid && !dpr->dpr_stale) {
			/*
			 * If the cached handle was opened read-only and
			 * this request is for a writeable handle, mark
			 * the cached handle as stale and open a new handle.
			 * Since it's stale, unmark it as cacheable.
			 */
			if (dpr->dpr_rdonly && !(flags & PGRAB_RDONLY)) {
				dt_dprintf("upgrading pid %d\n", (int)pid);
				dpr->dpr_stale = B_TRUE;
				dpr->dpr_cacheable = B_FALSE;
				dph->dph_lrucnt--;
				break;
			}

			dt_dprintf("grabbed pid %d (cached)\n", (int)pid);
			dt_list_delete(&dph->dph_lrulist, dpr);
			dt_list_prepend(&dph->dph_lrulist, dpr);
			dpr->dpr_refs++;
			return (dpr->dpr_proc);
		}
	}

	if ((dpr = dt_zalloc(dtp, sizeof (dt_proc_t))) == NULL)
		return (NULL); /* errno is set for us */

	(void) pthread_mutex_init(&dpr->dpr_lock, NULL);
	(void) pthread_cond_init(&dpr->dpr_cv, NULL);

#ifdef illumos
	if ((dpr->dpr_proc = Pgrab(pid, flags, &err)) == NULL) {
#else
	if ((err = proc_attach(pid, flags, &dpr->dpr_proc)) != 0) {
#endif
		return (dt_proc_error(dtp, dpr,
		    "failed to grab pid %d: %s\n", (int)pid, Pgrab_error(err)));
	}

	dpr->dpr_hdl = dtp;
	dpr->dpr_pid = pid;

	(void) Punsetflags(dpr->dpr_proc, PR_KLC);
	(void) Psetflags(dpr->dpr_proc, PR_RLC);

	/*
	 * If we are attempting to grab the process without a monitor
	 * thread, then mark the process cacheable only if it's being
	 * grabbed read-only. If we're currently caching more process
	 * handles than dph_lrulim permits, attempt to find the
	 * least-recently-used handle that is currently unreferenced and
	 * release it from the cache. Otherwise we are grabbing the process
	 * for control: create a control thread for this process and store
	 * its ID in dpr->dpr_tid.
	 */
	if (nomonitor || (flags & PGRAB_RDONLY)) {
		if (dph->dph_lrucnt >= dph->dph_lrulim) {
			for (opr = dt_list_prev(&dph->dph_lrulist);
			    opr != NULL; opr = dt_list_prev(opr)) {
				if (opr->dpr_cacheable && opr->dpr_refs == 0) {
					dt_proc_destroy(dtp, opr->dpr_proc);
					break;
				}
			}
		}

		if (flags & PGRAB_RDONLY) {
			dpr->dpr_cacheable = B_TRUE;
			dpr->dpr_rdonly = B_TRUE;
			dph->dph_lrucnt++;
		}

	} else if (dt_proc_create_thread(dtp, dpr, DT_PROC_STOP_GRAB) != 0)
		return (NULL); /* dt_proc_error() has been called for us */

	dpr->dpr_hash = dph->dph_hash[h];
	dph->dph_hash[h] = dpr;
	dt_list_prepend(&dph->dph_lrulist, dpr);

	dt_dprintf("grabbed pid %d\n", (int)pid);
	dpr->dpr_refs++;

	return (dpr->dpr_proc);
}

void
dt_proc_release(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
	dt_proc_hash_t *dph = dtp->dt_procs;

	assert(dpr != NULL);
	assert(dpr->dpr_refs != 0);

	if (--dpr->dpr_refs == 0 &&
	    (!dpr->dpr_cacheable || dph->dph_lrucnt > dph->dph_lrulim))
		dt_proc_destroy(dtp, P);
}

void
dt_proc_continue(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);

	(void) pthread_mutex_lock(&dpr->dpr_lock);

	if (dpr->dpr_stop & DT_PROC_STOP_IDLE) {
		dpr->dpr_stop &= ~DT_PROC_STOP_IDLE;
		(void) pthread_cond_broadcast(&dpr->dpr_cv);
	}

	(void) pthread_mutex_unlock(&dpr->dpr_lock);
}

void
dt_proc_lock(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
	int err = pthread_mutex_lock(&dpr->dpr_lock);
	assert(err == 0); /* check for recursion */
}

void
dt_proc_unlock(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
	int err = pthread_mutex_unlock(&dpr->dpr_lock);
	assert(err == 0); /* check for unheld lock */
}
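
/*
 * Usage sketch (illustrative only, not additional library code): libdtrace
 * code that issues libproc calls against a handle shared with a control
 * thread brackets them with the wrappers above, since libproc itself is not
 * MT-Safe:
 *
 *	dt_proc_lock(dtp, P);
 *	// ... Pupdate_syms(P), Pxlookup_by_name(P, ...), etc. ...
 *	dt_proc_unlock(dtp, P);
 */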

void
dt_proc_hash_create(dtrace_hdl_t *dtp)
{
	if ((dtp->dt_procs = dt_zalloc(dtp, sizeof (dt_proc_hash_t) +
	    sizeof (dt_proc_t *) * _dtrace_pidbuckets - 1)) != NULL) {

		(void) pthread_mutex_init(&dtp->dt_procs->dph_lock, NULL);
		(void) pthread_cond_init(&dtp->dt_procs->dph_cv, NULL);

		dtp->dt_procs->dph_hashlen = _dtrace_pidbuckets;
		dtp->dt_procs->dph_lrulim = _dtrace_pidlrulim;
	}
}

void
dt_proc_hash_destroy(dtrace_hdl_t *dtp)
{
	dt_proc_hash_t *dph = dtp->dt_procs;
	dt_proc_t *dpr;

	while ((dpr = dt_list_next(&dph->dph_lrulist)) != NULL)
		dt_proc_destroy(dtp, dpr->dpr_proc);

	dtp->dt_procs = NULL;
	dt_free(dtp, dph);
}

struct ps_prochandle *
dtrace_proc_create(dtrace_hdl_t *dtp, const char *file, char *const *argv,
    proc_child_func *pcf, void *child_arg)
{
	dt_ident_t *idp = dt_idhash_lookup(dtp->dt_macros, "target");
	struct ps_prochandle *P = dt_proc_create(dtp, file, argv, pcf,
	    child_arg);

	if (P != NULL && idp != NULL && idp->di_id == 0) {
#ifdef illumos
		idp->di_id = Pstatus(P)->pr_pid; /* $target = created pid */
#else
		idp->di_id = proc_getpid(P); /* $target = created pid */
#endif
	}

	return (P);
}

struct ps_prochandle *
dtrace_proc_grab(dtrace_hdl_t *dtp, pid_t pid, int flags)
{
	dt_ident_t *idp = dt_idhash_lookup(dtp->dt_macros, "target");
	struct ps_prochandle *P = dt_proc_grab(dtp, pid, flags, 0);

	if (P != NULL && idp != NULL && idp->di_id == 0)
		idp->di_id = pid; /* $target = grabbed pid */

	return (P);
}

void
dtrace_proc_release(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_release(dtp, P);
}

void
dtrace_proc_continue(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_continue(dtp, P);
}
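
/*
 * Illustrative client flow (an editorial sketch, not part of the library):
 * a consumer such as dtrace(1M) -c creates the victim stopped, compiles and
 * enables its instrumentation while $target is bound to the new pid, and then
 * resumes and ultimately releases the process:
 *
 *	struct ps_prochandle *P = dtrace_proc_create(dtp, file, argv,
 *	    NULL, NULL);
 *
 *	if (P != NULL) {
 *		// ... compile/enable probes that reference $target ...
 *		dtrace_proc_continue(dtp, P);	// release the rendezvous
 *		// ... tracing session ...
 *		dtrace_proc_release(dtp, P);
 *	}
 */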