/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * DTrace Process Control
 *
 * This file provides a set of routines that permit libdtrace and its clients
 * to create and grab process handles using libproc, and to share these handles
 * between library mechanisms that need libproc access, such as ustack(), and
 * client mechanisms that need libproc access, such as dtrace(1M) -c and -p.
 * The library provides several mechanisms in the libproc control layer:
 *
 * Reference Counting: The library code and client code can independently grab
 * the same process handles without interfering with one another. Only when
 * the reference count drops to zero and the handle is not being cached (see
 * below for more information on caching) will Prelease() be called on it.
 *
 * Handle Caching: If a handle is grabbed PGRAB_RDONLY (e.g. by ustack()) and
 * the reference count drops to zero, the handle is not immediately released.
 * Instead, libproc handles are maintained on dph_lrulist in order from most-
 * recently accessed to least-recently accessed. Idle handles are maintained
 * until a pre-defined LRU cache limit is exceeded, permitting repeated calls
 * to ustack() to avoid the overhead of releasing and re-grabbing processes.
 *
 * Process Control: For processes that are grabbed for control (~PGRAB_RDONLY)
 * or created by dt_proc_create(), a control thread is created to provide
 * callbacks on process exit and symbol table caching on dlopen()s.
 *
 * MT-Safety: Libproc is not MT-Safe, so dt_proc_lock() and dt_proc_unlock()
 * are provided to synchronize access to the libproc handle between libdtrace
 * code and client code and the control thread's use of the ps_prochandle.
 *
 * NOTE: MT-Safety is NOT provided for libdtrace itself, or for use of the
 * dtrace_proc_grab/dtrace_proc_create mechanisms. Like all exported libdtrace
 * calls, these are assumed to be MT-Unsafe. MT-Safety is ONLY provided for
 * synchronization between libdtrace control threads and the client thread.
 *
 * The ps_prochandles themselves are maintained along with a dt_proc_t struct
 * in a hash table indexed by PID. This provides basic locking and reference
 * counting. The dt_proc_t is also maintained in LRU order on dph_lrulist.
 * The dph_lrucnt and dph_lrulim count the number of cacheable processes and
 * the current limit on the number of actively cached entries.
 *
 * The control thread for a process establishes breakpoints at the rtld_db
 * locations of interest, updates mappings and symbol tables at these points,
 * and handles exec and fork (by always following the parent). The control
 * thread automatically exits when the process dies or control is lost.
 *
 * A simple notification mechanism is provided for libdtrace clients using
 * dtrace_handle_proc() for notification of PS_UNDEAD or PS_LOST events. If
 * such an event occurs, the dt_proc_t itself is enqueued on a notification
 * list and the control thread broadcasts to dph_cv. dtrace_sleep() will wake
 * up using this condition and will then call the client handler as necessary.
 */
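
/*
 * Illustrative sketch (not compiled here) of how a libdtrace client such as
 * dtrace(1M) -c is expected to use these interfaces: create the victim, which
 * is left stopped at the initial rendezvous, compile and enable the D program,
 * then resume the victim and drop the reference when finished. fatal() stands
 * in for the caller's own error handling.
 *
 *	struct ps_prochandle *P;
 *
 *	if ((P = dtrace_proc_create(dtp, file, argv, NULL, NULL)) == NULL)
 *		fatal("%s", dtrace_errmsg(dtp, dtrace_errno(dtp)));
 *
 *	... compile and enable the program; $target expands to the new pid ...
 *
 *	dtrace_proc_continue(dtp, P);
 *	...
 *	dtrace_proc_release(dtp, P);
 */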

#include <sys/wait.h>
#if defined(sun)
#include <sys/lwp.h>
#endif
#include <strings.h>
#include <signal.h>
#include <assert.h>
#include <errno.h>

#include <dt_proc.h>
#include <dt_pid.h>
#include <dt_impl.h>

#if !defined(sun)
#include <sys/syscall.h>
#include <libproc_compat.h>
#define	SYS_forksys SYS_fork
#endif

#define	IS_SYS_EXEC(w)	(w == SYS_execve)
#define	IS_SYS_FORK(w)	(w == SYS_vfork || w == SYS_forksys)

static dt_bkpt_t *
dt_proc_bpcreate(dt_proc_t *dpr, uintptr_t addr, dt_bkpt_f *func, void *data)
{
	struct ps_prochandle *P = dpr->dpr_proc;
	dt_bkpt_t *dbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	if ((dbp = dt_zalloc(dpr->dpr_hdl, sizeof (dt_bkpt_t))) != NULL) {
		dbp->dbp_func = func;
		dbp->dbp_data = data;
		dbp->dbp_addr = addr;

		if (Psetbkpt(P, dbp->dbp_addr, &dbp->dbp_instr) == 0)
			dbp->dbp_active = B_TRUE;

		dt_list_append(&dpr->dpr_bps, dbp);
	}

	return (dbp);
}

static void
dt_proc_bpdestroy(dt_proc_t *dpr, int delbkpts)
{
	int state = Pstate(dpr->dpr_proc);
	dt_bkpt_t *dbp, *nbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	for (dbp = dt_list_next(&dpr->dpr_bps); dbp != NULL; dbp = nbp) {
		if (delbkpts && dbp->dbp_active &&
		    state != PS_LOST && state != PS_UNDEAD) {
			(void) Pdelbkpt(dpr->dpr_proc,
			    dbp->dbp_addr, dbp->dbp_instr);
		}
		nbp = dt_list_next(dbp);
		dt_list_delete(&dpr->dpr_bps, dbp);
		dt_free(dpr->dpr_hdl, dbp);
	}
}

static void
dt_proc_bpmatch(dtrace_hdl_t *dtp, dt_proc_t *dpr)
{
#if defined(sun)
	const lwpstatus_t *psp = &Pstatus(dpr->dpr_proc)->pr_lwp;
#else
	unsigned long pc;
#endif
	dt_bkpt_t *dbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

#if !defined(sun)
	proc_regget(dpr->dpr_proc, REG_PC, &pc);
	proc_bkptregadj(&pc);
#endif

	for (dbp = dt_list_next(&dpr->dpr_bps);
	    dbp != NULL; dbp = dt_list_next(dbp)) {
#if defined(sun)
		if (psp->pr_reg[R_PC] == dbp->dbp_addr)
			break;
#else
		if (pc == dbp->dbp_addr)
			break;
#endif
	}

	if (dbp == NULL) {
		dt_dprintf("pid %d: spurious breakpoint wakeup for %lx\n",
#if defined(sun)
		    (int)dpr->dpr_pid, (ulong_t)psp->pr_reg[R_PC]);
#else
		    (int)dpr->dpr_pid, pc);
#endif
		return;
	}

	dt_dprintf("pid %d: hit breakpoint at %lx (%lu)\n",
	    (int)dpr->dpr_pid, (ulong_t)dbp->dbp_addr, ++dbp->dbp_hits);

	dbp->dbp_func(dtp, dpr, dbp->dbp_data);
	(void) Pxecbkpt(dpr->dpr_proc, dbp->dbp_instr);
}
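
/*
 * Re-install any breakpoints that are not currently active in the victim.
 * This undoes dt_proc_bpdisable(), which temporarily removes breakpoints
 * while the victim is stopped at a rendezvous or executing a fork().
 */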
static void
dt_proc_bpenable(dt_proc_t *dpr)
{
	dt_bkpt_t *dbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	for (dbp = dt_list_next(&dpr->dpr_bps);
	    dbp != NULL; dbp = dt_list_next(dbp)) {
		if (!dbp->dbp_active && Psetbkpt(dpr->dpr_proc,
		    dbp->dbp_addr, &dbp->dbp_instr) == 0)
			dbp->dbp_active = B_TRUE;
	}

	dt_dprintf("breakpoints enabled\n");
}

static void
dt_proc_bpdisable(dt_proc_t *dpr)
{
	dt_bkpt_t *dbp;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	for (dbp = dt_list_next(&dpr->dpr_bps);
	    dbp != NULL; dbp = dt_list_next(dbp)) {
		if (dbp->dbp_active && Pdelbkpt(dpr->dpr_proc,
		    dbp->dbp_addr, dbp->dbp_instr) == 0)
			dbp->dbp_active = B_FALSE;
	}

	dt_dprintf("breakpoints disabled\n");
}

static void
dt_proc_notify(dtrace_hdl_t *dtp, dt_proc_hash_t *dph, dt_proc_t *dpr,
    const char *msg)
{
	dt_proc_notify_t *dprn = dt_alloc(dtp, sizeof (dt_proc_notify_t));

	if (dprn == NULL) {
		dt_dprintf("failed to allocate notification for %d %s\n",
		    (int)dpr->dpr_pid, msg);
	} else {
		dprn->dprn_dpr = dpr;
		if (msg == NULL)
			dprn->dprn_errmsg[0] = '\0';
		else
			(void) strlcpy(dprn->dprn_errmsg, msg,
			    sizeof (dprn->dprn_errmsg));

		(void) pthread_mutex_lock(&dph->dph_lock);

		dprn->dprn_next = dph->dph_notify;
		dph->dph_notify = dprn;

		(void) pthread_cond_broadcast(&dph->dph_cv);
		(void) pthread_mutex_unlock(&dph->dph_lock);
	}
}

/*
 * Check to see if the control thread was requested to stop when the victim
 * process reached a particular event (why) rather than continuing the victim.
 * If 'why' is set in the stop mask, we wait on dpr_cv for dt_proc_continue().
 * If 'why' is not set, this function returns immediately and does nothing.
 */
static void
dt_proc_stop(dt_proc_t *dpr, uint8_t why)
{
	assert(DT_MUTEX_HELD(&dpr->dpr_lock));
	assert(why != DT_PROC_STOP_IDLE);

	if (dpr->dpr_stop & why) {
		dpr->dpr_stop |= DT_PROC_STOP_IDLE;
		dpr->dpr_stop &= ~why;

		(void) pthread_cond_broadcast(&dpr->dpr_cv);

		/*
		 * We disable breakpoints while stopped to preserve the
		 * integrity of the program text for both our own disassembly
		 * and that of the kernel.
		 */
		dt_proc_bpdisable(dpr);

		while (dpr->dpr_stop & DT_PROC_STOP_IDLE)
			(void) pthread_cond_wait(&dpr->dpr_cv, &dpr->dpr_lock);

		dt_proc_bpenable(dpr);
	}
}

/*ARGSUSED*/
static void
dt_proc_bpmain(dtrace_hdl_t *dtp, dt_proc_t *dpr, const char *fname)
{
	dt_dprintf("pid %d: breakpoint at %s()\n", (int)dpr->dpr_pid, fname);
	dt_proc_stop(dpr, DT_PROC_STOP_MAIN);
}
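
/*
 * Breakpoint callback for the rtld event breakpoints set up by
 * dt_proc_rdwatch(): fetch the pending rd_event_msg_t and update symbol
 * tables, pid provider probes, or the preinit/postinit rendezvous state as
 * appropriate.
 */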
static void
dt_proc_rdevent(dtrace_hdl_t *dtp, dt_proc_t *dpr, const char *evname)
{
	rd_event_msg_t rdm;
	rd_err_e err;

	if ((err = rd_event_getmsg(dpr->dpr_rtld, &rdm)) != RD_OK) {
		dt_dprintf("pid %d: failed to get %s event message: %s\n",
		    (int)dpr->dpr_pid, evname, rd_errstr(err));
		return;
	}

	dt_dprintf("pid %d: rtld event %s type=%d state %d\n",
	    (int)dpr->dpr_pid, evname, rdm.type, rdm.u.state);

	switch (rdm.type) {
	case RD_DLACTIVITY:
		if (rdm.u.state != RD_CONSISTENT)
			break;

		Pupdate_syms(dpr->dpr_proc);
		if (dt_pid_create_probes_module(dtp, dpr) != 0)
			dt_proc_notify(dtp, dtp->dt_procs, dpr,
			    dpr->dpr_errmsg);

		break;
	case RD_PREINIT:
		Pupdate_syms(dpr->dpr_proc);
		dt_proc_stop(dpr, DT_PROC_STOP_PREINIT);
		break;
	case RD_POSTINIT:
		Pupdate_syms(dpr->dpr_proc);
		dt_proc_stop(dpr, DT_PROC_STOP_POSTINIT);
		break;
	}
}

static void
dt_proc_rdwatch(dt_proc_t *dpr, rd_event_e event, const char *evname)
{
	rd_notify_t rdn;
	rd_err_e err;

	if ((err = rd_event_addr(dpr->dpr_rtld, event, &rdn)) != RD_OK) {
		dt_dprintf("pid %d: failed to get event address for %s: %s\n",
		    (int)dpr->dpr_pid, evname, rd_errstr(err));
		return;
	}

	if (rdn.type != RD_NOTIFY_BPT) {
		dt_dprintf("pid %d: event %s has unexpected type %d\n",
		    (int)dpr->dpr_pid, evname, rdn.type);
		return;
	}

	(void) dt_proc_bpcreate(dpr, rdn.u.bptaddr,
#if defined(sun)
	    (dt_bkpt_f *)dt_proc_rdevent, (void *)evname);
#else
	    /* XXX ugly */
	    (dt_bkpt_f *)dt_proc_rdevent, __DECONST(void *, evname));
#endif
}

/*
 * Common code for enabling events associated with the run-time linker after
 * attaching to a process or after a victim process completes an exec(2).
 */
static void
dt_proc_attach(dt_proc_t *dpr, int exec)
{
#if defined(sun)
	const pstatus_t *psp = Pstatus(dpr->dpr_proc);
#endif
	rd_err_e err;
	GElf_Sym sym;

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));

	if (exec) {
#if defined(sun)
		if (psp->pr_lwp.pr_errno != 0)
			return; /* exec failed: nothing needs to be done */
#endif

		dt_proc_bpdestroy(dpr, B_FALSE);
#if defined(sun)
		Preset_maps(dpr->dpr_proc);
#endif
	}
	if ((dpr->dpr_rtld = Prd_agent(dpr->dpr_proc)) != NULL &&
	    (err = rd_event_enable(dpr->dpr_rtld, B_TRUE)) == RD_OK) {
#if defined(sun)
		dt_proc_rdwatch(dpr, RD_PREINIT, "RD_PREINIT");
#endif
		dt_proc_rdwatch(dpr, RD_POSTINIT, "RD_POSTINIT");
#if defined(sun)
		dt_proc_rdwatch(dpr, RD_DLACTIVITY, "RD_DLACTIVITY");
#endif
	} else {
		dt_dprintf("pid %d: failed to enable rtld events: %s\n",
		    (int)dpr->dpr_pid, dpr->dpr_rtld ? rd_errstr(err) :
		    "rtld_db agent initialization failed");
	}
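
	/*
	 * Whether or not the rtld events above could be enabled, try to set a
	 * breakpoint on the victim's main() so that dt_proc_bpmain() can
	 * honor a DT_PROC_STOP_MAIN rendezvous once a.out's symbols are
	 * available.
	 */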
	Pupdate_maps(dpr->dpr_proc);

	if (Pxlookup_by_name(dpr->dpr_proc, LM_ID_BASE,
	    "a.out", "main", &sym, NULL) == 0) {
		(void) dt_proc_bpcreate(dpr, (uintptr_t)sym.st_value,
		    (dt_bkpt_f *)dt_proc_bpmain, "a.out`main");
	} else {
		dt_dprintf("pid %d: failed to find a.out`main: %s\n",
		    (int)dpr->dpr_pid, strerror(errno));
	}
}

/*
 * Wait for a stopped process to be set running again by some other debugger.
 * This is typically not required by /proc-based debuggers, since the usual
 * model is that one debugger controls one victim. But DTrace, as usual, has
 * its own needs: the stop() action assumes that prun(1) or some other tool
 * will be applied to resume the victim process. This could be solved by
 * adding a PCWRUN directive to /proc, but that seems like overkill unless
 * other debuggers end up needing this functionality, so we implement a cheap
 * equivalent to PCWRUN using the set of existing kernel mechanisms.
 *
 * Our intent is really not just to wait for the victim to run, but rather to
 * wait for it to run and then stop again for a reason other than the current
 * PR_REQUESTED stop. Since PCWSTOP/Pstopstatus() can be applied repeatedly
 * to a stopped process and will return the same result without affecting the
 * victim, we can just perform these operations repeatedly until Pstate()
 * changes, the representative LWP ID changes, or the stop timestamp advances.
 * dt_proc_control() will then rediscover the new state and continue as usual.
 * When the process is still stopped in the same exact state, we sleep for a
 * brief interval before waiting again so as not to spin consuming CPU cycles.
 */
static void
dt_proc_waitrun(dt_proc_t *dpr)
{
	printf("%s:%s(%d): DOODAD\n", __FUNCTION__, __FILE__, __LINE__);
#ifdef DOODAD
	struct ps_prochandle *P = dpr->dpr_proc;
	const lwpstatus_t *psp = &Pstatus(P)->pr_lwp;

	int krflag = psp->pr_flags & (PR_KLC | PR_RLC);
	timestruc_t tstamp = psp->pr_tstamp;
	lwpid_t lwpid = psp->pr_lwpid;

	const long wstop = PCWSTOP;
	int pfd = Pctlfd(P);

	assert(DT_MUTEX_HELD(&dpr->dpr_lock));
	assert(psp->pr_flags & PR_STOPPED);
	assert(Pstate(P) == PS_STOP);

	/*
	 * While we are waiting for the victim to run, clear PR_KLC and PR_RLC
	 * so that if the libdtrace client is killed, the victim stays stopped.
	 * dt_proc_destroy() will also observe this and perform PRELEASE_HANG.
	 */
	(void) Punsetflags(P, krflag);
	Psync(P);

	(void) pthread_mutex_unlock(&dpr->dpr_lock);

	while (!dpr->dpr_quit) {
		if (write(pfd, &wstop, sizeof (wstop)) == -1 && errno == EINTR)
			continue; /* check dpr_quit and continue waiting */

		(void) pthread_mutex_lock(&dpr->dpr_lock);
		(void) Pstopstatus(P, PCNULL, 0);
		psp = &Pstatus(P)->pr_lwp;

		/*
		 * If we've reached a new state, found a new representative, or
		 * the stop timestamp has changed, restore PR_KLC/PR_RLC to its
		 * original setting and then return with dpr_lock held.
		 */
		if (Pstate(P) != PS_STOP || psp->pr_lwpid != lwpid ||
		    bcmp(&psp->pr_tstamp, &tstamp, sizeof (tstamp)) != 0) {
			(void) Psetflags(P, krflag);
			Psync(P);
			return;
		}

		(void) pthread_mutex_unlock(&dpr->dpr_lock);
		(void) poll(NULL, 0, MILLISEC / 2);
	}

	(void) pthread_mutex_lock(&dpr->dpr_lock);
#endif
}
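
/*
 * Argument block passed to each control thread: it bundles the dtrace handle
 * and the dt_proc_t so that both are available to dt_proc_control().
 */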
typedef struct dt_proc_control_data {
	dtrace_hdl_t *dpcd_hdl;			/* DTrace handle */
	dt_proc_t *dpcd_proc;			/* process to control */
} dt_proc_control_data_t;

/*
 * Main loop for all victim process control threads. We initialize all the
 * appropriate /proc control mechanisms, and then enter a loop waiting for
 * the process to stop on an event or die. We process any events by calling
 * appropriate subroutines, and exit when the victim dies or we lose control.
 *
 * The control thread synchronizes the use of dpr_proc with other libdtrace
 * threads using dpr_lock. We hold the lock for all of our operations except
 * waiting while the process is running: this is accomplished by writing a
 * PCWSTOP directive directly to the underlying /proc/<pid>/ctl file. If the
 * libdtrace client wishes to exit or abort our wait, SIGCANCEL can be used.
 */
static void *
dt_proc_control(void *arg)
{
	dt_proc_control_data_t *datap = arg;
	dtrace_hdl_t *dtp = datap->dpcd_hdl;
	dt_proc_t *dpr = datap->dpcd_proc;
	dt_proc_hash_t *dph = dpr->dpr_hdl->dt_procs;
	struct ps_prochandle *P = dpr->dpr_proc;
	int pid = dpr->dpr_pid;

#if defined(sun)
	int pfd = Pctlfd(P);

	const long wstop = PCWSTOP;
#endif
	int notify = B_FALSE;

	/*
	 * We disable the POSIX thread cancellation mechanism so that the
	 * client program using libdtrace can't accidentally cancel our thread.
	 * dt_proc_destroy() uses SIGCANCEL explicitly to simply poke us out
	 * of PCWSTOP with EINTR, at which point we will see dpr_quit and exit.
	 */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);

	/*
	 * Set up the corresponding process for tracing by libdtrace. We want
	 * to be able to catch breakpoints and efficiently single-step over
	 * them, and we need to enable librtld_db to watch libdl activity.
	 */
	(void) pthread_mutex_lock(&dpr->dpr_lock);

#if defined(sun)
	(void) Punsetflags(P, PR_ASYNC);	/* require synchronous mode */
	(void) Psetflags(P, PR_BPTADJ);		/* always adjust eip on x86 */
	(void) Punsetflags(P, PR_FORK);		/* do not inherit on fork */

	(void) Pfault(P, FLTBPT, B_TRUE);	/* always trace breakpoints */
	(void) Pfault(P, FLTTRACE, B_TRUE);	/* always trace single-step */

	/*
	 * We must trace exit from exec() system calls so that if the exec is
	 * successful, we can reset our breakpoints and re-initialize libproc.
	 */
	(void) Psysexit(P, SYS_execve, B_TRUE);

	/*
	 * We must trace entry and exit for fork() system calls in order to
	 * disable our breakpoints temporarily during the fork. We do not set
	 * the PR_FORK flag, so if fork succeeds the child begins executing and
	 * does not inherit any other tracing behaviors or a control thread.
	 */
	(void) Psysentry(P, SYS_vfork, B_TRUE);
	(void) Psysexit(P, SYS_vfork, B_TRUE);
	(void) Psysentry(P, SYS_forksys, B_TRUE);
	(void) Psysexit(P, SYS_forksys, B_TRUE);

	Psync(P);				/* enable all /proc changes */
#endif
	dt_proc_attach(dpr, B_FALSE);		/* enable rtld breakpoints */

	/*
	 * If PR_KLC is set, we created the process; otherwise we grabbed it.
	 * Check for an appropriate stop request and wait for dt_proc_continue.
	 */
#if defined(sun)
	if (Pstatus(P)->pr_flags & PR_KLC)
#else
	if (proc_getflags(P) & PR_KLC)
#endif
		dt_proc_stop(dpr, DT_PROC_STOP_CREATE);
	else
		dt_proc_stop(dpr, DT_PROC_STOP_GRAB);
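
	/*
	 * dt_proc_stop() has returned, either immediately or after the client
	 * invoked dt_proc_continue(), so set the victim running again.
	 */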
	if (Psetrun(P, 0, 0) == -1) {
		dt_dprintf("pid %d: failed to set running: %s\n",
		    (int)dpr->dpr_pid, strerror(errno));
	}

	(void) pthread_mutex_unlock(&dpr->dpr_lock);

	/*
	 * Wait for the process corresponding to this control thread to stop,
	 * process the event, and then set it running again. We want to sleep
	 * with dpr_lock *unheld* so that other parts of libdtrace can use the
	 * ps_prochandle in the meantime (e.g. ustack()). To do this, we write
	 * a PCWSTOP directive directly to the underlying /proc/<pid>/ctl file.
	 * Once the process stops, we wake up, grab dpr_lock, and then call
	 * Pwait() (which will return immediately) and do our processing.
	 */
	while (!dpr->dpr_quit) {
		const lwpstatus_t *psp;

#if defined(sun)
		if (write(pfd, &wstop, sizeof (wstop)) == -1 && errno == EINTR)
			continue; /* check dpr_quit and continue waiting */
#else
		/* Wait for the process to report status. */
		proc_wstatus(P);
		if (errno == EINTR)
			continue; /* check dpr_quit and continue waiting */
#endif

		(void) pthread_mutex_lock(&dpr->dpr_lock);

#if defined(sun)
pwait_locked:
		if (Pstopstatus(P, PCNULL, 0) == -1 && errno == EINTR) {
			(void) pthread_mutex_unlock(&dpr->dpr_lock);
			continue; /* check dpr_quit and continue waiting */
		}
#endif

		switch (Pstate(P)) {
		case PS_STOP:
#if defined(sun)
			psp = &Pstatus(P)->pr_lwp;
#else
			psp = proc_getlwpstatus(P);
#endif

			dt_dprintf("pid %d: proc stopped showing %d/%d\n",
			    pid, psp->pr_why, psp->pr_what);

			/*
			 * If the process stops showing PR_REQUESTED, then the
			 * DTrace stop() action was applied to it or another
			 * debugging utility (e.g. pstop(1)) asked it to stop.
			 * In either case, the user's intention is for the
			 * process to remain stopped until another external
			 * mechanism (e.g. prun(1)) is applied. So instead of
			 * setting the process running ourselves, we wait for
			 * someone else to do so. Once that happens, we return
			 * to our normal loop waiting for an event of interest.
			 */
			if (psp->pr_why == PR_REQUESTED) {
				dt_proc_waitrun(dpr);
				(void) pthread_mutex_unlock(&dpr->dpr_lock);
				continue;
			}

			/*
			 * If the process stops showing one of the events that
			 * we are tracing, perform the appropriate response.
			 * Note that we ignore PR_SUSPENDED, PR_CHECKPOINT, and
			 * PR_JOBCONTROL by design: if one of these conditions
			 * occurs, we will fall through to Psetrun() but the
			 * process will remain stopped in the kernel by the
			 * corresponding mechanism (e.g. job control stop).
			 */
			if (psp->pr_why == PR_FAULTED && psp->pr_what == FLTBPT)
				dt_proc_bpmatch(dtp, dpr);
			else if (psp->pr_why == PR_SYSENTRY &&
			    IS_SYS_FORK(psp->pr_what))
				dt_proc_bpdisable(dpr);
			else if (psp->pr_why == PR_SYSEXIT &&
			    IS_SYS_FORK(psp->pr_what))
				dt_proc_bpenable(dpr);
			else if (psp->pr_why == PR_SYSEXIT &&
			    IS_SYS_EXEC(psp->pr_what))
				dt_proc_attach(dpr, B_TRUE);
			break;

		case PS_LOST:
#if defined(sun)
			if (Preopen(P) == 0)
				goto pwait_locked;
#endif

			dt_dprintf("pid %d: proc lost: %s\n",
			    pid, strerror(errno));

			dpr->dpr_quit = B_TRUE;
			notify = B_TRUE;
			break;

		case PS_UNDEAD:
			dt_dprintf("pid %d: proc died\n", pid);
			dpr->dpr_quit = B_TRUE;
			notify = B_TRUE;
			break;
		}
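
		/*
		 * Unless the victim is already gone, set it running again and
		 * loop back around to wait for the next event of interest.
		 */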
		if (Pstate(P) != PS_UNDEAD && Psetrun(P, 0, 0) == -1) {
			dt_dprintf("pid %d: failed to set running: %s\n",
			    (int)dpr->dpr_pid, strerror(errno));
		}

		(void) pthread_mutex_unlock(&dpr->dpr_lock);
	}

	/*
	 * If the control thread detected PS_UNDEAD or PS_LOST, then enqueue
	 * the dt_proc_t structure on the dt_proc_hash_t notification list.
	 */
	if (notify)
		dt_proc_notify(dtp, dph, dpr, NULL);

	/*
	 * Destroy and remove any remaining breakpoints, set dpr_done and clear
	 * dpr_tid to indicate the control thread has exited, and notify any
	 * waiting thread in dt_proc_destroy() that we have successfully
	 * exited.
	 */
	(void) pthread_mutex_lock(&dpr->dpr_lock);

	dt_proc_bpdestroy(dpr, B_TRUE);
	dpr->dpr_done = B_TRUE;
	dpr->dpr_tid = 0;

	(void) pthread_cond_broadcast(&dpr->dpr_cv);
	(void) pthread_mutex_unlock(&dpr->dpr_lock);

	return (NULL);
}

/*PRINTFLIKE3*/
static struct ps_prochandle *
dt_proc_error(dtrace_hdl_t *dtp, dt_proc_t *dpr, const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	dt_set_errmsg(dtp, NULL, NULL, NULL, 0, format, ap);
	va_end(ap);

	if (dpr->dpr_proc != NULL)
		Prelease(dpr->dpr_proc, 0);

	dt_free(dtp, dpr);
	(void) dt_set_errno(dtp, EDT_COMPILER);
	return (NULL);
}

dt_proc_t *
dt_proc_lookup(dtrace_hdl_t *dtp, struct ps_prochandle *P, int remove)
{
	dt_proc_hash_t *dph = dtp->dt_procs;
#if defined(sun)
	pid_t pid = Pstatus(P)->pr_pid;
#else
	pid_t pid = proc_getpid(P);
#endif
	dt_proc_t *dpr, **dpp = &dph->dph_hash[pid & (dph->dph_hashlen - 1)];

	for (dpr = *dpp; dpr != NULL; dpr = dpr->dpr_hash) {
		if (dpr->dpr_pid == pid)
			break;
		else
			dpp = &dpr->dpr_hash;
	}

	assert(dpr != NULL);
	assert(dpr->dpr_proc == P);

	if (remove)
		*dpp = dpr->dpr_hash; /* remove from pid hash chain */

	return (dpr);
}
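
/*
 * Tear down a process handle: stop the control thread if one is running,
 * remove the dt_proc_t from the pid hash, the notification list, and the LRU
 * list, and then release the libproc handle with the appropriate rflag
 * (kill, run, or hang on last close).
 */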
static void
dt_proc_destroy(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
	dt_proc_hash_t *dph = dtp->dt_procs;
	dt_proc_notify_t *npr, **npp;
	int rflag;

	assert(dpr != NULL);

	/*
	 * If neither PR_KLC nor PR_RLC is set, then the process is stopped by
	 * an external debugger and we were waiting in dt_proc_waitrun().
	 * Leave the process in this condition using PRELEASE_HANG.
	 */
#if defined(sun)
	if (!(Pstatus(dpr->dpr_proc)->pr_flags & (PR_KLC | PR_RLC))) {
#else
	if (!(proc_getflags(dpr->dpr_proc) & (PR_KLC | PR_RLC))) {
#endif
		dt_dprintf("abandoning pid %d\n", (int)dpr->dpr_pid);
		rflag = PRELEASE_HANG;
#if defined(sun)
	} else if (Pstatus(dpr->dpr_proc)->pr_flags & PR_KLC) {
#else
	} else if (proc_getflags(dpr->dpr_proc) & PR_KLC) {
#endif
		dt_dprintf("killing pid %d\n", (int)dpr->dpr_pid);
		rflag = PRELEASE_KILL; /* apply kill-on-last-close */
	} else {
		dt_dprintf("releasing pid %d\n", (int)dpr->dpr_pid);
		rflag = 0; /* apply run-on-last-close */
	}

	if (dpr->dpr_tid) {
		/*
		 * Set the dpr_quit flag to tell the daemon thread to exit. We
		 * send it a SIGCANCEL to poke it out of PCWSTOP or any other
		 * long-term /proc system call. Our daemon threads have POSIX
		 * cancellation disabled, so EINTR will be the only effect. We
		 * then wait for dpr_done to indicate the thread has exited.
		 *
		 * We can't use pthread_kill() to send SIGCANCEL because the
		 * interface forbids it and we can't use pthread_cancel()
		 * because with cancellation disabled it won't actually
		 * send SIGCANCEL to the target thread, so we use _lwp_kill()
		 * to do the job. This is all built on evil knowledge of
		 * the details of the cancellation mechanism in libc.
		 */
		(void) pthread_mutex_lock(&dpr->dpr_lock);
		dpr->dpr_quit = B_TRUE;
#if defined(sun)
		(void) _lwp_kill(dpr->dpr_tid, SIGCANCEL);
#else
		pthread_kill(dpr->dpr_tid, SIGTHR);
#endif

		/*
		 * If the process is currently idling in dt_proc_stop(), re-
		 * enable breakpoints and poke it into running again.
		 */
		if (dpr->dpr_stop & DT_PROC_STOP_IDLE) {
			dt_proc_bpenable(dpr);
			dpr->dpr_stop &= ~DT_PROC_STOP_IDLE;
			(void) pthread_cond_broadcast(&dpr->dpr_cv);
		}

		while (!dpr->dpr_done)
			(void) pthread_cond_wait(&dpr->dpr_cv, &dpr->dpr_lock);

		(void) pthread_mutex_unlock(&dpr->dpr_lock);
	}

	/*
	 * Before we free the process structure, remove this dt_proc_t from the
	 * lookup hash, and then walk the dt_proc_hash_t's notification list
	 * and remove this dt_proc_t if it is enqueued.
	 */
	(void) pthread_mutex_lock(&dph->dph_lock);
	(void) dt_proc_lookup(dtp, P, B_TRUE);
	npp = &dph->dph_notify;

	while ((npr = *npp) != NULL) {
		if (npr->dprn_dpr == dpr) {
			*npp = npr->dprn_next;
			dt_free(dtp, npr);
		} else {
			npp = &npr->dprn_next;
		}
	}

	(void) pthread_mutex_unlock(&dph->dph_lock);

	/*
	 * Remove the dt_proc_t from the LRU list, release the underlying
	 * libproc handle, and free our dt_proc_t data structure.
	 */
	if (dpr->dpr_cacheable) {
		assert(dph->dph_lrucnt != 0);
		dph->dph_lrucnt--;
	}

	dt_list_delete(&dph->dph_lrulist, dpr);
	Prelease(dpr->dpr_proc, rflag);
	dt_free(dtp, dpr);
}
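
/*
 * Start the control thread for a newly created or grabbed process and wait on
 * dpr_cv until the victim reaches the requested stop (DT_PROC_STOP_IDLE) or
 * the control thread gives up. Returns zero on success or an errno value on
 * failure, in which case dt_proc_error() has already been called.
 */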
static int
dt_proc_create_thread(dtrace_hdl_t *dtp, dt_proc_t *dpr, uint_t stop)
{
	dt_proc_control_data_t data;
	sigset_t nset, oset;
	pthread_attr_t a;
	int err;

	(void) pthread_mutex_lock(&dpr->dpr_lock);
	dpr->dpr_stop |= stop; /* set bit for initial rendezvous */

	(void) pthread_attr_init(&a);
	(void) pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);

	(void) sigfillset(&nset);
	(void) sigdelset(&nset, SIGABRT);	/* unblocked for assert() */
#if defined(sun)
	(void) sigdelset(&nset, SIGCANCEL);	/* see dt_proc_destroy() */
#else
	(void) sigdelset(&nset, SIGUSR1);	/* see dt_proc_destroy() */
#endif

	data.dpcd_hdl = dtp;
	data.dpcd_proc = dpr;

	(void) pthread_sigmask(SIG_SETMASK, &nset, &oset);
	err = pthread_create(&dpr->dpr_tid, &a, dt_proc_control, &data);
	(void) pthread_sigmask(SIG_SETMASK, &oset, NULL);

	/*
	 * If the control thread was created, then wait on dpr_cv for either
	 * dpr_done to be set (the victim died or the control thread failed)
	 * or DT_PROC_STOP_IDLE to be set, indicating that the victim is now
	 * stopped by /proc and the control thread is at the rendezvous event.
	 * On success, we return with the process and control thread stopped:
	 * the caller can then apply dt_proc_continue() to resume both.
	 */
	if (err == 0) {
		while (!dpr->dpr_done && !(dpr->dpr_stop & DT_PROC_STOP_IDLE))
			(void) pthread_cond_wait(&dpr->dpr_cv, &dpr->dpr_lock);

		/*
		 * If dpr_done is set, the control thread aborted before it
		 * reached the rendezvous event. This is either due to PS_LOST
		 * or PS_UNDEAD (i.e. the process died). We try to provide a
		 * small amount of useful information to help figure it out.
		 */
		if (dpr->dpr_done) {
#if defined(sun)
			const psinfo_t *prp = Ppsinfo(dpr->dpr_proc);
			int stat = prp ? prp->pr_wstat : 0;
			int pid = dpr->dpr_pid;
#else
			int stat = proc_getwstat(dpr->dpr_proc);
			int pid = proc_getpid(dpr->dpr_proc);
#endif
			if (proc_state(dpr->dpr_proc) == PS_LOST) {
				(void) dt_proc_error(dpr->dpr_hdl, dpr,
				    "failed to control pid %d: process exec'd "
				    "set-id or unobservable program\n", pid);
			} else if (WIFSIGNALED(stat)) {
				(void) dt_proc_error(dpr->dpr_hdl, dpr,
				    "failed to control pid %d: process died "
				    "from signal %d\n", pid, WTERMSIG(stat));
			} else {
				(void) dt_proc_error(dpr->dpr_hdl, dpr,
				    "failed to control pid %d: process exited "
				    "with status %d\n", pid, WEXITSTATUS(stat));
			}

			err = ESRCH; /* cause grab() or create() to fail */
		}
	} else {
		(void) dt_proc_error(dpr->dpr_hdl, dpr,
		    "failed to create control thread for process-id %d: %s\n",
		    (int)dpr->dpr_pid, strerror(err));
	}

	if (err == 0)
		(void) pthread_mutex_unlock(&dpr->dpr_lock);
	(void) pthread_attr_destroy(&a);

	return (err);
}
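
/*
 * Create a new victim process by executing 'file' with 'argv', mark it to be
 * killed on last close, and start a control thread that leaves it stopped at
 * the rendezvous selected by dtp->dt_prcmode until dt_proc_continue().
 */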
struct ps_prochandle *
dt_proc_create(dtrace_hdl_t *dtp, const char *file, char *const *argv,
    proc_child_func *pcf, void *child_arg)
{
	dt_proc_hash_t *dph = dtp->dt_procs;
	dt_proc_t *dpr;
	int err;

	if ((dpr = dt_zalloc(dtp, sizeof (dt_proc_t))) == NULL)
		return (NULL); /* errno is set for us */

	(void) pthread_mutex_init(&dpr->dpr_lock, NULL);
	(void) pthread_cond_init(&dpr->dpr_cv, NULL);

#if defined(sun)
	if ((dpr->dpr_proc = Pcreate(file, argv, &err, NULL, 0)) == NULL) {
#else
	if ((err = proc_create(file, argv, pcf, child_arg,
	    &dpr->dpr_proc)) != 0) {
#endif
		return (dt_proc_error(dtp, dpr,
		    "failed to execute %s: %s\n", file, Pcreate_error(err)));
	}

	dpr->dpr_hdl = dtp;
#if defined(sun)
	dpr->dpr_pid = Pstatus(dpr->dpr_proc)->pr_pid;
#else
	dpr->dpr_pid = proc_getpid(dpr->dpr_proc);
#endif

	(void) Punsetflags(dpr->dpr_proc, PR_RLC);
	(void) Psetflags(dpr->dpr_proc, PR_KLC);

	if (dt_proc_create_thread(dtp, dpr, dtp->dt_prcmode) != 0)
		return (NULL); /* dt_proc_error() has been called for us */

	dpr->dpr_hash = dph->dph_hash[dpr->dpr_pid & (dph->dph_hashlen - 1)];
	dph->dph_hash[dpr->dpr_pid & (dph->dph_hashlen - 1)] = dpr;
	dt_list_prepend(&dph->dph_lrulist, dpr);

	dt_dprintf("created pid %d\n", (int)dpr->dpr_pid);
	dpr->dpr_refs++;

	return (dpr->dpr_proc);
}
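
/*
 * Grab an existing process: reuse a matching cached handle if we have one;
 * otherwise attach a new handle, mark it cacheable if it is a read-only
 * grab, and start a control thread for it unless the caller asked for no
 * monitor or a read-only handle.
 */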
struct ps_prochandle *
dt_proc_grab(dtrace_hdl_t *dtp, pid_t pid, int flags, int nomonitor)
{
	dt_proc_hash_t *dph = dtp->dt_procs;
	uint_t h = pid & (dph->dph_hashlen - 1);
	dt_proc_t *dpr, *opr;
	int err;

	/*
	 * Search the hash table for the pid. If it is already grabbed or
	 * created, move the handle to the front of the lrulist, increment
	 * the reference count, and return the existing ps_prochandle.
	 */
	for (dpr = dph->dph_hash[h]; dpr != NULL; dpr = dpr->dpr_hash) {
		if (dpr->dpr_pid == pid && !dpr->dpr_stale) {
			/*
			 * If the cached handle was opened read-only and
			 * this request is for a writeable handle, mark
			 * the cached handle as stale and open a new handle.
			 * Since it's stale, unmark it as cacheable.
			 */
			if (dpr->dpr_rdonly && !(flags & PGRAB_RDONLY)) {
				dt_dprintf("upgrading pid %d\n", (int)pid);
				dpr->dpr_stale = B_TRUE;
				dpr->dpr_cacheable = B_FALSE;
				dph->dph_lrucnt--;
				break;
			}

			dt_dprintf("grabbed pid %d (cached)\n", (int)pid);
			dt_list_delete(&dph->dph_lrulist, dpr);
			dt_list_prepend(&dph->dph_lrulist, dpr);
			dpr->dpr_refs++;
			return (dpr->dpr_proc);
		}
	}

	if ((dpr = dt_zalloc(dtp, sizeof (dt_proc_t))) == NULL)
		return (NULL); /* errno is set for us */

	(void) pthread_mutex_init(&dpr->dpr_lock, NULL);
	(void) pthread_cond_init(&dpr->dpr_cv, NULL);

#if defined(sun)
	if ((dpr->dpr_proc = Pgrab(pid, flags, &err)) == NULL) {
#else
	if ((err = proc_attach(pid, flags, &dpr->dpr_proc)) != 0) {
#endif
		return (dt_proc_error(dtp, dpr,
		    "failed to grab pid %d: %s\n", (int)pid, Pgrab_error(err)));
	}

	dpr->dpr_hdl = dtp;
	dpr->dpr_pid = pid;

	(void) Punsetflags(dpr->dpr_proc, PR_KLC);
	(void) Psetflags(dpr->dpr_proc, PR_RLC);

	/*
	 * If we are attempting to grab the process without a monitor
	 * thread, then mark the process cacheable only if it's being
	 * grabbed read-only. If we're currently caching more process
	 * handles than dph_lrulim permits, attempt to find the
	 * least-recently-used handle that is currently unreferenced and
	 * release it from the cache. Otherwise we are grabbing the process
	 * for control: create a control thread for this process and store
	 * its ID in dpr->dpr_tid.
	 */
	if (nomonitor || (flags & PGRAB_RDONLY)) {
		if (dph->dph_lrucnt >= dph->dph_lrulim) {
			for (opr = dt_list_prev(&dph->dph_lrulist);
			    opr != NULL; opr = dt_list_prev(opr)) {
				if (opr->dpr_cacheable && opr->dpr_refs == 0) {
					dt_proc_destroy(dtp, opr->dpr_proc);
					break;
				}
			}
		}

		if (flags & PGRAB_RDONLY) {
			dpr->dpr_cacheable = B_TRUE;
			dpr->dpr_rdonly = B_TRUE;
			dph->dph_lrucnt++;
		}

	} else if (dt_proc_create_thread(dtp, dpr, DT_PROC_STOP_GRAB) != 0)
		return (NULL); /* dt_proc_error() has been called for us */

	dpr->dpr_hash = dph->dph_hash[h];
	dph->dph_hash[h] = dpr;
	dt_list_prepend(&dph->dph_lrulist, dpr);

	dt_dprintf("grabbed pid %d\n", (int)pid);
	dpr->dpr_refs++;

	return (dpr->dpr_proc);
}

void
dt_proc_release(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
	dt_proc_hash_t *dph = dtp->dt_procs;

	assert(dpr != NULL);
	assert(dpr->dpr_refs != 0);

	if (--dpr->dpr_refs == 0 &&
	    (!dpr->dpr_cacheable || dph->dph_lrucnt > dph->dph_lrulim))
		dt_proc_destroy(dtp, P);
}

void
dt_proc_continue(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);

	(void) pthread_mutex_lock(&dpr->dpr_lock);

	if (dpr->dpr_stop & DT_PROC_STOP_IDLE) {
		dpr->dpr_stop &= ~DT_PROC_STOP_IDLE;
		(void) pthread_cond_broadcast(&dpr->dpr_cv);
	}

	(void) pthread_mutex_unlock(&dpr->dpr_lock);
}
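
/*
 * dt_proc_lock() and dt_proc_unlock() are used by the rest of libdtrace to
 * serialize its use of a ps_prochandle with the corresponding control
 * thread, since libproc itself is not MT-Safe (see the block comment at the
 * top of this file).
 */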
void
dt_proc_lock(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
	int err = pthread_mutex_lock(&dpr->dpr_lock);
	assert(err == 0); /* check for recursion */
}

void
dt_proc_unlock(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_t *dpr = dt_proc_lookup(dtp, P, B_FALSE);
	int err = pthread_mutex_unlock(&dpr->dpr_lock);
	assert(err == 0); /* check for unheld lock */
}

void
dt_proc_hash_create(dtrace_hdl_t *dtp)
{
	if ((dtp->dt_procs = dt_zalloc(dtp, sizeof (dt_proc_hash_t) +
	    sizeof (dt_proc_t *) * _dtrace_pidbuckets - 1)) != NULL) {

		(void) pthread_mutex_init(&dtp->dt_procs->dph_lock, NULL);
		(void) pthread_cond_init(&dtp->dt_procs->dph_cv, NULL);

		dtp->dt_procs->dph_hashlen = _dtrace_pidbuckets;
		dtp->dt_procs->dph_lrulim = _dtrace_pidlrulim;
	}
}

void
dt_proc_hash_destroy(dtrace_hdl_t *dtp)
{
	dt_proc_hash_t *dph = dtp->dt_procs;
	dt_proc_t *dpr;

	while ((dpr = dt_list_next(&dph->dph_lrulist)) != NULL)
		dt_proc_destroy(dtp, dpr->dpr_proc);

	dtp->dt_procs = NULL;
	dt_free(dtp, dph);
}

struct ps_prochandle *
dtrace_proc_create(dtrace_hdl_t *dtp, const char *file, char *const *argv,
    proc_child_func *pcf, void *child_arg)
{
	dt_ident_t *idp = dt_idhash_lookup(dtp->dt_macros, "target");
	struct ps_prochandle *P = dt_proc_create(dtp, file, argv, pcf,
	    child_arg);

	if (P != NULL && idp != NULL && idp->di_id == 0) {
#if defined(sun)
		idp->di_id = Pstatus(P)->pr_pid; /* $target = created pid */
#else
		idp->di_id = proc_getpid(P); /* $target = created pid */
#endif
	}

	return (P);
}

struct ps_prochandle *
dtrace_proc_grab(dtrace_hdl_t *dtp, pid_t pid, int flags)
{
	dt_ident_t *idp = dt_idhash_lookup(dtp->dt_macros, "target");
	struct ps_prochandle *P = dt_proc_grab(dtp, pid, flags, 0);

	if (P != NULL && idp != NULL && idp->di_id == 0)
		idp->di_id = pid; /* $target = grabbed pid */

	return (P);
}

void
dtrace_proc_release(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_release(dtp, P);
}

void
dtrace_proc_continue(dtrace_hdl_t *dtp, struct ps_prochandle *P)
{
	dt_proc_continue(dtp, P);
}