/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/atomic.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/strsubr.h>
#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/sysmacros.h>
#include <sys/frame.h>
#include <sys/stack.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/policy.h>
#include <sys/ontrap.h>
#include <sys/vmsystm.h>
#include <sys/prsystm.h>


#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_dev.h>
#include <vm/seg_vn.h>
#include <vm/seg_spt.h>
#include <vm/seg_kmem.h>

/*
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are very ISA specific, the
 * methodology is generalizable to any architecture.
 *
 *
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space so we don't want to rely on much
 * information there, we begin by replacing the instructions we want to trace
 * with trap instructions. Each instruction we overwrite is saved into a hash
 * table keyed by process ID and pc address. When we enter the kernel due to
 * this trap instruction, we need the effects of the replaced instruction to
 * appear to have occurred before we proceed with the user thread's
 * execution.
 *
 * Each user-level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread-local storage), set the PC to that
 * scratch space and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate these in the kernel is
 * not too difficult.
 *
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
 */

static dev_info_t *fasttrap_devi;
static dtrace_provider_id_t fasttrap_id;
static dtrace_meta_provider_id_t fasttrap_meta_id;

static timeout_id_t fasttrap_timeout;
static kmutex_t fasttrap_cleanup_mtx;
static uint_t fasttrap_cleanup_work;

/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;

/*
 * When the fasttrap provider is loaded, fasttrap_max is set to either
 * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the
 * fasttrap.conf file. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
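 *
 * As a minimal sketch of that accounting (the authoritative checks live in
 * fasttrap_add_probe() and fasttrap_meta_create_probe() below, and ntps
 * here stands for the number of tracepoints the new probe would need):
 *
 *      atomic_add_32(&fasttrap_total, ntps);
 *      if (fasttrap_total > fasttrap_max) {
 *              atomic_add_32(&fasttrap_total, -ntps);
 *              return (ENOMEM);
 *      }
 *
 * i.e. the reservation is made optimistically and backed out again if it
 * would push the total over the configured limit.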
131 */ 132 #define FASTTRAP_MAX_DEFAULT 250000 133 static uint32_t fasttrap_max; 134 static uint32_t fasttrap_total; 135 136 137 #define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000 138 #define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100 139 140 #define FASTTRAP_PID_NAME "pid" 141 142 fasttrap_hash_t fasttrap_tpoints; 143 static fasttrap_hash_t fasttrap_provs; 144 145 dtrace_id_t fasttrap_probe_id; 146 static int fasttrap_count; /* ref count */ 147 static int fasttrap_pid_count; /* pid ref count */ 148 static kmutex_t fasttrap_count_mtx; /* lock on ref count */ 149 150 static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t); 151 static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t); 152 153 static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, const char *, 154 const dtrace_pattr_t *); 155 static void fasttrap_provider_free(fasttrap_provider_t *); 156 static void fasttrap_provider_retire(fasttrap_provider_t *); 157 158 #define FASTTRAP_PROVS_INDEX(pid, name) \ 159 ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask) 160 161 static int 162 fasttrap_highbit(ulong_t i) 163 { 164 int h = 1; 165 166 if (i == 0) 167 return (0); 168 #ifdef _LP64 169 if (i & 0xffffffff00000000ul) { 170 h += 32; i >>= 32; 171 } 172 #endif 173 if (i & 0xffff0000) { 174 h += 16; i >>= 16; 175 } 176 if (i & 0xff00) { 177 h += 8; i >>= 8; 178 } 179 if (i & 0xf0) { 180 h += 4; i >>= 4; 181 } 182 if (i & 0xc) { 183 h += 2; i >>= 2; 184 } 185 if (i & 0x2) { 186 h += 1; 187 } 188 return (h); 189 } 190 191 static uint_t 192 fasttrap_hash_str(const char *p) 193 { 194 unsigned int g; 195 uint_t hval = 0; 196 197 while (*p) { 198 hval = (hval << 4) + *p++; 199 if ((g = (hval & 0xf0000000)) != 0) 200 hval ^= g >> 24; 201 hval &= ~g; 202 } 203 return (hval); 204 } 205 206 void 207 fasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc) 208 { 209 sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP); 210 211 sqp->sq_info.si_signo = SIGTRAP; 212 sqp->sq_info.si_code = TRAP_DTRACE; 213 sqp->sq_info.si_addr = (caddr_t)pc; 214 215 mutex_enter(&p->p_lock); 216 sigaddqa(p, t, sqp); 217 mutex_exit(&p->p_lock); 218 219 if (t != NULL) 220 aston(t); 221 } 222 223 /* 224 * This function ensures that no threads are actively using the memory 225 * associated with probes that were formerly live. 226 */ 227 static void 228 fasttrap_mod_barrier(uint64_t gen) 229 { 230 int i; 231 232 if (gen < fasttrap_mod_gen) 233 return; 234 235 fasttrap_mod_gen++; 236 237 for (i = 0; i < NCPU; i++) { 238 mutex_enter(&cpu_core[i].cpuc_pid_lock); 239 mutex_exit(&cpu_core[i].cpuc_pid_lock); 240 } 241 } 242 243 /* 244 * This is the timeout's callback for cleaning up the providers and their 245 * probes. 246 */ 247 /*ARGSUSED*/ 248 static void 249 fasttrap_pid_cleanup_cb(void *data) 250 { 251 fasttrap_provider_t **fpp, *fp; 252 fasttrap_bucket_t *bucket; 253 dtrace_provider_id_t provid; 254 int i, later; 255 256 static volatile int in = 0; 257 ASSERT(in == 0); 258 in = 1; 259 260 mutex_enter(&fasttrap_cleanup_mtx); 261 while (fasttrap_cleanup_work) { 262 fasttrap_cleanup_work = 0; 263 mutex_exit(&fasttrap_cleanup_mtx); 264 265 later = 0; 266 267 /* 268 * Iterate over all the providers trying to remove the marked 269 * ones. If a provider is marked, but not defunct, we just 270 * have to take a crack at removing it -- it's no big deal if 271 * we can't. 
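         *
         * As a quick reference for the checks below, the per-provider state
         * consulted here is:
         *
         *      ftp_marked      candidate for removal on this pass
         *      ftp_defunct     provider may never create probes again
         *      ftp_ccount      a consumer (USDT registration or probe
         *                      creation) still holds a reference; skip it
         *      ftp_rcount      enabled probes still reference the provider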
272 */ 273 for (i = 0; i < fasttrap_provs.fth_nent; i++) { 274 bucket = &fasttrap_provs.fth_table[i]; 275 mutex_enter(&bucket->ftb_mtx); 276 fpp = (fasttrap_provider_t **)&bucket->ftb_data; 277 278 while ((fp = *fpp) != NULL) { 279 if (!fp->ftp_marked) { 280 fpp = &fp->ftp_next; 281 continue; 282 } 283 284 mutex_enter(&fp->ftp_mtx); 285 286 /* 287 * If this provider is referenced either 288 * because it is a USDT provider or is being 289 * modified, we can't unregister or even 290 * condense. 291 */ 292 if (fp->ftp_ccount != 0) { 293 mutex_exit(&fp->ftp_mtx); 294 fp->ftp_marked = 0; 295 continue; 296 } 297 298 if (!fp->ftp_defunct || fp->ftp_rcount != 0) 299 fp->ftp_marked = 0; 300 301 mutex_exit(&fp->ftp_mtx); 302 303 /* 304 * If we successfully unregister this 305 * provider we can remove it from the hash 306 * chain and free the memory. If our attempt 307 * to unregister fails and this is a defunct 308 * provider, increment our flag to try again 309 * pretty soon. If we've consumed more than 310 * half of our total permitted number of 311 * probes call dtrace_condense() to try to 312 * clean out the unenabled probes. 313 */ 314 provid = fp->ftp_provid; 315 if (dtrace_unregister(provid) != 0) { 316 if (fasttrap_total > fasttrap_max / 2) 317 (void) dtrace_condense(provid); 318 later += fp->ftp_marked; 319 fpp = &fp->ftp_next; 320 } else { 321 *fpp = fp->ftp_next; 322 fasttrap_provider_free(fp); 323 } 324 } 325 mutex_exit(&bucket->ftb_mtx); 326 } 327 328 mutex_enter(&fasttrap_cleanup_mtx); 329 } 330 331 ASSERT(fasttrap_timeout != 0); 332 333 /* 334 * If we were unable to remove a defunct provider, try again after 335 * a second. This situation can occur in certain circumstances where 336 * providers cannot be unregistered even though they have no probes 337 * enabled because of an execution of dtrace -l or something similar. 338 * If the timeout has been disabled (set to 1 because we're trying 339 * to detach), we set fasttrap_cleanup_work to ensure that we'll 340 * get a chance to do that work if and when the timeout is reenabled 341 * (if detach fails). 342 */ 343 if (later > 0 && fasttrap_timeout != (timeout_id_t)1) 344 fasttrap_timeout = timeout(&fasttrap_pid_cleanup_cb, NULL, hz); 345 else if (later > 0) 346 fasttrap_cleanup_work = 1; 347 else 348 fasttrap_timeout = 0; 349 350 mutex_exit(&fasttrap_cleanup_mtx); 351 in = 0; 352 } 353 354 /* 355 * Activates the asynchronous cleanup mechanism. 356 */ 357 static void 358 fasttrap_pid_cleanup(void) 359 { 360 mutex_enter(&fasttrap_cleanup_mtx); 361 fasttrap_cleanup_work = 1; 362 if (fasttrap_timeout == 0) 363 fasttrap_timeout = timeout(&fasttrap_pid_cleanup_cb, NULL, 1); 364 mutex_exit(&fasttrap_cleanup_mtx); 365 } 366 367 /* 368 * This is called from cfork() via dtrace_fasttrap_fork(). The child 369 * process's address space is a (roughly) a copy of the parent process's so 370 * we have to remove all the instrumentation we had previously enabled in the 371 * parent. 372 */ 373 static void 374 fasttrap_fork(proc_t *p, proc_t *cp) 375 { 376 pid_t ppid = p->p_pid; 377 int i; 378 379 ASSERT(curproc == p); 380 ASSERT(p->p_proc_flag & P_PR_LOCK); 381 ASSERT(p->p_dtrace_count > 0); 382 ASSERT(cp->p_dtrace_count == 0); 383 384 /* 385 * This would be simpler and faster if we maintained per-process 386 * hash tables of enabled tracepoints. It could, however, potentially 387 * slow down execution of a tracepoint since we'd need to go 388 * through two levels of indirection. 
In the future, we should 389 * consider either maintaining per-process ancillary lists of 390 * enabled tracepoints or hanging a pointer to a per-process hash 391 * table of enabled tracepoints off the proc structure. 392 */ 393 394 /* 395 * We don't have to worry about the child process disappearing 396 * because we're in fork(). 397 */ 398 mutex_enter(&cp->p_lock); 399 sprlock_proc(cp); 400 mutex_exit(&cp->p_lock); 401 402 /* 403 * Iterate over every tracepoint looking for ones that belong to the 404 * parent process, and remove each from the child process. 405 */ 406 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) { 407 fasttrap_tracepoint_t *tp; 408 fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i]; 409 410 mutex_enter(&bucket->ftb_mtx); 411 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { 412 if (tp->ftt_pid == ppid && !tp->ftt_prov->ftp_defunct) { 413 int ret = fasttrap_tracepoint_remove(cp, tp); 414 ASSERT(ret == 0); 415 } 416 } 417 mutex_exit(&bucket->ftb_mtx); 418 } 419 420 mutex_enter(&cp->p_lock); 421 sprunlock(cp); 422 } 423 424 /* 425 * This is called from proc_exit() or from exec_common() if p_dtrace_probes 426 * is set on the proc structure to indicate that there is a pid provider 427 * associated with this process. 428 */ 429 static void 430 fasttrap_exec_exit(proc_t *p) 431 { 432 fasttrap_provider_t *provider; 433 434 ASSERT(p == curproc); 435 ASSERT(MUTEX_HELD(&p->p_lock)); 436 437 mutex_exit(&p->p_lock); 438 439 /* 440 * We clean up the pid provider for this process here; user-land 441 * static probes are handled by the meta-provider remove entry point. 442 */ 443 if ((provider = fasttrap_provider_lookup(p->p_pid, 444 FASTTRAP_PID_NAME, NULL)) != NULL) 445 fasttrap_provider_retire(provider); 446 447 mutex_enter(&p->p_lock); 448 } 449 450 451 /*ARGSUSED*/ 452 static void 453 fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc) 454 { 455 /* 456 * There are no "default" pid probes. 457 */ 458 } 459 460 /*ARGSUSED*/ 461 static void 462 fasttrap_provide(void *arg, const dtrace_probedesc_t *desc) 463 { 464 if (dtrace_probe_lookup(fasttrap_id, NULL, "fasttrap", "fasttrap") == 0) 465 fasttrap_probe_id = dtrace_probe_create(fasttrap_id, NULL, 466 "fasttrap", "fasttrap", FASTTRAP_AFRAMES, NULL); 467 } 468 469 static int 470 fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index) 471 { 472 fasttrap_tracepoint_t *tp, *new_tp = NULL; 473 fasttrap_bucket_t *bucket; 474 fasttrap_id_t *id; 475 pid_t pid; 476 uintptr_t pc; 477 478 ASSERT(index < probe->ftp_ntps); 479 480 pid = probe->ftp_pid; 481 pc = probe->ftp_tps[index].fit_tp->ftt_pc; 482 id = &probe->ftp_tps[index].fit_id; 483 484 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid); 485 486 ASSERT(!(p->p_flag & SVFORK)); 487 488 /* 489 * Before we make any modifications, make sure we've imposed a barrier 490 * on the generation in which this probe was last modified. 491 */ 492 fasttrap_mod_barrier(probe->ftp_gen); 493 494 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)]; 495 496 /* 497 * If the tracepoint has already been enabled, just add our id to the 498 * list of interested probes. This may be our second time through 499 * this path in which case we'll have constructed the tracepoint we'd 500 * like to install. If we can't find a match, and have an allocated 501 * tracepoint ready to go, enable that one now. 502 * 503 * Tracepoints whose provider is now defunct are also considered 504 * defunct. 
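         *
         * For context, the probe-firing path in the ISA-specific code walks
         * these id lists without taking the bucket lock, roughly:
         *
         *      for (id = tp->ftt_ids; id != NULL; id = id->fti_next)
         *              dtrace_probe(id->fti_probe->ftp_id, ...);
         *
         * which is why the updates below always set fti_next before
         * publishing the new element with membar_producer() -- a concurrent
         * reader must never see a partially initialized id.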
505 */ 506 again: 507 mutex_enter(&bucket->ftb_mtx); 508 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { 509 if (tp->ftt_pid != pid || tp->ftt_pc != pc || 510 tp->ftt_prov->ftp_defunct) 511 continue; 512 513 /* 514 * Now that we've found a matching tracepoint, it would be 515 * a decent idea to confirm that the tracepoint is still 516 * enabled and the trap instruction hasn't been overwritten. 517 * Since this is a little hairy, we'll punt for now. 518 */ 519 520 /* 521 * This can't be the first interested probe. We don't have 522 * to worry about another thread being in the midst of 523 * deleting this tracepoint (which would be the only valid 524 * reason for a tracepoint to have no interested probes) 525 * since we're holding P_PR_LOCK for this process. 526 */ 527 ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL); 528 529 if (probe->ftp_type == DTFTP_RETURN || 530 probe->ftp_type == DTFTP_POST_OFFSETS) { 531 id->fti_next = tp->ftt_retids; 532 membar_producer(); 533 tp->ftt_retids = id; 534 membar_producer(); 535 } else { 536 id->fti_next = tp->ftt_ids; 537 membar_producer(); 538 tp->ftt_ids = id; 539 membar_producer(); 540 } 541 542 mutex_exit(&bucket->ftb_mtx); 543 544 if (new_tp != NULL) { 545 new_tp->ftt_ids = NULL; 546 new_tp->ftt_retids = NULL; 547 } 548 549 return (0); 550 } 551 552 /* 553 * If we have a good tracepoint ready to go, install it now while 554 * we have the lock held and no one can screw with us. 555 */ 556 if (new_tp != NULL) { 557 int rc; 558 559 new_tp->ftt_next = bucket->ftb_data; 560 membar_producer(); 561 bucket->ftb_data = new_tp; 562 membar_producer(); 563 mutex_exit(&bucket->ftb_mtx); 564 565 /* 566 * Activate the tracepoint in the isa-specific manner. 567 */ 568 if (fasttrap_tracepoint_install(p, new_tp) != 0) { 569 rc = 1; 570 } else { 571 /* 572 * Increment the count of the number of tracepoints 573 * active in the victim process. 574 */ 575 ASSERT(p->p_proc_flag & P_PR_LOCK); 576 p->p_dtrace_count++; 577 rc = 0; 578 } 579 580 return (rc); 581 } 582 583 mutex_exit(&bucket->ftb_mtx); 584 585 /* 586 * Initialize the tracepoint that's been preallocated with the probe. 587 */ 588 new_tp = probe->ftp_tps[index].fit_tp; 589 590 ASSERT(new_tp->ftt_pid == pid); 591 ASSERT(new_tp->ftt_pc == pc); 592 ASSERT(new_tp->ftt_prov == probe->ftp_prov); 593 ASSERT(new_tp->ftt_ids == NULL); 594 ASSERT(new_tp->ftt_retids == NULL); 595 596 if (probe->ftp_type == DTFTP_RETURN || 597 probe->ftp_type == DTFTP_POST_OFFSETS) { 598 id->fti_next = NULL; 599 new_tp->ftt_retids = id; 600 } else { 601 id->fti_next = NULL; 602 new_tp->ftt_ids = id; 603 } 604 605 /* 606 * If the isa-dependent initialization goes to plan, go back to the 607 * beginning and try to install this freshly made tracepoint. 608 */ 609 if (fasttrap_tracepoint_init(p, probe, new_tp, pc) == 0) 610 goto again; 611 612 new_tp->ftt_ids = NULL; 613 new_tp->ftt_retids = NULL; 614 615 return (1); 616 } 617 618 static void 619 fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index) 620 { 621 fasttrap_bucket_t *bucket; 622 fasttrap_provider_t *provider = probe->ftp_prov; 623 fasttrap_tracepoint_t **pp, *tp; 624 fasttrap_id_t *id, **idp; 625 pid_t pid; 626 uintptr_t pc; 627 628 ASSERT(index < probe->ftp_ntps); 629 630 pid = probe->ftp_pid; 631 pc = probe->ftp_tps[index].fit_tp->ftt_pc; 632 633 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid); 634 635 /* 636 * Find the tracepoint and make sure that our id is one of the 637 * ones registered with it. 
638 */ 639 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)]; 640 mutex_enter(&bucket->ftb_mtx); 641 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { 642 if (tp->ftt_pid == pid && tp->ftt_pc == pc && 643 tp->ftt_prov == provider) 644 break; 645 } 646 647 /* 648 * If we somehow lost this tracepoint, we're in a world of hurt. 649 */ 650 ASSERT(tp != NULL); 651 652 if (probe->ftp_type == DTFTP_RETURN || 653 probe->ftp_type == DTFTP_POST_OFFSETS) { 654 ASSERT(tp->ftt_retids != NULL); 655 idp = &tp->ftt_retids; 656 } else { 657 ASSERT(tp->ftt_ids != NULL); 658 idp = &tp->ftt_ids; 659 } 660 661 while ((*idp)->fti_probe != probe) { 662 idp = &(*idp)->fti_next; 663 ASSERT(*idp != NULL); 664 } 665 666 id = *idp; 667 *idp = id->fti_next; 668 membar_producer(); 669 670 ASSERT(id->fti_probe == probe); 671 672 /* 673 * If there are other registered enablings of this tracepoint, we're 674 * all done, but if this was the last probe assocated with this 675 * this tracepoint, we need to remove and free it. 676 */ 677 if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) { 678 679 /* 680 * If the current probe's tracepoint is in use, swap it 681 * for an unused tracepoint. 682 */ 683 if (tp == probe->ftp_tps[index].fit_tp) { 684 fasttrap_probe_t *tmp_probe; 685 fasttrap_tracepoint_t **tmp_tp; 686 uint_t tmp_index; 687 688 if (tp->ftt_ids != NULL) { 689 tmp_probe = tp->ftt_ids->fti_probe; 690 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids); 691 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp; 692 } else { 693 tmp_probe = tp->ftt_retids->fti_probe; 694 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids); 695 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp; 696 } 697 698 ASSERT(*tmp_tp != NULL); 699 ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp); 700 ASSERT((*tmp_tp)->ftt_ids == NULL); 701 ASSERT((*tmp_tp)->ftt_retids == NULL); 702 703 probe->ftp_tps[index].fit_tp = *tmp_tp; 704 *tmp_tp = tp; 705 706 } 707 708 mutex_exit(&bucket->ftb_mtx); 709 710 /* 711 * Tag the modified probe with the generation in which it was 712 * changed. 713 */ 714 probe->ftp_gen = fasttrap_mod_gen; 715 return; 716 } 717 718 mutex_exit(&bucket->ftb_mtx); 719 720 /* 721 * We can't safely remove the tracepoint from the set of active 722 * tracepoints until we've actually removed the fasttrap instruction 723 * from the process's text. We can, however, operate on this 724 * tracepoint secure in the knowledge that no other thread is going to 725 * be looking at it since we hold P_PR_LOCK on the process if it's 726 * live or we hold the provider lock on the process if it's dead and 727 * gone. 728 */ 729 730 /* 731 * We only need to remove the actual instruction if we're looking 732 * at an existing process 733 */ 734 if (p != NULL) { 735 /* 736 * If we fail to restore the instruction we need to kill 737 * this process since it's in a completely unrecoverable 738 * state. 739 */ 740 if (fasttrap_tracepoint_remove(p, tp) != 0) 741 fasttrap_sigtrap(p, NULL, pc); 742 743 /* 744 * Decrement the count of the number of tracepoints active 745 * in the victim process. 746 */ 747 ASSERT(p->p_proc_flag & P_PR_LOCK); 748 p->p_dtrace_count--; 749 } 750 751 /* 752 * Remove the probe from the hash table of active tracepoints. 
753 */ 754 mutex_enter(&bucket->ftb_mtx); 755 pp = (fasttrap_tracepoint_t **)&bucket->ftb_data; 756 ASSERT(*pp != NULL); 757 while (*pp != tp) { 758 pp = &(*pp)->ftt_next; 759 ASSERT(*pp != NULL); 760 } 761 762 *pp = tp->ftt_next; 763 membar_producer(); 764 765 mutex_exit(&bucket->ftb_mtx); 766 767 /* 768 * Tag the modified probe with the generation in which it was changed. 769 */ 770 probe->ftp_gen = fasttrap_mod_gen; 771 } 772 773 typedef int fasttrap_probe_f(struct regs *); 774 775 static void 776 fasttrap_enable_common(int *count, fasttrap_probe_f **fptr, fasttrap_probe_f *f, 777 fasttrap_probe_f **fptr2, fasttrap_probe_f *f2) 778 { 779 /* 780 * We don't have to play the rw lock game here because we're 781 * providing something rather than taking something away -- 782 * we can be sure that no threads have tried to follow this 783 * function pointer yet. 784 */ 785 mutex_enter(&fasttrap_count_mtx); 786 if (*count == 0) { 787 ASSERT(*fptr == NULL); 788 *fptr = f; 789 if (fptr2 != NULL) 790 *fptr2 = f2; 791 } 792 ASSERT(*fptr == f); 793 ASSERT(fptr2 == NULL || *fptr2 == f2); 794 (*count)++; 795 mutex_exit(&fasttrap_count_mtx); 796 } 797 798 static void 799 fasttrap_disable_common(int *count, fasttrap_probe_f **fptr, 800 fasttrap_probe_f **fptr2) 801 { 802 ASSERT(MUTEX_HELD(&cpu_lock)); 803 804 mutex_enter(&fasttrap_count_mtx); 805 (*count)--; 806 ASSERT(*count >= 0); 807 if (*count == 0) { 808 cpu_t *cur, *cpu = CPU; 809 810 for (cur = cpu->cpu_next_onln; cur != cpu; 811 cur = cur->cpu_next_onln) { 812 rw_enter(&cur->cpu_ft_lock, RW_WRITER); 813 } 814 815 *fptr = NULL; 816 if (fptr2 != NULL) 817 *fptr2 = NULL; 818 819 for (cur = cpu->cpu_next_onln; cur != cpu; 820 cur = cur->cpu_next_onln) { 821 rw_exit(&cur->cpu_ft_lock); 822 } 823 } 824 mutex_exit(&fasttrap_count_mtx); 825 } 826 827 /*ARGSUSED*/ 828 static void 829 fasttrap_enable(void *arg, dtrace_id_t id, void *parg) 830 { 831 /* 832 * Enable the probe that corresponds to statically placed trace 833 * points which have not explicitly been placed in the process's text 834 * by the fasttrap provider. 835 */ 836 ASSERT(arg == NULL); 837 ASSERT(id == fasttrap_probe_id); 838 839 fasttrap_enable_common(&fasttrap_count, 840 &dtrace_fasttrap_probe_ptr, fasttrap_probe, NULL, NULL); 841 } 842 843 844 /*ARGSUSED*/ 845 static void 846 fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg) 847 { 848 fasttrap_probe_t *probe = parg; 849 proc_t *p; 850 int i; 851 852 ASSERT(probe != NULL); 853 ASSERT(!probe->ftp_enabled); 854 ASSERT(id == probe->ftp_id); 855 ASSERT(MUTEX_HELD(&cpu_lock)); 856 857 /* 858 * Increment the count of enabled probes on this probe's provider; 859 * the provider can't go away while the probe still exists. We 860 * must increment this even if we aren't able to properly enable 861 * this probe. 862 */ 863 mutex_enter(&probe->ftp_prov->ftp_mtx); 864 probe->ftp_prov->ftp_rcount++; 865 mutex_exit(&probe->ftp_prov->ftp_mtx); 866 867 /* 868 * Bail out if we can't find the process for this probe or its 869 * provider is defunct (meaning it was valid in a previously exec'ed 870 * incarnation of this address space). The provider can't go away 871 * while we're in this code path. 872 */ 873 if (probe->ftp_prov->ftp_defunct || 874 (p = sprlock(probe->ftp_pid)) == NULL) 875 return; 876 877 ASSERT(!(p->p_flag & SVFORK)); 878 mutex_exit(&p->p_lock); 879 880 /* 881 * We have to enable the trap entry before any user threads have 882 * the chance to execute the trap instruction we're about to place 883 * in their process's text. 
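         *
         * In other words, the required ordering is, roughly: first point
         * dtrace_pid_probe_ptr (and dtrace_return_probe_ptr) at our
         * handlers, and only then let fasttrap_tracepoint_install() patch
         * the traced process's text. If the text were patched first, a
         * thread could hit the breakpoint before the handlers were in place
         * and the trap would not be recognized as ours.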
884 */ 885 fasttrap_enable_common(&fasttrap_pid_count, 886 &dtrace_pid_probe_ptr, fasttrap_pid_probe, 887 &dtrace_return_probe_ptr, fasttrap_return_probe); 888 889 /* 890 * Enable all the tracepoints and add this probe's id to each 891 * tracepoint's list of active probes. 892 */ 893 for (i = 0; i < probe->ftp_ntps; i++) { 894 if (fasttrap_tracepoint_enable(p, probe, i) != 0) { 895 /* 896 * Back up and pull out all the tracepoints we've 897 * created so far for this probe. 898 */ 899 while (--i >= 0) { 900 fasttrap_tracepoint_disable(p, probe, i); 901 } 902 903 mutex_enter(&p->p_lock); 904 sprunlock(p); 905 906 /* 907 * Since we're not actually enabling this probe, 908 * drop our reference on the trap table entry. 909 */ 910 fasttrap_disable_common(&fasttrap_pid_count, 911 &dtrace_pid_probe_ptr, &dtrace_return_probe_ptr); 912 return; 913 } 914 } 915 916 mutex_enter(&p->p_lock); 917 sprunlock(p); 918 919 probe->ftp_enabled = 1; 920 } 921 922 923 /*ARGSUSED*/ 924 static void 925 fasttrap_disable(void *arg, dtrace_id_t id, void *parg) 926 { 927 /* 928 * Disable the probe the corresponds to statically placed trace 929 * points. 930 */ 931 ASSERT(arg == NULL); 932 ASSERT(id == fasttrap_probe_id); 933 ASSERT(MUTEX_HELD(&cpu_lock)); 934 fasttrap_disable_common(&fasttrap_count, &dtrace_fasttrap_probe_ptr, 935 NULL); 936 } 937 938 /*ARGSUSED*/ 939 static void 940 fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg) 941 { 942 fasttrap_probe_t *probe = parg; 943 fasttrap_provider_t *provider = probe->ftp_prov; 944 proc_t *p; 945 int i, whack = 0; 946 947 if (!probe->ftp_enabled) { 948 mutex_enter(&provider->ftp_mtx); 949 provider->ftp_rcount--; 950 ASSERT(provider->ftp_rcount >= 0); 951 mutex_exit(&provider->ftp_mtx); 952 return; 953 } 954 955 ASSERT(id == probe->ftp_id); 956 957 /* 958 * We won't be able to acquire a /proc-esque lock on the process 959 * iff the process is dead and gone. In this case, we rely on the 960 * provider lock as a point of mutual exclusion to prevent other 961 * DTrace consumers from disabling this probe. 962 */ 963 if ((p = sprlock(probe->ftp_pid)) != NULL) { 964 ASSERT(!(p->p_flag & SVFORK)); 965 mutex_exit(&p->p_lock); 966 } 967 968 mutex_enter(&provider->ftp_mtx); 969 970 /* 971 * Disable all the associated tracepoints. 972 */ 973 for (i = 0; i < probe->ftp_ntps; i++) { 974 fasttrap_tracepoint_disable(p, probe, i); 975 } 976 977 ASSERT(provider->ftp_rcount > 0); 978 provider->ftp_rcount--; 979 980 if (p != NULL) { 981 /* 982 * Even though we may not be able to remove it entirely, we 983 * mark this defunct provider to get a chance to remove some 984 * of the associated probes. 985 */ 986 if (provider->ftp_defunct && !provider->ftp_marked) 987 whack = provider->ftp_marked = 1; 988 mutex_exit(&provider->ftp_mtx); 989 990 mutex_enter(&p->p_lock); 991 sprunlock(p); 992 } else { 993 /* 994 * If the process is dead, we're just waiting for the 995 * last probe to be disabled to be able to free it. 
996 */ 997 if (provider->ftp_rcount == 0 && !provider->ftp_marked) 998 whack = provider->ftp_marked = 1; 999 mutex_exit(&provider->ftp_mtx); 1000 } 1001 1002 if (whack) 1003 fasttrap_pid_cleanup(); 1004 1005 probe->ftp_enabled = 0; 1006 1007 ASSERT(MUTEX_HELD(&cpu_lock)); 1008 fasttrap_disable_common(&fasttrap_pid_count, &dtrace_pid_probe_ptr, 1009 &dtrace_return_probe_ptr); 1010 } 1011 1012 /*ARGSUSED*/ 1013 static void 1014 fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg, 1015 dtrace_argdesc_t *desc) 1016 { 1017 fasttrap_probe_t *probe = parg; 1018 char *str; 1019 int i; 1020 1021 desc->dtargd_native[0] = '\0'; 1022 desc->dtargd_xlate[0] = '\0'; 1023 1024 if (probe->ftp_prov->ftp_defunct != 0 || 1025 desc->dtargd_ndx >= probe->ftp_nargs) { 1026 desc->dtargd_ndx = DTRACE_ARGNONE; 1027 return; 1028 } 1029 1030 /* 1031 * We only need to set this member if the argument is remapped. 1032 */ 1033 if (probe->ftp_argmap != NULL) 1034 desc->dtargd_mapping = probe->ftp_argmap[desc->dtargd_ndx]; 1035 1036 str = probe->ftp_ntypes; 1037 for (i = 0; i < desc->dtargd_mapping; i++) { 1038 str += strlen(str) + 1; 1039 } 1040 1041 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_native)); 1042 (void) strcpy(desc->dtargd_native, str); 1043 1044 if (probe->ftp_xtypes == NULL) 1045 return; 1046 1047 str = probe->ftp_xtypes; 1048 for (i = 0; i < desc->dtargd_ndx; i++) { 1049 str += strlen(str) + 1; 1050 } 1051 1052 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_xlate)); 1053 (void) strcpy(desc->dtargd_xlate, str); 1054 } 1055 1056 /*ARGSUSED*/ 1057 static void 1058 fasttrap_destroy(void *arg, dtrace_id_t id, void *parg) 1059 { 1060 ASSERT(arg == NULL); 1061 ASSERT(id == fasttrap_probe_id); 1062 } 1063 1064 /*ARGSUSED*/ 1065 static void 1066 fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg) 1067 { 1068 fasttrap_probe_t *probe = parg; 1069 int i; 1070 size_t size; 1071 1072 ASSERT(probe != NULL); 1073 ASSERT(!probe->ftp_enabled); 1074 ASSERT(fasttrap_total >= probe->ftp_ntps); 1075 1076 atomic_add_32(&fasttrap_total, -probe->ftp_ntps); 1077 size = sizeof (fasttrap_probe_t) + 1078 sizeof (probe->ftp_tps[0]) * (probe->ftp_ntps - 1); 1079 1080 if (probe->ftp_gen + 1 >= fasttrap_mod_gen) 1081 fasttrap_mod_barrier(probe->ftp_gen); 1082 1083 for (i = 0; i < probe->ftp_ntps; i++) { 1084 kmem_free(probe->ftp_tps[i].fit_tp, 1085 sizeof (fasttrap_tracepoint_t)); 1086 } 1087 1088 kmem_free(probe, size); 1089 } 1090 1091 1092 static const dtrace_pattr_t fasttrap_attr = { 1093 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, 1094 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 1095 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 1096 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, 1097 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, 1098 }; 1099 1100 static dtrace_pops_t fasttrap_pops = { 1101 fasttrap_provide, 1102 NULL, 1103 fasttrap_enable, 1104 fasttrap_disable, 1105 NULL, 1106 NULL, 1107 NULL, 1108 fasttrap_getarg, 1109 NULL, 1110 fasttrap_destroy 1111 }; 1112 1113 static const dtrace_pattr_t pid_attr = { 1114 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, 1115 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 1116 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 1117 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, 1118 { DTRACE_STABILITY_PRIVATE, 
DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 1119 }; 1120 1121 static dtrace_pops_t pid_pops = { 1122 fasttrap_pid_provide, 1123 NULL, 1124 fasttrap_pid_enable, 1125 fasttrap_pid_disable, 1126 NULL, 1127 NULL, 1128 fasttrap_pid_getargdesc, 1129 fasttrap_getarg, 1130 NULL, 1131 fasttrap_pid_destroy 1132 }; 1133 1134 static dtrace_pops_t usdt_pops = { 1135 fasttrap_pid_provide, 1136 NULL, 1137 fasttrap_pid_enable, 1138 fasttrap_pid_disable, 1139 NULL, 1140 NULL, 1141 fasttrap_pid_getargdesc, 1142 fasttrap_usdt_getarg, 1143 NULL, 1144 fasttrap_pid_destroy 1145 }; 1146 1147 /* 1148 * Lookup a fasttrap-managed provider based on its name and associated pid. 1149 * If the pattr argument is non-NULL, this function instantiates the provider 1150 * if it doesn't exist otherwise it returns NULL. The provider is returned 1151 * with its lock held. 1152 */ 1153 static fasttrap_provider_t * 1154 fasttrap_provider_lookup(pid_t pid, const char *name, 1155 const dtrace_pattr_t *pattr) 1156 { 1157 fasttrap_provider_t *fp, *new_fp = NULL; 1158 fasttrap_bucket_t *bucket; 1159 char provname[DTRACE_PROVNAMELEN]; 1160 proc_t *p; 1161 uid_t uid = (uid_t)-1; 1162 1163 ASSERT(strlen(name) < sizeof (fp->ftp_name)); 1164 1165 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)]; 1166 mutex_enter(&bucket->ftb_mtx); 1167 1168 /* 1169 * Take a lap through the list and return the match if we find it. 1170 */ 1171 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) { 1172 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 && 1173 !fp->ftp_defunct) { 1174 mutex_enter(&fp->ftp_mtx); 1175 mutex_exit(&bucket->ftb_mtx); 1176 return (fp); 1177 } 1178 } 1179 1180 /* 1181 * Drop the bucket lock so we don't try to perform a sleeping 1182 * allocation under it. 1183 */ 1184 mutex_exit(&bucket->ftb_mtx); 1185 1186 if (pattr == NULL) 1187 return (NULL); 1188 1189 /* 1190 * Make sure the process exists, isn't a child created as the result 1191 * of a vfork(2), and isn't a zombie (but may be in fork). Record the 1192 * process's uid to pass to dtrace_register(). 1193 */ 1194 mutex_enter(&pidlock); 1195 if ((p = prfind(pid)) == NULL || 1196 (p->p_flag & (SVFORK | SEXITLWPS)) || 1197 (p->p_lwpcnt == 0 && p->p_lwpdir != NULL)) { 1198 mutex_exit(&pidlock); 1199 return (NULL); 1200 } 1201 mutex_enter(&p->p_lock); 1202 mutex_exit(&pidlock); 1203 1204 /* 1205 * Increment p_dtrace_probes so that the process knows to inform us 1206 * when it exits or execs. fasttrap_provider_free() decrements this 1207 * when we're done with this provider. 1208 */ 1209 p->p_dtrace_probes++; 1210 1211 mutex_enter(&p->p_crlock); 1212 uid = crgetruid(p->p_cred); 1213 mutex_exit(&p->p_crlock); 1214 mutex_exit(&p->p_lock); 1215 1216 new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP); 1217 1218 mutex_enter(&bucket->ftb_mtx); 1219 1220 /* 1221 * Take another lap through the list to make sure a provider hasn't 1222 * been created for this pid while we weren't under the bucket lock. 1223 */ 1224 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) { 1225 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 && 1226 !fp->ftp_defunct) { 1227 mutex_enter(&fp->ftp_mtx); 1228 mutex_exit(&bucket->ftb_mtx); 1229 fasttrap_provider_free(new_fp); 1230 return (fp); 1231 } 1232 } 1233 1234 new_fp->ftp_pid = pid; 1235 (void) strcpy(new_fp->ftp_name, name); 1236 1237 /* 1238 * Fail and return NULL if either the provider name is too long 1239 * or we fail to register this new provider with the DTrace 1240 * framework. 
Note that this is the only place we ever construct 1241 * the full provider name -- we keep it in pieces in the provider 1242 * structure. 1243 */ 1244 if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >= 1245 sizeof (provname) || 1246 dtrace_register(provname, pattr, 1247 DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER, uid, 1248 pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp, 1249 &new_fp->ftp_provid) != 0) { 1250 mutex_exit(&bucket->ftb_mtx); 1251 fasttrap_provider_free(new_fp); 1252 return (NULL); 1253 } 1254 1255 new_fp->ftp_next = bucket->ftb_data; 1256 bucket->ftb_data = new_fp; 1257 1258 mutex_enter(&new_fp->ftp_mtx); 1259 mutex_exit(&bucket->ftb_mtx); 1260 1261 return (new_fp); 1262 } 1263 1264 static void 1265 fasttrap_provider_free(fasttrap_provider_t *provider) 1266 { 1267 pid_t pid = provider->ftp_pid; 1268 proc_t *p; 1269 1270 /* 1271 * There need to be no consumers using this provider and no 1272 * associated enabled probes. 1273 */ 1274 ASSERT(provider->ftp_ccount == 0); 1275 ASSERT(provider->ftp_rcount == 0); 1276 1277 kmem_free(provider, sizeof (fasttrap_provider_t)); 1278 1279 /* 1280 * Decrement p_dtrace_probes on the process whose provider we're 1281 * freeing. We don't have to worry about clobbering somone else's 1282 * modifications to it because we have locked the bucket that 1283 * corresponds to this process's hash chain in the provider hash 1284 * table. Don't sweat it if we can't find the process. 1285 */ 1286 mutex_enter(&pidlock); 1287 if ((p = prfind(pid)) == NULL) { 1288 mutex_exit(&pidlock); 1289 return; 1290 } 1291 1292 mutex_enter(&p->p_lock); 1293 mutex_exit(&pidlock); 1294 1295 p->p_dtrace_probes--; 1296 mutex_exit(&p->p_lock); 1297 } 1298 1299 static void 1300 fasttrap_provider_retire(fasttrap_provider_t *provider) 1301 { 1302 dtrace_provider_id_t provid = provider->ftp_provid; 1303 1304 /* 1305 * Mark the provider to be removed in our post-processing step 1306 * and mark it as defunct. The former indicates that we should try 1307 * to remove it, the latter indicates that even if we were unable 1308 * to remove it, this provider shouldn't be used to create probes 1309 * in the future. 1310 */ 1311 provider->ftp_defunct = 1; 1312 provider->ftp_marked = 1; 1313 mutex_exit(&provider->ftp_mtx); 1314 1315 /* 1316 * We don't have to worry about invalidating the same provider twice 1317 * since fasttrap_provider_lookup() will ignore provider that have 1318 * been marked as defunct. 1319 */ 1320 dtrace_invalidate(provid); 1321 1322 fasttrap_pid_cleanup(); 1323 } 1324 1325 static int 1326 fasttrap_add_probe(fasttrap_probe_spec_t *pdata) 1327 { 1328 fasttrap_provider_t *provider; 1329 fasttrap_probe_t *pp; 1330 fasttrap_tracepoint_t *tp; 1331 char *name; 1332 size_t size; 1333 int i, aframes, whack; 1334 1335 switch (pdata->ftps_type) { 1336 case DTFTP_ENTRY: 1337 name = "entry"; 1338 aframes = FASTTRAP_ENTRY_AFRAMES; 1339 break; 1340 case DTFTP_RETURN: 1341 name = "return"; 1342 aframes = FASTTRAP_RETURN_AFRAMES; 1343 break; 1344 case DTFTP_OFFSETS: 1345 name = NULL; 1346 break; 1347 default: 1348 return (EINVAL); 1349 } 1350 1351 if ((provider = fasttrap_provider_lookup(pdata->ftps_pid, 1352 FASTTRAP_PID_NAME, &pid_attr)) == NULL) 1353 return (ESRCH); 1354 1355 /* 1356 * Increment this reference count to indicate that a consumer is 1357 * actively adding a new probe associated with this provider. 
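         *
         * In outline (ftp_mtx is held on entry, as returned by the lookup
         * above), the hold is managed as:
         *
         *      provider->ftp_ccount++;
         *      mutex_exit(&provider->ftp_mtx);
         *      ... create the probes; allocations may sleep ...
         *      mutex_enter(&provider->ftp_mtx);
         *      provider->ftp_ccount--;
         *      whack = provider->ftp_defunct;
         *      mutex_exit(&provider->ftp_mtx);
         *      if (whack)
         *              fasttrap_pid_cleanup();
         *
         * so a provider that became defunct while we were using it is still
         * cleaned up promptly.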
1358 */ 1359 provider->ftp_ccount++; 1360 mutex_exit(&provider->ftp_mtx); 1361 1362 if (name != NULL) { 1363 if (dtrace_probe_lookup(provider->ftp_provid, 1364 pdata->ftps_mod, pdata->ftps_func, name) != 0) 1365 goto done; 1366 1367 atomic_add_32(&fasttrap_total, pdata->ftps_noffs); 1368 1369 if (fasttrap_total > fasttrap_max) { 1370 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs); 1371 goto no_mem; 1372 } 1373 1374 ASSERT(pdata->ftps_noffs > 0); 1375 size = sizeof (fasttrap_probe_t) + 1376 sizeof (pp->ftp_tps[0]) * (pdata->ftps_noffs - 1); 1377 1378 pp = kmem_zalloc(size, KM_SLEEP); 1379 1380 pp->ftp_prov = provider; 1381 pp->ftp_faddr = pdata->ftps_pc; 1382 pp->ftp_fsize = pdata->ftps_size; 1383 pp->ftp_pid = pdata->ftps_pid; 1384 pp->ftp_ntps = pdata->ftps_noffs; 1385 pp->ftp_type = pdata->ftps_type; 1386 1387 for (i = 0; i < pdata->ftps_noffs; i++) { 1388 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), 1389 KM_SLEEP); 1390 1391 tp->ftt_prov = provider; 1392 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc; 1393 tp->ftt_pid = pdata->ftps_pid; 1394 1395 pp->ftp_tps[i].fit_tp = tp; 1396 pp->ftp_tps[i].fit_id.fti_probe = pp; 1397 } 1398 1399 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, 1400 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp); 1401 } else { 1402 for (i = 0; i < pdata->ftps_noffs; i++) { 1403 char name_str[17]; 1404 1405 (void) sprintf(name_str, "%llx", 1406 (unsigned long long)pdata->ftps_offs[i]); 1407 1408 if (dtrace_probe_lookup(provider->ftp_provid, 1409 pdata->ftps_mod, pdata->ftps_func, name_str) != 0) 1410 continue; 1411 1412 atomic_add_32(&fasttrap_total, 1); 1413 1414 if (fasttrap_total > fasttrap_max) { 1415 atomic_add_32(&fasttrap_total, -1); 1416 goto no_mem; 1417 } 1418 1419 pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP); 1420 1421 pp->ftp_prov = provider; 1422 pp->ftp_faddr = pdata->ftps_pc; 1423 pp->ftp_fsize = pdata->ftps_size; 1424 pp->ftp_pid = pdata->ftps_pid; 1425 pp->ftp_ntps = 1; 1426 pp->ftp_type = pdata->ftps_type; 1427 1428 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), 1429 KM_SLEEP); 1430 1431 tp->ftt_prov = provider; 1432 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc; 1433 tp->ftt_pid = pdata->ftps_pid; 1434 1435 pp->ftp_tps[0].fit_tp = tp; 1436 pp->ftp_tps[0].fit_id.fti_probe = pp; 1437 1438 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, 1439 pdata->ftps_mod, pdata->ftps_func, name_str, 1440 FASTTRAP_OFFSET_AFRAMES, pp); 1441 } 1442 } 1443 1444 done: 1445 /* 1446 * We know that the provider is still valid since we incremented the 1447 * reference count. If someone tried to free this provider while we 1448 * were using it (e.g. because the process called exec(2) or exit(2)), 1449 * take note of that and try to free it now. 1450 */ 1451 mutex_enter(&provider->ftp_mtx); 1452 provider->ftp_ccount--; 1453 whack = provider->ftp_defunct; 1454 mutex_exit(&provider->ftp_mtx); 1455 1456 if (whack) 1457 fasttrap_pid_cleanup(); 1458 1459 return (0); 1460 1461 no_mem: 1462 /* 1463 * If we've exhausted the allowable resources, we'll try to remove 1464 * this provider to free some up. This is to cover the case where 1465 * the user has accidentally created many more probes than was 1466 * intended (e.g. pid123:::). 
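         *
         * (A specification like pid123::: asks for a probe at every entry,
         * return, and instruction offset in every module of the process,
         * which can easily exceed the default fasttrap-max-probes limit of
         * 250000 set by FASTTRAP_MAX_DEFAULT above.)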
         */
        mutex_enter(&provider->ftp_mtx);
        provider->ftp_ccount--;
        provider->ftp_marked = 1;
        mutex_exit(&provider->ftp_mtx);

        fasttrap_pid_cleanup();

        return (ENOMEM);
}

/*ARGSUSED*/
static void *
fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
        fasttrap_provider_t *provider;

        /*
         * A 32-bit unsigned integer (like a pid for example) can be
         * expressed in 10 or fewer decimal digits. Make sure that we'll
         * have enough space for the provider name.
         */
        if (strlen(dhpv->dthpv_provname) + 10 >=
            sizeof (provider->ftp_name)) {
                cmn_err(CE_WARN, "failed to instantiate provider %s: "
                    "name too long to accommodate pid", dhpv->dthpv_provname);
                return (NULL);
        }

        /*
         * Don't let folks spoof the true pid provider.
         */
        if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) {
                cmn_err(CE_WARN, "failed to instantiate provider %s: "
                    "%s is an invalid name", dhpv->dthpv_provname,
                    FASTTRAP_PID_NAME);
                return (NULL);
        }

        /*
         * The highest stability class that fasttrap supports is ISA; cap
         * the stability of the new provider accordingly.
         */
        if (dhpv->dthpv_pattr.dtpa_provider.dtat_class >= DTRACE_CLASS_COMMON)
                dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
        if (dhpv->dthpv_pattr.dtpa_mod.dtat_class >= DTRACE_CLASS_COMMON)
                dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
        if (dhpv->dthpv_pattr.dtpa_func.dtat_class >= DTRACE_CLASS_COMMON)
                dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
        if (dhpv->dthpv_pattr.dtpa_name.dtat_class >= DTRACE_CLASS_COMMON)
                dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
        if (dhpv->dthpv_pattr.dtpa_args.dtat_class >= DTRACE_CLASS_COMMON)
                dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;

        if ((provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname,
            &dhpv->dthpv_pattr)) == NULL) {
                cmn_err(CE_WARN, "failed to instantiate provider %s for "
                    "process %u", dhpv->dthpv_provname, (uint_t)pid);
                return (NULL);
        }

        /*
         * We elevate the consumer count here to ensure that this provider
         * isn't removed until after the meta provider has been told to
         * remove it.
1532 */ 1533 provider->ftp_ccount++; 1534 1535 mutex_exit(&provider->ftp_mtx); 1536 1537 return (provider); 1538 } 1539 1540 /*ARGSUSED*/ 1541 static void 1542 fasttrap_meta_create_probe(void *arg, void *parg, 1543 dtrace_helper_probedesc_t *dhpb) 1544 { 1545 fasttrap_provider_t *provider = parg; 1546 fasttrap_probe_t *pp; 1547 fasttrap_tracepoint_t *tp; 1548 size_t size; 1549 int i; 1550 1551 mutex_enter(&provider->ftp_mtx); 1552 1553 if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod, 1554 dhpb->dthpb_func, dhpb->dthpb_name) != 0) { 1555 mutex_exit(&provider->ftp_mtx); 1556 return; 1557 } 1558 1559 atomic_add_32(&fasttrap_total, dhpb->dthpb_noffs); 1560 1561 if (fasttrap_total > fasttrap_max) { 1562 atomic_add_32(&fasttrap_total, -dhpb->dthpb_noffs); 1563 mutex_exit(&provider->ftp_mtx); 1564 return; 1565 } 1566 1567 size = sizeof (fasttrap_probe_t) + 1568 sizeof (pp->ftp_tps[0]) * (dhpb->dthpb_noffs - 1); 1569 pp = kmem_zalloc(size, KM_SLEEP); 1570 1571 pp->ftp_prov = provider; 1572 pp->ftp_pid = provider->ftp_pid; 1573 pp->ftp_ntps = dhpb->dthpb_noffs; 1574 #ifdef __sparc 1575 pp->ftp_type = DTFTP_POST_OFFSETS; 1576 #else 1577 pp->ftp_type = DTFTP_OFFSETS; 1578 #endif 1579 pp->ftp_nargs = dhpb->dthpb_xargc; 1580 pp->ftp_xtypes = dhpb->dthpb_xtypes; 1581 pp->ftp_ntypes = dhpb->dthpb_ntypes; 1582 1583 for (i = 0; i < pp->ftp_ntps; i++) { 1584 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP); 1585 1586 tp->ftt_prov = provider; 1587 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i]; 1588 tp->ftt_pid = provider->ftp_pid; 1589 1590 pp->ftp_tps[i].fit_tp = tp; 1591 pp->ftp_tps[i].fit_id.fti_probe = pp; 1592 } 1593 1594 /* 1595 * If the arguments are shuffled around we set the argument remapping 1596 * table. Later, when the probe fires, we only remap the arguments 1597 * if the table is non-NULL. 1598 */ 1599 for (i = 0; i < dhpb->dthpb_xargc; i++) { 1600 if (dhpb->dthpb_args[i] != i) { 1601 pp->ftp_argmap = dhpb->dthpb_args; 1602 break; 1603 } 1604 } 1605 1606 /* 1607 * The probe is fully constructed -- register it with DTrace. 1608 */ 1609 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod, 1610 dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp); 1611 1612 mutex_exit(&provider->ftp_mtx); 1613 } 1614 1615 /*ARGSUSED*/ 1616 static void 1617 fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid) 1618 { 1619 fasttrap_provider_t *provider; 1620 1621 if ((provider = fasttrap_provider_lookup(pid, 1622 dhpv->dthpv_provname, NULL)) != NULL) { 1623 /* 1624 * Drop the consumer count now that we're done with this 1625 * provider. If there are no other consumers retire it now. 
1626 */ 1627 if (--provider->ftp_ccount == 0) 1628 fasttrap_provider_retire(provider); 1629 else 1630 mutex_exit(&provider->ftp_mtx); 1631 } 1632 } 1633 1634 static dtrace_mops_t fasttrap_mops = { 1635 fasttrap_meta_create_probe, 1636 fasttrap_meta_provide, 1637 fasttrap_meta_remove 1638 }; 1639 1640 /*ARGSUSED*/ 1641 static int 1642 fasttrap_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 1643 { 1644 return (0); 1645 } 1646 1647 /*ARGSUSED*/ 1648 static int 1649 fasttrap_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 1650 { 1651 if (!dtrace_attached()) 1652 return (EAGAIN); 1653 1654 if (cmd == FASTTRAPIOC_MAKEPROBE) { 1655 fasttrap_probe_spec_t *uprobe = (void *)arg; 1656 fasttrap_probe_spec_t *probe; 1657 uint64_t noffs; 1658 size_t size; 1659 int ret; 1660 char *c; 1661 1662 if (copyin(&uprobe->ftps_noffs, &noffs, 1663 sizeof (uprobe->ftps_noffs))) 1664 return (EFAULT); 1665 1666 /* 1667 * Probes must have at least one tracepoint. 1668 */ 1669 if (noffs == 0) 1670 return (EINVAL); 1671 1672 size = sizeof (fasttrap_probe_spec_t) + 1673 sizeof (probe->ftps_offs[0]) * (noffs - 1); 1674 1675 if (size > 1024 * 1024) 1676 return (ENOMEM); 1677 1678 probe = kmem_alloc(size, KM_SLEEP); 1679 1680 if (copyin(uprobe, probe, size) != 0) { 1681 kmem_free(probe, size); 1682 return (EFAULT); 1683 } 1684 1685 /* 1686 * Verify that the function and module strings contain no 1687 * funny characters. 1688 */ 1689 for (c = &probe->ftps_func[0]; *c != '\0'; c++) { 1690 if (*c < 0x20 || 0x7f <= *c) { 1691 ret = EINVAL; 1692 goto err; 1693 } 1694 } 1695 1696 for (c = &probe->ftps_mod[0]; *c != '\0'; c++) { 1697 if (*c < 0x20 || 0x7f <= *c) { 1698 ret = EINVAL; 1699 goto err; 1700 } 1701 } 1702 1703 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) { 1704 proc_t *p; 1705 pid_t pid = probe->ftps_pid; 1706 1707 mutex_enter(&pidlock); 1708 /* 1709 * Report an error if the process doesn't exist 1710 * or is actively being birthed. 1711 */ 1712 if ((p = prfind(pid)) == NULL || p->p_stat == SIDL) { 1713 mutex_exit(&pidlock); 1714 return (ESRCH); 1715 } 1716 mutex_enter(&p->p_lock); 1717 mutex_exit(&pidlock); 1718 1719 if ((ret = priv_proc_cred_perm(cr, p, NULL, 1720 VREAD | VWRITE)) != 0) { 1721 mutex_exit(&p->p_lock); 1722 return (ret); 1723 } 1724 1725 mutex_exit(&p->p_lock); 1726 } 1727 1728 ret = fasttrap_add_probe(probe); 1729 err: 1730 kmem_free(probe, size); 1731 1732 return (ret); 1733 1734 } else if (cmd == FASTTRAPIOC_GETINSTR) { 1735 fasttrap_instr_query_t instr; 1736 fasttrap_tracepoint_t *tp; 1737 uint_t index; 1738 int ret; 1739 1740 if (copyin((void *)arg, &instr, sizeof (instr)) != 0) 1741 return (EFAULT); 1742 1743 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) { 1744 proc_t *p; 1745 pid_t pid = instr.ftiq_pid; 1746 1747 mutex_enter(&pidlock); 1748 /* 1749 * Report an error if the process doesn't exist 1750 * or is actively being birthed. 
1751 */ 1752 if ((p = prfind(pid)) == NULL || p->p_stat == SIDL) { 1753 mutex_exit(&pidlock); 1754 return (ESRCH); 1755 } 1756 mutex_enter(&p->p_lock); 1757 mutex_exit(&pidlock); 1758 1759 if ((ret = priv_proc_cred_perm(cr, p, NULL, 1760 VREAD)) != 0) { 1761 mutex_exit(&p->p_lock); 1762 return (ret); 1763 } 1764 1765 mutex_exit(&p->p_lock); 1766 } 1767 1768 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc); 1769 1770 mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx); 1771 tp = fasttrap_tpoints.fth_table[index].ftb_data; 1772 while (tp != NULL) { 1773 if (instr.ftiq_pid == tp->ftt_pid && 1774 instr.ftiq_pc == tp->ftt_pc && 1775 !tp->ftt_prov->ftp_defunct) 1776 break; 1777 1778 tp = tp->ftt_next; 1779 } 1780 1781 if (tp == NULL) { 1782 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx); 1783 return (ENOENT); 1784 } 1785 1786 bcopy(&tp->ftt_instr, &instr.ftiq_instr, 1787 sizeof (instr.ftiq_instr)); 1788 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx); 1789 1790 if (copyout(&instr, (void *)arg, sizeof (instr)) != 0) 1791 return (EFAULT); 1792 1793 return (0); 1794 } 1795 1796 return (EINVAL); 1797 } 1798 1799 static struct cb_ops fasttrap_cb_ops = { 1800 fasttrap_open, /* open */ 1801 nodev, /* close */ 1802 nulldev, /* strategy */ 1803 nulldev, /* print */ 1804 nodev, /* dump */ 1805 nodev, /* read */ 1806 nodev, /* write */ 1807 fasttrap_ioctl, /* ioctl */ 1808 nodev, /* devmap */ 1809 nodev, /* mmap */ 1810 nodev, /* segmap */ 1811 nochpoll, /* poll */ 1812 ddi_prop_op, /* cb_prop_op */ 1813 0, /* streamtab */ 1814 D_NEW | D_MP /* Driver compatibility flag */ 1815 }; 1816 1817 /*ARGSUSED*/ 1818 static int 1819 fasttrap_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 1820 { 1821 int error; 1822 1823 switch (infocmd) { 1824 case DDI_INFO_DEVT2DEVINFO: 1825 *result = (void *)fasttrap_devi; 1826 error = DDI_SUCCESS; 1827 break; 1828 case DDI_INFO_DEVT2INSTANCE: 1829 *result = (void *)0; 1830 error = DDI_SUCCESS; 1831 break; 1832 default: 1833 error = DDI_FAILURE; 1834 } 1835 return (error); 1836 } 1837 1838 static int 1839 fasttrap_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 1840 { 1841 ulong_t nent; 1842 1843 switch (cmd) { 1844 case DDI_ATTACH: 1845 break; 1846 case DDI_RESUME: 1847 return (DDI_SUCCESS); 1848 default: 1849 return (DDI_FAILURE); 1850 } 1851 1852 if (ddi_create_minor_node(devi, "fasttrap", S_IFCHR, 0, 1853 DDI_PSEUDO, NULL) == DDI_FAILURE || 1854 dtrace_register("fasttrap", &fasttrap_attr, DTRACE_PRIV_USER, 0, 1855 &fasttrap_pops, NULL, &fasttrap_id) != 0) { 1856 ddi_remove_minor_node(devi, NULL); 1857 return (DDI_FAILURE); 1858 } 1859 1860 ddi_report_dev(devi); 1861 fasttrap_devi = devi; 1862 1863 /* 1864 * Install our hooks into fork(2), exec(2), and exit(2). 1865 */ 1866 dtrace_fasttrap_fork_ptr = &fasttrap_fork; 1867 dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit; 1868 dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit; 1869 1870 fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 1871 "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT); 1872 fasttrap_total = 0; 1873 1874 /* 1875 * Conjure up the tracepoints hashtable... 
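         *
         * The size comes from the optional fasttrap-hash-size property and
         * is rounded up to a power of two so that fth_mask can be used in
         * place of a modulo. For example, a setting of 0x5000 would give
         * fasttrap_highbit(0x5000) == 15, so fth_nent = (1 << 15) = 0x8000
         * and fth_mask = 0x7fff; a power of two such as the default 0x4000
         * is used as-is.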
1876 */ 1877 nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 1878 "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE); 1879 1880 if (nent <= 0 || nent > 0x1000000) 1881 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE; 1882 1883 if ((nent & (nent - 1)) == 0) 1884 fasttrap_tpoints.fth_nent = nent; 1885 else 1886 fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent); 1887 ASSERT(fasttrap_tpoints.fth_nent > 0); 1888 fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1; 1889 fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent * 1890 sizeof (fasttrap_bucket_t), KM_SLEEP); 1891 1892 /* 1893 * ... and the providers hash table. 1894 */ 1895 nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE; 1896 if ((nent & (nent - 1)) == 0) 1897 fasttrap_provs.fth_nent = nent; 1898 else 1899 fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent); 1900 ASSERT(fasttrap_provs.fth_nent > 0); 1901 fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1; 1902 1903 fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent * 1904 sizeof (fasttrap_bucket_t), KM_SLEEP); 1905 1906 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL, 1907 &fasttrap_meta_id); 1908 1909 return (DDI_SUCCESS); 1910 } 1911 1912 static int 1913 fasttrap_detach(dev_info_t *devi, ddi_detach_cmd_t cmd) 1914 { 1915 int i, fail = 0; 1916 timeout_id_t tmp; 1917 1918 switch (cmd) { 1919 case DDI_DETACH: 1920 break; 1921 case DDI_SUSPEND: 1922 return (DDI_SUCCESS); 1923 default: 1924 return (DDI_FAILURE); 1925 } 1926 1927 /* 1928 * Unregister the meta-provider to make sure no new fasttrap- 1929 * managed providers come along while we're trying to close up 1930 * shop. If we fail to detach, we'll need to re-register as a 1931 * meta-provider. We can fail to unregister as a meta-provider 1932 * if providers we manage still exist. 1933 */ 1934 if (fasttrap_meta_id != DTRACE_METAPROVNONE && 1935 dtrace_meta_unregister(fasttrap_meta_id) != 0) 1936 return (DDI_FAILURE); 1937 1938 /* 1939 * Prevent any new timeouts from running by setting fasttrap_timeout 1940 * to a non-zero value, and wait for the current timeout to complete. 1941 */ 1942 mutex_enter(&fasttrap_cleanup_mtx); 1943 fasttrap_cleanup_work = 0; 1944 1945 while (fasttrap_timeout != (timeout_id_t)1) { 1946 tmp = fasttrap_timeout; 1947 fasttrap_timeout = (timeout_id_t)1; 1948 1949 if (tmp != 0) { 1950 mutex_exit(&fasttrap_cleanup_mtx); 1951 (void) untimeout(tmp); 1952 mutex_enter(&fasttrap_cleanup_mtx); 1953 } 1954 } 1955 1956 fasttrap_cleanup_work = 0; 1957 mutex_exit(&fasttrap_cleanup_mtx); 1958 1959 /* 1960 * Iterate over all of our providers. If there's still a process 1961 * that corresponds to that pid, fail to detach. 1962 */ 1963 for (i = 0; i < fasttrap_provs.fth_nent; i++) { 1964 fasttrap_provider_t **fpp, *fp; 1965 fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i]; 1966 1967 mutex_enter(&bucket->ftb_mtx); 1968 fpp = (fasttrap_provider_t **)&bucket->ftb_data; 1969 while ((fp = *fpp) != NULL) { 1970 /* 1971 * Acquire and release the lock as a simple way of 1972 * waiting for any other consumer to finish with 1973 * this provider. A thread must first acquire the 1974 * bucket lock so there's no chance of another thread 1975 * blocking on the providers lock. 
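                         *
                         * That is, because every path that takes a provider's
                         * ftp_mtx first takes the bucket lock we now hold, no
                         * new thread can begin waiting on ftp_mtx; the empty
                         * enter/exit pair below simply waits out whichever
                         * thread may currently hold it.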
1976 */ 1977 mutex_enter(&fp->ftp_mtx); 1978 mutex_exit(&fp->ftp_mtx); 1979 1980 if (dtrace_unregister(fp->ftp_provid) != 0) { 1981 fail = 1; 1982 fpp = &fp->ftp_next; 1983 } else { 1984 *fpp = fp->ftp_next; 1985 fasttrap_provider_free(fp); 1986 } 1987 } 1988 1989 mutex_exit(&bucket->ftb_mtx); 1990 } 1991 1992 if (fail || dtrace_unregister(fasttrap_id) != 0) { 1993 uint_t work; 1994 /* 1995 * If we're failing to detach, we need to unblock timeouts 1996 * and start a new timeout if any work has accumulated while 1997 * we've been unsuccessfully trying to detach. 1998 */ 1999 mutex_enter(&fasttrap_cleanup_mtx); 2000 fasttrap_timeout = 0; 2001 work = fasttrap_cleanup_work; 2002 mutex_exit(&fasttrap_cleanup_mtx); 2003 2004 if (work) 2005 fasttrap_pid_cleanup(); 2006 2007 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL, 2008 &fasttrap_meta_id); 2009 2010 return (DDI_FAILURE); 2011 } 2012 2013 #ifdef DEBUG 2014 mutex_enter(&fasttrap_count_mtx); 2015 ASSERT(fasttrap_count == 0); 2016 mutex_exit(&fasttrap_count_mtx); 2017 #endif 2018 2019 kmem_free(fasttrap_tpoints.fth_table, 2020 fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t)); 2021 fasttrap_tpoints.fth_nent = 0; 2022 2023 kmem_free(fasttrap_provs.fth_table, 2024 fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t)); 2025 fasttrap_provs.fth_nent = 0; 2026 2027 /* 2028 * We know there are no tracepoints in any process anywhere in 2029 * the system so there is no process which has its p_dtrace_count 2030 * greater than zero, therefore we know that no thread can actively 2031 * be executing code in fasttrap_fork(). Similarly for p_dtrace_probes 2032 * and fasttrap_exec() and fasttrap_exit(). 2033 */ 2034 ASSERT(dtrace_fasttrap_fork_ptr == &fasttrap_fork); 2035 dtrace_fasttrap_fork_ptr = NULL; 2036 2037 ASSERT(dtrace_fasttrap_exec_ptr == &fasttrap_exec_exit); 2038 dtrace_fasttrap_exec_ptr = NULL; 2039 2040 ASSERT(dtrace_fasttrap_exit_ptr == &fasttrap_exec_exit); 2041 dtrace_fasttrap_exit_ptr = NULL; 2042 2043 ddi_remove_minor_node(devi, NULL); 2044 2045 return (DDI_SUCCESS); 2046 } 2047 2048 static struct dev_ops fasttrap_ops = { 2049 DEVO_REV, /* devo_rev */ 2050 0, /* refcnt */ 2051 fasttrap_info, /* get_dev_info */ 2052 nulldev, /* identify */ 2053 nulldev, /* probe */ 2054 fasttrap_attach, /* attach */ 2055 fasttrap_detach, /* detach */ 2056 nodev, /* reset */ 2057 &fasttrap_cb_ops, /* driver operations */ 2058 NULL, /* bus operations */ 2059 nodev /* dev power */ 2060 }; 2061 2062 /* 2063 * Module linkage information for the kernel. 2064 */ 2065 static struct modldrv modldrv = { 2066 &mod_driverops, /* module type (this is a pseudo driver) */ 2067 "Fasttrap Tracing", /* name of module */ 2068 &fasttrap_ops, /* driver ops */ 2069 }; 2070 2071 static struct modlinkage modlinkage = { 2072 MODREV_1, 2073 (void *)&modldrv, 2074 NULL 2075 }; 2076 2077 int 2078 _init(void) 2079 { 2080 return (mod_install(&modlinkage)); 2081 } 2082 2083 int 2084 _info(struct modinfo *modinfop) 2085 { 2086 return (mod_info(&modlinkage, modinfop)); 2087 } 2088 2089 int 2090 _fini(void) 2091 { 2092 return (mod_remove(&modlinkage)); 2093 } 2094
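
/*
 * For reference, a consumer creates pid probes by filling in a
 * fasttrap_probe_spec_t and issuing FASTTRAPIOC_MAKEPROBE on the fasttrap
 * device node; libdtrace normally does this on the consumer's behalf. A
 * minimal user-level sketch, with the device path, error handling, and all
 * values purely illustrative:
 *
 *      fasttrap_probe_spec_t *spec;
 *      uint64_t noffs = 1;
 *      size_t size;
 *      int fd;
 *
 *      fd = open("<fasttrap device node>", O_RDWR);
 *      size = sizeof (fasttrap_probe_spec_t) +
 *          sizeof (spec->ftps_offs[0]) * (noffs - 1);
 *      spec = calloc(1, size);
 *
 *      spec->ftps_pid = 123;                   (hypothetical target pid)
 *      spec->ftps_type = DTFTP_ENTRY;          (or DTFTP_RETURN, DTFTP_OFFSETS)
 *      spec->ftps_pc = func_addr;              (function address in the target)
 *      spec->ftps_size = func_size;
 *      spec->ftps_noffs = noffs;
 *      spec->ftps_offs[0] = 0;                 (offsets are relative to ftps_pc)
 *      (void) strcpy(spec->ftps_func, "main");
 *      (void) strcpy(spec->ftps_mod, "a.out");
 *
 *      if (ioctl(fd, FASTTRAPIOC_MAKEPROBE, spec) != 0)
 *              ... the probe could not be created ...
 */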