1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 * 21 * Portions Copyright 2010 The FreeBSD Foundation 22 * 23 * $FreeBSD$ 24 */ 25 26 /* 27 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 28 * Use is subject to license terms. 29 */ 30 31 /* 32 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 33 */ 34 35 #include <sys/atomic.h> 36 #include <sys/errno.h> 37 #include <sys/stat.h> 38 #include <sys/modctl.h> 39 #include <sys/conf.h> 40 #include <sys/systm.h> 41 #ifdef illumos 42 #include <sys/ddi.h> 43 #endif 44 #include <sys/sunddi.h> 45 #include <sys/cpuvar.h> 46 #include <sys/kmem.h> 47 #ifdef illumos 48 #include <sys/strsubr.h> 49 #endif 50 #include <sys/fasttrap.h> 51 #include <sys/fasttrap_impl.h> 52 #include <sys/fasttrap_isa.h> 53 #include <sys/dtrace.h> 54 #include <sys/dtrace_impl.h> 55 #include <sys/sysmacros.h> 56 #include <sys/proc.h> 57 #include <sys/policy.h> 58 #ifdef illumos 59 #include <util/qsort.h> 60 #endif 61 #include <sys/mutex.h> 62 #include <sys/kernel.h> 63 #ifndef illumos 64 #include <sys/dtrace_bsd.h> 65 #include <sys/eventhandler.h> 66 #include <sys/u8_textprep.h> 67 #include <sys/user.h> 68 #include <vm/vm.h> 69 #include <vm/pmap.h> 70 #include <vm/vm_map.h> 71 #include <vm/vm_param.h> 72 #include <cddl/dev/dtrace/dtrace_cddl.h> 73 #endif 74 75 /* 76 * User-Land Trap-Based Tracing 77 * ---------------------------- 78 * 79 * The fasttrap provider allows DTrace consumers to instrument any user-level 80 * instruction to gather data; this includes probes with semantic 81 * signifigance like entry and return as well as simple offsets into the 82 * function. While the specific techniques used are very ISA specific, the 83 * methodology is generalizable to any architecture. 84 * 85 * 86 * The General Methodology 87 * ----------------------- 88 * 89 * With the primary goal of tracing every user-land instruction and the 90 * limitation that we can't trust user space so don't want to rely on much 91 * information there, we begin by replacing the instructions we want to trace 92 * with trap instructions. Each instruction we overwrite is saved into a hash 93 * table keyed by process ID and pc address. When we enter the kernel due to 94 * this trap instruction, we need the effects of the replaced instruction to 95 * appear to have occurred before we proceed with the user thread's 96 * execution. 97 * 98 * Each user level thread is represented by a ulwp_t structure which is 99 * always easily accessible through a register. The most basic way to produce 100 * the effects of the instruction we replaced is to copy that instruction out 101 * to a bit of scratch space reserved in the user thread's ulwp_t structure 102 * (a sort of kernel-private thread local storage), set the PC to that 103 * scratch space and single step. When we reenter the kernel after single 104 * stepping the instruction we must then adjust the PC to point to what would 105 * normally be the next instruction. Of course, special care must be taken 106 * for branches and jumps, but these represent such a small fraction of any 107 * instruction set that writing the code to emulate these in the kernel is 108 * not too difficult. 109 * 110 * Return probes may require several tracepoints to trace every return site, 111 * and, conversely, each tracepoint may activate several probes (the entry 112 * and offset 0 probes, for example). To solve this muliplexing problem, 113 * tracepoints contain lists of probes to activate and probes contain lists 114 * of tracepoints to enable. If a probe is activated, it adds its ID to 115 * existing tracepoints or creates new ones as necessary. 116 * 117 * Most probes are activated _before_ the instruction is executed, but return 118 * probes are activated _after_ the effects of the last instruction of the 119 * function are visible. Return probes must be fired _after_ we have 120 * single-stepped the instruction whereas all other probes are fired 121 * beforehand. 122 * 123 * 124 * Lock Ordering 125 * ------------- 126 * 127 * The lock ordering below -- both internally and with respect to the DTrace 128 * framework -- is a little tricky and bears some explanation. Each provider 129 * has a lock (ftp_mtx) that protects its members including reference counts 130 * for enabled probes (ftp_rcount), consumers actively creating probes 131 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider 132 * from being freed. A provider is looked up by taking the bucket lock for the 133 * provider hash table, and is returned with its lock held. The provider lock 134 * may be taken in functions invoked by the DTrace framework, but may not be 135 * held while calling functions in the DTrace framework. 136 * 137 * To ensure consistency over multiple calls to the DTrace framework, the 138 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may 139 * not be taken when holding the provider lock as that would create a cyclic 140 * lock ordering. In situations where one would naturally take the provider 141 * lock and then the creation lock, we instead up a reference count to prevent 142 * the provider from disappearing, drop the provider lock, and acquire the 143 * creation lock. 144 * 145 * Briefly: 146 * bucket lock before provider lock 147 * DTrace before provider lock 148 * creation lock before DTrace 149 * never hold the provider lock and creation lock simultaneously 150 */ 151 152 static d_open_t fasttrap_open; 153 static d_ioctl_t fasttrap_ioctl; 154 155 static struct cdevsw fasttrap_cdevsw = { 156 .d_version = D_VERSION, 157 .d_open = fasttrap_open, 158 .d_ioctl = fasttrap_ioctl, 159 .d_name = "fasttrap", 160 }; 161 static struct cdev *fasttrap_cdev; 162 static dtrace_meta_provider_id_t fasttrap_meta_id; 163 164 static struct proc *fasttrap_cleanup_proc; 165 static struct mtx fasttrap_cleanup_mtx; 166 static uint_t fasttrap_cleanup_work, fasttrap_cleanup_drain, fasttrap_cleanup_cv; 167 168 /* 169 * Generation count on modifications to the global tracepoint lookup table. 170 */ 171 static volatile uint64_t fasttrap_mod_gen; 172 173 /* 174 * When the fasttrap provider is loaded, fasttrap_max is set to either 175 * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the 176 * fasttrap.conf file. Each time a probe is created, fasttrap_total is 177 * incremented by the number of tracepoints that may be associated with that 178 * probe; fasttrap_total is capped at fasttrap_max. 179 */ 180 #define FASTTRAP_MAX_DEFAULT 250000 181 static uint32_t fasttrap_max; 182 static uint32_t fasttrap_total; 183 184 /* 185 * Copyright (c) 2011, Joyent, Inc. All rights reserved. 186 */ 187 188 #define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000 189 #define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100 190 #define FASTTRAP_PROCS_DEFAULT_SIZE 0x100 191 192 #define FASTTRAP_PID_NAME "pid" 193 194 fasttrap_hash_t fasttrap_tpoints; 195 static fasttrap_hash_t fasttrap_provs; 196 static fasttrap_hash_t fasttrap_procs; 197 198 static uint64_t fasttrap_pid_count; /* pid ref count */ 199 static kmutex_t fasttrap_count_mtx; /* lock on ref count */ 200 201 #define FASTTRAP_ENABLE_FAIL 1 202 #define FASTTRAP_ENABLE_PARTIAL 2 203 204 static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t); 205 static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t); 206 207 static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, const char *, 208 const dtrace_pattr_t *); 209 static void fasttrap_provider_retire(pid_t, const char *, int); 210 static void fasttrap_provider_free(fasttrap_provider_t *); 211 212 static fasttrap_proc_t *fasttrap_proc_lookup(pid_t); 213 static void fasttrap_proc_release(fasttrap_proc_t *); 214 215 #ifndef illumos 216 static void fasttrap_thread_dtor(void *, struct thread *); 217 #endif 218 219 #define FASTTRAP_PROVS_INDEX(pid, name) \ 220 ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask) 221 222 #define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask) 223 224 #ifndef illumos 225 static kmutex_t fasttrap_cpuc_pid_lock[MAXCPU]; 226 static eventhandler_tag fasttrap_thread_dtor_tag; 227 #endif 228 229 static int 230 fasttrap_highbit(ulong_t i) 231 { 232 int h = 1; 233 234 if (i == 0) 235 return (0); 236 #ifdef _LP64 237 if (i & 0xffffffff00000000ul) { 238 h += 32; i >>= 32; 239 } 240 #endif 241 if (i & 0xffff0000) { 242 h += 16; i >>= 16; 243 } 244 if (i & 0xff00) { 245 h += 8; i >>= 8; 246 } 247 if (i & 0xf0) { 248 h += 4; i >>= 4; 249 } 250 if (i & 0xc) { 251 h += 2; i >>= 2; 252 } 253 if (i & 0x2) { 254 h += 1; 255 } 256 return (h); 257 } 258 259 static uint_t 260 fasttrap_hash_str(const char *p) 261 { 262 unsigned int g; 263 uint_t hval = 0; 264 265 while (*p) { 266 hval = (hval << 4) + *p++; 267 if ((g = (hval & 0xf0000000)) != 0) 268 hval ^= g >> 24; 269 hval &= ~g; 270 } 271 return (hval); 272 } 273 274 void 275 fasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc) 276 { 277 #ifdef illumos 278 sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP); 279 280 sqp->sq_info.si_signo = SIGTRAP; 281 sqp->sq_info.si_code = TRAP_DTRACE; 282 sqp->sq_info.si_addr = (caddr_t)pc; 283 284 mutex_enter(&p->p_lock); 285 sigaddqa(p, t, sqp); 286 mutex_exit(&p->p_lock); 287 288 if (t != NULL) 289 aston(t); 290 #else 291 ksiginfo_t *ksi = kmem_zalloc(sizeof (ksiginfo_t), KM_SLEEP); 292 293 ksiginfo_init(ksi); 294 ksi->ksi_signo = SIGTRAP; 295 ksi->ksi_code = TRAP_DTRACE; 296 ksi->ksi_addr = (caddr_t)pc; 297 PROC_LOCK(p); 298 (void) tdsendsignal(p, t, SIGTRAP, ksi); 299 PROC_UNLOCK(p); 300 #endif 301 } 302 303 #ifndef illumos 304 /* 305 * Obtain a chunk of scratch space in the address space of the target process. 306 */ 307 fasttrap_scrspace_t * 308 fasttrap_scraddr(struct thread *td, fasttrap_proc_t *fprc) 309 { 310 fasttrap_scrblock_t *scrblk; 311 fasttrap_scrspace_t *scrspc; 312 struct proc *p; 313 vm_offset_t addr; 314 int error, i; 315 316 scrspc = NULL; 317 if (td->t_dtrace_sscr != NULL) { 318 /* If the thread already has scratch space, we're done. */ 319 scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr; 320 return (scrspc); 321 } 322 323 p = td->td_proc; 324 325 mutex_enter(&fprc->ftpc_mtx); 326 if (LIST_EMPTY(&fprc->ftpc_fscr)) { 327 /* 328 * No scratch space is available, so we'll map a new scratch 329 * space block into the traced process' address space. 330 */ 331 addr = 0; 332 error = vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr, 333 FASTTRAP_SCRBLOCK_SIZE, 0, VMFS_ANY_SPACE, VM_PROT_ALL, 334 VM_PROT_ALL, 0); 335 if (error != KERN_SUCCESS) 336 goto done; 337 338 scrblk = malloc(sizeof(*scrblk), M_SOLARIS, M_WAITOK); 339 scrblk->ftsb_addr = addr; 340 LIST_INSERT_HEAD(&fprc->ftpc_scrblks, scrblk, ftsb_next); 341 342 /* 343 * Carve the block up into chunks and put them on the free list. 344 */ 345 for (i = 0; 346 i < FASTTRAP_SCRBLOCK_SIZE / FASTTRAP_SCRSPACE_SIZE; i++) { 347 scrspc = malloc(sizeof(*scrspc), M_SOLARIS, M_WAITOK); 348 scrspc->ftss_addr = addr + 349 i * FASTTRAP_SCRSPACE_SIZE; 350 LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc, 351 ftss_next); 352 } 353 } 354 355 /* 356 * Take the first scratch chunk off the free list, put it on the 357 * allocated list, and return its address. 358 */ 359 scrspc = LIST_FIRST(&fprc->ftpc_fscr); 360 LIST_REMOVE(scrspc, ftss_next); 361 LIST_INSERT_HEAD(&fprc->ftpc_ascr, scrspc, ftss_next); 362 363 /* 364 * This scratch space is reserved for use by td until the thread exits. 365 */ 366 td->t_dtrace_sscr = scrspc; 367 368 done: 369 mutex_exit(&fprc->ftpc_mtx); 370 371 return (scrspc); 372 } 373 374 /* 375 * Return any allocated per-thread scratch space chunks back to the process' 376 * free list. 377 */ 378 static void 379 fasttrap_thread_dtor(void *arg __unused, struct thread *td) 380 { 381 fasttrap_bucket_t *bucket; 382 fasttrap_proc_t *fprc; 383 fasttrap_scrspace_t *scrspc; 384 pid_t pid; 385 386 if (td->t_dtrace_sscr == NULL) 387 return; 388 389 pid = td->td_proc->p_pid; 390 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)]; 391 fprc = NULL; 392 393 /* Look up the fasttrap process handle for this process. */ 394 mutex_enter(&bucket->ftb_mtx); 395 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) { 396 if (fprc->ftpc_pid == pid) { 397 mutex_enter(&fprc->ftpc_mtx); 398 mutex_exit(&bucket->ftb_mtx); 399 break; 400 } 401 } 402 if (fprc == NULL) { 403 mutex_exit(&bucket->ftb_mtx); 404 return; 405 } 406 407 scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr; 408 LIST_REMOVE(scrspc, ftss_next); 409 LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc, ftss_next); 410 411 mutex_exit(&fprc->ftpc_mtx); 412 } 413 #endif 414 415 /* 416 * This function ensures that no threads are actively using the memory 417 * associated with probes that were formerly live. 418 */ 419 static void 420 fasttrap_mod_barrier(uint64_t gen) 421 { 422 int i; 423 424 if (gen < fasttrap_mod_gen) 425 return; 426 427 fasttrap_mod_gen++; 428 429 CPU_FOREACH(i) { 430 mutex_enter(&fasttrap_cpuc_pid_lock[i]); 431 mutex_exit(&fasttrap_cpuc_pid_lock[i]); 432 } 433 } 434 435 /* 436 * This function performs asynchronous cleanup of fasttrap providers. The 437 * Solaris implementation of this mechanism use a timeout that's activated in 438 * fasttrap_pid_cleanup(), but this doesn't work in FreeBSD: one may sleep while 439 * holding the DTrace mutexes, but it is unsafe to sleep in a callout handler. 440 * Thus we use a dedicated process to perform the cleanup when requested. 441 */ 442 /*ARGSUSED*/ 443 static void 444 fasttrap_pid_cleanup_cb(void *data) 445 { 446 fasttrap_provider_t **fpp, *fp; 447 fasttrap_bucket_t *bucket; 448 dtrace_provider_id_t provid; 449 int i, later = 0, rval; 450 451 mtx_lock(&fasttrap_cleanup_mtx); 452 while (!fasttrap_cleanup_drain || later > 0) { 453 fasttrap_cleanup_work = 0; 454 mtx_unlock(&fasttrap_cleanup_mtx); 455 456 later = 0; 457 458 /* 459 * Iterate over all the providers trying to remove the marked 460 * ones. If a provider is marked but not retired, we just 461 * have to take a crack at removing it -- it's no big deal if 462 * we can't. 463 */ 464 for (i = 0; i < fasttrap_provs.fth_nent; i++) { 465 bucket = &fasttrap_provs.fth_table[i]; 466 mutex_enter(&bucket->ftb_mtx); 467 fpp = (fasttrap_provider_t **)&bucket->ftb_data; 468 469 while ((fp = *fpp) != NULL) { 470 if (!fp->ftp_marked) { 471 fpp = &fp->ftp_next; 472 continue; 473 } 474 475 mutex_enter(&fp->ftp_mtx); 476 477 /* 478 * If this provider has consumers actively 479 * creating probes (ftp_ccount) or is a USDT 480 * provider (ftp_mcount), we can't unregister 481 * or even condense. 482 */ 483 if (fp->ftp_ccount != 0 || 484 fp->ftp_mcount != 0) { 485 mutex_exit(&fp->ftp_mtx); 486 fp->ftp_marked = 0; 487 continue; 488 } 489 490 if (!fp->ftp_retired || fp->ftp_rcount != 0) 491 fp->ftp_marked = 0; 492 493 mutex_exit(&fp->ftp_mtx); 494 495 /* 496 * If we successfully unregister this 497 * provider we can remove it from the hash 498 * chain and free the memory. If our attempt 499 * to unregister fails and this is a retired 500 * provider, increment our flag to try again 501 * pretty soon. If we've consumed more than 502 * half of our total permitted number of 503 * probes call dtrace_condense() to try to 504 * clean out the unenabled probes. 505 */ 506 provid = fp->ftp_provid; 507 if ((rval = dtrace_unregister(provid)) != 0) { 508 if (fasttrap_total > fasttrap_max / 2) 509 (void) dtrace_condense(provid); 510 511 if (rval == EAGAIN) 512 fp->ftp_marked = 1; 513 514 later += fp->ftp_marked; 515 fpp = &fp->ftp_next; 516 } else { 517 *fpp = fp->ftp_next; 518 fasttrap_provider_free(fp); 519 } 520 } 521 mutex_exit(&bucket->ftb_mtx); 522 } 523 mtx_lock(&fasttrap_cleanup_mtx); 524 525 /* 526 * If we were unable to retire a provider, try again after a 527 * second. This situation can occur in certain circumstances 528 * where providers cannot be unregistered even though they have 529 * no probes enabled because of an execution of dtrace -l or 530 * something similar. 531 */ 532 if (later > 0 || fasttrap_cleanup_work || 533 fasttrap_cleanup_drain) { 534 mtx_unlock(&fasttrap_cleanup_mtx); 535 pause("ftclean", hz); 536 mtx_lock(&fasttrap_cleanup_mtx); 537 } else 538 mtx_sleep(&fasttrap_cleanup_cv, &fasttrap_cleanup_mtx, 539 0, "ftcl", 0); 540 } 541 542 /* 543 * Wake up the thread in fasttrap_unload() now that we're done. 544 */ 545 wakeup(&fasttrap_cleanup_drain); 546 mtx_unlock(&fasttrap_cleanup_mtx); 547 548 kthread_exit(); 549 } 550 551 /* 552 * Activates the asynchronous cleanup mechanism. 553 */ 554 static void 555 fasttrap_pid_cleanup(void) 556 { 557 558 mtx_lock(&fasttrap_cleanup_mtx); 559 if (!fasttrap_cleanup_work) { 560 fasttrap_cleanup_work = 1; 561 wakeup(&fasttrap_cleanup_cv); 562 } 563 mtx_unlock(&fasttrap_cleanup_mtx); 564 } 565 566 /* 567 * This is called from cfork() via dtrace_fasttrap_fork(). The child 568 * process's address space is (roughly) a copy of the parent process's so 569 * we have to remove all the instrumentation we had previously enabled in the 570 * parent. 571 */ 572 static void 573 fasttrap_fork(proc_t *p, proc_t *cp) 574 { 575 #ifndef illumos 576 fasttrap_scrblock_t *scrblk; 577 fasttrap_proc_t *fprc = NULL; 578 #endif 579 pid_t ppid = p->p_pid; 580 int i; 581 582 #ifdef illumos 583 ASSERT(curproc == p); 584 ASSERT(p->p_proc_flag & P_PR_LOCK); 585 #else 586 PROC_LOCK_ASSERT(p, MA_OWNED); 587 #endif 588 #ifdef illumos 589 ASSERT(p->p_dtrace_count > 0); 590 #else 591 if (p->p_dtrace_helpers) { 592 /* 593 * dtrace_helpers_duplicate() allocates memory. 594 */ 595 _PHOLD(cp); 596 PROC_UNLOCK(p); 597 PROC_UNLOCK(cp); 598 dtrace_helpers_duplicate(p, cp); 599 PROC_LOCK(cp); 600 PROC_LOCK(p); 601 _PRELE(cp); 602 } 603 /* 604 * This check is purposely here instead of in kern_fork.c because, 605 * for legal resons, we cannot include the dtrace_cddl.h header 606 * inside kern_fork.c and insert if-clause there. 607 */ 608 if (p->p_dtrace_count == 0) 609 return; 610 #endif 611 ASSERT(cp->p_dtrace_count == 0); 612 613 /* 614 * This would be simpler and faster if we maintained per-process 615 * hash tables of enabled tracepoints. It could, however, potentially 616 * slow down execution of a tracepoint since we'd need to go 617 * through two levels of indirection. In the future, we should 618 * consider either maintaining per-process ancillary lists of 619 * enabled tracepoints or hanging a pointer to a per-process hash 620 * table of enabled tracepoints off the proc structure. 621 */ 622 623 /* 624 * We don't have to worry about the child process disappearing 625 * because we're in fork(). 626 */ 627 #ifdef illumos 628 mtx_lock_spin(&cp->p_slock); 629 sprlock_proc(cp); 630 mtx_unlock_spin(&cp->p_slock); 631 #else 632 /* 633 * fasttrap_tracepoint_remove() expects the child process to be 634 * unlocked and the VM then expects curproc to be unlocked. 635 */ 636 _PHOLD(cp); 637 PROC_UNLOCK(cp); 638 PROC_UNLOCK(p); 639 #endif 640 641 /* 642 * Iterate over every tracepoint looking for ones that belong to the 643 * parent process, and remove each from the child process. 644 */ 645 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) { 646 fasttrap_tracepoint_t *tp; 647 fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i]; 648 649 mutex_enter(&bucket->ftb_mtx); 650 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { 651 if (tp->ftt_pid == ppid && 652 tp->ftt_proc->ftpc_acount != 0) { 653 int ret = fasttrap_tracepoint_remove(cp, tp); 654 ASSERT(ret == 0); 655 656 /* 657 * The count of active providers can only be 658 * decremented (i.e. to zero) during exec, 659 * exit, and removal of a meta provider so it 660 * should be impossible to drop the count 661 * mid-fork. 662 */ 663 ASSERT(tp->ftt_proc->ftpc_acount != 0); 664 #ifndef illumos 665 fprc = tp->ftt_proc; 666 #endif 667 } 668 } 669 mutex_exit(&bucket->ftb_mtx); 670 671 #ifndef illumos 672 /* 673 * Unmap any scratch space inherited from the parent's address 674 * space. 675 */ 676 if (fprc != NULL) { 677 mutex_enter(&fprc->ftpc_mtx); 678 LIST_FOREACH(scrblk, &fprc->ftpc_scrblks, ftsb_next) { 679 vm_map_remove(&cp->p_vmspace->vm_map, 680 scrblk->ftsb_addr, 681 scrblk->ftsb_addr + FASTTRAP_SCRBLOCK_SIZE); 682 } 683 mutex_exit(&fprc->ftpc_mtx); 684 } 685 #endif 686 } 687 688 #ifdef illumos 689 mutex_enter(&cp->p_lock); 690 sprunlock(cp); 691 #else 692 PROC_LOCK(p); 693 PROC_LOCK(cp); 694 _PRELE(cp); 695 #endif 696 } 697 698 /* 699 * This is called from proc_exit() or from exec_common() if p_dtrace_probes 700 * is set on the proc structure to indicate that there is a pid provider 701 * associated with this process. 702 */ 703 static void 704 fasttrap_exec_exit(proc_t *p) 705 { 706 #ifndef illumos 707 struct thread *td; 708 #endif 709 710 #ifdef illumos 711 ASSERT(p == curproc); 712 #else 713 PROC_LOCK_ASSERT(p, MA_OWNED); 714 _PHOLD(p); 715 /* 716 * Since struct threads may be recycled, we cannot rely on t_dtrace_sscr 717 * fields to be zeroed by kdtrace_thread_ctor. Thus we must zero it 718 * ourselves when a process exits. 719 */ 720 FOREACH_THREAD_IN_PROC(p, td) 721 td->t_dtrace_sscr = NULL; 722 PROC_UNLOCK(p); 723 #endif 724 725 /* 726 * We clean up the pid provider for this process here; user-land 727 * static probes are handled by the meta-provider remove entry point. 728 */ 729 fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0); 730 #ifndef illumos 731 if (p->p_dtrace_helpers) 732 dtrace_helpers_destroy(p); 733 PROC_LOCK(p); 734 _PRELE(p); 735 #endif 736 } 737 738 739 /*ARGSUSED*/ 740 static void 741 fasttrap_pid_provide(void *arg, dtrace_probedesc_t *desc) 742 { 743 /* 744 * There are no "default" pid probes. 745 */ 746 } 747 748 static int 749 fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index) 750 { 751 fasttrap_tracepoint_t *tp, *new_tp = NULL; 752 fasttrap_bucket_t *bucket; 753 fasttrap_id_t *id; 754 pid_t pid; 755 uintptr_t pc; 756 757 ASSERT(index < probe->ftp_ntps); 758 759 pid = probe->ftp_pid; 760 pc = probe->ftp_tps[index].fit_tp->ftt_pc; 761 id = &probe->ftp_tps[index].fit_id; 762 763 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid); 764 765 #ifdef illumos 766 ASSERT(!(p->p_flag & SVFORK)); 767 #endif 768 769 /* 770 * Before we make any modifications, make sure we've imposed a barrier 771 * on the generation in which this probe was last modified. 772 */ 773 fasttrap_mod_barrier(probe->ftp_gen); 774 775 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)]; 776 777 /* 778 * If the tracepoint has already been enabled, just add our id to the 779 * list of interested probes. This may be our second time through 780 * this path in which case we'll have constructed the tracepoint we'd 781 * like to install. If we can't find a match, and have an allocated 782 * tracepoint ready to go, enable that one now. 783 * 784 * A tracepoint whose process is defunct is also considered defunct. 785 */ 786 again: 787 mutex_enter(&bucket->ftb_mtx); 788 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { 789 /* 790 * Note that it's safe to access the active count on the 791 * associated proc structure because we know that at least one 792 * provider (this one) will still be around throughout this 793 * operation. 794 */ 795 if (tp->ftt_pid != pid || tp->ftt_pc != pc || 796 tp->ftt_proc->ftpc_acount == 0) 797 continue; 798 799 /* 800 * Now that we've found a matching tracepoint, it would be 801 * a decent idea to confirm that the tracepoint is still 802 * enabled and the trap instruction hasn't been overwritten. 803 * Since this is a little hairy, we'll punt for now. 804 */ 805 806 /* 807 * This can't be the first interested probe. We don't have 808 * to worry about another thread being in the midst of 809 * deleting this tracepoint (which would be the only valid 810 * reason for a tracepoint to have no interested probes) 811 * since we're holding P_PR_LOCK for this process. 812 */ 813 ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL); 814 815 switch (id->fti_ptype) { 816 case DTFTP_ENTRY: 817 case DTFTP_OFFSETS: 818 case DTFTP_IS_ENABLED: 819 id->fti_next = tp->ftt_ids; 820 membar_producer(); 821 tp->ftt_ids = id; 822 membar_producer(); 823 break; 824 825 case DTFTP_RETURN: 826 case DTFTP_POST_OFFSETS: 827 id->fti_next = tp->ftt_retids; 828 membar_producer(); 829 tp->ftt_retids = id; 830 membar_producer(); 831 break; 832 833 default: 834 ASSERT(0); 835 } 836 837 mutex_exit(&bucket->ftb_mtx); 838 839 if (new_tp != NULL) { 840 new_tp->ftt_ids = NULL; 841 new_tp->ftt_retids = NULL; 842 } 843 844 return (0); 845 } 846 847 /* 848 * If we have a good tracepoint ready to go, install it now while 849 * we have the lock held and no one can screw with us. 850 */ 851 if (new_tp != NULL) { 852 int rc = 0; 853 854 new_tp->ftt_next = bucket->ftb_data; 855 membar_producer(); 856 bucket->ftb_data = new_tp; 857 membar_producer(); 858 mutex_exit(&bucket->ftb_mtx); 859 860 /* 861 * Activate the tracepoint in the ISA-specific manner. 862 * If this fails, we need to report the failure, but 863 * indicate that this tracepoint must still be disabled 864 * by calling fasttrap_tracepoint_disable(). 865 */ 866 if (fasttrap_tracepoint_install(p, new_tp) != 0) 867 rc = FASTTRAP_ENABLE_PARTIAL; 868 869 /* 870 * Increment the count of the number of tracepoints active in 871 * the victim process. 872 */ 873 #ifdef illumos 874 ASSERT(p->p_proc_flag & P_PR_LOCK); 875 #endif 876 p->p_dtrace_count++; 877 878 return (rc); 879 } 880 881 mutex_exit(&bucket->ftb_mtx); 882 883 /* 884 * Initialize the tracepoint that's been preallocated with the probe. 885 */ 886 new_tp = probe->ftp_tps[index].fit_tp; 887 888 ASSERT(new_tp->ftt_pid == pid); 889 ASSERT(new_tp->ftt_pc == pc); 890 ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc); 891 ASSERT(new_tp->ftt_ids == NULL); 892 ASSERT(new_tp->ftt_retids == NULL); 893 894 switch (id->fti_ptype) { 895 case DTFTP_ENTRY: 896 case DTFTP_OFFSETS: 897 case DTFTP_IS_ENABLED: 898 id->fti_next = NULL; 899 new_tp->ftt_ids = id; 900 break; 901 902 case DTFTP_RETURN: 903 case DTFTP_POST_OFFSETS: 904 id->fti_next = NULL; 905 new_tp->ftt_retids = id; 906 break; 907 908 default: 909 ASSERT(0); 910 } 911 912 /* 913 * If the ISA-dependent initialization goes to plan, go back to the 914 * beginning and try to install this freshly made tracepoint. 915 */ 916 if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0) 917 goto again; 918 919 new_tp->ftt_ids = NULL; 920 new_tp->ftt_retids = NULL; 921 922 return (FASTTRAP_ENABLE_FAIL); 923 } 924 925 static void 926 fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index) 927 { 928 fasttrap_bucket_t *bucket; 929 fasttrap_provider_t *provider = probe->ftp_prov; 930 fasttrap_tracepoint_t **pp, *tp; 931 fasttrap_id_t *id, **idp = NULL; 932 pid_t pid; 933 uintptr_t pc; 934 935 ASSERT(index < probe->ftp_ntps); 936 937 pid = probe->ftp_pid; 938 pc = probe->ftp_tps[index].fit_tp->ftt_pc; 939 id = &probe->ftp_tps[index].fit_id; 940 941 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid); 942 943 /* 944 * Find the tracepoint and make sure that our id is one of the 945 * ones registered with it. 946 */ 947 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)]; 948 mutex_enter(&bucket->ftb_mtx); 949 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { 950 if (tp->ftt_pid == pid && tp->ftt_pc == pc && 951 tp->ftt_proc == provider->ftp_proc) 952 break; 953 } 954 955 /* 956 * If we somehow lost this tracepoint, we're in a world of hurt. 957 */ 958 ASSERT(tp != NULL); 959 960 switch (id->fti_ptype) { 961 case DTFTP_ENTRY: 962 case DTFTP_OFFSETS: 963 case DTFTP_IS_ENABLED: 964 ASSERT(tp->ftt_ids != NULL); 965 idp = &tp->ftt_ids; 966 break; 967 968 case DTFTP_RETURN: 969 case DTFTP_POST_OFFSETS: 970 ASSERT(tp->ftt_retids != NULL); 971 idp = &tp->ftt_retids; 972 break; 973 974 default: 975 ASSERT(0); 976 } 977 978 while ((*idp)->fti_probe != probe) { 979 idp = &(*idp)->fti_next; 980 ASSERT(*idp != NULL); 981 } 982 983 id = *idp; 984 *idp = id->fti_next; 985 membar_producer(); 986 987 ASSERT(id->fti_probe == probe); 988 989 /* 990 * If there are other registered enablings of this tracepoint, we're 991 * all done, but if this was the last probe assocated with this 992 * this tracepoint, we need to remove and free it. 993 */ 994 if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) { 995 996 /* 997 * If the current probe's tracepoint is in use, swap it 998 * for an unused tracepoint. 999 */ 1000 if (tp == probe->ftp_tps[index].fit_tp) { 1001 fasttrap_probe_t *tmp_probe; 1002 fasttrap_tracepoint_t **tmp_tp; 1003 uint_t tmp_index; 1004 1005 if (tp->ftt_ids != NULL) { 1006 tmp_probe = tp->ftt_ids->fti_probe; 1007 /* LINTED - alignment */ 1008 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids); 1009 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp; 1010 } else { 1011 tmp_probe = tp->ftt_retids->fti_probe; 1012 /* LINTED - alignment */ 1013 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids); 1014 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp; 1015 } 1016 1017 ASSERT(*tmp_tp != NULL); 1018 ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp); 1019 ASSERT((*tmp_tp)->ftt_ids == NULL); 1020 ASSERT((*tmp_tp)->ftt_retids == NULL); 1021 1022 probe->ftp_tps[index].fit_tp = *tmp_tp; 1023 *tmp_tp = tp; 1024 } 1025 1026 mutex_exit(&bucket->ftb_mtx); 1027 1028 /* 1029 * Tag the modified probe with the generation in which it was 1030 * changed. 1031 */ 1032 probe->ftp_gen = fasttrap_mod_gen; 1033 return; 1034 } 1035 1036 mutex_exit(&bucket->ftb_mtx); 1037 1038 /* 1039 * We can't safely remove the tracepoint from the set of active 1040 * tracepoints until we've actually removed the fasttrap instruction 1041 * from the process's text. We can, however, operate on this 1042 * tracepoint secure in the knowledge that no other thread is going to 1043 * be looking at it since we hold P_PR_LOCK on the process if it's 1044 * live or we hold the provider lock on the process if it's dead and 1045 * gone. 1046 */ 1047 1048 /* 1049 * We only need to remove the actual instruction if we're looking 1050 * at an existing process 1051 */ 1052 if (p != NULL) { 1053 /* 1054 * If we fail to restore the instruction we need to kill 1055 * this process since it's in a completely unrecoverable 1056 * state. 1057 */ 1058 if (fasttrap_tracepoint_remove(p, tp) != 0) 1059 fasttrap_sigtrap(p, NULL, pc); 1060 1061 /* 1062 * Decrement the count of the number of tracepoints active 1063 * in the victim process. 1064 */ 1065 #ifdef illumos 1066 ASSERT(p->p_proc_flag & P_PR_LOCK); 1067 #endif 1068 p->p_dtrace_count--; 1069 } 1070 1071 /* 1072 * Remove the probe from the hash table of active tracepoints. 1073 */ 1074 mutex_enter(&bucket->ftb_mtx); 1075 pp = (fasttrap_tracepoint_t **)&bucket->ftb_data; 1076 ASSERT(*pp != NULL); 1077 while (*pp != tp) { 1078 pp = &(*pp)->ftt_next; 1079 ASSERT(*pp != NULL); 1080 } 1081 1082 *pp = tp->ftt_next; 1083 membar_producer(); 1084 1085 mutex_exit(&bucket->ftb_mtx); 1086 1087 /* 1088 * Tag the modified probe with the generation in which it was changed. 1089 */ 1090 probe->ftp_gen = fasttrap_mod_gen; 1091 } 1092 1093 static void 1094 fasttrap_enable_callbacks(void) 1095 { 1096 /* 1097 * We don't have to play the rw lock game here because we're 1098 * providing something rather than taking something away -- 1099 * we can be sure that no threads have tried to follow this 1100 * function pointer yet. 1101 */ 1102 mutex_enter(&fasttrap_count_mtx); 1103 if (fasttrap_pid_count == 0) { 1104 ASSERT(dtrace_pid_probe_ptr == NULL); 1105 ASSERT(dtrace_return_probe_ptr == NULL); 1106 dtrace_pid_probe_ptr = &fasttrap_pid_probe; 1107 dtrace_return_probe_ptr = &fasttrap_return_probe; 1108 } 1109 ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe); 1110 ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe); 1111 fasttrap_pid_count++; 1112 mutex_exit(&fasttrap_count_mtx); 1113 } 1114 1115 static void 1116 fasttrap_disable_callbacks(void) 1117 { 1118 #ifdef illumos 1119 ASSERT(MUTEX_HELD(&cpu_lock)); 1120 #endif 1121 1122 1123 mutex_enter(&fasttrap_count_mtx); 1124 ASSERT(fasttrap_pid_count > 0); 1125 fasttrap_pid_count--; 1126 if (fasttrap_pid_count == 0) { 1127 #ifdef illumos 1128 cpu_t *cur, *cpu = CPU; 1129 1130 for (cur = cpu->cpu_next_onln; cur != cpu; 1131 cur = cur->cpu_next_onln) { 1132 rw_enter(&cur->cpu_ft_lock, RW_WRITER); 1133 } 1134 #endif 1135 dtrace_pid_probe_ptr = NULL; 1136 dtrace_return_probe_ptr = NULL; 1137 #ifdef illumos 1138 for (cur = cpu->cpu_next_onln; cur != cpu; 1139 cur = cur->cpu_next_onln) { 1140 rw_exit(&cur->cpu_ft_lock); 1141 } 1142 #endif 1143 } 1144 mutex_exit(&fasttrap_count_mtx); 1145 } 1146 1147 /*ARGSUSED*/ 1148 static void 1149 fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg) 1150 { 1151 fasttrap_probe_t *probe = parg; 1152 proc_t *p = NULL; 1153 int i, rc; 1154 1155 ASSERT(probe != NULL); 1156 ASSERT(!probe->ftp_enabled); 1157 ASSERT(id == probe->ftp_id); 1158 #ifdef illumos 1159 ASSERT(MUTEX_HELD(&cpu_lock)); 1160 #endif 1161 1162 /* 1163 * Increment the count of enabled probes on this probe's provider; 1164 * the provider can't go away while the probe still exists. We 1165 * must increment this even if we aren't able to properly enable 1166 * this probe. 1167 */ 1168 mutex_enter(&probe->ftp_prov->ftp_mtx); 1169 probe->ftp_prov->ftp_rcount++; 1170 mutex_exit(&probe->ftp_prov->ftp_mtx); 1171 1172 /* 1173 * If this probe's provider is retired (meaning it was valid in a 1174 * previously exec'ed incarnation of this address space), bail out. The 1175 * provider can't go away while we're in this code path. 1176 */ 1177 if (probe->ftp_prov->ftp_retired) 1178 return; 1179 1180 /* 1181 * If we can't find the process, it may be that we're in the context of 1182 * a fork in which the traced process is being born and we're copying 1183 * USDT probes. Otherwise, the process is gone so bail. 1184 */ 1185 #ifdef illumos 1186 if ((p = sprlock(probe->ftp_pid)) == NULL) { 1187 if ((curproc->p_flag & SFORKING) == 0) 1188 return; 1189 1190 mutex_enter(&pidlock); 1191 p = prfind(probe->ftp_pid); 1192 1193 /* 1194 * Confirm that curproc is indeed forking the process in which 1195 * we're trying to enable probes. 1196 */ 1197 ASSERT(p != NULL); 1198 ASSERT(p->p_parent == curproc); 1199 ASSERT(p->p_stat == SIDL); 1200 1201 mutex_enter(&p->p_lock); 1202 mutex_exit(&pidlock); 1203 1204 sprlock_proc(p); 1205 } 1206 1207 ASSERT(!(p->p_flag & SVFORK)); 1208 mutex_exit(&p->p_lock); 1209 #else 1210 if ((p = pfind(probe->ftp_pid)) == NULL) 1211 return; 1212 #endif 1213 1214 /* 1215 * We have to enable the trap entry point before any user threads have 1216 * the chance to execute the trap instruction we're about to place 1217 * in their process's text. 1218 */ 1219 #ifdef __FreeBSD__ 1220 /* 1221 * pfind() returns a locked process. 1222 */ 1223 _PHOLD(p); 1224 PROC_UNLOCK(p); 1225 #endif 1226 fasttrap_enable_callbacks(); 1227 1228 /* 1229 * Enable all the tracepoints and add this probe's id to each 1230 * tracepoint's list of active probes. 1231 */ 1232 for (i = 0; i < probe->ftp_ntps; i++) { 1233 if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) { 1234 /* 1235 * If enabling the tracepoint failed completely, 1236 * we don't have to disable it; if the failure 1237 * was only partial we must disable it. 1238 */ 1239 if (rc == FASTTRAP_ENABLE_FAIL) 1240 i--; 1241 else 1242 ASSERT(rc == FASTTRAP_ENABLE_PARTIAL); 1243 1244 /* 1245 * Back up and pull out all the tracepoints we've 1246 * created so far for this probe. 1247 */ 1248 while (i >= 0) { 1249 fasttrap_tracepoint_disable(p, probe, i); 1250 i--; 1251 } 1252 1253 #ifdef illumos 1254 mutex_enter(&p->p_lock); 1255 sprunlock(p); 1256 #else 1257 PRELE(p); 1258 #endif 1259 1260 /* 1261 * Since we're not actually enabling this probe, 1262 * drop our reference on the trap table entry. 1263 */ 1264 fasttrap_disable_callbacks(); 1265 return; 1266 } 1267 } 1268 #ifdef illumos 1269 mutex_enter(&p->p_lock); 1270 sprunlock(p); 1271 #else 1272 PRELE(p); 1273 #endif 1274 1275 probe->ftp_enabled = 1; 1276 } 1277 1278 /*ARGSUSED*/ 1279 static void 1280 fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg) 1281 { 1282 fasttrap_probe_t *probe = parg; 1283 fasttrap_provider_t *provider = probe->ftp_prov; 1284 proc_t *p; 1285 int i, whack = 0; 1286 1287 ASSERT(id == probe->ftp_id); 1288 1289 mutex_enter(&provider->ftp_mtx); 1290 1291 /* 1292 * We won't be able to acquire a /proc-esque lock on the process 1293 * iff the process is dead and gone. In this case, we rely on the 1294 * provider lock as a point of mutual exclusion to prevent other 1295 * DTrace consumers from disabling this probe. 1296 */ 1297 if ((p = pfind(probe->ftp_pid)) != NULL) { 1298 #ifdef __FreeBSD__ 1299 if (p->p_flag & P_WEXIT) { 1300 PROC_UNLOCK(p); 1301 p = NULL; 1302 } else { 1303 _PHOLD(p); 1304 PROC_UNLOCK(p); 1305 } 1306 #endif 1307 } 1308 1309 /* 1310 * Disable all the associated tracepoints (for fully enabled probes). 1311 */ 1312 if (probe->ftp_enabled) { 1313 for (i = 0; i < probe->ftp_ntps; i++) { 1314 fasttrap_tracepoint_disable(p, probe, i); 1315 } 1316 } 1317 1318 ASSERT(provider->ftp_rcount > 0); 1319 provider->ftp_rcount--; 1320 1321 if (p != NULL) { 1322 /* 1323 * Even though we may not be able to remove it entirely, we 1324 * mark this retired provider to get a chance to remove some 1325 * of the associated probes. 1326 */ 1327 if (provider->ftp_retired && !provider->ftp_marked) 1328 whack = provider->ftp_marked = 1; 1329 mutex_exit(&provider->ftp_mtx); 1330 } else { 1331 /* 1332 * If the process is dead, we're just waiting for the 1333 * last probe to be disabled to be able to free it. 1334 */ 1335 if (provider->ftp_rcount == 0 && !provider->ftp_marked) 1336 whack = provider->ftp_marked = 1; 1337 mutex_exit(&provider->ftp_mtx); 1338 } 1339 1340 if (whack) 1341 fasttrap_pid_cleanup(); 1342 1343 #ifdef __FreeBSD__ 1344 if (p != NULL) 1345 PRELE(p); 1346 #endif 1347 if (!probe->ftp_enabled) 1348 return; 1349 1350 probe->ftp_enabled = 0; 1351 1352 #ifdef illumos 1353 ASSERT(MUTEX_HELD(&cpu_lock)); 1354 #endif 1355 fasttrap_disable_callbacks(); 1356 } 1357 1358 /*ARGSUSED*/ 1359 static void 1360 fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg, 1361 dtrace_argdesc_t *desc) 1362 { 1363 fasttrap_probe_t *probe = parg; 1364 char *str; 1365 int i, ndx; 1366 1367 desc->dtargd_native[0] = '\0'; 1368 desc->dtargd_xlate[0] = '\0'; 1369 1370 if (probe->ftp_prov->ftp_retired != 0 || 1371 desc->dtargd_ndx >= probe->ftp_nargs) { 1372 desc->dtargd_ndx = DTRACE_ARGNONE; 1373 return; 1374 } 1375 1376 ndx = (probe->ftp_argmap != NULL) ? 1377 probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx; 1378 1379 str = probe->ftp_ntypes; 1380 for (i = 0; i < ndx; i++) { 1381 str += strlen(str) + 1; 1382 } 1383 1384 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_native)); 1385 (void) strcpy(desc->dtargd_native, str); 1386 1387 if (probe->ftp_xtypes == NULL) 1388 return; 1389 1390 str = probe->ftp_xtypes; 1391 for (i = 0; i < desc->dtargd_ndx; i++) { 1392 str += strlen(str) + 1; 1393 } 1394 1395 ASSERT(strlen(str + 1) < sizeof (desc->dtargd_xlate)); 1396 (void) strcpy(desc->dtargd_xlate, str); 1397 } 1398 1399 /*ARGSUSED*/ 1400 static void 1401 fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg) 1402 { 1403 fasttrap_probe_t *probe = parg; 1404 int i; 1405 size_t size; 1406 1407 ASSERT(probe != NULL); 1408 ASSERT(!probe->ftp_enabled); 1409 ASSERT(fasttrap_total >= probe->ftp_ntps); 1410 1411 atomic_add_32(&fasttrap_total, -probe->ftp_ntps); 1412 size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]); 1413 1414 if (probe->ftp_gen + 1 >= fasttrap_mod_gen) 1415 fasttrap_mod_barrier(probe->ftp_gen); 1416 1417 for (i = 0; i < probe->ftp_ntps; i++) { 1418 kmem_free(probe->ftp_tps[i].fit_tp, 1419 sizeof (fasttrap_tracepoint_t)); 1420 } 1421 1422 kmem_free(probe, size); 1423 } 1424 1425 1426 static const dtrace_pattr_t pid_attr = { 1427 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, 1428 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 1429 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 1430 { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, 1431 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 1432 }; 1433 1434 static dtrace_pops_t pid_pops = { 1435 fasttrap_pid_provide, 1436 NULL, 1437 fasttrap_pid_enable, 1438 fasttrap_pid_disable, 1439 NULL, 1440 NULL, 1441 fasttrap_pid_getargdesc, 1442 fasttrap_pid_getarg, 1443 NULL, 1444 fasttrap_pid_destroy 1445 }; 1446 1447 static dtrace_pops_t usdt_pops = { 1448 fasttrap_pid_provide, 1449 NULL, 1450 fasttrap_pid_enable, 1451 fasttrap_pid_disable, 1452 NULL, 1453 NULL, 1454 fasttrap_pid_getargdesc, 1455 fasttrap_usdt_getarg, 1456 NULL, 1457 fasttrap_pid_destroy 1458 }; 1459 1460 static fasttrap_proc_t * 1461 fasttrap_proc_lookup(pid_t pid) 1462 { 1463 fasttrap_bucket_t *bucket; 1464 fasttrap_proc_t *fprc, *new_fprc; 1465 1466 1467 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)]; 1468 mutex_enter(&bucket->ftb_mtx); 1469 1470 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) { 1471 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) { 1472 mutex_enter(&fprc->ftpc_mtx); 1473 mutex_exit(&bucket->ftb_mtx); 1474 fprc->ftpc_rcount++; 1475 atomic_inc_64(&fprc->ftpc_acount); 1476 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount); 1477 mutex_exit(&fprc->ftpc_mtx); 1478 1479 return (fprc); 1480 } 1481 } 1482 1483 /* 1484 * Drop the bucket lock so we don't try to perform a sleeping 1485 * allocation under it. 1486 */ 1487 mutex_exit(&bucket->ftb_mtx); 1488 1489 new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP); 1490 new_fprc->ftpc_pid = pid; 1491 new_fprc->ftpc_rcount = 1; 1492 new_fprc->ftpc_acount = 1; 1493 #ifndef illumos 1494 mutex_init(&new_fprc->ftpc_mtx, "fasttrap proc mtx", MUTEX_DEFAULT, 1495 NULL); 1496 #endif 1497 1498 mutex_enter(&bucket->ftb_mtx); 1499 1500 /* 1501 * Take another lap through the list to make sure a proc hasn't 1502 * been created for this pid while we weren't under the bucket lock. 1503 */ 1504 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) { 1505 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) { 1506 mutex_enter(&fprc->ftpc_mtx); 1507 mutex_exit(&bucket->ftb_mtx); 1508 fprc->ftpc_rcount++; 1509 atomic_inc_64(&fprc->ftpc_acount); 1510 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount); 1511 mutex_exit(&fprc->ftpc_mtx); 1512 1513 kmem_free(new_fprc, sizeof (fasttrap_proc_t)); 1514 1515 return (fprc); 1516 } 1517 } 1518 1519 new_fprc->ftpc_next = bucket->ftb_data; 1520 bucket->ftb_data = new_fprc; 1521 1522 mutex_exit(&bucket->ftb_mtx); 1523 1524 return (new_fprc); 1525 } 1526 1527 static void 1528 fasttrap_proc_release(fasttrap_proc_t *proc) 1529 { 1530 fasttrap_bucket_t *bucket; 1531 fasttrap_proc_t *fprc, **fprcp; 1532 pid_t pid = proc->ftpc_pid; 1533 #ifndef illumos 1534 fasttrap_scrblock_t *scrblk, *scrblktmp; 1535 fasttrap_scrspace_t *scrspc, *scrspctmp; 1536 struct proc *p; 1537 struct thread *td; 1538 #endif 1539 1540 mutex_enter(&proc->ftpc_mtx); 1541 1542 ASSERT(proc->ftpc_rcount != 0); 1543 ASSERT(proc->ftpc_acount <= proc->ftpc_rcount); 1544 1545 if (--proc->ftpc_rcount != 0) { 1546 mutex_exit(&proc->ftpc_mtx); 1547 return; 1548 } 1549 1550 #ifndef illumos 1551 /* 1552 * Free all structures used to manage per-thread scratch space. 1553 */ 1554 LIST_FOREACH_SAFE(scrblk, &proc->ftpc_scrblks, ftsb_next, 1555 scrblktmp) { 1556 LIST_REMOVE(scrblk, ftsb_next); 1557 free(scrblk, M_SOLARIS); 1558 } 1559 LIST_FOREACH_SAFE(scrspc, &proc->ftpc_fscr, ftss_next, scrspctmp) { 1560 LIST_REMOVE(scrspc, ftss_next); 1561 free(scrspc, M_SOLARIS); 1562 } 1563 LIST_FOREACH_SAFE(scrspc, &proc->ftpc_ascr, ftss_next, scrspctmp) { 1564 LIST_REMOVE(scrspc, ftss_next); 1565 free(scrspc, M_SOLARIS); 1566 } 1567 1568 if ((p = pfind(pid)) != NULL) { 1569 FOREACH_THREAD_IN_PROC(p, td) 1570 td->t_dtrace_sscr = NULL; 1571 PROC_UNLOCK(p); 1572 } 1573 #endif 1574 1575 mutex_exit(&proc->ftpc_mtx); 1576 1577 /* 1578 * There should definitely be no live providers associated with this 1579 * process at this point. 1580 */ 1581 ASSERT(proc->ftpc_acount == 0); 1582 1583 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)]; 1584 mutex_enter(&bucket->ftb_mtx); 1585 1586 fprcp = (fasttrap_proc_t **)&bucket->ftb_data; 1587 while ((fprc = *fprcp) != NULL) { 1588 if (fprc == proc) 1589 break; 1590 1591 fprcp = &fprc->ftpc_next; 1592 } 1593 1594 /* 1595 * Something strange has happened if we can't find the proc. 1596 */ 1597 ASSERT(fprc != NULL); 1598 1599 *fprcp = fprc->ftpc_next; 1600 1601 mutex_exit(&bucket->ftb_mtx); 1602 1603 kmem_free(fprc, sizeof (fasttrap_proc_t)); 1604 } 1605 1606 /* 1607 * Lookup a fasttrap-managed provider based on its name and associated pid. 1608 * If the pattr argument is non-NULL, this function instantiates the provider 1609 * if it doesn't exist otherwise it returns NULL. The provider is returned 1610 * with its lock held. 1611 */ 1612 static fasttrap_provider_t * 1613 fasttrap_provider_lookup(pid_t pid, const char *name, 1614 const dtrace_pattr_t *pattr) 1615 { 1616 fasttrap_provider_t *fp, *new_fp = NULL; 1617 fasttrap_bucket_t *bucket; 1618 char provname[DTRACE_PROVNAMELEN]; 1619 proc_t *p; 1620 cred_t *cred; 1621 1622 ASSERT(strlen(name) < sizeof (fp->ftp_name)); 1623 ASSERT(pattr != NULL); 1624 1625 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)]; 1626 mutex_enter(&bucket->ftb_mtx); 1627 1628 /* 1629 * Take a lap through the list and return the match if we find it. 1630 */ 1631 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) { 1632 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 && 1633 !fp->ftp_retired) { 1634 mutex_enter(&fp->ftp_mtx); 1635 mutex_exit(&bucket->ftb_mtx); 1636 return (fp); 1637 } 1638 } 1639 1640 /* 1641 * Drop the bucket lock so we don't try to perform a sleeping 1642 * allocation under it. 1643 */ 1644 mutex_exit(&bucket->ftb_mtx); 1645 1646 /* 1647 * Make sure the process exists, isn't a child created as the result 1648 * of a vfork(2), and isn't a zombie (but may be in fork). 1649 */ 1650 if ((p = pfind(pid)) == NULL) 1651 return (NULL); 1652 1653 /* 1654 * Increment p_dtrace_probes so that the process knows to inform us 1655 * when it exits or execs. fasttrap_provider_free() decrements this 1656 * when we're done with this provider. 1657 */ 1658 p->p_dtrace_probes++; 1659 1660 /* 1661 * Grab the credentials for this process so we have 1662 * something to pass to dtrace_register(). 1663 */ 1664 PROC_LOCK_ASSERT(p, MA_OWNED); 1665 crhold(p->p_ucred); 1666 cred = p->p_ucred; 1667 PROC_UNLOCK(p); 1668 1669 new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP); 1670 new_fp->ftp_pid = pid; 1671 new_fp->ftp_proc = fasttrap_proc_lookup(pid); 1672 #ifndef illumos 1673 mutex_init(&new_fp->ftp_mtx, "provider mtx", MUTEX_DEFAULT, NULL); 1674 mutex_init(&new_fp->ftp_cmtx, "lock on creating", MUTEX_DEFAULT, NULL); 1675 #endif 1676 1677 ASSERT(new_fp->ftp_proc != NULL); 1678 1679 mutex_enter(&bucket->ftb_mtx); 1680 1681 /* 1682 * Take another lap through the list to make sure a provider hasn't 1683 * been created for this pid while we weren't under the bucket lock. 1684 */ 1685 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) { 1686 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 && 1687 !fp->ftp_retired) { 1688 mutex_enter(&fp->ftp_mtx); 1689 mutex_exit(&bucket->ftb_mtx); 1690 fasttrap_provider_free(new_fp); 1691 crfree(cred); 1692 return (fp); 1693 } 1694 } 1695 1696 (void) strcpy(new_fp->ftp_name, name); 1697 1698 /* 1699 * Fail and return NULL if either the provider name is too long 1700 * or we fail to register this new provider with the DTrace 1701 * framework. Note that this is the only place we ever construct 1702 * the full provider name -- we keep it in pieces in the provider 1703 * structure. 1704 */ 1705 if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >= 1706 sizeof (provname) || 1707 dtrace_register(provname, pattr, 1708 DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred, 1709 pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp, 1710 &new_fp->ftp_provid) != 0) { 1711 mutex_exit(&bucket->ftb_mtx); 1712 fasttrap_provider_free(new_fp); 1713 crfree(cred); 1714 return (NULL); 1715 } 1716 1717 new_fp->ftp_next = bucket->ftb_data; 1718 bucket->ftb_data = new_fp; 1719 1720 mutex_enter(&new_fp->ftp_mtx); 1721 mutex_exit(&bucket->ftb_mtx); 1722 1723 crfree(cred); 1724 return (new_fp); 1725 } 1726 1727 static void 1728 fasttrap_provider_free(fasttrap_provider_t *provider) 1729 { 1730 pid_t pid = provider->ftp_pid; 1731 proc_t *p; 1732 1733 /* 1734 * There need to be no associated enabled probes, no consumers 1735 * creating probes, and no meta providers referencing this provider. 1736 */ 1737 ASSERT(provider->ftp_rcount == 0); 1738 ASSERT(provider->ftp_ccount == 0); 1739 ASSERT(provider->ftp_mcount == 0); 1740 1741 /* 1742 * If this provider hasn't been retired, we need to explicitly drop the 1743 * count of active providers on the associated process structure. 1744 */ 1745 if (!provider->ftp_retired) { 1746 atomic_dec_64(&provider->ftp_proc->ftpc_acount); 1747 ASSERT(provider->ftp_proc->ftpc_acount < 1748 provider->ftp_proc->ftpc_rcount); 1749 } 1750 1751 fasttrap_proc_release(provider->ftp_proc); 1752 1753 #ifndef illumos 1754 mutex_destroy(&provider->ftp_mtx); 1755 mutex_destroy(&provider->ftp_cmtx); 1756 #endif 1757 kmem_free(provider, sizeof (fasttrap_provider_t)); 1758 1759 /* 1760 * Decrement p_dtrace_probes on the process whose provider we're 1761 * freeing. We don't have to worry about clobbering somone else's 1762 * modifications to it because we have locked the bucket that 1763 * corresponds to this process's hash chain in the provider hash 1764 * table. Don't sweat it if we can't find the process. 1765 */ 1766 if ((p = pfind(pid)) == NULL) { 1767 return; 1768 } 1769 1770 p->p_dtrace_probes--; 1771 #ifndef illumos 1772 PROC_UNLOCK(p); 1773 #endif 1774 } 1775 1776 static void 1777 fasttrap_provider_retire(pid_t pid, const char *name, int mprov) 1778 { 1779 fasttrap_provider_t *fp; 1780 fasttrap_bucket_t *bucket; 1781 dtrace_provider_id_t provid; 1782 1783 ASSERT(strlen(name) < sizeof (fp->ftp_name)); 1784 1785 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)]; 1786 mutex_enter(&bucket->ftb_mtx); 1787 1788 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) { 1789 if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 && 1790 !fp->ftp_retired) 1791 break; 1792 } 1793 1794 if (fp == NULL) { 1795 mutex_exit(&bucket->ftb_mtx); 1796 return; 1797 } 1798 1799 mutex_enter(&fp->ftp_mtx); 1800 ASSERT(!mprov || fp->ftp_mcount > 0); 1801 if (mprov && --fp->ftp_mcount != 0) { 1802 mutex_exit(&fp->ftp_mtx); 1803 mutex_exit(&bucket->ftb_mtx); 1804 return; 1805 } 1806 1807 /* 1808 * Mark the provider to be removed in our post-processing step, mark it 1809 * retired, and drop the active count on its proc. Marking it indicates 1810 * that we should try to remove it; setting the retired flag indicates 1811 * that we're done with this provider; dropping the active the proc 1812 * releases our hold, and when this reaches zero (as it will during 1813 * exit or exec) the proc and associated providers become defunct. 1814 * 1815 * We obviously need to take the bucket lock before the provider lock 1816 * to perform the lookup, but we need to drop the provider lock 1817 * before calling into the DTrace framework since we acquire the 1818 * provider lock in callbacks invoked from the DTrace framework. The 1819 * bucket lock therefore protects the integrity of the provider hash 1820 * table. 1821 */ 1822 atomic_dec_64(&fp->ftp_proc->ftpc_acount); 1823 ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount); 1824 1825 fp->ftp_retired = 1; 1826 fp->ftp_marked = 1; 1827 provid = fp->ftp_provid; 1828 mutex_exit(&fp->ftp_mtx); 1829 1830 /* 1831 * We don't have to worry about invalidating the same provider twice 1832 * since fasttrap_provider_lookup() will ignore provider that have 1833 * been marked as retired. 1834 */ 1835 dtrace_invalidate(provid); 1836 1837 mutex_exit(&bucket->ftb_mtx); 1838 1839 fasttrap_pid_cleanup(); 1840 } 1841 1842 static int 1843 fasttrap_uint32_cmp(const void *ap, const void *bp) 1844 { 1845 return (*(const uint32_t *)ap - *(const uint32_t *)bp); 1846 } 1847 1848 static int 1849 fasttrap_uint64_cmp(const void *ap, const void *bp) 1850 { 1851 return (*(const uint64_t *)ap - *(const uint64_t *)bp); 1852 } 1853 1854 static int 1855 fasttrap_add_probe(fasttrap_probe_spec_t *pdata) 1856 { 1857 fasttrap_provider_t *provider; 1858 fasttrap_probe_t *pp; 1859 fasttrap_tracepoint_t *tp; 1860 char *name; 1861 int i, aframes = 0, whack; 1862 1863 /* 1864 * There needs to be at least one desired trace point. 1865 */ 1866 if (pdata->ftps_noffs == 0) 1867 return (EINVAL); 1868 1869 switch (pdata->ftps_type) { 1870 case DTFTP_ENTRY: 1871 name = "entry"; 1872 aframes = FASTTRAP_ENTRY_AFRAMES; 1873 break; 1874 case DTFTP_RETURN: 1875 name = "return"; 1876 aframes = FASTTRAP_RETURN_AFRAMES; 1877 break; 1878 case DTFTP_OFFSETS: 1879 name = NULL; 1880 break; 1881 default: 1882 return (EINVAL); 1883 } 1884 1885 if ((provider = fasttrap_provider_lookup(pdata->ftps_pid, 1886 FASTTRAP_PID_NAME, &pid_attr)) == NULL) 1887 return (ESRCH); 1888 1889 /* 1890 * Increment this reference count to indicate that a consumer is 1891 * actively adding a new probe associated with this provider. This 1892 * prevents the provider from being deleted -- we'll need to check 1893 * for pending deletions when we drop this reference count. 1894 */ 1895 provider->ftp_ccount++; 1896 mutex_exit(&provider->ftp_mtx); 1897 1898 /* 1899 * Grab the creation lock to ensure consistency between calls to 1900 * dtrace_probe_lookup() and dtrace_probe_create() in the face of 1901 * other threads creating probes. We must drop the provider lock 1902 * before taking this lock to avoid a three-way deadlock with the 1903 * DTrace framework. 1904 */ 1905 mutex_enter(&provider->ftp_cmtx); 1906 1907 if (name == NULL) { 1908 for (i = 0; i < pdata->ftps_noffs; i++) { 1909 char name_str[17]; 1910 1911 (void) sprintf(name_str, "%llx", 1912 (unsigned long long)pdata->ftps_offs[i]); 1913 1914 if (dtrace_probe_lookup(provider->ftp_provid, 1915 pdata->ftps_mod, pdata->ftps_func, name_str) != 0) 1916 continue; 1917 1918 atomic_inc_32(&fasttrap_total); 1919 1920 if (fasttrap_total > fasttrap_max) { 1921 atomic_dec_32(&fasttrap_total); 1922 goto no_mem; 1923 } 1924 1925 pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP); 1926 1927 pp->ftp_prov = provider; 1928 pp->ftp_faddr = pdata->ftps_pc; 1929 pp->ftp_fsize = pdata->ftps_size; 1930 pp->ftp_pid = pdata->ftps_pid; 1931 pp->ftp_ntps = 1; 1932 1933 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), 1934 KM_SLEEP); 1935 1936 tp->ftt_proc = provider->ftp_proc; 1937 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc; 1938 tp->ftt_pid = pdata->ftps_pid; 1939 1940 pp->ftp_tps[0].fit_tp = tp; 1941 pp->ftp_tps[0].fit_id.fti_probe = pp; 1942 pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_type; 1943 1944 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, 1945 pdata->ftps_mod, pdata->ftps_func, name_str, 1946 FASTTRAP_OFFSET_AFRAMES, pp); 1947 } 1948 1949 } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod, 1950 pdata->ftps_func, name) == 0) { 1951 atomic_add_32(&fasttrap_total, pdata->ftps_noffs); 1952 1953 if (fasttrap_total > fasttrap_max) { 1954 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs); 1955 goto no_mem; 1956 } 1957 1958 /* 1959 * Make sure all tracepoint program counter values are unique. 1960 * We later assume that each probe has exactly one tracepoint 1961 * for a given pc. 1962 */ 1963 qsort(pdata->ftps_offs, pdata->ftps_noffs, 1964 sizeof (uint64_t), fasttrap_uint64_cmp); 1965 for (i = 1; i < pdata->ftps_noffs; i++) { 1966 if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1]) 1967 continue; 1968 1969 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs); 1970 goto no_mem; 1971 } 1972 1973 ASSERT(pdata->ftps_noffs > 0); 1974 pp = kmem_zalloc(offsetof(fasttrap_probe_t, 1975 ftp_tps[pdata->ftps_noffs]), KM_SLEEP); 1976 1977 pp->ftp_prov = provider; 1978 pp->ftp_faddr = pdata->ftps_pc; 1979 pp->ftp_fsize = pdata->ftps_size; 1980 pp->ftp_pid = pdata->ftps_pid; 1981 pp->ftp_ntps = pdata->ftps_noffs; 1982 1983 for (i = 0; i < pdata->ftps_noffs; i++) { 1984 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), 1985 KM_SLEEP); 1986 1987 tp->ftt_proc = provider->ftp_proc; 1988 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc; 1989 tp->ftt_pid = pdata->ftps_pid; 1990 1991 pp->ftp_tps[i].fit_tp = tp; 1992 pp->ftp_tps[i].fit_id.fti_probe = pp; 1993 pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_type; 1994 } 1995 1996 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, 1997 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp); 1998 } 1999 2000 mutex_exit(&provider->ftp_cmtx); 2001 2002 /* 2003 * We know that the provider is still valid since we incremented the 2004 * creation reference count. If someone tried to clean up this provider 2005 * while we were using it (e.g. because the process called exec(2) or 2006 * exit(2)), take note of that and try to clean it up now. 2007 */ 2008 mutex_enter(&provider->ftp_mtx); 2009 provider->ftp_ccount--; 2010 whack = provider->ftp_retired; 2011 mutex_exit(&provider->ftp_mtx); 2012 2013 if (whack) 2014 fasttrap_pid_cleanup(); 2015 2016 return (0); 2017 2018 no_mem: 2019 /* 2020 * If we've exhausted the allowable resources, we'll try to remove 2021 * this provider to free some up. This is to cover the case where 2022 * the user has accidentally created many more probes than was 2023 * intended (e.g. pid123:::). 2024 */ 2025 mutex_exit(&provider->ftp_cmtx); 2026 mutex_enter(&provider->ftp_mtx); 2027 provider->ftp_ccount--; 2028 provider->ftp_marked = 1; 2029 mutex_exit(&provider->ftp_mtx); 2030 2031 fasttrap_pid_cleanup(); 2032 2033 return (ENOMEM); 2034 } 2035 2036 /*ARGSUSED*/ 2037 static void * 2038 fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid) 2039 { 2040 fasttrap_provider_t *provider; 2041 2042 /* 2043 * A 32-bit unsigned integer (like a pid for example) can be 2044 * expressed in 10 or fewer decimal digits. Make sure that we'll 2045 * have enough space for the provider name. 2046 */ 2047 if (strlen(dhpv->dthpv_provname) + 10 >= 2048 sizeof (provider->ftp_name)) { 2049 printf("failed to instantiate provider %s: " 2050 "name too long to accomodate pid", dhpv->dthpv_provname); 2051 return (NULL); 2052 } 2053 2054 /* 2055 * Don't let folks spoof the true pid provider. 2056 */ 2057 if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) { 2058 printf("failed to instantiate provider %s: " 2059 "%s is an invalid name", dhpv->dthpv_provname, 2060 FASTTRAP_PID_NAME); 2061 return (NULL); 2062 } 2063 2064 /* 2065 * The highest stability class that fasttrap supports is ISA; cap 2066 * the stability of the new provider accordingly. 2067 */ 2068 if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA) 2069 dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA; 2070 if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA) 2071 dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA; 2072 if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA) 2073 dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA; 2074 if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA) 2075 dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA; 2076 if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA) 2077 dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA; 2078 2079 if ((provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname, 2080 &dhpv->dthpv_pattr)) == NULL) { 2081 printf("failed to instantiate provider %s for " 2082 "process %u", dhpv->dthpv_provname, (uint_t)pid); 2083 return (NULL); 2084 } 2085 2086 /* 2087 * Up the meta provider count so this provider isn't removed until 2088 * the meta provider has been told to remove it. 2089 */ 2090 provider->ftp_mcount++; 2091 2092 mutex_exit(&provider->ftp_mtx); 2093 2094 return (provider); 2095 } 2096 2097 /*ARGSUSED*/ 2098 static void 2099 fasttrap_meta_create_probe(void *arg, void *parg, 2100 dtrace_helper_probedesc_t *dhpb) 2101 { 2102 fasttrap_provider_t *provider = parg; 2103 fasttrap_probe_t *pp; 2104 fasttrap_tracepoint_t *tp; 2105 int i, j; 2106 uint32_t ntps; 2107 2108 /* 2109 * Since the meta provider count is non-zero we don't have to worry 2110 * about this provider disappearing. 2111 */ 2112 ASSERT(provider->ftp_mcount > 0); 2113 2114 /* 2115 * The offsets must be unique. 2116 */ 2117 qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t), 2118 fasttrap_uint32_cmp); 2119 for (i = 1; i < dhpb->dthpb_noffs; i++) { 2120 if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <= 2121 dhpb->dthpb_base + dhpb->dthpb_offs[i - 1]) 2122 return; 2123 } 2124 2125 qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t), 2126 fasttrap_uint32_cmp); 2127 for (i = 1; i < dhpb->dthpb_nenoffs; i++) { 2128 if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <= 2129 dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1]) 2130 return; 2131 } 2132 2133 /* 2134 * Grab the creation lock to ensure consistency between calls to 2135 * dtrace_probe_lookup() and dtrace_probe_create() in the face of 2136 * other threads creating probes. 2137 */ 2138 mutex_enter(&provider->ftp_cmtx); 2139 2140 if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod, 2141 dhpb->dthpb_func, dhpb->dthpb_name) != 0) { 2142 mutex_exit(&provider->ftp_cmtx); 2143 return; 2144 } 2145 2146 ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs; 2147 ASSERT(ntps > 0); 2148 2149 atomic_add_32(&fasttrap_total, ntps); 2150 2151 if (fasttrap_total > fasttrap_max) { 2152 atomic_add_32(&fasttrap_total, -ntps); 2153 mutex_exit(&provider->ftp_cmtx); 2154 return; 2155 } 2156 2157 pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP); 2158 2159 pp->ftp_prov = provider; 2160 pp->ftp_pid = provider->ftp_pid; 2161 pp->ftp_ntps = ntps; 2162 pp->ftp_nargs = dhpb->dthpb_xargc; 2163 pp->ftp_xtypes = dhpb->dthpb_xtypes; 2164 pp->ftp_ntypes = dhpb->dthpb_ntypes; 2165 2166 /* 2167 * First create a tracepoint for each actual point of interest. 2168 */ 2169 for (i = 0; i < dhpb->dthpb_noffs; i++) { 2170 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP); 2171 2172 tp->ftt_proc = provider->ftp_proc; 2173 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i]; 2174 tp->ftt_pid = provider->ftp_pid; 2175 2176 pp->ftp_tps[i].fit_tp = tp; 2177 pp->ftp_tps[i].fit_id.fti_probe = pp; 2178 #ifdef __sparc 2179 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS; 2180 #else 2181 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS; 2182 #endif 2183 } 2184 2185 /* 2186 * Then create a tracepoint for each is-enabled point. 2187 */ 2188 for (j = 0; i < ntps; i++, j++) { 2189 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP); 2190 2191 tp->ftt_proc = provider->ftp_proc; 2192 tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j]; 2193 tp->ftt_pid = provider->ftp_pid; 2194 2195 pp->ftp_tps[i].fit_tp = tp; 2196 pp->ftp_tps[i].fit_id.fti_probe = pp; 2197 pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED; 2198 } 2199 2200 /* 2201 * If the arguments are shuffled around we set the argument remapping 2202 * table. Later, when the probe fires, we only remap the arguments 2203 * if the table is non-NULL. 2204 */ 2205 for (i = 0; i < dhpb->dthpb_xargc; i++) { 2206 if (dhpb->dthpb_args[i] != i) { 2207 pp->ftp_argmap = dhpb->dthpb_args; 2208 break; 2209 } 2210 } 2211 2212 /* 2213 * The probe is fully constructed -- register it with DTrace. 2214 */ 2215 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod, 2216 dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp); 2217 2218 mutex_exit(&provider->ftp_cmtx); 2219 } 2220 2221 /*ARGSUSED*/ 2222 static void 2223 fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid) 2224 { 2225 /* 2226 * Clean up the USDT provider. There may be active consumers of the 2227 * provider busy adding probes, no damage will actually befall the 2228 * provider until that count has dropped to zero. This just puts 2229 * the provider on death row. 2230 */ 2231 fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1); 2232 } 2233 2234 static dtrace_mops_t fasttrap_mops = { 2235 fasttrap_meta_create_probe, 2236 fasttrap_meta_provide, 2237 fasttrap_meta_remove 2238 }; 2239 2240 /*ARGSUSED*/ 2241 static int 2242 fasttrap_open(struct cdev *dev __unused, int oflags __unused, 2243 int devtype __unused, struct thread *td __unused) 2244 { 2245 return (0); 2246 } 2247 2248 /*ARGSUSED*/ 2249 static int 2250 fasttrap_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int fflag, 2251 struct thread *td) 2252 { 2253 #ifdef notyet 2254 struct kinfo_proc kp; 2255 const cred_t *cr = td->td_ucred; 2256 #endif 2257 if (!dtrace_attached()) 2258 return (EAGAIN); 2259 2260 if (cmd == FASTTRAPIOC_MAKEPROBE) { 2261 fasttrap_probe_spec_t *uprobe = *(fasttrap_probe_spec_t **)arg; 2262 fasttrap_probe_spec_t *probe; 2263 uint64_t noffs; 2264 size_t size; 2265 int ret, err; 2266 2267 if (copyin(&uprobe->ftps_noffs, &noffs, 2268 sizeof (uprobe->ftps_noffs))) 2269 return (EFAULT); 2270 2271 /* 2272 * Probes must have at least one tracepoint. 2273 */ 2274 if (noffs == 0) 2275 return (EINVAL); 2276 2277 size = sizeof (fasttrap_probe_spec_t) + 2278 sizeof (probe->ftps_offs[0]) * (noffs - 1); 2279 2280 if (size > 1024 * 1024) 2281 return (ENOMEM); 2282 2283 probe = kmem_alloc(size, KM_SLEEP); 2284 2285 if (copyin(uprobe, probe, size) != 0 || 2286 probe->ftps_noffs != noffs) { 2287 kmem_free(probe, size); 2288 return (EFAULT); 2289 } 2290 2291 /* 2292 * Verify that the function and module strings contain no 2293 * funny characters. 2294 */ 2295 if (u8_validate(probe->ftps_func, strlen(probe->ftps_func), 2296 NULL, U8_VALIDATE_ENTIRE, &err) < 0) { 2297 ret = EINVAL; 2298 goto err; 2299 } 2300 2301 if (u8_validate(probe->ftps_mod, strlen(probe->ftps_mod), 2302 NULL, U8_VALIDATE_ENTIRE, &err) < 0) { 2303 ret = EINVAL; 2304 goto err; 2305 } 2306 2307 #ifdef notyet 2308 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) { 2309 proc_t *p; 2310 pid_t pid = probe->ftps_pid; 2311 2312 #ifdef illumos 2313 mutex_enter(&pidlock); 2314 #endif 2315 /* 2316 * Report an error if the process doesn't exist 2317 * or is actively being birthed. 2318 */ 2319 sx_slock(&proctree_lock); 2320 p = pfind(pid); 2321 if (p) 2322 fill_kinfo_proc(p, &kp); 2323 sx_sunlock(&proctree_lock); 2324 if (p == NULL || kp.ki_stat == SIDL) { 2325 #ifdef illumos 2326 mutex_exit(&pidlock); 2327 #endif 2328 return (ESRCH); 2329 } 2330 #ifdef illumos 2331 mutex_enter(&p->p_lock); 2332 mutex_exit(&pidlock); 2333 #else 2334 PROC_LOCK_ASSERT(p, MA_OWNED); 2335 #endif 2336 2337 #ifdef notyet 2338 if ((ret = priv_proc_cred_perm(cr, p, NULL, 2339 VREAD | VWRITE)) != 0) { 2340 #ifdef illumos 2341 mutex_exit(&p->p_lock); 2342 #else 2343 PROC_UNLOCK(p); 2344 #endif 2345 return (ret); 2346 } 2347 #endif /* notyet */ 2348 #ifdef illumos 2349 mutex_exit(&p->p_lock); 2350 #else 2351 PROC_UNLOCK(p); 2352 #endif 2353 } 2354 #endif /* notyet */ 2355 2356 ret = fasttrap_add_probe(probe); 2357 err: 2358 kmem_free(probe, size); 2359 2360 return (ret); 2361 2362 } else if (cmd == FASTTRAPIOC_GETINSTR) { 2363 fasttrap_instr_query_t instr; 2364 fasttrap_tracepoint_t *tp; 2365 uint_t index; 2366 #ifdef illumos 2367 int ret; 2368 #endif 2369 2370 #ifdef illumos 2371 if (copyin((void *)arg, &instr, sizeof (instr)) != 0) 2372 return (EFAULT); 2373 #endif 2374 2375 #ifdef notyet 2376 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) { 2377 proc_t *p; 2378 pid_t pid = instr.ftiq_pid; 2379 2380 #ifdef illumos 2381 mutex_enter(&pidlock); 2382 #endif 2383 /* 2384 * Report an error if the process doesn't exist 2385 * or is actively being birthed. 2386 */ 2387 sx_slock(&proctree_lock); 2388 p = pfind(pid); 2389 if (p) 2390 fill_kinfo_proc(p, &kp); 2391 sx_sunlock(&proctree_lock); 2392 if (p == NULL || kp.ki_stat == SIDL) { 2393 #ifdef illumos 2394 mutex_exit(&pidlock); 2395 #endif 2396 return (ESRCH); 2397 } 2398 #ifdef illumos 2399 mutex_enter(&p->p_lock); 2400 mutex_exit(&pidlock); 2401 #else 2402 PROC_LOCK_ASSERT(p, MA_OWNED); 2403 #endif 2404 2405 #ifdef notyet 2406 if ((ret = priv_proc_cred_perm(cr, p, NULL, 2407 VREAD)) != 0) { 2408 #ifdef illumos 2409 mutex_exit(&p->p_lock); 2410 #else 2411 PROC_UNLOCK(p); 2412 #endif 2413 return (ret); 2414 } 2415 #endif /* notyet */ 2416 2417 #ifdef illumos 2418 mutex_exit(&p->p_lock); 2419 #else 2420 PROC_UNLOCK(p); 2421 #endif 2422 } 2423 #endif /* notyet */ 2424 2425 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc); 2426 2427 mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx); 2428 tp = fasttrap_tpoints.fth_table[index].ftb_data; 2429 while (tp != NULL) { 2430 if (instr.ftiq_pid == tp->ftt_pid && 2431 instr.ftiq_pc == tp->ftt_pc && 2432 tp->ftt_proc->ftpc_acount != 0) 2433 break; 2434 2435 tp = tp->ftt_next; 2436 } 2437 2438 if (tp == NULL) { 2439 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx); 2440 return (ENOENT); 2441 } 2442 2443 bcopy(&tp->ftt_instr, &instr.ftiq_instr, 2444 sizeof (instr.ftiq_instr)); 2445 mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx); 2446 2447 if (copyout(&instr, (void *)arg, sizeof (instr)) != 0) 2448 return (EFAULT); 2449 2450 return (0); 2451 } 2452 2453 return (EINVAL); 2454 } 2455 2456 static int 2457 fasttrap_load(void) 2458 { 2459 ulong_t nent; 2460 int i, ret; 2461 2462 /* Create the /dev/dtrace/fasttrap entry. */ 2463 fasttrap_cdev = make_dev(&fasttrap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, 2464 "dtrace/fasttrap"); 2465 2466 mtx_init(&fasttrap_cleanup_mtx, "fasttrap clean", "dtrace", MTX_DEF); 2467 mutex_init(&fasttrap_count_mtx, "fasttrap count mtx", MUTEX_DEFAULT, 2468 NULL); 2469 2470 #ifdef illumos 2471 fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 2472 "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT); 2473 #else 2474 fasttrap_max = FASTTRAP_MAX_DEFAULT; 2475 #endif 2476 fasttrap_total = 0; 2477 2478 /* 2479 * Conjure up the tracepoints hashtable... 2480 */ 2481 #ifdef illumos 2482 nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS, 2483 "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE); 2484 #else 2485 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE; 2486 #endif 2487 2488 if (nent == 0 || nent > 0x1000000) 2489 nent = FASTTRAP_TPOINTS_DEFAULT_SIZE; 2490 2491 if (ISP2(nent)) 2492 fasttrap_tpoints.fth_nent = nent; 2493 else 2494 fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent); 2495 ASSERT(fasttrap_tpoints.fth_nent > 0); 2496 fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1; 2497 fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent * 2498 sizeof (fasttrap_bucket_t), KM_SLEEP); 2499 #ifndef illumos 2500 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) 2501 mutex_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, 2502 "tracepoints bucket mtx", MUTEX_DEFAULT, NULL); 2503 #endif 2504 2505 /* 2506 * ... and the providers hash table... 2507 */ 2508 nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE; 2509 if (ISP2(nent)) 2510 fasttrap_provs.fth_nent = nent; 2511 else 2512 fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent); 2513 ASSERT(fasttrap_provs.fth_nent > 0); 2514 fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1; 2515 fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent * 2516 sizeof (fasttrap_bucket_t), KM_SLEEP); 2517 #ifndef illumos 2518 for (i = 0; i < fasttrap_provs.fth_nent; i++) 2519 mutex_init(&fasttrap_provs.fth_table[i].ftb_mtx, 2520 "providers bucket mtx", MUTEX_DEFAULT, NULL); 2521 #endif 2522 2523 ret = kproc_create(fasttrap_pid_cleanup_cb, NULL, 2524 &fasttrap_cleanup_proc, 0, 0, "ftcleanup"); 2525 if (ret != 0) { 2526 destroy_dev(fasttrap_cdev); 2527 #ifndef illumos 2528 for (i = 0; i < fasttrap_provs.fth_nent; i++) 2529 mutex_destroy(&fasttrap_provs.fth_table[i].ftb_mtx); 2530 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) 2531 mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx); 2532 #endif 2533 kmem_free(fasttrap_provs.fth_table, fasttrap_provs.fth_nent * 2534 sizeof (fasttrap_bucket_t)); 2535 mtx_destroy(&fasttrap_cleanup_mtx); 2536 mutex_destroy(&fasttrap_count_mtx); 2537 return (ret); 2538 } 2539 2540 2541 /* 2542 * ... and the procs hash table. 2543 */ 2544 nent = FASTTRAP_PROCS_DEFAULT_SIZE; 2545 if (ISP2(nent)) 2546 fasttrap_procs.fth_nent = nent; 2547 else 2548 fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent); 2549 ASSERT(fasttrap_procs.fth_nent > 0); 2550 fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1; 2551 fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent * 2552 sizeof (fasttrap_bucket_t), KM_SLEEP); 2553 #ifndef illumos 2554 for (i = 0; i < fasttrap_procs.fth_nent; i++) 2555 mutex_init(&fasttrap_procs.fth_table[i].ftb_mtx, 2556 "processes bucket mtx", MUTEX_DEFAULT, NULL); 2557 2558 CPU_FOREACH(i) { 2559 mutex_init(&fasttrap_cpuc_pid_lock[i], "fasttrap barrier", 2560 MUTEX_DEFAULT, NULL); 2561 } 2562 2563 /* 2564 * This event handler must run before kdtrace_thread_dtor() since it 2565 * accesses the thread's struct kdtrace_thread. 2566 */ 2567 fasttrap_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor, 2568 fasttrap_thread_dtor, NULL, EVENTHANDLER_PRI_FIRST); 2569 #endif 2570 2571 /* 2572 * Install our hooks into fork(2), exec(2), and exit(2). 2573 */ 2574 dtrace_fasttrap_fork = &fasttrap_fork; 2575 dtrace_fasttrap_exit = &fasttrap_exec_exit; 2576 dtrace_fasttrap_exec = &fasttrap_exec_exit; 2577 2578 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL, 2579 &fasttrap_meta_id); 2580 2581 return (0); 2582 } 2583 2584 static int 2585 fasttrap_unload(void) 2586 { 2587 int i, fail = 0; 2588 2589 /* 2590 * Unregister the meta-provider to make sure no new fasttrap- 2591 * managed providers come along while we're trying to close up 2592 * shop. If we fail to detach, we'll need to re-register as a 2593 * meta-provider. We can fail to unregister as a meta-provider 2594 * if providers we manage still exist. 2595 */ 2596 if (fasttrap_meta_id != DTRACE_METAPROVNONE && 2597 dtrace_meta_unregister(fasttrap_meta_id) != 0) 2598 return (-1); 2599 2600 /* 2601 * Iterate over all of our providers. If there's still a process 2602 * that corresponds to that pid, fail to detach. 2603 */ 2604 for (i = 0; i < fasttrap_provs.fth_nent; i++) { 2605 fasttrap_provider_t **fpp, *fp; 2606 fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i]; 2607 2608 mutex_enter(&bucket->ftb_mtx); 2609 fpp = (fasttrap_provider_t **)&bucket->ftb_data; 2610 while ((fp = *fpp) != NULL) { 2611 /* 2612 * Acquire and release the lock as a simple way of 2613 * waiting for any other consumer to finish with 2614 * this provider. A thread must first acquire the 2615 * bucket lock so there's no chance of another thread 2616 * blocking on the provider's lock. 2617 */ 2618 mutex_enter(&fp->ftp_mtx); 2619 mutex_exit(&fp->ftp_mtx); 2620 2621 if (dtrace_unregister(fp->ftp_provid) != 0) { 2622 fail = 1; 2623 fpp = &fp->ftp_next; 2624 } else { 2625 *fpp = fp->ftp_next; 2626 fasttrap_provider_free(fp); 2627 } 2628 } 2629 2630 mutex_exit(&bucket->ftb_mtx); 2631 } 2632 2633 if (fail) { 2634 (void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL, 2635 &fasttrap_meta_id); 2636 2637 return (-1); 2638 } 2639 2640 /* 2641 * Stop new processes from entering these hooks now, before the 2642 * fasttrap_cleanup thread runs. That way all processes will hopefully 2643 * be out of these hooks before we free fasttrap_provs.fth_table 2644 */ 2645 ASSERT(dtrace_fasttrap_fork == &fasttrap_fork); 2646 dtrace_fasttrap_fork = NULL; 2647 2648 ASSERT(dtrace_fasttrap_exec == &fasttrap_exec_exit); 2649 dtrace_fasttrap_exec = NULL; 2650 2651 ASSERT(dtrace_fasttrap_exit == &fasttrap_exec_exit); 2652 dtrace_fasttrap_exit = NULL; 2653 2654 mtx_lock(&fasttrap_cleanup_mtx); 2655 fasttrap_cleanup_drain = 1; 2656 /* Wait for the cleanup thread to finish up and signal us. */ 2657 wakeup(&fasttrap_cleanup_cv); 2658 mtx_sleep(&fasttrap_cleanup_drain, &fasttrap_cleanup_mtx, 0, "ftcld", 2659 0); 2660 fasttrap_cleanup_proc = NULL; 2661 mtx_destroy(&fasttrap_cleanup_mtx); 2662 2663 #ifdef DEBUG 2664 mutex_enter(&fasttrap_count_mtx); 2665 ASSERT(fasttrap_pid_count == 0); 2666 mutex_exit(&fasttrap_count_mtx); 2667 #endif 2668 2669 #ifndef illumos 2670 EVENTHANDLER_DEREGISTER(thread_dtor, fasttrap_thread_dtor_tag); 2671 2672 for (i = 0; i < fasttrap_tpoints.fth_nent; i++) 2673 mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx); 2674 for (i = 0; i < fasttrap_provs.fth_nent; i++) 2675 mutex_destroy(&fasttrap_provs.fth_table[i].ftb_mtx); 2676 for (i = 0; i < fasttrap_procs.fth_nent; i++) 2677 mutex_destroy(&fasttrap_procs.fth_table[i].ftb_mtx); 2678 #endif 2679 kmem_free(fasttrap_tpoints.fth_table, 2680 fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t)); 2681 fasttrap_tpoints.fth_nent = 0; 2682 2683 kmem_free(fasttrap_provs.fth_table, 2684 fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t)); 2685 fasttrap_provs.fth_nent = 0; 2686 2687 kmem_free(fasttrap_procs.fth_table, 2688 fasttrap_procs.fth_nent * sizeof (fasttrap_bucket_t)); 2689 fasttrap_procs.fth_nent = 0; 2690 2691 #ifndef illumos 2692 destroy_dev(fasttrap_cdev); 2693 mutex_destroy(&fasttrap_count_mtx); 2694 CPU_FOREACH(i) { 2695 mutex_destroy(&fasttrap_cpuc_pid_lock[i]); 2696 } 2697 #endif 2698 2699 return (0); 2700 } 2701 2702 /* ARGSUSED */ 2703 static int 2704 fasttrap_modevent(module_t mod __unused, int type, void *data __unused) 2705 { 2706 int error = 0; 2707 2708 switch (type) { 2709 case MOD_LOAD: 2710 break; 2711 2712 case MOD_UNLOAD: 2713 break; 2714 2715 case MOD_SHUTDOWN: 2716 break; 2717 2718 default: 2719 error = EOPNOTSUPP; 2720 break; 2721 } 2722 return (error); 2723 } 2724 2725 SYSINIT(fasttrap_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fasttrap_load, 2726 NULL); 2727 SYSUNINIT(fasttrap_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, 2728 fasttrap_unload, NULL); 2729 2730 DEV_MODULE(fasttrap, fasttrap_modevent, NULL); 2731 MODULE_VERSION(fasttrap, 1); 2732 MODULE_DEPEND(fasttrap, dtrace, 1, 1, 1); 2733 MODULE_DEPEND(fasttrap, opensolaris, 1, 1, 1); 2734