/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Portions Copyright 2010 The FreeBSD Foundation
 *
 * $FreeBSD$
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

#include <sys/atomic.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/ddi.h>
#endif
#include <sys/sunddi.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/sysmacros.h>
#include <sys/proc.h>
#include <sys/policy.h>
#if defined(sun)
#include <util/qsort.h>
#endif
#include <sys/mutex.h>
#include <sys/kernel.h>
#if !defined(sun)
#include <sys/dtrace_bsd.h>
#include <sys/eventhandler.h>
#include <sys/u8_textprep.h>
#include <sys/user.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <cddl/dev/dtrace/dtrace_cddl.h>
#endif

/*
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are very ISA specific, the
 * methodology is generalizable to any architecture.
 *
 *
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space so don't want to rely on much
 * information there, we begin by replacing the instructions we want to trace
 * with trap instructions. Each instruction we overwrite is saved into a hash
 * table keyed by process ID and pc address. When we enter the kernel due to
 * this trap instruction, we need the effects of the replaced instruction to
 * appear to have occurred before we proceed with the user thread's
 * execution.
 *
 * Each user level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread local storage), set the PC to that
 * scratch space and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate these in the kernel is
 * not too difficult.
 *
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
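 *
 * As a concrete illustration of the multiplexing described above (the pid,
 * module, and function here are hypothetical), a consumer enabling both
 *
 *	pid123:libc.so.1:malloc:entry
 *	pid123:libc.so.1:malloc:0
 *
 * causes a single trap instruction to be patched over the first instruction
 * of malloc(); the one tracepoint at that pc carries the ids of both probes
 * and fires each of them when the trap is taken.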
 *
 *
 * Lock Ordering
 * -------------
 *
 * The lock ordering below -- both internally and with respect to the DTrace
 * framework -- is a little tricky and bears some explanation. Each provider
 * has a lock (ftp_mtx) that protects its members including reference counts
 * for enabled probes (ftp_rcount), consumers actively creating probes
 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
 * from being freed. A provider is looked up by taking the bucket lock for the
 * provider hash table, and is returned with its lock held. The provider lock
 * may be taken in functions invoked by the DTrace framework, but may not be
 * held while calling functions in the DTrace framework.
 *
 * To ensure consistency over multiple calls to the DTrace framework, the
 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
 * not be taken when holding the provider lock as that would create a cyclic
 * lock ordering. In situations where one would naturally take the provider
 * lock and then the creation lock, we instead bump a reference count to
 * prevent the provider from disappearing, drop the provider lock, and
 * acquire the creation lock.
 *
 * Briefly:
 *	bucket lock before provider lock
 *	DTrace before provider lock
 *	creation lock before DTrace
 *	never hold the provider lock and creation lock simultaneously
 */
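
/*
 * As an illustrative sketch (not a function in this file), the pattern
 * described above for escaping the provider lock before taking the creation
 * lock looks like:
 *
 *	mutex_enter(&fp->ftp_mtx);	// provider lock, from lookup
 *	fp->ftp_ccount++;		// pin the provider
 *	mutex_exit(&fp->ftp_mtx);
 *	mutex_enter(&fp->ftp_cmtx);	// creation lock; DTrace may be called
 *	...
 *	mutex_exit(&fp->ftp_cmtx);
 *
 * fasttrap_add_probe() below follows exactly this pattern.
 */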

static d_open_t fasttrap_open;
static d_ioctl_t fasttrap_ioctl;

static struct cdevsw fasttrap_cdevsw = {
        .d_version      = D_VERSION,
        .d_open         = fasttrap_open,
        .d_ioctl        = fasttrap_ioctl,
        .d_name         = "fasttrap",
};
static struct cdev *fasttrap_cdev;
static dtrace_meta_provider_id_t fasttrap_meta_id;

static struct proc *fasttrap_cleanup_proc;
static struct mtx fasttrap_cleanup_mtx;
static uint_t fasttrap_cleanup_work, fasttrap_cleanup_drain, fasttrap_cleanup_cv;

/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;

/*
 * When the fasttrap provider is loaded, fasttrap_max is set to either
 * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the
 * fasttrap.conf file. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
 */
#define FASTTRAP_MAX_DEFAULT            250000
static uint32_t fasttrap_max;
static uint32_t fasttrap_total;

/*
 * Copyright (c) 2011, Joyent, Inc. All rights reserved.
 */

#define FASTTRAP_TPOINTS_DEFAULT_SIZE   0x4000
#define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
#define FASTTRAP_PROCS_DEFAULT_SIZE     0x100

#define FASTTRAP_PID_NAME               "pid"

fasttrap_hash_t                 fasttrap_tpoints;
static fasttrap_hash_t          fasttrap_provs;
static fasttrap_hash_t          fasttrap_procs;

static uint64_t                 fasttrap_pid_count;     /* pid ref count */
static kmutex_t                 fasttrap_count_mtx;     /* lock on ref count */

#define FASTTRAP_ENABLE_FAIL    1
#define FASTTRAP_ENABLE_PARTIAL 2

static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);

static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, const char *,
    const dtrace_pattr_t *);
static void fasttrap_provider_retire(pid_t, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);

static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
static void fasttrap_proc_release(fasttrap_proc_t *);

#if !defined(sun)
static void fasttrap_thread_dtor(void *, struct thread *);
#endif

#define FASTTRAP_PROVS_INDEX(pid, name) \
        ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)

#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)

#if !defined(sun)
static kmutex_t fasttrap_cpuc_pid_lock[MAXCPU];
static eventhandler_tag fasttrap_thread_dtor_tag;
#endif

static int
fasttrap_highbit(ulong_t i)
{
        int h = 1;

        if (i == 0)
                return (0);
#ifdef _LP64
        if (i & 0xffffffff00000000ul) {
                h += 32; i >>= 32;
        }
#endif
        if (i & 0xffff0000) {
                h += 16; i >>= 16;
        }
        if (i & 0xff00) {
                h += 8; i >>= 8;
        }
        if (i & 0xf0) {
                h += 4; i >>= 4;
        }
        if (i & 0xc) {
                h += 2; i >>= 2;
        }
        if (i & 0x2) {
                h += 1;
        }
        return (h);
}

static uint_t
fasttrap_hash_str(const char *p)
{
        unsigned int g;
        uint_t hval = 0;

        while (*p) {
                hval = (hval << 4) + *p++;
                if ((g = (hval & 0xf0000000)) != 0)
                        hval ^= g >> 24;
                hval &= ~g;
        }
        return (hval);
}
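
/*
 * Worked examples for the helpers above: fasttrap_highbit() returns the
 * 1-based index of the highest bit set, so fasttrap_highbit(0x50) == 7
 * (bit 6 is the highest bit set) and fasttrap_highbit(1) == 1.
 * fasttrap_hash_str() is a conventional ELF-style string hash; for the
 * one-character name "a" it simply yields 0x61. The hash is combined with
 * the pid in FASTTRAP_PROVS_INDEX() above to spread providers across the
 * hash buckets.
 */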

void
fasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc)
{
#if defined(sun)
        sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

        sqp->sq_info.si_signo = SIGTRAP;
        sqp->sq_info.si_code = TRAP_DTRACE;
        sqp->sq_info.si_addr = (caddr_t)pc;

        mutex_enter(&p->p_lock);
        sigaddqa(p, t, sqp);
        mutex_exit(&p->p_lock);

        if (t != NULL)
                aston(t);
#else
        ksiginfo_t *ksi = kmem_zalloc(sizeof (ksiginfo_t), KM_SLEEP);

        ksiginfo_init(ksi);
        ksi->ksi_signo = SIGTRAP;
        ksi->ksi_code = TRAP_DTRACE;
        ksi->ksi_addr = (caddr_t)pc;
        PROC_LOCK(p);
        (void) tdksignal(t, SIGTRAP, ksi);
        PROC_UNLOCK(p);
#endif
}

#if !defined(sun)
/*
 * Obtain a chunk of scratch space in the address space of the target process.
 */
fasttrap_scrspace_t *
fasttrap_scraddr(struct thread *td, fasttrap_proc_t *fprc)
{
        fasttrap_scrblock_t *scrblk;
        fasttrap_scrspace_t *scrspc;
        struct proc *p;
        vm_offset_t addr;
        int error, i;

        scrspc = NULL;
        if (td->t_dtrace_sscr != NULL) {
                /* If the thread already has scratch space, we're done. */
                scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr;
                return (scrspc);
        }

        p = td->td_proc;

        mutex_enter(&fprc->ftpc_mtx);
        if (LIST_EMPTY(&fprc->ftpc_fscr)) {
                /*
                 * No scratch space is available, so we'll map a new scratch
                 * space block into the traced process' address space.
                 */
                addr = 0;
                error = vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr,
                    FASTTRAP_SCRBLOCK_SIZE, 0, VMFS_ANY_SPACE, VM_PROT_ALL,
                    VM_PROT_ALL, 0);
                if (error != KERN_SUCCESS)
                        goto done;

                scrblk = malloc(sizeof(*scrblk), M_SOLARIS, M_WAITOK);
                scrblk->ftsb_addr = addr;
                LIST_INSERT_HEAD(&fprc->ftpc_scrblks, scrblk, ftsb_next);

                /*
                 * Carve the block up into chunks and put them on the free
                 * list.
                 */
                for (i = 0;
                    i < FASTTRAP_SCRBLOCK_SIZE / FASTTRAP_SCRSPACE_SIZE; i++) {
                        scrspc = malloc(sizeof(*scrspc), M_SOLARIS, M_WAITOK);
                        scrspc->ftss_addr = addr +
                            i * FASTTRAP_SCRSPACE_SIZE;
                        LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc,
                            ftss_next);
                }
        }

        /*
         * Take the first scratch chunk off the free list, put it on the
         * allocated list, and return its address.
         */
        scrspc = LIST_FIRST(&fprc->ftpc_fscr);
        LIST_REMOVE(scrspc, ftss_next);
        LIST_INSERT_HEAD(&fprc->ftpc_ascr, scrspc, ftss_next);

        /*
         * This scratch space is reserved for use by td until the thread
         * exits.
         */
        td->t_dtrace_sscr = scrspc;

done:
        mutex_exit(&fprc->ftpc_mtx);

        return (scrspc);
}

/*
 * Return any allocated per-thread scratch space chunks back to the process'
 * free list.
 */
static void
fasttrap_thread_dtor(void *arg __unused, struct thread *td)
{
        fasttrap_bucket_t *bucket;
        fasttrap_proc_t *fprc;
        fasttrap_scrspace_t *scrspc;
        pid_t pid;

        if (td->t_dtrace_sscr == NULL)
                return;

        pid = td->td_proc->p_pid;
        bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
        fprc = NULL;

        /* Look up the fasttrap process handle for this process. */
        mutex_enter(&bucket->ftb_mtx);
        for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
                if (fprc->ftpc_pid == pid) {
                        mutex_enter(&fprc->ftpc_mtx);
                        mutex_exit(&bucket->ftb_mtx);
                        break;
                }
        }
        if (fprc == NULL) {
                mutex_exit(&bucket->ftb_mtx);
                return;
        }

        scrspc = (fasttrap_scrspace_t *)td->t_dtrace_sscr;
        LIST_REMOVE(scrspc, ftss_next);
        LIST_INSERT_HEAD(&fprc->ftpc_fscr, scrspc, ftss_next);

        mutex_exit(&fprc->ftpc_mtx);
}
#endif

/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 */
static void
fasttrap_mod_barrier(uint64_t gen)
{
        int i;

        if (gen < fasttrap_mod_gen)
                return;

        fasttrap_mod_gen++;

        CPU_FOREACH(i) {
                mutex_enter(&fasttrap_cpuc_pid_lock[i]);
                mutex_exit(&fasttrap_cpuc_pid_lock[i]);
        }
}
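
/*
 * A sketch of how the barrier above is intended to pair with probe context
 * (the consumer side lives in the ISA-specific fasttrap machinery, so this
 * description is an assumption about that code, not something defined in
 * this file): a thread handling a fasttrap trap is taken to hold its CPU's
 * fasttrap_cpuc_pid_lock while it examines tracepoint data. By taking and
 * dropping every per-CPU lock in turn, fasttrap_mod_barrier() cannot return
 * until every such critical section that began before the generation bump
 * has ended, after which memory belonging to the old generation can be
 * freed safely.
 */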

/*
 * This function performs asynchronous cleanup of fasttrap providers. The
 * Solaris implementation of this mechanism uses a timeout that's activated
 * in fasttrap_pid_cleanup(), but this doesn't work in FreeBSD: one may sleep
 * while holding the DTrace mutexes, but it is unsafe to sleep in a callout
 * handler. Thus we use a dedicated process to perform the cleanup when
 * requested.
 */
/*ARGSUSED*/
static void
fasttrap_pid_cleanup_cb(void *data)
{
        fasttrap_provider_t **fpp, *fp;
        fasttrap_bucket_t *bucket;
        dtrace_provider_id_t provid;
        int i, later = 0, rval;

        mtx_lock(&fasttrap_cleanup_mtx);
        while (!fasttrap_cleanup_drain || later > 0) {
                fasttrap_cleanup_work = 0;
                mtx_unlock(&fasttrap_cleanup_mtx);

                later = 0;

                /*
                 * Iterate over all the providers trying to remove the marked
                 * ones. If a provider is marked but not retired, we just
                 * have to take a crack at removing it -- it's no big deal if
                 * we can't.
                 */
                for (i = 0; i < fasttrap_provs.fth_nent; i++) {
                        bucket = &fasttrap_provs.fth_table[i];
                        mutex_enter(&bucket->ftb_mtx);
                        fpp = (fasttrap_provider_t **)&bucket->ftb_data;

                        while ((fp = *fpp) != NULL) {
                                if (!fp->ftp_marked) {
                                        fpp = &fp->ftp_next;
                                        continue;
                                }

                                mutex_enter(&fp->ftp_mtx);

                                /*
                                 * If this provider has consumers actively
                                 * creating probes (ftp_ccount) or is a USDT
                                 * provider (ftp_mcount), we can't unregister
                                 * or even condense.
                                 */
                                if (fp->ftp_ccount != 0 ||
                                    fp->ftp_mcount != 0) {
                                        mutex_exit(&fp->ftp_mtx);
                                        fp->ftp_marked = 0;
                                        continue;
                                }

                                if (!fp->ftp_retired || fp->ftp_rcount != 0)
                                        fp->ftp_marked = 0;

                                mutex_exit(&fp->ftp_mtx);

                                /*
                                 * If we successfully unregister this
                                 * provider we can remove it from the hash
                                 * chain and free the memory. If our attempt
                                 * to unregister fails and this is a retired
                                 * provider, increment our flag to try again
                                 * pretty soon. If we've consumed more than
                                 * half of our total permitted number of
                                 * probes call dtrace_condense() to try to
                                 * clean out the unenabled probes.
                                 */
                                provid = fp->ftp_provid;
                                if ((rval = dtrace_unregister(provid)) != 0) {
                                        if (fasttrap_total > fasttrap_max / 2)
                                                (void) dtrace_condense(provid);

                                        if (rval == EAGAIN)
                                                fp->ftp_marked = 1;

                                        later += fp->ftp_marked;
                                        fpp = &fp->ftp_next;
                                } else {
                                        *fpp = fp->ftp_next;
                                        fasttrap_provider_free(fp);
                                }
                        }
                        mutex_exit(&bucket->ftb_mtx);
                }
                mtx_lock(&fasttrap_cleanup_mtx);

                /*
                 * If we were unable to retire a provider, try again after a
                 * second. This situation can occur in certain circumstances
                 * where providers cannot be unregistered even though they
                 * have no probes enabled because of an execution of
                 * dtrace -l or something similar.
                 */
                if (later > 0 || fasttrap_cleanup_work ||
                    fasttrap_cleanup_drain) {
                        mtx_unlock(&fasttrap_cleanup_mtx);
                        pause("ftclean", hz);
                        mtx_lock(&fasttrap_cleanup_mtx);
                } else
                        mtx_sleep(&fasttrap_cleanup_cv, &fasttrap_cleanup_mtx,
                            0, "ftcl", 0);
        }

        /*
         * Wake up the thread in fasttrap_unload() now that we're done.
         */
        wakeup(&fasttrap_cleanup_drain);
        mtx_unlock(&fasttrap_cleanup_mtx);

        kthread_exit();
}
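
/*
 * The shutdown handshake implied by the wakeup above, in sketch form (the
 * fasttrap_unload() side is outside this excerpt, so its behavior is an
 * assumption): unload sets fasttrap_cleanup_drain, wakes
 * fasttrap_cleanup_cv, and then sleeps on &fasttrap_cleanup_drain until the
 * loop in fasttrap_pid_cleanup_cb() finishes its final pass and calls
 * wakeup().
 */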

/*
 * Activates the asynchronous cleanup mechanism.
 */
static void
fasttrap_pid_cleanup(void)
{

        mtx_lock(&fasttrap_cleanup_mtx);
        if (!fasttrap_cleanup_work) {
                fasttrap_cleanup_work = 1;
                wakeup(&fasttrap_cleanup_cv);
        }
        mtx_unlock(&fasttrap_cleanup_mtx);
}

/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
static void
fasttrap_fork(proc_t *p, proc_t *cp)
{
#if !defined(sun)
        fasttrap_scrblock_t *scrblk;
        fasttrap_proc_t *fprc = NULL;
#endif
        pid_t ppid = p->p_pid;
        int i;

#if defined(sun)
        ASSERT(curproc == p);
        ASSERT(p->p_proc_flag & P_PR_LOCK);
#else
        PROC_LOCK_ASSERT(p, MA_OWNED);
#endif
#if defined(sun)
        ASSERT(p->p_dtrace_count > 0);
#else
        if (p->p_dtrace_helpers) {
                /*
                 * dtrace_helpers_duplicate() allocates memory.
                 */
                _PHOLD(cp);
                PROC_UNLOCK(p);
                PROC_UNLOCK(cp);
                dtrace_helpers_duplicate(p, cp);
                PROC_LOCK(cp);
                PROC_LOCK(p);
                _PRELE(cp);
        }
        /*
         * This check is purposely here instead of in kern_fork.c because,
         * for legal reasons, we cannot include the dtrace_cddl.h header
         * inside kern_fork.c and insert an if-clause there.
         */
        if (p->p_dtrace_count == 0)
                return;
#endif
        ASSERT(cp->p_dtrace_count == 0);

        /*
         * This would be simpler and faster if we maintained per-process
         * hash tables of enabled tracepoints. It could, however, potentially
         * slow down execution of a tracepoint since we'd need to go
         * through two levels of indirection. In the future, we should
         * consider either maintaining per-process ancillary lists of
         * enabled tracepoints or hanging a pointer to a per-process hash
         * table of enabled tracepoints off the proc structure.
         */

        /*
         * We don't have to worry about the child process disappearing
         * because we're in fork().
         */
#if defined(sun)
        mtx_lock_spin(&cp->p_slock);
        sprlock_proc(cp);
        mtx_unlock_spin(&cp->p_slock);
#else
        /*
         * fasttrap_tracepoint_remove() expects the child process to be
         * unlocked and the VM then expects curproc to be unlocked.
         */
        _PHOLD(cp);
        PROC_UNLOCK(cp);
        PROC_UNLOCK(p);
#endif

        /*
         * Iterate over every tracepoint looking for ones that belong to the
         * parent process, and remove each from the child process.
         */
        for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
                fasttrap_tracepoint_t *tp;
                fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

                mutex_enter(&bucket->ftb_mtx);
                for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
                        if (tp->ftt_pid == ppid &&
                            tp->ftt_proc->ftpc_acount != 0) {
                                int ret = fasttrap_tracepoint_remove(cp, tp);
                                ASSERT(ret == 0);

                                /*
                                 * The count of active providers can only be
                                 * decremented (i.e. to zero) during exec,
                                 * exit, and removal of a meta provider so it
                                 * should be impossible to drop the count
                                 * mid-fork.
                                 */
                                ASSERT(tp->ftt_proc->ftpc_acount != 0);
#if !defined(sun)
                                fprc = tp->ftt_proc;
#endif
                        }
                }
                mutex_exit(&bucket->ftb_mtx);

#if !defined(sun)
                /*
                 * Unmap any scratch space inherited from the parent's
                 * address space.
                 */
                if (fprc != NULL) {
                        mutex_enter(&fprc->ftpc_mtx);
                        LIST_FOREACH(scrblk, &fprc->ftpc_scrblks, ftsb_next) {
                                vm_map_remove(&cp->p_vmspace->vm_map,
                                    scrblk->ftsb_addr,
                                    scrblk->ftsb_addr + FASTTRAP_SCRBLOCK_SIZE);
                        }
                        mutex_exit(&fprc->ftpc_mtx);
                }
#endif
        }

#if defined(sun)
        mutex_enter(&cp->p_lock);
        sprunlock(cp);
#else
        PROC_LOCK(p);
        PROC_LOCK(cp);
        _PRELE(cp);
#endif
}

/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 */
static void
fasttrap_exec_exit(proc_t *p)
{
#if !defined(sun)
        struct thread *td;
#endif

#if defined(sun)
        ASSERT(p == curproc);
#else
        PROC_LOCK_ASSERT(p, MA_OWNED);
        _PHOLD(p);
        /*
         * Since struct threads may be recycled, we cannot rely on the
         * t_dtrace_sscr field being zeroed by kdtrace_thread_ctor. Thus we
         * must zero it ourselves when a process exits.
         */
        FOREACH_THREAD_IN_PROC(p, td)
                td->t_dtrace_sscr = NULL;
        PROC_UNLOCK(p);
#endif

        /*
         * We clean up the pid provider for this process here; user-land
         * static probes are handled by the meta-provider remove entry point.
         */
        fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
#if !defined(sun)
        if (p->p_dtrace_helpers)
                dtrace_helpers_destroy(p);
        PROC_LOCK(p);
        _PRELE(p);
#endif
}


/*ARGSUSED*/
static void
fasttrap_pid_provide(void *arg, dtrace_probedesc_t *desc)
{
        /*
         * There are no "default" pid probes.
         */
}

static int
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
        fasttrap_tracepoint_t *tp, *new_tp = NULL;
        fasttrap_bucket_t *bucket;
        fasttrap_id_t *id;
        pid_t pid;
        uintptr_t pc;

        ASSERT(index < probe->ftp_ntps);

        pid = probe->ftp_pid;
        pc = probe->ftp_tps[index].fit_tp->ftt_pc;
        id = &probe->ftp_tps[index].fit_id;

        ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

#if defined(sun)
        ASSERT(!(p->p_flag & SVFORK));
#endif

        /*
         * Before we make any modifications, make sure we've imposed a barrier
         * on the generation in which this probe was last modified.
         */
        fasttrap_mod_barrier(probe->ftp_gen);

        bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

        /*
         * If the tracepoint has already been enabled, just add our id to the
         * list of interested probes. This may be our second time through
         * this path in which case we'll have constructed the tracepoint we'd
         * like to install. If we can't find a match, and have an allocated
         * tracepoint ready to go, enable that one now.
         *
         * A tracepoint whose process is defunct is also considered defunct.
         */
again:
        mutex_enter(&bucket->ftb_mtx);
        for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
                /*
                 * Note that it's safe to access the active count on the
                 * associated proc structure because we know that at least one
                 * provider (this one) will still be around throughout this
                 * operation.
                 */
                if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
                    tp->ftt_proc->ftpc_acount == 0)
                        continue;

                /*
                 * Now that we've found a matching tracepoint, it would be
                 * a decent idea to confirm that the tracepoint is still
                 * enabled and the trap instruction hasn't been overwritten.
                 * Since this is a little hairy, we'll punt for now.
                 */

                /*
                 * This can't be the first interested probe. We don't have
                 * to worry about another thread being in the midst of
                 * deleting this tracepoint (which would be the only valid
                 * reason for a tracepoint to have no interested probes)
                 * since we're holding P_PR_LOCK for this process.
                 */
                ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

                switch (id->fti_ptype) {
                case DTFTP_ENTRY:
                case DTFTP_OFFSETS:
                case DTFTP_IS_ENABLED:
                        id->fti_next = tp->ftt_ids;
                        membar_producer();
                        tp->ftt_ids = id;
                        membar_producer();
                        break;

                case DTFTP_RETURN:
                case DTFTP_POST_OFFSETS:
                        id->fti_next = tp->ftt_retids;
                        membar_producer();
                        tp->ftt_retids = id;
                        membar_producer();
                        break;

                default:
                        ASSERT(0);
                }

                mutex_exit(&bucket->ftb_mtx);

                if (new_tp != NULL) {
                        new_tp->ftt_ids = NULL;
                        new_tp->ftt_retids = NULL;
                }

                return (0);
        }

        /*
         * If we have a good tracepoint ready to go, install it now while
         * we have the lock held and no one can screw with us.
         */
        if (new_tp != NULL) {
                int rc = 0;

                new_tp->ftt_next = bucket->ftb_data;
                membar_producer();
                bucket->ftb_data = new_tp;
                membar_producer();
                mutex_exit(&bucket->ftb_mtx);

                /*
                 * Activate the tracepoint in the ISA-specific manner.
                 * If this fails, we need to report the failure, but
                 * indicate that this tracepoint must still be disabled
                 * by calling fasttrap_tracepoint_disable().
                 */
                if (fasttrap_tracepoint_install(p, new_tp) != 0)
                        rc = FASTTRAP_ENABLE_PARTIAL;

                /*
                 * Increment the count of the number of tracepoints active in
                 * the victim process.
                 */
#if defined(sun)
                ASSERT(p->p_proc_flag & P_PR_LOCK);
#endif
                p->p_dtrace_count++;

                return (rc);
        }

        mutex_exit(&bucket->ftb_mtx);

        /*
         * Initialize the tracepoint that's been preallocated with the probe.
         */
        new_tp = probe->ftp_tps[index].fit_tp;

        ASSERT(new_tp->ftt_pid == pid);
        ASSERT(new_tp->ftt_pc == pc);
        ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
        ASSERT(new_tp->ftt_ids == NULL);
        ASSERT(new_tp->ftt_retids == NULL);

        switch (id->fti_ptype) {
        case DTFTP_ENTRY:
        case DTFTP_OFFSETS:
        case DTFTP_IS_ENABLED:
                id->fti_next = NULL;
                new_tp->ftt_ids = id;
                break;

        case DTFTP_RETURN:
        case DTFTP_POST_OFFSETS:
                id->fti_next = NULL;
                new_tp->ftt_retids = id;
                break;

        default:
                ASSERT(0);
        }

        /*
         * If the ISA-dependent initialization goes to plan, go back to the
         * beginning and try to install this freshly made tracepoint.
         */
        if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
                goto again;

        new_tp->ftt_ids = NULL;
        new_tp->ftt_retids = NULL;

        return (FASTTRAP_ENABLE_FAIL);
}
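
/*
 * Note on the membar_producer() calls above: a new id is fully initialized
 * (fti_next linked) before the store that publishes it in ftt_ids or
 * ftt_retids, and a new tracepoint is fully initialized before the store
 * that publishes it in ftb_data. The probe-context consumer walks these
 * lists without taking the bucket lock, so publication order is what keeps
 * it from seeing a half-constructed entry.
 */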

static void
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
        fasttrap_bucket_t *bucket;
        fasttrap_provider_t *provider = probe->ftp_prov;
        fasttrap_tracepoint_t **pp, *tp;
        fasttrap_id_t *id, **idp = NULL;
        pid_t pid;
        uintptr_t pc;

        ASSERT(index < probe->ftp_ntps);

        pid = probe->ftp_pid;
        pc = probe->ftp_tps[index].fit_tp->ftt_pc;
        id = &probe->ftp_tps[index].fit_id;

        ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

        /*
         * Find the tracepoint and make sure that our id is one of the
         * ones registered with it.
         */
        bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
        mutex_enter(&bucket->ftb_mtx);
        for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
                if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
                    tp->ftt_proc == provider->ftp_proc)
                        break;
        }

        /*
         * If we somehow lost this tracepoint, we're in a world of hurt.
         */
        ASSERT(tp != NULL);

        switch (id->fti_ptype) {
        case DTFTP_ENTRY:
        case DTFTP_OFFSETS:
        case DTFTP_IS_ENABLED:
                ASSERT(tp->ftt_ids != NULL);
                idp = &tp->ftt_ids;
                break;

        case DTFTP_RETURN:
        case DTFTP_POST_OFFSETS:
                ASSERT(tp->ftt_retids != NULL);
                idp = &tp->ftt_retids;
                break;

        default:
                ASSERT(0);
        }

        while ((*idp)->fti_probe != probe) {
                idp = &(*idp)->fti_next;
                ASSERT(*idp != NULL);
        }

        id = *idp;
        *idp = id->fti_next;
        membar_producer();

        ASSERT(id->fti_probe == probe);

        /*
         * If there are other registered enablings of this tracepoint, we're
         * all done, but if this was the last probe associated with this
         * tracepoint, we need to remove and free it.
         */
        if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {

                /*
                 * If the current probe's tracepoint is in use, swap it
                 * for an unused tracepoint.
                 */
                if (tp == probe->ftp_tps[index].fit_tp) {
                        fasttrap_probe_t *tmp_probe;
                        fasttrap_tracepoint_t **tmp_tp;
                        uint_t tmp_index;

                        if (tp->ftt_ids != NULL) {
                                tmp_probe = tp->ftt_ids->fti_probe;
                                /* LINTED - alignment */
                                tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
                                tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
                        } else {
                                tmp_probe = tp->ftt_retids->fti_probe;
                                /* LINTED - alignment */
                                tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
                                tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
                        }

                        ASSERT(*tmp_tp != NULL);
                        ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
                        ASSERT((*tmp_tp)->ftt_ids == NULL);
                        ASSERT((*tmp_tp)->ftt_retids == NULL);

                        probe->ftp_tps[index].fit_tp = *tmp_tp;
                        *tmp_tp = tp;
                }

                mutex_exit(&bucket->ftb_mtx);

                /*
                 * Tag the modified probe with the generation in which it was
                 * changed.
                 */
                probe->ftp_gen = fasttrap_mod_gen;
                return;
        }

        mutex_exit(&bucket->ftb_mtx);

        /*
         * We can't safely remove the tracepoint from the set of active
         * tracepoints until we've actually removed the fasttrap instruction
         * from the process's text. We can, however, operate on this
         * tracepoint secure in the knowledge that no other thread is going to
         * be looking at it since we hold P_PR_LOCK on the process if it's
         * live or we hold the provider lock on the process if it's dead and
         * gone.
         */

        /*
         * We only need to remove the actual instruction if we're looking
         * at an existing process.
         */
        if (p != NULL) {
                /*
                 * If we fail to restore the instruction we need to kill
                 * this process since it's in a completely unrecoverable
                 * state.
                 */
                if (fasttrap_tracepoint_remove(p, tp) != 0)
                        fasttrap_sigtrap(p, NULL, pc);

                /*
                 * Decrement the count of the number of tracepoints active
                 * in the victim process.
                 */
#if defined(sun)
                ASSERT(p->p_proc_flag & P_PR_LOCK);
#endif
                p->p_dtrace_count--;
        }

        /*
         * Remove the probe from the hash table of active tracepoints.
         */
        mutex_enter(&bucket->ftb_mtx);
        pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
        ASSERT(*pp != NULL);
        while (*pp != tp) {
                pp = &(*pp)->ftt_next;
                ASSERT(*pp != NULL);
        }

        *pp = tp->ftt_next;
        membar_producer();

        mutex_exit(&bucket->ftb_mtx);

        /*
         * Tag the modified probe with the generation in which it was changed.
         */
        probe->ftp_gen = fasttrap_mod_gen;
}
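
/*
 * A gloss on the tracepoint swap above (an interpretation of the invariant
 * the code maintains, not new machinery): each probe preallocates one
 * tracepoint per fit_tp slot so that enabling never needs to allocate. If
 * the tracepoint this probe owns is still in use by other probes at disable
 * time, the probe trades it for an idle tracepoint owned by one of those
 * remaining probes, preserving the invariant that every probe slot always
 * owns a tracepoint it can offer up the next time it is enabled.
 */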

static void
fasttrap_enable_callbacks(void)
{
        /*
         * We don't have to play the rw lock game here because we're
         * providing something rather than taking something away --
         * we can be sure that no threads have tried to follow this
         * function pointer yet.
         */
        mutex_enter(&fasttrap_count_mtx);
        if (fasttrap_pid_count == 0) {
                ASSERT(dtrace_pid_probe_ptr == NULL);
                ASSERT(dtrace_return_probe_ptr == NULL);
                dtrace_pid_probe_ptr = &fasttrap_pid_probe;
                dtrace_return_probe_ptr = &fasttrap_return_probe;
        }
        ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
        ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
        fasttrap_pid_count++;
        mutex_exit(&fasttrap_count_mtx);
}

static void
fasttrap_disable_callbacks(void)
{
#if defined(sun)
        ASSERT(MUTEX_HELD(&cpu_lock));
#endif

        mutex_enter(&fasttrap_count_mtx);
        ASSERT(fasttrap_pid_count > 0);
        fasttrap_pid_count--;
        if (fasttrap_pid_count == 0) {
#if defined(sun)
                cpu_t *cur, *cpu = CPU;

                for (cur = cpu->cpu_next_onln; cur != cpu;
                    cur = cur->cpu_next_onln) {
                        rw_enter(&cur->cpu_ft_lock, RW_WRITER);
                }
#endif
                dtrace_pid_probe_ptr = NULL;
                dtrace_return_probe_ptr = NULL;
#if defined(sun)
                for (cur = cpu->cpu_next_onln; cur != cpu;
                    cur = cur->cpu_next_onln) {
                        rw_exit(&cur->cpu_ft_lock);
                }
#endif
        }
        mutex_exit(&fasttrap_count_mtx);
}

/*ARGSUSED*/
static void
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
        fasttrap_probe_t *probe = parg;
        proc_t *p = NULL;
        int i, rc;

        ASSERT(probe != NULL);
        ASSERT(!probe->ftp_enabled);
        ASSERT(id == probe->ftp_id);
#if defined(sun)
        ASSERT(MUTEX_HELD(&cpu_lock));
#endif

        /*
         * Increment the count of enabled probes on this probe's provider;
         * the provider can't go away while the probe still exists. We
         * must increment this even if we aren't able to properly enable
         * this probe.
         */
        mutex_enter(&probe->ftp_prov->ftp_mtx);
        probe->ftp_prov->ftp_rcount++;
        mutex_exit(&probe->ftp_prov->ftp_mtx);

        /*
         * If this probe's provider is retired (meaning it was valid in a
         * previously exec'ed incarnation of this address space), bail out.
         * The provider can't go away while we're in this code path.
         */
        if (probe->ftp_prov->ftp_retired)
                return;

        /*
         * If we can't find the process, it may be that we're in the context
         * of a fork in which the traced process is being born and we're
         * copying USDT probes. Otherwise, the process is gone so bail.
         */
#if defined(sun)
        if ((p = sprlock(probe->ftp_pid)) == NULL) {
                if ((curproc->p_flag & SFORKING) == 0)
                        return;

                mutex_enter(&pidlock);
                p = prfind(probe->ftp_pid);

                /*
                 * Confirm that curproc is indeed forking the process in which
                 * we're trying to enable probes.
                 */
                ASSERT(p != NULL);
                ASSERT(p->p_parent == curproc);
                ASSERT(p->p_stat == SIDL);

                mutex_enter(&p->p_lock);
                mutex_exit(&pidlock);

                sprlock_proc(p);
        }

        ASSERT(!(p->p_flag & SVFORK));
        mutex_exit(&p->p_lock);
#else
        if ((p = pfind(probe->ftp_pid)) == NULL)
                return;
#endif

        /*
         * We have to enable the trap entry point before any user threads have
         * the chance to execute the trap instruction we're about to place
         * in their process's text.
         */
#ifdef __FreeBSD__
        /*
         * pfind() returns a locked process.
         */
        _PHOLD(p);
        PROC_UNLOCK(p);
#endif
        fasttrap_enable_callbacks();

        /*
         * Enable all the tracepoints and add this probe's id to each
         * tracepoint's list of active probes.
         */
        for (i = 0; i < probe->ftp_ntps; i++) {
                if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
                        /*
                         * If enabling the tracepoint failed completely,
                         * we don't have to disable it; if the failure
                         * was only partial we must disable it.
                         */
                        if (rc == FASTTRAP_ENABLE_FAIL)
                                i--;
                        else
                                ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

                        /*
                         * Back up and pull out all the tracepoints we've
                         * created so far for this probe.
                         */
                        while (i >= 0) {
                                fasttrap_tracepoint_disable(p, probe, i);
                                i--;
                        }

#if defined(sun)
                        mutex_enter(&p->p_lock);
                        sprunlock(p);
#else
                        PRELE(p);
#endif

                        /*
                         * Since we're not actually enabling this probe,
                         * drop our reference on the trap table entry.
                         */
                        fasttrap_disable_callbacks();
                        return;
                }
        }
#if defined(sun)
        mutex_enter(&p->p_lock);
        sprunlock(p);
#else
        PRELE(p);
#endif

        probe->ftp_enabled = 1;
}
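
/*
 * To make the failure handling above concrete: FASTTRAP_ENABLE_FAIL means
 * tracepoint i was never installed, so the rollback starts at i - 1;
 * FASTTRAP_ENABLE_PARTIAL means tracepoint i was partially set up and must
 * itself be passed to fasttrap_tracepoint_disable(), so the rollback starts
 * at i.
 */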

/*ARGSUSED*/
static void
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
        fasttrap_probe_t *probe = parg;
        fasttrap_provider_t *provider = probe->ftp_prov;
        proc_t *p;
        int i, whack = 0;

        ASSERT(id == probe->ftp_id);

        mutex_enter(&provider->ftp_mtx);

        /*
         * We won't be able to acquire a /proc-esque lock on the process
         * iff the process is dead and gone. In this case, we rely on the
         * provider lock as a point of mutual exclusion to prevent other
         * DTrace consumers from disabling this probe.
         */
        if ((p = pfind(probe->ftp_pid)) != NULL) {
#ifdef __FreeBSD__
                _PHOLD(p);
                PROC_UNLOCK(p);
#endif
        }

        /*
         * Disable all the associated tracepoints (for fully enabled probes).
         */
        if (probe->ftp_enabled) {
                for (i = 0; i < probe->ftp_ntps; i++) {
                        fasttrap_tracepoint_disable(p, probe, i);
                }
        }

        ASSERT(provider->ftp_rcount > 0);
        provider->ftp_rcount--;

        if (p != NULL) {
                /*
                 * Even though we may not be able to remove it entirely, we
                 * mark this retired provider to get a chance to remove some
                 * of the associated probes.
                 */
                if (provider->ftp_retired && !provider->ftp_marked)
                        whack = provider->ftp_marked = 1;
                mutex_exit(&provider->ftp_mtx);
        } else {
                /*
                 * If the process is dead, we're just waiting for the
                 * last probe to be disabled to be able to free it.
                 */
                if (provider->ftp_rcount == 0 && !provider->ftp_marked)
                        whack = provider->ftp_marked = 1;
                mutex_exit(&provider->ftp_mtx);
        }

        if (whack)
                fasttrap_pid_cleanup();

#ifdef __FreeBSD__
        if (p != NULL)
                PRELE(p);
#endif
        if (!probe->ftp_enabled)
                return;

        probe->ftp_enabled = 0;

#if defined(sun)
        ASSERT(MUTEX_HELD(&cpu_lock));
#endif
        fasttrap_disable_callbacks();
}

/*ARGSUSED*/
static void
fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
    dtrace_argdesc_t *desc)
{
        fasttrap_probe_t *probe = parg;
        char *str;
        int i, ndx;

        desc->dtargd_native[0] = '\0';
        desc->dtargd_xlate[0] = '\0';

        if (probe->ftp_prov->ftp_retired != 0 ||
            desc->dtargd_ndx >= probe->ftp_nargs) {
                desc->dtargd_ndx = DTRACE_ARGNONE;
                return;
        }

        ndx = (probe->ftp_argmap != NULL) ?
            probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;

        str = probe->ftp_ntypes;
        for (i = 0; i < ndx; i++) {
                str += strlen(str) + 1;
        }

        ASSERT(strlen(str + 1) < sizeof (desc->dtargd_native));
        (void) strcpy(desc->dtargd_native, str);

        if (probe->ftp_xtypes == NULL)
                return;

        str = probe->ftp_xtypes;
        for (i = 0; i < desc->dtargd_ndx; i++) {
                str += strlen(str) + 1;
        }

        ASSERT(strlen(str + 1) < sizeof (desc->dtargd_xlate));
        (void) strcpy(desc->dtargd_xlate, str);
}

/*ARGSUSED*/
static void
fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
{
        fasttrap_probe_t *probe = parg;
        int i;
        size_t size;

        ASSERT(probe != NULL);
        ASSERT(!probe->ftp_enabled);
        ASSERT(fasttrap_total >= probe->ftp_ntps);

        atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
        size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);

        if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
                fasttrap_mod_barrier(probe->ftp_gen);

        for (i = 0; i < probe->ftp_ntps; i++) {
                kmem_free(probe->ftp_tps[i].fit_tp,
                    sizeof (fasttrap_tracepoint_t));
        }

        kmem_free(probe, size);
}


static const dtrace_pattr_t pid_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};

static dtrace_pops_t pid_pops = {
        fasttrap_pid_provide,
        NULL,
        fasttrap_pid_enable,
        fasttrap_pid_disable,
        NULL,
        NULL,
        fasttrap_pid_getargdesc,
        fasttrap_pid_getarg,
        NULL,
        fasttrap_pid_destroy
};

static dtrace_pops_t usdt_pops = {
        fasttrap_pid_provide,
        NULL,
        fasttrap_pid_enable,
        fasttrap_pid_disable,
        NULL,
        NULL,
        fasttrap_pid_getargdesc,
        fasttrap_usdt_getarg,
        NULL,
        fasttrap_pid_destroy
};
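
/*
 * An illustration of the ftp_argmap handling in fasttrap_pid_getargdesc()
 * above (the probe shape here is hypothetical; the lookup mechanics are as
 * coded): if a USDT probe presents its arguments as (arg1, arg0) relative
 * to the native function's (arg0, arg1), then ftp_argmap[] is { 1, 0 }.
 * The native type for translated argument 0 is then found at native index
 * 1 in the ftp_ntypes string table, while the translated type is found at
 * index 0 in ftp_xtypes.
 */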

static fasttrap_proc_t *
fasttrap_proc_lookup(pid_t pid)
{
        fasttrap_bucket_t *bucket;
        fasttrap_proc_t *fprc, *new_fprc;

        bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
        mutex_enter(&bucket->ftb_mtx);

        for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
                if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
                        mutex_enter(&fprc->ftpc_mtx);
                        mutex_exit(&bucket->ftb_mtx);
                        fprc->ftpc_rcount++;
                        atomic_inc_64(&fprc->ftpc_acount);
                        ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
                        mutex_exit(&fprc->ftpc_mtx);

                        return (fprc);
                }
        }

        /*
         * Drop the bucket lock so we don't try to perform a sleeping
         * allocation under it.
         */
        mutex_exit(&bucket->ftb_mtx);

        new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
        new_fprc->ftpc_pid = pid;
        new_fprc->ftpc_rcount = 1;
        new_fprc->ftpc_acount = 1;
#if !defined(sun)
        mutex_init(&new_fprc->ftpc_mtx, "fasttrap proc mtx", MUTEX_DEFAULT,
            NULL);
#endif

        mutex_enter(&bucket->ftb_mtx);

        /*
         * Take another lap through the list to make sure a proc hasn't
         * been created for this pid while we weren't under the bucket lock.
         */
        for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
                if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
                        mutex_enter(&fprc->ftpc_mtx);
                        mutex_exit(&bucket->ftb_mtx);
                        fprc->ftpc_rcount++;
                        atomic_inc_64(&fprc->ftpc_acount);
                        ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
                        mutex_exit(&fprc->ftpc_mtx);

                        kmem_free(new_fprc, sizeof (fasttrap_proc_t));

                        return (fprc);
                }
        }

        new_fprc->ftpc_next = bucket->ftb_data;
        bucket->ftb_data = new_fprc;

        mutex_exit(&bucket->ftb_mtx);

        return (new_fprc);
}

static void
fasttrap_proc_release(fasttrap_proc_t *proc)
{
        fasttrap_bucket_t *bucket;
        fasttrap_proc_t *fprc, **fprcp;
        pid_t pid = proc->ftpc_pid;
#if !defined(sun)
        fasttrap_scrblock_t *scrblk, *scrblktmp;
        fasttrap_scrspace_t *scrspc, *scrspctmp;
        struct proc *p;
        struct thread *td;
#endif

        mutex_enter(&proc->ftpc_mtx);

        ASSERT(proc->ftpc_rcount != 0);
        ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);

        if (--proc->ftpc_rcount != 0) {
                mutex_exit(&proc->ftpc_mtx);
                return;
        }

#if !defined(sun)
        /*
         * Free all structures used to manage per-thread scratch space.
         */
        LIST_FOREACH_SAFE(scrblk, &proc->ftpc_scrblks, ftsb_next,
            scrblktmp) {
                LIST_REMOVE(scrblk, ftsb_next);
                free(scrblk, M_SOLARIS);
        }
        LIST_FOREACH_SAFE(scrspc, &proc->ftpc_fscr, ftss_next, scrspctmp) {
                LIST_REMOVE(scrspc, ftss_next);
                free(scrspc, M_SOLARIS);
        }
        LIST_FOREACH_SAFE(scrspc, &proc->ftpc_ascr, ftss_next, scrspctmp) {
                LIST_REMOVE(scrspc, ftss_next);
                free(scrspc, M_SOLARIS);
        }

        if ((p = pfind(pid)) != NULL) {
                FOREACH_THREAD_IN_PROC(p, td)
                        td->t_dtrace_sscr = NULL;
                PROC_UNLOCK(p);
        }
#endif

        mutex_exit(&proc->ftpc_mtx);

        /*
         * There should definitely be no live providers associated with this
         * process at this point.
         */
        ASSERT(proc->ftpc_acount == 0);

        bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
        mutex_enter(&bucket->ftb_mtx);

        fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
        while ((fprc = *fprcp) != NULL) {
                if (fprc == proc)
                        break;

                fprcp = &fprc->ftpc_next;
        }

        /*
         * Something strange has happened if we can't find the proc.
         */
        ASSERT(fprc != NULL);

        *fprcp = fprc->ftpc_next;

        mutex_exit(&bucket->ftb_mtx);

        kmem_free(fprc, sizeof (fasttrap_proc_t));
}
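
/*
 * The two counts manipulated above deserve a gloss: ftpc_rcount counts all
 * references to the fasttrap_proc_t and controls when the structure is
 * freed, while ftpc_acount counts only the providers that are still active,
 * i.e. not yet retired. The ASSERTs encode the resulting invariant
 * ftpc_acount <= ftpc_rcount, and lookups treat an entry with
 * ftpc_acount == 0 as defunct even though it has not yet been freed.
 */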

/*
 * Lookup a fasttrap-managed provider based on its name and associated pid.
 * If the pattr argument is non-NULL, this function instantiates the provider
 * if it doesn't exist; otherwise it returns NULL. The provider is returned
 * with its lock held.
 */
static fasttrap_provider_t *
fasttrap_provider_lookup(pid_t pid, const char *name,
    const dtrace_pattr_t *pattr)
{
        fasttrap_provider_t *fp, *new_fp = NULL;
        fasttrap_bucket_t *bucket;
        char provname[DTRACE_PROVNAMELEN];
        proc_t *p;
        cred_t *cred;

        ASSERT(strlen(name) < sizeof (fp->ftp_name));
        ASSERT(pattr != NULL);

        bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
        mutex_enter(&bucket->ftb_mtx);

        /*
         * Take a lap through the list and return the match if we find it.
         */
        for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
                if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
                    !fp->ftp_retired) {
                        mutex_enter(&fp->ftp_mtx);
                        mutex_exit(&bucket->ftb_mtx);
                        return (fp);
                }
        }

        /*
         * Drop the bucket lock so we don't try to perform a sleeping
         * allocation under it.
         */
        mutex_exit(&bucket->ftb_mtx);

        /*
         * Make sure the process exists, isn't a child created as the result
         * of a vfork(2), and isn't a zombie (but may be in fork).
         */
        if ((p = pfind(pid)) == NULL)
                return (NULL);

        /*
         * Increment p_dtrace_probes so that the process knows to inform us
         * when it exits or execs. fasttrap_provider_free() decrements this
         * when we're done with this provider.
         */
        p->p_dtrace_probes++;

        /*
         * Grab the credentials for this process so we have
         * something to pass to dtrace_register().
         */
        PROC_LOCK_ASSERT(p, MA_OWNED);
        crhold(p->p_ucred);
        cred = p->p_ucred;
        PROC_UNLOCK(p);

        new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
        new_fp->ftp_pid = pid;
        new_fp->ftp_proc = fasttrap_proc_lookup(pid);
#if !defined(sun)
        mutex_init(&new_fp->ftp_mtx, "provider mtx", MUTEX_DEFAULT, NULL);
        mutex_init(&new_fp->ftp_cmtx, "lock on creating", MUTEX_DEFAULT, NULL);
#endif

        ASSERT(new_fp->ftp_proc != NULL);

        mutex_enter(&bucket->ftb_mtx);

        /*
         * Take another lap through the list to make sure a provider hasn't
         * been created for this pid while we weren't under the bucket lock.
         */
        for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
                if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
                    !fp->ftp_retired) {
                        mutex_enter(&fp->ftp_mtx);
                        mutex_exit(&bucket->ftb_mtx);
                        fasttrap_provider_free(new_fp);
                        crfree(cred);
                        return (fp);
                }
        }

        (void) strcpy(new_fp->ftp_name, name);

        /*
         * Fail and return NULL if either the provider name is too long
         * or we fail to register this new provider with the DTrace
         * framework. Note that this is the only place we ever construct
         * the full provider name -- we keep it in pieces in the provider
         * structure.
         */
        if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
            sizeof (provname) ||
            dtrace_register(provname, pattr,
            DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
            pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
            &new_fp->ftp_provid) != 0) {
                mutex_exit(&bucket->ftb_mtx);
                fasttrap_provider_free(new_fp);
                crfree(cred);
                return (NULL);
        }

        new_fp->ftp_next = bucket->ftb_data;
        bucket->ftb_data = new_fp;

        mutex_enter(&new_fp->ftp_mtx);
        mutex_exit(&bucket->ftb_mtx);

        crfree(cred);
        return (new_fp);
}
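
/*
 * For example, the pid provider for process 123 registers under the full
 * name "pid123" (FASTTRAP_PID_NAME followed by the decimal pid), which is
 * the name consumers use in probe descriptions such as pid123:::entry.
 */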

static void
fasttrap_provider_free(fasttrap_provider_t *provider)
{
        pid_t pid = provider->ftp_pid;
        proc_t *p;

        /*
         * There need to be no associated enabled probes, no consumers
         * creating probes, and no meta providers referencing this provider.
         */
        ASSERT(provider->ftp_rcount == 0);
        ASSERT(provider->ftp_ccount == 0);
        ASSERT(provider->ftp_mcount == 0);

        /*
         * If this provider hasn't been retired, we need to explicitly drop
         * the count of active providers on the associated process structure.
         */
        if (!provider->ftp_retired) {
                atomic_dec_64(&provider->ftp_proc->ftpc_acount);
                ASSERT(provider->ftp_proc->ftpc_acount <
                    provider->ftp_proc->ftpc_rcount);
        }

        fasttrap_proc_release(provider->ftp_proc);

#if !defined(sun)
        mutex_destroy(&provider->ftp_mtx);
        mutex_destroy(&provider->ftp_cmtx);
#endif
        kmem_free(provider, sizeof (fasttrap_provider_t));

        /*
         * Decrement p_dtrace_probes on the process whose provider we're
         * freeing. We don't have to worry about clobbering someone else's
         * modifications to it because we have locked the bucket that
         * corresponds to this process's hash chain in the provider hash
         * table. Don't sweat it if we can't find the process.
         */
        if ((p = pfind(pid)) == NULL) {
                return;
        }

        p->p_dtrace_probes--;
#if !defined(sun)
        PROC_UNLOCK(p);
#endif
}

static void
fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
{
        fasttrap_provider_t *fp;
        fasttrap_bucket_t *bucket;
        dtrace_provider_id_t provid;

        ASSERT(strlen(name) < sizeof (fp->ftp_name));

        bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
        mutex_enter(&bucket->ftb_mtx);

        for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
                if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
                    !fp->ftp_retired)
                        break;
        }

        if (fp == NULL) {
                mutex_exit(&bucket->ftb_mtx);
                return;
        }

        mutex_enter(&fp->ftp_mtx);
        ASSERT(!mprov || fp->ftp_mcount > 0);
        if (mprov && --fp->ftp_mcount != 0) {
                mutex_exit(&fp->ftp_mtx);
                mutex_exit(&bucket->ftb_mtx);
                return;
        }

        /*
         * Mark the provider to be removed in our post-processing step, mark
         * it retired, and drop the active count on its proc. Marking it
         * indicates that we should try to remove it; setting the retired
         * flag indicates that we're done with this provider; dropping the
         * active count on the proc releases our hold, and when this reaches
         * zero (as it will during exit or exec) the proc and associated
         * providers become defunct.
         *
         * We obviously need to take the bucket lock before the provider lock
         * to perform the lookup, but we need to drop the provider lock
         * before calling into the DTrace framework since we acquire the
         * provider lock in callbacks invoked from the DTrace framework. The
         * bucket lock therefore protects the integrity of the provider hash
         * table.
         */
        atomic_dec_64(&fp->ftp_proc->ftpc_acount);
        ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

        fp->ftp_retired = 1;
        fp->ftp_marked = 1;
        provid = fp->ftp_provid;
        mutex_exit(&fp->ftp_mtx);

        /*
         * We don't have to worry about invalidating the same provider twice
         * since fasttrap_provider_lookup() will ignore providers that have
         * been marked as retired.
         */
        dtrace_invalidate(provid);

        mutex_exit(&bucket->ftb_mtx);

        fasttrap_pid_cleanup();
}

static int
fasttrap_uint32_cmp(const void *ap, const void *bp)
{
        uint32_t a = *(const uint32_t *)ap;
        uint32_t b = *(const uint32_t *)bp;

        /*
         * Compare rather than subtract: the difference of two unsigned
         * values may not fit in an int, which could yield the wrong sign.
         */
        return ((a > b) - (a < b));
}

static int
fasttrap_uint64_cmp(const void *ap, const void *bp)
{
        uint64_t a = *(const uint64_t *)ap;
        uint64_t b = *(const uint64_t *)bp;

        return ((a > b) - (a < b));
}
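
/*
 * A concrete case motivating the comparison form above: with a naive
 * subtraction, (uint64_t)0 - (uint64_t)0x100000000 truncated to an int is
 * 0, so qsort() would treat two distinct offsets as equal; similarly a
 * large uint32_t difference can change sign when narrowed to int.
 */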

static int
fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
{
        fasttrap_provider_t *provider;
        fasttrap_probe_t *pp;
        fasttrap_tracepoint_t *tp;
        char *name;
        int i, aframes = 0, whack;

        /*
         * There needs to be at least one desired trace point.
         */
        if (pdata->ftps_noffs == 0)
                return (EINVAL);

        switch (pdata->ftps_type) {
        case DTFTP_ENTRY:
                name = "entry";
                aframes = FASTTRAP_ENTRY_AFRAMES;
                break;
        case DTFTP_RETURN:
                name = "return";
                aframes = FASTTRAP_RETURN_AFRAMES;
                break;
        case DTFTP_OFFSETS:
                name = NULL;
                break;
        default:
                return (EINVAL);
        }

        if ((provider = fasttrap_provider_lookup(pdata->ftps_pid,
            FASTTRAP_PID_NAME, &pid_attr)) == NULL)
                return (ESRCH);

        /*
         * Increment this reference count to indicate that a consumer is
         * actively adding a new probe associated with this provider. This
         * prevents the provider from being deleted -- we'll need to check
         * for pending deletions when we drop this reference count.
         */
        provider->ftp_ccount++;
        mutex_exit(&provider->ftp_mtx);

        /*
         * Grab the creation lock to ensure consistency between calls to
         * dtrace_probe_lookup() and dtrace_probe_create() in the face of
         * other threads creating probes. We must drop the provider lock
         * before taking this lock to avoid a three-way deadlock with the
         * DTrace framework.
         */
        mutex_enter(&provider->ftp_cmtx);

        if (name == NULL) {
                for (i = 0; i < pdata->ftps_noffs; i++) {
                        char name_str[17];

                        (void) sprintf(name_str, "%llx",
                            (unsigned long long)pdata->ftps_offs[i]);

                        if (dtrace_probe_lookup(provider->ftp_provid,
                            pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
                                continue;

                        atomic_inc_32(&fasttrap_total);

                        if (fasttrap_total > fasttrap_max) {
                                atomic_dec_32(&fasttrap_total);
                                goto no_mem;
                        }

                        pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP);

                        pp->ftp_prov = provider;
                        pp->ftp_faddr = pdata->ftps_pc;
                        pp->ftp_fsize = pdata->ftps_size;
                        pp->ftp_pid = pdata->ftps_pid;
                        pp->ftp_ntps = 1;

                        tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
                            KM_SLEEP);

                        tp->ftt_proc = provider->ftp_proc;
                        tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
                        tp->ftt_pid = pdata->ftps_pid;

                        pp->ftp_tps[0].fit_tp = tp;
                        pp->ftp_tps[0].fit_id.fti_probe = pp;
                        pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_type;

                        pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
                            pdata->ftps_mod, pdata->ftps_func, name_str,
                            FASTTRAP_OFFSET_AFRAMES, pp);
                }

        } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
            pdata->ftps_func, name) == 0) {
                atomic_add_32(&fasttrap_total, pdata->ftps_noffs);

                if (fasttrap_total > fasttrap_max) {
                        atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
                        goto no_mem;
                }

                /*
                 * Make sure all tracepoint program counter values are unique.
                 * We later assume that each probe has exactly one tracepoint
                 * for a given pc.
                 */
                qsort(pdata->ftps_offs, pdata->ftps_noffs,
                    sizeof (uint64_t), fasttrap_uint64_cmp);
                for (i = 1; i < pdata->ftps_noffs; i++) {
                        if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
                                continue;

                        atomic_add_32(&fasttrap_total, -pdata->ftps_noffs);
                        goto no_mem;
                }

                ASSERT(pdata->ftps_noffs > 0);
                pp = kmem_zalloc(offsetof(fasttrap_probe_t,
                    ftp_tps[pdata->ftps_noffs]), KM_SLEEP);

                pp->ftp_prov = provider;
                pp->ftp_faddr = pdata->ftps_pc;
                pp->ftp_fsize = pdata->ftps_size;
                pp->ftp_pid = pdata->ftps_pid;
                pp->ftp_ntps = pdata->ftps_noffs;

                for (i = 0; i < pdata->ftps_noffs; i++) {
                        tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t),
                            KM_SLEEP);

                        tp->ftt_proc = provider->ftp_proc;
                        tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
                        tp->ftt_pid = pdata->ftps_pid;

                        pp->ftp_tps[i].fit_tp = tp;
                        pp->ftp_tps[i].fit_id.fti_probe = pp;
                        pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_type;
                }

                pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
                    pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
        }

        mutex_exit(&provider->ftp_cmtx);

        /*
         * We know that the provider is still valid since we incremented the
         * creation reference count. If someone tried to clean up this
         * provider while we were using it (e.g. because the process called
         * exec(2) or exit(2)), take note of that and try to clean it up now.
         */
        mutex_enter(&provider->ftp_mtx);
        provider->ftp_ccount--;
        whack = provider->ftp_retired;
        mutex_exit(&provider->ftp_mtx);

        if (whack)
                fasttrap_pid_cleanup();

        return (0);

no_mem:
        /*
         * If we've exhausted the allowable resources, we'll try to remove
         * this provider to free some up. This is to cover the case where
         * the user has accidentally created many more probes than was
         * intended (e.g. pid123:::).
         */
        mutex_exit(&provider->ftp_cmtx);
        mutex_enter(&provider->ftp_mtx);
        provider->ftp_ccount--;
        provider->ftp_marked = 1;
        mutex_exit(&provider->ftp_mtx);

        fasttrap_pid_cleanup();

        return (ENOMEM);
}
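
/*
 * To illustrate the naming scheme above: for an offset probe the probe name
 * is the hexadecimal offset itself, so a request for pid 123, module
 * "a.out", function "main", offset 0x1f4 creates the probe
 * pid123:a.out:main:1f4, while entry and return requests create
 * pid123:a.out:main:entry and pid123:a.out:main:return. (The specific pid
 * and module here are hypothetical.)
 */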
/*ARGSUSED*/
static void *
fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	fasttrap_provider_t *provider;

	/*
	 * A 32-bit unsigned integer (like a pid for example) can be
	 * expressed in 10 or fewer decimal digits. Make sure that we'll
	 * have enough space for the provider name.
	 */
	if (strlen(dhpv->dthpv_provname) + 10 >=
	    sizeof (provider->ftp_name)) {
		printf("failed to instantiate provider %s: "
		    "name too long to accommodate pid", dhpv->dthpv_provname);
		return (NULL);
	}

	/*
	 * Don't let folks spoof the true pid provider.
	 */
	if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) {
		printf("failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_PID_NAME);
		return (NULL);
	}

	/*
	 * The highest stability class that fasttrap supports is ISA; cap
	 * the stability of the new provider accordingly.
	 */
	if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;

	if ((provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname,
	    &dhpv->dthpv_pattr)) == NULL) {
		printf("failed to instantiate provider %s for "
		    "process %u", dhpv->dthpv_provname, (uint_t)pid);
		return (NULL);
	}

	/*
	 * Up the meta provider count so this provider isn't removed until
	 * the meta provider has been told to remove it.
	 */
	provider->ftp_mcount++;

	mutex_exit(&provider->ftp_mtx);

	return (provider);
}

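/*
 * The provider instantiated above is registered with the framework under
 * its helper-supplied name with the pid appended -- hence the check that
 * the name leaves room for up to 10 decimal digits -- so a provider
 * "myserv" in process 1234 appears to consumers as myserv1234.
 */
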
/*ARGSUSED*/
static void
fasttrap_meta_create_probe(void *arg, void *parg,
    dtrace_helper_probedesc_t *dhpb)
{
	fasttrap_provider_t *provider = parg;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	int i, j;
	uint32_t ntps;

	/*
	 * Since the meta provider count is non-zero we don't have to worry
	 * about this provider disappearing.
	 */
	ASSERT(provider->ftp_mcount > 0);

	/*
	 * The offsets must be unique.
	 */
	qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_noffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
			return;
	}

	qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
			return;
	}

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes.
	 */
	mutex_enter(&provider->ftp_cmtx);

	if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
		mutex_exit(&provider->ftp_cmtx);
		return;
	}

	ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
	ASSERT(ntps > 0);

	atomic_add_32(&fasttrap_total, ntps);

	if (fasttrap_total > fasttrap_max) {
		atomic_add_32(&fasttrap_total, -ntps);
		mutex_exit(&provider->ftp_cmtx);
		return;
	}

	pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);

	pp->ftp_prov = provider;
	pp->ftp_pid = provider->ftp_pid;
	pp->ftp_ntps = ntps;
	pp->ftp_nargs = dhpb->dthpb_xargc;
	pp->ftp_xtypes = dhpb->dthpb_xtypes;
	pp->ftp_ntypes = dhpb->dthpb_ntypes;

	/*
	 * First create a tracepoint for each actual point of interest.
	 */
	for (i = 0; i < dhpb->dthpb_noffs; i++) {
		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);

		tp->ftt_proc = provider->ftp_proc;
		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
#ifdef __sparc
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
#else
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
#endif
	}

	/*
	 * Then create a tracepoint for each is-enabled point.
	 */
	for (j = 0; i < ntps; i++, j++) {
		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);

		tp->ftt_proc = provider->ftp_proc;
		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
	}

	/*
	 * If the arguments are shuffled around we set the argument remapping
	 * table. Later, when the probe fires, we only remap the arguments
	 * if the table is non-NULL.
	 */
	for (i = 0; i < dhpb->dthpb_xargc; i++) {
		if (dhpb->dthpb_args[i] != i) {
			pp->ftp_argmap = dhpb->dthpb_args;
			break;
		}
	}

	/*
	 * The probe is fully constructed -- register it with DTrace.
	 */
	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);

	mutex_exit(&provider->ftp_cmtx);
}

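/*
 * A note on the remapping table set up above: dthpb_args[i] names the
 * native argument from which probe argument i should be fetched, so a
 * declaration that swaps two arguments yields dthpb_args[] = { 1, 0 }
 * and arg0 is fetched from native argument 1 at probe-fire time. An
 * identity mapping leaves ftp_argmap NULL and the arguments untouched.
 */
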
/*ARGSUSED*/
static void
fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	/*
	 * Clean up the USDT provider. There may be active consumers of the
	 * provider busy adding probes; no damage will actually befall the
	 * provider until that count has dropped to zero. This just puts
	 * the provider on death row.
	 */
	fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
}

static dtrace_mops_t fasttrap_mops = {
	fasttrap_meta_create_probe,
	fasttrap_meta_provide,
	fasttrap_meta_remove
};

/*ARGSUSED*/
static int
fasttrap_open(struct cdev *dev __unused, int oflags __unused,
    int devtype __unused, struct thread *td __unused)
{
	return (0);
}

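/*
 * A sketch of the userland half of the FASTTRAPIOC_MAKEPROBE ioctl
 * handled below, assuming the ioctl is defined to pass the spec pointer
 * by value (in practice libdtrace builds the spec; error handling
 * omitted). Note that the argument is the address of the spec pointer,
 * which the handler dereferences before copying the spec in:
 *
 *	fasttrap_probe_spec_t *ps;
 *	size_t sz = sizeof (*ps) +
 *	    (noffs - 1) * sizeof (ps->ftps_offs[0]);
 *
 *	ps = calloc(1, sz);
 *	ps->ftps_pid = pid;
 *	ps->ftps_type = DTFTP_OFFSETS;
 *	ps->ftps_noffs = noffs;
 *	...
 *	(void) ioctl(fd, FASTTRAPIOC_MAKEPROBE, &ps);
 */
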
/*ARGSUSED*/
static int
fasttrap_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int fflag,
    struct thread *td)
{
#ifdef notyet
	struct kinfo_proc kp;
	const cred_t *cr = td->td_ucred;
#endif
	if (!dtrace_attached())
		return (EAGAIN);

	if (cmd == FASTTRAPIOC_MAKEPROBE) {
		fasttrap_probe_spec_t *uprobe = *(fasttrap_probe_spec_t **)arg;
		fasttrap_probe_spec_t *probe;
		uint64_t noffs;
		size_t size;
		int ret, err;

		if (copyin(&uprobe->ftps_noffs, &noffs,
		    sizeof (uprobe->ftps_noffs)))
			return (EFAULT);

		/*
		 * Probes must have at least one tracepoint.
		 */
		if (noffs == 0)
			return (EINVAL);

		size = sizeof (fasttrap_probe_spec_t) +
		    sizeof (probe->ftps_offs[0]) * (noffs - 1);

		if (size > 1024 * 1024)
			return (ENOMEM);

		probe = kmem_alloc(size, KM_SLEEP);

		if (copyin(uprobe, probe, size) != 0 ||
		    probe->ftps_noffs != noffs) {
			kmem_free(probe, size);
			return (EFAULT);
		}

		/*
		 * Verify that the function and module strings contain no
		 * funny characters.
		 */
		if (u8_validate(probe->ftps_func, strlen(probe->ftps_func),
		    NULL, U8_VALIDATE_ENTIRE, &err) < 0) {
			ret = EINVAL;
			goto err;
		}

		if (u8_validate(probe->ftps_mod, strlen(probe->ftps_mod),
		    NULL, U8_VALIDATE_ENTIRE, &err) < 0) {
			ret = EINVAL;
			goto err;
		}

#ifdef notyet
		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = probe->ftps_pid;

#if defined(sun)
			mutex_enter(&pidlock);
#endif
			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			sx_slock(&proctree_lock);
			p = pfind(pid);
			if (p)
				fill_kinfo_proc(p, &kp);
			sx_sunlock(&proctree_lock);
			if (p == NULL || kp.ki_stat == SIDL) {
#if defined(sun)
				mutex_exit(&pidlock);
#endif
				return (ESRCH);
			}
#if defined(sun)
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
#else
			PROC_LOCK_ASSERT(p, MA_OWNED);
#endif

#ifdef notyet
			if ((ret = priv_proc_cred_perm(cr, p, NULL,
			    VREAD | VWRITE)) != 0) {
#if defined(sun)
				mutex_exit(&p->p_lock);
#else
				PROC_UNLOCK(p);
#endif
				return (ret);
			}
#endif /* notyet */
#if defined(sun)
			mutex_exit(&p->p_lock);
#else
			PROC_UNLOCK(p);
#endif
		}
#endif /* notyet */

		ret = fasttrap_add_probe(probe);
err:
		kmem_free(probe, size);

		return (ret);

	} else if (cmd == FASTTRAPIOC_GETINSTR) {
		fasttrap_instr_query_t instr;
		fasttrap_tracepoint_t *tp;
		uint_t index;
#ifdef notyet
		int ret;
#endif

		/*
		 * Copy the query in unconditionally; instr is consulted
		 * below on all platforms.
		 */
		if (copyin((void *)arg, &instr, sizeof (instr)) != 0)
			return (EFAULT);

#ifdef notyet
		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = instr.ftiq_pid;

#if defined(sun)
			mutex_enter(&pidlock);
#endif
			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			sx_slock(&proctree_lock);
			p = pfind(pid);
			if (p)
				fill_kinfo_proc(p, &kp);
			sx_sunlock(&proctree_lock);
			if (p == NULL || kp.ki_stat == SIDL) {
#if defined(sun)
				mutex_exit(&pidlock);
#endif
				return (ESRCH);
			}
#if defined(sun)
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
#else
			PROC_LOCK_ASSERT(p, MA_OWNED);
#endif

#ifdef notyet
			if ((ret = priv_proc_cred_perm(cr, p, NULL,
			    VREAD)) != 0) {
#if defined(sun)
				mutex_exit(&p->p_lock);
#else
				PROC_UNLOCK(p);
#endif
				return (ret);
			}
#endif /* notyet */

#if defined(sun)
			mutex_exit(&p->p_lock);
#else
			PROC_UNLOCK(p);
#endif
		}
#endif /* notyet */

		index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);

		mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx);
		tp = fasttrap_tpoints.fth_table[index].ftb_data;
		while (tp != NULL) {
			if (instr.ftiq_pid == tp->ftt_pid &&
			    instr.ftiq_pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)
				break;

			tp = tp->ftt_next;
		}

		if (tp == NULL) {
			mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
			return (ENOENT);
		}

		bcopy(&tp->ftt_instr, &instr.ftiq_instr,
		    sizeof (instr.ftiq_instr));
		mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);

		if (copyout(&instr, (void *)arg, sizeof (instr)) != 0)
			return (EFAULT);

		return (0);
	}

	return (EINVAL);
}

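/*
 * Module setup: fasttrap_load() runs via the SYSINIT at the bottom of
 * this file once the DTrace framework is in place. On any failure it
 * must unwind everything it has initialized so far, as nothing else
 * will.
 */
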
static int
fasttrap_load(void)
{
	ulong_t nent;
	int i, ret;

	/* Create the /dev/dtrace/fasttrap entry. */
	fasttrap_cdev = make_dev(&fasttrap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/fasttrap");

	mtx_init(&fasttrap_cleanup_mtx, "fasttrap clean", "dtrace", MTX_DEF);
	mutex_init(&fasttrap_count_mtx, "fasttrap count mtx", MUTEX_DEFAULT,
	    NULL);

#if defined(sun)
	fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
#else
	fasttrap_max = FASTTRAP_MAX_DEFAULT;
#endif
	fasttrap_total = 0;

	/*
	 * Conjure up the tracepoints hashtable...
	 */
#if defined(sun)
	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
#else
	nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
#endif

	if (nent == 0 || nent > 0x1000000)
		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;

	if ((nent & (nent - 1)) == 0)
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_tpoints.fth_nent > 0);
	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#if !defined(sun)
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
		mutex_init(&fasttrap_tpoints.fth_table[i].ftb_mtx,
		    "tracepoints bucket mtx", MUTEX_DEFAULT, NULL);
#endif

	/*
	 * ... and the providers hash table...
	 */
	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_provs.fth_nent = nent;
	else
		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_provs.fth_nent > 0);
	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#if !defined(sun)
	for (i = 0; i < fasttrap_provs.fth_nent; i++)
		mutex_init(&fasttrap_provs.fth_table[i].ftb_mtx,
		    "providers bucket mtx", MUTEX_DEFAULT, NULL);
#endif

	ret = kproc_create(fasttrap_pid_cleanup_cb, NULL,
	    &fasttrap_cleanup_proc, 0, 0, "ftcleanup");
	if (ret != 0) {
		destroy_dev(fasttrap_cdev);
#if !defined(sun)
		for (i = 0; i < fasttrap_provs.fth_nent; i++)
			mutex_destroy(&fasttrap_provs.fth_table[i].ftb_mtx);
		for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
			mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx);
#endif
		kmem_free(fasttrap_provs.fth_table, fasttrap_provs.fth_nent *
		    sizeof (fasttrap_bucket_t));
		/* Also free the tracepoints table allocated above. */
		kmem_free(fasttrap_tpoints.fth_table,
		    fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
		mtx_destroy(&fasttrap_cleanup_mtx);
		mutex_destroy(&fasttrap_count_mtx);
		return (ret);
	}

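	/*
	 * Like the two tables above, the procs table below is sized to a
	 * power of two: fth_mask (fth_nent - 1) must be able to reduce a
	 * hash to a bucket index with a single AND, so a non-power-of-two
	 * request is rounded up (e.g. 0x300 buckets become 0x400).
	 */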
	/*
	 * ... and the procs hash table.
	 */
	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_procs.fth_nent = nent;
	else
		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_procs.fth_nent > 0);
	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#if !defined(sun)
	for (i = 0; i < fasttrap_procs.fth_nent; i++)
		mutex_init(&fasttrap_procs.fth_table[i].ftb_mtx,
		    "processes bucket mtx", MUTEX_DEFAULT, NULL);

	CPU_FOREACH(i) {
		mutex_init(&fasttrap_cpuc_pid_lock[i], "fasttrap barrier",
		    MUTEX_DEFAULT, NULL);
	}

	/*
	 * This event handler must run before kdtrace_thread_dtor() since it
	 * accesses the thread's struct kdtrace_thread.
	 */
	fasttrap_thread_dtor_tag = EVENTHANDLER_REGISTER(thread_dtor,
	    fasttrap_thread_dtor, NULL, EVENTHANDLER_PRI_FIRST);
#endif

	/*
	 * Install our hooks into fork(2), exec(2), and exit(2).
	 */
	dtrace_fasttrap_fork = &fasttrap_fork;
	dtrace_fasttrap_exit = &fasttrap_exec_exit;
	dtrace_fasttrap_exec = &fasttrap_exec_exit;

	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
	    &fasttrap_meta_id);

	return (0);
}

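/*
 * Teardown proceeds in roughly the reverse order of fasttrap_load():
 * deregister as a meta-provider, unregister every remaining pid provider,
 * detach the fork/exec/exit hooks, drain the cleanup thread, and only
 * then tear down the hash tables and locks. A provider that cannot be
 * unregistered aborts the unload.
 */
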
static int
fasttrap_unload(void)
{
	int i, fail = 0;

	/*
	 * Unregister the meta-provider to make sure no new fasttrap-
	 * managed providers come along while we're trying to close up
	 * shop. If we fail to detach, we'll need to re-register as a
	 * meta-provider. We can fail to unregister as a meta-provider
	 * if providers we manage still exist.
	 */
	if (fasttrap_meta_id != DTRACE_METAPROVNONE &&
	    dtrace_meta_unregister(fasttrap_meta_id) != 0)
		return (-1);

	/*
	 * Iterate over all of our providers. If there's still a process
	 * that corresponds to that pid, fail to detach.
	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		fasttrap_provider_t **fpp, *fp;
		fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i];

		mutex_enter(&bucket->ftb_mtx);
		fpp = (fasttrap_provider_t **)&bucket->ftb_data;
		while ((fp = *fpp) != NULL) {
			/*
			 * Acquire and release the lock as a simple way of
			 * waiting for any other consumer to finish with
			 * this provider. A thread must first acquire the
			 * bucket lock so there's no chance of another thread
			 * blocking on the provider's lock.
			 */
			mutex_enter(&fp->ftp_mtx);
			mutex_exit(&fp->ftp_mtx);

			if (dtrace_unregister(fp->ftp_provid) != 0) {
				fail = 1;
				fpp = &fp->ftp_next;
			} else {
				*fpp = fp->ftp_next;
				fasttrap_provider_free(fp);
			}
		}

		mutex_exit(&bucket->ftb_mtx);
	}

	if (fail) {
		(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
		    &fasttrap_meta_id);

		return (-1);
	}

	/*
	 * Stop new processes from entering these hooks now, before the
	 * fasttrap_cleanup thread runs. That way all processes will
	 * hopefully be out of these hooks before we free
	 * fasttrap_provs.fth_table.
	 */
	ASSERT(dtrace_fasttrap_fork == &fasttrap_fork);
	dtrace_fasttrap_fork = NULL;

	ASSERT(dtrace_fasttrap_exec == &fasttrap_exec_exit);
	dtrace_fasttrap_exec = NULL;

	ASSERT(dtrace_fasttrap_exit == &fasttrap_exec_exit);
	dtrace_fasttrap_exit = NULL;

	mtx_lock(&fasttrap_cleanup_mtx);
	fasttrap_cleanup_drain = 1;
	/* Wait for the cleanup thread to finish up and signal us. */
	wakeup(&fasttrap_cleanup_cv);
	mtx_sleep(&fasttrap_cleanup_drain, &fasttrap_cleanup_mtx, 0, "ftcld",
	    0);
	fasttrap_cleanup_proc = NULL;
	mtx_destroy(&fasttrap_cleanup_mtx);

#ifdef DEBUG
	mutex_enter(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count == 0);
	mutex_exit(&fasttrap_count_mtx);
#endif

#if !defined(sun)
	EVENTHANDLER_DEREGISTER(thread_dtor, fasttrap_thread_dtor_tag);

	for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
		mutex_destroy(&fasttrap_tpoints.fth_table[i].ftb_mtx);
	for (i = 0; i < fasttrap_provs.fth_nent; i++)
		mutex_destroy(&fasttrap_provs.fth_table[i].ftb_mtx);
	for (i = 0; i < fasttrap_procs.fth_nent; i++)
		mutex_destroy(&fasttrap_procs.fth_table[i].ftb_mtx);
#endif
	kmem_free(fasttrap_tpoints.fth_table,
	    fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_tpoints.fth_nent = 0;

	kmem_free(fasttrap_provs.fth_table,
	    fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_provs.fth_nent = 0;

	kmem_free(fasttrap_procs.fth_table,
	    fasttrap_procs.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_procs.fth_nent = 0;

#if !defined(sun)
	destroy_dev(fasttrap_cdev);
	mutex_destroy(&fasttrap_count_mtx);
	CPU_FOREACH(i) {
		mutex_destroy(&fasttrap_cpuc_pid_lock[i]);
	}
#endif

	return (0);
}

/* ARGSUSED */
static int
fasttrap_modevent(module_t mod __unused, int type, void *data __unused)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		break;

	case MOD_UNLOAD:
		break;

	case MOD_SHUTDOWN:
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

SYSINIT(fasttrap_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fasttrap_load,
    NULL);
SYSUNINIT(fasttrap_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY,
    fasttrap_unload, NULL);

DEV_MODULE(fasttrap, fasttrap_modevent, NULL);
MODULE_VERSION(fasttrap, 1);
MODULE_DEPEND(fasttrap, dtrace, 1, 1, 1);
MODULE_DEPEND(fasttrap, opensolaris, 1, 1, 1);
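
/*
 * fasttrap_modevent() intentionally does no work: loading and unloading
 * are driven by the SYSINIT/SYSUNINIT pair above so that they are ordered
 * with the rest of DTrace at SI_SUB_DTRACE_PROVIDER, while MODULE_DEPEND
 * ensures the dtrace and opensolaris modules are loaded first.
 */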