/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Portions Copyright 2010 The FreeBSD Foundation
 *
 * $FreeBSD$
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#if defined(sun)
#pragma ident	"%Z%%M% %I% %E% SMI"
#endif

#include <sys/atomic.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/ddi.h>
#endif
#include <sys/sunddi.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/sysmacros.h>
#include <sys/proc.h>
#include <sys/policy.h>
#if defined(sun)
#include <util/qsort.h>
#endif
#include <sys/mutex.h>
#include <sys/kernel.h>
#if !defined(sun)
#include <sys/user.h>
#include <sys/dtrace_bsd.h>
#include <cddl/dev/dtrace/dtrace_cddl.h>
#endif

/*
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are very ISA specific, the
 * methodology is generalizable to any architecture.
 *
 *
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space so don't want to rely on much
 * information there, we begin by replacing the instructions we want to trace
 * with trap instructions. Each instruction we overwrite is saved into a hash
 * table keyed by process ID and pc address. When we enter the kernel due to
 * this trap instruction, we need the effects of the replaced instruction to
 * appear to have occurred before we proceed with the user thread's
 * execution.
 *
 * Each user level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread local storage), set the PC to that
 * scratch space and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate these in the kernel is
 * not too difficult.
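 *
 * For example, the probe pid123:a.out:main:entry traps the first
 * instruction of main() in process 123, while pid123:a.out:main:4 traps
 * the instruction at (hex) offset 4 into the function.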
 *
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
 *
 *
 * Lock Ordering
 * -------------
 *
 * The lock ordering below -- both internally and with respect to the DTrace
 * framework -- is a little tricky and bears some explanation. Each provider
 * has a lock (ftp_mtx) that protects its members including reference counts
 * for enabled probes (ftp_rcount), consumers actively creating probes
 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
 * from being freed. A provider is looked up by taking the bucket lock for the
 * provider hash table, and is returned with its lock held. The provider lock
 * may be taken in functions invoked by the DTrace framework, but may not be
 * held while calling functions in the DTrace framework.
 *
 * To ensure consistency over multiple calls to the DTrace framework, the
 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
 * not be taken when holding the provider lock as that would create a cyclic
 * lock ordering. In situations where one would naturally take the provider
 * lock and then the creation lock, we instead bump a reference count to
 * prevent the provider from disappearing, drop the provider lock, and
 * acquire the creation lock.
 *
 * Briefly:
 *	bucket lock before provider lock
 *	DTrace before provider lock
 *	creation lock before DTrace
 *	never hold the provider lock and creation lock simultaneously
 */

static d_open_t fasttrap_open;
static d_ioctl_t fasttrap_ioctl;

static struct cdevsw fasttrap_cdevsw = {
	.d_version	= D_VERSION,
	.d_open		= fasttrap_open,
	.d_ioctl	= fasttrap_ioctl,
	.d_name		= "fasttrap",
};
static struct cdev *fasttrap_cdev;
static dtrace_meta_provider_id_t fasttrap_meta_id;

static struct callout fasttrap_timeout;
static struct mtx fasttrap_cleanup_mtx;
static uint_t fasttrap_cleanup_work;

/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;

/*
 * When the fasttrap provider is loaded, fasttrap_max is set to either
 * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the
 * fasttrap.conf file. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
 */
#define	FASTTRAP_MAX_DEFAULT		250000
static uint32_t fasttrap_max;
static uint32_t fasttrap_total;


#define	FASTTRAP_TPOINTS_DEFAULT_SIZE	0x4000
#define	FASTTRAP_PROVIDERS_DEFAULT_SIZE	0x100
#define	FASTTRAP_PROCS_DEFAULT_SIZE	0x100

#define	FASTTRAP_PID_NAME	"pid"

fasttrap_hash_t			fasttrap_tpoints;
static fasttrap_hash_t		fasttrap_provs;
static fasttrap_hash_t		fasttrap_procs;

static uint64_t			fasttrap_pid_count;	/* pid ref count */
static kmutex_t			fasttrap_count_mtx;	/* lock on ref count */

#define	FASTTRAP_ENABLE_FAIL	1
#define	FASTTRAP_ENABLE_PARTIAL	2

static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);

static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, const char *,
    const dtrace_pattr_t *);
static void fasttrap_provider_retire(pid_t, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);

static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
static void fasttrap_proc_release(fasttrap_proc_t *);

#define	FASTTRAP_PROVS_INDEX(pid, name) \
	((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)

#define	FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)

#if !defined(sun)
static kmutex_t fasttrap_cpuc_pid_lock[MAXCPU];
#endif

/*
 * Returns the position (1-based) of the highest bit set in i, or 0 if no
 * bits are set.
 */
static int
fasttrap_highbit(ulong_t i)
{
	int h = 1;

	if (i == 0)
		return (0);
#ifdef _LP64
	if (i & 0xffffffff00000000ul) {
		h += 32; i >>= 32;
	}
#endif
	if (i & 0xffff0000) {
		h += 16; i >>= 16;
	}
	if (i & 0xff00) {
		h += 8; i >>= 8;
	}
	if (i & 0xf0) {
		h += 4; i >>= 4;
	}
	if (i & 0xc) {
		h += 2; i >>= 2;
	}
	if (i & 0x2) {
		h += 1;
	}
	return (h);
}

/*
 * ELF-style string hash, used to spread provider names across the
 * provider hash table.
 */
static uint_t
fasttrap_hash_str(const char *p)
{
	unsigned int g;
	uint_t hval = 0;

	while (*p) {
		hval = (hval << 4) + *p++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return (hval);
}

/*
 * Post a SIGTRAP with a DTrace-specific code to the given thread.
 */
void
fasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc)
{
#if defined(sun)
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = TRAP_DTRACE;
	sqp->sq_info.si_addr = (caddr_t)pc;

	mutex_enter(&p->p_lock);
	sigaddqa(p, t, sqp);
	mutex_exit(&p->p_lock);

	if (t != NULL)
		aston(t);
#else
	ksiginfo_t *ksi = kmem_zalloc(sizeof (ksiginfo_t), KM_SLEEP);

	ksiginfo_init(ksi);
	ksi->ksi_signo = SIGTRAP;
	ksi->ksi_code = TRAP_DTRACE;
	ksi->ksi_addr = (caddr_t)pc;
	PROC_LOCK(p);
	(void) tdksignal(t, SIGTRAP, ksi);
	PROC_UNLOCK(p);
#endif
}

/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
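 * It works by cycling through each CPU's fasttrap_cpuc_pid_lock below:
 * probe context runs under that per-CPU lock, so once every lock has been
 * acquired and dropped, any thread that could have been using the old
 * generation has drained.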
 */
static void
fasttrap_mod_barrier(uint64_t gen)
{
	int i;

	if (gen < fasttrap_mod_gen)
		return;

	fasttrap_mod_gen++;

	CPU_FOREACH(i) {
		mutex_enter(&fasttrap_cpuc_pid_lock[i]);
		mutex_exit(&fasttrap_cpuc_pid_lock[i]);
	}
}

/*
 * This is the timeout's callback for cleaning up the providers and their
 * probes.
 */
/*ARGSUSED*/
static void
fasttrap_pid_cleanup_cb(void *data)
{
	fasttrap_provider_t **fpp, *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	int i, later = 0;

	static volatile int in = 0;
	ASSERT(in == 0);
	in = 1;

	while (fasttrap_cleanup_work) {
		fasttrap_cleanup_work = 0;
		mtx_unlock(&fasttrap_cleanup_mtx);

		later = 0;

		/*
		 * Iterate over all the providers trying to remove the marked
		 * ones. If a provider is marked but not retired, we just
		 * have to take a crack at removing it -- it's no big deal if
		 * we can't.
		 */
		for (i = 0; i < fasttrap_provs.fth_nent; i++) {
			bucket = &fasttrap_provs.fth_table[i];
			mutex_enter(&bucket->ftb_mtx);
			fpp = (fasttrap_provider_t **)&bucket->ftb_data;

			while ((fp = *fpp) != NULL) {
				if (!fp->ftp_marked) {
					fpp = &fp->ftp_next;
					continue;
				}

				mutex_enter(&fp->ftp_mtx);

				/*
				 * If this provider has consumers actively
				 * creating probes (ftp_ccount) or is a USDT
				 * provider (ftp_mcount), we can't unregister
				 * or even condense.
				 */
				if (fp->ftp_ccount != 0 ||
				    fp->ftp_mcount != 0) {
					mutex_exit(&fp->ftp_mtx);
					fp->ftp_marked = 0;
					continue;
				}

				if (!fp->ftp_retired || fp->ftp_rcount != 0)
					fp->ftp_marked = 0;

				mutex_exit(&fp->ftp_mtx);

				/*
				 * If we successfully unregister this
				 * provider we can remove it from the hash
				 * chain and free the memory. If our attempt
				 * to unregister fails and this is a retired
				 * provider, increment our flag to try again
				 * pretty soon. If we've consumed more than
				 * half of our total permitted number of
				 * probes call dtrace_condense() to try to
				 * clean out the unenabled probes.
				 */
				provid = fp->ftp_provid;
				if (dtrace_unregister(provid) != 0) {
					if (fasttrap_total > fasttrap_max / 2)
						(void) dtrace_condense(provid);
					later += fp->ftp_marked;
					fpp = &fp->ftp_next;
				} else {
					*fpp = fp->ftp_next;
					fasttrap_provider_free(fp);
				}
			}
			mutex_exit(&bucket->ftb_mtx);
		}

		mtx_lock(&fasttrap_cleanup_mtx);
	}

#if 0
	ASSERT(fasttrap_timeout != 0);
#endif

	/*
	 * If we were unable to remove a retired provider, try again after
	 * a second. This situation can occur in certain circumstances where
	 * providers cannot be unregistered even though they have no probes
	 * enabled because of an execution of dtrace -l or something similar.
	 * If the timeout has been disabled (set to 1 because we're trying
	 * to detach), we set fasttrap_cleanup_work to ensure that we'll
	 * get a chance to do that work if and when the timeout is reenabled
	 * (if detach fails).
	 */
	if (later > 0 && callout_active(&fasttrap_timeout))
		callout_reset(&fasttrap_timeout, hz, &fasttrap_pid_cleanup_cb,
		    NULL);
	else if (later > 0)
		fasttrap_cleanup_work = 1;
	else {
#if !defined(sun)
		/* Nothing to be done for FreeBSD */
#endif
	}

	in = 0;
}

/*
 * Activates the asynchronous cleanup mechanism.
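 * Callers simply set fasttrap_cleanup_work and schedule the callout;
 * concurrent requests are coalesced since the callback loops until no new
 * work has been posted.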
 */
static void
fasttrap_pid_cleanup(void)
{

	mtx_lock(&fasttrap_cleanup_mtx);
	fasttrap_cleanup_work = 1;
	callout_reset(&fasttrap_timeout, 1, &fasttrap_pid_cleanup_cb, NULL);
	mtx_unlock(&fasttrap_cleanup_mtx);
}

/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
static void
fasttrap_fork(proc_t *p, proc_t *cp)
{
	pid_t ppid = p->p_pid;
	int i;

#if defined(sun)
	ASSERT(curproc == p);
	ASSERT(p->p_proc_flag & P_PR_LOCK);
#else
	PROC_LOCK_ASSERT(p, MA_OWNED);
#endif
#if defined(sun)
	ASSERT(p->p_dtrace_count > 0);
#else
	if (p->p_dtrace_helpers) {
		/*
		 * dtrace_helpers_duplicate() allocates memory.
		 */
		_PHOLD(cp);
		PROC_UNLOCK(p);
		PROC_UNLOCK(cp);
		dtrace_helpers_duplicate(p, cp);
		PROC_LOCK(cp);
		PROC_LOCK(p);
		_PRELE(cp);
	}
	/*
	 * This check is purposely here instead of in kern_fork.c because,
	 * for legal reasons, we cannot include the dtrace_cddl.h header
	 * inside kern_fork.c and insert an if-clause there.
	 */
	if (p->p_dtrace_count == 0)
		return;
#endif
	ASSERT(cp->p_dtrace_count == 0);

	/*
	 * This would be simpler and faster if we maintained per-process
	 * hash tables of enabled tracepoints. It could, however, potentially
	 * slow down execution of a tracepoint since we'd need to go
	 * through two levels of indirection. In the future, we should
	 * consider either maintaining per-process ancillary lists of
	 * enabled tracepoints or hanging a pointer to a per-process hash
	 * table of enabled tracepoints off the proc structure.
	 */

	/*
	 * We don't have to worry about the child process disappearing
	 * because we're in fork().
	 */
#if defined(sun)
	mtx_lock_spin(&cp->p_slock);
	sprlock_proc(cp);
	mtx_unlock_spin(&cp->p_slock);
#else
	_PHOLD(cp);
#endif

	/*
	 * Iterate over every tracepoint looking for ones that belong to the
	 * parent process, and remove each from the child process.
	 */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		fasttrap_tracepoint_t *tp;
		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

		mutex_enter(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (tp->ftt_pid == ppid &&
			    tp->ftt_proc->ftpc_acount != 0) {
				int ret = fasttrap_tracepoint_remove(cp, tp);
				ASSERT(ret == 0);

				/*
				 * The count of active providers can only be
				 * decremented (i.e. to zero) during exec,
				 * exit, and removal of a meta provider so it
				 * should be impossible to drop the count
				 * mid-fork.
				 */
				ASSERT(tp->ftt_proc->ftpc_acount != 0);
			}
		}
		mutex_exit(&bucket->ftb_mtx);
	}

#if defined(sun)
	mutex_enter(&cp->p_lock);
	sprunlock(cp);
#else
	_PRELE(cp);
#endif
}

/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
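 * Note that the provider is only retired here, not freed: consumers may
 * still hold references to it, so the asynchronous cleanup reclaims it
 * once those references drain.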
 */
static void
fasttrap_exec_exit(proc_t *p)
{
#if defined(sun)
	ASSERT(p == curproc);
#endif
	PROC_LOCK_ASSERT(p, MA_OWNED);
	_PHOLD(p);
	PROC_UNLOCK(p);

	/*
	 * We clean up the pid provider for this process here; user-land
	 * static probes are handled by the meta-provider remove entry point.
	 */
	fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
#if !defined(sun)
	if (p->p_dtrace_helpers)
		dtrace_helpers_destroy(p);
#endif
	PROC_LOCK(p);
	_PRELE(p);
}


/*ARGSUSED*/
static void
fasttrap_pid_provide(void *arg, dtrace_probedesc_t *desc)
{
	/*
	 * There are no "default" pid probes.
	 */
}

static int
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_tracepoint_t *tp, *new_tp = NULL;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	pid_t pid;
	uintptr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

#if defined(sun)
	ASSERT(!(p->p_flag & SVFORK));
#endif

	/*
	 * Before we make any modifications, make sure we've imposed a barrier
	 * on the generation in which this probe was last modified.
	 */
	fasttrap_mod_barrier(probe->ftp_gen);

	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * If the tracepoint has already been enabled, just add our id to the
	 * list of interested probes. This may be our second time through
	 * this path in which case we'll have constructed the tracepoint we'd
	 * like to install. If we can't find a match, and have an allocated
	 * tracepoint ready to go, enable that one now.
	 *
	 * A tracepoint whose process is defunct is also considered defunct.
	 */
again:
	mutex_enter(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		/*
		 * Note that it's safe to access the active count on the
		 * associated proc structure because we know that at least one
		 * provider (this one) will still be around throughout this
		 * operation.
		 */
		if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
		    tp->ftt_proc->ftpc_acount == 0)
			continue;

		/*
		 * Now that we've found a matching tracepoint, it would be
		 * a decent idea to confirm that the tracepoint is still
		 * enabled and the trap instruction hasn't been overwritten.
		 * Since this is a little hairy, we'll punt for now.
		 */

		/*
		 * This can't be the first interested probe. We don't have
		 * to worry about another thread being in the midst of
		 * deleting this tracepoint (which would be the only valid
		 * reason for a tracepoint to have no interested probes)
		 * since we're holding P_PR_LOCK for this process.
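		 * Probe context walks the ftt_ids and ftt_retids lists
		 * without taking locks; the membar_producer() calls below
		 * ensure that an id is fully linked before it becomes
		 * visible to that lockless traversal.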
		 */
		ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

		switch (id->fti_ptype) {
		case DTFTP_ENTRY:
		case DTFTP_OFFSETS:
		case DTFTP_IS_ENABLED:
			id->fti_next = tp->ftt_ids;
			membar_producer();
			tp->ftt_ids = id;
			membar_producer();
			break;

		case DTFTP_RETURN:
		case DTFTP_POST_OFFSETS:
			id->fti_next = tp->ftt_retids;
			membar_producer();
			tp->ftt_retids = id;
			membar_producer();
			break;

		default:
			ASSERT(0);
		}

		mutex_exit(&bucket->ftb_mtx);

		if (new_tp != NULL) {
			new_tp->ftt_ids = NULL;
			new_tp->ftt_retids = NULL;
		}

		return (0);
	}

	/*
	 * If we have a good tracepoint ready to go, install it now while
	 * we have the lock held and no one can screw with us.
	 */
	if (new_tp != NULL) {
		int rc = 0;

		new_tp->ftt_next = bucket->ftb_data;
		membar_producer();
		bucket->ftb_data = new_tp;
		membar_producer();
		mutex_exit(&bucket->ftb_mtx);

		/*
		 * Activate the tracepoint in the ISA-specific manner.
		 * If this fails, we need to report the failure, but
		 * indicate that this tracepoint must still be disabled
		 * by calling fasttrap_tracepoint_disable().
		 */
		if (fasttrap_tracepoint_install(p, new_tp) != 0)
			rc = FASTTRAP_ENABLE_PARTIAL;

		/*
		 * Increment the count of the number of tracepoints active in
		 * the victim process.
		 */
#if defined(sun)
		ASSERT(p->p_proc_flag & P_PR_LOCK);
#endif
		p->p_dtrace_count++;

		return (rc);
	}

	mutex_exit(&bucket->ftb_mtx);

	/*
	 * Initialize the tracepoint that's been preallocated with the probe.
	 */
	new_tp = probe->ftp_tps[index].fit_tp;

	ASSERT(new_tp->ftt_pid == pid);
	ASSERT(new_tp->ftt_pc == pc);
	ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
	ASSERT(new_tp->ftt_ids == NULL);
	ASSERT(new_tp->ftt_retids == NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		id->fti_next = NULL;
		new_tp->ftt_ids = id;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		id->fti_next = NULL;
		new_tp->ftt_retids = id;
		break;

	default:
		ASSERT(0);
	}

	/*
	 * If the ISA-dependent initialization goes to plan, go back to the
	 * beginning and try to install this freshly made tracepoint.
	 */
	if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
		goto again;

	new_tp->ftt_ids = NULL;
	new_tp->ftt_retids = NULL;

	return (FASTTRAP_ENABLE_FAIL);
}

static void
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_bucket_t *bucket;
	fasttrap_provider_t *provider = probe->ftp_prov;
	fasttrap_tracepoint_t **pp, *tp;
	fasttrap_id_t *id, **idp = NULL;
	pid_t pid;
	uintptr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	/*
	 * Find the tracepoint and make sure that our id is one of the
	 * ones registered with it.
	 */
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
	mutex_enter(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
		    tp->ftt_proc == provider->ftp_proc)
			break;
	}

	/*
	 * If we somehow lost this tracepoint, we're in a world of hurt.
	 */
	ASSERT(tp != NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		ASSERT(tp->ftt_ids != NULL);
		idp = &tp->ftt_ids;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		ASSERT(tp->ftt_retids != NULL);
		idp = &tp->ftt_retids;
		break;

	default:
		ASSERT(0);
	}

	while ((*idp)->fti_probe != probe) {
		idp = &(*idp)->fti_next;
		ASSERT(*idp != NULL);
	}

	id = *idp;
	*idp = id->fti_next;
	membar_producer();

	ASSERT(id->fti_probe == probe);

	/*
	 * If there are other registered enablings of this tracepoint, we're
	 * all done, but if this was the last probe associated with this
	 * tracepoint, we need to remove and free it.
	 */
	if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {

		/*
		 * If the current probe's tracepoint is in use, swap it
		 * for an unused tracepoint.
		 */
		if (tp == probe->ftp_tps[index].fit_tp) {
			fasttrap_probe_t *tmp_probe;
			fasttrap_tracepoint_t **tmp_tp;
			uint_t tmp_index;

			if (tp->ftt_ids != NULL) {
				tmp_probe = tp->ftt_ids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			} else {
				tmp_probe = tp->ftt_retids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			}

			ASSERT(*tmp_tp != NULL);
			ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
			ASSERT((*tmp_tp)->ftt_ids == NULL);
			ASSERT((*tmp_tp)->ftt_retids == NULL);

			probe->ftp_tps[index].fit_tp = *tmp_tp;
			*tmp_tp = tp;
		}

		mutex_exit(&bucket->ftb_mtx);

		/*
		 * Tag the modified probe with the generation in which it was
		 * changed.
		 */
		probe->ftp_gen = fasttrap_mod_gen;
		return;
	}

	mutex_exit(&bucket->ftb_mtx);

	/*
	 * We can't safely remove the tracepoint from the set of active
	 * tracepoints until we've actually removed the fasttrap instruction
	 * from the process's text. We can, however, operate on this
	 * tracepoint secure in the knowledge that no other thread is going to
	 * be looking at it since we hold P_PR_LOCK on the process if it's
	 * live or we hold the provider lock on the process if it's dead and
	 * gone.
	 */

	/*
	 * We only need to remove the actual instruction if we're looking
	 * at an existing process.
	 */
	if (p != NULL) {
		/*
		 * If we fail to restore the instruction we need to kill
		 * this process since it's in a completely unrecoverable
		 * state.
		 */
		if (fasttrap_tracepoint_remove(p, tp) != 0)
			fasttrap_sigtrap(p, NULL, pc);

		/*
		 * Decrement the count of the number of tracepoints active
		 * in the victim process.
		 */
#if defined(sun)
		ASSERT(p->p_proc_flag & P_PR_LOCK);
#endif
		p->p_dtrace_count--;
	}

	/*
	 * Remove the probe from the hash table of active tracepoints.
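	 * By now the trap instruction has been pulled from the process's
	 * text (or the process is gone entirely), so the tracepoint can no
	 * longer fire while we unlink it.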
	 */
	mutex_enter(&bucket->ftb_mtx);
	pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
	ASSERT(*pp != NULL);
	while (*pp != tp) {
		pp = &(*pp)->ftt_next;
		ASSERT(*pp != NULL);
	}

	*pp = tp->ftt_next;
	membar_producer();

	mutex_exit(&bucket->ftb_mtx);

	/*
	 * Tag the modified probe with the generation in which it was changed.
	 */
	probe->ftp_gen = fasttrap_mod_gen;
}

static void
fasttrap_enable_callbacks(void)
{
	/*
	 * We don't have to play the rw lock game here because we're
	 * providing something rather than taking something away --
	 * we can be sure that no threads have tried to follow this
	 * function pointer yet.
	 */
	mutex_enter(&fasttrap_count_mtx);
	if (fasttrap_pid_count == 0) {
		ASSERT(dtrace_pid_probe_ptr == NULL);
		ASSERT(dtrace_return_probe_ptr == NULL);
		dtrace_pid_probe_ptr = &fasttrap_pid_probe;
		dtrace_return_probe_ptr = &fasttrap_return_probe;
	}
	ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
	ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
	fasttrap_pid_count++;
	mutex_exit(&fasttrap_count_mtx);
}

static void
fasttrap_disable_callbacks(void)
{
#if defined(sun)
	ASSERT(MUTEX_HELD(&cpu_lock));
#endif

	mutex_enter(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count > 0);
	fasttrap_pid_count--;
	if (fasttrap_pid_count == 0) {
#if defined(sun)
		cpu_t *cur, *cpu = CPU;

		for (cur = cpu->cpu_next_onln; cur != cpu;
		    cur = cur->cpu_next_onln) {
			rw_enter(&cur->cpu_ft_lock, RW_WRITER);
		}
#endif
		dtrace_pid_probe_ptr = NULL;
		dtrace_return_probe_ptr = NULL;
#if defined(sun)
		for (cur = cpu->cpu_next_onln; cur != cpu;
		    cur = cur->cpu_next_onln) {
			rw_exit(&cur->cpu_ft_lock);
		}
#endif
	}
	mutex_exit(&fasttrap_count_mtx);
}

/*ARGSUSED*/
static void
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
	fasttrap_probe_t *probe = parg;
	proc_t *p = NULL;
	int i, rc;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(id == probe->ftp_id);
#if defined(sun)
	ASSERT(MUTEX_HELD(&cpu_lock));
#endif

	/*
	 * Increment the count of enabled probes on this probe's provider;
	 * the provider can't go away while the probe still exists. We
	 * must increment this even if we aren't able to properly enable
	 * this probe.
	 */
	mutex_enter(&probe->ftp_prov->ftp_mtx);
	probe->ftp_prov->ftp_rcount++;
	mutex_exit(&probe->ftp_prov->ftp_mtx);

	/*
	 * If this probe's provider is retired (meaning it was valid in a
	 * previously exec'ed incarnation of this address space), bail out. The
	 * provider can't go away while we're in this code path.
	 */
	if (probe->ftp_prov->ftp_retired)
		return;

	/*
	 * If we can't find the process, it may be that we're in the context of
	 * a fork in which the traced process is being born and we're copying
	 * USDT probes. Otherwise, the process is gone so bail.
	 */
#if defined(sun)
	if ((p = sprlock(probe->ftp_pid)) == NULL) {
		if ((curproc->p_flag & SFORKING) == 0)
			return;

		mutex_enter(&pidlock);
		p = prfind(probe->ftp_pid);

		/*
		 * Confirm that curproc is indeed forking the process in which
		 * we're trying to enable probes.
		 */
		ASSERT(p != NULL);
		ASSERT(p->p_parent == curproc);
		ASSERT(p->p_stat == SIDL);

		mutex_enter(&p->p_lock);
		mutex_exit(&pidlock);

		sprlock_proc(p);
	}

	ASSERT(!(p->p_flag & SVFORK));
	mutex_exit(&p->p_lock);
#else
	if ((p = pfind(probe->ftp_pid)) == NULL)
		return;
#endif

	/*
	 * We have to enable the trap entry point before any user threads have
	 * the chance to execute the trap instruction we're about to place
	 * in their process's text.
	 */
#ifdef __FreeBSD__
	/*
	 * pfind() returns a locked process.
	 */
	_PHOLD(p);
	PROC_UNLOCK(p);
#endif
	fasttrap_enable_callbacks();

	/*
	 * Enable all the tracepoints and add this probe's id to each
	 * tracepoint's list of active probes.
	 */
	for (i = 0; i < probe->ftp_ntps; i++) {
		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
			/*
			 * If enabling the tracepoint failed completely,
			 * we don't have to disable it; if the failure
			 * was only partial we must disable it.
			 */
			if (rc == FASTTRAP_ENABLE_FAIL)
				i--;
			else
				ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

			/*
			 * Back up and pull out all the tracepoints we've
			 * created so far for this probe.
			 */
			while (i >= 0) {
				fasttrap_tracepoint_disable(p, probe, i);
				i--;
			}

#if defined(sun)
			mutex_enter(&p->p_lock);
			sprunlock(p);
#else
			PRELE(p);
#endif

			/*
			 * Since we're not actually enabling this probe,
			 * drop our reference on the trap table entry.
			 */
			fasttrap_disable_callbacks();
			return;
		}
	}
#if defined(sun)
	mutex_enter(&p->p_lock);
	sprunlock(p);
#else
	PRELE(p);
#endif

	probe->ftp_enabled = 1;
}

/*ARGSUSED*/
static void
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
	fasttrap_probe_t *probe = parg;
	fasttrap_provider_t *provider = probe->ftp_prov;
	proc_t *p;
	int i, whack = 0;

	ASSERT(id == probe->ftp_id);

	mutex_enter(&provider->ftp_mtx);

	/*
	 * We won't be able to acquire a /proc-esque lock on the process
	 * iff the process is dead and gone. In this case, we rely on the
	 * provider lock as a point of mutual exclusion to prevent other
	 * DTrace consumers from disabling this probe.
	 */
	if ((p = pfind(probe->ftp_pid)) == NULL) {
		mutex_exit(&provider->ftp_mtx);
		return;
	}
#ifdef __FreeBSD__
	_PHOLD(p);
	PROC_UNLOCK(p);
#endif

	/*
	 * Disable all the associated tracepoints (for fully enabled probes).
	 */
	if (probe->ftp_enabled) {
		for (i = 0; i < probe->ftp_ntps; i++) {
			fasttrap_tracepoint_disable(p, probe, i);
		}
	}

	ASSERT(provider->ftp_rcount > 0);
	provider->ftp_rcount--;

	if (p != NULL) {
		/*
		 * Even though we may not be able to remove it entirely, we
		 * mark this retired provider to get a chance to remove some
		 * of the associated probes.
		 */
		if (provider->ftp_retired && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		mutex_exit(&provider->ftp_mtx);
	} else {
		/*
		 * If the process is dead, we're just waiting for the
		 * last probe to be disabled to be able to free it.
		 */
		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		mutex_exit(&provider->ftp_mtx);
	}

	if (whack)
		fasttrap_pid_cleanup();

#ifdef __FreeBSD__
	PRELE(p);
#endif
	if (!probe->ftp_enabled)
		return;

	probe->ftp_enabled = 0;

#if defined(sun)
	ASSERT(MUTEX_HELD(&cpu_lock));
#endif
	fasttrap_disable_callbacks();
}

/*ARGSUSED*/
static void
fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
    dtrace_argdesc_t *desc)
{
	fasttrap_probe_t *probe = parg;
	char *str;
	int i, ndx;

	desc->dtargd_native[0] = '\0';
	desc->dtargd_xlate[0] = '\0';

	if (probe->ftp_prov->ftp_retired != 0 ||
	    desc->dtargd_ndx >= probe->ftp_nargs) {
		desc->dtargd_ndx = DTRACE_ARGNONE;
		return;
	}

	ndx = (probe->ftp_argmap != NULL) ?
	    probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;

	str = probe->ftp_ntypes;
	for (i = 0; i < ndx; i++) {
		str += strlen(str) + 1;
	}

	ASSERT(strlen(str + 1) < sizeof (desc->dtargd_native));
	(void) strcpy(desc->dtargd_native, str);

	if (probe->ftp_xtypes == NULL)
		return;

	str = probe->ftp_xtypes;
	for (i = 0; i < desc->dtargd_ndx; i++) {
		str += strlen(str) + 1;
	}

	ASSERT(strlen(str + 1) < sizeof (desc->dtargd_xlate));
	(void) strcpy(desc->dtargd_xlate, str);
}

/*ARGSUSED*/
static void
fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
{
	fasttrap_probe_t *probe = parg;
	int i;
	size_t size;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(fasttrap_total >= probe->ftp_ntps);

	atomic_add_32(&fasttrap_total, -probe->ftp_ntps);
	size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);

	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
		fasttrap_mod_barrier(probe->ftp_gen);

	for (i = 0; i < probe->ftp_ntps; i++) {
		kmem_free(probe->ftp_tps[i].fit_tp,
		    sizeof (fasttrap_tracepoint_t));
	}

	kmem_free(probe, size);
}


static const dtrace_pattr_t pid_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};

static dtrace_pops_t pid_pops = {
	fasttrap_pid_provide,
	NULL,
	fasttrap_pid_enable,
	fasttrap_pid_disable,
	NULL,
	NULL,
	fasttrap_pid_getargdesc,
	fasttrap_pid_getarg,
	NULL,
	fasttrap_pid_destroy
};

static dtrace_pops_t usdt_pops = {
	fasttrap_pid_provide,
	NULL,
	fasttrap_pid_enable,
	fasttrap_pid_disable,
	NULL,
	NULL,
	fasttrap_pid_getargdesc,
	fasttrap_usdt_getarg,
	NULL,
	fasttrap_pid_destroy
};

static fasttrap_proc_t *
fasttrap_proc_lookup(pid_t pid)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, *new_fprc;

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	mutex_enter(&bucket->ftb_mtx);

	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			mutex_enter(&fprc->ftpc_mtx);
			mutex_exit(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			atomic_add_64(&fprc->ftpc_acount, 1);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			mutex_exit(&fprc->ftpc_mtx);

			return (fprc);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	mutex_exit(&bucket->ftb_mtx);

	new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
	new_fprc->ftpc_pid = pid;
	new_fprc->ftpc_rcount = 1;
	new_fprc->ftpc_acount = 1;
#if !defined(sun)
	mutex_init(&new_fprc->ftpc_mtx, "fasttrap proc mtx", MUTEX_DEFAULT,
	    NULL);
#endif

	mutex_enter(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a proc hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			mutex_enter(&fprc->ftpc_mtx);
			mutex_exit(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			atomic_add_64(&fprc->ftpc_acount, 1);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			mutex_exit(&fprc->ftpc_mtx);

			kmem_free(new_fprc, sizeof (fasttrap_proc_t));

			return (fprc);
		}
	}

	new_fprc->ftpc_next = bucket->ftb_data;
	bucket->ftb_data = new_fprc;

	mutex_exit(&bucket->ftb_mtx);

	return (new_fprc);
}

static void
fasttrap_proc_release(fasttrap_proc_t *proc)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, **fprcp;
	pid_t pid = proc->ftpc_pid;

	mutex_enter(&proc->ftpc_mtx);

	ASSERT(proc->ftpc_rcount != 0);
	ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);

	if (--proc->ftpc_rcount != 0) {
		mutex_exit(&proc->ftpc_mtx);
		return;
	}

	mutex_exit(&proc->ftpc_mtx);

	/*
	 * There should definitely be no live providers associated with this
	 * process at this point.
	 */
	ASSERT(proc->ftpc_acount == 0);

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	mutex_enter(&bucket->ftb_mtx);

	fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
	while ((fprc = *fprcp) != NULL) {
		if (fprc == proc)
			break;

		fprcp = &fprc->ftpc_next;
	}

	/*
	 * Something strange has happened if we can't find the proc.
	 */
	ASSERT(fprc != NULL);

	*fprcp = fprc->ftpc_next;

	mutex_exit(&bucket->ftb_mtx);

	kmem_free(fprc, sizeof (fasttrap_proc_t));
}

/*
 * Lookup a fasttrap-managed provider based on its name and associated pid.
 * If the pattr argument is non-NULL, this function instantiates the provider
 * if it doesn't exist; otherwise it returns NULL. The provider is returned
 * with its lock held.
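 * Note that the name passed in is just the prefix; the full provider name
 * registered with the DTrace framework has the pid appended, e.g.
 * "pid1234" for the pid provider of process 1234.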
 */
static fasttrap_provider_t *
fasttrap_provider_lookup(pid_t pid, const char *name,
    const dtrace_pattr_t *pattr)
{
	fasttrap_provider_t *fp, *new_fp = NULL;
	fasttrap_bucket_t *bucket;
	char provname[DTRACE_PROVNAMELEN];
	proc_t *p;
	cred_t *cred;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));
	ASSERT(pattr != NULL);

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	mutex_enter(&bucket->ftb_mtx);

	/*
	 * Take a lap through the list and return the match if we find it.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
		    !fp->ftp_retired) {
			mutex_enter(&fp->ftp_mtx);
			mutex_exit(&bucket->ftb_mtx);
			return (fp);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	mutex_exit(&bucket->ftb_mtx);

	/*
	 * Make sure the process exists, isn't a child created as the result
	 * of a vfork(2), and isn't a zombie (but may be in fork).
	 */
	if ((p = pfind(pid)) == NULL)
		return (NULL);

	/*
	 * Increment p_dtrace_probes so that the process knows to inform us
	 * when it exits or execs. fasttrap_provider_free() decrements this
	 * when we're done with this provider.
	 */
	p->p_dtrace_probes++;

	/*
	 * Grab the credentials for this process so we have
	 * something to pass to dtrace_register().
	 */
	PROC_LOCK_ASSERT(p, MA_OWNED);
	crhold(p->p_ucred);
	cred = p->p_ucred;
	PROC_UNLOCK(p);

	new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
	new_fp->ftp_pid = pid;
	new_fp->ftp_proc = fasttrap_proc_lookup(pid);
#if !defined(sun)
	mutex_init(&new_fp->ftp_mtx, "provider mtx", MUTEX_DEFAULT, NULL);
	mutex_init(&new_fp->ftp_cmtx, "lock on creating", MUTEX_DEFAULT, NULL);
#endif

	ASSERT(new_fp->ftp_proc != NULL);

	mutex_enter(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a provider hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
		    !fp->ftp_retired) {
			mutex_enter(&fp->ftp_mtx);
			mutex_exit(&bucket->ftb_mtx);
			fasttrap_provider_free(new_fp);
			crfree(cred);
			return (fp);
		}
	}

	(void) strcpy(new_fp->ftp_name, name);

	/*
	 * Fail and return NULL if either the provider name is too long
	 * or we fail to register this new provider with the DTrace
	 * framework. Note that this is the only place we ever construct
	 * the full provider name -- we keep it in pieces in the provider
	 * structure.
	 */
	if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
	    sizeof (provname) ||
	    dtrace_register(provname, pattr,
	    DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
	    pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
	    &new_fp->ftp_provid) != 0) {
		mutex_exit(&bucket->ftb_mtx);
		fasttrap_provider_free(new_fp);
		crfree(cred);
		return (NULL);
	}

	new_fp->ftp_next = bucket->ftb_data;
	bucket->ftb_data = new_fp;

	mutex_enter(&new_fp->ftp_mtx);
	mutex_exit(&bucket->ftb_mtx);

	crfree(cred);
	return (new_fp);
}

static void
fasttrap_provider_free(fasttrap_provider_t *provider)
{
	pid_t pid = provider->ftp_pid;
	proc_t *p;

	/*
	 * There need to be no associated enabled probes, no consumers
	 * creating probes, and no meta providers referencing this provider.
	 */
	ASSERT(provider->ftp_rcount == 0);
	ASSERT(provider->ftp_ccount == 0);
	ASSERT(provider->ftp_mcount == 0);

	/*
	 * If this provider hasn't been retired, we need to explicitly drop the
	 * count of active providers on the associated process structure.
	 */
	if (!provider->ftp_retired) {
		atomic_add_64(&provider->ftp_proc->ftpc_acount, -1);
		ASSERT(provider->ftp_proc->ftpc_acount <
		    provider->ftp_proc->ftpc_rcount);
	}

	fasttrap_proc_release(provider->ftp_proc);

#if !defined(sun)
	mutex_destroy(&provider->ftp_mtx);
	mutex_destroy(&provider->ftp_cmtx);
#endif
	kmem_free(provider, sizeof (fasttrap_provider_t));

	/*
	 * Decrement p_dtrace_probes on the process whose provider we're
	 * freeing. We don't have to worry about clobbering someone else's
	 * modifications to it because we have locked the bucket that
	 * corresponds to this process's hash chain in the provider hash
	 * table. Don't sweat it if we can't find the process.
	 */
	if ((p = pfind(pid)) == NULL) {
		return;
	}

	p->p_dtrace_probes--;
#if !defined(sun)
	PROC_UNLOCK(p);
#endif
}

static void
fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
{
	fasttrap_provider_t *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	mutex_enter(&bucket->ftb_mtx);

	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strcmp(fp->ftp_name, name) == 0 &&
		    !fp->ftp_retired)
			break;
	}

	if (fp == NULL) {
		mutex_exit(&bucket->ftb_mtx);
		return;
	}

	mutex_enter(&fp->ftp_mtx);
	ASSERT(!mprov || fp->ftp_mcount > 0);
	if (mprov && --fp->ftp_mcount != 0) {
		mutex_exit(&fp->ftp_mtx);
		mutex_exit(&bucket->ftb_mtx);
		return;
	}

	/*
	 * Mark the provider to be removed in our post-processing step, mark it
	 * retired, and drop the active count on its proc. Marking it indicates
	 * that we should try to remove it; setting the retired flag indicates
	 * that we're done with this provider; dropping the active count on
	 * the proc releases our hold, and when this reaches zero (as it will
	 * during exit or exec) the proc and associated providers become
	 * defunct.
	 *
	 * We obviously need to take the bucket lock before the provider lock
	 * to perform the lookup, but we need to drop the provider lock
	 * before calling into the DTrace framework since we acquire the
	 * provider lock in callbacks invoked from the DTrace framework. The
	 * bucket lock therefore protects the integrity of the provider hash
	 * table.
	 */
	atomic_add_64(&fp->ftp_proc->ftpc_acount, -1);
	ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

	fp->ftp_retired = 1;
	fp->ftp_marked = 1;
	provid = fp->ftp_provid;
	mutex_exit(&fp->ftp_mtx);

	/*
	 * We don't have to worry about invalidating the same provider twice
	 * since fasttrap_provider_lookup() will ignore providers that have
	 * been marked as retired.
	 */
	dtrace_invalidate(provid);

	mutex_exit(&bucket->ftb_mtx);

	fasttrap_pid_cleanup();
}

/*
 * Comparison functions used by qsort() to sort tracepoint offsets in
 * ascending order.
 */
static int
fasttrap_uint32_cmp(const void *ap, const void *bp)
{
	return (*(const uint32_t *)ap - *(const uint32_t *)bp);
}

static int
fasttrap_uint64_cmp(const void *ap, const void *bp)
{
	return (*(const uint64_t *)ap - *(const uint64_t *)bp);
}

static int
fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
{
	fasttrap_provider_t *provider;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	char *name;
	int i, aframes = 0, whack;

	/*
	 * There needs to be at least one desired trace point.
	 */
	if (pdata->ftps_noffs == 0)
		return (EINVAL);

	switch (pdata->ftps_type) {
	case DTFTP_ENTRY:
		name = "entry";
		aframes = FASTTRAP_ENTRY_AFRAMES;
		break;
	case DTFTP_RETURN:
		name = "return";
		aframes = FASTTRAP_RETURN_AFRAMES;
		break;
	case DTFTP_OFFSETS:
		name = NULL;
		break;
	default:
		return (EINVAL);
	}

	if ((provider = fasttrap_provider_lookup(pdata->ftps_pid,
	    FASTTRAP_PID_NAME, &pid_attr)) == NULL)
		return (ESRCH);

	/*
	 * Increment this reference count to indicate that a consumer is
	 * actively adding a new probe associated with this provider. This
	 * prevents the provider from being deleted -- we'll need to check
	 * for pending deletions when we drop this reference count.
	 */
	provider->ftp_ccount++;
	mutex_exit(&provider->ftp_mtx);

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes. We must drop the provider lock
	 * before taking this lock to avoid a three-way deadlock with the
	 * DTrace framework.
1699 */ 1700 mutex_enter(&provider->ftp_cmtx); 1701 1702 if (name == NULL) { 1703 for (i = 0; i < pdata->ftps_noffs; i++) { 1704 char name_str[17]; 1705 1706 (void) sprintf(name_str, "%llx", 1707 (unsigned long long)pdata->ftps_offs[i]); 1708 1709 if (dtrace_probe_lookup(provider->ftp_provid, 1710 pdata->ftps_mod, pdata->ftps_func, name_str) != 0) 1711 continue; 1712 1713 atomic_add_32(&fasttrap_total, 1); 1714 1715 if (fasttrap_total > fasttrap_max) { 1716 atomic_add_32(&fasttrap_total, -1); 1717 goto no_mem; 1718 } 1719 1720 pp = kmem_zalloc(sizeof (fasttrap_probe_t), KM_SLEEP); 1721 1722 pp->ftp_prov = provider; 1723 pp->ftp_faddr = pdata->ftps_pc; 1724 pp->ftp_fsize = pdata->ftps_size; 1725 pp->ftp_pid = pdata->ftps_pid; 1726 pp->ftp_ntps = 1; 1727 1728 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), 1729 KM_SLEEP); 1730 1731 tp->ftt_proc = provider->ftp_proc; 1732 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc; 1733 tp->ftt_pid = pdata->ftps_pid; 1734 1735 pp->ftp_tps[0].fit_tp = tp; 1736 pp->ftp_tps[0].fit_id.fti_probe = pp; 1737 pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_type; 1738 1739 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, 1740 pdata->ftps_mod, pdata->ftps_func, name_str, 1741 FASTTRAP_OFFSET_AFRAMES, pp); 1742 } 1743 1744 } else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod, 1745 pdata->ftps_func, name) == 0) { 1746 atomic_add_32(&fasttrap_total, pdata->ftps_noffs); 1747 1748 if (fasttrap_total > fasttrap_max) { 1749 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs); 1750 goto no_mem; 1751 } 1752 1753 /* 1754 * Make sure all tracepoint program counter values are unique. 1755 * We later assume that each probe has exactly one tracepoint 1756 * for a given pc. 1757 */ 1758 qsort(pdata->ftps_offs, pdata->ftps_noffs, 1759 sizeof (uint64_t), fasttrap_uint64_cmp); 1760 for (i = 1; i < pdata->ftps_noffs; i++) { 1761 if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1]) 1762 continue; 1763 1764 atomic_add_32(&fasttrap_total, -pdata->ftps_noffs); 1765 goto no_mem; 1766 } 1767 1768 ASSERT(pdata->ftps_noffs > 0); 1769 pp = kmem_zalloc(offsetof(fasttrap_probe_t, 1770 ftp_tps[pdata->ftps_noffs]), KM_SLEEP); 1771 1772 pp->ftp_prov = provider; 1773 pp->ftp_faddr = pdata->ftps_pc; 1774 pp->ftp_fsize = pdata->ftps_size; 1775 pp->ftp_pid = pdata->ftps_pid; 1776 pp->ftp_ntps = pdata->ftps_noffs; 1777 1778 for (i = 0; i < pdata->ftps_noffs; i++) { 1779 tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), 1780 KM_SLEEP); 1781 1782 tp->ftt_proc = provider->ftp_proc; 1783 tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc; 1784 tp->ftt_pid = pdata->ftps_pid; 1785 1786 pp->ftp_tps[i].fit_tp = tp; 1787 pp->ftp_tps[i].fit_id.fti_probe = pp; 1788 pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_type; 1789 } 1790 1791 pp->ftp_id = dtrace_probe_create(provider->ftp_provid, 1792 pdata->ftps_mod, pdata->ftps_func, name, aframes, pp); 1793 } 1794 1795 mutex_exit(&provider->ftp_cmtx); 1796 1797 /* 1798 * We know that the provider is still valid since we incremented the 1799 * creation reference count. If someone tried to clean up this provider 1800 * while we were using it (e.g. because the process called exec(2) or 1801 * exit(2)), take note of that and try to clean it up now. 
	 */
	mutex_enter(&provider->ftp_mtx);
	provider->ftp_ccount--;
	whack = provider->ftp_retired;
	mutex_exit(&provider->ftp_mtx);

	if (whack)
		fasttrap_pid_cleanup();

	return (0);

no_mem:
	/*
	 * If we've exhausted the allowable resources, we'll try to remove
	 * this provider to free some up. This is to cover the case where
	 * the user has accidentally created many more probes than was
	 * intended (e.g. pid123:::).
	 */
	mutex_exit(&provider->ftp_cmtx);
	mutex_enter(&provider->ftp_mtx);
	provider->ftp_ccount--;
	provider->ftp_marked = 1;
	mutex_exit(&provider->ftp_mtx);

	fasttrap_pid_cleanup();

	return (ENOMEM);
}

/*ARGSUSED*/
static void *
fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	fasttrap_provider_t *provider;

	/*
	 * A 32-bit unsigned integer (like a pid for example) can be
	 * expressed in 10 or fewer decimal digits. Make sure that we'll
	 * have enough space for the provider name.
	 */
	if (strlen(dhpv->dthpv_provname) + 10 >=
	    sizeof (provider->ftp_name)) {
		printf("failed to instantiate provider %s: "
		    "name too long to accommodate pid", dhpv->dthpv_provname);
		return (NULL);
	}

	/*
	 * Don't let folks spoof the true pid provider.
	 */
	if (strcmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME) == 0) {
		printf("failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_PID_NAME);
		return (NULL);
	}

	/*
	 * The highest stability class that fasttrap supports is ISA; cap
	 * the stability of the new provider accordingly.
	 */
	if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;

	if ((provider = fasttrap_provider_lookup(pid, dhpv->dthpv_provname,
	    &dhpv->dthpv_pattr)) == NULL) {
		printf("failed to instantiate provider %s for "
		    "process %u", dhpv->dthpv_provname, (uint_t)pid);
		return (NULL);
	}

	/*
	 * Up the meta provider count so this provider isn't removed until
	 * the meta provider has been told to remove it.
	 */
	provider->ftp_mcount++;

	mutex_exit(&provider->ftp_mtx);

	return (provider);
}

/*ARGSUSED*/
static void
fasttrap_meta_create_probe(void *arg, void *parg,
    dtrace_helper_probedesc_t *dhpb)
{
	fasttrap_provider_t *provider = parg;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	int i, j;
	uint32_t ntps;

	/*
	 * Since the meta provider count is non-zero we don't have to worry
	 * about this provider disappearing.
	 */
	ASSERT(provider->ftp_mcount > 0);

	/*
	 * The offsets must be unique.
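	 * We sort each offset array and reject the probe description on
	 * any duplicate, since the rest of the code assumes exactly one
	 * tracepoint per pc.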
	 */
	qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_noffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
			return;
	}

	qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
			return;
	}

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes.
	 */
	mutex_enter(&provider->ftp_cmtx);

	if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
		mutex_exit(&provider->ftp_cmtx);
		return;
	}

	ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
	ASSERT(ntps > 0);

	atomic_add_32(&fasttrap_total, ntps);

	if (fasttrap_total > fasttrap_max) {
		atomic_add_32(&fasttrap_total, -ntps);
		mutex_exit(&provider->ftp_cmtx);
		return;
	}

	pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);

	pp->ftp_prov = provider;
	pp->ftp_pid = provider->ftp_pid;
	pp->ftp_ntps = ntps;
	pp->ftp_nargs = dhpb->dthpb_xargc;
	pp->ftp_xtypes = dhpb->dthpb_xtypes;
	pp->ftp_ntypes = dhpb->dthpb_ntypes;

	/*
	 * First create a tracepoint for each actual point of interest.
	 */
	for (i = 0; i < dhpb->dthpb_noffs; i++) {
		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);

		tp->ftt_proc = provider->ftp_proc;
		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_offs[i];
		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
#ifdef __sparc
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_POST_OFFSETS;
#else
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
#endif
	}

	/*
	 * Then create a tracepoint for each is-enabled point.
	 */
	for (j = 0; i < ntps; i++, j++) {
		tp = kmem_zalloc(sizeof (fasttrap_tracepoint_t), KM_SLEEP);

		tp->ftt_proc = provider->ftp_proc;
		tp->ftt_pc = dhpb->dthpb_base + dhpb->dthpb_enoffs[j];
		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
	}

	/*
	 * If the arguments are shuffled around we set the argument remapping
	 * table. Later, when the probe fires, we only remap the arguments
	 * if the table is non-NULL.
	 */
	for (i = 0; i < dhpb->dthpb_xargc; i++) {
		if (dhpb->dthpb_args[i] != i) {
			pp->ftp_argmap = dhpb->dthpb_args;
			break;
		}
	}

	/*
	 * The probe is fully constructed -- register it with DTrace.
	 */
	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);

	mutex_exit(&provider->ftp_cmtx);
}

/*ARGSUSED*/
static void
fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	/*
/*ARGSUSED*/
static void
fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
{
	/*
	 * Clean up the USDT provider. There may be active consumers of the
	 * provider busy adding probes, but no damage will actually befall
	 * the provider until that count has dropped to zero. This just puts
	 * the provider on death row.
	 */
	fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
}

static dtrace_mops_t fasttrap_mops = {
	fasttrap_meta_create_probe,
	fasttrap_meta_provide,
	fasttrap_meta_remove
};

/*ARGSUSED*/
static int
fasttrap_open(struct cdev *dev __unused, int oflags __unused,
    int devtype __unused, struct thread *td __unused)
{
	return (0);
}

/*ARGSUSED*/
static int
fasttrap_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int fflag,
    struct thread *td)
{
#ifdef notyet
	struct kinfo_proc kp;
	const cred_t *cr = td->td_ucred;
#endif
	if (!dtrace_attached())
		return (EAGAIN);

	if (cmd == FASTTRAPIOC_MAKEPROBE) {
		fasttrap_probe_spec_t *uprobe = (void *)arg;
		fasttrap_probe_spec_t *probe;
		uint64_t noffs;
		size_t size;
		int ret;
		char *c;

#if defined(sun)
		if (copyin(&uprobe->ftps_noffs, &noffs,
		    sizeof (uprobe->ftps_noffs)))
			return (EFAULT);
#else
		noffs = uprobe->ftps_noffs;
#endif

		/*
		 * Probes must have at least one tracepoint, and the
		 * tracepoint count must be small enough that the size
		 * computation below cannot overflow.
		 */
		if (noffs == 0 ||
		    noffs > 1024 * 1024 / sizeof (probe->ftps_offs[0]))
			return (EINVAL);

		size = sizeof (fasttrap_probe_spec_t) +
		    sizeof (probe->ftps_offs[0]) * (noffs - 1);

		if (size > 1024 * 1024)
			return (ENOMEM);

		probe = kmem_alloc(size, KM_SLEEP);

#if defined(sun)
		if (copyin(uprobe, probe, size) != 0) {
			kmem_free(probe, size);
			return (EFAULT);
		}
#else
		/*
		 * Copy the fixed-size header first, then the remaining
		 * offsets; the tail is the total size minus the header
		 * that has already been copied.
		 */
		memcpy(probe, uprobe, sizeof(*probe));
		if (noffs > 1 && copyin(uprobe + 1, probe + 1,
		    size - sizeof(*probe)) != 0) {
			kmem_free(probe, size);
			return (EFAULT);
		}
#endif

		/*
		 * Verify that the function and module strings contain no
		 * funny characters.
		 */
		for (c = &probe->ftps_func[0]; *c != '\0'; c++) {
			if (*c < 0x20 || 0x7f <= *c) {
				ret = EINVAL;
				goto err;
			}
		}

		for (c = &probe->ftps_mod[0]; *c != '\0'; c++) {
			if (*c < 0x20 || 0x7f <= *c) {
				ret = EINVAL;
				goto err;
			}
		}

#ifdef notyet
		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = probe->ftps_pid;

#if defined(sun)
			mutex_enter(&pidlock);
#endif
			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
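			/*
			 * Note: on FreeBSD, pfind() returns with the process
			 * lock held, which is why the !sun paths below merely
			 * assert ownership of that lock and later
			 * PROC_UNLOCK() it rather than acquiring it
			 * themselves.
			 */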
			p = pfind(pid);
			if (p)
				fill_kinfo_proc(p, &kp);
			if (p == NULL || kp.ki_stat == SIDL) {
#if defined(sun)
				mutex_exit(&pidlock);
#endif
				return (ESRCH);
			}
#if defined(sun)
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
#else
			PROC_LOCK_ASSERT(p, MA_OWNED);
#endif

#ifdef notyet
			if ((ret = priv_proc_cred_perm(cr, p, NULL,
			    VREAD | VWRITE)) != 0) {
#if defined(sun)
				mutex_exit(&p->p_lock);
#else
				PROC_UNLOCK(p);
#endif
				return (ret);
			}
#endif /* notyet */
#if defined(sun)
			mutex_exit(&p->p_lock);
#else
			PROC_UNLOCK(p);
#endif
		}
#endif /* notyet */

		ret = fasttrap_add_probe(probe);
err:
		kmem_free(probe, size);

		return (ret);

	} else if (cmd == FASTTRAPIOC_GETINSTR) {
		fasttrap_instr_query_t instr;
		fasttrap_tracepoint_t *tp;
		uint_t index;
#if defined(sun)
		int ret;
#endif

#if defined(sun)
		if (copyin((void *)arg, &instr, sizeof (instr)) != 0)
			return (EFAULT);
#endif

#ifdef notyet
		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = instr.ftiq_pid;

#if defined(sun)
			mutex_enter(&pidlock);
#endif
			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			p = pfind(pid);
			if (p)
				fill_kinfo_proc(p, &kp);
			if (p == NULL || kp.ki_stat == SIDL) {
#if defined(sun)
				mutex_exit(&pidlock);
#endif
				return (ESRCH);
			}
#if defined(sun)
			mutex_enter(&p->p_lock);
			mutex_exit(&pidlock);
#else
			PROC_LOCK_ASSERT(p, MA_OWNED);
#endif

#ifdef notyet
			if ((ret = priv_proc_cred_perm(cr, p, NULL,
			    VREAD)) != 0) {
#if defined(sun)
				mutex_exit(&p->p_lock);
#else
				PROC_UNLOCK(p);
#endif
				return (ret);
			}
#endif /* notyet */

#if defined(sun)
			mutex_exit(&p->p_lock);
#else
			PROC_UNLOCK(p);
#endif
		}
#endif /* notyet */

		index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);

		mutex_enter(&fasttrap_tpoints.fth_table[index].ftb_mtx);
		tp = fasttrap_tpoints.fth_table[index].ftb_data;
		while (tp != NULL) {
			if (instr.ftiq_pid == tp->ftt_pid &&
			    instr.ftiq_pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)
				break;

			tp = tp->ftt_next;
		}

		if (tp == NULL) {
			mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);
			return (ENOENT);
		}

		bcopy(&tp->ftt_instr, &instr.ftiq_instr,
		    sizeof (instr.ftiq_instr));
		mutex_exit(&fasttrap_tpoints.fth_table[index].ftb_mtx);

		if (copyout(&instr, (void *)arg, sizeof (instr)) != 0)
			return (EFAULT);

		return (0);
	}

	return (EINVAL);
}

static int
fasttrap_load(void)
{
	ulong_t nent;
	int i;

	/* Create the /dev/dtrace/fasttrap entry. */
	fasttrap_cdev = make_dev(&fasttrap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/fasttrap");

	mtx_init(&fasttrap_cleanup_mtx, "fasttrap clean", "dtrace", MTX_DEF);
	callout_init_mtx(&fasttrap_timeout, &fasttrap_cleanup_mtx, 0);
	mutex_init(&fasttrap_count_mtx, "fasttrap count mtx", MUTEX_DEFAULT,
	    NULL);

	/*
	 * Install our hooks into fork(2), exec(2), and exit(2).
	 */
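	/*
	 * Note that a single handler, fasttrap_exec_exit(), serves as both
	 * the exec and exit hook: in either case the process image is going
	 * away, so the necessary cleanup is the same.
	 */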
	dtrace_fasttrap_fork = &fasttrap_fork;
	dtrace_fasttrap_exit = &fasttrap_exec_exit;
	dtrace_fasttrap_exec = &fasttrap_exec_exit;

#if defined(sun)
	fasttrap_max = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-max-probes", FASTTRAP_MAX_DEFAULT);
#else
	fasttrap_max = FASTTRAP_MAX_DEFAULT;
#endif
	fasttrap_total = 0;

	/*
	 * Conjure up the tracepoints hashtable...
	 */
#if defined(sun)
	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
#else
	nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
#endif

	if (nent == 0 || nent > 0x1000000)
		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;

	if ((nent & (nent - 1)) == 0)
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_tpoints.fth_nent > 0);
	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#if !defined(sun)
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++)
		mutex_init(&fasttrap_tpoints.fth_table[i].ftb_mtx,
		    "tracepoints bucket mtx", MUTEX_DEFAULT, NULL);
#endif

	/*
	 * ... and the providers hash table...
	 */
	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_provs.fth_nent = nent;
	else
		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_provs.fth_nent > 0);
	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#if !defined(sun)
	for (i = 0; i < fasttrap_provs.fth_nent; i++)
		mutex_init(&fasttrap_provs.fth_table[i].ftb_mtx,
		    "providers bucket mtx", MUTEX_DEFAULT, NULL);
#endif

	/*
	 * ... and the procs hash table.
	 */
	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_procs.fth_nent = nent;
	else
		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_procs.fth_nent > 0);
	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
#if !defined(sun)
	for (i = 0; i < fasttrap_procs.fth_nent; i++)
		mutex_init(&fasttrap_procs.fth_table[i].ftb_mtx,
		    "processes bucket mtx", MUTEX_DEFAULT, NULL);

	CPU_FOREACH(i) {
		mutex_init(&fasttrap_cpuc_pid_lock[i], "fasttrap barrier",
		    MUTEX_DEFAULT, NULL);
	}
#endif

	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
	    &fasttrap_meta_id);

	return (0);
}
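
/*
 * A worked example of the hash-table sizing in fasttrap_load() above,
 * assuming fasttrap_highbit() behaves like the traditional highbit() and
 * returns the one-based index of the most significant set bit:
 *
 *	nent = 768 (0x300):  highbit(768) = 10, so fth_nent = 1 << 10 = 1024
 *	nent = 1024 (0x400): already a power of two and is used as-is
 *
 * Rounding up to a power of two is what makes "fth_mask = fth_nent - 1"
 * a valid bit mask for reducing a hash value to a bucket index.
 */
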
static int
fasttrap_unload(void)
{
	int i, fail = 0;

	/*
	 * Unregister the meta-provider to make sure no new fasttrap-
	 * managed providers come along while we're trying to close up
	 * shop. If we fail to detach, we'll need to re-register as a
	 * meta-provider. We can fail to unregister as a meta-provider
	 * if providers we manage still exist.
	 */
	if (fasttrap_meta_id != DTRACE_METAPROVNONE &&
	    dtrace_meta_unregister(fasttrap_meta_id) != 0)
		return (-1);

	/*
	 * Prevent any new cleanup timeouts from being scheduled by
	 * clearing the pending-work flag, and wait for any timeout that
	 * is currently running to complete by draining the callout.
	 */
	mtx_lock(&fasttrap_cleanup_mtx);
	fasttrap_cleanup_work = 0;
	callout_drain(&fasttrap_timeout);
	mtx_unlock(&fasttrap_cleanup_mtx);

	/*
	 * Iterate over all of our providers. If we can't unregister one --
	 * typically because it's still in use -- we'll have to fail the
	 * detach.
	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		fasttrap_provider_t **fpp, *fp;
		fasttrap_bucket_t *bucket = &fasttrap_provs.fth_table[i];

		mutex_enter(&bucket->ftb_mtx);
		fpp = (fasttrap_provider_t **)&bucket->ftb_data;
		while ((fp = *fpp) != NULL) {
			/*
			 * Acquire and release the lock as a simple way of
			 * waiting for any other consumer to finish with
			 * this provider. A thread must first acquire the
			 * bucket lock so there's no chance of another thread
			 * blocking on the provider's lock.
			 */
			mutex_enter(&fp->ftp_mtx);
			mutex_exit(&fp->ftp_mtx);

			if (dtrace_unregister(fp->ftp_provid) != 0) {
				fail = 1;
				fpp = &fp->ftp_next;
			} else {
				*fpp = fp->ftp_next;
				fasttrap_provider_free(fp);
			}
		}

		mutex_exit(&bucket->ftb_mtx);
	}
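
	/*
	 * The sweep above uses the classic pointer-to-pointer unlink idiom:
	 * fpp always points at the link (ftb_data or the previous element's
	 * ftp_next) that refers to the current element, so removing an
	 * element needs no special case for the head of the list.  A
	 * generic sketch of the same idiom (illustration only):
	 *
	 *	node_t **linkp = &head, *n;
	 *
	 *	while ((n = *linkp) != NULL) {
	 *		if (doomed(n)) {
	 *			*linkp = n->next;	(unlink in place)
	 *			node_free(n);
	 *		} else {
	 *			linkp = &n->next;	(advance the link)
	 *		}
	 *	}
	 */
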
	if (fail) {
		uint_t work;
		/*
		 * If we've failed to detach, check whether any cleanup
		 * work accumulated while we were unsuccessfully trying to
		 * detach, and if so kick off a new cleanup pass before
		 * re-registering as a meta-provider below.
		 */
		mtx_lock(&fasttrap_cleanup_mtx);
		work = fasttrap_cleanup_work;
		callout_drain(&fasttrap_timeout);
		mtx_unlock(&fasttrap_cleanup_mtx);

		if (work)
			fasttrap_pid_cleanup();

		(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
		    &fasttrap_meta_id);

		return (-1);
	}

#ifdef DEBUG
	mutex_enter(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count == 0);
	mutex_exit(&fasttrap_count_mtx);
#endif

	kmem_free(fasttrap_tpoints.fth_table,
	    fasttrap_tpoints.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_tpoints.fth_nent = 0;

	kmem_free(fasttrap_provs.fth_table,
	    fasttrap_provs.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_provs.fth_nent = 0;

	kmem_free(fasttrap_procs.fth_table,
	    fasttrap_procs.fth_nent * sizeof (fasttrap_bucket_t));
	fasttrap_procs.fth_nent = 0;

	/*
	 * We know there are no tracepoints in any process anywhere in
	 * the system so there is no process which has its p_dtrace_count
	 * greater than zero, therefore we know that no thread can actively
	 * be executing code in fasttrap_fork(). Similarly for p_dtrace_probes
	 * and fasttrap_exec() and fasttrap_exit().
	 */
	ASSERT(dtrace_fasttrap_fork == &fasttrap_fork);
	dtrace_fasttrap_fork = NULL;

	ASSERT(dtrace_fasttrap_exec == &fasttrap_exec_exit);
	dtrace_fasttrap_exec = NULL;

	ASSERT(dtrace_fasttrap_exit == &fasttrap_exec_exit);
	dtrace_fasttrap_exit = NULL;

#if !defined(sun)
	destroy_dev(fasttrap_cdev);
	mutex_destroy(&fasttrap_count_mtx);
	CPU_FOREACH(i) {
		mutex_destroy(&fasttrap_cpuc_pid_lock[i]);
	}
#endif

	return (0);
}

/* ARGSUSED */
static int
fasttrap_modevent(module_t mod __unused, int type, void *data __unused)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		break;

	case MOD_UNLOAD:
		break;

	case MOD_SHUTDOWN:
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

SYSINIT(fasttrap_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fasttrap_load,
    NULL);
SYSUNINIT(fasttrap_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY,
    fasttrap_unload, NULL);

DEV_MODULE(fasttrap, fasttrap_modevent, NULL);
MODULE_VERSION(fasttrap, 1);
MODULE_DEPEND(fasttrap, dtrace, 1, 1, 1);
MODULE_DEPEND(fasttrap, opensolaris, 1, 1, 1);
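
/*
 * Usage note (illustrative, not part of the driver): once this module is
 * loaded, /dev/dtrace/fasttrap exists and user-level tracing is available
 * via USDT and the pid provider, e.g.:
 *
 *	dtrace -p 1234 -n 'pid$target:libc.so.7::entry'
 *
 * The pid and module name here are hypothetical examples only.
 */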