/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Portions Copyright 2006-2008 John Birrell jb@freebsd.org
 *
 * $FreeBSD$
 *
 */

/*
 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cpuvar.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/selinfo.h>
#include <sys/smp.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

#include <sys/dtrace.h>
#include <sys/dtrace_bsd.h>

static MALLOC_DEFINE(M_FBT, "fbt", "Function Boundary Tracing");

#define	FBT_PUSHL_EBP		0x55
#define	FBT_MOVL_ESP_EBP0_V0	0x8b
#define	FBT_MOVL_ESP_EBP1_V0	0xec
#define	FBT_MOVL_ESP_EBP0_V1	0x89
#define	FBT_MOVL_ESP_EBP1_V1	0xe5
#define	FBT_REX_RSP_RBP		0x48

#define	FBT_POPL_EBP		0x5d
#define	FBT_RET			0xc3
#define	FBT_RET_IMM16		0xc2
#define	FBT_LEAVE		0xc9

#ifdef __amd64__
#define	FBT_PATCHVAL		0xcc
#else
#define	FBT_PATCHVAL		0xf0
#endif
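
/*
 * Note on the byte values above (roughly, the x86 patterns FBT matches and
 * patches): 0x55 is push %ebp/%rbp, 0x8b 0xec and 0x89 0xe5 are the two
 * encodings of mov %esp,%ebp, and 0x48 is the REX.W prefix that precedes
 * mov %rsp,%rbp on amd64.  The epilogue bytes are pop %ebp (0x5d),
 * leave (0xc9), ret (0xc3) and ret imm16 (0xc2).  FBT_PATCHVAL is the
 * single byte written over an enabled probe site: int3 (0xcc) on amd64 and
 * the lock prefix (0xf0) on i386; hitting the patched byte raises a trap
 * that the kernel's DTrace hooks route to fbt_invop() below.
 */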

static d_open_t	fbt_open;
static int	fbt_unload(void);
static void	fbt_getargdesc(void *, dtrace_id_t, void *, dtrace_argdesc_t *);
static void	fbt_provide_module(void *, modctl_t *);
static void	fbt_destroy(void *, dtrace_id_t, void *);
static void	fbt_enable(void *, dtrace_id_t, void *);
static void	fbt_disable(void *, dtrace_id_t, void *);
static void	fbt_load(void *);
static void	fbt_suspend(void *, dtrace_id_t, void *);
static void	fbt_resume(void *, dtrace_id_t, void *);

#define	FBT_ENTRY	"entry"
#define	FBT_RETURN	"return"
#define	FBT_ADDR2NDX(addr)	((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask)
#define	FBT_PROBETAB_SIZE	0x8000		/* 32k entries -- 128K total */

static struct cdevsw fbt_cdevsw = {
	.d_version	= D_VERSION,
	.d_open		= fbt_open,
	.d_name		= "fbt",
};

static dtrace_pattr_t fbt_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA },
};

static dtrace_pops_t fbt_pops = {
	NULL,
	fbt_provide_module,
	fbt_enable,
	fbt_disable,
	fbt_suspend,
	fbt_resume,
	fbt_getargdesc,
	NULL,
	NULL,
	fbt_destroy
};

typedef struct fbt_probe {
	struct fbt_probe *fbtp_hashnext;
	uint8_t		*fbtp_patchpoint;
	int8_t		fbtp_rval;
	uint8_t		fbtp_patchval;
	uint8_t		fbtp_savedval;
	uintptr_t	fbtp_roffset;
	dtrace_id_t	fbtp_id;
	const char	*fbtp_name;
	modctl_t	*fbtp_ctl;
	int		fbtp_loadcnt;
	int		fbtp_primary;
	int		fbtp_invop_cnt;
	int		fbtp_symindx;
	struct fbt_probe *fbtp_next;
} fbt_probe_t;

static struct cdev		*fbt_cdev;
static dtrace_provider_id_t	fbt_id;
static fbt_probe_t		**fbt_probetab;
static int			fbt_probetab_size;
static int			fbt_probetab_mask;
static int			fbt_verbose = 0;

static void
fbt_doubletrap(void)
{
	fbt_probe_t *fbt;
	int i;

	for (i = 0; i < fbt_probetab_size; i++) {
		fbt = fbt_probetab[i];

		for (; fbt != NULL; fbt = fbt->fbtp_next)
			*fbt->fbtp_patchpoint = fbt->fbtp_savedval;
	}
}

static int
fbt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval)
{
	solaris_cpu_t *cpu = &solaris_cpu[curcpu];
	uintptr_t stack0, stack1, stack2, stack3, stack4;
	fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];

	for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
		if ((uintptr_t)fbt->fbtp_patchpoint == addr) {
			fbt->fbtp_invop_cnt++;
			if (fbt->fbtp_roffset == 0) {
				int i = 0;
				/*
				 * When accessing the arguments on the stack,
				 * we must protect against accessing beyond
				 * the stack.  We can safely set NOFAULT here
				 * -- we know that interrupts are already
				 * disabled.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				cpu->cpu_dtrace_caller = stack[i++];
				stack0 = stack[i++];
				stack1 = stack[i++];
				stack2 = stack[i++];
				stack3 = stack[i++];
				stack4 = stack[i++];
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);

				dtrace_probe(fbt->fbtp_id, stack0, stack1,
				    stack2, stack3, stack4);

				cpu->cpu_dtrace_caller = 0;
			} else {
#ifdef __amd64__
				/*
				 * On amd64, we instrument the ret, not the
				 * leave.  We therefore need to set the caller
				 * to assure that the top frame of a stack()
				 * action is correct.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				cpu->cpu_dtrace_caller = stack[0];
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);
#endif

				dtrace_probe(fbt->fbtp_id, fbt->fbtp_roffset,
				    rval, 0, 0, 0);
				cpu->cpu_dtrace_caller = 0;
			}

			return (fbt->fbtp_rval);
		}
	}

	return (0);
}
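
/*
 * Illustrative usage note: when an entry probe fires, the handler above
 * hands the first five stack-resident argument slots to dtrace_probe(), so
 * they surface in D as arg0..arg4; return probes report the offset of the
 * return site (arg0) and the return value (arg1).  For example, assuming
 * the fbt and dtrace modules are loaded:
 *
 *	dtrace -n 'fbt:kernel:malloc:entry { trace(arg0); }'
 *	dtrace -n 'fbt:kernel:malloc:return { trace(arg1); }'
 */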

static int
fbt_provide_module_function(linker_file_t lf, int symindx,
    linker_symval_t *symval, void *opaque)
{
	char *modname = opaque;
	const char *name = symval->name;
	fbt_probe_t *fbt, *retfbt;
	int j;
	int size;
	u_int8_t *instr, *limit;

	if (strncmp(name, "dtrace_", 7) == 0 &&
	    strncmp(name, "dtrace_safe_", 12) != 0) {
		/*
		 * Anything beginning with "dtrace_" may be called
		 * from probe context unless it explicitly indicates
		 * that it won't be called from probe context by
		 * using the prefix "dtrace_safe_".
		 */
		return (0);
	}

	if (name[0] == '_' && name[1] == '_')
		return (0);

	size = symval->size;

	instr = (u_int8_t *) symval->value;
	limit = (u_int8_t *) symval->value + symval->size;

#ifdef __amd64__
	while (instr < limit) {
		if (*instr == FBT_PUSHL_EBP)
			break;

		if ((size = dtrace_instr_size(instr)) <= 0)
			break;

		instr += size;
	}

	if (instr >= limit || *instr != FBT_PUSHL_EBP) {
		/*
		 * We either don't save the frame pointer in this
		 * function, or we ran into some disassembly
		 * screw-up.  Either way, we bail.
		 */
		return (0);
	}
#else
	if (instr[0] != FBT_PUSHL_EBP)
		return (0);

	if (!(instr[1] == FBT_MOVL_ESP_EBP0_V0 &&
	    instr[2] == FBT_MOVL_ESP_EBP1_V0) &&
	    !(instr[1] == FBT_MOVL_ESP_EBP0_V1 &&
	    instr[2] == FBT_MOVL_ESP_EBP1_V1))
		return (0);
#endif

	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;
	fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
	    name, FBT_ENTRY, 3, fbt);
	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_rval = DTRACE_INVOP_PUSHL_EBP;
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = FBT_PATCHVAL;
	fbt->fbtp_symindx = symindx;

	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	lf->fbt_nentries++;

	retfbt = NULL;
again:
	if (instr >= limit)
		return (0);

	/*
	 * If this disassembly fails, then we've likely walked off into
	 * a jump table or some other unsuitable area.  Bail out of the
	 * disassembly now.
	 */
	if ((size = dtrace_instr_size(instr)) <= 0)
		return (0);

#ifdef __amd64__
	/*
	 * We only instrument "ret" on amd64 -- we don't yet instrument
	 * ret imm16, largely because the compiler doesn't seem to
	 * (yet) emit them in the kernel...
	 */
	if (*instr != FBT_RET) {
		instr += size;
		goto again;
	}
#else
	if (!(size == 1 &&
	    (*instr == FBT_POPL_EBP || *instr == FBT_LEAVE) &&
	    (*(instr + 1) == FBT_RET ||
	    *(instr + 1) == FBT_RET_IMM16))) {
		instr += size;
		goto again;
	}
#endif

	/*
	 * We (desperately) want to avoid erroneously instrumenting a
	 * jump table, especially given that our markers are pretty
	 * short:  two bytes on x86, and just one byte on amd64.  To
	 * determine if we're looking at a true instruction sequence
	 * or an inline jump table that happens to contain the same
	 * byte sequences, we resort to some heuristic sleaze:  we
	 * treat this instruction as being contained within a pointer,
	 * and see if that pointer points to within the body of the
	 * function.  If it does, we refuse to instrument it.
	 */
	for (j = 0; j < sizeof (uintptr_t); j++) {
		caddr_t check = (caddr_t) instr - j;
		uint8_t *ptr;

		if (check < symval->value)
			break;

		if (check + sizeof (caddr_t) > (caddr_t)limit)
			continue;

		ptr = *(uint8_t **)check;

		if (ptr >= (uint8_t *) symval->value && ptr < limit) {
			instr += size;
			goto again;
		}
	}

	/*
	 * We have a winner!
	 */
	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;

	if (retfbt == NULL) {
		fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
		    name, FBT_RETURN, 3, fbt);
	} else {
		retfbt->fbtp_next = fbt;
		fbt->fbtp_id = retfbt->fbtp_id;
	}

	retfbt = fbt;
	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_symindx = symindx;

#ifndef __amd64__
	if (*instr == FBT_POPL_EBP) {
		fbt->fbtp_rval = DTRACE_INVOP_POPL_EBP;
	} else {
		ASSERT(*instr == FBT_LEAVE);
		fbt->fbtp_rval = DTRACE_INVOP_LEAVE;
	}
	fbt->fbtp_roffset =
	    (uintptr_t)(instr - (uint8_t *) symval->value) + 1;

#else
	ASSERT(*instr == FBT_RET);
	fbt->fbtp_rval = DTRACE_INVOP_RET;
	fbt->fbtp_roffset =
	    (uintptr_t)(instr - (uint8_t *) symval->value);
#endif

	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = FBT_PATCHVAL;
	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	lf->fbt_nentries++;

	instr += size;
	goto again;
}

static void
fbt_provide_module(void *arg, modctl_t *lf)
{
	char modname[MAXPATHLEN];
	int i;
	size_t len;

	strlcpy(modname, lf->filename, sizeof(modname));
	len = strlen(modname);
	if (len > 3 && strcmp(modname + len - 3, ".ko") == 0)
		modname[len - 3] = '\0';

	/*
	 * Employees of dtrace and their families are ineligible.  Void
	 * where prohibited.
	 */
	if (strcmp(modname, "dtrace") == 0)
		return;

	/*
	 * The cyclic timer subsystem can be built as a module and DTrace
	 * depends on that, so it is ineligible too.
	 */
	if (strcmp(modname, "cyclic") == 0)
		return;

	/*
	 * To register with DTrace, a module must list 'dtrace' as a
	 * dependency in order for the kernel linker to resolve
	 * symbols like dtrace_register().  All modules with such a
	 * dependency are ineligible for FBT tracing.
	 */
	for (i = 0; i < lf->ndeps; i++)
		if (strncmp(lf->deps[i]->filename, "dtrace", 6) == 0)
			return;

	if (lf->fbt_nentries) {
		/*
		 * This module has some FBT entries allocated; we're afraid
		 * to screw with it.
		 */
		return;
	}

	/*
	 * List the functions in the module and the symbol values.
	 */
	(void) linker_file_function_listall(lf, fbt_provide_module_function, modname);
}

static void
fbt_destroy(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg, *next, *hash, *last;
	modctl_t *ctl;
	int ndx;

	do {
		ctl = fbt->fbtp_ctl;

		ctl->fbt_nentries--;

		/*
		 * Now we need to remove this probe from the fbt_probetab.
		 */
		ndx = FBT_ADDR2NDX(fbt->fbtp_patchpoint);
		last = NULL;
		hash = fbt_probetab[ndx];

		while (hash != fbt) {
			ASSERT(hash != NULL);
			last = hash;
			hash = hash->fbtp_hashnext;
		}

		if (last != NULL) {
			last->fbtp_hashnext = fbt->fbtp_hashnext;
		} else {
			fbt_probetab[ndx] = fbt->fbtp_hashnext;
		}

		next = fbt->fbtp_next;
		free(fbt, M_FBT);

		fbt = next;
	} while (fbt != NULL);
}

static void
fbt_enable(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
	modctl_t *ctl = fbt->fbtp_ctl;

	ctl->nenabled++;

	/*
	 * Now check that our modctl has the expected load count.  If it
	 * doesn't, this module must have been unloaded and reloaded -- and
	 * we're not going to touch it.
	 */
	if (ctl->loadcnt != fbt->fbtp_loadcnt) {
		if (fbt_verbose) {
			printf("fbt is failing for probe %s "
			    "(module %s reloaded)",
			    fbt->fbtp_name, ctl->filename);
		}

		return;
	}

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		*fbt->fbtp_patchpoint = fbt->fbtp_patchval;
	}
}

static void
fbt_disable(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
	modctl_t *ctl = fbt->fbtp_ctl;

	ASSERT(ctl->nenabled > 0);
	ctl->nenabled--;

	if ((ctl->loadcnt != fbt->fbtp_loadcnt))
		return;

	for (; fbt != NULL; fbt = fbt->fbtp_next)
		*fbt->fbtp_patchpoint = fbt->fbtp_savedval;
}

static void
fbt_suspend(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
	modctl_t *ctl = fbt->fbtp_ctl;

	ASSERT(ctl->nenabled > 0);

	if ((ctl->loadcnt != fbt->fbtp_loadcnt))
		return;

	for (; fbt != NULL; fbt = fbt->fbtp_next)
		*fbt->fbtp_patchpoint = fbt->fbtp_savedval;
}

static void
fbt_resume(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
	modctl_t *ctl = fbt->fbtp_ctl;

	ASSERT(ctl->nenabled > 0);

	if ((ctl->loadcnt != fbt->fbtp_loadcnt))
		return;

	for (; fbt != NULL; fbt = fbt->fbtp_next)
		*fbt->fbtp_patchpoint = fbt->fbtp_patchval;
}

static int
fbt_ctfoff_init(modctl_t *lf, linker_ctf_t *lc)
{
	const Elf_Sym *symp = lc->symtab;
	const char *name;
	const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
	const uint8_t *ctfdata = lc->ctftab + sizeof(ctf_header_t);
	int i;
	uint32_t *ctfoff;
	uint32_t objtoff = hp->cth_objtoff;
	uint32_t funcoff = hp->cth_funcoff;
	ushort_t info;
	ushort_t vlen;

	/* Sanity check. */
	if (hp->cth_magic != CTF_MAGIC) {
		printf("Bad magic value in CTF data of '%s'\n", lf->pathname);
		return (EINVAL);
	}

	if (lc->symtab == NULL) {
		printf("No symbol table in '%s'\n", lf->pathname);
		return (EINVAL);
	}

	if ((ctfoff = malloc(sizeof(uint32_t) * lc->nsym, M_LINKER, M_WAITOK)) == NULL)
		return (ENOMEM);

	*lc->ctfoffp = ctfoff;

	for (i = 0; i < lc->nsym; i++, ctfoff++, symp++) {
		if (symp->st_name == 0 || symp->st_shndx == SHN_UNDEF) {
			*ctfoff = 0xffffffff;
			continue;
		}

		if (symp->st_name < lc->strcnt)
			name = lc->strtab + symp->st_name;
		else
			name = "(?)";

		switch (ELF_ST_TYPE(symp->st_info)) {
		case STT_OBJECT:
			if (objtoff >= hp->cth_funcoff ||
			    (symp->st_shndx == SHN_ABS && symp->st_value == 0)) {
				*ctfoff = 0xffffffff;
				break;
			}

			*ctfoff = objtoff;
			objtoff += sizeof (ushort_t);
			break;

		case STT_FUNC:
			if (funcoff >= hp->cth_typeoff) {
				*ctfoff = 0xffffffff;
				break;
			}

			*ctfoff = funcoff;

			info = *((const ushort_t *)(ctfdata + funcoff));
			vlen = CTF_INFO_VLEN(info);

			/*
			 * If we encounter a zero pad at the end, just skip it.
			 * Otherwise skip over the function and its return type
			 * (+2) and the argument list (vlen).
			 */
			if (CTF_INFO_KIND(info) == CTF_K_UNKNOWN && vlen == 0)
				funcoff += sizeof (ushort_t); /* skip pad */
			else
				funcoff += sizeof (ushort_t) * (vlen + 2);
			break;

		default:
			*ctfoff = 0xffffffff;
			break;
		}
	}

	return (0);
}
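
/*
 * For reference (CTF version 2 layout, as assumed above and in
 * fbt_getargdesc() below): each entry in the CTF function section is a run
 * of ushort_t values -- an info word encoding the kind (CTF_K_FUNCTION)
 * and the argument count in its vlen bits, followed by the return type ID
 * and then one type ID per argument.  That is why fbt_ctfoff_init()
 * advances funcoff by vlen + 2 ushorts per function symbol, and why a lone
 * zero info word is treated as padding.
 */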

static ssize_t
fbt_get_ctt_size(uint8_t version, const ctf_type_t *tp, ssize_t *sizep,
    ssize_t *incrementp)
{
	ssize_t size, increment;

	if (version > CTF_VERSION_1 &&
	    tp->ctt_size == CTF_LSIZE_SENT) {
		size = CTF_TYPE_LSIZE(tp);
		increment = sizeof (ctf_type_t);
	} else {
		size = tp->ctt_size;
		increment = sizeof (ctf_stype_t);
	}

	if (sizep)
		*sizep = size;
	if (incrementp)
		*incrementp = increment;

	return (size);
}

static int
fbt_typoff_init(linker_ctf_t *lc)
{
	const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
	const ctf_type_t *tbuf;
	const ctf_type_t *tend;
	const ctf_type_t *tp;
	const uint8_t *ctfdata = lc->ctftab + sizeof(ctf_header_t);
	int ctf_typemax = 0;
	uint32_t *xp;
	ulong_t pop[CTF_K_MAX + 1] = { 0 };

	/* Sanity check. */
	if (hp->cth_magic != CTF_MAGIC)
		return (EINVAL);

	tbuf = (const ctf_type_t *) (ctfdata + hp->cth_typeoff);
	tend = (const ctf_type_t *) (ctfdata + hp->cth_stroff);

	int child = hp->cth_parname != 0;

	/*
	 * We make two passes through the entire type section.  In this first
	 * pass, we count the number of each type and the total number of types.
	 */
	for (tp = tbuf; tp < tend; ctf_typemax++) {
		ushort_t kind = CTF_INFO_KIND(tp->ctt_info);
		ulong_t vlen = CTF_INFO_VLEN(tp->ctt_info);
		ssize_t size, increment;

		size_t vbytes;
		uint_t n;

		(void) fbt_get_ctt_size(hp->cth_version, tp, &size, &increment);

		switch (kind) {
		case CTF_K_INTEGER:
		case CTF_K_FLOAT:
			vbytes = sizeof (uint_t);
			break;
		case CTF_K_ARRAY:
			vbytes = sizeof (ctf_array_t);
			break;
		case CTF_K_FUNCTION:
			vbytes = sizeof (ushort_t) * (vlen + (vlen & 1));
			break;
		case CTF_K_STRUCT:
		case CTF_K_UNION:
			if (size < CTF_LSTRUCT_THRESH) {
				ctf_member_t *mp = (ctf_member_t *)
				    ((uintptr_t)tp + increment);

				vbytes = sizeof (ctf_member_t) * vlen;
				for (n = vlen; n != 0; n--, mp++)
					child |= CTF_TYPE_ISCHILD(mp->ctm_type);
			} else {
				ctf_lmember_t *lmp = (ctf_lmember_t *)
				    ((uintptr_t)tp + increment);

				vbytes = sizeof (ctf_lmember_t) * vlen;
				for (n = vlen; n != 0; n--, lmp++)
					child |=
					    CTF_TYPE_ISCHILD(lmp->ctlm_type);
			}
			break;
		case CTF_K_ENUM:
			vbytes = sizeof (ctf_enum_t) * vlen;
			break;
		case CTF_K_FORWARD:
			/*
			 * For forward declarations, ctt_type is the CTF_K_*
			 * kind for the tag, so bump that population count too.
			 * If ctt_type is unknown, treat the tag as a struct.
			 */
			if (tp->ctt_type == CTF_K_UNKNOWN ||
			    tp->ctt_type >= CTF_K_MAX)
				pop[CTF_K_STRUCT]++;
			else
				pop[tp->ctt_type]++;
			/*FALLTHRU*/
		case CTF_K_UNKNOWN:
			vbytes = 0;
			break;
		case CTF_K_POINTER:
		case CTF_K_TYPEDEF:
		case CTF_K_VOLATILE:
		case CTF_K_CONST:
		case CTF_K_RESTRICT:
			child |= CTF_TYPE_ISCHILD(tp->ctt_type);
			vbytes = 0;
			break;
		default:
			printf("%s(%d): detected invalid CTF kind -- %u\n",
			    __func__, __LINE__, kind);
			return (EIO);
		}
		tp = (ctf_type_t *)((uintptr_t)tp + increment + vbytes);
		pop[kind]++;
	}

	*lc->typlenp = ctf_typemax;

	if ((xp = malloc(sizeof(uint32_t) * ctf_typemax, M_LINKER, M_ZERO | M_WAITOK)) == NULL)
		return (ENOMEM);

	*lc->typoffp = xp;

	/* type id 0 is used as a sentinel value */
	*xp++ = 0;

	/*
	 * In the second pass, fill in the type offset.
	 */
	for (tp = tbuf; tp < tend; xp++) {
		ushort_t kind = CTF_INFO_KIND(tp->ctt_info);
		ulong_t vlen = CTF_INFO_VLEN(tp->ctt_info);
		ssize_t size, increment;

		size_t vbytes;
		uint_t n;

		(void) fbt_get_ctt_size(hp->cth_version, tp, &size, &increment);

		switch (kind) {
		case CTF_K_INTEGER:
		case CTF_K_FLOAT:
			vbytes = sizeof (uint_t);
			break;
		case CTF_K_ARRAY:
			vbytes = sizeof (ctf_array_t);
			break;
		case CTF_K_FUNCTION:
			vbytes = sizeof (ushort_t) * (vlen + (vlen & 1));
			break;
		case CTF_K_STRUCT:
		case CTF_K_UNION:
			if (size < CTF_LSTRUCT_THRESH) {
				ctf_member_t *mp = (ctf_member_t *)
				    ((uintptr_t)tp + increment);

				vbytes = sizeof (ctf_member_t) * vlen;
				for (n = vlen; n != 0; n--, mp++)
					child |= CTF_TYPE_ISCHILD(mp->ctm_type);
			} else {
				ctf_lmember_t *lmp = (ctf_lmember_t *)
				    ((uintptr_t)tp + increment);

				vbytes = sizeof (ctf_lmember_t) * vlen;
				for (n = vlen; n != 0; n--, lmp++)
					child |=
					    CTF_TYPE_ISCHILD(lmp->ctlm_type);
			}
			break;
		case CTF_K_ENUM:
			vbytes = sizeof (ctf_enum_t) * vlen;
			break;
		case CTF_K_FORWARD:
		case CTF_K_UNKNOWN:
			vbytes = 0;
			break;
		case CTF_K_POINTER:
		case CTF_K_TYPEDEF:
		case CTF_K_VOLATILE:
		case CTF_K_CONST:
		case CTF_K_RESTRICT:
			vbytes = 0;
			break;
		default:
			printf("%s(%d): detected invalid CTF kind -- %u\n",
			    __func__, __LINE__, kind);
			return (EIO);
		}
		*xp = (uint32_t)((uintptr_t) tp - (uintptr_t) ctfdata);
		tp = (ctf_type_t *)((uintptr_t)tp + increment + vbytes);
	}

	return (0);
}

/*
 * CTF Declaration Stack
 *
 * In order to implement ctf_type_name(), we must convert a type graph back
 * into a C type declaration.  Unfortunately, a type graph represents a storage
 * class ordering of the type whereas a type declaration must obey the C rules
 * for operator precedence, and the two orderings are frequently in conflict.
 * For example, consider these CTF type graphs and their C declarations:
 *
 * CTF_K_POINTER -> CTF_K_FUNCTION -> CTF_K_INTEGER  :  int (*)()
 * CTF_K_POINTER -> CTF_K_ARRAY -> CTF_K_INTEGER     :  int (*)[]
 *
 * In each case, parentheses are used to raise operator * to higher lexical
 * precedence, so the string form of the C declaration cannot be constructed by
 * walking the type graph links and forming the string from left to right.
 *
 * The functions in this file build a set of stacks from the type graph nodes
 * corresponding to the C operator precedence levels in the appropriate order.
 * The code in ctf_type_name() can then iterate over the levels and nodes in
 * lexical precedence order and construct the final C declaration string.
 */
typedef struct ctf_list {
	struct ctf_list *l_prev;	/* previous pointer or tail pointer */
	struct ctf_list *l_next;	/* next pointer or head pointer */
} ctf_list_t;

#define	ctf_list_prev(elem)	((void *)(((ctf_list_t *)(elem))->l_prev))
#define	ctf_list_next(elem)	((void *)(((ctf_list_t *)(elem))->l_next))

typedef enum {
	CTF_PREC_BASE,
	CTF_PREC_POINTER,
	CTF_PREC_ARRAY,
	CTF_PREC_FUNCTION,
	CTF_PREC_MAX
} ctf_decl_prec_t;

typedef struct ctf_decl_node {
	ctf_list_t	cd_list;	/* linked list pointers */
	ctf_id_t	cd_type;	/* type identifier */
	uint_t		cd_kind;	/* type kind */
	uint_t		cd_n;		/* type dimension if array */
} ctf_decl_node_t;

typedef struct ctf_decl {
	ctf_list_t	cd_nodes[CTF_PREC_MAX];	/* declaration node stacks */
	int		cd_order[CTF_PREC_MAX];	/* storage order of decls */
	ctf_decl_prec_t	cd_qualp;	/* qualifier precision */
	ctf_decl_prec_t	cd_ordp;	/* ordered precision */
	char		*cd_buf;	/* buffer for output */
	char		*cd_ptr;	/* buffer location */
	char		*cd_end;	/* buffer limit */
	size_t		cd_len;		/* buffer space required */
	int		cd_err;		/* saved error value */
} ctf_decl_t;

/*
 * Simple doubly-linked list append routine.  This implementation assumes that
 * each list element contains an embedded ctf_list_t as the first member.
 * An additional ctf_list_t is used to store the head (l_next) and tail
 * (l_prev) pointers.  The current head and tail list elements have their
 * previous and next pointers set to NULL, respectively.
 */
static void
ctf_list_append(ctf_list_t *lp, void *new)
{
	ctf_list_t *p = lp->l_prev;	/* p = tail list element */
	ctf_list_t *q = new;		/* q = new list element */

	lp->l_prev = q;
	q->l_prev = p;
	q->l_next = NULL;

	if (p != NULL)
		p->l_next = q;
	else
		lp->l_next = q;
}

/*
 * Prepend the specified existing element to the given ctf_list_t.  The
 * existing pointer should be pointing at a struct with embedded ctf_list_t.
 */
static void
ctf_list_prepend(ctf_list_t *lp, void *new)
{
	ctf_list_t *p = new;		/* p = new list element */
	ctf_list_t *q = lp->l_next;	/* q = head list element */

	lp->l_next = p;
	p->l_prev = NULL;
	p->l_next = q;

	if (q != NULL)
		q->l_prev = p;
	else
		lp->l_prev = p;
}

static void
ctf_decl_init(ctf_decl_t *cd, char *buf, size_t len)
{
	int i;

	bzero(cd, sizeof (ctf_decl_t));

	for (i = CTF_PREC_BASE; i < CTF_PREC_MAX; i++)
		cd->cd_order[i] = CTF_PREC_BASE - 1;

	cd->cd_qualp = CTF_PREC_BASE;
	cd->cd_ordp = CTF_PREC_BASE;

	cd->cd_buf = buf;
	cd->cd_ptr = buf;
	cd->cd_end = buf + len;
}

static void
ctf_decl_fini(ctf_decl_t *cd)
{
	ctf_decl_node_t *cdp, *ndp;
	int i;

	for (i = CTF_PREC_BASE; i < CTF_PREC_MAX; i++) {
		for (cdp = ctf_list_next(&cd->cd_nodes[i]);
		    cdp != NULL; cdp = ndp) {
			ndp = ctf_list_next(cdp);
			free(cdp, M_FBT);
		}
	}
}

static const ctf_type_t *
ctf_lookup_by_id(linker_ctf_t *lc, ctf_id_t type)
{
	const ctf_type_t *tp;
	uint32_t offset;
	uint32_t *typoff = *lc->typoffp;

	if (type >= *lc->typlenp) {
		printf("%s(%d): type %d exceeds max %ld\n", __func__,
		    __LINE__, (int) type, *lc->typlenp);
		return (NULL);
	}

	/* Check if the type isn't cross-referenced. */
	if ((offset = typoff[type]) == 0) {
		printf("%s(%d): type %d isn't cross referenced\n", __func__,
		    __LINE__, (int) type);
		return (NULL);
	}

	tp = (const ctf_type_t *)(lc->ctftab + offset + sizeof(ctf_header_t));

	return (tp);
}

static void
fbt_array_info(linker_ctf_t *lc, ctf_id_t type, ctf_arinfo_t *arp)
{
	const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
	const ctf_type_t *tp;
	const ctf_array_t *ap;
	ssize_t increment;

	bzero(arp, sizeof(*arp));

	if ((tp = ctf_lookup_by_id(lc, type)) == NULL)
		return;

	if (CTF_INFO_KIND(tp->ctt_info) != CTF_K_ARRAY)
		return;

	(void) fbt_get_ctt_size(hp->cth_version, tp, NULL, &increment);

	ap = (const ctf_array_t *)((uintptr_t)tp + increment);
	arp->ctr_contents = ap->cta_contents;
	arp->ctr_index = ap->cta_index;
	arp->ctr_nelems = ap->cta_nelems;
}

static const char *
ctf_strptr(linker_ctf_t *lc, int name)
{
	const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
	const char *strp = "";

	if (name < 0 || name >= hp->cth_strlen)
		return (strp);

	strp = (const char *)(lc->ctftab + hp->cth_stroff + name + sizeof(ctf_header_t));

	return (strp);
}

static void
ctf_decl_push(ctf_decl_t *cd, linker_ctf_t *lc, ctf_id_t type)
{
	ctf_decl_node_t *cdp;
	ctf_decl_prec_t prec;
	uint_t kind, n = 1;
	int is_qual = 0;

	const ctf_type_t *tp;
	ctf_arinfo_t ar;

	if ((tp = ctf_lookup_by_id(lc, type)) == NULL) {
		cd->cd_err = ENOENT;
		return;
	}

	switch (kind = CTF_INFO_KIND(tp->ctt_info)) {
	case CTF_K_ARRAY:
		fbt_array_info(lc, type, &ar);
		ctf_decl_push(cd, lc, ar.ctr_contents);
		n = ar.ctr_nelems;
		prec = CTF_PREC_ARRAY;
		break;

	case CTF_K_TYPEDEF:
		if (ctf_strptr(lc, tp->ctt_name)[0] == '\0') {
			ctf_decl_push(cd, lc, tp->ctt_type);
			return;
		}
		prec = CTF_PREC_BASE;
		break;

	case CTF_K_FUNCTION:
		ctf_decl_push(cd, lc, tp->ctt_type);
		prec = CTF_PREC_FUNCTION;
		break;

	case CTF_K_POINTER:
		ctf_decl_push(cd, lc, tp->ctt_type);
		prec = CTF_PREC_POINTER;
		break;

	case CTF_K_VOLATILE:
	case CTF_K_CONST:
	case CTF_K_RESTRICT:
		ctf_decl_push(cd, lc, tp->ctt_type);
		prec = cd->cd_qualp;
		is_qual++;
		break;

	default:
		prec = CTF_PREC_BASE;
	}

	if ((cdp = malloc(sizeof (ctf_decl_node_t), M_FBT, M_WAITOK)) == NULL) {
		cd->cd_err = EAGAIN;
		return;
	}

	cdp->cd_type = type;
	cdp->cd_kind = kind;
	cdp->cd_n = n;

	if (ctf_list_next(&cd->cd_nodes[prec]) == NULL)
		cd->cd_order[prec] = cd->cd_ordp++;

	/*
	 * Reset cd_qualp to the highest precedence level that we've seen so
	 * far that can be qualified (CTF_PREC_BASE or CTF_PREC_POINTER).
	 */
	if (prec > cd->cd_qualp && prec < CTF_PREC_ARRAY)
		cd->cd_qualp = prec;

	/*
	 * C array declarators are ordered inside out so prepend them.  Also by
	 * convention qualifiers of base types precede the type specifier (e.g.
	 * const int vs. int const) even though the two forms are equivalent.
	 */
	if (kind == CTF_K_ARRAY || (is_qual && prec == CTF_PREC_BASE))
		ctf_list_prepend(&cd->cd_nodes[prec], cdp);
	else
		ctf_list_append(&cd->cd_nodes[prec], cdp);
}

static void
ctf_decl_sprintf(ctf_decl_t *cd, const char *format, ...)
{
	size_t len = (size_t)(cd->cd_end - cd->cd_ptr);
	va_list ap;
	size_t n;

	va_start(ap, format);
	n = vsnprintf(cd->cd_ptr, len, format, ap);
	va_end(ap);

	cd->cd_ptr += MIN(n, len);
	cd->cd_len += n;
}

static ssize_t
fbt_type_name(linker_ctf_t *lc, ctf_id_t type, char *buf, size_t len)
{
	ctf_decl_t cd;
	ctf_decl_node_t *cdp;
	ctf_decl_prec_t prec, lp, rp;
	int ptr, arr;
	uint_t k;

	if (lc == NULL && type == CTF_ERR)
		return (-1); /* simplify caller code by permitting CTF_ERR */

	ctf_decl_init(&cd, buf, len);
	ctf_decl_push(&cd, lc, type);

	if (cd.cd_err != 0) {
		ctf_decl_fini(&cd);
		return (-1);
	}

	/*
	 * If the type graph's order conflicts with lexical precedence order
	 * for pointers or arrays, then we need to surround the declarations at
	 * the corresponding lexical precedence with parentheses.  This can
	 * result in either a parenthesized pointer (*) as in int (*)() or
	 * int (*)[], or in a parenthesized pointer and array as in int (*[])().
	 */
	ptr = cd.cd_order[CTF_PREC_POINTER] > CTF_PREC_POINTER;
	arr = cd.cd_order[CTF_PREC_ARRAY] > CTF_PREC_ARRAY;

	rp = arr ? CTF_PREC_ARRAY : ptr ? CTF_PREC_POINTER : -1;
	lp = ptr ? CTF_PREC_POINTER : arr ? CTF_PREC_ARRAY : -1;

	k = CTF_K_POINTER;	/* avoid leading whitespace (see below) */

	for (prec = CTF_PREC_BASE; prec < CTF_PREC_MAX; prec++) {
		for (cdp = ctf_list_next(&cd.cd_nodes[prec]);
		    cdp != NULL; cdp = ctf_list_next(cdp)) {

			const ctf_type_t *tp =
			    ctf_lookup_by_id(lc, cdp->cd_type);
			const char *name = ctf_strptr(lc, tp->ctt_name);

			if (k != CTF_K_POINTER && k != CTF_K_ARRAY)
				ctf_decl_sprintf(&cd, " ");

			if (lp == prec) {
				ctf_decl_sprintf(&cd, "(");
				lp = -1;
			}

			switch (cdp->cd_kind) {
			case CTF_K_INTEGER:
			case CTF_K_FLOAT:
			case CTF_K_TYPEDEF:
				ctf_decl_sprintf(&cd, "%s", name);
				break;
			case CTF_K_POINTER:
				ctf_decl_sprintf(&cd, "*");
				break;
			case CTF_K_ARRAY:
				ctf_decl_sprintf(&cd, "[%u]", cdp->cd_n);
				break;
			case CTF_K_FUNCTION:
				ctf_decl_sprintf(&cd, "()");
				break;
			case CTF_K_STRUCT:
			case CTF_K_FORWARD:
				ctf_decl_sprintf(&cd, "struct %s", name);
				break;
			case CTF_K_UNION:
				ctf_decl_sprintf(&cd, "union %s", name);
				break;
			case CTF_K_ENUM:
				ctf_decl_sprintf(&cd, "enum %s", name);
				break;
			case CTF_K_VOLATILE:
				ctf_decl_sprintf(&cd, "volatile");
				break;
			case CTF_K_CONST:
				ctf_decl_sprintf(&cd, "const");
				break;
			case CTF_K_RESTRICT:
				ctf_decl_sprintf(&cd, "restrict");
				break;
			}

			k = cdp->cd_kind;
		}

		if (rp == prec)
			ctf_decl_sprintf(&cd, ")");
	}

	ctf_decl_fini(&cd);
	return (cd.cd_len);
}
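
/*
 * fbt_getargdesc() below is what backs the typed args[] array for fbt
 * probes in D: given a probe and an argument index, it walks the module's
 * CTF data and fills in the native C type name via fbt_type_name() above,
 * e.g. "struct thread *".  The exact string depends on the module's CTF,
 * so treat that as an illustration rather than a guaranteed result.
 */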
static void
fbt_getargdesc(void *arg __unused, dtrace_id_t id __unused, void *parg, dtrace_argdesc_t *desc)
{
	const ushort_t *dp;
	fbt_probe_t *fbt = parg;
	linker_ctf_t lc;
	modctl_t *ctl = fbt->fbtp_ctl;
	int ndx = desc->dtargd_ndx;
	int symindx = fbt->fbtp_symindx;
	uint32_t *ctfoff;
	uint32_t offset;
	ushort_t info, kind, n;

	desc->dtargd_ndx = DTRACE_ARGNONE;

	/* Get a pointer to the CTF data and its length. */
	if (linker_ctf_get(ctl, &lc) != 0)
		/* No CTF data? Something wrong? *shrug* */
		return;

	/* Check if this module hasn't been initialised yet. */
	if (*lc.ctfoffp == NULL) {
		/*
		 * Initialise the CTF object and function symindx to
		 * byte offset array.
		 */
		if (fbt_ctfoff_init(ctl, &lc) != 0)
			return;

		/* Initialise the CTF type to byte offset array. */
		if (fbt_typoff_init(&lc) != 0)
			return;
	}

	ctfoff = *lc.ctfoffp;

	if (ctfoff == NULL || *lc.typoffp == NULL)
		return;

	/* Check if the symbol index is out of range. */
	if (symindx >= lc.nsym)
		return;

	/* Check if the symbol isn't cross-referenced. */
	if ((offset = ctfoff[symindx]) == 0xffffffff)
		return;

	dp = (const ushort_t *)(lc.ctftab + offset + sizeof(ctf_header_t));

	info = *dp++;
	kind = CTF_INFO_KIND(info);
	n = CTF_INFO_VLEN(info);

	if (kind == CTF_K_UNKNOWN && n == 0) {
		printf("%s(%d): Unknown function!\n", __func__, __LINE__);
		return;
	}

	if (kind != CTF_K_FUNCTION) {
		printf("%s(%d): Expected a function!\n", __func__, __LINE__);
		return;
	}

	/* Check if the requested argument doesn't exist. */
	if (ndx >= n)
		return;

	/* Skip the return type and arguments up to the one requested. */
	dp += ndx + 1;

	if (fbt_type_name(&lc, *dp, desc->dtargd_native, sizeof(desc->dtargd_native)) > 0)
		desc->dtargd_ndx = ndx;

	return;
}

static void
fbt_load(void *dummy)
{
	/* Create the /dev/dtrace/fbt entry. */
	fbt_cdev = make_dev(&fbt_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/fbt");

	/* Default the probe table size if not specified. */
	if (fbt_probetab_size == 0)
		fbt_probetab_size = FBT_PROBETAB_SIZE;

	/* Choose the hash mask for the probe table. */
	fbt_probetab_mask = fbt_probetab_size - 1;

	/* Allocate memory for the probe table. */
	fbt_probetab =
	    malloc(fbt_probetab_size * sizeof (fbt_probe_t *), M_FBT, M_WAITOK | M_ZERO);

	dtrace_doubletrap_func = fbt_doubletrap;
	dtrace_invop_add(fbt_invop);

	if (dtrace_register("fbt", &fbt_attr, DTRACE_PRIV_USER,
	    NULL, &fbt_pops, NULL, &fbt_id) != 0)
		return;
}

static int
fbt_unload()
{
	int error = 0;

	/* De-register the invalid opcode handler. */
	dtrace_invop_remove(fbt_invop);

	dtrace_doubletrap_func = NULL;

	/* De-register this DTrace provider. */
	if ((error = dtrace_unregister(fbt_id)) != 0)
		return (error);

	/* Free the probe table. */
	free(fbt_probetab, M_FBT);
	fbt_probetab = NULL;
	fbt_probetab_mask = 0;

	destroy_dev(fbt_cdev);

	return (error);
}

static int
fbt_modevent(module_t mod __unused, int type, void *data __unused)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		break;

	case MOD_UNLOAD:
		break;

	case MOD_SHUTDOWN:
		break;

	default:
		error = EOPNOTSUPP;
		break;

	}

	return (error);
}

static int
fbt_open(struct cdev *dev __unused, int oflags __unused, int devtype __unused, struct thread *td __unused)
{
	return (0);
}

SYSINIT(fbt_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fbt_load, NULL);
SYSUNINIT(fbt_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fbt_unload, NULL);

DEV_MODULE(fbt, fbt_modevent, NULL);
MODULE_VERSION(fbt, 1);
MODULE_DEPEND(fbt, dtrace, 1, 1, 1);
MODULE_DEPEND(fbt, opensolaris, 1, 1, 1);
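
/*
 * Example usage (illustrative, from userland): once this module and its
 * dtrace dependencies are loaded -- e.g. via "kldload dtraceall" on
 * FreeBSD -- the provider's probes can be listed and enabled with
 * dtrace(1):
 *
 *	dtrace -l -P fbt | head
 *	dtrace -n 'fbt::vn_open:entry { @[execname] = count(); }'
 */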