/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Portions Copyright 2006-2008 John Birrell jb@freebsd.org
 *
 * $FreeBSD$
 *
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cpuvar.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/selinfo.h>
#include <sys/smp.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

#include <sys/dtrace.h>
#include <sys/dtrace_bsd.h>

static MALLOC_DEFINE(M_FBT, "fbt", "Function Boundary Tracing");

#define	FBT_PUSHL_EBP		0x55
#define	FBT_MOVL_ESP_EBP0_V0	0x8b
#define	FBT_MOVL_ESP_EBP1_V0	0xec
#define	FBT_MOVL_ESP_EBP0_V1	0x89
#define	FBT_MOVL_ESP_EBP1_V1	0xe5
#define	FBT_REX_RSP_RBP		0x48

#define	FBT_POPL_EBP		0x5d
#define	FBT_RET			0xc3
#define	FBT_RET_IMM16		0xc2
#define	FBT_LEAVE		0xc9

#ifdef __amd64__
#define	FBT_PATCHVAL		0xcc
#else
#define	FBT_PATCHVAL		0xf0
#endif

static d_open_t	fbt_open;
static int	fbt_unload(void);
static void	fbt_getargdesc(void *, dtrace_id_t, void *, dtrace_argdesc_t *);
static void	fbt_provide_module(void *, modctl_t *);
static void	fbt_destroy(void *, dtrace_id_t, void *);
static void	fbt_enable(void *, dtrace_id_t, void *);
static void	fbt_disable(void *, dtrace_id_t, void *);
static void	fbt_load(void *);
static void	fbt_suspend(void *, dtrace_id_t, void *);
static void	fbt_resume(void *, dtrace_id_t, void *);

#define	FBT_ENTRY	"entry"
#define	FBT_RETURN	"return"
#define	FBT_ADDR2NDX(addr)	((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask)
#define	FBT_PROBETAB_SIZE	0x8000	/* 32k entries -- 128K total */

static struct cdevsw fbt_cdevsw = {
	.d_version	= D_VERSION,
	.d_open		= fbt_open,
	.d_name		= "fbt",
};

static dtrace_pattr_t fbt_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA },
};

static dtrace_pops_t fbt_pops = {
	NULL,
	fbt_provide_module,
	fbt_enable,
	fbt_disable,
	fbt_suspend,
	fbt_resume,
	fbt_getargdesc,
	NULL,
	NULL,
	fbt_destroy
};

typedef struct fbt_probe {
	struct fbt_probe *fbtp_hashnext;
	uint8_t		*fbtp_patchpoint;
	int8_t		fbtp_rval;
	uint8_t		fbtp_patchval;
	uint8_t		fbtp_savedval;
	uintptr_t	fbtp_roffset;
	dtrace_id_t	fbtp_id;
	const char	*fbtp_name;
	modctl_t	*fbtp_ctl;
	int		fbtp_loadcnt;
	int		fbtp_primary;
	int		fbtp_invop_cnt;
	int		fbtp_symindx;
	struct fbt_probe *fbtp_next;
} fbt_probe_t;

static struct cdev		*fbt_cdev;
static dtrace_provider_id_t	fbt_id;
static fbt_probe_t		**fbt_probetab;
static int			fbt_probetab_size;
static int			fbt_probetab_mask;
static int			fbt_verbose = 0;

static void
fbt_doubletrap(void)
{
	fbt_probe_t *fbt;
	int i;

	for (i = 0; i < fbt_probetab_size; i++) {
		fbt = fbt_probetab[i];

		for (; fbt != NULL; fbt = fbt->fbtp_next)
			*fbt->fbtp_patchpoint = fbt->fbtp_savedval;
	}
}

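/*
 * Breakpoint handler for patched probe sites: dtrace_invop() calls each
 * registered handler for a trap taken on an FBT_PATCHVAL byte.  If the
 * faulting address matches one of our probes we fire it -- entry probes
 * get the first five stack-passed values as arguments, return probes get
 * the return offset and return value -- and we hand back fbtp_rval so the
 * trap code knows which original instruction to emulate.
 */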
static int
fbt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval)
{
	solaris_cpu_t *cpu = &solaris_cpu[curcpu];
	uintptr_t stack0, stack1, stack2, stack3, stack4;
	fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];

	for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
		if ((uintptr_t)fbt->fbtp_patchpoint == addr) {
			fbt->fbtp_invop_cnt++;
			if (fbt->fbtp_roffset == 0) {
				int i = 0;
				/*
				 * When accessing the arguments on the stack,
				 * we must protect against accessing beyond
				 * the stack.  We can safely set NOFAULT here
				 * -- we know that interrupts are already
				 * disabled.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				cpu->cpu_dtrace_caller = stack[i++];
				stack0 = stack[i++];
				stack1 = stack[i++];
				stack2 = stack[i++];
				stack3 = stack[i++];
				stack4 = stack[i++];
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);

				dtrace_probe(fbt->fbtp_id, stack0, stack1,
				    stack2, stack3, stack4);

				cpu->cpu_dtrace_caller = 0;
			} else {
#ifdef __amd64__
				/*
				 * On amd64, we instrument the ret, not the
				 * leave.  We therefore need to set the caller
				 * to assure that the top frame of a stack()
				 * action is correct.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				cpu->cpu_dtrace_caller = stack[0];
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);
#endif

				dtrace_probe(fbt->fbtp_id, fbt->fbtp_roffset,
				    rval, 0, 0, 0);
				cpu->cpu_dtrace_caller = 0;
			}

			return (fbt->fbtp_rval);
		}
	}

	return (0);
}

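/*
 * Per-symbol callback invoked via linker_file_function_listall().  Create
 * an entry probe at the frame-pointer save in the function prologue and a
 * return probe at each "ret" (amd64) or "pop %ebp"/"leave" followed by
 * "ret" (i386) sequence found in the function body.
 */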
static int
fbt_provide_module_function(linker_file_t lf, int symindx,
    linker_symval_t *symval, void *opaque)
{
	char *modname = opaque;
	const char *name = symval->name;
	fbt_probe_t *fbt, *retfbt;
	int j;
	int size;
	u_int8_t *instr, *limit;

	if (strncmp(name, "dtrace_", 7) == 0 &&
	    strncmp(name, "dtrace_safe_", 12) != 0) {
		/*
		 * Anything beginning with "dtrace_" may be called
		 * from probe context unless it explicitly indicates
		 * that it won't be called from probe context by
		 * using the prefix "dtrace_safe_".
		 */
		return (0);
	}

	if (name[0] == '_' && name[1] == '_')
		return (0);

	size = symval->size;

	instr = (u_int8_t *) symval->value;
	limit = (u_int8_t *) symval->value + symval->size;

#ifdef __amd64__
	while (instr < limit) {
		if (*instr == FBT_PUSHL_EBP)
			break;

		if ((size = dtrace_instr_size(instr)) <= 0)
			break;

		instr += size;
	}

	if (instr >= limit || *instr != FBT_PUSHL_EBP) {
		/*
		 * We either don't save the frame pointer in this
		 * function, or we ran into some disassembly
		 * screw-up.  Either way, we bail.
		 */
		return (0);
	}
#else
	if (instr[0] != FBT_PUSHL_EBP)
		return (0);

	if (!(instr[1] == FBT_MOVL_ESP_EBP0_V0 &&
	    instr[2] == FBT_MOVL_ESP_EBP1_V0) &&
	    !(instr[1] == FBT_MOVL_ESP_EBP0_V1 &&
	    instr[2] == FBT_MOVL_ESP_EBP1_V1))
		return (0);
#endif

	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;
	fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
	    name, FBT_ENTRY, 3, fbt);
	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_rval = DTRACE_INVOP_PUSHL_EBP;
	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = FBT_PATCHVAL;
	fbt->fbtp_symindx = symindx;

	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	lf->fbt_nentries++;

	retfbt = NULL;
again:
	if (instr >= limit)
		return (0);

	/*
	 * If this disassembly fails, then we've likely walked off into
	 * a jump table or some other unsuitable area.  Bail out of the
	 * disassembly now.
	 */
	if ((size = dtrace_instr_size(instr)) <= 0)
		return (0);

#ifdef __amd64__
	/*
	 * We only instrument "ret" on amd64 -- we don't yet instrument
	 * ret imm16, largely because the compiler doesn't seem to
	 * (yet) emit them in the kernel...
	 */
	if (*instr != FBT_RET) {
		instr += size;
		goto again;
	}
#else
	if (!(size == 1 &&
	    (*instr == FBT_POPL_EBP || *instr == FBT_LEAVE) &&
	    (*(instr + 1) == FBT_RET ||
	    *(instr + 1) == FBT_RET_IMM16))) {
		instr += size;
		goto again;
	}
#endif

	/*
	 * We (desperately) want to avoid erroneously instrumenting a
	 * jump table, especially given that our markers are pretty
	 * short:  two bytes on x86, and just one byte on amd64.  To
	 * determine if we're looking at a true instruction sequence
	 * or an inline jump table that happens to contain the same
	 * byte sequences, we resort to some heuristic sleaze:  we
	 * treat this instruction as being contained within a pointer,
	 * and see if that pointer points to within the body of the
	 * function.  If it does, we refuse to instrument it.
	 */
	for (j = 0; j < sizeof (uintptr_t); j++) {
		caddr_t check = (caddr_t) instr - j;
		uint8_t *ptr;

		if (check < symval->value)
			break;

		if (check + sizeof (caddr_t) > (caddr_t)limit)
			continue;

		ptr = *(uint8_t **)check;

		if (ptr >= (uint8_t *) symval->value && ptr < limit) {
			instr += size;
			goto again;
		}
	}

	/*
	 * We have a winner!
	 */
	fbt = malloc(sizeof (fbt_probe_t), M_FBT, M_WAITOK | M_ZERO);
	fbt->fbtp_name = name;

	if (retfbt == NULL) {
		fbt->fbtp_id = dtrace_probe_create(fbt_id, modname,
		    name, FBT_RETURN, 3, fbt);
	} else {
		retfbt->fbtp_next = fbt;
		fbt->fbtp_id = retfbt->fbtp_id;
	}

	retfbt = fbt;
	fbt->fbtp_patchpoint = instr;
	fbt->fbtp_ctl = lf;
	fbt->fbtp_loadcnt = lf->loadcnt;
	fbt->fbtp_symindx = symindx;

#ifndef __amd64__
	if (*instr == FBT_POPL_EBP) {
		fbt->fbtp_rval = DTRACE_INVOP_POPL_EBP;
	} else {
		ASSERT(*instr == FBT_LEAVE);
		fbt->fbtp_rval = DTRACE_INVOP_LEAVE;
	}
	fbt->fbtp_roffset =
	    (uintptr_t)(instr - (uint8_t *) symval->value) + 1;

#else
	ASSERT(*instr == FBT_RET);
	fbt->fbtp_rval = DTRACE_INVOP_RET;
	fbt->fbtp_roffset =
	    (uintptr_t)(instr - (uint8_t *) symval->value);
#endif

	fbt->fbtp_savedval = *instr;
	fbt->fbtp_patchval = FBT_PATCHVAL;
	fbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)];
	fbt_probetab[FBT_ADDR2NDX(instr)] = fbt;

	lf->fbt_nentries++;

	instr += size;
	goto again;
}

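/*
 * Walk a kernel module (or the kernel itself) and create FBT probes for
 * its functions.  Modules that are part of DTrace, or that depend on the
 * dtrace module, are skipped, as are modules that already have probes.
 * The result is the usual fbt entry/return probe pairs, e.g. (illustrative
 * one-liner):
 *
 *	dtrace -n 'fbt::malloc:entry { @[execname] = count(); }'
 */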
static void
fbt_provide_module(void *arg, modctl_t *lf)
{
	char modname[MAXPATHLEN];
	int i;
	size_t len;

	strlcpy(modname, lf->filename, sizeof(modname));
	len = strlen(modname);
	if (len > 3 && strcmp(modname + len - 3, ".ko") == 0)
		modname[len - 3] = '\0';

	/*
	 * Employees of dtrace and their families are ineligible.  Void
	 * where prohibited.
	 */
	if (strcmp(modname, "dtrace") == 0)
		return;

	/*
	 * The cyclic timer subsystem can be built as a module and DTrace
	 * depends on that, so it is ineligible too.
	 */
	if (strcmp(modname, "cyclic") == 0)
		return;

	/*
	 * To register with DTrace, a module must list 'dtrace' as a
	 * dependency in order for the kernel linker to resolve
	 * symbols like dtrace_register().  All modules with such a
	 * dependency are ineligible for FBT tracing.
	 */
	for (i = 0; i < lf->ndeps; i++)
		if (strncmp(lf->deps[i]->filename, "dtrace", 6) == 0)
			return;

	if (lf->fbt_nentries) {
		/*
		 * This module has some FBT entries allocated; we're afraid
		 * to screw with it.
		 */
		return;
	}

	/*
	 * List the functions in the module and the symbol values.
	 */
	(void) linker_file_function_listall(lf, fbt_provide_module_function,
	    modname);
}

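/*
 * Free every fbt_probe_t associated with the given probe, unhooking each
 * one from fbt_probetab along the way.
 */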
static void
fbt_destroy(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg, *next, *hash, *last;
	modctl_t *ctl;
	int ndx;

	do {
		ctl = fbt->fbtp_ctl;

		ctl->fbt_nentries--;

		/*
		 * Now we need to remove this probe from the fbt_probetab.
		 */
		ndx = FBT_ADDR2NDX(fbt->fbtp_patchpoint);
		last = NULL;
		hash = fbt_probetab[ndx];

		while (hash != fbt) {
			ASSERT(hash != NULL);
			last = hash;
			hash = hash->fbtp_hashnext;
		}

		if (last != NULL) {
			last->fbtp_hashnext = fbt->fbtp_hashnext;
		} else {
			fbt_probetab[ndx] = fbt->fbtp_hashnext;
		}

		next = fbt->fbtp_next;
		free(fbt, M_FBT);

		fbt = next;
	} while (fbt != NULL);
}

static void
fbt_enable(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
	modctl_t *ctl = fbt->fbtp_ctl;

	ctl->nenabled++;

	/*
	 * Now check that our modctl has the expected load count.  If it
	 * doesn't, this module must have been unloaded and reloaded -- and
	 * we're not going to touch it.
	 */
	if (ctl->loadcnt != fbt->fbtp_loadcnt) {
		if (fbt_verbose) {
			printf("fbt is failing for probe %s "
			    "(module %s reloaded)",
			    fbt->fbtp_name, ctl->filename);
		}

		return;
	}

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		*fbt->fbtp_patchpoint = fbt->fbtp_patchval;
	}
}

static void
fbt_disable(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
	modctl_t *ctl = fbt->fbtp_ctl;

	ASSERT(ctl->nenabled > 0);
	ctl->nenabled--;

	if ((ctl->loadcnt != fbt->fbtp_loadcnt))
		return;

	for (; fbt != NULL; fbt = fbt->fbtp_next)
		*fbt->fbtp_patchpoint = fbt->fbtp_savedval;
}

static void
fbt_suspend(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
	modctl_t *ctl = fbt->fbtp_ctl;

	ASSERT(ctl->nenabled > 0);

	if ((ctl->loadcnt != fbt->fbtp_loadcnt))
		return;

	for (; fbt != NULL; fbt = fbt->fbtp_next)
		*fbt->fbtp_patchpoint = fbt->fbtp_savedval;
}

static void
fbt_resume(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
	modctl_t *ctl = fbt->fbtp_ctl;

	ASSERT(ctl->nenabled > 0);

	if ((ctl->loadcnt != fbt->fbtp_loadcnt))
		return;

	for (; fbt != NULL; fbt = fbt->fbtp_next)
		*fbt->fbtp_patchpoint = fbt->fbtp_patchval;
}

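/*
 * Build the per-module array mapping each symbol table index to the byte
 * offset of its object/function record in the CTF data (0xffffffff marks
 * symbols with no CTF information).  The result is stored via lc->ctfoffp
 * so it is only computed once per module.
 */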
static int
fbt_ctfoff_init(modctl_t *lf, linker_ctf_t *lc)
{
	const Elf_Sym *symp = lc->symtab;
	const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
	const uint8_t *ctfdata = lc->ctftab + sizeof(ctf_header_t);
	int i;
	uint32_t *ctfoff;
	uint32_t objtoff = hp->cth_objtoff;
	uint32_t funcoff = hp->cth_funcoff;
	ushort_t info;
	ushort_t vlen;

	/* Sanity check. */
	if (hp->cth_magic != CTF_MAGIC) {
		printf("Bad magic value in CTF data of '%s'\n", lf->pathname);
		return (EINVAL);
	}

	if (lc->symtab == NULL) {
		printf("No symbol table in '%s'\n", lf->pathname);
		return (EINVAL);
	}

	if ((ctfoff = malloc(sizeof(uint32_t) * lc->nsym, M_LINKER,
	    M_WAITOK)) == NULL)
		return (ENOMEM);

	*lc->ctfoffp = ctfoff;

	for (i = 0; i < lc->nsym; i++, ctfoff++, symp++) {
		if (symp->st_name == 0 || symp->st_shndx == SHN_UNDEF) {
			*ctfoff = 0xffffffff;
			continue;
		}

		switch (ELF_ST_TYPE(symp->st_info)) {
		case STT_OBJECT:
			if (objtoff >= hp->cth_funcoff ||
			    (symp->st_shndx == SHN_ABS &&
			    symp->st_value == 0)) {
				*ctfoff = 0xffffffff;
				break;
			}

			*ctfoff = objtoff;
			objtoff += sizeof (ushort_t);
			break;

		case STT_FUNC:
			if (funcoff >= hp->cth_typeoff) {
				*ctfoff = 0xffffffff;
				break;
			}

			*ctfoff = funcoff;

			info = *((const ushort_t *)(ctfdata + funcoff));
			vlen = CTF_INFO_VLEN(info);

			/*
			 * If we encounter a zero pad at the end, just skip it.
			 * Otherwise skip over the function and its return type
			 * (+2) and the argument list (vlen).
			 */
			if (CTF_INFO_KIND(info) == CTF_K_UNKNOWN && vlen == 0)
				funcoff += sizeof (ushort_t); /* skip pad */
			else
				funcoff += sizeof (ushort_t) * (vlen + 2);
			break;

		default:
			*ctfoff = 0xffffffff;
			break;
		}
	}

	return (0);
}

static ssize_t
fbt_get_ctt_size(uint8_t version, const ctf_type_t *tp, ssize_t *sizep,
    ssize_t *incrementp)
{
	ssize_t size, increment;

	if (version > CTF_VERSION_1 &&
	    tp->ctt_size == CTF_LSIZE_SENT) {
		size = CTF_TYPE_LSIZE(tp);
		increment = sizeof (ctf_type_t);
	} else {
		size = tp->ctt_size;
		increment = sizeof (ctf_stype_t);
	}

	if (sizep)
		*sizep = size;
	if (incrementp)
		*incrementp = increment;

	return (size);
}

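/*
 * Build the per-module array mapping CTF type IDs to byte offsets within
 * the CTF type section.  Pass one walks the section to count the types;
 * pass two records each type's offset, with type ID 0 reserved as a
 * sentinel.
 */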
static int
fbt_typoff_init(linker_ctf_t *lc)
{
	const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
	const ctf_type_t *tbuf;
	const ctf_type_t *tend;
	const ctf_type_t *tp;
	const uint8_t *ctfdata = lc->ctftab + sizeof(ctf_header_t);
	int ctf_typemax = 0;
	uint32_t *xp;
	ulong_t pop[CTF_K_MAX + 1] = { 0 };

	/* Sanity check. */
	if (hp->cth_magic != CTF_MAGIC)
		return (EINVAL);

	tbuf = (const ctf_type_t *) (ctfdata + hp->cth_typeoff);
	tend = (const ctf_type_t *) (ctfdata + hp->cth_stroff);

	int child = hp->cth_parname != 0;

	/*
	 * We make two passes through the entire type section.  In this first
	 * pass, we count the number of each type and the total number of
	 * types.
	 */
	for (tp = tbuf; tp < tend; ctf_typemax++) {
		ushort_t kind = CTF_INFO_KIND(tp->ctt_info);
		ulong_t vlen = CTF_INFO_VLEN(tp->ctt_info);
		ssize_t size, increment;

		size_t vbytes;
		uint_t n;

		(void) fbt_get_ctt_size(hp->cth_version, tp, &size, &increment);

		switch (kind) {
		case CTF_K_INTEGER:
		case CTF_K_FLOAT:
			vbytes = sizeof (uint_t);
			break;
		case CTF_K_ARRAY:
			vbytes = sizeof (ctf_array_t);
			break;
		case CTF_K_FUNCTION:
			vbytes = sizeof (ushort_t) * (vlen + (vlen & 1));
			break;
		case CTF_K_STRUCT:
		case CTF_K_UNION:
			if (size < CTF_LSTRUCT_THRESH) {
				ctf_member_t *mp = (ctf_member_t *)
				    ((uintptr_t)tp + increment);

				vbytes = sizeof (ctf_member_t) * vlen;
				for (n = vlen; n != 0; n--, mp++)
					child |= CTF_TYPE_ISCHILD(mp->ctm_type);
			} else {
				ctf_lmember_t *lmp = (ctf_lmember_t *)
				    ((uintptr_t)tp + increment);

				vbytes = sizeof (ctf_lmember_t) * vlen;
				for (n = vlen; n != 0; n--, lmp++)
					child |=
					    CTF_TYPE_ISCHILD(lmp->ctlm_type);
			}
			break;
		case CTF_K_ENUM:
			vbytes = sizeof (ctf_enum_t) * vlen;
			break;
		case CTF_K_FORWARD:
			/*
			 * For forward declarations, ctt_type is the CTF_K_*
			 * kind for the tag, so bump that population count too.
			 * If ctt_type is unknown, treat the tag as a struct.
			 */
			if (tp->ctt_type == CTF_K_UNKNOWN ||
			    tp->ctt_type >= CTF_K_MAX)
				pop[CTF_K_STRUCT]++;
			else
				pop[tp->ctt_type]++;
			/*FALLTHRU*/
		case CTF_K_UNKNOWN:
			vbytes = 0;
			break;
		case CTF_K_POINTER:
		case CTF_K_TYPEDEF:
		case CTF_K_VOLATILE:
		case CTF_K_CONST:
		case CTF_K_RESTRICT:
			child |= CTF_TYPE_ISCHILD(tp->ctt_type);
			vbytes = 0;
			break;
		default:
			printf("%s(%d): detected invalid CTF kind -- %u\n",
			    __func__, __LINE__, kind);
			return (EIO);
		}
		tp = (ctf_type_t *)((uintptr_t)tp + increment + vbytes);
		pop[kind]++;
	}

	/* account for a sentinel value below */
	ctf_typemax++;
	*lc->typlenp = ctf_typemax;

	if ((xp = malloc(sizeof(uint32_t) * ctf_typemax, M_LINKER,
	    M_ZERO | M_WAITOK)) == NULL)
		return (ENOMEM);

	*lc->typoffp = xp;

	/* type id 0 is used as a sentinel value */
	*xp++ = 0;

	/*
	 * In the second pass, fill in the type offset.
	 */
	for (tp = tbuf; tp < tend; xp++) {
		ushort_t kind = CTF_INFO_KIND(tp->ctt_info);
		ulong_t vlen = CTF_INFO_VLEN(tp->ctt_info);
		ssize_t size, increment;

		size_t vbytes;
		uint_t n;

		(void) fbt_get_ctt_size(hp->cth_version, tp, &size, &increment);

		switch (kind) {
		case CTF_K_INTEGER:
		case CTF_K_FLOAT:
			vbytes = sizeof (uint_t);
			break;
		case CTF_K_ARRAY:
			vbytes = sizeof (ctf_array_t);
			break;
		case CTF_K_FUNCTION:
			vbytes = sizeof (ushort_t) * (vlen + (vlen & 1));
			break;
		case CTF_K_STRUCT:
		case CTF_K_UNION:
			if (size < CTF_LSTRUCT_THRESH) {
				ctf_member_t *mp = (ctf_member_t *)
				    ((uintptr_t)tp + increment);

				vbytes = sizeof (ctf_member_t) * vlen;
				for (n = vlen; n != 0; n--, mp++)
					child |= CTF_TYPE_ISCHILD(mp->ctm_type);
			} else {
				ctf_lmember_t *lmp = (ctf_lmember_t *)
				    ((uintptr_t)tp + increment);

				vbytes = sizeof (ctf_lmember_t) * vlen;
				for (n = vlen; n != 0; n--, lmp++)
					child |=
					    CTF_TYPE_ISCHILD(lmp->ctlm_type);
			}
			break;
		case CTF_K_ENUM:
			vbytes = sizeof (ctf_enum_t) * vlen;
			break;
		case CTF_K_FORWARD:
		case CTF_K_UNKNOWN:
			vbytes = 0;
			break;
		case CTF_K_POINTER:
		case CTF_K_TYPEDEF:
		case CTF_K_VOLATILE:
		case CTF_K_CONST:
		case CTF_K_RESTRICT:
			vbytes = 0;
			break;
		default:
			printf("%s(%d): detected invalid CTF kind -- %u\n",
			    __func__, __LINE__, kind);
			return (EIO);
		}
		*xp = (uint32_t)((uintptr_t) tp - (uintptr_t) ctfdata);
		tp = (ctf_type_t *)((uintptr_t)tp + increment + vbytes);
	}

	return (0);
}

/*
 * CTF Declaration Stack
 *
 * In order to implement ctf_type_name(), we must convert a type graph back
 * into a C type declaration.  Unfortunately, a type graph represents a storage
 * class ordering of the type whereas a type declaration must obey the C rules
 * for operator precedence, and the two orderings are frequently in conflict.
 * For example, consider these CTF type graphs and their C declarations:
 *
 * CTF_K_POINTER -> CTF_K_FUNCTION -> CTF_K_INTEGER  : int (*)()
 * CTF_K_POINTER -> CTF_K_ARRAY -> CTF_K_INTEGER     : int (*)[]
 *
 * In each case, parentheses are used to raise operator * to higher lexical
 * precedence, so the string form of the C declaration cannot be constructed by
 * walking the type graph links and forming the string from left to right.
 *
 * The functions in this file build a set of stacks from the type graph nodes
 * corresponding to the C operator precedence levels in the appropriate order.
 * The code in ctf_type_name() can then iterate over the levels and nodes in
 * lexical precedence order and construct the final C declaration string.
 */
typedef struct ctf_list {
	struct ctf_list *l_prev;	/* previous pointer or tail pointer */
	struct ctf_list *l_next;	/* next pointer or head pointer */
} ctf_list_t;

#define	ctf_list_prev(elem)	((void *)(((ctf_list_t *)(elem))->l_prev))
#define	ctf_list_next(elem)	((void *)(((ctf_list_t *)(elem))->l_next))

typedef enum {
	CTF_PREC_BASE,
	CTF_PREC_POINTER,
	CTF_PREC_ARRAY,
	CTF_PREC_FUNCTION,
	CTF_PREC_MAX
} ctf_decl_prec_t;

typedef struct ctf_decl_node {
	ctf_list_t	cd_list;	/* linked list pointers */
	ctf_id_t	cd_type;	/* type identifier */
	uint_t		cd_kind;	/* type kind */
	uint_t		cd_n;		/* type dimension if array */
} ctf_decl_node_t;

typedef struct ctf_decl {
	ctf_list_t	cd_nodes[CTF_PREC_MAX];	/* declaration node stacks */
	int		cd_order[CTF_PREC_MAX];	/* storage order of decls */
	ctf_decl_prec_t	cd_qualp;	/* qualifier precision */
	ctf_decl_prec_t	cd_ordp;	/* ordered precision */
	char		*cd_buf;	/* buffer for output */
	char		*cd_ptr;	/* buffer location */
	char		*cd_end;	/* buffer limit */
	size_t		cd_len;		/* buffer space required */
	int		cd_err;		/* saved error value */
} ctf_decl_t;

/*
 * Simple doubly-linked list append routine.  This implementation assumes that
 * each list element contains an embedded ctf_list_t as the first member.
 * An additional ctf_list_t is used to store the head (l_next) and tail
 * (l_prev) pointers.  The current head and tail list elements have their
 * previous and next pointers set to NULL, respectively.
 */
static void
ctf_list_append(ctf_list_t *lp, void *new)
{
	ctf_list_t *p = lp->l_prev;	/* p = tail list element */
	ctf_list_t *q = new;		/* q = new list element */

	lp->l_prev = q;
	q->l_prev = p;
	q->l_next = NULL;

	if (p != NULL)
		p->l_next = q;
	else
		lp->l_next = q;
}

/*
 * Prepend the specified existing element to the given ctf_list_t.  The
 * existing pointer should be pointing at a struct with embedded ctf_list_t.
 */
static void
ctf_list_prepend(ctf_list_t *lp, void *new)
{
	ctf_list_t *p = new;		/* p = new list element */
	ctf_list_t *q = lp->l_next;	/* q = head list element */

	lp->l_next = p;
	p->l_prev = NULL;
	p->l_next = q;

	if (q != NULL)
		q->l_prev = p;
	else
		lp->l_prev = p;
}

static void
ctf_decl_init(ctf_decl_t *cd, char *buf, size_t len)
{
	int i;

	bzero(cd, sizeof (ctf_decl_t));

	for (i = CTF_PREC_BASE; i < CTF_PREC_MAX; i++)
		cd->cd_order[i] = CTF_PREC_BASE - 1;

	cd->cd_qualp = CTF_PREC_BASE;
	cd->cd_ordp = CTF_PREC_BASE;

	cd->cd_buf = buf;
	cd->cd_ptr = buf;
	cd->cd_end = buf + len;
}

static void
ctf_decl_fini(ctf_decl_t *cd)
{
	ctf_decl_node_t *cdp, *ndp;
	int i;

	for (i = CTF_PREC_BASE; i < CTF_PREC_MAX; i++) {
		for (cdp = ctf_list_next(&cd->cd_nodes[i]);
		    cdp != NULL; cdp = ndp) {
			ndp = ctf_list_next(cdp);
			free(cdp, M_FBT);
		}
	}
}

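/*
 * Return a pointer to the CTF type record for the given type ID, using the
 * offset table built by fbt_typoff_init(), or NULL if the ID is out of
 * range or has no record in this module's CTF data.
 */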
static const ctf_type_t *
ctf_lookup_by_id(linker_ctf_t *lc, ctf_id_t type)
{
	const ctf_type_t *tp;
	uint32_t offset;
	uint32_t *typoff = *lc->typoffp;

	if (type >= *lc->typlenp) {
		printf("%s(%d): type %d exceeds max %ld\n", __func__,
		    __LINE__, (int) type, *lc->typlenp);
		return (NULL);
	}

	/* Check if the type isn't cross-referenced. */
	if ((offset = typoff[type]) == 0) {
		printf("%s(%d): type %d isn't cross referenced\n", __func__,
		    __LINE__, (int) type);
		return (NULL);
	}

	tp = (const ctf_type_t *)(lc->ctftab + offset + sizeof(ctf_header_t));

	return (tp);
}

static void
fbt_array_info(linker_ctf_t *lc, ctf_id_t type, ctf_arinfo_t *arp)
{
	const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
	const ctf_type_t *tp;
	const ctf_array_t *ap;
	ssize_t increment;

	bzero(arp, sizeof(*arp));

	if ((tp = ctf_lookup_by_id(lc, type)) == NULL)
		return;

	if (CTF_INFO_KIND(tp->ctt_info) != CTF_K_ARRAY)
		return;

	(void) fbt_get_ctt_size(hp->cth_version, tp, NULL, &increment);

	ap = (const ctf_array_t *)((uintptr_t)tp + increment);
	arp->ctr_contents = ap->cta_contents;
	arp->ctr_index = ap->cta_index;
	arp->ctr_nelems = ap->cta_nelems;
}

static const char *
ctf_strptr(linker_ctf_t *lc, int name)
{
	const ctf_header_t *hp = (const ctf_header_t *) lc->ctftab;
	const char *strp = "";

	if (name < 0 || name >= hp->cth_strlen)
		return (strp);

	strp = (const char *)(lc->ctftab + hp->cth_stroff + name +
	    sizeof(ctf_header_t));

	return (strp);
}

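/*
 * Recursively push the type graph for the given type ID onto the
 * declaration stacks, one node per C operator precedence level, so that
 * fbt_type_name() can later emit the pieces in lexical order.
 */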
static void
ctf_decl_push(ctf_decl_t *cd, linker_ctf_t *lc, ctf_id_t type)
{
	ctf_decl_node_t *cdp;
	ctf_decl_prec_t prec;
	uint_t kind, n = 1;
	int is_qual = 0;

	const ctf_type_t *tp;
	ctf_arinfo_t ar;

	if ((tp = ctf_lookup_by_id(lc, type)) == NULL) {
		cd->cd_err = ENOENT;
		return;
	}

	switch (kind = CTF_INFO_KIND(tp->ctt_info)) {
	case CTF_K_ARRAY:
		fbt_array_info(lc, type, &ar);
		ctf_decl_push(cd, lc, ar.ctr_contents);
		n = ar.ctr_nelems;
		prec = CTF_PREC_ARRAY;
		break;

	case CTF_K_TYPEDEF:
		if (ctf_strptr(lc, tp->ctt_name)[0] == '\0') {
			ctf_decl_push(cd, lc, tp->ctt_type);
			return;
		}
		prec = CTF_PREC_BASE;
		break;

	case CTF_K_FUNCTION:
		ctf_decl_push(cd, lc, tp->ctt_type);
		prec = CTF_PREC_FUNCTION;
		break;

	case CTF_K_POINTER:
		ctf_decl_push(cd, lc, tp->ctt_type);
		prec = CTF_PREC_POINTER;
		break;

	case CTF_K_VOLATILE:
	case CTF_K_CONST:
	case CTF_K_RESTRICT:
		ctf_decl_push(cd, lc, tp->ctt_type);
		prec = cd->cd_qualp;
		is_qual++;
		break;

	default:
		prec = CTF_PREC_BASE;
	}

	if ((cdp = malloc(sizeof (ctf_decl_node_t), M_FBT, M_WAITOK)) == NULL) {
		cd->cd_err = EAGAIN;
		return;
	}

	cdp->cd_type = type;
	cdp->cd_kind = kind;
	cdp->cd_n = n;

	if (ctf_list_next(&cd->cd_nodes[prec]) == NULL)
		cd->cd_order[prec] = cd->cd_ordp++;

	/*
	 * Reset cd_qualp to the highest precedence level that we've seen so
	 * far that can be qualified (CTF_PREC_BASE or CTF_PREC_POINTER).
	 */
	if (prec > cd->cd_qualp && prec < CTF_PREC_ARRAY)
		cd->cd_qualp = prec;

	/*
	 * C array declarators are ordered inside out so prepend them.  Also by
	 * convention qualifiers of base types precede the type specifier (e.g.
	 * const int vs. int const) even though the two forms are equivalent.
	 */
	if (kind == CTF_K_ARRAY || (is_qual && prec == CTF_PREC_BASE))
		ctf_list_prepend(&cd->cd_nodes[prec], cdp);
	else
		ctf_list_append(&cd->cd_nodes[prec], cdp);
}

static void
ctf_decl_sprintf(ctf_decl_t *cd, const char *format, ...)
{
	size_t len = (size_t)(cd->cd_end - cd->cd_ptr);
	va_list ap;
	size_t n;

	va_start(ap, format);
	n = vsnprintf(cd->cd_ptr, len, format, ap);
	va_end(ap);

	cd->cd_ptr += MIN(n, len);
	cd->cd_len += n;
}

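/*
 * Render the C declaration for a CTF type ID into the supplied buffer and
 * return the length of the full declaration (which, like vsnprintf(), may
 * exceed the buffer size), or -1 on error.
 */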
static ssize_t
fbt_type_name(linker_ctf_t *lc, ctf_id_t type, char *buf, size_t len)
{
	ctf_decl_t cd;
	ctf_decl_node_t *cdp;
	ctf_decl_prec_t prec, lp, rp;
	int ptr, arr;
	uint_t k;

	if (lc == NULL && type == CTF_ERR)
		return (-1); /* simplify caller code by permitting CTF_ERR */

	ctf_decl_init(&cd, buf, len);
	ctf_decl_push(&cd, lc, type);

	if (cd.cd_err != 0) {
		ctf_decl_fini(&cd);
		return (-1);
	}

	/*
	 * If the type graph's order conflicts with lexical precedence order
	 * for pointers or arrays, then we need to surround the declarations at
	 * the corresponding lexical precedence with parentheses.  This can
	 * result in either a parenthesized pointer (*) as in int (*)() or
	 * int (*)[], or in a parenthesized pointer and array as in int (*[])().
	 */
	ptr = cd.cd_order[CTF_PREC_POINTER] > CTF_PREC_POINTER;
	arr = cd.cd_order[CTF_PREC_ARRAY] > CTF_PREC_ARRAY;

	rp = arr ? CTF_PREC_ARRAY : ptr ? CTF_PREC_POINTER : -1;
	lp = ptr ? CTF_PREC_POINTER : arr ? CTF_PREC_ARRAY : -1;

	k = CTF_K_POINTER; /* avoid leading whitespace (see below) */

	for (prec = CTF_PREC_BASE; prec < CTF_PREC_MAX; prec++) {
		for (cdp = ctf_list_next(&cd.cd_nodes[prec]);
		    cdp != NULL; cdp = ctf_list_next(cdp)) {

			const ctf_type_t *tp =
			    ctf_lookup_by_id(lc, cdp->cd_type);
			const char *name = ctf_strptr(lc, tp->ctt_name);

			if (k != CTF_K_POINTER && k != CTF_K_ARRAY)
				ctf_decl_sprintf(&cd, " ");

			if (lp == prec) {
				ctf_decl_sprintf(&cd, "(");
				lp = -1;
			}

			switch (cdp->cd_kind) {
			case CTF_K_INTEGER:
			case CTF_K_FLOAT:
			case CTF_K_TYPEDEF:
				ctf_decl_sprintf(&cd, "%s", name);
				break;
			case CTF_K_POINTER:
				ctf_decl_sprintf(&cd, "*");
				break;
			case CTF_K_ARRAY:
				ctf_decl_sprintf(&cd, "[%u]", cdp->cd_n);
				break;
			case CTF_K_FUNCTION:
				ctf_decl_sprintf(&cd, "()");
				break;
			case CTF_K_STRUCT:
			case CTF_K_FORWARD:
				ctf_decl_sprintf(&cd, "struct %s", name);
				break;
			case CTF_K_UNION:
				ctf_decl_sprintf(&cd, "union %s", name);
				break;
			case CTF_K_ENUM:
				ctf_decl_sprintf(&cd, "enum %s", name);
				break;
			case CTF_K_VOLATILE:
				ctf_decl_sprintf(&cd, "volatile");
				break;
			case CTF_K_CONST:
				ctf_decl_sprintf(&cd, "const");
				break;
			case CTF_K_RESTRICT:
				ctf_decl_sprintf(&cd, "restrict");
				break;
			}

			k = cdp->cd_kind;
		}

		if (rp == prec)
			ctf_decl_sprintf(&cd, ")");
	}

	ctf_decl_fini(&cd);
	return (cd.cd_len);
}

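/*
 * Resolve the native C type name of probe argument 'dtargd_ndx' from the
 * module's CTF function information.  For entry probes this is the type of
 * the corresponding parameter; for return probes args[0] is always "int"
 * (the return site offset) and args[1] gets the function's return type.
 */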
static void
fbt_getargdesc(void *arg __unused, dtrace_id_t id __unused, void *parg,
    dtrace_argdesc_t *desc)
{
	const ushort_t *dp;
	fbt_probe_t *fbt = parg;
	linker_ctf_t lc;
	modctl_t *ctl = fbt->fbtp_ctl;
	int ndx = desc->dtargd_ndx;
	int symindx = fbt->fbtp_symindx;
	uint32_t *ctfoff;
	uint32_t offset;
	ushort_t info, kind, n;

	if (fbt->fbtp_roffset != 0 && desc->dtargd_ndx == 0) {
		(void) strcpy(desc->dtargd_native, "int");
		return;
	}

	desc->dtargd_ndx = DTRACE_ARGNONE;

	/* Get a pointer to the CTF data and its length. */
	if (linker_ctf_get(ctl, &lc) != 0)
		/* No CTF data? Something wrong? *shrug* */
		return;

	/* Check if this module hasn't been initialised yet. */
	if (*lc.ctfoffp == NULL) {
		/*
		 * Initialise the CTF object and function symindx to
		 * byte offset array.
		 */
		if (fbt_ctfoff_init(ctl, &lc) != 0)
			return;

		/* Initialise the CTF type to byte offset array. */
		if (fbt_typoff_init(&lc) != 0)
			return;
	}

	ctfoff = *lc.ctfoffp;

	if (ctfoff == NULL || *lc.typoffp == NULL)
		return;

	/* Check if the symbol index is out of range. */
	if (symindx >= lc.nsym)
		return;

	/* Check if the symbol isn't cross-referenced. */
	if ((offset = ctfoff[symindx]) == 0xffffffff)
		return;

	dp = (const ushort_t *)(lc.ctftab + offset + sizeof(ctf_header_t));

	info = *dp++;
	kind = CTF_INFO_KIND(info);
	n = CTF_INFO_VLEN(info);

	if (kind == CTF_K_UNKNOWN && n == 0) {
		printf("%s(%d): Unknown function!\n", __func__, __LINE__);
		return;
	}

	if (kind != CTF_K_FUNCTION) {
		printf("%s(%d): Expected a function!\n", __func__, __LINE__);
		return;
	}

	if (fbt->fbtp_roffset != 0) {
		/* Only return type is available for args[1] in return probe. */
		if (ndx > 1)
			return;
		ASSERT(ndx == 1);
	} else {
		/* Check if the requested argument doesn't exist. */
		if (ndx >= n)
			return;

		/* Skip the return type and arguments up to the one requested. */
		dp += ndx + 1;
	}

	if (fbt_type_name(&lc, *dp, desc->dtargd_native,
	    sizeof(desc->dtargd_native)) > 0)
		desc->dtargd_ndx = ndx;

	return;
}

static int
fbt_linker_file_cb(linker_file_t lf, void *arg)
{

	fbt_provide_module(arg, lf);

	return (0);
}

static void
fbt_load(void *dummy)
{
	/* Create the /dev/dtrace/fbt entry. */
	fbt_cdev = make_dev(&fbt_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/fbt");

	/* Default the probe table size if not specified. */
	if (fbt_probetab_size == 0)
		fbt_probetab_size = FBT_PROBETAB_SIZE;

	/* Choose the hash mask for the probe table. */
	fbt_probetab_mask = fbt_probetab_size - 1;

	/* Allocate memory for the probe table. */
	fbt_probetab =
	    malloc(fbt_probetab_size * sizeof (fbt_probe_t *), M_FBT,
	    M_WAITOK | M_ZERO);

	dtrace_doubletrap_func = fbt_doubletrap;
	dtrace_invop_add(fbt_invop);

	if (dtrace_register("fbt", &fbt_attr, DTRACE_PRIV_USER,
	    NULL, &fbt_pops, NULL, &fbt_id) != 0)
		return;

	/* Create probes for the kernel and already-loaded modules. */
	linker_file_foreach(fbt_linker_file_cb, NULL);
}

static int
fbt_unload()
{
	int error = 0;

	/* De-register the invalid opcode handler. */
	dtrace_invop_remove(fbt_invop);

	dtrace_doubletrap_func = NULL;

	/* De-register this DTrace provider. */
	if ((error = dtrace_unregister(fbt_id)) != 0)
		return (error);

	/* Free the probe table. */
	free(fbt_probetab, M_FBT);
	fbt_probetab = NULL;
	fbt_probetab_mask = 0;

	destroy_dev(fbt_cdev);

	return (error);
}

static int
fbt_modevent(module_t mod __unused, int type, void *data __unused)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		break;

	case MOD_UNLOAD:
		break;

	case MOD_SHUTDOWN:
		break;

	default:
		error = EOPNOTSUPP;
		break;

	}

	return (error);
}

static int
fbt_open(struct cdev *dev __unused, int oflags __unused, int devtype __unused,
    struct thread *td __unused)
{
	return (0);
}

SYSINIT(fbt_load, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fbt_load, NULL);
SYSUNINIT(fbt_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, fbt_unload, NULL);

DEV_MODULE(fbt, fbt_modevent, NULL);
MODULE_VERSION(fbt, 1);
MODULE_DEPEND(fbt, dtrace, 1, 1, 1);
MODULE_DEPEND(fbt, opensolaris, 1, 1, 1);