1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2013 by Delphix. All rights reserved. 24 * 25 * Copyright 2019 Joyent, Inc. 26 * Copyright 2024 Oxide Computer Company 27 */ 28 29 #include <kmdb/kmdb_kvm.h> 30 #include <kmdb/kvm.h> 31 #include <kmdb/kmdb_kdi.h> 32 #include <kmdb/kmdb_promif.h> 33 #include <kmdb/kmdb_module.h> 34 #include <kmdb/kmdb_asmutil.h> 35 #include <mdb/mdb_types.h> 36 #include <mdb/mdb_conf.h> 37 #include <mdb/mdb_err.h> 38 #include <mdb/mdb_modapi.h> 39 #include <mdb/mdb_target_impl.h> 40 #include <mdb/mdb_debug.h> 41 #include <mdb/mdb_string.h> 42 #include <mdb/mdb_ctf.h> 43 #include <mdb/mdb_kreg_impl.h> 44 #include <mdb/mdb_ks.h> 45 #include <mdb/mdb.h> 46 47 #include <strings.h> 48 #include <dlfcn.h> 49 #include <sys/isa_defs.h> 50 #include <sys/kobj.h> 51 #include <sys/kobj_impl.h> 52 #include <sys/bitmap.h> 53 #include <sys/uuid.h> 54 #include <vm/as.h> 55 56 static const char KMT_RTLD_NAME[] = "krtld"; 57 static const char KMT_MODULE[] = "mdb_ks"; 58 static const char KMT_CTFPARENT[] = "genunix"; 59 60 static mdb_list_t kmt_defbp_list; /* List of current deferred bp's */ 61 static int kmt_defbp_lock; /* For list, running kernel holds */ 62 static uint_t kmt_defbp_modchg_isload; /* Whether mod change is load/unload */ 63 static struct modctl *kmt_defbp_modchg_modctl; /* modctl for defbp checking */ 64 static uint_t kmt_defbp_num; /* Number of referenced def'd bp's */ 65 static int kmt_defbp_bpspec; /* vespec for def'd bp activation bp */ 66 67 static const mdb_se_ops_t kmt_brkpt_ops; 68 static const mdb_se_ops_t kmt_wapt_ops; 69 70 static void kmt_sync(mdb_tgt_t *); 71 72 typedef struct kmt_symarg { 73 mdb_tgt_sym_f *sym_cb; /* Caller's callback function */ 74 void *sym_data; /* Callback function argument */ 75 uint_t sym_type; /* Symbol type/binding filter */ 76 mdb_syminfo_t sym_info; /* Symbol id and table id */ 77 const char *sym_obj; /* Containing object */ 78 } kmt_symarg_t; 79 80 typedef struct kmt_maparg { 81 mdb_tgt_t *map_target; /* Target used for mapping iter */ 82 mdb_tgt_map_f *map_cb; /* Caller's callback function */ 83 void *map_data; /* Callback function argument */ 84 } kmt_maparg_t; 85 86 /*ARGSUSED*/ 87 int 88 kmt_setflags(mdb_tgt_t *t, int flags) 89 { 90 /* 91 * We only handle one flag (ALLOWIO), and we can't fail to set or clear 92 * it, so we just blindly replace the t_flags version with the one 93 * passed. 
94 */ 95 t->t_flags = (t->t_flags & ~MDB_TGT_F_ALLOWIO) | 96 (flags & MDB_TGT_F_ALLOWIO); 97 98 return (0); 99 } 100 101 /*ARGSUSED*/ 102 const char * 103 kmt_name(mdb_tgt_t *t) 104 { 105 return ("kmdb_kvm"); 106 } 107 108 /*ARGSUSED*/ 109 static const char * 110 kmt_platform(mdb_tgt_t *t) 111 { 112 static char platform[SYS_NMLN]; 113 114 if (kmdb_dpi_get_state(NULL) == DPI_STATE_INIT) 115 return (mdb_conf_platform()); 116 117 if (mdb_tgt_readsym(mdb.m_target, MDB_TGT_AS_VIRT, platform, 118 sizeof (platform), "unix", "platform") != sizeof (platform)) { 119 warn("'platform' symbol is missing from kernel\n"); 120 return ("unknown"); 121 } 122 123 return (platform); 124 } 125 126 static int 127 kmt_uname(mdb_tgt_t *t, struct utsname *utsp) 128 { 129 return (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, utsp, 130 sizeof (struct utsname), MDB_TGT_OBJ_EXEC, "utsname")); 131 } 132 133 /*ARGSUSED*/ 134 static int 135 kmt_dmodel(mdb_tgt_t *t) 136 { 137 return (MDB_TGT_MODEL_NATIVE); 138 } 139 140 /*ARGSUSED*/ 141 ssize_t 142 kmt_rw(mdb_tgt_t *t, void *buf, size_t nbytes, uint64_t addr, 143 ssize_t (*rw)(void *, size_t, uint64_t)) 144 { 145 /* 146 * chunksz needs to be volatile because of the use of setjmp() in this 147 * function. 148 */ 149 volatile size_t chunksz; 150 size_t n, ndone; 151 jmp_buf *oldpcb = NULL; 152 jmp_buf pcb; 153 ssize_t res; 154 155 kmdb_prom_check_interrupt(); 156 157 if (nbytes == 0) 158 return (0); 159 160 /* 161 * Try to process the entire buffer, as requested. If we catch a fault, 162 * try smaller chunks. This allows us to handle regions that cross 163 * mapping boundaries. 164 */ 165 chunksz = nbytes; 166 ndone = 0; 167 if (setjmp(pcb) != 0) { 168 if (chunksz == 1) { 169 /* We failed with the smallest chunk - give up */ 170 kmdb_dpi_restore_fault_hdlr(oldpcb); 171 return (ndone > 0 ? ndone : -1); /* errno set for us */ 172 } else if (chunksz > 4) 173 chunksz = 4; 174 else 175 chunksz = 1; 176 } 177 178 oldpcb = kmdb_dpi_set_fault_hdlr(&pcb); 179 while (nbytes > 0) { 180 n = MIN(chunksz, nbytes); 181 182 if ((res = rw(buf, n, addr)) != n) 183 return (res < 0 ? res : ndone + res); 184 185 addr += n; 186 nbytes -= n; 187 ndone += n; 188 buf = ((caddr_t)buf + n); 189 } 190 191 kmdb_dpi_restore_fault_hdlr(oldpcb); 192 193 return (ndone); 194 } 195 196 static void 197 kmt_bcopy(const void *s1, void *s2, size_t n) 198 { 199 /* 200 * We need to guarantee atomic accesses for certain sizes. bcopy won't 201 * make that guarantee, so we need to do it ourselves. 202 */ 203 #ifdef _LP64 204 if (n == 8 && ((uintptr_t)s1 & 7) == 0 && ((uintptr_t)s2 & 7) == 0) 205 *(uint64_t *)s2 = *(uint64_t *)s1; 206 else 207 #endif 208 if (n == 4 && ((uintptr_t)s1 & 3) == 0 && ((uintptr_t)s2 & 3) == 0) 209 *(uint32_t *)s2 = *(uint32_t *)s1; 210 else if (n == 2 && ((uintptr_t)s1 & 1) == 0 && ((uintptr_t)s2 & 1) == 0) 211 *(uint16_t *)s2 = *(uint16_t *)s1; 212 else if (n == 1) 213 *(uint8_t *)s2 = *(uint8_t *)s1; 214 else 215 bcopy(s1, s2, n); 216 } 217 218 static ssize_t 219 kmt_reader(void *buf, size_t nbytes, uint64_t addr) 220 { 221 kmt_bcopy((void *)(uintptr_t)addr, buf, nbytes); 222 return (nbytes); 223 } 224 225 ssize_t 226 kmt_writer(void *buf, size_t nbytes, uint64_t addr) 227 { 228 kmt_bcopy(buf, (void *)(uintptr_t)addr, nbytes); 229 return (nbytes); 230 } 231 232 /*ARGSUSED*/ 233 static ssize_t 234 kmt_read(mdb_tgt_t *t, void *buf, size_t nbytes, uintptr_t addr) 235 { 236 /* 237 * We don't want to allow reads of I/O-mapped memory. 
Multi-page reads 238 * that cross into I/O-mapped memory should be restricted to the initial 239 * non-I/O region. Reads that begin in I/O-mapped memory are failed 240 * outright. 241 */ 242 if (!(t->t_flags & MDB_TGT_F_ALLOWIO) && 243 (nbytes = kmdb_kdi_range_is_nontoxic(addr, nbytes, 0)) == 0) 244 return (set_errno(EMDB_NOMAP)); 245 246 return (kmt_rw(t, buf, nbytes, addr, kmt_reader)); 247 } 248 249 /*ARGSUSED*/ 250 static ssize_t 251 kmt_pread(mdb_tgt_t *t, void *buf, size_t nbytes, physaddr_t addr) 252 { 253 return (kmt_rw(t, buf, nbytes, addr, kmdb_kdi_pread)); 254 } 255 256 /*ARGSUSED*/ 257 ssize_t 258 kmt_pwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, physaddr_t addr) 259 { 260 return (kmt_rw(t, (void *)buf, nbytes, addr, kmdb_kdi_pwrite)); 261 } 262 263 static uintptr_t 264 kmt_read_kas(mdb_tgt_t *t) 265 { 266 GElf_Sym sym; 267 268 if (mdb_tgt_lookup_by_name(t, "unix", "kas", &sym, NULL) < 0) { 269 warn("'kas' symbol is missing from kernel\n"); 270 (void) set_errno(EMDB_NOSYM); 271 return (0); 272 } 273 274 return ((uintptr_t)sym.st_value); 275 } 276 277 static int 278 kmt_vtop(mdb_tgt_t *t, mdb_tgt_as_t as, uintptr_t va, physaddr_t *pap) 279 { 280 mdb_module_t *mod; 281 struct as *asp; 282 mdb_var_t *v; 283 284 switch ((uintptr_t)as) { 285 case (uintptr_t)MDB_TGT_AS_PHYS: 286 case (uintptr_t)MDB_TGT_AS_FILE: 287 case (uintptr_t)MDB_TGT_AS_IO: 288 return (set_errno(EINVAL)); 289 case (uintptr_t)MDB_TGT_AS_VIRT: 290 case (uintptr_t)MDB_TGT_AS_VIRT_I: 291 case (uintptr_t)MDB_TGT_AS_VIRT_S: 292 if ((asp = (struct as *)kmt_read_kas(t)) == NULL) 293 return (-1); /* errno is set for us */ 294 break; 295 default: 296 asp = (struct as *)as; 297 298 /* We don't support non-kas vtop */ 299 if (asp != (struct as *)kmt_read_kas(t)) 300 return (set_errno(EMDB_TGTNOTSUP)); 301 } 302 303 if (kmdb_prom_vtop(va, pap) == 0) 304 return (0); 305 306 if ((v = mdb_nv_lookup(&mdb.m_modules, "unix")) != NULL && 307 (mod = mdb_nv_get_cookie(v)) != NULL) { 308 int (*fptr)(uintptr_t, struct as *, physaddr_t *); 309 310 fptr = (int (*)(uintptr_t, struct as *, physaddr_t *)) 311 dlsym(mod->mod_hdl, "platform_vtop"); 312 313 if ((fptr != NULL) && ((*fptr)(va, asp, pap) == 0)) 314 return (0); 315 } 316 317 return (set_errno(EMDB_NOMAP)); 318 } 319 320 /*ARGSUSED*/ 321 static int 322 kmt_cpuregs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 323 { 324 const mdb_tgt_gregset_t *gregs; 325 intptr_t cpuid = DPI_MASTER_CPUID; 326 int i; 327 328 if (flags & DCMD_ADDRSPEC) { 329 if (argc != 0) 330 return (DCMD_USAGE); 331 if ((cpuid = mdb_cpu2cpuid(addr)) < 0) { 332 (void) set_errno(EMDB_NOMAP); 333 mdb_warn("failed to find cpuid for cpu at %p", addr); 334 return (DCMD_ERR); 335 } 336 } 337 338 i = mdb_getopts(argc, argv, 339 'c', MDB_OPT_UINTPTR, &cpuid, 340 NULL); 341 342 argc -= i; 343 argv += i; 344 345 if (argc != 0) 346 return (DCMD_USAGE); 347 348 if ((gregs = kmdb_dpi_get_gregs(cpuid)) == NULL) { 349 warn("failed to retrieve registers for cpu %d", (int)cpuid); 350 return (DCMD_ERR); 351 } 352 353 kmt_printregs(gregs); 354 355 return (DCMD_OK); 356 } 357 358 static int 359 kmt_regs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 360 { 361 if (flags & DCMD_ADDRSPEC) 362 return (DCMD_USAGE); 363 364 return (kmt_cpuregs(addr, flags, argc, argv)); 365 } 366 367 static int 368 kmt_cpustack_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 369 { 370 intptr_t cpuid = DPI_MASTER_CPUID; 371 uint_t verbose = 0; 372 int i; 373 374 if (flags & DCMD_ADDRSPEC) { 375 if 
((cpuid = mdb_cpu2cpuid(addr)) < 0) { 376 (void) set_errno(EMDB_NOMAP); 377 mdb_warn("failed to find cpuid for cpu at %p", addr); 378 return (DCMD_ERR); 379 } 380 flags &= ~DCMD_ADDRSPEC; 381 } 382 383 i = mdb_getopts(argc, argv, 384 'c', MDB_OPT_UINTPTR, &cpuid, 385 'v', MDB_OPT_SETBITS, 1, &verbose, 386 NULL); 387 388 argc -= i; 389 argv += i; 390 391 return (kmt_cpustack(addr, flags, argc, argv, cpuid, verbose)); 392 } 393 394 /* 395 * Lasciate ogne speranza, voi ch'intrate. 396 */ 397 static int 398 kmt_call(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 399 { 400 uintptr_t *call_argv, rval; 401 int parse_strings = 1; 402 GElf_Sym sym; 403 jmp_buf *oldpcb = NULL; 404 jmp_buf pcb; 405 int i; 406 407 if (!(flags & DCMD_ADDRSPEC)) 408 return (DCMD_USAGE); 409 410 if (mdb_tgt_lookup_by_addr(mdb.m_target, addr, MDB_TGT_SYM_EXACT, 411 NULL, 0, &sym, NULL) == 0 && GELF_ST_TYPE(sym.st_info) != 412 STT_FUNC) { 413 warn("%a is not a function\n", addr); 414 return (DCMD_ERR); 415 } 416 417 if (argc > 1 && argv[0].a_type == MDB_TYPE_STRING && 418 strcmp(argv[0].a_un.a_str, "-s") == 0) { 419 parse_strings = 0; 420 argc--; 421 argv++; 422 } 423 424 call_argv = mdb_alloc(sizeof (uintptr_t) * argc, UM_SLEEP); 425 426 for (i = 0; i < argc; i++) { 427 switch (argv[i].a_type) { 428 case MDB_TYPE_STRING: 429 /* 430 * mdb_strtoull doesn't return on error, so we have to 431 * pre-check strings suspected to contain numbers. 432 */ 433 if (parse_strings && strisbasenum(argv[i].a_un.a_str)) { 434 call_argv[i] = (uintptr_t)mdb_strtoull( 435 argv[i].a_un.a_str); 436 } else 437 call_argv[i] = (uintptr_t)argv[i].a_un.a_str; 438 439 break; 440 441 case MDB_TYPE_IMMEDIATE: 442 call_argv[i] = argv[i].a_un.a_val; 443 break; 444 445 default: 446 mdb_free(call_argv, 447 sizeof (uintptr_t) * argc); 448 return (DCMD_USAGE); 449 } 450 } 451 452 if (setjmp(pcb) != 0) { 453 warn("call failed: caught a trap\n"); 454 455 kmdb_dpi_restore_fault_hdlr(oldpcb); 456 mdb_free(call_argv, sizeof (uintptr_t) * argc); 457 return (DCMD_ERR); 458 } 459 460 oldpcb = kmdb_dpi_set_fault_hdlr(&pcb); 461 rval = kmdb_dpi_call(addr, argc, call_argv); 462 kmdb_dpi_restore_fault_hdlr(oldpcb); 463 464 if (flags & DCMD_PIPE_OUT) { 465 mdb_printf("%p\n", rval); 466 } else { 467 /* pretty-print the results */ 468 mdb_printf("%p = %a(", rval, addr); 469 for (i = 0; i < argc; i++) { 470 if (i > 0) 471 mdb_printf(", "); 472 if (argv[i].a_type == MDB_TYPE_STRING) { 473 /* I'm ashamed but amused */ 474 char *quote = &("\""[parse_strings && 475 strisbasenum(argv[i].a_un.a_str)]); 476 477 mdb_printf("%s%s%s", quote, argv[i].a_un.a_str, 478 quote); 479 } else 480 mdb_printf("%p", argv[i].a_un.a_val); 481 } 482 mdb_printf(");\n"); 483 } 484 485 mdb_free(call_argv, sizeof (uintptr_t) * argc); 486 487 return (DCMD_OK); 488 } 489 490 /*ARGSUSED*/ 491 int 492 kmt_dump_crumbs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 493 { 494 intptr_t cpu = -1; 495 496 if (flags & DCMD_ADDRSPEC) { 497 if (argc != 0) 498 return (DCMD_USAGE); 499 } else { 500 addr = 0; 501 502 if (mdb_getopts(argc, argv, 503 'c', MDB_OPT_UINTPTR, &cpu, 504 NULL) != argc) 505 return (DCMD_USAGE); 506 } 507 508 kmdb_dpi_dump_crumbs(addr, cpu); 509 510 return (DCMD_OK); 511 } 512 513 /*ARGSUSED*/ 514 static int 515 kmt_noducttape(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 516 { 517 int a = 0; 518 519 return (a/a); 520 } 521 522 static int 523 kmt_dmod_status(char *msg, int state) 524 { 525 kmdb_modctl_t *kmc; 526 mdb_var_t *v; 527 int first = 1, 
n = 0; 528 529 mdb_nv_rewind(&mdb.m_dmodctl); 530 while ((v = mdb_nv_advance(&mdb.m_dmodctl)) != NULL) { 531 kmc = MDB_NV_COOKIE(v); 532 533 if (kmc->kmc_state != state) 534 continue; 535 536 n++; 537 538 if (msg != NULL) { 539 if (first) { 540 mdb_printf(msg, NULL); 541 first = 0; 542 } 543 544 mdb_printf(" %s", kmc->kmc_modname); 545 } 546 } 547 548 if (!first && msg != NULL) 549 mdb_printf("\n"); 550 551 return (n); 552 } 553 554 /*ARGSUSED*/ 555 static int 556 kmt_status_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 557 { 558 struct utsname uts; 559 char uuid[UUID_PRINTABLE_STRING_LENGTH]; 560 kreg_t tt; 561 562 if (mdb_tgt_readsym(mdb.m_target, MDB_TGT_AS_VIRT, &uts, sizeof (uts), 563 "unix", "utsname") != sizeof (uts)) { 564 warn("failed to read 'utsname' struct from kernel\n"); 565 bzero(&uts, sizeof (uts)); 566 (void) strcpy(uts.nodename, "unknown machine"); 567 } 568 569 mdb_printf("debugging live kernel (%d-bit) on %s\n", 570 (int)(sizeof (void *) * NBBY), 571 (*uts.nodename == '\0' ? "(not set)" : uts.nodename)); 572 mdb_printf("operating system: %s %s (%s)\n", 573 uts.release, uts.version, uts.machine); 574 575 mdb_print_buildversion(); 576 577 if (mdb_readsym(uuid, sizeof (uuid), 578 "dump_osimage_uuid") == sizeof (uuid) && 579 uuid[sizeof (uuid) - 1] == '\0') { 580 mdb_printf("image uuid: %s\n", uuid[0] != '\0' ? 581 uuid : "(not set)"); 582 } 583 584 mdb_printf("DTrace state: %s\n", (kmdb_kdi_dtrace_get_state() == 585 KDI_DTSTATE_DTRACE_ACTIVE ? "active (debugger breakpoints cannot " 586 "be armed)" : "inactive")); 587 588 (void) kmdb_dpi_get_register("tt", &tt); 589 mdb_printf("stopped on: %s\n", kmt_trapname(tt)); 590 591 (void) kmt_dmod_status("pending dmod loads:", KMDB_MC_STATE_LOADING); 592 (void) kmt_dmod_status("pending dmod unloads:", 593 KMDB_MC_STATE_UNLOADING); 594 595 return (DCMD_OK); 596 } 597 598 /*ARGSUSED*/ 599 static int 600 kmt_switch(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv) 601 { 602 if (!(flags & DCMD_ADDRSPEC) || argc != 0) 603 return (DCMD_USAGE); 604 605 if (kmdb_dpi_switch_master((int)addr) < 0) { 606 warn("failed to switch to CPU %d", (int)addr); 607 return (DCMD_ERR); 608 } 609 610 return (DCMD_OK); 611 } 612 613 static const mdb_dcmd_t kmt_dcmds[] = { 614 { "$c", "?[cnt]", "print stack backtrace", kmt_stack }, 615 { "$C", "?[cnt]", "print stack backtrace", kmt_stackv }, 616 { "$r", NULL, "print general-purpose registers", kmt_regs }, 617 { "$?", NULL, "print status and registers", kmt_regs }, 618 { ":x", ":", "change the active CPU", kmt_switch }, 619 { "call", ":[arg ...]", "call a kernel function", kmt_call }, 620 { "cpustack", "?[-v] [-c cpuid] [cnt]", "print stack backtrace for a " 621 "specific CPU", kmt_cpustack_dcmd }, 622 { "cpuregs", "?[-c cpuid]", "print general-purpose registers for a " 623 "specific CPU", kmt_cpuregs }, 624 { "crumbs", NULL, NULL, kmt_dump_crumbs }, 625 #if defined(__i386) || defined(__amd64) 626 { "in", ":[-L len]", "read from I/O port", kmt_in_dcmd }, 627 { "out", ":[-L len] val", "write to I/O port", kmt_out_dcmd }, 628 { "rdmsr", ":", "read an MSR", kmt_rdmsr }, 629 { "wrmsr", ": val", "write an MSR", kmt_wrmsr }, 630 { "rdpcicfg", ": bus dev func", "read a register in PCI config space", 631 kmt_rdpcicfg }, 632 { "wrpcicfg", ": bus dev func val", "write a register in PCI config " 633 "space", kmt_wrpcicfg }, 634 #endif 635 { "noducttape", NULL, NULL, kmt_noducttape }, 636 { "regs", NULL, "print general-purpose registers", kmt_regs }, 637 { "stack", "?[cnt]", "print stack 
backtrace", kmt_stack }, 638 { "stackregs", "?", "print stack backtrace and registers", kmt_stackr }, 639 { "status", NULL, "print summary of current target", kmt_status_dcmd }, 640 { "switch", ":", "change the active CPU", kmt_switch }, 641 { NULL } 642 }; 643 644 static uintmax_t 645 kmt_reg_disc_get(const mdb_var_t *v) 646 { 647 mdb_tgt_reg_t r = 0; 648 649 (void) mdb_tgt_getareg(MDB_NV_COOKIE(v), 0, mdb_nv_get_name(v), &r); 650 651 return (r); 652 } 653 654 static void 655 kmt_reg_disc_set(mdb_var_t *v, uintmax_t r) 656 { 657 if (mdb_tgt_putareg(MDB_NV_COOKIE(v), 0, mdb_nv_get_name(v), r) == -1) 658 warn("failed to modify %%%s register", mdb_nv_get_name(v)); 659 } 660 661 static const mdb_nv_disc_t kmt_reg_disc = { 662 .disc_get = kmt_reg_disc_get, 663 .disc_set = kmt_reg_disc_set 664 }; 665 666 /*ARGSUSED*/ 667 static int 668 kmt_getareg(mdb_tgt_t *t, mdb_tgt_tid_t tid, const char *rname, 669 mdb_tgt_reg_t *rp) 670 { 671 kreg_t val; 672 673 if (kmdb_dpi_get_register(rname, &val) < 0) 674 return (set_errno(EMDB_BADREG)); 675 676 *rp = val; 677 return (0); 678 } 679 680 /*ARGSUSED*/ 681 static int 682 kmt_putareg(mdb_tgt_t *t, mdb_tgt_tid_t tid, const char *rname, mdb_tgt_reg_t r) 683 { 684 if (kmdb_dpi_set_register(rname, r) < 0) 685 return (set_errno(EMDB_BADREG)); 686 687 return (0); 688 } 689 690 static void 691 kmt_mod_destroy(kmt_module_t *km) 692 { 693 if (km->km_name != NULL) 694 strfree(km->km_name); 695 if (km->km_symtab != NULL) 696 mdb_gelf_symtab_destroy(km->km_symtab); 697 if (km->km_ctfp != NULL) 698 mdb_ctf_close(km->km_ctfp); 699 } 700 701 static kmt_module_t * 702 kmt_mod_create(mdb_tgt_t *t, struct modctl *ctlp, char *name) 703 { 704 kmt_module_t *km = mdb_zalloc(sizeof (kmt_module_t), UM_SLEEP); 705 struct module *mod; 706 707 km->km_name = mdb_alloc(strlen(name) + 1, UM_SLEEP); 708 (void) strcpy(km->km_name, name); 709 710 bcopy(ctlp, &km->km_modctl, sizeof (struct modctl)); 711 712 if (mdb_tgt_vread(t, &km->km_module, sizeof (struct module), 713 (uintptr_t)km->km_modctl.mod_mp) != sizeof (struct module)) 714 goto create_module_cleanup; 715 mod = &km->km_module; 716 717 if (mod->symhdr != NULL && mod->strhdr != NULL && mod->symtbl != NULL && 718 mod->strings != NULL) { 719 mdb_gelf_ehdr_to_gehdr(&mod->hdr, &km->km_ehdr); 720 721 km->km_symtab = mdb_gelf_symtab_create_raw(&km->km_ehdr, 722 mod->symhdr, mod->symtbl, mod->strhdr, mod->strings, 723 MDB_TGT_SYMTAB); 724 725 km->km_symtab_va = mod->symtbl; 726 km->km_strtab_va = mod->strings; 727 728 if (mdb_tgt_vread(t, &km->km_symtab_hdr, sizeof (Shdr), 729 (uintptr_t)mod->symhdr) != sizeof (Shdr) || 730 mdb_tgt_vread(t, &km->km_strtab_hdr, sizeof (Shdr), 731 (uintptr_t)mod->strhdr) != sizeof (Shdr)) 732 goto create_module_cleanup; 733 } 734 735 /* 736 * We don't want everyone rooting around in the module structure, so we 737 * make copies of the interesting members. 
738 */ 739 km->km_text_va = (uintptr_t)mod->text; 740 km->km_text_size = mod->text_size; 741 km->km_data_va = (uintptr_t)mod->data; 742 km->km_data_size = mod->data_size; 743 km->km_bss_va = (uintptr_t)mod->bss; 744 km->km_bss_size = mod->bss_size; 745 km->km_ctf_va = mod->ctfdata; 746 km->km_ctf_size = mod->ctfsize; 747 748 if (mod->flags & KOBJ_PRIM) 749 km->km_flags |= KM_F_PRIMARY; 750 751 return (km); 752 753 create_module_cleanup: 754 warn("failed to read module %s\n", name); 755 kmt_mod_destroy(km); 756 return (NULL); 757 } 758 759 static void 760 kmt_mod_remove(kmt_data_t *kmt, kmt_module_t *km) 761 { 762 mdb_var_t *v = mdb_nv_lookup(&kmt->kmt_modules, km->km_name); 763 764 ASSERT(v != NULL); 765 766 mdb_dprintf(MDB_DBG_KMOD, "removing module %s\n", km->km_name); 767 768 mdb_list_delete(&kmt->kmt_modlist, km); 769 mdb_nv_remove(&kmt->kmt_modules, v); 770 kmt_mod_destroy(km); 771 } 772 773 static int 774 kmt_modlist_update_cb(struct modctl *modp, void *arg) 775 { 776 mdb_tgt_t *t = arg; 777 kmt_data_t *kmt = t->t_data; 778 kmt_module_t *km; 779 mdb_var_t *v; 780 char name[MAXNAMELEN]; 781 782 if (mdb_tgt_readstr(t, MDB_TGT_AS_VIRT, name, MAXNAMELEN, 783 (uintptr_t)modp->mod_modname) <= 0) { 784 warn("failed to read module name at %p", 785 (void *)modp->mod_modname); 786 } 787 788 /* We only care about modules that are actually loaded */ 789 if (!kmdb_kdi_mod_isloaded(modp)) 790 return (0); 791 792 /* 793 * Skip the modules we already know about and that haven't 794 * changed since last time we were here. 795 */ 796 if ((v = mdb_nv_lookup(&kmt->kmt_modules, name)) != NULL) { 797 km = MDB_NV_COOKIE(v); 798 799 if (kmdb_kdi_mod_haschanged(&km->km_modctl, &km->km_module, 800 modp, modp->mod_mp)) { 801 /* 802 * The module has changed since last we saw it. For 803 * safety, remove our old version, and treat it as a 804 * new module. 805 */ 806 mdb_dprintf(MDB_DBG_KMOD, "stutter module %s\n", name); 807 kmt_mod_remove(kmt, km); 808 } else { 809 km->km_seen = 1; 810 return (0); 811 } 812 } 813 814 mdb_dprintf(MDB_DBG_KMOD, "found new module %s\n", name); 815 816 if ((km = kmt_mod_create(t, modp, name)) != NULL) { 817 mdb_list_append(&kmt->kmt_modlist, km); 818 (void) mdb_nv_insert(&kmt->kmt_modules, name, NULL, 819 (uintptr_t)km, 0); 820 km->km_seen = 1; 821 } 822 823 return (0); 824 } 825 826 static void 827 kmt_modlist_update(mdb_tgt_t *t) 828 { 829 kmt_data_t *kmt = t->t_data; 830 kmt_module_t *km, *kmn; 831 832 if (kmdb_kdi_mod_iter(kmt_modlist_update_cb, t) < 0) { 833 warn("failed to complete update of kernel module list\n"); 834 return; 835 } 836 837 km = mdb_list_next(&kmt->kmt_modlist); 838 while (km != NULL) { 839 kmn = mdb_list_next(km); 840 841 if (km->km_seen == 1) { 842 /* Reset the mark for next time */ 843 km->km_seen = 0; 844 } else { 845 /* 846 * We didn't see it on the kernel's module list, so 847 * remove it from our view of the world. 848 */ 849 kmt_mod_remove(kmt, km); 850 } 851 852 km = kmn; 853 } 854 } 855 856 static void 857 kmt_periodic(mdb_tgt_t *t) 858 { 859 (void) mdb_tgt_status(t, &t->t_status); 860 } 861 862 int 863 kmt_lookup_by_addr(mdb_tgt_t *t, uintptr_t addr, uint_t flags, 864 char *buf, size_t nbytes, GElf_Sym *symp, mdb_syminfo_t *sip) 865 { 866 kmt_data_t *kmt = t->t_data; 867 kmt_module_t *km = mdb_list_next(&kmt->kmt_modlist); 868 kmt_module_t *sym_km = NULL; 869 kmt_module_t prmod; 870 GElf_Sym sym; 871 uint_t symid; 872 const char *name; 873 874 /* 875 * We look through the private symbols (if any), then through the module 876 * symbols. 
We can simplify the loop if we pretend the private symbols 877 * come from a module. 878 */ 879 if (mdb.m_prsym != NULL) { 880 bzero(&prmod, sizeof (kmt_module_t)); 881 prmod.km_name = "<<<prmod>>>"; 882 prmod.km_symtab = mdb.m_prsym; 883 prmod.km_list.ml_next = (mdb_list_t *)km; 884 km = &prmod; 885 } 886 887 /* Symbol resolution isn't available during initialization */ 888 if (kmdb_dpi_get_state(NULL) == DPI_STATE_INIT) 889 return (set_errno(EMDB_NOSYM)); 890 891 for (; km != NULL; km = mdb_list_next(km)) { 892 if (km != &prmod && !kmt->kmt_symavail) 893 continue; 894 895 if (km->km_symtab == NULL) 896 continue; 897 898 if (mdb_gelf_symtab_lookup_by_addr(km->km_symtab, addr, flags, 899 buf, nbytes, symp, &sip->sym_id) != 0 || 900 symp->st_value == 0) 901 continue; 902 903 if (flags & MDB_TGT_SYM_EXACT) { 904 sym_km = km; 905 goto found; 906 } 907 908 /* 909 * If this is the first match we've found, or if this symbol is 910 * closer to the specified address than the last one we found, 911 * use it. 912 */ 913 if (sym_km == NULL || mdb_gelf_sym_closer(symp, &sym, addr)) { 914 sym_km = km; 915 sym = *symp; 916 symid = sip->sym_id; 917 } 918 } 919 920 /* 921 * kmdb dmods are normal kernel modules, loaded by krtld as such. To 922 * avoid polluting modinfo, and to keep from confusing the module 923 * subsystem (many dmods have the same names as real kernel modules), 924 * kmdb keeps their modctls separate, and doesn't allow their loading 925 * to be broadcast via the krtld module load/unload mechanism. As a 926 * result, kmdb_kvm doesn't find out about them, and can't turn their 927 * addresses into symbols. This can be most inconvenient during 928 * debugger faults, as the dmod frames will show up without names. 929 * We weren't able to turn the requested address into a symbol, so we'll 930 * take a spin through the dmods, trying to match our address against 931 * their symbols. 932 */ 933 if (sym_km == NULL) { 934 return (kmdb_module_lookup_by_addr(addr, flags, buf, nbytes, 935 symp, sip)); 936 } 937 938 *symp = sym; 939 sip->sym_id = symid; 940 941 found: 942 /* 943 * Once we've found something, copy the final name into the caller's 944 * buffer and prefix it with the load object name if appropriate. 945 */ 946 name = mdb_gelf_sym_name(sym_km->km_symtab, symp); 947 948 if (sym_km == &prmod) { 949 if (buf != NULL) { 950 (void) strncpy(buf, name, nbytes); 951 buf[nbytes - 1] = '\0'; 952 } 953 sip->sym_table = MDB_TGT_PRVSYM; 954 } else { 955 if (buf != NULL) { 956 if (sym_km->km_flags & KM_F_PRIMARY) { 957 (void) strncpy(buf, name, nbytes); 958 buf[nbytes - 1] = '\0'; 959 } else { 960 (void) mdb_snprintf(buf, nbytes, "%s`%s", 961 sym_km->km_name, name); 962 } 963 } 964 sip->sym_table = MDB_TGT_SYMTAB; 965 } 966 967 return (0); 968 } 969 970 static int 971 kmt_lookup_by_name(mdb_tgt_t *t, const char *obj, const char *name, 972 GElf_Sym *symp, mdb_syminfo_t *sip) 973 { 974 kmt_data_t *kmt = t->t_data; 975 kmt_module_t *km; 976 mdb_var_t *v; 977 GElf_Sym sym; 978 uint_t symid; 979 int n; 980 981 if (!kmt->kmt_symavail) 982 return (set_errno(EMDB_NOSYM)); 983 984 switch ((uintptr_t)obj) { 985 case (uintptr_t)MDB_TGT_OBJ_EXEC: 986 case (uintptr_t)MDB_TGT_OBJ_EVERY: 987 km = mdb_list_next(&kmt->kmt_modlist); 988 n = mdb_nv_size(&kmt->kmt_modules); 989 break; 990 991 case (uintptr_t)MDB_TGT_OBJ_RTLD: 992 obj = kmt->kmt_rtld_name; 993 /*FALLTHROUGH*/ 994 995 default: 996 /* 997 * If this is a request for a dmod symbol, let kmdb_module 998 * handle it. 
999 */ 1000 if (obj != NULL && strncmp(obj, "DMOD`", 5) == 0) { 1001 return (kmdb_module_lookup_by_name(obj + 5, name, 1002 symp, sip)); 1003 } 1004 1005 if ((v = mdb_nv_lookup(&kmt->kmt_modules, obj)) == NULL) 1006 return (set_errno(EMDB_NOOBJ)); 1007 1008 km = mdb_nv_get_cookie(v); 1009 n = 1; 1010 } 1011 1012 /* 1013 * kmdb's kvm target is at a bit of a disadvantage compared to mdb's 1014 * kvm target when it comes to global symbol lookups. mdb has ksyms, 1015 * which hides pesky things like symbols that are undefined in unix, 1016 * but which are defined in genunix. We don't have such a facility - 1017 * we simply iterate through the modules, looking for a given symbol 1018 * in each. Unless we're careful, we'll return the undef in the 1019 * aforementioned case. 1020 */ 1021 for (; n > 0; n--, km = mdb_list_next(km)) { 1022 if (mdb_gelf_symtab_lookup_by_name(km->km_symtab, name, 1023 &sym, &symid) == 0 && sym.st_shndx != SHN_UNDEF) 1024 break; 1025 } 1026 1027 if (n == 0) 1028 return (set_errno(EMDB_NOSYM)); 1029 1030 found: 1031 bcopy(&sym, symp, sizeof (GElf_Sym)); 1032 sip->sym_id = symid; 1033 sip->sym_table = MDB_TGT_SYMTAB; 1034 1035 return (0); 1036 } 1037 1038 static int 1039 kmt_symtab_func(void *data, const GElf_Sym *sym, const char *name, uint_t id) 1040 { 1041 kmt_symarg_t *arg = data; 1042 1043 if (mdb_tgt_sym_match(sym, arg->sym_type)) { 1044 arg->sym_info.sym_id = id; 1045 1046 return (arg->sym_cb(arg->sym_data, sym, name, &arg->sym_info, 1047 arg->sym_obj)); 1048 } 1049 1050 return (0); 1051 } 1052 1053 static void 1054 kmt_symtab_iter(mdb_gelf_symtab_t *gst, uint_t type, const char *obj, 1055 mdb_tgt_sym_f *cb, void *p) 1056 { 1057 kmt_symarg_t arg; 1058 1059 arg.sym_cb = cb; 1060 arg.sym_data = p; 1061 arg.sym_type = type; 1062 arg.sym_info.sym_table = gst->gst_tabid; 1063 arg.sym_obj = obj; 1064 1065 mdb_gelf_symtab_iter(gst, kmt_symtab_func, &arg); 1066 } 1067 1068 static int 1069 kmt_symbol_iter(mdb_tgt_t *t, const char *obj, uint_t which, uint_t type, 1070 mdb_tgt_sym_f *cb, void *data) 1071 { 1072 kmt_data_t *kmt = t->t_data; 1073 kmt_module_t *km; 1074 1075 mdb_gelf_symtab_t *symtab = NULL; 1076 mdb_var_t *v; 1077 1078 if (which == MDB_TGT_DYNSYM) 1079 return (set_errno(EMDB_TGTNOTSUP)); 1080 1081 switch ((uintptr_t)obj) { 1082 case (uintptr_t)MDB_TGT_OBJ_EXEC: 1083 case (uintptr_t)MDB_TGT_OBJ_EVERY: 1084 mdb_nv_rewind(&kmt->kmt_modules); 1085 while ((v = mdb_nv_advance(&kmt->kmt_modules)) != NULL) { 1086 km = mdb_nv_get_cookie(v); 1087 1088 if (km->km_symtab != NULL) { 1089 kmt_symtab_iter(km->km_symtab, type, 1090 km->km_name, cb, data); 1091 } 1092 } 1093 return (0); 1094 1095 case (uintptr_t)MDB_TGT_OBJ_RTLD: 1096 obj = kmt->kmt_rtld_name; 1097 /*FALLTHROUGH*/ 1098 1099 default: 1100 if (strncmp(obj, "DMOD`", 5) == 0) { 1101 return (kmdb_module_symbol_iter(obj + 5, type, 1102 cb, data)); 1103 } 1104 1105 if ((v = mdb_nv_lookup(&kmt->kmt_modules, obj)) == NULL) 1106 return (set_errno(EMDB_NOOBJ)); 1107 km = mdb_nv_get_cookie(v); 1108 1109 symtab = km->km_symtab; 1110 } 1111 1112 if (symtab != NULL) 1113 kmt_symtab_iter(symtab, type, obj, cb, data); 1114 1115 return (0); 1116 } 1117 1118 static int 1119 kmt_mapping_walk(uintptr_t addr, const void *data, kmt_maparg_t *marg) 1120 { 1121 /* 1122 * This is a bit sketchy but avoids problematic compilation of this 1123 * target against the current VM implementation. 
Now that we have 1124 * vmem, we can make this less broken and more informative by changing 1125 * this code to invoke the vmem walker in the near future. 1126 */ 1127 const struct kmt_seg { 1128 caddr_t s_base; 1129 size_t s_size; 1130 } *segp = (const struct kmt_seg *)data; 1131 1132 mdb_map_t map; 1133 GElf_Sym sym; 1134 mdb_syminfo_t info; 1135 1136 map.map_base = (uintptr_t)segp->s_base; 1137 map.map_size = segp->s_size; 1138 map.map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X; 1139 1140 if (kmt_lookup_by_addr(marg->map_target, addr, MDB_TGT_SYM_EXACT, 1141 map.map_name, MDB_TGT_MAPSZ, &sym, &info) == -1) { 1142 1143 (void) mdb_iob_snprintf(map.map_name, MDB_TGT_MAPSZ, 1144 "%lr", addr); 1145 } 1146 1147 return (marg->map_cb(marg->map_data, &map, map.map_name)); 1148 } 1149 1150 static int 1151 kmt_mapping_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private) 1152 { 1153 kmt_maparg_t m; 1154 uintptr_t kas; 1155 1156 m.map_target = t; 1157 m.map_cb = func; 1158 m.map_data = private; 1159 1160 if ((kas = kmt_read_kas(t)) == 0) 1161 return (-1); /* errno is set for us */ 1162 1163 return (mdb_pwalk("seg", (mdb_walk_cb_t)kmt_mapping_walk, &m, kas)); 1164 } 1165 1166 static const mdb_map_t * 1167 kmt_mod_to_map(kmt_module_t *km, mdb_map_t *map) 1168 { 1169 (void) strncpy(map->map_name, km->km_name, MDB_TGT_MAPSZ); 1170 map->map_name[MDB_TGT_MAPSZ - 1] = '\0'; 1171 map->map_base = km->km_text_va; 1172 map->map_size = km->km_text_size; 1173 map->map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X; 1174 1175 return (map); 1176 } 1177 1178 static int 1179 kmt_object_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private) 1180 { 1181 kmt_data_t *kmt = t->t_data; 1182 kmt_module_t *km; 1183 mdb_map_t m; 1184 1185 for (km = mdb_list_next(&kmt->kmt_modlist); km != NULL; 1186 km = mdb_list_next(km)) { 1187 if (func(private, kmt_mod_to_map(km, &m), km->km_name) == -1) 1188 break; 1189 } 1190 1191 return (0); 1192 } 1193 1194 static const mdb_map_t * 1195 kmt_addr_to_map(mdb_tgt_t *t, uintptr_t addr) 1196 { 1197 kmt_data_t *kmt = t->t_data; 1198 kmt_module_t *km; 1199 1200 for (km = mdb_list_next(&kmt->kmt_modlist); km != NULL; 1201 km = mdb_list_next(km)) { 1202 if (addr - km->km_text_va < km->km_text_size || 1203 addr - km->km_data_va < km->km_data_size || 1204 addr - km->km_bss_va < km->km_bss_size) 1205 return (kmt_mod_to_map(km, &kmt->kmt_map)); 1206 } 1207 1208 (void) set_errno(EMDB_NOMAP); 1209 return (NULL); 1210 } 1211 1212 static kmt_module_t * 1213 kmt_module_by_name(kmt_data_t *kmt, const char *name) 1214 { 1215 kmt_module_t *km; 1216 1217 for (km = mdb_list_next(&kmt->kmt_modlist); km != NULL; 1218 km = mdb_list_next(km)) { 1219 if (strcmp(name, km->km_name) == 0) 1220 return (km); 1221 } 1222 1223 return (NULL); 1224 } 1225 1226 static const mdb_map_t * 1227 kmt_name_to_map(mdb_tgt_t *t, const char *name) 1228 { 1229 kmt_data_t *kmt = t->t_data; 1230 kmt_module_t *km; 1231 mdb_map_t m; 1232 1233 /* 1234 * If name is MDB_TGT_OBJ_EXEC, return the first module on the list, 1235 * which will be unix since we keep kmt_modlist in load order. 
1236 */ 1237 if (name == MDB_TGT_OBJ_EXEC) { 1238 return (kmt_mod_to_map(mdb_list_next(&kmt->kmt_modlist), 1239 &m)); 1240 } 1241 1242 if (name == MDB_TGT_OBJ_RTLD) 1243 name = kmt->kmt_rtld_name; 1244 1245 if ((km = kmt_module_by_name(kmt, name)) != NULL) 1246 return (kmt_mod_to_map(km, &m)); 1247 1248 (void) set_errno(EMDB_NOOBJ); 1249 return (NULL); 1250 } 1251 1252 static ctf_file_t * 1253 kmt_load_ctfdata(mdb_tgt_t *t, kmt_module_t *km) 1254 { 1255 kmt_data_t *kmt = t->t_data; 1256 int err; 1257 1258 if (km->km_ctfp != NULL) 1259 return (km->km_ctfp); 1260 1261 if (km->km_ctf_va == NULL || km->km_symtab == NULL) { 1262 (void) set_errno(EMDB_NOCTF); 1263 return (NULL); 1264 } 1265 1266 if ((km->km_ctfp = mdb_ctf_bufopen(km->km_ctf_va, km->km_ctf_size, 1267 km->km_symtab_va, &km->km_symtab_hdr, km->km_strtab_va, 1268 &km->km_strtab_hdr, &err)) == NULL) { 1269 (void) set_errno(ctf_to_errno(err)); 1270 return (NULL); 1271 } 1272 1273 mdb_dprintf(MDB_DBG_KMOD, "loaded %lu bytes of CTF data for %s\n", 1274 (ulong_t)km->km_ctf_size, km->km_name); 1275 1276 if (ctf_parent_name(km->km_ctfp) != NULL) { 1277 mdb_var_t *v; 1278 1279 if ((v = mdb_nv_lookup(&kmt->kmt_modules, 1280 ctf_parent_name(km->km_ctfp))) != NULL) { 1281 kmt_module_t *pm = mdb_nv_get_cookie(v); 1282 1283 if (pm->km_ctfp == NULL) 1284 (void) kmt_load_ctfdata(t, pm); 1285 1286 if (pm->km_ctfp != NULL && ctf_import(km->km_ctfp, 1287 pm->km_ctfp) == CTF_ERR) { 1288 warn("failed to import parent types into " 1289 "%s: %s\n", km->km_name, 1290 ctf_errmsg(ctf_errno(km->km_ctfp))); 1291 } 1292 } else { 1293 warn("failed to load CTF data for %s - parent %s not " 1294 "loaded\n", km->km_name, 1295 ctf_parent_name(km->km_ctfp)); 1296 } 1297 } 1298 1299 return (km->km_ctfp); 1300 } 1301 1302 ctf_file_t * 1303 kmt_addr_to_ctf(mdb_tgt_t *t, uintptr_t addr) 1304 { 1305 kmt_data_t *kmt = t->t_data; 1306 kmt_module_t *km; 1307 1308 for (km = mdb_list_next(&kmt->kmt_modlist); km != NULL; 1309 km = mdb_list_next(km)) { 1310 if (addr - km->km_text_va < km->km_text_size || 1311 addr - km->km_data_va < km->km_data_size || 1312 addr - km->km_bss_va < km->km_bss_size) 1313 return (kmt_load_ctfdata(t, km)); 1314 } 1315 1316 return (kmdb_module_addr_to_ctf(addr)); 1317 } 1318 1319 ctf_file_t * 1320 kmt_name_to_ctf(mdb_tgt_t *t, const char *name) 1321 { 1322 kmt_data_t *kt = t->t_data; 1323 kmt_module_t *km; 1324 1325 if (name == MDB_TGT_OBJ_EXEC) { 1326 name = KMT_CTFPARENT; 1327 } else if (name == MDB_TGT_OBJ_RTLD) { 1328 name = kt->kmt_rtld_name; 1329 } else if (strncmp(name, "DMOD`", 5) == 0) { 1330 /* Request for CTF data for a DMOD symbol */ 1331 return (kmdb_module_name_to_ctf(name + 5)); 1332 } 1333 1334 if ((km = kmt_module_by_name(kt, name)) != NULL) 1335 return (kmt_load_ctfdata(t, km)); 1336 1337 (void) set_errno(EMDB_NOOBJ); 1338 return (NULL); 1339 } 1340 1341 /*ARGSUSED*/ 1342 static int 1343 kmt_status(mdb_tgt_t *t, mdb_tgt_status_t *tsp) 1344 { 1345 int state; 1346 1347 bzero(tsp, sizeof (mdb_tgt_status_t)); 1348 1349 switch ((state = kmdb_dpi_get_state(NULL))) { 1350 case DPI_STATE_INIT: 1351 tsp->st_state = MDB_TGT_RUNNING; 1352 tsp->st_pc = 0; 1353 break; 1354 1355 case DPI_STATE_STOPPED: 1356 tsp->st_state = MDB_TGT_STOPPED; 1357 1358 (void) kmdb_dpi_get_register("pc", &tsp->st_pc); 1359 break; 1360 1361 case DPI_STATE_FAULTED: 1362 tsp->st_state = MDB_TGT_STOPPED; 1363 1364 (void) kmdb_dpi_get_register("pc", &tsp->st_pc); 1365 1366 tsp->st_flags |= MDB_TGT_ISTOP; 1367 break; 1368 1369 case DPI_STATE_LOST: 1370 tsp->st_state = 
MDB_TGT_LOST; 1371 1372 (void) kmdb_dpi_get_register("pc", &tsp->st_pc); 1373 break; 1374 } 1375 1376 mdb_dprintf(MDB_DBG_KMOD, "kmt_status, dpi: %d tsp: %d, pc = %p %A\n", 1377 state, tsp->st_state, (void *)tsp->st_pc, tsp->st_pc); 1378 1379 return (0); 1380 } 1381 1382 /* 1383 * Invoked when kmt_defbp_enter_debugger is called, this routine activates and 1384 * deactivates deferred breakpoints in response to module load and unload 1385 * events. 1386 */ 1387 /*ARGSUSED*/ 1388 static void 1389 kmt_defbp_event(mdb_tgt_t *t, int vid, void *private) 1390 { 1391 if (kmt_defbp_modchg_isload) { 1392 if (!mdb_tgt_sespec_activate_all(t) && 1393 (mdb.m_flags & MDB_FL_BPTNOSYMSTOP)) { 1394 /* 1395 * We weren't able to activate the breakpoints. 1396 * If so requested, we'll return without calling 1397 * continue, thus throwing the user into the debugger. 1398 */ 1399 return; 1400 } 1401 1402 } else { 1403 mdb_sespec_t *sep, *nsep; 1404 const mdb_map_t *map, *bpmap; 1405 mdb_map_t modmap; 1406 1407 if ((map = kmt_addr_to_map(t, 1408 (uintptr_t)kmt_defbp_modchg_modctl->mod_text)) == NULL) { 1409 warn("module unload notification for unknown module %s", 1410 kmt_defbp_modchg_modctl->mod_modname); 1411 return; /* drop into the debugger */ 1412 } 1413 1414 bcopy(map, &modmap, sizeof (mdb_map_t)); 1415 1416 for (sep = mdb_list_next(&t->t_active); sep; sep = nsep) { 1417 nsep = mdb_list_next(sep); 1418 1419 if (sep->se_ops == &kmt_brkpt_ops) { 1420 kmt_brkpt_t *kb = sep->se_data; 1421 1422 if ((bpmap = kmt_addr_to_map(t, 1423 kb->kb_addr)) == NULL || 1424 (bpmap->map_base == modmap.map_base && 1425 bpmap->map_size == modmap.map_size)) { 1426 mdb_tgt_sespec_idle_one(t, sep, 1427 EMDB_NOMAP); 1428 } 1429 } 1430 } 1431 } 1432 1433 (void) mdb_tgt_continue(t, NULL); 1434 } 1435 1436 static void 1437 kmt_defbp_enter_debugger(void) 1438 { 1439 /* 1440 * The debugger places a breakpoint here. We can't have a simple 1441 * nop function here, because GCC knows much more than we do, and 1442 * will optimize away the call to it. 1443 */ 1444 (void) get_fp(); 1445 } 1446 1447 /* 1448 * This routine is called while the kernel is running. It attempts to determine 1449 * whether any deferred breakpoints exist for the module being changed (loaded 1450 * or unloaded). If any such breakpoints exist, the debugger will be entered to 1451 * process them. 1452 */ 1453 static void 1454 kmt_defbp_modchg(struct modctl *mctl, int isload) 1455 { 1456 kmt_defbp_t *dbp; 1457 1458 kmt_defbp_lock = 1; 1459 1460 for (dbp = mdb_list_next(&kmt_defbp_list); dbp; 1461 dbp = mdb_list_next(dbp)) { 1462 if (!dbp->dbp_ref) 1463 continue; 1464 1465 if (strcmp(mctl->mod_modname, dbp->dbp_objname) == 0) { 1466 /* 1467 * Activate the breakpoint 1468 */ 1469 kmt_defbp_modchg_isload = isload; 1470 kmt_defbp_modchg_modctl = mctl; 1471 1472 kmt_defbp_enter_debugger(); 1473 break; 1474 } 1475 } 1476 1477 kmt_defbp_lock = 0; 1478 } 1479 1480 /*ARGSUSED*/ 1481 static int 1482 kmt_continue(mdb_tgt_t *t, mdb_tgt_status_t *tsp) 1483 { 1484 int n; 1485 1486 kmdb_dpi_resume(); 1487 1488 /* 1489 * The order of the following two calls is important. If there are 1490 * load acks on the work queue, we'll initialize the dmods they 1491 * represent. This will involve a call to _mdb_init, which may very 1492 * well result in a symbol lookup. If we haven't resynced our view 1493 * of symbols with the current state of the world, this lookup could 1494 * end very badly. We therefore make sure to sync before processing 1495 * the work queue. 
1496 */ 1497 kmt_sync(t); 1498 kmdb_dpi_process_work_queue(); 1499 1500 if (kmdb_kdi_get_unload_request()) 1501 t->t_flags |= MDB_TGT_F_UNLOAD; 1502 1503 (void) mdb_tgt_status(t, &t->t_status); 1504 1505 if ((n = kmt_dmod_status(NULL, KMDB_MC_STATE_LOADING) + 1506 kmt_dmod_status(NULL, KMDB_MC_STATE_UNLOADING)) != 0) { 1507 mdb_warn("%d dmod load%c/unload%c pending\n", n, 1508 "s"[n == 1], "s"[n == 1]); 1509 } 1510 1511 return (0); 1512 } 1513 1514 /*ARGSUSED*/ 1515 static int 1516 kmt_step(mdb_tgt_t *t, mdb_tgt_status_t *tsp) 1517 { 1518 int rc; 1519 1520 if ((rc = kmdb_dpi_step()) == 0) 1521 (void) mdb_tgt_status(t, &t->t_status); 1522 1523 return (rc); 1524 } 1525 1526 static int 1527 kmt_defbp_activate(mdb_tgt_t *t) 1528 { 1529 kmdb_dpi_modchg_register(kmt_defbp_modchg); 1530 1531 /* 1532 * The routines that add and arm breakpoints will check for the proper 1533 * DTrace state, but they'll just put this breakpoint on the idle list 1534 * if DTrace is active. It'll correctly move to the active list when 1535 * DTrace deactivates, but that's insufficient for our purposes -- we 1536 * need to do extra processing at that point. We won't get to do said 1537 * processing with with a normal idle->active transition, so we just 1538 * won't add it add it until we're sure that it'll stick. 1539 */ 1540 1541 if (kmdb_kdi_dtrace_get_state() == KDI_DTSTATE_DTRACE_ACTIVE) 1542 return (set_errno(EMDB_DTACTIVE)); 1543 1544 kmt_defbp_bpspec = mdb_tgt_add_vbrkpt(t, 1545 (uintptr_t)kmt_defbp_enter_debugger, 1546 MDB_TGT_SPEC_HIDDEN, kmt_defbp_event, NULL); 1547 1548 return (0); 1549 } 1550 1551 static void 1552 kmt_defbp_deactivate(mdb_tgt_t *t) 1553 { 1554 kmdb_dpi_modchg_cancel(); 1555 1556 if (kmt_defbp_bpspec != 0) { 1557 if (t != NULL) 1558 (void) mdb_tgt_vespec_delete(t, kmt_defbp_bpspec); 1559 1560 kmt_defbp_bpspec = 0; 1561 } 1562 } 1563 1564 static kmt_defbp_t * 1565 kmt_defbp_create(mdb_tgt_t *t, const char *objname, const char *symname) 1566 { 1567 kmt_defbp_t *dbp = mdb_alloc(sizeof (kmt_defbp_t), UM_SLEEP); 1568 1569 mdb_dprintf(MDB_DBG_KMOD, "defbp_create %s`%s\n", objname, symname); 1570 1571 dbp->dbp_objname = strdup(objname); 1572 dbp->dbp_symname = strdup(symname); 1573 dbp->dbp_ref = 1; 1574 1575 kmt_defbp_num++; 1576 1577 if (kmt_defbp_num == 1 || kmt_defbp_bpspec == 0) { 1578 if (kmt_defbp_activate(t) < 0) 1579 warn("failed to activate deferred breakpoints"); 1580 } 1581 1582 mdb_list_append(&kmt_defbp_list, dbp); 1583 1584 return (dbp); 1585 } 1586 1587 static void 1588 kmt_defbp_destroy(kmt_defbp_t *dbp) 1589 { 1590 mdb_dprintf(MDB_DBG_KMOD, "defbp_destroy %s`%s\n", dbp->dbp_objname, 1591 dbp->dbp_symname); 1592 1593 mdb_list_delete(&kmt_defbp_list, dbp); 1594 1595 strfree(dbp->dbp_objname); 1596 strfree(dbp->dbp_symname); 1597 mdb_free(dbp, sizeof (kmt_defbp_t)); 1598 } 1599 1600 static void 1601 kmt_defbp_prune_common(int all) 1602 { 1603 kmt_defbp_t *dbp, *ndbp; 1604 1605 /* We can't remove items from the list while the driver is using it. 
*/ 1606 if (kmt_defbp_lock) 1607 return; 1608 1609 for (dbp = mdb_list_next(&kmt_defbp_list); dbp != NULL; dbp = ndbp) { 1610 ndbp = mdb_list_next(dbp); 1611 1612 if (!all && dbp->dbp_ref) 1613 continue; 1614 1615 kmt_defbp_destroy(dbp); 1616 } 1617 } 1618 1619 static void 1620 kmt_defbp_prune(void) 1621 { 1622 kmt_defbp_prune_common(0); 1623 } 1624 1625 static void 1626 kmt_defbp_destroy_all(void) 1627 { 1628 kmt_defbp_prune_common(1); 1629 } 1630 1631 static void 1632 kmt_defbp_delete(mdb_tgt_t *t, kmt_defbp_t *dbp) 1633 { 1634 dbp->dbp_ref = 0; 1635 1636 ASSERT(kmt_defbp_num > 0); 1637 kmt_defbp_num--; 1638 1639 if (kmt_defbp_num == 0) 1640 kmt_defbp_deactivate(t); 1641 1642 kmt_defbp_prune(); 1643 } 1644 1645 static int 1646 kmt_brkpt_ctor(mdb_tgt_t *t, mdb_sespec_t *sep, void *args) 1647 { 1648 mdb_tgt_status_t tsp; 1649 kmt_bparg_t *ka = args; 1650 kmt_brkpt_t *kb; 1651 GElf_Sym s; 1652 mdb_instr_t instr; 1653 1654 (void) mdb_tgt_status(t, &tsp); 1655 if (tsp.st_state != MDB_TGT_RUNNING && tsp.st_state != MDB_TGT_STOPPED) 1656 return (set_errno(EMDB_NOPROC)); 1657 1658 if (ka->ka_symbol != NULL) { 1659 if (mdb_tgt_lookup_by_scope(t, ka->ka_symbol, &s, NULL) == -1) { 1660 if (errno != EMDB_NOOBJ && !(errno == EMDB_NOSYM && 1661 !(mdb.m_flags & MDB_FL_BPTNOSYMSTOP))) { 1662 warn("breakpoint %s activation failed", 1663 ka->ka_symbol); 1664 } 1665 return (-1); /* errno is set for us */ 1666 } 1667 1668 ka->ka_addr = (uintptr_t)s.st_value; 1669 } 1670 1671 #ifdef __sparc 1672 if (ka->ka_addr & 3) 1673 return (set_errno(EMDB_BPALIGN)); 1674 #endif 1675 1676 if (mdb_vread(&instr, sizeof (instr), ka->ka_addr) != sizeof (instr)) 1677 return (-1); /* errno is set for us */ 1678 1679 if (kmdb_kdi_dtrace_get_state() == KDI_DTSTATE_DTRACE_ACTIVE) 1680 warn("breakpoint will not arm until DTrace is inactive\n"); 1681 1682 kb = mdb_zalloc(sizeof (kmt_brkpt_t), UM_SLEEP); 1683 kb->kb_addr = ka->ka_addr; 1684 sep->se_data = kb; 1685 1686 return (0); 1687 } 1688 1689 /*ARGSUSED*/ 1690 static void 1691 kmt_brkpt_dtor(mdb_tgt_t *t, mdb_sespec_t *sep) 1692 { 1693 mdb_free(sep->se_data, sizeof (kmt_brkpt_t)); 1694 } 1695 1696 /*ARGSUSED*/ 1697 static char * 1698 kmt_brkpt_info(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_vespec_t *vep, 1699 mdb_tgt_spec_desc_t *sp, char *buf, size_t nbytes) 1700 { 1701 uintptr_t addr = 0; 1702 1703 if (vep != NULL) { 1704 kmt_bparg_t *ka = vep->ve_args; 1705 1706 if (ka->ka_symbol != NULL) { 1707 (void) mdb_iob_snprintf(buf, nbytes, "stop at %s", 1708 ka->ka_symbol); 1709 } else { 1710 (void) mdb_iob_snprintf(buf, nbytes, "stop at %a", 1711 ka->ka_addr); 1712 addr = ka->ka_addr; 1713 } 1714 1715 } else { 1716 addr = ((kmt_brkpt_t *)sep->se_data)->kb_addr; 1717 (void) mdb_iob_snprintf(buf, nbytes, "stop at %a", addr); 1718 } 1719 1720 sp->spec_base = addr; 1721 sp->spec_size = sizeof (mdb_instr_t); 1722 1723 return (buf); 1724 } 1725 1726 static int 1727 kmt_brkpt_secmp(mdb_tgt_t *t, mdb_sespec_t *sep, void *args) 1728 { 1729 kmt_brkpt_t *kb = sep->se_data; 1730 kmt_bparg_t *ka = args; 1731 GElf_Sym sym; 1732 1733 if (ka->ka_symbol != NULL) { 1734 return (mdb_tgt_lookup_by_scope(t, ka->ka_symbol, 1735 &sym, NULL) == 0 && sym.st_value == kb->kb_addr); 1736 } 1737 1738 return (ka->ka_addr == kb->kb_addr); 1739 } 1740 1741 /*ARGSUSED*/ 1742 static int 1743 kmt_brkpt_vecmp(mdb_tgt_t *t, mdb_vespec_t *vep, void *args) 1744 { 1745 kmt_bparg_t *ka1 = vep->ve_args; 1746 kmt_bparg_t *ka2 = args; 1747 1748 if (ka1->ka_symbol != NULL && ka2->ka_symbol != NULL) 1749 return 
(strcmp(ka1->ka_symbol, ka2->ka_symbol) == 0); 1750 1751 if (ka1->ka_symbol == NULL && ka2->ka_symbol == NULL) 1752 return (ka1->ka_addr == ka2->ka_addr); 1753 1754 return (0); /* fail if one is symbolic, other is an explicit address */ 1755 } 1756 1757 static int 1758 kmt_brkpt_arm(mdb_tgt_t *t, mdb_sespec_t *sep) 1759 { 1760 kmt_data_t *kmt = t->t_data; 1761 kmt_brkpt_t *kb = sep->se_data; 1762 int rv; 1763 1764 if (kmdb_kdi_dtrace_get_state() == KDI_DTSTATE_DTRACE_ACTIVE) 1765 return (set_errno(EMDB_DTACTIVE)); 1766 1767 if ((rv = kmdb_dpi_brkpt_arm(kb->kb_addr, &kb->kb_oinstr)) != 0) 1768 return (rv); 1769 1770 if (kmt->kmt_narmedbpts++ == 0) 1771 (void) kmdb_kdi_dtrace_set(KDI_DTSET_KMDB_BPT_ACTIVATE); 1772 1773 return (0); 1774 } 1775 1776 static int 1777 kmt_brkpt_disarm(mdb_tgt_t *t, mdb_sespec_t *sep) 1778 { 1779 kmt_data_t *kmt = t->t_data; 1780 kmt_brkpt_t *kb = sep->se_data; 1781 int rv; 1782 1783 ASSERT(kmdb_kdi_dtrace_get_state() == KDI_DTSTATE_KMDB_BPT_ACTIVE); 1784 1785 if ((rv = kmdb_dpi_brkpt_disarm(kb->kb_addr, kb->kb_oinstr)) != 0) 1786 return (rv); 1787 1788 if (--kmt->kmt_narmedbpts == 0) 1789 (void) kmdb_kdi_dtrace_set(KDI_DTSET_KMDB_BPT_DEACTIVATE); 1790 1791 return (0); 1792 } 1793 1794 /* 1795 * Determine whether the specified sespec is an armed watchpoint that overlaps 1796 * with the given breakpoint and has the given flags set. We use this to find 1797 * conflicts with breakpoints, below. 1798 */ 1799 static int 1800 kmt_wp_overlap(mdb_sespec_t *sep, kmt_brkpt_t *kb, int flags) 1801 { 1802 const kmdb_wapt_t *wp = sep->se_data; 1803 1804 return (sep->se_state == MDB_TGT_SPEC_ARMED && 1805 sep->se_ops == &kmt_wapt_ops && (wp->wp_wflags & flags) && 1806 kb->kb_addr - wp->wp_addr < wp->wp_size); 1807 } 1808 1809 /* 1810 * We step over breakpoints using our single-stepper. If a conflicting 1811 * watchpoint is present, we must temporarily remove it before stepping over the 1812 * breakpoint so we don't immediately re-trigger the watchpoint. We know the 1813 * watchpoint has already triggered on our trap instruction as part of fetching 1814 * it. Before we return, we must re-install any disabled watchpoints. 
1815 */ 1816 static int 1817 kmt_brkpt_cont(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp) 1818 { 1819 kmt_brkpt_t *kb = sep->se_data; 1820 int status = -1; 1821 int error; 1822 1823 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) { 1824 if (kmt_wp_overlap(sep, kb, MDB_TGT_WA_X)) 1825 (void) kmdb_dpi_wapt_disarm(sep->se_data); 1826 } 1827 1828 if (kmdb_dpi_brkpt_disarm(kb->kb_addr, kb->kb_oinstr) == 0 && 1829 kmt_step(t, tsp) == 0) 1830 status = kmt_status(t, tsp); 1831 1832 error = errno; /* save errno from disarm, step, or status */ 1833 1834 for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) { 1835 if (kmt_wp_overlap(sep, kb, MDB_TGT_WA_X)) 1836 kmdb_dpi_wapt_arm(sep->se_data); 1837 } 1838 1839 (void) set_errno(error); 1840 return (status); 1841 } 1842 1843 /*ARGSUSED*/ 1844 static int 1845 kmt_brkpt_match(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp) 1846 { 1847 kmt_brkpt_t *kb = sep->se_data; 1848 int state, why; 1849 kreg_t pc; 1850 1851 state = kmdb_dpi_get_state(&why); 1852 (void) kmdb_dpi_get_register("pc", &pc); 1853 1854 return (state == DPI_STATE_FAULTED && why == DPI_STATE_WHY_BKPT && 1855 pc == kb->kb_addr); 1856 } 1857 1858 static const mdb_se_ops_t kmt_brkpt_ops = { 1859 .se_ctor = kmt_brkpt_ctor, 1860 .se_dtor = kmt_brkpt_dtor, 1861 .se_info = kmt_brkpt_info, 1862 .se_secmp = kmt_brkpt_secmp, 1863 .se_vecmp = kmt_brkpt_vecmp, 1864 .se_arm = kmt_brkpt_arm, 1865 .se_disarm = kmt_brkpt_disarm, 1866 .se_cont = kmt_brkpt_cont, 1867 .se_match = kmt_brkpt_match, 1868 }; 1869 1870 static int 1871 kmt_wapt_ctor(mdb_tgt_t *t, mdb_sespec_t *sep, void *args) 1872 { 1873 mdb_tgt_status_t tsp; 1874 kmdb_wapt_t *vwp = args; 1875 kmdb_wapt_t *swp; 1876 1877 (void) mdb_tgt_status(t, &tsp); 1878 if (tsp.st_state != MDB_TGT_RUNNING && tsp.st_state != MDB_TGT_STOPPED) 1879 return (set_errno(EMDB_NOPROC)); 1880 1881 swp = mdb_alloc(sizeof (kmdb_wapt_t), UM_SLEEP); 1882 bcopy(vwp, swp, sizeof (kmdb_wapt_t)); 1883 1884 if (kmdb_dpi_wapt_reserve(swp) < 0) { 1885 mdb_free(swp, sizeof (kmdb_wapt_t)); 1886 return (-1); /* errno is set for us */ 1887 } 1888 1889 sep->se_data = swp; 1890 1891 return (0); 1892 } 1893 1894 /*ARGSUSED*/ 1895 static void 1896 kmt_wapt_dtor(mdb_tgt_t *t, mdb_sespec_t *sep) 1897 { 1898 kmdb_wapt_t *wp = sep->se_data; 1899 1900 kmdb_dpi_wapt_release(wp); 1901 mdb_free(wp, sizeof (kmdb_wapt_t)); 1902 } 1903 1904 /*ARGSUSED*/ 1905 static char * 1906 kmt_wapt_info(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_vespec_t *vep, 1907 mdb_tgt_spec_desc_t *sp, char *buf, size_t nbytes) 1908 { 1909 kmdb_wapt_t *wp = vep != NULL ? 
vep->ve_args : sep->se_data; 1910 const char *fmt; 1911 char desc[24]; 1912 1913 ASSERT(wp->wp_wflags != 0); 1914 desc[0] = '\0'; 1915 1916 switch (wp->wp_wflags) { 1917 case MDB_TGT_WA_R: 1918 (void) strcat(desc, "/read"); 1919 break; 1920 case MDB_TGT_WA_W: 1921 (void) strcat(desc, "/write"); 1922 break; 1923 case MDB_TGT_WA_X: 1924 (void) strcat(desc, "/exec"); 1925 break; 1926 default: 1927 if (wp->wp_wflags & MDB_TGT_WA_R) 1928 (void) strcat(desc, "/r"); 1929 if (wp->wp_wflags & MDB_TGT_WA_W) 1930 (void) strcat(desc, "/w"); 1931 if (wp->wp_wflags & MDB_TGT_WA_X) 1932 (void) strcat(desc, "/x"); 1933 } 1934 1935 switch (wp->wp_type) { 1936 case DPI_WAPT_TYPE_PHYS: 1937 fmt = "stop on %s of phys [%p, %p)"; 1938 break; 1939 1940 case DPI_WAPT_TYPE_VIRT: 1941 fmt = "stop on %s of [%la, %la)"; 1942 break; 1943 1944 case DPI_WAPT_TYPE_IO: 1945 if (wp->wp_size == 1) 1946 fmt = "stop on %s of I/O port %p"; 1947 else 1948 fmt = "stop on %s of I/O port [%p, %p)"; 1949 break; 1950 1951 default: 1952 fmt = "stop on %s of unknown [%p, %p]"; 1953 break; 1954 } 1955 1956 (void) mdb_iob_snprintf(buf, nbytes, fmt, desc + 1, wp->wp_addr, 1957 wp->wp_addr + wp->wp_size); 1958 1959 sp->spec_base = wp->wp_addr; 1960 sp->spec_size = wp->wp_size; 1961 1962 return (buf); 1963 } 1964 1965 /*ARGSUSED*/ 1966 static int 1967 kmt_wapt_secmp(mdb_tgt_t *t, mdb_sespec_t *sep, void *args) 1968 { 1969 kmdb_wapt_t *wp1 = sep->se_data; 1970 kmdb_wapt_t *wp2 = args; 1971 1972 return (wp1->wp_addr == wp2->wp_addr && wp1->wp_size == wp2->wp_size && 1973 wp1->wp_wflags == wp2->wp_wflags); 1974 } 1975 1976 /*ARGSUSED*/ 1977 static int 1978 kmt_wapt_vecmp(mdb_tgt_t *t, mdb_vespec_t *vep, void *args) 1979 { 1980 kmdb_wapt_t *wp1 = vep->ve_args; 1981 kmdb_wapt_t *wp2 = args; 1982 1983 return (wp1->wp_addr == wp2->wp_addr && wp1->wp_size == wp2->wp_size && 1984 wp1->wp_wflags == wp2->wp_wflags); 1985 } 1986 1987 /*ARGSUSED*/ 1988 static int 1989 kmt_wapt_arm(mdb_tgt_t *t, mdb_sespec_t *sep) 1990 { 1991 kmdb_dpi_wapt_arm(sep->se_data); 1992 1993 return (0); 1994 } 1995 1996 /*ARGSUSED*/ 1997 static int 1998 kmt_wapt_disarm(mdb_tgt_t *t, mdb_sespec_t *sep) 1999 { 2000 kmdb_dpi_wapt_disarm(sep->se_data); 2001 2002 return (0); 2003 } 2004 2005 /* 2006 * Determine whether the specified sespec is an armed breakpoint at the given 2007 * %pc. We use this to find conflicts with watchpoints below. 2008 */ 2009 static int 2010 kmt_bp_overlap(mdb_sespec_t *sep, uintptr_t pc) 2011 { 2012 kmt_brkpt_t *kb = sep->se_data; 2013 2014 return (sep->se_state == MDB_TGT_SPEC_ARMED && 2015 sep->se_ops == &kmt_brkpt_ops && kb->kb_addr == pc); 2016 } 2017 2018 /* 2019 * We step over watchpoints using our single-stepper. If a conflicting 2020 * breakpoint is present, we must temporarily disarm it before stepping over 2021 * the watchpoint so we do not immediately re-trigger the breakpoint. This is 2022 * similar to the case handled in kmt_brkpt_cont(), above. 2023 */ 2024 static int 2025 kmt_wapt_cont(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp) 2026 { 2027 mdb_sespec_t *bep = NULL; 2028 int status = -1; 2029 int error, why; 2030 2031 /* 2032 * If we stopped for anything other than a watchpoint, check to see 2033 * if there's a breakpoint here. 
2034 */ 2035 if (!(kmdb_dpi_get_state(&why) == DPI_STATE_FAULTED && 2036 (why == DPI_STATE_WHY_V_WAPT || why == DPI_STATE_WHY_P_WAPT))) { 2037 kreg_t pc; 2038 2039 (void) kmdb_dpi_get_register("pc", &pc); 2040 2041 for (bep = mdb_list_next(&t->t_active); bep != NULL; 2042 bep = mdb_list_next(bep)) { 2043 if (kmt_bp_overlap(bep, pc)) { 2044 (void) bep->se_ops->se_disarm(t, bep); 2045 bep->se_state = MDB_TGT_SPEC_ACTIVE; 2046 break; 2047 } 2048 } 2049 } 2050 2051 kmdb_dpi_wapt_disarm(sep->se_data); 2052 if (kmt_step(t, tsp) == 0) 2053 status = kmt_status(t, tsp); 2054 2055 error = errno; /* save errno from step or status */ 2056 2057 if (bep != NULL) 2058 mdb_tgt_sespec_arm_one(t, bep); 2059 2060 (void) set_errno(error); 2061 return (status); 2062 } 2063 2064 /*ARGSUSED*/ 2065 static int 2066 kmt_wapt_match(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp) 2067 { 2068 return (kmdb_dpi_wapt_match(sep->se_data)); 2069 } 2070 2071 static const mdb_se_ops_t kmt_wapt_ops = { 2072 .se_ctor = kmt_wapt_ctor, 2073 .se_dtor = kmt_wapt_dtor, 2074 .se_info = kmt_wapt_info, 2075 .se_secmp = kmt_wapt_secmp, 2076 .se_vecmp = kmt_wapt_vecmp, 2077 .se_arm = kmt_wapt_arm, 2078 .se_disarm = kmt_wapt_disarm, 2079 .se_cont = kmt_wapt_cont, 2080 .se_match = kmt_wapt_match, 2081 }; 2082 2083 /*ARGSUSED*/ 2084 static int 2085 kmt_trap_ctor(mdb_tgt_t *t, mdb_sespec_t *sep, void *args) 2086 { 2087 sep->se_data = args; /* trap number */ 2088 2089 return (0); 2090 } 2091 2092 /*ARGSUSED*/ 2093 static char * 2094 kmt_trap_info(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_vespec_t *vep, 2095 mdb_tgt_spec_desc_t *sp, char *buf, size_t nbytes) 2096 { 2097 const char *name; 2098 int trapnum; 2099 2100 if (vep != NULL) 2101 trapnum = (intptr_t)vep->ve_args; 2102 else 2103 trapnum = (intptr_t)sep->se_data; 2104 2105 if (trapnum == KMT_TRAP_ALL) 2106 name = "any trap"; 2107 else if (trapnum == KMT_TRAP_NOTENUM) 2108 name = "miscellaneous trap"; 2109 else 2110 name = kmt_trapname(trapnum); 2111 2112 (void) mdb_iob_snprintf(buf, nbytes, "single-step stop on %s", name); 2113 2114 return (buf); 2115 } 2116 2117 /*ARGSUSED2*/ 2118 static int 2119 kmt_trap_match(mdb_tgt_t *t, mdb_sespec_t *sep, mdb_tgt_status_t *tsp) 2120 { 2121 int spectt = (intptr_t)sep->se_data; 2122 kmt_data_t *kmt = t->t_data; 2123 kreg_t tt; 2124 2125 (void) kmdb_dpi_get_register("tt", &tt); 2126 2127 switch (spectt) { 2128 case KMT_TRAP_ALL: 2129 return (1); 2130 case KMT_TRAP_NOTENUM: 2131 return (tt > kmt->kmt_trapmax || 2132 !BT_TEST(kmt->kmt_trapmap, tt)); 2133 default: 2134 return (tt == spectt); 2135 } 2136 } 2137 2138 static const mdb_se_ops_t kmt_trap_ops = { 2139 .se_ctor = kmt_trap_ctor, 2140 .se_dtor = no_se_dtor, 2141 .se_info = kmt_trap_info, 2142 .se_secmp = no_se_secmp, 2143 .se_vecmp = no_se_vecmp, 2144 .se_arm = no_se_arm, 2145 .se_disarm = no_se_disarm, 2146 .se_cont = no_se_cont, 2147 .se_match = kmt_trap_match, 2148 }; 2149 2150 static void 2151 kmt_bparg_dtor(mdb_vespec_t *vep) 2152 { 2153 kmt_bparg_t *ka = vep->ve_args; 2154 2155 if (ka->ka_symbol != NULL) 2156 strfree(ka->ka_symbol); 2157 2158 if (ka->ka_defbp != NULL) 2159 kmt_defbp_delete(mdb.m_target, ka->ka_defbp); 2160 2161 mdb_free(ka, sizeof (kmt_bparg_t)); 2162 } 2163 2164 static int 2165 kmt_add_vbrkpt(mdb_tgt_t *t, uintptr_t addr, 2166 int spec_flags, mdb_tgt_se_f *func, void *data) 2167 { 2168 kmt_bparg_t *ka = mdb_alloc(sizeof (kmt_bparg_t), UM_SLEEP); 2169 2170 ka->ka_addr = addr; 2171 ka->ka_symbol = NULL; 2172 ka->ka_defbp = NULL; 2173 2174 return (mdb_tgt_vespec_insert(t, 

static int
kmt_add_sbrkpt(mdb_tgt_t *t, const char *fullname,
    int spec_flags, mdb_tgt_se_f *func, void *data)
{
	kmt_bparg_t *ka;
	kmt_defbp_t *dbp;
	GElf_Sym sym;
	char *tick, *objname, *symname;
	int serrno;

	if ((tick = strchr(fullname, '`')) == fullname) {
		(void) set_errno(EMDB_NOOBJ);
		return (0);
	}

	/*
	 * Deferred breakpoints are always scoped.  If we didn't find a tick,
	 * there's no scope.  We'll create a vbrkpt, but only if we can turn
	 * the provided string into an address.
	 */
	if (tick == NULL) {
		uintptr_t addr;

		if (strisbasenum(fullname)) {
			addr = mdb_strtoull(fullname); /* a bare address */
		} else if (mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EVERY,
		    fullname, &sym, NULL) < 0) {
			(void) set_errno(EMDB_NOSYM);
			return (0);
		} else {
			addr = (uintptr_t)sym.st_value; /* unscoped sym name */
		}

		return (kmt_add_vbrkpt(t, addr, spec_flags, func, data));
	}

	if (*(tick + 1) == '\0') {
		(void) set_errno(EMDB_NOSYM);
		return (0);
	}

	objname = strndup(fullname, tick - fullname);
	symname = tick + 1;

	if (mdb_tgt_lookup_by_name(t, objname, symname, NULL, NULL) < 0 &&
	    errno != EMDB_NOOBJ) {
		serrno = errno;
		strfree(objname);

		(void) set_errno(serrno);
		return (0); /* errno is set for us */
	}

	dbp = kmt_defbp_create(t, objname, symname);
	strfree(objname);

	ka = mdb_alloc(sizeof (kmt_bparg_t), UM_SLEEP);
	ka->ka_symbol = strdup(fullname);
	ka->ka_addr = 0;
	ka->ka_defbp = dbp;

	return (mdb_tgt_vespec_insert(t, &kmt_brkpt_ops, spec_flags,
	    func, data, ka, kmt_bparg_dtor));
}

static int
kmt_wparg_overlap(const kmdb_wapt_t *wp1, const kmdb_wapt_t *wp2)
{
	/* Assume the watchpoint spaces don't overlap */
	if (wp1->wp_type != wp2->wp_type)
		return (0);

	if (wp2->wp_addr + wp2->wp_size <= wp1->wp_addr)
		return (0); /* no range overlap */

	if (wp1->wp_addr + wp1->wp_size <= wp2->wp_addr)
		return (0); /* no range overlap */

	return (wp1->wp_addr != wp2->wp_addr || wp1->wp_size != wp2->wp_size ||
	    wp1->wp_wflags != wp2->wp_wflags);
}

static void
kmt_wparg_dtor(mdb_vespec_t *vep)
{
	mdb_free(vep->ve_args, sizeof (kmdb_wapt_t));
}

static int
kmt_add_wapt_common(mdb_tgt_t *t, uintptr_t addr, size_t len, uint_t wflags,
    int spec_flags, mdb_tgt_se_f *func, void *data, int type)
{
	kmdb_wapt_t *wp = mdb_alloc(sizeof (kmdb_wapt_t), UM_SLEEP);
	mdb_sespec_t *sep;

	wp->wp_addr = addr;
	wp->wp_size = len;
	wp->wp_type = type;
	wp->wp_wflags = wflags;

	if (kmdb_dpi_wapt_validate(wp) < 0) {
		/* Don't leak the watchpoint description on failure */
		mdb_free(wp, sizeof (kmdb_wapt_t));
		return (0); /* errno is set for us */
	}

	for (sep = mdb_list_next(&t->t_active); sep; sep = mdb_list_next(sep)) {
		if (sep->se_ops == &kmt_wapt_ops &&
		    mdb_list_next(&sep->se_velist) != NULL &&
		    kmt_wparg_overlap(wp, sep->se_data))
			goto wapt_dup;
	}

	for (sep = mdb_list_next(&t->t_idle); sep; sep = mdb_list_next(sep)) {
		if (sep->se_ops == &kmt_wapt_ops && kmt_wparg_overlap(wp,
		    ((mdb_vespec_t *)mdb_list_next(&sep->se_velist))->ve_args))
			goto wapt_dup;
	}

	return (mdb_tgt_vespec_insert(t, &kmt_wapt_ops, spec_flags,
	    func, data, wp, kmt_wparg_dtor));

wapt_dup:
	mdb_free(wp, sizeof (kmdb_wapt_t));
	(void) set_errno(EMDB_WPDUP);
	return (0);
}
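
/*
 * The three wrappers below select the watchpoint address space.  Roughly,
 * they correspond to the user-visible ::wp variants: a plain ::wp uses the
 * virtual form, ::wp -p the physical form, and ::wp -i (on x86) the I/O-port
 * form.  All of them funnel through kmt_add_wapt_common() above.
 */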

static int
kmt_add_pwapt(mdb_tgt_t *t, physaddr_t addr, size_t len, uint_t wflags,
    int spec_flags, mdb_tgt_se_f *func, void *data)
{
	return (kmt_add_wapt_common(t, (uintptr_t)addr, len, wflags, spec_flags,
	    func, data, DPI_WAPT_TYPE_PHYS));
}

static int
kmt_add_vwapt(mdb_tgt_t *t, uintptr_t addr, size_t len, uint_t wflags,
    int spec_flags, mdb_tgt_se_f *func, void *data)
{
	return (kmt_add_wapt_common(t, addr, len, wflags, spec_flags, func,
	    data, DPI_WAPT_TYPE_VIRT));
}

static int
kmt_add_iowapt(mdb_tgt_t *t, uintptr_t addr, size_t len, uint_t wflags,
    int spec_flags, mdb_tgt_se_f *func, void *data)
{
	return (kmt_add_wapt_common(t, addr, len, wflags, spec_flags, func,
	    data, DPI_WAPT_TYPE_IO));
}

static int
kmt_add_trap(mdb_tgt_t *t, int trapnum, int spec_flags, mdb_tgt_se_f *func,
    void *data)
{
	kmt_data_t *kmt = t->t_data;

	if (trapnum != KMT_TRAP_ALL && trapnum != KMT_TRAP_NOTENUM) {
		if (trapnum < 0 || trapnum > kmt->kmt_trapmax) {
			(void) set_errno(EMDB_BADFLTNUM);
			return (0);
		}

		BT_SET(kmt->kmt_trapmap, trapnum);
	}

	return (mdb_tgt_vespec_insert(t, &kmt_trap_ops, spec_flags, func, data,
	    (void *)(uintptr_t)trapnum, no_ve_dtor));
}

/*ARGSUSED*/
static uintmax_t
kmt_cpuid_disc_get(const mdb_var_t *v)
{
	return (kmdb_dpi_get_master_cpuid());
}

static const mdb_nv_disc_t kmt_cpuid_disc = {
	.disc_get = kmt_cpuid_disc_get
};
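
/*
 * The "cpuid" variable registered in kmdb_kvm_create() uses the discipline
 * above, so each read of it reflects the current master CPU.  As a sketch of
 * how it surfaces to the user, an expression along the lines of "<cpuid=d"
 * would print that CPU's id in decimal, assuming the usual mdb variable
 * syntax.
 */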

/*
 * This routine executes while the kernel is running.
 */
void
kmt_activate(mdb_tgt_t *t)
{
	kmt_data_t *kmt = t->t_data;

	mdb_prop_postmortem = FALSE;
	mdb_prop_kernel = TRUE;

	(void) mdb_tgt_register_dcmds(t, &kmt_dcmds[0], MDB_MOD_FORCE);
	mdb_tgt_register_regvars(t, kmt->kmt_rds, &kmt_reg_disc, 0);

	/*
	 * Force load of the MDB krtld module, in case it's been rolled into
	 * unix.
	 */
	(void) mdb_module_load(KMT_RTLD_NAME, MDB_MOD_SILENT | MDB_MOD_DEFER);
}

static void
kmt_destroy(mdb_tgt_t *t)
{
	kmt_data_t *kmt = t->t_data;
	kmt_module_t *km, *pkm;

	mdb_nv_destroy(&kmt->kmt_modules);
	for (km = mdb_list_prev(&kmt->kmt_modlist); km != NULL; km = pkm) {
		pkm = mdb_list_prev(km);
		mdb_free(km, sizeof (kmt_module_t));
	}

	if (!kmt_defbp_lock)
		kmt_defbp_destroy_all();

	if (kmt->kmt_trapmap != NULL)
		mdb_free(kmt->kmt_trapmap, BT_SIZEOFMAP(kmt->kmt_trapmax));

	mdb_free(kmt, sizeof (kmt_data_t));
}

static const mdb_tgt_ops_t kmt_ops = {
	.t_setflags = kmt_setflags,
	.t_setcontext = (int (*)())(uintptr_t)mdb_tgt_notsup,
	.t_activate = kmt_activate,
	.t_deactivate = (void (*)())(uintptr_t)mdb_tgt_nop,
	.t_periodic = kmt_periodic,
	.t_destroy = kmt_destroy,
	.t_name = kmt_name,
	.t_isa = (const char *(*)())mdb_conf_isa,
	.t_platform = kmt_platform,
	.t_uname = kmt_uname,
	.t_dmodel = kmt_dmodel,
	.t_aread = (ssize_t (*)())mdb_tgt_notsup,
	.t_awrite = (ssize_t (*)())mdb_tgt_notsup,
	.t_vread = kmt_read,
	.t_vwrite = kmt_write,
	.t_pread = kmt_pread,
	.t_pwrite = kmt_pwrite,
	.t_fread = kmt_read,
	.t_fwrite = kmt_write,
	.t_ioread = kmt_ioread,
	.t_iowrite = kmt_iowrite,
	.t_vtop = kmt_vtop,
	.t_lookup_by_name = kmt_lookup_by_name,
	.t_lookup_by_addr = kmt_lookup_by_addr,
	.t_symbol_iter = kmt_symbol_iter,
	.t_mapping_iter = kmt_mapping_iter,
	.t_object_iter = kmt_object_iter,
	.t_addr_to_map = kmt_addr_to_map,
	.t_name_to_map = kmt_name_to_map,
	.t_addr_to_ctf = kmt_addr_to_ctf,
	.t_name_to_ctf = kmt_name_to_ctf,
	.t_status = kmt_status,
	.t_run = (int (*)())(uintptr_t)mdb_tgt_notsup,
	.t_step = kmt_step,
	.t_step_out = kmt_step_out,
	.t_next = kmt_next,
	.t_cont = kmt_continue,
	.t_signal = (int (*)())(uintptr_t)mdb_tgt_notsup,
	.t_add_vbrkpt = kmt_add_vbrkpt,
	.t_add_sbrkpt = kmt_add_sbrkpt,
	.t_add_pwapt = kmt_add_pwapt,
	.t_add_vwapt = kmt_add_vwapt,
	.t_add_iowapt = kmt_add_iowapt,
	.t_add_sysenter = (int (*)())(uintptr_t)mdb_tgt_null,
	.t_add_sysexit = (int (*)())(uintptr_t)mdb_tgt_null,
	.t_add_signal = (int (*)())(uintptr_t)mdb_tgt_null,
	.t_add_fault = kmt_add_trap,
	.t_getareg = kmt_getareg,
	.t_putareg = kmt_putareg,
	.t_stack_iter = (int (*)())(uintptr_t)mdb_tgt_nop,	/* XXX */
	.t_auxv = (int (*)())(uintptr_t)mdb_tgt_notsup,
	.t_thread_name = (int (*)())(uintptr_t)mdb_tgt_notsup,
};
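
/*
 * Operations that make no sense for a live-kernel target (address-space
 * reads, run/signal control, auxv, and so on) are stubbed out above with
 * mdb_tgt_notsup, mdb_tgt_nop, or mdb_tgt_null rather than left NULL, so a
 * caller sees a well-defined failure or no-op instead of a crash.
 */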

/*
 * Called when kmdb regains control after a step or continue completes.
 * Allows us to synchronize kmt's view of the world with reality.
 */
/*ARGSUSED*/
static void
kmt_sync(mdb_tgt_t *t)
{
	kmt_data_t *kmt = t->t_data;
	int symavail;

	mdb_dprintf(MDB_DBG_KMOD, "synchronizing with kernel\n");

	symavail = kmt->kmt_symavail;
	kmt->kmt_symavail = FALSE;

	/*
	 * Resync our view of the world if the modules have changed, or if we
	 * didn't have any symbols coming into this function.  The latter will
	 * only happen on startup.
	 */
	if (kmdb_kdi_mods_changed() || !symavail)
		kmt_modlist_update(t);

	/*
	 * It would be nice if we could run this less frequently, perhaps
	 * after a dvec-initiated trigger.
	 */
	kmdb_module_sync();

	kmt->kmt_symavail = TRUE;

	mdb_dprintf(MDB_DBG_KMOD, "synchronization complete\n");

	kmt_defbp_prune();

	if (kmt_defbp_num > 0 && kmt_defbp_bpspec == 0 &&
	    kmdb_kdi_dtrace_get_state() != KDI_DTSTATE_DTRACE_ACTIVE) {
		/*
		 * Deferred breakpoints were created while DTrace was active,
		 * and consequently the deferred breakpoint enabling mechanism
		 * wasn't activated.  Activate it now, and then try to activate
		 * the deferred breakpoints.  We do this so that we can catch
		 * the ones which may apply to modules that have been loaded
		 * while they were waiting for DTrace to deactivate.
		 */
		(void) kmt_defbp_activate(t);
		(void) mdb_tgt_sespec_activate_all(t);
	}

	(void) mdb_tgt_status(t, &t->t_status);
}

/*
 * This routine executes while the kernel is running.
 */
/*ARGSUSED*/
int
kmdb_kvm_create(mdb_tgt_t *t, int argc, const char *argv[])
{
	kmt_data_t *kmt;

	if (argc != 0)
		return (set_errno(EINVAL));

	kmt = mdb_zalloc(sizeof (kmt_data_t), UM_SLEEP);
	t->t_data = kmt;
	t->t_ops = &kmt_ops;
	t->t_flags |= MDB_TGT_F_RDWR;	/* kmdb is always r/w */

	(void) mdb_nv_insert(&mdb.m_nv, "cpuid", &kmt_cpuid_disc, 0,
	    MDB_NV_PERSIST | MDB_NV_RDONLY);

	(void) mdb_nv_create(&kmt->kmt_modules, UM_SLEEP);

	kmt_init_isadep(t);

	kmt->kmt_symavail = FALSE;

	bzero(&kmt_defbp_list, sizeof (mdb_list_t));

	return (0);

create_err:
	kmt_destroy(t);

	return (-1);
}

/*
 * This routine is called once, when kmdb first has control of the world.
 */
void
kmdb_kvm_startup(void)
{
	kmt_data_t *kmt = mdb.m_target->t_data;

	mdb_dprintf(MDB_DBG_KMOD, "kmdb_kvm startup\n");

	kmt_sync(mdb.m_target);
	(void) mdb_module_load_builtin(KMT_MODULE);
	kmt_startup_isadep(mdb.m_target);

	/*
	 * This is here because we need to write the breakpoint used to
	 * activate deferred breakpoints when the debugger starts.  The write
	 * routines we normally use don't work while the kernel is running, so
	 * we have to do it during startup.
	 */
	(void) mdb_tgt_sespec_activate_all(mdb.m_target);

	kmt->kmt_rtld_name = KMT_RTLD_NAME;

	if (kmt_module_by_name(kmt, KMT_RTLD_NAME) == NULL)
		kmt->kmt_rtld_name = "unix";
}

/*
 * This routine is called after kmdb has loaded its initial set of modules.
 */
void
kmdb_kvm_poststartup(void)
{
	mdb_dprintf(MDB_DBG_KMOD, "kmdb_kvm post-startup\n");

	(void) mdb_dis_select(kmt_def_dismode());
}
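
/*
 * Taken together, the comments above describe the bring-up order of this
 * target: kmdb_kvm_create() builds the target while the kernel is still
 * running, kmdb_kvm_startup() runs once when kmdb first gains control (and
 * performs the initial kmt_sync()), and kmdb_kvm_poststartup() runs after the
 * initial set of debugger modules has been loaded.
 */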