1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * Copyright 2019 Joyent, Inc.
27 * Copyright 2025 Oxide Computer Company
28 */
29
30 /*
31 * Libkvm Kernel Target
32 *
33 * The libkvm kernel target provides access to both crash dumps and live
34 * kernels through /dev/ksyms and /dev/kmem, using the facilities provided by
35 * the libkvm.so library. The target-specific data structures are shared
36 * between this file (common code) and the ISA-dependent parts of the target,
37 * and so they are defined in the mdb_kvm.h header. The target processes an
38 * "executable" (/dev/ksyms or the unix.X file) which contains a primary
39 * .symtab and .dynsym, and then also iterates over the krtld module chain in
40 * the kernel in order to obtain a list of loaded modules and per-module symbol
41 * tables. To improve startup performance, the per-module symbol tables are
42 * instantiated on-the-fly whenever an address lookup falls within the text
43 * section of a given module. The target also relies on services from the
44 * mdb_ks (kernel support) module, which contains pieces of the implementation
45 * that must be compiled against the kernel implementation.
46 */
47
48 #include <sys/modctl.h>
49 #include <sys/kobj.h>
50 #include <sys/kobj_impl.h>
51 #include <sys/utsname.h>
52 #include <sys/panic.h>
53 #include <sys/dumphdr.h>
54 #include <sys/dumpadm.h>
55 #include <sys/uuid.h>
56 #include <sys/stdbool.h>
57
58 #include <dlfcn.h>
59 #include <libctf.h>
60 #include <string.h>
61 #include <fcntl.h>
62 #include <errno.h>
63 #include <stddef.h>
64
65 #include <mdb/mdb_target_impl.h>
66 #include <mdb/mdb_err.h>
67 #include <mdb/mdb_debug.h>
68 #include <mdb/mdb_string.h>
69 #include <mdb/mdb_modapi.h>
70 #include <mdb/mdb_io_impl.h>
71 #include <mdb/mdb_ctf.h>
72 #include <mdb/mdb_kvm.h>
73 #include <mdb/mdb_module.h>
74 #include <mdb/mdb_kb.h>
75 #include <mdb/mdb_ks.h>
76 #include <mdb/mdb.h>
77
/*
 * Translate a pointer that is valid relative to obase into the equivalent
 * pointer relative to nbase (used to point at symbol/string data after it
 * has been copied from the kernel's symspace into a local buffer).
 */
#define	KT_RELOC_BUF(buf, obase, nbase) \
	((uintptr_t)(buf) - (uintptr_t)(obase) + (uintptr_t)(nbase))

/* Evaluates to true if buf does not lie within [base, base + size). */
#define	KT_BAD_BUF(buf, base, size)	\
	((uintptr_t)(buf) < (uintptr_t)(base) || \
	((uintptr_t)(buf) >= (uintptr_t)(base) + (uintptr_t)(size)))

/* Argument bundle for symbol-table iteration (see kt_symtab_iter()) */
typedef struct kt_symarg {
	mdb_tgt_sym_f *sym_cb;		/* Caller's callback function */
	void *sym_data;			/* Callback function argument */
	uint_t sym_type;		/* Symbol type/binding filter */
	mdb_syminfo_t sym_info;		/* Symbol id and table id */
	const char *sym_obj;		/* Containing object */
} kt_symarg_t;

/* Argument bundle for address-space mapping iteration */
typedef struct kt_maparg {
	mdb_tgt_t *map_target;		/* Target used for mapping iter */
	mdb_tgt_map_f *map_cb;		/* Caller's callback function */
	void *map_data;			/* Callback function argument */
} kt_maparg_t;

static const char KT_MODULE[] = "mdb_ks";	/* Kernel support module name */
static const char KT_CTFPARENT[] = "genunix";	/* Default CTF parent object */

/* Resolved via dlsym(RTLD_NEXT) in kt_activate(); may remain NULL */
static void (*print_buildversion)(void);
103
104 static void
kt_load_module(kt_data_t * kt,mdb_tgt_t * t,kt_module_t * km)105 kt_load_module(kt_data_t *kt, mdb_tgt_t *t, kt_module_t *km)
106 {
107 km->km_data = mdb_alloc(km->km_datasz, UM_SLEEP);
108
109 (void) mdb_tgt_vread(t, km->km_data, km->km_datasz, km->km_symspace_va);
110
111 km->km_symbuf = (void *)
112 KT_RELOC_BUF(km->km_symtab_va, km->km_symspace_va, km->km_data);
113
114 km->km_strtab = (char *)
115 KT_RELOC_BUF(km->km_strtab_va, km->km_symspace_va, km->km_data);
116
117 km->km_symtab = mdb_gelf_symtab_create_raw(&kt->k_file->gf_ehdr,
118 &km->km_symtab_hdr, km->km_symbuf,
119 &km->km_strtab_hdr, km->km_strtab, MDB_TGT_SYMTAB);
120 }
121
static bool
kt_adjust_module(const struct modctl *ctl, struct module *kmod)
{
	/*
	 * "struct module" was changed in illumos 17467 to accommodate
	 * data from an extended ELF header. Unfortunately the new
	 * fields were inserted into the middle of the structure
	 * instead of the end where things would have been much easier
	 * for us. In case we are loading a crash dump from an older
	 * kernel everything will be misaligned from where we expect it
	 * to be. Attempt to detect such cases and fix things up so we
	 * can still load the module. Using CTF is not an option at
	 * this point as we need to parse the module in order to find
	 * its CTF data.
	 */
	if (kmod->text == ctl->mod_text &&
	    kmod->text_size == ctl->mod_text_size) {
		return (true);
	}

	/*
	 * Shift the tail of the structure so the old dump's fields land at
	 * the offsets the new layout expects.  NOTE(review): this relies on
	 * bcopy() handling overlapping regions, which illumos bcopy(3C)
	 * guarantees.
	 */
	bcopy(&kmod->shnum, &kmod->shdrs,
	    sizeof (struct module) - offsetof(struct module, shdrs));

	/* If the shift didn't line things up, give up on this module. */
	if (kmod->text != ctl->mod_text ||
	    kmod->text_size != ctl->mod_text_size) {
		mdb_printf("couldn't adjust old modctl %p's module",
		    (void *)ctl->mod_mp);
		return (false);
	}

	/*
	 * The old layout predates the new fields entirely; synthesize them
	 * from the module's ELF header.
	 */
	kmod->shnum = kmod->hdr.e_shnum;
	kmod->phnum = kmod->hdr.e_phnum;
	kmod->shstrndx = kmod->hdr.e_shstrndx;

	return (true);
}
158
/*
 * Walk the kernel's circular modctl list (rooted at the "modules" symbol)
 * and create a kt_module_t for each loaded module, recording the virtual
 * addresses of its symspace so that symbols can be loaded lazily later by
 * kt_load_module().  Modules are appended to k_modlist in load-dependency
 * order; with MDB_TGT_F_PRELOAD each module's symbols are read immediately.
 */
static void
kt_load_modules(kt_data_t *kt, mdb_tgt_t *t)
{
	char name[MAXNAMELEN];
	uintptr_t addr, head;

	struct module kmod;
	struct modctl ctl;
	Shdr symhdr, strhdr;
	GElf_Sym sym;

	kt_module_t *km;

	if (mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
	    "modules", &sym, NULL) == -1) {
		warn("failed to get 'modules' symbol");
		return;
	}

	if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &ctl, sizeof (ctl),
	    MDB_TGT_OBJ_EXEC, "modules") != sizeof (ctl)) {
		warn("failed to read 'modules' struct");
		return;
	}

	/* The list head is the "modules" modctl itself; iterate until back */
	addr = head = (uintptr_t)sym.st_value;

	do {
		if (addr == 0)
			break; /* Avoid spurious NULL pointers in list */

		if (mdb_tgt_vread(t, &ctl, sizeof (ctl), addr) == -1) {
			warn("failed to read modctl at %p", (void *)addr);
			return;
		}

		if (ctl.mod_mp == NULL)
			continue; /* No associated krtld structure */

		if (mdb_tgt_readstr(t, MDB_TGT_AS_VIRT, name, MAXNAMELEN,
		    (uintptr_t)ctl.mod_modname) <= 0) {
			warn("failed to read module name at %p",
			    (void *)ctl.mod_modname);
			continue;
		}

		mdb_dprintf(MDB_DBG_KMOD, "reading mod %s (%p)\n",
		    name, (void *)addr);

		if (mdb_nv_lookup(&kt->k_modules, name) != NULL) {
			warn("skipping duplicate module '%s', id=%d\n",
			    name, ctl.mod_id);
			continue;
		}

		if (mdb_tgt_vread(t, &kmod, sizeof (kmod),
		    (uintptr_t)ctl.mod_mp) == -1) {
			warn("failed to read module at %p\n",
			    (void *)ctl.mod_mp);
			continue;
		}

		/* Fix up "struct module" layout changes from older dumps */
		if (!kt_adjust_module(&ctl, &kmod))
			continue;

		if (kmod.symspace == NULL || kmod.symhdr == NULL ||
		    kmod.strhdr == NULL) {
			/*
			 * If no buffer for the symbols has been allocated,
			 * or the shdrs for .symtab and .strtab are missing,
			 * then we're out of luck.
			 */
			continue;
		}

		if (mdb_tgt_vread(t, &symhdr, sizeof (Shdr),
		    (uintptr_t)kmod.symhdr) == -1) {
			warn("failed to read .symtab header for '%s', id=%d",
			    name, ctl.mod_id);
			continue;
		}

		if (mdb_tgt_vread(t, &strhdr, sizeof (Shdr),
		    (uintptr_t)kmod.strhdr) == -1) {
			warn("failed to read .strtab header for '%s', id=%d",
			    name, ctl.mod_id);
			continue;
		}

		/*
		 * Now get clever: f(*^ing krtld didn't used to bother updating
		 * its own kmod.symsize value. We know that prior to this bug
		 * being fixed, symspace was a contiguous buffer containing
		 * .symtab, .strtab, and the symbol hash table in that order.
		 * So if symsize is zero, recompute it as the size of .symtab
		 * plus the size of .strtab. We don't need to load the hash
		 * table anyway since we re-hash all the symbols internally.
		 */
		if (kmod.symsize == 0)
			kmod.symsize = symhdr.sh_size + strhdr.sh_size;

		/*
		 * Similar logic can be used to make educated guesses
		 * at the values of kmod.symtbl and kmod.strings.
		 */
		if (kmod.symtbl == NULL)
			kmod.symtbl = kmod.symspace;
		if (kmod.strings == NULL)
			kmod.strings = kmod.symspace + symhdr.sh_size;

		/*
		 * Make sure things seem reasonable before we proceed
		 * to actually read and decipher the symspace.
		 */
		if (KT_BAD_BUF(kmod.symtbl, kmod.symspace, kmod.symsize) ||
		    KT_BAD_BUF(kmod.strings, kmod.symspace, kmod.symsize)) {
			warn("skipping module '%s', id=%d (corrupt symspace)\n",
			    name, ctl.mod_id);
			continue;
		}

		km = mdb_zalloc(sizeof (kt_module_t), UM_SLEEP);
		km->km_name = strdup(name);

		(void) mdb_nv_insert(&kt->k_modules, km->km_name, NULL,
		    (uintptr_t)km, MDB_NV_EXTNAME);

		/* Record addresses/sizes so symbols can be loaded lazily */
		km->km_datasz = kmod.symsize;
		km->km_symspace_va = (uintptr_t)kmod.symspace;
		km->km_symtab_va = (uintptr_t)kmod.symtbl;
		km->km_strtab_va = (uintptr_t)kmod.strings;
		km->km_symtab_hdr = symhdr;
		km->km_strtab_hdr = strhdr;
		km->km_text_va = (uintptr_t)kmod.text;
		km->km_text_size = kmod.text_size;
		km->km_data_va = (uintptr_t)kmod.data;
		km->km_data_size = kmod.data_size;
		km->km_bss_va = (uintptr_t)kmod.bss;
		km->km_bss_size = kmod.bss_size;

		if (kt->k_ctfvalid) {
			km->km_ctf_va = (uintptr_t)kmod.ctfdata;
			km->km_ctf_size = kmod.ctfsize;
		}

		/*
		 * Add the module to the end of the list of modules in load-
		 * dependency order. This is needed to load the corresponding
		 * debugger modules in the same order for layering purposes.
		 */
		mdb_list_append(&kt->k_modlist, km);

		if (t->t_flags & MDB_TGT_F_PRELOAD) {
			mdb_iob_printf(mdb.m_out, " %s", name);
			mdb_iob_flush(mdb.m_out);
			kt_load_module(kt, t, km);
		}

	} while ((addr = (uintptr_t)ctl.mod_next) != head);
}
319
/*
 * Change target open flags.  Two transitions are supported: toggling
 * MDB_TGT_F_ALLOWIO (which requires reopening a live kernel via
 * /dev/allkmem instead of /dev/kmem) and toggling MDB_TGT_F_RDWR.
 * Returns 0 on success, -1 on failure (with t_flags unchanged).
 */
int
kt_setflags(mdb_tgt_t *t, int flags)
{
	/* ALLOWIO changes only apply to live kernels, not dumps */
	int iochg = ((flags ^ t->t_flags) & MDB_TGT_F_ALLOWIO) &&
	    !mdb_prop_postmortem;
	int rwchg = (flags ^ t->t_flags) & MDB_TGT_F_RDWR;
	kt_data_t *kt = t->t_data;
	const char *kvmfile;
	void *cookie;
	int mode;

	if (!iochg && !rwchg)
		return (0);

	if (kt->k_xpv_domu) {
		warn("read-only target");
		return (-1);
	}

	if (iochg) {
		kvmfile = (flags & MDB_TGT_F_ALLOWIO) ? "/dev/allkmem" :
		    "/dev/kmem";
	} else {
		kvmfile = kt->k_kvmfile;
	}

	mode = (flags & MDB_TGT_F_RDWR) ? O_RDWR : O_RDONLY;

	/* Open the new handle before closing the old one */
	if ((cookie = kt->k_kb_ops->kb_open(kt->k_symfile, kvmfile, NULL, mode,
	    mdb.m_pname)) == NULL) {
		/* We failed to re-open, so don't change t_flags */
		warn("failed to re-open target");
		return (-1);
	}

	/*
	 * We successfully reopened the target, so update k_kvmfile. Also set
	 * the RDWR and ALLOWIO bits in t_flags to match those in flags.
	 */
	(void) kt->k_kb_ops->kb_close(kt->k_cookie);
	kt->k_cookie = cookie;

	if (kvmfile != kt->k_kvmfile) {
		strfree(kt->k_kvmfile);
		kt->k_kvmfile = strdup(kvmfile);
	}

	t->t_flags = (t->t_flags & ~(MDB_TGT_F_RDWR | MDB_TGT_F_ALLOWIO)) |
	    (flags & (MDB_TGT_F_RDWR | MDB_TGT_F_ALLOWIO));

	return (0);
}
372
373 /*
374 * Determine which PIDs (if any) have their pages saved in the dump. We
375 * do this by looking for content flags in dump_flags in the header. These
376 * flags, which won't be set in older dumps, tell us whether a single process
377 * has had its pages included in the dump. If a single process has been
378 * included, we need to get the PID for that process from the dump_pids
379 * array in the dump.
380 */
381 static int
kt_find_dump_contents(kt_data_t * kt)382 kt_find_dump_contents(kt_data_t *kt)
383 {
384 dumphdr_t *dh = kt->k_dumphdr;
385 pid_t pid = -1;
386
387 if (dh->dump_flags & DF_ALL)
388 return (KT_DUMPCONTENT_ALL);
389
390 if (dh->dump_flags & DF_CURPROC) {
391 if ((pid = kt->k_dump_find_curproc()) == -1)
392 return (KT_DUMPCONTENT_INVALID);
393 else
394 return (pid);
395 } else {
396 return (KT_DUMPCONTENT_KERNEL);
397 }
398 }
399
/*
 * Determine whether the dump contains the pages for the process identified
 * by 'context' (a proc address).  Returns non-zero if the pages are (or may
 * be) present; 0 if the dump is kernel-only.  If we cannot make the
 * determination we warn and return 1 so the caller may still proceed.
 */
static int
kt_dump_contains_proc(mdb_tgt_t *t, void *context)
{
	kt_data_t *kt = t->t_data;
	pid_t (*f_pid)(uintptr_t);
	pid_t reqpid;

	switch (kt->k_dumpcontent) {
	case KT_DUMPCONTENT_KERNEL:
		return (0);
	case KT_DUMPCONTENT_ALL:
		return (1);
	case KT_DUMPCONTENT_INVALID:
		goto procnotfound;
	default:
		/*
		 * k_dumpcontent holds the single included PID; translate the
		 * requested proc address to a PID via "mdb_kproc_pid"
		 * (presumably provided by mdb_ks -- resolved at runtime).
		 */
		f_pid = (pid_t (*)()) dlsym(RTLD_NEXT, "mdb_kproc_pid");
		if (f_pid == NULL)
			goto procnotfound;

		reqpid = f_pid((uintptr_t)context);
		if (reqpid == -1)
			goto procnotfound;

		return (kt->k_dumpcontent == reqpid);
	}

procnotfound:
	warn("unable to determine whether dump contains proc %p\n", context);
	return (1);
}
430
431 int
kt_setcontext(mdb_tgt_t * t,void * context)432 kt_setcontext(mdb_tgt_t *t, void *context)
433 {
434 if (context != NULL) {
435 const char *argv[2];
436 int argc = 0;
437 mdb_tgt_t *ct;
438 kt_data_t *kt = t->t_data;
439
440 argv[argc++] = (const char *)context;
441 argv[argc] = NULL;
442
443 if (kt->k_dumphdr != NULL &&
444 !kt_dump_contains_proc(t, context)) {
445 warn("dump does not contain pages for proc %p\n",
446 context);
447 return (-1);
448 }
449
450 if ((ct = mdb_tgt_create(mdb_kproc_tgt_create,
451 t->t_flags, argc, argv)) == NULL)
452 return (-1);
453
454 mdb_printf("debugger context set to proc %p\n", context);
455 mdb_tgt_activate(ct);
456 } else
457 mdb_printf("debugger context set to kernel\n");
458
459 return (0);
460 }
461
462 static int
kt_stack(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)463 kt_stack(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
464 {
465 kt_data_t *kt = mdb.m_target->t_data;
466 return (kt->k_dcmd_stack(addr, flags, argc, argv));
467 }
468
469 static int
kt_stackv(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)470 kt_stackv(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
471 {
472 kt_data_t *kt = mdb.m_target->t_data;
473 return (kt->k_dcmd_stackv(addr, flags, argc, argv));
474 }
475
476 static int
kt_stackr(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)477 kt_stackr(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
478 {
479 kt_data_t *kt = mdb.m_target->t_data;
480 return (kt->k_dcmd_stackr(addr, flags, argc, argv));
481 }
482
483 static int
kt_regs(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)484 kt_regs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
485 {
486 kt_data_t *kt = mdb.m_target->t_data;
487
488 if (argc != 0 || (flags & DCMD_ADDRSPEC))
489 return (DCMD_USAGE);
490
491 addr = (uintptr_t)kt->k_regs;
492
493 return (kt->k_dcmd_regs(addr, flags, argc, argv));
494 }
495
#ifdef __x86
/* ::cpustack -- delegate to the ISA-specific per-CPU stack dcmd. */
static int
kt_cpustack(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kt_data_t *ktd = mdb.m_target->t_data;

	return (ktd->k_dcmd_cpustack(addr, flags, argc, argv));
}

/* ::cpuregs -- delegate to the ISA-specific per-CPU registers dcmd. */
static int
kt_cpuregs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kt_data_t *ktd = mdb.m_target->t_data;

	return (ktd->k_dcmd_cpuregs(addr, flags, argc, argv));
}
#endif /* __x86 */
511
/*ARGSUSED*/
/*
 * ::status -- print a summary of the current target: what we are debugging
 * (live kernel or crash dump), the OS release/version, the kernel build
 * version (if available), and dump-specific details (uuid, panic string,
 * dump content).
 */
static int
kt_status_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kt_data_t *kt = mdb.m_target->t_data;
	struct utsname uts;

	/* Provide a placeholder nodename in case kt_uname() fails */
	bzero(&uts, sizeof (uts));
	(void) strcpy(uts.nodename, "unknown machine");
	(void) kt_uname(mdb.m_target, &uts);

	if (mdb_prop_postmortem) {
		mdb_printf("debugging %scrash dump %s (%d-bit) from %s\n",
		    kt->k_xpv_domu ? "domain " : "", kt->k_kvmfile,
		    (int)(sizeof (void *) * NBBY), uts.nodename);
	} else {
		mdb_printf("debugging live kernel (%d-bit) on %s\n",
		    (int)(sizeof (void *) * NBBY), uts.nodename);
	}

	mdb_printf("operating system: %s %s (%s)\n",
	    uts.release, uts.version, uts.machine);

	if (print_buildversion != NULL)
		print_buildversion();

	if (kt->k_dumphdr) {
		dumphdr_t *dh = kt->k_dumphdr;

		mdb_printf("image uuid: %s\n", dh->dump_uuid[0] != '\0' ?
		    dh->dump_uuid : "(not set)");
		mdb_printf("panic message: %s\n", dh->dump_panicstring);

		kt->k_dump_print_content(dh, kt->k_dumpcontent);
	} else {
		char uuid[UUID_PRINTABLE_STRING_LENGTH];

		/* On a live kernel, read the image uuid from the kernel */
		if (mdb_readsym(uuid, sizeof (uuid),
		    "dump_osimage_uuid") == sizeof (uuid) &&
		    uuid[sizeof (uuid) - 1] == '\0') {
			mdb_printf("image uuid: %s\n", uuid[0] != '\0' ?
			    uuid : "(not set)");
		}
	}

	return (DCMD_OK);
}
559
/* Help text shared by the $c, $C, ::stack, and ::stackregs dcmds */
static void
kt_stack_help(void)
{
	mdb_printf(
	    "Options:\n"
	    "   -n    do not resolve addresses to names\n"
	    "   -s    show the size of each stack frame to the left\n"
	    "   -t    where CTF is present, show types for functions and "
	    "arguments\n"
	    "   -v    include frame pointer information (this is the default "
	    "with %<b>$C%</b>)\n"
	    "\n"
	    "If the optional %<u>cnt%</u> is given, no more than %<u>cnt%</u> "
	    "arguments are shown\nfor each stack frame.\n");
}
575
/* Dcmds registered by the kvm target when it is activated */
static const mdb_dcmd_t kt_dcmds[] = {
	{ "$c", "?[-nstv] [cnt]", "print stack backtrace", kt_stack,
	    kt_stack_help },
	{ "$C", "?[-nstv] [cnt]", "print stack backtrace", kt_stackv,
	    kt_stack_help },
	{ "$r", NULL, "print general-purpose registers", kt_regs },
	{ "$?", NULL, "print status and registers", kt_regs },
	{ "regs", NULL, "print general-purpose registers", kt_regs },
	{ "stack", "?[-nstv] [cnt]", "print stack backtrace", kt_stack,
	    kt_stack_help },
	{ "stackregs", "?[-nstv] [cnt]", "print stack backtrace and registers",
	    kt_stackr, kt_stack_help },
#ifdef __x86
	{ "cpustack", "?[-v] [-c cpuid] [cnt]", "print stack backtrace for a "
	    "specific CPU", kt_cpustack },
	{ "cpuregs", "?[-c cpuid]", "print general-purpose registers for a "
	    "specific CPU", kt_cpuregs },
#endif
	{ "status", NULL, "print summary of current target", kt_status_dcmd },
	{ NULL }
};
597
598 static uintmax_t
reg_disc_get(const mdb_var_t * v)599 reg_disc_get(const mdb_var_t *v)
600 {
601 mdb_tgt_t *t = MDB_NV_COOKIE(v);
602 kt_data_t *kt = t->t_data;
603 mdb_tgt_reg_t r = 0;
604
605 (void) mdb_tgt_getareg(t, kt->k_tid, mdb_nv_get_name(v), &r);
606 return (r);
607 }
608
609 static kt_module_t *
kt_module_by_name(kt_data_t * kt,const char * name)610 kt_module_by_name(kt_data_t *kt, const char *name)
611 {
612 kt_module_t *km;
613
614 for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
615 if (strcmp(name, km->km_name) == 0)
616 return (km);
617 }
618
619 return (NULL);
620 }
621
/*
 * Activate the kvm target: set the target properties, and (on first
 * activation only) load the kernel support module, resolve optional dump
 * helpers, load the kernel module list, and determine where krtld's CTF
 * lives.  Finally register our dcmds and export registers as variables.
 */
void
kt_activate(mdb_tgt_t *t)
{
	static const mdb_nv_disc_t reg_disc = { .disc_get = reg_disc_get };
	kt_data_t *kt = t->t_data;
	void *sym;

	int oflag = 0;

	mdb_prop_postmortem = kt->k_xpv_domu || (kt->k_dumphdr != NULL);
	mdb_prop_kernel = TRUE;
	mdb_prop_datamodel = MDB_TGT_MODEL_NATIVE;

	if (kt->k_activated == FALSE) {
		struct utsname u1, u2;
		/*
		 * If we're examining a crash dump, root is /, and uname(2)
		 * does not match the utsname in the dump, issue a warning.
		 * Note that we are assuming that the modules and macros in
		 * /usr/lib are compiled against the kernel from uname -rv.
		 */
		if (mdb_prop_postmortem && strcmp(mdb.m_root, "/") == 0 &&
		    uname(&u1) >= 0 && kt_uname(t, &u2) >= 0 &&
		    (strcmp(u1.release, u2.release) ||
		    strcmp(u1.version, u2.version))) {
			mdb_warn("warning: dump is from %s %s %s; dcmds and "
			    "macros may not match kernel implementation\n",
			    u2.sysname, u2.release, u2.version);
		}

		if (mdb_module_load(KT_MODULE, MDB_MOD_GLOBAL) < 0) {
			warn("failed to load kernel support module -- "
			    "some modules may not load\n");
		}

		/* Optional helper; absent in older support modules */
		print_buildversion = (void (*)(void))dlsym(RTLD_NEXT,
		    "mdb_print_buildversion");

		if (mdb_prop_postmortem && kt->k_dumphdr != NULL) {
			/* Optional dump helpers resolved at runtime */
			sym = dlsym(RTLD_NEXT, "mdb_dump_print_content");
			if (sym != NULL)
				kt->k_dump_print_content = (void (*)())sym;

			sym = dlsym(RTLD_NEXT, "mdb_dump_find_curproc");
			if (sym != NULL)
				kt->k_dump_find_curproc = (int (*)())sym;

			kt->k_dumpcontent = kt_find_dump_contents(kt);
		}

		if (t->t_flags & MDB_TGT_F_PRELOAD) {
			/* Disable the pager while we print module names */
			oflag = mdb_iob_getflags(mdb.m_out) & MDB_IOB_PGENABLE;

			mdb_iob_clrflags(mdb.m_out, oflag);
			mdb_iob_puts(mdb.m_out, "Preloading module symbols: [");
			mdb_iob_flush(mdb.m_out);
		}

		if (!(t->t_flags & MDB_TGT_F_NOLOAD)) {
			kt_load_modules(kt, t);

			/*
			 * Determine where the CTF data for krtld is. If krtld
			 * is rolled into unix, force load the MDB krtld
			 * module.
			 */
			kt->k_rtld_name = "krtld";

			if (kt_module_by_name(kt, "krtld") == NULL) {
				(void) mdb_module_load("krtld", MDB_MOD_SILENT);
				kt->k_rtld_name = "unix";
			}
		}


		if (t->t_flags & MDB_TGT_F_PRELOAD) {
			mdb_iob_puts(mdb.m_out, " ]\n");
			mdb_iob_setflags(mdb.m_out, oflag);
		}

		kt->k_activated = TRUE;
	}

	(void) mdb_tgt_register_dcmds(t, &kt_dcmds[0], MDB_MOD_FORCE);

	/* Export some of our registers as named variables */
	mdb_tgt_register_regvars(t, kt->k_rds, &reg_disc, MDB_NV_RDONLY);

	mdb_tgt_elf_export(kt->k_file);
}
712
/*
 * Deactivate the kvm target: remove the exported register variables and the
 * dcmds registered in kt_activate(), and reset the target properties.
 */
void
kt_deactivate(mdb_tgt_t *t)
{
	kt_data_t *kt = t->t_data;

	const mdb_tgt_regdesc_t *rdp;
	const mdb_dcmd_t *dcp;

	for (rdp = kt->k_rds; rdp->rd_name != NULL; rdp++) {
		mdb_var_t *v;

		if (!(rdp->rd_flags & MDB_TGT_R_EXPORT))
			continue; /* Didn't export register as a variable */

		if ((v = mdb_nv_lookup(&mdb.m_nv, rdp->rd_name)) != NULL) {
			/* Clear persistence so the removal takes effect */
			v->v_flags &= ~MDB_NV_PERSIST;
			mdb_nv_remove(&mdb.m_nv, v);
		}
	}

	for (dcp = &kt_dcmds[0]; dcp->dc_name != NULL; dcp++) {
		if (mdb_module_remove_dcmd(t->t_module, dcp->dc_name) == -1)
			warn("failed to remove dcmd %s", dcp->dc_name);
	}

	mdb_prop_postmortem = FALSE;
	mdb_prop_kernel = FALSE;
	mdb_prop_datamodel = MDB_TGT_MODEL_UNKNOWN;
}
742
743 /*ARGSUSED*/
744 const char *
kt_name(mdb_tgt_t * t)745 kt_name(mdb_tgt_t *t)
746 {
747 return ("kvm");
748 }
749
750 const char *
kt_platform(mdb_tgt_t * t)751 kt_platform(mdb_tgt_t *t)
752 {
753 kt_data_t *kt = t->t_data;
754 return (kt->k_platform);
755 }
756
757 int
kt_uname(mdb_tgt_t * t,struct utsname * utsp)758 kt_uname(mdb_tgt_t *t, struct utsname *utsp)
759 {
760 return (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, utsp,
761 sizeof (struct utsname), MDB_TGT_OBJ_EXEC, "utsname"));
762 }
763
/*ARGSUSED*/
/* The kernel target always uses the native data model. */
int
kt_dmodel(mdb_tgt_t *t)
{
	return (MDB_TGT_MODEL_NATIVE);
}
770
771 ssize_t
kt_aread(mdb_tgt_t * t,mdb_tgt_as_t as,void * buf,size_t nbytes,mdb_tgt_addr_t addr)772 kt_aread(mdb_tgt_t *t, mdb_tgt_as_t as, void *buf,
773 size_t nbytes, mdb_tgt_addr_t addr)
774 {
775 kt_data_t *kt = t->t_data;
776 ssize_t rval;
777
778 if ((rval = kt->k_kb_ops->kb_aread(kt->k_cookie, addr, buf,
779 nbytes, as)) == -1)
780 return (set_errno(EMDB_NOMAP));
781
782 return (rval);
783 }
784
785 ssize_t
kt_awrite(mdb_tgt_t * t,mdb_tgt_as_t as,const void * buf,size_t nbytes,mdb_tgt_addr_t addr)786 kt_awrite(mdb_tgt_t *t, mdb_tgt_as_t as, const void *buf,
787 size_t nbytes, mdb_tgt_addr_t addr)
788 {
789 kt_data_t *kt = t->t_data;
790 ssize_t rval;
791
792 if ((rval = kt->k_kb_ops->kb_awrite(kt->k_cookie, addr, buf,
793 nbytes, as)) == -1)
794 return (set_errno(EMDB_NOMAP));
795
796 return (rval);
797 }
798
799 ssize_t
kt_vread(mdb_tgt_t * t,void * buf,size_t nbytes,uintptr_t addr)800 kt_vread(mdb_tgt_t *t, void *buf, size_t nbytes, uintptr_t addr)
801 {
802 kt_data_t *kt = t->t_data;
803 ssize_t rval;
804
805 if ((rval = kt->k_kb_ops->kb_kread(kt->k_cookie, addr, buf,
806 nbytes)) == -1)
807 return (set_errno(EMDB_NOMAP));
808
809 return (rval);
810 }
811
812 ssize_t
kt_vwrite(mdb_tgt_t * t,const void * buf,size_t nbytes,uintptr_t addr)813 kt_vwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, uintptr_t addr)
814 {
815 kt_data_t *kt = t->t_data;
816 ssize_t rval;
817
818 if ((rval = kt->k_kb_ops->kb_kwrite(kt->k_cookie, addr, buf,
819 nbytes)) == -1)
820 return (set_errno(EMDB_NOMAP));
821
822 return (rval);
823 }
824
/* "File" reads on the kernel target are just virtual reads. */
ssize_t
kt_fread(mdb_tgt_t *t, void *buf, size_t nbytes, uintptr_t addr)
{
	return (kt_vread(t, buf, nbytes, addr));
}

/* "File" writes on the kernel target are just virtual writes. */
ssize_t
kt_fwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, uintptr_t addr)
{
	return (kt_vwrite(t, buf, nbytes, addr));
}
836
837 ssize_t
kt_pread(mdb_tgt_t * t,void * buf,size_t nbytes,physaddr_t addr)838 kt_pread(mdb_tgt_t *t, void *buf, size_t nbytes, physaddr_t addr)
839 {
840 kt_data_t *kt = t->t_data;
841 ssize_t rval;
842
843 if ((rval = kt->k_kb_ops->kb_pread(kt->k_cookie, addr, buf,
844 nbytes)) == -1)
845 return (set_errno(EMDB_NOMAP));
846
847 return (rval);
848 }
849
850 ssize_t
kt_pwrite(mdb_tgt_t * t,const void * buf,size_t nbytes,physaddr_t addr)851 kt_pwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, physaddr_t addr)
852 {
853 kt_data_t *kt = t->t_data;
854 ssize_t rval;
855
856 if ((rval = kt->k_kb_ops->kb_pwrite(kt->k_cookie, addr, buf,
857 nbytes)) == -1)
858 return (set_errno(EMDB_NOMAP));
859
860 return (rval);
861 }
862
/*
 * Translate virtual address va in address space 'as' to a physical address,
 * stored in *pap.  The MDB_TGT_AS_VIRT* pseudo-spaces use the kernel's own
 * address space; any other value of 'as' is treated as a pointer to a
 * specific kernel "struct as".  If the backend translation fails, fall back
 * to the optional platform_vtop() routine from the unix dmod.
 */
int
kt_vtop(mdb_tgt_t *t, mdb_tgt_as_t as, uintptr_t va, physaddr_t *pap)
{
	kt_data_t *kt = t->t_data;

	struct as *asp;
	physaddr_t pa;
	mdb_module_t *mod;
	mdb_var_t *v;
	int (*fptr)(uintptr_t, struct as *, physaddr_t *);

	switch ((uintptr_t)as) {
	case (uintptr_t)MDB_TGT_AS_PHYS:
	case (uintptr_t)MDB_TGT_AS_FILE:
	case (uintptr_t)MDB_TGT_AS_IO:
		/* These spaces have no virtual-to-physical translation */
		return (set_errno(EINVAL));
	case (uintptr_t)MDB_TGT_AS_VIRT:
	case (uintptr_t)MDB_TGT_AS_VIRT_I:
	case (uintptr_t)MDB_TGT_AS_VIRT_S:
		asp = kt->k_as;
		break;
	default:
		asp = (struct as *)as;
	}

	if ((pa = kt->k_kb_ops->kb_vtop(kt->k_cookie, asp, va)) != -1ULL) {
		*pap = pa;
		return (0);
	}

	/* Backend couldn't translate; try the unix dmod's platform_vtop() */
	if ((v = mdb_nv_lookup(&mdb.m_modules, "unix")) != NULL &&
	    (mod = mdb_nv_get_cookie(v)) != NULL) {

		fptr = (int (*)(uintptr_t, struct as *, physaddr_t *))
		    dlsym(mod->mod_hdl, "platform_vtop");

		if ((fptr != NULL) && ((*fptr)(va, asp, pap) == 0))
			return (0);
	}

	return (set_errno(EMDB_NOMAP));
}
905
/*
 * Look up a symbol by name in the object 'obj' (or the executable, or all
 * objects).  On success the symbol is stored in *symp and its id/table in
 * *sip.  Returns 0, or -1 with errno set to EMDB_NOOBJ or EMDB_NOSYM.
 */
int
kt_lookup_by_name(mdb_tgt_t *t, const char *obj, const char *name,
    GElf_Sym *symp, mdb_syminfo_t *sip)
{
	kt_data_t *kt = t->t_data;
	kt_module_t *km, kmod;
	mdb_var_t *v;
	int n;

	/*
	 * To simplify the implementation, we create a fake module on the stack
	 * which is "prepended" to k_modlist and whose symtab is kt->k_symtab.
	 */
	kmod.km_symtab = kt->k_symtab;
	kmod.km_list.ml_next = mdb_list_next(&kt->k_modlist);

	switch ((uintptr_t)obj) {
	case (uintptr_t)MDB_TGT_OBJ_EXEC:
		/* Search only the executable's primary symtab */
		km = &kmod;
		n = 1;
		break;

	case (uintptr_t)MDB_TGT_OBJ_EVERY:
		/* Search the fake module plus every loaded module */
		km = &kmod;
		n = mdb_nv_size(&kt->k_modules) + 1;
		break;

	case (uintptr_t)MDB_TGT_OBJ_RTLD:
		obj = kt->k_rtld_name;
		/*FALLTHRU*/

	default:
		if ((v = mdb_nv_lookup(&kt->k_modules, obj)) == NULL)
			return (set_errno(EMDB_NOOBJ));

		km = mdb_nv_get_cookie(v);
		n = 1;

		/* Instantiate the module's symtab on first use */
		if (km->km_symtab == NULL)
			kt_load_module(kt, t, km);
	}

	for (; n > 0; n--, km = mdb_list_next(km)) {
		if (mdb_gelf_symtab_lookup_by_name(km->km_symtab, name,
		    symp, &sip->sym_id) == 0) {
			sip->sym_table = MDB_TGT_SYMTAB;
			return (0);
		}
	}

	return (set_errno(EMDB_NOSYM));
}
958
/*
 * Look up the symbol nearest to (or exactly at, with MDB_TGT_SYM_EXACT)
 * address 'addr'.  The symbol name is copied into buf (prefixed with the
 * module name for module-local symbols), the symbol into *symp, and its
 * id/table into *sip.  Returns 0, or -1 with errno set to EMDB_NOSYMADDR.
 */
int
kt_lookup_by_addr(mdb_tgt_t *t, uintptr_t addr, uint_t flags,
    char *buf, size_t nbytes, GElf_Sym *symp, mdb_syminfo_t *sip)
{
	kt_data_t *kt = t->t_data;
	kt_module_t kmods[3], *kmods_begin = &kmods[0], *kmods_end;
	const char *name;

	kt_module_t *km = &kmods[0]; /* Point km at first fake module */
	kt_module_t *sym_km = NULL; /* Module associated with best sym */
	GElf_Sym sym;		/* Best symbol found so far if !exact */
	uint_t symid;		/* ID of best symbol found so far */

	/*
	 * To simplify the implementation, we create fake modules on the stack
	 * that are "prepended" to k_modlist and whose symtab is set to
	 * each of three special symbol tables, in order of precedence.
	 */
	km->km_symtab = mdb.m_prsym;

	if (kt->k_symtab != NULL) {
		km->km_list.ml_next = (mdb_list_t *)(km + 1);
		km = mdb_list_next(km);
		km->km_symtab = kt->k_symtab;
	}

	if (kt->k_dynsym != NULL) {
		km->km_list.ml_next = (mdb_list_t *)(km + 1);
		km = mdb_list_next(km);
		km->km_symtab = kt->k_dynsym;
	}

	km->km_list.ml_next = mdb_list_next(&kt->k_modlist);
	kmods_end = km;

	/*
	 * Now iterate over the list of fake and real modules. If the module
	 * has no symbol table and the address is in the text section,
	 * instantiate the module's symbol table. In exact mode, we can
	 * jump to 'found' immediately if we match. Otherwise we continue
	 * looking and improve our choice if we find a closer symbol.
	 */
	for (km = &kmods[0]; km != NULL; km = mdb_list_next(km)) {
		if (km->km_symtab == NULL && addr >= km->km_text_va &&
		    addr < km->km_text_va + km->km_text_size)
			kt_load_module(kt, t, km);

		if (mdb_gelf_symtab_lookup_by_addr(km->km_symtab, addr,
		    flags, buf, nbytes, symp, &sip->sym_id) != 0 ||
		    symp->st_value == 0)
			continue;

		if (flags & MDB_TGT_SYM_EXACT) {
			sym_km = km;
			goto found;
		}

		/* symid is always assigned whenever sym_km is set here */
		if (sym_km == NULL || mdb_gelf_sym_closer(symp, &sym, addr)) {
			sym_km = km;
			sym = *symp;
			symid = sip->sym_id;
		}
	}

	if (sym_km == NULL)
		return (set_errno(EMDB_NOSYMADDR));

	*symp = sym; /* Copy our best symbol into the caller's symbol */
	sip->sym_id = symid;
found:
	/*
	 * Once we've found something, copy the final name into the caller's
	 * buffer and prefix it with the load object name if appropriate.
	 */
	if (sym_km != NULL) {
		name = mdb_gelf_sym_name(sym_km->km_symtab, symp);

		/* Real modules lie outside the stack array of fakes */
		if (sym_km < kmods_begin || sym_km > kmods_end) {
			(void) mdb_snprintf(buf, nbytes, "%s`%s",
			    sym_km->km_name, name);
		} else if (nbytes > 0) {
			(void) strncpy(buf, name, nbytes);
			buf[nbytes - 1] = '\0';
		}

		if (sym_km->km_symtab == mdb.m_prsym)
			sip->sym_table = MDB_TGT_PRVSYM;
		else
			sip->sym_table = MDB_TGT_SYMTAB;
	} else {
		sip->sym_table = MDB_TGT_SYMTAB;
	}

	return (0);
}
1054
1055 static int
kt_symtab_func(void * data,const GElf_Sym * sym,const char * name,uint_t id)1056 kt_symtab_func(void *data, const GElf_Sym *sym, const char *name, uint_t id)
1057 {
1058 kt_symarg_t *argp = data;
1059
1060 if (mdb_tgt_sym_match(sym, argp->sym_type)) {
1061 argp->sym_info.sym_id = id;
1062
1063 return (argp->sym_cb(argp->sym_data, sym, name,
1064 &argp->sym_info, argp->sym_obj));
1065 }
1066
1067 return (0);
1068 }
1069
1070 static void
kt_symtab_iter(mdb_gelf_symtab_t * gst,uint_t type,const char * obj,mdb_tgt_sym_f * cb,void * p)1071 kt_symtab_iter(mdb_gelf_symtab_t *gst, uint_t type, const char *obj,
1072 mdb_tgt_sym_f *cb, void *p)
1073 {
1074 kt_symarg_t arg;
1075
1076 arg.sym_cb = cb;
1077 arg.sym_data = p;
1078 arg.sym_type = type;
1079 arg.sym_info.sym_table = gst->gst_tabid;
1080 arg.sym_obj = obj;
1081
1082 mdb_gelf_symtab_iter(gst, kt_symtab_func, &arg);
1083 }
1084
1085 int
kt_symbol_iter(mdb_tgt_t * t,const char * obj,uint_t which,uint_t type,mdb_tgt_sym_f * cb,void * data)1086 kt_symbol_iter(mdb_tgt_t *t, const char *obj, uint_t which, uint_t type,
1087 mdb_tgt_sym_f *cb, void *data)
1088 {
1089 kt_data_t *kt = t->t_data;
1090 kt_module_t *km;
1091
1092 mdb_gelf_symtab_t *symtab = NULL;
1093 mdb_var_t *v;
1094
1095 switch ((uintptr_t)obj) {
1096 case (uintptr_t)MDB_TGT_OBJ_EXEC:
1097 if (which == MDB_TGT_SYMTAB)
1098 symtab = kt->k_symtab;
1099 else
1100 symtab = kt->k_dynsym;
1101 break;
1102
1103 case (uintptr_t)MDB_TGT_OBJ_EVERY:
1104 if (which == MDB_TGT_DYNSYM) {
1105 symtab = kt->k_dynsym;
1106 obj = MDB_TGT_OBJ_EXEC;
1107 break;
1108 }
1109
1110 mdb_nv_rewind(&kt->k_modules);
1111 while ((v = mdb_nv_advance(&kt->k_modules)) != NULL) {
1112 km = mdb_nv_get_cookie(v);
1113
1114 if (km->km_symtab == NULL)
1115 kt_load_module(kt, t, km);
1116
1117 if (km->km_symtab != NULL)
1118 kt_symtab_iter(km->km_symtab, type,
1119 km->km_name, cb, data);
1120 }
1121 break;
1122
1123 case (uintptr_t)MDB_TGT_OBJ_RTLD:
1124 obj = kt->k_rtld_name;
1125 /*FALLTHRU*/
1126
1127 default:
1128 v = mdb_nv_lookup(&kt->k_modules, obj);
1129
1130 if (v == NULL)
1131 return (set_errno(EMDB_NOOBJ));
1132
1133 km = mdb_nv_get_cookie(v);
1134
1135 if (km->km_symtab == NULL)
1136 kt_load_module(kt, t, km);
1137
1138 symtab = km->km_symtab;
1139 }
1140
1141 if (symtab)
1142 kt_symtab_iter(symtab, type, obj, cb, data);
1143
1144 return (0);
1145 }
1146
1147 static int
kt_mapping_walk(uintptr_t addr,const void * data,kt_maparg_t * marg)1148 kt_mapping_walk(uintptr_t addr, const void *data, kt_maparg_t *marg)
1149 {
1150 /*
1151 * This is a bit sketchy but avoids problematic compilation of this
1152 * target against the current VM implementation. Now that we have
1153 * vmem, we can make this less broken and more informative by changing
1154 * this code to invoke the vmem walker in the near future.
1155 */
1156 const struct kt_seg {
1157 caddr_t s_base;
1158 size_t s_size;
1159 } *segp = (const struct kt_seg *)data;
1160
1161 mdb_map_t map;
1162 GElf_Sym sym;
1163 mdb_syminfo_t info;
1164
1165 map.map_base = (uintptr_t)segp->s_base;
1166 map.map_size = segp->s_size;
1167 map.map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X;
1168
1169 if (kt_lookup_by_addr(marg->map_target, addr, MDB_TGT_SYM_EXACT,
1170 map.map_name, MDB_TGT_MAPSZ, &sym, &info) == -1) {
1171
1172 (void) mdb_iob_snprintf(map.map_name, MDB_TGT_MAPSZ,
1173 "%lr", addr);
1174 }
1175
1176 return (marg->map_cb(marg->map_data, &map, map.map_name));
1177 }
1178
1179 int
kt_mapping_iter(mdb_tgt_t * t,mdb_tgt_map_f * func,void * private)1180 kt_mapping_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private)
1181 {
1182 kt_data_t *kt = t->t_data;
1183 kt_maparg_t m;
1184
1185 m.map_target = t;
1186 m.map_cb = func;
1187 m.map_data = private;
1188
1189 return (mdb_pwalk("seg", (mdb_walk_cb_t)kt_mapping_walk, &m,
1190 (uintptr_t)kt->k_as));
1191 }
1192
1193 static const mdb_map_t *
kt_module_to_map(kt_module_t * km,mdb_map_t * map)1194 kt_module_to_map(kt_module_t *km, mdb_map_t *map)
1195 {
1196 (void) strncpy(map->map_name, km->km_name, MDB_TGT_MAPSZ);
1197 map->map_name[MDB_TGT_MAPSZ - 1] = '\0';
1198 map->map_base = km->km_text_va;
1199 map->map_size = km->km_text_size;
1200 map->map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X;
1201
1202 return (map);
1203 }
1204
1205 int
kt_object_iter(mdb_tgt_t * t,mdb_tgt_map_f * func,void * private)1206 kt_object_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private)
1207 {
1208 kt_data_t *kt = t->t_data;
1209 kt_module_t *km;
1210 mdb_map_t m;
1211
1212 for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
1213 if (func(private, kt_module_to_map(km, &m), km->km_name) == -1)
1214 break;
1215 }
1216
1217 return (0);
1218 }
1219
1220 const mdb_map_t *
kt_addr_to_map(mdb_tgt_t * t,uintptr_t addr)1221 kt_addr_to_map(mdb_tgt_t *t, uintptr_t addr)
1222 {
1223 kt_data_t *kt = t->t_data;
1224 kt_module_t *km;
1225
1226 for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
1227 if (addr - km->km_text_va < km->km_text_size ||
1228 addr - km->km_data_va < km->km_data_size ||
1229 addr - km->km_bss_va < km->km_bss_size)
1230 return (kt_module_to_map(km, &kt->k_map));
1231 }
1232
1233 (void) set_errno(EMDB_NOMAP);
1234 return (NULL);
1235 }
1236
1237 const mdb_map_t *
kt_name_to_map(mdb_tgt_t * t,const char * name)1238 kt_name_to_map(mdb_tgt_t *t, const char *name)
1239 {
1240 kt_data_t *kt = t->t_data;
1241 kt_module_t *km;
1242 mdb_map_t m;
1243
1244 /*
1245 * If name is MDB_TGT_OBJ_EXEC, return the first module on the list,
1246 * which will be unix since we keep k_modlist in load order.
1247 */
1248 if (name == MDB_TGT_OBJ_EXEC)
1249 return (kt_module_to_map(mdb_list_next(&kt->k_modlist), &m));
1250
1251 if (name == MDB_TGT_OBJ_RTLD)
1252 name = kt->k_rtld_name;
1253
1254 if ((km = kt_module_by_name(kt, name)) != NULL)
1255 return (kt_module_to_map(km, &m));
1256
1257 (void) set_errno(EMDB_NOOBJ);
1258 return (NULL);
1259 }
1260
/*
 * Return the CTF container for the given module, reading and opening the
 * CTF data on first use.  The raw buffer and open container are cached in
 * km_ctf_buf and km_ctfp (freed in kt_destroy()).  Returns NULL with errno
 * set, possibly after a warning, if the module has no CTF data or it could
 * not be read or opened.
 */
static ctf_file_t *
kt_load_ctfdata(mdb_tgt_t *t, kt_module_t *km)
{
	kt_data_t *kt = t->t_data;
	int err;

	/* Already opened: return the cached container. */
	if (km->km_ctfp != NULL)
		return (km->km_ctfp);

	/* No CTF section address was recorded for this module. */
	if (km->km_ctf_va == 0) {
		(void) set_errno(EMDB_NOCTF);
		return (NULL);
	}

	/*
	 * Fault in the module's data first: the mdb_ctf_bufopen() call below
	 * consumes km_symbuf/km_strtab and their headers, which are
	 * presumably populated by kt_load_module() — confirm if modifying.
	 */
	if (km->km_symtab == NULL)
		kt_load_module(t->t_data, t, km);

	if ((km->km_ctf_buf = mdb_alloc(km->km_ctf_size, UM_NOSLEEP)) == NULL) {
		warn("failed to allocate memory to load %s debugging "
		    "information", km->km_name);
		return (NULL);
	}

	/* Copy the raw CTF data out of the target's address space. */
	if (mdb_tgt_vread(t, km->km_ctf_buf, km->km_ctf_size,
	    km->km_ctf_va) != km->km_ctf_size) {
		warn("failed to read %lu bytes of debug data for %s at %p",
		    (ulong_t)km->km_ctf_size, km->km_name,
		    (void *)km->km_ctf_va);
		mdb_free(km->km_ctf_buf, km->km_ctf_size);
		km->km_ctf_buf = NULL;
		return (NULL);
	}

	/* On open failure, release the buffer so a later retry starts clean. */
	if ((km->km_ctfp = mdb_ctf_bufopen((const void *)km->km_ctf_buf,
	    km->km_ctf_size, km->km_symbuf, &km->km_symtab_hdr,
	    km->km_strtab, &km->km_strtab_hdr, &err)) == NULL) {
		mdb_free(km->km_ctf_buf, km->km_ctf_size);
		km->km_ctf_buf = NULL;
		(void) set_errno(ctf_to_errno(err));
		return (NULL);
	}

	mdb_dprintf(MDB_DBG_KMOD, "loaded %lu bytes of CTF data for %s\n",
	    (ulong_t)km->km_ctf_size, km->km_name);

	/*
	 * If this module's CTF is a child container, find the parent module,
	 * load its CTF data (recursively) if it is not yet open, and import
	 * the parent's types so the child's type references resolve.  Import
	 * failure is reported but does not fail the load.
	 */
	if (ctf_parent_name(km->km_ctfp) != NULL) {
		mdb_var_t *v;

		if ((v = mdb_nv_lookup(&kt->k_modules,
		    ctf_parent_name(km->km_ctfp))) == NULL) {
			warn("failed to load CTF data for %s - parent %s not "
			    "loaded\n", km->km_name,
			    ctf_parent_name(km->km_ctfp));
		}

		if (v != NULL) {
			kt_module_t *pm = mdb_nv_get_cookie(v);

			if (pm->km_ctfp == NULL)
				(void) kt_load_ctfdata(t, pm);

			if (pm->km_ctfp != NULL && ctf_import(km->km_ctfp,
			    pm->km_ctfp) == CTF_ERR) {
				warn("failed to import parent types into "
				    "%s: %s\n", km->km_name,
				    ctf_errmsg(ctf_errno(km->km_ctfp)));
			}
		}
	}

	return (km->km_ctfp);
}
1333
1334 ctf_file_t *
kt_addr_to_ctf(mdb_tgt_t * t,uintptr_t addr)1335 kt_addr_to_ctf(mdb_tgt_t *t, uintptr_t addr)
1336 {
1337 kt_data_t *kt = t->t_data;
1338 kt_module_t *km;
1339
1340 for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
1341 if (addr - km->km_text_va < km->km_text_size ||
1342 addr - km->km_data_va < km->km_data_size ||
1343 addr - km->km_bss_va < km->km_bss_size)
1344 return (kt_load_ctfdata(t, km));
1345 }
1346
1347 (void) set_errno(EMDB_NOMAP);
1348 return (NULL);
1349 }
1350
1351 ctf_file_t *
kt_name_to_ctf(mdb_tgt_t * t,const char * name)1352 kt_name_to_ctf(mdb_tgt_t *t, const char *name)
1353 {
1354 kt_data_t *kt = t->t_data;
1355 kt_module_t *km;
1356
1357 if (name == MDB_TGT_OBJ_EXEC)
1358 name = KT_CTFPARENT;
1359 else if (name == MDB_TGT_OBJ_RTLD)
1360 name = kt->k_rtld_name;
1361
1362 if ((km = kt_module_by_name(kt, name)) != NULL)
1363 return (kt_load_ctfdata(t, km));
1364
1365 (void) set_errno(EMDB_NOOBJ);
1366 return (NULL);
1367 }
1368
1369 /*ARGSUSED*/
1370 int
kt_status(mdb_tgt_t * t,mdb_tgt_status_t * tsp)1371 kt_status(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1372 {
1373 kt_data_t *kt = t->t_data;
1374 bzero(tsp, sizeof (mdb_tgt_status_t));
1375 tsp->st_state = (kt->k_xpv_domu || (kt->k_dumphdr != NULL)) ?
1376 MDB_TGT_DEAD : MDB_TGT_RUNNING;
1377 return (0);
1378 }
1379
1380 static ssize_t
kt_xd_dumphdr(mdb_tgt_t * t,void * buf,size_t nbytes)1381 kt_xd_dumphdr(mdb_tgt_t *t, void *buf, size_t nbytes)
1382 {
1383 kt_data_t *kt = t->t_data;
1384
1385 if (buf == NULL && nbytes == 0)
1386 return (sizeof (dumphdr_t));
1387
1388 if (kt->k_dumphdr == NULL)
1389 return (set_errno(ENODATA));
1390
1391 nbytes = MIN(nbytes, sizeof (dumphdr_t));
1392 bcopy(kt->k_dumphdr, buf, nbytes);
1393
1394 return (nbytes);
1395 }
1396
/*
 * Tear down the kvm target: unload the kernel support module, free all
 * cached target state (registers, executable symbol tables, dump header),
 * destroy the ELF file and backend handle, release every per-module
 * structure, and finally free the target data itself.
 */
void
kt_destroy(mdb_tgt_t *t)
{
	kt_data_t *kt = t->t_data;
	kt_module_t *km, *nkm;

	(void) mdb_module_unload(KT_MODULE, 0);

	if (kt->k_regs != NULL)
		mdb_free(kt->k_regs, kt->k_regsize);

	if (kt->k_symtab != NULL)
		mdb_gelf_symtab_destroy(kt->k_symtab);

	if (kt->k_dynsym != NULL)
		mdb_gelf_symtab_destroy(kt->k_dynsym);

	if (kt->k_dumphdr != NULL)
		mdb_free(kt->k_dumphdr, sizeof (dumphdr_t));

	mdb_gelf_destroy(kt->k_file);

	(void) kt->k_kb_ops->kb_close(kt->k_cookie);

	/*
	 * Free each module's resources.  Note nkm is captured before the
	 * module is freed so the list walk remains valid.
	 */
	for (km = mdb_list_next(&kt->k_modlist); km; km = nkm) {
		if (km->km_symtab)
			mdb_gelf_symtab_destroy(km->km_symtab);

		if (km->km_data)
			mdb_free(km->km_data, km->km_datasz);

		if (km->km_ctfp)
			ctf_close(km->km_ctfp);

		if (km->km_ctf_buf != NULL)
			mdb_free(km->km_ctf_buf, km->km_ctf_size);

		nkm = mdb_list_next(km);
		strfree(km->km_name);
		mdb_free(km, sizeof (kt_module_t));
	}

	mdb_nv_destroy(&kt->k_modules);

	/* k_symfile may be NULL for a hypervisor DomU dump; k_kvmfile never. */
	strfree(kt->k_kvmfile);
	if (kt->k_symfile != NULL)
		strfree(kt->k_symfile);

	mdb_free(kt, sizeof (kt_data_t));
}
1447
/*
 * Placeholder installed for the dump-related function pointers
 * (k_dump_print_content, k_dump_find_curproc) until real implementations
 * are provided; it simply fails.
 */
static int
kt_data_stub(void)
{
	return (-1);
}
1453
/*
 * Construct the libkvm kernel target.  With two arguments (symbol file and
 * kvm file) we open a conventional crash dump or live kernel through the
 * libkvm backend; with one argument on x86 we open a hypervisor DomU dump
 * through a dynamically loaded "mdb_kb" backend.  On success the target's
 * ELF file, symbol tables, key kernel symbols, ISA-specific state, and
 * (for dumps) the dump header are initialized.  Returns 0 on success, or
 * -1 with everything acquired so far released.
 */
int
mdb_kvm_tgt_create(mdb_tgt_t *t, int argc, const char *argv[])
{
	kt_data_t *kt = mdb_zalloc(sizeof (kt_data_t), UM_SLEEP);
	mdb_kb_ops_t *kvm_kb_ops = libkvm_kb_ops();
	int oflag = (t->t_flags & MDB_TGT_F_RDWR) ? O_RDWR : O_RDONLY;
	struct utsname uts;
	GElf_Sym sym;
	pgcnt_t pmem;


	if (argc == 2) {
		/* Conventional case: symbol file + kvm (core or /dev/kmem). */
		kt->k_symfile = strdup(argv[0]);
		kt->k_kvmfile = strdup(argv[1]);

		kt->k_cookie = kvm_kb_ops->kb_open(kt->k_symfile,
		    kt->k_kvmfile, NULL, oflag, (char *)mdb.m_pname);

		if (kt->k_cookie == NULL)
			goto err;

		kt->k_xpv_domu = 0;
		kt->k_kb_ops = kvm_kb_ops;
	} else {
#ifndef __x86
		/* Hypervisor DomU dumps are only supported on x86. */
		return (set_errno(EINVAL));
#else
		mdb_kb_ops_t *(*getops)(void);

		kt->k_symfile = NULL;
		kt->k_kvmfile = strdup(argv[0]);

		getops = (mdb_kb_ops_t *(*)())dlsym(RTLD_NEXT, "mdb_kb_ops");

		/*
		 * Load mdb_kb if it's not already loaded during
		 * identification.
		 */
		if (getops == NULL) {
			(void) mdb_module_load("mdb_kb",
			    MDB_MOD_GLOBAL | MDB_MOD_SILENT);
			getops = (mdb_kb_ops_t *(*)())
			    dlsym(RTLD_NEXT, "mdb_kb_ops");
		}

		if (getops == NULL || (kt->k_kb_ops = getops()) == NULL) {
			warn("failed to load KVM backend ops\n");
			goto err;
		}

		kt->k_cookie = kt->k_kb_ops->kb_open(NULL, kt->k_kvmfile, NULL,
		    oflag, (char *)mdb.m_pname);

		if (kt->k_cookie == NULL)
			goto err;

		kt->k_xpv_domu = 1;
#endif
	}

	/* Obtain an I/O object for the symbol file and parse its ELF data. */
	if ((kt->k_fio = kt->k_kb_ops->kb_sym_io(kt->k_cookie,
	    kt->k_symfile)) == NULL)
		goto err;

	if ((kt->k_file = mdb_gelf_create(kt->k_fio,
	    ET_EXEC, GF_FILE)) == NULL) {
		mdb_io_destroy(kt->k_fio);
		goto err;
	}

	kt->k_symtab =
	    mdb_gelf_symtab_create_file(kt->k_file, SHT_SYMTAB, MDB_TGT_SYMTAB);

	kt->k_dynsym =
	    mdb_gelf_symtab_create_file(kt->k_file, SHT_DYNSYM, MDB_TGT_DYNSYM);

	/* 'kas' gives us the address of the kernel's address space. */
	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "kas",
	    &sym, NULL) == -1) {
		warn("'kas' symbol is missing from kernel\n");
		goto err;
	}

	kt->k_as = (struct as *)(uintptr_t)sym.st_value;

	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "platform",
	    &sym, NULL) == -1) {
		warn("'platform' symbol is missing from kernel\n");
		goto err;
	}

	if (kt->k_kb_ops->kb_kread(kt->k_cookie, sym.st_value,
	    kt->k_platform, MAXNAMELEN) <= 0) {
		warn("failed to read 'platform' string from kernel");
		goto err;
	}

	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "utsname",
	    &sym, NULL) == -1) {
		warn("'utsname' symbol is missing from kernel\n");
		goto err;
	}

	if (kt->k_kb_ops->kb_kread(kt->k_cookie, sym.st_value, &uts,
	    sizeof (uts)) <= 0) {
		warn("failed to read 'utsname' struct from kernel");
		goto err;
	}

	/* Dump hooks start as failing stubs until mdb_ks provides them. */
	kt->k_dump_print_content = (void (*)())(uintptr_t)kt_data_stub;
	kt->k_dump_find_curproc = kt_data_stub;

	/*
	 * We set k_ctfvalid based on the presence of the CTF vmem arena
	 * symbol. The CTF members were added to the end of struct module at
	 * the same time, so this allows us to know whether we can use them.
	 */
	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "ctf_arena", &sym,
	    NULL) == 0 && !(mdb.m_flags & MDB_FL_NOCTF))
		kt->k_ctfvalid = 1;

	(void) mdb_nv_create(&kt->k_modules, UM_SLEEP);
	t->t_pshandle = kt->k_cookie;
	t->t_data = kt;

	/* Dispatch to the ISA-specific part of the target's initialization. */
#if defined(__sparc)
#if defined(__sparcv9)
	kt_sparcv9_init(t);
#else
	kt_sparcv7_init(t);
#endif
#elif defined(__amd64)
	kt_amd64_init(t);
#elif defined(__i386)
	kt_ia32_init(t);
#else
#error "unknown ISA"
#endif

	/*
	 * We read our representative thread ID (address) from the kernel's
	 * global panic_thread.  It will remain 0 if this is a live kernel.
	 */
	(void) mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &kt->k_tid, sizeof (void *),
	    MDB_TGT_OBJ_EXEC, "panic_thread");

	/* In adb compatibility mode, report physical memory at startup. */
	if ((mdb.m_flags & MDB_FL_ADB) && mdb_tgt_readsym(t, MDB_TGT_AS_VIRT,
	    &pmem, sizeof (pmem), MDB_TGT_OBJ_EXEC, "physmem") == sizeof (pmem))
		mdb_printf("physmem %lx\n", (ulong_t)pmem);

	/*
	 * If this is not a live kernel or a hypervisor dump, read the dump
	 * header.  We don't have to sanity-check the header, as the open would
	 * not have succeeded otherwise.
	 */
	if (!kt->k_xpv_domu && strcmp(kt->k_symfile, "/dev/ksyms") != 0) {
		mdb_io_t *vmcore;

		kt->k_dumphdr = mdb_alloc(sizeof (dumphdr_t), UM_SLEEP);

		if ((vmcore = mdb_fdio_create_path(NULL, kt->k_kvmfile,
		    O_RDONLY, 0)) == NULL) {
			mdb_warn("failed to open %s", kt->k_kvmfile);
			goto err;
		}

		if (IOP_READ(vmcore, kt->k_dumphdr, sizeof (dumphdr_t)) !=
		    sizeof (dumphdr_t)) {
			mdb_warn("failed to read dump header");
			mdb_io_destroy(vmcore);
			goto err;
		}

		mdb_io_destroy(vmcore);

		(void) mdb_tgt_xdata_insert(t, "dumphdr",
		    "dump header structure", kt_xd_dumphdr);
	}

	return (0);

err:
	/* Unwind whatever was acquired before the failure, then fail. */
	if (kt->k_dumphdr != NULL)
		mdb_free(kt->k_dumphdr, sizeof (dumphdr_t));

	if (kt->k_symtab != NULL)
		mdb_gelf_symtab_destroy(kt->k_symtab);

	if (kt->k_dynsym != NULL)
		mdb_gelf_symtab_destroy(kt->k_dynsym);

	if (kt->k_file != NULL)
		mdb_gelf_destroy(kt->k_file);

	if (kt->k_cookie != NULL)
		(void) kt->k_kb_ops->kb_close(kt->k_cookie);

	mdb_free(kt, sizeof (kt_data_t));
	return (-1);
}
1653
1654 int
mdb_kvm_is_dump(mdb_io_t * io)1655 mdb_kvm_is_dump(mdb_io_t *io)
1656 {
1657 dumphdr_t h;
1658
1659 (void) IOP_SEEK(io, (off64_t)0L, SEEK_SET);
1660
1661 return (IOP_READ(io, &h, sizeof (dumphdr_t)) == sizeof (dumphdr_t) &&
1662 h.dump_magic == DUMP_MAGIC);
1663 }
1664
1665 int
mdb_kvm_is_compressed_dump(mdb_io_t * io)1666 mdb_kvm_is_compressed_dump(mdb_io_t *io)
1667 {
1668 dumphdr_t h;
1669
1670 (void) IOP_SEEK(io, (off64_t)0L, SEEK_SET);
1671
1672 return (IOP_READ(io, &h, sizeof (dumphdr_t)) == sizeof (dumphdr_t) &&
1673 h.dump_magic == DUMP_MAGIC &&
1674 (h.dump_flags & DF_COMPRESSED) != 0);
1675 }
1676