1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * Copyright 2019 Joyent, Inc.
27 */
28
29 /*
30 * Libkvm Kernel Target
31 *
32 * The libkvm kernel target provides access to both crash dumps and live
33 * kernels through /dev/ksyms and /dev/kmem, using the facilities provided by
34 * the libkvm.so library. The target-specific data structures are shared
35 * between this file (common code) and the ISA-dependent parts of the target,
36 * and so they are defined in the mdb_kvm.h header. The target processes an
37 * "executable" (/dev/ksyms or the unix.X file) which contains a primary
38 * .symtab and .dynsym, and then also iterates over the krtld module chain in
39 * the kernel in order to obtain a list of loaded modules and per-module symbol
40 * tables. To improve startup performance, the per-module symbol tables are
41 * instantiated on-the-fly whenever an address lookup falls within the text
42 * section of a given module. The target also relies on services from the
43 * mdb_ks (kernel support) module, which contains pieces of the implementation
44 * that must be compiled against the kernel implementation.
45 */
46
47 #include <sys/modctl.h>
48 #include <sys/kobj.h>
49 #include <sys/kobj_impl.h>
50 #include <sys/utsname.h>
51 #include <sys/panic.h>
52 #include <sys/dumphdr.h>
53 #include <sys/dumpadm.h>
54 #include <sys/uuid.h>
55
56 #include <dlfcn.h>
57 #include <libctf.h>
58 #include <string.h>
59 #include <fcntl.h>
60 #include <errno.h>
61
62 #include <mdb/mdb_target_impl.h>
63 #include <mdb/mdb_err.h>
64 #include <mdb/mdb_debug.h>
65 #include <mdb/mdb_string.h>
66 #include <mdb/mdb_modapi.h>
67 #include <mdb/mdb_io_impl.h>
68 #include <mdb/mdb_ctf.h>
69 #include <mdb/mdb_kvm.h>
70 #include <mdb/mdb_module.h>
71 #include <mdb/mdb_kb.h>
72 #include <mdb/mdb_ks.h>
73 #include <mdb/mdb.h>
74
/*
 * Relocate a pointer that refers into the kernel's symspace buffer (whose
 * original base is 'obase') so that it refers into our local copy of the
 * buffer (whose base is 'nbase').
 */
#define KT_RELOC_BUF(buf, obase, nbase) \
	((uintptr_t)(buf) - (uintptr_t)(obase) + (uintptr_t)(nbase))

/*
 * Evaluates to true if 'buf' lies outside the range [base, base + size).
 * Used to sanity-check pointers obtained from a possibly corrupt dump.
 */
#define KT_BAD_BUF(buf, base, size) \
	((uintptr_t)(buf) < (uintptr_t)(base) || \
	((uintptr_t)(buf) >= (uintptr_t)(base) + (uintptr_t)(size)))

/* Argument bundle passed through mdb_gelf_symtab_iter() to kt_symtab_func() */
typedef struct kt_symarg {
	mdb_tgt_sym_f *sym_cb;		/* Caller's callback function */
	void *sym_data;			/* Callback function argument */
	uint_t sym_type;		/* Symbol type/binding filter */
	mdb_syminfo_t sym_info;		/* Symbol id and table id */
	const char *sym_obj;		/* Containing object */
} kt_symarg_t;

/* Argument bundle passed through the "seg" walker to kt_mapping_walk() */
typedef struct kt_maparg {
	mdb_tgt_t *map_target;		/* Target used for mapping iter */
	mdb_tgt_map_f *map_cb;		/* Caller's callback function */
	void *map_data;			/* Callback function argument */
} kt_maparg_t;

static const char KT_MODULE[] = "mdb_ks";	/* Kernel support dmod name */
static const char KT_CTFPARENT[] = "genunix";	/* Parent CTF container name */

/* Optional build-version printer, resolved via dlsym() in kt_activate() */
static void (*print_buildversion)(void);
100
/*
 * Instantiate the symbol table for the given kernel module: read the
 * module's contiguous symspace (which contains both .symtab and .strtab)
 * out of the target into a local buffer, rebase the embedded symtab and
 * strtab pointers into that local copy, and construct a GElf symbol table
 * from the raw buffers.
 */
static void
kt_load_module(kt_data_t *kt, mdb_tgt_t *t, kt_module_t *km)
{
	km->km_data = mdb_alloc(km->km_datasz, UM_SLEEP);

	/* Best-effort read: a short read leaves stale bytes in km_data. */
	(void) mdb_tgt_vread(t, km->km_data, km->km_datasz, km->km_symspace_va);

	/*
	 * The symtab and strtab live inside the symspace; convert their
	 * kernel virtual addresses into pointers into our local copy.
	 */
	km->km_symbuf = (void *)
	    KT_RELOC_BUF(km->km_symtab_va, km->km_symspace_va, km->km_data);

	km->km_strtab = (char *)
	    KT_RELOC_BUF(km->km_strtab_va, km->km_symspace_va, km->km_data);

	km->km_symtab = mdb_gelf_symtab_create_raw(&kt->k_file->gf_ehdr,
	    &km->km_symtab_hdr, km->km_symbuf,
	    &km->km_strtab_hdr, km->km_strtab, MDB_TGT_SYMTAB);
}
118
/*
 * Walk the kernel's circular list of modctl structures (rooted at the
 * "modules" symbol) and create a kt_module_t for each loaded module,
 * recording the kernel virtual addresses of its symbol space and its
 * text/data/bss ranges.  Per-module symbol tables are only instantiated
 * here when MDB_TGT_F_PRELOAD is set; otherwise they are faulted in
 * lazily by kt_load_module() on first lookup.
 */
static void
kt_load_modules(kt_data_t *kt, mdb_tgt_t *t)
{
	char name[MAXNAMELEN];
	uintptr_t addr, head;

	struct module kmod;
	struct modctl ctl;
	Shdr symhdr, strhdr;
	GElf_Sym sym;

	kt_module_t *km;

	if (mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
	    "modules", &sym, NULL) == -1) {
		warn("failed to get 'modules' symbol");
		return;
	}

	if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &ctl, sizeof (ctl),
	    MDB_TGT_OBJ_EXEC, "modules") != sizeof (ctl)) {
		warn("failed to read 'modules' struct");
		return;
	}

	/* The modctl list is circular; remember the head so we can stop. */
	addr = head = (uintptr_t)sym.st_value;

	/*
	 * Note: each 'continue' below skips to the loop condition, which
	 * advances via ctl.mod_next -- 'ctl' has always been read by then.
	 */
	do {
		if (addr == 0)
			break; /* Avoid spurious NULL pointers in list */

		if (mdb_tgt_vread(t, &ctl, sizeof (ctl), addr) == -1) {
			warn("failed to read modctl at %p", (void *)addr);
			return;
		}

		if (ctl.mod_mp == NULL)
			continue; /* No associated krtld structure */

		if (mdb_tgt_readstr(t, MDB_TGT_AS_VIRT, name, MAXNAMELEN,
		    (uintptr_t)ctl.mod_modname) <= 0) {
			warn("failed to read module name at %p",
			    (void *)ctl.mod_modname);
			continue;
		}

		mdb_dprintf(MDB_DBG_KMOD, "reading mod %s (%p)\n",
		    name, (void *)addr);

		if (mdb_nv_lookup(&kt->k_modules, name) != NULL) {
			warn("skipping duplicate module '%s', id=%d\n",
			    name, ctl.mod_id);
			continue;
		}

		if (mdb_tgt_vread(t, &kmod, sizeof (kmod),
		    (uintptr_t)ctl.mod_mp) == -1) {
			warn("failed to read module at %p\n",
			    (void *)ctl.mod_mp);
			continue;
		}

		if (kmod.symspace == NULL || kmod.symhdr == NULL ||
		    kmod.strhdr == NULL) {
			/*
			 * If no buffer for the symbols has been allocated,
			 * or the shdrs for .symtab and .strtab are missing,
			 * then we're out of luck.
			 */
			continue;
		}

		if (mdb_tgt_vread(t, &symhdr, sizeof (Shdr),
		    (uintptr_t)kmod.symhdr) == -1) {
			warn("failed to read .symtab header for '%s', id=%d",
			    name, ctl.mod_id);
			continue;
		}

		if (mdb_tgt_vread(t, &strhdr, sizeof (Shdr),
		    (uintptr_t)kmod.strhdr) == -1) {
			warn("failed to read .strtab header for '%s', id=%d",
			    name, ctl.mod_id);
			continue;
		}

		/*
		 * Now get clever: f(*^ing krtld didn't used to bother updating
		 * its own kmod.symsize value. We know that prior to this bug
		 * being fixed, symspace was a contiguous buffer containing
		 * .symtab, .strtab, and the symbol hash table in that order.
		 * So if symsize is zero, recompute it as the size of .symtab
		 * plus the size of .strtab. We don't need to load the hash
		 * table anyway since we re-hash all the symbols internally.
		 */
		if (kmod.symsize == 0)
			kmod.symsize = symhdr.sh_size + strhdr.sh_size;

		/*
		 * Similar logic can be used to make educated guesses
		 * at the values of kmod.symtbl and kmod.strings.
		 */
		if (kmod.symtbl == NULL)
			kmod.symtbl = kmod.symspace;
		if (kmod.strings == NULL)
			kmod.strings = kmod.symspace + symhdr.sh_size;

		/*
		 * Make sure things seem reasonable before we proceed
		 * to actually read and decipher the symspace.
		 */
		if (KT_BAD_BUF(kmod.symtbl, kmod.symspace, kmod.symsize) ||
		    KT_BAD_BUF(kmod.strings, kmod.symspace, kmod.symsize)) {
			warn("skipping module '%s', id=%d (corrupt symspace)\n",
			    name, ctl.mod_id);
			continue;
		}

		km = mdb_zalloc(sizeof (kt_module_t), UM_SLEEP);
		km->km_name = strdup(name);

		(void) mdb_nv_insert(&kt->k_modules, km->km_name, NULL,
		    (uintptr_t)km, MDB_NV_EXTNAME);

		/* Record symspace layout and section ranges for lazy load. */
		km->km_datasz = kmod.symsize;
		km->km_symspace_va = (uintptr_t)kmod.symspace;
		km->km_symtab_va = (uintptr_t)kmod.symtbl;
		km->km_strtab_va = (uintptr_t)kmod.strings;
		km->km_symtab_hdr = symhdr;
		km->km_strtab_hdr = strhdr;
		km->km_text_va = (uintptr_t)kmod.text;
		km->km_text_size = kmod.text_size;
		km->km_data_va = (uintptr_t)kmod.data;
		km->km_data_size = kmod.data_size;
		km->km_bss_va = (uintptr_t)kmod.bss;
		km->km_bss_size = kmod.bss_size;

		if (kt->k_ctfvalid) {
			km->km_ctf_va = (uintptr_t)kmod.ctfdata;
			km->km_ctf_size = kmod.ctfsize;
		}

		/*
		 * Add the module to the end of the list of modules in load-
		 * dependency order. This is needed to load the corresponding
		 * debugger modules in the same order for layering purposes.
		 */
		mdb_list_append(&kt->k_modlist, km);

		if (t->t_flags & MDB_TGT_F_PRELOAD) {
			mdb_iob_printf(mdb.m_out, " %s", name);
			mdb_iob_flush(mdb.m_out);
			kt_load_module(kt, t, km);
		}

	} while ((addr = (uintptr_t)ctl.mod_next) != head);
}
276
/*
 * Change the target's open flags (MDB_TGT_F_RDWR and/or MDB_TGT_F_ALLOWIO).
 * Because both properties are determined by which device we have open and
 * in what mode, this requires re-opening the libkvm cookie.  On failure
 * the old cookie and t_flags are left untouched; on success the old cookie
 * is closed and t_flags is updated to reflect the requested flags.
 * Returns 0 on success, -1 on failure.
 */
int
kt_setflags(mdb_tgt_t *t, int flags)
{
	/* An I/O-device change is only meaningful on a live kernel. */
	int iochg = ((flags ^ t->t_flags) & MDB_TGT_F_ALLOWIO) &&
	    !mdb_prop_postmortem;
	int rwchg = (flags ^ t->t_flags) & MDB_TGT_F_RDWR;
	kt_data_t *kt = t->t_data;
	const char *kvmfile;
	void *cookie;
	int mode;

	if (!iochg && !rwchg)
		return (0);	/* Nothing to do */

	/* Xen DomU dumps are never writable. */
	if (kt->k_xpv_domu) {
		warn("read-only target");
		return (-1);
	}

	if (iochg) {
		kvmfile = (flags & MDB_TGT_F_ALLOWIO) ? "/dev/allkmem" :
		    "/dev/kmem";
	} else {
		kvmfile = kt->k_kvmfile;
	}

	mode = (flags & MDB_TGT_F_RDWR) ? O_RDWR : O_RDONLY;

	if ((cookie = kt->k_kb_ops->kb_open(kt->k_symfile, kvmfile, NULL, mode,
	    mdb.m_pname)) == NULL) {
		/* We failed to re-open, so don't change t_flags */
		warn("failed to re-open target");
		return (-1);
	}

	/*
	 * We successfully reopened the target, so update k_kvmfile. Also set
	 * the RDWR and ALLOWIO bits in t_flags to match those in flags.
	 */
	(void) kt->k_kb_ops->kb_close(kt->k_cookie);
	kt->k_cookie = cookie;

	/* Pointer compare: kvmfile either aliases k_kvmfile or is a literal. */
	if (kvmfile != kt->k_kvmfile) {
		strfree(kt->k_kvmfile);
		kt->k_kvmfile = strdup(kvmfile);
	}

	t->t_flags = (t->t_flags & ~(MDB_TGT_F_RDWR | MDB_TGT_F_ALLOWIO)) |
	    (flags & (MDB_TGT_F_RDWR | MDB_TGT_F_ALLOWIO));

	return (0);
}
329
330 /*
331 * Determine which PIDs (if any) have their pages saved in the dump. We
332 * do this by looking for content flags in dump_flags in the header. These
333 * flags, which won't be set in older dumps, tell us whether a single process
334 * has had its pages included in the dump. If a single process has been
335 * included, we need to get the PID for that process from the dump_pids
336 * array in the dump.
337 */
338 static int
kt_find_dump_contents(kt_data_t * kt)339 kt_find_dump_contents(kt_data_t *kt)
340 {
341 dumphdr_t *dh = kt->k_dumphdr;
342 pid_t pid = -1;
343
344 if (dh->dump_flags & DF_ALL)
345 return (KT_DUMPCONTENT_ALL);
346
347 if (dh->dump_flags & DF_CURPROC) {
348 if ((pid = kt->k_dump_find_curproc()) == -1)
349 return (KT_DUMPCONTENT_INVALID);
350 else
351 return (pid);
352 } else {
353 return (KT_DUMPCONTENT_KERNEL);
354 }
355 }
356
/*
 * Return nonzero if the dump contains the pages for the proc_t whose
 * address is 'context'.  k_dumpcontent is either one of the special
 * KT_DUMPCONTENT_* tokens or the PID of the single dumped process; in
 * the latter case we translate the proc pointer to a PID using the
 * mdb_kproc_pid() helper from mdb_ks, found via dlsym(RTLD_NEXT) since
 * the dmod is loaded into our own address space.  If we can't decide,
 * we err on the side of allowing the context switch (return 1).
 */
static int
kt_dump_contains_proc(mdb_tgt_t *t, void *context)
{
	kt_data_t *kt = t->t_data;
	pid_t (*f_pid)(uintptr_t);
	pid_t reqpid;

	switch (kt->k_dumpcontent) {
	case KT_DUMPCONTENT_KERNEL:
		return (0);	/* Kernel-only dump: no proc pages */
	case KT_DUMPCONTENT_ALL:
		return (1);	/* All process pages present */
	case KT_DUMPCONTENT_INVALID:
		goto procnotfound;
	default:
		/* k_dumpcontent is the PID of the single dumped process */
		f_pid = (pid_t (*)()) dlsym(RTLD_NEXT, "mdb_kproc_pid");
		if (f_pid == NULL)
			goto procnotfound;

		reqpid = f_pid((uintptr_t)context);
		if (reqpid == -1)
			goto procnotfound;

		return (kt->k_dumpcontent == reqpid);
	}

procnotfound:
	warn("unable to determine whether dump contains proc %p\n", context);
	return (1);
}
387
/*
 * Switch the debugger context.  A non-NULL 'context' is a proc_t address:
 * we verify the dump actually contains that process's pages (for curproc
 * dumps) and then create and activate a kproc target layered on top of
 * this one.  A NULL context means "back to the kernel"; activation of
 * the kernel target is handled by the caller's teardown of the kproc
 * target.  Returns 0 on success, -1 on failure.
 */
int
kt_setcontext(mdb_tgt_t *t, void *context)
{
	if (context != NULL) {
		const char *argv[2];
		int argc = 0;
		mdb_tgt_t *ct;
		kt_data_t *kt = t->t_data;

		/* The kproc target takes the proc address as its only arg. */
		argv[argc++] = (const char *)context;
		argv[argc] = NULL;

		if (kt->k_dumphdr != NULL &&
		    !kt_dump_contains_proc(t, context)) {
			warn("dump does not contain pages for proc %p\n",
			    context);
			return (-1);
		}

		if ((ct = mdb_tgt_create(mdb_kproc_tgt_create,
		    t->t_flags, argc, argv)) == NULL)
			return (-1);

		mdb_printf("debugger context set to proc %p\n", context);
		mdb_tgt_activate(ct);
	} else
		mdb_printf("debugger context set to kernel\n");

	return (0);
}
418
419 static int
kt_stack(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)420 kt_stack(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
421 {
422 kt_data_t *kt = mdb.m_target->t_data;
423 return (kt->k_dcmd_stack(addr, flags, argc, argv));
424 }
425
426 static int
kt_stackv(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)427 kt_stackv(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
428 {
429 kt_data_t *kt = mdb.m_target->t_data;
430 return (kt->k_dcmd_stackv(addr, flags, argc, argv));
431 }
432
433 static int
kt_stackr(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)434 kt_stackr(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
435 {
436 kt_data_t *kt = mdb.m_target->t_data;
437 return (kt->k_dcmd_stackr(addr, flags, argc, argv));
438 }
439
440 static int
kt_regs(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)441 kt_regs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
442 {
443 kt_data_t *kt = mdb.m_target->t_data;
444
445 if (argc != 0 || (flags & DCMD_ADDRSPEC))
446 return (DCMD_USAGE);
447
448 addr = (uintptr_t)kt->k_regs;
449
450 return (kt->k_dcmd_regs(addr, flags, argc, argv));
451 }
452
453 #ifdef __x86
454 static int
kt_cpustack(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)455 kt_cpustack(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
456 {
457 kt_data_t *kt = mdb.m_target->t_data;
458 return (kt->k_dcmd_cpustack(addr, flags, argc, argv));
459 }
460
461 static int
kt_cpuregs(uintptr_t addr,uint_t flags,int argc,const mdb_arg_t * argv)462 kt_cpuregs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
463 {
464 kt_data_t *kt = mdb.m_target->t_data;
465 return (kt->k_dcmd_cpuregs(addr, flags, argc, argv));
466 }
467 #endif /* __x86 */
468
/*ARGSUSED*/
/*
 * ::status dcmd: print a summary of the current target -- what we are
 * debugging (live kernel vs. crash dump), the OS release/version, the
 * build version if available, and for dumps the image UUID, panic string
 * and dump content description.
 */
static int
kt_status_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
	kt_data_t *kt = mdb.m_target->t_data;
	struct utsname uts;

	/* Seed a placeholder nodename in case the target read fails. */
	bzero(&uts, sizeof (uts));
	(void) strcpy(uts.nodename, "unknown machine");
	(void) kt_uname(mdb.m_target, &uts);

	if (mdb_prop_postmortem) {
		mdb_printf("debugging %scrash dump %s (%d-bit) from %s\n",
		    kt->k_xpv_domu ? "domain " : "", kt->k_kvmfile,
		    (int)(sizeof (void *) * NBBY), uts.nodename);
	} else {
		mdb_printf("debugging live kernel (%d-bit) on %s\n",
		    (int)(sizeof (void *) * NBBY), uts.nodename);
	}

	mdb_printf("operating system: %s %s (%s)\n",
	    uts.release, uts.version, uts.machine);

	/* Resolved from mdb_ks in kt_activate(); may be absent. */
	if (print_buildversion != NULL)
		print_buildversion();

	if (kt->k_dumphdr) {
		dumphdr_t *dh = kt->k_dumphdr;

		mdb_printf("image uuid: %s\n", dh->dump_uuid[0] != '\0' ?
		    dh->dump_uuid : "(not set)");
		mdb_printf("panic message: %s\n", dh->dump_panicstring);

		kt->k_dump_print_content(dh, kt->k_dumpcontent);
	} else {
		char uuid[UUID_PRINTABLE_STRING_LENGTH];

		/* Live kernel: read the image UUID symbol directly. */
		if (mdb_readsym(uuid, sizeof (uuid),
		    "dump_osimage_uuid") == sizeof (uuid) &&
		    uuid[sizeof (uuid) - 1] == '\0') {
			mdb_printf("image uuid: %s\n", uuid[0] != '\0' ?
			    uuid : "(not set)");
		}
	}

	return (DCMD_OK);
}
516
/*
 * Dcmds provided by the kvm target itself; registered with the module
 * framework in kt_activate() and removed again in kt_deactivate().
 */
static const mdb_dcmd_t kt_dcmds[] = {
	{ "$c", "?[cnt]", "print stack backtrace", kt_stack },
	{ "$C", "?[cnt]", "print stack backtrace", kt_stackv },
	{ "$r", NULL, "print general-purpose registers", kt_regs },
	{ "$?", NULL, "print status and registers", kt_regs },
	{ "regs", NULL, "print general-purpose registers", kt_regs },
	{ "stack", "?[cnt]", "print stack backtrace", kt_stack },
	{ "stackregs", "?", "print stack backtrace and registers", kt_stackr },
#ifdef __x86
	{ "cpustack", "?[-v] [-c cpuid] [cnt]", "print stack backtrace for a "
	    "specific CPU", kt_cpustack },
	{ "cpuregs", "?[-c cpuid]", "print general-purpose registers for a "
	    "specific CPU", kt_cpuregs },
#endif
	{ "status", NULL, "print summary of current target", kt_status_dcmd },
	{ NULL }
};
534
535 static uintmax_t
reg_disc_get(const mdb_var_t * v)536 reg_disc_get(const mdb_var_t *v)
537 {
538 mdb_tgt_t *t = MDB_NV_COOKIE(v);
539 kt_data_t *kt = t->t_data;
540 mdb_tgt_reg_t r = 0;
541
542 (void) mdb_tgt_getareg(t, kt->k_tid, mdb_nv_get_name(v), &r);
543 return (r);
544 }
545
546 static kt_module_t *
kt_module_by_name(kt_data_t * kt,const char * name)547 kt_module_by_name(kt_data_t *kt, const char *name)
548 {
549 kt_module_t *km;
550
551 for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
552 if (strcmp(name, km->km_name) == 0)
553 return (km);
554 }
555
556 return (NULL);
557 }
558
/*
 * Activate the kvm target: set the global target properties, perform the
 * one-time setup (utsname cross-check, loading mdb_ks, resolving optional
 * dump helpers, discovering kernel modules and krtld), then register our
 * dcmds, export the register set as named variables, and export the ELF
 * file's properties.  The one-time work is guarded by k_activated so that
 * re-activation (e.g. returning from a proc context) only repeats the
 * registration steps.
 */
void
kt_activate(mdb_tgt_t *t)
{
	static const mdb_nv_disc_t reg_disc = { .disc_get = reg_disc_get };
	kt_data_t *kt = t->t_data;
	void *sym;

	int oflag = 0;

	mdb_prop_postmortem = kt->k_xpv_domu || (kt->k_dumphdr != NULL);
	mdb_prop_kernel = TRUE;
	mdb_prop_datamodel = MDB_TGT_MODEL_NATIVE;

	if (kt->k_activated == FALSE) {
		struct utsname u1, u2;
		/*
		 * If we're examining a crash dump, root is /, and uname(2)
		 * does not match the utsname in the dump, issue a warning.
		 * Note that we are assuming that the modules and macros in
		 * /usr/lib are compiled against the kernel from uname -rv.
		 */
		if (mdb_prop_postmortem && strcmp(mdb.m_root, "/") == 0 &&
		    uname(&u1) >= 0 && kt_uname(t, &u2) >= 0 &&
		    (strcmp(u1.release, u2.release) ||
		    strcmp(u1.version, u2.version))) {
			mdb_warn("warning: dump is from %s %s %s; dcmds and "
			    "macros may not match kernel implementation\n",
			    u2.sysname, u2.release, u2.version);
		}

		if (mdb_module_load(KT_MODULE, MDB_MOD_GLOBAL) < 0) {
			warn("failed to load kernel support module -- "
			    "some modules may not load\n");
		}

		/* Optional helper from mdb_ks; NULL if not present. */
		print_buildversion = (void (*)(void))dlsym(RTLD_NEXT,
		    "mdb_print_buildversion");

		if (mdb_prop_postmortem && kt->k_dumphdr != NULL) {
			/* Optional dump-support entry points from mdb_ks. */
			sym = dlsym(RTLD_NEXT, "mdb_dump_print_content");
			if (sym != NULL)
				kt->k_dump_print_content = (void (*)())sym;

			sym = dlsym(RTLD_NEXT, "mdb_dump_find_curproc");
			if (sym != NULL)
				kt->k_dump_find_curproc = (int (*)())sym;

			kt->k_dumpcontent = kt_find_dump_contents(kt);
		}

		if (t->t_flags & MDB_TGT_F_PRELOAD) {
			/* Disable the pager while we print module names. */
			oflag = mdb_iob_getflags(mdb.m_out) & MDB_IOB_PGENABLE;

			mdb_iob_clrflags(mdb.m_out, oflag);
			mdb_iob_puts(mdb.m_out, "Preloading module symbols: [");
			mdb_iob_flush(mdb.m_out);
		}

		if (!(t->t_flags & MDB_TGT_F_NOLOAD)) {
			kt_load_modules(kt, t);

			/*
			 * Determine where the CTF data for krtld is. If krtld
			 * is rolled into unix, force load the MDB krtld
			 * module.
			 */
			kt->k_rtld_name = "krtld";

			if (kt_module_by_name(kt, "krtld") == NULL) {
				(void) mdb_module_load("krtld", MDB_MOD_SILENT);
				kt->k_rtld_name = "unix";
			}
		}


		if (t->t_flags & MDB_TGT_F_PRELOAD) {
			mdb_iob_puts(mdb.m_out, " ]\n");
			mdb_iob_setflags(mdb.m_out, oflag);
		}

		kt->k_activated = TRUE;
	}

	(void) mdb_tgt_register_dcmds(t, &kt_dcmds[0], MDB_MOD_FORCE);

	/* Export some of our registers as named variables */
	mdb_tgt_register_regvars(t, kt->k_rds, &reg_disc, MDB_NV_RDONLY);

	mdb_tgt_elf_export(kt->k_file);
}
649
/*
 * Deactivate the kvm target: remove the register variables we exported,
 * unregister our dcmds, and reset the global target properties.  This is
 * the inverse of the per-activation (non-one-time) work in kt_activate().
 */
void
kt_deactivate(mdb_tgt_t *t)
{
	kt_data_t *kt = t->t_data;

	const mdb_tgt_regdesc_t *rdp;
	const mdb_dcmd_t *dcp;

	for (rdp = kt->k_rds; rdp->rd_name != NULL; rdp++) {
		mdb_var_t *v;

		if (!(rdp->rd_flags & MDB_TGT_R_EXPORT))
			continue; /* Didn't export register as a variable */

		if ((v = mdb_nv_lookup(&mdb.m_nv, rdp->rd_name)) != NULL) {
			/* Clear persistence so removal actually deletes it. */
			v->v_flags &= ~MDB_NV_PERSIST;
			mdb_nv_remove(&mdb.m_nv, v);
		}
	}

	for (dcp = &kt_dcmds[0]; dcp->dc_name != NULL; dcp++) {
		if (mdb_module_remove_dcmd(t->t_module, dcp->dc_name) == -1)
			warn("failed to remove dcmd %s", dcp->dc_name);
	}

	mdb_prop_postmortem = FALSE;
	mdb_prop_kernel = FALSE;
	mdb_prop_datamodel = MDB_TGT_MODEL_UNKNOWN;
}
679
/*ARGSUSED*/
/* Target-ops name entry point: this target is always named "kvm". */
const char *
kt_name(mdb_tgt_t *t)
{
	return ("kvm");
}
686
687 const char *
kt_platform(mdb_tgt_t * t)688 kt_platform(mdb_tgt_t *t)
689 {
690 kt_data_t *kt = t->t_data;
691 return (kt->k_platform);
692 }
693
694 int
kt_uname(mdb_tgt_t * t,struct utsname * utsp)695 kt_uname(mdb_tgt_t *t, struct utsname *utsp)
696 {
697 return (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, utsp,
698 sizeof (struct utsname), MDB_TGT_OBJ_EXEC, "utsname"));
699 }
700
/*ARGSUSED*/
/* Target-ops data model entry point: the kernel is always native-model. */
int
kt_dmodel(mdb_tgt_t *t)
{
	return (MDB_TGT_MODEL_NATIVE);
}
707
708 ssize_t
kt_aread(mdb_tgt_t * t,mdb_tgt_as_t as,void * buf,size_t nbytes,mdb_tgt_addr_t addr)709 kt_aread(mdb_tgt_t *t, mdb_tgt_as_t as, void *buf,
710 size_t nbytes, mdb_tgt_addr_t addr)
711 {
712 kt_data_t *kt = t->t_data;
713 ssize_t rval;
714
715 if ((rval = kt->k_kb_ops->kb_aread(kt->k_cookie, addr, buf,
716 nbytes, as)) == -1)
717 return (set_errno(EMDB_NOMAP));
718
719 return (rval);
720 }
721
722 ssize_t
kt_awrite(mdb_tgt_t * t,mdb_tgt_as_t as,const void * buf,size_t nbytes,mdb_tgt_addr_t addr)723 kt_awrite(mdb_tgt_t *t, mdb_tgt_as_t as, const void *buf,
724 size_t nbytes, mdb_tgt_addr_t addr)
725 {
726 kt_data_t *kt = t->t_data;
727 ssize_t rval;
728
729 if ((rval = kt->k_kb_ops->kb_awrite(kt->k_cookie, addr, buf,
730 nbytes, as)) == -1)
731 return (set_errno(EMDB_NOMAP));
732
733 return (rval);
734 }
735
736 ssize_t
kt_vread(mdb_tgt_t * t,void * buf,size_t nbytes,uintptr_t addr)737 kt_vread(mdb_tgt_t *t, void *buf, size_t nbytes, uintptr_t addr)
738 {
739 kt_data_t *kt = t->t_data;
740 ssize_t rval;
741
742 if ((rval = kt->k_kb_ops->kb_kread(kt->k_cookie, addr, buf,
743 nbytes)) == -1)
744 return (set_errno(EMDB_NOMAP));
745
746 return (rval);
747 }
748
749 ssize_t
kt_vwrite(mdb_tgt_t * t,const void * buf,size_t nbytes,uintptr_t addr)750 kt_vwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, uintptr_t addr)
751 {
752 kt_data_t *kt = t->t_data;
753 ssize_t rval;
754
755 if ((rval = kt->k_kb_ops->kb_kwrite(kt->k_cookie, addr, buf,
756 nbytes)) == -1)
757 return (set_errno(EMDB_NOMAP));
758
759 return (rval);
760 }
761
/* "File" reads on the kvm target are serviced from virtual memory. */
ssize_t
kt_fread(mdb_tgt_t *t, void *buf, size_t nbytes, uintptr_t addr)
{
	return (kt_vread(t, buf, nbytes, addr));
}
767
/* "File" writes on the kvm target are serviced through virtual memory. */
ssize_t
kt_fwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, uintptr_t addr)
{
	return (kt_vwrite(t, buf, nbytes, addr));
}
773
774 ssize_t
kt_pread(mdb_tgt_t * t,void * buf,size_t nbytes,physaddr_t addr)775 kt_pread(mdb_tgt_t *t, void *buf, size_t nbytes, physaddr_t addr)
776 {
777 kt_data_t *kt = t->t_data;
778 ssize_t rval;
779
780 if ((rval = kt->k_kb_ops->kb_pread(kt->k_cookie, addr, buf,
781 nbytes)) == -1)
782 return (set_errno(EMDB_NOMAP));
783
784 return (rval);
785 }
786
787 ssize_t
kt_pwrite(mdb_tgt_t * t,const void * buf,size_t nbytes,physaddr_t addr)788 kt_pwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, physaddr_t addr)
789 {
790 kt_data_t *kt = t->t_data;
791 ssize_t rval;
792
793 if ((rval = kt->k_kb_ops->kb_pwrite(kt->k_cookie, addr, buf,
794 nbytes)) == -1)
795 return (set_errno(EMDB_NOMAP));
796
797 return (rval);
798 }
799
/*
 * Translate virtual address 'va' to a physical address in *pap.  The 'as'
 * argument is overloaded: it is either one of the special MDB_TGT_AS_*
 * tokens or the address of a kernel 'struct as' to translate within.
 * We first try the backend's vtop op; if that fails, we fall back to a
 * platform-specific translator ("platform_vtop") that the unix dmod may
 * provide.  Returns 0 on success or -1 with errno set to EMDB_NOMAP.
 */
int
kt_vtop(mdb_tgt_t *t, mdb_tgt_as_t as, uintptr_t va, physaddr_t *pap)
{
	kt_data_t *kt = t->t_data;

	struct as *asp;
	physaddr_t pa;
	mdb_module_t *mod;
	mdb_var_t *v;
	int (*fptr)(uintptr_t, struct as *, physaddr_t *);

	switch ((uintptr_t)as) {
	case (uintptr_t)MDB_TGT_AS_PHYS:
	case (uintptr_t)MDB_TGT_AS_FILE:
	case (uintptr_t)MDB_TGT_AS_IO:
		return (set_errno(EINVAL));	/* Not virtual spaces */
	case (uintptr_t)MDB_TGT_AS_VIRT:
	case (uintptr_t)MDB_TGT_AS_VIRT_I:
	case (uintptr_t)MDB_TGT_AS_VIRT_S:
		asp = kt->k_as;	/* Default to the kernel's own as */
		break;
	default:
		asp = (struct as *)as;	/* Caller supplied an as pointer */
	}

	/* -1ULL is the backend's "no translation" sentinel. */
	if ((pa = kt->k_kb_ops->kb_vtop(kt->k_cookie, asp, va)) != -1ULL) {
		*pap = pa;
		return (0);
	}

	if ((v = mdb_nv_lookup(&mdb.m_modules, "unix")) != NULL &&
	    (mod = mdb_nv_get_cookie(v)) != NULL) {

		fptr = (int (*)(uintptr_t, struct as *, physaddr_t *))
		    dlsym(mod->mod_hdl, "platform_vtop");

		if ((fptr != NULL) && ((*fptr)(va, asp, pap) == 0))
			return (0);
	}

	return (set_errno(EMDB_NOMAP));
}
842
/*
 * Look up a symbol by name in the given object.  'obj' is either a module
 * name or one of the special MDB_TGT_OBJ_* tokens: EXEC searches only the
 * primary symtab, EVERY searches the primary symtab followed by every
 * loaded module, RTLD is aliased to the module containing krtld.  On a
 * hit the symbol is copied to *symp and its id/table recorded in *sip.
 * Returns 0 on success or -1 with errno set (EMDB_NOOBJ / EMDB_NOSYM).
 */
int
kt_lookup_by_name(mdb_tgt_t *t, const char *obj, const char *name,
    GElf_Sym *symp, mdb_syminfo_t *sip)
{
	kt_data_t *kt = t->t_data;
	kt_module_t *km, kmod;
	mdb_var_t *v;
	int n;

	/*
	 * To simplify the implementation, we create a fake module on the stack
	 * which is "prepended" to k_modlist and whose symtab is kt->k_symtab.
	 */
	kmod.km_symtab = kt->k_symtab;
	kmod.km_list.ml_next = mdb_list_next(&kt->k_modlist);

	switch ((uintptr_t)obj) {
	case (uintptr_t)MDB_TGT_OBJ_EXEC:
		km = &kmod;
		n = 1;		/* Search only the fake (primary) module */
		break;

	case (uintptr_t)MDB_TGT_OBJ_EVERY:
		km = &kmod;
		n = mdb_nv_size(&kt->k_modules) + 1;	/* Primary + all mods */
		break;

	case (uintptr_t)MDB_TGT_OBJ_RTLD:
		obj = kt->k_rtld_name;
		/*FALLTHRU*/

	default:
		if ((v = mdb_nv_lookup(&kt->k_modules, obj)) == NULL)
			return (set_errno(EMDB_NOOBJ));

		km = mdb_nv_get_cookie(v);
		n = 1;

		/* Fault in the module's symtab on first use. */
		if (km->km_symtab == NULL)
			kt_load_module(kt, t, km);
	}

	for (; n > 0; n--, km = mdb_list_next(km)) {
		if (mdb_gelf_symtab_lookup_by_name(km->km_symtab, name,
		    symp, &sip->sym_id) == 0) {
			sip->sym_table = MDB_TGT_SYMTAB;
			return (0);
		}
	}

	return (set_errno(EMDB_NOSYM));
}
895
/*
 * Look up the symbol nearest to (or, with MDB_TGT_SYM_EXACT, containing)
 * 'addr'.  We search the private symbol table, the primary symtab and
 * dynsym, and then every kernel module, lazily instantiating a module's
 * symtab whenever 'addr' falls within its text range.  On success the
 * winning symbol is stored in *symp, its name (prefixed with "module`"
 * when it came from a real module) in 'buf', and its id/table in *sip.
 * Returns 0 on success or -1 with errno set to EMDB_NOSYMADDR.
 */
int
kt_lookup_by_addr(mdb_tgt_t *t, uintptr_t addr, uint_t flags,
    char *buf, size_t nbytes, GElf_Sym *symp, mdb_syminfo_t *sip)
{
	kt_data_t *kt = t->t_data;
	kt_module_t kmods[3], *kmods_begin = &kmods[0], *kmods_end;
	const char *name;

	kt_module_t *km = &kmods[0]; /* Point km at first fake module */
	kt_module_t *sym_km = NULL; /* Module associated with best sym */
	GElf_Sym sym;		/* Best symbol found so far if !exact */
	uint_t symid;		/* ID of best symbol found so far */

	/*
	 * To simplify the implementation, we create fake modules on the stack
	 * that are "prepended" to k_modlist and whose symtab is set to
	 * each of three special symbol tables, in order of precedence.
	 */
	km->km_symtab = mdb.m_prsym;

	if (kt->k_symtab != NULL) {
		km->km_list.ml_next = (mdb_list_t *)(km + 1);
		km = mdb_list_next(km);
		km->km_symtab = kt->k_symtab;
	}

	if (kt->k_dynsym != NULL) {
		km->km_list.ml_next = (mdb_list_t *)(km + 1);
		km = mdb_list_next(km);
		km->km_symtab = kt->k_dynsym;
	}

	/* Chain the last fake module onto the real module list. */
	km->km_list.ml_next = mdb_list_next(&kt->k_modlist);
	kmods_end = km;

	/*
	 * Now iterate over the list of fake and real modules. If the module
	 * has no symbol table and the address is in the text section,
	 * instantiate the module's symbol table. In exact mode, we can
	 * jump to 'found' immediately if we match. Otherwise we continue
	 * looking and improve our choice if we find a closer symbol.
	 */
	for (km = &kmods[0]; km != NULL; km = mdb_list_next(km)) {
		if (km->km_symtab == NULL && addr >= km->km_text_va &&
		    addr < km->km_text_va + km->km_text_size)
			kt_load_module(kt, t, km);

		/* Skip misses and zero-valued (absolute/undef) symbols. */
		if (mdb_gelf_symtab_lookup_by_addr(km->km_symtab, addr,
		    flags, buf, nbytes, symp, &sip->sym_id) != 0 ||
		    symp->st_value == 0)
			continue;

		if (flags & MDB_TGT_SYM_EXACT) {
			sym_km = km;
			goto found;
		}

		if (sym_km == NULL || mdb_gelf_sym_closer(symp, &sym, addr)) {
			sym_km = km;
			sym = *symp;
			symid = sip->sym_id;
		}
	}

	if (sym_km == NULL)
		return (set_errno(EMDB_NOSYMADDR));

	*symp = sym; /* Copy our best symbol into the caller's symbol */
	sip->sym_id = symid;
found:
	/*
	 * Once we've found something, copy the final name into the caller's
	 * buffer and prefix it with the load object name if appropriate.
	 */
	if (sym_km != NULL) {
		name = mdb_gelf_sym_name(sym_km->km_symtab, symp);

		/* Real modules (outside kmods[]) get a "module`" prefix. */
		if (sym_km < kmods_begin || sym_km > kmods_end) {
			(void) mdb_snprintf(buf, nbytes, "%s`%s",
			    sym_km->km_name, name);
		} else if (nbytes > 0) {
			(void) strncpy(buf, name, nbytes);
			buf[nbytes - 1] = '\0';
		}

		if (sym_km->km_symtab == mdb.m_prsym)
			sip->sym_table = MDB_TGT_PRVSYM;
		else
			sip->sym_table = MDB_TGT_SYMTAB;
	} else {
		sip->sym_table = MDB_TGT_SYMTAB;
	}

	return (0);
}
991
992 static int
kt_symtab_func(void * data,const GElf_Sym * sym,const char * name,uint_t id)993 kt_symtab_func(void *data, const GElf_Sym *sym, const char *name, uint_t id)
994 {
995 kt_symarg_t *argp = data;
996
997 if (mdb_tgt_sym_match(sym, argp->sym_type)) {
998 argp->sym_info.sym_id = id;
999
1000 return (argp->sym_cb(argp->sym_data, sym, name,
1001 &argp->sym_info, argp->sym_obj));
1002 }
1003
1004 return (0);
1005 }
1006
1007 static void
kt_symtab_iter(mdb_gelf_symtab_t * gst,uint_t type,const char * obj,mdb_tgt_sym_f * cb,void * p)1008 kt_symtab_iter(mdb_gelf_symtab_t *gst, uint_t type, const char *obj,
1009 mdb_tgt_sym_f *cb, void *p)
1010 {
1011 kt_symarg_t arg;
1012
1013 arg.sym_cb = cb;
1014 arg.sym_data = p;
1015 arg.sym_type = type;
1016 arg.sym_info.sym_table = gst->gst_tabid;
1017 arg.sym_obj = obj;
1018
1019 mdb_gelf_symtab_iter(gst, kt_symtab_func, &arg);
1020 }
1021
/*
 * Iterate over the symbols of the given object.  'obj' is a module name
 * or one of the MDB_TGT_OBJ_* tokens; 'which' selects the .symtab or
 * .dynsym for the primary object (modules have only a symtab).  Each
 * matching symbol is passed to 'cb'.  Per-module symtabs are faulted in
 * on demand.  Returns 0, or -1 with errno EMDB_NOOBJ for a bad object.
 */
int
kt_symbol_iter(mdb_tgt_t *t, const char *obj, uint_t which, uint_t type,
    mdb_tgt_sym_f *cb, void *data)
{
	kt_data_t *kt = t->t_data;
	kt_module_t *km;

	mdb_gelf_symtab_t *symtab = NULL;
	mdb_var_t *v;

	switch ((uintptr_t)obj) {
	case (uintptr_t)MDB_TGT_OBJ_EXEC:
		if (which == MDB_TGT_SYMTAB)
			symtab = kt->k_symtab;
		else
			symtab = kt->k_dynsym;
		break;

	case (uintptr_t)MDB_TGT_OBJ_EVERY:
		if (which == MDB_TGT_DYNSYM) {
			/* Only the primary object has a dynsym. */
			symtab = kt->k_dynsym;
			obj = MDB_TGT_OBJ_EXEC;
			break;
		}

		/* Walk every module's symtab in hash order. */
		mdb_nv_rewind(&kt->k_modules);
		while ((v = mdb_nv_advance(&kt->k_modules)) != NULL) {
			km = mdb_nv_get_cookie(v);

			if (km->km_symtab == NULL)
				kt_load_module(kt, t, km);

			if (km->km_symtab != NULL)
				kt_symtab_iter(km->km_symtab, type,
				    km->km_name, cb, data);
		}
		break;

	case (uintptr_t)MDB_TGT_OBJ_RTLD:
		obj = kt->k_rtld_name;
		/*FALLTHRU*/

	default:
		v = mdb_nv_lookup(&kt->k_modules, obj);

		if (v == NULL)
			return (set_errno(EMDB_NOOBJ));

		km = mdb_nv_get_cookie(v);

		if (km->km_symtab == NULL)
			kt_load_module(kt, t, km);

		symtab = km->km_symtab;
	}

	/* NULL when EVERY already iterated above or no such table exists. */
	if (symtab)
		kt_symtab_iter(symtab, type, obj, cb, data);

	return (0);
}
1083
1084 static int
kt_mapping_walk(uintptr_t addr,const void * data,kt_maparg_t * marg)1085 kt_mapping_walk(uintptr_t addr, const void *data, kt_maparg_t *marg)
1086 {
1087 /*
1088 * This is a bit sketchy but avoids problematic compilation of this
1089 * target against the current VM implementation. Now that we have
1090 * vmem, we can make this less broken and more informative by changing
1091 * this code to invoke the vmem walker in the near future.
1092 */
1093 const struct kt_seg {
1094 caddr_t s_base;
1095 size_t s_size;
1096 } *segp = (const struct kt_seg *)data;
1097
1098 mdb_map_t map;
1099 GElf_Sym sym;
1100 mdb_syminfo_t info;
1101
1102 map.map_base = (uintptr_t)segp->s_base;
1103 map.map_size = segp->s_size;
1104 map.map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X;
1105
1106 if (kt_lookup_by_addr(marg->map_target, addr, MDB_TGT_SYM_EXACT,
1107 map.map_name, MDB_TGT_MAPSZ, &sym, &info) == -1) {
1108
1109 (void) mdb_iob_snprintf(map.map_name, MDB_TGT_MAPSZ,
1110 "%lr", addr);
1111 }
1112
1113 return (marg->map_cb(marg->map_data, &map, map.map_name));
1114 }
1115
1116 int
kt_mapping_iter(mdb_tgt_t * t,mdb_tgt_map_f * func,void * private)1117 kt_mapping_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private)
1118 {
1119 kt_data_t *kt = t->t_data;
1120 kt_maparg_t m;
1121
1122 m.map_target = t;
1123 m.map_cb = func;
1124 m.map_data = private;
1125
1126 return (mdb_pwalk("seg", (mdb_walk_cb_t)kt_mapping_walk, &m,
1127 (uintptr_t)kt->k_as));
1128 }
1129
1130 static const mdb_map_t *
kt_module_to_map(kt_module_t * km,mdb_map_t * map)1131 kt_module_to_map(kt_module_t *km, mdb_map_t *map)
1132 {
1133 (void) strncpy(map->map_name, km->km_name, MDB_TGT_MAPSZ);
1134 map->map_name[MDB_TGT_MAPSZ - 1] = '\0';
1135 map->map_base = km->km_text_va;
1136 map->map_size = km->km_text_size;
1137 map->map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X;
1138
1139 return (map);
1140 }
1141
1142 int
kt_object_iter(mdb_tgt_t * t,mdb_tgt_map_f * func,void * private)1143 kt_object_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private)
1144 {
1145 kt_data_t *kt = t->t_data;
1146 kt_module_t *km;
1147 mdb_map_t m;
1148
1149 for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
1150 if (func(private, kt_module_to_map(km, &m), km->km_name) == -1)
1151 break;
1152 }
1153
1154 return (0);
1155 }
1156
1157 const mdb_map_t *
kt_addr_to_map(mdb_tgt_t * t,uintptr_t addr)1158 kt_addr_to_map(mdb_tgt_t *t, uintptr_t addr)
1159 {
1160 kt_data_t *kt = t->t_data;
1161 kt_module_t *km;
1162
1163 for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
1164 if (addr - km->km_text_va < km->km_text_size ||
1165 addr - km->km_data_va < km->km_data_size ||
1166 addr - km->km_bss_va < km->km_bss_size)
1167 return (kt_module_to_map(km, &kt->k_map));
1168 }
1169
1170 (void) set_errno(EMDB_NOMAP);
1171 return (NULL);
1172 }
1173
1174 const mdb_map_t *
kt_name_to_map(mdb_tgt_t * t,const char * name)1175 kt_name_to_map(mdb_tgt_t *t, const char *name)
1176 {
1177 kt_data_t *kt = t->t_data;
1178 kt_module_t *km;
1179 mdb_map_t m;
1180
1181 /*
1182 * If name is MDB_TGT_OBJ_EXEC, return the first module on the list,
1183 * which will be unix since we keep k_modlist in load order.
1184 */
1185 if (name == MDB_TGT_OBJ_EXEC)
1186 return (kt_module_to_map(mdb_list_next(&kt->k_modlist), &m));
1187
1188 if (name == MDB_TGT_OBJ_RTLD)
1189 name = kt->k_rtld_name;
1190
1191 if ((km = kt_module_by_name(kt, name)) != NULL)
1192 return (kt_module_to_map(km, &m));
1193
1194 (void) set_errno(EMDB_NOOBJ);
1195 return (NULL);
1196 }
1197
/*
 * Return the CTF container for the given module, reading and opening it on
 * first use.  The raw CTF data is read from the target at km_ctf_va and
 * cached in km_ctf_buf, with the open container cached in km_ctfp.  On any
 * failure we return NULL (with errno set for the ENOCTF/read/open cases)
 * and leave km_ctfp/km_ctf_buf NULL so a later call may retry.  If the
 * container names a parent, the parent module's CTF is loaded recursively
 * and its types imported; failure there is reported but not fatal.
 */
static ctf_file_t *
kt_load_ctfdata(mdb_tgt_t *t, kt_module_t *km)
{
	kt_data_t *kt = t->t_data;
	int err;

	/* Already opened on a prior call: return the cached container. */
	if (km->km_ctfp != NULL)
		return (km->km_ctfp);

	/* Module carries no CTF data at all. */
	if (km->km_ctf_va == 0) {
		(void) set_errno(EMDB_NOCTF);
		return (NULL);
	}

	/*
	 * mdb_ctf_bufopen() below needs the module's symbol and string
	 * tables (km_symbuf/km_strtab), so force the module load first.
	 */
	if (km->km_symtab == NULL)
		kt_load_module(t->t_data, t, km);

	if ((km->km_ctf_buf = mdb_alloc(km->km_ctf_size, UM_NOSLEEP)) == NULL) {
		warn("failed to allocate memory to load %s debugging "
		    "information", km->km_name);
		return (NULL);
	}

	/* Copy the raw CTF data out of the target's address space. */
	if (mdb_tgt_vread(t, km->km_ctf_buf, km->km_ctf_size,
	    km->km_ctf_va) != km->km_ctf_size) {
		warn("failed to read %lu bytes of debug data for %s at %p",
		    (ulong_t)km->km_ctf_size, km->km_name,
		    (void *)km->km_ctf_va);
		mdb_free(km->km_ctf_buf, km->km_ctf_size);
		km->km_ctf_buf = NULL;
		return (NULL);
	}

	if ((km->km_ctfp = mdb_ctf_bufopen((const void *)km->km_ctf_buf,
	    km->km_ctf_size, km->km_symbuf, &km->km_symtab_hdr,
	    km->km_strtab, &km->km_strtab_hdr, &err)) == NULL) {
		mdb_free(km->km_ctf_buf, km->km_ctf_size);
		km->km_ctf_buf = NULL;
		(void) set_errno(ctf_to_errno(err));
		return (NULL);
	}

	mdb_dprintf(MDB_DBG_KMOD, "loaded %lu bytes of CTF data for %s\n",
	    (ulong_t)km->km_ctf_size, km->km_name);

	/*
	 * If this container refers to a parent container, look the parent
	 * module up by name, load its CTF data (recursively) if needed, and
	 * import its types so this container's references resolve.  Any
	 * failure here is reported but the child container is still usable
	 * and still returned.
	 */
	if (ctf_parent_name(km->km_ctfp) != NULL) {
		mdb_var_t *v;

		if ((v = mdb_nv_lookup(&kt->k_modules,
		    ctf_parent_name(km->km_ctfp))) == NULL) {
			warn("failed to load CTF data for %s - parent %s not "
			    "loaded\n", km->km_name,
			    ctf_parent_name(km->km_ctfp));
		}

		if (v != NULL) {
			kt_module_t *pm = mdb_nv_get_cookie(v);

			if (pm->km_ctfp == NULL)
				(void) kt_load_ctfdata(t, pm);

			if (pm->km_ctfp != NULL && ctf_import(km->km_ctfp,
			    pm->km_ctfp) == CTF_ERR) {
				warn("failed to import parent types into "
				    "%s: %s\n", km->km_name,
				    ctf_errmsg(ctf_errno(km->km_ctfp)));
			}
		}
	}

	return (km->km_ctfp);
}
1270
1271 ctf_file_t *
kt_addr_to_ctf(mdb_tgt_t * t,uintptr_t addr)1272 kt_addr_to_ctf(mdb_tgt_t *t, uintptr_t addr)
1273 {
1274 kt_data_t *kt = t->t_data;
1275 kt_module_t *km;
1276
1277 for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
1278 if (addr - km->km_text_va < km->km_text_size ||
1279 addr - km->km_data_va < km->km_data_size ||
1280 addr - km->km_bss_va < km->km_bss_size)
1281 return (kt_load_ctfdata(t, km));
1282 }
1283
1284 (void) set_errno(EMDB_NOMAP);
1285 return (NULL);
1286 }
1287
1288 ctf_file_t *
kt_name_to_ctf(mdb_tgt_t * t,const char * name)1289 kt_name_to_ctf(mdb_tgt_t *t, const char *name)
1290 {
1291 kt_data_t *kt = t->t_data;
1292 kt_module_t *km;
1293
1294 if (name == MDB_TGT_OBJ_EXEC)
1295 name = KT_CTFPARENT;
1296 else if (name == MDB_TGT_OBJ_RTLD)
1297 name = kt->k_rtld_name;
1298
1299 if ((km = kt_module_by_name(kt, name)) != NULL)
1300 return (kt_load_ctfdata(t, km));
1301
1302 (void) set_errno(EMDB_NOOBJ);
1303 return (NULL);
1304 }
1305
1306 /*ARGSUSED*/
1307 int
kt_status(mdb_tgt_t * t,mdb_tgt_status_t * tsp)1308 kt_status(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1309 {
1310 kt_data_t *kt = t->t_data;
1311 bzero(tsp, sizeof (mdb_tgt_status_t));
1312 tsp->st_state = (kt->k_xpv_domu || (kt->k_dumphdr != NULL)) ?
1313 MDB_TGT_DEAD : MDB_TGT_RUNNING;
1314 return (0);
1315 }
1316
1317 static ssize_t
kt_xd_dumphdr(mdb_tgt_t * t,void * buf,size_t nbytes)1318 kt_xd_dumphdr(mdb_tgt_t *t, void *buf, size_t nbytes)
1319 {
1320 kt_data_t *kt = t->t_data;
1321
1322 if (buf == NULL && nbytes == 0)
1323 return (sizeof (dumphdr_t));
1324
1325 if (kt->k_dumphdr == NULL)
1326 return (set_errno(ENODATA));
1327
1328 nbytes = MIN(nbytes, sizeof (dumphdr_t));
1329 bcopy(kt->k_dumphdr, buf, nbytes);
1330
1331 return (nbytes);
1332 }
1333
/*
 * Tear down the kvm target: unload the kernel support module, free all
 * target-level state (registers, symbol tables, dump header, ELF file),
 * close the KVM backend, then free every cached module structure and the
 * target data itself.
 */
void
kt_destroy(mdb_tgt_t *t)
{
	kt_data_t *kt = t->t_data;
	kt_module_t *km, *nkm;

	(void) mdb_module_unload(KT_MODULE, 0);

	if (kt->k_regs != NULL)
		mdb_free(kt->k_regs, kt->k_regsize);

	if (kt->k_symtab != NULL)
		mdb_gelf_symtab_destroy(kt->k_symtab);

	if (kt->k_dynsym != NULL)
		mdb_gelf_symtab_destroy(kt->k_dynsym);

	if (kt->k_dumphdr != NULL)
		mdb_free(kt->k_dumphdr, sizeof (dumphdr_t));

	mdb_gelf_destroy(kt->k_file);

	(void) kt->k_kb_ops->kb_close(kt->k_cookie);

	/*
	 * Free each module's symbol table, raw section data, and CTF state.
	 * The next pointer is saved before the element itself is freed.
	 */
	for (km = mdb_list_next(&kt->k_modlist); km; km = nkm) {
		if (km->km_symtab)
			mdb_gelf_symtab_destroy(km->km_symtab);

		if (km->km_data)
			mdb_free(km->km_data, km->km_datasz);

		if (km->km_ctfp)
			ctf_close(km->km_ctfp);

		if (km->km_ctf_buf != NULL)
			mdb_free(km->km_ctf_buf, km->km_ctf_size);

		nkm = mdb_list_next(km);
		strfree(km->km_name);
		mdb_free(km, sizeof (kt_module_t));
	}

	mdb_nv_destroy(&kt->k_modules);

	/* k_symfile may be NULL for the one-argument (domU) case. */
	strfree(kt->k_kvmfile);
	if (kt->k_symfile != NULL)
		strfree(kt->k_symfile);

	mdb_free(kt, sizeof (kt_data_t));
}
1384
/*
 * Default stub installed as the initial value of the k_dump_* function
 * pointers in mdb_kvm_tgt_create(); it simply fails.
 */
static int
kt_data_stub(void)
{
	return (-1);	/* always fails */
}
1390
/*
 * Construct the libkvm kernel target.  With two arguments we open the named
 * symbol file and kvm file through the libkvm backend; with one argument
 * (x86 only) a dynamically located mdb_kb backend is used and the target is
 * marked as a hypervisor domU image.  After opening, we build the ELF
 * symbol tables, read a handful of kernel globals ('kas', 'platform',
 * 'utsname'), run the ISA-specific initialization, and — for crash dumps —
 * cache the dump header and register the "dumphdr" xdata.  Returns 0 on
 * success or -1 with errno set, releasing anything partially constructed.
 */
int
mdb_kvm_tgt_create(mdb_tgt_t *t, int argc, const char *argv[])
{
	kt_data_t *kt = mdb_zalloc(sizeof (kt_data_t), UM_SLEEP);
	mdb_kb_ops_t *kvm_kb_ops = libkvm_kb_ops();
	int oflag = (t->t_flags & MDB_TGT_F_RDWR) ? O_RDWR : O_RDONLY;
	struct utsname uts;
	GElf_Sym sym;
	pgcnt_t pmem;


	if (argc == 2) {
		/* Explicit symbol file and kvm file: use libkvm directly. */
		kt->k_symfile = strdup(argv[0]);
		kt->k_kvmfile = strdup(argv[1]);

		kt->k_cookie = kvm_kb_ops->kb_open(kt->k_symfile,
		    kt->k_kvmfile, NULL, oflag, (char *)mdb.m_pname);

		if (kt->k_cookie == NULL)
			goto err;

		kt->k_xpv_domu = 0;
		kt->k_kb_ops = kvm_kb_ops;
	} else {
#ifndef __x86
		return (set_errno(EINVAL));
#else
		mdb_kb_ops_t *(*getops)(void);

		kt->k_symfile = NULL;
		kt->k_kvmfile = strdup(argv[0]);

		getops = (mdb_kb_ops_t *(*)())dlsym(RTLD_NEXT, "mdb_kb_ops");

		/*
		 * Load mdb_kb if it's not already loaded during
		 * identification.
		 */
		if (getops == NULL) {
			(void) mdb_module_load("mdb_kb",
			    MDB_MOD_GLOBAL | MDB_MOD_SILENT);
			getops = (mdb_kb_ops_t *(*)())
			    dlsym(RTLD_NEXT, "mdb_kb_ops");
		}

		if (getops == NULL || (kt->k_kb_ops = getops()) == NULL) {
			warn("failed to load KVM backend ops\n");
			goto err;
		}

		kt->k_cookie = kt->k_kb_ops->kb_open(NULL, kt->k_kvmfile, NULL,
		    oflag, (char *)mdb.m_pname);

		if (kt->k_cookie == NULL)
			goto err;

		kt->k_xpv_domu = 1;
#endif
	}

	/*
	 * Obtain an I/O object for the symbol file and wrap it in a GElf
	 * file from which the .symtab and .dynsym tables are built.
	 */
	if ((kt->k_fio = kt->k_kb_ops->kb_sym_io(kt->k_cookie,
	    kt->k_symfile)) == NULL)
		goto err;

	if ((kt->k_file = mdb_gelf_create(kt->k_fio,
	    ET_EXEC, GF_FILE)) == NULL) {
		mdb_io_destroy(kt->k_fio);
		goto err;
	}

	kt->k_symtab =
	    mdb_gelf_symtab_create_file(kt->k_file, SHT_SYMTAB, MDB_TGT_SYMTAB);

	kt->k_dynsym =
	    mdb_gelf_symtab_create_file(kt->k_file, SHT_DYNSYM, MDB_TGT_DYNSYM);

	/* The kernel address space pointer is the value of 'kas'. */
	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "kas",
	    &sym, NULL) == -1) {
		warn("'kas' symbol is missing from kernel\n");
		goto err;
	}

	kt->k_as = (struct as *)(uintptr_t)sym.st_value;

	/* Read the platform name string out of the kernel image. */
	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "platform",
	    &sym, NULL) == -1) {
		warn("'platform' symbol is missing from kernel\n");
		goto err;
	}

	if (kt->k_kb_ops->kb_kread(kt->k_cookie, sym.st_value,
	    kt->k_platform, MAXNAMELEN) <= 0) {
		warn("failed to read 'platform' string from kernel");
		goto err;
	}

	/* Read the kernel's utsname structure. */
	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "utsname",
	    &sym, NULL) == -1) {
		warn("'utsname' symbol is missing from kernel\n");
		goto err;
	}

	if (kt->k_kb_ops->kb_kread(kt->k_cookie, sym.st_value, &uts,
	    sizeof (uts)) <= 0) {
		warn("failed to read 'utsname' struct from kernel");
		goto err;
	}

	/* Install failing stubs until real dump routines are provided. */
	kt->k_dump_print_content = (void (*)())(uintptr_t)kt_data_stub;
	kt->k_dump_find_curproc = kt_data_stub;

	/*
	 * We set k_ctfvalid based on the presence of the CTF vmem arena
	 * symbol. The CTF members were added to the end of struct module at
	 * the same time, so this allows us to know whether we can use them.
	 */
	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "ctf_arena", &sym,
	    NULL) == 0 && !(mdb.m_flags & MDB_FL_NOCTF))
		kt->k_ctfvalid = 1;

	(void) mdb_nv_create(&kt->k_modules, UM_SLEEP);
	t->t_pshandle = kt->k_cookie;
	t->t_data = kt;

	/* ISA-specific target initialization. */
#if defined(__sparc)
#if defined(__sparcv9)
	kt_sparcv9_init(t);
#else
	kt_sparcv7_init(t);
#endif
#elif defined(__amd64)
	kt_amd64_init(t);
#elif defined(__i386)
	kt_ia32_init(t);
#else
#error "unknown ISA"
#endif

	/*
	 * We read our representative thread ID (address) from the kernel's
	 * global panic_thread. It will remain 0 if this is a live kernel.
	 */
	(void) mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &kt->k_tid, sizeof (void *),
	    MDB_TGT_OBJ_EXEC, "panic_thread");

	/* In adb compatibility mode, report physical memory on startup. */
	if ((mdb.m_flags & MDB_FL_ADB) && mdb_tgt_readsym(t, MDB_TGT_AS_VIRT,
	    &pmem, sizeof (pmem), MDB_TGT_OBJ_EXEC, "physmem") == sizeof (pmem))
		mdb_printf("physmem %lx\n", (ulong_t)pmem);

	/*
	 * If this is not a live kernel or a hypervisor dump, read the dump
	 * header. We don't have to sanity-check the header, as the open would
	 * not have succeeded otherwise.
	 */
	if (!kt->k_xpv_domu && strcmp(kt->k_symfile, "/dev/ksyms") != 0) {
		mdb_io_t *vmcore;

		kt->k_dumphdr = mdb_alloc(sizeof (dumphdr_t), UM_SLEEP);

		if ((vmcore = mdb_fdio_create_path(NULL, kt->k_kvmfile,
		    O_RDONLY, 0)) == NULL) {
			mdb_warn("failed to open %s", kt->k_kvmfile);
			goto err;
		}

		if (IOP_READ(vmcore, kt->k_dumphdr, sizeof (dumphdr_t)) !=
		    sizeof (dumphdr_t)) {
			mdb_warn("failed to read dump header");
			mdb_io_destroy(vmcore);
			goto err;
		}

		mdb_io_destroy(vmcore);

		(void) mdb_tgt_xdata_insert(t, "dumphdr",
		    "dump header structure", kt_xd_dumphdr);
	}

	return (0);

err:
	/* Unwind whatever was constructed before the failure. */
	if (kt->k_dumphdr != NULL)
		mdb_free(kt->k_dumphdr, sizeof (dumphdr_t));

	if (kt->k_symtab != NULL)
		mdb_gelf_symtab_destroy(kt->k_symtab);

	if (kt->k_dynsym != NULL)
		mdb_gelf_symtab_destroy(kt->k_dynsym);

	if (kt->k_file != NULL)
		mdb_gelf_destroy(kt->k_file);

	if (kt->k_cookie != NULL)
		(void) kt->k_kb_ops->kb_close(kt->k_cookie);

	mdb_free(kt, sizeof (kt_data_t));
	return (-1);
}
1590
1591 int
mdb_kvm_is_dump(mdb_io_t * io)1592 mdb_kvm_is_dump(mdb_io_t *io)
1593 {
1594 dumphdr_t h;
1595
1596 (void) IOP_SEEK(io, (off64_t)0L, SEEK_SET);
1597
1598 return (IOP_READ(io, &h, sizeof (dumphdr_t)) == sizeof (dumphdr_t) &&
1599 h.dump_magic == DUMP_MAGIC);
1600 }
1601
1602 int
mdb_kvm_is_compressed_dump(mdb_io_t * io)1603 mdb_kvm_is_compressed_dump(mdb_io_t *io)
1604 {
1605 dumphdr_t h;
1606
1607 (void) IOP_SEEK(io, (off64_t)0L, SEEK_SET);
1608
1609 return (IOP_READ(io, &h, sizeof (dumphdr_t)) == sizeof (dumphdr_t) &&
1610 h.dump_magic == DUMP_MAGIC &&
1611 (h.dump_flags & DF_COMPRESSED) != 0);
1612 }
1613