xref: /titanic_52/usr/src/cmd/mdb/common/mdb/mdb_kvm.c (revision 1320caf7cc74a3c5be65ef23516dee229adc288a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 /*
26  * Copyright (c) 2013, Joyent, Inc.  All rights reserved.
27  */
28 
29 /*
30  * Libkvm Kernel Target
31  *
32  * The libkvm kernel target provides access to both crash dumps and live
33  * kernels through /dev/ksyms and /dev/kmem, using the facilities provided by
34  * the libkvm.so library.  The target-specific data structures are shared
35  * between this file (common code) and the ISA-dependent parts of the target,
36  * and so they are defined in the mdb_kvm.h header.  The target processes an
37  * "executable" (/dev/ksyms or the unix.X file) which contains a primary
38  * .symtab and .dynsym, and then also iterates over the krtld module chain in
39  * the kernel in order to obtain a list of loaded modules and per-module symbol
40  * tables.  To improve startup performance, the per-module symbol tables are
41  * instantiated on-the-fly whenever an address lookup falls within the text
42  * section of a given module.  The target also relies on services from the
43  * mdb_ks (kernel support) module, which contains pieces of the implementation
44  * that must be compiled against the kernel implementation.
45  */
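/*
 * Illustrative sketch (not part of the implementation): the target is
 * typically reached in one of two ways, which end up as the argv that
 * mdb_kvm_tgt_create() receives below.  Roughly:
 *
 *	# mdb -k			(live kernel)
 *	   -> argv = { "/dev/ksyms", "/dev/kmem" }
 *
 *	# mdb unix.0 vmcore.0		(crash dump saved by savecore)
 *	   -> argv = { "unix.0", "vmcore.0" }
 *
 * The argument parsing itself lives in the generic target code; the
 * paths above are the conventional names and are shown only as an
 * example.
 */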
46 
47 #include <sys/modctl.h>
48 #include <sys/kobj.h>
49 #include <sys/kobj_impl.h>
50 #include <sys/utsname.h>
51 #include <sys/panic.h>
52 #include <sys/dumphdr.h>
53 #include <sys/dumpadm.h>
54 
55 #include <dlfcn.h>
56 #include <libctf.h>
57 #include <string.h>
58 #include <fcntl.h>
59 #include <errno.h>
60 
61 #include <mdb/mdb_target_impl.h>
62 #include <mdb/mdb_err.h>
63 #include <mdb/mdb_debug.h>
64 #include <mdb/mdb_string.h>
65 #include <mdb/mdb_modapi.h>
66 #include <mdb/mdb_io_impl.h>
67 #include <mdb/mdb_ctf.h>
68 #include <mdb/mdb_kvm.h>
69 #include <mdb/mdb_module.h>
70 #include <mdb/mdb_kb.h>
71 #include <mdb/mdb.h>
72 
73 #define	KT_RELOC_BUF(buf, obase, nbase) \
74 	((uintptr_t)(buf) - (uintptr_t)(obase) + (uintptr_t)(nbase))
75 
76 #define	KT_BAD_BUF(buf, base, size) \
77 	((uintptr_t)(buf) < (uintptr_t)(base) || \
78 	((uintptr_t)(buf) >= (uintptr_t)(base) + (uintptr_t)(size)))
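/*
 * A brief worked example of the two helpers above (illustrative only):
 * if a module's symspace was read from kernel VA 0x1000 into a local
 * buffer at 0x2000, and its .symtab begins at kernel VA 0x1040, then
 *
 *	KT_RELOC_BUF(0x1040, 0x1000, 0x2000) == 0x2040
 *
 * i.e. the kernel address is rebased into the local copy.  KT_BAD_BUF()
 * is the matching sanity check: it is true when 'buf' does not fall
 * within [base, base + size), in which case the rebased pointer would
 * point outside the data we actually read.
 */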
79 
80 typedef struct kt_symarg {
81 	mdb_tgt_sym_f *sym_cb;		/* Caller's callback function */
82 	void *sym_data;			/* Callback function argument */
83 	uint_t sym_type;		/* Symbol type/binding filter */
84 	mdb_syminfo_t sym_info;		/* Symbol id and table id */
85 	const char *sym_obj;		/* Containing object */
86 } kt_symarg_t;
87 
88 typedef struct kt_maparg {
89 	mdb_tgt_t *map_target;		/* Target used for mapping iter */
90 	mdb_tgt_map_f *map_cb;		/* Caller's callback function */
91 	void *map_data;			/* Callback function argument */
92 } kt_maparg_t;
93 
94 static const char KT_MODULE[] = "mdb_ks";
95 static const char KT_CTFPARENT[] = "genunix";
96 
97 static void
98 kt_load_module(kt_data_t *kt, mdb_tgt_t *t, kt_module_t *km)
99 {
100 	km->km_data = mdb_alloc(km->km_datasz, UM_SLEEP);
101 
102 	(void) mdb_tgt_vread(t, km->km_data, km->km_datasz, km->km_symspace_va);
103 
104 	km->km_symbuf = (void *)
105 	    KT_RELOC_BUF(km->km_symtab_va, km->km_symspace_va, km->km_data);
106 
107 	km->km_strtab = (char *)
108 	    KT_RELOC_BUF(km->km_strtab_va, km->km_symspace_va, km->km_data);
109 
110 	km->km_symtab = mdb_gelf_symtab_create_raw(&kt->k_file->gf_ehdr,
111 	    &km->km_symtab_hdr, km->km_symbuf,
112 	    &km->km_strtab_hdr, km->km_strtab, MDB_TGT_SYMTAB);
113 }
114 
115 static void
116 kt_load_modules(kt_data_t *kt, mdb_tgt_t *t)
117 {
118 	char name[MAXNAMELEN];
119 	uintptr_t addr, head;
120 
121 	struct module kmod;
122 	struct modctl ctl;
123 	Shdr symhdr, strhdr;
124 	GElf_Sym sym;
125 
126 	kt_module_t *km;
127 
128 	if (mdb_tgt_lookup_by_name(t, MDB_TGT_OBJ_EXEC,
129 	    "modules", &sym, NULL) == -1) {
130 		warn("failed to get 'modules' symbol");
131 		return;
132 	}
133 
134 	if (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &ctl, sizeof (ctl),
135 	    MDB_TGT_OBJ_EXEC, "modules") != sizeof (ctl)) {
136 		warn("failed to read 'modules' struct");
137 		return;
138 	}
139 
140 	addr = head = (uintptr_t)sym.st_value;
141 
142 	do {
143 		if (addr == 0)
144 			break; /* Avoid spurious NULL pointers in list */
145 
146 		if (mdb_tgt_vread(t, &ctl, sizeof (ctl), addr) == -1) {
147 			warn("failed to read modctl at %p", (void *)addr);
148 			return;
149 		}
150 
151 		if (ctl.mod_mp == NULL)
152 			continue; /* No associated krtld structure */
153 
154 		if (mdb_tgt_readstr(t, MDB_TGT_AS_VIRT, name, MAXNAMELEN,
155 		    (uintptr_t)ctl.mod_modname) <= 0) {
156 			warn("failed to read module name at %p",
157 			    (void *)ctl.mod_modname);
158 			continue;
159 		}
160 
161 		mdb_dprintf(MDB_DBG_KMOD, "reading mod %s (%p)\n",
162 		    name, (void *)addr);
163 
164 		if (mdb_nv_lookup(&kt->k_modules, name) != NULL) {
165 			warn("skipping duplicate module '%s', id=%d\n",
166 			    name, ctl.mod_id);
167 			continue;
168 		}
169 
170 		if (mdb_tgt_vread(t, &kmod, sizeof (kmod),
171 		    (uintptr_t)ctl.mod_mp) == -1) {
172 			warn("failed to read module at %p\n",
173 			    (void *)ctl.mod_mp);
174 			continue;
175 		}
176 
177 		if (kmod.symspace == NULL || kmod.symhdr == NULL ||
178 		    kmod.strhdr == NULL) {
179 			/*
180 			 * If no buffer for the symbols has been allocated,
181 			 * or the shdrs for .symtab and .strtab are missing,
182 			 * then we're out of luck.
183 			 */
184 			continue;
185 		}
186 
187 		if (mdb_tgt_vread(t, &symhdr, sizeof (Shdr),
188 		    (uintptr_t)kmod.symhdr) == -1) {
189 			warn("failed to read .symtab header for '%s', id=%d",
190 			    name, ctl.mod_id);
191 			continue;
192 		}
193 
194 		if (mdb_tgt_vread(t, &strhdr, sizeof (Shdr),
195 		    (uintptr_t)kmod.strhdr) == -1) {
196 			warn("failed to read .strtab header for '%s', id=%d",
197 			    name, ctl.mod_id);
198 			continue;
199 		}
200 
201 		/*
202 		 * Now get clever: krtld historically didn't bother updating
203 		 * its own kmod.symsize value.  We know that prior to this bug
204 		 * being fixed, symspace was a contiguous buffer containing
205 		 * .symtab, .strtab, and the symbol hash table in that order.
206 		 * So if symsize is zero, recompute it as the size of .symtab
207 		 * plus the size of .strtab.  We don't need to load the hash
208 		 * table anyway since we re-hash all the symbols internally.
209 		 */
210 		if (kmod.symsize == 0)
211 			kmod.symsize = symhdr.sh_size + strhdr.sh_size;
212 
213 		/*
214 		 * Similar logic can be used to make educated guesses
215 		 * at the values of kmod.symtbl and kmod.strings.
216 		 */
217 		if (kmod.symtbl == NULL)
218 			kmod.symtbl = kmod.symspace;
219 		if (kmod.strings == NULL)
220 			kmod.strings = kmod.symspace + symhdr.sh_size;
221 
222 		/*
223 		 * Make sure things seem reasonable before we proceed
224 		 * to actually read and decipher the symspace.
225 		 */
226 		if (KT_BAD_BUF(kmod.symtbl, kmod.symspace, kmod.symsize) ||
227 		    KT_BAD_BUF(kmod.strings, kmod.symspace, kmod.symsize)) {
228 			warn("skipping module '%s', id=%d (corrupt symspace)\n",
229 			    name, ctl.mod_id);
230 			continue;
231 		}
232 
233 		km = mdb_zalloc(sizeof (kt_module_t), UM_SLEEP);
234 		km->km_name = strdup(name);
235 
236 		(void) mdb_nv_insert(&kt->k_modules, km->km_name, NULL,
237 		    (uintptr_t)km, MDB_NV_EXTNAME);
238 
239 		km->km_datasz = kmod.symsize;
240 		km->km_symspace_va = (uintptr_t)kmod.symspace;
241 		km->km_symtab_va = (uintptr_t)kmod.symtbl;
242 		km->km_strtab_va = (uintptr_t)kmod.strings;
243 		km->km_symtab_hdr = symhdr;
244 		km->km_strtab_hdr = strhdr;
245 		km->km_text_va = (uintptr_t)kmod.text;
246 		km->km_text_size = kmod.text_size;
247 		km->km_data_va = (uintptr_t)kmod.data;
248 		km->km_data_size = kmod.data_size;
249 		km->km_bss_va = (uintptr_t)kmod.bss;
250 		km->km_bss_size = kmod.bss_size;
251 
252 		if (kt->k_ctfvalid) {
253 			km->km_ctf_va = (uintptr_t)kmod.ctfdata;
254 			km->km_ctf_size = kmod.ctfsize;
255 		}
256 
257 		/*
258 		 * Add the module to the end of the list of modules in load-
259 		 * dependency order.  This is needed to load the corresponding
260 		 * debugger modules in the same order for layering purposes.
261 		 */
262 		mdb_list_append(&kt->k_modlist, km);
263 
264 		if (t->t_flags & MDB_TGT_F_PRELOAD) {
265 			mdb_iob_printf(mdb.m_out, " %s", name);
266 			mdb_iob_flush(mdb.m_out);
267 			kt_load_module(kt, t, km);
268 		}
269 
270 	} while ((addr = (uintptr_t)ctl.mod_next) != head);
271 }
272 
273 int
274 kt_setflags(mdb_tgt_t *t, int flags)
275 {
276 	int iochg = ((flags ^ t->t_flags) & MDB_TGT_F_ALLOWIO) &&
277 	    !mdb_prop_postmortem;
278 	int rwchg = (flags ^ t->t_flags) & MDB_TGT_F_RDWR;
279 	kt_data_t *kt = t->t_data;
280 	const char *kvmfile;
281 	void *cookie;
282 	int mode;
283 
284 	if (!iochg && !rwchg)
285 		return (0);
286 
287 	if (kt->k_xpv_domu) {
288 		warn("read-only target");
289 		return (-1);
290 	}
291 
292 	if (iochg) {
293 		kvmfile = (flags & MDB_TGT_F_ALLOWIO) ? "/dev/allkmem" :
294 		    "/dev/kmem";
295 	} else {
296 		kvmfile = kt->k_kvmfile;
297 	}
298 
299 	mode = (flags & MDB_TGT_F_RDWR) ? O_RDWR : O_RDONLY;
300 
301 	if ((cookie = kt->k_kb_ops->kb_open(kt->k_symfile, kvmfile, NULL, mode,
302 	    mdb.m_pname)) == NULL) {
303 		/* We failed to re-open, so don't change t_flags */
304 		warn("failed to re-open target");
305 		return (-1);
306 	}
307 
308 	/*
309 	 * We successfully reopened the target, so update k_kvmfile.  Also set
310 	 * the RDWR and ALLOWIO bits in t_flags to match those in flags.
311 	 */
312 	(void) kt->k_kb_ops->kb_close(kt->k_cookie);
313 	kt->k_cookie = cookie;
314 
315 	if (kvmfile != kt->k_kvmfile) {
316 		strfree(kt->k_kvmfile);
317 		kt->k_kvmfile = strdup(kvmfile);
318 	}
319 
320 	t->t_flags = (t->t_flags & ~(MDB_TGT_F_RDWR | MDB_TGT_F_ALLOWIO)) |
321 	    (flags & (MDB_TGT_F_RDWR | MDB_TGT_F_ALLOWIO));
322 
323 	return (0);
324 }
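/*
 * Illustrative example (assumptions noted): a caller that wants to turn
 * on physical-I/O access on a live kernel would end up here with
 * MDB_TGT_F_ALLOWIO set in 'flags', and the code above reopens the
 * backend on /dev/allkmem instead of /dev/kmem, e.g.
 *
 *	(void) kt_setflags(t, t->t_flags | MDB_TGT_F_ALLOWIO);
 *
 * How the flag change is requested at the user level is outside this
 * file; the snippet only shows the contract of kt_setflags() itself.
 */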
325 
326 /*
327  * Determine which PIDs (if any) have their pages saved in the dump.  We
328  * do this by looking for content flags in dump_flags in the header.  These
329  * flags, which won't be set in older dumps, tell us whether a single process
330  * has had its pages included in the dump.  If a single process has been
331  * included, we need to get the PID for that process from the dump_pids
332  * array in the dump.
333  */
334 static int
335 kt_find_dump_contents(kt_data_t *kt)
336 {
337 	dumphdr_t *dh = kt->k_dumphdr;
338 	pid_t pid = -1;
339 
340 	if (dh->dump_flags & DF_ALL)
341 		return (KT_DUMPCONTENT_ALL);
342 
343 	if (dh->dump_flags & DF_CURPROC) {
344 		if ((pid = kt->k_dump_find_curproc()) == -1)
345 			return (KT_DUMPCONTENT_INVALID);
346 		else
347 			return (pid);
348 	} else {
349 		return (KT_DUMPCONTENT_KERNEL);
350 	}
351 }
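/*
 * In other words (sketch of the mapping implemented above):
 *
 *	dump_flags & DF_ALL	-> KT_DUMPCONTENT_ALL (all memory saved)
 *	dump_flags & DF_CURPROC	-> pid of the process whose pages were
 *				   saved, or KT_DUMPCONTENT_INVALID if it
 *				   cannot be determined
 *	otherwise		-> KT_DUMPCONTENT_KERNEL (kernel pages only)
 *
 * kt_dump_contains_proc() below consumes this value when the debugger
 * context is switched to a user process.
 */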
352 
353 static int
354 kt_dump_contains_proc(mdb_tgt_t *t, void *context)
355 {
356 	kt_data_t *kt = t->t_data;
357 	pid_t (*f_pid)(uintptr_t);
358 	pid_t reqpid;
359 
360 	switch (kt->k_dumpcontent) {
361 	case KT_DUMPCONTENT_KERNEL:
362 		return (0);
363 	case KT_DUMPCONTENT_ALL:
364 		return (1);
365 	case KT_DUMPCONTENT_INVALID:
366 		goto procnotfound;
367 	default:
368 		f_pid = (pid_t (*)()) dlsym(RTLD_NEXT, "mdb_kproc_pid");
369 		if (f_pid == NULL)
370 			goto procnotfound;
371 
372 		reqpid = f_pid((uintptr_t)context);
373 		if (reqpid == -1)
374 			goto procnotfound;
375 
376 		return (kt->k_dumpcontent == reqpid);
377 	}
378 
379 procnotfound:
380 	warn("unable to determine whether dump contains proc %p\n", context);
381 	return (1);
382 }
383 
384 int
385 kt_setcontext(mdb_tgt_t *t, void *context)
386 {
387 	if (context != NULL) {
388 		const char *argv[2];
389 		int argc = 0;
390 		mdb_tgt_t *ct;
391 		kt_data_t *kt = t->t_data;
392 
393 		argv[argc++] = (const char *)context;
394 		argv[argc] = NULL;
395 
396 		if (kt->k_dumphdr != NULL &&
397 		    !kt_dump_contains_proc(t, context)) {
398 			warn("dump does not contain pages for proc %p\n",
399 			    context);
400 			return (-1);
401 		}
402 
403 		if ((ct = mdb_tgt_create(mdb_kproc_tgt_create,
404 		    t->t_flags, argc, argv)) == NULL)
405 			return (-1);
406 
407 		mdb_printf("debugger context set to proc %p\n", context);
408 		mdb_tgt_activate(ct);
409 	} else
410 		mdb_printf("debugger context set to kernel\n");
411 
412 	return (0);
413 }
414 
415 static int
416 kt_stack(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
417 {
418 	kt_data_t *kt = mdb.m_target->t_data;
419 	return (kt->k_dcmd_stack(addr, flags, argc, argv));
420 }
421 
422 static int
423 kt_stackv(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
424 {
425 	kt_data_t *kt = mdb.m_target->t_data;
426 	return (kt->k_dcmd_stackv(addr, flags, argc, argv));
427 }
428 
429 static int
430 kt_stackr(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
431 {
432 	kt_data_t *kt = mdb.m_target->t_data;
433 	return (kt->k_dcmd_stackr(addr, flags, argc, argv));
434 }
435 
436 static int
437 kt_regs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
438 {
439 	kt_data_t *kt = mdb.m_target->t_data;
440 
441 	if (argc != 0 || (flags & DCMD_ADDRSPEC))
442 		return (DCMD_USAGE);
443 
444 	addr = (uintptr_t)kt->k_regs;
445 
446 	return (kt->k_dcmd_regs(addr, flags, argc, argv));
447 }
448 
449 #ifdef __x86
450 static int
451 kt_cpustack(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
452 {
453 	kt_data_t *kt = mdb.m_target->t_data;
454 	return (kt->k_dcmd_cpustack(addr, flags, argc, argv));
455 }
456 
457 static int
458 kt_cpuregs(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
459 {
460 	kt_data_t *kt = mdb.m_target->t_data;
461 	return (kt->k_dcmd_cpuregs(addr, flags, argc, argv));
462 }
463 #endif /* __x86 */
464 
465 /*ARGSUSED*/
466 static int
467 kt_status_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
468 {
469 	kt_data_t *kt = mdb.m_target->t_data;
470 	struct utsname uts;
471 
472 	bzero(&uts, sizeof (uts));
473 	(void) strcpy(uts.nodename, "unknown machine");
474 	(void) kt_uname(mdb.m_target, &uts);
475 
476 	if (mdb_prop_postmortem) {
477 		mdb_printf("debugging %scrash dump %s (%d-bit) from %s\n",
478 		    kt->k_xpv_domu ? "domain " : "", kt->k_kvmfile,
479 		    (int)(sizeof (void *) * NBBY), uts.nodename);
480 	} else {
481 		mdb_printf("debugging live kernel (%d-bit) on %s\n",
482 		    (int)(sizeof (void *) * NBBY), uts.nodename);
483 	}
484 
485 	mdb_printf("operating system: %s %s (%s)\n",
486 	    uts.release, uts.version, uts.machine);
487 
488 	if (kt->k_dumphdr) {
489 		dumphdr_t *dh = kt->k_dumphdr;
490 
491 		mdb_printf("image uuid: %s\n", dh->dump_uuid[0] != '\0' ?
492 		    dh->dump_uuid : "(not set)");
493 		mdb_printf("panic message: %s\n", dh->dump_panicstring);
494 
495 		kt->k_dump_print_content(dh, kt->k_dumpcontent);
496 	} else {
497 		char uuid[37];
498 
499 		if (mdb_readsym(uuid, 37, "dump_osimage_uuid") == 37 &&
500 		    uuid[36] == '\0') {
501 			mdb_printf("image uuid: %s\n", uuid);
502 		}
503 	}
504 
505 	return (DCMD_OK);
506 }
507 
508 static const mdb_dcmd_t kt_dcmds[] = {
509 	{ "$c", "?[cnt]", "print stack backtrace", kt_stack },
510 	{ "$C", "?[cnt]", "print stack backtrace", kt_stackv },
511 	{ "$r", NULL, "print general-purpose registers", kt_regs },
512 	{ "$?", NULL, "print status and registers", kt_regs },
513 	{ "regs", NULL, "print general-purpose registers", kt_regs },
514 	{ "stack", "?[cnt]", "print stack backtrace", kt_stack },
515 	{ "stackregs", "?", "print stack backtrace and registers", kt_stackr },
516 #ifdef __x86
517 	{ "cpustack", "?[-v] [-c cpuid] [cnt]", "print stack backtrace for a "
518 	    "specific CPU", kt_cpustack },
519 	{ "cpuregs", "?[-c cpuid]", "print general-purpose registers for a "
520 	    "specific CPU", kt_cpuregs },
521 #endif
522 	{ "status", NULL, "print summary of current target", kt_status_dcmd },
523 	{ NULL }
524 };
525 
526 static uintmax_t
527 reg_disc_get(const mdb_var_t *v)
528 {
529 	mdb_tgt_t *t = MDB_NV_COOKIE(v);
530 	kt_data_t *kt = t->t_data;
531 	mdb_tgt_reg_t r = 0;
532 
533 	(void) mdb_tgt_getareg(t, kt->k_tid, mdb_nv_get_name(v), &r);
534 	return (r);
535 }
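/*
 * Sketch of how the discipline above is used: kt_activate() registers
 * each register description in kt->k_rds as a named variable with
 * reg_disc as its discipline, so reading the variable (for example
 * evaluating something like "<sp" at the mdb prompt) calls
 * reg_disc_get(), which fetches the current value via mdb_tgt_getareg()
 * for the representative thread kt->k_tid.
 */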
536 
537 static kt_module_t *
538 kt_module_by_name(kt_data_t *kt, const char *name)
539 {
540 	kt_module_t *km;
541 
542 	for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
543 		if (strcmp(name, km->km_name) == 0)
544 			return (km);
545 	}
546 
547 	return (NULL);
548 }
549 
550 void
551 kt_activate(mdb_tgt_t *t)
552 {
553 	static const mdb_nv_disc_t reg_disc = { NULL, reg_disc_get };
554 	kt_data_t *kt = t->t_data;
555 	void *sym;
556 
557 	int oflag;
558 
559 	mdb_prop_postmortem = kt->k_xpv_domu || (kt->k_dumphdr != NULL);
560 	mdb_prop_kernel = TRUE;
561 	mdb_prop_datamodel = MDB_TGT_MODEL_NATIVE;
562 
563 	if (kt->k_activated == FALSE) {
564 		struct utsname u1, u2;
565 		/*
566 		 * If we're examining a crash dump, root is /, and uname(2)
567 		 * does not match the utsname in the dump, issue a warning.
568 		 * Note that we are assuming that the modules and macros in
569 		 * /usr/lib are compiled against the kernel from uname -rv.
570 		 */
571 		if (mdb_prop_postmortem && strcmp(mdb.m_root, "/") == 0 &&
572 		    uname(&u1) >= 0 && kt_uname(t, &u2) >= 0 &&
573 		    (strcmp(u1.release, u2.release) ||
574 		    strcmp(u1.version, u2.version))) {
575 			mdb_warn("warning: dump is from %s %s %s; dcmds and "
576 			    "macros may not match kernel implementation\n",
577 			    u2.sysname, u2.release, u2.version);
578 		}
579 
580 		if (mdb_module_load(KT_MODULE, MDB_MOD_GLOBAL) < 0) {
581 			warn("failed to load kernel support module -- "
582 			    "some modules may not load\n");
583 		}
584 
585 		if (mdb_prop_postmortem && kt->k_dumphdr != NULL) {
586 			sym = dlsym(RTLD_NEXT, "mdb_dump_print_content");
587 			if (sym != NULL)
588 				kt->k_dump_print_content = (void (*)())sym;
589 
590 			sym = dlsym(RTLD_NEXT, "mdb_dump_find_curproc");
591 			if (sym != NULL)
592 				kt->k_dump_find_curproc = (int (*)())sym;
593 
594 			kt->k_dumpcontent = kt_find_dump_contents(kt);
595 		}
596 
597 		if (t->t_flags & MDB_TGT_F_PRELOAD) {
598 			oflag = mdb_iob_getflags(mdb.m_out) & MDB_IOB_PGENABLE;
599 
600 			mdb_iob_clrflags(mdb.m_out, oflag);
601 			mdb_iob_puts(mdb.m_out, "Preloading module symbols: [");
602 			mdb_iob_flush(mdb.m_out);
603 		}
604 
605 		if (!(t->t_flags & MDB_TGT_F_NOLOAD)) {
606 			kt_load_modules(kt, t);
607 
608 			/*
609 			 * Determine where the CTF data for krtld is. If krtld
610 			 * is rolled into unix, force load the MDB krtld
611 			 * module.
612 			 */
613 			kt->k_rtld_name = "krtld";
614 
615 			if (kt_module_by_name(kt, "krtld") == NULL) {
616 				(void) mdb_module_load("krtld", MDB_MOD_SILENT);
617 				kt->k_rtld_name = "unix";
618 			}
619 		}
620 
622 		if (t->t_flags & MDB_TGT_F_PRELOAD) {
623 			mdb_iob_puts(mdb.m_out, " ]\n");
624 			mdb_iob_setflags(mdb.m_out, oflag);
625 		}
626 
627 		kt->k_activated = TRUE;
628 	}
629 
630 	(void) mdb_tgt_register_dcmds(t, &kt_dcmds[0], MDB_MOD_FORCE);
631 
632 	/* Export some of our registers as named variables */
633 	mdb_tgt_register_regvars(t, kt->k_rds, &reg_disc, MDB_NV_RDONLY);
634 
635 	mdb_tgt_elf_export(kt->k_file);
636 }
637 
638 void
639 kt_deactivate(mdb_tgt_t *t)
640 {
641 	kt_data_t *kt = t->t_data;
642 
643 	const mdb_tgt_regdesc_t *rdp;
644 	const mdb_dcmd_t *dcp;
645 
646 	for (rdp = kt->k_rds; rdp->rd_name != NULL; rdp++) {
647 		mdb_var_t *v;
648 
649 		if (!(rdp->rd_flags & MDB_TGT_R_EXPORT))
650 			continue; /* Didn't export register as a variable */
651 
652 		if ((v = mdb_nv_lookup(&mdb.m_nv, rdp->rd_name)) != NULL) {
653 			v->v_flags &= ~MDB_NV_PERSIST;
654 			mdb_nv_remove(&mdb.m_nv, v);
655 		}
656 	}
657 
658 	for (dcp = &kt_dcmds[0]; dcp->dc_name != NULL; dcp++) {
659 		if (mdb_module_remove_dcmd(t->t_module, dcp->dc_name) == -1)
660 			warn("failed to remove dcmd %s", dcp->dc_name);
661 	}
662 
663 	mdb_prop_postmortem = FALSE;
664 	mdb_prop_kernel = FALSE;
665 	mdb_prop_datamodel = MDB_TGT_MODEL_UNKNOWN;
666 }
667 
668 /*ARGSUSED*/
669 const char *
670 kt_name(mdb_tgt_t *t)
671 {
672 	return ("kvm");
673 }
674 
675 const char *
676 kt_platform(mdb_tgt_t *t)
677 {
678 	kt_data_t *kt = t->t_data;
679 	return (kt->k_platform);
680 }
681 
682 int
683 kt_uname(mdb_tgt_t *t, struct utsname *utsp)
684 {
685 	return (mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, utsp,
686 	    sizeof (struct utsname), MDB_TGT_OBJ_EXEC, "utsname"));
687 }
688 
689 /*ARGSUSED*/
690 int
691 kt_dmodel(mdb_tgt_t *t)
692 {
693 	return (MDB_TGT_MODEL_NATIVE);
694 }
695 
696 ssize_t
697 kt_aread(mdb_tgt_t *t, mdb_tgt_as_t as, void *buf,
698     size_t nbytes, mdb_tgt_addr_t addr)
699 {
700 	kt_data_t *kt = t->t_data;
701 	ssize_t rval;
702 
703 	if ((rval = kt->k_kb_ops->kb_aread(kt->k_cookie, addr, buf,
704 	    nbytes, as)) == -1)
705 		return (set_errno(EMDB_NOMAP));
706 
707 	return (rval);
708 }
709 
710 ssize_t
711 kt_awrite(mdb_tgt_t *t, mdb_tgt_as_t as, const void *buf,
712     size_t nbytes, mdb_tgt_addr_t addr)
713 {
714 	kt_data_t *kt = t->t_data;
715 	ssize_t rval;
716 
717 	if ((rval = kt->k_kb_ops->kb_awrite(kt->k_cookie, addr, buf,
718 	    nbytes, as)) == -1)
719 		return (set_errno(EMDB_NOMAP));
720 
721 	return (rval);
722 }
723 
724 ssize_t
725 kt_vread(mdb_tgt_t *t, void *buf, size_t nbytes, uintptr_t addr)
726 {
727 	kt_data_t *kt = t->t_data;
728 	ssize_t rval;
729 
730 	if ((rval = kt->k_kb_ops->kb_kread(kt->k_cookie, addr, buf,
731 	    nbytes)) == -1)
732 		return (set_errno(EMDB_NOMAP));
733 
734 	return (rval);
735 }
736 
737 ssize_t
738 kt_vwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, uintptr_t addr)
739 {
740 	kt_data_t *kt = t->t_data;
741 	ssize_t rval;
742 
743 	if ((rval = kt->k_kb_ops->kb_kwrite(kt->k_cookie, addr, buf,
744 	    nbytes)) == -1)
745 		return (set_errno(EMDB_NOMAP));
746 
747 	return (rval);
748 }
749 
750 ssize_t
751 kt_fread(mdb_tgt_t *t, void *buf, size_t nbytes, uintptr_t addr)
752 {
753 	return (kt_vread(t, buf, nbytes, addr));
754 }
755 
756 ssize_t
757 kt_fwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, uintptr_t addr)
758 {
759 	return (kt_vwrite(t, buf, nbytes, addr));
760 }
761 
762 ssize_t
763 kt_pread(mdb_tgt_t *t, void *buf, size_t nbytes, physaddr_t addr)
764 {
765 	kt_data_t *kt = t->t_data;
766 	ssize_t rval;
767 
768 	if ((rval = kt->k_kb_ops->kb_pread(kt->k_cookie, addr, buf,
769 	    nbytes)) == -1)
770 		return (set_errno(EMDB_NOMAP));
771 
772 	return (rval);
773 }
774 
775 ssize_t
776 kt_pwrite(mdb_tgt_t *t, const void *buf, size_t nbytes, physaddr_t addr)
777 {
778 	kt_data_t *kt = t->t_data;
779 	ssize_t rval;
780 
781 	if ((rval = kt->k_kb_ops->kb_pwrite(kt->k_cookie, addr, buf,
782 	    nbytes)) == -1)
783 		return (set_errno(EMDB_NOMAP));
784 
785 	return (rval);
786 }
787 
788 int
789 kt_vtop(mdb_tgt_t *t, mdb_tgt_as_t as, uintptr_t va, physaddr_t *pap)
790 {
791 	kt_data_t *kt = t->t_data;
792 
793 	struct as *asp;
794 	physaddr_t pa;
795 	mdb_module_t *mod;
796 	mdb_var_t *v;
797 	int (*fptr)(uintptr_t, struct as *, physaddr_t *);
798 
799 	switch ((uintptr_t)as) {
800 	case (uintptr_t)MDB_TGT_AS_PHYS:
801 	case (uintptr_t)MDB_TGT_AS_FILE:
802 	case (uintptr_t)MDB_TGT_AS_IO:
803 		return (set_errno(EINVAL));
804 	case (uintptr_t)MDB_TGT_AS_VIRT:
805 		asp = kt->k_as;
806 		break;
807 	default:
808 		asp = (struct as *)as;
809 	}
810 
811 	if ((pa = kt->k_kb_ops->kb_vtop(kt->k_cookie, asp, va)) != -1ULL) {
812 		*pap = pa;
813 		return (0);
814 	}
815 
816 	if ((v = mdb_nv_lookup(&mdb.m_modules, "unix")) != NULL &&
817 	    (mod = mdb_nv_get_cookie(v)) != NULL) {
818 
819 		fptr = (int (*)(uintptr_t, struct as *, physaddr_t *))
820 		    dlsym(mod->mod_hdl, "platform_vtop");
821 
822 		if ((fptr != NULL) && ((*fptr)(va, asp, pap) == 0))
823 			return (0);
824 	}
825 
826 	return (set_errno(EMDB_NOMAP));
827 }
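/*
 * The fallback above assumes (as the dlsym() cast documents) that the
 * optional hook exported by the unix dmod has the signature
 *
 *	int platform_vtop(uintptr_t va, struct as *asp, physaddr_t *pap);
 *
 * returning 0 on success with *pap filled in, and nonzero on failure.
 * A dmod that cannot translate a given address should simply fail so
 * that kt_vtop() reports EMDB_NOMAP as before.
 */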
828 
829 int
830 kt_lookup_by_name(mdb_tgt_t *t, const char *obj, const char *name,
831     GElf_Sym *symp, mdb_syminfo_t *sip)
832 {
833 	kt_data_t *kt = t->t_data;
834 	kt_module_t *km, kmod;
835 	mdb_var_t *v;
836 	int n;
837 
838 	/*
839 	 * To simplify the implementation, we create a fake module on the stack
840 	 * which is "prepended" to k_modlist and whose symtab is kt->k_symtab.
841 	 */
842 	kmod.km_symtab = kt->k_symtab;
843 	kmod.km_list.ml_next = mdb_list_next(&kt->k_modlist);
844 
845 	switch ((uintptr_t)obj) {
846 	case (uintptr_t)MDB_TGT_OBJ_EXEC:
847 		km = &kmod;
848 		n = 1;
849 		break;
850 
851 	case (uintptr_t)MDB_TGT_OBJ_EVERY:
852 		km = &kmod;
853 		n = mdb_nv_size(&kt->k_modules) + 1;
854 		break;
855 
856 	case (uintptr_t)MDB_TGT_OBJ_RTLD:
857 		obj = kt->k_rtld_name;
858 		/*FALLTHRU*/
859 
860 	default:
861 		if ((v = mdb_nv_lookup(&kt->k_modules, obj)) == NULL)
862 			return (set_errno(EMDB_NOOBJ));
863 
864 		km = mdb_nv_get_cookie(v);
865 		n = 1;
866 
867 		if (km->km_symtab == NULL)
868 			kt_load_module(kt, t, km);
869 	}
870 
871 	for (; n > 0; n--, km = mdb_list_next(km)) {
872 		if (mdb_gelf_symtab_lookup_by_name(km->km_symtab, name,
873 		    symp, &sip->sym_id) == 0) {
874 			sip->sym_table = MDB_TGT_SYMTAB;
875 			return (0);
876 		}
877 	}
878 
879 	return (set_errno(EMDB_NOSYM));
880 }
881 
882 int
883 kt_lookup_by_addr(mdb_tgt_t *t, uintptr_t addr, uint_t flags,
884     char *buf, size_t nbytes, GElf_Sym *symp, mdb_syminfo_t *sip)
885 {
886 	kt_data_t *kt = t->t_data;
887 	kt_module_t kmods[3], *kmods_begin = &kmods[0], *kmods_end;
888 	const char *name;
889 
890 	kt_module_t *km = &kmods[0];	/* Point km at first fake module */
891 	kt_module_t *sym_km = NULL;	/* Module associated with best sym */
892 	GElf_Sym sym;			/* Best symbol found so far if !exact */
893 	uint_t symid;			/* ID of best symbol found so far */
894 
895 	/*
896 	 * To simplify the implementation, we create fake modules on the stack
897 	 * that are "prepended" to k_modlist and whose symtab is set to
898 	 * each of three special symbol tables, in order of precedence.
899 	 */
900 	km->km_symtab = mdb.m_prsym;
901 
902 	if (kt->k_symtab != NULL) {
903 		km->km_list.ml_next = (mdb_list_t *)(km + 1);
904 		km = mdb_list_next(km);
905 		km->km_symtab = kt->k_symtab;
906 	}
907 
908 	if (kt->k_dynsym != NULL) {
909 		km->km_list.ml_next = (mdb_list_t *)(km + 1);
910 		km = mdb_list_next(km);
911 		km->km_symtab = kt->k_dynsym;
912 	}
913 
914 	km->km_list.ml_next = mdb_list_next(&kt->k_modlist);
915 	kmods_end = km;
916 
917 	/*
918 	 * Now iterate over the list of fake and real modules.  If the module
919 	 * has no symbol table and the address is in the text section,
920 	 * instantiate the module's symbol table.  In exact mode, we can
921 	 * jump to 'found' immediately if we match.  Otherwise we continue
922 	 * looking and improve our choice if we find a closer symbol.
923 	 */
924 	for (km = &kmods[0]; km != NULL; km = mdb_list_next(km)) {
925 		if (km->km_symtab == NULL && addr >= km->km_text_va &&
926 		    addr < km->km_text_va + km->km_text_size)
927 			kt_load_module(kt, t, km);
928 
929 		if (mdb_gelf_symtab_lookup_by_addr(km->km_symtab, addr,
930 		    flags, buf, nbytes, symp, &sip->sym_id) != 0 ||
931 		    symp->st_value == 0)
932 			continue;
933 
934 		if (flags & MDB_TGT_SYM_EXACT) {
935 			sym_km = km;
936 			goto found;
937 		}
938 
939 		if (sym_km == NULL || mdb_gelf_sym_closer(symp, &sym, addr)) {
940 			sym_km = km;
941 			sym = *symp;
942 			symid = sip->sym_id;
943 		}
944 	}
945 
946 	if (sym_km == NULL)
947 		return (set_errno(EMDB_NOSYMADDR));
948 
949 	*symp = sym; /* Copy our best symbol into the caller's symbol */
950 	sip->sym_id = symid;
951 found:
952 	/*
953 	 * Once we've found something, copy the final name into the caller's
954 	 * buffer and prefix it with the load object name if appropriate.
955 	 */
956 	if (sym_km != NULL) {
957 		name = mdb_gelf_sym_name(sym_km->km_symtab, symp);
958 
959 		if (sym_km < kmods_begin || sym_km > kmods_end) {
960 			(void) mdb_snprintf(buf, nbytes, "%s`%s",
961 			    sym_km->km_name, name);
962 		} else if (nbytes > 0) {
963 			(void) strncpy(buf, name, nbytes);
964 			buf[nbytes - 1] = '\0';
965 		}
966 
967 		if (sym_km->km_symtab == mdb.m_prsym)
968 			sip->sym_table = MDB_TGT_PRVSYM;
969 		else
970 			sip->sym_table = MDB_TGT_SYMTAB;
971 	} else {
972 		sip->sym_table = MDB_TGT_SYMTAB;
973 	}
974 
975 	return (0);
976 }
977 
978 static int
979 kt_symtab_func(void *data, const GElf_Sym *sym, const char *name, uint_t id)
980 {
981 	kt_symarg_t *argp = data;
982 
983 	if (mdb_tgt_sym_match(sym, argp->sym_type)) {
984 		argp->sym_info.sym_id = id;
985 
986 		return (argp->sym_cb(argp->sym_data, sym, name,
987 		    &argp->sym_info, argp->sym_obj));
988 	}
989 
990 	return (0);
991 }
992 
993 static void
994 kt_symtab_iter(mdb_gelf_symtab_t *gst, uint_t type, const char *obj,
995     mdb_tgt_sym_f *cb, void *p)
996 {
997 	kt_symarg_t arg;
998 
999 	arg.sym_cb = cb;
1000 	arg.sym_data = p;
1001 	arg.sym_type = type;
1002 	arg.sym_info.sym_table = gst->gst_tabid;
1003 	arg.sym_obj = obj;
1004 
1005 	mdb_gelf_symtab_iter(gst, kt_symtab_func, &arg);
1006 }
1007 
1008 int
1009 kt_symbol_iter(mdb_tgt_t *t, const char *obj, uint_t which, uint_t type,
1010     mdb_tgt_sym_f *cb, void *data)
1011 {
1012 	kt_data_t *kt = t->t_data;
1013 	kt_module_t *km;
1014 
1015 	mdb_gelf_symtab_t *symtab = NULL;
1016 	mdb_var_t *v;
1017 
1018 	switch ((uintptr_t)obj) {
1019 	case (uintptr_t)MDB_TGT_OBJ_EXEC:
1020 		if (which == MDB_TGT_SYMTAB)
1021 			symtab = kt->k_symtab;
1022 		else
1023 			symtab = kt->k_dynsym;
1024 		break;
1025 
1026 	case (uintptr_t)MDB_TGT_OBJ_EVERY:
1027 		if (which == MDB_TGT_DYNSYM) {
1028 			symtab = kt->k_dynsym;
1029 			obj = MDB_TGT_OBJ_EXEC;
1030 			break;
1031 		}
1032 
1033 		mdb_nv_rewind(&kt->k_modules);
1034 		while ((v = mdb_nv_advance(&kt->k_modules)) != NULL) {
1035 			km = mdb_nv_get_cookie(v);
1036 
1037 			if (km->km_symtab == NULL)
1038 				kt_load_module(kt, t, km);
1039 
1040 			if (km->km_symtab != NULL)
1041 				kt_symtab_iter(km->km_symtab, type,
1042 				    km->km_name, cb, data);
1043 		}
1044 		break;
1045 
1046 	case (uintptr_t)MDB_TGT_OBJ_RTLD:
1047 		obj = kt->k_rtld_name;
1048 		/*FALLTHRU*/
1049 
1050 	default:
1051 		v = mdb_nv_lookup(&kt->k_modules, obj);
1052 
1053 		if (v == NULL)
1054 			return (set_errno(EMDB_NOOBJ));
1055 
1056 		km = mdb_nv_get_cookie(v);
1057 
1058 		if (km->km_symtab == NULL)
1059 			kt_load_module(kt, t, km);
1060 
1061 		symtab = km->km_symtab;
1062 	}
1063 
1064 	if (symtab)
1065 		kt_symtab_iter(symtab, type, obj, cb, data);
1066 
1067 	return (0);
1068 }
1069 
1070 static int
1071 kt_mapping_walk(uintptr_t addr, const void *data, kt_maparg_t *marg)
1072 {
1073 	/*
1074 	 * This is a bit sketchy but avoids problematic compilation of this
1075 	 * target against the current VM implementation.  Now that we have
1076 	 * vmem, we can make this less broken and more informative by changing
1077 	 * this code to invoke the vmem walker in the near future.
1078 	 */
1079 	const struct kt_seg {
1080 		caddr_t s_base;
1081 		size_t s_size;
1082 	} *segp = (const struct kt_seg *)data;
1083 
1084 	mdb_map_t map;
1085 	GElf_Sym sym;
1086 	mdb_syminfo_t info;
1087 
1088 	map.map_base = (uintptr_t)segp->s_base;
1089 	map.map_size = segp->s_size;
1090 	map.map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X;
1091 
1092 	if (kt_lookup_by_addr(marg->map_target, addr, MDB_TGT_SYM_EXACT,
1093 	    map.map_name, MDB_TGT_MAPSZ, &sym, &info) == -1) {
1094 
1095 		(void) mdb_iob_snprintf(map.map_name, MDB_TGT_MAPSZ,
1096 		    "%lr", addr);
1097 	}
1098 
1099 	return (marg->map_cb(marg->map_data, &map, map.map_name));
1100 }
1101 
1102 int
1103 kt_mapping_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private)
1104 {
1105 	kt_data_t *kt = t->t_data;
1106 	kt_maparg_t m;
1107 
1108 	m.map_target = t;
1109 	m.map_cb = func;
1110 	m.map_data = private;
1111 
1112 	return (mdb_pwalk("seg", (mdb_walk_cb_t)kt_mapping_walk, &m,
1113 	    (uintptr_t)kt->k_as));
1114 }
1115 
1116 static const mdb_map_t *
1117 kt_module_to_map(kt_module_t *km, mdb_map_t *map)
1118 {
1119 	(void) strncpy(map->map_name, km->km_name, MDB_TGT_MAPSZ);
1120 	map->map_name[MDB_TGT_MAPSZ - 1] = '\0';
1121 	map->map_base = km->km_text_va;
1122 	map->map_size = km->km_text_size;
1123 	map->map_flags = MDB_TGT_MAP_R | MDB_TGT_MAP_W | MDB_TGT_MAP_X;
1124 
1125 	return (map);
1126 }
1127 
1128 int
1129 kt_object_iter(mdb_tgt_t *t, mdb_tgt_map_f *func, void *private)
1130 {
1131 	kt_data_t *kt = t->t_data;
1132 	kt_module_t *km;
1133 	mdb_map_t m;
1134 
1135 	for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
1136 		if (func(private, kt_module_to_map(km, &m), km->km_name) == -1)
1137 			break;
1138 	}
1139 
1140 	return (0);
1141 }
1142 
1143 const mdb_map_t *
1144 kt_addr_to_map(mdb_tgt_t *t, uintptr_t addr)
1145 {
1146 	kt_data_t *kt = t->t_data;
1147 	kt_module_t *km;
1148 
1149 	for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
1150 		if (addr - km->km_text_va < km->km_text_size ||
1151 		    addr - km->km_data_va < km->km_data_size ||
1152 		    addr - km->km_bss_va < km->km_bss_size)
1153 			return (kt_module_to_map(km, &kt->k_map));
1154 	}
1155 
1156 	(void) set_errno(EMDB_NOMAP);
1157 	return (NULL);
1158 }
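/*
 * Note on the range checks above: since addr and the km_*_va fields are
 * unsigned, the expression (addr - base < size) is a compact test for
 * base <= addr < base + size.  If addr is below base, the subtraction
 * wraps around to a huge value that cannot be less than size.  For
 * example, with base = 0x1000 and size = 0x100, addr = 0x0fff yields
 * 0xffff...ffff, which correctly fails the comparison.
 */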
1159 
1160 const mdb_map_t *
1161 kt_name_to_map(mdb_tgt_t *t, const char *name)
1162 {
1163 	kt_data_t *kt = t->t_data;
1164 	kt_module_t *km;
1165 	mdb_map_t m;
1166 
1167 	/*
1168 	 * If name is MDB_TGT_OBJ_EXEC, return the first module on the list,
1169 	 * which will be unix since we keep k_modlist in load order.
1170 	 */
1171 	if (name == MDB_TGT_OBJ_EXEC)
1172 		return (kt_module_to_map(mdb_list_next(&kt->k_modlist), &m));
1173 
1174 	if (name == MDB_TGT_OBJ_RTLD)
1175 		name = kt->k_rtld_name;
1176 
1177 	if ((km = kt_module_by_name(kt, name)) != NULL)
1178 		return (kt_module_to_map(km, &m));
1179 
1180 	(void) set_errno(EMDB_NOOBJ);
1181 	return (NULL);
1182 }
1183 
1184 static ctf_file_t *
1185 kt_load_ctfdata(mdb_tgt_t *t, kt_module_t *km)
1186 {
1187 	kt_data_t *kt = t->t_data;
1188 	int err;
1189 
1190 	if (km->km_ctfp != NULL)
1191 		return (km->km_ctfp);
1192 
1193 	if (km->km_ctf_va == 0) {
1194 		(void) set_errno(EMDB_NOCTF);
1195 		return (NULL);
1196 	}
1197 
1198 	if (km->km_symtab == NULL)
1199 		kt_load_module(t->t_data, t, km);
1200 
1201 	if ((km->km_ctf_buf = mdb_alloc(km->km_ctf_size, UM_NOSLEEP)) == NULL) {
1202 		warn("failed to allocate memory to load %s debugging "
1203 		    "information", km->km_name);
1204 		return (NULL);
1205 	}
1206 
1207 	if (mdb_tgt_vread(t, km->km_ctf_buf, km->km_ctf_size,
1208 	    km->km_ctf_va) != km->km_ctf_size) {
1209 		warn("failed to read %lu bytes of debug data for %s at %p",
1210 		    (ulong_t)km->km_ctf_size, km->km_name,
1211 		    (void *)km->km_ctf_va);
1212 		mdb_free(km->km_ctf_buf, km->km_ctf_size);
1213 		km->km_ctf_buf = NULL;
1214 		return (NULL);
1215 	}
1216 
1217 	if ((km->km_ctfp = mdb_ctf_bufopen((const void *)km->km_ctf_buf,
1218 	    km->km_ctf_size, km->km_symbuf, &km->km_symtab_hdr,
1219 	    km->km_strtab, &km->km_strtab_hdr, &err)) == NULL) {
1220 		mdb_free(km->km_ctf_buf, km->km_ctf_size);
1221 		km->km_ctf_buf = NULL;
1222 		(void) set_errno(ctf_to_errno(err));
1223 		return (NULL);
1224 	}
1225 
1226 	mdb_dprintf(MDB_DBG_KMOD, "loaded %lu bytes of CTF data for %s\n",
1227 	    (ulong_t)km->km_ctf_size, km->km_name);
1228 
1229 	if (ctf_parent_name(km->km_ctfp) != NULL) {
1230 		mdb_var_t *v;
1231 
1232 		if ((v = mdb_nv_lookup(&kt->k_modules,
1233 		    ctf_parent_name(km->km_ctfp))) == NULL) {
1234 			warn("failed to load CTF data for %s - parent %s not "
1235 			    "loaded\n", km->km_name,
1236 			    ctf_parent_name(km->km_ctfp));
1237 		}
1238 
1239 		if (v != NULL) {
1240 			kt_module_t *pm = mdb_nv_get_cookie(v);
1241 
1242 			if (pm->km_ctfp == NULL)
1243 				(void) kt_load_ctfdata(t, pm);
1244 
1245 			if (pm->km_ctfp != NULL && ctf_import(km->km_ctfp,
1246 			    pm->km_ctfp) == CTF_ERR) {
1247 				warn("failed to import parent types into "
1248 				    "%s: %s\n", km->km_name,
1249 				    ctf_errmsg(ctf_errno(km->km_ctfp)));
1250 			}
1251 		}
1252 	}
1253 
1254 	return (km->km_ctfp);
1255 }
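/*
 * A note on the parent handling above: module CTF containers are
 * "uniquified" against a parent (normally genunix; cf. KT_CTFPARENT),
 * meaning types that also appear in the parent are elided from the
 * child and must be re-imported before the child is usable.  Sketch of
 * the dependency, assuming a hypothetical module "foo" built against
 * genunix:
 *
 *	foo.ctf  --parent-->  genunix.ctf
 *
 * so kt_load_ctfdata() loads the parent's CTF first (recursively, via
 * the same function) and then calls ctf_import() to splice the parent
 * types into foo's container.
 */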
1256 
1257 ctf_file_t *
1258 kt_addr_to_ctf(mdb_tgt_t *t, uintptr_t addr)
1259 {
1260 	kt_data_t *kt = t->t_data;
1261 	kt_module_t *km;
1262 
1263 	for (km = mdb_list_next(&kt->k_modlist); km; km = mdb_list_next(km)) {
1264 		if (addr - km->km_text_va < km->km_text_size ||
1265 		    addr - km->km_data_va < km->km_data_size ||
1266 		    addr - km->km_bss_va < km->km_bss_size)
1267 			return (kt_load_ctfdata(t, km));
1268 	}
1269 
1270 	(void) set_errno(EMDB_NOMAP);
1271 	return (NULL);
1272 }
1273 
1274 ctf_file_t *
1275 kt_name_to_ctf(mdb_tgt_t *t, const char *name)
1276 {
1277 	kt_data_t *kt = t->t_data;
1278 	kt_module_t *km;
1279 
1280 	if (name == MDB_TGT_OBJ_EXEC)
1281 		name = KT_CTFPARENT;
1282 	else if (name == MDB_TGT_OBJ_RTLD)
1283 		name = kt->k_rtld_name;
1284 
1285 	if ((km = kt_module_by_name(kt, name)) != NULL)
1286 		return (kt_load_ctfdata(t, km));
1287 
1288 	(void) set_errno(EMDB_NOOBJ);
1289 	return (NULL);
1290 }
1291 
1292 /*ARGSUSED*/
1293 int
1294 kt_status(mdb_tgt_t *t, mdb_tgt_status_t *tsp)
1295 {
1296 	kt_data_t *kt = t->t_data;
1297 	bzero(tsp, sizeof (mdb_tgt_status_t));
1298 	tsp->st_state = (kt->k_xpv_domu || (kt->k_dumphdr != NULL)) ?
1299 	    MDB_TGT_DEAD : MDB_TGT_RUNNING;
1300 	return (0);
1301 }
1302 
1303 static ssize_t
1304 kt_xd_dumphdr(mdb_tgt_t *t, void *buf, size_t nbytes)
1305 {
1306 	kt_data_t *kt = t->t_data;
1307 
1308 	if (buf == NULL && nbytes == 0)
1309 		return (sizeof (dumphdr_t));
1310 
1311 	if (kt->k_dumphdr == NULL)
1312 		return (set_errno(ENODATA));
1313 
1314 	nbytes = MIN(nbytes, sizeof (dumphdr_t));
1315 	bcopy(kt->k_dumphdr, buf, nbytes);
1316 
1317 	return (nbytes);
1318 }
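/*
 * Illustrative usage of the xdata handler above (the protocol is implied
 * by the NULL/0 check): a consumer first calls with a NULL buffer to
 * learn the required size, then calls again with a real buffer:
 *
 *	dumphdr_t dh;
 *	ssize_t sz = kt_xd_dumphdr(t, NULL, 0);
 *	if (sz == sizeof (dumphdr_t) && kt_xd_dumphdr(t, &dh, sz) == sz)
 *		mdb_printf("%s\n", dh.dump_panicstring);
 *
 * In practice this handler is reached through the generic target xdata
 * interface under the name "dumphdr", registered in mdb_kvm_tgt_create().
 */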
1319 
1320 void
1321 kt_destroy(mdb_tgt_t *t)
1322 {
1323 	kt_data_t *kt = t->t_data;
1324 	kt_module_t *km, *nkm;
1325 
1326 	(void) mdb_module_unload(KT_MODULE, 0);
1327 
1328 	if (kt->k_regs != NULL)
1329 		mdb_free(kt->k_regs, kt->k_regsize);
1330 
1331 	if (kt->k_symtab != NULL)
1332 		mdb_gelf_symtab_destroy(kt->k_symtab);
1333 
1334 	if (kt->k_dynsym != NULL)
1335 		mdb_gelf_symtab_destroy(kt->k_dynsym);
1336 
1337 	if (kt->k_dumphdr != NULL)
1338 		mdb_free(kt->k_dumphdr, sizeof (dumphdr_t));
1339 
1340 	mdb_gelf_destroy(kt->k_file);
1341 
1342 	(void) kt->k_kb_ops->kb_close(kt->k_cookie);
1343 
1344 	for (km = mdb_list_next(&kt->k_modlist); km; km = nkm) {
1345 		if (km->km_symtab)
1346 			mdb_gelf_symtab_destroy(km->km_symtab);
1347 
1348 		if (km->km_data)
1349 			mdb_free(km->km_data, km->km_datasz);
1350 
1351 		if (km->km_ctfp)
1352 			ctf_close(km->km_ctfp);
1353 
1354 		if (km->km_ctf_buf != NULL)
1355 			mdb_free(km->km_ctf_buf, km->km_ctf_size);
1356 
1357 		nkm = mdb_list_next(km);
1358 		strfree(km->km_name);
1359 		mdb_free(km, sizeof (kt_module_t));
1360 	}
1361 
1362 	mdb_nv_destroy(&kt->k_modules);
1363 
1364 	strfree(kt->k_kvmfile);
1365 	if (kt->k_symfile != NULL)
1366 		strfree(kt->k_symfile);
1367 
1368 	mdb_free(kt, sizeof (kt_data_t));
1369 }
1370 
1371 static int
1372 kt_data_stub(void)
1373 {
1374 	return (-1);
1375 }
1376 
1377 int
1378 mdb_kvm_tgt_create(mdb_tgt_t *t, int argc, const char *argv[])
1379 {
1380 	kt_data_t *kt = mdb_zalloc(sizeof (kt_data_t), UM_SLEEP);
1381 	mdb_kb_ops_t *kvm_kb_ops = libkvm_kb_ops();
1382 	int oflag = (t->t_flags & MDB_TGT_F_RDWR) ? O_RDWR : O_RDONLY;
1383 	struct utsname uts;
1384 	GElf_Sym sym;
1385 	pgcnt_t pmem;
1386 
1388 	if (argc == 2) {
1389 		kt->k_symfile = strdup(argv[0]);
1390 		kt->k_kvmfile = strdup(argv[1]);
1391 
1392 		kt->k_cookie = kvm_kb_ops->kb_open(kt->k_symfile,
1393 		    kt->k_kvmfile, NULL, oflag, (char *)mdb.m_pname);
1394 
1395 		if (kt->k_cookie == NULL)
1396 			goto err;
1397 
1398 		kt->k_xpv_domu = 0;
1399 		kt->k_kb_ops = kvm_kb_ops;
1400 	} else {
1401 #ifndef __x86
1402 		return (set_errno(EINVAL));
1403 #else
1404 		mdb_kb_ops_t *(*getops)(void);
1405 
1406 		kt->k_symfile = NULL;
1407 		kt->k_kvmfile = strdup(argv[0]);
1408 
1409 		getops = (mdb_kb_ops_t *(*)())dlsym(RTLD_NEXT, "mdb_kb_ops");
1410 
1411 		/*
1412 		 * Load mdb_kb if it's not already loaded during
1413 		 * identification.
1414 		 */
1415 		if (getops == NULL) {
1416 			(void) mdb_module_load("mdb_kb",
1417 			    MDB_MOD_GLOBAL | MDB_MOD_SILENT);
1418 			getops = (mdb_kb_ops_t *(*)())
1419 			    dlsym(RTLD_NEXT, "mdb_kb_ops");
1420 		}
1421 
1422 		if (getops == NULL || (kt->k_kb_ops = getops()) == NULL) {
1423 			warn("failed to load KVM backend ops\n");
1424 			goto err;
1425 		}
1426 
1427 		kt->k_cookie = kt->k_kb_ops->kb_open(NULL, kt->k_kvmfile, NULL,
1428 		    oflag, (char *)mdb.m_pname);
1429 
1430 		if (kt->k_cookie == NULL)
1431 			goto err;
1432 
1433 		kt->k_xpv_domu = 1;
1434 #endif
1435 	}
1436 
1437 	if ((kt->k_fio = kt->k_kb_ops->kb_sym_io(kt->k_cookie,
1438 	    kt->k_symfile)) == NULL)
1439 		goto err;
1440 
1441 	if ((kt->k_file = mdb_gelf_create(kt->k_fio,
1442 	    ET_EXEC, GF_FILE)) == NULL) {
1443 		mdb_io_destroy(kt->k_fio);
1444 		goto err;
1445 	}
1446 
1447 	kt->k_symtab =
1448 	    mdb_gelf_symtab_create_file(kt->k_file, SHT_SYMTAB, MDB_TGT_SYMTAB);
1449 
1450 	kt->k_dynsym =
1451 	    mdb_gelf_symtab_create_file(kt->k_file, SHT_DYNSYM, MDB_TGT_DYNSYM);
1452 
1453 	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "kas",
1454 	    &sym, NULL) == -1) {
1455 		warn("'kas' symbol is missing from kernel\n");
1456 		goto err;
1457 	}
1458 
1459 	kt->k_as = (struct as *)(uintptr_t)sym.st_value;
1460 
1461 	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "platform",
1462 	    &sym, NULL) == -1) {
1463 		warn("'platform' symbol is missing from kernel\n");
1464 		goto err;
1465 	}
1466 
1467 	if (kt->k_kb_ops->kb_kread(kt->k_cookie, sym.st_value,
1468 	    kt->k_platform, MAXNAMELEN) <= 0) {
1469 		warn("failed to read 'platform' string from kernel");
1470 		goto err;
1471 	}
1472 
1473 	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "utsname",
1474 	    &sym, NULL) == -1) {
1475 		warn("'utsname' symbol is missing from kernel\n");
1476 		goto err;
1477 	}
1478 
1479 	if (kt->k_kb_ops->kb_kread(kt->k_cookie, sym.st_value, &uts,
1480 	    sizeof (uts)) <= 0) {
1481 		warn("failed to read 'utsname' struct from kernel");
1482 		goto err;
1483 	}
1484 
1485 	kt->k_dump_print_content = (void (*)())kt_data_stub;
1486 	kt->k_dump_find_curproc = kt_data_stub;
1487 
1488 	/*
1489 	 * We set k_ctfvalid based on the presence of the CTF vmem arena
1490 	 * symbol.  The CTF members were added to the end of struct module at
1491 	 * the same time, so this allows us to know whether we can use them.
1492 	 */
1493 	if (mdb_gelf_symtab_lookup_by_name(kt->k_symtab, "ctf_arena", &sym,
1494 	    NULL) == 0 && !(mdb.m_flags & MDB_FL_NOCTF))
1495 		kt->k_ctfvalid = 1;
1496 
1497 	(void) mdb_nv_create(&kt->k_modules, UM_SLEEP);
1498 	t->t_pshandle = kt->k_cookie;
1499 	t->t_data = kt;
1500 
1501 #if defined(__sparc)
1502 #if defined(__sparcv9)
1503 	kt_sparcv9_init(t);
1504 #else
1505 	kt_sparcv7_init(t);
1506 #endif
1507 #elif defined(__amd64)
1508 	kt_amd64_init(t);
1509 #elif defined(__i386)
1510 	kt_ia32_init(t);
1511 #else
1512 #error	"unknown ISA"
1513 #endif
1514 
1515 	/*
1516 	 * We read our representative thread ID (address) from the kernel's
1517 	 * global panic_thread.  It will remain 0 if this is a live kernel.
1518 	 */
1519 	(void) mdb_tgt_readsym(t, MDB_TGT_AS_VIRT, &kt->k_tid, sizeof (void *),
1520 	    MDB_TGT_OBJ_EXEC, "panic_thread");
1521 
1522 	if ((mdb.m_flags & MDB_FL_ADB) && mdb_tgt_readsym(t, MDB_TGT_AS_VIRT,
1523 	    &pmem, sizeof (pmem), MDB_TGT_OBJ_EXEC, "physmem") == sizeof (pmem))
1524 		mdb_printf("physmem %lx\n", (ulong_t)pmem);
1525 
1526 	/*
1527 	 * If this is not a live kernel or a hypervisor dump, read the dump
1528 	 * header.  We don't have to sanity-check the header, as the open would
1529 	 * not have succeeded otherwise.
1530 	 */
1531 	if (!kt->k_xpv_domu && strcmp(kt->k_symfile, "/dev/ksyms") != 0) {
1532 		mdb_io_t *vmcore;
1533 
1534 		kt->k_dumphdr = mdb_alloc(sizeof (dumphdr_t), UM_SLEEP);
1535 
1536 		if ((vmcore = mdb_fdio_create_path(NULL, kt->k_kvmfile,
1537 		    O_RDONLY, 0)) == NULL) {
1538 			mdb_warn("failed to open %s", kt->k_kvmfile);
1539 			goto err;
1540 		}
1541 
1542 		if (IOP_READ(vmcore, kt->k_dumphdr, sizeof (dumphdr_t)) !=
1543 		    sizeof (dumphdr_t)) {
1544 			mdb_warn("failed to read dump header");
1545 			mdb_io_destroy(vmcore);
1546 			goto err;
1547 		}
1548 
1549 		mdb_io_destroy(vmcore);
1550 
1551 		(void) mdb_tgt_xdata_insert(t, "dumphdr",
1552 		    "dump header structure", kt_xd_dumphdr);
1553 	}
1554 
1555 	return (0);
1556 
1557 err:
1558 	if (kt->k_dumphdr != NULL)
1559 		mdb_free(kt->k_dumphdr, sizeof (dumphdr_t));
1560 
1561 	if (kt->k_symtab != NULL)
1562 		mdb_gelf_symtab_destroy(kt->k_symtab);
1563 
1564 	if (kt->k_dynsym != NULL)
1565 		mdb_gelf_symtab_destroy(kt->k_dynsym);
1566 
1567 	if (kt->k_file != NULL)
1568 		mdb_gelf_destroy(kt->k_file);
1569 
1570 	if (kt->k_cookie != NULL)
1571 		(void) kt->k_kb_ops->kb_close(kt->k_cookie);
1572 
1573 	mdb_free(kt, sizeof (kt_data_t));
1574 	return (-1);
1575 }
1576 
1577 int
1578 mdb_kvm_is_dump(mdb_io_t *io)
1579 {
1580 	dumphdr_t h;
1581 
1582 	(void) IOP_SEEK(io, (off64_t)0L, SEEK_SET);
1583 
1584 	return (IOP_READ(io, &h, sizeof (dumphdr_t)) == sizeof (dumphdr_t) &&
1585 	    h.dump_magic == DUMP_MAGIC);
1586 }
1587 
1588 int
1589 mdb_kvm_is_compressed_dump(mdb_io_t *io)
1590 {
1591 	dumphdr_t h;
1592 
1593 	(void) IOP_SEEK(io, (off64_t)0L, SEEK_SET);
1594 
1595 	return (IOP_READ(io, &h, sizeof (dumphdr_t)) == sizeof (dumphdr_t) &&
1596 	    h.dump_magic == DUMP_MAGIC &&
1597 	    (h.dump_flags & DF_COMPRESSED) != 0);
1598 }
1599