/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <kmdb/kctl/kctl.h>
#include <kmdb/kctl/kctl_wr.h>
#include <kmdb/kmdb_kctl.h>
#include <kmdb/kmdb_kdi.h>
#include <kmdb/kmdb_auxv.h>
#include <mdb/mdb_errno.h>

#include <sys/sysmacros.h>
#include <sys/reboot.h>
#include <sys/atomic.h>
#include <sys/bootconf.h>
#include <sys/kmdb.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/promimpl.h>
#include <sys/kdi_impl.h>
#include <sys/ctf_api.h>
#include <vm/seg_kmem.h>

kctl_t kctl;

#define	KCTL_EXECNAME		"/kernel/drv/kmdb"

#if defined(_LP64)
#define	KCTL_MEM_GOALSZ		(20 * 1024 * 1024)
#else
#define	KCTL_MEM_GOALSZ		(10 * 1024 * 1024)
#endif

/*
 * kmdb will call its own copies of the promif routines during
 * initialization.  As these routines are intended to be used when the
 * world is stopped, they don't attempt to grab the PROM lock.  Very
 * Bad Things could happen if kmdb called a prom routine while someone
 * else was calling the kernel's copy of another prom routine, so we
 * grab the PROM lock ourselves before we start initialization.
 */
#ifdef __sparc
#define	KCTL_PROM_LOCK		promif_preprom()
#define	KCTL_PROM_UNLOCK	promif_postprom()
#else
#define	KCTL_PROM_LOCK
#define	KCTL_PROM_UNLOCK
#endif

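/*
 * One-time initialization of the global kctl state, shared by the boot-load
 * and modload paths: check that the kernel's KDI version matches ours, set up
 * the locks, CV, and semaphore used by kctl and its worker (WR) thread, and
 * initialize dmod handling.
 */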
static int
kctl_init(void)
{
	if (kobj_kdi.kdi_version != KDI_VERSION) {
		kctl_warn("kmdb/kernel version mismatch (expected %d, "
		    "found %d)", KDI_VERSION, kobj_kdi.kdi_version);
		return (-1);
	}

	sema_init(&kctl.kctl_wr_avail_sem, 0, NULL, SEMA_DRIVER, NULL);
	mutex_init(&kctl.kctl_wr_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&kctl.kctl_wr_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&kctl.kctl_lock, NULL, MUTEX_DRIVER, NULL);

	kctl.kctl_execname = KCTL_EXECNAME; /* XXX get from modctl? */

	kctl.kctl_state = KCTL_ST_INACTIVE;

	kctl.kctl_dseg = kctl.kctl_mrbase = NULL;
	kctl.kctl_dseg_size = kctl.kctl_mrsize = 0;

	kctl_dmod_init();

	return (0);
}

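/*
 * Undo kctl_init: shut down dmod handling and destroy the synchronization
 * objects.
 */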
static void
kctl_fini(void)
{
	kctl_dmod_fini();

	mutex_destroy(&kctl.kctl_lock);
	cv_destroy(&kctl.kctl_wr_cv);
	mutex_destroy(&kctl.kctl_wr_lock);
	sema_destroy(&kctl.kctl_wr_avail_sem);
}

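/*
 * Advance the activation state machine.  State only moves forward; the
 * previous state is returned so callers can see where they started.
 */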
static uint_t
kctl_set_state(uint_t state)
{
	uint_t ostate = kctl.kctl_state;

	/* forward progress only, please */
	if (state > ostate) {
		kctl_dprintf("new kctl state: %d", state);
		kctl.kctl_state = state;
	}

	return (ostate);
}

static int
kctl_boot_dseg_alloc(caddr_t dsegaddr, size_t dsegsz)
{
	/*
	 * The Intel boot memory allocator will cleverly map us onto a 4M
	 * page if we request the whole 4M Intel segment at once.  This
	 * will break physical memory r/w, so we break the request into
	 * chunks.  The allocator isn't smart enough to combine requests,
	 * so it'll give us a bunch of 4k pages.
	 */
	while (dsegsz >= 1024*1024) {
		size_t sz = MIN(dsegsz, 1024*1024);

		if (BOP_ALLOC(kctl.kctl_boot_ops, dsegaddr, sz, BO_NO_ALIGN) !=
		    dsegaddr)
			return (-1);

		dsegaddr += sz;
		dsegsz -= sz;
	}

	return (0);
}

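/*
 * Map the debugger's fixed virtual address range using segkmem.  Fails with
 * EAGAIN if something (such as kadb) already has a page mapped there, and
 * with ENOMEM if backing pages can't be allocated.
 */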
static int
kctl_dseg_alloc(caddr_t addr, size_t sz)
{
	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	/* make sure there isn't something there already (like kadb) */
	if (hat_getpfnum(kas.a_hat, addr) != PFN_INVALID)
		return (EAGAIN);

	if (segkmem_xalloc(NULL, addr, sz, VM_NOSLEEP, 0, segkmem_page_create,
	    NULL) == NULL)
		return (ENOMEM);

	return (0);
}

static void
kctl_dseg_free(caddr_t addr, size_t sz)
{
	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	segkmem_free(NULL, addr, sz);
}

static void
kctl_memavail(void)
{
	size_t needed;
	caddr_t base;

	/*
	 * We're now free to allocate the non-fixed portion of the debugger's
	 * memory region.
	 */

	needed = P2ROUNDUP(kctl.kctl_memgoalsz <= kctl.kctl_dseg_size ? 0 :
	    kctl.kctl_memgoalsz - kctl.kctl_dseg_size, PAGESIZE);

	if (needed == 0)
		return;

	if ((base = kmem_zalloc(needed, KM_NOSLEEP)) == NULL) {
		/*
		 * If we're going to wedge the machine during debugger startup,
		 * at least let them know why it's going to wedge.
		 */
		cmn_err(CE_WARN, "retrying kmdb allocation of 0x%lx bytes\n",
		    (ulong_t)needed);

		base = kmem_zalloc(needed, KM_SLEEP);
	}

	if (kdi_dvec->dv_memavail(base, needed) < 0) {
		cmn_err(CE_WARN, "failed to add memory to debugger\n");
		kmem_free(base, needed);
		/* don't record a region we just freed */
		return;
	}

	kctl.kctl_mrbase = base;
	kctl.kctl_mrsize = needed;
}

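/*
 * Unwind a partial or complete activation.  The switch falls through from the
 * current state down to the earliest one, undoing each stage in reverse order
 * of activation.
 */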
void
kctl_cleanup(void)
{
	uint_t state = kctl_set_state(KCTL_ST_DEACTIVATING);

	kctl_dprintf("cleaning up from state %d", state);

	ASSERT(kctl.kctl_boot_loaded == 0);

	switch (state) {
	case KCTL_ST_ACTIVE:
		boothowto &= ~RB_DEBUG;
		/* XXX there's a race here */
		kdi_dvec = NULL;
		/*FALLTHROUGH*/

	case KCTL_ST_KCTL_ACTIVATED:
		kctl_deactivate_isadep();
		/*FALLTHROUGH*/

	case KCTL_ST_DBG_ACTIVATED:
		KCTL_PROM_LOCK;
		kmdb_deactivate();
		KCTL_PROM_UNLOCK;
		/*FALLTHROUGH*/

	case KCTL_ST_THREAD_STARTED:
		if (curthread != kctl.kctl_wr_thr) {
			kctl_wr_thr_stop();
			kctl_wr_thr_join();
		}
		/*FALLTHROUGH*/

	case KCTL_ST_MOD_NOTIFIERS:
		kctl_mod_notify_unreg();
		/*FALLTHROUGH*/

	case KCTL_ST_KCTL_PREACTIVATED:
		kctl_depreactivate_isadep();
		/*FALLTHROUGH*/

	case KCTL_ST_INITIALIZED:
		/* There's no kmdb_fini */
	case KCTL_ST_DSEG_ALLOCED:
		kctl_dseg_free(kctl.kctl_dseg, kctl.kctl_dseg_size);

		if (kctl.kctl_mrbase != NULL)
			kmem_free(kctl.kctl_mrbase, kctl.kctl_mrsize);
		/*FALLTHROUGH*/
	}

	kctl.kctl_state = KCTL_ST_INACTIVE;
}

static void
kctl_startup_modules(void)
{
	struct modctl *modp;

	/*
	 * Normal module load and unload is now available.  Prior to this point,
	 * we could only load modules, and that only when the debugger was being
	 * initialized.
	 *
	 * We'll need to prepare the modules we've already loaded (if any) for
	 * the brave new world in which boot is unmapped.
	 */
	kctl_dmod_sync();

	/*
	 * Process any outstanding loads or unloads and prepare for automatic
	 * module loading and unloading.
	 */
	(void) kctl_wr_process();

	kctl_mod_notify_reg();

	(void) kctl_set_state(KCTL_ST_MOD_NOTIFIERS);

	modp = &modules;
	do {
		kctl_mod_loaded(modp);
	} while ((modp = modp->mod_next) != &modules);
}

static void
kctl_startup_thread(void)
{
	/*
	 * Create the worker thread, which will handle future requests from the
	 * debugger.
	 */
	kctl_wr_thr_start();

	(void) kctl_set_state(KCTL_ST_THREAD_STARTED);
}

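/*
 * Final phase of a boot-time load: process any outstanding work-queue
 * requests (dmod loads queued during initialization) and decompress the CTF
 * data for the modules on the krtld link maps.
 */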
static int
kctl_startup_boot(void)
{
	struct modctl_list *lp, **lpp;
	int rc;

	if (kctl_wr_process() < 0) {
		kctl_warn("kmdb: failed to load modules");
		return (-1);
	}

	mutex_enter(&mod_lock);

	for (lpp = kobj_linkmaps; *lpp != NULL; lpp++) {
		for (lp = *lpp; lp != NULL; lp = lp->modl_next) {
			if ((rc = kctl_mod_decompress(lp->modl_modp)) != 0) {
				kctl_warn("kmdb: failed to decompress CTF data "
				    "for %s: %s", lp->modl_modp->mod_modname,
				    ctf_errmsg(rc));
			}
		}
	}

	mutex_exit(&mod_lock);

	return (0);
}

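/*
 * Initialize the debugger proper: build the auxv that describes our
 * environment, call kmdb_init under the PROM lock, and perform the
 * ISA-dependent preactivation steps.
 */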
static int
kctl_startup_preactivate(void *romp, const char *cfg, const char **argv)
{
	kmdb_auxv_t kav;
	int rc;

	kctl_auxv_init(&kav, cfg, argv, romp);
	KCTL_PROM_LOCK;
	rc = kmdb_init(kctl.kctl_execname, &kav);
	KCTL_PROM_UNLOCK;
	kctl_auxv_fini(&kav);

	if (rc < 0)
		return (EMDB_KNOLOAD);

	(void) kctl_set_state(KCTL_ST_INITIALIZED);

	if (kctl_preactivate_isadep() != 0)
		return (EIO);

	(void) kctl_set_state(KCTL_ST_KCTL_PREACTIVATED);

	return (0);
}

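/*
 * Activate the debugger: have kmdb hand back its debugvec, fill in the
 * kctl-provided entries, perform ISA-dependent activation, publish kdi_dvec,
 * and set RB_DEBUG to advertise that a kernel debugger is present.
 */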
static int
kctl_startup_activate(uint_t flags)
{
	kdi_debugvec_t *dvec;

	KCTL_PROM_LOCK;
	kmdb_activate(&dvec, flags);
	KCTL_PROM_UNLOCK;

	(void) kctl_set_state(KCTL_ST_DBG_ACTIVATED);

	/*
	 * fill in a few remaining debugvec entries.
	 */
	dvec->dv_kctl_modavail = kctl_startup_modules;
	dvec->dv_kctl_thravail = kctl_startup_thread;
	dvec->dv_kctl_memavail = kctl_memavail;

	if (kctl_activate_isadep(dvec) != 0)
		return (EIO);

	(void) kctl_set_state(KCTL_ST_KCTL_ACTIVATED);

	kdi_dvec = dvec;
	membar_producer();

	boothowto |= RB_DEBUG;

	(void) kctl_set_state(KCTL_ST_ACTIVE);

	return (0);
}

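/*
 * Translate the current activation state into an error code when it isn't
 * the state the caller requires.
 */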
static int
kctl_state_check(uint_t state, uint_t ok_state)
{
	if (state == ok_state)
		return (0);

	if (state == KCTL_ST_INACTIVE)
		return (EMDB_KINACTIVE);
	else if (kctl.kctl_state > KCTL_ST_INACTIVE &&
	    kctl.kctl_state < KCTL_ST_ACTIVE)
		return (EMDB_KACTIVATING);
	else if (kctl.kctl_state == KCTL_ST_ACTIVE)
		return (EMDB_KACTIVE);
	else if (kctl.kctl_state == KCTL_ST_DEACTIVATING)
		return (EMDB_KDEACTIVATING);
	else
		return (EINVAL);
}

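/*
 * Request that the debugger be unloaded.  This is only allowed when the
 * debugger was loaded after boot and is fully active; the work thread
 * performs the actual teardown, and we wait for it here.
 */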
int
kctl_deactivate(void)
{
	int rc;

	mutex_enter(&kctl.kctl_lock);

	if (kctl.kctl_boot_loaded) {
		rc = EMDB_KNOUNLOAD;
		goto deactivate_done;
	}

	if ((rc = kctl_state_check(kctl.kctl_state, KCTL_ST_ACTIVE)) != 0)
		goto deactivate_done;

	kmdb_kdi_set_unload_request();
	kdi_dvec_enter();

	/*
	 * The debugger will pass the request to the work thread, which will
	 * stop itself.
	 */
	kctl_wr_thr_join();

deactivate_done:
	mutex_exit(&kctl.kctl_lock);

	return (rc);
}

/*
 * Called from krtld, this indicates that the user loaded kmdb at boot.  We
 * track activation states, but we don't attempt to clean up if activation
 * fails, because boot debugger load failures are fatal.
 *
 * Further complicating matters, various kernel routines, such as bcopy and
 * mutex_enter, assume the presence of some basic state.  On SPARC, it's the
 * presence of a valid curthread pointer.  On AMD64, it's a valid curcpu
 * pointer in GSBASE.  We set up temporary versions of these before beginning
 * activation, and tear them down when we're done.
 */
int
kctl_boot_activate(struct bootops *ops, void *romp, size_t memsz,
    const char **argv)
{
	void *old;

#ifdef __lint
	{
	/*
	 * krtld does a name-based symbol lookup to find this routine.  It then
	 * casts the address it gets, calling the result.  We want to make sure
	 * that the call in krtld stays in sync with the prototype for this
	 * function, so we define a type (kctl_boot_activate_f) that matches the
	 * current prototype.  The following assignment ensures that the type
	 * still matches the declaration, with lint as the enforcer.
	 */
	kctl_boot_activate_f *kba = kctl_boot_activate;
	if (kba == NULL)	/* Make lint think kba is actually used */
		return (0);
	}
#endif

	old = kctl_boot_tmpinit();	/* Set up temporary state */

	ASSERT(ops != NULL);
	kctl.kctl_boot_ops = ops;	/* must be set before kctl_init */

	if (kctl_init() < 0)
		return (-1);

	kctl.kctl_boot_loaded = 1;

	kctl_dprintf("beginning kmdb initialization");

	if (memsz == 0)
		memsz = KCTL_MEM_GOALSZ;

	kctl.kctl_dseg = (caddr_t)SEGDEBUGBASE;
	kctl.kctl_dseg_size = (memsz > SEGDEBUGSIZE ? SEGDEBUGSIZE : memsz);
	kctl.kctl_memgoalsz = memsz;

	if (kctl_boot_dseg_alloc(kctl.kctl_dseg, kctl.kctl_dseg_size) < 0) {
		kctl_warn("kmdb: failed to allocate %lu-byte debugger area at "
		    "%p", (ulong_t)kctl.kctl_dseg_size, (void *)kctl.kctl_dseg);
		return (-1);
	}

	(void) kctl_set_state(KCTL_ST_DSEG_ALLOCED);

	if (kctl_startup_preactivate(romp, NULL, argv) != 0 ||
	    kctl_startup_activate(KMDB_ACT_F_BOOT)) {
		kctl_warn("kmdb: failed to activate");
		return (-1);
	}

	if (kctl_startup_boot() < 0)
		return (-1);

	kctl_dprintf("finished with kmdb initialization");

	kctl.kctl_boot_ops = NULL;
	kctl_boot_tmpfini(old);

	return (0);
}

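/*
 * Post-boot activation path, used when the debugger is loaded via modload:
 * allocate the debugger segment, initialize and activate the debugger, and
 * optionally enter it right away.  If the debugger is already active and
 * auto-entry was requested, just enter it.
 */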
int
kctl_modload_activate(size_t memsz, const char *cfg, uint_t flags)
{
	int rc;

	mutex_enter(&kctl.kctl_lock);

	if ((rc = kctl_state_check(kctl.kctl_state, KCTL_ST_INACTIVE)) != 0) {
		if ((flags & KMDB_F_AUTO_ENTRY) && rc == EMDB_KACTIVE) {
			kdi_dvec_enter();
			rc = 0;
		}

		mutex_exit(&kctl.kctl_lock);
		return (rc);
	}

	kctl.kctl_flags = flags;

	if (memsz == 0)
		memsz = KCTL_MEM_GOALSZ;

	kctl.kctl_dseg = (caddr_t)SEGDEBUGBASE;
	kctl.kctl_dseg_size = (memsz > SEGDEBUGSIZE ? SEGDEBUGSIZE : memsz);
	kctl.kctl_memgoalsz = memsz;

	if ((rc = kctl_dseg_alloc(kctl.kctl_dseg, kctl.kctl_dseg_size)) != 0)
		goto activate_fail;

	(void) kctl_set_state(KCTL_ST_DSEG_ALLOCED);

	if ((rc = kctl_startup_preactivate(NULL, cfg, NULL)) != 0)
		goto activate_fail;

	kctl_startup_modules();
	kctl_startup_thread();

	if ((rc = kctl_startup_activate(0)) != 0)
		goto activate_fail;

	kctl_memavail();	/* Must be after kdi_dvec is set */

	if (kctl.kctl_flags & KMDB_F_AUTO_ENTRY)
		kdi_dvec_enter();

	mutex_exit(&kctl.kctl_lock);
	return (0);

activate_fail:
	kctl_cleanup();
	mutex_exit(&kctl.kctl_lock);
	return (rc);
}

/*
 * This interface will be called when drv/kmdb loads.  When we get the call, one
 * of two things will have happened:
 *
 *  1. The debugger was loaded at boot.  We've progressed far enough into boot
 *     as to allow drv/kmdb to be loaded as a non-primary.  Invocation of this
 *     interface is the signal to the debugger that it can start allowing things
 *     like dmod loading and automatic CTF decompression - things which require
 *     the system services that have now been started.
 *
 *  2. The debugger was loaded after boot.  mdb opened /dev/kmdb, causing
 *     drv/kmdb to load, followed by misc/kmdb.  Nothing has been set up yet,
 *     so we need to initialize.  Activation will occur separately, so we don't
 *     have to worry about that.
 */
int
kctl_attach(dev_info_t *dip)
{
	kctl.kctl_drv_dip = dip;

	return (0);
}

int
kctl_detach(void)
{
	return (kctl.kctl_state == KCTL_ST_INACTIVE ? 0 : EBUSY);
}

static struct modlmisc modlmisc = {
	&mod_miscops,
	KMDB_VERSION
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};

/*
 * Invoked only when the debugger is loaded via modload; it is not invoked when
 * the debugger is loaded at boot.  Anything this function does (aside from
 * mod_install) must also be done by kctl_boot_activate.
 */
int
_init(void)
{
	if (kctl_init() < 0)
		return (EINVAL);

	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	kctl_fini();

	return (mod_remove(&modlinkage));
}
640