xref: /illumos-gate/usr/src/cmd/mdb/common/kmdb/kctl/kctl_main.c (revision b0fe7b8fa79924061f3bdf7f240ea116c2c0b704)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <kmdb/kctl/kctl.h>
#include <kmdb/kctl/kctl_wr.h>
#include <kmdb/kmdb_kctl.h>
#include <kmdb/kmdb_kdi.h>
#include <kmdb/kmdb_auxv.h>
#include <mdb/mdb_errno.h>

#include <sys/sysmacros.h>
#include <sys/reboot.h>
#include <sys/atomic.h>
#include <sys/bootconf.h>
#include <sys/kmdb.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/promimpl.h>
#include <sys/kdi_impl.h>
#include <sys/ctf_api.h>
#include <vm/seg_kmem.h>

kctl_t kctl;

#define	KCTL_EXECNAME		"/kernel/drv/kmdb"

#if defined(_LP64)
#define	KCTL_MEM_GOALSZ		(20 * 1024 * 1024)
#else
#define	KCTL_MEM_GOALSZ		(10 * 1024 * 1024)
#endif

/*
 * kmdb will call its own copies of the promif routines during
 * initialization.  As these routines are intended to be used when the
 * world is stopped, they don't attempt to grab the PROM lock.  Very
 * Bad Things could happen if kmdb called a prom routine while someone
 * else was calling the kernel's copy of another prom routine, so we
 * grab the PROM lock ourselves before we start initialization.
 */
#ifdef __sparc
#define	KCTL_PROM_LOCK		promif_preprom()
#define	KCTL_PROM_UNLOCK	promif_postprom()
#else
#define	KCTL_PROM_LOCK
#define	KCTL_PROM_UNLOCK
#endif

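/*
 * Verify the kernel/debugger interface version and set up the basic kctl
 * state: the worker-thread synchronization primitives, the initial
 * (inactive) activation state, and the dmod subsystem.
 */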
static int
kctl_init(void)
{
	if (kobj_kdi.kdi_version != KDI_VERSION) {
		kctl_warn("kmdb/kernel version mismatch (expected %d, "
		    "found %d)", KDI_VERSION, kobj_kdi.kdi_version);
		return (-1);
	}

	sema_init(&kctl.kctl_wr_avail_sem, 0, NULL, SEMA_DRIVER, NULL);
	mutex_init(&kctl.kctl_wr_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&kctl.kctl_wr_cv, NULL, CV_DRIVER, NULL);
	mutex_init(&kctl.kctl_lock, NULL, MUTEX_DRIVER, NULL);

	kctl.kctl_execname = KCTL_EXECNAME; /* XXX get from modctl? */

	kctl.kctl_state = KCTL_ST_INACTIVE;

	kctl.kctl_dseg = kctl.kctl_mrbase = NULL;
	kctl.kctl_dseg_size = kctl.kctl_mrsize = 0;

	kctl_dmod_init();

	return (0);
}

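/*
 * Undo kctl_init(): shut down the dmod subsystem and destroy the
 * synchronization primitives.
 */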
static void
kctl_fini(void)
{
	kctl_dmod_fini();

	mutex_destroy(&kctl.kctl_lock);
	cv_destroy(&kctl.kctl_wr_cv);
	mutex_destroy(&kctl.kctl_wr_lock);
	sema_destroy(&kctl.kctl_wr_avail_sem);
}

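/*
 * Advance the activation state machine.  State only moves forward; the
 * previous state is returned so callers such as kctl_cleanup() can tell how
 * far activation had progressed.
 */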
static uint_t
kctl_set_state(uint_t state)
{
	uint_t ostate = kctl.kctl_state;

	/* forward progress only, please */
	if (state > ostate) {
		kctl_dprintf("new kctl state: %d", state);
		kctl.kctl_state = state;
	}

	return (ostate);
}

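/*
 * Carve the debugger segment out of boot memory.  This path is used only
 * when kmdb is loaded at boot, before the kernel VM system is available.
 */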
static int
kctl_boot_dseg_alloc(caddr_t dsegaddr, size_t dsegsz)
{
	/*
	 * The Intel boot memory allocator will cleverly map us onto a 4M
	 * page if we request the whole 4M Intel segment at once.  This
	 * will break physical memory r/w, so we break the request into
	 * chunks.  The allocator isn't smart enough to combine requests,
	 * so it'll give us a bunch of 4k pages.
	 */
	while (dsegsz >= 1024*1024) {
		size_t sz = MIN(dsegsz, 1024*1024);

		if (BOP_ALLOC(kctl.kctl_boot_ops, dsegaddr, sz, BO_NO_ALIGN) !=
		    dsegaddr)
			return (-1);

		dsegaddr += sz;
		dsegsz -= sz;
	}

	return (0);
}

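/*
 * Map the fixed debugger segment once the VM system is up (the post-boot
 * load path).  Returns EAGAIN if something (such as kadb) is already mapped
 * there, and ENOMEM if the backing pages can't be allocated.
 */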
static int
kctl_dseg_alloc(caddr_t addr, size_t sz)
{
	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	/* make sure there isn't something there already (like kadb) */
	if (hat_getpfnum(kas.a_hat, addr) != PFN_INVALID)
		return (EAGAIN);

	if (segkmem_xalloc(NULL, addr, sz, VM_NOSLEEP, 0, segkmem_page_create,
	    NULL) == NULL)
		return (ENOMEM);

	return (0);
}

static void
kctl_dseg_free(caddr_t addr, size_t sz)
{
	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);

	segkmem_free(NULL, addr, sz);
}

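/*
 * Installed as the dv_kctl_memavail callback and also called directly from
 * kctl_modload_activate(): once kmem is available, allocate whatever portion
 * of the debugger's memory goal isn't covered by the fixed debugger segment
 * and hand it to the debugger.
 */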
static void
kctl_memavail(void)
{
	size_t needed;
	caddr_t base;

	/*
	 * We're now free to allocate the non-fixed portion of the debugger's
	 * memory region.
	 */

	needed = P2ROUNDUP(kctl.kctl_memgoalsz <= kctl.kctl_dseg_size ? 0 :
	    kctl.kctl_memgoalsz - kctl.kctl_dseg_size, PAGESIZE);

	if (needed == 0)
		return;

	if ((base = kmem_zalloc(needed, KM_NOSLEEP)) == NULL) {
		/*
		 * If we're going to wedge the machine during debugger startup,
		 * at least let them know why it's going to wedge.
		 */
		cmn_err(CE_WARN, "retrying kmdb allocation of 0x%lx bytes",
		    (ulong_t)needed);

		base = kmem_zalloc(needed, KM_SLEEP);
	}

	kdi_dvec->dv_memavail(base, needed);
	kctl.kctl_mrbase = base;
	kctl.kctl_mrsize = needed;
}

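/*
 * Unwind the activation sequence.  kctl_set_state() returns how far
 * activation had progressed, and the switch falls through from that state
 * downward so that only work which was actually completed gets undone.
 */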
void
kctl_cleanup(void)
{
	uint_t state = kctl_set_state(KCTL_ST_DEACTIVATING);

	kctl_dprintf("cleaning up from state %d", state);

	ASSERT(kctl.kctl_boot_loaded == 0);

	switch (state) {
	case KCTL_ST_ACTIVE:
		boothowto &= ~RB_DEBUG;
		/* XXX there's a race here */
		kdi_dvec = NULL;
		/*FALLTHROUGH*/

	case KCTL_ST_DBG_ACTIVATED:
		KCTL_PROM_LOCK;
		kmdb_deactivate();
		KCTL_PROM_UNLOCK;
		/*FALLTHROUGH*/

	case KCTL_ST_THREAD_STARTED:
		if (curthread != kctl.kctl_wr_thr) {
			kctl_wr_thr_stop();
			kctl_wr_thr_join();
		}
		/*FALLTHROUGH*/

	case KCTL_ST_MOD_NOTIFIERS:
		kctl_mod_notify_unreg();
		/*FALLTHROUGH*/

	case KCTL_ST_KCTL_PREACTIVATED:
		kctl_depreactivate_isadep();
		/*FALLTHROUGH*/

	case KCTL_ST_INITIALIZED:
		/* There's no kmdb_fini */
	case KCTL_ST_DSEG_ALLOCED:
		kctl_dseg_free(kctl.kctl_dseg, kctl.kctl_dseg_size);

		if (kctl.kctl_mrbase != NULL)
			kmem_free(kctl.kctl_mrbase, kctl.kctl_mrsize);
		/*FALLTHROUGH*/
	}

	kctl.kctl_state = KCTL_ST_INACTIVE;
}

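/*
 * Installed as the dv_kctl_modavail callback (and called directly on the
 * post-boot load path) once normal module load and unload are available:
 * synchronize the dmods loaded so far, process pending work, register the
 * module notifiers, and walk the kernel module list for the debugger.
 */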
static void
kctl_startup_modules(void)
{
	struct modctl *modp;

	/*
	 * Normal module load and unload is now available.  Prior to this point,
	 * we could only load modules, and that only when the debugger was being
	 * initialized.
	 *
	 * We'll need to prepare the modules we've already loaded (if any) for
	 * the brave new world in which boot is unmapped.
	 */
	kctl_dmod_sync();

	/*
	 * Process any outstanding loads or unloads and prepare for automatic
	 * module loading and unloading.
	 */
	(void) kctl_wr_process();

	kctl_mod_notify_reg();

	(void) kctl_set_state(KCTL_ST_MOD_NOTIFIERS);

	modp = &modules;
	do {
		kctl_mod_loaded(modp);
	} while ((modp = modp->mod_next) != &modules);
}

static void
kctl_startup_thread(void)
{
	/*
	 * Create the worker thread, which will handle future requests from the
	 * debugger.
	 */
	kctl_wr_thr_start();

	(void) kctl_set_state(KCTL_ST_THREAD_STARTED);
}

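/*
 * Finish a boot-time load: process the dmod loads queued during early
 * startup and decompress the CTF data for each module on the kobj link maps.
 */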
static int
kctl_startup_boot(void)
{
	struct modctl_list *lp, **lpp;
	int rc;

	if (kctl_wr_process() < 0) {
		kctl_warn("kmdb: failed to load modules");
		return (-1);
	}

	mutex_enter(&mod_lock);

	for (lpp = kobj_linkmaps; *lpp != NULL; lpp++) {
		for (lp = *lpp; lp != NULL; lp = lp->modl_next) {
			if ((rc = kctl_mod_decompress(lp->modl_modp)) != 0) {
				kctl_warn("kmdb: failed to decompress CTF data "
				    "for %s: %s", lp->modl_modp->mod_modname,
				    ctf_errmsg(rc));
			}
		}
	}

	mutex_exit(&mod_lock);

	return (0);
}

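/*
 * Initialize the debugger itself: build the auxv that describes this
 * environment, call kmdb_init() under the PROM lock, and then perform the
 * ISA-dependent preactivation steps.
 */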
static int
kctl_startup_preactivate(void *romp, const char *cfg, const char **argv)
{
	kmdb_auxv_t kav;
	int rc;

	kctl_auxv_init(&kav, cfg, argv, romp);
	KCTL_PROM_LOCK;
	rc = kmdb_init(kctl.kctl_execname, &kav);
	KCTL_PROM_UNLOCK;
	kctl_auxv_fini(&kav);

	if (rc < 0)
		return (EMDB_KNOLOAD);

	(void) kctl_set_state(KCTL_ST_INITIALIZED);

	if (kctl_preactivate_isadep() != 0)
		return (EIO);

	(void) kctl_set_state(KCTL_ST_KCTL_PREACTIVATED);

	return (0);
}

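/*
 * Activate the debugger: kmdb_activate() hands back a debugvec, which we
 * complete with our own callbacks and publish through kdi_dvec before
 * advertising the debugger's presence via RB_DEBUG.
 */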
static int
kctl_startup_activate(uint_t flags)
{
	kdi_debugvec_t *dvec;

	KCTL_PROM_LOCK;
	kmdb_activate(&dvec, flags);
	KCTL_PROM_UNLOCK;

	(void) kctl_set_state(KCTL_ST_DBG_ACTIVATED);

	/*
	 * fill in a few remaining debugvec entries.
	 */
	dvec->dv_kctl_modavail = kctl_startup_modules;
	dvec->dv_kctl_thravail = kctl_startup_thread;
	dvec->dv_kctl_memavail = kctl_memavail;

	kctl_activate_isadep(dvec);

	kdi_dvec = dvec;
	membar_producer();

	boothowto |= RB_DEBUG;

	(void) kctl_set_state(KCTL_ST_ACTIVE);

	return (0);
}

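/*
 * Map an unexpected activation state onto the most descriptive error code
 * we have for it.
 */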
static int
kctl_state_check(uint_t state, uint_t ok_state)
{
	if (state == ok_state)
		return (0);

	if (state == KCTL_ST_INACTIVE)
		return (EMDB_KINACTIVE);
	else if (kctl.kctl_state > KCTL_ST_INACTIVE &&
	    kctl.kctl_state < KCTL_ST_ACTIVE)
		return (EMDB_KACTIVATING);
	else if (kctl.kctl_state == KCTL_ST_ACTIVE)
		return (EMDB_KACTIVE);
	else if (kctl.kctl_state == KCTL_ST_DEACTIVATING)
		return (EMDB_KDEACTIVATING);
	else
		return (EINVAL);
}

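/*
 * Request debugger deactivation.  A boot-loaded kmdb can't be unloaded;
 * otherwise we flag the unload request, enter the debugger so it can see the
 * request, and wait for the work thread to exit.
 */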
int
kctl_deactivate(void)
{
	int rc;

	mutex_enter(&kctl.kctl_lock);

	if (kctl.kctl_boot_loaded) {
		rc = EMDB_KNOUNLOAD;
		goto deactivate_done;
	}

	if ((rc = kctl_state_check(kctl.kctl_state, KCTL_ST_ACTIVE)) != 0)
		goto deactivate_done;

	kmdb_kdi_set_unload_request();
	kmdb_kdi_kmdb_enter();

	/*
	 * The debugger will pass the request to the work thread, which will
	 * stop itself.
	 */
	kctl_wr_thr_join();

deactivate_done:
	mutex_exit(&kctl.kctl_lock);

	return (rc);
}

/*
 * Called from krtld, this indicates that the user loaded kmdb at boot.  We
 * track activation states, but we don't attempt to clean up if activation
 * fails, because boot debugger load failures are fatal.
 *
 * Further complicating matters, various kernel routines, such as bcopy and
 * mutex_enter, assume the presence of some basic state.  On SPARC, it's the
 * presence of a valid curthread pointer.  On AMD64, it's a valid curcpu
 * pointer in GSBASE.  We set up temporary versions of these before beginning
 * activation, and tear them down when we're done.
 */
int
kctl_boot_activate(struct bootops *ops, void *romp, size_t memsz,
    const char **argv)
{
	void *old;

#ifdef __lint
	{
	/*
	 * krtld does a name-based symbol lookup to find this routine.  It then
	 * casts the address it gets, calling the result.  We want to make sure
	 * that the call in krtld stays in sync with the prototype for this
	 * function, so we define a type (kctl_boot_activate_f) that matches the
	 * current prototype.  The following assignment ensures that the type
	 * still matches the declaration, with lint as the enforcer.
	 */
	kctl_boot_activate_f *kba = kctl_boot_activate;
	if (kba == NULL)	/* Make lint think kba is actually used */
		return (0);
	}
#endif

	old = kctl_boot_tmpinit();	/* Set up temporary state */

	ASSERT(ops != NULL);
	kctl.kctl_boot_ops = ops;	/* must be set before kctl_init */

	if (kctl_init() < 0)
		return (-1);

	kctl.kctl_boot_loaded = 1;

	kctl_dprintf("beginning kmdb initialization");

	if (memsz == 0)
		memsz = KCTL_MEM_GOALSZ;

	kctl.kctl_dseg = kdi_segdebugbase;
	kctl.kctl_dseg_size =
	    memsz > kdi_segdebugsize ? kdi_segdebugsize : memsz;
	kctl.kctl_memgoalsz = memsz;

	if (kctl_boot_dseg_alloc(kctl.kctl_dseg, kctl.kctl_dseg_size) < 0) {
		kctl_warn("kmdb: failed to allocate %lu-byte debugger area at "
		    "%p", kctl.kctl_dseg_size, (void *)kctl.kctl_dseg);
		return (-1);
	}

	(void) kctl_set_state(KCTL_ST_DSEG_ALLOCED);

	if (kctl_startup_preactivate(romp, NULL, argv) != 0 ||
	    kctl_startup_activate(KMDB_ACT_F_BOOT)) {
		kctl_warn("kmdb: failed to activate");
		return (-1);
	}

	if (kctl_startup_boot() < 0)
		return (-1);

	kctl_dprintf("finished with kmdb initialization");

	kctl_boot_tmpfini(old);

	kctl.kctl_boot_ops = NULL;

	return (0);
}

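/*
 * Activate a debugger that was loaded after boot via drv/kmdb.  If the
 * debugger is already active and auto-entry was requested, simply enter it;
 * otherwise allocate the debugger segment, initialize, and activate,
 * cleaning up if any step fails.
 */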
int
kctl_modload_activate(size_t memsz, const char *cfg, uint_t flags)
{
	int rc;

	mutex_enter(&kctl.kctl_lock);

	if ((rc = kctl_state_check(kctl.kctl_state, KCTL_ST_INACTIVE)) != 0) {
		if ((flags & KMDB_F_AUTO_ENTRY) && rc == EMDB_KACTIVE) {
			kmdb_kdi_kmdb_enter();
			rc = 0;
		}

		mutex_exit(&kctl.kctl_lock);
		return (rc);
	}

	kctl.kctl_flags = flags;

	if (memsz == 0)
		memsz = KCTL_MEM_GOALSZ;

	kctl.kctl_dseg = kdi_segdebugbase;
	kctl.kctl_dseg_size =
	    memsz > kdi_segdebugsize ? kdi_segdebugsize : memsz;
	kctl.kctl_memgoalsz = memsz;

	if ((rc = kctl_dseg_alloc(kctl.kctl_dseg, kctl.kctl_dseg_size)) != 0)
		goto activate_fail;

	(void) kctl_set_state(KCTL_ST_DSEG_ALLOCED);

	if ((rc = kctl_startup_preactivate(NULL, cfg, NULL)) != 0)
		goto activate_fail;

	kctl_startup_modules();
	kctl_startup_thread();

	if ((rc = kctl_startup_activate(0)) != 0)
		goto activate_fail;

	kctl_memavail();	/* Must be after kdi_dvec is set */

	if (kctl.kctl_flags & KMDB_F_AUTO_ENTRY)
		kmdb_kdi_kmdb_enter();

	mutex_exit(&kctl.kctl_lock);
	return (0);

activate_fail:
	kctl_cleanup();
	mutex_exit(&kctl.kctl_lock);
	return (rc);
}

/*
 * This interface will be called when drv/kmdb loads.  When we get the call, one
 * of two things will have happened:
 *
 *  1. The debugger was loaded at boot.  We've progressed far enough into boot
 *     as to allow drv/kmdb to be loaded as a non-primary.  Invocation of this
 *     interface is the signal to the debugger that it can start allowing things
 *     like dmod loading and automatic CTF decompression - things which require
 *     the system services that have now been started.
 *
 *  2. The debugger was loaded after boot.  mdb opened /dev/kmdb, causing
 *     drv/kmdb to load, followed by misc/kmdb.  Nothing has been set up yet,
 *     so we need to initialize.  Activation will occur separately, so we don't
 *     have to worry about that.
 */
int
kctl_attach(dev_info_t *dip)
{
	kctl.kctl_drv_dip = dip;

	return (0);
}

int
kctl_detach(void)
{
	return (kctl.kctl_state == KCTL_ST_INACTIVE ? 0 : EBUSY);
}

static struct modlmisc modlmisc = {
	&mod_miscops,
	KMDB_VERSION
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};

/*
 * Invoked only when the debugger is loaded via modload - not invoked when the
 * debugger is loaded at boot.  kctl_boot_activate needs to call everything
 * (aside from mod_install) this function does.
 */
int
_init(void)
{
	if (kctl_init() < 0)
		return (EINVAL);

	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	kctl_fini();

	return (mod_remove(&modlinkage));
}
631