xref: /titanic_41/usr/src/cmd/mdb/common/kmdb/kctl/kctl_main.c (revision 9d0d62ad2e60e8f742a2e723d06e88352ee6a1f3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <kmdb/kctl/kctl.h>
27 #include <kmdb/kctl/kctl_wr.h>
28 #include <kmdb/kmdb_kctl.h>
29 #include <kmdb/kmdb_kdi.h>
30 #include <kmdb/kmdb_auxv.h>
31 #include <mdb/mdb_errno.h>
32 
33 #include <sys/sysmacros.h>
34 #include <sys/reboot.h>
35 #include <sys/atomic.h>
36 #include <sys/bootconf.h>
37 #include <sys/kmdb.h>
38 #include <sys/kobj.h>
39 #include <sys/kobj_impl.h>
40 #include <sys/promimpl.h>
41 #include <sys/kdi_impl.h>
42 #include <sys/ctf_api.h>
43 #include <vm/seg_kmem.h>
44 
45 kctl_t kctl;
46 
47 #define	KCTL_EXECNAME		"/kernel/drv/kmdb"
48 
49 #if defined(_LP64)
50 #define	KCTL_MEM_GOALSZ		(20 * 1024 * 1024)
51 #else
52 #define	KCTL_MEM_GOALSZ		(10 * 1024 * 1024)
53 #endif
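/*
 * KCTL_MEM_GOALSZ is only a default; both kctl_boot_activate() and
 * kctl_modload_activate() below fall back to it when their caller passes a
 * memsz of 0.
 */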
54 
55 /*
56  * kmdb will call its own copies of the promif routines during
57  * initialization.  As these routines are intended to be used when the
58  * world is stopped, they don't attempt to grab the PROM lock.  Very
59  * Bad Things could happen if kmdb called a prom routine while someone
60  * else was calling the kernel's copy of another prom routine, so we
61  * grab the PROM lock ourselves before we start initialization.
62  */
63 #ifdef __sparc
64 #define	KCTL_PROM_LOCK		promif_preprom()
65 #define	KCTL_PROM_UNLOCK	promif_postprom()
66 #else
67 #define	KCTL_PROM_LOCK
68 #define	KCTL_PROM_UNLOCK
69 #endif
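/*
 * Initialization calls into kmdb are therefore bracketed with these macros;
 * e.g. in kctl_startup_preactivate() below:
 *
 *	KCTL_PROM_LOCK;
 *	rc = kmdb_init(kctl.kctl_execname, &kav);
 *	KCTL_PROM_UNLOCK;
 */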
70 
71 static int
72 kctl_init(void)
73 {
74 	if (kobj_kdi.kdi_version != KDI_VERSION) {
75 		kctl_warn("kmdb/kernel version mismatch (expected %d, "
76 		    "found %d)", KDI_VERSION, kobj_kdi.kdi_version);
77 		return (-1);
78 	}
79 
80 	sema_init(&kctl.kctl_wr_avail_sem, 0, NULL, SEMA_DRIVER, NULL);
81 	mutex_init(&kctl.kctl_wr_lock, NULL, MUTEX_DRIVER, NULL);
82 	cv_init(&kctl.kctl_wr_cv, NULL, CV_DRIVER, NULL);
83 	mutex_init(&kctl.kctl_lock, NULL, MUTEX_DRIVER, NULL);
84 
85 	kctl.kctl_execname = KCTL_EXECNAME; /* XXX get from modctl? */
86 
87 	kctl.kctl_state = KCTL_ST_INACTIVE;
88 
89 	kctl.kctl_dseg = kctl.kctl_mrbase = NULL;
90 	kctl.kctl_dseg_size = kctl.kctl_mrsize = 0;
91 
92 	kctl_dmod_init();
93 
94 	return (0);
95 }
96 
97 static void
98 kctl_fini(void)
99 {
100 	kctl_dmod_fini();
101 
102 	mutex_destroy(&kctl.kctl_lock);
103 	cv_destroy(&kctl.kctl_wr_cv);
104 	mutex_destroy(&kctl.kctl_wr_lock);
105 	sema_destroy(&kctl.kctl_wr_avail_sem);
106 }
107 
108 static uint_t
109 kctl_set_state(uint_t state)
110 {
111 	uint_t ostate = kctl.kctl_state;
112 
113 	/* forward progress only, please */
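	/*
	 * This presumes the KCTL_ST_* constants are defined in ascending
	 * activation order, from KCTL_ST_INACTIVE up through
	 * KCTL_ST_DEACTIVATING, so that "state > ostate" rejects any attempt
	 * to move backwards; kctl_state_check() below relies on the same
	 * ordering.
	 */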
114 	if (state > ostate) {
115 		kctl_dprintf("new kctl state: %d", state);
116 		kctl.kctl_state = state;
117 	}
118 
119 	return (ostate);
120 }
121 
122 static int
123 kctl_boot_dseg_alloc(caddr_t dsegaddr, size_t dsegsz)
124 {
125 	/*
126 	 * The Intel boot memory allocator will cleverly map us onto a 4M
127 	 * page if we request the whole 4M Intel segment at once.  This
128 	 * will break physical memory r/w, so we break the request into
129 	 * chunks.  The allocator isn't smart enough to combine requests,
130 	 * so it'll give us a bunch of 4k pages.
131 	 */
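	/*
	 * For example, a 4MB segment is requested as four 1MB BOP_ALLOCs at
	 * dsegaddr, dsegaddr + 1M, dsegaddr + 2M and dsegaddr + 3M, rather
	 * than as a single 4MB request that could be satisfied with one
	 * large page.
	 */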
132 	while (dsegsz >= 1024*1024) {
133 		size_t sz = MIN(dsegsz, 1024*1024);
134 
135 		if (BOP_ALLOC(kctl.kctl_boot_ops, dsegaddr, sz, BO_NO_ALIGN) !=
136 		    dsegaddr)
137 			return (-1);
138 
139 		dsegaddr += sz;
140 		dsegsz -= sz;
141 	}
142 
143 	return (0);
144 }
145 
146 static int
147 kctl_dseg_alloc(caddr_t addr, size_t sz)
148 {
149 	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
150 
151 	/* make sure there isn't something there already (like kadb) */
152 	if (hat_getpfnum(kas.a_hat, addr) != PFN_INVALID)
153 		return (EAGAIN);
154 
155 	if (segkmem_xalloc(NULL, addr, sz, VM_NOSLEEP, 0, segkmem_page_create,
156 	    NULL) == NULL)
157 		return (ENOMEM);
158 
159 	return (0);
160 }
161 
162 static void
163 kctl_dseg_free(caddr_t addr, size_t sz)
164 {
165 	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
166 
167 	segkmem_free(NULL, addr, sz);
168 }
169 
170 static void
171 kctl_memavail(void)
172 {
173 	size_t needed;
174 	caddr_t base;
175 
176 	/*
177 	 * We're now free to allocate the non-fixed portion of the debugger's
178 	 * memory region.
179 	 */
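	/*
	 * For example, with the 64-bit goal of 20MB and a (hypothetical) 4MB
	 * fixed dseg, needed below is P2ROUNDUP(20MB - 4MB, PAGESIZE) = 16MB,
	 * which is zeroed here and handed to the debugger through
	 * dv_memavail().
	 */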
180 
181 	needed = P2ROUNDUP(kctl.kctl_memgoalsz <= kctl.kctl_dseg_size ? 0 :
182 	    kctl.kctl_memgoalsz - kctl.kctl_dseg_size, PAGESIZE);
183 
184 	if (needed == 0)
185 		return;
186 
187 	if ((base = kmem_zalloc(needed, KM_NOSLEEP)) == NULL) {
188 		/*
189 		 * If we're going to wedge the machine during debugger startup,
190 		 * at least let them know why it's going to wedge.
191 		 */
192 		cmn_err(CE_WARN, "retrying kmdb allocation of 0x%lx bytes",
193 		    (ulong_t)needed);
194 
195 		base = kmem_zalloc(needed, KM_SLEEP);
196 	}
197 
198 	kdi_dvec->dv_memavail(base, needed);
199 	kctl.kctl_mrbase = base;
200 	kctl.kctl_mrsize = needed;
201 }
202 
203 void
204 kctl_cleanup(void)
205 {
206 	uint_t state = kctl_set_state(KCTL_ST_DEACTIVATING);
207 
208 	kctl_dprintf("cleaning up from state %d", state);
209 
210 	ASSERT(kctl.kctl_boot_loaded == 0);
211 
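	/*
	 * Each case below falls through to the next, undoing the activation
	 * steps from whatever state was reached back down to the debugger
	 * segment allocation, in roughly the reverse of the order in which
	 * they were performed.
	 */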
212 	switch (state) {
213 	case KCTL_ST_ACTIVE:
214 		boothowto &= ~RB_DEBUG;
215 		/* XXX there's a race here */
216 		kdi_dvec = NULL;
217 		/*FALLTHROUGH*/
218 
219 	case KCTL_ST_DBG_ACTIVATED:
220 		KCTL_PROM_LOCK;
221 		kmdb_deactivate();
222 		KCTL_PROM_UNLOCK;
223 		/*FALLTHROUGH*/
224 
225 	case KCTL_ST_THREAD_STARTED:
226 		if (curthread != kctl.kctl_wr_thr) {
227 			kctl_wr_thr_stop();
228 			kctl_wr_thr_join();
229 		}
230 		/*FALLTHROUGH*/
231 
232 	case KCTL_ST_MOD_NOTIFIERS:
233 		kctl_mod_notify_unreg();
234 		/*FALLTHROUGH*/
235 
236 	case KCTL_ST_KCTL_PREACTIVATED:
237 		kctl_depreactivate_isadep();
238 		/*FALLTHROUGH*/
239 
240 	case KCTL_ST_INITIALIZED:
241 		/* There's no kmdb_fini */
242 	case KCTL_ST_DSEG_ALLOCED:
243 		kctl_dseg_free(kctl.kctl_dseg, kctl.kctl_dseg_size);
244 
245 		if (kctl.kctl_mrbase != NULL)
246 			kmem_free(kctl.kctl_mrbase, kctl.kctl_mrsize);
247 		/*FALLTHROUGH*/
248 	}
249 
250 	kctl.kctl_state = KCTL_ST_INACTIVE;
251 }
252 
253 static void
254 kctl_startup_modules(void)
255 {
256 	struct modctl *modp;
257 
258 	/*
259 	 * Normal module load and unload is now available.  Prior to this point,
260 	 * we could only load modules, and that only when the debugger was being
261 	 * initialized.
262 	 *
263 	 * We'll need to prepare the modules we've already loaded (if any) for
264 	 * the brave new world in which boot is unmapped.
265 	 */
266 	kctl_dmod_sync();
267 
268 	/*
269 	 * Process any outstanding loads or unloads and prepare for automatic
270 	 * module loading and unloading.
271 	 */
272 	(void) kctl_wr_process();
273 
274 	kctl_mod_notify_reg();
275 
276 	(void) kctl_set_state(KCTL_ST_MOD_NOTIFIERS);
277 
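	/*
	 * The kernel's module list is circular, with "modules" as its head;
	 * hence the do/while below, which passes every modctl (including the
	 * head itself) to kctl_mod_loaded() exactly once before the walk
	 * wraps back around to "modules".
	 */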
278 	modp = &modules;
279 	do {
280 		kctl_mod_loaded(modp);
281 	} while ((modp = modp->mod_next) != &modules);
282 }
283 
284 static void
285 kctl_startup_thread(void)
286 {
287 	/*
288 	 * Create the worker thread, which will handle future requests from the
289 	 * debugger.
290 	 */
291 	kctl_wr_thr_start();
292 
293 	(void) kctl_set_state(KCTL_ST_THREAD_STARTED);
294 }
295 
296 static int
297 kctl_startup_boot(void)
298 {
299 	struct modctl_list *lp, **lpp;
300 	int rc;
301 
302 	if (kctl_wr_process() < 0) {
303 		kctl_warn("kmdb: failed to load modules");
304 		return (-1);
305 	}
306 
307 	mutex_enter(&mod_lock);
308 
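	/*
	 * Walk every linkmap krtld knows about and decompress the CTF data
	 * for each module loaded so far; a failure here is reported but is
	 * not fatal to debugger activation.
	 */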
309 	for (lpp = kobj_linkmaps; *lpp != NULL; lpp++) {
310 		for (lp = *lpp; lp != NULL; lp = lp->modl_next) {
311 			if ((rc = kctl_mod_decompress(lp->modl_modp)) != 0) {
312 				kctl_warn("kmdb: failed to decompress CTF data "
313 				    "for %s: %s", lp->modl_modp->mod_modname,
314 				    ctf_errmsg(rc));
315 			}
316 		}
317 	}
318 
319 	mutex_exit(&mod_lock);
320 
321 	return (0);
322 }
323 
324 static int
325 kctl_startup_preactivate(void *romp, const char *cfg, const char **argv)
326 {
327 	kmdb_auxv_t kav;
328 	int rc;
329 
330 	kctl_auxv_init(&kav, cfg, argv, romp);
331 	KCTL_PROM_LOCK;
332 	rc = kmdb_init(kctl.kctl_execname, &kav);
333 	KCTL_PROM_UNLOCK;
334 	kctl_auxv_fini(&kav);
335 
336 	if (rc < 0)
337 		return (EMDB_KNOLOAD);
338 
339 	(void) kctl_set_state(KCTL_ST_INITIALIZED);
340 
341 	if (kctl_preactivate_isadep() != 0)
342 		return (EIO);
343 
344 	(void) kctl_set_state(KCTL_ST_KCTL_PREACTIVATED);
345 
346 	return (0);
347 }
348 
349 static int
350 kctl_startup_activate(uint_t flags)
351 {
352 	kdi_debugvec_t *dvec;
353 
354 	KCTL_PROM_LOCK;
355 	kmdb_activate(&dvec, flags);
356 	KCTL_PROM_UNLOCK;
357 
358 	(void) kctl_set_state(KCTL_ST_DBG_ACTIVATED);
359 
360 	/*
361 	 * fill in a few remaining debugvec entries.
362 	 */
363 	dvec->dv_kctl_modavail = kctl_startup_modules;
364 	dvec->dv_kctl_thravail = kctl_startup_thread;
365 	dvec->dv_kctl_memavail = kctl_memavail;
366 
367 	kctl_activate_isadep(dvec);
368 
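	/*
	 * Publish the fully-populated vector before advertising the debugger:
	 * membar_producer() orders these stores so that, presumably, any CPU
	 * that later sees RB_DEBUG in boothowto also sees a complete
	 * kdi_dvec.
	 */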
369 	kdi_dvec = dvec;
370 	membar_producer();
371 
372 	boothowto |= RB_DEBUG;
373 
374 	(void) kctl_set_state(KCTL_ST_ACTIVE);
375 
376 	return (0);
377 }
378 
379 static int
380 kctl_state_check(uint_t state, uint_t ok_state)
381 {
382 	if (state == ok_state)
383 		return (0);
384 
385 	if (state == KCTL_ST_INACTIVE)
386 		return (EMDB_KINACTIVE);
387 	else if (kctl.kctl_state > KCTL_ST_INACTIVE &&
388 	    kctl.kctl_state < KCTL_ST_ACTIVE)
389 		return (EMDB_KACTIVATING);
390 	else if (kctl.kctl_state == KCTL_ST_ACTIVE)
391 		return (EMDB_KACTIVE);
392 	else if (kctl.kctl_state == KCTL_ST_DEACTIVATING)
393 		return (EMDB_KDEACTIVATING);
394 	else
395 		return (EINVAL);
396 }
397 
398 int
399 kctl_deactivate(void)
400 {
401 	int rc;
402 
403 	mutex_enter(&kctl.kctl_lock);
404 
405 	if (kctl.kctl_boot_loaded) {
406 		rc = EMDB_KNOUNLOAD;
407 		goto deactivate_done;
408 	}
409 
410 	if ((rc = kctl_state_check(kctl.kctl_state, KCTL_ST_ACTIVE)) != 0)
411 		goto deactivate_done;
412 
413 	kmdb_kdi_set_unload_request();
414 	kmdb_kdi_kmdb_enter();
415 
416 	/*
417 	 * The debugger will pass the request to the work thread, which will
418 	 * stop itself.
419 	 */
420 	kctl_wr_thr_join();
421 
422 deactivate_done:
423 	mutex_exit(&kctl.kctl_lock);
424 
425 	return (rc);
426 }
427 
428 /*
429  * Called from krtld, this indicates that the user loaded kmdb at boot.  We
430  * track activation states, but we don't attempt to clean up if activation
431  * fails, because boot debugger load failures are fatal.
432  *
433  * Further complicating matters, various kernel routines, such as bcopy and
434  * mutex_enter, assume the presence of some basic state.  On SPARC, it's the
435  * presence of a valid curthread pointer.  On AMD64, it's a valid curcpu
436  * pointer in GSBASE.  We set up temporary versions of these before beginning
437  * activation, and tear them down when we're done.
438  */
439 int
440 kctl_boot_activate(struct bootops *ops, void *romp, size_t memsz,
441     const char **argv)
442 {
443 	void *old;
444 
445 #ifdef __lint
446 	{
447 	/*
448 	 * krtld does a name-based symbol lookup to find this routine.  It then
449 	 * casts the address it gets, calling the result.  We want to make sure
450 	 * that the call in krtld stays in sync with the prototype for this
451 	 * function, so we define a type (kctl_boot_activate_f) that matches the
452 	 * current prototype.  The following assignment ensures that the type
453 	 * still matches the declaration, with lint as the enforcer.
454 	 */
455 	kctl_boot_activate_f *kba = kctl_boot_activate;
456 	if (kba == NULL)	/* Make lint think kba is actually used */
457 		return (0);
458 	}
459 #endif
460 
461 	old = kctl_boot_tmpinit();	/* Set up temporary state */
462 
463 	ASSERT(ops != NULL);
464 	kctl.kctl_boot_ops = ops;	/* must be set before kctl_init */
465 
466 	if (kctl_init() < 0)
467 		return (-1);
468 
469 	kctl.kctl_boot_loaded = 1;
470 
471 	kctl_dprintf("beginning kmdb initialization");
472 
473 	if (memsz == 0)
474 		memsz = KCTL_MEM_GOALSZ;
475 
476 	kctl.kctl_dseg = kdi_segdebugbase;
477 	kctl.kctl_dseg_size =
478 	    memsz > kdi_segdebugsize ? kdi_segdebugsize : memsz;
479 	kctl.kctl_memgoalsz = memsz;
480 
481 	if (kctl_boot_dseg_alloc(kctl.kctl_dseg, kctl.kctl_dseg_size) < 0) {
482 		kctl_warn("kmdb: failed to allocate %lu-byte debugger area at "
483 		    "%p", kctl.kctl_dseg_size, (void *)kctl.kctl_dseg);
484 		return (-1);
485 	}
486 
487 	(void) kctl_set_state(KCTL_ST_DSEG_ALLOCED);
488 
489 	if (kctl_startup_preactivate(romp, NULL, argv) != 0 ||
490 	    kctl_startup_activate(KMDB_ACT_F_BOOT)) {
491 		kctl_warn("kmdb: failed to activate");
492 		return (-1);
493 	}
494 
495 	if (kctl_startup_boot() < 0)
496 		return (-1);
497 
498 	kctl_dprintf("finished with kmdb initialization");
499 
500 	kctl_boot_tmpfini(old);
501 
502 	kctl.kctl_boot_ops = NULL;
503 
504 	return (0);
505 }
506 
507 int
508 kctl_modload_activate(size_t memsz, const char *cfg, uint_t flags)
509 {
510 	int rc;
511 
512 	mutex_enter(&kctl.kctl_lock);
513 
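	/*
	 * If the debugger is already resident and the caller asked for
	 * automatic entry, simply drop into it rather than failing with
	 * EMDB_KACTIVE.
	 */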
514 	if ((rc = kctl_state_check(kctl.kctl_state, KCTL_ST_INACTIVE)) != 0) {
515 		if ((flags & KMDB_F_AUTO_ENTRY) && rc == EMDB_KACTIVE) {
516 			kmdb_kdi_kmdb_enter();
517 			rc = 0;
518 		}
519 
520 		mutex_exit(&kctl.kctl_lock);
521 		return (rc);
522 	}
523 
524 	kctl.kctl_flags = flags;
525 
526 	if (memsz == 0)
527 		memsz = KCTL_MEM_GOALSZ;
528 
529 	kctl.kctl_dseg = kdi_segdebugbase;
530 	kctl.kctl_dseg_size =
531 	    memsz > kdi_segdebugsize ? kdi_segdebugsize : memsz;
532 	kctl.kctl_memgoalsz = memsz;
533 
534 	if ((rc = kctl_dseg_alloc(kctl.kctl_dseg, kctl.kctl_dseg_size)) != 0)
535 		goto activate_fail;
536 
537 	(void) kctl_set_state(KCTL_ST_DSEG_ALLOCED);
538 
539 	if ((rc = kctl_startup_preactivate(NULL, cfg, NULL)) != 0)
540 		goto activate_fail;
541 
542 	kctl_startup_modules();
543 	kctl_startup_thread();
544 
545 	if ((rc = kctl_startup_activate(0)) != 0)
546 		goto activate_fail;
547 
548 	kctl_memavail();	/* Must be after kdi_dvec is set */
549 
550 	if (kctl.kctl_flags & KMDB_F_AUTO_ENTRY)
551 		kmdb_kdi_kmdb_enter();
552 
553 	mutex_exit(&kctl.kctl_lock);
554 	return (0);
555 
556 activate_fail:
557 	kctl_cleanup();
558 	mutex_exit(&kctl.kctl_lock);
559 	return (rc);
560 }
561 
562 /*
563  * This interface will be called when drv/kmdb loads.  When we get the call, one
564  * of two things will have happened:
565  *
566  *  1. The debugger was loaded at boot.  We've progressed far enough into boot
567  *     as to allow drv/kmdb to be loaded as a non-primary.  Invocation of this
568  *     interface is the signal to the debugger that it can start allowing things
569  *     like dmod loading and automatic CTF decompression - things which require
570  *     the system services that have now been started.
571  *
572  *  2. The debugger was loaded after boot.  mdb opened /dev/kmdb, causing
573  *     drv/kmdb to load, followed by misc/kmdb.  Nothing has been set up yet,
574  *     so we need to initialize.  Activation will occur separately, so we don't
575  *     have to worry about that.
576  */
577 int
578 kctl_attach(dev_info_t *dip)
579 {
580 	kctl.kctl_drv_dip = dip;
581 
582 	return (0);
583 }
584 
585 int
586 kctl_detach(void)
587 {
588 	return (kctl.kctl_state == KCTL_ST_INACTIVE ? 0 : EBUSY);
589 }
590 
591 static struct modlmisc modlmisc = {
592 	&mod_miscops,
593 	KMDB_VERSION
594 };
595 
596 static struct modlinkage modlinkage = {
597 	MODREV_1,
598 	(void *)&modlmisc,
599 	NULL
600 };
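/*
 * misc/kmdb is a plain miscellaneous module: _init() below hands this
 * linkage to mod_install(), and _info() reports the KMDB_VERSION string
 * from the modlmisc above.
 */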
601 
602 /*
603  * Invoked only when debugger is loaded via modload - not invoked when debugger
604  * is loaded at boot.  kctl_boot_activate needs to call anything (aside from
605  * mod_install) this function does.
606  */
607 int
608 _init(void)
609 {
610 	if (kctl_init() < 0)
611 		return (EINVAL);
612 
613 	return (mod_install(&modlinkage));
614 }
615 
616 int
617 _info(struct modinfo *modinfop)
618 {
619 	return (mod_info(&modlinkage, modinfop));
620 }
621 
622 int
623 _fini(void)
624 {
625 	kctl_fini();
626 
627 	return (mod_remove(&modlinkage));
628 }
629