xref: /titanic_41/usr/src/uts/i86pc/os/machdep.c (revision 75d01c9ab5ef6f1bbac9f9d4eb379d5c38583d82)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/t_lock.h>
31 #include <sys/param.h>
32 #include <sys/sysmacros.h>
33 #include <sys/signal.h>
34 #include <sys/systm.h>
35 #include <sys/user.h>
36 #include <sys/mman.h>
37 #include <sys/vm.h>
38 
39 #include <sys/disp.h>
40 #include <sys/class.h>
41 
42 #include <sys/proc.h>
43 #include <sys/buf.h>
44 #include <sys/kmem.h>
45 
46 #include <sys/reboot.h>
47 #include <sys/uadmin.h>
48 #include <sys/callb.h>
49 
50 #include <sys/cred.h>
51 #include <sys/vnode.h>
52 #include <sys/file.h>
53 
54 #include <sys/procfs.h>
55 #include <sys/acct.h>
56 
57 #include <sys/vfs.h>
58 #include <sys/dnlc.h>
59 #include <sys/var.h>
60 #include <sys/cmn_err.h>
61 #include <sys/utsname.h>
62 #include <sys/debug.h>
63 #include <sys/kdi_impl.h>
64 
65 #include <sys/dumphdr.h>
66 #include <sys/bootconf.h>
67 #include <sys/varargs.h>
68 #include <sys/promif.h>
69 #include <sys/modctl.h>
70 
71 #include <sys/consdev.h>
72 #include <sys/frame.h>
73 
74 #include <sys/sunddi.h>
75 #include <sys/ddidmareq.h>
76 #include <sys/psw.h>
77 #include <sys/regset.h>
78 #include <sys/privregs.h>
79 #include <sys/clock.h>
80 #include <sys/tss.h>
81 #include <sys/cpu.h>
82 #include <sys/stack.h>
83 #include <sys/trap.h>
84 #include <sys/pic.h>
85 #include <sys/mmu.h>
86 #include <vm/hat.h>
87 #include <vm/anon.h>
88 #include <vm/as.h>
89 #include <vm/page.h>
90 #include <vm/seg.h>
91 #include <vm/seg_kmem.h>
92 #include <vm/seg_map.h>
93 #include <vm/seg_vn.h>
94 #include <vm/seg_kp.h>
95 #include <vm/hat_i86.h>
96 #include <sys/swap.h>
97 #include <sys/thread.h>
98 #include <sys/sysconf.h>
99 #include <sys/vm_machparam.h>
100 #include <sys/archsystm.h>
101 #include <sys/machsystm.h>
102 #include <sys/machlock.h>
103 #include <sys/x_call.h>
104 #include <sys/instance.h>
105 
106 #include <sys/time.h>
107 #include <sys/smp_impldefs.h>
108 #include <sys/psm_types.h>
109 #include <sys/atomic.h>
110 #include <sys/panic.h>
111 #include <sys/cpuvar.h>
112 #include <sys/dtrace.h>
113 #include <sys/bl.h>
114 #include <sys/nvpair.h>
115 #include <sys/x86_archext.h>
116 #include <sys/pool_pset.h>
117 #include <sys/autoconf.h>
118 #include <sys/kdi.h>
119 
120 #ifdef	TRAPTRACE
121 #include <sys/traptrace.h>
122 #endif	/* TRAPTRACE */
123 
124 #ifdef C2_AUDIT
125 extern void audit_enterprom(int);
126 extern void audit_exitprom(int);
127 #endif
128 
/*
 * The panicbuf array is used to record messages and state:
 */
char panicbuf[PANICBUFSIZE];

/*
 * maxphys - used during physio
 * klustsize - used for klustering by swapfs and specfs
 */
int maxphys = 56 * 1024;    /* XXX See vm_subr.c - max b_count in physio */
int klustsize = 56 * 1024;

caddr_t	p0_va;		/* Virtual address for accessing physical page 0 */

/*
 * defined here, though unused on x86,
 * to make kstat_fr.c happy.
 */
int vac;

/* Forward declarations for routines defined later in this file. */
void stop_other_cpus();
void debug_enter(char *);

int	procset = 1;

/*
 * Flags set by mdboot if we're panicking and we invoke mdboot on a CPU which
 * is not the boot CPU.  When set, panic_idle() on the boot CPU will invoke
 * mdboot with the corresponding arguments.
 */

#define	BOOT_WAIT	-1		/* Flag indicating we should idle */

volatile int cpu_boot_cmd = BOOT_WAIT;
volatile int cpu_boot_fcn = BOOT_WAIT;

extern void pm_cfb_check_and_powerup(void);
extern void pm_cfb_rele(void);
167 
/*
 * Machine dependent code to reboot.
 * "mdep" is interpreted as a character pointer; if non-null, it is a pointer
 * to a string to be used as the argument string when rebooting.
 *
 * "invoke_cb" is a boolean. It is set to true when mdboot() can safely
 * invoke CB_CL_MDBOOT callbacks before shutting the system down, i.e. when
 * we are in a normal shutdown sequence (interrupts are not blocked, the
 * system is not panic'ing or being suspended).
 */
/*ARGSUSED*/
void
mdboot(int cmd, int fcn, char *mdep, boolean_t invoke_cb)
{
	extern void mtrr_resync(void);

	/*
	 * The PSMI guarantees the implementor of psm_shutdown that it will
	 * only be called on the boot CPU.  This was needed by Corollary
	 * because the hardware does not allow other CPUs to reset the
	 * boot CPU.  So before rebooting, we switch over to the boot CPU.
	 * If we are panicking, the other CPUs are at high spl spinning in
	 * panic_idle(), so we set the cpu_boot_* variables and wait for
	 * the boot CPU to re-invoke mdboot() for us.
	 */
	if (!panicstr) {
		kpreempt_disable();
		affinity_set(getbootcpuid());
	} else if (CPU->cpu_id != getbootcpuid()) {
		cpu_boot_cmd = cmd;
		cpu_boot_fcn = fcn;
		for (;;);	/* panic_idle() on the boot CPU takes over */
	}

	/*
	 * XXX - rconsvp is set to NULL to ensure that output messages
	 * are sent to the underlying "hardware" device using the
	 * monitor's printf routine since we are in the process of
	 * either rebooting or halting the machine.
	 */
	rconsvp = NULL;

	/*
	 * Print the reboot message now, before pausing other cpus.
	 * There is a race condition in the printing support that
	 * can deadlock multiprocessor machines.
	 */
	if (!(fcn == AD_HALT || fcn == AD_POWEROFF))
		prom_printf("rebooting...\n");

	/*
	 * We can't bring up the console from above lock level, so do it now
	 */
	pm_cfb_check_and_powerup();

	/* make sure there are no more changes to the device tree */
	devtree_freeze();

	/* run shutdown callbacks only on a normal (non-panic) shutdown */
	if (invoke_cb)
		(void) callb_execute_class(CB_CL_MDBOOT, NULL);

	/*
	 * stop other cpus and raise our priority.  since there is only
	 * one active cpu after this, and our priority will be too high
	 * for us to be preempted, we're essentially single threaded
	 * from here on out.
	 */
	(void) spl6();
	if (!panicstr) {
		mutex_enter(&cpu_lock);
		pause_cpus(NULL);
		mutex_exit(&cpu_lock);
	}

	/*
	 * try and reset leaf devices.  reset_leaves() should only
	 * be called when there are no other threads that could be
	 * accessing devices
	 */
	reset_leaves();

	(void) spl8();
	(*psm_shutdownf)(cmd, fcn);

	/* restore the boot-time MTRR state before handing off */
	mtrr_resync();

	if (fcn == AD_HALT || fcn == AD_POWEROFF)
		halt((char *)NULL);
	else
		prom_reboot("");
	/*NOTREACHED*/
}
260 
/* mdpreboot - may be called prior to mdboot while root fs still mounted */
/*ARGSUSED*/
void
mdpreboot(int cmd, int fcn, char *mdep)
{
	/* Give the platform-support module an early shutdown notification. */
	(*psm_preshutdownf)(cmd, fcn);
}
268 
269 void
270 idle_other_cpus()
271 {
272 	int cpuid = CPU->cpu_id;
273 	cpuset_t xcset;
274 
275 	ASSERT(cpuid < NCPU);
276 	CPUSET_ALL_BUT(xcset, cpuid);
277 	xc_capture_cpus(xcset);
278 }
279 
280 void
281 resume_other_cpus()
282 {
283 	ASSERT(CPU->cpu_id < NCPU);
284 
285 	xc_release_cpus();
286 }
287 
288 extern void	mp_halt(char *);
289 
290 void
291 stop_other_cpus()
292 {
293 	int cpuid = CPU->cpu_id;
294 	cpuset_t xcset;
295 
296 	ASSERT(cpuid < NCPU);
297 
298 	/*
299 	 * xc_trycall will attempt to make all other CPUs execute mp_halt,
300 	 * and will return immediately regardless of whether or not it was
301 	 * able to make them do it.
302 	 */
303 	CPUSET_ALL_BUT(xcset, cpuid);
304 	xc_trycall(NULL, NULL, NULL, xcset, (int (*)())mp_halt);
305 }
306 
/*
 *	Machine dependent abort sequence handling
 */
void
abort_sequence_enter(char *msg)
{
	/*
	 * If abort sequences are disabled, record the attempt for the
	 * audit trail (when C2 auditing is active) and do nothing else.
	 */
	if (abort_enable == 0) {
#ifdef C2_AUDIT
		if (audit_active)
			audit_enterprom(0);
#endif /* C2_AUDIT */
		return;
	}
#ifdef C2_AUDIT
	/* audit the successful entry to the debugger ... */
	if (audit_active)
		audit_enterprom(1);
#endif /* C2_AUDIT */
	debug_enter(msg);
#ifdef C2_AUDIT
	/* ... and the return from it */
	if (audit_active)
		audit_exitprom(1);
#endif /* C2_AUDIT */
}
330 
331 /*
332  * Enter debugger.  Called when the user types ctrl-alt-d or whenever
333  * code wants to enter the debugger and possibly resume later.
334  */
335 void
336 debug_enter(
337 	char	*msg)		/* message to print, possibly NULL */
338 {
339 	if (dtrace_debugger_init != NULL)
340 		(*dtrace_debugger_init)();
341 
342 	if (msg)
343 		prom_printf("%s\n", msg);
344 
345 	if (boothowto & RB_DEBUG)
346 		kdi_dvec_enter();
347 
348 	if (dtrace_debugger_fini != NULL)
349 		(*dtrace_debugger_fini)();
350 }
351 
void
reset(void)
{
	ushort_t *bios_memchk;

	/*
	 * Can't use psm_map_phys before the hat is initialized.
	 */
	if (khat_running) {
		/*
		 * 0x472 is the BIOS data area reset-mode word; writing
		 * 0x1234 requests a warm boot that skips the POST
		 * memory test (see the inline comment below).
		 */
		bios_memchk = (ushort_t *)psm_map_phys(0x472,
		    sizeof (ushort_t), PROT_READ | PROT_WRITE);
		if (bios_memchk)
			*bios_memchk = 0x1234;	/* bios memory check disable */
	}

	pc_reset();
	/*NOTREACHED*/
}
370 
371 /*
372  * Halt the machine and return to the monitor
373  */
374 void
375 halt(char *s)
376 {
377 	stop_other_cpus();	/* send stop signal to other CPUs */
378 	if (s)
379 		prom_printf("(%s) \n", s);
380 	prom_exit_to_mon();
381 	/*NOTREACHED*/
382 }
383 
384 /*
385  * Enter monitor.  Called via cross-call from stop_other_cpus().
386  */
387 void
388 mp_halt(char *msg)
389 {
390 	if (msg)
391 		prom_printf("%s\n", msg);
392 
393 	/*CONSTANTCONDITION*/
394 	while (1)
395 		;
396 }
397 
/*
 * Initiate interrupt redistribution.
 * No-op on this platform; provided to satisfy the DDI interface.
 */
void
i_ddi_intr_redist_all_cpus()
{
}
405 
406 /*
407  * XXX These probably ought to live somewhere else
408  * XXX They are called from mem.c
409  */
410 
/*
 * Convert page frame number to an OBMEM page frame number
 * (i.e. put in the type bits -- zero for this implementation)
 */
pfn_t
impl_obmem_pfnum(pfn_t pf)
{
	/* identity mapping: x86 has no type bits to fold in */
	return (pf);
}
420 
#ifdef	NM_DEBUG
int nmi_test = 0;	/* checked in intentry.s during clock int */
int nmtest = -1;

/*
 * Debug-only NMI callback: advances the nmtest counter by 50 on each
 * invocation and reports whether the caller's argument matched it.
 *
 * Returns 1 (and prints the interrupted PC) when arg == nmtest after
 * the increment, 0 otherwise.
 *
 * Converted from a K&R definition with an implicit int return type to
 * an ANSI prototype, and the pointer/register format specifiers fixed:
 * printing a pointer with %x truncates and is undefined behavior on
 * 64-bit kernels, so use %p (and %lx for the greg_t program counter).
 */
int
nmfunc1(int arg, struct regs *rp)
{
	printf("nmi called with arg = %x, regs = %p\n", arg, (void *)rp);
	nmtest += 50;
	if (arg == nmtest) {
		printf("ip = %lx\n", (ulong_t)rp->r_pc);
		return (1);
	}
	return (0);
}

#endif
438 
439 #include <sys/bootsvcs.h>
440 
441 /* Hacked up initialization for initial kernel check out is HERE. */
442 /* The basic steps are: */
443 /*	kernel bootfuncs definition/initialization for KADB */
444 /*	kadb bootfuncs pointer initialization */
445 /*	putchar/getchar (interrupts disabled) */
446 
447 /* kadb bootfuncs pointer initialization */
448 
449 int
450 sysp_getchar()
451 {
452 	int i;
453 	int s;
454 
455 	if (cons_polledio == NULL) {
456 		/* Uh oh */
457 		prom_printf("getchar called with no console\n");
458 		for (;;)
459 			/* LOOP FOREVER */;
460 	}
461 
462 	s = clear_int_flag();
463 	i = cons_polledio->cons_polledio_getchar(
464 		cons_polledio->cons_polledio_argument);
465 	restore_int_flag(s);
466 	return (i);
467 }
468 
469 void
470 sysp_putchar(int c)
471 {
472 	int s;
473 
474 	/*
475 	 * We have no alternative but to drop the output on the floor.
476 	 */
477 	if (cons_polledio == NULL)
478 		return;
479 
480 	s = clear_int_flag();
481 	cons_polledio->cons_polledio_putchar(
482 		cons_polledio->cons_polledio_argument, c);
483 	restore_int_flag(s);
484 }
485 
486 int
487 sysp_ischar()
488 {
489 	int i;
490 	int s;
491 
492 	if (cons_polledio == NULL)
493 		return (0);
494 
495 	s = clear_int_flag();
496 	i = cons_polledio->cons_polledio_ischar(
497 		cons_polledio->cons_polledio_argument);
498 	restore_int_flag(s);
499 	return (i);
500 }
501 
/*
 * Prompt on the PROM console and wait for a single keypress.
 * Always returns 1.
 */
int
goany(void)
{
	prom_printf("Type any key to continue ");
	(void) prom_getchar();
	prom_printf("\n");
	return (1);
}
510 
/*
 * Boot syscall vector backed by the kernel's polled console routines
 * above; installed by kadb_uses_kernel().
 */
static struct boot_syscalls kern_sysp = {
	sysp_getchar,	/*	unchar	(*getchar)();	7  */
	sysp_putchar,	/*	int	(*putchar)();	8  */
	sysp_ischar,	/*	int	(*ischar)();	9  */
};
516 
void
kadb_uses_kernel()
{
	/*
	 * This routine is now totally misnamed, since it does not in fact
	 * control kadb's I/O; it only controls the kernel's prom_* I/O.
	 */
	sysp = &kern_sysp;
}
526 
527 /*
528  *	the interface to the outside world
529  */
530 
531 /*
532  * poll_port -- wait for a register to achieve a
533  *		specific state.  Arguments are a mask of bits we care about,
534  *		and two sub-masks.  To return normally, all the bits in the
535  *		first sub-mask must be ON, all the bits in the second sub-
 *		mask must be OFF.  If about five seconds pass without the register
537  *		achieving the desired bit configuration, we return 1, else
538  *		0.
539  */
540 int
541 poll_port(ushort_t port, ushort_t mask, ushort_t onbits, ushort_t offbits)
542 {
543 	int i;
544 	ushort_t maskval;
545 
546 	for (i = 500000; i; i--) {
547 		maskval = inb(port) & mask;
548 		if (((maskval & onbits) == onbits) &&
549 			((maskval & offbits) == 0))
550 			return (0);
551 		drv_usecwait(10);
552 	}
553 	return (1);
554 }
555 
/*
 * set_idle_cpu is called from idle() when a CPU becomes idle.
 */
/*LINTED: static unused */
static uint_t last_idle_cpu;	/* most recent CPU to go idle (debug aid) */

/*ARGSUSED*/
void
set_idle_cpu(int cpun)
{
	last_idle_cpu = cpun;
	/* notify the platform-support module so it can retarget interrupts */
	(*psm_set_idle_cpuf)(cpun);
}
569 
/*
 * unset_idle_cpu is called from idle() when a CPU is no longer idle.
 */
/*ARGSUSED*/
void
unset_idle_cpu(int cpun)
{
	/* notify the platform-support module that this CPU is busy again */
	(*psm_unset_idle_cpuf)(cpun);
}
579 
580 /*
581  * This routine is almost correct now, but not quite.  It still needs the
582  * equivalent concept of "hres_last_tick", just like on the sparc side.
583  * The idea is to take a snapshot of the hi-res timer while doing the
584  * hrestime_adj updates under hres_lock in locore, so that the small
585  * interval between interrupt assertion and interrupt processing is
586  * accounted for correctly.  Once we have this, the code below should
587  * be modified to subtract off hres_last_tick rather than hrtime_base.
588  *
589  * I'd have done this myself, but I don't have source to all of the
590  * vendor-specific hi-res timer routines (grrr...).  The generic hook I
591  * need is something like "gethrtime_unlocked()", which would be just like
592  * gethrtime() but would assume that you're already holding CLOCK_LOCK().
593  * This is what the GET_HRTIME() macro is for on sparc (although it also
594  * serves the function of making time available without a function call
595  * so you don't take a register window overflow while traps are disabled).
596  */
void
pc_gethrestime(timestruc_t *tp)
{
	int lock_prev;
	timestruc_t now;
	int nslt;		/* nsec since last tick */
	int adj;		/* amount of adjustment to apply */

loop:
	/*
	 * Snapshot the hres_lock generation first; if it has changed by
	 * the time we finish (checked at the bottom with the writer bit
	 * masked off), a clock-tick update raced with us and we retry.
	 */
	lock_prev = hres_lock;
	now = hrestime;
	nslt = (int)(gethrtime() - hres_last_tick);
	if (nslt < 0) {
		/*
		 * nslt < 0 means a tick came between sampling
		 * gethrtime() and hres_last_tick; restart the loop
		 */

		goto loop;
	}
	now.tv_nsec += nslt;
	if (hrestime_adj != 0) {
		/*
		 * Apply a proportional slice (nslt >> ADJ_SHIFT) of the
		 * pending adjtime()-style correction, clamped so we
		 * never over-apply beyond hrestime_adj itself.
		 */
		if (hrestime_adj > 0) {
			adj = (nslt >> ADJ_SHIFT);
			if (adj > hrestime_adj)
				adj = (int)hrestime_adj;
		} else {
			adj = -(nslt >> ADJ_SHIFT);
			if (adj < hrestime_adj)
				adj = (int)hrestime_adj;
		}
		now.tv_nsec += adj;
	}
	while ((unsigned long)now.tv_nsec >= NANOSEC) {

		/*
		 * We might have a large adjustment or have been in the
		 * debugger for a long time; take care of (at most) four
		 * of those missed seconds (tv_nsec is 32 bits, so
		 * anything >4s will be wrapping around).  However,
		 * anything more than 2 seconds out of sync will trigger
		 * timedelta from clock() to go correct the time anyway,
		 * so do what we can, and let the big crowbar do the
		 * rest.  A similar correction while loop exists inside
		 * hres_tick(); in all cases we'd like tv_nsec to
		 * satisfy 0 <= tv_nsec < NANOSEC to avoid confusing
		 * user processes, but if tv_sec's a little behind for a
		 * little while, that's OK; time still monotonically
		 * increases.
		 */

		now.tv_nsec -= NANOSEC;
		now.tv_sec++;
	}
	if ((hres_lock & ~1) != lock_prev)
		goto loop;

	*tp = now;
}
656 
657 void
658 gethrestime_lasttick(timespec_t *tp)
659 {
660 	int s;
661 
662 	s = hr_clock_lock();
663 	*tp = hrestime;
664 	hr_clock_unlock(s);
665 }
666 
667 time_t
668 gethrestime_sec(void)
669 {
670 	timestruc_t now;
671 
672 	gethrestime(&now);
673 	return (now.tv_sec);
674 }
675 
/*
 * Initialize a kernel thread's stack
 */

caddr_t
thread_stk_init(caddr_t stk)
{
	/* caller must hand us a properly aligned stack top */
	ASSERT(((uintptr_t)stk & (STACK_ALIGN - 1)) == 0);
	/* reserve one minimal frame and return the adjusted stack top */
	return (stk - SA(MINFRAME));
}
686 
687 /*
688  * Initialize lwp's kernel stack.
689  */
690 
691 #ifdef TRAPTRACE
692 /*
693  * There's a tricky interdependency here between use of sysenter and
694  * TRAPTRACE which needs recording to avoid future confusion (this is
695  * about the third time I've re-figured this out ..)
696  *
697  * Here's how debugging lcall works with TRAPTRACE.
698  *
699  * 1 We're in userland with a breakpoint on the lcall instruction.
700  * 2 We execute the instruction - the instruction pushes the userland
701  *   %ss, %esp, %efl, %cs, %eip on the stack and zips into the kernel
702  *   via the call gate.
703  * 3 The hardware raises a debug trap in kernel mode, the hardware
704  *   pushes %efl, %cs, %eip and gets to dbgtrap via the idt.
705  * 4 dbgtrap pushes the error code and trapno and calls cmntrap
706  * 5 cmntrap finishes building a trap frame
707  * 6 The TRACE_REGS macros in cmntrap copy a REGSIZE worth chunk
708  *   off the stack into the traptrace buffer.
709  *
710  * This means that the traptrace buffer contains the wrong values in
711  * %esp and %ss, but everything else in there is correct.
712  *
713  * Here's how debugging sysenter works with TRAPTRACE.
714  *
715  * a We're in userland with a breakpoint on the sysenter instruction.
716  * b We execute the instruction - the instruction pushes -nothing-
717  *   on the stack, but sets %cs, %eip, %ss, %esp to prearranged
718  *   values to take us to sys_sysenter, at the top of the lwp's
719  *   stack.
720  * c goto 3
721  *
722  * At this point, because we got into the kernel without the requisite
723  * five pushes on the stack, if we didn't make extra room, we'd
724  * end up with the TRACE_REGS macro fetching the saved %ss and %esp
725  * values from negative (unmapped) stack addresses -- which really bites.
726  * That's why we do the '-= 8' below.
727  *
728  * XXX	Note that reading "up" lwp0's stack works because t0 is declared
729  *	right next to t0stack in locore.s
730  */
731 #endif
732 
/*
 * Carve an lwp's register save area and initial frame out of the top of
 * its kernel stack, and give the pcb's %fs/%gs descriptors a known
 * initial state.  Returns the new (aligned) stack top.
 */
caddr_t
lwp_stk_init(klwp_t *lwp, caddr_t stk)
{
	caddr_t oldstk;
	struct pcb *pcb = &lwp->lwp_pcb;

	oldstk = stk;
	/* room for a struct regs plus one minimal frame */
	stk -= SA(sizeof (struct regs) + SA(MINFRAME));
#ifdef TRAPTRACE
	stk -= 2 * sizeof (greg_t); /* space for phony %ss:%sp (see above) */
#endif
	stk = (caddr_t)((uintptr_t)stk & ~(STACK_ALIGN - 1ul));
	/* zero everything we just reserved */
	bzero(stk, oldstk - stk);
	lwp->lwp_regs = (void *)(stk + SA(MINFRAME));

	/*
	 * Arrange that the virtualized %fs and %gs GDT descriptors
	 * have a well-defined initial state (present, ring 3
	 * and of type data).
	 */
#if defined(__amd64)
	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE)
		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;
	else
		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_u32desc;
#elif defined(__i386)
	pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;
#endif	/* __i386 */
	lwp_installctx(lwp);
	return (stk);
}
764 
/* Tear down per-lwp stack state; nothing to do on x86. */
/*ARGSUSED*/
void
lwp_stk_fini(klwp_t *lwp)
{}
769 
/*
 * If we're not the panic CPU, we wait in panic_idle for reboot.  If we're
 * the boot CPU, then we are responsible for actually doing the reboot, so
 * we watch for cpu_boot_cmd to be set.
 */
static void
panic_idle(void)
{
	splx(ipltospl(CLOCK_LEVEL));
	/* save a resume point so the debugger can restart us here */
	(void) setjmp(&curthread->t_pcb);

	if (CPU->cpu_id == getbootcpuid()) {
		/* wait for mdboot() on another CPU to post its arguments */
		while (cpu_boot_cmd == BOOT_WAIT || cpu_boot_fcn == BOOT_WAIT)
			drv_usecwait(10);

		mdboot(cpu_boot_cmd, cpu_boot_fcn, NULL, B_FALSE);
	}

	for (;;);
}
790 
/*
 * Stop the other CPUs by cross-calling them and forcing them to enter
 * the panic_idle() loop above.
 */
/*ARGSUSED*/
void
panic_stopcpus(cpu_t *cp, kthread_t *t, int spl)
{
	processorid_t i;
	cpuset_t xcset;

	(void) splzs();

	CPUSET_ALL_BUT(xcset, cp->cpu_id);
	xc_trycall(NULL, NULL, NULL, xcset, (int (*)())panic_idle);

	/* mark every other existing CPU quiesced for the dump code */
	for (i = 0; i < NCPU; i++) {
		if (i != cp->cpu_id && cpu[i] != NULL &&
		    (cpu[i]->cpu_flags & CPU_EXISTS))
			cpu[i]->cpu_flags |= CPU_QUIESCED;
	}
}
813 
/*
 * Platform callback following each entry to panicsys().
 */
/*ARGSUSED*/
void
panic_enter_hw(int spl)
{
	/* Nothing to do here */
}
823 
/*
 * Platform-specific code to execute after panicstr is set: we invoke
 * the PSM entry point to indicate that a panic has occurred.
 */
/*ARGSUSED*/
void
panic_quiesce_hw(panic_data_t *pdp)
{
	psm_notifyf(PSM_PANIC_ENTER);

#ifdef	TRAPTRACE
	/*
	 * Turn off TRAPTRACE
	 */
	TRAPTRACE_FREEZE;
#endif	/* TRAPTRACE */
}
841 
/*
 * Platform callback prior to writing crash dump.
 */
/*ARGSUSED*/
void
panic_dump_hw(int spl)
{
	/* Nothing to do here */
}
851 
/* Time-of-day fault notification; no platform action needed on x86. */
/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{
}
857 
/* Component blacklisting is not supported on this platform. */
/*ARGSUSED*/
int
blacklist(int cmd, const char *scheme, nvlist_t *fmri, const char *class)
{
	return (ENOTSUP);
}
864 
/*
 * The underlying console output routines are protected by raising IPL in case
 * we are still calling into the early boot services.  Once we start calling
 * the kernel console emulator, it will disable interrupts completely during
 * character rendering (see sysp_putchar, for example).  Refer to the comments
 * and code in common/os/console.c for more information on these callbacks.
 */
/*ARGSUSED*/
int
console_enter(int busy)
{
	/* returns the previous spl so console_exit() can restore it */
	return (splzs());
}
878 
/* Restore the spl saved by the matching console_enter() call. */
/*ARGSUSED*/
void
console_exit(int busy, int spl)
{
	splx(spl);
}
885 
/*
 * Allocate a region of virtual address space, unmapped.
 * Stubbed out except on sparc, at least for now: the requested
 * address is simply handed back unchanged.
 */
/*ARGSUSED*/
void *
boot_virt_alloc(void *addr, size_t size)
{
	void *region = addr;

	return (region);
}
896 
volatile unsigned long	tenmicrodata;	/* sink for the calibration loop */

/*
 * Busy-wait for roughly ten microseconds.  Uses the hi-res timer once
 * it is initialized; before that, falls back to a calibrated spin loop.
 */
void
tenmicrosec(void)
{
	extern int	tsc_gethrtime_initted;
	int		i;

	if (tsc_gethrtime_initted) {
		/* hi-res clock is up: spin until 10us have elapsed */
		hrtime_t start, end;
		start = end =  gethrtime();
		while ((end - start) < (10 * (NANOSEC / MICROSEC))) {
			SMT_PAUSE();
			end = gethrtime();
		}
	} else {
		/*
		 * Artificial loop to induce delay.
		 */
		for (i = 0; i < microdata; i++)
			tenmicrodata = microdata;
	}
}
920 
921 /*
922  * get_cpu_mstate() is passed an array of timestamps, NCMSTATES
923  * long, and it fills in the array with the time spent on cpu in
924  * each of the mstates, where time is returned in nsec.
925  *
926  * No guarantee is made that the returned values in times[] will
927  * monotonically increase on sequential calls, although this will
928  * be true in the long run. Any such guarantee must be handled by
929  * the caller, if needed. This can happen if we fail to account
930  * for elapsed time due to a generation counter conflict, yet we
931  * did account for it on a prior call (see below).
932  *
933  * The complication is that the cpu in question may be updating
934  * its microstate at the same time that we are reading it.
935  * Because the microstate is only updated when the CPU's state
936  * changes, the values in cpu_intracct[] can be indefinitely out
937  * of date. To determine true current values, it is necessary to
938  * compare the current time with cpu_mstate_start, and add the
939  * difference to times[cpu_mstate].
940  *
941  * This can be a problem if those values are changing out from
942  * under us. Because the code path in new_cpu_mstate() is
943  * performance critical, we have not added a lock to it. Instead,
944  * we have added a generation counter. Before beginning
945  * modifications, the counter is set to 0. After modifications,
946  * it is set to the old value plus one.
947  *
948  * get_cpu_mstate() will not consider the values of cpu_mstate
949  * and cpu_mstate_start to be usable unless the value of
950  * cpu_mstate_gen is both non-zero and unchanged, both before and
951  * after reading the mstate information. Note that we must
952  * protect against out-of-order loads around accesses to the
953  * generation counter. Also, this is a best effort approach in
954  * that we do not retry should the counter be found to have
955  * changed.
956  *
957  * cpu_intracct[] is used to identify time spent in each CPU
958  * mstate while handling interrupts. Such time should be reported
959  * against system time, and so is subtracted out from its
960  * corresponding cpu_acct[] time and added to
961  * cpu_acct[CMS_SYSTEM].
962  */
963 
void
get_cpu_mstate(cpu_t *cpu, hrtime_t *times)
{
	int i;
	hrtime_t now, start;
	uint16_t gen;
	uint16_t state;
	hrtime_t intracct[NCMSTATES];

	/*
	 * Load all volatile state under the protection of membar.
	 * cpu_acct[cpu_mstate] must be loaded to avoid double counting
	 * of (now - cpu_mstate_start) by a change in CPU mstate that
	 * arrives after we make our last check of cpu_mstate_gen.
	 */

	now = gethrtime_unscaled();
	gen = cpu->cpu_mstate_gen;

	membar_consumer();	/* guarantee load ordering */
	start = cpu->cpu_mstate_start;
	state = cpu->cpu_mstate;
	for (i = 0; i < NCMSTATES; i++) {
		intracct[i] = cpu->cpu_intracct[i];
		times[i] = cpu->cpu_acct[i];
	}
	membar_consumer();	/* guarantee load ordering */

	/*
	 * Only credit the in-progress interval if the generation counter
	 * is non-zero (no update in flight) and unchanged across our reads
	 * (see the block comment above); best effort, no retry.
	 */
	if (gen != 0 && gen == cpu->cpu_mstate_gen && now > start)
		times[state] += now - start;

	/*
	 * Move interrupt-handling time out of each mstate and into
	 * CMS_SYSTEM, clamping so no state goes negative, then scale
	 * everything from unscaled hrtime units to nsec.
	 */
	for (i = 0; i < NCMSTATES; i++) {
		if (i == CMS_SYSTEM)
			continue;
		times[i] -= intracct[i];
		if (times[i] < 0) {
			intracct[i] += times[i];
			times[i] = 0;
		}
		times[CMS_SYSTEM] += intracct[i];
		scalehrtime(&times[i]);
	}
	scalehrtime(&times[CMS_SYSTEM]);
}
1008