xref: /titanic_52/usr/src/uts/i86pc/os/machdep.c (revision a99982a76d4cc12b1e9021e88531cf425d1e7369)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/t_lock.h>
31 #include <sys/param.h>
32 #include <sys/sysmacros.h>
33 #include <sys/signal.h>
34 #include <sys/systm.h>
35 #include <sys/user.h>
36 #include <sys/mman.h>
37 #include <sys/vm.h>
38 
39 #include <sys/disp.h>
40 #include <sys/class.h>
41 
42 #include <sys/proc.h>
43 #include <sys/buf.h>
44 #include <sys/kmem.h>
45 
46 #include <sys/reboot.h>
47 #include <sys/uadmin.h>
48 #include <sys/callb.h>
49 
50 #include <sys/cred.h>
51 #include <sys/vnode.h>
52 #include <sys/file.h>
53 
54 #include <sys/procfs.h>
55 #include <sys/acct.h>
56 
57 #include <sys/vfs.h>
58 #include <sys/dnlc.h>
59 #include <sys/var.h>
60 #include <sys/cmn_err.h>
61 #include <sys/utsname.h>
62 #include <sys/debug.h>
63 
64 #include <sys/dumphdr.h>
65 #include <sys/bootconf.h>
66 #include <sys/varargs.h>
67 #include <sys/promif.h>
68 #include <sys/modctl.h>
69 
70 #include <sys/consdev.h>
71 #include <sys/frame.h>
72 
73 #include <sys/sunddi.h>
74 #include <sys/ddidmareq.h>
75 #include <sys/psw.h>
76 #include <sys/regset.h>
77 #include <sys/privregs.h>
78 #include <sys/clock.h>
79 #include <sys/tss.h>
80 #include <sys/cpu.h>
81 #include <sys/stack.h>
82 #include <sys/trap.h>
83 #include <sys/pic.h>
84 #include <vm/hat.h>
85 #include <vm/anon.h>
86 #include <vm/as.h>
87 #include <vm/page.h>
88 #include <vm/seg.h>
89 #include <vm/seg_kmem.h>
90 #include <vm/seg_map.h>
91 #include <vm/seg_vn.h>
92 #include <vm/seg_kp.h>
93 #include <vm/hat_i86.h>
94 #include <sys/swap.h>
95 #include <sys/thread.h>
96 #include <sys/sysconf.h>
97 #include <sys/vm_machparam.h>
98 #include <sys/archsystm.h>
99 #include <sys/machsystm.h>
100 #include <sys/machlock.h>
101 #include <sys/x_call.h>
102 #include <sys/instance.h>
103 
104 #include <sys/time.h>
105 #include <sys/smp_impldefs.h>
106 #include <sys/psm_types.h>
107 #include <sys/atomic.h>
108 #include <sys/panic.h>
109 #include <sys/cpuvar.h>
110 #include <sys/dtrace.h>
111 #include <sys/bl.h>
112 #include <sys/nvpair.h>
113 #include <sys/x86_archext.h>
114 #include <sys/pool_pset.h>
115 #include <sys/autoconf.h>
116 #include <sys/mem.h>
117 #include <sys/dumphdr.h>
118 #include <sys/compress.h>
119 
120 #ifdef	TRAPTRACE
121 #include <sys/traptrace.h>
122 #endif	/* TRAPTRACE */
123 
#ifdef C2_AUDIT
/* C2 audit hooks invoked when entering/leaving the prom/debugger */
extern void audit_enterprom(int);
extern void audit_exitprom(int);
#endif

/*
 * The panicbuf array is used to record messages and state:
 */
char panicbuf[PANICBUFSIZE];

/*
 * maxphys - used during physio
 * klustsize - used for klustering by swapfs and specfs
 */
int maxphys = 56 * 1024;    /* XXX See vm_subr.c - max b_count in physio */
int klustsize = 56 * 1024;

caddr_t	p0_va;		/* Virtual address for accessing physical page 0 */

/*
 * defined here, though unused on x86,
 * to make kstat_fr.c happy.
 */
int vac;

/* forward declarations of routines defined later in this file */
void stop_other_cpus();
void debug_enter(char *);

/* console frame buffer power-management helpers (defined elsewhere) */
extern void pm_cfb_check_and_powerup(void);
extern void pm_cfb_rele(void);
154 
/*
 * Machine dependent code to reboot.
 * "mdep" is interpreted as a character pointer; if non-null, it is a pointer
 * to a string to be used as the argument string when rebooting.
 *
 * "invoke_cb" is a boolean. It is set to true when mdboot() can safely
 * invoke CB_CL_MDBOOT callbacks before shutting the system down, i.e. when
 * we are in a normal shutdown sequence (interrupts are not blocked, the
 * system is not panic'ing or being suspended).
 */
/*ARGSUSED*/
void
mdboot(int cmd, int fcn, char *mdep, boolean_t invoke_cb)
{
	extern void mtrr_resync(void);

	/*
	 * On a normal shutdown, bind to the current CPU with preemption
	 * disabled so the rest of the sequence runs on one processor.
	 * Skipped on panic, when we must not block or migrate.
	 */
	if (!panicstr) {
		kpreempt_disable();
		affinity_set(CPU_CURRENT);
	}

	/*
	 * XXX - rconsvp is set to NULL to ensure that output messages
	 * are sent to the underlying "hardware" device using the
	 * monitor's printf routine since we are in the process of
	 * either rebooting or halting the machine.
	 */
	rconsvp = NULL;

	/*
	 * Print the reboot message now, before pausing other cpus.
	 * There is a race condition in the printing support that
	 * can deadlock multiprocessor machines.
	 */
	if (!(fcn == AD_HALT || fcn == AD_POWEROFF))
		prom_printf("rebooting...\n");

	/*
	 * We can't bring up the console from above lock level, so do it now
	 */
	pm_cfb_check_and_powerup();

	/* make sure there are no more changes to the device tree */
	devtree_freeze();

	/* only a normal shutdown may run the CB_CL_MDBOOT callbacks */
	if (invoke_cb)
		(void) callb_execute_class(CB_CL_MDBOOT, NULL);

	/*
	 * Clear any unresolved UEs from memory.
	 */
	page_retire_mdboot();

	/*
	 * stop other cpus and raise our priority.  since there is only
	 * one active cpu after this, and our priority will be too high
	 * for us to be preempted, we're essentially single threaded
	 * from here on out.
	 */
	(void) spl6();
	if (!panicstr) {
		mutex_enter(&cpu_lock);
		pause_cpus(NULL);
		mutex_exit(&cpu_lock);
	}

	/*
	 * try and reset leaf devices.  reset_leaves() should only
	 * be called when there are no other threads that could be
	 * accessing devices
	 */
	reset_leaves();

	/* block remaining interrupt activity and hand off to the PSM */
	(void) spl8();
	(*psm_shutdownf)(cmd, fcn);

	mtrr_resync();

	if (fcn == AD_HALT || fcn == AD_POWEROFF)
		halt((char *)NULL);
	else
		prom_reboot("");
	/*NOTREACHED*/
}
239 
/* mdpreboot - may be called prior to mdboot while root fs still mounted */
/*ARGSUSED*/
void
mdpreboot(int cmd, int fcn, char *mdep)
{
	/* hand the pre-shutdown notification straight to the PSM layer */
	psm_preshutdownf(cmd, fcn);
}
247 
248 void
249 idle_other_cpus()
250 {
251 	int cpuid = CPU->cpu_id;
252 	cpuset_t xcset;
253 
254 	ASSERT(cpuid < NCPU);
255 	CPUSET_ALL_BUT(xcset, cpuid);
256 	xc_capture_cpus(xcset);
257 }
258 
259 void
260 resume_other_cpus()
261 {
262 	ASSERT(CPU->cpu_id < NCPU);
263 
264 	xc_release_cpus();
265 }
266 
267 void
268 stop_other_cpus()
269 {
270 	int cpuid = CPU->cpu_id;
271 	cpuset_t xcset;
272 
273 	ASSERT(cpuid < NCPU);
274 
275 	/*
276 	 * xc_trycall will attempt to make all other CPUs execute mach_cpu_halt,
277 	 * and will return immediately regardless of whether or not it was
278 	 * able to make them do it.
279 	 */
280 	CPUSET_ALL_BUT(xcset, cpuid);
281 	xc_trycall(NULL, NULL, NULL, xcset, (int (*)())mach_cpu_halt);
282 }
283 
/*
 *	Machine dependent abort sequence handling
 */
void
abort_sequence_enter(char *msg)
{
	/*
	 * If abort sequences are disabled, record the rejected attempt
	 * with the C2 audit subsystem (if active) and ignore it.
	 */
	if (abort_enable == 0) {
#ifdef C2_AUDIT
		if (audit_active)
			audit_enterprom(0);
#endif /* C2_AUDIT */
		return;
	}
#ifdef C2_AUDIT
	/* audit the accepted entry to the debugger */
	if (audit_active)
		audit_enterprom(1);
#endif /* C2_AUDIT */
	debug_enter(msg);
#ifdef C2_AUDIT
	/* audit the return from the debugger */
	if (audit_active)
		audit_exitprom(1);
#endif /* C2_AUDIT */
}
307 
308 /*
309  * Enter debugger.  Called when the user types ctrl-alt-d or whenever
310  * code wants to enter the debugger and possibly resume later.
311  */
312 void
313 debug_enter(
314 	char	*msg)		/* message to print, possibly NULL */
315 {
316 	if (dtrace_debugger_init != NULL)
317 		(*dtrace_debugger_init)();
318 
319 	if (msg)
320 		prom_printf("%s\n", msg);
321 
322 	if (boothowto & RB_DEBUG)
323 		kmdb_enter();
324 
325 	if (dtrace_debugger_fini != NULL)
326 		(*dtrace_debugger_fini)();
327 }
328 
void
reset(void)
{
	ushort_t *bios_memchk;

	/*
	 * Can't use psm_map_phys before the hat is initialized.
	 */
	if (khat_running) {
		/*
		 * Map the BIOS data area word at physical 0x472 and
		 * write 0x1234 there so the BIOS skips its memory test
		 * on the way back up (warm-boot flag).
		 */
		bios_memchk = (ushort_t *)psm_map_phys(0x472,
		    sizeof (ushort_t), PROT_READ | PROT_WRITE);
		if (bios_memchk)
			*bios_memchk = 0x1234;	/* bios memory check disable */
	}

	/* prefer an EFI reset when firmware advertises an EFI system table */
	if (ddi_prop_exists(DDI_DEV_T_ANY, ddi_root_node(), 0, "efi-systab"))
		efi_reset();
	pc_reset();
	/*NOTREACHED*/
}
349 
350 /*
351  * Halt the machine and return to the monitor
352  */
353 void
354 halt(char *s)
355 {
356 	stop_other_cpus();	/* send stop signal to other CPUs */
357 	if (s)
358 		prom_printf("(%s) \n", s);
359 	prom_exit_to_mon();
360 	/*NOTREACHED*/
361 }
362 
/*
 * Initiate interrupt redistribution.
 */
void
i_ddi_intr_redist_all_cpus()
{
	/* no-op on this platform */
}
370 
371 /*
372  * XXX These probably ought to live somewhere else
373  * XXX They are called from mem.c
374  */
375 
/*
 * Convert page frame number to an OBMEM page frame number
 * (i.e. put in the type bits -- zero for this implementation)
 */
pfn_t
impl_obmem_pfnum(pfn_t pf)
{
	/* identity mapping: no type bits to insert on x86 */
	return (pf);
}
385 
#ifdef	NM_DEBUG
int nmi_test = 0;	/* checked in intentry.s during clock int */
int nmtest = -1;

/*
 * Debug-only NMI callback: logs each NMI, advances nmtest by 50, and
 * claims the NMI (returns 1) only when the caller's argument matches
 * the new nmtest value; otherwise returns 0.
 *
 * Converted from a K&R-style definition with an implicit int return
 * type (removed from the language in C99) to an ANSI prototype, and
 * the regs pointer is now printed with %p instead of %x to avoid a
 * mismatched format specifier.
 */
int
nmfunc1(int arg, struct regs *rp)
{
	printf("nmi called with arg = %x, regs = %p\n", arg, (void *)rp);
	nmtest += 50;
	if (arg == nmtest) {
		/* NOTE(review): %x assumes r_pc fits an int -- verify on 64-bit */
		printf("ip = %x\n", rp->r_pc);
		return (1);
	}
	return (0);
}

#endif
403 
404 #include <sys/bootsvcs.h>
405 
406 /* Hacked up initialization for initial kernel check out is HERE. */
407 /* The basic steps are: */
408 /*	kernel bootfuncs definition/initialization for KADB */
409 /*	kadb bootfuncs pointer initialization */
410 /*	putchar/getchar (interrupts disabled) */
411 
412 /* kadb bootfuncs pointer initialization */
413 
414 int
415 sysp_getchar()
416 {
417 	int i;
418 	ulong_t s;
419 
420 	if (cons_polledio == NULL) {
421 		/* Uh oh */
422 		prom_printf("getchar called with no console\n");
423 		for (;;)
424 			/* LOOP FOREVER */;
425 	}
426 
427 	s = clear_int_flag();
428 	i = cons_polledio->cons_polledio_getchar(
429 		cons_polledio->cons_polledio_argument);
430 	restore_int_flag(s);
431 	return (i);
432 }
433 
434 void
435 sysp_putchar(int c)
436 {
437 	ulong_t s;
438 
439 	/*
440 	 * We have no alternative but to drop the output on the floor.
441 	 */
442 	if (cons_polledio == NULL ||
443 	    cons_polledio->cons_polledio_putchar == NULL)
444 		return;
445 
446 	s = clear_int_flag();
447 	cons_polledio->cons_polledio_putchar(
448 		cons_polledio->cons_polledio_argument, c);
449 	restore_int_flag(s);
450 }
451 
452 int
453 sysp_ischar()
454 {
455 	int i;
456 	ulong_t s;
457 
458 	if (cons_polledio == NULL ||
459 	    cons_polledio->cons_polledio_ischar == NULL)
460 		return (0);
461 
462 	s = clear_int_flag();
463 	i = cons_polledio->cons_polledio_ischar(
464 		cons_polledio->cons_polledio_argument);
465 	restore_int_flag(s);
466 	return (i);
467 }
468 
/*
 * Prompt on the console and wait for a single keystroke; always
 * returns 1.
 */
int
goany(void)
{
	prom_printf("Type any key to continue ");
	(void) prom_getchar();
	prom_printf("\n");
	return (1);
}
477 
/* Boot syscall vector backed by the kernel's polled console routines. */
static struct boot_syscalls kern_sysp = {
	sysp_getchar,	/*	unchar	(*getchar)();	7  */
	sysp_putchar,	/*	int	(*putchar)();	8  */
	sysp_ischar,	/*	int	(*ischar)();	9  */
};
483 
void
kadb_uses_kernel()
{
	/*
	 * This routine is now totally misnamed, since it does not in fact
	 * control kadb's I/O; it only controls the kernel's prom_* I/O.
	 */
	sysp = &kern_sysp;	/* route prom_* console I/O via kern_sysp */
}
493 
494 /*
495  *	the interface to the outside world
496  */
497 
498 /*
499  * poll_port -- wait for a register to achieve a
500  *		specific state.  Arguments are a mask of bits we care about,
501  *		and two sub-masks.  To return normally, all the bits in the
502  *		first sub-mask must be ON, all the bits in the second sub-
503  *		mask must be OFF.  If about seconds pass without the register
504  *		achieving the desired bit configuration, we return 1, else
505  *		0.
506  */
507 int
508 poll_port(ushort_t port, ushort_t mask, ushort_t onbits, ushort_t offbits)
509 {
510 	int i;
511 	ushort_t maskval;
512 
513 	for (i = 500000; i; i--) {
514 		maskval = inb(port) & mask;
515 		if (((maskval & onbits) == onbits) &&
516 			((maskval & offbits) == 0))
517 			return (0);
518 		drv_usecwait(10);
519 	}
520 	return (1);
521 }
522 
/*
 * set_idle_cpu is called from idle() when a CPU becomes idle.
 */
/*LINTED: static unused */
static uint_t last_idle_cpu;	/* most recent CPU to enter idle */

/*ARGSUSED*/
void
set_idle_cpu(int cpun)
{
	last_idle_cpu = cpun;
	(*psm_set_idle_cpuf)(cpun);	/* notify the PSM module */
}
536 
/*
 * unset_idle_cpu is called from idle() when a CPU is no longer idle.
 */
/*ARGSUSED*/
void
unset_idle_cpu(int cpun)
{
	(*psm_unset_idle_cpuf)(cpun);	/* notify the PSM module */
}
546 
/*
 * This routine is almost correct now, but not quite.  It still needs the
 * equivalent concept of "hres_last_tick", just like on the sparc side.
 * The idea is to take a snapshot of the hi-res timer while doing the
 * hrestime_adj updates under hres_lock in locore, so that the small
 * interval between interrupt assertion and interrupt processing is
 * accounted for correctly.  Once we have this, the code below should
 * be modified to subtract off hres_last_tick rather than hrtime_base.
 *
 * I'd have done this myself, but I don't have source to all of the
 * vendor-specific hi-res timer routines (grrr...).  The generic hook I
 * need is something like "gethrtime_unlocked()", which would be just like
 * gethrtime() but would assume that you're already holding CLOCK_LOCK().
 * This is what the GET_HRTIME() macro is for on sparc (although it also
 * serves the function of making time available without a function call
 * so you don't take a register window overflow while traps are disabled).
 */
void
pc_gethrestime(timestruc_t *tp)
{
	int lock_prev;
	timestruc_t now;
	int nslt;		/* nsec since last tick */
	int adj;		/* amount of adjustment to apply */

loop:
	/*
	 * Optimistic lock-free read: sample hres_lock before reading
	 * and re-check it (below) afterwards to detect a concurrent
	 * update of hrestime by the tick code.
	 */
	lock_prev = hres_lock;
	now = hrestime;
	nslt = (int)(gethrtime() - hres_last_tick);
	if (nslt < 0) {
		/*
		 * nslt < 0 means a tick came between sampling
		 * gethrtime() and hres_last_tick; restart the loop
		 */

		goto loop;
	}
	now.tv_nsec += nslt;
	if (hrestime_adj != 0) {
		/*
		 * Apply at most (nslt >> ADJ_SHIFT) of the pending
		 * clock adjustment, clamped to hrestime_adj itself.
		 */
		if (hrestime_adj > 0) {
			adj = (nslt >> ADJ_SHIFT);
			if (adj > hrestime_adj)
				adj = (int)hrestime_adj;
		} else {
			adj = -(nslt >> ADJ_SHIFT);
			if (adj < hrestime_adj)
				adj = (int)hrestime_adj;
		}
		now.tv_nsec += adj;
	}
	while ((unsigned long)now.tv_nsec >= NANOSEC) {

		/*
		 * We might have a large adjustment or have been in the
		 * debugger for a long time; take care of (at most) four
		 * of those missed seconds (tv_nsec is 32 bits, so
		 * anything >4s will be wrapping around).  However,
		 * anything more than 2 seconds out of sync will trigger
		 * timedelta from clock() to go correct the time anyway,
		 * so do what we can, and let the big crowbar do the
		 * rest.  A similar correction while loop exists inside
		 * hres_tick(); in all cases we'd like tv_nsec to
		 * satisfy 0 <= tv_nsec < NANOSEC to avoid confusing
		 * user processes, but if tv_sec's a little behind for a
		 * little while, that's OK; time still monotonically
		 * increases.
		 */

		now.tv_nsec -= NANOSEC;
		now.tv_sec++;
	}
	/*
	 * Ignoring the low-order writer bit, if hres_lock changed while
	 * we were reading, the snapshot may be torn; try again.
	 */
	if ((hres_lock & ~1) != lock_prev)
		goto loop;

	*tp = now;
}
623 
624 void
625 gethrestime_lasttick(timespec_t *tp)
626 {
627 	int s;
628 
629 	s = hr_clock_lock();
630 	*tp = hrestime;
631 	hr_clock_unlock(s);
632 }
633 
634 time_t
635 gethrestime_sec(void)
636 {
637 	timestruc_t now;
638 
639 	gethrestime(&now);
640 	return (now.tv_sec);
641 }
642 
/*
 * Initialize a kernel thread's stack
 */

caddr_t
thread_stk_init(caddr_t stk)
{
	/* caller must hand us a STACK_ALIGN-aligned stack top */
	ASSERT(((uintptr_t)stk & (STACK_ALIGN - 1)) == 0);
	/* reserve an aligned minimum frame below the stack top */
	return (stk - SA(MINFRAME));
}
653 
/*
 * Initialize lwp's kernel stack.
 */

#ifdef TRAPTRACE
/*
 * There's a tricky interdependency here between use of sysenter and
 * TRAPTRACE which needs recording to avoid future confusion (this is
 * about the third time I've re-figured this out ..)
 *
 * Here's how debugging lcall works with TRAPTRACE.
 *
 * 1 We're in userland with a breakpoint on the lcall instruction.
 * 2 We execute the instruction - the instruction pushes the userland
 *   %ss, %esp, %efl, %cs, %eip on the stack and zips into the kernel
 *   via the call gate.
 * 3 The hardware raises a debug trap in kernel mode, the hardware
 *   pushes %efl, %cs, %eip and gets to dbgtrap via the idt.
 * 4 dbgtrap pushes the error code and trapno and calls cmntrap
 * 5 cmntrap finishes building a trap frame
 * 6 The TRACE_REGS macros in cmntrap copy a REGSIZE worth chunk
 *   off the stack into the traptrace buffer.
 *
 * This means that the traptrace buffer contains the wrong values in
 * %esp and %ss, but everything else in there is correct.
 *
 * Here's how debugging sysenter works with TRAPTRACE.
 *
 * a We're in userland with a breakpoint on the sysenter instruction.
 * b We execute the instruction - the instruction pushes -nothing-
 *   on the stack, but sets %cs, %eip, %ss, %esp to prearranged
 *   values to take us to sys_sysenter, at the top of the lwp's
 *   stack.
 * c goto 3
 *
 * At this point, because we got into the kernel without the requisite
 * five pushes on the stack, if we didn't make extra room, we'd
 * end up with the TRACE_REGS macro fetching the saved %ss and %esp
 * values from negative (unmapped) stack addresses -- which really bites.
 * That's why we do the '-= 8' below.
 *
 * XXX	Note that reading "up" lwp0's stack works because t0 is declared
 *	right next to t0stack in locore.s
 */
#endif

caddr_t
lwp_stk_init(klwp_t *lwp, caddr_t stk)
{
	caddr_t oldstk;
	struct pcb *pcb = &lwp->lwp_pcb;

	oldstk = stk;
	/* carve out room for a struct regs plus a minimum frame */
	stk -= SA(sizeof (struct regs) + SA(MINFRAME));
#ifdef TRAPTRACE
	stk -= 2 * sizeof (greg_t); /* space for phony %ss:%sp (see above) */
#endif
	/* round down to stack alignment and zero the reserved region */
	stk = (caddr_t)((uintptr_t)stk & ~(STACK_ALIGN - 1ul));
	bzero(stk, oldstk - stk);
	lwp->lwp_regs = (void *)(stk + SA(MINFRAME));

	/*
	 * Arrange that the virtualized %fs and %gs GDT descriptors
	 * have a well-defined initial state (present, ring 3
	 * and of type data).
	 */
#if defined(__amd64)
	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE)
		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;
	else
		pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_u32desc;
#elif defined(__i386)
	pcb->pcb_fsdesc = pcb->pcb_gsdesc = zero_udesc;
#endif	/* __i386 */
	lwp_installctx(lwp);
	return (stk);
}
731 
/* Tear down an lwp's kernel stack state; nothing to do on x86. */
/*ARGSUSED*/
void
lwp_stk_fini(klwp_t *lwp)
{}
736 
/*
 * If we're not the panic CPU, we wait in panic_idle for reboot.
 */
static void
panic_idle(void)
{
	/* drop to CLOCK_LEVEL so clock interrupts can still be fielded */
	splx(ipltospl(CLOCK_LEVEL));
	/*
	 * Save this thread's register state; presumably resumed via a
	 * longjmp by the panic/dump machinery -- confirm against callers.
	 */
	(void) setjmp(&curthread->t_pcb);

	for (;;)
		;
}
749 
/*
 * Stop the other CPUs by cross-calling them and forcing them to enter
 * the panic_idle() loop above.
 */
/*ARGSUSED*/
void
panic_stopcpus(cpu_t *cp, kthread_t *t, int spl)
{
	processorid_t i;
	cpuset_t xcset;

	(void) splzs();

	/* best-effort cross-call: targets end up spinning in panic_idle() */
	CPUSET_ALL_BUT(xcset, cp->cpu_id);
	xc_trycall(NULL, NULL, NULL, xcset, (int (*)())panic_idle);

	/* mark every other existing CPU as quiesced */
	for (i = 0; i < NCPU; i++) {
		if (i != cp->cpu_id && cpu[i] != NULL &&
		    (cpu[i]->cpu_flags & CPU_EXISTS))
			cpu[i]->cpu_flags |= CPU_QUIESCED;
	}
}
772 
/*
 * Platform callback following each entry to panicsys().
 */
/*ARGSUSED*/
void
panic_enter_hw(int spl)
{
	/* Nothing to do here */
}
782 
/*
 * Platform-specific code to execute after panicstr is set: we invoke
 * the PSM entry point to indicate that a panic has occurred.
 */
/*ARGSUSED*/
void
panic_quiesce_hw(panic_data_t *pdp)
{
	/* let the PSM module react to the panic */
	psm_notifyf(PSM_PANIC_ENTER);

#ifdef	TRAPTRACE
	/*
	 * Turn off TRAPTRACE
	 */
	TRAPTRACE_FREEZE;
#endif	/* TRAPTRACE */
}
800 
/*
 * Platform callback prior to writing crash dump.
 */
/*ARGSUSED*/
void
panic_dump_hw(int spl)
{
	/* Nothing to do here */
}
810 
/* Time-of-day fault notification; ignored on this platform. */
/*ARGSUSED*/
void
plat_tod_fault(enum tod_fault_type tod_bad)
{}
815 
/* Blacklisting of faulty components (by FMRI) is not supported here. */
/*ARGSUSED*/
int
blacklist(int cmd, const char *scheme, nvlist_t *fmri, const char *class)
{
	return (ENOTSUP);
}
822 
/*
 * The underlying console output routines are protected by raising IPL in case
 * we are still calling into the early boot services.  Once we start calling
 * the kernel console emulator, it will disable interrupts completely during
 * character rendering (see sysp_putchar, for example).  Refer to the comments
 * and code in common/os/console.c for more information on these callbacks.
 */
/*ARGSUSED*/
int
console_enter(int busy)
{
	/* returns the previous spl so console_exit() can restore it */
	return (splzs());
}
836 
/*ARGSUSED*/
void
console_exit(int busy, int spl)
{
	splx(spl);	/* restore the IPL saved by console_enter() */
}
843 
/*
 * Allocate a region of virtual address space, unmapped.
 * Stubbed out except on sparc, at least for now.
 */
/*ARGSUSED*/
void *
boot_virt_alloc(void *addr, size_t size)
{
	/* stub: hand back the requested address untouched */
	return (addr);
}
854 
855 volatile unsigned long	tenmicrodata;
856 
857 void
858 tenmicrosec(void)
859 {
860 	extern int	tsc_gethrtime_initted;
861 
862 	if (tsc_gethrtime_initted) {
863 		hrtime_t start, end;
864 		start = end =  gethrtime();
865 		while ((end - start) < (10 * (NANOSEC / MICROSEC))) {
866 			SMT_PAUSE();
867 			end = gethrtime();
868 		}
869 	} else {
870 		int i;
871 
872 		/*
873 		 * Artificial loop to induce delay.
874 		 */
875 		for (i = 0; i < microdata; i++)
876 			tenmicrodata = microdata;
877 	}
878 }
879 
/*
 * get_cpu_mstate() is passed an array of timestamps, NCMSTATES
 * long, and it fills in the array with the time spent on cpu in
 * each of the mstates, where time is returned in nsec.
 *
 * No guarantee is made that the returned values in times[] will
 * monotonically increase on sequential calls, although this will
 * be true in the long run. Any such guarantee must be handled by
 * the caller, if needed. This can happen if we fail to account
 * for elapsed time due to a generation counter conflict, yet we
 * did account for it on a prior call (see below).
 *
 * The complication is that the cpu in question may be updating
 * its microstate at the same time that we are reading it.
 * Because the microstate is only updated when the CPU's state
 * changes, the values in cpu_intracct[] can be indefinitely out
 * of date. To determine true current values, it is necessary to
 * compare the current time with cpu_mstate_start, and add the
 * difference to times[cpu_mstate].
 *
 * This can be a problem if those values are changing out from
 * under us. Because the code path in new_cpu_mstate() is
 * performance critical, we have not added a lock to it. Instead,
 * we have added a generation counter. Before beginning
 * modifications, the counter is set to 0. After modifications,
 * it is set to the old value plus one.
 *
 * get_cpu_mstate() will not consider the values of cpu_mstate
 * and cpu_mstate_start to be usable unless the value of
 * cpu_mstate_gen is both non-zero and unchanged, both before and
 * after reading the mstate information. Note that we must
 * protect against out-of-order loads around accesses to the
 * generation counter. Also, this is a best effort approach in
 * that we do not retry should the counter be found to have
 * changed.
 *
 * cpu_intracct[] is used to identify time spent in each CPU
 * mstate while handling interrupts. Such time should be reported
 * against system time, and so is subtracted out from its
 * corresponding cpu_acct[] time and added to
 * cpu_acct[CMS_SYSTEM].
 */

void
get_cpu_mstate(cpu_t *cpu, hrtime_t *times)
{
	int i;
	hrtime_t now, start;
	uint16_t gen;
	uint16_t state;
	hrtime_t intracct[NCMSTATES];

	/*
	 * Load all volatile state under the protection of membar.
	 * cpu_acct[cpu_mstate] must be loaded to avoid double counting
	 * of (now - cpu_mstate_start) by a change in CPU mstate that
	 * arrives after we make our last check of cpu_mstate_gen.
	 */

	now = gethrtime_unscaled();
	gen = cpu->cpu_mstate_gen;

	membar_consumer();	/* guarantee load ordering */
	start = cpu->cpu_mstate_start;
	state = cpu->cpu_mstate;
	for (i = 0; i < NCMSTATES; i++) {
		intracct[i] = cpu->cpu_intracct[i];
		times[i] = cpu->cpu_acct[i];
	}
	membar_consumer();	/* guarantee load ordering */

	/* only credit (now - start) when the snapshot was stable */
	if (gen != 0 && gen == cpu->cpu_mstate_gen && now > start)
		times[state] += now - start;

	/* move interrupt time out of each mstate and into CMS_SYSTEM */
	for (i = 0; i < NCMSTATES; i++) {
		if (i == CMS_SYSTEM)
			continue;
		times[i] -= intracct[i];
		if (times[i] < 0) {
			/* clamp underflow; credit only what was accrued */
			intracct[i] += times[i];
			times[i] = 0;
		}
		times[CMS_SYSTEM] += intracct[i];
		scalehrtime(&times[i]);
	}
	scalehrtime(&times[CMS_SYSTEM]);
}
967 
968 
969 /*
970  * This is a version of the rdmsr instruction that allows
971  * an error code to be returned in the case of failure.
972  */
973 int
974 checked_rdmsr(uint_t msr, uint64_t *value)
975 {
976 	if ((x86_feature & X86_MSR) == 0)
977 		return (ENOTSUP);
978 	*value = rdmsr(msr);
979 	return (0);
980 }
981 
982 /*
983  * This is a version of the wrmsr instruction that allows
984  * an error code to be returned in the case of failure.
985  */
986 int
987 checked_wrmsr(uint_t msr, uint64_t value)
988 {
989 	if ((x86_feature & X86_MSR) == 0)
990 		return (ENOTSUP);
991 	wrmsr(msr, value);
992 	return (0);
993 }
994 
/*
 * Return true if the given page VA can be read via /dev/kmem.
 */
/*ARGSUSED*/
int
plat_mem_valid_page(uintptr_t pageaddr, uio_rw_t rw)
{
	/* no pages are special-cased on this platform */
	return (0);
}
1004 
/* No platform-private pages to contribute to the crash dump. */
int
dump_plat_addr()
{
	return (0);
}
1010 
/* Platform dump pfn hook; nothing to do on this platform. */
void
dump_plat_pfn()
{
}
1015 
/* No platform-private data to write into the dump buffer. */
/*ARGSUSED*/
int
dump_plat_data(void *dump_cbuf)
{
	return (0);
}
1022