/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/cmn_err.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/x_call.h>
#include <sys/promif.h>
#include <sys/prom_isa.h>
#include <sys/privregs.h>
#include <sys/vmem.h>
#include <sys/atomic.h>
#include <sys/panic.h>
#include <sys/rwlock.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/kdi_machimpl.h>

/*
 * We are called with a pointer to a cell-sized argument array.
 * The service name (the first element of the argument array) is
 * the name of the callback being invoked.  When called, we are
 * running on the firmware's trap table as a trusted subroutine
 * of the firmware.
 *
 * We define entry points that allow callback handlers to be added
 * and removed dynamically.  This supports obpsym, a separate module
 * that can be dynamically loaded and unloaded and that registers its
 * callback handlers at load time.
 *
 * Note: the actual callback handler we register is the assembly-language
 * glue routine, callback_handler, which takes care of switching from a
 * 64-bit stack and environment to a 32-bit stack and environment, and
 * back again if the callback handler returns.  callback_handler calls
 * vx_handler to process the callback.
 */

static kmutex_t vx_cmd_lock;	/* protect vx_cmd table */

#define	VX_CMD_MAX	10
#define	ENDADDR(a)	&a[sizeof (a) / sizeof (a[0])]
#define	vx_cmd_end	((struct vx_cmd *)(ENDADDR(vx_cmd)))

static struct vx_cmd {
	char	*service;	/* Service name */
	int	take_tba;	/* If non-zero we take over the tba */
	void	(*func)(cell_t *argument_array);
} vx_cmd[VX_CMD_MAX+1];

void
init_vx_handler(void)
{
	extern int callback_handler(cell_t *arg_array);

	/*
	 * Initialize the lock protecting additions to and deletions from
	 * the vx_cmd table.  At callback time we don't need to grab
	 * this lock.  Callback handlers do not need to modify the
	 * callback handler table.
	 */
	mutex_init(&vx_cmd_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Tell OBP about our callback handler.
	 */
	(void) prom_set_callback((void *)callback_handler);
}

/*
 * Add a kernel callback handler to the kernel's list.
 * The table is static, so if you add a callback handler, increase
 * the value of VX_CMD_MAX.  Find the first empty slot and use it.
 */
void
add_vx_handler(char *name, int flag, void (*func)(cell_t *))
{
	struct vx_cmd *vp;

	mutex_enter(&vx_cmd_lock);
	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
		if (vp->service == NULL) {
			vp->service = name;
			vp->take_tba = flag;
			vp->func = func;
			mutex_exit(&vx_cmd_lock);
			return;
		}
	}
	mutex_exit(&vx_cmd_lock);

#ifdef	DEBUG

	/*
	 * There must be enough entries to handle all callback registrations.
	 * This shouldn't happen; increase VX_CMD_MAX if it does.
	 */
	cmn_err(CE_PANIC, "add_vx_handler <%s>", name);
	/* NOTREACHED */

#else	/* DEBUG */

	cmn_err(CE_WARN, "add_vx_handler: Can't add callback handler <%s>",
	    name);

#endif	/* DEBUG */
}

/*
 * Remove a vx_handler function -- find the name string in the table,
 * and clear it.
 */
void
remove_vx_handler(char *name)
{
	struct vx_cmd *vp;

	mutex_enter(&vx_cmd_lock);
	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
		if (vp->service == NULL)
			continue;
		if (strcmp(vp->service, name) != 0)
			continue;
		vp->service = NULL;
		vp->take_tba = 0;
		vp->func = NULL;
		mutex_exit(&vx_cmd_lock);
		return;
	}
	mutex_exit(&vx_cmd_lock);
	cmn_err(CE_WARN, "remove_vx_handler: <%s> not found", name);
}
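
/*
 * Illustrative sketch (compiled out): a dynamically loadable module such
 * as obpsym registers its handler when it loads and removes it when it
 * unloads.  The service name and the example_* routines below are
 * hypothetical, not part of this file.
 */
#if 0
static void
example_handler(cell_t *argument_array)
{
	/* argument_array[0] holds the service name; arguments follow it */
}

static void
example_load(void)
{
	/* non-zero flag: vx_handler switches to the kernel's trap table */
	add_vx_handler("example-service", 1, example_handler);
}

static void
example_unload(void)
{
	remove_vx_handler("example-service");
}
#endif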

int
vx_handler(cell_t *argument_array)
{
	char *name;
	struct vx_cmd *vp;
	void *old_tba;

	name = p1275_cell2ptr(*argument_array);

	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
		if (vp->service == NULL)
			continue;
		if (strcmp(vp->service, name) != 0)
			continue;
		if (vp->take_tba != 0) {
			reestablish_curthread();
			if (tba_taken_over != 0)
				old_tba = set_tba((void *)&trap_table);
		}
		vp->func(argument_array);
		if ((vp->take_tba != 0) && (tba_taken_over != 0))
			(void) set_tba(old_tba);
		return (0);	/* Service name was known */
	}

	return (-1);		/* Service name unknown */
}

/*
 * PROM Locking Primitives
 *
 * These routines are called immediately before and immediately after calling
 * into the firmware.  The firmware is single-threaded and assumes that the
 * kernel will implement locking to prevent simultaneous service calls.  In
 * addition, some service calls (particularly character rendering) can be
 * slow, so we would like to sleep if we cannot acquire the lock to allow the
 * caller's CPU to continue to perform useful work in the interim.  Service
 * routines may also be called early in boot as part of slave CPU startup
 * when mutexes and cvs are not yet available (i.e. they are still running on
 * the prom's TLB handlers and cannot touch curthread).  Therefore, these
 * routines must reduce to a simple compare-and-swap spin lock when necessary.
 * Finally, kernel code may wish to acquire the firmware lock before executing
 * a block of code that includes service calls, so we also allow the firmware
 * lock to be acquired recursively by the owning CPU after disabling preemption.
 *
 * To meet these constraints, the lock itself is implemented as a compare-and-
 * swap spin lock on the global prom_cpu pointer.  We implement recursion by
 * atomically incrementing the integer prom_holdcnt after acquiring the lock.
 * If the current CPU is an "adult" (determined by testing cpu_m.mutex_ready),
 * we disable preemption before acquiring the lock and leave it disabled once
 * the lock is held.  The kern_postprom() routine then enables preemption if
 * we drop the lock and prom_holdcnt returns to zero.  If the current CPU is
 * an adult and the lock is held by another adult CPU, we can safely sleep
 * until the lock is released.  To do so, we acquire the adaptive prom_mutex
 * and then sleep on prom_cv.  Therefore, service routines must not be called
 * from above LOCK_LEVEL on any adult CPU.  Finally, if recursive entry is
 * attempted on an adult CPU, we must also verify that curthread matches the
 * saved prom_thread (the original owner) to ensure that low-level interrupt
 * threads do not step on other threads running on the same CPU.
 */

static cpu_t *volatile prom_cpu;
static kthread_t *volatile prom_thread;
static uint32_t prom_holdcnt;
static kmutex_t prom_mutex;
static kcondvar_t prom_cv;

/*
 * The debugger uses PROM services, and is thus unable to run if any of the
 * CPUs on the system are executing in the PROM at the time of debugger entry.
 * If a CPU is determined to be in the PROM when the debugger is entered,
 * prom_exit_enter_debugger will be set, thus triggering a programmed debugger
 * entry when the given CPU returns from the PROM.  That CPU is then released
 * by the debugger, and is allowed to complete PROM-related work.
 */
int prom_exit_enter_debugger;

void
kern_preprom(void)
{
	for (;;) {
		/*
		 * Load the current CPU pointer and examine the mutex_ready bit.
		 * It doesn't matter if we are preempted here because we are
		 * only trying to determine if we are in the *set* of mutex
		 * ready CPUs.  We cannot disable preemption until we confirm
		 * that we are running on a CPU in this set, since a call to
		 * kpreempt_disable() requires access to curthread.
		 */
		processorid_t cpuid = getprocessorid();
		cpu_t *cp = cpu[cpuid];
		cpu_t *prcp;

		if (panicstr)
			return; /* just return if we are currently panicking */

		if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
			/*
			 * Disable preemption, and reload the current CPU.  We
			 * can't move from a mutex_ready cpu to a non-ready cpu
			 * so we don't need to re-check cp->cpu_m.mutex_ready.
			 */
			kpreempt_disable();
			cp = CPU;
			ASSERT(cp->cpu_m.mutex_ready);

			/*
			 * Try the lock.  If we don't get the lock, re-enable
			 * preemption and see if we should sleep.  If we are
			 * already the lock holder, remove the effect of the
			 * previous kpreempt_disable() before returning since
			 * preemption was disabled by an earlier kern_preprom.
			 */
			prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL ||
			    (prcp == cp && prom_thread == curthread)) {
				if (prcp == cp)
					kpreempt_enable();
				break;
			}

			kpreempt_enable();

			/*
			 * We have to be very careful here since both prom_cpu
			 * and prcp->cpu_m.mutex_ready can be changed at any
			 * time by a non mutex_ready cpu holding the lock.
			 * If the owner is mutex_ready, holding prom_mutex
			 * prevents kern_postprom() from completing.  If the
			 * owner isn't mutex_ready, we only know it will clear
			 * prom_cpu before changing cpu_m.mutex_ready, so we
			 * issue a membar after checking mutex_ready and then
			 * re-verify that prom_cpu is still held by the same
			 * cpu before actually proceeding to cv_wait().
			 */
			mutex_enter(&prom_mutex);
			prcp = prom_cpu;
			if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
				membar_consumer();
				if (prcp == prom_cpu)
					cv_wait(&prom_cv, &prom_mutex);
			}
			mutex_exit(&prom_mutex);

		} else {
			/*
			 * If we are not yet mutex_ready, just attempt to grab
			 * the lock.  If we get it or already hold it, break.
			 */
			ASSERT(getpil() == PIL_MAX);
			prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL || prcp == cp)
				break;
		}
	}

	/*
	 * We now hold the prom_cpu lock.  Increment the hold count by one
	 * and assert our current state before returning to the caller.
	 */
	atomic_inc_32(&prom_holdcnt);
	ASSERT(prom_holdcnt >= 1);
	prom_thread = curthread;
}

/*
 * Drop the prom lock if it is held by the current CPU.  If the lock is held
 * recursively, return without clearing prom_cpu.  If the hold count is now
 * zero, clear prom_cpu and cv_signal any waiting CPU.
 */
void
kern_postprom(void)
{
	processorid_t cpuid = getprocessorid();
	cpu_t *cp = cpu[cpuid];

	if (panicstr)
		return; /* do not modify lock further if we have panicked */

	if (prom_cpu != cp)
		panic("kern_postprom: not owner, cp=%p owner=%p",
		    (void *)cp, (void *)prom_cpu);

	if (prom_holdcnt == 0)
		panic("kern_postprom: prom_holdcnt == 0, owner=%p",
		    (void *)prom_cpu);

	if (atomic_dec_32_nv(&prom_holdcnt) != 0)
		return; /* prom lock is held recursively by this CPU */

	if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
		kmdb_enter();

	prom_thread = NULL;
	membar_producer();

	prom_cpu = NULL;
	membar_producer();

	if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
		mutex_enter(&prom_mutex);
		cv_signal(&prom_cv);
		mutex_exit(&prom_mutex);
		kpreempt_enable();
	}
}

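/*
 * Illustrative sketch (compiled out) of the usage pattern these primitives
 * support: bracketing a block of service calls so the whole sequence is
 * atomic with respect to other CPUs.  Recursive acquisition lets each
 * promif call below re-enter the lock harmlessly.  example_locked_prom_io()
 * is hypothetical, not part of this file.
 */
#if 0
static void
example_locked_prom_io(void)
{
	kern_preprom();		/* may spin or sleep; see above */
	prom_printf("line one\n");
	prom_printf("line two\n");	/* no other CPU can interleave */
	kern_postprom();	/* enables preemption when holdcnt hits 0 */
}
#endif
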
/*
 * If the frame buffer device is busy, briefly capture the other CPUs so that
 * another CPU executing code to manipulate the device does not execute at the
 * same time we are rendering characters.  Refer to the comments and code in
 * common/os/console.c for more information on these callbacks.
 *
 * Notice that we explicitly acquire the PROM lock using kern_preprom() prior
 * to idling other CPUs.  The idling mechanism will cross-trap the other CPUs
 * and have them spin at MAX(%pil, XCALL_PIL), so we must be sure that none of
 * them are holding the PROM lock before we idle them and then call into the
 * PROM routines that render characters to the frame buffer.
 */
int
console_enter(int busy)
{
	int s = 0;

	if (busy && panicstr == NULL) {
		kern_preprom();
		s = splhi();
		idle_other_cpus();
	}

	return (s);
}

void
console_exit(int busy, int spl)
{
	if (busy && panicstr == NULL) {
		resume_other_cpus();
		splx(spl);
		kern_postprom();
	}
}

/*
 * This routine is a special form of pause_cpus().  It ensures that
 * prom functions are callable while the cpus are paused.
 */
void
promsafe_pause_cpus(void)
{
	pause_cpus(NULL, NULL);

	/* If some other cpu is entering or is in the prom, spin */
	while (prom_cpu || mutex_owner(&prom_mutex)) {

		start_cpus();
		mutex_enter(&prom_mutex);

		/* Wait for other cpu to exit prom */
		while (prom_cpu)
			cv_wait(&prom_cv, &prom_mutex);

		mutex_exit(&prom_mutex);
		pause_cpus(NULL, NULL);
	}

	/* At this point all cpus are paused and none are in the prom */
}

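/*
 * Illustrative sketch (compiled out): a caller that must quiesce the other
 * CPUs but still expects PROM services to work uses the promsafe variant.
 * example_quiesced_update() is hypothetical.
 */
#if 0
static void
example_quiesced_update(void)
{
	promsafe_pause_cpus();
	/* all other cpus are paused and none are in the prom */
	start_cpus();
}
#endif
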
/*
 * This routine is a special form of xc_attention().  It ensures that
 * prom functions are callable while the cpus are at attention.
 */
void
promsafe_xc_attention(cpuset_t cpuset)
{
	xc_attention(cpuset);

	/* If some other cpu is entering or is in the prom, spin */
	while (prom_cpu || mutex_owner(&prom_mutex)) {

		xc_dismissed(cpuset);
		mutex_enter(&prom_mutex);

		/* Wait for other cpu to exit prom */
		while (prom_cpu)
			cv_wait(&prom_cv, &prom_mutex);

		mutex_exit(&prom_mutex);
		xc_attention(cpuset);
	}

	/* At this point all cpus are at attention and none are in the prom */
}


#if defined(PROM_32BIT_ADDRS)

#include <sys/promimpl.h>
#include <vm/seg_kmem.h>
#include <sys/kmem.h>
#include <sys/bootconf.h>

/*
 * These routines are only used to work around "poor feature interaction"
 * in OBP.  See bug 4115680 for details.
 *
 * Many of the promif routines need to allocate temporary buffers
 * with 32-bit addresses to pass in/out of the CIF.  The lifetime
 * of the buffers is extremely short: they are allocated and freed
 * around the CIF call.  We use vmem_alloc() to cache 32-bit memory.
 *
 * Note the code in promplat_free() to prevent exhausting the 32-bit
 * heap during boot.
 */
static void *promplat_last_free = NULL;
static size_t promplat_last_size;
static vmem_t *promplat_arena;
static kmutex_t promplat_lock;  /* protect arena, last_free, and last_size */

void *
promplat_alloc(size_t size)
{
	mutex_enter(&promplat_lock);
	if (promplat_arena == NULL) {
		promplat_arena = vmem_create("promplat", NULL, 0, 8,
		    segkmem_alloc, segkmem_free, heap32_arena, 0, VM_SLEEP);
	}
	mutex_exit(&promplat_lock);

	return (vmem_alloc(promplat_arena, size, VM_NOSLEEP));
}

/*
 * Delaying the free() of small allocations gets more mileage
 * from pages during boot; otherwise a cycle of allocate/free
 * calls could burn through available heap32 space too quickly.
 */
void
promplat_free(void *p, size_t size)
{
	void *p2 = NULL;
	size_t s2;

	/*
	 * If VM is initialized, clean up any delayed free().
	 */
	if (kvseg.s_base != 0 && promplat_last_free != NULL) {
		mutex_enter(&promplat_lock);
		p2 = promplat_last_free;
		s2 = promplat_last_size;
		promplat_last_free = NULL;
		promplat_last_size = 0;
		mutex_exit(&promplat_lock);
		if (p2 != NULL) {
			vmem_free(promplat_arena, p2, s2);
			p2 = NULL;
		}
	}

	/*
	 * Do the free if VM is initialized or it's a large allocation.
	 */
	if (kvseg.s_base != 0 || size >= PAGESIZE) {
		vmem_free(promplat_arena, p, size);
		return;
	}

	/*
	 * Otherwise, do the last free request and delay this one.
	 */
	mutex_enter(&promplat_lock);
	if (promplat_last_free != NULL) {
		p2 = promplat_last_free;
		s2 = promplat_last_size;
	}
	promplat_last_free = p;
	promplat_last_size = size;
	mutex_exit(&promplat_lock);

	if (p2 != NULL)
		vmem_free(promplat_arena, p2, s2);
}

void
promplat_bcopy(const void *src, void *dst, size_t count)
{
	bcopy(src, dst, count);
}

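/*
 * Illustrative sketch (compiled out) of the intended alloc/copy/free
 * pattern around a CIF call.  example_cif_call() and the elided service
 * invocation are hypothetical.
 */
#if 0
static void
example_cif_call(const char *str, size_t len)
{
	void *buf = promplat_alloc(len);	/* 32-bit addressable */

	if (buf == NULL)
		return;
	promplat_bcopy(str, buf, len);
	/* ... pass the 32-bit address of buf through the CIF here ... */
	promplat_free(buf, len);	/* small frees may be delayed */
}
#endif
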
#endif /* PROM_32BIT_ADDRS */

static prom_generation_cookie_t prom_tree_gen;
static krwlock_t prom_tree_lock;

int
prom_tree_access(int (*callback)(void *arg, int has_changed), void *arg,
    prom_generation_cookie_t *ckp)
{
	int chg, rv;

	rw_enter(&prom_tree_lock, RW_READER);
	/*
	 * If the tree has changed since the caller last accessed it,
	 * pass 1 as the second argument to the callback function;
	 * otherwise pass 0.
	 */
	if (ckp != NULL && *ckp != prom_tree_gen) {
		*ckp = prom_tree_gen;
		chg = 1;
	} else
		chg = 0;
	rv = callback(arg, chg);
	rw_exit(&prom_tree_lock);
	return (rv);
}

int
prom_tree_update(int (*callback)(void *arg), void *arg)
{
	int rv;

	rw_enter(&prom_tree_lock, RW_WRITER);
	prom_tree_gen++;
	rv = callback(arg);
	rw_exit(&prom_tree_lock);
	return (rv);
}
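
/*
 * Illustrative sketch (compiled out) of the generation-cookie protocol:
 * a caller that caches data derived from the device tree keeps the cookie
 * from its previous access and rebuilds its cache only when the callback
 * reports a change.  The example_* names are hypothetical.
 */
#if 0
static prom_generation_cookie_t example_gen;

static int
example_walk(void *arg, int has_changed)
{
	if (has_changed) {
		/* tree modified since last access: drop any cached nodes */
	}
	/* read the tree here; the reader lock is held for the duration */
	return (0);
}

static void
example_tree_read(void)
{
	(void) prom_tree_access(example_walk, NULL, &example_gen);
}
#endif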