xref: /titanic_44/usr/src/uts/sun4/os/prom_subr.c (revision 60972f37b111e11c7b4abcfc08e54c7839fe5e63)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/cmn_err.h>
31 #include <sys/mutex.h>
32 #include <sys/systm.h>
33 #include <sys/sysmacros.h>
34 #include <sys/machsystm.h>
35 #include <sys/archsystm.h>
36 #include <sys/x_call.h>
37 #include <sys/promif.h>
38 #include <sys/prom_isa.h>
39 #include <sys/privregs.h>
40 #include <sys/vmem.h>
41 #include <sys/atomic.h>
42 #include <sys/panic.h>
43 #include <sys/rwlock.h>
44 #include <sys/reboot.h>
45 #include <sys/kdi.h>
46 #include <sys/kdi_machimpl.h>
47 
48 /*
49  * We are called with a pointer to a cell-sized argument array.
50  * The service name (the first element of the argument array) is
51  * the name of the callback being invoked.  When called, we are
52  * running on the firmware's trap table as a trusted subroutine
53  * of the firmware.
54  *
55  * We define entry points to allow callback handlers to be dynamically
56  * added and removed.  This supports obpsym, a separate module that can
57  * be dynamically loaded and unloaded and that registers its callback
58  * handlers dynamically.
59  *
60  * Note: the actual callback handler we register is the assembly-language
61  * glue routine, callback_handler, which takes care of switching from a
62  * 64-bit stack and environment to a 32-bit stack and environment, and
63  * back again if the callback handler returns.  callback_handler calls
64  * vx_handler to process the callback.
65  */
66 
67 static kmutex_t vx_cmd_lock;	/* protect vx_cmd table */
68 
69 #define	VX_CMD_MAX	10
70 #define	ENDADDR(a)	&a[sizeof (a) / sizeof (a[0])]
71 #define	vx_cmd_end	((struct vx_cmd *)(ENDADDR(vx_cmd)))
72 
73 static struct vx_cmd {
74 	char	*service;	/* Service name */
75 	int	take_tba;	/* If non-zero, we take over the tba */
76 	void	(*func)(cell_t *argument_array);
77 } vx_cmd[VX_CMD_MAX+1];
78 
79 void
80 init_vx_handler(void)
81 {
82 	extern int callback_handler(cell_t *arg_array);
83 
84 	/*
85 	 * Initialize the lock protecting additions to and deletions from
86 	 * the vx_cmd table.  We don't need to grab this lock at callback
87 	 * time, since callback handlers do not modify the callback handler
88 	 * table.
89 	 */
90 	mutex_init(&vx_cmd_lock, NULL, MUTEX_DEFAULT, NULL);
91 
92 	/*
93 	 * Tell OBP about our callback handler.
94 	 */
95 	(void) prom_set_callback((void *)callback_handler);
96 }
97 
98 /*
99  * Add a kernel callback handler to the kernel's list.
100  * The table is static, so if it runs out of free slots, increase the
101  * value of VX_CMD_MAX.  The first empty slot found is used.
102  */
103 void
104 add_vx_handler(char *name, int flag, void (*func)(cell_t *))
105 {
106 	struct vx_cmd *vp;
107 
108 	mutex_enter(&vx_cmd_lock);
109 	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
110 		if (vp->service == NULL) {
111 			vp->service = name;
112 			vp->take_tba = flag;
113 			vp->func = func;
114 			mutex_exit(&vx_cmd_lock);
115 			return;
116 		}
117 	}
118 	mutex_exit(&vx_cmd_lock);
119 
120 #ifdef	DEBUG
121 
122 	/*
123 	 * The table must have enough entries for every callback handler.
124 	 * This shouldn't happen; if it does, increase VX_CMD_MAX.
125 	 */
126 	cmn_err(CE_PANIC, "add_vx_handler <%s>", name);
127 	/* NOTREACHED */
128 
129 #else	/* DEBUG */
130 
131 	cmn_err(CE_WARN, "add_vx_handler: Can't add callback handler <%s>",
132 	    name);
133 
134 #endif	/* DEBUG */
135 
136 }
137 
138 /*
139  * Remove a vx_handler function -- find the name string in the table,
140  * and clear it.
141  */
142 void
143 remove_vx_handler(char *name)
144 {
145 	struct vx_cmd *vp;
146 
147 	mutex_enter(&vx_cmd_lock);
148 	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
149 		if (vp->service == NULL)
150 			continue;
151 		if (strcmp(vp->service, name) != 0)
152 			continue;
153 		vp->service = NULL;
154 		vp->take_tba = 0;
155 		vp->func = NULL;
156 		mutex_exit(&vx_cmd_lock);
157 		return;
158 	}
159 	mutex_exit(&vx_cmd_lock);
160 	cmn_err(CE_WARN, "remove_vx_handler: <%s> not found", name);
161 }
162 
163 int
164 vx_handler(cell_t *argument_array)
165 {
166 	char *name;
167 	struct vx_cmd *vp;
168 	void *old_tba;
169 
170 	name = p1275_cell2ptr(*argument_array);
171 
172 	for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
173 		if (vp->service == NULL)
174 			continue;
175 		if (strcmp(vp->service, name) != 0)
176 			continue;
177 		if (vp->take_tba != 0)  {
178 			reestablish_curthread();
179 			if (tba_taken_over != 0)
180 				old_tba = set_tba((void *)&trap_table);
181 		}
182 		vp->func(argument_array);
183 		if ((vp->take_tba != 0) && (tba_taken_over != 0))
184 			(void) set_tba(old_tba);
185 		return (0);	/* Service name was known */
186 	}
187 
188 	return (-1);		/* Service name unknown */
189 }
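
/*
 * Illustrative sketch (not part of this file, never compiled): how a
 * dynamically loadable module such as obpsym might use the entry points
 * above.  The service name "example-service" and the functions below are
 * hypothetical.
 */
#ifdef	VX_HANDLER_EXAMPLE
static void
example_vx_func(cell_t *argument_array)
{
	/*
	 * argument_array[0] is the service-name cell; the remaining
	 * cells carry the firmware's arguments and result slots.
	 */
}

static void
example_register(void)
{
	/* non-zero take_tba: run on the kernel trap table in the callback */
	add_vx_handler("example-service", 1, example_vx_func);
}

static void
example_unregister(void)
{
	remove_vx_handler("example-service");
}
#endif	/* VX_HANDLER_EXAMPLE */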
190 
191 /*
192  * PROM Locking Primitives
193  *
194  * These routines are called immediately before and immediately after calling
195  * into the firmware.  The firmware is single-threaded and assumes that the
196  * kernel will implement locking to prevent simultaneous service calls.  In
197  * addition, some service calls (particularly character rendering) can be
198  * slow, so we would like to sleep if we cannot acquire the lock to allow the
199  * caller's CPU to continue to perform useful work in the interim.  Service
200  * routines may also be called early in boot as part of slave CPU startup
201  * when mutexes and cvs are not yet available (i.e. they are still running on
202  * the prom's TLB handlers and cannot touch curthread).  Therefore, these
203  * routines must reduce to a simple compare-and-swap spin lock when necessary.
204  * Finally, kernel code may wish to acquire the firmware lock before executing
205  * a block of code that includes service calls, so we also allow the firmware
206  * lock to be acquired recursively by the owning CPU after disabling preemption.
207  *
208  * To meet these constraints, the lock itself is implemented as a compare-and-
209  * swap spin lock on the global prom_cpu pointer.  We implement recursion by
210  * atomically incrementing the integer prom_holdcnt after acquiring the lock.
211  * If the current CPU is an "adult" (determined by testing cpu_m.mutex_ready),
212  * we disable preemption before acquiring the lock and leave it disabled once
213  * the lock is held.  The kern_postprom() routine then enables preemption if
214  * we drop the lock and prom_holdcnt returns to zero.  If the current CPU is
215  * an adult and the lock is held by another adult CPU, we can safely sleep
216  * until the lock is released.  To do so, we acquire the adaptive prom_mutex
217  * and then sleep on prom_cv.  Therefore, service routines must not be called
218  * from above LOCK_LEVEL on any adult CPU.  Finally, if recursive entry is
219  * attempted on an adult CPU, we must also verify that curthread matches the
220  * saved prom_thread (the original owner) to ensure that low-level interrupt
221  * threads do not step on other threads running on the same CPU.
222  */
223 
224 static cpu_t *volatile prom_cpu;
225 static kthread_t *volatile prom_thread;
226 static uint32_t prom_holdcnt;
227 static kmutex_t prom_mutex;
228 static kcondvar_t prom_cv;
229 
230 /*
231  * The debugger uses PROM services, and is thus unable to run if any of the
232  * CPUs on the system are executing in the PROM at the time of debugger entry.
233  * If a CPU is determined to be in the PROM when the debugger is entered,
234  * prom_exit_enter_debugger will be set, thus triggering a programmed debugger
235  * entry when the given CPU returns from the PROM.  That CPU is then released by
236  * the debugger, and is allowed to complete PROM-related work.
237  */
238 int prom_exit_enter_debugger;
239 
240 void
241 kern_preprom(void)
242 {
243 	for (;;) {
244 		/*
245 		 * Load the current CPU pointer and examine the mutex_ready bit.
246 		 * It doesn't matter if we are preempted here because we are
247 		 * only trying to determine if we are in the *set* of mutex
248 		 * ready CPUs.  We cannot disable preemption until we confirm
249 		 * that we are running on a CPU in this set, since a call to
250 		 * kpreempt_disable() requires access to curthread.
251 		 */
252 		processorid_t cpuid = getprocessorid();
253 		cpu_t *cp = cpu[cpuid];
254 		cpu_t *prcp;
255 
256 		if (panicstr)
257 			return; /* just return if we are currently panicking */
258 
259 		if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
260 			/*
261 			 * Disable preemption, and reload the current CPU.  We
262 			 * can't move from a mutex_ready cpu to a non-ready cpu,
263 			 * so we don't need to re-check cp->cpu_m.mutex_ready.
264 			 */
265 			kpreempt_disable();
266 			cp = CPU;
267 			ASSERT(cp->cpu_m.mutex_ready);
268 
269 			/*
270 			 * Try the lock.  If we don't get the lock, re-enable
271 			 * preemption and see if we should sleep.  If we are
272 			 * already the lock holder, undo the kpreempt_disable()
273 			 * we just did, since preemption was already disabled
274 			 * by an earlier kern_preprom().
275 			 */
276 			prcp = casptr((void *)&prom_cpu, NULL, cp);
277 			if (prcp == NULL ||
278 			    (prcp == cp && prom_thread == curthread)) {
279 				if (prcp == cp)
280 					kpreempt_enable();
281 				break;
282 			}
283 
284 			kpreempt_enable();
285 
286 			/*
287 			 * We have to be very careful here since both prom_cpu
288 			 * and prcp->cpu_m.mutex_ready can be changed at any
289 			 * time by a non mutex_ready cpu holding the lock.
290 			 * If the owner is mutex_ready, holding prom_mutex
291 			 * prevents kern_postprom() from completing.  If the
292 			 * owner isn't mutex_ready, we only know it will clear
293 			 * prom_cpu before changing cpu_m.mutex_ready, so we
294 			 * issue a membar after checking mutex_ready and then
295 			 * re-verify that prom_cpu is still held by the same
296 			 * cpu before actually proceeding to cv_wait().
297 			 */
298 			mutex_enter(&prom_mutex);
299 			prcp = prom_cpu;
300 			if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
301 				membar_consumer();
302 				if (prcp == prom_cpu)
303 					cv_wait(&prom_cv, &prom_mutex);
304 			}
305 			mutex_exit(&prom_mutex);
306 
307 		} else {
308 			/*
309 			 * If we are not yet mutex_ready, just attempt to grab
310 			 * the lock.  If we get it or already hold it, break.
311 			 */
312 			ASSERT(getpil() == PIL_MAX);
313 			prcp = casptr((void *)&prom_cpu, NULL, cp);
314 			if (prcp == NULL || prcp == cp)
315 				break;
316 		}
317 	}
318 
319 	/*
320 	 * We now hold the prom_cpu lock.  Increment the hold count by one
321 	 * and assert our current state before returning to the caller.
322 	 */
323 	atomic_add_32(&prom_holdcnt, 1);
324 	ASSERT(prom_holdcnt >= 1);
325 	prom_thread = curthread;
326 }
327 
328 /*
329  * Drop the prom lock if it is held by the current CPU.  If the lock is held
330  * recursively, return without clearing prom_cpu.  If the hold count is now
331  * zero, clear prom_cpu and cv_signal any waiting CPU.
332  */
333 void
334 kern_postprom(void)
335 {
336 	processorid_t cpuid = getprocessorid();
337 	cpu_t *cp = cpu[cpuid];
338 
339 	if (panicstr)
340 		return; /* do not modify lock further if we have panicked */
341 
342 	if (prom_cpu != cp)
343 		panic("kern_postprom: not owner, cp=%p owner=%p", cp, prom_cpu);
344 
345 	if (prom_holdcnt == 0)
346 		panic("kern_postprom: prom_holdcnt == 0, owner=%p", prom_cpu);
347 
348 	if (atomic_add_32_nv(&prom_holdcnt, -1) != 0)
349 		return; /* prom lock is held recursively by this CPU */
350 
351 	if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
352 		kmdb_enter();
353 
354 	prom_thread = NULL;
355 	membar_producer();
356 
357 	prom_cpu = NULL;
358 	membar_producer();
359 
360 	if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
361 		mutex_enter(&prom_mutex);
362 		cv_signal(&prom_cv);
363 		mutex_exit(&prom_mutex);
364 		kpreempt_enable();
365 	}
366 }
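
/*
 * Illustrative sketch (not part of this file, never compiled): holding the
 * firmware lock across a block of service calls, as described above.  The
 * prom_printf() calls nest the lock recursively, since kern_preprom() and
 * kern_postprom() are invoked immediately before and after each call into
 * the firmware.
 */
#ifdef	PROM_LOCK_EXAMPLE
static void
example_prom_sequence(void)
{
	kern_preprom();				/* take (or nest) the lock */
	prom_printf("first service call\n");	/* nested acquisition */
	prom_printf("second service call\n");
	kern_postprom();			/* drop our hold */
}
#endif	/* PROM_LOCK_EXAMPLE */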
367 
368 /*
369  * If the frame buffer device is busy, briefly capture the other CPUs so that
370  * a CPU executing code that manipulates the device does not run at the
371  * same time we are rendering characters.  Refer to the comments and code in
372  * common/os/console.c for more information on these callbacks.
373  *
374  * Notice that we explicitly acquire the PROM lock using kern_preprom() prior
375  * to idling other CPUs.  The idling mechanism will cross-trap the other CPUs
376  * and have them spin at MAX(%pil, XCALL_PIL), so we must be sure that none of
377  * them are holding the PROM lock before we idle them and then call into the
378  * PROM routines that render characters to the frame buffer.
379  */
380 int
381 console_enter(int busy)
382 {
383 	int s = 0;
384 
385 	if (busy && panicstr == NULL) {
386 		kern_preprom();
387 		s = splhi();
388 		idle_other_cpus();
389 	}
390 
391 	return (s);
392 }
393 
394 void
395 console_exit(int busy, int spl)
396 {
397 	if (busy && panicstr == NULL) {
398 		resume_other_cpus();
399 		splx(spl);
400 		kern_postprom();
401 	}
402 }
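
/*
 * Illustrative sketch (not part of this file, never compiled): the shape of
 * the rendering path in common/os/console.c that these callbacks support.
 * example_render() is hypothetical; prom_writestr() is a promif service.
 */
#ifdef	CONSOLE_CALLBACK_EXAMPLE
static void
example_render(const char *buf, size_t len, int busy)
{
	int spl = console_enter(busy);	/* PROM lock held, other CPUs idled */

	prom_writestr(buf, len);	/* render via the firmware */
	console_exit(busy, spl);	/* resume CPUs, drop the PROM lock */
}
#endif	/* CONSOLE_CALLBACK_EXAMPLE */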
403 
404 /*
405  * This routine is a special form of pause_cpus().  It ensures that
406  * prom functions are callable while the cpus are paused.
407  */
408 void
409 promsafe_pause_cpus(void)
410 {
411 	pause_cpus(NULL);
412 
413 	/* If some other cpu is entering or is in the prom, spin */
414 	while (prom_cpu || mutex_owner(&prom_mutex)) {
415 
416 		start_cpus();
417 		mutex_enter(&prom_mutex);
418 
419 		/* Wait for other cpu to exit prom */
420 		while (prom_cpu)
421 			cv_wait(&prom_cv, &prom_mutex);
422 
423 		mutex_exit(&prom_mutex);
424 		pause_cpus(NULL);
425 	}
426 
427 	/* At this point all cpus are paused and none are in the prom */
428 }
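
/*
 * Illustrative sketch (not part of this file, never compiled): a caller
 * that must stop the other CPUs, but may still need PROM services while
 * they are stopped, pairs promsafe_pause_cpus() with start_cpus().  The
 * cpu_lock bracket reflects the usual pause_cpus() convention.
 */
#ifdef	PROMSAFE_PAUSE_EXAMPLE
static void
example_critical_update(void)
{
	mutex_enter(&cpu_lock);
	promsafe_pause_cpus();
	/* update state that no other CPU may observe mid-change */
	start_cpus();
	mutex_exit(&cpu_lock);
}
#endif	/* PROMSAFE_PAUSE_EXAMPLE */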
429 
430 /*
431  * This routine is a special form of xc_attention().  It ensures that
432  * prom functions are callable while the cpus are at attention.
433  */
434 void
435 promsafe_xc_attention(cpuset_t cpuset)
436 {
437 	xc_attention(cpuset);
438 
439 	/* If some other cpu is entering or is in the prom, spin */
440 	while (prom_cpu || mutex_owner(&prom_mutex)) {
441 
442 		xc_dismissed(cpuset);
443 		mutex_enter(&prom_mutex);
444 
445 		/* Wait for other cpu to exit prom */
446 		while (prom_cpu)
447 			cv_wait(&prom_cv, &prom_mutex);
448 
449 		mutex_exit(&prom_mutex);
450 		xc_attention(cpuset);
451 	}
452 
453 	/* At this point all cpus are at attention and none are in the prom */
454 }
455 
456 
457 #if defined(PROM_32BIT_ADDRS)
458 
459 #include <sys/promimpl.h>
460 #include <vm/seg_kmem.h>
461 #include <sys/kmem.h>
462 #include <sys/bootconf.h>
463 
464 /*
465  * These routines are only used to work around "poor feature interaction"
466  * in OBP.  See bug 4115680 for details.
467  *
468  * Many of the promif routines need to allocate temporary buffers
469  * with 32-bit addresses to pass in/out of the CIF.  The lifetime
470  * of the buffers is extremely short: they are allocated and freed
471  * around the CIF call.  We use a vmem arena to cache 32-bit memory.
472  *
473  * Note the code in promplat_free() to prevent exhausting the 32 bit
474  * heap during boot.
475  */
476 static void *promplat_last_free = NULL;
477 static size_t promplat_last_size;
478 static vmem_t *promplat_arena;
479 static kmutex_t promplat_lock;  /* protect arena, last_free, and last_size */
480 
481 void *
482 promplat_alloc(size_t size)
483 {
484 
485 	mutex_enter(&promplat_lock);
486 	if (promplat_arena == NULL) {
487 		promplat_arena = vmem_create("promplat", NULL, 0, 8,
488 		    segkmem_alloc, segkmem_free, heap32_arena, 0, VM_SLEEP);
489 	}
490 	mutex_exit(&promplat_lock);
491 
492 	return (vmem_alloc(promplat_arena, size, VM_NOSLEEP));
493 }
494 
495 /*
496  * Delaying the free() of small allocations gets more mileage
497  * from pages during boot; otherwise a cycle of allocate/free
498  * calls could burn through available heap32 space too quickly.
499  */
500 void
501 promplat_free(void *p, size_t size)
502 {
503 	void *p2 = NULL;
504 	size_t s2;
505 
506 	/*
507 	 * If VM is initialized, clean up any delayed free().
508 	 */
509 	if (kvseg.s_base != 0 && promplat_last_free != NULL) {
510 		mutex_enter(&promplat_lock);
511 		p2 = promplat_last_free;
512 		s2 = promplat_last_size;
513 		promplat_last_free = NULL;
514 		promplat_last_size = 0;
515 		mutex_exit(&promplat_lock);
516 		if (p2 != NULL) {
517 			vmem_free(promplat_arena, p2, s2);
518 			p2 = NULL;
519 		}
520 	}
521 
522 	/*
523 	 * Do the free if VM is initialized or it's a large allocation.
524 	 */
525 	if (kvseg.s_base != 0 || size >= PAGESIZE) {
526 		vmem_free(promplat_arena, p, size);
527 		return;
528 	}
529 
530 	/*
531 	 * Otherwise, do the last free request and delay this one.
532 	 */
533 	mutex_enter(&promplat_lock);
534 	if (promplat_last_free != NULL) {
535 		p2 = promplat_last_free;
536 		s2 = promplat_last_size;
537 	}
538 	promplat_last_free = p;
539 	promplat_last_size = size;
540 	mutex_exit(&promplat_lock);
541 
542 	if (p2 != NULL)
543 		vmem_free(promplat_arena, p2, s2);
544 }
545 
546 void
547 promplat_bcopy(const void *src, void *dst, size_t count)
548 {
549 	bcopy(src, dst, count);
550 }
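
/*
 * Illustrative sketch (not part of this file, never compiled): the pattern
 * these helpers exist for.  A promif routine copies its argument into a
 * short-lived 32-bit buffer, makes the CIF call, and frees the buffer.
 * example_cif_call() stands in for whatever service is being wrapped.
 */
#ifdef	PROMPLAT_EXAMPLE
static int
example_cif_wrapper(const char *str, size_t len)
{
	void *buf = promplat_alloc(len);
	int rv;

	if (buf == NULL)
		return (-1);
	promplat_bcopy(str, buf, len);
	rv = example_cif_call(buf, len);	/* hypothetical CIF call */
	promplat_free(buf, len);
	return (rv);
}
#endif	/* PROMPLAT_EXAMPLE */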
551 
552 #endif /* PROM_32BIT_ADDRS */
553 
554 static prom_generation_cookie_t prom_tree_gen;
555 static krwlock_t prom_tree_lock;
556 
557 int
558 prom_tree_access(int (*callback)(void *arg, int has_changed), void *arg,
559     prom_generation_cookie_t *ckp)
560 {
561 	int chg, rv;
562 
563 	rw_enter(&prom_tree_lock, RW_READER);
564 	/*
565 	 * If the tree has changed since the caller last accessed it,
566 	 * pass 1 as the second argument to the callback function;
567 	 * otherwise pass 0.
568 	 */
569 	if (ckp != NULL && *ckp != prom_tree_gen) {
570 		*ckp = prom_tree_gen;
571 		chg = 1;
572 	} else
573 		chg = 0;
574 	rv = callback(arg, chg);
575 	rw_exit(&prom_tree_lock);
576 	return (rv);
577 }
578 
579 int
580 prom_tree_update(int (*callback)(void *arg), void *arg)
581 {
582 	int rv;
583 
584 	rw_enter(&prom_tree_lock, RW_WRITER);
585 	prom_tree_gen++;
586 	rv = callback(arg);
587 	rw_exit(&prom_tree_lock);
588 	return (rv);
589 }
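
/*
 * Illustrative sketch (not part of this file, never compiled): a caller
 * that caches data derived from the device tree can pass a generation
 * cookie so its callback learns whether the tree changed since the last
 * access.  The callback and the cached-state comment are hypothetical.
 */
#ifdef	PROM_TREE_EXAMPLE
static prom_generation_cookie_t example_gen;

static int
example_walk(void *arg, int has_changed)
{
	if (has_changed) {
		/* the tree changed since our last visit: refresh caches */
	}
	/* safe to read the tree here; prom_tree_update() is excluded */
	return (0);
}

static int
example_lookup(void *arg)
{
	return (prom_tree_access(example_walk, arg, &example_gen));
}
#endif	/* PROM_TREE_EXAMPLE */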
590