xref: /titanic_41/usr/src/uts/sparc/v9/ml/sparcv9_subr.s (revision 2a8d6eba033e4713ab12b61178f0513f1f075482)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#pragma ident	"%Z%%M%	%I%	%E% SMI"
27
28/*
29 * General assembly language routines.
30 * It is the intent of this file to contain routines that are
31 * independent of the specific kernel architecture, and those that are
32 * common across kernel architectures.
33 * As architectures diverge, and implementations of specific
34 * architecture-dependent routines change, the routines should be moved
35 * from this file into the respective ../`arch -k`/subr.s file.
36 * Or, if you want to be really nice, move them to a file whose
37 * name has something to do with the routine you are moving.
38 */
39
40#if defined(lint)
41#include <sys/types.h>
42#include <sys/scb.h>
43#include <sys/systm.h>
44#include <sys/regset.h>
45#include <sys/sunddi.h>
46#include <sys/lockstat.h>
47#include <sys/dtrace.h>
48#include <sys/ftrace.h>
49#endif	/* lint */
50
51#include <sys/asm_linkage.h>
52#include <sys/privregs.h>
53#include <sys/machparam.h>	/* To get SYSBASE and PAGESIZE */
54#include <sys/machthread.h>
55#include <sys/clock.h>
56#include <sys/psr_compat.h>
57#include <sys/isa_defs.h>
58#include <sys/dditypes.h>
59#include <sys/panic.h>
60#include <sys/machlock.h>
61#include <sys/ontrap.h>
62
63#if !defined(lint)
64#include "assym.h"
65
66	.seg	".text"
67	.align	4
68
/*
 * Macro to raise processor priority level.
 * Avoid dropping processor priority if already at high level.
 * Also avoid going below CPU->cpu_base_spl, which could've just been set by
 * a higher-level interrupt thread that just blocked.
 *
 * level can be %o0 (but no other register used in this macro) or a constant.
 * Clobbers %o1 (old PIL, which becomes the return value) and %o2 (scratch).
 */
#define	RAISE(level) \
	rdpr	%pil, %o1;		/* get current PIL */		\
	cmp	%o1, level;		/* is PIL high enough? */	\
	bge	1f;			/* yes, return */		\
	nop;								\
	wrpr	%g0, PIL_MAX, %pil;	/* freeze CPU_BASE_SPL */	\
	ldn	[THREAD_REG + T_CPU], %o2;	/* %o2 = CPU pointer */	\
	ld	[%o2 + CPU_BASE_SPL], %o2;	/* %o2 = base SPL */	\
	cmp	%o2, level;		/* compare new to base */	\
	movl	%xcc, level, %o2;	/* use new if base lower */	\
	wrpr	%g0, %o2, %pil;						\
1:									\
	retl;								\
	mov	%o1, %o0		/* return old PIL */
91
/*
 * Macro to raise processor priority level to level >= DISP_LEVEL.
 * Doesn't require comparison to CPU->cpu_base_spl.
 *
 * level can be %o0 (but no other register used in this macro) or a constant.
 * Clobbers %o1 (old PIL, which becomes the return value).
 */
#define	RAISE_HIGH(level) \
	rdpr	%pil, %o1;		/* get current PIL */		\
	cmp	%o1, level;		/* is PIL high enough? */	\
	bge	1f;			/* yes, return */		\
	nop;								\
	wrpr	%g0, level, %pil;	/* use chosen value */		\
1:									\
	retl;								\
	mov	%o1, %o0		/* return old PIL */
107
/*
 * Macro to set the priority to a specified level.
 * Avoid dropping the priority below CPU->cpu_base_spl.
 *
 * level can be %o0 (but no other register used in this macro) or a constant
 * with the new PIL in the PSR_PIL field of the level arg.
 * Clobbers %o1 (old PIL, which becomes the return value) and %o2 (scratch).
 */
#define SETPRI(level) \
	rdpr	%pil, %o1;		/* get current PIL */		\
	wrpr	%g0, PIL_MAX, %pil;	/* freeze CPU_BASE_SPL */	\
	ldn	[THREAD_REG + T_CPU], %o2;	/* %o2 = CPU pointer */	\
	ld	[%o2 + CPU_BASE_SPL], %o2;	/* %o2 = base SPL */	\
	cmp	%o2, level;		/* compare new to base */	\
	movl	%xcc, level, %o2;	/* use new if base lower */	\
	wrpr	%g0, %o2, %pil;						\
	retl;								\
	mov	%o1, %o0		/* return old PIL */
125
/*
 * Macro to set the priority to a specified level at or above LOCK_LEVEL.
 * Doesn't require comparison to CPU->cpu_base_spl.
 *
 * level can be %o0 (but no other register used in this macro) or a constant
 * with the new PIL in the PSR_PIL field of the level arg.
 * Clobbers %o1 (old PIL, which becomes the return value).
 */
#define	SETPRI_HIGH(level) \
	rdpr	%pil, %o1;		/* get current PIL */		\
	wrpr	%g0, level, %pil;					\
	retl;								\
	mov	%o1, %o0		/* return old PIL */
138
139#endif	/* lint */
140
	/*
	 * Berkeley 4.3 introduced symbolically named interrupt levels
	 * as a way to deal with priority in a machine-independent fashion.
	 * Numbered priorities are machine specific, and should be
	 * discouraged where possible.
	 *
	 * Note, for the machine specific priorities there are
	 * examples listed for devices that use a particular priority.
	 * It should not be construed that all devices of that
	 * type should be at that priority.  It merely reflects where
	 * the current devices fit into the priority scheme based
	 * upon time criticalness.
153	 *
154	 * The underlying assumption of these assignments is that
155	 * SPARC9 IPL 10 is the highest level from which a device
156	 * routine can call wakeup.  Devices that interrupt from higher
157	 * levels are restricted in what they can do.  If they need
158	 * kernels services they should schedule a routine at a lower
159	 * level (via software interrupt) to do the required
160	 * processing.
161	 *
162	 * Examples of this higher usage:
163	 *	Level	Usage
164	 *	15	Asynchronous memory exceptions
165	 *	14	Profiling clock (and PROM uart polling clock)
166	 *	13	Audio device
167	 *	12	Serial ports
168	 *	11	Floppy controller
169	 *
170	 * The serial ports request lower level processing on level 6.
171	 * Audio and floppy request lower level processing on level 4.
172	 *
173	 * Also, almost all splN routines (where N is a number or a
174	 * mnemonic) will do a RAISE(), on the assumption that they are
175	 * never used to lower our priority.
176	 * The exceptions are:
177	 *	spl8()		Because you can't be above 15 to begin with!
178	 *	splzs()		Because this is used at boot time to lower our
179	 *			priority, to allow the PROM to poll the uart.
180	 *	spl0()		Used to lower priority to 0.
181	 */
182
#if defined(lint)

int spl0(void)		{ return (0); }
int spl6(void)		{ return (0); }
int spl7(void)		{ return (0); }
int spl8(void)		{ return (0); }
int splhi(void)		{ return (0); }
int splhigh(void)	{ return (0); }
int splzs(void)		{ return (0); }

#else	/* lint */

	/*
	 * Each splN routine below returns the previous PIL in %o0; that
	 * is done by the SETPRI/RAISE family of macros they expand to.
	 */

	/* locks out all interrupts, including memory errors */
	ENTRY(spl8)
	SETPRI_HIGH(15)
	SET_SIZE(spl8)

	/* just below the level that profiling runs */
	ENTRY(spl7)
	RAISE_HIGH(13)
	SET_SIZE(spl7)

	/* sun specific - highest priority onboard serial i/o zs ports */
	ENTRY(splzs)
	SETPRI_HIGH(12)	/* Can't be a RAISE, as it's used to lower us */
	SET_SIZE(splzs)

	/*
	 * should lock out clocks and all interrupts,
	 * as you can see, there are exceptions
	 */
	ENTRY(splhi)
	ALTENTRY(splhigh)
	ALTENTRY(spl6)
	ALTENTRY(i_ddi_splhigh)
	RAISE_HIGH(DISP_LEVEL)
	SET_SIZE(i_ddi_splhigh)
	SET_SIZE(spl6)
	SET_SIZE(splhigh)
	SET_SIZE(splhi)

	/* allow all interrupts */
	ENTRY(spl0)
	SETPRI(0)
	SET_SIZE(spl0)

#endif	/* lint */
230
231/*
232 * splx - set PIL back to that indicated by the old %pil passed as an argument,
233 * or to the CPU's base priority, whichever is higher.
234 */
235
#if defined(lint)

/* ARGSUSED */
void
splx(int level)
{}

#else	/* lint */

	/*
	 * splx(level): restore PIL to the level saved by a prior spl call,
	 * never dropping below CPU->cpu_base_spl (handled by SETPRI).
	 */
	ENTRY(splx)
	ALTENTRY(i_ddi_splx)
	SETPRI(%o0)		/* set PIL */
	SET_SIZE(i_ddi_splx)
	SET_SIZE(splx)

#endif	/* lint */
252
253/*
254 * splr()
255 *
256 * splr is like splx but will only raise the priority and never drop it
257 * Be careful not to set priority lower than CPU->cpu_base_pri,
258 * even though it seems we're raising the priority, it could be set higher
259 * at any time by an interrupt routine, so we must block interrupts and
260 * look at CPU->cpu_base_pri.
261 */
262
#if defined(lint)

/* ARGSUSED */
int
splr(int level)
{ return (0); }

#else	/* lint */
	/*
	 * splr(level): raise PIL to level, but never lower it
	 * (RAISE returns early when the current PIL is already higher).
	 * Returns the previous PIL in %o0.
	 */
	ENTRY(splr)
	RAISE(%o0)
	SET_SIZE(splr)

#endif	/* lint */
276
277/*
278 * on_fault()
279 * Catch lofault faults. Like setjmp except it returns one
280 * if code following causes uncorrectable fault. Turned off
281 * by calling no_fault().
282 */
283
#if defined(lint)

/* ARGSUSED */
int
on_fault(label_t *ljb)
{ return (0); }

#else	/* lint */

	/*
	 * on_fault(ljb): arm lofault catching.  Records ljb in t_onfault,
	 * points t_lofault at catch_fault, then tail-calls setjmp(ljb) so
	 * the first return is 0.  A later fault lands at catch_fault, which
	 * disarms both handlers and longjmps back, making on_fault() appear
	 * to return nonzero.
	 */
	ENTRY(on_fault)
	membar	#Sync			! sync error barrier (see copy.s)
	stn	%o0, [THREAD_REG + T_ONFAULT]
	set	catch_fault, %o1
	b	setjmp			! let setjmp do the rest
	stn	%o1, [THREAD_REG + T_LOFAULT]	! put catch_fault in t_lofault

catch_fault:
	save	%sp, -SA(WINDOWSIZE), %sp ! goto next window so that we can rtn
	ldn	[THREAD_REG + T_ONFAULT], %o0	! %o0 = saved label_t pointer
	membar	#Sync				! sync error barrier
	stn	%g0, [THREAD_REG + T_ONFAULT]	! turn off onfault
	b	longjmp			! let longjmp do the rest
	stn	%g0, [THREAD_REG + T_LOFAULT]	! turn off lofault
	SET_SIZE(on_fault)

#endif	/* lint */
310
311/*
312 * no_fault()
313 * turn off fault catching.
314 */
315
#if defined(lint)

void
no_fault(void)
{}

#else	/* lint */

	/*
	 * no_fault(): disarm the fault catching set up by on_fault() by
	 * clearing both t_onfault and t_lofault.
	 */
	ENTRY(no_fault)
	membar	#Sync				! sync error barrier
	stn	%g0, [THREAD_REG + T_ONFAULT]	! turn off onfault
	retl
	stn	%g0, [THREAD_REG + T_LOFAULT]	! turn off lofault
	SET_SIZE(no_fault)

#endif	/* lint */
332
333/*
334 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  On sparcv9,
335 * the trap code will complete trap processing but reset the return %pc to
336 * ot_trampoline, which will by default be set to the address of this code.
337 * We longjmp(&curthread->t_ontrap->ot_jmpbuf) to return back to on_trap().
338 */
#if defined(lint)

void
on_trap_trampoline(void)
{}

#else	/* lint */

	/*
	 * Default ot_trampoline target: longjmp back into on_trap() via
	 * curthread->t_ontrap->ot_jmpbuf.
	 */
	ENTRY(on_trap_trampoline)
	ldn	[THREAD_REG + T_ONTRAP], %o0	! %o0 = curthread->t_ontrap
	b	longjmp				! longjmp(&otp->ot_jmpbuf)
	add	%o0, OT_JMPBUF, %o0		! delay - %o0 = &ot_jmpbuf
	SET_SIZE(on_trap_trampoline)

#endif	/* lint */
354
355/*
356 * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
357 * more information about the on_trap() mechanism.  If the on_trap_data is the
358 * same as the topmost stack element, we just modify that element.
359 * On UltraSPARC, we need to issue a membar #Sync before modifying t_ontrap.
360 * The issue barrier is defined to force all deferred errors to complete before
361 * we go any further.  We want these errors to be processed before we modify
362 * our current error protection.
363 */
#if defined(lint)

/*ARGSUSED*/
int
on_trap(on_trap_data_t *otp, uint_t prot)
{ return (0); }

#else	/* lint */

	/*
	 * on_trap(otp, prot): initialize otp, push it onto the t_ontrap
	 * stack (unless it is already the topmost element), and tail-call
	 * setjmp(&otp->ot_jmpbuf) so the first return is 0; a trap returns
	 * here again with nonzero via the trampoline/longjmp path.
	 */
	ENTRY(on_trap)
	membar	#Sync				! force error barrier
	sth	%o1, [%o0 + OT_PROT]		! ot_prot = prot
	sth	%g0, [%o0 + OT_TRAP]		! ot_trap = 0
	set	on_trap_trampoline, %o2		! %o2 = &on_trap_trampoline
	stn	%o2, [%o0 + OT_TRAMPOLINE]	! ot_trampoline = %o2
	stn	%g0, [%o0 + OT_HANDLE]		! ot_handle = NULL
	ldn	[THREAD_REG + T_ONTRAP], %o2	! %o2 = curthread->t_ontrap
	cmp	%o0, %o2			! if (otp == %o2)
	be	0f				!    don't modify t_ontrap
	stn	%g0, [%o0 + OT_PAD1]		! delay - ot_pad1 = NULL

	stn	%o2, [%o0 + OT_PREV]		! ot_prev = t_ontrap
	membar	#Sync				! force error barrier
	stn	%o0, [THREAD_REG + T_ONTRAP]	! t_ontrap = otp

0:	b	setjmp				! let setjmp do the rest
	add	%o0, OT_JMPBUF, %o0		! %o0 = &ot_jmpbuf
	SET_SIZE(on_trap)

#endif	/* lint */
394
395/*
396 * Setjmp and longjmp implement non-local gotos using state vectors
397 * type label_t.
398 */
399
#if defined(lint)

/* ARGSUSED */
int
setjmp(label_t *lp)
{ return (0); }

#else	/* lint */

	/*
	 * setjmp(lp): capture the caller's return address (%o7) and stack
	 * pointer in the label_t.  Leaf routine -- no register window of
	 * its own, so %o7/%sp are the caller's.  Returns 0.
	 */
	ENTRY(setjmp)
	stn	%o7, [%o0 + L_PC]	! save return address
	stn	%sp, [%o0 + L_SP]	! save stack ptr
	retl
	clr	%o0			! return 0
	SET_SIZE(setjmp)

#endif	/* lint */
417
418
#if defined(lint)

/* ARGSUSED */
void
longjmp(label_t *lp)
{}

#else	/* lint */

	/*
	 * longjmp(lp): resume at the state saved by setjmp(lp); the
	 * corresponding setjmp appears to return 1.
	 */
	ENTRY(longjmp)
	!
        ! The following save is required so that an extra register
        ! window is flushed.  Flushw flushes nwindows-2
        ! register windows.  If setjmp and longjmp are called from
        ! within the same window, that window will not get pushed
        ! out onto the stack without the extra save below.  Tail call
        ! optimization can lead to callers of longjmp executing
        ! from a window that could be the same as the setjmp,
        ! thus the need for the following save.
        !
	save    %sp, -SA(MINFRAME), %sp
	flushw				! flush all but this window
	ldn	[%i0 + L_PC], %i7	! restore return addr
	ldn	[%i0 + L_SP], %fp	! restore sp for dest on foreign stack
	ret				! return 1
	restore	%g0, 1, %o0		! takes underflow, switches stacks
	SET_SIZE(longjmp)

#endif	/* lint */
448
449/*
450 * movtuc(length, from, to, table)
451 *
452 * VAX movtuc instruction (sort of).
453 */
454
#if defined(lint)

/*ARGSUSED*/
int
movtuc(size_t length, u_char *from, u_char *to, u_char table[])
{ return (0); }

#else	/* lint */

	/*
	 * movtuc(length, from, to, table)
	 * %o0 = length, %o1 = from, %o2 = to, %o3 = table, %o4 = index.
	 * Stores table[from[i]] into to[i]; stops after at most length
	 * bytes or when a translation of zero is fetched (the zero byte
	 * is still stored, via the delay slot, but not counted).
	 * Returns the number of bytes moved.  Clobbers %g1.
	 */
	ENTRY(movtuc)
	tst     %o0
	ble,pn	%ncc, 2f		! check length
	clr     %o4

	ldub    [%o1 + %o4], %g1        ! get next byte in string
0:
	ldub    [%o3 + %g1], %g1        ! get corresponding table entry
	tst     %g1                     ! escape char?
	bnz     1f
	stb     %g1, [%o2 + %o4]        ! delay slot, store it

	retl                            ! return (bytes moved)
	mov     %o4, %o0
1:
	inc     %o4                     ! increment index
	cmp     %o4, %o0                ! index < length ?
	bl,a,pt	%ncc, 0b
	ldub    [%o1 + %o4], %g1        ! delay slot, get next byte in string
2:
	retl                            ! return (bytes moved)
	mov     %o4, %o0
	SET_SIZE(movtuc)

#endif	/* lint */
489
490/*
491 * scanc(length, string, table, mask)
492 *
493 * VAX scanc instruction.
494 */
495
#if defined(lint)

/*ARGSUSED*/
int
scanc(size_t length, u_char *string, u_char table[], u_char mask)
{ return (0); }

#else	/* lint */

	/*
	 * scanc(length, string, table, mask)
	 * %o0 = length, %o1 = string, %o2 = table, %o3 = mask, %o4 = index.
	 * Scans until (table[string[i]] & mask) is nonzero or length bytes
	 * are examined; returns (length - index).  Clobbers %g1.
	 */
	ENTRY(scanc)
	tst	%o0
	ble,pn	%ncc, 1f		! check length
	clr	%o4
0:
	ldub	[%o1 + %o4], %g1	! get next byte in string
	cmp	%o4, %o0		! interlock slot, index < length ?
	ldub	[%o2 + %g1], %g1	! get corresponding table entry
	bge,pn	%ncc, 1f		! interlock slot
	btst	%o3, %g1		! apply the mask
	bz,a	0b
	inc	%o4			! delay slot, increment index
1:
	retl				! return(length - index)
	sub	%o0, %o4, %o0
	SET_SIZE(scanc)

#endif	/* lint */
523
524/*
525 * if a() calls b() calls caller(),
526 * caller() returns return address in a().
527 */
528
#if defined(lint)

caddr_t
caller(void)
{ return (0); }

#else	/* lint */

	/*
	 * caller(): return our caller's return address -- %i7 of the
	 * current window is the caller's saved %o7.
	 */
	ENTRY(caller)
	retl
	mov	%i7, %o0		! delay - return caller's return addr
	SET_SIZE(caller)

#endif	/* lint */
543
544/*
545 * if a() calls callee(), callee() returns the
546 * return address in a();
547 */
548
#if defined(lint)

caddr_t
callee(void)
{ return (0); }

#else	/* lint */

	/*
	 * callee(): return the address we will return to -- our own %o7
	 * (leaf routine, so %o7 is the return address into our caller).
	 */
	ENTRY(callee)
	retl
	mov	%o7, %o0		! delay - return our return address
	SET_SIZE(callee)

#endif	/* lint */
563
564/*
565 * return the current frame pointer
566 */
567
#if defined(lint)

greg_t
getfp(void)
{ return (0); }

#else	/* lint */

	/* return the current frame pointer (%fp of the caller's window) */
	ENTRY(getfp)
	retl
	mov	%fp, %o0		! delay - return %fp
	SET_SIZE(getfp)

#endif	/* lint */
582
583/*
584 * Get vector base register
585 */
586
#if defined(lint)

greg_t
gettbr(void)
{ return (0); }

#else	/* lint */

	/* return the trap base register */
	ENTRY(gettbr)
	retl
	mov     %tbr, %o0		! delay - read %tbr
	SET_SIZE(gettbr)

#endif	/* lint */
601
602/*
603 * Get processor state register, V9 faked to look like V8.
604 * Note: does not provide ccr.xcc and provides FPRS.FEF instead of
605 * PSTATE.PEF, because PSTATE.PEF is always on in order to allow the
606 * libc_psr memcpy routines to run without hitting the fp_disabled trap.
607 */
608
#if defined(lint)

greg_t
getpsr(void)
{ return (0); }

#else	/* lint */

	/*
	 * getpsr(): synthesize a V8-style PSR image from V9 state:
	 * icc from %ccr, EF from FPRS.FEF, plus the assigned impl/ver bits.
	 */
	ENTRY(getpsr)
	rd	%ccr, %o1			! get ccr
        sll	%o1, PSR_ICC_SHIFT, %o0		! move icc to V8 psr.icc
	rd	%fprs, %o1			! get fprs
	and	%o1, FPRS_FEF, %o1		! mask out dirty upper/lower
	sllx	%o1, PSR_FPRS_FEF_SHIFT, %o1	! shift fef to V8 psr.ef
        or	%o0, %o1, %o0			! or into psr.ef
        set	V9_PSR_IMPLVER, %o1		! SI assigned impl/ver: 0xef
        retl
        or	%o0, %o1, %o0			! or into psr.impl/ver
	SET_SIZE(getpsr)

#endif	/* lint */
630
631/*
632 * Get current processor interrupt level
633 */
634
#if defined(lint)

u_int
getpil(void)
{ return (0); }

#else	/* lint */

	/* return the current processor interrupt level (%pil) */
	ENTRY(getpil)
	retl
	rdpr	%pil, %o0		! delay - read current PIL
	SET_SIZE(getpil)

#endif	/* lint */
649
#if defined(lint)

/*ARGSUSED*/
void
setpil(u_int pil)
{}

#else	/* lint */

	/*
	 * setpil(pil): unconditionally set %pil; unlike splx()/SETPRI
	 * there is no CPU_BASE_SPL floor check here.
	 */
	ENTRY(setpil)
	retl
	wrpr	%g0, %o0, %pil		! delay - set new PIL
	SET_SIZE(setpil)

#endif	/* lint */
665
666
667/*
668 * _insque(entryp, predp)
669 *
670 * Insert entryp after predp in a doubly linked list.
671 */
672
#if defined(lint)

/*ARGSUSED*/
void
_insque(caddr_t entryp, caddr_t predp)
{}

#else	/* lint */

	/*
	 * _insque(entryp, predp): insert entryp after predp in a doubly
	 * linked list; forw link at offset 0, back link at CPTRSIZE.
	 */
	ENTRY(_insque)
	ldn	[%o1], %g1		! predp->forw
	stn	%o1, [%o0 + CPTRSIZE]	! entryp->back = predp
	stn	%g1, [%o0]		! entryp->forw = predp->forw
	stn	%o0, [%o1]		! predp->forw = entryp
	retl
	stn	%o0, [%g1 + CPTRSIZE]	! predp->forw->back = entryp
	SET_SIZE(_insque)

#endif	/* lint */
692
693/*
694 * _remque(entryp)
695 *
696 * Remove entryp from a doubly linked list
697 */
698
#if defined(lint)

/*ARGSUSED*/
void
_remque(caddr_t entryp)
{}

#else	/* lint */

	/*
	 * _remque(entryp): unlink entryp from a doubly linked list;
	 * forw link at offset 0, back link at CPTRSIZE.
	 */
	ENTRY(_remque)
	ldn	[%o0], %g1		! entryp->forw
	ldn	[%o0 + CPTRSIZE], %g2	! entryp->back
	stn	%g1, [%g2]		! entryp->back->forw = entryp->forw
	retl
	stn	%g2, [%g1 + CPTRSIZE]	! entryp->forw->back = entryp->back
	SET_SIZE(_remque)

#endif	/* lint */
717
718
719/*
720 * strlen(str)
721 *
722 * Returns the number of non-NULL bytes in string argument.
723 *
724 * XXX -  why is this here, rather than the traditional file?
725 *	  why does it have local labels which don't start with a `.'?
726 */
727
#if defined(lint)

/*ARGSUSED*/
size_t
strlen(const char *str)
{ return (0); }

#else	/* lint */

	/*
	 * strlen(str): count bytes up to (not including) the NUL
	 * terminator.  First aligns the pointer to a word boundary
	 * byte-by-byte, then scans a word at a time using the classic
	 * byte-carry trick.  %o0 = count, %o1 = cursor, %o2-%o5 scratch.
	 */
	ENTRY(strlen)
	mov	%o0, %o1
	andcc	%o1, 3, %o3		! is src word aligned
	bz	$nowalgnd
	clr	%o0			! length of non-zero bytes
	cmp	%o3, 2			! is src half-word aligned
	be	$s2algn
	cmp	%o3, 3			! src is byte aligned
	ldub	[%o1], %o3		! move 1 or 3 bytes to align it
	inc	1, %o1			! in either case, safe to do a byte
	be	$s3algn
	tst	%o3
$s1algn:
	bnz,a	$s2algn			! now go align dest
	inc	1, %o0
	b,a	$done

$s2algn:
	lduh	[%o1], %o3		! know src is half-word aligned
	inc	2, %o1
	srl	%o3, 8, %o4
	tst	%o4			! is the first byte zero
	bnz,a	1f
	inc	%o0
	b,a	$done
1:	andcc	%o3, 0xff, %o3		! is the second byte zero
	bnz,a	$nowalgnd
	inc	%o0
	b,a	$done
$s3algn:
	bnz,a	$nowalgnd
	inc	1, %o0
	b,a	$done

$nowalgnd:
	! use trick to check if any read bytes of a word are zero
	! the following two constants will generate "byte carries"
	! and check if any bit in a byte is set, if all characters
	! are 7 bits (unsigned) this always works, otherwise
	! there is a special case that rarely happens, see below

	set	0x7efefeff, %o3
	set	0x81010100, %o4

3:	ld	[%o1], %o2		! main loop
	inc	4, %o1
	add	%o2, %o3, %o5		! generate byte-carries
	xor	%o5, %o2, %o5		! see if original bits set
	and	%o5, %o4, %o5
	cmp	%o5, %o4		! if ==,  no zero bytes
	be,a	3b
	inc	4, %o0

	! check for the zero byte and increment the count appropriately
	! some information (the carry bit) is lost if bit 31
	! was set (very rare), if this is the rare condition,
	! return to the main loop again

	sethi	%hi(0xff000000), %o5	! mask used to test for terminator
	andcc	%o2, %o5, %g0		! check if first byte was zero
	bnz	1f
	srl	%o5, 8, %o5
$done:
	retl
	nop
1:	andcc	%o2, %o5, %g0		! check if second byte was zero
	bnz	1f
	srl	%o5, 8, %o5
$done1:
	retl
	inc	%o0
1:	andcc 	%o2, %o5, %g0		! check if third byte was zero
	bnz	1f
	andcc	%o2, 0xff, %g0		! check if last byte is zero
$done2:
	retl
	inc	2, %o0
1:	bnz,a	3b
	inc	4, %o0			! count of bytes
$done3:
	retl
	inc	3, %o0
	SET_SIZE(strlen)

#endif	/* lint */
822
823/*
824 * Provide a C callable interface to the membar instruction.
825 */
826
#if defined(lint)

void
membar_ldld(void)
{}

void
membar_stld(void)
{}

void
membar_ldst(void)
{}

void
membar_stst(void)
{}

void
membar_ldld_ldst(void)
{}

void
membar_ldld_stld(void)
{}

void
membar_ldld_stst(void)
{}

void
membar_stld_ldld(void)
{}

void
membar_stld_ldst(void)
{}

void
membar_stld_stst(void)
{}

void
membar_ldst_ldld(void)
{}

void
membar_ldst_stld(void)
{}

void
membar_ldst_stst(void)
{}

void
membar_stst_ldld(void)
{}

void
membar_stst_stld(void)
{}

void
membar_stst_ldst(void)
{}

void
membar_lookaside(void)
{}

void
membar_memissue(void)
{}

void
membar_sync(void)
{}

#else

	/*
	 * Each routine issues the named membar in the delay slot of its
	 * retl.  The two-component variants (a_b and b_a) are the same
	 * barrier, so they share code via ALTENTRY.
	 */
	ENTRY(membar_ldld)
	retl
	membar	#LoadLoad
	SET_SIZE(membar_ldld)

	ENTRY(membar_stld)
	retl
	membar	#StoreLoad
	SET_SIZE(membar_stld)

	ENTRY(membar_ldst)
	retl
	membar	#LoadStore
	SET_SIZE(membar_ldst)

	ENTRY(membar_stst)
	retl
	membar	#StoreStore
	SET_SIZE(membar_stst)

	ENTRY(membar_ldld_stld)
	ALTENTRY(membar_stld_ldld)
	retl
	membar	#LoadLoad|#StoreLoad
	SET_SIZE(membar_stld_ldld)
	SET_SIZE(membar_ldld_stld)

	ENTRY(membar_ldld_ldst)
	ALTENTRY(membar_ldst_ldld)
	retl
	membar	#LoadLoad|#LoadStore
	SET_SIZE(membar_ldst_ldld)
	SET_SIZE(membar_ldld_ldst)

	ENTRY(membar_ldld_stst)
	ALTENTRY(membar_stst_ldld)
	retl
	membar	#LoadLoad|#StoreStore
	SET_SIZE(membar_stst_ldld)
	SET_SIZE(membar_ldld_stst)

	ENTRY(membar_stld_ldst)
	ALTENTRY(membar_ldst_stld)
	retl
	membar	#StoreLoad|#LoadStore
	SET_SIZE(membar_ldst_stld)
	SET_SIZE(membar_stld_ldst)

	ENTRY(membar_stld_stst)
	ALTENTRY(membar_stst_stld)
	retl
	membar	#StoreLoad|#StoreStore
	SET_SIZE(membar_stst_stld)
	SET_SIZE(membar_stld_stst)

	ENTRY(membar_ldst_stst)
	ALTENTRY(membar_stst_ldst)
	retl
	membar	#LoadStore|#StoreStore
	SET_SIZE(membar_stst_ldst)
	SET_SIZE(membar_ldst_stst)

	ENTRY(membar_lookaside)
	retl
	membar	#Lookaside
	SET_SIZE(membar_lookaside)

	ENTRY(membar_memissue)
	retl
	membar	#MemIssue
	SET_SIZE(membar_memissue)

	ENTRY(membar_sync)
	retl
	membar	#Sync
	SET_SIZE(membar_sync)

#endif	/* lint */
984
985
#if defined(lint)

/*ARGSUSED*/
int
fuword64(const void *addr, uint64_t *dst)
{ return (0); }

/*ARGSUSED*/
int
fuword32(const void *addr, uint32_t *dst)
{ return (0); }

/*ARGSUSED*/
int
fuword16(const void *addr, uint16_t *dst)
{ return (0); }

/*ARGSUSED*/
int
fuword8(const void *addr, uint8_t *dst)
{ return (0); }

/*
 * NOTE(review): only lint prototypes for the dtrace_ft_ variants appear
 * in this section; their implementations are not visible here -- confirm
 * they are provided elsewhere.
 */
/*ARGSUSED*/
int
dtrace_ft_fuword64(const void *addr, uint64_t *dst)
{ return (0); }

/*ARGSUSED*/
int
dtrace_ft_fuword32(const void *addr, uint32_t *dst)
{ return (0); }

#else	/* lint */

/*
 * Since all of the fuword() variants are so similar, we have a macro to spit
 * them out.
 *
 * Register usage:
 *	%o0 = user source address (in); 0 on success / -1 on fault (out)
 *	%o1 = kernel destination address
 *	%o2 = fetched value; t_copyops pointer on the fault path
 *	%o3 = saved t_lofault, restored before returning
 *	%o5 = address of the local fault handler (label 1)
 * The load uses ASI_USER to access the user's address space; the
 * membar #Sync instructions fence deferred errors around the window in
 * which t_lofault is changed.  On fault, if t_copyops is set the
 * corresponding copyop is tail-jumped to, otherwise -1 is returned.
 */

#define	FUWORD(NAME, LOAD, STORE, COPYOP)	\
	ENTRY(NAME);				\
	sethi	%hi(1f), %o5;			\
	ldn	[THREAD_REG + T_LOFAULT], %o3;	\
	or	%o5, %lo(1f), %o5;		\
	membar	#Sync;				\
	stn	%o5, [THREAD_REG + T_LOFAULT];	\
	LOAD	[%o0]ASI_USER, %o2;		\
	membar	#Sync;				\
	stn	%o3, [THREAD_REG + T_LOFAULT];	\
	mov	0, %o0;				\
	retl;					\
	STORE	%o2, [%o1];			\
1:						\
	membar	#Sync;				\
	stn	%o3, [THREAD_REG + T_LOFAULT];	\
	ldn	[THREAD_REG + T_COPYOPS], %o2;	\
	brz	%o2, 2f;			\
	nop;					\
	ldn	[%o2 + COPYOP], %g1;		\
	jmp	%g1;				\
	nop;					\
2:						\
	retl;					\
	mov	-1, %o0;			\
	SET_SIZE(NAME)

	FUWORD(fuword64, ldxa, stx, CP_FUWORD64)
	FUWORD(fuword32, lda, st, CP_FUWORD32)
	FUWORD(fuword16, lduha, sth, CP_FUWORD16)
	FUWORD(fuword8, lduba, stb, CP_FUWORD8)

#endif	/* lint */
1058
1059
#if defined(lint)

/*ARGSUSED*/
int
suword64(void *addr, uint64_t value)
{ return (0); }

/*ARGSUSED*/
int
suword32(void *addr, uint32_t value)
{ return (0); }

/*ARGSUSED*/
int
suword16(void *addr, uint16_t value)
{ return (0); }

/*ARGSUSED*/
int
suword8(void *addr, uint8_t value)
{ return (0); }

#else	/* lint */

/*
 * Since all of the suword() variants are so similar, we have a macro to spit
 * them out.
 *
 * Register usage:
 *	%o0 = user destination address (in); 0 on success / -1 on fault (out)
 *	%o1 = value to store
 *	%o2 = t_copyops pointer on the fault path
 *	%o3 = saved t_lofault, restored before returning
 *	%o5 = address of the local fault handler (label 1)
 * The store uses ASI_USER to access the user's address space; the
 * membar #Sync instructions fence deferred errors around the window in
 * which t_lofault is changed.  On fault, if t_copyops is set the
 * corresponding copyop is tail-jumped to, otherwise -1 is returned.
 */

#define	SUWORD(NAME, STORE, COPYOP)		\
	ENTRY(NAME)				\
	sethi	%hi(1f), %o5;			\
	ldn	[THREAD_REG + T_LOFAULT], %o3;	\
	or	%o5, %lo(1f), %o5;		\
	membar	#Sync;				\
	stn	%o5, [THREAD_REG + T_LOFAULT];	\
	STORE	%o1, [%o0]ASI_USER;		\
	membar	#Sync;				\
	stn	%o3, [THREAD_REG + T_LOFAULT];	\
	retl;					\
	clr	%o0;				\
1:						\
	membar	#Sync;				\
	stn	%o3, [THREAD_REG + T_LOFAULT];	\
	ldn	[THREAD_REG + T_COPYOPS], %o2;	\
	brz	%o2, 2f;			\
	nop;					\
	ldn	[%o2 + COPYOP], %g1;		\
	jmp	%g1;				\
	nop;					\
2:						\
	retl;					\
	mov	-1, %o0;			\
	SET_SIZE(NAME)

	SUWORD(suword64, stxa, CP_SUWORD64)
	SUWORD(suword32, sta, CP_SUWORD32)
	SUWORD(suword16, stha, CP_SUWORD16)
	SUWORD(suword8, stba, CP_SUWORD8)

#endif	/* lint */
1121
#if defined(lint)

/*ARGSUSED*/
void
fuword8_noerr(const void *addr, uint8_t *dst)
{}

/*ARGSUSED*/
void
fuword16_noerr(const void *addr, uint16_t *dst)
{}

/*ARGSUSED*/
void
fuword32_noerr(const void *addr, uint32_t *dst)
{}

/*ARGSUSED*/
void
fuword64_noerr(const void *addr, uint64_t *dst)
{}

#else	/* lint */

	/*
	 * _noerr variants: plain ASI_USER fetches with no local fault
	 * handling and no return value -- these assume the caller has
	 * already arranged fault handling (no t_lofault manipulation
	 * is done here).
	 */
	ENTRY(fuword8_noerr)
	lduba	[%o0]ASI_USER, %o0
	retl
	stb	%o0, [%o1]
	SET_SIZE(fuword8_noerr)

	ENTRY(fuword16_noerr)
	lduha	[%o0]ASI_USER, %o0
	retl
	sth	%o0, [%o1]
	SET_SIZE(fuword16_noerr)

	ENTRY(fuword32_noerr)
	lda	[%o0]ASI_USER, %o0
	retl
	st	%o0, [%o1]
	SET_SIZE(fuword32_noerr)

	ENTRY(fuword64_noerr)
	ldxa	[%o0]ASI_USER, %o0
	retl
	stx	%o0, [%o1]
	SET_SIZE(fuword64_noerr)

#endif	/* lint */
1171
#if defined(lint)

/*ARGSUSED*/
void
suword8_noerr(void *addr, uint8_t value)
{}

/*ARGSUSED*/
void
suword16_noerr(void *addr, uint16_t value)
{}

/*ARGSUSED*/
void
suword32_noerr(void *addr, uint32_t value)
{}

/*ARGSUSED*/
void
suword64_noerr(void *addr, uint64_t value)
{}

#else	/* lint */

	/*
	 * _noerr variants: plain ASI_USER stores with no local fault
	 * handling and no return value -- these assume the caller has
	 * already arranged fault handling (no t_lofault manipulation
	 * is done here).
	 */
	ENTRY(suword8_noerr)
	retl
	stba	%o1, [%o0]ASI_USER
	SET_SIZE(suword8_noerr)

	ENTRY(suword16_noerr)
	retl
	stha	%o1, [%o0]ASI_USER
	SET_SIZE(suword16_noerr)

	ENTRY(suword32_noerr)
	retl
	sta	%o1, [%o0]ASI_USER
	SET_SIZE(suword32_noerr)

	ENTRY(suword64_noerr)
	retl
	stxa	%o1, [%o0]ASI_USER
	SET_SIZE(suword64_noerr)

#endif	/* lint */
1217
#if defined(__lint)

/*ARGSUSED*/
int
subyte(void *addr, uchar_t value)
{ return (0); }

/*ARGSUSED*/
void
subyte_noerr(void *addr, uchar_t value)
{}

/*ARGSUSED*/
int
fulword(const void *addr, ulong_t *valuep)
{ return (0); }

/*ARGSUSED*/
void
fulword_noerr(const void *addr, ulong_t *valuep)
{}

/*ARGSUSED*/
int
sulword(void *addr, ulong_t valuep)
{ return (0); }

/*ARGSUSED*/
void
sulword_noerr(void *addr, ulong_t valuep)
{}

#else

	/*
	 * subyte and the long-word routines are weak aliases onto the
	 * fixed-size fuword/suword routines matching the kernel data
	 * model: 64-bit under _LP64, 32-bit otherwise.
	 */
	.weak	subyte
	subyte=suword8
	.weak	subyte_noerr
	subyte_noerr=suword8_noerr
#ifdef _LP64
	.weak	fulword
	fulword=fuword64
	.weak	fulword_noerr
	fulword_noerr=fuword64_noerr
	.weak	sulword
	sulword=suword64
	.weak	sulword_noerr
	sulword_noerr=suword64_noerr
#else
	.weak	fulword
	fulword=fuword32
	.weak	fulword_noerr
	fulword_noerr=fuword32_noerr
	.weak	sulword
	sulword=suword32
	.weak	sulword_noerr
	sulword_noerr=suword32_noerr
#endif	/* LP64 */

#endif	/* lint */
1277
1278
#if defined (lint)

hrtime_t
rdtick()
{ return (0); }

#else
	/* return the current value of the %tick register */
	ENTRY(rdtick)
	retl
	rd	%tick, %o0		! delay - read %tick
        SET_SIZE(rdtick)
#endif
1291
1292/*
1293 * Set tba to given address, no side effects.
1294 */
#if defined (lint)

/*ARGSUSED*/
void *
set_tba(void *new_tba)
{ return (0); }

#else	/* lint */

	/* set %tba to new_tba; return the previous %tba, no side effects */
	ENTRY(set_tba)
	mov	%o0, %o1		! %o1 = new trap base address
	rdpr	%tba, %o0		! %o0 = previous trap base address
	wrpr	%o1, %tba
	retl
	nop
	SET_SIZE(set_tba)

#endif	/* lint */
1313
#if defined (lint)

/*ARGSUSED*/
void *
get_tba()
{ return (0); }

#else	/* lint */

	/* return the current trap base address (%tba) */
	ENTRY(get_tba)
	retl
	rdpr	%tba, %o0		! delay - read %tba
	SET_SIZE(get_tba)

#endif	/* lint */
1329
#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
setpstate(u_int pstate)
{}

#else	/* lint */

	/* write the given value to the %pstate register */
	ENTRY_NP(setpstate)
	retl
	wrpr	%g0, %o0, %pstate	! delay - set new pstate
	SET_SIZE(setpstate)

#endif	/* lint */
1345
#if defined(lint) || defined(__lint)

u_int
getpstate(void)
{ return(0); }

#else	/* lint */

	/* return the current value of the %pstate register */
	ENTRY_NP(getpstate)
	retl
	rdpr	%pstate, %o0		! delay - read %pstate
	SET_SIZE(getpstate)

#endif	/* lint */
1360
#if defined(lint) || defined(__lint)

dtrace_icookie_t
dtrace_interrupt_disable(void)
{ return (0); }

#else	/* lint */

	/*
	 * Clear PSTATE_IE to disable interrupts; return the previous
	 * %pstate as the cookie for dtrace_interrupt_enable().
	 */
	ENTRY_NP(dtrace_interrupt_disable)
	rdpr	%pstate, %o0		! %o0 = old pstate (the cookie)
	andn	%o0, PSTATE_IE, %o1	! %o1 = pstate with IE cleared
	retl
	wrpr	%g0, %o1, %pstate	! delay - disable interrupts
	SET_SIZE(dtrace_interrupt_disable)

#endif	/* lint */
1377
#if defined(lint) || defined(__lint)

/*ARGSUSED*/
void
dtrace_interrupt_enable(dtrace_icookie_t cookie)
{}

#else

	/* restore %pstate from the cookie saved by dtrace_interrupt_disable() */
	ENTRY_NP(dtrace_interrupt_enable)
	retl
	wrpr	%g0, %o0, %pstate	! delay - restore saved pstate
	SET_SIZE(dtrace_interrupt_enable)

#endif /* lint*/
1393
#if defined(lint)

void
dtrace_membar_producer(void)
{}

void
dtrace_membar_consumer(void)
{}

#else	/* lint */

/*
 * When SF_ERRATA_51 is defined, return via a branch to a 32-byte-aligned
 * retl instead of issuing retl directly (apparently a CPU erratum
 * workaround -- see the platform errata for the details).
 */
#ifdef SF_ERRATA_51
	.align 32
	ENTRY(dtrace_membar_return)
	retl
	nop
	SET_SIZE(dtrace_membar_return)
#define	DTRACE_MEMBAR_RETURN	ba,pt %icc, dtrace_membar_return
#else
#define	DTRACE_MEMBAR_RETURN	retl
#endif

	ENTRY(dtrace_membar_producer)
	DTRACE_MEMBAR_RETURN
	membar	#StoreStore
	SET_SIZE(dtrace_membar_producer)

	ENTRY(dtrace_membar_consumer)
	DTRACE_MEMBAR_RETURN
	membar	#LoadLoad
	SET_SIZE(dtrace_membar_consumer)

#endif	/* lint */
1428
#if defined(lint) || defined(__lint)

void
dtrace_flush_windows(void)
{}

#else

	! dtrace_flush_windows: spill all active register windows to the
	! stack via the flushw instruction (executed in the delay slot).
	ENTRY_NP(dtrace_flush_windows)
	retl
	flushw			! delay slot: flush register windows
	SET_SIZE(dtrace_flush_windows)

#endif	/* lint */
1443
#if defined(lint)

/*ARGSUSED*/
int
getpcstack_top(pc_t *pcstack, int limit, uintptr_t *lastfp, pc_t *lastpc)
{
	return (0);
}

#else	/* lint */

	/*
	 * Harvest return PCs still resident in the CPU's register
	 * windows (without flushing them to memory) into pcstack, up to
	 * 'limit' entries, and hand back the frame pointer and PC at
	 * which a memory-based stack walk should continue.
	 *
	 * %g1	pcstack
	 * %g2	iteration count
	 * %g3	final %fp
	 * %g4	final %i7
	 * %g5	saved %cwp (so we can get back to the original window)
	 *
	 * %o0	pcstack / return value (iteration count)
	 * %o1	limit / saved %cansave
	 * %o2	lastfp
	 * %o3	lastpc
	 * %o4	saved %canrestore
	 * %o5	saved %pstate (to restore interrupts)
	 *
	 * Note:  The frame pointer returned via lastfp is safe to use as
	 *	long as getpcstack_top() returns either (0) or a value less
	 *	than (limit).
	 */
	ENTRY_NP(getpcstack_top)

	rdpr	%pstate, %o5
	andn	%o5, PSTATE_IE, %g1
	wrpr	%g0, %g1, %pstate	! disable interrupts

	mov	%o0, %g1		! we need the pcstack pointer while
					! we're visiting other windows

	rdpr	%canrestore, %g2	! number of available windows
	sub	%g2, 1, %g2		! account for skipped frame
	cmp	%g2, %o1		! compare with limit
	movg	%icc, %o1, %g2		! %g2 = min(%canrestore-1, limit)

	brlez,a,pn %g2, 3f		! Use slow path if count <= 0 --
	clr	%o0			! return zero.

	mov	%g2, %o0		! set up return value

	rdpr	%cwp, %g5		! remember the register window state
	rdpr	%cansave, %o1		! 'restore' changes, so we can undo
	rdpr	%canrestore, %o4	! its effects when we finish.

	restore				! skip caller's frame
1:
	st	%i7, [%g1]		! stash return address in pcstack
					! (32-bit store: assumes kernel PCs
					! fit in 32 bits -- matches the
					! 4-byte stride below)
	restore				! go to the next frame
	subcc	%g2, 1, %g2		! decrement the count
	bnz,pt	%icc, 1b		! loop until count reaches 0
	add	%g1, 4, %g1		! increment pcstack

	mov	%i6, %g3		! copy the final %fp and return PC
	mov	%i7, %g4		! aside so we can return them to our
					! caller

	wrpr	%g0, %g5, %cwp		! jump back to the original window
	wrpr	%g0, %o1, %cansave	! and restore the original register
	wrpr	%g0, %o4, %canrestore	! window state.
2:
	stn	%g3, [%o2]		! store the frame pointer and pc
	st	%g4, [%o3]		! so our caller can continue the trace

	retl				! return to caller
	wrpr	%g0, %o5, %pstate	! restore interrupts

3:
	flushw				! flush register windows, then
					! pick up the saved %i6/%i7 from the
					! caller's window-save area (slots 14
					! and 15 of the frame):
	ldn	[%fp + STACK_BIAS + 14*CLONGSIZE], %g3	! load initial fp
	ba	2b
	ldn	[%fp + STACK_BIAS + 15*CLONGSIZE], %g4	! and pc
	SET_SIZE(getpcstack_top)

#endif	/* lint */
1526
#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
setwstate(u_int wstate)
{}

#else	/* lint */

	! setwstate: overwrite the window state register (%wstate) with
	! the caller-supplied value in %o0.
	ENTRY_NP(setwstate)
	retl
	wrpr	%g0, %o0, %wstate	! delay slot: %wstate = %o0
	SET_SIZE(setwstate)

#endif	/* lint */
1542
1543
1544#if defined(lint) || defined(__lint)
1545
1546u_int
1547getwstate(void)
1548{ return(0); }
1549
1550#else	/* lint */
1551
1552	ENTRY_NP(getwstate)
1553	retl
1554	rdpr	%wstate, %o0
1555	SET_SIZE(getwstate)
1556
1557#endif	/* lint */
1558
1559
1560/*
1561 * int panic_trigger(int *tp)
1562 *
1563 * A panic trigger is a word which is updated atomically and can only be set
1564 * once.  We atomically store 0xFF into the high byte and load the old value.
1565 * If the byte was 0xFF, the trigger has already been activated and we fail.
1566 * If the previous value was 0 or not 0xFF, we succeed.  This allows a
1567 * partially corrupt trigger to still trigger correctly.  DTrace has its own
1568 * version of this function to allow it to panic correctly from probe context.
1569 */
1570#if defined(lint)
1571
1572/*ARGSUSED*/
1573int panic_trigger(int *tp) { return (0); }
1574
1575/*ARGSUSED*/
1576int dtrace_panic_trigger(int *tp) { return (0); }
1577
1578#else	/* lint */
1579
1580	ENTRY_NP(panic_trigger)
1581	ldstub	[%o0], %o0		! store 0xFF, load byte into %o0
1582	cmp	%o0, 0xFF		! compare %o0 to 0xFF
1583	set	1, %o1			! %o1 = 1
1584	be,a	0f			! if (%o0 == 0xFF) goto 0f (else annul)
1585	set	0, %o1			! delay - %o1 = 0
15860:	retl
1587	mov	%o1, %o0		! return (%o1);
1588	SET_SIZE(panic_trigger)
1589
1590	ENTRY_NP(dtrace_panic_trigger)
1591	ldstub	[%o0], %o0		! store 0xFF, load byte into %o0
1592	cmp	%o0, 0xFF		! compare %o0 to 0xFF
1593	set	1, %o1			! %o1 = 1
1594	be,a	0f			! if (%o0 == 0xFF) goto 0f (else annul)
1595	set	0, %o1			! delay - %o1 = 0
15960:	retl
1597	mov	%o1, %o0		! return (%o1);
1598	SET_SIZE(dtrace_panic_trigger)
1599
1600#endif	/* lint */
1601
1602/*
1603 * void vpanic(const char *format, va_list alist)
1604 *
1605 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
1606 * into the panic code implemented in panicsys().  vpanic() is responsible
1607 * for passing through the format string and arguments, and constructing a
1608 * regs structure on the stack into which it saves the current register
1609 * values.  If we are not dying due to a fatal trap, these registers will
1610 * then be preserved in panicbuf as the current processor state.  Before
1611 * invoking panicsys(), vpanic() activates the first panic trigger (see
1612 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
1613 * DTrace takes a slightly different panic path if it must panic from probe
1614 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
1615 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
1616 * branches back into vpanic().
1617 */
1618#if defined(lint)
1619
1620/*ARGSUSED*/
1621void vpanic(const char *format, va_list alist) {}
1622
1623/*ARGSUSED*/
1624void dtrace_vpanic(const char *format, va_list alist) {}
1625
1626#else	/* lint */
1627
1628	ENTRY_NP(vpanic)
1629
1630	save	%sp, -SA(MINFRAME + REGSIZE), %sp	! save and allocate regs
1631
1632	!
1633	! The v9 struct regs has a 64-bit r_tstate field, which we use here
1634	! to store the %ccr, %asi, %pstate, and %cwp as they would appear
1635	! in %tstate if a trap occurred.  We leave it up to the debugger to
1636	! realize what happened and extract the register values.
1637	!
1638	rd	%ccr, %l0				! %l0 = %ccr
1639	sllx	%l0, TSTATE_CCR_SHIFT, %l0		! %l0 <<= CCR_SHIFT
1640	rd	%asi, %l1				! %l1 = %asi
1641	sllx	%l1, TSTATE_ASI_SHIFT, %l1		! %l1 <<= ASI_SHIFT
1642	or	%l0, %l1, %l0				! %l0 |= %l1
1643	rdpr	%pstate, %l1				! %l1 = %pstate
1644	sllx	%l1, TSTATE_PSTATE_SHIFT, %l1		! %l1 <<= PSTATE_SHIFT
1645	or	%l0, %l1, %l0				! %l0 |= %l1
1646	rdpr	%cwp, %l1				! %l1 = %cwp
1647	sllx	%l1, TSTATE_CWP_SHIFT, %l1		! %l1 <<= CWP_SHIFT
1648	or	%l0, %l1, %l0				! %l0 |= %l1
1649
1650	set	vpanic, %l1				! %l1 = %pc (vpanic)
1651	add	%l1, 4, %l2				! %l2 = %npc (vpanic+4)
1652	rd	%y, %l3					! %l3 = %y
1653	!
1654	! Flush register windows before panic_trigger() in order to avoid a
1655	! problem that a dump hangs if flush_windows() causes another panic.
1656	!
1657	call	flush_windows
1658	nop
1659
1660	sethi	%hi(panic_quiesce), %o0
1661	call	panic_trigger
1662	or	%o0, %lo(panic_quiesce), %o0		! if (!panic_trigger(
1663
1664vpanic_common:
1665	tst	%o0					!     &panic_quiesce))
1666	be	0f					!   goto 0f;
1667	mov	%o0, %l4				!   delay - %l4 = %o0
1668
1669	!
1670	! If panic_trigger() was successful, we are the first to initiate a
1671	! panic: switch to the panic_stack.
1672	!
1673	set	panic_stack, %o0			! %o0 = panic_stack
1674	set	PANICSTKSIZE, %o1			! %o1 = size of stack
1675	add	%o0, %o1, %o0				! %o0 = top of stack
1676
1677	sub	%o0, SA(MINFRAME + REGSIZE) + STACK_BIAS, %sp
1678
1679	!
1680	! Now that we've got everything set up, store each register to its
1681	! designated location in the regs structure allocated on the stack.
1682	! The register set we store is the equivalent of the registers at
1683	! the time the %pc was pointing to vpanic, thus the %i's now contain
1684	! what the %o's contained prior to the save instruction.
1685	!
16860:	stx	%l0, [%sp + STACK_BIAS + SA(MINFRAME) + TSTATE_OFF]
1687	stx	%g1, [%sp + STACK_BIAS + SA(MINFRAME) + G1_OFF]
1688	stx	%g2, [%sp + STACK_BIAS + SA(MINFRAME) + G2_OFF]
1689	stx	%g3, [%sp + STACK_BIAS + SA(MINFRAME) + G3_OFF]
1690	stx	%g4, [%sp + STACK_BIAS + SA(MINFRAME) + G4_OFF]
1691	stx	%g5, [%sp + STACK_BIAS + SA(MINFRAME) + G5_OFF]
1692	stx	%g6, [%sp + STACK_BIAS + SA(MINFRAME) + G6_OFF]
1693	stx	%g7, [%sp + STACK_BIAS + SA(MINFRAME) + G7_OFF]
1694	stx	%i0, [%sp + STACK_BIAS + SA(MINFRAME) + O0_OFF]
1695	stx	%i1, [%sp + STACK_BIAS + SA(MINFRAME) + O1_OFF]
1696	stx	%i2, [%sp + STACK_BIAS + SA(MINFRAME) + O2_OFF]
1697	stx	%i3, [%sp + STACK_BIAS + SA(MINFRAME) + O3_OFF]
1698	stx	%i4, [%sp + STACK_BIAS + SA(MINFRAME) + O4_OFF]
1699	stx	%i5, [%sp + STACK_BIAS + SA(MINFRAME) + O5_OFF]
1700	stx	%i6, [%sp + STACK_BIAS + SA(MINFRAME) + O6_OFF]
1701	stx	%i7, [%sp + STACK_BIAS + SA(MINFRAME) + O7_OFF]
1702	stn	%l1, [%sp + STACK_BIAS + SA(MINFRAME) + PC_OFF]
1703	stn	%l2, [%sp + STACK_BIAS + SA(MINFRAME) + NPC_OFF]
1704	st	%l3, [%sp + STACK_BIAS + SA(MINFRAME) + Y_OFF]
1705
1706	mov	%l4, %o3				! %o3 = on_panic_stack
1707	add	%sp, STACK_BIAS + SA(MINFRAME), %o2	! %o2 = &regs
1708	mov	%i1, %o1				! %o1 = alist
1709	call	panicsys				! panicsys();
1710	mov	%i0, %o0				! %o0 = format
1711	ret
1712	restore
1713
1714	SET_SIZE(vpanic)
1715
	! dtrace_vpanic: panic entry point usable from DTrace probe
	! context.  Mirrors vpanic()'s setup (see its block comment), but
	! uses the dtrace_* variants of flush_windows and panic_trigger,
	! then joins the common path at vpanic_common.
	ENTRY_NP(dtrace_vpanic)

	save	%sp, -SA(MINFRAME + REGSIZE), %sp	! save and allocate regs

	!
	! The v9 struct regs has a 64-bit r_tstate field, which we use here
	! to store the %ccr, %asi, %pstate, and %cwp as they would appear
	! in %tstate if a trap occurred.  We leave it up to the debugger to
	! realize what happened and extract the register values.
	!
	rd	%ccr, %l0				! %l0 = %ccr
	sllx	%l0, TSTATE_CCR_SHIFT, %l0		! %l0 <<= CCR_SHIFT
	rd	%asi, %l1				! %l1 = %asi
	sllx	%l1, TSTATE_ASI_SHIFT, %l1		! %l1 <<= ASI_SHIFT
	or	%l0, %l1, %l0				! %l0 |= %l1
	rdpr	%pstate, %l1				! %l1 = %pstate
	sllx	%l1, TSTATE_PSTATE_SHIFT, %l1		! %l1 <<= PSTATE_SHIFT
	or	%l0, %l1, %l0				! %l0 |= %l1
	rdpr	%cwp, %l1				! %l1 = %cwp
	sllx	%l1, TSTATE_CWP_SHIFT, %l1		! %l1 <<= CWP_SHIFT
	or	%l0, %l1, %l0				! %l0 |= %l1

	set	dtrace_vpanic, %l1			! %l1 = %pc (dtrace_vpanic)
	add	%l1, 4, %l2				! %l2 = %npc (dtrace_vpanic+4)
	rd	%y, %l3					! %l3 = %y
	!
	! Flush register windows before panic_trigger() in order to avoid a
	! problem that a dump hangs if flush_windows() causes another panic.
	!
	call	dtrace_flush_windows
	nop

	sethi	%hi(panic_quiesce), %o0
	call	dtrace_panic_trigger
	or	%o0, %lo(panic_quiesce), %o0		! if (!dtrace_panic_trigger(
							!     &panic_quiesce)) ...
	ba,a	vpanic_common				! continue in vpanic()
	SET_SIZE(dtrace_vpanic)

#endif	/* lint */
1756
#if defined(lint)

/*ARGSUSED*/

uint_t
get_subcc_ccr( uint64_t addrl, uint64_t addrr)
{ return (0); }

#else   /* lint */

	! get_subcc_ccr: perform (addrl - addrr) purely for its effect on
	! the condition codes, and return the resulting %ccr in %o0.
	ENTRY(get_subcc_ccr)
	wr	%g0, %ccr	! clear condition codes
	subcc	%o0, %o1, %g0	! set codes from addrl - addrr (result dropped)
	retl
	rd	%ccr, %o0	! return condition codes
	SET_SIZE(get_subcc_ccr)

#endif  /* lint */
1775
#if defined(lint) || defined(__lint)

ftrace_icookie_t
ftrace_interrupt_disable(void)
{ return (0); }

#else	/* lint */

	! ftrace_interrupt_disable: clear PSTATE_IE to mask interrupts
	! and return the prior %pstate in %o0 as an opaque cookie for a
	! later ftrace_interrupt_enable() call.
	ENTRY_NP(ftrace_interrupt_disable)
	rdpr	%pstate, %o0		! %o0 = old %pstate (return cookie)
	andn	%o0, PSTATE_IE, %o1	! %o1 = %pstate with IE cleared
	retl
	wrpr	%g0, %o1, %pstate	! delay slot: disable interrupts
	SET_SIZE(ftrace_interrupt_disable)

#endif	/* lint */
1792
#if defined(lint) || defined(__lint)

/*ARGSUSED*/
void
ftrace_interrupt_enable(ftrace_icookie_t cookie)
{}

#else

	! ftrace_interrupt_enable: restore the %pstate value previously
	! returned by ftrace_interrupt_disable() (cookie in %o0).
	ENTRY_NP(ftrace_interrupt_enable)
	retl
	wrpr	%g0, %o0, %pstate	! delay slot: %pstate = cookie
	SET_SIZE(ftrace_interrupt_enable)

#endif /* lint */
1808