xref: /titanic_41/usr/src/uts/intel/ia32/ml/i86_subr.s (revision 54925bf60766fbb4f1f2d7c843721406a7b7a3fb)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 *  Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
29 *  Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
30 *    All Rights Reserved
31 */
32
33#pragma ident	"%Z%%M%	%I%	%E% SMI"
34
35/*
36 * General assembly language routines.
37 * It is the intent of this file to contain routines that are
38 * independent of the specific kernel architecture, and those that are
39 * common across kernel architectures.
40 * As architectures diverge, and implementations of specific
41 * architecture-dependent routines change, the routines should be moved
42 * from this file into the respective ../`arch -k`/subr.s file.
43 */
44
45#include <sys/asm_linkage.h>
46#include <sys/asm_misc.h>
47#include <sys/panic.h>
48#include <sys/ontrap.h>
49#include <sys/regset.h>
50#include <sys/privregs.h>
51#include <sys/reboot.h>
52#include <sys/psw.h>
53#include <sys/x86_archext.h>
54
55#if defined(__lint)
56#include <sys/types.h>
57#include <sys/systm.h>
58#include <sys/thread.h>
59#include <sys/archsystm.h>
60#include <sys/byteorder.h>
61#include <sys/dtrace.h>
62#include <sys/ftrace.h>
63#else	/* __lint */
64#include "assym.h"
65#endif	/* __lint */
66#include <sys/dditypes.h>
67
68/*
69 * on_fault()
70 * Catch lofault faults. Like setjmp except it returns one
71 * if code following causes uncorrectable fault. Turned off
72 * by calling no_fault().
73 */
74
75#if defined(__lint)
76
77/* ARGSUSED */
78int
79on_fault(label_t *ljb)
80{ return (0); }
81
82void
83no_fault(void)
84{}
85
86#else	/* __lint */
87
88#if defined(__amd64)
89
	/*
	 * %rdi = ljb (label_t *).  Record the jump buffer in t_onfault and
	 * arm t_lofault with catch_fault, then tail-call setjmp so the
	 * initial on_fault() call returns 0 (%rdi still holds the buffer).
	 */
90	ENTRY(on_fault)
91	movq	%gs:CPU_THREAD, %rsi
92	leaq	catch_fault(%rip), %rdx
93	movq	%rdi, T_ONFAULT(%rsi)		/* jumpbuf in t_onfault */
94	movq	%rdx, T_LOFAULT(%rsi)		/* catch_fault in t_lofault */
95	jmp	setjmp				/* let setjmp do the rest */
96
	/* Fault path: trap handling redirects here through t_lofault. */
97catch_fault:
98	movq	%gs:CPU_THREAD, %rsi
99	movq	T_ONFAULT(%rsi), %rdi		/* address of save area */
100	xorl	%eax, %eax			/* %rax = 0 (NULL) */
101	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
102	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
103	jmp	longjmp				/* let longjmp do the rest */
104	SET_SIZE(on_fault)
105
	/* Disarm both t_onfault and t_lofault for curthread. */
106	ENTRY(no_fault)
107	movq	%gs:CPU_THREAD, %rsi
108	xorl	%eax, %eax
109	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
110	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
111	ret
112	SET_SIZE(no_fault)
113
114#elif defined(__i386)
115
	/* 4(%esp) = ljb (label_t *); same protocol as the amd64 version. */
116	ENTRY(on_fault)
117	movl	%gs:CPU_THREAD, %edx
118	movl	4(%esp), %eax			/* jumpbuf address */
119	leal	catch_fault, %ecx
120	movl	%eax, T_ONFAULT(%edx)		/* jumpbuf in t_onfault */
121	movl	%ecx, T_LOFAULT(%edx)		/* catch_fault in t_lofault */
122	jmp	setjmp				/* let setjmp do the rest */
123
	/* Fault path: trap handling redirects here through t_lofault. */
124catch_fault:
125	movl	%gs:CPU_THREAD, %edx
126	xorl	%eax, %eax
127	movl	T_ONFAULT(%edx), %ecx		/* address of save area */
128	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
129	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
130	pushl	%ecx
131	call	longjmp				/* let longjmp do the rest */
132	SET_SIZE(on_fault)
133
	/* Disarm both t_onfault and t_lofault for curthread. */
134	ENTRY(no_fault)
135	movl	%gs:CPU_THREAD, %edx
136	xorl	%eax, %eax
137	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
138	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
139	ret
140	SET_SIZE(no_fault)
141
142#endif	/* __i386 */
143#endif	/* __lint */
144
145/*
146 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
147 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
148 */
149
150#if defined(lint)
151
152void
153on_trap_trampoline(void)
154{}
155
156#else	/* __lint */
157
158#if defined(__amd64)
159
	/* Load &curthread->t_ontrap->ot_jmpbuf into %rdi, tail-call longjmp. */
160	ENTRY(on_trap_trampoline)
161	movq	%gs:CPU_THREAD, %rsi
162	movq	T_ONTRAP(%rsi), %rdi
163	addq	$OT_JMPBUF, %rdi
164	jmp	longjmp
165	SET_SIZE(on_trap_trampoline)
166
167#elif defined(__i386)
168
	/* Push &curthread->t_ontrap->ot_jmpbuf and call longjmp. */
169	ENTRY(on_trap_trampoline)
170	movl	%gs:CPU_THREAD, %eax
171	movl	T_ONTRAP(%eax), %eax
172	addl	$OT_JMPBUF, %eax
173	pushl	%eax
174	call	longjmp
175	SET_SIZE(on_trap_trampoline)
176
177#endif	/* __i386 */
178#endif	/* __lint */
179
180/*
181 * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
182 * more information about the on_trap() mechanism.  If the on_trap_data is the
183 * same as the topmost stack element, we just modify that element.
184 */
185#if defined(lint)
186
187/*ARGSUSED*/
188int
189on_trap(on_trap_data_t *otp, uint_t prot)
190{ return (0); }
191
192#else	/* __lint */
193
194#if defined(__amd64)
195
	/*
	 * %rdi = otp, %si = prot.  Initializes *otp, pushes it on the
	 * t_ontrap stack (unless it is already on top), then tail-calls
	 * setjmp on &otp->ot_jmpbuf: returns 0 now, and 1 when the
	 * trampoline longjmps back after a trap.
	 */
196	ENTRY(on_trap)
197	movw	%si, OT_PROT(%rdi)		/* ot_prot = prot */
198	movw	$0, OT_TRAP(%rdi)		/* ot_trap = 0 */
199	leaq	on_trap_trampoline(%rip), %rdx	/* rdx = &on_trap_trampoline */
200	movq	%rdx, OT_TRAMPOLINE(%rdi)	/* ot_trampoline = rdx */
201	xorl	%ecx, %ecx
202	movq	%rcx, OT_HANDLE(%rdi)		/* ot_handle = NULL */
203	movq	%rcx, OT_PAD1(%rdi)		/* ot_pad1 = NULL */
204	movq	%gs:CPU_THREAD, %rdx		/* rdx = curthread */
205	movq	T_ONTRAP(%rdx), %rcx		/* rcx = curthread->t_ontrap */
206	cmpq	%rdi, %rcx			/* if (otp == %rcx)	*/
207	je	0f				/*	don't modify t_ontrap */
208
209	movq	%rcx, OT_PREV(%rdi)		/* ot_prev = t_ontrap */
210	movq	%rdi, T_ONTRAP(%rdx)		/* curthread->t_ontrap = otp */
211
2120:	addq	$OT_JMPBUF, %rdi		/* &ot_jmpbuf */
213	jmp	setjmp
214	SET_SIZE(on_trap)
215
216#elif defined(__i386)
217
	/* 4(%esp) = otp, 8(%esp) = prot; same protocol as the amd64 version. */
218	ENTRY(on_trap)
219	movl	4(%esp), %eax			/* %eax = otp */
220	movl	8(%esp), %edx			/* %edx = prot */
221
222	movw	%dx, OT_PROT(%eax)		/* ot_prot = prot */
223	movw	$0, OT_TRAP(%eax)		/* ot_trap = 0 */
224	leal	on_trap_trampoline, %edx	/* %edx = &on_trap_trampoline */
225	movl	%edx, OT_TRAMPOLINE(%eax)	/* ot_trampoline = %edx */
226	movl	$0, OT_HANDLE(%eax)		/* ot_handle = NULL */
227	movl	$0, OT_PAD1(%eax)		/* ot_pad1 = NULL */
228	movl	%gs:CPU_THREAD, %edx		/* %edx = curthread */
229	movl	T_ONTRAP(%edx), %ecx		/* %ecx = curthread->t_ontrap */
230	cmpl	%eax, %ecx			/* if (otp == %ecx) */
231	je	0f				/*    don't modify t_ontrap */
232
233	movl	%ecx, OT_PREV(%eax)		/* ot_prev = t_ontrap */
234	movl	%eax, T_ONTRAP(%edx)		/* curthread->t_ontrap = otp */
235
2360:	addl	$OT_JMPBUF, %eax		/* %eax = &ot_jmpbuf */
237	movl	%eax, 4(%esp)			/* put %eax back on the stack */
238	jmp	setjmp				/* let setjmp do the rest */
239	SET_SIZE(on_trap)
240
241#endif	/* __i386 */
242#endif	/* __lint */
243
244/*
245 * Setjmp and longjmp implement non-local gotos using state vectors
246 * type label_t.
247 */
248
249#if defined(__lint)
250
251/* ARGSUSED */
252int
253setjmp(label_t *lp)
254{ return (0); }
255
256/* ARGSUSED */
257void
258longjmp(label_t *lp)
259{}
260
261#else	/* __lint */
262
263#if LABEL_PC != 0
264#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
265#endif	/* LABEL_PC != 0 */
266
267#if defined(__amd64)
268
	/*
	 * %rdi = lp.  Leaf routine: the return address is still at (%rsp).
	 * Save the SysV callee-saved registers, %rsp and the return PC into
	 * *lp, then return 0.
	 */
269	ENTRY(setjmp)
270	movq	%rsp, LABEL_SP(%rdi)
271	movq	%rbp, LABEL_RBP(%rdi)
272	movq	%rbx, LABEL_RBX(%rdi)
273	movq	%r12, LABEL_R12(%rdi)
274	movq	%r13, LABEL_R13(%rdi)
275	movq	%r14, LABEL_R14(%rdi)
276	movq	%r15, LABEL_R15(%rdi)
277	movq	(%rsp), %rdx		/* return address */
278	movq	%rdx, (%rdi)		/* LABEL_PC is 0 */
279	xorl	%eax, %eax		/* return 0 */
280	ret
281	SET_SIZE(setjmp)
282
	/*
	 * %rdi = lp.  Restore the state saved by setjmp, plug the saved PC
	 * into the restored stack's return slot, and return 1 — i.e. the
	 * original setjmp call appears to return a second time with 1.
	 */
283	ENTRY(longjmp)
284	movq	LABEL_SP(%rdi), %rsp
285	movq	LABEL_RBP(%rdi), %rbp
286	movq	LABEL_RBX(%rdi), %rbx
287	movq	LABEL_R12(%rdi), %r12
288	movq	LABEL_R13(%rdi), %r13
289	movq	LABEL_R14(%rdi), %r14
290	movq	LABEL_R15(%rdi), %r15
291	movq	(%rdi), %rdx		/* return address; LABEL_PC is 0 */
292	movq	%rdx, (%rsp)
293	xorl	%eax, %eax
294	incl	%eax			/* return 1 */
295	ret
296	SET_SIZE(longjmp)
297
298#elif defined(__i386)
299
	/*
	 * 4(%esp) = lp.  Save the callee-saved registers, %esp and the
	 * return PC into *lp, then return 0.
	 */
300	ENTRY(setjmp)
301	movl	4(%esp), %edx		/* address of save area */
302	movl	%ebp, LABEL_EBP(%edx)
303	movl	%ebx, LABEL_EBX(%edx)
304	movl	%esi, LABEL_ESI(%edx)
305	movl	%edi, LABEL_EDI(%edx)
306	movl	%esp, 4(%edx)
307	movl	(%esp), %ecx		/* %eip (return address) */
308	movl	%ecx, (%edx)		/* LABEL_PC is 0 */
309	xorl	%eax, %eax		/* return 0 */
310	ret
311	SET_SIZE(setjmp)
312
	/*
	 * 4(%esp) = lp.  Restore the saved state and resume at the saved
	 * PC (indirect jump) with %eax = 1.
	 */
313	ENTRY(longjmp)
314	movl	4(%esp), %edx		/* address of save area */
315	movl	LABEL_EBP(%edx), %ebp
316	movl	LABEL_EBX(%edx), %ebx
317	movl	LABEL_ESI(%edx), %esi
318	movl	LABEL_EDI(%edx), %edi
319	movl	4(%edx), %esp
320	movl	(%edx), %ecx		/* %eip (return addr); LABEL_PC is 0 */
321	movl	$1, %eax
322	addl	$4, %esp		/* pop ret adr */
323	jmp	*%ecx			/* indirect */
324	SET_SIZE(longjmp)
325
326#endif	/* __i386 */
327#endif	/* __lint */
328
329/*
330 * if a() calls b() calls caller(),
331 * caller() returns return address in a().
332 * (Note: We assume a() and b() are C routines which do the normal entry/exit
333 *  sequence.)
334 */
335
336#if defined(__lint)
337
338caddr_t
339caller(void)
340{ return (0); }
341
342#else	/* __lint */
343
344#if defined(__amd64)
345
	/* %rbp is still b()'s frame pointer; its saved return PC is at 8(%rbp). */
346	ENTRY(caller)
347	movq	8(%rbp), %rax		/* b()'s return pc, in a() */
348	ret
349	SET_SIZE(caller)
350
351#elif defined(__i386)
352
	/* %ebp is still b()'s frame pointer; its saved return PC is at 4(%ebp). */
353	ENTRY(caller)
354	movl	4(%ebp), %eax		/* b()'s return pc, in a() */
355	ret
356	SET_SIZE(caller)
357
358#endif	/* __i386 */
359#endif	/* __lint */
360
361/*
362 * if a() calls callee(), callee() returns the
363 * return address in a();
364 */
365
366#if defined(__lint)
367
368caddr_t
369callee(void)
370{ return (0); }
371
372#else	/* __lint */
373
374#if defined(__amd64)
375
	/* Leaf routine: our own return address is at the top of the stack. */
376	ENTRY(callee)
377	movq	(%rsp), %rax		/* callee()'s return pc, in a() */
378	ret
379	SET_SIZE(callee)
380
381#elif defined(__i386)
382
	/* Leaf routine: our own return address is at the top of the stack. */
383	ENTRY(callee)
384	movl	(%esp), %eax		/* callee()'s return pc, in a() */
385	ret
386	SET_SIZE(callee)
387
388#endif	/* __i386 */
389#endif	/* __lint */
390
391/*
392 * return the current frame pointer
393 */
394
395#if defined(__lint)
396
397greg_t
398getfp(void)
399{ return (0); }
400
401#else	/* __lint */
402
403#if defined(__amd64)
404
	/* Return the caller's %rbp (unchanged by this leaf routine). */
405	ENTRY(getfp)
406	movq	%rbp, %rax
407	ret
408	SET_SIZE(getfp)
409
410#elif defined(__i386)
411
	/* Return the caller's %ebp (unchanged by this leaf routine). */
412	ENTRY(getfp)
413	movl	%ebp, %eax
414	ret
415	SET_SIZE(getfp)
416
417#endif	/* __i386 */
418#endif	/* __lint */
419
420/*
421 * Invalidate a single page table entry in the TLB
422 */
423
424#if defined(__lint)
425
426/* ARGSUSED */
427void
428mmu_tlbflush_entry(caddr_t m)
429{}
430
431#else	/* __lint */
432
433#if defined(__amd64)
434
	/* %rdi = virtual address; invlpg drops its TLB entry on this CPU. */
435	ENTRY(mmu_tlbflush_entry)
436	invlpg	(%rdi)
437	ret
438	SET_SIZE(mmu_tlbflush_entry)
439
440#elif defined(__i386)
441
	/* 4(%esp) = virtual address; invlpg drops its TLB entry on this CPU. */
442	ENTRY(mmu_tlbflush_entry)
443	movl	4(%esp), %eax
444	invlpg	(%eax)
445	ret
446	SET_SIZE(mmu_tlbflush_entry)
447
448#endif	/* __i386 */
449#endif	/* __lint */
450
451
452/*
453 * Get/Set the value of various control registers
454 */
455
456#if defined(__lint)
457
458ulong_t
459getcr0(void)
460{ return (0); }
461
462/* ARGSUSED */
463void
464setcr0(ulong_t value)
465{}
466
467ulong_t
468getcr2(void)
469{ return (0); }
470
471ulong_t
472getcr3(void)
473{ return (0); }
474
475/* ARGSUSED */
476void
477setcr3(ulong_t val)
478{}
479
480void
481reload_cr3(void)
482{}
483
484ulong_t
485getcr4(void)
486{ return (0); }
487
488/* ARGSUSED */
489void
490setcr4(ulong_t val)
491{}
492
493#if defined(__amd64)
494
495ulong_t
496getcr8(void)
497{ return (0); }
498
499/* ARGSUSED */
500void
501setcr8(ulong_t val)
502{}
503
504#endif	/* __amd64 */
505
506#else	/* __lint */
507
508#if defined(__amd64)
509
	/*
	 * Trivial accessors: value in/out via the normal SysV argument and
	 * return registers (%rdi / %rax).
	 */
510	ENTRY(getcr0)
511	movq	%cr0, %rax
512	ret
513	SET_SIZE(getcr0)
514
515	ENTRY(setcr0)
516	movq	%rdi, %cr0
517	ret
518	SET_SIZE(setcr0)
519
520        ENTRY(getcr2)
521        movq    %cr2, %rax
522        ret
523	SET_SIZE(getcr2)
524
525	ENTRY(getcr3)
526	movq    %cr3, %rax
527	ret
528	SET_SIZE(getcr3)
529
530        ENTRY(setcr3)
531        movq    %rdi, %cr3
532        ret
533	SET_SIZE(setcr3)
534
	/*
	 * Rewrite %cr3 with its current value (a mov to %cr3 architecturally
	 * invalidates non-global TLB entries; see the Intel SDM).
	 */
535	ENTRY(reload_cr3)
536	movq	%cr3, %rdi
537	movq	%rdi, %cr3
538	ret
539	SET_SIZE(reload_cr3)
540
541	ENTRY(getcr4)
542	movq	%cr4, %rax
543	ret
544	SET_SIZE(getcr4)
545
546	ENTRY(setcr4)
547	movq	%rdi, %cr4
548	ret
549	SET_SIZE(setcr4)
550
	/* %cr8 (task-priority register) exists only in 64-bit mode. */
551	ENTRY(getcr8)
552	movq	%cr8, %rax
553	ret
554	SET_SIZE(getcr8)
555
556	ENTRY(setcr8)
557	movq	%rdi, %cr8
558	ret
559	SET_SIZE(setcr8)
560
561#elif defined(__i386)
562
	/* Trivial accessors: value in at 4(%esp), out via %eax. */
563        ENTRY(getcr0)
564        movl    %cr0, %eax
565        ret
566	SET_SIZE(getcr0)
567
568        ENTRY(setcr0)
569        movl    4(%esp), %eax
570        movl    %eax, %cr0
571        ret
572	SET_SIZE(setcr0)
573
574        ENTRY(getcr2)
575        movl    %cr2, %eax
576        ret
577	SET_SIZE(getcr2)
578
579	ENTRY(getcr3)
580	movl    %cr3, %eax
581	ret
582	SET_SIZE(getcr3)
583
584        ENTRY(setcr3)
585        movl    4(%esp), %eax
586        movl    %eax, %cr3
587        ret
588	SET_SIZE(setcr3)
589
	/* Rewrite %cr3 with its current value (flushes non-global TLB entries). */
590	ENTRY(reload_cr3)
591	movl    %cr3, %eax
592	movl    %eax, %cr3
593	ret
594	SET_SIZE(reload_cr3)
595
596	ENTRY(getcr4)
597	movl    %cr4, %eax
598	ret
599	SET_SIZE(getcr4)
600
601        ENTRY(setcr4)
602        movl    4(%esp), %eax
603        movl    %eax, %cr4
604        ret
605	SET_SIZE(setcr4)
606
607#endif	/* __i386 */
608#endif	/* __lint */
609
610#if defined(__lint)
611
612/*ARGSUSED*/
613uint32_t
614__cpuid_insn(struct cpuid_regs *regs)
615{ return (0); }
616
617#else	/* __lint */
618
619#if defined(__amd64)
620
	/*
	 * %rdi = regs.  Execute cpuid with the leaf/subleaf values loaded
	 * from *regs, store all four result registers back into *regs, and
	 * return regs->cp_eax (left in %eax).  %rbx (callee-saved) plus the
	 * caller's %rcx/%rdx are preserved via %r8/%r9/%r11.
	 */
621	ENTRY(__cpuid_insn)
622	movq	%rbx, %r8
623	movq	%rcx, %r9
624	movq	%rdx, %r11
625	movl	(%rdi), %eax		/* %eax = regs->cp_eax */
626	movl	0x4(%rdi), %ebx		/* %ebx = regs->cp_ebx */
627	movl	0x8(%rdi), %ecx		/* %ecx = regs->cp_ecx */
628	movl	0xc(%rdi), %edx		/* %edx = regs->cp_edx */
629	cpuid
630	movl	%eax, (%rdi)		/* regs->cp_eax = %eax */
631	movl	%ebx, 0x4(%rdi)		/* regs->cp_ebx = %ebx */
632	movl	%ecx, 0x8(%rdi)		/* regs->cp_ecx = %ecx */
633	movl	%edx, 0xc(%rdi)		/* regs->cp_edx = %edx */
634	movq	%r8, %rbx
635	movq	%r9, %rcx
636	movq	%r11, %rdx
637	ret
638	SET_SIZE(__cpuid_insn)
639
640#elif defined(__i386)
641
	/*
	 * 8(%esp) after the push = regs.  Same protocol as the amd64
	 * version; %ebp/%ebx/%ecx/%edx are preserved on the stack.
	 */
        ENTRY(__cpuid_insn)
643	pushl	%ebp
644	movl	0x8(%esp), %ebp		/* %ebp = regs */
645	pushl	%ebx
646	pushl	%ecx
647	pushl	%edx
648	movl	(%ebp), %eax		/* %eax = regs->cp_eax */
649	movl	0x4(%ebp), %ebx		/* %ebx = regs->cp_ebx */
650	movl	0x8(%ebp), %ecx		/* %ecx = regs->cp_ecx */
651	movl	0xc(%ebp), %edx		/* %edx = regs->cp_edx */
652	cpuid
653	movl	%eax, (%ebp)		/* regs->cp_eax = %eax */
654	movl	%ebx, 0x4(%ebp)		/* regs->cp_ebx = %ebx */
655	movl	%ecx, 0x8(%ebp)		/* regs->cp_ecx = %ecx */
656	movl	%edx, 0xc(%ebp)		/* regs->cp_edx = %edx */
657	popl	%edx
658	popl	%ecx
659	popl	%ebx
660	popl	%ebp
661	ret
662	SET_SIZE(__cpuid_insn)
663
664#endif	/* __i386 */
665#endif	/* __lint */
666
667#if defined(__lint)
668
669/*ARGSUSED*/
670void
671i86_monitor(volatile uint32_t *addr, uint32_t extensions, uint32_t hints)
672{ return; }
673
674#else   /* __lint */
675
676#if defined(__amd64)
677
	/*
	 * monitor expects %rax = addr, %rcx = extensions, %rdx = hints;
	 * the instruction is emitted as raw bytes (the assembler may not
	 * know the monitor mnemonic).
	 */
678	ENTRY_NP(i86_monitor)
679	pushq	%rbp
680	movq	%rsp, %rbp
681	movq	%rdi, %rax		/* addr */
682	movq	%rsi, %rcx		/* extensions */
683	/* rdx contains input arg3: hints */
684	.byte	0x0f, 0x01, 0xc8	/* monitor */
685	leave
686	ret
687	SET_SIZE(i86_monitor)
688
689#elif defined(__i386)
690
	/* Stack args -> %eax/%ecx/%edx as monitor expects. */
ENTRY_NP(i86_monitor)
692	pushl	%ebp
693	movl	%esp, %ebp
694	movl	0x8(%ebp),%eax		/* addr */
695	movl	0xc(%ebp),%ecx		/* extensions */
696	movl	0x10(%ebp),%edx		/* hints */
697	.byte	0x0f, 0x01, 0xc8	/* monitor */
698	leave
699	ret
700	SET_SIZE(i86_monitor)
701
702#endif	/* __i386 */
703#endif	/* __lint */
704
705#if defined(__lint)
706
707/*ARGSUSED*/
708void
709i86_mwait(uint32_t data, uint32_t extensions)
710{ return; }
711
712#else	/* __lint */
713
714#if defined(__amd64)
715
	/*
	 * mwait expects %rax = data (hints), %rcx = extensions; emitted as
	 * raw bytes like monitor above.
	 */
716	ENTRY_NP(i86_mwait)
717	pushq	%rbp
718	movq	%rsp, %rbp
719	movq	%rdi, %rax		/* data */
720	movq	%rsi, %rcx		/* extensions */
721	.byte	0x0f, 0x01, 0xc9	/* mwait */
722	leave
723	ret
724	SET_SIZE(i86_mwait)
725
726#elif defined(__i386)
727
	/* Stack args -> %eax/%ecx as mwait expects. */
728	ENTRY_NP(i86_mwait)
729	pushl	%ebp
730	movl	%esp, %ebp
731	movl	0x8(%ebp),%eax		/* data */
732	movl	0xc(%ebp),%ecx		/* extensions */
733	.byte	0x0f, 0x01, 0xc9	/* mwait */
734	leave
735	ret
736	SET_SIZE(i86_mwait)
737
738#endif	/* __i386 */
739#endif	/* __lint */
740
741#if defined(__lint)
742
743hrtime_t
744tsc_read(void)
745{
746	return (0);
747}
748
749#else	/* __lint */
750
	/*
	 * Return the 64-bit time-stamp counter.  rdtsc yields %edx:%eax;
	 * on amd64 the halves are folded into %rax, while on i386 the
	 * %edx:%eax pair is already the 64-bit return convention.
	 */
751	ENTRY_NP(tsc_read)
752	rdtsc
753#if defined(__amd64)
754	shlq	$32, %rdx
755	orq	%rdx, %rax
756#endif
757	ret
758	SET_SIZE(tsc_read)
759
760#endif	/* __lint */
761
762/*
763 * Insert entryp after predp in a doubly linked list.
764 */
765
766#if defined(__lint)
767
768/*ARGSUSED*/
769void
770_insque(caddr_t entryp, caddr_t predp)
771{}
772
773#else	/* __lint */
774
775#if defined(__amd64)
776
	/*
	 * %rdi = entryp, %rsi = predp.  Each element is {forw, back}
	 * pointers; CPTRSIZE is the offset of the back pointer.
	 */
777	ENTRY(_insque)
778	movq	(%rsi), %rax		/* predp->forw 			*/
779	movq	%rsi, CPTRSIZE(%rdi)	/* entryp->back = predp		*/
780	movq	%rax, (%rdi)		/* entryp->forw = predp->forw	*/
781	movq	%rdi, (%rsi)		/* predp->forw = entryp		*/
782	movq	%rdi, CPTRSIZE(%rax)	/* predp->forw->back = entryp	*/
783	ret
784	SET_SIZE(_insque)
785
786#elif defined(__i386)
787
	/* 4(%esp) = entryp, 8(%esp) = predp; same layout as above. */
788	ENTRY(_insque)
789	movl	8(%esp), %edx
790	movl	4(%esp), %ecx
791	movl	(%edx), %eax		/* predp->forw			*/
792	movl	%edx, CPTRSIZE(%ecx)	/* entryp->back = predp		*/
793	movl	%eax, (%ecx)		/* entryp->forw = predp->forw	*/
794	movl	%ecx, (%edx)		/* predp->forw = entryp		*/
795	movl	%ecx, CPTRSIZE(%eax)	/* predp->forw->back = entryp	*/
796	ret
797	SET_SIZE(_insque)
798
799#endif	/* __i386 */
800#endif	/* __lint */
801
802/*
803 * Remove entryp from a doubly linked list
804 */
805
806#if defined(__lint)
807
808/*ARGSUSED*/
809void
810_remque(caddr_t entryp)
811{}
812
813#else	/* __lint */
814
815#if defined(__amd64)
816
	/* %rdi = entryp; unlink it by joining its neighbours. */
817	ENTRY(_remque)
818	movq	(%rdi), %rax		/* entry->forw */
819	movq	CPTRSIZE(%rdi), %rdx	/* entry->back */
820	movq	%rax, (%rdx)		/* entry->back->forw = entry->forw */
821	movq	%rdx, CPTRSIZE(%rax)	/* entry->forw->back = entry->back */
822	ret
823	SET_SIZE(_remque)
824
825#elif defined(__i386)
826
	/* 4(%esp) = entryp; unlink it by joining its neighbours. */
827	ENTRY(_remque)
828	movl	4(%esp), %ecx
829	movl	(%ecx), %eax		/* entry->forw */
830	movl	CPTRSIZE(%ecx), %edx	/* entry->back */
831	movl	%eax, (%edx)		/* entry->back->forw = entry->forw */
832	movl	%edx, CPTRSIZE(%eax)	/* entry->forw->back = entry->back */
833	ret
834	SET_SIZE(_remque)
835
836#endif	/* __i386 */
837#endif	/* __lint */
838
839/*
840 * Returns the number of
841 * non-NULL bytes in string argument.
842 */
843
844#if defined(__lint)
845
846/* ARGSUSED */
847size_t
848strlen(const char *str)
849{ return (0); }
850
851#else	/* __lint */
852
853#if defined(__amd64)
854
855/*
856 * This is close to a simple transliteration of a C version of this
857 * routine.  We should either just -make- this be a C version, or
858 * justify having it in assembler by making it significantly faster.
859 *
860 * size_t
861 * strlen(const char *s)
862 * {
863 *	const char *s0;
864 * #if defined(DEBUG)
865 *	if ((uintptr_t)s < KERNELBASE)
866 *		panic(.str_panic_msg);
867 * #endif
868 *	for (s0 = s; *s; s++)
869 *		;
870 *	return (s - s0);
871 * }
872 */
873
	/* %rdi = str; DEBUG kernels panic on a user-space address. */
874	ENTRY(strlen)
875#ifdef DEBUG
876	movq	postbootkernelbase(%rip), %rax
877	cmpq	%rax, %rdi
878	jae	str_valid
879	pushq	%rbp
880	movq	%rsp, %rbp
881	leaq	.str_panic_msg(%rip), %rdi
882	xorl	%eax, %eax
883	call	panic
884#endif	/* DEBUG */
885str_valid:
886	cmpb	$0, (%rdi)
887	movq	%rdi, %rax		/* %rax = start of string */
888	je	.null_found
889	.align	4
890.strlen_loop:
891	incq	%rdi
892	cmpb	$0, (%rdi)
893	jne	.strlen_loop
894.null_found:
895	subq	%rax, %rdi		/* length = &NUL - start */
896	movq	%rdi, %rax
897	ret
898	SET_SIZE(strlen)
899
900#elif defined(__i386)
901
	/* 4(%esp) = str; DEBUG kernels panic on a user-space address. */
902	ENTRY(strlen)
903#ifdef DEBUG
904	movl	postbootkernelbase, %eax
905	cmpl	%eax, 4(%esp)
906	jae	str_valid
907	pushl	%ebp
908	movl	%esp, %ebp
909	pushl	$.str_panic_msg
910	call	panic
911#endif /* DEBUG */
912
913str_valid:
914	movl	4(%esp), %eax		/* %eax = string address */
915	testl	$3, %eax		/* if %eax not word aligned */
916	jnz	.not_word_aligned	/* goto .not_word_aligned */
917	.align	4
	/*
	 * Word-at-a-time scan: ((w & 0x7f7f7f7f) + 0x7f7f7f7f) | w, masked
	 * with 0x80808080, has a clear bit 7 in exactly the byte positions
	 * where w holds a zero byte, so the result equals 0x80808080 iff
	 * the word contains no NUL.
	 */
918.word_aligned:
919	movl	(%eax), %edx		/* move 1 word from (%eax) to %edx */
920	movl	$0x7f7f7f7f, %ecx
921	andl	%edx, %ecx		/* %ecx = %edx & 0x7f7f7f7f */
922	addl	$4, %eax		/* next word */
923	addl	$0x7f7f7f7f, %ecx	/* %ecx += 0x7f7f7f7f */
924	orl	%edx, %ecx		/* %ecx |= %edx */
925	andl	$0x80808080, %ecx	/* %ecx &= 0x80808080 */
926	cmpl	$0x80808080, %ecx	/* if no null byte in this word */
927	je	.word_aligned		/* goto .word_aligned */
928	subl	$4, %eax		/* post-incremented */
929.not_word_aligned:
930	cmpb	$0, (%eax)		/* if a byte in (%eax) is null */
931	je	.null_found		/* goto .null_found */
932	incl	%eax			/* next byte */
933	testl	$3, %eax		/* if %eax not word aligned */
934	jnz	.not_word_aligned	/* goto .not_word_aligned */
935	jmp	.word_aligned		/* goto .word_aligned */
936	.align	4
937.null_found:
938	subl	4(%esp), %eax		/* %eax -= string address */
939	ret
940	SET_SIZE(strlen)
941
942#endif	/* __i386 */
943
944#ifdef DEBUG
945	.text
946.str_panic_msg:
947	.string "strlen: argument below kernelbase"
948#endif /* DEBUG */
949
950#endif	/* __lint */
951
952	/*
953	 * Berkeley 4.3 introduced symbolically named interrupt levels
954	 * as a way to deal with priority in a machine independent fashion.
955	 * Numbered priorities are machine specific, and should be
956	 * discouraged where possible.
957	 *
958	 * Note, for the machine specific priorities there are
959	 * examples listed for devices that use a particular priority.
960	 * It should not be construed that all devices of that
961	 * type should be at that priority.  It is currently where
962	 * the current devices fit into the priority scheme based
963	 * upon time criticalness.
964	 *
965	 * The underlying assumption of these assignments is that
966	 * IPL 10 is the highest level from which a device
967	 * routine can call wakeup.  Devices that interrupt from higher
968	 * levels are restricted in what they can do.  If they need
969	 * kernel services they should schedule a routine at a lower
970	 * level (via software interrupt) to do the required
971	 * processing.
972	 *
973	 * Examples of this higher usage:
974	 *	Level	Usage
975	 *	14	Profiling clock (and PROM uart polling clock)
976	 *	12	Serial ports
977	 *
978	 * The serial ports request lower level processing on level 6.
979	 *
980	 * Also, almost all splN routines (where N is a number or a
981	 * mnemonic) will do a RAISE(), on the assumption that they are
982	 * never used to lower our priority.
983	 * The exceptions are:
984	 *	spl8()		Because you can't be above 15 to begin with!
985	 *	splzs()		Because this is used at boot time to lower our
986	 *			priority, to allow the PROM to poll the uart.
987	 *	spl0()		Used to lower priority to 0.
988	 */
989
990#if defined(__lint)
991
992int spl0(void)		{ return (0); }
993int spl6(void)		{ return (0); }
994int spl7(void)		{ return (0); }
995int spl8(void)		{ return (0); }
996int splhigh(void)	{ return (0); }
997int splhi(void)		{ return (0); }
998int splzs(void)		{ return (0); }
999
1000/* ARGSUSED */
1001void
1002splx(int level)
1003{}
1004
1005#else	/* __lint */
1006
1007#if defined(__amd64)
1008
	/*
	 * SETPRI sets the priority unconditionally (via do_splx, so it may
	 * lower it); RAISE only ever raises (via splr).  Both tail-jump, so
	 * do_splx/splr return directly to the spl*() caller with the
	 * previous priority as the return value.
	 */
1009#define	SETPRI(level) \
1010	movl	$/**/level, %edi;	/* new priority */		\
1011	jmp	do_splx			/* redirect to do_splx */
1012
1013#define	RAISE(level) \
1014	movl	$/**/level, %edi;	/* new priority */		\
1015	jmp	splr			/* redirect to splr */
1016
1017#elif defined(__i386)
1018
	/* i386 flavors: stack calling convention, so call and clean up. */
1019#define	SETPRI(level) \
1020	pushl	$/**/level;	/* new priority */			\
1021	call	do_splx;	/* invoke common splx code */		\
1022	addl	$4, %esp;	/* unstack arg */			\
1023	ret
1024
1025#define	RAISE(level) \
1026	pushl	$/**/level;	/* new priority */			\
1027	call	splr;		/* invoke common splr code */		\
1028	addl	$4, %esp;	/* unstack args */			\
1029	ret
1030
1031#endif	/* __i386 */
1032
1033	/* locks out all interrupts, including memory errors */
1034	ENTRY(spl8)
1035	SETPRI(15)
1036	SET_SIZE(spl8)
1037
1038	/* just below the level that profiling runs */
1039	ENTRY(spl7)
1040	RAISE(13)
1041	SET_SIZE(spl7)
1042
1043	/* sun specific - highest priority onboard serial i/o asy ports */
1044	ENTRY(splzs)
1045	SETPRI(12)	/* Can't be a RAISE, as it's used to lower us */
1046	SET_SIZE(splzs)
1047
1048	ENTRY(splhi)
1049	ALTENTRY(splhigh)
1050	ALTENTRY(spl6)
1051	ALTENTRY(i_ddi_splhigh)
1052
1053	RAISE(DISP_LEVEL)
1054
1055	SET_SIZE(i_ddi_splhigh)
1056	SET_SIZE(spl6)
1057	SET_SIZE(splhigh)
1058	SET_SIZE(splhi)
1059
1060	/* allow all interrupts */
1061	ENTRY(spl0)
1062	SETPRI(0)
1063	SET_SIZE(spl0)
1064
1065
1066	/* splx implementation */
1067	ENTRY(splx)
1068	jmp	do_splx		/* redirect to common splx code */
1069	SET_SIZE(splx)
1070
1071#endif	/* __lint */
1072
1073#if defined(__i386)
1074
1075/*
1076 * Read and write the %gs register
1077 */
1078
1079#if defined(__lint)
1080
1081/*ARGSUSED*/
1082uint16_t
1083getgs(void)
1084{ return (0); }
1085
1086/*ARGSUSED*/
1087void
1088setgs(uint16_t sel)
1089{}
1090
1091#else	/* __lint */
1092
	/* Return the current %gs selector, zero-extended to 32 bits. */
1093	ENTRY(getgs)
1094	clr	%eax
1095	movw	%gs, %ax
1096	ret
1097	SET_SIZE(getgs)
1098
	/* Load %gs from the 16-bit selector at 4(%esp). */
1099	ENTRY(setgs)
1100	movw	4(%esp), %gs
1101	ret
1102	SET_SIZE(setgs)
1103
1104#endif	/* __lint */
1105#endif	/* __i386 */
1106
1107#if defined(__lint)
1108
1109void
1110pc_reset(void)
1111{}
1112
1113void
1114efi_reset(void)
1115{}
1116
1117#else	/* __lint */
1118
	/*
	 * Busy-wait roughly 500ms by calling tenmicrosec() 50000 times.
	 * Saves/restores %ebx around the loop counter.
	 */
1119	ENTRY(wait_500ms)
1120	push	%ebx
1121	movl	$50000, %ebx
11221:
1123	call	tenmicrosec
1124	decl	%ebx
1125	jnz	1b
1126	pop	%ebx
1127	ret
1128	SET_SIZE(wait_500ms)
1129
1130#define	RESET_METHOD_KBC	1
1131#define	RESET_METHOD_PORT92	2
1132#define RESET_METHOD_PCI	4
1133
	/* Bitmask of reset methods to attempt, in the order tried below. */
1134	DGDEF3(pc_reset_methods, 4, 8)
1135	.long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;
1136
	/*
	 * Attempt each enabled reset method in turn (keyboard controller,
	 * port 0x92, PCI 0xcf9), waiting ~500ms after each for the reset
	 * to take; fall through to efi_reset's triple-fault as a last
	 * resort.  Never returns.
	 */
1137	ENTRY(pc_reset)
1138
1139#if defined(__i386)
1140	testl	$RESET_METHOD_KBC, pc_reset_methods
1141#elif defined(__amd64)
1142	testl	$RESET_METHOD_KBC, pc_reset_methods(%rip)
1143#endif
1144	jz	1f
1145
1146	/
1147	/ Try the classic keyboard controller-triggered reset.
1148	/
1149	movw	$0x64, %dx
1150	movb	$0xfe, %al
1151	outb	(%dx)
1152
1153	/ Wait up to 500 milliseconds here for the keyboard controller
1154	/ to pull the reset line.  On some systems where the keyboard
1155	/ controller is slow to pull the reset line, the next reset method
1156	/ may be executed (which may be bad if those systems hang when the
1157	/ next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
1158	/ and Ferrari 4000 (doesn't like the cf9 reset method))
1159
1160	call	wait_500ms
1161
11621:
1163#if defined(__i386)
1164	testl	$RESET_METHOD_PORT92, pc_reset_methods
1165#elif defined(__amd64)
1166	testl	$RESET_METHOD_PORT92, pc_reset_methods(%rip)
1167#endif
1168	jz	3f
1169
1170	/
1171	/ Try port 0x92 fast reset
1172	/
1173	movw	$0x92, %dx
1174	inb	(%dx)
1175	cmpb	$0xff, %al	/ If port's not there, we should get back 0xFF
1176	je	1f
1177	testb	$1, %al		/ If bit 0
1178	jz	2f		/ is clear, jump to perform the reset
1179	andb	$0xfe, %al	/ otherwise,
1180	outb	(%dx)		/ clear bit 0 first, then
11812:
1182	orb	$1, %al		/ Set bit 0
1183	outb	(%dx)		/ and reset the system
11841:
1185
1186	call	wait_500ms
1187
11883:
1189#if defined(__i386)
1190	testl	$RESET_METHOD_PCI, pc_reset_methods
1191#elif defined(__amd64)
1192	testl	$RESET_METHOD_PCI, pc_reset_methods(%rip)
1193#endif
1194	jz	4f
1195
1196	/ Try the PCI (soft) reset vector (should work on all modern systems,
1197	/ but has been shown to cause problems on 450NX systems, and some newer
1198	/ systems (e.g. ATI IXP400-equipped systems))
1199	/ When resetting via this method, 2 writes are required.  The first
1200	/ targets bit 1 (0=hard reset without power cycle, 1=hard reset with
1201	/ power cycle).
1202	/ The reset occurs on the second write, during bit 2's transition from
1203	/ 0->1.
1204	movw	$0xcf9, %dx
1205	movb	$0x2, %al	/ Reset mode = hard, no power cycle
1206	outb	(%dx)
1207	movb	$0x6, %al
1208	outb	(%dx)
1209
1210	call	wait_500ms
1211
12124:
1213	/
1214	/ port 0xcf9 failed also.  Last-ditch effort is to
1215	/ triple-fault the CPU.
1216	/ Also, use triple fault for EFI firmware
1217	/
1218	ENTRY(efi_reset)
1219#if defined(__amd64)
1220	pushq	$0x0
1221	pushq	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
1222	lidt	(%rsp)
1223#elif defined(__i386)
1224	pushl	$0x0
1225	pushl	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
1226	lidt	(%esp)
1227#endif
1228	int	$0x0		/ Trigger interrupt, generate triple-fault
1229
1230	cli
1231	hlt			/ Wait forever
1232	/*NOTREACHED*/
1233	SET_SIZE(efi_reset)
1234	SET_SIZE(pc_reset)
1235
1236#endif	/* __lint */
1237
1238/*
1239 * C callable in and out routines
1240 */
1241
1242#if defined(__lint)
1243
1244/* ARGSUSED */
1245void
1246outl(int port_address, uint32_t val)
1247{}
1248
1249#else	/* __lint */
1250
1251#if defined(__amd64)
1252
	/* %di = port, %esi = val: write a 32-bit value to an I/O port. */
1253	ENTRY(outl)
1254	movw	%di, %dx
1255	movl	%esi, %eax
1256	outl	(%dx)
1257	ret
1258	SET_SIZE(outl)
1259
1260#elif defined(__i386)
1261
	/*
	 * Stack offsets of the two arguments; also used by the other
	 * i386 out*/in* routines below.
	 */
1262	.set	PORT, 4
1263	.set	VAL, 8
1264
1265	ENTRY(outl)
1266	movw	PORT(%esp), %dx
1267	movl	VAL(%esp), %eax
1268	outl	(%dx)
1269	ret
1270	SET_SIZE(outl)
1271
1272#endif	/* __i386 */
1273#endif	/* __lint */
1274
1275#if defined(__lint)
1276
1277/* ARGSUSED */
1278void
1279outw(int port_address, uint16_t val)
1280{}
1281
1282#else	/* __lint */
1283
1284#if defined(__amd64)
1285
	/* %di = port, %si = val: the D16 prefix turns outl into a 16-bit out. */
1286	ENTRY(outw)
1287	movw	%di, %dx
1288	movw	%si, %ax
1289	D16 outl (%dx)		/* XX64 why not outw? */
1290	ret
1291	SET_SIZE(outw)
1292
1293#elif defined(__i386)
1294
1295	ENTRY(outw)
1296	movw	PORT(%esp), %dx
1297	movw	VAL(%esp), %ax
1298	D16 outl (%dx)
1299	ret
1300	SET_SIZE(outw)
1301
1302#endif	/* __i386 */
1303#endif	/* __lint */
1304
1305#if defined(__lint)
1306
1307/* ARGSUSED */
1308void
1309outb(int port_address, uint8_t val)
1310{}
1311
1312#else	/* __lint */
1313
1314#if defined(__amd64)
1315
	/* %di = port, %sil = val: write an 8-bit value to an I/O port. */
1316	ENTRY(outb)
1317	movw	%di, %dx
1318	movb	%sil, %al
1319	outb	(%dx)
1320	ret
1321	SET_SIZE(outb)
1322
1323#elif defined(__i386)
1324
1325	ENTRY(outb)
1326	movw	PORT(%esp), %dx
1327	movb	VAL(%esp), %al
1328	outb	(%dx)
1329	ret
1330	SET_SIZE(outb)
1331
1332#endif	/* __i386 */
1333#endif	/* __lint */
1334
1335#if defined(__lint)
1336
1337/* ARGSUSED */
1338uint32_t
1339inl(int port_address)
1340{ return (0); }
1341
1342#else	/* __lint */
1343
1344#if defined(__amd64)
1345
	/* %di = port: read a 32-bit value; inl writes all of %eax. */
1346	ENTRY(inl)
1347	xorl	%eax, %eax
1348	movw	%di, %dx
1349	inl	(%dx)
1350	ret
1351	SET_SIZE(inl)
1352
1353#elif defined(__i386)
1354
	/* PORT is the stack offset .set up in the i386 outl code above. */
1355	ENTRY(inl)
1356	movw	PORT(%esp), %dx
1357	inl	(%dx)
1358	ret
1359	SET_SIZE(inl)
1360
1361#endif	/* __i386 */
1362#endif	/* __lint */
1363
1364#if defined(__lint)
1365
1366/* ARGSUSED */
1367uint16_t
1368inw(int port_address)
1369{ return (0); }
1370
1371#else	/* __lint */
1372
1373#if defined(__amd64)
1374
	/*
	 * %di = port: 16-bit read (D16 prefix on inl).  Pre-zero %eax since
	 * only %ax is written by the in.
	 */
1375	ENTRY(inw)
1376	xorl	%eax, %eax
1377	movw	%di, %dx
1378	D16 inl	(%dx)
1379	ret
1380	SET_SIZE(inw)
1381
1382#elif defined(__i386)
1383
1384	ENTRY(inw)
1385	xorl	%eax, %eax	/* zero %eax: the in only writes %ax */
1386	movw	PORT(%esp), %dx
1387	D16 inl	(%dx)
1388	ret
1389	SET_SIZE(inw)
1390
1391#endif	/* __i386 */
1392#endif	/* __lint */
1393
1394
1395#if defined(__lint)
1396
1397/* ARGSUSED */
1398uint8_t
1399inb(int port_address)
1400{ return (0); }
1401
1402#else	/* __lint */
1403
1404#if defined(__amd64)
1405
	/*
	 * %di = port: 8-bit read.  Pre-zero %eax since only %al is written
	 * by the in.
	 */
1406	ENTRY(inb)
1407	xorl	%eax, %eax
1408	movw	%di, %dx
1409	inb	(%dx)
1410	ret
1411	SET_SIZE(inb)
1412
1413#elif defined(__i386)
1414
1415	ENTRY(inb)
1416	xorl	%eax, %eax	/* zero %eax: the in only writes %al */
1417	movw	PORT(%esp), %dx
1418	inb	(%dx)
1419	ret
1420	SET_SIZE(inb)
1421
1422#endif	/* __i386 */
1423#endif	/* __lint */
1424
1425
#if defined(__lint)

/* ARGSUSED */
void
repoutsw(int port, uint16_t *addr, int cnt)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * repoutsw(port, addr, cnt): write cnt 16-bit words from addr
	 * to an I/O port.  %edi = port, %rsi = addr (consumed implicitly
	 * by outsw), %edx = cnt.
	 */
	ENTRY(repoutsw)
	movl	%edx, %ecx		/* %ecx = rep count */
	movw	%di, %dx		/* %dx = port */
	rep
	  D16 outsl			/* 0x66 prefix: rep outsw */
	ret
	SET_SIZE(repoutsw)

#elif defined(__i386)

	/*
	 * The arguments and saved registers are on the stack in the
	 *  following order:
	 *      |  cnt  |  +16
	 *      | *addr |  +12
	 *      | port  |  +8
	 *      |  eip  |  +4
	 *      |  esi  |  <-- %esp
	 * If additional values are pushed onto the stack, make sure
	 * to adjust the following constants accordingly.
	 */
	/*
	 * NOTE: these .set directives redefine PORT/ADDR/COUNT for the
	 * remainder of the file; routines above used the prior values.
	 */
	.set	PORT, 8
	.set	ADDR, 12
	.set	COUNT, 16

	ENTRY(repoutsw)
	pushl	%esi			/* preserve callee-saved %esi */
	movl	PORT(%esp), %edx
	movl	ADDR(%esp), %esi
	movl	COUNT(%esp), %ecx
	rep
	  D16 outsl
	popl	%esi
	ret
	SET_SIZE(repoutsw)

#endif	/* __i386 */
#endif	/* __lint */
1475
1476
#if defined(__lint)

/* ARGSUSED */
void
repinsw(int port_addr, uint16_t *addr, int cnt)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * repinsw(port, addr, cnt): read cnt 16-bit words from an I/O
	 * port into addr.  %edi = port, %rsi = addr, %edx = cnt.
	 * The ins instruction stores through ES:RDI, so the buffer
	 * pointer must be moved into %rdi (as repinsb/repinsd do);
	 * without that move the string store would target the port
	 * number as an address rather than the caller's buffer.
	 */
	ENTRY(repinsw)
	movl	%edx, %ecx		/* %ecx = rep count */
	movw	%di, %dx		/* %dx = port */
	movq	%rsi, %rdi		/* ins writes to ES:RDI */
	rep
	  D16 insl			/* 0x66 prefix: rep insw */
	ret
	SET_SIZE(repinsw)

#elif defined(__i386)

	ENTRY(repinsw)
	pushl	%edi			/* preserve callee-saved %edi */
	movl	PORT(%esp), %edx
	movl	ADDR(%esp), %edi	/* ins writes to ES:EDI */
	movl	COUNT(%esp), %ecx
	rep
	  D16 insl
	popl	%edi
	ret
	SET_SIZE(repinsw)

#endif	/* __i386 */
#endif	/* __lint */
1511
1512
#if defined(__lint)

/* ARGSUSED */
void
repinsb(int port, uint8_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * repinsb(port, addr, count): read count bytes from an I/O port
	 * into addr.  %edi = port, %rsi = addr, %edx = count; ins stores
	 * through ES:RDI, hence the movq of the buffer into %rdi.
	 */
	ENTRY(repinsb)
	movl	%edx, %ecx		/* %ecx = rep count */
	movw	%di, %dx		/* %dx = port */
	movq	%rsi, %rdi		/* ins writes to ES:RDI */
	rep
	  insb
	ret
	SET_SIZE(repinsb)

#elif defined(__i386)

	/*
	 * The arguments and saved registers are on the stack in the
	 *  following order:
	 *      |  cnt  |  +16
	 *      | *addr |  +12
	 *      | port  |  +8
	 *      |  eip  |  +4
	 *      |  edi  |  <-- %esp
	 * If additional values are pushed onto the stack, make sure
	 * to adjust the following constants accordingly.
	 */
	.set	IO_PORT, 8
	.set	IO_ADDR, 12
	.set	IO_COUNT, 16

	ENTRY(repinsb)
	pushl	%edi			/* preserve callee-saved %edi */
	movl	IO_ADDR(%esp), %edi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  insb
	popl	%edi
	ret
	SET_SIZE(repinsb)

#endif	/* __i386 */
#endif	/* __lint */
1563
1564
1565/*
1566 * Input a stream of 32-bit words.
1567 * NOTE: count is a DWORD count.
1568 */
1569#if defined(__lint)
1570
1571/* ARGSUSED */
1572void
1573repinsd(int port, uint32_t *addr, int count)
1574{}
1575
1576#else	/* __lint */
1577
1578#if defined(__amd64)
1579
1580	ENTRY(repinsd)
1581	movl	%edx, %ecx
1582	movw	%di, %dx
1583	movq	%rsi, %rdi
1584	rep
1585	  insl
1586	ret
1587	SET_SIZE(repinsd)
1588
1589#elif defined(__i386)
1590
1591	ENTRY(repinsd)
1592	pushl	%edi
1593	movl	IO_ADDR(%esp), %edi
1594	movl	IO_COUNT(%esp), %ecx
1595	movl	IO_PORT(%esp), %edx
1596	rep
1597	  insl
1598	popl	%edi
1599	ret
1600	SET_SIZE(repinsd)
1601
1602#endif	/* __i386 */
1603#endif	/* __lint */
1604
1605/*
1606 * Output a stream of bytes
1607 * NOTE: count is a byte count
1608 */
1609#if defined(__lint)
1610
1611/* ARGSUSED */
1612void
1613repoutsb(int port, uint8_t *addr, int count)
1614{}
1615
1616#else	/* __lint */
1617
1618#if defined(__amd64)
1619
1620	ENTRY(repoutsb)
1621	movl	%edx, %ecx
1622	movw	%di, %dx
1623	rep
1624	  outsb
1625	ret
1626	SET_SIZE(repoutsb)
1627
1628#elif defined(__i386)
1629
1630	ENTRY(repoutsb)
1631	pushl	%esi
1632	movl	IO_ADDR(%esp), %esi
1633	movl	IO_COUNT(%esp), %ecx
1634	movl	IO_PORT(%esp), %edx
1635	rep
1636	  outsb
1637	popl	%esi
1638	ret
1639	SET_SIZE(repoutsb)
1640
1641#endif	/* __i386 */
1642#endif	/* __lint */
1643
1644/*
1645 * Output a stream of 32-bit words
1646 * NOTE: count is a DWORD count
1647 */
1648#if defined(__lint)
1649
1650/* ARGSUSED */
1651void
1652repoutsd(int port, uint32_t *addr, int count)
1653{}
1654
1655#else	/* __lint */
1656
1657#if defined(__amd64)
1658
1659	ENTRY(repoutsd)
1660	movl	%edx, %ecx
1661	movw	%di, %dx
1662	rep
1663	  outsl
1664	ret
1665	SET_SIZE(repoutsd)
1666
1667#elif defined(__i386)
1668
1669	ENTRY(repoutsd)
1670	pushl	%esi
1671	movl	IO_ADDR(%esp), %esi
1672	movl	IO_COUNT(%esp), %ecx
1673	movl	IO_PORT(%esp), %edx
1674	rep
1675	  outsl
1676	popl	%esi
1677	ret
1678	SET_SIZE(repoutsd)
1679
1680#endif	/* __i386 */
1681#endif	/* __lint */
1682
1683/*
1684 * void int3(void)
1685 * void int18(void)
1686 * void int20(void)
1687 */
1688
1689#if defined(__lint)
1690
1691void
1692int3(void)
1693{}
1694
1695void
1696int18(void)
1697{}
1698
1699void
1700int20(void)
1701{}
1702
1703#else	/* __lint */
1704
1705	ENTRY(int3)
1706	int	$T_BPTFLT
1707	ret
1708	SET_SIZE(int3)
1709
1710	ENTRY(int18)
1711	int	$T_MCE
1712	ret
1713	SET_SIZE(int18)
1714
1715	ENTRY(int20)
1716	movl	boothowto, %eax
1717	andl	$RB_DEBUG, %eax
1718	jz	1f
1719
1720	int	$T_DBGENTR
17211:
1722	rep;	ret	/* use 2 byte return instruction when branch target */
1723			/* AMD Software Optimization Guide - Section 6.2 */
1724	SET_SIZE(int20)
1725
1726#endif	/* __lint */
1727
#if defined(__lint)

/* ARGSUSED */
int
scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * scanc(size, cp, table, mask): scan cp[0..size-1] until a byte
	 * with (table[*cp] & mask) != 0 is found; return the number of
	 * bytes remaining (counting the matching byte), or 0 if none
	 * matched.
	 */
	ENTRY(scanc)
					/* rdi == size */
					/* rsi == cp */
					/* rdx == table */
					/* rcx == mask */
	addq	%rsi, %rdi		/* end = &cp[size] */
.scanloop:
	cmpq	%rdi, %rsi		/* while (cp < end */
	jnb	.scandone
	movzbq	(%rsi), %r8		/* %r8 = *cp */
	incq	%rsi			/* cp++ */
	testb	%cl, (%r8, %rdx)
	jz	.scanloop		/*  && (table[*cp] & mask) == 0) */
	decq	%rsi			/* (fix post-increment) */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	ret
	SET_SIZE(scanc)

#elif defined(__i386)

	ENTRY(scanc)
	pushl	%edi
	pushl	%esi
	movb	24(%esp), %cl		/* mask = %cl */
	movl	16(%esp), %esi		/* cp = %esi */
	movl	20(%esp), %edx		/* table = %edx */
	movl	%esi, %edi
	addl	12(%esp), %edi		/* end = &cp[size]; */
.scanloop:
	cmpl	%edi, %esi		/* while (cp < end */
	jnb	.scandone
	movzbl	(%esi),  %eax		/* %al = *cp */
	incl	%esi			/* cp++ */
	movb	(%edx,  %eax), %al	/* %al = table[*cp] */
	testb	%al, %cl
	jz	.scanloop		/*   && (table[*cp] & mask) == 0) */
	dec	%esi			/* post-incremented */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	popl	%esi
	popl	%edi
	ret
	SET_SIZE(scanc)

#endif	/* __i386 */
#endif	/* __lint */
1788
1789/*
1790 * Replacement functions for ones that are normally inlined.
1791 * In addition to the copy in i86.il, they are defined here just in case.
1792 */
1793
1794#if defined(__lint)
1795
1796ulong_t
1797intr_clear(void)
1798{ return (0); }
1799
1800ulong_t
1801clear_int_flag(void)
1802{ return (0); }
1803
1804#else	/* __lint */
1805
1806#if defined(__amd64)
1807
1808	ENTRY(intr_clear)
1809	ENTRY(clear_int_flag)
1810	pushfq
1811	popq	%rax
1812	CLI(%rdi)
1813	ret
1814	SET_SIZE(clear_int_flag)
1815	SET_SIZE(intr_clear)
1816
1817#elif defined(__i386)
1818
1819	ENTRY(intr_clear)
1820	ENTRY(clear_int_flag)
1821	pushfl
1822	popl	%eax
1823	CLI(%edx)
1824	ret
1825	SET_SIZE(clear_int_flag)
1826	SET_SIZE(intr_clear)
1827
1828#endif	/* __i386 */
1829#endif	/* __lint */
1830
#if defined(__lint)

struct cpu *
curcpup(void)
{ return 0; }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Return the current CPU's cpu_t, read via the per-CPU %gs
	 * segment at the CPU_SELF offset.
	 */
	ENTRY(curcpup)
	movq	%gs:CPU_SELF, %rax
	ret
	SET_SIZE(curcpup)

#elif defined(__i386)

	ENTRY(curcpup)
	movl	%gs:CPU_SELF, %eax
	ret
	SET_SIZE(curcpup)

#endif	/* __i386 */
#endif	/* __lint */
1855
#if defined(__lint)

/* ARGSUSED */
uint32_t
htonl(uint32_t i)
{ return (0); }

/* ARGSUSED */
uint32_t
ntohl(uint32_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * 32-bit byte-order swap.  htonl and ntohl are the same
	 * operation on little-endian x86, hence the shared body
	 * via ALTENTRY.
	 */
	/* XX64 there must be shorter sequences for this */
	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	%edi, %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

#elif defined(__i386)

	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	4(%esp), %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

#endif	/* __i386 */
#endif	/* __lint */
1893
#if defined(__lint)

/* ARGSUSED */
uint16_t
htons(uint16_t i)
{ return (0); }

/* ARGSUSED */
uint16_t
ntohs(uint16_t i)
{ return (0); }


#else	/* __lint */

#if defined(__amd64)

	/*
	 * 16-bit byte-order swap: bswap the 32-bit register, then shift
	 * the swapped halfword down into the low 16 bits.
	 */
	/* XX64 there must be better sequences for this */
	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	%edi, %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)

#elif defined(__i386)

	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	4(%esp), %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)

#endif	/* __i386 */
#endif	/* __lint */
1934
1935
#if defined(__lint)

/* ARGSUSED */
void
intr_restore(ulong_t i)
{ return; }

/* ARGSUSED */
void
restore_int_flag(ulong_t i)
{ return; }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Restore the flags register (and thus the interrupt flag)
	 * from a value previously returned by intr_clear().
	 */
	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	pushq	%rdi
	popfq
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

#elif defined(__i386)

	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

#endif	/* __i386 */
#endif	/* __lint */
1973
#if defined(__lint)

void
sti(void)
{}

void
cli(void)
{}

#else	/* __lint */

	/* Enable maskable interrupts (STI macro from the asm headers). */
	ENTRY(sti)
	STI
	ret
	SET_SIZE(sti)

	/* Disable maskable interrupts; CLI macro takes a scratch reg. */
	ENTRY(cli)
#if defined(__amd64)
	CLI(%rax)
#elif defined(__i386)
	CLI(%eax)
#endif	/* __i386 */
	ret
	SET_SIZE(cli)

#endif	/* __lint */
2001
#if defined(__lint)

dtrace_icookie_t
dtrace_interrupt_disable(void)
{ return (0); }

#else   /* __lint */

#if defined(__amd64)

	/*
	 * Return the current flags register as the cookie and disable
	 * maskable interrupts (same shape as intr_clear()).
	 */
	ENTRY(dtrace_interrupt_disable)
	pushfq
	popq	%rax			/* %rax = cookie (saved rflags) */
	CLI(%rdx)
	ret
	SET_SIZE(dtrace_interrupt_disable)

#elif defined(__i386)

	ENTRY(dtrace_interrupt_disable)
	pushfl
	popl	%eax
	CLI(%edx)
	ret
	SET_SIZE(dtrace_interrupt_disable)

#endif	/* __i386 */
#endif	/* __lint */
2030
#if defined(__lint)

/*ARGSUSED*/
void
dtrace_interrupt_enable(dtrace_icookie_t cookie)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Restore the flags register from the cookie returned by
	 * dtrace_interrupt_disable().
	 */
	ENTRY(dtrace_interrupt_enable)
	pushq	%rdi
	popfq
	ret
	SET_SIZE(dtrace_interrupt_enable)

#elif defined(__i386)

	ENTRY(dtrace_interrupt_enable)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	SET_SIZE(dtrace_interrupt_enable)

#endif	/* __i386 */
#endif	/* __lint */
2059
2060
#if defined(__lint)

void
dtrace_membar_producer(void)
{}

void
dtrace_membar_consumer(void)
{}

#else	/* __lint */

	/*
	 * DTrace memory barriers.  Both assemble to a bare return --
	 * no fence instruction is issued; presumably the x86 memory
	 * ordering guarantees are relied upon here (verify against
	 * the DTrace consumers).
	 * (Guard normalized from "lint" to "__lint" to match the rest
	 * of this file.)
	 */
	ENTRY(dtrace_membar_producer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_producer)

	ENTRY(dtrace_membar_consumer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_consumer)

#endif	/* __lint */
2084
#if defined(__lint)

kthread_id_t
threadp(void)
{ return ((kthread_id_t)0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Return the current thread pointer, read via the per-CPU %gs
	 * segment at the CPU_THREAD offset.
	 */
	ENTRY(threadp)
	movq	%gs:CPU_THREAD, %rax
	ret
	SET_SIZE(threadp)

#elif defined(__i386)

	ENTRY(threadp)
	movl	%gs:CPU_THREAD, %eax
	ret
	SET_SIZE(threadp)

#endif	/* __i386 */
#endif	/* __lint */
2109
2110/*
2111 *   Checksum routine for Internet Protocol Headers
2112 */
2113
2114#if defined(__lint)
2115
2116/* ARGSUSED */
2117unsigned int
2118ip_ocsum(
2119	ushort_t *address,	/* ptr to 1st message buffer */
2120	int halfword_count,	/* length of data */
2121	unsigned int sum)	/* partial checksum */
2122{
2123	int		i;
2124	unsigned int	psum = 0;	/* partial sum */
2125
2126	for (i = 0; i < halfword_count; i++, address++) {
2127		psum += *address;
2128	}
2129
2130	while ((psum >> 16) != 0) {
2131		psum = (psum & 0xffff) + (psum >> 16);
2132	}
2133
2134	psum += sum;
2135
2136	while ((psum >> 16) != 0) {
2137		psum = (psum & 0xffff) + (psum >> 16);
2138	}
2139
2140	return (psum);
2141}
2142
2143#else	/* __lint */
2144
2145#if defined(__amd64)
2146
	/*
	 * unsigned int ip_ocsum(ushort_t *address, int halfword_count,
	 *	unsigned int sum)
	 * Internet checksum helper: sum halfword_count 16-bit words at
	 * address into the partial checksum, folding carries into the
	 * low 16 bits of the result.  The main loop is unrolled 32
	 * halfwords per pass; the tail jumps into the middle of the
	 * unrolled adds via .ip_ocsum_jmptbl.
	 */
	ENTRY(ip_ocsum)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jnb	1f
	xorl	%eax, %eax
	movq	%rdi, %rsi
	leaq	.ip_ocsum_panic_msg(%rip), %rdi
	call	panic
	/*NOTREACHED*/
.ip_ocsum_panic_msg:
	.string	"ip_ocsum: address 0x%p below kernelbase\n"
1:
#endif
	movl	%esi, %ecx	/* halfword_count */
	movq	%rdi, %rsi	/* address */
				/* partial sum in %edx */
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done
	testq	$3, %rsi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:	/* XX64 opportunities for 8-byte operations? */
.next_iter:
	/* XX64 opportunities for prefetch? */
	/* XX64 compute csum with 64 bit quantities? */
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%rsi), %edx
.only60:
	adcl	4(%rsi), %eax
.only56:
	adcl	8(%rsi), %edx
.only52:
	adcl	12(%rsi), %eax
.only48:
	adcl	16(%rsi), %edx
.only44:
	adcl	20(%rsi), %eax
.only40:
	adcl	24(%rsi), %edx
.only36:
	adcl	28(%rsi), %eax
.only32:
	adcl	32(%rsi), %edx
.only28:
	adcl	36(%rsi), %eax
.only24:
	adcl	40(%rsi), %edx
.only20:
	adcl	44(%rsi), %eax
.only16:
	adcl	48(%rsi), %edx
.only12:
	adcl	52(%rsi), %eax
.only8:
	adcl	56(%rsi), %edx
.only4:
	adcl	60(%rsi), %eax	/* could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addq	$64, %rsi
	testl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	leave
	ret

	/* Consume one leading halfword to reach 4-byte alignment. */
.ip_csum_notaligned:
	xorl	%edi, %edi
	movw	(%rsi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addq	$2, %rsi
	decl	%ecx
	jmp	.ip_csum_aligned

	/*
	 * Tail: add the odd trailing halfword (if any), bias %rsi so
	 * the .onlyN displacements line up, then jump into the unrolled
	 * adds at the entry matching the remaining dword count.  clc is
	 * required because the first instruction executed is an adcl.
	 */
.less_than_32:
	addl	$32, %ecx
	testl	$1, %ecx
	jz	.size_aligned
	andl	$0xfe, %ecx
	movzwl	(%rsi, %rcx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	movl	%ecx, %edi
	shrl	$1, %ecx
	shl	$1, %edi
	subq	$64, %rdi
	addq	%rdi, %rsi
	leaq    .ip_ocsum_jmptbl(%rip), %rdi
	leaq	(%rdi, %rcx, 8), %rdi
	xorl	%ecx, %ecx
	clc
	jmp 	*(%rdi)

	.align	8
.ip_ocsum_jmptbl:
	.quad	.only0, .only4, .only8, .only12, .only16, .only20
	.quad	.only24, .only28, .only32, .only36, .only40, .only44
	.quad	.only48, .only52, .only56, .only60
	SET_SIZE(ip_ocsum)
2263
2264#elif defined(__i386)
2265
	/*
	 * ia32 ip_ocsum: same algorithm as the amd64 version -- sum the
	 * halfwords 32 per unrolled pass, handle the tail through the
	 * .ip_ocsum_jmptbl computed jump, and fold the 32-bit total
	 * into 16 bits.
	 */
	ENTRY(ip_ocsum)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movl	12(%ebp), %ecx	/* count of half words */
	movl	16(%ebp), %edx	/* partial checksum */
	movl	8(%ebp), %esi
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done

	testl	$3, %esi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:
.next_iter:
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%esi), %edx
.only60:
	adcl	4(%esi), %eax
.only56:
	adcl	8(%esi), %edx
.only52:
	adcl	12(%esi), %eax
.only48:
	adcl	16(%esi), %edx
.only44:
	adcl	20(%esi), %eax
.only40:
	adcl	24(%esi), %edx
.only36:
	adcl	28(%esi), %eax
.only32:
	adcl	32(%esi), %edx
.only28:
	adcl	36(%esi), %eax
.only24:
	adcl	40(%esi), %edx
.only20:
	adcl	44(%esi), %eax
.only16:
	adcl	48(%esi), %edx
.only12:
	adcl	52(%esi), %eax
.only8:
	adcl	56(%esi), %edx
.only4:
	adcl	60(%esi), %eax	/* We could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* we could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addl	$64, %esi
	andl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	popl	%edi		/* restore registers */
	popl	%esi
	popl	%ebx
	leave
	ret

	/* Consume one leading halfword to reach 4-byte alignment. */
.ip_csum_notaligned:
	xorl	%edi, %edi
	movw	(%esi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addl	$2, %esi
	decl	%ecx
	jmp	.ip_csum_aligned

	/*
	 * Tail: add the odd trailing halfword (if any), bias %esi, and
	 * jump into the unrolled adds via the table; clc because the
	 * first instruction executed there is an adcl.
	 */
.less_than_32:
	addl	$32, %ecx
	testl	$1, %ecx
	jz	.size_aligned
	andl	$0xfe, %ecx
	movzwl	(%esi, %ecx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	movl	%ecx, %edi
	shrl	$1, %ecx
	shl	$1, %edi
	subl	$64, %edi
	addl	%edi, %esi
	movl	$.ip_ocsum_jmptbl, %edi
	lea	(%edi, %ecx, 4), %edi
	xorl	%ecx, %ecx
	clc
	jmp 	*(%edi)
	SET_SIZE(ip_ocsum)

	.data
	.align	4

.ip_ocsum_jmptbl:
	.long	.only0, .only4, .only8, .only12, .only16, .only20
	.long	.only24, .only28, .only32, .only36, .only40, .only44
	.long	.only48, .only52, .only56, .only60
2376
2377
2378#endif	/* __i386 */
2379#endif	/* __lint */
2380
2381/*
2382 * multiply two long numbers and yield a u_longlong_t result, callable from C.
2383 * Provided to manipulate hrtime_t values.
2384 */
2385#if defined(__lint)
2386
2387/* result = a * b; */
2388
2389/* ARGSUSED */
2390unsigned long long
2391mul32(uint_t a, uint_t b)
2392{ return (0); }
2393
2394#else	/* __lint */
2395
2396#if defined(__amd64)
2397
2398	ENTRY(mul32)
2399	xorl	%edx, %edx	/* XX64 joe, paranoia? */
2400	movl	%edi, %eax
2401	mull	%esi
2402	shlq	$32, %rdx
2403	orq	%rdx, %rax
2404	ret
2405	SET_SIZE(mul32)
2406
2407#elif defined(__i386)
2408
2409	ENTRY(mul32)
2410	movl	8(%esp), %eax
2411	movl	4(%esp), %ecx
2412	mull	%ecx
2413	ret
2414	SET_SIZE(mul32)
2415
2416#endif	/* __i386 */
2417#endif	/* __lint */
2418
#if defined(notused)
#if defined(__lint)
/* ARGSUSED */
void
load_pte64(uint64_t *pte, uint64_t pte_value)
{}
#else	/* __lint */
	/*
	 * Store a 64-bit PTE with two 32-bit writes (ia32).
	 * NOTE(review): compiled out of the build ("notused"); the high
	 * word is written before the low word -- confirm the intended
	 * ordering before resurrecting this.
	 */
	.globl load_pte64
load_pte64:
	movl	4(%esp), %eax		/* %eax = pte */
	movl	8(%esp), %ecx		/* %ecx = low 32 bits of pte_value */
	movl	12(%esp), %edx		/* %edx = high 32 bits */
	movl	%edx, 4(%eax)
	movl	%ecx, (%eax)
	ret
#endif	/* __lint */
#endif	/* notused */
2436
#if defined(__lint)

/*ARGSUSED*/
void
scan_memory(caddr_t addr, size_t size)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Read (and discard) every word of [addr, addr + size); size is
	 * truncated to whole 8-byte words.  Clobbers %rax via lodsq.
	 */
	ENTRY(scan_memory)
	shrq	$3, %rsi	/* convert %rsi from byte to quadword count */
	jz	.scanm_done
	movq	%rsi, %rcx	/* move count into rep control register */
	movq	%rdi, %rsi	/* move addr into lodsq control reg. */
	rep lodsq		/* scan the memory range */
.scanm_done:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(scan_memory)

#elif defined(__i386)

	/* ia32: same, in 4-byte words. */
	ENTRY(scan_memory)
	pushl	%ecx
	pushl	%esi
	movl	16(%esp), %ecx	/* move 2nd arg into rep control register */
	shrl	$2, %ecx	/* convert from byte count to word count */
	jz	.scanm_done
	movl	12(%esp), %esi	/* move 1st arg into lodsw control register */
	.byte	0xf3		/* rep prefix.  lame assembler.  sigh. */
	lodsl
.scanm_done:
	popl	%esi
	popl	%ecx
	ret
	SET_SIZE(scan_memory)

#endif	/* __i386 */
#endif	/* __lint */
2478
2479
#if defined(__lint)

/*ARGSUSED */
int
lowbit(ulong_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * lowbit(i): return the 1-based index of the lowest set bit
	 * in i, or 0 if i == 0.
	 * NOTE(review): relies on bsf leaving the preloaded -1 in the
	 * destination when the source is zero -- architecturally
	 * "undefined", though that is the observed behavior; verify.
	 */
	ENTRY(lowbit)
	movl	$-1, %eax
	bsfq	%rdi, %rax
	incl	%eax
	ret
	SET_SIZE(lowbit)

#elif defined(__i386)

	ENTRY(lowbit)
	movl	$-1, %eax
	bsfl	4(%esp), %eax
	incl	%eax
	ret
	SET_SIZE(lowbit)

#endif	/* __i386 */
#endif	/* __lint */
2509
#if defined(__lint)

/*ARGSUSED*/
int
highbit(ulong_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * highbit(i): return the 1-based index of the highest set bit
	 * in i, or 0 if i == 0.  Same bsr-leaves-dest-unchanged
	 * reliance as lowbit() above.
	 */
	ENTRY(highbit)
	movl	$-1, %eax
	bsrq	%rdi, %rax
	incl	%eax
	ret
	SET_SIZE(highbit)

#elif defined(__i386)

	ENTRY(highbit)
	movl	$-1, %eax
	bsrl	4(%esp), %eax
	incl	%eax
	ret
	SET_SIZE(highbit)

#endif	/* __i386 */
#endif	/* __lint */
2539
#if defined(__lint)

/*ARGSUSED*/
uint64_t
rdmsr(uint_t r)
{ return (0); }

/*ARGSUSED*/
void
wrmsr(uint_t r, const uint64_t val)
{}

/*ARGSUSED*/
uint64_t
xrdmsr(uint_t r)
{ return (0); }

/*ARGSUSED*/
void
xwrmsr(uint_t r, const uint64_t val)
{}

void
invalidate_cache(void)
{}

#else  /* __lint */

/*
 * Loaded into %edi before rdmsr/wrmsr in the x-variants below;
 * presumably a processor-specific key required for the "extended"
 * MSR access path -- verify against the consumers of xrdmsr/xwrmsr.
 */
#define	XMSR_ACCESS_VAL		$0x9c5a203a

#if defined(__amd64)

	/* uint64_t rdmsr(uint_t r): read MSR r; %edx:%eax -> %rax. */
	ENTRY(rdmsr)
	movl	%edi, %ecx
	rdmsr
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	SET_SIZE(rdmsr)

	/* void wrmsr(uint_t r, uint64_t val): split val into %edx:%eax. */
	ENTRY(wrmsr)
	movq	%rsi, %rdx
	shrq	$32, %rdx
	movl	%esi, %eax
	movl	%edi, %ecx
	wrmsr
	ret
	SET_SIZE(wrmsr)

	/* As rdmsr, but with the access value loaded into %edi. */
	ENTRY(xrdmsr)
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%edi, %ecx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	rdmsr
	shlq	$32, %rdx
	orq	%rdx, %rax
	leave
	ret
	SET_SIZE(xrdmsr)

	/* As wrmsr, but with the access value loaded into %edi. */
	ENTRY(xwrmsr)
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%edi, %ecx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	movq	%rsi, %rdx
	shrq	$32, %rdx
	movl	%esi, %eax
	wrmsr
	leave
	ret
	SET_SIZE(xwrmsr)

#elif defined(__i386)

	ENTRY(rdmsr)
	movl	4(%esp), %ecx
	rdmsr			/* 64-bit result returned in %edx:%eax */
	ret
	SET_SIZE(rdmsr)

	ENTRY(wrmsr)
	movl	4(%esp), %ecx
	movl	8(%esp), %eax
	movl	12(%esp), %edx
	wrmsr
	ret
	SET_SIZE(wrmsr)

	ENTRY(xrdmsr)
	pushl	%ebp
	movl	%esp, %ebp
	movl	8(%esp), %ecx
	pushl	%edi
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	rdmsr
	popl	%edi
	leave
	ret
	SET_SIZE(xrdmsr)

	ENTRY(xwrmsr)
	pushl	%ebp
	movl	%esp, %ebp
	movl	8(%esp), %ecx
	movl	12(%esp), %eax
	movl	16(%esp), %edx
	pushl	%edi
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	wrmsr
	popl	%edi
	leave
	ret
	SET_SIZE(xwrmsr)

#endif	/* __i386 */

	/* Write back and invalidate all processor caches. */
	ENTRY(invalidate_cache)
	wbinvd
	ret
	SET_SIZE(invalidate_cache)

#endif	/* __lint */
2664
#if defined(__lint)

/*ARGSUSED*/
void
getcregs(struct cregs *crp)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * getcregs(crp): snapshot the descriptor-table registers, task
	 * register, control registers and (amd64) the KGSBASE/EFER MSRs
	 * into the supplied struct cregs.
	 */
	ENTRY_NP(getcregs)

#define	GETMSR(r, off, d)	\
	movl	$r, %ecx;	\
	rdmsr;			\
	movl	%eax, off(d);	\
	movl	%edx, off+4(d)

	xorl	%eax, %eax
	movq	%rax, CREG_GDT+8(%rdi)	/* pre-zero pad beyond 10-byte sgdt */
	sgdt	CREG_GDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_IDT+8(%rdi)
	sidt	CREG_IDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_LDT(%rdi)
	sldt	CREG_LDT(%rdi)		/* 2 bytes */
	movq	%rax, CREG_TASKR(%rdi)
	str	CREG_TASKR(%rdi)	/* 2 bytes */
	movq	%cr0, %rax
	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
	movq	%cr2, %rax
	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
	movq	%cr3, %rax
	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
	movq	%cr4, %rax
	movq	%rax, CREG_CR4(%rdi)	/* cr4 */
	movq	%cr8, %rax
	movq	%rax, CREG_CR8(%rdi)	/* cr8 */
	GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
	GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
	ret
	SET_SIZE(getcregs)

#undef GETMSR

#elif defined(__i386)

	ENTRY_NP(getcregs)
	movl	4(%esp), %edx
	movw	$0, CREG_GDT+6(%edx)	/* zero pad beyond 6-byte sgdt */
	movw	$0, CREG_IDT+6(%edx)
	sgdt	CREG_GDT(%edx)		/* gdt */
	sidt	CREG_IDT(%edx)		/* idt */
	sldt	CREG_LDT(%edx)		/* ldt */
	str	CREG_TASKR(%edx)	/* task */
	movl	%cr0, %eax
	movl	%eax, CREG_CR0(%edx)	/* cr0 */
	movl	%cr2, %eax
	movl	%eax, CREG_CR2(%edx)	/* cr2 */
	movl	%cr3, %eax
	movl	%eax, CREG_CR3(%edx)	/* cr3 */
	testl	$X86_LARGEPAGE, x86_feature
	jz	.nocr4			/* only read %cr4 if supported */
	movl	%cr4, %eax
	movl	%eax, CREG_CR4(%edx)	/* cr4 */
	jmp	.skip
.nocr4:
	movl	$0, CREG_CR4(%edx)
.skip:
	ret
	SET_SIZE(getcregs)

#endif	/* __i386 */
#endif	/* __lint */
2739
2740
2741/*
2742 * A panic trigger is a word which is updated atomically and can only be set
2743 * once.  We atomically store 0xDEFACEDD and load the old value.  If the
2744 * previous value was 0, we succeed and return 1; otherwise return 0.
2745 * This allows a partially corrupt trigger to still trigger correctly.  DTrace
2746 * has its own version of this function to allow it to panic correctly from
2747 * probe context.
2748 */
2749#if defined(__lint)
2750
2751/*ARGSUSED*/
2752int
2753panic_trigger(int *tp)
2754{ return (0); }
2755
2756/*ARGSUSED*/
2757int
2758dtrace_panic_trigger(int *tp)
2759{ return (0); }
2760
2761#else	/* __lint */
2762
2763#if defined(__amd64)
2764
2765	ENTRY_NP(panic_trigger)
2766	xorl	%eax, %eax
2767	movl	$0xdefacedd, %edx
2768	lock
2769	  xchgl	%edx, (%rdi)
2770	cmpl	$0, %edx
2771	je	0f
2772	movl	$0, %eax
2773	ret
27740:	movl	$1, %eax
2775	ret
2776	SET_SIZE(panic_trigger)
2777
2778	ENTRY_NP(dtrace_panic_trigger)
2779	xorl	%eax, %eax
2780	movl	$0xdefacedd, %edx
2781	lock
2782	  xchgl	%edx, (%rdi)
2783	cmpl	$0, %edx
2784	je	0f
2785	movl	$0, %eax
2786	ret
27870:	movl	$1, %eax
2788	ret
2789	SET_SIZE(dtrace_panic_trigger)
2790
2791#elif defined(__i386)
2792
2793	ENTRY_NP(panic_trigger)
2794	movl	4(%esp), %edx		/ %edx = address of trigger
2795	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
2796	lock				/ assert lock
2797	xchgl %eax, (%edx)		/ exchange %eax and the trigger
2798	cmpl	$0, %eax		/ if (%eax == 0x0)
2799	je	0f			/   return (1);
2800	movl	$0, %eax		/ else
2801	ret				/   return (0);
28020:	movl	$1, %eax
2803	ret
2804	SET_SIZE(panic_trigger)
2805
2806	ENTRY_NP(dtrace_panic_trigger)
2807	movl	4(%esp), %edx		/ %edx = address of trigger
2808	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
2809	lock				/ assert lock
2810	xchgl %eax, (%edx)		/ exchange %eax and the trigger
2811	cmpl	$0, %eax		/ if (%eax == 0x0)
2812	je	0f			/   return (1);
2813	movl	$0, %eax		/ else
2814	ret				/   return (0);
28150:	movl	$1, %eax
2816	ret
2817	SET_SIZE(dtrace_panic_trigger)
2818
2819#endif	/* __i386 */
2820#endif	/* __lint */
2821
2822/*
2823 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
2824 * into the panic code implemented in panicsys().  vpanic() is responsible
2825 * for passing through the format string and arguments, and constructing a
2826 * regs structure on the stack into which it saves the current register
2827 * values.  If we are not dying due to a fatal trap, these registers will
2828 * then be preserved in panicbuf as the current processor state.  Before
2829 * invoking panicsys(), vpanic() activates the first panic trigger (see
2830 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
2831 * DTrace takes a slightly different panic path if it must panic from probe
2832 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
2833 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
2834 * branches back into vpanic().
2835 */
2836#if defined(__lint)
2837
2838/*ARGSUSED*/
2839void
2840vpanic(const char *format, va_list alist)
2841{}
2842
2843/*ARGSUSED*/
2844void
2845dtrace_vpanic(const char *format, va_list alist)
2846{}
2847
2848#else	/* __lint */
2849
2850#if defined(__amd64)
2851
	/*
	 * void vpanic(const char *format, va_list alist)
	 * Push the volatile registers (layout below), fire the panic
	 * trigger, then fall into vpanic_common: switch to panic_stack
	 * if we were first, build a struct regs from the saved values,
	 * and call panicsys().
	 */
	ENTRY_NP(vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip | 	0x60	*/
	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
	pushfq					/* | rfl  |	0x50	*/
	pushq	%r11				/* | %r11 |	0x48	*/
	pushq	%r10				/* | %r10 |	0x40	*/
	pushq	%rbx				/* | %rbx |	0x38	*/
	pushq	%rax				/* | %rax |	0x30	*/
	pushq	%r9				/* | %r9  |	0x28	*/
	pushq	%r8				/* | %r8  |	0x20	*/
	pushq	%rcx				/* | %rcx |	0x18	*/
	pushq	%rdx				/* | %rdx |	0x10	*/
	pushq	%rsi				/* | %rsi |	0x8 alist */
	pushq	%rdi				/* | %rdi |	0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	panic_trigger			/* %eax = panic_trigger() */

vpanic_common:
	/*
	 * The panic_trigger result is in %eax from the call above, and
	 * dtrace_panic places it in %eax before branching here.
	 * The rdmsr instructions that follow below will clobber %eax so
	 * we stash the panic_trigger result in %r11d.
	 */
	/*
	 * NOTE(review): no rdmsr remains on this path, so the comment
	 * above looks stale; %eax would in any case be clobbered before
	 * the panicsys() call below, so the stash is still required.
	 */
	movl	%eax, %r11d
	cmpl	$0, %r11d
	je	0f

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	leaq	panic_stack(%rip), %rsp
	addq	$PANICSTKSIZE, %rsp
0:	subq	$REGSIZE, %rsp
	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
	movq	0x0(%rbx), %rcx
	movq	%rcx, REGOFF_RDI(%rsp)
	movq	0x8(%rbx), %rcx
	movq	%rcx, REGOFF_RSI(%rsp)
	movq	0x10(%rbx), %rcx
	movq	%rcx, REGOFF_RDX(%rsp)
	movq	0x18(%rbx), %rcx
	movq	%rcx, REGOFF_RCX(%rsp)
	movq	0x20(%rbx), %rcx

	movq	%rcx, REGOFF_R8(%rsp)
	movq	0x28(%rbx), %rcx
	movq	%rcx, REGOFF_R9(%rsp)
	movq	0x30(%rbx), %rcx
	movq	%rcx, REGOFF_RAX(%rsp)
	movq	0x38(%rbx), %rcx
	movq	%rcx, REGOFF_RBX(%rsp)
	movq	0x58(%rbx), %rcx

	movq	%rcx, REGOFF_RBP(%rsp)
	movq	0x40(%rbx), %rcx
	movq	%rcx, REGOFF_R10(%rsp)
	movq	0x48(%rbx), %rcx
	movq	%rcx, REGOFF_R11(%rsp)
	movq	%r12, REGOFF_R12(%rsp)

	movq	%r13, REGOFF_R13(%rsp)
	movq	%r14, REGOFF_R14(%rsp)
	movq	%r15, REGOFF_R15(%rsp)

	xorl	%ecx, %ecx
	movw	%ds, %cx
	movq	%rcx, REGOFF_DS(%rsp)
	movw	%es, %cx
	movq	%rcx, REGOFF_ES(%rsp)
	movw	%fs, %cx
	movq	%rcx, REGOFF_FS(%rsp)
	movw	%gs, %cx
	movq	%rcx, REGOFF_GS(%rsp)

	movq	$0, REGOFF_TRAPNO(%rsp)

	movq	$0, REGOFF_ERR(%rsp)
	leaq	vpanic(%rip), %rcx
	movq	%rcx, REGOFF_RIP(%rsp)
	movw	%cs, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_CS(%rsp)
	movq	0x50(%rbx), %rcx
	movq	%rcx, REGOFF_RFL(%rsp)
	movq	%rbx, %rcx
	addq	$0x60, %rcx
	movq	%rcx, REGOFF_RSP(%rsp)	/* caller %rsp = frame base + 0x60 */
	movw	%ss, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_SS(%rsp)

	/*
	 * panicsys(format, alist, rp, on_panic_stack)
	 */
	movq	REGOFF_RDI(%rsp), %rdi		/* format */
	movq	REGOFF_RSI(%rsp), %rsi		/* alist */
	movq	%rsp, %rdx			/* struct regs */
	movl	%r11d, %ecx			/* on_panic_stack */
	call	panicsys
	addq	$REGSIZE, %rsp
	popq	%rdi
	popq	%rsi
	popq	%rdx
	popq	%rcx
	popq	%r8
	popq	%r9
	popq	%rax
	popq	%rbx
	popq	%r10
	popq	%r11
	popfq
	leave
	ret
	SET_SIZE(vpanic)
2976
	/*
	 * void dtrace_vpanic(const char *format, va_list alist)
	 *
	 * DTrace-initiated variant of vpanic(): builds the identical
	 * register-save area on the stack, but races for the panic via
	 * dtrace_panic_trigger() rather than panic_trigger(), then joins
	 * the common code at vpanic_common with the trigger result in %eax.
	 */
	ENTRY_NP(dtrace_vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip | 	0x60	*/
	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
	pushfq					/* | rfl  |	0x50	*/
	pushq	%r11				/* | %r11 |	0x48	*/
	pushq	%r10				/* | %r10 |	0x40	*/
	pushq	%rbx				/* | %rbx |	0x38	*/
	pushq	%rax				/* | %rax |	0x30	*/
	pushq	%r9				/* | %r9  |	0x28	*/
	pushq	%r8				/* | %r8  |	0x20	*/
	pushq	%rcx				/* | %rcx |	0x18	*/
	pushq	%rdx				/* | %rdx |	0x10	*/
	pushq	%rsi				/* | %rsi |	0x8 alist */
	pushq	%rdi				/* | %rdi |	0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	dtrace_panic_trigger	/* %eax = dtrace_panic_trigger() */
	jmp	vpanic_common

	SET_SIZE(dtrace_vpanic)
3000
3001#elif defined(__i386)
3002
	/*
	 * void vpanic(const char *format, va_list alist)
	 *
	 * i386 panic entry point.  Saves the caller's volatile registers on
	 * the stack, races via panic_trigger(); the first caller to win the
	 * race switches to the reserved panic_stack.  A struct regs snapshot
	 * describing the state at entry is then built on the stack and passed
	 * to panicsys().  vpanic_common is also the join point for
	 * dtrace_vpanic(), which enters with %eax = dtrace_panic_trigger().
	 */
	ENTRY_NP(vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip | 20
	movl	%esp, %ebp			/ | %ebp | 16
	pushl	%eax				/ | %eax | 12
	pushl	%ebx				/ | %ebx |  8
	pushl	%ecx				/ | %ecx |  4
	pushl	%edx				/ | %edx |  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	panic_trigger			/ %eax = panic_trigger()
	addl	$4, %esp			/ reset stack pointer

vpanic_common:
	cmpl	$0, %eax			/ if (%eax == 0)
	je	0f				/   goto 0f;

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	lea	panic_stack, %esp		/ %esp  = panic_stack
	addl	$PANICSTKSIZE, %esp		/ %esp += PANICSTKSIZE

0:	subl	$REGSIZE, %esp			/ allocate struct regs

	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
#if !defined(__GNUC_AS__)
	movw	%gs, %edx
	movl	%edx, REGOFF_GS(%esp)
	movw	%fs, %edx
	movl	%edx, REGOFF_FS(%esp)
	movw	%es, %edx
	movl	%edx, REGOFF_ES(%esp)
	movw	%ds, %edx
	movl	%edx, REGOFF_DS(%esp)
#else	/* __GNUC_AS__ */
	mov	%gs, %edx
	mov	%edx, REGOFF_GS(%esp)
	mov	%fs, %edx
	mov	%edx, REGOFF_FS(%esp)
	mov	%es, %edx
	mov	%edx, REGOFF_ES(%esp)
	mov	%ds, %edx
	mov	%edx, REGOFF_DS(%esp)
#endif	/* __GNUC_AS__ */
	movl	%edi, REGOFF_EDI(%esp)
	movl	%esi, REGOFF_ESI(%esp)
	movl	16(%ebx), %ecx			/ saved %ebp (see layout above)
	movl	%ecx, REGOFF_EBP(%esp)
	movl	%ebx, %ecx
	addl	$20, %ecx			/ %esp value before vpanic pushes
	movl	%ecx, REGOFF_ESP(%esp)
	movl	8(%ebx), %ecx
	movl	%ecx, REGOFF_EBX(%esp)
	movl	0(%ebx), %ecx
	movl	%ecx, REGOFF_EDX(%esp)
	movl	4(%ebx), %ecx
	movl	%ecx, REGOFF_ECX(%esp)
	movl	12(%ebx), %ecx
	movl	%ecx, REGOFF_EAX(%esp)
	movl	$0, REGOFF_TRAPNO(%esp)
	movl	$0, REGOFF_ERR(%esp)
	lea	vpanic, %ecx			/ report vpanic itself as %eip
	movl	%ecx, REGOFF_EIP(%esp)
#if !defined(__GNUC_AS__)
	movw	%cs, %edx
#else	/* __GNUC_AS__ */
	mov	%cs, %edx
#endif	/* __GNUC_AS__ */
	movl	%edx, REGOFF_CS(%esp)
	pushfl
	popl	%ecx
	movl	%ecx, REGOFF_EFL(%esp)
	movl	$0, REGOFF_UESP(%esp)
#if !defined(__GNUC_AS__)
	movw	%ss, %edx
#else	/* __GNUC_AS__ */
	mov	%ss, %edx
#endif	/* __GNUC_AS__ */
	movl	%edx, REGOFF_SS(%esp)

	/*
	 * panicsys(format, alist, rp, on_panic_stack)
	 */
	movl	%esp, %ecx			/ %ecx = &regs
	pushl	%eax				/ push on_panic_stack
	pushl	%ecx				/ push &regs
	movl	12(%ebp), %ecx			/ %ecx = alist
	pushl	%ecx				/ push alist
	movl	8(%ebp), %ecx			/ %ecx = format
	pushl	%ecx				/ push format
	call	panicsys			/ panicsys();
	addl	$16, %esp			/ pop arguments

	addl	$REGSIZE, %esp
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	leave
	ret
	SET_SIZE(vpanic)
3110
	/*
	 * void dtrace_vpanic(const char *format, va_list alist)
	 *
	 * DTrace-initiated variant of vpanic() (i386): same register-save
	 * layout, but races via dtrace_panic_trigger() before joining the
	 * common code at vpanic_common with the result in %eax.
	 */
	ENTRY_NP(dtrace_vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip | 20
	movl	%esp, %ebp			/ | %ebp | 16
	pushl	%eax				/ | %eax | 12
	pushl	%ebx				/ | %ebx |  8
	pushl	%ecx				/ | %ecx |  4
	pushl	%edx				/ | %edx |  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	dtrace_panic_trigger		/ %eax = dtrace_panic_trigger()
	addl	$4, %esp			/ reset stack pointer
	jmp	vpanic_common			/ jump back to common code

	SET_SIZE(dtrace_vpanic)
3129
3130#endif	/* __i386 */
3131#endif	/* __lint */
3132
3133#if defined(__lint)
3134
/*
 * Lint-only prototypes for hres_tick() and the time-of-day state it
 * maintains; the real definitions are the assembly/data below.
 */
void
hres_tick(void)
{}

int64_t timedelta;
hrtime_t hres_last_tick;
volatile timestruc_t hrestime;
int64_t hrestime_adj;
volatile int hres_lock;
hrtime_t hrtime_base;
3145
3146#else	/* __lint */
3147
	/*
	 * Time-of-day state maintained by hres_tick()/__adj_hrestime below;
	 * updates are serialized by hres_lock (CLOCK_LOCK).
	 */
	DGDEF3(hrestime, _MUL(2, CLONGSIZE), 8)	/* (tv_sec, tv_nsec) */
	.NWORD	0, 0

	DGDEF3(hrestime_adj, 8, 8)	/* residual adjustment, nsec */
	.long	0, 0

	DGDEF3(hres_last_tick, 8, 8)	/* gethrtimef() value at last tick */
	.long	0, 0

	DGDEF3(timedelta, 8, 8)		/* outstanding time correction, nsec */
	.long	0, 0

	DGDEF3(hres_lock, 4, 8)		/* byte lock; incl of LSB releases */
	.long	0

	/*
	 * initialized to a non zero value to make pc_gethrtime()
	 * work correctly even before clock is initialized
	 */
	DGDEF3(hrtime_base, 8, 8)
	.long	_MUL(NSEC_PER_CLOCK_TICK, 6), 0

	DGDEF3(adj_shift, 4, 4)		/* shift used to derive max_hres_adj */
	.long	ADJ_SHIFT
3172
3173#if defined(__amd64)
3174
	/*
	 * void hres_tick(void)
	 *
	 * Clock-tick handler (amd64): samples *gethrtimef, spins for
	 * CLOCK_LOCK (hres_lock), folds the interval since the previous
	 * tick into hrtime_base and hrestime.tv_nsec, records the new
	 * hres_last_tick, calls __adj_hrestime to apply any pending
	 * correction, and releases the lock.
	 */
	ENTRY_NP(hres_tick)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef(%rip)
	movq	%rax, %r8		/* %r8 = current hrtime */

	leaq	hres_lock(%rip), %rax
	movb	$-1, %dl
.CL1:
	xchgb	%dl, (%rax)		/* atomic test-and-set of lock byte */
	testb	%dl, %dl
	jz	.CL3			/* got it */
.CL2:
	cmpb	$0, (%rax)		/* possible to get lock? */
	pause
	jne	.CL2
	jmp	.CL1			/* yes, try again */
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * a timestruc_t (sec, nsec)
	 */
	leaq	hres_last_tick(%rip), %rax
	movq	%r8, %r11		/* preserve current hrtime */
	subq	(%rax), %r8		/* %r8 = interval since last tick */
	addq	%r8, hrtime_base(%rip)	/* add interval to hrtime_base */
	addq	%r8, hrestime+8(%rip)	/* add interval to hrestime.tv_nsec */
	/*
	 * Now that we have CLOCK_LOCK, we can update hres_last_tick
	 */
	movq	%r11, (%rax)

	call	__adj_hrestime

	/*
	 * release the hres_lock
	 */
	incl	hres_lock(%rip)
	leave
	ret
	SET_SIZE(hres_tick)
3225
3226#elif defined(__i386)
3227
	/*
	 * void hres_tick(void)
	 *
	 * Clock-tick handler (i386): samples the 64-bit *gethrtimef result
	 * into %edx:%eax (kept in %esi:%ebx), spins for CLOCK_LOCK
	 * (hres_lock), folds the interval since the previous tick into
	 * hrtime_base and hrestime.tv_nsec with 32-bit add/adc pairs,
	 * records the new hres_last_tick, then FALLS THROUGH into
	 * __adj_hrestime, which applies any pending adjtime()-style
	 * correction, normalizes tv_nsec against NANOSEC, and releases
	 * hres_lock before returning.
	 */
	ENTRY_NP(hres_tick)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%esi
	pushl	%ebx

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef
	movl	%eax, %ebx		/ %esi:%ebx = current hrtime
	movl	%edx, %esi

	movl	$hres_lock, %eax
	movl	$-1, %edx
.CL1:
	xchgb	%dl, (%eax)		/ atomic test-and-set of lock byte
	testb	%dl, %dl
	jz	.CL3			/ got it
.CL2:
	cmpb	$0, (%eax)		/ possible to get lock?
	pause
	jne	.CL2
	jmp	.CL1			/ yes, try again
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * timestruc_t (sec, nsec)
	 */

	lea	hres_last_tick, %eax

	movl	%ebx, %edx
	movl	%esi, %ecx

	subl 	(%eax), %edx		/ %ecx:%edx = interval since last tick
	sbbl 	4(%eax), %ecx

	addl	%edx, hrtime_base	/ add interval to hrtime_base
	adcl	%ecx, hrtime_base+4

	addl 	%edx, hrestime+4	/ add interval to hrestime.tv_nsec

	/
	/ Now that we have CLOCK_LOCK, we can update hres_last_tick.
	/
	movl	%ebx, (%eax)
	movl	%esi,  4(%eax)

	/ get hrestime at this moment. used as base for pc_gethrestime
	/
	/ Apply adjustment, if any
	/
	/ #define HRES_ADJ	(NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
	/ (max_hres_adj)
	/
	/ void
	/ adj_hrestime()
	/ {
	/	long long adj;
	/
	/	if (hrestime_adj == 0)
	/		adj = 0;
	/	else if (hrestime_adj > 0) {
	/		if (hrestime_adj < HRES_ADJ)
	/			adj = hrestime_adj;
	/		else
	/			adj = HRES_ADJ;
	/	}
	/	else {
	/		if (hrestime_adj < -(HRES_ADJ))
	/			adj = -(HRES_ADJ);
	/		else
	/			adj = hrestime_adj;
	/	}
	/
	/	timedelta -= adj;
	/	hrestime_adj = timedelta;
	/	hrestime.tv_nsec += adj;
	/
	/	while (hrestime.tv_nsec >= NANOSEC) {
	/		one_sec++;
	/		hrestime.tv_sec++;
	/		hrestime.tv_nsec -= NANOSEC;
	/	}
	/ }
__adj_hrestime:
	movl	hrestime_adj, %esi	/ if (hrestime_adj == 0)
	movl	hrestime_adj+4, %edx
	andl	%esi, %esi
	jne	.CL4			/ no
	andl	%edx, %edx
	jne	.CL4			/ no
	subl	%ecx, %ecx		/ yes, adj = 0;
	subl	%edx, %edx
	jmp	.CL5
.CL4:
	subl	%ecx, %ecx
	subl	%eax, %eax
	subl	%esi, %ecx		/ %eax:%ecx = -hrestime_adj
	sbbl	%edx, %eax
	andl	%eax, %eax		/ if (hrestime_adj > 0)
	jge	.CL6

	/ In the following comments, HRES_ADJ is used, while in the code
	/ max_hres_adj is used.
	/
	/ The test for "hrestime_adj < HRES_ADJ" is complicated because
	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
	/ on the logical equivalence of:
	/
	/	!(hrestime_adj < HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
	/
	/ which computes whether or not the least significant 32-bits
	/ of hrestime_adj is greater than HRES_ADJ, followed by:
	/
	/	Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry whenever step 1 is true or the most
	/ significant long of the longlong hrestime_adj is non-zero.

	movl	max_hres_adj, %ecx	/ hrestime_adj is positive
	subl	%esi, %ecx
	movl	%edx, %eax
	adcl	$-1, %eax
	jnc	.CL7
	movl	max_hres_adj, %ecx	/ adj = HRES_ADJ;
	subl	%edx, %edx
	jmp	.CL5

	/ The following computation is similar to the one above.
	/
	/ The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
	/ on the logical equivalence of:
	/
	/	(hrestime_adj > -HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ + lsw(hrestime_adj)) generates a Carry
	/
	/ which means the least significant 32-bits of hrestime_adj is
	/ greater than -HRES_ADJ, followed by:
	/
	/	Previous Carry + 0 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry only when step 1 is true and the most
	/ significant long of the longlong hrestime_adj is -1.

.CL6:					/ hrestime_adj is negative
	movl	%esi, %ecx
	addl	max_hres_adj, %ecx
	movl	%edx, %eax
	adcl	$0, %eax
	jc	.CL7
	xor	%ecx, %ecx
	subl	max_hres_adj, %ecx	/ adj = -(HRES_ADJ);
	movl	$-1, %edx
	jmp	.CL5
.CL7:
	movl	%esi, %ecx		/ adj = hrestime_adj;
.CL5:
	movl	timedelta, %esi
	subl	%ecx, %esi
	movl	timedelta+4, %eax
	sbbl	%edx, %eax
	movl	%esi, timedelta
	movl	%eax, timedelta+4	/ timedelta -= adj;
	movl	%esi, hrestime_adj
	movl	%eax, hrestime_adj+4	/ hrestime_adj = timedelta;
	addl	hrestime+4, %ecx

	movl	%ecx, %eax		/ eax = tv_nsec
1:
	cmpl	$NANOSEC, %eax		/ if ((unsigned long)tv_nsec >= NANOSEC)
	jb	.CL8			/ no
	incl	one_sec			/ yes,  one_sec++;
	incl	hrestime		/ hrestime.tv_sec++;
	addl	$-NANOSEC, %eax		/ tv_nsec -= NANOSEC
	jmp	1b			/ check for more seconds

.CL8:
	movl	%eax, hrestime+4	/ store final into hrestime.tv_nsec
	incl	hres_lock		/ release the hres_lock

	popl	%ebx
	popl	%esi
	leave
	ret
	SET_SIZE(hres_tick)
3428
3429#endif	/* __i386 */
3430#endif	/* __lint */
3431
3432/*
3433 * void prefetch_smap_w(void *)
3434 *
3435 * Prefetch ahead within a linear list of smap structures.
3436 * Not implemented for ia32.  Stub for compatibility.
3437 */
3438
3439#if defined(__lint)
3440
/* Lint prototype only; the real (stub) implementation is the assembly below. */
/*ARGSUSED*/
void prefetch_smap_w(void *smp)
{}
3444
3445#else	/* __lint */
3446
	/* No-op stub: smap prefetch is not implemented on ia32. */
	ENTRY(prefetch_smap_w)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_smap_w)
3451
3452#endif	/* __lint */
3453
3454/*
3455 * prefetch_page_r(page_t *)
3456 * issue prefetch instructions for a page_t
3457 */
3458#if defined(__lint)
3459
/* Lint prototype only; the real (stub) implementation is the assembly below. */
/*ARGSUSED*/
void
prefetch_page_r(void *pp)
{}
3464
3465#else	/* __lint */
3466
	/* No-op stub: page_t prefetch is not implemented on ia32. */
	ENTRY(prefetch_page_r)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_page_r)
3471
3472#endif	/* __lint */
3473
3474#if defined(__lint)
3475
/* Lint prototype only; the real implementation is the assembly below. */
/*ARGSUSED*/
int
bcmp(const void *s1, const void *s2, size_t count)
{ return (0); }
3480
3481#else   /* __lint */
3482
3483#if defined(__amd64)
3484
	/*
	 * int bcmp(const void *s1, const void *s2, size_t count)
	 *
	 * Returns 0 if the two byte ranges are identical, 1 otherwise
	 * (memcmp()'s signed result is collapsed via setne/movzbl).
	 * Under DEBUG, panics if either pointer is below
	 * postbootkernelbase.
	 */
	ENTRY(bcmp)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %r11
	cmpq	%r11, %rdi
	jb	0f
	cmpq	%r11, %rsi
	jnb	1f
0:	leaq	.bcmp_panic_msg(%rip), %rdi
	xorl	%eax, %eax		/* %al = 0: no FP args to variadic panic() */
	call	panic
1:
#endif	/* DEBUG */
	call	memcmp
	testl	%eax, %eax
	setne	%dl
	leave
	movzbl	%dl, %eax		/* normalize to 0 or 1 */
	ret
	SET_SIZE(bcmp)
3506
3507#elif defined(__i386)
3508
3509#define	ARG_S1		8
3510#define	ARG_S2		12
3511#define	ARG_LENGTH	16
3512
	/*
	 * int bcmp(const void *s1, const void *s2, size_t count)
	 *
	 * i386 implementation: compares a word (4 bytes) at a time while
	 * count >= 4, then finishes (or resolves a differing word) byte by
	 * byte.  Returns 0 if the ranges are identical, 1 otherwise.
	 * Under DEBUG, panics if either pointer is below
	 * postbootkernelbase.
	 */
	ENTRY(bcmp)
	pushl	%ebp
	movl	%esp, %ebp	/ create new stack frame
#ifdef DEBUG
	movl    postbootkernelbase, %eax
	cmpl    %eax, ARG_S1(%ebp)
	jb	0f
	cmpl    %eax, ARG_S2(%ebp)
	jnb	1f
0:	pushl   $.bcmp_panic_msg
	call    panic
1:
#endif	/* DEBUG */

	pushl	%edi		/ save register variable
	movl	ARG_S1(%ebp), %eax	/ %eax = address of string 1
	movl	ARG_S2(%ebp), %ecx	/ %ecx = address of string 2
	cmpl	%eax, %ecx	/ if the same string
	je	.equal		/ goto .equal
	movl	ARG_LENGTH(%ebp), %edi	/ %edi = length in bytes
	cmpl	$4, %edi	/ if %edi < 4
	jb	.byte_check	/ goto .byte_check
	.align	4
.word_loop:
	movl	(%ecx), %edx	/ move 1 word from (%ecx) to %edx
	leal	-4(%edi), %edi	/ %edi -= 4
	cmpl	(%eax), %edx	/ compare 1 word from (%eax) with %edx
	jne	.word_not_equal	/ if not equal, goto .word_not_equal
	leal	4(%ecx), %ecx	/ %ecx += 4 (next word)
	leal	4(%eax), %eax	/ %eax += 4 (next word)
	cmpl	$4, %edi	/ if %edi >= 4
	jae	.word_loop	/ goto .word_loop
.byte_check:
	cmpl	$0, %edi	/ if %edi == 0
	je	.equal		/ goto .equal
	jmp	.byte_loop	/ goto .byte_loop (checks in bytes)
.word_not_equal:
	leal	4(%edi), %edi	/ %edi += 4 (post-decremented)
	.align	4
.byte_loop:
	movb	(%ecx),	%dl	/ move 1 byte from (%ecx) to %dl
	cmpb	%dl, (%eax)	/ compare %dl with 1 byte from (%eax)
	jne	.not_equal	/ if not equal, goto .not_equal
	incl	%ecx		/ %ecx++ (next byte)
	incl	%eax		/ %eax++ (next byte)
	decl	%edi		/ %edi--
	jnz	.byte_loop	/ if not zero, goto .byte_loop
.equal:
	xorl	%eax, %eax	/ %eax = 0
	popl	%edi		/ restore register variable
	leave			/ restore old stack frame
	ret			/ return (NULL)
	.align	4
.not_equal:
	movl	$1, %eax	/ return 1
	popl	%edi		/ restore register variable
	leave			/ restore old stack frame
	ret			/ return (NULL)
	SET_SIZE(bcmp)
3572
3573#endif	/* __i386 */
3574
3575#ifdef DEBUG
3576	.text
3577.bcmp_panic_msg:
3578	.string "bcmp: arguments below kernelbase"
3579#endif	/* DEBUG */
3580
3581#endif	/* __lint */
3582
3583#if defined(__lint)
3584
3585uint_t
3586bsrw_insn(uint16_t mask)
3587{
3588	uint_t index = sizeof (mask) * NBBY - 1;
3589
3590	while ((mask & (1 << index)) == 0)
3591		index--;
3592	return (index);
3593}
3594
3595#else	/* __lint */
3596
3597#if defined(__amd64)
3598
	/*
	 * uint_t bsrw_insn(uint16_t mask)
	 *
	 * Index of the most significant set bit of the 16-bit mask in %di.
	 * %eax is pre-zeroed; bsrw's destination is undefined when the
	 * source is zero, so callers must pass a non-zero mask.
	 */
	ENTRY_NP(bsrw_insn)
	xorl	%eax, %eax
	bsrw	%di, %ax
	ret
	SET_SIZE(bsrw_insn)
3604
3605#elif defined(__i386)
3606
	/*
	 * uint_t bsrw_insn(uint16_t mask)
	 *
	 * Index of the most significant set bit of the 16-bit stack arg.
	 * %eax is pre-zeroed; bsrw's destination is undefined when the
	 * source is zero, so callers must pass a non-zero mask.
	 */
	ENTRY_NP(bsrw_insn)
	movw	4(%esp), %cx
	xorl	%eax, %eax
	bsrw	%cx, %ax
	ret
	SET_SIZE(bsrw_insn)
3613
3614#endif	/* __i386 */
3615#endif	/* __lint */
3616
3617#if defined(__lint)
3618
/*
 * Lint model of atomic_btr32(): atomically clear bit `pil' in *pending.
 * NOTE(review): this C model returns the updated word, whereas the
 * assembly implementation below returns the *previous* state of the
 * tested bit (lock btrl + setc).  Lint-only, but the contracts differ --
 * confirm intent.
 */
uint_t
atomic_btr32(uint32_t *pending, uint_t pil)
{
	return (*pending &= ~(1 << pil));
}
3624
3625#else	/* __lint */
3626
3627#if defined(__i386)
3628
	/*
	 * uint_t atomic_btr32(uint32_t *pending, uint_t pil)
	 *
	 * Atomically (lock btrl) clear bit `pil' in *pending; returns the
	 * bit's previous value (0 or 1) via the carry flag and setc.
	 */
	ENTRY_NP(atomic_btr32)
	movl	4(%esp), %ecx
	movl	8(%esp), %edx
	xorl	%eax, %eax
	lock
	btrl	%edx, (%ecx)
	setc	%al
	ret
	SET_SIZE(atomic_btr32)
3638
3639#endif	/* __i386 */
3640#endif	/* __lint */
3641
3642#if defined(__lint)
3643
/* Lint prototype only; the real implementation is the assembly below. */
/*ARGSUSED*/
void
switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
	    uint_t arg2)
{}
3649
3650#else	/* __lint */
3651
3652#if defined(__amd64)
3653
	/*
	 * void switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t),
	 *     uint_t arg1, uint_t arg2)
	 *
	 * Switch %rsp to newsp, call func(arg1, arg2), then restore the
	 * original stack via the saved frame pointer (leave).
	 */
	ENTRY_NP(switch_sp_and_call)
	pushq	%rbp
	movq	%rsp, %rbp		/* set up stack frame */
	movq	%rdi, %rsp		/* switch stack pointer */
	movq	%rdx, %rdi		/* pass func arg 1 */
	movq	%rsi, %r11		/* save function to call */
	movq	%rcx, %rsi		/* pass func arg 2 */
	call	*%r11			/* call function */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)
3665
3666#elif defined(__i386)
3667
	/*
	 * void switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t),
	 *     uint_t arg1, uint_t arg2)
	 *
	 * Switch %esp to newsp, call func(arg1, arg2), then restore the
	 * original stack via the saved frame pointer (leave).
	 */
	ENTRY_NP(switch_sp_and_call)
	pushl	%ebp
	mov	%esp, %ebp		/* set up stack frame */
	movl	8(%ebp), %esp		/* switch stack pointer */
	pushl	20(%ebp)		/* push func arg 2 */
	pushl	16(%ebp)		/* push func arg 1 */
	call	*12(%ebp)		/* call function */
	addl	$8, %esp		/* pop arguments */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)
3679
3680#endif	/* __i386 */
3681#endif	/* __lint */
3682
3683#if defined(__lint)
3684
/* Lint prototype only; the real implementation is the assembly below. */
void
kmdb_enter(void)
{}
3688
3689#else	/* __lint */
3690
3691#if defined(__amd64)
3692
	/*
	 * void kmdb_enter(void)
	 *
	 * Trap into the kernel debugger (int $T_DBGENTR) with interrupts
	 * disabled; the pre-existing interrupt state is restored on return.
	 */
	ENTRY_NP(kmdb_enter)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * Save flags, do a 'cli' then return the saved flags
	 */
	call	intr_clear

	int	$T_DBGENTR

	/*
	 * Restore the saved flags
	 */
	movq	%rax, %rdi
	call	intr_restore

	leave
	ret
	SET_SIZE(kmdb_enter)
3713
3714#elif defined(__i386)
3715
	/*
	 * void kmdb_enter(void)
	 *
	 * Trap into the kernel debugger (int $T_DBGENTR) with interrupts
	 * disabled; the pre-existing interrupt state is restored on return.
	 */
	ENTRY_NP(kmdb_enter)
	pushl	%ebp
	movl	%esp, %ebp

	/*
	 * Save flags, do a 'cli' then return the saved flags
	 */
	call	intr_clear

	int	$T_DBGENTR

	/*
	 * Restore the saved flags
	 */
	pushl	%eax
	call	intr_restore
	addl	$4, %esp

	leave
	ret
	SET_SIZE(kmdb_enter)
3737
3738#endif	/* __i386 */
3739#endif	/* __lint */
3740
3741#if defined(__lint)
3742
/* Lint prototype only; the real implementation is the assembly below. */
void
return_instr(void)
{}
3746
3747#else	/* __lint */
3748
	/* A bare return; usable as a do-nothing function pointer target. */
	ENTRY_NP(return_instr)
	rep;	ret	/* use 2 byte instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(return_instr)
3753
3754#endif	/* __lint */
3755
3756#if defined(__lint)
3757
/* Lint prototype only; the real implementation is the assembly below. */
ulong_t
getflags(void)
{
	return (0);
}
3763
3764#else	/* __lint */
3765
3766#if defined(__amd64)
3767
	/* ulong_t getflags(void) -- return the current %rflags. */
	ENTRY(getflags)
	pushfq
	popq	%rax
	ret
	SET_SIZE(getflags)
3773
3774#elif defined(__i386)
3775
	/* ulong_t getflags(void) -- return the current %eflags. */
	ENTRY(getflags)
	pushfl
	popl	%eax
	ret
	SET_SIZE(getflags)
3781
3782#endif	/* __i386 */
3783
3784#endif	/* __lint */
3785
3786#if defined(__lint)
3787
/* Lint prototype only; the real implementation is the assembly below. */
ftrace_icookie_t
ftrace_interrupt_disable(void)
{ return (0); }
3791
3792#else   /* __lint */
3793
3794#if defined(__amd64)
3795
	/*
	 * ftrace_icookie_t ftrace_interrupt_disable(void)
	 *
	 * Disable interrupts and return the prior %rflags as a cookie for
	 * ftrace_interrupt_enable().
	 */
	ENTRY(ftrace_interrupt_disable)
	pushfq
	popq	%rax
	CLI(%rdx)
	ret
	SET_SIZE(ftrace_interrupt_disable)
3802
3803#elif defined(__i386)
3804
	/*
	 * ftrace_icookie_t ftrace_interrupt_disable(void)
	 *
	 * Disable interrupts and return the prior %eflags as a cookie for
	 * ftrace_interrupt_enable().
	 */
	ENTRY(ftrace_interrupt_disable)
	pushfl
	popl	%eax
	CLI(%edx)
	ret
	SET_SIZE(ftrace_interrupt_disable)
3811
3812#endif	/* __i386 */
3813#endif	/* __lint */
3814
3815#if defined(__lint)
3816
/* Lint prototype only; the real implementation is the assembly below. */
/*ARGSUSED*/
void
ftrace_interrupt_enable(ftrace_icookie_t cookie)
{}
3821
3822#else	/* __lint */
3823
3824#if defined(__amd64)
3825
	/*
	 * void ftrace_interrupt_enable(ftrace_icookie_t cookie)
	 *
	 * Restore the flags saved by ftrace_interrupt_disable() by loading
	 * the cookie back into %rflags.
	 */
	ENTRY(ftrace_interrupt_enable)
	pushq	%rdi
	popfq
	ret
	SET_SIZE(ftrace_interrupt_enable)
3831
3832#elif defined(__i386)
3833
	/*
	 * void ftrace_interrupt_enable(ftrace_icookie_t cookie)
	 *
	 * Restore the flags saved by ftrace_interrupt_disable() by loading
	 * the cookie back into %eflags.
	 */
	ENTRY(ftrace_interrupt_enable)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	SET_SIZE(ftrace_interrupt_enable)
3840
3841#endif	/* __i386 */
3842#endif	/* __lint */
3843