xref: /titanic_41/usr/src/uts/intel/ia32/ml/i86_subr.s (revision ea8dc4b6d2251b437950c0056bc626b311c73c27)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 *  Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
 *  Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
 *    All Rights Reserved
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * General assembly language routines.
 * It is the intent of this file to contain routines that are
 * independent of the specific kernel architecture, and those that are
 * common across kernel architectures.
 * As architectures diverge, and implementations of specific
 * architecture-dependent routines change, the routines should be moved
 * from this file into the respective ../`arch -k`/subr.s file.
 */
44
45#include <sys/asm_linkage.h>
46#include <sys/asm_misc.h>
47#include <sys/panic.h>
48#include <sys/ontrap.h>
49#include <sys/regset.h>
50#include <sys/privregs.h>
51#include <sys/reboot.h>
52#include <sys/psw.h>
53#include <sys/x86_archext.h>
54
55#if defined(__lint)
56#include <sys/types.h>
57#include <sys/systm.h>
58#include <sys/thread.h>
59#include <sys/archsystm.h>
60#include <sys/byteorder.h>
61#include <sys/dtrace.h>
62#else	/* __lint */
63#include "assym.h"
64#endif	/* __lint */
65#include <sys/dditypes.h>
66
67/*
68 * on_fault()
69 * Catch lofault faults. Like setjmp except it returns one
70 * if code following causes uncorrectable fault. Turned off
71 * by calling no_fault().
72 */
73
#if defined(__lint)

/* ARGSUSED */
int
on_fault(label_t *ljb)
{ return (0); }

void
no_fault(void)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * on_fault(label_t *ljb), %rdi = ljb.
	 * Record the jump buffer in curthread->t_onfault and arm
	 * t_lofault with catch_fault, then tail-call setjmp so that
	 * on_fault() returns 0 now and 1 if a fault occurs later.
	 */
	ENTRY(on_fault)
	movq	%gs:CPU_THREAD, %rsi		/* %rsi = curthread */
	leaq	catch_fault(%rip), %rdx
	movq	%rdi, T_ONFAULT(%rsi)		/* jumpbuf in t_onfault */
	movq	%rdx, T_LOFAULT(%rsi)		/* catch_fault in t_lofault */
	jmp	setjmp				/* let setjmp do the rest */

	/*
	 * Entered via t_lofault when an uncorrectable fault occurs:
	 * disarm both handlers, then longjmp back so that the original
	 * on_fault() call appears to return 1.
	 */
catch_fault:
	movq	%gs:CPU_THREAD, %rsi		/* %rsi = curthread */
	movq	T_ONFAULT(%rsi), %rdi		/* address of save area */
	xorl	%eax, %eax
	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
	jmp	longjmp				/* let longjmp do the rest */
	SET_SIZE(on_fault)

	/*
	 * no_fault(void): disarm the handlers set up by on_fault().
	 */
	ENTRY(no_fault)
	movq	%gs:CPU_THREAD, %rsi		/* %rsi = curthread */
	xorl	%eax, %eax
	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
	ret
	SET_SIZE(no_fault)

#elif defined(__i386)

	/*
	 * i386 flavor of on_fault(); argument arrives on the stack.
	 */
	ENTRY(on_fault)
	movl	%gs:CPU_THREAD, %edx		/* %edx = curthread */
	movl	4(%esp), %eax			/* jumpbuf address */
	leal	catch_fault, %ecx
	movl	%eax, T_ONFAULT(%edx)		/* jumpbuf in t_onfault */
	movl	%ecx, T_LOFAULT(%edx)		/* catch_fault in t_lofault */
	jmp	setjmp				/* let setjmp do the rest */

catch_fault:
	movl	%gs:CPU_THREAD, %edx		/* %edx = curthread */
	xorl	%eax, %eax
	movl	T_ONFAULT(%edx), %ecx		/* address of save area */
	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
	pushl	%ecx				/* longjmp(jumpbuf) */
	call	longjmp				/* let longjmp do the rest */
	SET_SIZE(on_fault)

	ENTRY(no_fault)
	movl	%gs:CPU_THREAD, %edx		/* %edx = curthread */
	xorl	%eax, %eax
	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
	ret
	SET_SIZE(no_fault)

#endif	/* __i386 */
#endif	/* __lint */
143
144/*
145 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
146 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
147 */
148
#if defined(lint)

void
on_trap_trampoline(void)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Default ot_trampoline target: longjmp to the topmost
	 * on_trap_data's jump buffer, &curthread->t_ontrap->ot_jmpbuf.
	 */
	ENTRY(on_trap_trampoline)
	movq	%gs:CPU_THREAD, %rsi		/* %rsi = curthread */
	movq	T_ONTRAP(%rsi), %rdi		/* %rdi = t_ontrap */
	addq	$OT_JMPBUF, %rdi		/* %rdi = &ot_jmpbuf */
	jmp	longjmp
	SET_SIZE(on_trap_trampoline)

#elif defined(__i386)

	ENTRY(on_trap_trampoline)
	movl	%gs:CPU_THREAD, %eax		/* %eax = curthread */
	movl	T_ONTRAP(%eax), %eax		/* %eax = t_ontrap */
	addl	$OT_JMPBUF, %eax		/* %eax = &ot_jmpbuf */
	pushl	%eax
	call	longjmp
	SET_SIZE(on_trap_trampoline)

#endif	/* __i386 */
#endif	/* __lint */
178
179/*
180 * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
181 * more information about the on_trap() mechanism.  If the on_trap_data is the
182 * same as the topmost stack element, we just modify that element.
183 */
#if defined(lint)

/*ARGSUSED*/
int
on_trap(on_trap_data_t *otp, uint_t prot)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * on_trap(on_trap_data_t *otp, uint_t prot)
	 * %rdi = otp, %si = prot.  Initialize *otp, push it onto the
	 * t_ontrap stack unless it is already the topmost element, then
	 * tail-call setjmp on &otp->ot_jmpbuf: returns 0 now, 1 if the
	 * trampoline later longjmps back.
	 */
	ENTRY(on_trap)
	movw	%si, OT_PROT(%rdi)		/* ot_prot = prot */
	movw	$0, OT_TRAP(%rdi)		/* ot_trap = 0 */
	leaq	on_trap_trampoline(%rip), %rdx	/* rdx = &on_trap_trampoline */
	movq	%rdx, OT_TRAMPOLINE(%rdi)	/* ot_trampoline = rdx */
	xorl	%ecx, %ecx
	movq	%rcx, OT_HANDLE(%rdi)		/* ot_handle = NULL */
	movq	%rcx, OT_PAD1(%rdi)		/* ot_pad1 = NULL */
	movq	%gs:CPU_THREAD, %rdx		/* rdx = curthread */
	movq	T_ONTRAP(%rdx), %rcx		/* rcx = curthread->t_ontrap */
	cmpq	%rdi, %rcx			/* if (otp == %rcx)	*/
	je	0f				/*	don't modify t_ontrap */

	movq	%rcx, OT_PREV(%rdi)		/* ot_prev = t_ontrap */
	movq	%rdi, T_ONTRAP(%rdx)		/* curthread->t_ontrap = otp */

0:	addq	$OT_JMPBUF, %rdi		/* &ot_jmpbuf */
	jmp	setjmp
	SET_SIZE(on_trap)

#elif defined(__i386)

	ENTRY(on_trap)
	movl	4(%esp), %eax			/* %eax = otp */
	movl	8(%esp), %edx			/* %edx = prot */

	movw	%dx, OT_PROT(%eax)		/* ot_prot = prot */
	movw	$0, OT_TRAP(%eax)		/* ot_trap = 0 */
	leal	on_trap_trampoline, %edx	/* %edx = &on_trap_trampoline */
	movl	%edx, OT_TRAMPOLINE(%eax)	/* ot_trampoline = %edx */
	movl	$0, OT_HANDLE(%eax)		/* ot_handle = NULL */
	movl	$0, OT_PAD1(%eax)		/* ot_pad1 = NULL */
	movl	%gs:CPU_THREAD, %edx		/* %edx = curthread */
	movl	T_ONTRAP(%edx), %ecx		/* %ecx = curthread->t_ontrap */
	cmpl	%eax, %ecx			/* if (otp == %ecx) */
	je	0f				/*    don't modify t_ontrap */

	movl	%ecx, OT_PREV(%eax)		/* ot_prev = t_ontrap */
	movl	%eax, T_ONTRAP(%edx)		/* curthread->t_ontrap = otp */

0:	addl	$OT_JMPBUF, %eax		/* %eax = &ot_jmpbuf */
	movl	%eax, 4(%esp)			/* put %eax back on the stack */
	jmp	setjmp				/* let setjmp do the rest */
	SET_SIZE(on_trap)

#endif	/* __i386 */
#endif	/* __lint */
242
243/*
244 * Setjmp and longjmp implement non-local gotos using state vectors
245 * type label_t.
246 */
247
#if defined(__lint)

/* ARGSUSED */
int
setjmp(label_t *lp)
{ return (0); }

/* ARGSUSED */
void
longjmp(label_t *lp)
{}

#else	/* __lint */

#if LABEL_PC != 0
#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
#endif	/* LABEL_PC != 0 */

#if defined(__amd64)

	/*
	 * setjmp(label_t *lp): save stack pointer, frame pointer,
	 * callee-saved registers and the return PC into *lp; return 0.
	 */
	ENTRY(setjmp)
	movq	%rsp, LABEL_SP(%rdi)
	movq	%rbp, LABEL_RBP(%rdi)
	movq	%rbx, LABEL_RBX(%rdi)
	movq	%r12, LABEL_R12(%rdi)
	movq	%r13, LABEL_R13(%rdi)
	movq	%r14, LABEL_R14(%rdi)
	movq	%r15, LABEL_R15(%rdi)
	movq	(%rsp), %rdx		/* return address */
	movq	%rdx, (%rdi)		/* LABEL_PC is 0 */
	xorl	%eax, %eax		/* return 0 */
	ret
	SET_SIZE(setjmp)

	/*
	 * longjmp(label_t *lp): restore the state saved by setjmp() and
	 * resume at the saved PC, making that setjmp() appear to return 1.
	 */
	ENTRY(longjmp)
	movq	LABEL_SP(%rdi), %rsp
	movq	LABEL_RBP(%rdi), %rbp
	movq	LABEL_RBX(%rdi), %rbx
	movq	LABEL_R12(%rdi), %r12
	movq	LABEL_R13(%rdi), %r13
	movq	LABEL_R14(%rdi), %r14
	movq	LABEL_R15(%rdi), %r15
	movq	(%rdi), %rdx		/* return address; LABEL_PC is 0 */
	movq	%rdx, (%rsp)		/* overwrite our return addr, then ret */
	xorl	%eax, %eax
	incl	%eax			/* return 1 */
	ret
	SET_SIZE(longjmp)

#elif defined(__i386)

	ENTRY(setjmp)
	movl	4(%esp), %edx		/* address of save area */
	movl	%ebp, LABEL_EBP(%edx)
	movl	%ebx, LABEL_EBX(%edx)
	movl	%esi, LABEL_ESI(%edx)
	movl	%edi, LABEL_EDI(%edx)
	movl	%esp, 4(%edx)
	movl	(%esp), %ecx		/* %eip (return address) */
	movl	%ecx, (%edx)		/* LABEL_PC is 0 */
	subl	%eax, %eax		/* return 0 */
	ret
	SET_SIZE(setjmp)

	ENTRY(longjmp)
	movl	4(%esp), %edx		/* address of save area */
	movl	LABEL_EBP(%edx), %ebp
	movl	LABEL_EBX(%edx), %ebx
	movl	LABEL_ESI(%edx), %esi
	movl	LABEL_EDI(%edx), %edi
	movl	4(%edx), %esp
	movl	(%edx), %ecx		/* %eip (return addr); LABEL_PC is 0 */
	movl	$1, %eax
	addl	$4, %esp		/* pop ret adr */
	jmp	*%ecx			/* indirect */
	SET_SIZE(longjmp)

#endif	/* __i386 */
#endif	/* __lint */
327
328/*
329 * if a() calls b() calls caller(),
330 * caller() returns return address in a().
331 * (Note: We assume a() and b() are C routines which do the normal entry/exit
332 *  sequence.)
333 */
334
#if defined(__lint)

caddr_t
caller(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * caller(): return our caller's own return address, read through
	 * the caller's saved frame pointer.  Assumes the caller built a
	 * conventional stack frame (push %rbp; mov %rsp,%rbp).
	 */
	ENTRY(caller)
	movq	8(%rbp), %rax		/* b()'s return pc, in a() */
	ret
	SET_SIZE(caller)

#elif defined(__i386)

	ENTRY(caller)
	movl	4(%ebp), %eax		/* b()'s return pc, in a() */
	ret
	SET_SIZE(caller)

#endif	/* __i386 */
#endif	/* __lint */
359
360/*
361 * if a() calls callee(), callee() returns the
362 * return address in a();
363 */
364
#if defined(__lint)

caddr_t
callee(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * callee(): return our own return address, i.e. the PC in our
	 * caller that we will return to (taken straight off the stack).
	 */
	ENTRY(callee)
	movq	(%rsp), %rax		/* callee()'s return pc, in a() */
	ret
	SET_SIZE(callee)

#elif defined(__i386)

	ENTRY(callee)
	movl	(%esp), %eax		/* callee()'s return pc, in a() */
	ret
	SET_SIZE(callee)

#endif	/* __i386 */
#endif	/* __lint */
389
390/*
391 * return the current frame pointer
392 */
393
#if defined(__lint)

greg_t
getfp(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * getfp(): return the current frame pointer register.
	 */
	ENTRY(getfp)
	movq	%rbp, %rax
	ret
	SET_SIZE(getfp)

#elif defined(__i386)

	ENTRY(getfp)
	movl	%ebp, %eax
	ret
	SET_SIZE(getfp)

#endif	/* __i386 */
#endif	/* __lint */
418
419/*
420 * Invalidate a single page table entry in the TLB
421 */
422
#if defined(__lint)

/* ARGSUSED */
void
mmu_tlbflush_entry(caddr_t m)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * mmu_tlbflush_entry(caddr_t m): invalidate the TLB entry for
	 * the page containing virtual address m (single-page invlpg).
	 */
	ENTRY(mmu_tlbflush_entry)
	invlpg	(%rdi)
	ret
	SET_SIZE(mmu_tlbflush_entry)

#elif defined(__i386)

	ENTRY(mmu_tlbflush_entry)
	movl	4(%esp), %eax		/* %eax = m */
	invlpg	(%eax)
	ret
	SET_SIZE(mmu_tlbflush_entry)

#endif	/* __i386 */
#endif	/* __lint */
449
450
451/*
452 * Get/Set the value of various control registers
453 */
454
#if defined(__lint)

ulong_t
getcr0(void)
{ return (0); }

/* ARGSUSED */
void
setcr0(ulong_t value)
{}

ulong_t
getcr2(void)
{ return (0); }

ulong_t
getcr3(void)
{ return (0); }

/* ARGSUSED */
void
setcr3(ulong_t val)
{}

void
reload_cr3(void)
{}

ulong_t
getcr4(void)
{ return (0); }

/* ARGSUSED */
void
setcr4(ulong_t val)
{}

#if defined(__amd64)

ulong_t
getcr8(void)
{ return (0); }

/* ARGSUSED */
void
setcr8(ulong_t val)
{}

#endif	/* __amd64 */

#else	/* __lint */

/*
 * Trivial accessors for the %cr0/%cr2/%cr3/%cr4 control registers
 * (plus %cr8 on amd64, which the spl code here uses for apic/ipl
 * priority).  Each getcrN returns the register; each setcrN loads it
 * from the first argument.
 */
#if defined(__amd64)

	ENTRY(getcr0)
	movq	%cr0, %rax
	ret
	SET_SIZE(getcr0)

	ENTRY(setcr0)
	movq	%rdi, %cr0
	ret
	SET_SIZE(setcr0)

	ENTRY(getcr2)
	movq	%cr2, %rax
	ret
	SET_SIZE(getcr2)

	ENTRY(getcr3)
	movq	%cr3, %rax
	ret
	SET_SIZE(getcr3)

	ENTRY(setcr3)
	movq	%rdi, %cr3
	ret
	SET_SIZE(setcr3)

	/* reload_cr3(): rewrite %cr3 with its current value */
	ENTRY(reload_cr3)
	movq	%cr3, %rdi
	movq	%rdi, %cr3
	ret
	SET_SIZE(reload_cr3)

	ENTRY(getcr4)
	movq	%cr4, %rax
	ret
	SET_SIZE(getcr4)

	ENTRY(setcr4)
	movq	%rdi, %cr4
	ret
	SET_SIZE(setcr4)

	/* amd64 only: %cr8 task-priority accessors */
	ENTRY(getcr8)
	movq	%cr8, %rax
	ret
	SET_SIZE(getcr8)

	ENTRY(setcr8)
	movq	%rdi, %cr8
	ret
	SET_SIZE(setcr8)

#elif defined(__i386)

        ENTRY(getcr0)
        movl    %cr0, %eax
        ret
	SET_SIZE(getcr0)

        ENTRY(setcr0)
        movl    4(%esp), %eax
        movl    %eax, %cr0
        ret
	SET_SIZE(setcr0)

        ENTRY(getcr2)
        movl    %cr2, %eax
        ret
	SET_SIZE(getcr2)

	ENTRY(getcr3)
	movl    %cr3, %eax
	ret
	SET_SIZE(getcr3)

        ENTRY(setcr3)
        movl    4(%esp), %eax
        movl    %eax, %cr3
        ret
	SET_SIZE(setcr3)

	/* reload_cr3(): rewrite %cr3 with its current value */
	ENTRY(reload_cr3)
	movl    %cr3, %eax
	movl    %eax, %cr3
	ret
	SET_SIZE(reload_cr3)

	ENTRY(getcr4)
	movl    %cr4, %eax
	ret
	SET_SIZE(getcr4)

        ENTRY(setcr4)
        movl    4(%esp), %eax
        movl    %eax, %cr4
        ret
	SET_SIZE(setcr4)

#endif	/* __i386 */
#endif	/* __lint */
608
#if defined(__lint)

/*ARGSUSED*/
uint32_t
__cpuid_insn(struct cpuid_regs *regs)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * __cpuid_insn(struct cpuid_regs *regs)
	 * Execute cpuid with the four values from *regs and write the
	 * results back.  Field offsets used: cp_eax=0x0, cp_ebx=0x4,
	 * cp_ecx=0x8, cp_edx=0xc.  %rbx is callee-saved, so it (and the
	 * caller's %rcx/%rdx) is stashed in scratch regs around cpuid.
	 */
	ENTRY(__cpuid_insn)
	movq	%rbx, %r8		/* save regs clobbered by cpuid */
	movq	%rcx, %r9
	movq	%rdx, %r11
	movl	(%rdi), %eax		/* %eax = regs->cp_eax */
	movl	0x4(%rdi), %ebx		/* %ebx = regs->cp_ebx */
	movl	0x8(%rdi), %ecx		/* %ecx = regs->cp_ecx */
	movl	0xc(%rdi), %edx		/* %edx = regs->cp_edx */
	cpuid
	movl	%eax, (%rdi)		/* regs->cp_eax = %eax */
	movl	%ebx, 0x4(%rdi)		/* regs->cp_ebx = %ebx */
	movl	%ecx, 0x8(%rdi)		/* regs->cp_ecx = %ecx */
	movl	%edx, 0xc(%rdi)		/* regs->cp_edx = %edx */
	movq	%r8, %rbx		/* restore saved regs */
	movq	%r9, %rcx
	movq	%r11, %rdx
	ret
	SET_SIZE(__cpuid_insn)

#elif defined(__i386)

	/*
	 * i386 flavor: %ebp holds regs while %ebx/%ecx/%edx are saved
	 * on the stack around the cpuid.
	 */
        ENTRY(__cpuid_insn)
	pushl	%ebp
	movl	0x8(%esp), %ebp		/* %ebp = regs */
	pushl	%ebx
	pushl	%ecx
	pushl	%edx
	movl	(%ebp), %eax		/* %eax = regs->cp_eax */
	movl	0x4(%ebp), %ebx		/* %ebx = regs->cp_ebx */
	movl	0x8(%ebp), %ecx		/* %ecx = regs->cp_ecx */
	movl	0xc(%ebp), %edx		/* %edx = regs->cp_edx */
	cpuid
	movl	%eax, (%ebp)		/* regs->cp_eax = %eax */
	movl	%ebx, 0x4(%ebp)		/* regs->cp_ebx = %ebx */
	movl	%ecx, 0x8(%ebp)		/* regs->cp_ecx = %ecx */
	movl	%edx, 0xc(%ebp)		/* regs->cp_edx = %edx */
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%ebp
	ret
	SET_SIZE(__cpuid_insn)

#endif	/* __i386 */
#endif	/* __lint */
665
666/*
667 * Insert entryp after predp in a doubly linked list.
668 */
669
#if defined(__lint)

/*ARGSUSED*/
void
_insque(caddr_t entryp, caddr_t predp)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * _insque(entryp, predp): link entryp into a doubly linked list
	 * immediately after predp.  Layout assumed by the offsets:
	 * forw pointer at 0, back pointer at CPTRSIZE.
	 */
	ENTRY(_insque)
	movq	(%rsi), %rax		/* predp->forw 			*/
	movq	%rsi, CPTRSIZE(%rdi)	/* entryp->back = predp		*/
	movq	%rax, (%rdi)		/* entryp->forw = predp->forw	*/
	movq	%rdi, (%rsi)		/* predp->forw = entryp		*/
	movq	%rdi, CPTRSIZE(%rax)	/* predp->forw->back = entryp	*/
	ret
	SET_SIZE(_insque)

#elif defined(__i386)

	ENTRY(_insque)
	movl	8(%esp), %edx		/* %edx = predp */
	movl	4(%esp), %ecx		/* %ecx = entryp */
	movl	(%edx), %eax		/* predp->forw			*/
	movl	%edx, CPTRSIZE(%ecx)	/* entryp->back = predp		*/
	movl	%eax, (%ecx)		/* entryp->forw = predp->forw	*/
	movl	%ecx, (%edx)		/* predp->forw = entryp		*/
	movl	%ecx, CPTRSIZE(%eax)	/* predp->forw->back = entryp	*/
	ret
	SET_SIZE(_insque)

#endif	/* __i386 */
#endif	/* __lint */
705
706/*
707 * Remove entryp from a doubly linked list
708 */
709
<span style="white-space:pre">#if defined(__lint)</span>

/*ARGSUSED*/
void
_remque(caddr_t entryp)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * _remque(entryp): unlink entryp from its doubly linked list.
	 * Same layout as _insque: forw at 0, back at CPTRSIZE.
	 */
	ENTRY(_remque)
	movq	(%rdi), %rax		/* entry->forw */
	movq	CPTRSIZE(%rdi), %rdx	/* entry->back */
	movq	%rax, (%rdx)		/* entry->back->forw = entry->forw */
	movq	%rdx, CPTRSIZE(%rax)	/* entry->forw->back = entry->back */
	ret
	SET_SIZE(_remque)

#elif defined(__i386)

	ENTRY(_remque)
	movl	4(%esp), %ecx		/* %ecx = entryp */
	movl	(%ecx), %eax		/* entry->forw */
	movl	CPTRSIZE(%ecx), %edx	/* entry->back */
	movl	%eax, (%edx)		/* entry->back->forw = entry->forw */
	movl	%edx, CPTRSIZE(%eax)	/* entry->forw->back = entry->back */
	ret
	SET_SIZE(_remque)

#endif	/* __i386 */
#endif	/* __lint */
742
743/*
744 * Returns the number of
745 * non-NULL bytes in string argument.
746 */
747
#if defined(__lint)

/* ARGSUSED */
size_t
strlen(const char *str)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

/*
 * This is close to a simple transliteration of a C version of this
 * routine.  We should either just -make- this be a C version, or
 * justify having it in assembler by making it significantly faster.
 *
 * size_t
 * strlen(const char *s)
 * {
 *	const char *s0;
 * #if defined(DEBUG)
 *	if ((uintptr_t)s < KERNELBASE)
 *		panic(.str_panic_msg);
 * #endif
 *	for (s0 = s; *s; s++)
 *		;
 *	return (s - s0);
 * }
 */

	ENTRY(strlen)
#ifdef DEBUG
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jae	str_valid		/* arg >= kernelbase: OK */
	pushq	%rbp			/* build a frame for panic() */
	movq	%rsp, %rbp
	leaq	.str_panic_msg(%rip), %rdi
	xorl	%eax, %eax		/* %al = 0 FP args for variadic panic */
	call	panic
#endif	/* DEBUG */
str_valid:
	cmpb	$0, (%rdi)
	movq	%rdi, %rax		/* %rax = start of string */
	je	.null_found
	.align	4
.strlen_loop:
	incq	%rdi
	cmpb	$0, (%rdi)
	jne	.strlen_loop
.null_found:
	subq	%rax, %rdi		/* length = end - start */
	movq	%rdi, %rax
	ret
	SET_SIZE(strlen)

#elif defined(__i386)

	ENTRY(strlen)
#ifdef DEBUG
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)
	jae	str_valid
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.str_panic_msg
	call	panic
#endif /* DEBUG */

str_valid:
	movl	4(%esp), %eax		/* %eax = string address */
	testl	$3, %eax		/* if %eax not word aligned */
	jnz	.not_word_aligned	/* goto .not_word_aligned */
	.align	4
	/*
	 * Word-at-a-time scan.  A 32-bit word w contains a zero byte
	 * iff (((w & 0x7f7f7f7f) + 0x7f7f7f7f) | w) has a byte whose
	 * high bit is clear, i.e. the final mask below != 0x80808080.
	 */
.word_aligned:
	movl	(%eax), %edx		/* move 1 word from (%eax) to %edx */
	movl	$0x7f7f7f7f, %ecx
	andl	%edx, %ecx		/* %ecx = %edx & 0x7f7f7f7f */
	addl	$4, %eax		/* next word */
	addl	$0x7f7f7f7f, %ecx	/* %ecx += 0x7f7f7f7f */
	orl	%edx, %ecx		/* %ecx |= %edx */
	andl	$0x80808080, %ecx	/* %ecx &= 0x80808080 */
	cmpl	$0x80808080, %ecx	/* if no null byte in this word */
	je	.word_aligned		/* goto .word_aligned */
	subl	$4, %eax		/* post-incremented */
.not_word_aligned:
	cmpb	$0, (%eax)		/* if a byte in (%eax) is null */
	je	.null_found		/* goto .null_found */
	incl	%eax			/* next byte */
	testl	$3, %eax		/* if %eax not word aligned */
	jnz	.not_word_aligned	/* goto .not_word_aligned */
	jmp	.word_aligned		/* goto .word_aligned */
	.align	4
.null_found:
	subl	4(%esp), %eax		/* %eax -= string address */
	ret
	SET_SIZE(strlen)

#endif	/* __i386 */

#ifdef DEBUG
	.text
.str_panic_msg:
	.string "strlen: argument below kernelbase"
#endif /* DEBUG */

#endif	/* __lint */
855
	/*
	 * Berkeley 4.3 introduced symbolically named interrupt levels
	 * as a way to deal with priority in a machine independent fashion.
	 * Numbered priorities are machine specific, and should be
	 * discouraged where possible.
	 *
	 * Note, for the machine specific priorities there are
	 * examples listed for devices that use a particular priority.
	 * It should not be construed that all devices of that
	 * type should be at that priority.  It is currently where
	 * the current devices fit into the priority scheme based
	 * upon time criticalness.
	 *
	 * The underlying assumption of these assignments is that
	 * IPL 10 is the highest level from which a device
	 * routine can call wakeup.  Devices that interrupt from higher
	 * levels are restricted in what they can do.  If they need
	 * kernel services they should schedule a routine at a lower
	 * level (via software interrupt) to do the required
	 * processing.
	 *
	 * Examples of this higher usage:
	 *	Level	Usage
	 *	14	Profiling clock (and PROM uart polling clock)
	 *	12	Serial ports
	 *
	 * The serial ports request lower level processing on level 6.
	 *
	 * Also, almost all splN routines (where N is a number or a
	 * mnemonic) will do a RAISE(), on the assumption that they are
	 * never used to lower our priority.
	 * The exceptions are:
	 *	spl8()		Because you can't be above 15 to begin with!
	 *	splzs()		Because this is used at boot time to lower our
	 *			priority, to allow the PROM to poll the uart.
	 *	spl0()		Used to lower priority to 0.
	 */
893
#if defined(__lint)

int spl0(void)		{ return (0); }
int spl6(void)		{ return (0); }
int spl7(void)		{ return (0); }
int spl8(void)		{ return (0); }
int splhigh(void)	{ return (0); }
int splhi(void)		{ return (0); }
int splzs(void)		{ return (0); }

#else	/* __lint */

/* reg = cpu->cpu_m.cpu_pri; (cpu pointer supplied explicitly) */
#define	GETIPL_NOGS(reg, cpup)	\
	movl	CPU_PRI(cpup), reg;

/* cpu->cpu_m.cpu_pri = val; (cpu pointer supplied explicitly) */
#define	SETIPL_NOGS(val, cpup)	\
	movl	val, CPU_PRI(cpup);

/* reg = cpu->cpu_m.cpu_pri; (cpu located via %gs) */
#define	GETIPL(reg)	\
	movl	%gs:CPU_PRI, reg;

/* cpu->cpu_m.cpu_pri = val; (cpu located via %gs) */
#define	SETIPL(val)	\
	movl	val, %gs:CPU_PRI;

/*
 * Macro to raise processor priority level.
 * Avoid dropping processor priority if already at high level.
 * Also avoid going below CPU->cpu_base_spl, which could've just been set by
 * a higher-level interrupt thread that just blocked.
 *
 * Falls into spl (below) with the new level in %edi (amd64) / %edx (i386),
 * the cpu pointer in %rcx / %ecx, the old ipl in %eax, and interrupts
 * disabled; if no raise is needed it exits via the patchable setsplhisti.
 */
#if defined(__amd64)

#define	RAISE(level) \
	cli;			\
	LOADCPU(%rcx);		\
	movl	$/**/level, %edi;\
	GETIPL_NOGS(%eax, %rcx);\
	cmpl 	%eax, %edi;	\
	jg	spl;		\
	jmp	setsplhisti

#elif defined(__i386)

#define	RAISE(level) \
	cli;			\
	LOADCPU(%ecx);		\
	movl	$/**/level, %edx;\
	GETIPL_NOGS(%eax, %ecx);\
	cmpl 	%eax, %edx;	\
	jg	spl;		\
	jmp	setsplhisti

#endif	/* __i386 */

/*
 * Macro to set the priority to a specified level.
 * Avoid dropping the priority below CPU->cpu_base_spl.
 * (Tail-jumps straight into spl with the same register contract as RAISE.)
 */
#if defined(__amd64)

#define	SETPRI(level) \
	cli;				\
	LOADCPU(%rcx);			\
	movl	$/**/level, %edi;	\
	jmp	spl

#elif defined(__i386)

#define SETPRI(level) \
	cli;				\
	LOADCPU(%ecx);			\
	movl	$/**/level, %edx;	\
	jmp	spl

#endif	/* __i386 */
973
	/* locks out all interrupts, including memory errors */
	ENTRY(spl8)
	SETPRI(15)
	SET_SIZE(spl8)

	/* just below the level that profiling runs */
	ENTRY(spl7)
	RAISE(13)
	SET_SIZE(spl7)

	/* sun specific - highest priority onboard serial i/o asy ports */
	ENTRY(splzs)
	SETPRI(12)	/* Can't be a RAISE, as it's used to lower us */
	SET_SIZE(splzs)

	/*
	 * should lock out clocks and all interrupts,
	 * as you can see, there are exceptions
	 */

#if defined(__amd64)

	.align	16
	/*
	 * splhi()/splhigh()/spl6()/i_ddi_splhigh(): raise the ipl to
	 * DISP_LEVEL unless already at or above it; returns the old ipl
	 * in %eax.  The cr8-based fast path at setsplhi_patch is patched
	 * by install_spl() with a jmp to slow_setsplhi when %cr8 is not
	 * used for ipl control.
	 */
	ENTRY(splhi)
	ALTENTRY(splhigh)
	ALTENTRY(spl6)
	ALTENTRY(i_ddi_splhigh)
	cli
	LOADCPU(%rcx)
	movl	$DISP_LEVEL, %edi
	movl	CPU_PRI(%rcx), %eax	/* %eax = old ipl */
	cmpl	%eax, %edi
	jle	setsplhisti		/* already at/above DISP_LEVEL */
	SETIPL_NOGS(%edi, %rcx)
	/*
	 * If we aren't using cr8 to control ipl then we patch this
	 * with a jump to slow_setsplhi
	 */
	ALTENTRY(setsplhi_patch)
	movq	CPU_PRI_DATA(%rcx), %r11 /* get pri data ptr */
	movzb	(%r11, %rdi, 1), %rdx	/* get apic mask for this ipl */
	movq	%rdx, %cr8		/* set new apic priority */
	/*
	 * enable interrupts
	 */
setsplhisti:
	nop	/* patch this to a sti when a proper setspl routine appears */
	ret

	/*
	 * Non-cr8 path (reached via the patched jmp above): call the
	 * registered setspl routine, preserving the old ipl for return.
	 */
	ALTENTRY(slow_setsplhi)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$16, %rsp
	movl	%eax, -4(%rbp)		/* save old ipl */
	call	*setspl(%rip)
	movl	-4(%rbp), %eax		/* return old ipl */
	leave
	jmp	setsplhisti

	SET_SIZE(i_ddi_splhigh)
	SET_SIZE(spl6)
	SET_SIZE(splhigh)
	SET_SIZE(splhi)

#elif defined(__i386)

	.align	16
	ENTRY(splhi)
	ALTENTRY(splhigh)
	ALTENTRY(spl6)
	ALTENTRY(i_ddi_splhigh)
	cli
	LOADCPU(%ecx)
	movl	$DISP_LEVEL, %edx
	movl	CPU_PRI(%ecx), %eax	/* %eax = old ipl */
	cmpl	%eax, %edx
	jle	setsplhisti		/* already at/above DISP_LEVEL */
	SETIPL_NOGS(%edx, %ecx)		/* set new ipl */

	pushl   %eax                    /* save old ipl */
	pushl	%edx			/* pass new ipl */
	call	*setspl
	popl	%ecx			/* dummy pop */
	popl    %eax                    /* return old ipl */
	/*
	 * enable interrupts
	 *
	 * (we patch this to an sti once a proper setspl routine
	 * is installed)
	 */
setsplhisti:
	nop	/* patch this to a sti when a proper setspl routine appears */
	ret
	SET_SIZE(i_ddi_splhigh)
	SET_SIZE(spl6)
	SET_SIZE(splhigh)
	SET_SIZE(splhi)

#endif	/* __i386 */

	/* allow all interrupts */
	ENTRY(spl0)
	SETPRI(0)
	SET_SIZE(spl0)

#endif	/* __lint */
1080
1081/*
1082 * splr is like splx but will only raise the priority and never drop it
1083 */
#if defined(__lint)

/* ARGSUSED */
int
splr(int level)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * splr(int level): raise the ipl to "level" only if it is above
	 * the current ipl (never lowers it).  Returns the old ipl.
	 */
	ENTRY(splr)
	cli
	LOADCPU(%rcx)
	GETIPL_NOGS(%eax, %rcx)		/* %eax = current ipl */
	cmpl	%eax, %edi		/* if new level > current level */
	jg	spl			/* then set ipl to new level */
splr_setsti:
	nop	/* patch this to a sti when a proper setspl routine appears */
	ret				/* else return the current level */
	SET_SIZE(splr)

#elif defined(__i386)

	ENTRY(splr)
	cli
	LOADCPU(%ecx)
	movl	4(%esp), %edx		/* get new spl level */
	GETIPL_NOGS(%eax, %ecx)		/* %eax = current ipl */
	cmpl 	%eax, %edx		/* if new level > current level */
	jg	spl			/* then set ipl to new level */
splr_setsti:
	nop	/* patch this to a sti when a proper setspl routine appears */
	ret				/* else return the current level */
	SET_SIZE(splr)

#endif	/* __i386 */
#endif	/* __lint */
1122
1123
1124
1125/*
1126 * splx - set PIL back to that indicated by the level passed as an argument,
1127 * or to the CPU's base priority, whichever is higher.
1128 * Needs to be fall through to spl to save cycles.
1129 * Algorithm for spl:
1130 *
1131 *      turn off interrupts
1132 *
1133 *	if (CPU->cpu_base_spl > newipl)
1134 *		newipl = CPU->cpu_base_spl;
1135 *      oldipl = CPU->cpu_pridata->c_ipl;
1136 *      CPU->cpu_pridata->c_ipl = newipl;
1137 *
1138 *	/indirectly call function to set spl values (usually setpicmasks)
1139 *      setspl();  // load new masks into pics
1140 *
1141 * Be careful not to set priority lower than CPU->cpu_base_pri,
1142 * even though it seems we're raising the priority, it could be set
1143 * higher at any time by an interrupt routine, so we must block interrupts
1144 * and look at CPU->cpu_base_pri
1145 */
#if defined(__lint)

/* ARGSUSED */
void
splx(int level)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * splx(level) and the common spl tail.  Clamp the new level to
	 * CPU->cpu_base_spl, set cpu_pri, update the apic priority via
	 * %cr8 (fast path; install_spl() patches spl_patch with a jmp to
	 * slow_spl when cr8 isn't used), then dispatch any pending soft
	 * interrupt whose level exceeds the new ipl via fakesoftint.
	 * Returns the old ipl in %eax.
	 */
	ENTRY(splx)
	ALTENTRY(i_ddi_splx)
	cli				/* disable interrupts */
	LOADCPU(%rcx)
	/*FALLTHRU*/
	.align	4
spl:
	/*
	 * New priority level is in %edi, cpu struct pointer is in %rcx
	 */
	GETIPL_NOGS(%eax, %rcx)		/* get current ipl */
	cmpl   %edi, CPU_BASE_SPL(%rcx) /* if (base spl > new ipl) */
	ja     set_to_base_spl		/* then use base_spl */

setprilev:
	SETIPL_NOGS(%edi, %rcx)		/* set new ipl */
	/*
	 * If we aren't using cr8 to control ipl then we patch this
	 * with a jump to slow_spl
	 */
	ALTENTRY(spl_patch)
	movq	CPU_PRI_DATA(%rcx), %r11 /* get pri data ptr */
	movzb	(%r11, %rdi, 1), %rdx	/* get apic mask for this ipl */
	movq	%rdx, %cr8		/* set new apic priority */
	xorl	%edx, %edx
	bsrl	CPU_SOFTINFO(%rcx), %edx /* fls(cpu->cpu_softinfo.st_pending) */
	cmpl	%edi, %edx		/* new ipl vs. st_pending */
	jle	setsplsti		/* nothing pending above new ipl */

	pushq	%rbp
	movq	%rsp, %rbp
	/* stack now 16-byte aligned */
	pushq	%rax			/* save old spl */
	pushq	%rdi			/* save new ipl too */
	jmp	fakesoftint		/* dispatch pending soft interrupt */

setsplsti:
	nop	/* patch this to a sti when a proper setspl routine appears */
	ret

	/*
	 * Non-cr8 path (reached via the patched jmp at spl_patch):
	 * call the registered setspl routine, then check for pending
	 * soft interrupts as above.
	 */
	ALTENTRY(slow_spl)
	pushq	%rbp
	movq	%rsp, %rbp
	/* stack now 16-byte aligned */

	pushq	%rax			/* save old spl */
	pushq	%rdi			/* save new ipl too */

	call	*setspl(%rip)

	LOADCPU(%rcx)
	movl	CPU_SOFTINFO(%rcx), %eax
	orl	%eax, %eax
	jz	slow_setsplsti		/* no soft interrupts pending */

	bsrl	%eax, %edx		/* fls(cpu->cpu_softinfo.st_pending) */
	cmpl	0(%rsp), %edx		/* new ipl vs. st_pending */
	jg	fakesoftint

	ALTENTRY(fakesoftint_return)
	/*
	 * enable interrupts
	 */
slow_setsplsti:
	nop	/* patch this to a sti when a proper setspl routine appears */
	popq	%rdi
	popq	%rax			/* return old ipl */
	leave
	ret
	SET_SIZE(fakesoftint_return)

set_to_base_spl:
	movl	CPU_BASE_SPL(%rcx), %edi
	jmp	setprilev
	SET_SIZE(spl)
	SET_SIZE(i_ddi_splx)
	SET_SIZE(splx)

#elif defined(__i386)

	ENTRY(splx)
	ALTENTRY(i_ddi_splx)
	cli                             /* disable interrupts */
	LOADCPU(%ecx)
	movl	4(%esp), %edx		/* get new spl level */
	/*FALLTHRU*/

	.align	4
	ALTENTRY(spl)
	/*
	 * New priority level is in %edx
	 * (doing this early to avoid an AGI in the next instruction)
	 */
	GETIPL_NOGS(%eax, %ecx)		/* get current ipl */
	cmpl	%edx, CPU_BASE_SPL(%ecx) /* if ( base spl > new ipl) */
	ja	set_to_base_spl		/* then use base_spl */

setprilev:
	SETIPL_NOGS(%edx, %ecx)		/* set new ipl */

	pushl   %eax                    /* save old ipl */
	pushl	%edx			/* pass new ipl */
	call	*setspl

	LOADCPU(%ecx)
	movl	CPU_SOFTINFO(%ecx), %eax
	orl	%eax, %eax
	jz	setsplsti		/* no soft interrupts pending */

	/*
	 * Before dashing off, check that setsplsti has been patched.
	 */
	cmpl	$NOP_INSTR, setsplsti
	je	setsplsti

	bsrl	%eax, %edx		/* fls(cpu->cpu_softinfo.st_pending) */
	cmpl	0(%esp), %edx		/* new ipl vs. st_pending */
	jg	fakesoftint

	ALTENTRY(fakesoftint_return)
	/*
	 * enable interrupts
	 */
setsplsti:
	nop	/* patch this to a sti when a proper setspl routine appears */
	popl	%eax			/* discard new ipl */
	popl    %eax			/* return old ipl */
	ret
	SET_SIZE(fakesoftint_return)

set_to_base_spl:
	movl	CPU_BASE_SPL(%ecx), %edx
	jmp	setprilev
	SET_SIZE(spl)
	SET_SIZE(i_ddi_splx)
	SET_SIZE(splx)

#endif	/* __i386 */
#endif	/* __lint */
1296
#if defined(__lint)

void
install_spl(void)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * install_spl(void)
	 * Called once a proper setspl routine exists.  Temporarily
	 * clears CR0.WP so the write-protected kernel text can be
	 * patched, turns the "nop" placeholders into sti instructions,
	 * and, when %cr8 is not used for ipl control, patches the
	 * setsplhi_patch/spl_patch sites with short jmps to the slow
	 * (setspl-calling) variants.
	 */
	ENTRY_NP(install_spl)
	movq	%cr0, %rax
	movq	%rax, %rdx		/* %rdx = original cr0 */
	movl	$_BITNOT(CR0_WP), %ecx
	movslq	%ecx, %rcx
	andq	%rcx, %rax		/* we don't want to take a fault */
	movq	%rax, %cr0		/* clear CR0.WP while patching text */
	jmp	1f			/* flush prefetched instructions */
1:	movb	$STI_INSTR, setsplsti(%rip)
	movb	$STI_INSTR, slow_setsplsti(%rip)
	movb	$STI_INSTR, setsplhisti(%rip)
	movb	$STI_INSTR, splr_setsti(%rip)
	testl	$1, intpri_use_cr8(%rip)	/* are using %cr8 ? */
	jz	2f				/* no, go patch more */
	movq	%rdx, %cr0			/* restore original cr0 */
	ret
2:
	/*
	 * Patch spl functions to use slow spl method
	 * (build a 2-byte short jmp: opcode in the low byte, rel8
	 * displacement in the high byte, stored with a 16-bit movw)
	 */
	leaq	setsplhi_patch(%rip), %rdi	/* get patch point addr */
	leaq	slow_setsplhi(%rip), %rax	/* jmp target */
	subq	%rdi, %rax			/* calculate jmp distance */
	subq	$2, %rax			/* minus size of jmp instr */
	shlq	$8, %rax			/* construct jmp instr */
	addq	$JMP_INSTR, %rax
	movw	%ax, setsplhi_patch(%rip)	/* patch in the jmp */
	leaq	spl_patch(%rip), %rdi		/* get patch point addr */
	leaq	slow_spl(%rip), %rax		/* jmp target */
	subq	%rdi, %rax			/* calculate jmp distance */
	subq	$2, %rax			/* minus size of jmp instr */
	shlq	$8, %rax			/* construct jmp instr */
	addq	$JMP_INSTR, %rax
	movw	%ax, spl_patch(%rip)		/* patch in the jmp */
	/*
	 * Ensure %cr8 is zero since we aren't using it
	 */
	xorl	%eax, %eax
	movq	%rax, %cr8
	movq	%rdx, %cr0			/* restore original cr0 */
	ret
	SET_SIZE(install_spl)

#elif defined(__i386)

	/*
	 * i386 flavor: only the sti placeholders need patching
	 * (no %cr8 fast path here).
	 */
	ENTRY_NP(install_spl)
	movl	%cr0, %eax
	movl	%eax, %edx		/* %edx = original cr0 */
	andl	$_BITNOT(CR0_WP), %eax	/* we don't want to take a fault */
	movl	%eax, %cr0		/* clear CR0.WP while patching text */
	jmp	1f			/* flush prefetched instructions */
1:	movb	$STI_INSTR, setsplsti
	movb	$STI_INSTR, setsplhisti
	movb	$STI_INSTR, splr_setsti
	movl	%edx, %cr0		/* restore original cr0 */
	ret
	SET_SIZE(install_spl)

#endif	/* __i386 */
#endif	/* __lint */
1367
1368
1369/*
1370 * Get current processor interrupt level
1371 */
1372
1373#if defined(__lint)
1374
1375int
1376getpil(void)
1377{ return (0); }
1378
1379#else	/* __lint */
1380
1381#if defined(__amd64)
1382
/*
	 * int getpil(void)
	 * Return the current processor interrupt priority level in %eax.
	 */
	ENTRY(getpil)
	GETIPL(%eax)			/* priority level into %eax */
	ret
	SET_SIZE(getpil)

#elif defined(__i386)

	ENTRY(getpil)
	GETIPL(%eax)			/* priority level into %eax */
	ret
	SET_SIZE(getpil)
1394
1395#endif	/* __i386 */
1396#endif	/* __lint */
1397
1398#if defined(__i386)
1399
1400/*
1401 * Read and write the %gs register
1402 */
1403
1404#if defined(__lint)
1405
1406/*ARGSUSED*/
1407uint16_t
1408getgs(void)
1409{ return (0); }
1410
1411/*ARGSUSED*/
1412void
1413setgs(uint16_t sel)
1414{}
1415
1416#else	/* __lint */
1417
/*
	 * uint16_t getgs(void) - return the %gs selector, zero-extended.
	 */
	ENTRY(getgs)
	clr	%eax			/* clear high bits; selector is 16 bits */
	movw	%gs, %ax
	ret
	SET_SIZE(getgs)

	/*
	 * void setgs(uint16_t sel) - load %gs from the selector argument.
	 */
	ENTRY(setgs)
	movw	4(%esp), %gs
	ret
	SET_SIZE(setgs)
1428
1429#endif	/* __lint */
1430#endif	/* __i386 */
1431
1432#if defined(__lint)
1433
1434void
1435pc_reset(void)
1436{}
1437
1438#else	/* __lint */
1439
/*
	 * wait_500ms: spin for ~500ms by calling tenmicrosec() 50000 times.
	 * Used between reset attempts below to give slow hardware time to
	 * pull the reset line.  Preserves %ebx (callee-saved).
	 */
	ENTRY(wait_500ms)
	push	%ebx
	movl	$50000, %ebx
1:
	call	tenmicrosec
	decl	%ebx
	jnz	1b
	pop	%ebx
	ret
	SET_SIZE(wait_500ms)

#define	RESET_METHOD_KBC	1
#define	RESET_METHOD_PORT92	2
#define RESET_METHOD_PCI	4

	/* Bitmask of reset methods to attempt; all enabled by default. */
	DGDEF3(pc_reset_methods, 4, 8)
	.long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;

	/*
	 * void pc_reset(void)
	 *
	 * Hard-reset the machine, trying each enabled method in turn:
	 * keyboard controller, port 0x92 fast reset, PCI 0xcf9 reset,
	 * and finally a deliberate triple fault.  Never returns.
	 */
	ENTRY(pc_reset)

#if defined(__i386)
	testl	$RESET_METHOD_KBC, pc_reset_methods
#elif defined(__amd64)
	testl	$RESET_METHOD_KBC, pc_reset_methods(%rip)
#endif
	jz	1f

	/
	/ Try the classic keyboard controller-triggered reset.
	/
	movw	$0x64, %dx
	movb	$0xfe, %al
	outb	(%dx)

	/ Wait up to 500 milliseconds here for the keyboard controller
	/ to pull the reset line.  On some systems where the keyboard
	/ controller is slow to pull the reset line, the next reset method
	/ may be executed (which may be bad if those systems hang when the
	/ next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
	/ and Ferrari 4000 (doesn't like the cf9 reset method))

	call	wait_500ms

1:
#if defined(__i386)
	testl	$RESET_METHOD_PORT92, pc_reset_methods
#elif defined(__amd64)
	testl	$RESET_METHOD_PORT92, pc_reset_methods(%rip)
#endif
	jz	3f

	/
	/ Try port 0x92 fast reset
	/
	movw	$0x92, %dx
	inb	(%dx)
	cmpb	$0xff, %al	/ If port's not there, we should get back 0xFF
	je	1f
	testb	$1, %al		/ If bit 0
	jz	2f		/ is clear, jump to perform the reset
	andb	$0xfe, %al	/ otherwise,
	outb	(%dx)		/ clear bit 0 first, then
2:
	orb	$1, %al		/ Set bit 0
	outb	(%dx)		/ and reset the system
1:

	call	wait_500ms

3:
#if defined(__i386)
	testl	$RESET_METHOD_PCI, pc_reset_methods
#elif defined(__amd64)
	testl	$RESET_METHOD_PCI, pc_reset_methods(%rip)
#endif
	jz	4f

	/ Try the PCI (soft) reset vector (should work on all modern systems,
	/ but has been shown to cause problems on 450NX systems, and some newer
	/ systems (e.g. ATI IXP400-equipped systems))
	/ When resetting via this method, 2 writes are required.  The first
	/ targets bit 1 (0=hard reset without power cycle, 1=hard reset with
	/ power cycle).
	/ The reset occurs on the second write, during bit 2's transition from
	/ 0->1.
	movw	$0xcf9, %dx
	movb	$0x2, %al	/ Reset mode = hard, no power cycle
	outb	(%dx)
	movb	$0x6, %al
	outb	(%dx)

	call	wait_500ms

4:
	/
	/ port 0xcf9 failed also.  Last-ditch effort is to
	/ triple-fault the CPU.
	/
#if defined(__amd64)
	pushq	$0x0
	pushq	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
	lidt	(%rsp)
#elif defined(__i386)
	pushl	$0x0
	pushl	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
	lidt	(%esp)
#endif
	int	$0x0		/ Trigger interrupt, generate triple-fault

	cli
	hlt			/ Wait forever
	/*NOTREACHED*/
	SET_SIZE(pc_reset)
1553
1554#endif	/* __lint */
1555
1556/*
1557 * C callable in and out routines
1558 */
1559
1560#if defined(__lint)
1561
1562/* ARGSUSED */
1563void
1564outl(int port_address, uint32_t val)
1565{}
1566
1567#else	/* __lint */
1568
1569#if defined(__amd64)
1570
/*
	 * void outl(int port_address, uint32_t val)
	 * Write a 32-bit value to an I/O port.
	 */
	ENTRY(outl)
	movw	%di, %dx		/* port in %dx */
	movl	%esi, %eax		/* value in %eax */
	outl	(%dx)
	ret
	SET_SIZE(outl)

#elif defined(__i386)

	/* Stack offsets of the two arguments (no registers saved). */
	.set	PORT, 4
	.set	VAL, 8

	ENTRY(outl)
	movw	PORT(%esp), %dx
	movl	VAL(%esp), %eax
	outl	(%dx)
	ret
	SET_SIZE(outl)
1589
1590#endif	/* __i386 */
1591#endif	/* __lint */
1592
1593#if defined(__lint)
1594
1595/* ARGSUSED */
1596void
1597outw(int port_address, uint16_t val)
1598{}
1599
1600#else	/* __lint */
1601
1602#if defined(__amd64)
1603
/*
	 * void outw(int port_address, uint16_t val)
	 * Write a 16-bit value to an I/O port (outl with a 16-bit
	 * operand-size override).
	 */
	ENTRY(outw)
	movw	%di, %dx		/* port */
	movw	%si, %ax		/* value */
	D16 outl (%dx)		/* XX64 why not outw? */
	ret
	SET_SIZE(outw)

#elif defined(__i386)

	ENTRY(outw)
	movw	PORT(%esp), %dx
	movw	VAL(%esp), %ax
	D16 outl (%dx)
	ret
	SET_SIZE(outw)
1619
1620#endif	/* __i386 */
1621#endif	/* __lint */
1622
1623#if defined(__lint)
1624
1625/* ARGSUSED */
1626void
1627outb(int port_address, uint8_t val)
1628{}
1629
1630#else	/* __lint */
1631
1632#if defined(__amd64)
1633
/*
	 * void outb(int port_address, uint8_t val)
	 * Write an 8-bit value to an I/O port.
	 */
	ENTRY(outb)
	movw	%di, %dx		/* port */
	movb	%sil, %al		/* value */
	outb	(%dx)
	ret
	SET_SIZE(outb)

#elif defined(__i386)

	ENTRY(outb)
	movw	PORT(%esp), %dx
	movb	VAL(%esp), %al
	outb	(%dx)
	ret
	SET_SIZE(outb)
1649
1650#endif	/* __i386 */
1651#endif	/* __lint */
1652
1653#if defined(__lint)
1654
1655/* ARGSUSED */
1656uint32_t
1657inl(int port_address)
1658{ return (0); }
1659
1660#else	/* __lint */
1661
1662#if defined(__amd64)
1663
/*
	 * uint32_t inl(int port_address)
	 * Read a 32-bit value from an I/O port; result in %eax.
	 */
	ENTRY(inl)
	xorl	%eax, %eax
	movw	%di, %dx		/* port */
	inl	(%dx)
	ret
	SET_SIZE(inl)

#elif defined(__i386)

	ENTRY(inl)
	movw	PORT(%esp), %dx
	inl	(%dx)
	ret
	SET_SIZE(inl)
1678
1679#endif	/* __i386 */
1680#endif	/* __lint */
1681
1682#if defined(__lint)
1683
1684/* ARGSUSED */
1685uint16_t
1686inw(int port_address)
1687{ return (0); }
1688
1689#else	/* __lint */
1690
1691#if defined(__amd64)
1692
/*
	 * uint16_t inw(int port_address)
	 * Read a 16-bit value from an I/O port; result zero-extended
	 * in %eax (%eax is cleared first, then the low 16 bits are
	 * read with a 16-bit operand-size override on inl).
	 */
	ENTRY(inw)
	xorl	%eax, %eax
	movw	%di, %dx		/* port */
	D16 inl	(%dx)
	ret
	SET_SIZE(inw)

#elif defined(__i386)

	ENTRY(inw)
	xorl	%eax, %eax		/* zeroing idiom; was subl %eax, %eax */
	movw	PORT(%esp), %dx
	D16 inl	(%dx)
	ret
	SET_SIZE(inw)
1708
1709#endif	/* __i386 */
1710#endif	/* __lint */
1711
1712
1713#if defined(__lint)
1714
1715/* ARGSUSED */
1716uint8_t
1717inb(int port_address)
1718{ return (0); }
1719
1720#else	/* __lint */
1721
1722#if defined(__amd64)
1723
/*
	 * uint8_t inb(int port_address)
	 * Read an 8-bit value from an I/O port; result zero-extended
	 * in %eax (%eax is cleared first, then %al is read).
	 */
	ENTRY(inb)
	xorl	%eax, %eax
	movw	%di, %dx		/* port */
	inb	(%dx)
	ret
	SET_SIZE(inb)

#elif defined(__i386)

	ENTRY(inb)
	xorl	%eax, %eax		/* zeroing idiom; was subl %eax, %eax */
	movw	PORT(%esp), %dx
	inb	(%dx)
	ret
	SET_SIZE(inb)
1739
1740#endif	/* __i386 */
1741#endif	/* __lint */
1742
1743
1744#if defined(__lint)
1745
1746/* ARGSUSED */
1747void
1748repoutsw(int port, uint16_t *addr, int cnt)
1749{}
1750
1751#else	/* __lint */
1752
1753#if defined(__amd64)
1754
/*
	 * void repoutsw(int port, uint16_t *addr, int cnt)
	 * Write cnt 16-bit words from *addr to an I/O port.
	 */
	ENTRY(repoutsw)
	movl	%edx, %ecx		/* cnt into rep counter */
	movw	%di, %dx		/* port; source already in %rsi */
	rep
	  D16 outsl
	ret
	SET_SIZE(repoutsw)

#elif defined(__i386)

	/*
	 * The arguments and saved registers are on the stack in the
	 *  following order:
	 *      |  cnt  |  +16
	 *      | *addr |  +12
	 *      | port  |  +8
	 *      |  eip  |  +4
	 *      |  esi  |  <-- %esp
	 * If additional values are pushed onto the stack, make sure
	 * to adjust the following constants accordingly.
	 */
	.set	PORT, 8
	.set	ADDR, 12
	.set	COUNT, 16

	ENTRY(repoutsw)
	pushl	%esi			/* callee-saved; outsl source reg */
	movl	PORT(%esp), %edx
	movl	ADDR(%esp), %esi
	movl	COUNT(%esp), %ecx
	rep
	  D16 outsl
	popl	%esi
	ret
	SET_SIZE(repoutsw)
1790
1791#endif	/* __i386 */
1792#endif	/* __lint */
1793
1794
1795#if defined(__lint)
1796
1797/* ARGSUSED */
1798void
1799repinsw(int port_addr, uint16_t *addr, int cnt)
1800{}
1801
1802#else	/* __lint */
1803
1804#if defined(__amd64)
1805
/*
	 * void repinsw(int port_addr, uint16_t *addr, int cnt)
	 * Read cnt 16-bit words from an I/O port into *addr.
	 */
	ENTRY(repinsw)
	movl	%edx, %ecx		/* cnt into rep counter */
	movw	%di, %dx		/* port */
	rep
	  D16 insl
	ret
	SET_SIZE(repinsw)

#elif defined(__i386)

	/* Uses the PORT/ADDR/COUNT stack offsets defined for repoutsw. */
	ENTRY(repinsw)
	pushl	%edi			/* callee-saved; insl destination reg */
	movl	PORT(%esp), %edx
	movl	ADDR(%esp), %edi
	movl	COUNT(%esp), %ecx
	rep
	  D16 insl
	popl	%edi
	ret
	SET_SIZE(repinsw)
1826
1827#endif	/* __i386 */
1828#endif	/* __lint */
1829
1830
1831#if defined(__lint)
1832
1833/* ARGSUSED */
1834void
1835repinsb(int port, uint8_t *addr, int count)
1836{}
1837
1838#else	/* __lint */
1839
1840#if defined(__amd64)
1841
/*
	 * void repinsb(int port, uint8_t *addr, int count)
	 * Read count bytes from an I/O port into *addr.
	 */
	ENTRY(repinsb)
	movl	%edx, %ecx		/* count into rep counter */
	movw	%di, %dx		/* port */
	movq	%rsi, %rdi		/* destination for insb */
	rep
	  insb
	ret
	SET_SIZE(repinsb)

#elif defined(__i386)

	/*
	 * The arguments and saved registers are on the stack in the
	 *  following order:
	 *      |  cnt  |  +16
	 *      | *addr |  +12
	 *      | port  |  +8
	 *      |  eip  |  +4
	 *      |  esi  |  <-- %esp
	 * If additional values are pushed onto the stack, make sure
	 * to adjust the following constants accordingly.
	 */
	.set	IO_PORT, 8
	.set	IO_ADDR, 12
	.set	IO_COUNT, 16

	ENTRY(repinsb)
	pushl	%edi			/* callee-saved; insb destination reg */
	movl	IO_ADDR(%esp), %edi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  insb
	popl	%edi
	ret
	SET_SIZE(repinsb)
1878
1879#endif	/* __i386 */
1880#endif	/* __lint */
1881
1882
1883/*
1884 * Input a stream of 32-bit words.
1885 * NOTE: count is a DWORD count.
1886 */
1887#if defined(__lint)
1888
1889/* ARGSUSED */
1890void
1891repinsd(int port, uint32_t *addr, int count)
1892{}
1893
1894#else	/* __lint */
1895
1896#if defined(__amd64)
1897
/*
	 * void repinsd(int port, uint32_t *addr, int count)
	 * Read count 32-bit words from an I/O port into *addr.
	 */
	ENTRY(repinsd)
	movl	%edx, %ecx		/* DWORD count into rep counter */
	movw	%di, %dx		/* port */
	movq	%rsi, %rdi		/* destination for insl */
	rep
	  insl
	ret
	SET_SIZE(repinsd)

#elif defined(__i386)

	/* Uses the IO_PORT/IO_ADDR/IO_COUNT offsets defined for repinsb. */
	ENTRY(repinsd)
	pushl	%edi			/* callee-saved; insl destination reg */
	movl	IO_ADDR(%esp), %edi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  insl
	popl	%edi
	ret
	SET_SIZE(repinsd)
1919
1920#endif	/* __i386 */
1921#endif	/* __lint */
1922
1923/*
1924 * Output a stream of bytes
1925 * NOTE: count is a byte count
1926 */
1927#if defined(__lint)
1928
1929/* ARGSUSED */
1930void
1931repoutsb(int port, uint8_t *addr, int count)
1932{}
1933
1934#else	/* __lint */
1935
1936#if defined(__amd64)
1937
/*
	 * void repoutsb(int port, uint8_t *addr, int count)
	 * Write count bytes from *addr to an I/O port.
	 */
	ENTRY(repoutsb)
	movl	%edx, %ecx		/* byte count into rep counter */
	movw	%di, %dx		/* port; source already in %rsi */
	rep
	  outsb
	ret
	SET_SIZE(repoutsb)

#elif defined(__i386)

	/* Uses the IO_PORT/IO_ADDR/IO_COUNT offsets defined for repinsb. */
	ENTRY(repoutsb)
	pushl	%esi			/* callee-saved; outsb source reg */
	movl	IO_ADDR(%esp), %esi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  outsb
	popl	%esi
	ret
	SET_SIZE(repoutsb)
1958
1959#endif	/* __i386 */
1960#endif	/* __lint */
1961
1962/*
1963 * Output a stream of 32-bit words
1964 * NOTE: count is a DWORD count
1965 */
1966#if defined(__lint)
1967
1968/* ARGSUSED */
1969void
1970repoutsd(int port, uint32_t *addr, int count)
1971{}
1972
1973#else	/* __lint */
1974
1975#if defined(__amd64)
1976
/*
	 * void repoutsd(int port, uint32_t *addr, int count)
	 * Write count 32-bit words from *addr to an I/O port.
	 */
	ENTRY(repoutsd)
	movl	%edx, %ecx		/* DWORD count into rep counter */
	movw	%di, %dx		/* port; source already in %rsi */
	rep
	  outsl
	ret
	SET_SIZE(repoutsd)

#elif defined(__i386)

	/* Uses the IO_PORT/IO_ADDR/IO_COUNT offsets defined for repinsb. */
	ENTRY(repoutsd)
	pushl	%esi			/* callee-saved; outsl source reg */
	movl	IO_ADDR(%esp), %esi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  outsl
	popl	%esi
	ret
	SET_SIZE(repoutsd)
1997
1998#endif	/* __i386 */
1999#endif	/* __lint */
2000
2001/*
2002 * void int3(void)
2003 * void int18(void)
2004 * void int20(void)
2005 */
2006
2007#if defined(__lint)
2008
2009void
2010int3(void)
2011{}
2012
2013void
2014int18(void)
2015{}
2016
2017void
2018int20(void)
2019{}
2020
2021#else	/* __lint */
2022
/* int3: raise a breakpoint trap. */
	ENTRY(int3)
	int	$T_BPTFLT
	ret
	SET_SIZE(int3)

	/* int18: raise a machine-check trap. */
	ENTRY(int18)
	int	$T_MCE
	ret
	SET_SIZE(int18)

	/*
	 * int20: enter the kernel debugger, but only if the system was
	 * booted with the debugger enabled (RB_DEBUG in boothowto);
	 * otherwise a no-op.
	 */
	ENTRY(int20)
	movl	boothowto, %eax
	andl	$RB_DEBUG, %eax
	jz	1f

	int	$T_DBGENTR
1:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(int20)
2043
2044#endif	/* __lint */
2045
2046#if defined(__lint)
2047
2048/* ARGSUSED */
2049int
2050scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
2051{ return (0); }
2052
2053#else	/* __lint */
2054
2055#if defined(__amd64)
2056
/*
	 * int scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
	 *
	 * Scan cp[0..size) for the first byte c such that
	 * (table[c] & mask) != 0; return the number of bytes remaining
	 * from that byte to the end (0 if no byte matched).
	 */
	ENTRY(scanc)
					/* rdi == size */
					/* rsi == cp */
					/* rdx == table */
					/* rcx == mask */
	addq	%rsi, %rdi		/* end = &cp[size] */
.scanloop:
	cmpq	%rdi, %rsi		/* while (cp < end */
	jnb	.scandone
	movzbq	(%rsi), %r8		/* %r8 = *cp */
	incq	%rsi			/* cp++ */
	testb	%cl, (%r8, %rdx)	/* table[*cp] & mask */
	jz	.scanloop		/*  && (table[*cp] & mask) == 0) */
	decq	%rsi			/* (fix post-increment) */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	ret
	SET_SIZE(scanc)

#elif defined(__i386)

	ENTRY(scanc)
	pushl	%edi
	pushl	%esi
	movb	24(%esp), %cl		/* mask = %cl */
	movl	16(%esp), %esi		/* cp = %esi */
	movl	20(%esp), %edx		/* table = %edx */
	movl	%esi, %edi
	addl	12(%esp), %edi		/* end = &cp[size]; */
.scanloop:
	cmpl	%edi, %esi		/* while (cp < end */
	jnb	.scandone
	movzbl	(%esi),  %eax		/* %al = *cp */
	incl	%esi			/* cp++ */
	movb	(%edx,  %eax), %al	/* %al = table[*cp] */
	testb	%al, %cl
	jz	.scanloop		/*   && (table[*cp] & mask) == 0) */
	dec	%esi			/* post-incremented */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	popl	%esi
	popl	%edi
	ret
	SET_SIZE(scanc)
2103
2104#endif	/* __i386 */
2105#endif	/* __lint */
2106
2107/*
2108 * Replacement functions for ones that are normally inlined.
2109 * In addition to the copy in i86.il, they are defined here just in case.
2110 */
2111
2112#if defined(__lint)
2113
2114int
2115intr_clear(void)
2116{ return 0; }
2117
2118int
2119clear_int_flag(void)
2120{ return 0; }
2121
2122#else	/* __lint */
2123
2124#if defined(__amd64)
2125
/*
	 * int intr_clear(void) / int clear_int_flag(void)
	 * Disable interrupts and return the previous flags register,
	 * suitable for a later intr_restore()/restore_int_flag().
	 */
	ENTRY(intr_clear)
	ENTRY(clear_int_flag)
	pushfq
	cli
	popq	%rax			/* return saved flags */
	ret
	SET_SIZE(clear_int_flag)
	SET_SIZE(intr_clear)

#elif defined(__i386)

	ENTRY(intr_clear)
	ENTRY(clear_int_flag)
	pushfl
	cli
	popl	%eax			/* return saved flags */
	ret
	SET_SIZE(clear_int_flag)
	SET_SIZE(intr_clear)
2145
2146#endif	/* __i386 */
2147#endif	/* __lint */
2148
2149#if defined(__lint)
2150
2151struct cpu *
2152curcpup(void)
2153{ return 0; }
2154
2155#else	/* __lint */
2156
2157#if defined(__amd64)
2158
/*
	 * struct cpu *curcpup(void)
	 * Return the current CPU's cpu_t pointer from the %gs-based
	 * per-CPU area.
	 */
	ENTRY(curcpup)
	movq	%gs:CPU_SELF, %rax
	ret
	SET_SIZE(curcpup)

#elif defined(__i386)

	ENTRY(curcpup)
	movl	%gs:CPU_SELF, %eax
	ret
	SET_SIZE(curcpup)
2170
2171#endif	/* __i386 */
2172#endif	/* __lint */
2173
2174#if defined(__lint)
2175
2176/* ARGSUSED */
2177uint32_t
2178htonl(uint32_t i)
2179{ return (0); }
2180
2181/* ARGSUSED */
2182uint32_t
2183ntohl(uint32_t i)
2184{ return (0); }
2185
2186#else	/* __lint */
2187
2188#if defined(__amd64)
2189
2190	/* XX64 there must be shorter sequences for this */
/*
	 * uint32_t htonl(uint32_t i) / uint32_t ntohl(uint32_t i)
	 * Byte-swap a 32-bit value (host <-> network order; x86 is
	 * little-endian, so both directions are the same swap).
	 */
	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	%edi, %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

#elif defined(__i386)

	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	4(%esp), %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)
2208
2209#endif	/* __i386 */
2210#endif	/* __lint */
2211
2212#if defined(__lint)
2213
2214/* ARGSUSED */
2215uint16_t
2216htons(uint16_t i)
2217{ return (0); }
2218
2219/* ARGSUSED */
2220uint16_t
2221ntohs(uint16_t i)
2222{ return (0); }
2223
2224
2225#else	/* __lint */
2226
2227#if defined(__amd64)
2228
2229	/* XX64 there must be better sequences for this */
/*
	 * uint16_t htons(uint16_t i) / uint16_t ntohs(uint16_t i)
	 * Byte-swap a 16-bit value: swap all 32 bits, then shift the
	 * swapped low 16 bits down (also zeroing the high half).
	 */
	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	%edi, %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)

#elif defined(__i386)

	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	4(%esp), %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)
2249
2250#endif	/* __i386 */
2251#endif	/* __lint */
2252
2253
2254#if defined(__lint)
2255
2256/* ARGSUSED */
2257void
2258intr_restore(uint_t i)
2259{ return; }
2260
2261/* ARGSUSED */
2262void
2263restore_int_flag(int i)
2264{ return; }
2265
2266#else	/* __lint */
2267
2268#if defined(__amd64)
2269
/*
	 * void intr_restore(uint_t i) / void restore_int_flag(int i)
	 * Restore the flags register (and thus the interrupt-enable
	 * state) saved earlier by intr_clear()/clear_int_flag().
	 */
	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	pushq	%rdi
	popfq
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

#elif defined(__i386)

	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	pushl	4(%esp)
	popfl
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)
2287
2288#endif	/* __i386 */
2289#endif	/* __lint */
2290
2291#if defined(__lint)
2292
2293void
2294sti(void)
2295{}
2296
2297#else	/* __lint */
2298
/* void sti(void) - enable interrupts. */
	ENTRY(sti)
	sti
	ret
	SET_SIZE(sti)
2303
2304#endif	/* __lint */
2305
2306#if defined(__lint)
2307
2308dtrace_icookie_t
2309dtrace_interrupt_disable(void)
2310{ return (0); }
2311
2312#else   /* __lint */
2313
2314#if defined(__amd64)
2315
/*
	 * dtrace_icookie_t dtrace_interrupt_disable(void)
	 * Disable interrupts, returning the prior flags register as the
	 * cookie for dtrace_interrupt_enable().
	 */
	ENTRY(dtrace_interrupt_disable)
	pushfq
	popq	%rax			/* cookie = saved flags */
	cli
	ret
	SET_SIZE(dtrace_interrupt_disable)

#elif defined(__i386)

	ENTRY(dtrace_interrupt_disable)
	pushfl
	popl	%eax			/* cookie = saved flags */
	cli
	ret
	SET_SIZE(dtrace_interrupt_disable)
2331
2332#endif	/* __i386 */
2333#endif	/* __lint */
2334
2335#if defined(__lint)
2336
2337/*ARGSUSED*/
2338void
2339dtrace_interrupt_enable(dtrace_icookie_t cookie)
2340{}
2341
2342#else	/* __lint */
2343
2344#if defined(__amd64)
2345
/*
	 * void dtrace_interrupt_enable(dtrace_icookie_t cookie)
	 * Restore the flags register saved by dtrace_interrupt_disable().
	 */
	ENTRY(dtrace_interrupt_enable)
	pushq	%rdi
	popfq
	ret
	SET_SIZE(dtrace_interrupt_enable)

#elif defined(__i386)

	ENTRY(dtrace_interrupt_enable)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	SET_SIZE(dtrace_interrupt_enable)
2360
2361#endif	/* __i386 */
2362#endif	/* __lint */
2363
2364
2365#if defined(lint)
2366
2367void
2368dtrace_membar_producer(void)
2369{}
2370
2371void
2372dtrace_membar_consumer(void)
2373{}
2374
2375#else	/* __lint */
2376
/*
	 * Memory barriers for DTrace.  On x86's ordered memory model
	 * these need no fence instruction; both are empty functions.
	 */
	ENTRY(dtrace_membar_producer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_producer)

	ENTRY(dtrace_membar_consumer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_consumer)
2386
2387#endif	/* __lint */
2388
2389#if defined(__lint)
2390
2391kthread_id_t
2392threadp(void)
2393{ return ((kthread_id_t)0); }
2394
2395#else	/* __lint */
2396
2397#if defined(__amd64)
2398
/*
	 * kthread_id_t threadp(void)
	 * Return the current thread pointer from the %gs-based
	 * per-CPU area.
	 */
	ENTRY(threadp)
	movq	%gs:CPU_THREAD, %rax
	ret
	SET_SIZE(threadp)

#elif defined(__i386)

	ENTRY(threadp)
	movl	%gs:CPU_THREAD, %eax
	ret
	SET_SIZE(threadp)
2410
2411#endif	/* __i386 */
2412#endif	/* __lint */
2413
2414/*
2415 *   Checksum routine for Internet Protocol Headers
2416 */
2417
2418#if defined(__lint)
2419
2420/* ARGSUSED */
unsigned int
ip_ocsum(
	ushort_t *address,	/* ptr to 1st message buffer */
	int halfword_count,	/* length of data */
	unsigned int sum)	/* partial checksum */
{
	int		i;
	unsigned int	psum = 0;	/* partial sum */

	/* Sum all 16-bit halfwords of the buffer. */
	for (i = 0; i < halfword_count; i++, address++) {
		psum += *address;
	}

	/* Fold any carries out of the low 16 bits back in (end-around). */
	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	/* Add the caller's partial checksum ... */
	psum += sum;

	/* ... and fold again so the result fits in 16 bits. */
	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	return (psum);
}
2446
2447#else	/* __lint */
2448
2449#if defined(__amd64)
2450
/*
	 * unsigned int ip_ocsum(ushort_t *address, int halfword_count,
	 *     unsigned int sum)
	 *
	 * One's-complement Internet checksum.  The main loop consumes
	 * 64 bytes (32 halfwords) per iteration as a chain of 32-bit
	 * adds-with-carry, alternating accumulation between %edx and
	 * %eax; a computed jump into the chain handles the sub-64-byte
	 * tail, and an odd leading halfword is summed separately to
	 * keep the pointer 4-byte aligned.
	 */
	ENTRY(ip_ocsum)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jnb	1f
	xorl	%eax, %eax
	movq	%rdi, %rsi
	leaq	.ip_ocsum_panic_msg(%rip), %rdi
	call	panic
	/*NOTREACHED*/
.ip_ocsum_panic_msg:
	.string	"ip_ocsum: address 0x%p below kernelbase\n"
1:
#endif
	movl	%esi, %ecx	/* halfword_count */
	movq	%rdi, %rsi	/* address */
				/* partial sum in %edx */
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done
	testq	$3, %rsi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:	/* XX64 opportunities for 8-byte operations? */
.next_iter:
	/* XX64 opportunities for prefetch? */
	/* XX64 compute csum with 64 bit quantities? */
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%rsi), %edx
.only60:
	adcl	4(%rsi), %eax
.only56:
	adcl	8(%rsi), %edx
.only52:
	adcl	12(%rsi), %eax
.only48:
	adcl	16(%rsi), %edx
.only44:
	adcl	20(%rsi), %eax
.only40:
	adcl	24(%rsi), %edx
.only36:
	adcl	28(%rsi), %eax
.only32:
	adcl	32(%rsi), %edx
.only28:
	adcl	36(%rsi), %eax
.only24:
	adcl	40(%rsi), %edx
.only20:
	adcl	44(%rsi), %eax
.only16:
	adcl	48(%rsi), %edx
.only12:
	adcl	52(%rsi), %eax
.only8:
	adcl	56(%rsi), %edx
.only4:
	adcl	60(%rsi), %eax	/* could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addq	$64, %rsi
	testl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	leave
	ret

.ip_csum_notaligned:
	/* sum the first halfword separately to align the pointer */
	xorl	%edi, %edi
	movw	(%rsi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addq	$2, %rsi
	decl	%ecx
	jmp	.ip_csum_aligned

.less_than_32:
	addl	$32, %ecx	/* %ecx = halfwords remaining (< 32) */
	testl	$1, %ecx
	jz	.size_aligned
	/* odd count: sum the last halfword here */
	andl	$0xfe, %ecx
	movzwl	(%rsi, %rcx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	/* bias %rsi so the .onlyN entry points read the right bytes */
	movl	%ecx, %edi
	shrl	$1, %ecx
	shl	$1, %edi
	subq	$64, %rdi
	addq	%rdi, %rsi
	leaq    .ip_ocsum_jmptbl(%rip), %rdi
	leaq	(%rdi, %rcx, 8), %rdi
	xorl	%ecx, %ecx
	clc			/* entry points start with adcl */
	jmp 	*(%rdi)

	.align	8
.ip_ocsum_jmptbl:
	.quad	.only0, .only4, .only8, .only12, .only16, .only20
	.quad	.only24, .only28, .only32, .only36, .only40, .only44
	.quad	.only48, .only52, .only56, .only60
	SET_SIZE(ip_ocsum)
2567
2568#elif defined(__i386)
2569
/*
	 * unsigned int ip_ocsum(ushort_t *address, int halfword_count,
	 *     unsigned int sum)
	 *
	 * i386 variant of the one's-complement Internet checksum;
	 * same 64-bytes-per-iteration adc chain and computed-jump
	 * tail handling as the amd64 version above.
	 */
	ENTRY(ip_ocsum)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movl	12(%ebp), %ecx	/* count of half words */
	movl	16(%ebp), %edx	/* partial checksum */
	movl	8(%ebp), %esi
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done

	testl	$3, %esi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:
.next_iter:
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%esi), %edx
.only60:
	adcl	4(%esi), %eax
.only56:
	adcl	8(%esi), %edx
.only52:
	adcl	12(%esi), %eax
.only48:
	adcl	16(%esi), %edx
.only44:
	adcl	20(%esi), %eax
.only40:
	adcl	24(%esi), %edx
.only36:
	adcl	28(%esi), %eax
.only32:
	adcl	32(%esi), %edx
.only28:
	adcl	36(%esi), %eax
.only24:
	adcl	40(%esi), %edx
.only20:
	adcl	44(%esi), %eax
.only16:
	adcl	48(%esi), %edx
.only12:
	adcl	52(%esi), %eax
.only8:
	adcl	56(%esi), %edx
.only4:
	adcl	60(%esi), %eax	/* We could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* we could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addl	$64, %esi
	andl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	popl	%edi		/* restore registers */
	popl	%esi
	popl	%ebx
	leave
	ret

.ip_csum_notaligned:
	/* sum the first halfword separately to align the pointer */
	xorl	%edi, %edi
	movw	(%esi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addl	$2, %esi
	decl	%ecx
	jmp	.ip_csum_aligned

.less_than_32:
	addl	$32, %ecx	/* %ecx = halfwords remaining (< 32) */
	testl	$1, %ecx
	jz	.size_aligned
	/* odd count: sum the last halfword here */
	andl	$0xfe, %ecx
	movzwl	(%esi, %ecx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	/* bias %esi so the .onlyN entry points read the right bytes */
	movl	%ecx, %edi
	shrl	$1, %ecx
	shl	$1, %edi
	subl	$64, %edi
	addl	%edi, %esi
	movl	$.ip_ocsum_jmptbl, %edi
	lea	(%edi, %ecx, 4), %edi
	xorl	%ecx, %ecx
	clc			/* entry points start with adcl */
	jmp 	*(%edi)
	SET_SIZE(ip_ocsum)

	.data
	.align	4

.ip_ocsum_jmptbl:
	.long	.only0, .only4, .only8, .only12, .only16, .only20
	.long	.only24, .only28, .only32, .only36, .only40, .only44
	.long	.only48, .only52, .only56, .only60

2680
2681
2682#endif	/* __i386 */
2683#endif	/* __lint */
2684
2685/*
2686 * multiply two long numbers and yield a u_longlong_t result, callable from C.
2687 * Provided to manipulate hrtime_t values.
2688 */
2689#if defined(__lint)
2690
2691/* result = a * b; */
2692
2693/* ARGSUSED */
2694unsigned long long
2695mul32(uint_t a, uint_t b)
2696{ return (0); }
2697
2698#else	/* __lint */
2699
2700#if defined(__amd64)
2701
/*
	 * unsigned long long mul32(uint_t a, uint_t b)
	 * 32x32 -> 64-bit unsigned multiply; used for hrtime_t math.
	 */
	ENTRY(mul32)
	xorl	%edx, %edx	/* XX64 joe, paranoia? */
	movl	%edi, %eax
	mull	%esi		/* %edx:%eax = a * b */
	shlq	$32, %rdx
	orq	%rdx, %rax	/* combine into single 64-bit result */
	ret
	SET_SIZE(mul32)

#elif defined(__i386)

	ENTRY(mul32)
	movl	8(%esp), %eax
	movl	4(%esp), %ecx
	mull	%ecx		/* result already split across %edx:%eax */
	ret
	SET_SIZE(mul32)
2719
2720#endif	/* __i386 */
2721#endif	/* __lint */
2722
2723#if defined(notused)
2724#if defined(__lint)
2725/* ARGSUSED */
2726void
2727load_pte64(uint64_t *pte, uint64_t pte_value)
2728{}
2729#else	/* __lint */
/*
	 * void load_pte64(uint64_t *pte, uint64_t pte_value)
	 * (i386, notused) Store a 64-bit PTE as two 32-bit writes,
	 * high word first.  NOTE(review): the two halves are not
	 * written atomically -- presumably acceptable for its callers.
	 */
	.globl load_pte64
load_pte64:
	movl	4(%esp), %eax	/* pte */
	movl	8(%esp), %ecx	/* low 32 bits of pte_value */
	movl	12(%esp), %edx	/* high 32 bits of pte_value */
	movl	%edx, 4(%eax)
	movl	%ecx, (%eax)
	ret
2738#endif	/* __lint */
2739#endif	/* notused */
2740
2741#if defined(__lint)
2742
2743/*ARGSUSED*/
2744void
2745scan_memory(caddr_t addr, size_t size)
2746{}
2747
2748#else	/* __lint */
2749
2750#if defined(__amd64)
2751
/*
	 * void scan_memory(caddr_t addr, size_t size)
	 * Touch (read) every word in [addr, addr+size) using a string
	 * load; the loaded values are discarded.
	 */
	ENTRY(scan_memory)
	shrq	$3, %rsi	/* convert %rsi from byte to quadword count */
	jz	.scanm_done
	movq	%rsi, %rcx	/* move count into rep control register */
	movq	%rdi, %rsi	/* move addr into lodsq control reg. */
	rep lodsq		/* scan the memory range */
.scanm_done:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(scan_memory)

#elif defined(__i386)

	ENTRY(scan_memory)
	pushl	%ecx
	pushl	%esi
	movl	16(%esp), %ecx	/* move 2nd arg into rep control register */
	shrl	$2, %ecx	/* convert from byte count to word count */
	jz	.scanm_done
	movl	12(%esp), %esi	/* move 1st arg into lodsw control register */
	.byte	0xf3		/* rep prefix.  lame assembler.  sigh. */
	lodsl
.scanm_done:
	popl	%esi
	popl	%ecx
	ret
	SET_SIZE(scan_memory)
2779
2780#endif	/* __i386 */
2781#endif	/* __lint */
2782
2783
2784#if defined(__lint)
2785
2786/*ARGSUSED */
2787int
2788lowbit(ulong_t i)
2789{ return (0); }
2790
2791#else	/* __lint */
2792
2793#if defined(__amd64)
2794
/*
	 * int lowbit(ulong_t i)
	 * Return the 1-based index of the lowest set bit, or 0 if i == 0
	 * (bsf leaves %eax = -1 untouched when the source is zero).
	 */
	ENTRY(lowbit)
	movl	$-1, %eax
	bsfq	%rdi, %rax
	incl	%eax
	ret
	SET_SIZE(lowbit)

#elif defined(__i386)

	ENTRY(lowbit)
	movl	$-1, %eax
	bsfl	4(%esp), %eax
	incl	%eax
	ret
	SET_SIZE(lowbit)
2810
2811#endif	/* __i386 */
2812#endif	/* __lint */
2813
2814#if defined(__lint)
2815
2816/*ARGSUSED*/
2817int
2818highbit(ulong_t i)
2819{ return (0); }
2820
2821#else	/* __lint */
2822
2823#if defined(__amd64)
2824
/*
	 * int highbit(ulong_t i)
	 * Return the 1-based index of the highest set bit, or 0 if i == 0
	 * (bsr leaves %eax = -1 untouched when the source is zero).
	 */
	ENTRY(highbit)
	movl	$-1, %eax
	bsrq	%rdi, %rax
	incl	%eax
	ret
	SET_SIZE(highbit)

#elif defined(__i386)

	ENTRY(highbit)
	movl	$-1, %eax
	bsrl	4(%esp), %eax
	incl	%eax
	ret
	SET_SIZE(highbit)
2840
2841#endif	/* __i386 */
2842#endif	/* __lint */
2843
2844#if defined(__lint)
2845
2846/*ARGSUSED*/
2847uint64_t
2848rdmsr(uint_t r)
2849{ return (0); }
2850
2851/*ARGSUSED*/
2852void
2853wrmsr(uint_t r, const uint64_t val)
2854{}
2855
2856void
2857invalidate_cache(void)
2858{}
2859
2860#else  /* __lint */
2861
2862#if defined(__amd64)
2863
/*
	 * uint64_t rdmsr(uint_t r)
	 * Read MSR r; rdmsr returns the value split across %edx:%eax,
	 * recombined here into %rax.
	 */
	ENTRY(rdmsr)
	movl	%edi, %ecx
	rdmsr
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	SET_SIZE(rdmsr)

	/*
	 * void wrmsr(uint_t r, const uint64_t val)
	 * Write val to MSR r; wrmsr takes the value in %edx:%eax.
	 */
	ENTRY(wrmsr)
	movq	%rsi, %rdx
	shrq	$32, %rdx
	movl	%esi, %eax
	movl	%edi, %ecx
	wrmsr
	ret
	SET_SIZE(wrmsr)

#elif defined(__i386)

	ENTRY(rdmsr)
	movl	4(%esp), %ecx
	rdmsr
	ret			/* 64-bit result already in %edx:%eax */
	SET_SIZE(rdmsr)

	ENTRY(wrmsr)
	movl	4(%esp), %ecx
	movl	8(%esp), %eax
	movl	12(%esp), %edx
	wrmsr
	ret
	SET_SIZE(wrmsr)

#endif	/* __i386 */

	/*
	 * void invalidate_cache(void)
	 * Write back and invalidate the processor caches.
	 */
	ENTRY(invalidate_cache)
	wbinvd
	ret
	SET_SIZE(invalidate_cache)
2903
2904#endif	/* __lint */
2905
2906#if defined(__lint)
2907
2908/*ARGSUSED*/
2909void getcregs(struct cregs *crp)
2910{}
2911
2912#else	/* __lint */
2913
2914#if defined(__amd64)
2915
2916#define	GETMSR(r, off, d)	\
2917	movl	$r, %ecx;	\
2918	rdmsr;			\
2919	movl	%eax, off(d);	\
2920	movl	%edx, off+4(d)
2921
/*
	 * void getcregs(struct cregs *crp)
	 *
	 * Snapshot the system/control registers into *crp (%rdi).
	 * Each descriptor-table slot is zeroed first because sgdt/sidt
	 * store only 10 bytes and sldt/str only 2, which would leave
	 * the remainder of the field uninitialized.
	 */
	ENTRY_NP(getcregs)
	xorl	%eax, %eax
	movq	%rax, CREG_GDT+8(%rdi)
	sgdt	CREG_GDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_IDT+8(%rdi)
	sidt	CREG_IDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_LDT(%rdi)
	sldt	CREG_LDT(%rdi)		/* 2 bytes */
	movq	%rax, CREG_TASKR(%rdi)
	str	CREG_TASKR(%rdi)	/* 2 bytes */
	movq	%cr0, %rax
	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
	movq	%cr2, %rax
	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
	movq	%cr3, %rax
	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
	movq	%cr4, %rax
	movq	%rax, CREG_CR4(%rdi)	/* cr4 (was wrongly stored to CREG_CR8) */
	movq	%cr8, %rax
	movq	%rax, CREG_CR8(%rdi)	/* cr8 */
	GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
	GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
	SET_SIZE(getcregs)
2945
2946#undef GETMSR
2947
2948#elif defined(__i386)
2949
	/*
	 * void getcregs(struct cregs *crp)
	 * Capture the descriptor-table registers, task register and control
	 * registers into *crp.  %cr4 only exists when the CPU advertises
	 * large-page support, so it is read conditionally.
	 */
	ENTRY_NP(getcregs)
	movl	4(%esp), %edx		/ %edx = crp
	movw	$0, CREG_GDT+6(%edx)	/ zero-pad the 6-byte sgdt/sidt
	movw	$0, CREG_IDT+6(%edx)	/ pseudo-descriptors to 8 bytes
	sgdt	CREG_GDT(%edx)		/ gdt
	sidt	CREG_IDT(%edx)		/ idt
	sldt	CREG_LDT(%edx)		/ ldt
	str	CREG_TASKR(%edx)	/ task
	movl	%cr0, %eax
	movl	%eax, CREG_CR0(%edx)	/ cr0
	movl	%cr2, %eax
	movl	%eax, CREG_CR2(%edx)	/ cr2
	movl	%cr3, %eax
	movl	%eax, CREG_CR3(%edx)	/ cr3
	testl	$X86_LARGEPAGE, x86_feature
	jz	.nocr4
	movl	%cr4, %eax
	movl	%eax, CREG_CR4(%edx)	/ cr4
	jmp	.skip
.nocr4:
	movl	$0, CREG_CR4(%edx)
.skip:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(getcregs)
2975
2976#endif	/* __i386 */
2977#endif	/* __lint */
2978
2979
2980/*
2981 * A panic trigger is a word which is updated atomically and can only be set
2982 * once.  We atomically store 0xDEFACEDD and load the old value.  If the
2983 * previous value was 0, we succeed and return 1; otherwise return 0.
2984 * This allows a partially corrupt trigger to still trigger correctly.  DTrace
2985 * has its own version of this function to allow it to panic correctly from
2986 * probe context.
2987 */
2988#if defined(__lint)
2989
/* Lint-only C prototypes for the panic trigger routines below. */
/*ARGSUSED*/
int
panic_trigger(int *tp)
{ return (0); }

/*ARGSUSED*/
int
dtrace_panic_trigger(int *tp)
{ return (0); }
2999
3000#else	/* __lint */
3001
3002#if defined(__amd64)
3003
3004	ENTRY_NP(panic_trigger)
3005	xorl	%eax, %eax
3006	movl	$0xdefacedd, %edx
3007	lock
3008	  xchgl	%edx, (%rdi)
3009	cmpl	$0, %edx
3010	je	0f
3011	movl	$0, %eax
3012	ret
30130:	movl	$1, %eax
3014	ret
3015	SET_SIZE(panic_trigger)
3016
3017	ENTRY_NP(dtrace_panic_trigger)
3018	xorl	%eax, %eax
3019	movl	$0xdefacedd, %edx
3020	lock
3021	  xchgl	%edx, (%rdi)
3022	cmpl	$0, %edx
3023	je	0f
3024	movl	$0, %eax
3025	ret
30260:	movl	$1, %eax
3027	ret
3028	SET_SIZE(dtrace_panic_trigger)
3029
3030#elif defined(__i386)
3031
	/*
	 * int panic_trigger(int *tp)
	 * Atomically fire the trigger; return 1 if we were first, else 0.
	 */
	ENTRY_NP(panic_trigger)
	movl	4(%esp), %edx		/ %edx = address of trigger
	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
	lock				/ assert lock
	xchgl %eax, (%edx)		/ exchange %eax and the trigger
	cmpl	$0, %eax		/ if (%eax == 0x0)
	je	0f			/   return (1);
	movl	$0, %eax		/ else
	ret				/   return (0);
0:	movl	$1, %eax
	ret
	SET_SIZE(panic_trigger)
3044
	/*
	 * int dtrace_panic_trigger(int *tp)
	 * Same as panic_trigger(); separate entry point for DTrace so it
	 * can panic from probe context (see block comment above).
	 */
	ENTRY_NP(dtrace_panic_trigger)
	movl	4(%esp), %edx		/ %edx = address of trigger
	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
	lock				/ assert lock
	xchgl %eax, (%edx)		/ exchange %eax and the trigger
	cmpl	$0, %eax		/ if (%eax == 0x0)
	je	0f			/   return (1);
	movl	$0, %eax		/ else
	ret				/   return (0);
0:	movl	$1, %eax
	ret
	SET_SIZE(dtrace_panic_trigger)
3057
3058#endif	/* __i386 */
3059#endif	/* __lint */
3060
3061/*
3062 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
3063 * into the panic code implemented in panicsys().  vpanic() is responsible
3064 * for passing through the format string and arguments, and constructing a
3065 * regs structure on the stack into which it saves the current register
3066 * values.  If we are not dying due to a fatal trap, these registers will
3067 * then be preserved in panicbuf as the current processor state.  Before
3068 * invoking panicsys(), vpanic() activates the first panic trigger (see
3069 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
3070 * DTrace takes a slightly different panic path if it must panic from probe
3071 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
3072 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
3073 * branches back into vpanic().
3074 */
3075#if defined(__lint)
3076
/* Lint-only C prototypes for vpanic() and dtrace_vpanic() below. */
/*ARGSUSED*/
void
vpanic(const char *format, va_list alist)
{}

/*ARGSUSED*/
void
dtrace_vpanic(const char *format, va_list alist)
{}
3086
3087#else	/* __lint */
3088
3089#if defined(__amd64)
3090
3091	ENTRY_NP(vpanic)			/* Initial stack layout: */
3092
3093	pushq	%rbp				/* | %rip | 	0x60	*/
3094	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
3095	pushfq					/* | rfl  |	0x50	*/
3096	pushq	%r11				/* | %r11 |	0x48	*/
3097	pushq	%r10				/* | %r10 |	0x40	*/
3098	pushq	%rbx				/* | %rbx |	0x38	*/
3099	pushq	%rax				/* | %rax |	0x30	*/
3100	pushq	%r9				/* | %r9  |	0x28	*/
3101	pushq	%r8				/* | %r8  |	0x20	*/
3102	pushq	%rcx				/* | %rcx |	0x18	*/
3103	pushq	%rdx				/* | %rdx |	0x10	*/
3104	pushq	%rsi				/* | %rsi |	0x8 alist */
3105	pushq	%rdi				/* | %rdi |	0x0 format */
3106
3107	movq	%rsp, %rbx			/* %rbx = current %rsp */
3108
3109	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
3110	call	panic_trigger			/* %eax = panic_trigger() */
3111
3112vpanic_common:
3113	cmpl	$0, %eax
3114	je	0f
3115
3116	/*
3117	 * If panic_trigger() was successful, we are the first to initiate a
3118	 * panic: we now switch to the reserved panic_stack before continuing.
3119	 */
3120	leaq	panic_stack(%rip), %rsp
3121	addq	$PANICSTKSIZE, %rsp
31220:	subq	$REGSIZE, %rsp
3123	/*
3124	 * Now that we've got everything set up, store the register values as
3125	 * they were when we entered vpanic() to the designated location in
3126	 * the regs structure we allocated on the stack.
3127	 */
3128	movq	0x0(%rbx), %rcx
3129	movq	%rcx, REGOFF_RDI(%rsp)
3130	movq	0x8(%rbx), %rcx
3131	movq	%rcx, REGOFF_RSI(%rsp)
3132	movq	0x10(%rbx), %rcx
3133	movq	%rcx, REGOFF_RDX(%rsp)
3134	movq	0x18(%rbx), %rcx
3135	movq	%rcx, REGOFF_RCX(%rsp)
3136	movq	0x20(%rbx), %rcx
3137
3138	movq	%rcx, REGOFF_R8(%rsp)
3139	movq	0x28(%rbx), %rcx
3140	movq	%rcx, REGOFF_R9(%rsp)
3141	movq	0x30(%rbx), %rcx
3142	movq	%rcx, REGOFF_RAX(%rsp)
3143	movq	0x38(%rbx), %rcx
3144	movq	%rbx, REGOFF_RBX(%rsp)
3145	movq	0x58(%rbx), %rcx
3146
3147	movq	%rcx, REGOFF_RBP(%rsp)
3148	movq	0x40(%rbx), %rcx
3149	movq	%rcx, REGOFF_R10(%rsp)
3150	movq	0x48(%rbx), %rcx
3151	movq	%rcx, REGOFF_R11(%rsp)
3152	movq	%r12, REGOFF_R12(%rsp)
3153
3154	movq	%r13, REGOFF_R13(%rsp)
3155	movq	%r14, REGOFF_R14(%rsp)
3156	movq	%r15, REGOFF_R15(%rsp)
3157
3158	movl	$MSR_AMD_FSBASE, %ecx
3159	rdmsr
3160	movl	%eax, REGOFF_FSBASE(%rsp)
3161	movl	%edx, REGOFF_FSBASE+4(%rsp)
3162
3163	movl	$MSR_AMD_GSBASE, %ecx
3164	rdmsr
3165	movl	%eax, REGOFF_GSBASE(%rsp)
3166	movl	%edx, REGOFF_GSBASE+4(%rsp)
3167
3168	xorl	%ecx, %ecx
3169	movw	%ds, %cx
3170	movq	%rcx, REGOFF_DS(%rsp)
3171	movw	%es, %cx
3172	movq	%rcx, REGOFF_ES(%rsp)
3173	movw	%fs, %cx
3174	movq	%rcx, REGOFF_FS(%rsp)
3175	movw	%gs, %cx
3176	movq	%rcx, REGOFF_GS(%rsp)
3177
3178	movq	$0, REGOFF_TRAPNO(%rsp)
3179
3180	movq	$0, REGOFF_ERR(%rsp)
3181	leaq	vpanic(%rip), %rcx
3182	movq	%rcx, REGOFF_RIP(%rsp)
3183	movw	%cs, %cx
3184	movzwq	%cx, %rcx
3185	movq	%rcx, REGOFF_CS(%rsp)
3186	movq	0x50(%rbx), %rcx
3187	movq	%rcx, REGOFF_RFL(%rsp)
3188	movq	%rbx, %rcx
3189	addq	$0x60, %rcx
3190	movq	%rcx, REGOFF_RSP(%rsp)
3191	movw	%ss, %cx
3192	movzwq	%cx, %rcx
3193	movq	%rcx, REGOFF_SS(%rsp)
3194
3195	/*
3196	 * panicsys(format, alist, rp, on_panic_stack)
3197	 */
3198	movq	REGOFF_RDI(%rsp), %rdi		/* format */
3199	movq	REGOFF_RSI(%rsp), %rsi		/* alist */
3200	movq	%rsp, %rdx			/* struct regs */
3201	movl	%eax, %ecx			/* on_panic_stack */
3202	call	panicsys
3203	addq	$REGSIZE, %rsp
3204	popq	%rdi
3205	popq	%rsi
3206	popq	%rdx
3207	popq	%rcx
3208	popq	%r8
3209	popq	%r9
3210	popq	%rax
3211	popq	%rbx
3212	popq	%r10
3213	popq	%r11
3214	popfq
3215	leave
3216	ret
3217	SET_SIZE(vpanic)
3218
	/*
	 * void dtrace_vpanic(const char *format, va_list alist)
	 * DTrace's panic entry point: builds the same initial stack frame
	 * as vpanic(), calls dtrace_panic_trigger() instead, then joins
	 * the common code at vpanic_common.
	 */
	ENTRY_NP(dtrace_vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip | 	0x60	*/
	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
	pushfq					/* | rfl  |	0x50	*/
	pushq	%r11				/* | %r11 |	0x48	*/
	pushq	%r10				/* | %r10 |	0x40	*/
	pushq	%rbx				/* | %rbx |	0x38	*/
	pushq	%rax				/* | %rax |	0x30	*/
	pushq	%r9				/* | %r9  |	0x28	*/
	pushq	%r8				/* | %r8  |	0x20	*/
	pushq	%rcx				/* | %rcx |	0x18	*/
	pushq	%rdx				/* | %rdx |	0x10	*/
	pushq	%rsi				/* | %rsi |	0x8 alist */
	pushq	%rdi				/* | %rdi |	0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	dtrace_panic_trigger	/* %eax = dtrace_panic_trigger() */
	jmp	vpanic_common

	SET_SIZE(dtrace_vpanic)
3242
3243#elif defined(__i386)
3244
	/*
	 * void vpanic(const char *format, va_list alist)
	 * 32-bit panic entry point: save scratch registers (layout below,
	 * offsets relative to %ebx), fire the panic trigger, switch to
	 * panic_stack if we are first, build a struct regs snapshot and
	 * call panicsys(format, alist, &regs, on_panic_stack).
	 */
	ENTRY_NP(vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip | 20
	movl	%esp, %ebp			/ | %ebp | 16
	pushl	%eax				/ | %eax | 12
	pushl	%ebx				/ | %ebx |  8
	pushl	%ecx				/ | %ecx |  4
	pushl	%edx				/ | %edx |  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	panic_trigger			/ %eax = panic_trigger()
	addl	$4, %esp			/ reset stack pointer

vpanic_common:
	cmpl	$0, %eax			/ if (%eax == 0)
	je	0f				/   goto 0f;

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	lea	panic_stack, %esp		/ %esp  = panic_stack
	addl	$PANICSTKSIZE, %esp		/ %esp += PANICSTKSIZE

0:	subl	$REGSIZE, %esp			/ allocate struct regs

	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
#if !defined(__GNUC_AS__)
	movw	%gs, %edx
	movl	%edx, REGOFF_GS(%esp)
	movw	%fs, %edx
	movl	%edx, REGOFF_FS(%esp)
	movw	%es, %edx
	movl	%edx, REGOFF_ES(%esp)
	movw	%ds, %edx
	movl	%edx, REGOFF_DS(%esp)
#else	/* __GNUC_AS__ */
	mov	%gs, %edx
	mov	%edx, REGOFF_GS(%esp)
	mov	%fs, %edx
	mov	%edx, REGOFF_FS(%esp)
	mov	%es, %edx
	mov	%edx, REGOFF_ES(%esp)
	mov	%ds, %edx
	mov	%edx, REGOFF_DS(%esp)
#endif	/* __GNUC_AS__ */
	movl	%edi, REGOFF_EDI(%esp)
	movl	%esi, REGOFF_ESI(%esp)
	movl	16(%ebx), %ecx			/ saved %ebp
	movl	%ecx, REGOFF_EBP(%esp)
	movl	%ebx, %ecx
	addl	$20, %ecx			/ %ecx = %esp at vpanic entry
	movl	%ecx, REGOFF_ESP(%esp)
	movl	8(%ebx), %ecx			/ saved %ebx
	movl	%ecx, REGOFF_EBX(%esp)
	movl	0(%ebx), %ecx			/ saved %edx
	movl	%ecx, REGOFF_EDX(%esp)
	movl	4(%ebx), %ecx			/ saved %ecx
	movl	%ecx, REGOFF_ECX(%esp)
	movl	12(%ebx), %ecx			/ saved %eax
	movl	%ecx, REGOFF_EAX(%esp)
	movl	$0, REGOFF_TRAPNO(%esp)
	movl	$0, REGOFF_ERR(%esp)
	lea	vpanic, %ecx
	movl	%ecx, REGOFF_EIP(%esp)
#if !defined(__GNUC_AS__)
	movw	%cs, %edx
#else	/* __GNUC_AS__ */
	mov	%cs, %edx
#endif	/* __GNUC_AS__ */
	movl	%edx, REGOFF_CS(%esp)
	pushfl
	popl	%ecx
	movl	%ecx, REGOFF_EFL(%esp)
	movl	$0, REGOFF_UESP(%esp)
#if !defined(__GNUC_AS__)
	movw	%ss, %edx
#else	/* __GNUC_AS__ */
	mov	%ss, %edx
#endif	/* __GNUC_AS__ */
	movl	%edx, REGOFF_SS(%esp)

	movl	%esp, %ecx			/ %ecx = &regs
	pushl	%eax				/ push on_panic_stack
	pushl	%ecx				/ push &regs
	movl	12(%ebp), %ecx			/ %ecx = alist
	pushl	%ecx				/ push alist
	movl	8(%ebp), %ecx			/ %ecx = format
	pushl	%ecx				/ push format
	call	panicsys			/ panicsys();
	addl	$16, %esp			/ pop arguments

	addl	$REGSIZE, %esp
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	leave
	ret
	SET_SIZE(vpanic)
3352
	/*
	 * void dtrace_vpanic(const char *format, va_list alist)
	 * DTrace's 32-bit panic entry point: same initial frame as
	 * vpanic(), but calls dtrace_panic_trigger() before joining
	 * the common code at vpanic_common.
	 */
	ENTRY_NP(dtrace_vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip | 20
	movl	%esp, %ebp			/ | %ebp | 16
	pushl	%eax				/ | %eax | 12
	pushl	%ebx				/ | %ebx |  8
	pushl	%ecx				/ | %ecx |  4
	pushl	%edx				/ | %edx |  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	dtrace_panic_trigger		/ %eax = dtrace_panic_trigger()
	addl	$4, %esp			/ reset stack pointer
	jmp	vpanic_common			/ jump back to common code

	SET_SIZE(dtrace_vpanic)
3371
3372#endif	/* __i386 */
3373#endif	/* __lint */
3374
3375#if defined(__lint)
3376
/* Lint-only C declarations for hres_tick() and the hires-time globals. */
void
hres_tick(void)
{}

int64_t timedelta;
hrtime_t hres_last_tick;
timestruc_t hrestime;
int64_t hrestime_adj;
volatile int hres_lock;
uint_t nsec_scale;
hrtime_t hrtime_base;
3388
3389#else	/* __lint */
3390
	/*
	 * Global state for the hires-time code; updated by hres_tick()
	 * under hres_lock (CLOCK_LOCK).
	 */
	DGDEF3(hrestime, _MUL(2, CLONGSIZE), 8)	/* timestruc_t (sec, nsec) */
	.NWORD	0, 0

	DGDEF3(hrestime_adj, 8, 8)		/* remaining adjustment, nsec */
	.long	0, 0

	DGDEF3(hres_last_tick, 8, 8)		/* hrtime at previous tick */
	.long	0, 0

	DGDEF3(timedelta, 8, 8)			/* requested adjustment, nsec */
	.long	0, 0

	DGDEF3(hres_lock, 4, 8)			/* byte lock; see hres_tick() */
	.long	0

	/*
	 * initialized to a non zero value to make pc_gethrtime()
	 * work correctly even before clock is initialized
	 */
	DGDEF3(hrtime_base, 8, 8)
	.long	_MUL(NSEC_PER_CLOCK_TICK, 6), 0

	DGDEF3(adj_shift, 4, 4)
	.long	ADJ_SHIFT
3415
3416#if defined(__amd64)
3417
	/*
	 * void hres_tick(void)
	 * Clock-tick handler: read the current hrtime, take CLOCK_LOCK
	 * (hres_lock, a byte spin lock acquired with xchgb), advance
	 * hrtime_base and hrestime by the interval since the last tick,
	 * apply any pending adjustment via __adj_hrestime, and release
	 * the lock by incrementing it.
	 */
	ENTRY_NP(hres_tick)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef(%rip)
	movq	%rax, %r8		/* %r8 = current hrtime */

	leaq	hres_lock(%rip), %rax
	movb	$-1, %dl
.CL1:
	xchgb	%dl, (%rax)		/* try to grab the byte lock */
	testb	%dl, %dl
	jz	.CL3			/* got it */
.CL2:
	cmpb	$0, (%rax)		/* possible to get lock? */
	pause
	jne	.CL2
	jmp	.CL1			/* yes, try again */
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * a timestruc_t (sec, nsec)
	 */
	leaq	hres_last_tick(%rip), %rax
	movq	%r8, %r11		/* keep current hrtime in %r11 */
	subq	(%rax), %r8		/* %r8 = interval since last tick */
	addq	%r8, hrtime_base(%rip)	/* add interval to hrtime_base */
	addq	%r8, hrestime+8(%rip)	/* add interval to hrestime.tv_nsec */
	/*
	 * Now that we have CLOCK_LOCK, we can update hres_last_tick
	 */
	movq	%r11, (%rax)

	call	__adj_hrestime

	/*
	 * release the hres_lock
	 */
	incl	hres_lock(%rip)
	leave
	ret
	SET_SIZE(hres_tick)
3468
3469#elif defined(__i386)
3470
	/*
	 * void hres_tick(void)
	 * 32-bit clock-tick handler.  Same structure as the amd64 version:
	 * read the current hrtime (64 bits in %edx:%eax -> %ebx:%esi),
	 * take the hres_lock byte lock, advance hrtime_base and hrestime
	 * by the interval since the last tick, fall through into
	 * __adj_hrestime to apply any pending adjustment, and release
	 * the lock.  The 64-bit arithmetic is done with add/adc and
	 * sub/sbb carry chains.
	 */
	ENTRY_NP(hres_tick)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%esi
	pushl	%ebx

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef
	movl	%eax, %ebx		/ %esi:%ebx = current hrtime
	movl	%edx, %esi

	movl	$hres_lock, %eax
	movl	$-1, %edx
.CL1:
	xchgb	%dl, (%eax)		/ try to grab the byte lock
	testb	%dl, %dl
	jz	.CL3			/ got it
.CL2:
	cmpb	$0, (%eax)		/ possible to get lock?
	pause
	jne	.CL2
	jmp	.CL1			/ yes, try again
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * timestruc_t (sec, nsec)
	 */

	lea	hres_last_tick, %eax

	movl	%ebx, %edx
	movl	%esi, %ecx

	subl 	(%eax), %edx		/ %ecx:%edx = interval since last tick
	sbbl 	4(%eax), %ecx

	addl	%edx, hrtime_base	/ add interval to hrtime_base
	adcl	%ecx, hrtime_base+4

	addl 	%edx, hrestime+4	/ add interval to hrestime.tv_nsec

	/
	/ Now that we have CLOCK_LOCK, we can update hres_last_tick.
	/
	movl	%ebx, (%eax)
	movl	%esi,  4(%eax)

	/ get hrestime at this moment. used as base for pc_gethrestime
	/
	/ Apply adjustment, if any
	/
	/ #define HRES_ADJ	(NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
	/ (max_hres_adj)
	/
	/ void
	/ adj_hrestime()
	/ {
	/	long long adj;
	/
	/	if (hrestime_adj == 0)
	/		adj = 0;
	/	else if (hrestime_adj > 0) {
	/		if (hrestime_adj < HRES_ADJ)
	/			adj = hrestime_adj;
	/		else
	/			adj = HRES_ADJ;
	/	}
	/	else {
	/		if (hrestime_adj < -(HRES_ADJ))
	/			adj = -(HRES_ADJ);
	/		else
	/			adj = hrestime_adj;
	/	}
	/
	/	timedelta -= adj;
	/	hrestime_adj = timedelta;
	/	hrestime.tv_nsec += adj;
	/
	/	while (hrestime.tv_nsec >= NANOSEC) {
	/		one_sec++;
	/		hrestime.tv_sec++;
	/		hrestime.tv_nsec -= NANOSEC;
	/	}
	/ }
__adj_hrestime:				/ entered by fall-through from above
	movl	hrestime_adj, %esi	/ if (hrestime_adj == 0)
	movl	hrestime_adj+4, %edx
	andl	%esi, %esi
	jne	.CL4			/ no
	andl	%edx, %edx
	jne	.CL4			/ no
	subl	%ecx, %ecx		/ yes, adj = 0;
	subl	%edx, %edx
	jmp	.CL5
.CL4:
	subl	%ecx, %ecx
	subl	%eax, %eax
	subl	%esi, %ecx
	sbbl	%edx, %eax
	andl	%eax, %eax		/ if (hrestime_adj > 0)
	jge	.CL6

	/ In the following comments, HRES_ADJ is used, while in the code
	/ max_hres_adj is used.
	/
	/ The test for "hrestime_adj < HRES_ADJ" is complicated because
	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
	/ on the logical equivalence of:
	/
	/	!(hrestime_adj < HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
	/
	/ which computes whether or not the least significant 32-bits
	/ of hrestime_adj is greater than HRES_ADJ, followed by:
	/
	/	Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry whenever step 1 is true or the most
	/ significant long of the longlong hrestime_adj is non-zero.

	movl	max_hres_adj, %ecx	/ hrestime_adj is positive
	subl	%esi, %ecx
	movl	%edx, %eax
	adcl	$-1, %eax
	jnc	.CL7
	movl	max_hres_adj, %ecx	/ adj = HRES_ADJ;
	subl	%edx, %edx
	jmp	.CL5

	/ The following computation is similar to the one above.
	/
	/ The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
	/ on the logical equivalence of:
	/
	/	(hrestime_adj > -HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ + lsw(hrestime_adj)) generates a Carry
	/
	/ which means the least significant 32-bits of hrestime_adj is
	/ greater than -HRES_ADJ, followed by:
	/
	/	Previous Carry + 0 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry only when step 1 is true and the most
	/ significant long of the longlong hrestime_adj is -1.

.CL6:					/ hrestime_adj is negative
	movl	%esi, %ecx
	addl	max_hres_adj, %ecx
	movl	%edx, %eax
	adcl	$0, %eax
	jc	.CL7
	xor	%ecx, %ecx
	subl	max_hres_adj, %ecx	/ adj = -(HRES_ADJ);
	movl	$-1, %edx
	jmp	.CL5
.CL7:
	movl	%esi, %ecx		/ adj = hrestime_adj;
.CL5:
	movl	timedelta, %esi
	subl	%ecx, %esi
	movl	timedelta+4, %eax
	sbbl	%edx, %eax
	movl	%esi, timedelta
	movl	%eax, timedelta+4	/ timedelta -= adj;
	movl	%esi, hrestime_adj
	movl	%eax, hrestime_adj+4	/ hrestime_adj = timedelta;
	addl	hrestime+4, %ecx	/ hrestime.tv_nsec += adj;

	movl	%ecx, %eax		/ eax = tv_nsec
1:
	cmpl	$NANOSEC, %eax		/ if ((unsigned long)tv_nsec >= NANOSEC)
	jb	.CL8			/ no
	incl	one_sec			/ yes,  one_sec++;
	incl	hrestime		/ hrestime.tv_sec++;
	addl	$-NANOSEC, %eax		/ tv_nsec -= NANOSEC
	jmp	1b			/ check for more seconds

.CL8:
	movl	%eax, hrestime+4	/ store final into hrestime.tv_nsec
	incl	hres_lock		/ release the hres_lock

	popl	%ebx
	popl	%esi
	leave
	ret
	SET_SIZE(hres_tick)
3671
3672#endif	/* __i386 */
3673#endif	/* __lint */
3674
3675/*
3676 * void prefetch_smap_w(void *)
3677 *
3678 * Prefetch ahead within a linear list of smap structures.
3679 * Not implemented for ia32.  Stub for compatibility.
3680 */
3681
3682#if defined(__lint)
3683
/* Lint-only C prototype for the prefetch_smap_w() stub below. */
/*ARGSUSED*/
void prefetch_smap_w(void *smp)
{}
3687
3688#else	/* __lint */
3689
	/*
	 * void prefetch_smap_w(void *smp)
	 * No-op stub on x86 (see block comment above); returns immediately.
	 */
	ENTRY(prefetch_smap_w)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_smap_w)
3694
3695#endif	/* __lint */
3696
3697/*
3698 * prefetch_page_r(page_t *)
3699 * issue prefetch instructions for a page_t
3700 */
3701#if defined(__lint)
3702
/* Lint-only C prototype for the prefetch_page_r() stub below. */
/*ARGSUSED*/
void
prefetch_page_r(void *pp)
{}
3707
3708#else	/* __lint */
3709
	/*
	 * void prefetch_page_r(page_t *pp)
	 * No-op stub on x86; returns immediately.
	 */
	ENTRY(prefetch_page_r)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_page_r)
3714
3715#endif	/* __lint */
3716
3717#if defined(__lint)
3718
/* Lint-only C prototype for the bcmp() implementations below. */
/*ARGSUSED*/
int
bcmp(const void *s1, const void *s2, size_t count)
{ return (0); }
3723
3724#else   /* __lint */
3725
3726#if defined(__amd64)
3727
	/*
	 * int bcmp(const void *s1, const void *s2, size_t count)
	 * Return 0 if the two byte ranges are identical, nonzero otherwise.
	 * Implemented on memcmp(); only the zero/nonzero sense of the
	 * result is preserved.  Under DEBUG, both pointers must be kernel
	 * addresses (>= kernelbase) or we panic.
	 */
	ENTRY(bcmp)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	kernelbase(%rip), %r11
	cmpq	%r11, %rdi
	jb	0f
	cmpq	%r11, %rsi
	jnb	1f
0:	leaq	.bcmp_panic_msg(%rip), %rdi
	xorl	%eax, %eax		/* no FP args to variadic panic() */
	call	panic
1:
#endif	/* DEBUG */
	call	memcmp
	testl	%eax, %eax
	setne	%dl			/* %dl = (memcmp() != 0) */
	leave
	movzbl	%dl, %eax		/* return 0 or 1 */
	ret
	SET_SIZE(bcmp)
3749
3750#elif defined(__i386)
3751
/* Argument offsets from %esp after the "pushl %edi" below (or from %ebp). */
#define	ARG_S1		8
#define	ARG_S2		12
#define	ARG_LENGTH	16

	/*
	 * int bcmp(const void *s1, const void *s2, size_t count)
	 * Return 0 if the two byte ranges are identical, 1 otherwise.
	 * Compares a word at a time while >= 4 bytes remain, then drops
	 * to a byte loop for the tail (and to pinpoint a word mismatch).
	 * Under DEBUG, both pointers must be kernel addresses or we panic.
	 */
	ENTRY(bcmp)
#ifdef DEBUG
	pushl   %ebp
	movl    %esp, %ebp
	movl    kernelbase, %eax
	cmpl    %eax, ARG_S1(%ebp)
	jb	0f
	cmpl    %eax, ARG_S2(%ebp)
	jnb	1f
0:	pushl   $.bcmp_panic_msg
	call    panic
1:	popl    %ebp
#endif	/* DEBUG */

	pushl	%edi		/ save register variable
	movl	ARG_S1(%esp), %eax	/ %eax = address of string 1
	movl	ARG_S2(%esp), %ecx	/ %ecx = address of string 2
	cmpl	%eax, %ecx	/ if the same string
	je	.equal		/ goto .equal
	movl	ARG_LENGTH(%esp), %edi	/ %edi = length in bytes
	cmpl	$4, %edi	/ if %edi < 4
	jb	.byte_check	/ goto .byte_check
	.align	4
.word_loop:
	movl	(%ecx), %edx	/ move 1 word from (%ecx) to %edx
	leal	-4(%edi), %edi	/ %edi -= 4
	cmpl	(%eax), %edx	/ compare 1 word from (%eax) with %edx
	jne	.word_not_equal	/ if not equal, goto .word_not_equal
	leal	4(%ecx), %ecx	/ %ecx += 4 (next word)
	leal	4(%eax), %eax	/ %eax += 4 (next word)
	cmpl	$4, %edi	/ if %edi >= 4
	jae	.word_loop	/ goto .word_loop
.byte_check:
	cmpl	$0, %edi	/ if %edi == 0
	je	.equal		/ goto .equal
	jmp	.byte_loop	/ goto .byte_loop (checks in bytes)
.word_not_equal:
	leal	4(%edi), %edi	/ %edi += 4 (post-decremented)
	.align	4
.byte_loop:
	movb	(%ecx),	%dl	/ move 1 byte from (%ecx) to %dl
	cmpb	%dl, (%eax)	/ compare %dl with 1 byte from (%eax)
	jne	.not_equal	/ if not equal, goto .not_equal
	incl	%ecx		/ %ecx++ (next byte)
	incl	%eax		/ %eax++ (next byte)
	decl	%edi		/ %edi--
	jnz	.byte_loop	/ if not zero, goto .byte_loop
.equal:
	xorl	%eax, %eax	/ %eax = 0
	popl	%edi		/ restore register variable
	ret			/ return (NULL)
	.align	4
.not_equal:
	movl	$1, %eax	/ return 1
	popl	%edi		/ restore register variable
	ret			/ return (NULL)
	SET_SIZE(bcmp)
3813
3814#endif	/* __i386 */
3815
3816#ifdef DEBUG
3817	.text
3818.bcmp_panic_msg:
3819	.string "bcmp: arguments below kernelbase"
3820#endif	/* DEBUG */
3821
3822#endif	/* __lint */
3823