xref: /titanic_51/usr/src/uts/intel/ia32/ml/i86_subr.s (revision 4e9cfc9a015e8ca7d41f7d018c74dc8a692305b3)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 *  Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
29 *  Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
30 *    All Rights Reserved
31 */
32
33#pragma ident	"%Z%%M%	%I%	%E% SMI"
34
35/*
36 * General assembly language routines.
37 * It is the intent of this file to contain routines that are
38 * independent of the specific kernel architecture, and those that are
39 * common across kernel architectures.
40 * As architectures diverge, and implementations of specific
41 * architecture-dependent routines change, the routines should be moved
42 * from this file into the respective ../`arch -k`/subr.s file.
43 */
44
45#include <sys/asm_linkage.h>
46#include <sys/asm_misc.h>
47#include <sys/panic.h>
48#include <sys/ontrap.h>
49#include <sys/regset.h>
50#include <sys/privregs.h>
51#include <sys/reboot.h>
52#include <sys/psw.h>
53#include <sys/x86_archext.h>
54
55#if defined(__lint)
56#include <sys/types.h>
57#include <sys/systm.h>
58#include <sys/thread.h>
59#include <sys/archsystm.h>
60#include <sys/byteorder.h>
61#include <sys/dtrace.h>
62#include <sys/ftrace.h>
63#else	/* __lint */
64#include "assym.h"
65#endif	/* __lint */
66#include <sys/dditypes.h>
67
68/*
69 * on_fault()
70 * Catch lofault faults. Like setjmp(), except that it returns 1
71 * if the code that follows causes an uncorrectable fault. Turned off
72 * by calling no_fault().
73 */
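
/*
 * A rough usage sketch in C, for illustration only (val and addr are
 * placeholders; see the actual consumers of on_fault()/no_fault() for
 * the canonical pattern):
 *
 *	label_t ljb;
 *
 *	if (on_fault(&ljb)) {
 *		no_fault();
 *		return (EFAULT);
 *	}
 *	val = *(volatile uchar_t *)addr;	(access that may fault)
 *	no_fault();
 */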
74
75#if defined(__lint)
76
77/* ARGSUSED */
78int
79on_fault(label_t *ljb)
80{ return (0); }
81
82void
83no_fault(void)
84{}
85
86#else	/* __lint */
87
88#if defined(__amd64)
89
90	ENTRY(on_fault)
91	movq	%gs:CPU_THREAD, %rsi
92	leaq	catch_fault(%rip), %rdx
93	movq	%rdi, T_ONFAULT(%rsi)		/* jumpbuf in t_onfault */
94	movq	%rdx, T_LOFAULT(%rsi)		/* catch_fault in t_lofault */
95	jmp	setjmp				/* let setjmp do the rest */
96
97catch_fault:
98	movq	%gs:CPU_THREAD, %rsi
99	movq	T_ONFAULT(%rsi), %rdi		/* address of save area */
100	xorl	%eax, %eax
101	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
102	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
103	jmp	longjmp				/* let longjmp do the rest */
104	SET_SIZE(on_fault)
105
106	ENTRY(no_fault)
107	movq	%gs:CPU_THREAD, %rsi
108	xorl	%eax, %eax
109	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
110	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
111	ret
112	SET_SIZE(no_fault)
113
114#elif defined(__i386)
115
116	ENTRY(on_fault)
117	movl	%gs:CPU_THREAD, %edx
118	movl	4(%esp), %eax			/* jumpbuf address */
119	leal	catch_fault, %ecx
120	movl	%eax, T_ONFAULT(%edx)		/* jumpbuf in t_onfault */
121	movl	%ecx, T_LOFAULT(%edx)		/* catch_fault in t_lofault */
122	jmp	setjmp				/* let setjmp do the rest */
123
124catch_fault:
125	movl	%gs:CPU_THREAD, %edx
126	xorl	%eax, %eax
127	movl	T_ONFAULT(%edx), %ecx		/* address of save area */
128	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
129	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
130	pushl	%ecx
131	call	longjmp				/* let longjmp do the rest */
132	SET_SIZE(on_fault)
133
134	ENTRY(no_fault)
135	movl	%gs:CPU_THREAD, %edx
136	xorl	%eax, %eax
137	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
138	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
139	ret
140	SET_SIZE(no_fault)
141
142#endif	/* __i386 */
143#endif	/* __lint */
144
145/*
146 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
147 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
148 */
149
150#if defined(lint)
151
152void
153on_trap_trampoline(void)
154{}
155
156#else	/* __lint */
157
158#if defined(__amd64)
159
160	ENTRY(on_trap_trampoline)
161	movq	%gs:CPU_THREAD, %rsi
162	movq	T_ONTRAP(%rsi), %rdi
163	addq	$OT_JMPBUF, %rdi
164	jmp	longjmp
165	SET_SIZE(on_trap_trampoline)
166
167#elif defined(__i386)
168
169	ENTRY(on_trap_trampoline)
170	movl	%gs:CPU_THREAD, %eax
171	movl	T_ONTRAP(%eax), %eax
172	addl	$OT_JMPBUF, %eax
173	pushl	%eax
174	call	longjmp
175	SET_SIZE(on_trap_trampoline)
176
177#endif	/* __i386 */
178#endif	/* __lint */
179
180/*
181 * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
182 * more information about the on_trap() mechanism.  If the on_trap_data is the
183 * same as the topmost stack element, we just modify that element.
184 */
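
/*
 * A hedged C sketch of the usual calling pattern; the authoritative
 * interface, including the OT_* protection flags, is in <sys/ontrap.h>
 * (error below is just a placeholder):
 *
 *	on_trap_data_t otd;
 *
 *	if (on_trap(&otd, OT_DATA_ACCESS) != 0) {
 *		no_trap();
 *		return (error);
 *	}
 *	... code that may trap ...
 *	no_trap();
 */
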
185#if defined(lint)
186
187/*ARGSUSED*/
188int
189on_trap(on_trap_data_t *otp, uint_t prot)
190{ return (0); }
191
192#else	/* __lint */
193
194#if defined(__amd64)
195
196	ENTRY(on_trap)
197	movw	%si, OT_PROT(%rdi)		/* ot_prot = prot */
198	movw	$0, OT_TRAP(%rdi)		/* ot_trap = 0 */
199	leaq	on_trap_trampoline(%rip), %rdx	/* rdx = &on_trap_trampoline */
200	movq	%rdx, OT_TRAMPOLINE(%rdi)	/* ot_trampoline = rdx */
201	xorl	%ecx, %ecx
202	movq	%rcx, OT_HANDLE(%rdi)		/* ot_handle = NULL */
203	movq	%rcx, OT_PAD1(%rdi)		/* ot_pad1 = NULL */
204	movq	%gs:CPU_THREAD, %rdx		/* rdx = curthread */
205	movq	T_ONTRAP(%rdx), %rcx		/* rcx = curthread->t_ontrap */
206	cmpq	%rdi, %rcx			/* if (otp == %rcx)	*/
207	je	0f				/*	don't modify t_ontrap */
208
209	movq	%rcx, OT_PREV(%rdi)		/* ot_prev = t_ontrap */
210	movq	%rdi, T_ONTRAP(%rdx)		/* curthread->t_ontrap = otp */
211
2120:	addq	$OT_JMPBUF, %rdi		/* &ot_jmpbuf */
213	jmp	setjmp
214	SET_SIZE(on_trap)
215
216#elif defined(__i386)
217
218	ENTRY(on_trap)
219	movl	4(%esp), %eax			/* %eax = otp */
220	movl	8(%esp), %edx			/* %edx = prot */
221
222	movw	%dx, OT_PROT(%eax)		/* ot_prot = prot */
223	movw	$0, OT_TRAP(%eax)		/* ot_trap = 0 */
224	leal	on_trap_trampoline, %edx	/* %edx = &on_trap_trampoline */
225	movl	%edx, OT_TRAMPOLINE(%eax)	/* ot_trampoline = %edx */
226	movl	$0, OT_HANDLE(%eax)		/* ot_handle = NULL */
227	movl	$0, OT_PAD1(%eax)		/* ot_pad1 = NULL */
228	movl	%gs:CPU_THREAD, %edx		/* %edx = curthread */
229	movl	T_ONTRAP(%edx), %ecx		/* %ecx = curthread->t_ontrap */
230	cmpl	%eax, %ecx			/* if (otp == %ecx) */
231	je	0f				/*    don't modify t_ontrap */
232
233	movl	%ecx, OT_PREV(%eax)		/* ot_prev = t_ontrap */
234	movl	%eax, T_ONTRAP(%edx)		/* curthread->t_ontrap = otp */
235
2360:	addl	$OT_JMPBUF, %eax		/* %eax = &ot_jmpbuf */
237	movl	%eax, 4(%esp)			/* put %eax back on the stack */
238	jmp	setjmp				/* let setjmp do the rest */
239	SET_SIZE(on_trap)
240
241#endif	/* __i386 */
242#endif	/* __lint */
243
244/*
245 * Setjmp and longjmp implement non-local gotos using state vectors
246 * of type label_t.
247 */
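
/*
 * A minimal usage sketch (C, illustrative only):
 *
 *	label_t jb;
 *
 *	if (setjmp(&jb) == 0) {
 *		...			(first return: state saved, value 0)
 *		longjmp(&jb);		(control transfers back to setjmp)
 *	} else {
 *		...			(second "return", with value 1)
 *	}
 */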
248
249#if defined(__lint)
250
251/* ARGSUSED */
252int
253setjmp(label_t *lp)
254{ return (0); }
255
256/* ARGSUSED */
257void
258longjmp(label_t *lp)
259{}
260
261#else	/* __lint */
262
263#if LABEL_PC != 0
264#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
265#endif	/* LABEL_PC != 0 */
266
267#if defined(__amd64)
268
269	ENTRY(setjmp)
270	movq	%rsp, LABEL_SP(%rdi)
271	movq	%rbp, LABEL_RBP(%rdi)
272	movq	%rbx, LABEL_RBX(%rdi)
273	movq	%r12, LABEL_R12(%rdi)
274	movq	%r13, LABEL_R13(%rdi)
275	movq	%r14, LABEL_R14(%rdi)
276	movq	%r15, LABEL_R15(%rdi)
277	movq	(%rsp), %rdx		/* return address */
278	movq	%rdx, (%rdi)		/* LABEL_PC is 0 */
279	xorl	%eax, %eax		/* return 0 */
280	ret
281	SET_SIZE(setjmp)
282
283	ENTRY(longjmp)
284	movq	LABEL_SP(%rdi), %rsp
285	movq	LABEL_RBP(%rdi), %rbp
286	movq	LABEL_RBX(%rdi), %rbx
287	movq	LABEL_R12(%rdi), %r12
288	movq	LABEL_R13(%rdi), %r13
289	movq	LABEL_R14(%rdi), %r14
290	movq	LABEL_R15(%rdi), %r15
291	movq	(%rdi), %rdx		/* return address; LABEL_PC is 0 */
292	movq	%rdx, (%rsp)
293	xorl	%eax, %eax
294	incl	%eax			/* return 1 */
295	ret
296	SET_SIZE(longjmp)
297
298#elif defined(__i386)
299
300	ENTRY(setjmp)
301	movl	4(%esp), %edx		/* address of save area */
302	movl	%ebp, LABEL_EBP(%edx)
303	movl	%ebx, LABEL_EBX(%edx)
304	movl	%esi, LABEL_ESI(%edx)
305	movl	%edi, LABEL_EDI(%edx)
306	movl	%esp, 4(%edx)
307	movl	(%esp), %ecx		/* %eip (return address) */
308	movl	%ecx, (%edx)		/* LABEL_PC is 0 */
309	subl	%eax, %eax		/* return 0 */
310	ret
311	SET_SIZE(setjmp)
312
313	ENTRY(longjmp)
314	movl	4(%esp), %edx		/* address of save area */
315	movl	LABEL_EBP(%edx), %ebp
316	movl	LABEL_EBX(%edx), %ebx
317	movl	LABEL_ESI(%edx), %esi
318	movl	LABEL_EDI(%edx), %edi
319	movl	4(%edx), %esp
320	movl	(%edx), %ecx		/* %eip (return addr); LABEL_PC is 0 */
321	movl	$1, %eax
322	addl	$4, %esp		/* pop ret adr */
323	jmp	*%ecx			/* indirect */
324	SET_SIZE(longjmp)
325
326#endif	/* __i386 */
327#endif	/* __lint */
328
329/*
330 * If a() calls b() and b() calls caller(),
331 * caller() returns the return address in a().
332 * (Note: We assume a() and b() are C routines which do the normal entry/exit
333 *  sequence.)
334 */
335
336#if defined(__lint)
337
338caddr_t
339caller(void)
340{ return (0); }
341
342#else	/* __lint */
343
344#if defined(__amd64)
345
346	ENTRY(caller)
347	movq	8(%rbp), %rax		/* b()'s return pc, in a() */
348	ret
349	SET_SIZE(caller)
350
351#elif defined(__i386)
352
353	ENTRY(caller)
354	movl	4(%ebp), %eax		/* b()'s return pc, in a() */
355	ret
356	SET_SIZE(caller)
357
358#endif	/* __i386 */
359#endif	/* __lint */
360
361/*
362 * If a() calls callee(), callee() returns the
363 * return address in a().
364 */
365
366#if defined(__lint)
367
368caddr_t
369callee(void)
370{ return (0); }
371
372#else	/* __lint */
373
374#if defined(__amd64)
375
376	ENTRY(callee)
377	movq	(%rsp), %rax		/* callee()'s return pc, in a() */
378	ret
379	SET_SIZE(callee)
380
381#elif defined(__i386)
382
383	ENTRY(callee)
384	movl	(%esp), %eax		/* callee()'s return pc, in a() */
385	ret
386	SET_SIZE(callee)
387
388#endif	/* __i386 */
389#endif	/* __lint */
390
391/*
392 * return the current frame pointer
393 */
394
395#if defined(__lint)
396
397greg_t
398getfp(void)
399{ return (0); }
400
401#else	/* __lint */
402
403#if defined(__amd64)
404
405	ENTRY(getfp)
406	movq	%rbp, %rax
407	ret
408	SET_SIZE(getfp)
409
410#elif defined(__i386)
411
412	ENTRY(getfp)
413	movl	%ebp, %eax
414	ret
415	SET_SIZE(getfp)
416
417#endif	/* __i386 */
418#endif	/* __lint */
419
420/*
421 * Invalidate a single page table entry in the TLB
422 */
423
424#if defined(__lint)
425
426/* ARGSUSED */
427void
428mmu_tlbflush_entry(caddr_t m)
429{}
430
431#else	/* __lint */
432
433#if defined(__amd64)
434
435	ENTRY(mmu_tlbflush_entry)
436	invlpg	(%rdi)
437	ret
438	SET_SIZE(mmu_tlbflush_entry)
439
440#elif defined(__i386)
441
442	ENTRY(mmu_tlbflush_entry)
443	movl	4(%esp), %eax
444	invlpg	(%eax)
445	ret
446	SET_SIZE(mmu_tlbflush_entry)
447
448#endif	/* __i386 */
449#endif	/* __lint */
450
451
452/*
453 * Get/Set the value of various control registers
454 */
455
456#if defined(__lint)
457
458ulong_t
459getcr0(void)
460{ return (0); }
461
462/* ARGSUSED */
463void
464setcr0(ulong_t value)
465{}
466
467ulong_t
468getcr2(void)
469{ return (0); }
470
471ulong_t
472getcr3(void)
473{ return (0); }
474
475#if !defined(__xpv)
476/* ARGSUSED */
477void
478setcr3(ulong_t val)
479{}
480
481void
482reload_cr3(void)
483{}
484#endif
485
486ulong_t
487getcr4(void)
488{ return (0); }
489
490/* ARGSUSED */
491void
492setcr4(ulong_t val)
493{}
494
495#if defined(__amd64)
496
497ulong_t
498getcr8(void)
499{ return (0); }
500
501/* ARGSUSED */
502void
503setcr8(ulong_t val)
504{}
505
506#endif	/* __amd64 */
507
508#else	/* __lint */
509
510#if defined(__amd64)
511
512	ENTRY(getcr0)
513	movq	%cr0, %rax
514	ret
515	SET_SIZE(getcr0)
516
517	ENTRY(setcr0)
518	movq	%rdi, %cr0
519	ret
520	SET_SIZE(setcr0)
521
522        ENTRY(getcr2)
523#if defined(__xpv)
524	movq	%gs:CPU_VCPU_INFO, %rax
525	movq	VCPU_INFO_ARCH_CR2(%rax), %rax
526#else
527        movq    %cr2, %rax
528#endif
529        ret
530	SET_SIZE(getcr2)
531
532	ENTRY(getcr3)
533	movq    %cr3, %rax
534	ret
535	SET_SIZE(getcr3)
536
537#if !defined(__xpv)
538
539        ENTRY(setcr3)
540        movq    %rdi, %cr3
541        ret
542	SET_SIZE(setcr3)
543
544	ENTRY(reload_cr3)
545	movq	%cr3, %rdi
546	movq	%rdi, %cr3
547	ret
548	SET_SIZE(reload_cr3)
549
550#endif	/* __xpv */
551
552	ENTRY(getcr4)
553	movq	%cr4, %rax
554	ret
555	SET_SIZE(getcr4)
556
557	ENTRY(setcr4)
558	movq	%rdi, %cr4
559	ret
560	SET_SIZE(setcr4)
561
562	ENTRY(getcr8)
563	movq	%cr8, %rax
564	ret
565	SET_SIZE(getcr8)
566
567	ENTRY(setcr8)
568	movq	%rdi, %cr8
569	ret
570	SET_SIZE(setcr8)
571
572#elif defined(__i386)
573
574        ENTRY(getcr0)
575        movl    %cr0, %eax
576        ret
577	SET_SIZE(getcr0)
578
579        ENTRY(setcr0)
580        movl    4(%esp), %eax
581        movl    %eax, %cr0
582        ret
583	SET_SIZE(setcr0)
584
585        ENTRY(getcr2)
586#if defined(__xpv)
587	movl	%gs:CPU_VCPU_INFO, %eax
588	movl	VCPU_INFO_ARCH_CR2(%eax), %eax
589#else
590        movl    %cr2, %eax
591#endif
592        ret
593	SET_SIZE(getcr2)
594
595	ENTRY(getcr3)
596	movl    %cr3, %eax
597	ret
598	SET_SIZE(getcr3)
599
600#if !defined(__xpv)
601
602        ENTRY(setcr3)
603        movl    4(%esp), %eax
604        movl    %eax, %cr3
605        ret
606	SET_SIZE(setcr3)
607
608	ENTRY(reload_cr3)
609	movl    %cr3, %eax
610	movl    %eax, %cr3
611	ret
612	SET_SIZE(reload_cr3)
613
614#endif	/* __xpv */
615
616	ENTRY(getcr4)
617	movl    %cr4, %eax
618	ret
619	SET_SIZE(getcr4)
620
621        ENTRY(setcr4)
622        movl    4(%esp), %eax
623        movl    %eax, %cr4
624        ret
625	SET_SIZE(setcr4)
626
627#endif	/* __i386 */
628#endif	/* __lint */
629
630#if defined(__lint)
631
632/*ARGSUSED*/
633uint32_t
634__cpuid_insn(struct cpuid_regs *regs)
635{ return (0); }
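
/*
 * The 0x0/0x4/0x8/0xc offsets used below assume the members of
 * struct cpuid_regs are four consecutive 32-bit words, roughly:
 *
 *	struct cpuid_regs {
 *		uint32_t cp_eax;	(0x0 - also the cpuid leaf on entry)
 *		uint32_t cp_ebx;	(0x4)
 *		uint32_t cp_ecx;	(0x8)
 *		uint32_t cp_edx;	(0xc)
 *	};
 *
 * This is only a reading aid; the authoritative definition is in
 * <sys/x86_archext.h>.
 */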
636
637#else	/* __lint */
638
639#if defined(__amd64)
640
641	ENTRY(__cpuid_insn)
642	movq	%rbx, %r8
643	movq	%rcx, %r9
644	movq	%rdx, %r11
645	movl	(%rdi), %eax		/* %eax = regs->cp_eax */
646	movl	0x4(%rdi), %ebx		/* %ebx = regs->cp_ebx */
647	movl	0x8(%rdi), %ecx		/* %ecx = regs->cp_ecx */
648	movl	0xc(%rdi), %edx		/* %edx = regs->cp_edx */
649	cpuid
650	movl	%eax, (%rdi)		/* regs->cp_eax = %eax */
651	movl	%ebx, 0x4(%rdi)		/* regs->cp_ebx = %ebx */
652	movl	%ecx, 0x8(%rdi)		/* regs->cp_ecx = %ecx */
653	movl	%edx, 0xc(%rdi)		/* regs->cp_edx = %edx */
654	movq	%r8, %rbx
655	movq	%r9, %rcx
656	movq	%r11, %rdx
657	ret
658	SET_SIZE(__cpuid_insn)
659
660#elif defined(__i386)
661
662        ENTRY(__cpuid_insn)
663	pushl	%ebp
664	movl	0x8(%esp), %ebp		/* %ebp = regs */
665	pushl	%ebx
666	pushl	%ecx
667	pushl	%edx
668	movl	(%ebp), %eax		/* %eax = regs->cp_eax */
669	movl	0x4(%ebp), %ebx		/* %ebx = regs->cp_ebx */
670	movl	0x8(%ebp), %ecx		/* %ecx = regs->cp_ecx */
671	movl	0xc(%ebp), %edx		/* %edx = regs->cp_edx */
672	cpuid
673	movl	%eax, (%ebp)		/* regs->cp_eax = %eax */
674	movl	%ebx, 0x4(%ebp)		/* regs->cp_ebx = %ebx */
675	movl	%ecx, 0x8(%ebp)		/* regs->cp_ecx = %ecx */
676	movl	%edx, 0xc(%ebp)		/* regs->cp_edx = %edx */
677	popl	%edx
678	popl	%ecx
679	popl	%ebx
680	popl	%ebp
681	ret
682	SET_SIZE(__cpuid_insn)
683
684#endif	/* __i386 */
685#endif	/* __lint */
686
687#if defined(__xpv)
688	/*
689	 * Defined in C
690	 */
691#else
692
693#if defined(__lint)
694
695/*ARGSUSED*/
696void
697i86_monitor(volatile uint32_t *addr, uint32_t extensions, uint32_t hints)
698{ return; }
699
700#else   /* __lint */
701
702#if defined(__amd64)
703
704	ENTRY_NP(i86_monitor)
705	pushq	%rbp
706	movq	%rsp, %rbp
707	movq	%rdi, %rax		/* addr */
708	movq	%rsi, %rcx		/* extensions */
709	/* rdx contains input arg3: hints */
710	.byte	0x0f, 0x01, 0xc8	/* monitor */
711	leave
712	ret
713	SET_SIZE(i86_monitor)
714
715#elif defined(__i386)
716
717ENTRY_NP(i86_monitor)
718	pushl	%ebp
719	movl	%esp, %ebp
720	movl	0x8(%ebp),%eax		/* addr */
721	movl	0xc(%ebp),%ecx		/* extensions */
722	movl	0x10(%ebp),%edx		/* hints */
723	.byte	0x0f, 0x01, 0xc8	/* monitor */
724	leave
725	ret
726	SET_SIZE(i86_monitor)
727
728#endif	/* __i386 */
729#endif	/* __lint */
730
731#if defined(__lint)
732
733/*ARGSUSED*/
734void
735i86_mwait(uint32_t data, uint32_t extensions)
736{ return; }
737
738#else	/* __lint */
739
740#if defined(__amd64)
741
742	ENTRY_NP(i86_mwait)
743	pushq	%rbp
744	movq	%rsp, %rbp
745	movq	%rdi, %rax		/* data */
746	movq	%rsi, %rcx		/* extensions */
747	.byte	0x0f, 0x01, 0xc9	/* mwait */
748	leave
749	ret
750	SET_SIZE(i86_mwait)
751
752#elif defined(__i386)
753
754	ENTRY_NP(i86_mwait)
755	pushl	%ebp
756	movl	%esp, %ebp
757	movl	0x8(%ebp),%eax		/* data */
758	movl	0xc(%ebp),%ecx		/* extensions */
759	.byte	0x0f, 0x01, 0xc9	/* mwait */
760	leave
761	ret
762	SET_SIZE(i86_mwait)
763
764#endif	/* __i386 */
765#endif	/* __lint */
766
767#if defined(__lint)
768
769hrtime_t
770tsc_read(void)
771{
772	return (0);
773}
774
775void
776patch_tsc(void)
777{}
778
779#else	/* __lint */
780
781#if defined(__amd64)
782
783	ENTRY_NP(tsc_read)
784	rdtsc
785	shlq	$32, %rdx
786	orq	%rdx, %rax
787	ret
788	SET_SIZE(tsc_read)
789
790#else  /* __i386 */
791
792	/*
793	 * To cope with processors that do not implement the rdtsc instruction,
794	 * we patch the kernel to use rdtsc if that feature is detected on the
795	 * CPU.  On an unpatched kernel, tsc_read() just returns zero.
796	 */
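	/*
	 * Concretely: patch_tsc() below copies the two-byte rdtsc opcode
	 * assembled at _rdtsc_bytes over the two nops at _tsc_patch_point,
	 * so subsequent calls to tsc_read() execute a real rdtsc.
	 */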
797	ENTRY_NP(patch_tsc)
798	movw	_rdtsc_bytes, %cx
799	movw	%cx, _tsc_patch_point
800	ret
801_rdtsc_bytes:
802	rdtsc
803	SET_SIZE(patch_tsc)
804
805	ENTRY_NP(tsc_read)
806	xorl	%eax, %eax
807	xorl	%edx, %edx
808	.globl _tsc_patch_point
809_tsc_patch_point:
810	nop; nop
811	ret
812	SET_SIZE(tsc_read)
813
814#endif /* __i386 */
815
816#endif	/* __lint */
817
818#endif	/* __xpv */
819
820/*
821 * Insert entryp after predp in a doubly linked list.
822 */
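
/*
 * Equivalent C, as a sketch; it assumes the generic queue element
 * layout the code relies on (forw pointer at offset 0, back pointer
 * at offset CPTRSIZE):
 *
 *	entryp->forw = predp->forw;
 *	entryp->back = predp;
 *	predp->forw->back = entryp;
 *	predp->forw = entryp;
 */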
823
824#if defined(__lint)
825
826/*ARGSUSED*/
827void
828_insque(caddr_t entryp, caddr_t predp)
829{}
830
831#else	/* __lint */
832
833#if defined(__amd64)
834
835	ENTRY(_insque)
836	movq	(%rsi), %rax		/* predp->forw 			*/
837	movq	%rsi, CPTRSIZE(%rdi)	/* entryp->back = predp		*/
838	movq	%rax, (%rdi)		/* entryp->forw = predp->forw	*/
839	movq	%rdi, (%rsi)		/* predp->forw = entryp		*/
840	movq	%rdi, CPTRSIZE(%rax)	/* predp->forw->back = entryp	*/
841	ret
842	SET_SIZE(_insque)
843
844#elif defined(__i386)
845
846	ENTRY(_insque)
847	movl	8(%esp), %edx
848	movl	4(%esp), %ecx
849	movl	(%edx), %eax		/* predp->forw			*/
850	movl	%edx, CPTRSIZE(%ecx)	/* entryp->back = predp		*/
851	movl	%eax, (%ecx)		/* entryp->forw = predp->forw	*/
852	movl	%ecx, (%edx)		/* predp->forw = entryp		*/
853	movl	%ecx, CPTRSIZE(%eax)	/* predp->forw->back = entryp	*/
854	ret
855	SET_SIZE(_insque)
856
857#endif	/* __i386 */
858#endif	/* __lint */
859
860/*
861 * Remove entryp from a doubly linked list
862 */
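
/*
 * Equivalent C sketch, using the same forw/back layout as _insque():
 *
 *	entryp->back->forw = entryp->forw;
 *	entryp->forw->back = entryp->back;
 */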
863
864#if defined(__lint)
865
866/*ARGSUSED*/
867void
868_remque(caddr_t entryp)
869{}
870
871#else	/* __lint */
872
873#if defined(__amd64)
874
875	ENTRY(_remque)
876	movq	(%rdi), %rax		/* entry->forw */
877	movq	CPTRSIZE(%rdi), %rdx	/* entry->back */
878	movq	%rax, (%rdx)		/* entry->back->forw = entry->forw */
879	movq	%rdx, CPTRSIZE(%rax)	/* entry->forw->back = entry->back */
880	ret
881	SET_SIZE(_remque)
882
883#elif defined(__i386)
884
885	ENTRY(_remque)
886	movl	4(%esp), %ecx
887	movl	(%ecx), %eax		/* entry->forw */
888	movl	CPTRSIZE(%ecx), %edx	/* entry->back */
889	movl	%eax, (%edx)		/* entry->back->forw = entry->forw */
890	movl	%edx, CPTRSIZE(%eax)	/* entry->forw->back = entry->back */
891	ret
892	SET_SIZE(_remque)
893
894#endif	/* __i386 */
895#endif	/* __lint */
896
897/*
898 * Returns the number of
899 * non-null bytes in the string argument.
900 */
901
902#if defined(__lint)
903
904/* ARGSUSED */
905size_t
906strlen(const char *str)
907{ return (0); }
908
909#else	/* __lint */
910
911#if defined(__amd64)
912
913/*
914 * This is close to a simple transliteration of a C version of this
915 * routine.  We should either just -make- this be a C version, or
916 * justify having it in assembler by making it significantly faster.
917 *
918 * size_t
919 * strlen(const char *s)
920 * {
921 *	const char *s0;
922 * #if defined(DEBUG)
923 *	if ((uintptr_t)s < KERNELBASE)
924 *		panic(.str_panic_msg);
925 * #endif
926 *	for (s0 = s; *s; s++)
927 *		;
928 *	return (s - s0);
929 * }
930 */
931
932	ENTRY(strlen)
933#ifdef DEBUG
934	movq	postbootkernelbase(%rip), %rax
935	cmpq	%rax, %rdi
936	jae	str_valid
937	pushq	%rbp
938	movq	%rsp, %rbp
939	leaq	.str_panic_msg(%rip), %rdi
940	xorl	%eax, %eax
941	call	panic
942#endif	/* DEBUG */
943str_valid:
944	cmpb	$0, (%rdi)
945	movq	%rdi, %rax
946	je	.null_found
947	.align	4
948.strlen_loop:
949	incq	%rdi
950	cmpb	$0, (%rdi)
951	jne	.strlen_loop
952.null_found:
953	subq	%rax, %rdi
954	movq	%rdi, %rax
955	ret
956	SET_SIZE(strlen)
957
958#elif defined(__i386)
959
960	ENTRY(strlen)
961#ifdef DEBUG
962	movl	postbootkernelbase, %eax
963	cmpl	%eax, 4(%esp)
964	jae	str_valid
965	pushl	%ebp
966	movl	%esp, %ebp
967	pushl	$.str_panic_msg
968	call	panic
969#endif /* DEBUG */
970
971str_valid:
972	movl	4(%esp), %eax		/* %eax = string address */
973	testl	$3, %eax		/* if %eax not word aligned */
974	jnz	.not_word_aligned	/* goto .not_word_aligned */
975	.align	4
976.word_aligned:
977	movl	(%eax), %edx		/* move 1 word from (%eax) to %edx */
978	movl	$0x7f7f7f7f, %ecx
979	andl	%edx, %ecx		/* %ecx = %edx & 0x7f7f7f7f */
980	addl	$4, %eax		/* next word */
981	addl	$0x7f7f7f7f, %ecx	/* %ecx += 0x7f7f7f7f */
982	orl	%edx, %ecx		/* %ecx |= %edx */
983	andl	$0x80808080, %ecx	/* %ecx &= 0x80808080 */
984	cmpl	$0x80808080, %ecx	/* if no null byte in this word */
985	je	.word_aligned		/* goto .word_aligned */
986	subl	$4, %eax		/* post-incremented */
987.not_word_aligned:
988	cmpb	$0, (%eax)		/* if a byte in (%eax) is null */
989	je	.null_found		/* goto .null_found */
990	incl	%eax			/* next byte */
991	testl	$3, %eax		/* if %eax not word aligned */
992	jnz	.not_word_aligned	/* goto .not_word_aligned */
993	jmp	.word_aligned		/* goto .word_aligned */
994	.align	4
995.null_found:
996	subl	4(%esp), %eax		/* %eax -= string address */
997	ret
998	SET_SIZE(strlen)
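
/*
 * The word-at-a-time loop above uses the classic "does this word
 * contain a zero byte" bit trick.  A hedged C rendering of the test:
 *
 *	uint32_t w = *(uint32_t *)p;			(p is word aligned)
 *	uint32_t t = (w & 0x7f7f7f7f) + 0x7f7f7f7f;
 *	if (((t | w) & 0x80808080) != 0x80808080)
 *		... a zero byte is present somewhere in w ...
 *
 * Bit 7 of each byte of (t | w) can only be clear if that byte of w
 * was zero, so comparing against 0x80808080 finds the terminating
 * null four bytes at a time.
 */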
999
1000#endif	/* __i386 */
1001
1002#ifdef DEBUG
1003	.text
1004.str_panic_msg:
1005	.string "strlen: argument below kernelbase"
1006#endif /* DEBUG */
1007
1008#endif	/* __lint */
1009
1010	/*
1011	 * Berkeley 4.3 introduced symbolically named interrupt levels
1012	 * as a way to deal with priority in a machine-independent fashion.
1013	 * Numbered priorities are machine specific, and should be
1014	 * discouraged where possible.
1015	 *
1016	 * Note, for the machine specific priorities there are
1017	 * examples listed for devices that use a particular priority.
1018	 * It should not be construed that all devices of that
1019	 * type should be at that priority.  It is simply where
1020	 * the current devices fit into the priority scheme, based
1021	 * upon how time-critical they are.
1022	 *
1023	 * The underlying assumption of these assignments is that
1024	 * IPL 10 is the highest level from which a device
1025	 * routine can call wakeup.  Devices that interrupt from higher
1026	 * levels are restricted in what they can do.  If they need
1027	 * kernel services, they should schedule a routine at a lower
1028	 * level (via software interrupt) to do the required
1029	 * processing.
1030	 *
1031	 * Examples of this higher usage:
1032	 *	Level	Usage
1033	 *	14	Profiling clock (and PROM uart polling clock)
1034	 *	12	Serial ports
1035	 *
1036	 * The serial ports request lower level processing on level 6.
1037	 *
1038	 * Also, almost all splN routines (where N is a number or a
1039	 * mnemonic) will do a RAISE(), on the assumption that they are
1040	 * never used to lower our priority.
1041	 * The exceptions are:
1042	 *	spl8()		Because you can't be above 15 to begin with!
1043	 *	splzs()		Because this is used at boot time to lower our
1044	 *			priority, to allow the PROM to poll the uart.
1045	 *	spl0()		Used to lower priority to 0.
1046	 */
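
	/*
	 * Typical usage, sketched in C (illustrative only; s is a
	 * placeholder for the caller's saved priority):
	 *
	 *	int s = splhi();	raise to DISP_LEVEL, remember old pri
	 *	...			critical section
	 *	splx(s);		restore the previous priority
	 */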
1047
1048#if defined(__lint)
1049
1050int spl0(void)		{ return (0); }
1051int spl6(void)		{ return (0); }
1052int spl7(void)		{ return (0); }
1053int spl8(void)		{ return (0); }
1054int splhigh(void)	{ return (0); }
1055int splhi(void)		{ return (0); }
1056int splzs(void)		{ return (0); }
1057
1058/* ARGSUSED */
1059void
1060splx(int level)
1061{}
1062
1063#else	/* __lint */
1064
1065#if defined(__amd64)
1066
1067#define	SETPRI(level) \
1068	movl	$/**/level, %edi;	/* new priority */		\
1069	jmp	do_splx			/* redirect to do_splx */
1070
1071#define	RAISE(level) \
1072	movl	$/**/level, %edi;	/* new priority */		\
1073	jmp	splr			/* redirect to splr */
1074
1075#elif defined(__i386)
1076
1077#define	SETPRI(level) \
1078	pushl	$/**/level;	/* new priority */			\
1079	call	do_splx;	/* invoke common splx code */		\
1080	addl	$4, %esp;	/* unstack arg */			\
1081	ret
1082
1083#define	RAISE(level) \
1084	pushl	$/**/level;	/* new priority */			\
1085	call	splr;		/* invoke common splr code */		\
1086	addl	$4, %esp;	/* unstack args */			\
1087	ret
1088
1089#endif	/* __i386 */
1090
1091	/* locks out all interrupts, including memory errors */
1092	ENTRY(spl8)
1093	SETPRI(15)
1094	SET_SIZE(spl8)
1095
1096	/* just below the level that profiling runs */
1097	ENTRY(spl7)
1098	RAISE(13)
1099	SET_SIZE(spl7)
1100
1101	/* sun specific - highest priority onboard serial i/o asy ports */
1102	ENTRY(splzs)
1103	SETPRI(12)	/* Can't be a RAISE, as it's used to lower us */
1104	SET_SIZE(splzs)
1105
1106	ENTRY(splhi)
1107	ALTENTRY(splhigh)
1108	ALTENTRY(spl6)
1109	ALTENTRY(i_ddi_splhigh)
1110
1111	RAISE(DISP_LEVEL)
1112
1113	SET_SIZE(i_ddi_splhigh)
1114	SET_SIZE(spl6)
1115	SET_SIZE(splhigh)
1116	SET_SIZE(splhi)
1117
1118	/* allow all interrupts */
1119	ENTRY(spl0)
1120	SETPRI(0)
1121	SET_SIZE(spl0)
1122
1123
1124	/* splx implementation */
1125	ENTRY(splx)
1126	jmp	do_splx		/* redirect to common splx code */
1127	SET_SIZE(splx)
1128
1129#endif	/* __lint */
1130
1131#if defined(__i386)
1132
1133/*
1134 * Read and write the %gs register
1135 */
1136
1137#if defined(__lint)
1138
1139/*ARGSUSED*/
1140uint16_t
1141getgs(void)
1142{ return (0); }
1143
1144/*ARGSUSED*/
1145void
1146setgs(uint16_t sel)
1147{}
1148
1149#else	/* __lint */
1150
1151	ENTRY(getgs)
1152	clr	%eax
1153	movw	%gs, %ax
1154	ret
1155	SET_SIZE(getgs)
1156
1157	ENTRY(setgs)
1158	movw	4(%esp), %gs
1159	ret
1160	SET_SIZE(setgs)
1161
1162#endif	/* __lint */
1163#endif	/* __i386 */
1164
1165#if defined(__lint)
1166
1167void
1168pc_reset(void)
1169{}
1170
1171void
1172efi_reset(void)
1173{}
1174
1175#else	/* __lint */
1176
1177	ENTRY(wait_500ms)
1178	push	%ebx
1179	movl	$50000, %ebx
11801:
1181	call	tenmicrosec
1182	decl	%ebx
1183	jnz	1b
1184	pop	%ebx
1185	ret
1186	SET_SIZE(wait_500ms)
1187
1188#define	RESET_METHOD_KBC	1
1189#define	RESET_METHOD_PORT92	2
1190#define RESET_METHOD_PCI	4
1191
1192	DGDEF3(pc_reset_methods, 4, 8)
1193	.long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;
1194
1195	ENTRY(pc_reset)
1196
1197#if defined(__i386)
1198	testl	$RESET_METHOD_KBC, pc_reset_methods
1199#elif defined(__amd64)
1200	testl	$RESET_METHOD_KBC, pc_reset_methods(%rip)
1201#endif
1202	jz	1f
1203
1204	/
1205	/ Try the classic keyboard controller-triggered reset.
1206	/
1207	movw	$0x64, %dx
1208	movb	$0xfe, %al
1209	outb	(%dx)
1210
1211	/ Wait up to 500 milliseconds here for the keyboard controller
1212	/ to pull the reset line.  On some systems where the keyboard
1213	/ controller is slow to pull the reset line, the next reset method
1214	/ may be executed (which may be bad if those systems hang when the
1215	/ next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
1216	/ and Ferrari 4000 (doesn't like the cf9 reset method))
1217
1218	call	wait_500ms
1219
12201:
1221#if defined(__i386)
1222	testl	$RESET_METHOD_PORT92, pc_reset_methods
1223#elif defined(__amd64)
1224	testl	$RESET_METHOD_PORT92, pc_reset_methods(%rip)
1225#endif
1226	jz	3f
1227
1228	/
1229	/ Try port 0x92 fast reset
1230	/
1231	movw	$0x92, %dx
1232	inb	(%dx)
1233	cmpb	$0xff, %al	/ If port's not there, we should get back 0xFF
1234	je	1f
1235	testb	$1, %al		/ If bit 0
1236	jz	2f		/ is clear, jump to perform the reset
1237	andb	$0xfe, %al	/ otherwise,
1238	outb	(%dx)		/ clear bit 0 first, then
12392:
1240	orb	$1, %al		/ Set bit 0
1241	outb	(%dx)		/ and reset the system
12421:
1243
1244	call	wait_500ms
1245
12463:
1247#if defined(__i386)
1248	testl	$RESET_METHOD_PCI, pc_reset_methods
1249#elif defined(__amd64)
1250	testl	$RESET_METHOD_PCI, pc_reset_methods(%rip)
1251#endif
1252	jz	4f
1253
1254	/ Try the PCI (soft) reset vector (should work on all modern systems,
1255	/ but has been shown to cause problems on 450NX systems, and some newer
1256	/ systems (e.g. ATI IXP400-equipped systems))
1257	/ When resetting via this method, 2 writes are required.  The first
1258	/ targets bit 1 (0=hard reset without power cycle, 1=hard reset with
1259	/ power cycle).
1260	/ The reset occurs on the second write, during bit 2's transition from
1261	/ 0->1.
1262	movw	$0xcf9, %dx
1263	movb	$0x2, %al	/ Reset mode = hard, no power cycle
1264	outb	(%dx)
1265	movb	$0x6, %al
1266	outb	(%dx)
1267
1268	call	wait_500ms
1269
12704:
1271	/
1272	/ port 0xcf9 failed also.  Last-ditch effort is to
1273	/ triple-fault the CPU.
1274	/ Also, use triple fault for EFI firmware
1275	/
1276	ENTRY(efi_reset)
1277#if defined(__amd64)
1278	pushq	$0x0
1279	pushq	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
1280	lidt	(%rsp)
1281#elif defined(__i386)
1282	pushl	$0x0
1283	pushl	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
1284	lidt	(%esp)
1285#endif
1286	int	$0x0		/ Trigger interrupt, generate triple-fault
1287
1288	cli
1289	hlt			/ Wait forever
1290	/*NOTREACHED*/
1291	SET_SIZE(efi_reset)
1292	SET_SIZE(pc_reset)
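
/*
 * For reference, the three reset attempts above correspond roughly to
 * the following C, a hedged sketch using this file's inb()/outb()
 * (val is a placeholder; the 500ms waits and the pc_reset_methods
 * checks are omitted):
 *
 *	outb(0x64, 0xfe);		(keyboard controller reset pulse)
 *
 *	val = inb(0x92);		(port 0x92 "fast reset")
 *	if (val != 0xff) {
 *		if (val & 1)
 *			outb(0x92, val & 0xfe);
 *		outb(0x92, val | 1);
 *	}
 *
 *	outb(0xcf9, 0x2);		(PCI reset: select hard reset, ...)
 *	outb(0xcf9, 0x6);		(... then toggle bit 2 to trigger it)
 */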
1293
1294#endif	/* __lint */
1295
1296/*
1297 * C-callable in and out routines
1298 */
1299
1300#if defined(__lint)
1301
1302/* ARGSUSED */
1303void
1304outl(int port_address, uint32_t val)
1305{}
1306
1307#else	/* __lint */
1308
1309#if defined(__amd64)
1310
1311	ENTRY(outl)
1312	movw	%di, %dx
1313	movl	%esi, %eax
1314	outl	(%dx)
1315	ret
1316	SET_SIZE(outl)
1317
1318#elif defined(__i386)
1319
1320	.set	PORT, 4
1321	.set	VAL, 8
1322
1323	ENTRY(outl)
1324	movw	PORT(%esp), %dx
1325	movl	VAL(%esp), %eax
1326	outl	(%dx)
1327	ret
1328	SET_SIZE(outl)
1329
1330#endif	/* __i386 */
1331#endif	/* __lint */
1332
1333#if defined(__lint)
1334
1335/* ARGSUSED */
1336void
1337outw(int port_address, uint16_t val)
1338{}
1339
1340#else	/* __lint */
1341
1342#if defined(__amd64)
1343
1344	ENTRY(outw)
1345	movw	%di, %dx
1346	movw	%si, %ax
1347	D16 outl (%dx)		/* XX64 why not outw? */
1348	ret
1349	SET_SIZE(outw)
1350
1351#elif defined(__i386)
1352
1353	ENTRY(outw)
1354	movw	PORT(%esp), %dx
1355	movw	VAL(%esp), %ax
1356	D16 outl (%dx)
1357	ret
1358	SET_SIZE(outw)
1359
1360#endif	/* __i386 */
1361#endif	/* __lint */
1362
1363#if defined(__lint)
1364
1365/* ARGSUSED */
1366void
1367outb(int port_address, uint8_t val)
1368{}
1369
1370#else	/* __lint */
1371
1372#if defined(__amd64)
1373
1374	ENTRY(outb)
1375	movw	%di, %dx
1376	movb	%sil, %al
1377	outb	(%dx)
1378	ret
1379	SET_SIZE(outb)
1380
1381#elif defined(__i386)
1382
1383	ENTRY(outb)
1384	movw	PORT(%esp), %dx
1385	movb	VAL(%esp), %al
1386	outb	(%dx)
1387	ret
1388	SET_SIZE(outb)
1389
1390#endif	/* __i386 */
1391#endif	/* __lint */
1392
1393#if defined(__lint)
1394
1395/* ARGSUSED */
1396uint32_t
1397inl(int port_address)
1398{ return (0); }
1399
1400#else	/* __lint */
1401
1402#if defined(__amd64)
1403
1404	ENTRY(inl)
1405	xorl	%eax, %eax
1406	movw	%di, %dx
1407	inl	(%dx)
1408	ret
1409	SET_SIZE(inl)
1410
1411#elif defined(__i386)
1412
1413	ENTRY(inl)
1414	movw	PORT(%esp), %dx
1415	inl	(%dx)
1416	ret
1417	SET_SIZE(inl)
1418
1419#endif	/* __i386 */
1420#endif	/* __lint */
1421
1422#if defined(__lint)
1423
1424/* ARGSUSED */
1425uint16_t
1426inw(int port_address)
1427{ return (0); }
1428
1429#else	/* __lint */
1430
1431#if defined(__amd64)
1432
1433	ENTRY(inw)
1434	xorl	%eax, %eax
1435	movw	%di, %dx
1436	D16 inl	(%dx)
1437	ret
1438	SET_SIZE(inw)
1439
1440#elif defined(__i386)
1441
1442	ENTRY(inw)
1443	subl	%eax, %eax
1444	movw	PORT(%esp), %dx
1445	D16 inl	(%dx)
1446	ret
1447	SET_SIZE(inw)
1448
1449#endif	/* __i386 */
1450#endif	/* __lint */
1451
1452
1453#if defined(__lint)
1454
1455/* ARGSUSED */
1456uint8_t
1457inb(int port_address)
1458{ return (0); }
1459
1460#else	/* __lint */
1461
1462#if defined(__amd64)
1463
1464	ENTRY(inb)
1465	xorl	%eax, %eax
1466	movw	%di, %dx
1467	inb	(%dx)
1468	ret
1469	SET_SIZE(inb)
1470
1471#elif defined(__i386)
1472
1473	ENTRY(inb)
1474	subl    %eax, %eax
1475	movw	PORT(%esp), %dx
1476	inb	(%dx)
1477	ret
1478	SET_SIZE(inb)
1479
1480#endif	/* __i386 */
1481#endif	/* __lint */
1482
1483
1484#if defined(__lint)
1485
1486/* ARGSUSED */
1487void
1488repoutsw(int port, uint16_t *addr, int cnt)
1489{}
1490
1491#else	/* __lint */
1492
1493#if defined(__amd64)
1494
1495	ENTRY(repoutsw)
1496	movl	%edx, %ecx
1497	movw	%di, %dx
1498	rep
1499	  D16 outsl
1500	ret
1501	SET_SIZE(repoutsw)
1502
1503#elif defined(__i386)
1504
1505	/*
1506	 * The arguments and saved registers are on the stack in the
1507	 *  following order:
1508	 *      |  cnt  |  +16
1509	 *      | *addr |  +12
1510	 *      | port  |  +8
1511	 *      |  eip  |  +4
1512	 *      |  esi  |  <-- %esp
1513	 * If additional values are pushed onto the stack, make sure
1514	 * to adjust the following constants accordingly.
1515	 */
1516	.set	PORT, 8
1517	.set	ADDR, 12
1518	.set	COUNT, 16
1519
1520	ENTRY(repoutsw)
1521	pushl	%esi
1522	movl	PORT(%esp), %edx
1523	movl	ADDR(%esp), %esi
1524	movl	COUNT(%esp), %ecx
1525	rep
1526	  D16 outsl
1527	popl	%esi
1528	ret
1529	SET_SIZE(repoutsw)
1530
1531#endif	/* __i386 */
1532#endif	/* __lint */
1533
1534
1535#if defined(__lint)
1536
1537/* ARGSUSED */
1538void
1539repinsw(int port_addr, uint16_t *addr, int cnt)
1540{}
1541
1542#else	/* __lint */
1543
1544#if defined(__amd64)
1545
1546	ENTRY(repinsw)
1547	movl	%edx, %ecx
1548	movw	%di, %dx
1549	rep
1550	  D16 insl
1551	ret
1552	SET_SIZE(repinsw)
1553
1554#elif defined(__i386)
1555
1556	ENTRY(repinsw)
1557	pushl	%edi
1558	movl	PORT(%esp), %edx
1559	movl	ADDR(%esp), %edi
1560	movl	COUNT(%esp), %ecx
1561	rep
1562	  D16 insl
1563	popl	%edi
1564	ret
1565	SET_SIZE(repinsw)
1566
1567#endif	/* __i386 */
1568#endif	/* __lint */
1569
1570
1571#if defined(__lint)
1572
1573/* ARGSUSED */
1574void
1575repinsb(int port, uint8_t *addr, int count)
1576{}
1577
1578#else	/* __lint */
1579
1580#if defined(__amd64)
1581
1582	ENTRY(repinsb)
1583	movl	%edx, %ecx
1584	movw	%di, %dx
1585	movq	%rsi, %rdi
1586	rep
1587	  insb
1588	ret
1589	SET_SIZE(repinsb)
1590
1591#elif defined(__i386)
1592
1593	/*
1594	 * The arguments and saved registers are on the stack in the
1595	 *  following order:
1596	 *      |  cnt  |  +16
1597	 *      | *addr |  +12
1598	 *      | port  |  +8
1599	 *      |  eip  |  +4
1600	 *      |  esi  |  <-- %esp
1601	 * If additional values are pushed onto the stack, make sure
1602	 * to adjust the following constants accordingly.
1603	 */
1604	.set	IO_PORT, 8
1605	.set	IO_ADDR, 12
1606	.set	IO_COUNT, 16
1607
1608	ENTRY(repinsb)
1609	pushl	%edi
1610	movl	IO_ADDR(%esp), %edi
1611	movl	IO_COUNT(%esp), %ecx
1612	movl	IO_PORT(%esp), %edx
1613	rep
1614	  insb
1615	popl	%edi
1616	ret
1617	SET_SIZE(repinsb)
1618
1619#endif	/* __i386 */
1620#endif	/* __lint */
1621
1622
1623/*
1624 * Input a stream of 32-bit words.
1625 * NOTE: count is a DWORD count.
1626 */
1627#if defined(__lint)
1628
1629/* ARGSUSED */
1630void
1631repinsd(int port, uint32_t *addr, int count)
1632{}
1633
1634#else	/* __lint */
1635
1636#if defined(__amd64)
1637
1638	ENTRY(repinsd)
1639	movl	%edx, %ecx
1640	movw	%di, %dx
1641	movq	%rsi, %rdi
1642	rep
1643	  insl
1644	ret
1645	SET_SIZE(repinsd)
1646
1647#elif defined(__i386)
1648
1649	ENTRY(repinsd)
1650	pushl	%edi
1651	movl	IO_ADDR(%esp), %edi
1652	movl	IO_COUNT(%esp), %ecx
1653	movl	IO_PORT(%esp), %edx
1654	rep
1655	  insl
1656	popl	%edi
1657	ret
1658	SET_SIZE(repinsd)
1659
1660#endif	/* __i386 */
1661#endif	/* __lint */
1662
1663/*
1664 * Output a stream of bytes
1665 * NOTE: count is a byte count
1666 */
1667#if defined(__lint)
1668
1669/* ARGSUSED */
1670void
1671repoutsb(int port, uint8_t *addr, int count)
1672{}
1673
1674#else	/* __lint */
1675
1676#if defined(__amd64)
1677
1678	ENTRY(repoutsb)
1679	movl	%edx, %ecx
1680	movw	%di, %dx
1681	rep
1682	  outsb
1683	ret
1684	SET_SIZE(repoutsb)
1685
1686#elif defined(__i386)
1687
1688	ENTRY(repoutsb)
1689	pushl	%esi
1690	movl	IO_ADDR(%esp), %esi
1691	movl	IO_COUNT(%esp), %ecx
1692	movl	IO_PORT(%esp), %edx
1693	rep
1694	  outsb
1695	popl	%esi
1696	ret
1697	SET_SIZE(repoutsb)
1698
1699#endif	/* __i386 */
1700#endif	/* __lint */
1701
1702/*
1703 * Output a stream of 32-bit words
1704 * NOTE: count is a DWORD count
1705 */
1706#if defined(__lint)
1707
1708/* ARGSUSED */
1709void
1710repoutsd(int port, uint32_t *addr, int count)
1711{}
1712
1713#else	/* __lint */
1714
1715#if defined(__amd64)
1716
1717	ENTRY(repoutsd)
1718	movl	%edx, %ecx
1719	movw	%di, %dx
1720	rep
1721	  outsl
1722	ret
1723	SET_SIZE(repoutsd)
1724
1725#elif defined(__i386)
1726
1727	ENTRY(repoutsd)
1728	pushl	%esi
1729	movl	IO_ADDR(%esp), %esi
1730	movl	IO_COUNT(%esp), %ecx
1731	movl	IO_PORT(%esp), %edx
1732	rep
1733	  outsl
1734	popl	%esi
1735	ret
1736	SET_SIZE(repoutsd)
1737
1738#endif	/* __i386 */
1739#endif	/* __lint */
1740
1741/*
1742 * void int3(void)
1743 * void int18(void)
1744 * void int20(void)
1745 */
1746
1747#if defined(__lint)
1748
1749void
1750int3(void)
1751{}
1752
1753void
1754int18(void)
1755{}
1756
1757void
1758int20(void)
1759{}
1760
1761#else	/* __lint */
1762
1763	ENTRY(int3)
1764	int	$T_BPTFLT
1765	ret
1766	SET_SIZE(int3)
1767
1768	ENTRY(int18)
1769	int	$T_MCE
1770	ret
1771	SET_SIZE(int18)
1772
1773	ENTRY(int20)
1774	movl	boothowto, %eax
1775	andl	$RB_DEBUG, %eax
1776	jz	1f
1777
1778	int	$T_DBGENTR
17791:
1780	rep;	ret	/* use 2 byte return instruction when branch target */
1781			/* AMD Software Optimization Guide - Section 6.2 */
1782	SET_SIZE(int20)
1783
1784#endif	/* __lint */
1785
1786#if defined(__lint)
1787
1788/* ARGSUSED */
1789int
1790scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
1791{ return (0); }
1792
1793#else	/* __lint */
1794
1795#if defined(__amd64)
1796
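/*
 * scanc() has no block comment of its own; as a reading aid, the loop
 * below is roughly equivalent to this C sketch:
 *
 *	uchar_t *end = &cp[size];
 *	while (cp < end && (table[*cp] & mask) == 0)
 *		cp++;
 *	return (end - cp);
 */
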
1797	ENTRY(scanc)
1798					/* rdi == size */
1799					/* rsi == cp */
1800					/* rdx == table */
1801					/* rcx == mask */
1802	addq	%rsi, %rdi		/* end = &cp[size] */
1803.scanloop:
1804	cmpq	%rdi, %rsi		/* while (cp < end */
1805	jnb	.scandone
1806	movzbq	(%rsi), %r8		/* %r8 = *cp */
1807	incq	%rsi			/* cp++ */
1808	testb	%cl, (%r8, %rdx)
1809	jz	.scanloop		/*  && (table[*cp] & mask) == 0) */
1810	decq	%rsi			/* (fix post-increment) */
1811.scandone:
1812	movl	%edi, %eax
1813	subl	%esi, %eax		/* return (end - cp) */
1814	ret
1815	SET_SIZE(scanc)
1816
1817#elif defined(__i386)
1818
1819	ENTRY(scanc)
1820	pushl	%edi
1821	pushl	%esi
1822	movb	24(%esp), %cl		/* mask = %cl */
1823	movl	16(%esp), %esi		/* cp = %esi */
1824	movl	20(%esp), %edx		/* table = %edx */
1825	movl	%esi, %edi
1826	addl	12(%esp), %edi		/* end = &cp[size]; */
1827.scanloop:
1828	cmpl	%edi, %esi		/* while (cp < end */
1829	jnb	.scandone
1830	movzbl	(%esi),  %eax		/* %al = *cp */
1831	incl	%esi			/* cp++ */
1832	movb	(%edx,  %eax), %al	/* %al = table[*cp] */
1833	testb	%al, %cl
1834	jz	.scanloop		/*   && (table[*cp] & mask) == 0) */
1835	dec	%esi			/* post-incremented */
1836.scandone:
1837	movl	%edi, %eax
1838	subl	%esi, %eax		/* return (end - cp) */
1839	popl	%esi
1840	popl	%edi
1841	ret
1842	SET_SIZE(scanc)
1843
1844#endif	/* __i386 */
1845#endif	/* __lint */
1846
1847/*
1848 * Replacement functions for ones that are normally inlined.
1849 * In addition to the copy in i86.il, they are defined here just in case.
1850 */
1851
1852#if defined(__lint)
1853
1854ulong_t
1855intr_clear(void)
1856{ return (0); }
1857
1858ulong_t
1859clear_int_flag(void)
1860{ return (0); }
1861
1862#else	/* __lint */
1863
1864#if defined(__amd64)
1865
1866	ENTRY(intr_clear)
1867	ENTRY(clear_int_flag)
1868	pushfq
1869	popq	%rax
1870#if defined(__xpv)
1871	leaq	xpv_panicking, %rdi
1872	movl	(%rdi), %edi
1873	cmpl	$0, %edi
1874	jne	2f
1875	CLIRET(%rdi, %dl)	/* returns event mask in %dl */
1876	/*
1877	 * Synthesize the PS_IE bit from the event mask bit
1878	 */
1879	andq    $_BITNOT(PS_IE), %rax
1880	testb	$1, %dl
1881	jnz	1f
1882	orq	$PS_IE, %rax
18831:
1884	ret
18852:
1886#endif
1887	CLI(%rdi)
1888	ret
1889	SET_SIZE(clear_int_flag)
1890	SET_SIZE(intr_clear)
1891
1892#elif defined(__i386)
1893
1894	ENTRY(intr_clear)
1895	ENTRY(clear_int_flag)
1896	pushfl
1897	popl	%eax
1898#if defined(__xpv)
1899	leal	xpv_panicking, %edx
1900	movl	(%edx), %edx
1901	cmpl	$0, %edx
1902	jne	2f
1903	CLIRET(%edx, %cl)	/* returns event mask in %cl */
1904	/*
1905	 * Synthesize the PS_IE bit from the event mask bit
1906	 */
1907	andl    $_BITNOT(PS_IE), %eax
1908	testb	$1, %cl
1909	jnz	1f
1910	orl	$PS_IE, %eax
19111:
1912	ret
19132:
1914#endif
1915	CLI(%edx)
1916	ret
1917	SET_SIZE(clear_int_flag)
1918	SET_SIZE(intr_clear)
1919
1920#endif	/* __i386 */
1921#endif	/* __lint */
1922
1923#if defined(__lint)
1924
1925struct cpu *
1926curcpup(void)
1927{ return 0; }
1928
1929#else	/* __lint */
1930
1931#if defined(__amd64)
1932
1933	ENTRY(curcpup)
1934	movq	%gs:CPU_SELF, %rax
1935	ret
1936	SET_SIZE(curcpup)
1937
1938#elif defined(__i386)
1939
1940	ENTRY(curcpup)
1941	movl	%gs:CPU_SELF, %eax
1942	ret
1943	SET_SIZE(curcpup)
1944
1945#endif	/* __i386 */
1946#endif	/* __lint */
1947
1948#if defined(__lint)
1949
1950/* ARGSUSED */
1951uint32_t
1952htonl(uint32_t i)
1953{ return (0); }
1954
1955/* ARGSUSED */
1956uint32_t
1957ntohl(uint32_t i)
1958{ return (0); }
1959
1960#else	/* __lint */
1961
1962#if defined(__amd64)
1963
1964	/* XX64 there must be shorter sequences for this */
1965	ENTRY(htonl)
1966	ALTENTRY(ntohl)
1967	movl	%edi, %eax
1968	bswap	%eax
1969	ret
1970	SET_SIZE(ntohl)
1971	SET_SIZE(htonl)
1972
1973#elif defined(__i386)
1974
1975	ENTRY(htonl)
1976	ALTENTRY(ntohl)
1977	movl	4(%esp), %eax
1978	bswap	%eax
1979	ret
1980	SET_SIZE(ntohl)
1981	SET_SIZE(htonl)
1982
1983#endif	/* __i386 */
1984#endif	/* __lint */
1985
1986#if defined(__lint)
1987
1988/* ARGSUSED */
1989uint16_t
1990htons(uint16_t i)
1991{ return (0); }
1992
1993/* ARGSUSED */
1994uint16_t
1995ntohs(uint16_t i)
1996{ return (0); }
1997
1998
1999#else	/* __lint */
2000
2001#if defined(__amd64)
2002
2003	/* XX64 there must be better sequences for this */
2004	ENTRY(htons)
2005	ALTENTRY(ntohs)
2006	movl	%edi, %eax
2007	bswap	%eax
2008	shrl	$16, %eax
2009	ret
2010	SET_SIZE(ntohs)
2011	SET_SIZE(htons)
2012
2013#elif defined(__i386)
2014
2015	ENTRY(htons)
2016	ALTENTRY(ntohs)
2017	movl	4(%esp), %eax
2018	bswap	%eax
2019	shrl	$16, %eax
2020	ret
2021	SET_SIZE(ntohs)
2022	SET_SIZE(htons)
2023
2024#endif	/* __i386 */
2025#endif	/* __lint */
2026
2027
2028#if defined(__lint)
2029
2030/* ARGSUSED */
2031void
2032intr_restore(ulong_t i)
2033{ return; }
2034
2035/* ARGSUSED */
2036void
2037restore_int_flag(ulong_t i)
2038{ return; }
2039
2040#else	/* __lint */
2041
2042#if defined(__amd64)
2043
2044	ENTRY(intr_restore)
2045	ENTRY(restore_int_flag)
2046	pushq	%rdi
2047	popfq
2048#if defined(__xpv)
2049	leaq	xpv_panicking, %rsi
2050	movl	(%rsi), %esi
2051	cmpl	$0, %esi
2052	jne	1f
2053	/*
2054	 * Since we're -really- running unprivileged, our attempt
2055	 * to change the state of the IF bit will be ignored.
2056	 * The virtual IF bit is tweaked by CLI and STI.
2057	 */
2058	IE_TO_EVENT_MASK(%rsi, %rdi)
20591:
2060#endif
2061	ret
2062	SET_SIZE(restore_int_flag)
2063	SET_SIZE(intr_restore)
2064
2065#elif defined(__i386)
2066
2067	ENTRY(intr_restore)
2068	ENTRY(restore_int_flag)
2069	movl	4(%esp), %eax
2070	pushl	%eax
2071	popfl
2072#if defined(__xpv)
2073	leal	xpv_panicking, %edx
2074	movl	(%edx), %edx
2075	cmpl	$0, %edx
2076	jne	1f
2077	/*
2078	 * Since we're -really- running unprivileged, our attempt
2079	 * to change the state of the IF bit will be ignored.
2080	 * The virtual IF bit is tweaked by CLI and STI.
2081	 */
2082	IE_TO_EVENT_MASK(%edx, %eax)
20831:
2084#endif
2085	ret
2086	SET_SIZE(restore_int_flag)
2087	SET_SIZE(intr_restore)
2088
2089#endif	/* __i386 */
2090#endif	/* __lint */
2091
2092#if defined(__lint)
2093
2094void
2095sti(void)
2096{}
2097
2098void
2099cli(void)
2100{}
2101
2102#else	/* __lint */
2103
2104	ENTRY(sti)
2105	STI
2106	ret
2107	SET_SIZE(sti)
2108
2109	ENTRY(cli)
2110#if defined(__amd64)
2111	CLI(%rax)
2112#elif defined(__i386)
2113	CLI(%eax)
2114#endif	/* __i386 */
2115	ret
2116	SET_SIZE(cli)
2117
2118#endif	/* __lint */
2119
2120#if defined(__lint)
2121
2122dtrace_icookie_t
2123dtrace_interrupt_disable(void)
2124{ return (0); }
2125
2126#else   /* __lint */
2127
2128#if defined(__amd64)
2129
2130	ENTRY(dtrace_interrupt_disable)
2131	pushfq
2132	popq	%rax
2133#if defined(__xpv)
2134	leaq	xpv_panicking, %rdi
2135	movl	(%rdi), %edi
2136	cmpl	$0, %edi
2137	jne	1f
2138	CLIRET(%rdi, %dl)	/* returns event mask in %dl */
2139	/*
2140	 * Synthesize the PS_IE bit from the event mask bit
2141	 */
2142	andq    $_BITNOT(PS_IE), %rax
2143	testb	$1, %dl
2144	jnz	1f
2145	orq	$PS_IE, %rax
21461:
2147#else
2148	CLI(%rdx)
2149#endif
2150	ret
2151	SET_SIZE(dtrace_interrupt_disable)
2152
2153#elif defined(__i386)
2154
2155	ENTRY(dtrace_interrupt_disable)
2156	pushfl
2157	popl	%eax
2158#if defined(__xpv)
2159	leal	xpv_panicking, %edx
2160	movl	(%edx), %edx
2161	cmpl	$0, %edx
2162	jne	1f
2163	CLIRET(%edx, %cl)	/* returns event mask in %cl */
2164	/*
2165	 * Synthesize the PS_IE bit from the event mask bit
2166	 */
2167	andl    $_BITNOT(PS_IE), %eax
2168	testb	$1, %cl
2169	jnz	1f
2170	orl	$PS_IE, %eax
21711:
2172#else
2173	CLI(%edx)
2174#endif
2175	ret
2176	SET_SIZE(dtrace_interrupt_disable)
2177
2178#endif	/* __i386 */
2179#endif	/* __lint */
2180
2181#if defined(__lint)
2182
2183/*ARGSUSED*/
2184void
2185dtrace_interrupt_enable(dtrace_icookie_t cookie)
2186{}
2187
2188#else	/* __lint */
2189
2190#if defined(__amd64)
2191
2192	ENTRY(dtrace_interrupt_enable)
2193	pushq	%rdi
2194	popfq
2195#if defined(__xpv)
2196	leaq	xpv_panicking, %rdx
2197	movl	(%rdx), %edx
2198	cmpl	$0, %edx
2199	jne	1f
2200	/*
2201	 * Since we're -really- running unprivileged, our attempt
2202	 * to change the state of the IF bit will be ignored. The
2203	 * virtual IF bit is tweaked by CLI and STI.
2204	 */
2205	IE_TO_EVENT_MASK(%rdx, %rdi)
2206#endif
2207	ret
2208	SET_SIZE(dtrace_interrupt_enable)
2209
2210#elif defined(__i386)
2211
2212	ENTRY(dtrace_interrupt_enable)
2213	movl	4(%esp), %eax
2214	pushl	%eax
2215	popfl
2216#if defined(__xpv)
2217	leal	xpv_panicking, %edx
2218	movl	(%edx), %edx
2219	cmpl	$0, %edx
2220	jne	1f
2221	/*
2222	 * Since we're -really- running unprivileged, our attempt
2223	 * to change the state of the IF bit will be ignored. The
2224	 * virtual IF bit is tweaked by CLI and STI.
2225	 */
2226	IE_TO_EVENT_MASK(%edx, %eax)
2227#endif
2228	ret
2229	SET_SIZE(dtrace_interrupt_enable)
2230
2231#endif	/* __i386 */
2232#endif	/* __lint */
2233
2234
2235#if defined(lint)
2236
2237void
2238dtrace_membar_producer(void)
2239{}
2240
2241void
2242dtrace_membar_consumer(void)
2243{}
2244
2245#else	/* __lint */
2246
2247	ENTRY(dtrace_membar_producer)
2248	rep;	ret	/* use 2 byte return instruction when branch target */
2249			/* AMD Software Optimization Guide - Section 6.2 */
2250	SET_SIZE(dtrace_membar_producer)
2251
2252	ENTRY(dtrace_membar_consumer)
2253	rep;	ret	/* use 2 byte return instruction when branch target */
2254			/* AMD Software Optimization Guide - Section 6.2 */
2255	SET_SIZE(dtrace_membar_consumer)
2256
2257#endif	/* __lint */
2258
2259#if defined(__lint)
2260
2261kthread_id_t
2262threadp(void)
2263{ return ((kthread_id_t)0); }
2264
2265#else	/* __lint */
2266
2267#if defined(__amd64)
2268
2269	ENTRY(threadp)
2270	movq	%gs:CPU_THREAD, %rax
2271	ret
2272	SET_SIZE(threadp)
2273
2274#elif defined(__i386)
2275
2276	ENTRY(threadp)
2277	movl	%gs:CPU_THREAD, %eax
2278	ret
2279	SET_SIZE(threadp)
2280
2281#endif	/* __i386 */
2282#endif	/* __lint */
2283
2284/*
2285 *   Checksum routine for Internet Protocol Headers
2286 */
2287
2288#if defined(__lint)
2289
2290/* ARGSUSED */
2291unsigned int
2292ip_ocsum(
2293	ushort_t *address,	/* ptr to 1st message buffer */
2294	int halfword_count,	/* length of data */
2295	unsigned int sum)	/* partial checksum */
2296{
2297	int		i;
2298	unsigned int	psum = 0;	/* partial sum */
2299
2300	for (i = 0; i < halfword_count; i++, address++) {
2301		psum += *address;
2302	}
2303
2304	while ((psum >> 16) != 0) {
2305		psum = (psum & 0xffff) + (psum >> 16);
2306	}
2307
2308	psum += sum;
2309
2310	while ((psum >> 16) != 0) {
2311		psum = (psum & 0xffff) + (psum >> 16);
2312	}
2313
2314	return (psum);
2315}
2316
2317#else	/* __lint */
2318
2319#if defined(__amd64)
2320
2321	ENTRY(ip_ocsum)
2322	pushq	%rbp
2323	movq	%rsp, %rbp
2324#ifdef DEBUG
2325	movq	postbootkernelbase(%rip), %rax
2326	cmpq	%rax, %rdi
2327	jnb	1f
2328	xorl	%eax, %eax
2329	movq	%rdi, %rsi
2330	leaq	.ip_ocsum_panic_msg(%rip), %rdi
2331	call	panic
2332	/*NOTREACHED*/
2333.ip_ocsum_panic_msg:
2334	.string	"ip_ocsum: address 0x%p below kernelbase\n"
23351:
2336#endif
2337	movl	%esi, %ecx	/* halfword_count */
2338	movq	%rdi, %rsi	/* address */
2339				/* partial sum in %edx */
2340	xorl	%eax, %eax
2341	testl	%ecx, %ecx
2342	jz	.ip_ocsum_done
2343	testq	$3, %rsi
2344	jnz	.ip_csum_notaligned
2345.ip_csum_aligned:	/* XX64 opportunities for 8-byte operations? */
2346.next_iter:
2347	/* XX64 opportunities for prefetch? */
2348	/* XX64 compute csum with 64 bit quantities? */
2349	subl	$32, %ecx
2350	jl	.less_than_32
2351
2352	addl	0(%rsi), %edx
2353.only60:
2354	adcl	4(%rsi), %eax
2355.only56:
2356	adcl	8(%rsi), %edx
2357.only52:
2358	adcl	12(%rsi), %eax
2359.only48:
2360	adcl	16(%rsi), %edx
2361.only44:
2362	adcl	20(%rsi), %eax
2363.only40:
2364	adcl	24(%rsi), %edx
2365.only36:
2366	adcl	28(%rsi), %eax
2367.only32:
2368	adcl	32(%rsi), %edx
2369.only28:
2370	adcl	36(%rsi), %eax
2371.only24:
2372	adcl	40(%rsi), %edx
2373.only20:
2374	adcl	44(%rsi), %eax
2375.only16:
2376	adcl	48(%rsi), %edx
2377.only12:
2378	adcl	52(%rsi), %eax
2379.only8:
2380	adcl	56(%rsi), %edx
2381.only4:
2382	adcl	60(%rsi), %eax	/* could be adding -1 and -1 with a carry */
2383.only0:
2384	adcl	$0, %eax	/* could be adding -1 in eax with a carry */
2385	adcl	$0, %eax
2386
2387	addq	$64, %rsi
2388	testl	%ecx, %ecx
2389	jnz	.next_iter
2390
2391.ip_ocsum_done:
2392	addl	%eax, %edx
2393	adcl	$0, %edx
2394	movl	%edx, %eax	/* form a 16 bit checksum by */
2395	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
2396	addw	%dx, %ax
2397	adcw	$0, %ax
2398	andl	$0xffff, %eax
2399	leave
2400	ret
2401
2402.ip_csum_notaligned:
2403	xorl	%edi, %edi
2404	movw	(%rsi), %di
2405	addl	%edi, %edx
2406	adcl	$0, %edx
2407	addq	$2, %rsi
2408	decl	%ecx
2409	jmp	.ip_csum_aligned
2410
2411.less_than_32:
2412	addl	$32, %ecx
2413	testl	$1, %ecx
2414	jz	.size_aligned
2415	andl	$0xfe, %ecx
2416	movzwl	(%rsi, %rcx, 2), %edi
2417	addl	%edi, %edx
2418	adcl	$0, %edx
2419.size_aligned:
2420	movl	%ecx, %edi
2421	shrl	$1, %ecx
2422	shl	$1, %edi
2423	subq	$64, %rdi
2424	addq	%rdi, %rsi
2425	leaq    .ip_ocsum_jmptbl(%rip), %rdi
2426	leaq	(%rdi, %rcx, 8), %rdi
2427	xorl	%ecx, %ecx
2428	clc
2429	jmp 	*(%rdi)
2430
2431	.align	8
2432.ip_ocsum_jmptbl:
2433	.quad	.only0, .only4, .only8, .only12, .only16, .only20
2434	.quad	.only24, .only28, .only32, .only36, .only40, .only44
2435	.quad	.only48, .only52, .only56, .only60
2436	SET_SIZE(ip_ocsum)
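
/*
 * A note on the .less_than_32 tail handling above (descriptive only):
 * any leftover odd halfword is summed first, %rsi is then biased
 * backwards by (64 - remaining_bytes) so that the trailing adds of the
 * unrolled sequence land exactly on the remaining dwords, and the jump
 * table dispatches into the middle of that sequence at .onlyN, where
 * N is four times the number of dwords left.  The i386 version below
 * uses the same scheme with a table of 4-byte entries.
 */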
2437
2438#elif defined(__i386)
2439
2440	ENTRY(ip_ocsum)
2441	pushl	%ebp
2442	movl	%esp, %ebp
2443	pushl	%ebx
2444	pushl	%esi
2445	pushl	%edi
2446	movl	12(%ebp), %ecx	/* count of half words */
2447	movl	16(%ebp), %edx	/* partial checksum */
2448	movl	8(%ebp), %esi
2449	xorl	%eax, %eax
2450	testl	%ecx, %ecx
2451	jz	.ip_ocsum_done
2452
2453	testl	$3, %esi
2454	jnz	.ip_csum_notaligned
2455.ip_csum_aligned:
2456.next_iter:
2457	subl	$32, %ecx
2458	jl	.less_than_32
2459
2460	addl	0(%esi), %edx
2461.only60:
2462	adcl	4(%esi), %eax
2463.only56:
2464	adcl	8(%esi), %edx
2465.only52:
2466	adcl	12(%esi), %eax
2467.only48:
2468	adcl	16(%esi), %edx
2469.only44:
2470	adcl	20(%esi), %eax
2471.only40:
2472	adcl	24(%esi), %edx
2473.only36:
2474	adcl	28(%esi), %eax
2475.only32:
2476	adcl	32(%esi), %edx
2477.only28:
2478	adcl	36(%esi), %eax
2479.only24:
2480	adcl	40(%esi), %edx
2481.only20:
2482	adcl	44(%esi), %eax
2483.only16:
2484	adcl	48(%esi), %edx
2485.only12:
2486	adcl	52(%esi), %eax
2487.only8:
2488	adcl	56(%esi), %edx
2489.only4:
2490	adcl	60(%esi), %eax	/* We could be adding -1 and -1 with a carry */
2491.only0:
2492	adcl	$0, %eax	/* we could be adding -1 in eax with a carry */
2493	adcl	$0, %eax
2494
2495	addl	$64, %esi
2496	andl	%ecx, %ecx
2497	jnz	.next_iter
2498
2499.ip_ocsum_done:
2500	addl	%eax, %edx
2501	adcl	$0, %edx
2502	movl	%edx, %eax	/* form a 16 bit checksum by */
2503	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
2504	addw	%dx, %ax
2505	adcw	$0, %ax
2506	andl	$0xffff, %eax
2507	popl	%edi		/* restore registers */
2508	popl	%esi
2509	popl	%ebx
2510	leave
2511	ret
2512
2513.ip_csum_notaligned:
2514	xorl	%edi, %edi
2515	movw	(%esi), %di
2516	addl	%edi, %edx
2517	adcl	$0, %edx
2518	addl	$2, %esi
2519	decl	%ecx
2520	jmp	.ip_csum_aligned
2521
2522.less_than_32:
2523	addl	$32, %ecx
2524	testl	$1, %ecx
2525	jz	.size_aligned
2526	andl	$0xfe, %ecx
2527	movzwl	(%esi, %ecx, 2), %edi
2528	addl	%edi, %edx
2529	adcl	$0, %edx
2530.size_aligned:
2531	movl	%ecx, %edi
2532	shrl	$1, %ecx
2533	shl	$1, %edi
2534	subl	$64, %edi
2535	addl	%edi, %esi
2536	movl	$.ip_ocsum_jmptbl, %edi
2537	lea	(%edi, %ecx, 4), %edi
2538	xorl	%ecx, %ecx
2539	clc
2540	jmp 	*(%edi)
2541	SET_SIZE(ip_ocsum)
2542
2543	.data
2544	.align	4
2545
2546.ip_ocsum_jmptbl:
2547	.long	.only0, .only4, .only8, .only12, .only16, .only20
2548	.long	.only24, .only28, .only32, .only36, .only40, .only44
2549	.long	.only48, .only52, .only56, .only60
2550
2551
2552#endif	/* __i386 */
2553#endif	/* __lint */
2554
2555/*
2556 * multiply two 32-bit numbers and yield a u_longlong_t result, callable from C.
2557 * Provided to manipulate hrtime_t values.
2558 */
2559#if defined(__lint)
2560
2561/* result = a * b; */
2562
2563/* ARGSUSED */
2564unsigned long long
2565mul32(uint_t a, uint_t b)
2566{ return (0); }
2567
2568#else	/* __lint */
2569
2570#if defined(__amd64)
2571
2572	ENTRY(mul32)
2573	xorl	%edx, %edx	/* XX64 joe, paranoia? */
2574	movl	%edi, %eax
2575	mull	%esi
2576	shlq	$32, %rdx
2577	orq	%rdx, %rax
2578	ret
2579	SET_SIZE(mul32)
2580
2581#elif defined(__i386)
2582
2583	ENTRY(mul32)
2584	movl	8(%esp), %eax
2585	movl	4(%esp), %ecx
2586	mull	%ecx
2587	ret
2588	SET_SIZE(mul32)
2589
2590#endif	/* __i386 */
2591#endif	/* __lint */
2592
2593#if defined(notused)
2594#if defined(__lint)
2595/* ARGSUSED */
2596void
2597load_pte64(uint64_t *pte, uint64_t pte_value)
2598{}
2599#else	/* __lint */
2600	.globl load_pte64
2601load_pte64:
2602	movl	4(%esp), %eax
2603	movl	8(%esp), %ecx
2604	movl	12(%esp), %edx
2605	movl	%edx, 4(%eax)
2606	movl	%ecx, (%eax)
2607	ret
2608#endif	/* __lint */
2609#endif	/* notused */
2610
2611#if defined(__lint)
2612
2613/*ARGSUSED*/
2614void
2615scan_memory(caddr_t addr, size_t size)
2616{}
2617
2618#else	/* __lint */
2619
2620#if defined(__amd64)
2621
2622	ENTRY(scan_memory)
2623	shrq	$3, %rsi	/* convert %rsi from byte to quadword count */
2624	jz	.scanm_done
2625	movq	%rsi, %rcx	/* move count into rep control register */
2626	movq	%rdi, %rsi	/* move addr into lodsq control reg. */
2627	rep lodsq		/* scan the memory range */
2628.scanm_done:
2629	rep;	ret	/* use 2 byte return instruction when branch target */
2630			/* AMD Software Optimization Guide - Section 6.2 */
2631	SET_SIZE(scan_memory)
2632
2633#elif defined(__i386)
2634
2635	ENTRY(scan_memory)
2636	pushl	%ecx
2637	pushl	%esi
2638	movl	16(%esp), %ecx	/* move 2nd arg into rep control register */
2639	shrl	$2, %ecx	/* convert from byte count to word count */
2640	jz	.scanm_done
2641	movl	12(%esp), %esi	/* move 1st arg into lodsw control register */
2642	.byte	0xf3		/* rep prefix.  lame assembler.  sigh. */
2643	lodsl
2644.scanm_done:
2645	popl	%esi
2646	popl	%ecx
2647	ret
2648	SET_SIZE(scan_memory)
2649
2650#endif	/* __i386 */
2651#endif	/* __lint */
2652
2653
2654#if defined(__lint)
2655
2656/*ARGSUSED */
2657int
2658lowbit(ulong_t i)
2659{ return (0); }
2660
2661#else	/* __lint */
2662
2663#if defined(__amd64)
2664
2665	ENTRY(lowbit)
2666	movl	$-1, %eax
2667	bsfq	%rdi, %rax
2668	incl	%eax
2669	ret
2670	SET_SIZE(lowbit)
2671
2672#elif defined(__i386)
2673
2674	ENTRY(lowbit)
2675	movl	$-1, %eax
2676	bsfl	4(%esp), %eax
2677	incl	%eax
2678	ret
2679	SET_SIZE(lowbit)
2680
2681#endif	/* __i386 */
2682#endif	/* __lint */
2683
2684#if defined(__lint)
2685
2686/*ARGSUSED*/
2687int
2688highbit(ulong_t i)
2689{ return (0); }
2690
2691#else	/* __lint */
2692
2693#if defined(__amd64)
2694
2695	ENTRY(highbit)
2696	movl	$-1, %eax
2697	bsrq	%rdi, %rax
2698	incl	%eax
2699	ret
2700	SET_SIZE(highbit)
2701
2702#elif defined(__i386)
2703
2704	ENTRY(highbit)
2705	movl	$-1, %eax
2706	bsrl	4(%esp), %eax
2707	incl	%eax
2708	ret
2709	SET_SIZE(highbit)
2710
2711#endif	/* __i386 */
2712#endif	/* __lint */
2713
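/*
 * lowbit() and highbit() above return 1-based bit positions: lowbit(i) is
 * the position of the least significant set bit of i, highbit(i) that of
 * the most significant set bit, and both return 0 when i == 0.  In C terms,
 * lowbit() is roughly (illustrative sketch only):
 *
 *	int n;
 *
 *	if (i == 0)
 *		return (0);
 *	for (n = 1; (i & 1) == 0; n++)
 *		i >>= 1;
 *	return (n);
 */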
2714#if defined(__lint)
2715
2716/*ARGSUSED*/
2717uint64_t
2718rdmsr(uint_t r)
2719{ return (0); }
2720
2721/*ARGSUSED*/
2722void
2723wrmsr(uint_t r, const uint64_t val)
2724{}
2725
2726/*ARGSUSED*/
2727uint64_t
2728xrdmsr(uint_t r)
2729{ return (0); }
2730
2731/*ARGSUSED*/
2732void
2733xwrmsr(uint_t r, const uint64_t val)
2734{}
2735
2736void
2737invalidate_cache(void)
2738{}
2739
2740#else  /* __lint */
2741
2742#define	XMSR_ACCESS_VAL		$0x9c5a203a
2743
2744#if defined(__amd64)
2745
2746	ENTRY(rdmsr)
2747	movl	%edi, %ecx
2748	rdmsr
2749	shlq	$32, %rdx
2750	orq	%rdx, %rax
2751	ret
2752	SET_SIZE(rdmsr)
2753
2754	ENTRY(wrmsr)
2755	movq	%rsi, %rdx
2756	shrq	$32, %rdx
2757	movl	%esi, %eax
2758	movl	%edi, %ecx
2759	wrmsr
2760	ret
2761	SET_SIZE(wrmsr)
2762
2763	ENTRY(xrdmsr)
2764	pushq	%rbp
2765	movq	%rsp, %rbp
2766	movl	%edi, %ecx
2767	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
2768	rdmsr
2769	shlq	$32, %rdx
2770	orq	%rdx, %rax
2771	leave
2772	ret
2773	SET_SIZE(xrdmsr)
2774
2775	ENTRY(xwrmsr)
2776	pushq	%rbp
2777	movq	%rsp, %rbp
2778	movl	%edi, %ecx
2779	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
2780	movq	%rsi, %rdx
2781	shrq	$32, %rdx
2782	movl	%esi, %eax
2783	wrmsr
2784	leave
2785	ret
2786	SET_SIZE(xwrmsr)
2787
2788#elif defined(__i386)
2789
2790	ENTRY(rdmsr)
2791	movl	4(%esp), %ecx
2792	rdmsr
2793	ret
2794	SET_SIZE(rdmsr)
2795
2796	ENTRY(wrmsr)
2797	movl	4(%esp), %ecx
2798	movl	8(%esp), %eax
2799	movl	12(%esp), %edx
2800	wrmsr
2801	ret
2802	SET_SIZE(wrmsr)
2803
2804	ENTRY(xrdmsr)
2805	pushl	%ebp
2806	movl	%esp, %ebp
2807	movl	8(%esp), %ecx
2808	pushl	%edi
2809	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
2810	rdmsr
2811	popl	%edi
2812	leave
2813	ret
2814	SET_SIZE(xrdmsr)
2815
2816	ENTRY(xwrmsr)
2817	pushl	%ebp
2818	movl	%esp, %ebp
2819	movl	8(%esp), %ecx
2820	movl	12(%esp), %eax
2821	movl	16(%esp), %edx
2822	pushl	%edi
2823	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
2824	wrmsr
2825	popl	%edi
2826	leave
2827	ret
2828	SET_SIZE(xwrmsr)
2829
2830#endif	/* __i386 */
2831
2832	ENTRY(invalidate_cache)
2833	wbinvd
2834	ret
2835	SET_SIZE(invalidate_cache)
2836
2837#endif	/* __lint */
2838
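/*
 * The rdmsr/wrmsr instructions exchange the 64-bit MSR value as an
 * %edx:%eax pair; the wrappers above simply split and recombine it.  In C
 * terms, rdmsr() is roughly (illustrative sketch only, with lo and hi
 * standing for the two 32-bit halves delivered by the instruction):
 *
 *	return (((uint64_t)hi << 32) | lo);
 */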
2839#if defined(__lint)
2840
2841/*ARGSUSED*/
2842void
2843getcregs(struct cregs *crp)
2844{}
2845
2846#else	/* __lint */
2847
2848#if defined(__amd64)
2849
2850	ENTRY_NP(getcregs)
2851#if defined(__xpv)
2852	/*
2853	 * Only a few of the hardware control registers or descriptor tables
2854	 * are directly accessible to us, so just zero the structure.
2855	 *
2856	 * XXPV	Perhaps it would be helpful for the hypervisor to return
2857	 *	virtualized versions of these for post-mortem use.
2858	 *	(Need to reevaluate - perhaps it already does!)
2859	 */
2860	pushq	%rdi		/* save *crp */
2861	movq	$CREGSZ, %rsi
2862	call	bzero
2863	popq	%rdi
2864
2865	/*
2866	 * Dump what limited information we can
2867	 */
2868	movq	%cr0, %rax
2869	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
2870	movq	%cr2, %rax
2871	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
2872	movq	%cr3, %rax
2873	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
2874	movq	%cr4, %rax
2875	movq	%rax, CREG_CR4(%rdi)	/* cr4 */
2876
2877#else	/* __xpv */
2878
2879#define	GETMSR(r, off, d)	\
2880	movl	$r, %ecx;	\
2881	rdmsr;			\
2882	movl	%eax, off(d);	\
2883	movl	%edx, off+4(d)
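/*
 * In C terms, GETMSR() above is roughly (illustrative sketch only):
 *
 *	*(uint64_t *)((caddr_t)d + off) = rdmsr(r);
 */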
2884
2885	xorl	%eax, %eax
2886	movq	%rax, CREG_GDT+8(%rdi)
2887	sgdt	CREG_GDT(%rdi)		/* 10 bytes */
2888	movq	%rax, CREG_IDT+8(%rdi)
2889	sidt	CREG_IDT(%rdi)		/* 10 bytes */
2890	movq	%rax, CREG_LDT(%rdi)
2891	sldt	CREG_LDT(%rdi)		/* 2 bytes */
2892	movq	%rax, CREG_TASKR(%rdi)
2893	str	CREG_TASKR(%rdi)	/* 2 bytes */
2894	movq	%cr0, %rax
2895	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
2896	movq	%cr2, %rax
2897	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
2898	movq	%cr3, %rax
2899	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
2900	movq	%cr4, %rax
2901	movq	%rax, CREG_CR4(%rdi)	/* cr4 */
2902	movq	%cr8, %rax
2903	movq	%rax, CREG_CR8(%rdi)	/* cr8 */
2904	GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
2905	GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
2906#endif	/* __xpv */
2907	ret
2908	SET_SIZE(getcregs)
2909
2910#undef GETMSR
2911
2912#elif defined(__i386)
2913
2914	ENTRY_NP(getcregs)
2915#if defined(__xpv)
2916	/*
2917	 * Only a few of the hardware control registers or descriptor tables
2918	 * are directly accessible to us, so just zero the structure.
2919	 *
2920	 * XXPV	Perhaps it would be helpful for the hypervisor to return
2921	 *	virtualized versions of these for post-mortem use.
2922	 *	(Need to reevaluate - perhaps it already does!)
2923	 */
2924	movl	4(%esp), %edx
2925	pushl	$CREGSZ
2926	pushl	%edx
2927	call	bzero
2928	addl	$8, %esp
2929	movl	4(%esp), %edx
2930
2931	/*
2932	 * Dump what limited information we can
2933	 */
2934	movl	%cr0, %eax
2935	movl	%eax, CREG_CR0(%edx)	/* cr0 */
2936	movl	%cr2, %eax
2937	movl	%eax, CREG_CR2(%edx)	/* cr2 */
2938	movl	%cr3, %eax
2939	movl	%eax, CREG_CR3(%edx)	/* cr3 */
2940	movl	%cr4, %eax
2941	movl	%eax, CREG_CR4(%edx)	/* cr4 */
2942
2943#else	/* __xpv */
2944
2945	movl	4(%esp), %edx
2946	movw	$0, CREG_GDT+6(%edx)
2947	movw	$0, CREG_IDT+6(%edx)
2948	sgdt	CREG_GDT(%edx)		/* gdt */
2949	sidt	CREG_IDT(%edx)		/* idt */
2950	sldt	CREG_LDT(%edx)		/* ldt */
2951	str	CREG_TASKR(%edx)	/* task */
2952	movl	%cr0, %eax
2953	movl	%eax, CREG_CR0(%edx)	/* cr0 */
2954	movl	%cr2, %eax
2955	movl	%eax, CREG_CR2(%edx)	/* cr2 */
2956	movl	%cr3, %eax
2957	movl	%eax, CREG_CR3(%edx)	/* cr3 */
2958	testl	$X86_LARGEPAGE, x86_feature
2959	jz	.nocr4
2960	movl	%cr4, %eax
2961	movl	%eax, CREG_CR4(%edx)	/* cr4 */
2962	jmp	.skip
2963.nocr4:
2964	movl	$0, CREG_CR4(%edx)
2965.skip:
2966#endif
2967	ret
2968	SET_SIZE(getcregs)
2969
2970#endif	/* __i386 */
2971#endif	/* __lint */
2972
2973
2974/*
2975 * A panic trigger is a word which is updated atomically and can only be set
2976 * once.  We atomically store 0xDEFACEDD and load the old value.  If the
2977 * previous value was 0, we succeed and return 1; otherwise return 0.
2978 * This allows a partially corrupt trigger to still trigger correctly.  DTrace
2979 * has its own version of this function to allow it to panic correctly from
2980 * probe context.
2981 */
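/*
 * In C terms, the triggers below are roughly (illustrative sketch only,
 * using atomic_swap_32() loosely for the locked xchg):
 *
 *	int
 *	panic_trigger(int *tp)
 *	{
 *		return (atomic_swap_32((volatile uint32_t *)tp,
 *		    0xdefacedd) == 0);
 *	}
 */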
2982#if defined(__lint)
2983
2984/*ARGSUSED*/
2985int
2986panic_trigger(int *tp)
2987{ return (0); }
2988
2989/*ARGSUSED*/
2990int
2991dtrace_panic_trigger(int *tp)
2992{ return (0); }
2993
2994#else	/* __lint */
2995
2996#if defined(__amd64)
2997
2998	ENTRY_NP(panic_trigger)
2999	xorl	%eax, %eax
3000	movl	$0xdefacedd, %edx
3001	lock
3002	  xchgl	%edx, (%rdi)
3003	cmpl	$0, %edx
3004	je	0f
3005	movl	$0, %eax
3006	ret
30070:	movl	$1, %eax
3008	ret
3009	SET_SIZE(panic_trigger)
3010
3011	ENTRY_NP(dtrace_panic_trigger)
3012	xorl	%eax, %eax
3013	movl	$0xdefacedd, %edx
3014	lock
3015	  xchgl	%edx, (%rdi)
3016	cmpl	$0, %edx
3017	je	0f
3018	movl	$0, %eax
3019	ret
30200:	movl	$1, %eax
3021	ret
3022	SET_SIZE(dtrace_panic_trigger)
3023
3024#elif defined(__i386)
3025
3026	ENTRY_NP(panic_trigger)
3027	movl	4(%esp), %edx		/ %edx = address of trigger
3028	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
3029	lock				/ assert lock
3030	xchgl %eax, (%edx)		/ exchange %eax and the trigger
3031	cmpl	$0, %eax		/ if (%eax == 0x0)
3032	je	0f			/   return (1);
3033	movl	$0, %eax		/ else
3034	ret				/   return (0);
30350:	movl	$1, %eax
3036	ret
3037	SET_SIZE(panic_trigger)
3038
3039	ENTRY_NP(dtrace_panic_trigger)
3040	movl	4(%esp), %edx		/ %edx = address of trigger
3041	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
3042	lock				/ assert lock
3043	xchgl %eax, (%edx)		/ exchange %eax and the trigger
3044	cmpl	$0, %eax		/ if (%eax == 0x0)
3045	je	0f			/   return (1);
3046	movl	$0, %eax		/ else
3047	ret				/   return (0);
30480:	movl	$1, %eax
3049	ret
3050	SET_SIZE(dtrace_panic_trigger)
3051
3052#endif	/* __i386 */
3053#endif	/* __lint */
3054
3055/*
3056 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
3057 * into the panic code implemented in panicsys().  vpanic() is responsible
3058 * for passing through the format string and arguments, and constructing a
3059 * regs structure on the stack into which it saves the current register
3060 * values.  If we are not dying due to a fatal trap, these registers will
3061 * then be preserved in panicbuf as the current processor state.  Before
3062 * invoking panicsys(), vpanic() activates the first panic trigger (see
3063 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
3064 * DTrace takes a slightly different panic path if it must panic from probe
3065 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
3066 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
3067 * branches back into vpanic().
3068 */
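/*
 * In rough pseudocode (the stack switch and register capture below cannot
 * be expressed in portable C), vpanic() is:
 *
 *	vpanic(format, alist)
 *	{
 *		push the caller-visible registers onto the current stack;
 *		on_panic_stack = panic_trigger(&panic_quiesce);
 *		if (on_panic_stack)
 *			switch to panic_stack;
 *		build a struct regs from the pushed register values;
 *		panicsys(format, alist, &regs, on_panic_stack);
 *	}
 *
 * dtrace_vpanic() differs only in calling dtrace_panic_trigger().
 */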
3069#if defined(__lint)
3070
3071/*ARGSUSED*/
3072void
3073vpanic(const char *format, va_list alist)
3074{}
3075
3076/*ARGSUSED*/
3077void
3078dtrace_vpanic(const char *format, va_list alist)
3079{}
3080
3081#else	/* __lint */
3082
3083#if defined(__amd64)
3084
3085	ENTRY_NP(vpanic)			/* Initial stack layout: */
3086
3087	pushq	%rbp				/* | %rip | 	0x60	*/
3088	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
3089	pushfq					/* | rfl  |	0x50	*/
3090	pushq	%r11				/* | %r11 |	0x48	*/
3091	pushq	%r10				/* | %r10 |	0x40	*/
3092	pushq	%rbx				/* | %rbx |	0x38	*/
3093	pushq	%rax				/* | %rax |	0x30	*/
3094	pushq	%r9				/* | %r9  |	0x28	*/
3095	pushq	%r8				/* | %r8  |	0x20	*/
3096	pushq	%rcx				/* | %rcx |	0x18	*/
3097	pushq	%rdx				/* | %rdx |	0x10	*/
3098	pushq	%rsi				/* | %rsi |	0x8 alist */
3099	pushq	%rdi				/* | %rdi |	0x0 format */
3100
3101	movq	%rsp, %rbx			/* %rbx = current %rsp */
3102
3103	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
3104	call	panic_trigger			/* %eax = panic_trigger() */
3105
3106vpanic_common:
3107	/*
3108	 * The panic_trigger result is in %eax from the call above, and
3109	 * dtrace_vpanic() places it in %eax before branching here.
3110	 * The rdmsr instructions that follow below will clobber %eax so
3111	 * we stash the panic_trigger result in %r11d.
3112	 */
3113	movl	%eax, %r11d
3114	cmpl	$0, %r11d
3115	je	0f
3116
3117	/*
3118	 * If panic_trigger() was successful, we are the first to initiate a
3119	 * panic: we now switch to the reserved panic_stack before continuing.
3120	 */
3121	leaq	panic_stack(%rip), %rsp
3122	addq	$PANICSTKSIZE, %rsp
31230:	subq	$REGSIZE, %rsp
3124	/*
3125	 * Now that we've got everything set up, store the register values as
3126	 * they were when we entered vpanic() to the designated location in
3127	 * the regs structure we allocated on the stack.
3128	 */
3129	movq	0x0(%rbx), %rcx
3130	movq	%rcx, REGOFF_RDI(%rsp)
3131	movq	0x8(%rbx), %rcx
3132	movq	%rcx, REGOFF_RSI(%rsp)
3133	movq	0x10(%rbx), %rcx
3134	movq	%rcx, REGOFF_RDX(%rsp)
3135	movq	0x18(%rbx), %rcx
3136	movq	%rcx, REGOFF_RCX(%rsp)
3137	movq	0x20(%rbx), %rcx
3138
3139	movq	%rcx, REGOFF_R8(%rsp)
3140	movq	0x28(%rbx), %rcx
3141	movq	%rcx, REGOFF_R9(%rsp)
3142	movq	0x30(%rbx), %rcx
3143	movq	%rcx, REGOFF_RAX(%rsp)
3144	movq	0x38(%rbx), %rcx
3145	movq	%rcx, REGOFF_RBX(%rsp)
3146	movq	0x58(%rbx), %rcx
3147
3148	movq	%rcx, REGOFF_RBP(%rsp)
3149	movq	0x40(%rbx), %rcx
3150	movq	%rcx, REGOFF_R10(%rsp)
3151	movq	0x48(%rbx), %rcx
3152	movq	%rcx, REGOFF_R11(%rsp)
3153	movq	%r12, REGOFF_R12(%rsp)
3154
3155	movq	%r13, REGOFF_R13(%rsp)
3156	movq	%r14, REGOFF_R14(%rsp)
3157	movq	%r15, REGOFF_R15(%rsp)
3158
3159	xorl	%ecx, %ecx
3160	movw	%ds, %cx
3161	movq	%rcx, REGOFF_DS(%rsp)
3162	movw	%es, %cx
3163	movq	%rcx, REGOFF_ES(%rsp)
3164	movw	%fs, %cx
3165	movq	%rcx, REGOFF_FS(%rsp)
3166	movw	%gs, %cx
3167	movq	%rcx, REGOFF_GS(%rsp)
3168
3169	movq	$0, REGOFF_TRAPNO(%rsp)
3170
3171	movq	$0, REGOFF_ERR(%rsp)
3172	leaq	vpanic(%rip), %rcx
3173	movq	%rcx, REGOFF_RIP(%rsp)
3174	movw	%cs, %cx
3175	movzwq	%cx, %rcx
3176	movq	%rcx, REGOFF_CS(%rsp)
3177	movq	0x50(%rbx), %rcx
3178	movq	%rcx, REGOFF_RFL(%rsp)
3179	movq	%rbx, %rcx
3180	addq	$0x60, %rcx
3181	movq	%rcx, REGOFF_RSP(%rsp)
3182	movw	%ss, %cx
3183	movzwq	%cx, %rcx
3184	movq	%rcx, REGOFF_SS(%rsp)
3185
3186	/*
3187	 * panicsys(format, alist, rp, on_panic_stack)
3188	 */
3189	movq	REGOFF_RDI(%rsp), %rdi		/* format */
3190	movq	REGOFF_RSI(%rsp), %rsi		/* alist */
3191	movq	%rsp, %rdx			/* struct regs */
3192	movl	%r11d, %ecx			/* on_panic_stack */
3193	call	panicsys
3194	addq	$REGSIZE, %rsp
3195	popq	%rdi
3196	popq	%rsi
3197	popq	%rdx
3198	popq	%rcx
3199	popq	%r8
3200	popq	%r9
3201	popq	%rax
3202	popq	%rbx
3203	popq	%r10
3204	popq	%r11
3205	popfq
3206	leave
3207	ret
3208	SET_SIZE(vpanic)
3209
3210	ENTRY_NP(dtrace_vpanic)			/* Initial stack layout: */
3211
3212	pushq	%rbp				/* | %rip | 	0x60	*/
3213	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
3214	pushfq					/* | rfl  |	0x50	*/
3215	pushq	%r11				/* | %r11 |	0x48	*/
3216	pushq	%r10				/* | %r10 |	0x40	*/
3217	pushq	%rbx				/* | %rbx |	0x38	*/
3218	pushq	%rax				/* | %rax |	0x30	*/
3219	pushq	%r9				/* | %r9  |	0x28	*/
3220	pushq	%r8				/* | %r8  |	0x20	*/
3221	pushq	%rcx				/* | %rcx |	0x18	*/
3222	pushq	%rdx				/* | %rdx |	0x10	*/
3223	pushq	%rsi				/* | %rsi |	0x8 alist */
3224	pushq	%rdi				/* | %rdi |	0x0 format */
3225
3226	movq	%rsp, %rbx			/* %rbx = current %rsp */
3227
3228	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
3229	call	dtrace_panic_trigger	/* %eax = dtrace_panic_trigger() */
3230	jmp	vpanic_common
3231
3232	SET_SIZE(dtrace_vpanic)
3233
3234#elif defined(__i386)
3235
3236	ENTRY_NP(vpanic)			/ Initial stack layout:
3237
3238	pushl	%ebp				/ | %eip | 20
3239	movl	%esp, %ebp			/ | %ebp | 16
3240	pushl	%eax				/ | %eax | 12
3241	pushl	%ebx				/ | %ebx |  8
3242	pushl	%ecx				/ | %ecx |  4
3243	pushl	%edx				/ | %edx |  0
3244
3245	movl	%esp, %ebx			/ %ebx = current stack pointer
3246
3247	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
3248	pushl	%eax				/ push &panic_quiesce
3249	call	panic_trigger			/ %eax = panic_trigger()
3250	addl	$4, %esp			/ reset stack pointer
3251
3252vpanic_common:
3253	cmpl	$0, %eax			/ if (%eax == 0)
3254	je	0f				/   goto 0f;
3255
3256	/*
3257	 * If panic_trigger() was successful, we are the first to initiate a
3258	 * panic: we now switch to the reserved panic_stack before continuing.
3259	 */
3260	lea	panic_stack, %esp		/ %esp  = panic_stack
3261	addl	$PANICSTKSIZE, %esp		/ %esp += PANICSTKSIZE
3262
32630:	subl	$REGSIZE, %esp			/ allocate struct regs
3264
3265	/*
3266	 * Now that we've got everything set up, store the register values as
3267	 * they were when we entered vpanic() to the designated location in
3268	 * the regs structure we allocated on the stack.
3269	 */
3270#if !defined(__GNUC_AS__)
3271	movw	%gs, %edx
3272	movl	%edx, REGOFF_GS(%esp)
3273	movw	%fs, %edx
3274	movl	%edx, REGOFF_FS(%esp)
3275	movw	%es, %edx
3276	movl	%edx, REGOFF_ES(%esp)
3277	movw	%ds, %edx
3278	movl	%edx, REGOFF_DS(%esp)
3279#else	/* __GNUC_AS__ */
3280	mov	%gs, %edx
3281	mov	%edx, REGOFF_GS(%esp)
3282	mov	%fs, %edx
3283	mov	%edx, REGOFF_FS(%esp)
3284	mov	%es, %edx
3285	mov	%edx, REGOFF_ES(%esp)
3286	mov	%ds, %edx
3287	mov	%edx, REGOFF_DS(%esp)
3288#endif	/* __GNUC_AS__ */
3289	movl	%edi, REGOFF_EDI(%esp)
3290	movl	%esi, REGOFF_ESI(%esp)
3291	movl	16(%ebx), %ecx
3292	movl	%ecx, REGOFF_EBP(%esp)
3293	movl	%ebx, %ecx
3294	addl	$20, %ecx
3295	movl	%ecx, REGOFF_ESP(%esp)
3296	movl	8(%ebx), %ecx
3297	movl	%ecx, REGOFF_EBX(%esp)
3298	movl	0(%ebx), %ecx
3299	movl	%ecx, REGOFF_EDX(%esp)
3300	movl	4(%ebx), %ecx
3301	movl	%ecx, REGOFF_ECX(%esp)
3302	movl	12(%ebx), %ecx
3303	movl	%ecx, REGOFF_EAX(%esp)
3304	movl	$0, REGOFF_TRAPNO(%esp)
3305	movl	$0, REGOFF_ERR(%esp)
3306	lea	vpanic, %ecx
3307	movl	%ecx, REGOFF_EIP(%esp)
3308#if !defined(__GNUC_AS__)
3309	movw	%cs, %edx
3310#else	/* __GNUC_AS__ */
3311	mov	%cs, %edx
3312#endif	/* __GNUC_AS__ */
3313	movl	%edx, REGOFF_CS(%esp)
3314	pushfl
3315	popl	%ecx
3316#if defined(__xpv)
3317	/*
3318	 * Synthesize the PS_IE bit from the event mask bit
3319	 */
3320	CURTHREAD(%edx)
3321	KPREEMPT_DISABLE(%edx)
3322	EVENT_MASK_TO_IE(%edx, %ecx)
3323	CURTHREAD(%edx)
3324	KPREEMPT_ENABLE_NOKP(%edx)
3325#endif
3326	movl	%ecx, REGOFF_EFL(%esp)
3327	movl	$0, REGOFF_UESP(%esp)
3328#if !defined(__GNUC_AS__)
3329	movw	%ss, %edx
3330#else	/* __GNUC_AS__ */
3331	mov	%ss, %edx
3332#endif	/* __GNUC_AS__ */
3333	movl	%edx, REGOFF_SS(%esp)
3334
3335	movl	%esp, %ecx			/ %ecx = &regs
3336	pushl	%eax				/ push on_panic_stack
3337	pushl	%ecx				/ push &regs
3338	movl	12(%ebp), %ecx			/ %ecx = alist
3339	pushl	%ecx				/ push alist
3340	movl	8(%ebp), %ecx			/ %ecx = format
3341	pushl	%ecx				/ push format
3342	call	panicsys			/ panicsys();
3343	addl	$16, %esp			/ pop arguments
3344
3345	addl	$REGSIZE, %esp
3346	popl	%edx
3347	popl	%ecx
3348	popl	%ebx
3349	popl	%eax
3350	leave
3351	ret
3352	SET_SIZE(vpanic)
3353
3354	ENTRY_NP(dtrace_vpanic)			/ Initial stack layout:
3355
3356	pushl	%ebp				/ | %eip | 20
3357	movl	%esp, %ebp			/ | %ebp | 16
3358	pushl	%eax				/ | %eax | 12
3359	pushl	%ebx				/ | %ebx |  8
3360	pushl	%ecx				/ | %ecx |  4
3361	pushl	%edx				/ | %edx |  0
3362
3363	movl	%esp, %ebx			/ %ebx = current stack pointer
3364
3365	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
3366	pushl	%eax				/ push &panic_quiesce
3367	call	dtrace_panic_trigger		/ %eax = dtrace_panic_trigger()
3368	addl	$4, %esp			/ reset stack pointer
3369	jmp	vpanic_common			/ jump back to common code
3370
3371	SET_SIZE(dtrace_vpanic)
3372
3373#endif	/* __i386 */
3374#endif	/* __lint */
3375
3376#if defined(__lint)
3377
3378void
3379hres_tick(void)
3380{}
3381
3382int64_t timedelta;
3383hrtime_t hres_last_tick;
3384volatile timestruc_t hrestime;
3385int64_t hrestime_adj;
3386volatile int hres_lock;
3387hrtime_t hrtime_base;
3388
3389#else	/* __lint */
3390
3391	DGDEF3(hrestime, _MUL(2, CLONGSIZE), 8)
3392	.NWORD	0, 0
3393
3394	DGDEF3(hrestime_adj, 8, 8)
3395	.long	0, 0
3396
3397	DGDEF3(hres_last_tick, 8, 8)
3398	.long	0, 0
3399
3400	DGDEF3(timedelta, 8, 8)
3401	.long	0, 0
3402
3403	DGDEF3(hres_lock, 4, 8)
3404	.long	0
3405
3406	/*
3407	 * initialized to a nonzero value to make pc_gethrtime()
3408	 * work correctly even before the clock is initialized
3409	 */
3410	DGDEF3(hrtime_base, 8, 8)
3411	.long	_MUL(NSEC_PER_CLOCK_TICK, 6), 0
3412
3413	DGDEF3(adj_shift, 4, 4)
3414	.long	ADJ_SHIFT
3415
3416#if defined(__amd64)
3417
3418	ENTRY_NP(hres_tick)
3419	pushq	%rbp
3420	movq	%rsp, %rbp
3421
3422	/*
3423	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3424	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
3425	 * At worst, performing this now instead of under CLOCK_LOCK may
3426	 * introduce some jitter in pc_gethrestime().
3427	 */
3428	call	*gethrtimef(%rip)
3429	movq	%rax, %r8
3430
3431	leaq	hres_lock(%rip), %rax
3432	movb	$-1, %dl
3433.CL1:
3434	xchgb	%dl, (%rax)
3435	testb	%dl, %dl
3436	jz	.CL3			/* got it */
3437.CL2:
3438	cmpb	$0, (%rax)		/* possible to get lock? */
3439	pause
3440	jne	.CL2
3441	jmp	.CL1			/* yes, try again */
3442.CL3:
3443	/*
3444	 * compute the interval since last time hres_tick was called
3445	 * and adjust hrtime_base and hrestime accordingly
3446	 * hrtime_base is an 8 byte value (in nsec), hrestime is
3447	 * a timestruc_t (sec, nsec)
3448	 */
3449	leaq	hres_last_tick(%rip), %rax
3450	movq	%r8, %r11
3451	subq	(%rax), %r8
3452	addq	%r8, hrtime_base(%rip)	/* add interval to hrtime_base */
3453	addq	%r8, hrestime+8(%rip)	/* add interval to hrestime.tv_nsec */
3454	/*
3455	 * Now that we have CLOCK_LOCK, we can update hres_last_tick
3456	 */
3457	movq	%r11, (%rax)
3458
3459	call	__adj_hrestime
3460
3461	/*
3462	 * release the hres_lock
3463	 */
3464	incl	hres_lock(%rip)
3465	leave
3466	ret
3467	SET_SIZE(hres_tick)
3468
3469#elif defined(__i386)
3470
3471	ENTRY_NP(hres_tick)
3472	pushl	%ebp
3473	movl	%esp, %ebp
3474	pushl	%esi
3475	pushl	%ebx
3476
3477	/*
3478	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3479	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
3480	 * At worst, performing this now instead of under CLOCK_LOCK may
3481	 * introduce some jitter in pc_gethrestime().
3482	 */
3483	call	*gethrtimef
3484	movl	%eax, %ebx
3485	movl	%edx, %esi
3486
3487	movl	$hres_lock, %eax
3488	movl	$-1, %edx
3489.CL1:
3490	xchgb	%dl, (%eax)
3491	testb	%dl, %dl
3492	jz	.CL3			/ got it
3493.CL2:
3494	cmpb	$0, (%eax)		/ possible to get lock?
3495	pause
3496	jne	.CL2
3497	jmp	.CL1			/ yes, try again
3498.CL3:
3499	/*
3500	 * compute the interval since last time hres_tick was called
3501	 * and adjust hrtime_base and hrestime accordingly
3502	 * hrtime_base is an 8 byte value (in nsec), hrestime is
3503	 * timestruc_t (sec, nsec)
3504	 */
3505
3506	lea	hres_last_tick, %eax
3507
3508	movl	%ebx, %edx
3509	movl	%esi, %ecx
3510
3511	subl 	(%eax), %edx
3512	sbbl 	4(%eax), %ecx
3513
3514	addl	%edx, hrtime_base	/ add interval to hrtime_base
3515	adcl	%ecx, hrtime_base+4
3516
3517	addl 	%edx, hrestime+4	/ add interval to hrestime.tv_nsec
3518
3519	/
3520	/ Now that we have CLOCK_LOCK, we can update hres_last_tick.
3521	/
3522	movl	%ebx, (%eax)
3523	movl	%esi,  4(%eax)
3524
3525	/ get hrestime at this moment. used as base for pc_gethrestime
3526	/
3527	/ Apply adjustment, if any
3528	/
3529	/ #define HRES_ADJ	(NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
3530	/ (max_hres_adj)
3531	/
3532	/ void
3533	/ adj_hrestime()
3534	/ {
3535	/	long long adj;
3536	/
3537	/	if (hrestime_adj == 0)
3538	/		adj = 0;
3539	/	else if (hrestime_adj > 0) {
3540	/		if (hrestime_adj < HRES_ADJ)
3541	/			adj = hrestime_adj;
3542	/		else
3543	/			adj = HRES_ADJ;
3544	/	}
3545	/	else {
3546	/		if (hrestime_adj < -(HRES_ADJ))
3547	/			adj = -(HRES_ADJ);
3548	/		else
3549	/			adj = hrestime_adj;
3550	/	}
3551	/
3552	/	timedelta -= adj;
3553	/	hrestime_adj = timedelta;
3554	/	hrestime.tv_nsec += adj;
3555	/
3556	/	while (hrestime.tv_nsec >= NANOSEC) {
3557	/		one_sec++;
3558	/		hrestime.tv_sec++;
3559	/		hrestime.tv_nsec -= NANOSEC;
3560	/	}
3561	/ }
3562__adj_hrestime:
3563	movl	hrestime_adj, %esi	/ if (hrestime_adj == 0)
3564	movl	hrestime_adj+4, %edx
3565	andl	%esi, %esi
3566	jne	.CL4			/ no
3567	andl	%edx, %edx
3568	jne	.CL4			/ no
3569	subl	%ecx, %ecx		/ yes, adj = 0;
3570	subl	%edx, %edx
3571	jmp	.CL5
3572.CL4:
3573	subl	%ecx, %ecx
3574	subl	%eax, %eax
3575	subl	%esi, %ecx
3576	sbbl	%edx, %eax
3577	andl	%eax, %eax		/ if (hrestime_adj > 0)
3578	jge	.CL6
3579
3580	/ In the following comments, HRES_ADJ is used, while in the code
3581	/ max_hres_adj is used.
3582	/
3583	/ The test for "hrestime_adj < HRES_ADJ" is complicated because
3584	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
3585	/ on the logical equivalence of:
3586	/
3587	/	!(hrestime_adj < HRES_ADJ)
3588	/
3589	/ and the two step sequence:
3590	/
3591	/	(HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
3592	/
3593	/ which computes whether or not the least significant 32-bits
3594	/ of hrestime_adj is greater than HRES_ADJ, followed by:
3595	/
3596	/	Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
3597	/
3598	/ which generates a carry whenever step 1 is true or the most
3599	/ significant long of the longlong hrestime_adj is non-zero.
3600
3601	movl	max_hres_adj, %ecx	/ hrestime_adj is positive
3602	subl	%esi, %ecx
3603	movl	%edx, %eax
3604	adcl	$-1, %eax
3605	jnc	.CL7
3606	movl	max_hres_adj, %ecx	/ adj = HRES_ADJ;
3607	subl	%edx, %edx
3608	jmp	.CL5
3609
3610	/ The following computation is similar to the one above.
3611	/
3612	/ The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
3613	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
3614	/ on the logical equivalence of:
3615	/
3616	/	(hrestime_adj > -HRES_ADJ)
3617	/
3618	/ and the two step sequence:
3619	/
3620	/	(HRES_ADJ + lsw(hrestime_adj)) generates a Carry
3621	/
3622	/ which means the least significant 32-bits of hrestime_adj is
3623	/ greater than -HRES_ADJ, followed by:
3624	/
3625	/	Previous Carry + 0 + msw(hrestime_adj) generates a Carry
3626	/
3627	/ which generates a carry only when step 1 is true and the most
3628	/ significant long of the longlong hrestime_adj is -1.
3629
3630.CL6:					/ hrestime_adj is negative
3631	movl	%esi, %ecx
3632	addl	max_hres_adj, %ecx
3633	movl	%edx, %eax
3634	adcl	$0, %eax
3635	jc	.CL7
3636	xor	%ecx, %ecx
3637	subl	max_hres_adj, %ecx	/ adj = -(HRES_ADJ);
3638	movl	$-1, %edx
3639	jmp	.CL5
3640.CL7:
3641	movl	%esi, %ecx		/ adj = hrestime_adj;
3642.CL5:
3643	movl	timedelta, %esi
3644	subl	%ecx, %esi
3645	movl	timedelta+4, %eax
3646	sbbl	%edx, %eax
3647	movl	%esi, timedelta
3648	movl	%eax, timedelta+4	/ timedelta -= adj;
3649	movl	%esi, hrestime_adj
3650	movl	%eax, hrestime_adj+4	/ hrestime_adj = timedelta;
3651	addl	hrestime+4, %ecx
3652
3653	movl	%ecx, %eax		/ eax = tv_nsec
36541:
3655	cmpl	$NANOSEC, %eax		/ if ((unsigned long)tv_nsec >= NANOSEC)
3656	jb	.CL8			/ no
3657	incl	one_sec			/ yes,  one_sec++;
3658	incl	hrestime		/ hrestime.tv_sec++;
3659	addl	$-NANOSEC, %eax		/ tv_nsec -= NANOSEC
3660	jmp	1b			/ check for more seconds
3661
3662.CL8:
3663	movl	%eax, hrestime+4	/ store final into hrestime.tv_nsec
3664	incl	hres_lock		/ release the hres_lock
3665
3666	popl	%ebx
3667	popl	%esi
3668	leave
3669	ret
3670	SET_SIZE(hres_tick)
3671
3672#endif	/* __i386 */
3673#endif	/* __lint */
3674
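/*
 * In C terms, hres_tick() above is roughly (illustrative sketch only; the
 * CLOCK_LOCK spin and the 64-bit arithmetic are open-coded above):
 *
 *	hrtime_t now = (*gethrtimef)();
 *
 *	acquire hres_lock (byte xchg spin loop);
 *	hrtime_base += now - hres_last_tick;
 *	hrestime.tv_nsec += now - hres_last_tick;
 *	hres_last_tick = now;
 *	apply the timedelta adjustment and normalize hrestime.tv_nsec
 *	    (see the adj_hrestime() pseudocode above);
 *	release hres_lock by incrementing it;
 */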
3675/*
3676 * void prefetch_smap_w(void *)
3677 *
3678 * Prefetch ahead within a linear list of smap structures.
3679 * Not implemented for ia32.  Stub for compatibility.
3680 */
3681
3682#if defined(__lint)
3683
3684/*ARGSUSED*/
3685void prefetch_smap_w(void *smp)
3686{}
3687
3688#else	/* __lint */
3689
3690	ENTRY(prefetch_smap_w)
3691	rep;	ret	/* use 2 byte return instruction when branch target */
3692			/* AMD Software Optimization Guide - Section 6.2 */
3693	SET_SIZE(prefetch_smap_w)
3694
3695#endif	/* __lint */
3696
3697/*
3698 * prefetch_page_r(page_t *)
3699 * issue prefetch instructions for a page_t
3700 */
3701#if defined(__lint)
3702
3703/*ARGSUSED*/
3704void
3705prefetch_page_r(void *pp)
3706{}
3707
3708#else	/* __lint */
3709
3710	ENTRY(prefetch_page_r)
3711	rep;	ret	/* use 2 byte return instruction when branch target */
3712			/* AMD Software Optimization Guide - Section 6.2 */
3713	SET_SIZE(prefetch_page_r)
3714
3715#endif	/* __lint */
3716
3717#if defined(__lint)
3718
3719/*ARGSUSED*/
3720int
3721bcmp(const void *s1, const void *s2, size_t count)
3722{ return (0); }
3723
3724#else   /* __lint */
3725
3726#if defined(__amd64)
3727
3728	ENTRY(bcmp)
3729	pushq	%rbp
3730	movq	%rsp, %rbp
3731#ifdef DEBUG
3732	movq	postbootkernelbase(%rip), %r11
3733	cmpq	%r11, %rdi
3734	jb	0f
3735	cmpq	%r11, %rsi
3736	jnb	1f
37370:	leaq	.bcmp_panic_msg(%rip), %rdi
3738	xorl	%eax, %eax
3739	call	panic
37401:
3741#endif	/* DEBUG */
3742	call	memcmp
3743	testl	%eax, %eax
3744	setne	%dl
3745	leave
3746	movzbl	%dl, %eax
3747	ret
3748	SET_SIZE(bcmp)
3749
3750#elif defined(__i386)
3751
3752#define	ARG_S1		8
3753#define	ARG_S2		12
3754#define	ARG_LENGTH	16
3755
3756	ENTRY(bcmp)
3757	pushl	%ebp
3758	movl	%esp, %ebp	/ create new stack frame
3759#ifdef DEBUG
3760	movl    postbootkernelbase, %eax
3761	cmpl    %eax, ARG_S1(%ebp)
3762	jb	0f
3763	cmpl    %eax, ARG_S2(%ebp)
3764	jnb	1f
37650:	pushl   $.bcmp_panic_msg
3766	call    panic
37671:
3768#endif	/* DEBUG */
3769
3770	pushl	%edi		/ save register variable
3771	movl	ARG_S1(%ebp), %eax	/ %eax = address of string 1
3772	movl	ARG_S2(%ebp), %ecx	/ %ecx = address of string 2
3773	cmpl	%eax, %ecx	/ if the same string
3774	je	.equal		/ goto .equal
3775	movl	ARG_LENGTH(%ebp), %edi	/ %edi = length in bytes
3776	cmpl	$4, %edi	/ if %edi < 4
3777	jb	.byte_check	/ goto .byte_check
3778	.align	4
3779.word_loop:
3780	movl	(%ecx), %edx	/ move 1 word from (%ecx) to %edx
3781	leal	-4(%edi), %edi	/ %edi -= 4
3782	cmpl	(%eax), %edx	/ compare 1 word from (%eax) with %edx
3783	jne	.word_not_equal	/ if not equal, goto .word_not_equal
3784	leal	4(%ecx), %ecx	/ %ecx += 4 (next word)
3785	leal	4(%eax), %eax	/ %eax += 4 (next word)
3786	cmpl	$4, %edi	/ if %edi >= 4
3787	jae	.word_loop	/ goto .word_loop
3788.byte_check:
3789	cmpl	$0, %edi	/ if %edi == 0
3790	je	.equal		/ goto .equal
3791	jmp	.byte_loop	/ goto .byte_loop (checks in bytes)
3792.word_not_equal:
3793	leal	4(%edi), %edi	/ %edi += 4 (undo the word-loop decrement)
3794	.align	4
3795.byte_loop:
3796	movb	(%ecx),	%dl	/ move 1 byte from (%ecx) to %dl
3797	cmpb	%dl, (%eax)	/ compare %dl with 1 byte from (%eax)
3798	jne	.not_equal	/ if not equal, goto .not_equal
3799	incl	%ecx		/ %ecx++ (next byte)
3800	incl	%eax		/ %eax++ (next byte)
3801	decl	%edi		/ %edi--
3802	jnz	.byte_loop	/ if not zero, goto .byte_loop
3803.equal:
3804	xorl	%eax, %eax	/ %eax = 0
3805	popl	%edi		/ restore register variable
3806	leave			/ restore old stack frame
3807	ret			/ return (0)
3808	.align	4
3809.not_equal:
3810	movl	$1, %eax	/ return 1
3811	popl	%edi		/ restore register variable
3812	leave			/ restore old stack frame
3813	ret			/ return (1)
3814	SET_SIZE(bcmp)
3815
3816#endif	/* __i386 */
3817
3818#ifdef DEBUG
3819	.text
3820.bcmp_panic_msg:
3821	.string "bcmp: arguments below kernelbase"
3822#endif	/* DEBUG */
3823
3824#endif	/* __lint */
3825
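/*
 * On amd64, bcmp() above just normalizes the memcmp() result; in C terms,
 * roughly (illustrative sketch only):
 *
 *	int
 *	bcmp(const void *s1, const void *s2, size_t count)
 *	{
 *		return (memcmp(s1, s2, count) != 0);
 *	}
 *
 * The i386 version open-codes the word and byte compare loops but returns
 * the same 0 (equal) / 1 (not equal) result.
 */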
3826#if defined(__lint)
3827
3828uint_t
3829bsrw_insn(uint16_t mask)
3830{
3831	uint_t index = sizeof (mask) * NBBY - 1;
3832
3833	while ((mask & (1 << index)) == 0)
3834		index--;
3835	return (index);
3836}
3837
3838#else	/* __lint */
3839
3840#if defined(__amd64)
3841
3842	ENTRY_NP(bsrw_insn)
3843	xorl	%eax, %eax
3844	bsrw	%di, %ax
3845	ret
3846	SET_SIZE(bsrw_insn)
3847
3848#elif defined(__i386)
3849
3850	ENTRY_NP(bsrw_insn)
3851	movw	4(%esp), %cx
3852	xorl	%eax, %eax
3853	bsrw	%cx, %ax
3854	ret
3855	SET_SIZE(bsrw_insn)
3856
3857#endif	/* __i386 */
3858#endif	/* __lint */
3859
3860#if defined(__lint)
3861
3862uint_t
3863atomic_btr32(uint32_t *pending, uint_t pil)
3864{
3865	return (*pending &= ~(1 << pil));
3866}
3867
3868#else	/* __lint */
3869
3870#if defined(__i386)
3871
3872	ENTRY_NP(atomic_btr32)
3873	movl	4(%esp), %ecx
3874	movl	8(%esp), %edx
3875	xorl	%eax, %eax
3876	lock
3877	btrl	%edx, (%ecx)
3878	setc	%al
3879	ret
3880	SET_SIZE(atomic_btr32)
3881
3882#endif	/* __i386 */
3883#endif	/* __lint */
3884
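/*
 * The assembly atomic_btr32() above returns whether the bit was set before
 * it was cleared (the carry out of btrl); in C terms, roughly (illustrative
 * sketch only, with the two steps performed atomically by lock btrl):
 *
 *	uint_t was_set = *pending & (1 << pil);
 *	*pending &= ~(1 << pil);
 *	return (was_set != 0);
 */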
3885#if defined(__lint)
3886
3887/*ARGSUSED*/
3888void
3889switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
3890	    uint_t arg2)
3891{}
3892
3893#else	/* __lint */
3894
3895#if defined(__amd64)
3896
3897	ENTRY_NP(switch_sp_and_call)
3898	pushq	%rbp
3899	movq	%rsp, %rbp		/* set up stack frame */
3900	movq	%rdi, %rsp		/* switch stack pointer */
3901	movq	%rdx, %rdi		/* pass func arg 1 */
3902	movq	%rsi, %r11		/* save function to call */
3903	movq	%rcx, %rsi		/* pass func arg 2 */
3904	call	*%r11			/* call function */
3905	leave				/* restore stack */
3906	ret
3907	SET_SIZE(switch_sp_and_call)
3908
3909#elif defined(__i386)
3910
3911	ENTRY_NP(switch_sp_and_call)
3912	pushl	%ebp
3913	mov	%esp, %ebp		/* set up stack frame */
3914	movl	8(%ebp), %esp		/* switch stack pointer */
3915	pushl	20(%ebp)		/* push func arg 2 */
3916	pushl	16(%ebp)		/* push func arg 1 */
3917	call	*12(%ebp)		/* call function */
3918	addl	$8, %esp		/* pop arguments */
3919	leave				/* restore stack */
3920	ret
3921	SET_SIZE(switch_sp_and_call)
3922
3923#endif	/* __i386 */
3924#endif	/* __lint */
3925
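/*
 * switch_sp_and_call() above runs func(arg1, arg2) on the supplied stack
 * and returns on the original one; in rough pseudocode (a stack switch
 * cannot be expressed in portable C):
 *
 *	save the current frame;
 *	%sp = newsp;
 *	(*func)(arg1, arg2);
 *	restore the original frame and return;
 */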
3926#if defined(__lint)
3927
3928void
3929kmdb_enter(void)
3930{}
3931
3932#else	/* __lint */
3933
3934#if defined(__amd64)
3935
3936	ENTRY_NP(kmdb_enter)
3937	pushq	%rbp
3938	movq	%rsp, %rbp
3939
3940	/*
3941	 * Save flags, do a 'cli' then return the saved flags
3942	 */
3943	call	intr_clear
3944
3945	int	$T_DBGENTR
3946
3947	/*
3948	 * Restore the saved flags
3949	 */
3950	movq	%rax, %rdi
3951	call	intr_restore
3952
3953	leave
3954	ret
3955	SET_SIZE(kmdb_enter)
3956
3957#elif defined(__i386)
3958
3959	ENTRY_NP(kmdb_enter)
3960	pushl	%ebp
3961	movl	%esp, %ebp
3962
3963	/*
3964	 * Save flags, do a 'cli' then return the saved flags
3965	 */
3966	call	intr_clear
3967
3968	int	$T_DBGENTR
3969
3970	/*
3971	 * Restore the saved flags
3972	 */
3973	pushl	%eax
3974	call	intr_restore
3975	addl	$4, %esp
3976
3977	leave
3978	ret
3979	SET_SIZE(kmdb_enter)
3980
3981#endif	/* __i386 */
3982#endif	/* __lint */
3983
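/*
 * In C terms, kmdb_enter() above is roughly (illustrative sketch only; the
 * debugger entry itself is the int $T_DBGENTR trap):
 *
 *	ulong_t flags = intr_clear();
 *	trap into the debugger via T_DBGENTR;
 *	intr_restore(flags);
 */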
3984#if defined(__lint)
3985
3986void
3987return_instr(void)
3988{}
3989
3990#else	/* __lint */
3991
3992	ENTRY_NP(return_instr)
3993	rep;	ret	/* use 2 byte instruction when branch target */
3994			/* AMD Software Optimization Guide - Section 6.2 */
3995	SET_SIZE(return_instr)
3996
3997#endif	/* __lint */
3998
3999#if defined(__lint)
4000
4001ulong_t
4002getflags(void)
4003{
4004	return (0);
4005}
4006
4007#else	/* __lint */
4008
4009#if defined(__amd64)
4010
4011	ENTRY(getflags)
4012	pushfq
4013	popq	%rax
4014#if defined(__xpv)
4015	CURTHREAD(%rdi)
4016	KPREEMPT_DISABLE(%rdi)
4017	/*
4018	 * Synthesize the PS_IE bit from the event mask bit
4019	 */
4020	CURVCPU(%r11)
4021	andq    $_BITNOT(PS_IE), %rax
4022	XEN_TEST_UPCALL_MASK(%r11)
4023	jnz	1f
4024	orq	$PS_IE, %rax
40251:
4026	KPREEMPT_ENABLE_NOKP(%rdi)
4027#endif
4028	ret
4029	SET_SIZE(getflags)
4030
4031#elif defined(__i386)
4032
4033	ENTRY(getflags)
4034	pushfl
4035	popl	%eax
4036#if defined(__xpv)
4037	CURTHREAD(%ecx)
4038	KPREEMPT_DISABLE(%ecx)
4039	/*
4040	 * Synthesize the PS_IE bit from the event mask bit
4041	 */
4042	CURVCPU(%edx)
4043	andl    $_BITNOT(PS_IE), %eax
4044	XEN_TEST_UPCALL_MASK(%edx)
4045	jnz	1f
4046	orl	$PS_IE, %eax
40471:
4048	KPREEMPT_ENABLE_NOKP(%ecx)
4049#endif
4050	ret
4051	SET_SIZE(getflags)
4052
4053#endif	/* __i386 */
4054
4055#endif	/* __lint */
4056
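/*
 * Under __xpv, getflags() above synthesizes PS_IE from the vcpu event mask
 * rather than trusting the hardware flags; in C terms, roughly
 * (illustrative sketch only):
 *
 *	flags = <flags register>;
 *	flags &= ~PS_IE;
 *	if (upcalls are not masked for the current vcpu)
 *		flags |= PS_IE;
 *	return (flags);
 */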
4057#if defined(__lint)
4058
4059ftrace_icookie_t
4060ftrace_interrupt_disable(void)
4061{ return (0); }
4062
4063#else   /* __lint */
4064
4065#if defined(__amd64)
4066
4067	ENTRY(ftrace_interrupt_disable)
4068	pushfq
4069	popq	%rax
4070	CLI(%rdx)
4071	ret
4072	SET_SIZE(ftrace_interrupt_disable)
4073
4074#elif defined(__i386)
4075
4076	ENTRY(ftrace_interrupt_disable)
4077	pushfl
4078	popl	%eax
4079	CLI(%edx)
4080	ret
4081	SET_SIZE(ftrace_interrupt_disable)
4082
4083#endif	/* __i386 */
4084#endif	/* __lint */
4085
4086#if defined(__lint)
4087
4088/*ARGSUSED*/
4089void
4090ftrace_interrupt_enable(ftrace_icookie_t cookie)
4091{}
4092
4093#else	/* __lint */
4094
4095#if defined(__amd64)
4096
4097	ENTRY(ftrace_interrupt_enable)
4098	pushq	%rdi
4099	popfq
4100	ret
4101	SET_SIZE(ftrace_interrupt_enable)
4102
4103#elif defined(__i386)
4104
4105	ENTRY(ftrace_interrupt_enable)
4106	movl	4(%esp), %eax
4107	pushl	%eax
4108	popfl
4109	ret
4110	SET_SIZE(ftrace_interrupt_enable)
4111
4112#endif	/* __i386 */
4113#endif	/* __lint */
4114
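/*
 * ftrace_interrupt_disable() and ftrace_interrupt_enable() above form a
 * simple save/restore pair; typical usage is roughly (illustrative sketch
 * only):
 *
 *	ftrace_icookie_t cookie = ftrace_interrupt_disable();
 *	... work with interrupts disabled ...
 *	ftrace_interrupt_enable(cookie);
 */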