xref: /titanic_51/usr/src/uts/intel/ia32/ml/i86_subr.s (revision 48cd229b73530d4ff90bcaca099aec95e2126239)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26/*
27 *  Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
28 *  Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
29 *    All Rights Reserved
30 */
31
32/*
33 * Copyright (c) 2009, Intel Corporation.
34 * All rights reserved.
35 */
36
37/*
38 * General assembly language routines.
39 * It is the intent of this file to contain routines that are
40 * independent of the specific kernel architecture, and those that are
41 * common across kernel architectures.
42 * As architectures diverge, and implementations of specific
43 * architecture-dependent routines change, the routines should be moved
44 * from this file into the respective ../`arch -k`/subr.s file.
45 */
46
47#include <sys/asm_linkage.h>
48#include <sys/asm_misc.h>
49#include <sys/panic.h>
50#include <sys/ontrap.h>
51#include <sys/regset.h>
52#include <sys/privregs.h>
53#include <sys/reboot.h>
54#include <sys/psw.h>
55#include <sys/x86_archext.h>
56
57#if defined(__lint)
58#include <sys/types.h>
59#include <sys/systm.h>
60#include <sys/thread.h>
61#include <sys/archsystm.h>
62#include <sys/byteorder.h>
63#include <sys/dtrace.h>
64#include <sys/ftrace.h>
65#else	/* __lint */
66#include "assym.h"
67#endif	/* __lint */
68#include <sys/dditypes.h>
69
/*
 * on_fault()
 * Catch lofault faults.  Like setjmp except it returns 1
 * if the code that follows causes an uncorrectable fault.
 * Turned off by calling no_fault().
 */

#if defined(__lint)

/* ARGSUSED */
int
on_fault(label_t *ljb)
{ return (0); }

void
no_fault(void)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int on_fault(label_t *ljb)
	 *
	 * In:   %rdi = ljb, jump buffer to resume at after a fault.
	 * Out:  %eax = 0 on the initial return (via setjmp); a protected
	 *       fault longjmp()s back here with a nonzero value.
	 * Uses: %rsi, %rdx as scratch; tail-calls setjmp, adding no frame.
	 */
	ENTRY(on_fault)
	movq	%gs:CPU_THREAD, %rsi
	leaq	catch_fault(%rip), %rdx
	movq	%rdi, T_ONFAULT(%rsi)		/* jumpbuf in t_onfault */
	movq	%rdx, T_LOFAULT(%rsi)		/* catch_fault in t_lofault */
	jmp	setjmp				/* let setjmp do the rest */

catch_fault:
	/* Reached via t_lofault when a protected access faults. */
	movq	%gs:CPU_THREAD, %rsi
	movq	T_ONFAULT(%rsi), %rdi		/* address of save area */
	xorl	%eax, %eax
	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
	jmp	longjmp				/* let longjmp do the rest */
	SET_SIZE(on_fault)

	/*
	 * void no_fault(void)
	 * Disarm the handler installed by on_fault().
	 */
	ENTRY(no_fault)
	movq	%gs:CPU_THREAD, %rsi
	xorl	%eax, %eax
	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
	ret
	SET_SIZE(no_fault)

#elif defined(__i386)

	/*
	 * int on_fault(label_t *ljb) -- 32-bit variant; argument on stack.
	 */
	ENTRY(on_fault)
	movl	%gs:CPU_THREAD, %edx
	movl	4(%esp), %eax			/* jumpbuf address */
	leal	catch_fault, %ecx
	movl	%eax, T_ONFAULT(%edx)		/* jumpbuf in t_onfault */
	movl	%ecx, T_LOFAULT(%edx)		/* catch_fault in t_lofault */
	jmp	setjmp				/* let setjmp do the rest */

catch_fault:
	/* Reached via t_lofault when a protected access faults. */
	movl	%gs:CPU_THREAD, %edx
	xorl	%eax, %eax
	movl	T_ONFAULT(%edx), %ecx		/* address of save area */
	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
	pushl	%ecx				/* arg: jump buffer */
	call	longjmp				/* let longjmp do the rest */
	SET_SIZE(on_fault)

	/*
	 * void no_fault(void)
	 * Disarm the handler installed by on_fault().
	 */
	ENTRY(no_fault)
	movl	%gs:CPU_THREAD, %edx
	xorl	%eax, %eax
	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
	ret
	SET_SIZE(no_fault)

#endif	/* __i386 */
#endif	/* __lint */
146
/*
 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
 */

#if defined(lint)

void
on_trap_trampoline(void)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Tail-jump to longjmp(&curthread->t_ontrap->ot_jmpbuf).
	 * Clobbers %rsi, %rdi; never returns to its caller.
	 */
	ENTRY(on_trap_trampoline)
	movq	%gs:CPU_THREAD, %rsi
	movq	T_ONTRAP(%rsi), %rdi		/* %rdi = curthread->t_ontrap */
	addq	$OT_JMPBUF, %rdi		/* %rdi = &t_ontrap->ot_jmpbuf */
	jmp	longjmp
	SET_SIZE(on_trap_trampoline)

#elif defined(__i386)

	ENTRY(on_trap_trampoline)
	movl	%gs:CPU_THREAD, %eax
	movl	T_ONTRAP(%eax), %eax		/* %eax = curthread->t_ontrap */
	addl	$OT_JMPBUF, %eax		/* %eax = &t_ontrap->ot_jmpbuf */
	pushl	%eax				/* arg: jump buffer */
	call	longjmp				/* never returns */
	SET_SIZE(on_trap_trampoline)

#endif	/* __i386 */
#endif	/* __lint */
181
/*
 * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
 * more information about the on_trap() mechanism.  If the on_trap_data is the
 * same as the topmost stack element, we just modify that element.
 */
#if defined(lint)

/*ARGSUSED*/
int
on_trap(on_trap_data_t *otp, uint_t prot)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int on_trap(on_trap_data_t *otp, uint_t prot)
	 *
	 * In:   %rdi = otp, %esi = prot
	 * Out:  %eax = 0 on the initial (setjmp) return; nonzero when
	 *       re-entered through ot_trampoline after a trap.
	 * Uses: %rdx, %rcx as scratch; tail-calls setjmp.
	 */
	ENTRY(on_trap)
	movw	%si, OT_PROT(%rdi)		/* ot_prot = prot */
	movw	$0, OT_TRAP(%rdi)		/* ot_trap = 0 */
	leaq	on_trap_trampoline(%rip), %rdx	/* rdx = &on_trap_trampoline */
	movq	%rdx, OT_TRAMPOLINE(%rdi)	/* ot_trampoline = rdx */
	xorl	%ecx, %ecx
	movq	%rcx, OT_HANDLE(%rdi)		/* ot_handle = NULL */
	movq	%rcx, OT_PAD1(%rdi)		/* ot_pad1 = NULL */
	movq	%gs:CPU_THREAD, %rdx		/* rdx = curthread */
	movq	T_ONTRAP(%rdx), %rcx		/* rcx = curthread->t_ontrap */
	cmpq	%rdi, %rcx			/* if (otp == %rcx)	*/
	je	0f				/*	don't modify t_ontrap */

	movq	%rcx, OT_PREV(%rdi)		/* ot_prev = t_ontrap */
	movq	%rdi, T_ONTRAP(%rdx)		/* curthread->t_ontrap = otp */

0:	addq	$OT_JMPBUF, %rdi		/* &ot_jmpbuf */
	jmp	setjmp
	SET_SIZE(on_trap)

#elif defined(__i386)

	/*
	 * 32-bit variant: otp and prot arrive on the stack; the otp slot
	 * is overwritten with &ot_jmpbuf before tail-jumping to setjmp.
	 */
	ENTRY(on_trap)
	movl	4(%esp), %eax			/* %eax = otp */
	movl	8(%esp), %edx			/* %edx = prot */

	movw	%dx, OT_PROT(%eax)		/* ot_prot = prot */
	movw	$0, OT_TRAP(%eax)		/* ot_trap = 0 */
	leal	on_trap_trampoline, %edx	/* %edx = &on_trap_trampoline */
	movl	%edx, OT_TRAMPOLINE(%eax)	/* ot_trampoline = %edx */
	movl	$0, OT_HANDLE(%eax)		/* ot_handle = NULL */
	movl	$0, OT_PAD1(%eax)		/* ot_pad1 = NULL */
	movl	%gs:CPU_THREAD, %edx		/* %edx = curthread */
	movl	T_ONTRAP(%edx), %ecx		/* %ecx = curthread->t_ontrap */
	cmpl	%eax, %ecx			/* if (otp == %ecx) */
	je	0f				/*    don't modify t_ontrap */

	movl	%ecx, OT_PREV(%eax)		/* ot_prev = t_ontrap */
	movl	%eax, T_ONTRAP(%edx)		/* curthread->t_ontrap = otp */

0:	addl	$OT_JMPBUF, %eax		/* %eax = &ot_jmpbuf */
	movl	%eax, 4(%esp)			/* put %eax back on the stack */
	jmp	setjmp				/* let setjmp do the rest */
	SET_SIZE(on_trap)

#endif	/* __i386 */
#endif	/* __lint */
245
/*
 * Setjmp and longjmp implement non-local gotos using state vectors
 * of type label_t.  setjmp() captures the callee-saved register set,
 * stack pointer and return address into *lp and returns 0; longjmp()
 * restores that state and "returns" 1 from the original setjmp() site.
 */

#if defined(__lint)

/* ARGSUSED */
int
setjmp(label_t *lp)
{ return (0); }

/* ARGSUSED */
void
longjmp(label_t *lp)
{}

#else	/* __lint */

#if LABEL_PC != 0
#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
#endif	/* LABEL_PC != 0 */

#if defined(__amd64)

	/*
	 * int setjmp(label_t *lp)
	 * In:  %rdi = lp.  Out: %eax = 0.
	 * Saves %rsp, %rbp and the SysV callee-saved registers plus the
	 * caller's return address (still at (%rsp), as we have no frame).
	 */
	ENTRY(setjmp)
	movq	%rsp, LABEL_SP(%rdi)
	movq	%rbp, LABEL_RBP(%rdi)
	movq	%rbx, LABEL_RBX(%rdi)
	movq	%r12, LABEL_R12(%rdi)
	movq	%r13, LABEL_R13(%rdi)
	movq	%r14, LABEL_R14(%rdi)
	movq	%r15, LABEL_R15(%rdi)
	movq	(%rsp), %rdx		/* return address */
	movq	%rdx, (%rdi)		/* LABEL_PC is 0 */
	xorl	%eax, %eax		/* return 0 */
	ret
	SET_SIZE(setjmp)

	/*
	 * void longjmp(label_t *lp)
	 * In:  %rdi = lp.  Restores the saved register state, replants
	 * the saved PC as the return address, and returns 1 to the
	 * original setjmp() caller.  Never returns to its own caller.
	 */
	ENTRY(longjmp)
	movq	LABEL_SP(%rdi), %rsp
	movq	LABEL_RBP(%rdi), %rbp
	movq	LABEL_RBX(%rdi), %rbx
	movq	LABEL_R12(%rdi), %r12
	movq	LABEL_R13(%rdi), %r13
	movq	LABEL_R14(%rdi), %r14
	movq	LABEL_R15(%rdi), %r15
	movq	(%rdi), %rdx		/* return address; LABEL_PC is 0 */
	movq	%rdx, (%rsp)
	xorl	%eax, %eax
	incl	%eax			/* return 1 */
	ret
	SET_SIZE(longjmp)

#elif defined(__i386)

	/*
	 * int setjmp(label_t *lp) -- 32-bit variant; lp on the stack.
	 * Saves %ebp/%ebx/%esi/%edi, %esp and the return address.
	 */
	ENTRY(setjmp)
	movl	4(%esp), %edx		/* address of save area */
	movl	%ebp, LABEL_EBP(%edx)
	movl	%ebx, LABEL_EBX(%edx)
	movl	%esi, LABEL_ESI(%edx)
	movl	%edi, LABEL_EDI(%edx)
	movl	%esp, 4(%edx)
	movl	(%esp), %ecx		/* %eip (return address) */
	movl	%ecx, (%edx)		/* LABEL_PC is 0 */
	xorl	%eax, %eax		/* return 0 (xor: zeroing idiom, */
					/* consistent with amd64 path)   */
	ret
	SET_SIZE(setjmp)

	/*
	 * void longjmp(label_t *lp) -- 32-bit variant.
	 * Restores saved state and jumps to the saved %eip with %eax = 1.
	 */
	ENTRY(longjmp)
	movl	4(%esp), %edx		/* address of save area */
	movl	LABEL_EBP(%edx), %ebp
	movl	LABEL_EBX(%edx), %ebx
	movl	LABEL_ESI(%edx), %esi
	movl	LABEL_EDI(%edx), %edi
	movl	4(%edx), %esp
	movl	(%edx), %ecx		/* %eip (return addr); LABEL_PC is 0 */
	movl	$1, %eax
	addl	$4, %esp		/* pop ret adr */
	jmp	*%ecx			/* indirect */
	SET_SIZE(longjmp)

#endif	/* __i386 */
#endif	/* __lint */
330
/*
 * if a() calls b() calls caller(),
 * caller() returns return address in a().
 * (Note: We assume a() and b() are C routines which do the normal entry/exit
 *  sequence, so that the caller's frame pointer chain is intact.)
 */

#if defined(__lint)

caddr_t
caller(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Leaf routine: b()'s saved return address sits just above b()'s
	 * saved frame pointer, i.e. at 8(%rbp) from inside caller().
	 */
	ENTRY(caller)
	movq	8(%rbp), %rax		/* b()'s return pc, in a() */
	ret
	SET_SIZE(caller)

#elif defined(__i386)

	ENTRY(caller)
	movl	4(%ebp), %eax		/* b()'s return pc, in a() */
	ret
	SET_SIZE(caller)

#endif	/* __i386 */
#endif	/* __lint */
362
/*
 * if a() calls callee(), callee() returns the
 * return address in a();
 */

#if defined(__lint)

caddr_t
callee(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Leaf routine with no frame: the caller's return address is
	 * exactly what sits at the top of the stack on entry.
	 */
	ENTRY(callee)
	movq	(%rsp), %rax		/* callee()'s return pc, in a() */
	ret
	SET_SIZE(callee)

#elif defined(__i386)

	ENTRY(callee)
	movl	(%esp), %eax		/* callee()'s return pc, in a() */
	ret
	SET_SIZE(callee)

#endif	/* __i386 */
#endif	/* __lint */
392
/*
 * return the current frame pointer
 */

#if defined(__lint)

greg_t
getfp(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(getfp)
	movq	%rbp, %rax		/* return %rbp (caller's frame) */
	ret
	SET_SIZE(getfp)

#elif defined(__i386)

	ENTRY(getfp)
	movl	%ebp, %eax		/* return %ebp (caller's frame) */
	ret
	SET_SIZE(getfp)

#endif	/* __i386 */
#endif	/* __lint */
421
/*
 * Invalidate a single page table entry in the TLB
 */

#if defined(__lint)

/* ARGSUSED */
void
mmu_tlbflush_entry(caddr_t m)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * In: %rdi = virtual address whose TLB entry is invalidated.
	 */
	ENTRY(mmu_tlbflush_entry)
	invlpg	(%rdi)
	ret
	SET_SIZE(mmu_tlbflush_entry)

#elif defined(__i386)

	ENTRY(mmu_tlbflush_entry)
	movl	4(%esp), %eax		/* virtual address argument */
	invlpg	(%eax)
	ret
	SET_SIZE(mmu_tlbflush_entry)

#endif	/* __i386 */
#endif	/* __lint */
452
453
/*
 * Get/Set the value of various control registers.
 *
 * All of these are privileged leaf routines: getters return the register
 * in %rax/%eax, setters take the new value as the first argument.
 * Under the Xen hypervisor (__xpv), %cr2 is read from the per-CPU
 * vcpu_info structure instead of the hardware register, and setcr3/
 * reload_cr3 are not provided here (paging is managed by the hypervisor).
 */

#if defined(__lint)

ulong_t
getcr0(void)
{ return (0); }

/* ARGSUSED */
void
setcr0(ulong_t value)
{}

ulong_t
getcr2(void)
{ return (0); }

ulong_t
getcr3(void)
{ return (0); }

#if !defined(__xpv)
/* ARGSUSED */
void
setcr3(ulong_t val)
{}

void
reload_cr3(void)
{}
#endif

ulong_t
getcr4(void)
{ return (0); }

/* ARGSUSED */
void
setcr4(ulong_t val)
{}

#if defined(__amd64)

ulong_t
getcr8(void)
{ return (0); }

/* ARGSUSED */
void
setcr8(ulong_t val)
{}

#endif	/* __amd64 */

#else	/* __lint */

#if defined(__amd64)

	ENTRY(getcr0)
	movq	%cr0, %rax
	ret
	SET_SIZE(getcr0)

	ENTRY(setcr0)
	movq	%rdi, %cr0
	ret
	SET_SIZE(setcr0)

	ENTRY(getcr2)
#if defined(__xpv)
	/* Xen: the fault address is published in the shared vcpu_info. */
	movq	%gs:CPU_VCPU_INFO, %rax
	movq	VCPU_INFO_ARCH_CR2(%rax), %rax
#else
	movq	%cr2, %rax
#endif
	ret
	SET_SIZE(getcr2)

	ENTRY(getcr3)
	movq	%cr3, %rax
	ret
	SET_SIZE(getcr3)

#if !defined(__xpv)

	ENTRY(setcr3)
	movq	%rdi, %cr3
	ret
	SET_SIZE(setcr3)

	/* Rewrite %cr3 with its current value (TLB flush side effect). */
	ENTRY(reload_cr3)
	movq	%cr3, %rdi
	movq	%rdi, %cr3
	ret
	SET_SIZE(reload_cr3)

#endif	/* __xpv */

	ENTRY(getcr4)
	movq	%cr4, %rax
	ret
	SET_SIZE(getcr4)

	ENTRY(setcr4)
	movq	%rdi, %cr4
	ret
	SET_SIZE(setcr4)

	ENTRY(getcr8)
	movq	%cr8, %rax
	ret
	SET_SIZE(getcr8)

	ENTRY(setcr8)
	movq	%rdi, %cr8
	ret
	SET_SIZE(setcr8)

#elif defined(__i386)

	ENTRY(getcr0)
	movl	%cr0, %eax
	ret
	SET_SIZE(getcr0)

	ENTRY(setcr0)
	movl	4(%esp), %eax
	movl	%eax, %cr0
	ret
	SET_SIZE(setcr0)

	/*
	 * "lock mov %cr0" is used on processors which indicate it is
	 * supported via CPUID: the lock prefix selects the alternate
	 * encoding that accesses %cr8 (the task priority register).
	 * Normally the 32 bit TPR is accessed via the local APIC.
	 */
	ENTRY(getcr8)
	lock
	movl	%cr0, %eax
	ret
	SET_SIZE(getcr8)

	ENTRY(setcr8)
	movl	4(%esp), %eax
	lock
	movl	%eax, %cr0
	ret
	SET_SIZE(setcr8)

	ENTRY(getcr2)
#if defined(__xpv)
	movl	%gs:CPU_VCPU_INFO, %eax
	movl	VCPU_INFO_ARCH_CR2(%eax), %eax
#else
	movl	%cr2, %eax
#endif
	ret
	SET_SIZE(getcr2)

	ENTRY(getcr3)
	movl	%cr3, %eax
	ret
	SET_SIZE(getcr3)

#if !defined(__xpv)

	ENTRY(setcr3)
	movl	4(%esp), %eax
	movl	%eax, %cr3
	ret
	SET_SIZE(setcr3)

	/* Rewrite %cr3 with its current value (TLB flush side effect). */
	ENTRY(reload_cr3)
	movl	%cr3, %eax
	movl	%eax, %cr3
	ret
	SET_SIZE(reload_cr3)

#endif	/* __xpv */

	ENTRY(getcr4)
	movl	%cr4, %eax
	ret
	SET_SIZE(getcr4)

	ENTRY(setcr4)
	movl	4(%esp), %eax
	movl	%eax, %cr4
	ret
	SET_SIZE(setcr4)

#endif	/* __i386 */
#endif	/* __lint */
649
#if defined(__lint)

/*ARGSUSED*/
uint32_t
__cpuid_insn(struct cpuid_regs *regs)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * uint32_t __cpuid_insn(struct cpuid_regs *regs)
	 *
	 * In:  %rdi = regs; cp_eax/cp_ebx/cp_ecx/cp_edx at offsets
	 *      0x0/0x4/0x8/0xc seed the cpuid inputs.
	 * Out: the same four fields are overwritten with the cpuid
	 *      results; %eax (cp_eax result) is the return value.
	 * %rbx is callee-saved but clobbered by cpuid, so it is
	 * preserved in %r8; %rcx/%rdx are stashed only so the caller's
	 * values can be restored after cpuid rewrites them.
	 */
	ENTRY(__cpuid_insn)
	movq	%rbx, %r8
	movq	%rcx, %r9
	movq	%rdx, %r11
	movl	(%rdi), %eax		/* %eax = regs->cp_eax */
	movl	0x4(%rdi), %ebx		/* %ebx = regs->cp_ebx */
	movl	0x8(%rdi), %ecx		/* %ecx = regs->cp_ecx */
	movl	0xc(%rdi), %edx		/* %edx = regs->cp_edx */
	cpuid
	movl	%eax, (%rdi)		/* regs->cp_eax = %eax */
	movl	%ebx, 0x4(%rdi)		/* regs->cp_ebx = %ebx */
	movl	%ecx, 0x8(%rdi)		/* regs->cp_ecx = %ecx */
	movl	%edx, 0xc(%rdi)		/* regs->cp_edx = %edx */
	movq	%r8, %rbx
	movq	%r9, %rcx
	movq	%r11, %rdx
	ret
	SET_SIZE(__cpuid_insn)

#elif defined(__i386)

	/*
	 * 32-bit variant: regs pointer is kept in %ebp (saved first);
	 * %ebx/%ecx/%edx are preserved on the stack around cpuid.
	 */
	ENTRY(__cpuid_insn)
	pushl	%ebp
	movl	0x8(%esp), %ebp		/* %ebp = regs */
	pushl	%ebx
	pushl	%ecx
	pushl	%edx
	movl	(%ebp), %eax		/* %eax = regs->cp_eax */
	movl	0x4(%ebp), %ebx		/* %ebx = regs->cp_ebx */
	movl	0x8(%ebp), %ecx		/* %ecx = regs->cp_ecx */
	movl	0xc(%ebp), %edx		/* %edx = regs->cp_edx */
	cpuid
	movl	%eax, (%ebp)		/* regs->cp_eax = %eax */
	movl	%ebx, 0x4(%ebp)		/* regs->cp_ebx = %ebx */
	movl	%ecx, 0x8(%ebp)		/* regs->cp_ecx = %ecx */
	movl	%edx, 0xc(%ebp)		/* regs->cp_edx = %edx */
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%ebp
	ret
	SET_SIZE(__cpuid_insn)

#endif	/* __i386 */
#endif	/* __lint */
706
#if defined(__lint)

/*ARGSUSED*/
void
i86_monitor(volatile uint32_t *addr, uint32_t extensions, uint32_t hints)
{}

#else   /* __lint */

#if defined(__amd64)

	/*
	 * void i86_monitor(volatile uint32_t *addr, uint32_t extensions,
	 *		    uint32_t hints)
	 * Arm the MONITOR hardware on addr.  Per the instruction's
	 * register contract: %rax = address, %rcx = extensions,
	 * %rdx = hints.  The clflush pushes the monitored line out so
	 * the monitor sees a clean state.  The raw byte sequence is
	 * used because older assemblers lack the monitor mnemonic.
	 */
	ENTRY_NP(i86_monitor)
	pushq	%rbp
	movq	%rsp, %rbp
	movq	%rdi, %rax		/* addr */
	movq	%rsi, %rcx		/* extensions */
	/* rdx contains input arg3: hints */
	clflush	(%rax)
	.byte	0x0f, 0x01, 0xc8	/* monitor */
	leave
	ret
	SET_SIZE(i86_monitor)

#elif defined(__i386)

ENTRY_NP(i86_monitor)
	pushl	%ebp
	movl	%esp, %ebp
	movl	0x8(%ebp),%eax		/* addr */
	movl	0xc(%ebp),%ecx		/* extensions */
	movl	0x10(%ebp),%edx		/* hints */
	clflush	(%eax)
	.byte	0x0f, 0x01, 0xc8	/* monitor */
	leave
	ret
	SET_SIZE(i86_monitor)

#endif	/* __i386 */
#endif	/* __lint */
746
#if defined(__lint)

/*ARGSUSED*/
void
i86_mwait(uint32_t data, uint32_t extensions)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void i86_mwait(uint32_t data, uint32_t extensions)
	 * Execute MWAIT with %rax = data (hints) and %rcx = extensions,
	 * idling until the line armed by i86_monitor() is written (or
	 * another wakeup condition occurs).  Raw byte sequence for the
	 * same assembler-compatibility reason as i86_monitor().
	 */
	ENTRY_NP(i86_mwait)
	pushq	%rbp
	movq	%rsp, %rbp
	movq	%rdi, %rax		/* data */
	movq	%rsi, %rcx		/* extensions */
	.byte	0x0f, 0x01, 0xc9	/* mwait */
	leave
	ret
	SET_SIZE(i86_mwait)

#elif defined(__i386)

	ENTRY_NP(i86_mwait)
	pushl	%ebp
	movl	%esp, %ebp
	movl	0x8(%ebp),%eax		/* data */
	movl	0xc(%ebp),%ecx		/* extensions */
	.byte	0x0f, 0x01, 0xc9	/* mwait */
	leave
	ret
	SET_SIZE(i86_mwait)

#endif	/* __i386 */
#endif	/* __lint */
782
#if defined(__xpv)
	/*
	 * Defined in C
	 */
#else

#if defined(__lint)

hrtime_t
tsc_read(void)
{
	return (0);
}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * hrtime_t tsc_read(void)
	 * Return the 64-bit time-stamp counter in %rax.
	 *
	 * The default body serializes with cpuid (leaf 0) before rdtsc;
	 * %rbx is preserved in %r11 because cpuid clobbers it.
	 *
	 * The _tsc_mfence_, _tscp_, _no_rdtsc_ and _tsc_lfence_
	 * start/end label pairs below delimit alternative bodies
	 * (mfence+rdtsc, rdtscp, constant zero, lfence+rdtsc).
	 * NOTE(review): these appear to be patched over the default
	 * implementation at startup according to CPU capabilities --
	 * confirm against the TSC setup code that consumes the labels.
	 * Do not change the instruction bytes or label placement here
	 * without checking that consumer.
	 */
	ENTRY_NP(tsc_read)
	movq	%rbx, %r11
	movl	$0, %eax
	cpuid				/* serialize before reading TSC */
	rdtsc				/* result in %edx:%eax */
	movq	%r11, %rbx
	shlq	$32, %rdx
	orq	%rdx, %rax		/* %rax = (%rdx << 32) | %eax */
	ret
	.globl _tsc_mfence_start
_tsc_mfence_start:
	mfence
	rdtsc
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	.globl _tsc_mfence_end
_tsc_mfence_end:
	.globl _tscp_start
_tscp_start:
	.byte	0x0f, 0x01, 0xf9	/* rdtscp instruction */
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	.globl _tscp_end
_tscp_end:
	.globl _no_rdtsc_start
_no_rdtsc_start:
	xorl	%edx, %edx
	xorl	%eax, %eax
	ret
	.globl _no_rdtsc_end
_no_rdtsc_end:
	.globl _tsc_lfence_start
_tsc_lfence_start:
	lfence
	rdtsc
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	.globl _tsc_lfence_end
_tsc_lfence_end:
	SET_SIZE(tsc_read)

#else /* __i386 */

	/*
	 * 32-bit variant: the 64-bit result is returned in %edx:%eax,
	 * which is exactly where rdtsc leaves it, so no shifting is
	 * needed.  Same alternative-body patching scheme as above.
	 */
	ENTRY_NP(tsc_read)
	pushl	%ebx			/* cpuid clobbers %ebx */
	movl	$0, %eax
	cpuid				/* serialize before reading TSC */
	rdtsc
	popl	%ebx
	ret
	.globl _tsc_mfence_start
_tsc_mfence_start:
	mfence
	rdtsc
	ret
	.globl _tsc_mfence_end
_tsc_mfence_end:
	.globl	_tscp_start
_tscp_start:
	.byte	0x0f, 0x01, 0xf9	/* rdtscp instruction */
	ret
	.globl _tscp_end
_tscp_end:
	.globl _no_rdtsc_start
_no_rdtsc_start:
	xorl	%edx, %edx
	xorl	%eax, %eax
	ret
	.globl _no_rdtsc_end
_no_rdtsc_end:
	.globl _tsc_lfence_start
_tsc_lfence_start:
	lfence
	rdtsc
	ret
	.globl _tsc_lfence_end
_tsc_lfence_end:
	SET_SIZE(tsc_read)

#endif	/* __i386 */

#endif	/* __lint */


#endif	/* __xpv */
889
#ifdef __lint
/*
 * Do not use this function for obtaining clock tick.  This
 * is called by callers who do not need to have a guaranteed
 * correct tick value.  The proper routine to use is tsc_read().
 */
u_longlong_t
randtick(void)
{
	return (0);
}
#else
#if defined(__amd64)
	/* Unserialized rdtsc: cheap, possibly reordered; fine for "random" ticks. */
	ENTRY_NP(randtick)
	rdtsc
	shlq    $32, %rdx
	orq     %rdx, %rax		/* %rax = 64-bit TSC value */
	ret
	SET_SIZE(randtick)
#else
	ENTRY_NP(randtick)
	rdtsc				/* 64-bit result returned in %edx:%eax */
	ret
	SET_SIZE(randtick)
#endif /* __i386 */
#endif /* __lint */
/*
 * Insert entryp after predp in a doubly linked list.
 * Layout assumed: forward pointer at offset 0, back pointer at
 * offset CPTRSIZE.
 */

#if defined(__lint)

/*ARGSUSED*/
void
_insque(caddr_t entryp, caddr_t predp)
{}

#else	/* __lint */

#if defined(__amd64)

	/* In: %rdi = entryp, %rsi = predp. */
	ENTRY(_insque)
	movq	(%rsi), %rax		/* predp->forw			*/
	movq	%rsi, CPTRSIZE(%rdi)	/* entryp->back = predp		*/
	movq	%rax, (%rdi)		/* entryp->forw = predp->forw	*/
	movq	%rdi, (%rsi)		/* predp->forw = entryp		*/
	movq	%rdi, CPTRSIZE(%rax)	/* predp->forw->back = entryp	*/
	ret
	SET_SIZE(_insque)

#elif defined(__i386)

	ENTRY(_insque)
	movl	8(%esp), %edx		/* %edx = predp */
	movl	4(%esp), %ecx		/* %ecx = entryp */
	movl	(%edx), %eax		/* predp->forw			*/
	movl	%edx, CPTRSIZE(%ecx)	/* entryp->back = predp		*/
	movl	%eax, (%ecx)		/* entryp->forw = predp->forw	*/
	movl	%ecx, (%edx)		/* predp->forw = entryp		*/
	movl	%ecx, CPTRSIZE(%eax)	/* predp->forw->back = entryp	*/
	ret
	SET_SIZE(_insque)

#endif	/* __i386 */
#endif	/* __lint */
955
/*
 * Remove entryp from a doubly linked list.  Same layout as _insque:
 * forward pointer at offset 0, back pointer at offset CPTRSIZE.
 * The removed entry's own pointers are left untouched.
 */

#if defined(__lint)

/*ARGSUSED*/
void
_remque(caddr_t entryp)
{}

#else	/* __lint */

#if defined(__amd64)

	/* In: %rdi = entryp. */
	ENTRY(_remque)
	movq	(%rdi), %rax		/* entry->forw */
	movq	CPTRSIZE(%rdi), %rdx	/* entry->back */
	movq	%rax, (%rdx)		/* entry->back->forw = entry->forw */
	movq	%rdx, CPTRSIZE(%rax)	/* entry->forw->back = entry->back */
	ret
	SET_SIZE(_remque)

#elif defined(__i386)

	ENTRY(_remque)
	movl	4(%esp), %ecx		/* %ecx = entryp */
	movl	(%ecx), %eax		/* entry->forw */
	movl	CPTRSIZE(%ecx), %edx	/* entry->back */
	movl	%eax, (%edx)		/* entry->back->forw = entry->forw */
	movl	%edx, CPTRSIZE(%eax)	/* entry->forw->back = entry->back */
	ret
	SET_SIZE(_remque)

#endif	/* __i386 */
#endif	/* __lint */
992
/*
 * Returns the number of non-null bytes in the string argument.
 */

#if defined(__lint)

/* ARGSUSED */
size_t
strlen(const char *str)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

/*
 * This is close to a simple transliteration of a C version of this
 * routine.  We should either just -make- this be a C version, or
 * justify having it in assembler by making it significantly faster.
 *
 * size_t
 * strlen(const char *s)
 * {
 *	const char *s0;
 * #if defined(DEBUG)
 *	if ((uintptr_t)s < KERNELBASE)
 *		panic(.str_panic_msg);
 * #endif
 *	for (s0 = s; *s; s++)
 *		;
 *	return (s - s0);
 * }
 */

	ENTRY(strlen)
#ifdef DEBUG
	/* DEBUG kernels panic on a user-space (below kernelbase) pointer. */
	movq	postbootkernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jae	str_valid
	pushq	%rbp			/* build a frame so panic() can unwind */
	movq	%rsp, %rbp
	leaq	.str_panic_msg(%rip), %rdi
	xorl	%eax, %eax		/* no FP args to the variadic panic() */
	call	panic
#endif	/* DEBUG */
str_valid:
	cmpb	$0, (%rdi)
	movq	%rdi, %rax		/* %rax = start address */
	je	.null_found		/* empty string: length 0 */
	.align	4
.strlen_loop:
	incq	%rdi
	cmpb	$0, (%rdi)
	jne	.strlen_loop
.null_found:
	subq	%rax, %rdi		/* length = end - start */
	movq	%rdi, %rax
	ret
	SET_SIZE(strlen)

#elif defined(__i386)

	/*
	 * 32-bit variant: scans a word (4 bytes) at a time once aligned.
	 * For word w, ((w & 0x7f7f7f7f) + 0x7f7f7f7f | w) & 0x80808080
	 * equals 0x80808080 iff no byte of w is zero -- the classic
	 * carry-propagation null-byte test.
	 */
	ENTRY(strlen)
#ifdef DEBUG
	movl	postbootkernelbase, %eax
	cmpl	%eax, 4(%esp)
	jae	str_valid
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.str_panic_msg
	call	panic
#endif /* DEBUG */

str_valid:
	movl	4(%esp), %eax		/* %eax = string address */
	testl	$3, %eax		/* if %eax not word aligned */
	jnz	.not_word_aligned	/* goto .not_word_aligned */
	.align	4
.word_aligned:
	movl	(%eax), %edx		/* move 1 word from (%eax) to %edx */
	movl	$0x7f7f7f7f, %ecx
	andl	%edx, %ecx		/* %ecx = %edx & 0x7f7f7f7f */
	addl	$4, %eax		/* next word */
	addl	$0x7f7f7f7f, %ecx	/* %ecx += 0x7f7f7f7f */
	orl	%edx, %ecx		/* %ecx |= %edx */
	andl	$0x80808080, %ecx	/* %ecx &= 0x80808080 */
	cmpl	$0x80808080, %ecx	/* if no null byte in this word */
	je	.word_aligned		/* goto .word_aligned */
	subl	$4, %eax		/* post-incremented */
.not_word_aligned:
	/* byte-at-a-time scan, until aligned or null found */
	cmpb	$0, (%eax)		/* if a byte in (%eax) is null */
	je	.null_found		/* goto .null_found */
	incl	%eax			/* next byte */
	testl	$3, %eax		/* if %eax not word aligned */
	jnz	.not_word_aligned	/* goto .not_word_aligned */
	jmp	.word_aligned		/* goto .word_aligned */
	.align	4
.null_found:
	subl	4(%esp), %eax		/* %eax -= string address */
	ret
	SET_SIZE(strlen)

#endif	/* __i386 */

#ifdef DEBUG
	.text
.str_panic_msg:
	.string "strlen: argument below kernelbase"
#endif /* DEBUG */

#endif	/* __lint */
1105
	/*
	 * Berkeley 4.3 introduced symbolically named interrupt levels
	 * as a way to deal with priority in a machine independent fashion.
	 * Numbered priorities are machine specific, and should be
	 * discouraged where possible.
	 *
	 * Note, for the machine specific priorities there are
	 * examples listed for devices that use a particular priority.
	 * It should not be construed that all devices of that
	 * type should be at that priority.  It is currently where
	 * the current devices fit into the priority scheme based
	 * upon time criticalness.
	 *
	 * The underlying assumption of these assignments is that
	 * IPL 10 is the highest level from which a device
	 * routine can call wakeup.  Devices that interrupt from higher
	 * levels are restricted in what they can do.  If they need
	 * kernel services they should schedule a routine at a lower
	 * level (via software interrupt) to do the required
	 * processing.
	 *
	 * Examples of this higher usage:
	 *	Level	Usage
	 *	14	Profiling clock (and PROM uart polling clock)
	 *	12	Serial ports
	 *
	 * The serial ports request lower level processing on level 6.
	 *
	 * Also, almost all splN routines (where N is a number or a
	 * mnemonic) will do a RAISE(), on the assumption that they are
	 * never used to lower our priority.
	 * The exceptions are:
	 *	spl8()		Because you can't be above 15 to begin with!
	 *	splzs()		Because this is used at boot time to lower our
	 *			priority, to allow the PROM to poll the uart.
	 *	spl0()		Used to lower priority to 0.
	 */

#if defined(__lint)

int spl0(void)		{ return (0); }
int spl6(void)		{ return (0); }
int spl7(void)		{ return (0); }
int spl8(void)		{ return (0); }
int splhigh(void)	{ return (0); }
int splhi(void)		{ return (0); }
int splzs(void)		{ return (0); }

/* ARGSUSED */
void
splx(int level)
{}

#else	/* __lint */

#if defined(__amd64)

/*
 * On amd64 the new level goes in %edi (first argument register) and
 * control tail-jumps into the common do_splx/splr code, which returns
 * directly to our caller.
 */
#define	SETPRI(level) \
	movl	$/**/level, %edi;	/* new priority */		\
	jmp	do_splx			/* redirect to do_splx */

#define	RAISE(level) \
	movl	$/**/level, %edi;	/* new priority */		\
	jmp	splr			/* redirect to splr */

#elif defined(__i386)

/*
 * On i386 the level is pushed as a stack argument and the common code
 * is called normally, so the argument must be popped before returning.
 */
#define	SETPRI(level) \
	pushl	$/**/level;	/* new priority */			\
	call	do_splx;	/* invoke common splx code */		\
	addl	$4, %esp;	/* unstack arg */			\
	ret

#define	RAISE(level) \
	pushl	$/**/level;	/* new priority */			\
	call	splr;		/* invoke common splr code */		\
	addl	$4, %esp;	/* unstack args */			\
	ret

#endif	/* __i386 */

	/* locks out all interrupts, including memory errors */
	ENTRY(spl8)
	SETPRI(15)
	SET_SIZE(spl8)

	/* just below the level that profiling runs */
	ENTRY(spl7)
	RAISE(13)
	SET_SIZE(spl7)

	/* sun specific - highest priority onboard serial i/o asy ports */
	ENTRY(splzs)
	SETPRI(12)	/* Can't be a RAISE, as it's used to lower us */
	SET_SIZE(splzs)

	/* splhi/splhigh/spl6/i_ddi_splhigh are all aliases for DISP_LEVEL */
	ENTRY(splhi)
	ALTENTRY(splhigh)
	ALTENTRY(spl6)
	ALTENTRY(i_ddi_splhigh)

	RAISE(DISP_LEVEL)

	SET_SIZE(i_ddi_splhigh)
	SET_SIZE(spl6)
	SET_SIZE(splhigh)
	SET_SIZE(splhi)

	/* allow all interrupts */
	ENTRY(spl0)
	SETPRI(0)
	SET_SIZE(spl0)


	/* splx implementation */
	ENTRY(splx)
	jmp	do_splx		/* redirect to common splx code */
	SET_SIZE(splx)

#endif	/* __lint */
1226
#if defined(__i386)

/*
 * Read and write the %gs segment selector register (i386 only).
 */

#if defined(__lint)

/*ARGSUSED*/
uint16_t
getgs(void)
{ return (0); }

/*ARGSUSED*/
void
setgs(uint16_t sel)
{}

#else	/* __lint */

	/* Return the 16-bit %gs selector zero-extended in %eax. */
	ENTRY(getgs)
	clr	%eax
	movw	%gs, %ax
	ret
	SET_SIZE(getgs)

	/* Load %gs from the 16-bit selector argument on the stack. */
	ENTRY(setgs)
	movw	4(%esp), %gs
	ret
	SET_SIZE(setgs)

#endif	/* __lint */
#endif	/* __i386 */
1260
#if defined(__lint)

void
pc_reset(void)
{}

void
efi_reset(void)
{}

#else	/* __lint */

	/*
	 * Busy-wait roughly 500ms by calling tenmicrosec() 50000 times.
	 * Preserves %ebx/%rbx (callee-saved, used as the loop counter).
	 */
	ENTRY(wait_500ms)
#if defined(__amd64)
	pushq	%rbx
#elif defined(__i386)
	push	%ebx
#endif
	movl	$50000, %ebx
1:
	call	tenmicrosec
	decl	%ebx
	jnz	1b
#if defined(__amd64)
	popq	%rbx
#elif defined(__i386)
	pop	%ebx
#endif
	ret
	SET_SIZE(wait_500ms)

/* Bit flags selecting which reset methods pc_reset() may attempt. */
#define	RESET_METHOD_KBC	1
#define	RESET_METHOD_PORT92	2
#define RESET_METHOD_PCI	4

	DGDEF3(pc_reset_methods, 4, 8)
	.long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;

	/*
	 * Reset the machine, trying each enabled method in turn:
	 * keyboard controller, port 0x92 fast reset, PCI 0xcf9 soft
	 * reset, and finally a triple fault (shared with efi_reset).
	 * Never returns.
	 */
	ENTRY(pc_reset)

#if defined(__i386)
	testl	$RESET_METHOD_KBC, pc_reset_methods
#elif defined(__amd64)
	testl	$RESET_METHOD_KBC, pc_reset_methods(%rip)
#endif
	jz	1f

	/
	/ Try the classic keyboard controller-triggered reset.
	/ Command 0xfe to port 0x64 pulses the reset line.
	/
	movw	$0x64, %dx
	movb	$0xfe, %al
	outb	(%dx)

	/ Wait up to 500 milliseconds here for the keyboard controller
	/ to pull the reset line.  On some systems where the keyboard
	/ controller is slow to pull the reset line, the next reset method
	/ may be executed (which may be bad if those systems hang when the
	/ next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
	/ and Ferrari 4000 (doesn't like the cf9 reset method))

	call	wait_500ms

1:
#if defined(__i386)
	testl	$RESET_METHOD_PORT92, pc_reset_methods
#elif defined(__amd64)
	testl	$RESET_METHOD_PORT92, pc_reset_methods(%rip)
#endif
	jz	3f

	/
	/ Try port 0x92 fast reset
	/
	movw	$0x92, %dx
	inb	(%dx)
	cmpb	$0xff, %al	/ If port's not there, we should get back 0xFF
	je	1f
	testb	$1, %al		/ If bit 0
	jz	2f		/ is clear, jump to perform the reset
	andb	$0xfe, %al	/ otherwise,
	outb	(%dx)		/ clear bit 0 first, then
2:
	orb	$1, %al		/ Set bit 0
	outb	(%dx)		/ and reset the system
1:

	call	wait_500ms

3:
#if defined(__i386)
	testl	$RESET_METHOD_PCI, pc_reset_methods
#elif defined(__amd64)
	testl	$RESET_METHOD_PCI, pc_reset_methods(%rip)
#endif
	jz	4f

	/ Try the PCI (soft) reset vector (should work on all modern systems,
	/ but has been shown to cause problems on 450NX systems, and some newer
	/ systems (e.g. ATI IXP400-equipped systems))
	/ When resetting via this method, 2 writes are required.  The first
	/ targets bit 1 (0=hard reset without power cycle, 1=hard reset with
	/ power cycle).
	/ The reset occurs on the second write, during bit 2's transition from
	/ 0->1.
	movw	$0xcf9, %dx
	movb	$0x2, %al	/ Reset mode = hard, no power cycle
	outb	(%dx)
	movb	$0x6, %al
	outb	(%dx)

	call	wait_500ms

4:
	/
	/ port 0xcf9 failed also.  Last-ditch effort is to
	/ triple-fault the CPU.
	/ Also, use triple fault for EFI firmware
	/
	ENTRY(efi_reset)
#if defined(__amd64)
	pushq	$0x0
	pushq	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
	lidt	(%rsp)
#elif defined(__i386)
	pushl	$0x0
	pushl	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
	lidt	(%esp)
#endif
	int	$0x0		/ Trigger interrupt, generate triple-fault

	cli
	hlt			/ Wait forever
	/*NOTREACHED*/
	SET_SIZE(efi_reset)
	SET_SIZE(pc_reset)

#endif	/* __lint */
1399
1400/*
1401 * C callable in and out routines
1402 */
1403
#if defined(__lint)

/* ARGSUSED */
void
outl(int port_address, uint32_t val)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void outl(int port_address, uint32_t val)
	 * Write the 32-bit value 'val' to I/O port 'port_address'.
	 */
	ENTRY(outl)
	movw	%di, %dx		/* %dx = port_address */
	movl	%esi, %eax		/* %eax = val */
	outl	(%dx)
	ret
	SET_SIZE(outl)

#elif defined(__i386)

	/* Stack offsets of the arguments at function entry */
	.set	PORT, 4
	.set	VAL, 8

	ENTRY(outl)
	movw	PORT(%esp), %dx
	movl	VAL(%esp), %eax
	outl	(%dx)
	ret
	SET_SIZE(outl)

#endif	/* __i386 */
#endif	/* __lint */
1436
#if defined(__lint)

/* ARGSUSED */
void
outw(int port_address, uint16_t val)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void outw(int port_address, uint16_t val)
	 * Write the 16-bit value 'val' to I/O port 'port_address'.
	 */
	ENTRY(outw)
	movw	%di, %dx		/* %dx = port_address */
	movw	%si, %ax		/* %ax = val */
	D16 outl (%dx)		/* XX64 why not outw? */
	ret
	SET_SIZE(outw)

#elif defined(__i386)

	ENTRY(outw)
	movw	PORT(%esp), %dx
	movw	VAL(%esp), %ax
	D16 outl (%dx)		/* 16-bit out via operand-size override */
	ret
	SET_SIZE(outw)

#endif	/* __i386 */
#endif	/* __lint */
1466
#if defined(__lint)

/* ARGSUSED */
void
outb(int port_address, uint8_t val)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void outb(int port_address, uint8_t val)
	 * Write the 8-bit value 'val' to I/O port 'port_address'.
	 */
	ENTRY(outb)
	movw	%di, %dx		/* %dx = port_address */
	movb	%sil, %al		/* %al = val */
	outb	(%dx)
	ret
	SET_SIZE(outb)

#elif defined(__i386)

	ENTRY(outb)
	movw	PORT(%esp), %dx
	movb	VAL(%esp), %al
	outb	(%dx)
	ret
	SET_SIZE(outb)

#endif	/* __i386 */
#endif	/* __lint */
1496
#if defined(__lint)

/* ARGSUSED */
uint32_t
inl(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * uint32_t inl(int port_address)
	 * Read and return a 32-bit value from I/O port 'port_address'.
	 */
	ENTRY(inl)
	xorl	%eax, %eax
	movw	%di, %dx		/* %dx = port_address */
	inl	(%dx)
	ret
	SET_SIZE(inl)

#elif defined(__i386)

	ENTRY(inl)
	movw	PORT(%esp), %dx
	inl	(%dx)
	ret
	SET_SIZE(inl)

#endif	/* __i386 */
#endif	/* __lint */
1525
#if defined(__lint)

/* ARGSUSED */
uint16_t
inw(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * uint16_t inw(int port_address)
	 * Read and return a 16-bit value from I/O port 'port_address'.
	 * The full register is cleared first since 'in' only writes %ax.
	 */
	ENTRY(inw)
	xorl	%eax, %eax
	movw	%di, %dx		/* %dx = port_address */
	D16 inl	(%dx)			/* 16-bit in (operand-size override) */
	ret
	SET_SIZE(inw)

#elif defined(__i386)

	ENTRY(inw)
	xorl	%eax, %eax		/* xor is the idiomatic zeroing form */
	movw	PORT(%esp), %dx
	D16 inl	(%dx)
	ret
	SET_SIZE(inw)

#endif	/* __i386 */
#endif	/* __lint */
1555
1556
#if defined(__lint)

/* ARGSUSED */
uint8_t
inb(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * uint8_t inb(int port_address)
	 * Read and return an 8-bit value from I/O port 'port_address'.
	 * The full register is cleared first since 'inb' only writes %al.
	 */
	ENTRY(inb)
	xorl	%eax, %eax
	movw	%di, %dx		/* %dx = port_address */
	inb	(%dx)
	ret
	SET_SIZE(inb)

#elif defined(__i386)

	ENTRY(inb)
	xorl	%eax, %eax		/* xor is the idiomatic zeroing form */
	movw	PORT(%esp), %dx
	inb	(%dx)
	ret
	SET_SIZE(inb)

#endif	/* __i386 */
#endif	/* __lint */
1586
1587
#if defined(__lint)

/* ARGSUSED */
void
repoutsw(int port, uint16_t *addr, int cnt)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repoutsw(int port, uint16_t *addr, int cnt)
	 * Write 'cnt' 16-bit words starting at 'addr' to I/O port 'port'.
	 */
	ENTRY(repoutsw)
	movl	%edx, %ecx		/* %ecx = cnt (rep count) */
	movw	%di, %dx		/* %dx = port */
	rep				/* outs reads from %ds:%rsi = addr */
	  D16 outsl
	ret
	SET_SIZE(repoutsw)

#elif defined(__i386)

	/*
	 * The arguments and saved registers are on the stack in the
	 *  following order:
	 *      |  cnt  |  +16
	 *      | *addr |  +12
	 *      | port  |  +8
	 *      |  eip  |  +4
	 *      |  esi  |  <-- %esp
	 * If additional values are pushed onto the stack, make sure
	 * to adjust the following constants accordingly.
	 */
	.set	PORT, 8
	.set	ADDR, 12
	.set	COUNT, 16

	ENTRY(repoutsw)
	pushl	%esi
	movl	PORT(%esp), %edx
	movl	ADDR(%esp), %esi
	movl	COUNT(%esp), %ecx
	rep
	  D16 outsl
	popl	%esi
	ret
	SET_SIZE(repoutsw)

#endif	/* __i386 */
#endif	/* __lint */
1637
1638
#if defined(__lint)

/* ARGSUSED */
void
repinsw(int port_addr, uint16_t *addr, int cnt)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repinsw(int port_addr, uint16_t *addr, int cnt)
	 * Read 'cnt' 16-bit words from I/O port 'port_addr' into 'addr'.
	 *
	 * The ins instruction stores through %es:%rdi, so the buffer
	 * pointer (second argument, arriving in %rsi) must be moved into
	 * %rdi first; previously %rdi still held the port number, so the
	 * data was never stored into 'addr' (cf. repinsb/repinsd, which
	 * both perform this move).
	 */
	ENTRY(repinsw)
	movl	%edx, %ecx		/* %ecx = cnt (rep count) */
	movw	%di, %dx		/* %dx = port_addr */
	movq	%rsi, %rdi		/* ins stores through %rdi */
	rep
	  D16 insl
	ret
	SET_SIZE(repinsw)

#elif defined(__i386)

	ENTRY(repinsw)
	pushl	%edi
	movl	PORT(%esp), %edx
	movl	ADDR(%esp), %edi
	movl	COUNT(%esp), %ecx
	rep
	  D16 insl
	popl	%edi
	ret
	SET_SIZE(repinsw)

#endif	/* __i386 */
#endif	/* __lint */
1673
1674
#if defined(__lint)

/* ARGSUSED */
void
repinsb(int port, uint8_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repinsb(int port, uint8_t *addr, int count)
	 * Read 'count' bytes from I/O port 'port' into 'addr'.
	 */
	ENTRY(repinsb)
	movl	%edx, %ecx		/* %ecx = count (rep count) */
	movw	%di, %dx		/* %dx = port */
	movq	%rsi, %rdi		/* ins stores through %rdi */
	rep
	  insb
	ret
	SET_SIZE(repinsb)

#elif defined(__i386)

	/*
	 * The arguments and saved registers are on the stack in the
	 *  following order:
	 *      |  cnt  |  +16
	 *      | *addr |  +12
	 *      | port  |  +8
	 *      |  eip  |  +4
	 *      |  esi  |  <-- %esp
	 * If additional values are pushed onto the stack, make sure
	 * to adjust the following constants accordingly.
	 */
	.set	IO_PORT, 8
	.set	IO_ADDR, 12
	.set	IO_COUNT, 16

	ENTRY(repinsb)
	pushl	%edi
	movl	IO_ADDR(%esp), %edi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  insb
	popl	%edi
	ret
	SET_SIZE(repinsb)

#endif	/* __i386 */
#endif	/* __lint */
1725
1726
1727/*
1728 * Input a stream of 32-bit words.
1729 * NOTE: count is a DWORD count.
1730 */
#if defined(__lint)

/* ARGSUSED */
void
repinsd(int port, uint32_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repinsd(int port, uint32_t *addr, int count)
	 * Read 'count' 32-bit words from I/O port 'port' into 'addr'.
	 */
	ENTRY(repinsd)
	movl	%edx, %ecx		/* %ecx = count (rep count) */
	movw	%di, %dx		/* %dx = port */
	movq	%rsi, %rdi		/* ins stores through %rdi */
	rep
	  insl
	ret
	SET_SIZE(repinsd)

#elif defined(__i386)

	ENTRY(repinsd)
	pushl	%edi
	movl	IO_ADDR(%esp), %edi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  insl
	popl	%edi
	ret
	SET_SIZE(repinsd)

#endif	/* __i386 */
#endif	/* __lint */
1766
1767/*
1768 * Output a stream of bytes
1769 * NOTE: count is a byte count
1770 */
#if defined(__lint)

/* ARGSUSED */
void
repoutsb(int port, uint8_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repoutsb(int port, uint8_t *addr, int count)
	 * Write 'count' bytes starting at 'addr' to I/O port 'port'.
	 */
	ENTRY(repoutsb)
	movl	%edx, %ecx		/* %ecx = count (rep count) */
	movw	%di, %dx		/* %dx = port */
	rep				/* outs reads from %ds:%rsi = addr */
	  outsb
	ret
	SET_SIZE(repoutsb)

#elif defined(__i386)

	ENTRY(repoutsb)
	pushl	%esi
	movl	IO_ADDR(%esp), %esi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  outsb
	popl	%esi
	ret
	SET_SIZE(repoutsb)

#endif	/* __i386 */
#endif	/* __lint */
1805
1806/*
1807 * Output a stream of 32-bit words
1808 * NOTE: count is a DWORD count
1809 */
#if defined(__lint)

/* ARGSUSED */
void
repoutsd(int port, uint32_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repoutsd(int port, uint32_t *addr, int count)
	 * Write 'count' 32-bit words starting at 'addr' to I/O port 'port'.
	 */
	ENTRY(repoutsd)
	movl	%edx, %ecx		/* %ecx = count (rep count) */
	movw	%di, %dx		/* %dx = port */
	rep				/* outs reads from %ds:%rsi = addr */
	  outsl
	ret
	SET_SIZE(repoutsd)

#elif defined(__i386)

	ENTRY(repoutsd)
	pushl	%esi
	movl	IO_ADDR(%esp), %esi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  outsl
	popl	%esi
	ret
	SET_SIZE(repoutsd)

#endif	/* __i386 */
#endif	/* __lint */
1844
1845/*
1846 * void int3(void)
1847 * void int18(void)
1848 * void int20(void)
1849 * void int_cmci(void)
1850 */
1851
#if defined(__lint)

void
int3(void)
{}

void
int18(void)
{}

void
int20(void)
{}

void
int_cmci(void)
{}

#else	/* __lint */

	/* Trigger a breakpoint trap */
	ENTRY(int3)
	int	$T_BPTFLT
	ret
	SET_SIZE(int3)

	/* Trigger a machine-check trap */
	ENTRY(int18)
	int	$T_MCE
	ret
	SET_SIZE(int18)

	/* Enter the kernel debugger, but only if it was enabled at boot */
	ENTRY(int20)
	movl	boothowto, %eax
	andl	$RB_DEBUG, %eax		/* was the debugger requested? */
	jz	1f			/* no: return without trapping */

	int	$T_DBGENTR
1:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(int20)

	/* Trigger a CMCI trap (delivered via the T_ENOEXTFLT vector here) */
	ENTRY(int_cmci)
	int	$T_ENOEXTFLT
	ret
	SET_SIZE(int_cmci)

#endif	/* __lint */
1899
#if defined(__lint)

/* ARGSUSED */
int
scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
	 * Scan cp[0..size-1] until (table[*cp] & mask) is non-zero.
	 * Return the count of bytes remaining (including the matching
	 * byte), or 0 if no byte matched.
	 */
	ENTRY(scanc)
					/* rdi == size */
					/* rsi == cp */
					/* rdx == table */
					/* rcx == mask */
	addq	%rsi, %rdi		/* end = &cp[size] */
.scanloop:
	cmpq	%rdi, %rsi		/* while (cp < end */
	jnb	.scandone
	movzbq	(%rsi), %r8		/* %r8 = *cp */
	incq	%rsi			/* cp++ */
	testb	%cl, (%r8, %rdx)
	jz	.scanloop		/*  && (table[*cp] & mask) == 0) */
	decq	%rsi			/* (fix post-increment) */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	ret
	SET_SIZE(scanc)

#elif defined(__i386)

	ENTRY(scanc)
	pushl	%edi
	pushl	%esi
	movb	24(%esp), %cl		/* mask = %cl */
	movl	16(%esp), %esi		/* cp = %esi */
	movl	20(%esp), %edx		/* table = %edx */
	movl	%esi, %edi
	addl	12(%esp), %edi		/* end = &cp[size]; */
.scanloop:
	cmpl	%edi, %esi		/* while (cp < end */
	jnb	.scandone
	movzbl	(%esi),  %eax		/* %al = *cp */
	incl	%esi			/* cp++ */
	movb	(%edx,  %eax), %al	/* %al = table[*cp] */
	testb	%al, %cl
	jz	.scanloop		/*   && (table[*cp] & mask) == 0) */
	dec	%esi			/* post-incremented */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	popl	%esi
	popl	%edi
	ret
	SET_SIZE(scanc)

#endif	/* __i386 */
#endif	/* __lint */
1960
1961/*
1962 * Replacement functions for ones that are normally inlined.
1963 * In addition to the copy in i86.il, they are defined here just in case.
1964 */
1965
#if defined(__lint)

ulong_t
intr_clear(void)
{ return (0); }

ulong_t
clear_int_flag(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Disable interrupts and return the previous flags value, so
	 * the caller can later pass it to intr_restore().  Under the
	 * hypervisor (__xpv), PS_IE is synthesized from the virtual
	 * event mask instead, unless a hypervisor panic is in progress.
	 */
	ENTRY(intr_clear)
	ENTRY(clear_int_flag)
	pushfq
	popq	%rax			/* %rax = current flags */
#if defined(__xpv)
	leaq	xpv_panicking, %rdi
	movl	(%rdi), %edi
	cmpl	$0, %edi
	jne	2f			/* panicking: use real cli below */
	CLIRET(%rdi, %dl)	/* returns event mask in %dl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andq    $_BITNOT(PS_IE), %rax
	testb	$1, %dl
	jnz	1f
	orq	$PS_IE, %rax
1:
	ret
2:
#endif
	CLI(%rdi)
	ret
	SET_SIZE(clear_int_flag)
	SET_SIZE(intr_clear)

#elif defined(__i386)

	ENTRY(intr_clear)
	ENTRY(clear_int_flag)
	pushfl
	popl	%eax			/* %eax = current flags */
#if defined(__xpv)
	leal	xpv_panicking, %edx
	movl	(%edx), %edx
	cmpl	$0, %edx
	jne	2f			/* panicking: use real cli below */
	CLIRET(%edx, %cl)	/* returns event mask in %cl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andl    $_BITNOT(PS_IE), %eax
	testb	$1, %cl
	jnz	1f
	orl	$PS_IE, %eax
1:
	ret
2:
#endif
	CLI(%edx)
	ret
	SET_SIZE(clear_int_flag)
	SET_SIZE(intr_clear)

#endif	/* __i386 */
#endif	/* __lint */
2036
#if defined(__lint)

struct cpu *
curcpup(void)
{ return 0; }

#else	/* __lint */

#if defined(__amd64)

	/* Return the current CPU's cpu_t pointer (from %gs-based data) */
	ENTRY(curcpup)
	movq	%gs:CPU_SELF, %rax
	ret
	SET_SIZE(curcpup)

#elif defined(__i386)

	ENTRY(curcpup)
	movl	%gs:CPU_SELF, %eax
	ret
	SET_SIZE(curcpup)

#endif	/* __i386 */
#endif	/* __lint */
2061
/*
 * htonll(), ntohll(), htonl(), ntohl(), htons(), ntohs()
 * These functions reverse the byte order of the input parameter and return
 * the result.  This is used to convert the byte order from host byte order
 * (little endian) to network byte order (big endian), or vice versa.
 */
2067
#if defined(__lint)

uint64_t
htonll(uint64_t i)
{ return (i); }

uint64_t
ntohll(uint64_t i)
{ return (i); }

uint32_t
htonl(uint32_t i)
{ return (i); }

uint32_t
ntohl(uint32_t i)
{ return (i); }

uint16_t
htons(uint16_t i)
{ return (i); }

uint16_t
ntohs(uint16_t i)
{ return (i); }

#else	/* __lint */

#if defined(__amd64)

	/* Byte-swap a 64-bit value */
	ENTRY(htonll)
	ALTENTRY(ntohll)
	movq	%rdi, %rax
	bswapq	%rax
	ret
	SET_SIZE(ntohll)
	SET_SIZE(htonll)

	/* XX64 there must be shorter sequences for this */
	/* Byte-swap a 32-bit value */
	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	%edi, %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

	/* XX64 there must be better sequences for this */
	/* Byte-swap a 16-bit value: swap all 32 bits, keep the top half */
	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	%edi, %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)

#elif defined(__i386)

	ENTRY(htonll)
	ALTENTRY(ntohll)
	movl	4(%esp), %edx		/* low arg half -> high result half */
	movl	8(%esp), %eax		/* high arg half -> low result half */
	bswap	%edx
	bswap	%eax
	ret				/* 64-bit result in %edx:%eax */
	SET_SIZE(ntohll)
	SET_SIZE(htonll)

	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	4(%esp), %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	4(%esp), %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)

#endif	/* __i386 */
#endif	/* __lint */
2156
2157
#if defined(__lint)

/* ARGSUSED */
void
intr_restore(ulong_t i)
{ return; }

/* ARGSUSED */
void
restore_int_flag(ulong_t i)
{ return; }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Re-enable interrupts iff PS_IE is set in the flags value 'i'
	 * previously returned by intr_clear()/clear_int_flag().
	 */
	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	testq	$PS_IE, %rdi
	jz	1f			/* interrupts were disabled before */
#if defined(__xpv)
	leaq	xpv_panicking, %rsi
	movl	(%rsi), %esi
	cmpl	$0, %esi
	jne	1f
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored.
	 * The virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%rsi, %rdi)
#else
	sti
#endif
1:
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

#elif defined(__i386)

	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	testl	$PS_IE, 4(%esp)
	jz	1f			/* interrupts were disabled before */
#if defined(__xpv)
	leal	xpv_panicking, %edx
	movl	(%edx), %edx
	cmpl	$0, %edx
	jne	1f
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored.
	 * The virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%edx, 4(%esp))
#else
	sti
#endif
1:
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

#endif	/* __i386 */
#endif	/* __lint */
2224
#if defined(__lint)

void
sti(void)
{}

void
cli(void)
{}

#else	/* __lint */

	/* Enable interrupts */
	ENTRY(sti)
	STI
	ret
	SET_SIZE(sti)

	/* Disable interrupts (the CLI macro needs a scratch register) */
	ENTRY(cli)
#if defined(__amd64)
	CLI(%rax)
#elif defined(__i386)
	CLI(%eax)
#endif	/* __i386 */
	ret
	SET_SIZE(cli)

#endif	/* __lint */
2252
#if defined(__lint)

dtrace_icookie_t
dtrace_interrupt_disable(void)
{ return (0); }

#else   /* __lint */

#if defined(__amd64)

	/*
	 * Disable interrupts and return the previous flags as a cookie
	 * to be handed back to dtrace_interrupt_enable().
	 */
	ENTRY(dtrace_interrupt_disable)
	pushfq
	popq	%rax			/* %rax = current flags (cookie) */
#if defined(__xpv)
	leaq	xpv_panicking, %rdi
	movl	(%rdi), %edi
	cmpl	$0, %edi
	jne	.dtrace_interrupt_disable_done
	CLIRET(%rdi, %dl)	/* returns event mask in %dl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andq    $_BITNOT(PS_IE), %rax
	testb	$1, %dl
	jnz	.dtrace_interrupt_disable_done
	orq	$PS_IE, %rax
#else
	CLI(%rdx)
#endif
.dtrace_interrupt_disable_done:
	ret
	SET_SIZE(dtrace_interrupt_disable)

#elif defined(__i386)

	ENTRY(dtrace_interrupt_disable)
	pushfl
	popl	%eax			/* %eax = current flags (cookie) */
#if defined(__xpv)
	leal	xpv_panicking, %edx
	movl	(%edx), %edx
	cmpl	$0, %edx
	jne	.dtrace_interrupt_disable_done
	CLIRET(%edx, %cl)	/* returns event mask in %cl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andl    $_BITNOT(PS_IE), %eax
	testb	$1, %cl
	jnz	.dtrace_interrupt_disable_done
	orl	$PS_IE, %eax
#else
	CLI(%edx)
#endif
.dtrace_interrupt_disable_done:
	ret
	SET_SIZE(dtrace_interrupt_disable)

#endif	/* __i386 */
#endif	/* __lint */
2313
#if defined(__lint)

/*ARGSUSED*/
void
dtrace_interrupt_enable(dtrace_icookie_t cookie)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Restore the interrupt state saved in 'cookie' (the flags value
	 * returned by dtrace_interrupt_disable()).
	 */
	ENTRY(dtrace_interrupt_enable)
	pushq	%rdi			/* cookie = saved flags */
	popfq
#if defined(__xpv)
	leaq	xpv_panicking, %rdx
	movl	(%rdx), %edx
	cmpl	$0, %edx
	jne	.dtrace_interrupt_enable_done
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored. The
	 * virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%rdx, %rdi)
#endif
.dtrace_interrupt_enable_done:
	ret
	SET_SIZE(dtrace_interrupt_enable)

#elif defined(__i386)

	ENTRY(dtrace_interrupt_enable)
	movl	4(%esp), %eax		/* %eax = cookie (saved flags) */
	pushl	%eax
	popfl
#if defined(__xpv)
	leal	xpv_panicking, %edx
	movl	(%edx), %edx
	cmpl	$0, %edx
	jne	.dtrace_interrupt_enable_done
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored. The
	 * virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%edx, %eax)
#endif
.dtrace_interrupt_enable_done:
	ret
	SET_SIZE(dtrace_interrupt_enable)

#endif	/* __i386 */
#endif	/* __lint */
2368
2369
#if defined(__lint)

void
dtrace_membar_producer(void)
{}

void
dtrace_membar_consumer(void)
{}

#else	/* __lint */

	/*
	 * DTrace memory barriers.  No fence instruction is emitted; the
	 * function call itself acts as a compiler-level barrier.
	 * NOTE(review): presumably x86's memory ordering makes a hardware
	 * fence unnecessary here -- confirm before strengthening.
	 *
	 * (The guard above previously tested defined(lint); changed to
	 * __lint for consistency with the rest of this file and with the
	 * matching #else/#endif annotations below.)
	 */
	ENTRY(dtrace_membar_producer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_producer)

	ENTRY(dtrace_membar_consumer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_consumer)

#endif	/* __lint */
2393
#if defined(__lint)

kthread_id_t
threadp(void)
{ return ((kthread_id_t)0); }

#else	/* __lint */

#if defined(__amd64)

	/* Return the current thread pointer (from %gs-based CPU data) */
	ENTRY(threadp)
	movq	%gs:CPU_THREAD, %rax
	ret
	SET_SIZE(threadp)

#elif defined(__i386)

	ENTRY(threadp)
	movl	%gs:CPU_THREAD, %eax
	ret
	SET_SIZE(threadp)

#endif	/* __i386 */
#endif	/* __lint */
2418
2419/*
2420 *   Checksum routine for Internet Protocol Headers
2421 */
2422
#if defined(__lint)

/* ARGSUSED */
unsigned int
ip_ocsum(
	ushort_t *address,	/* ptr to 1st message buffer */
	int halfword_count,	/* length of data */
	unsigned int sum)	/* partial checksum */
{
	int		i;
	unsigned int	psum = 0;	/* partial sum */

	for (i = 0; i < halfword_count; i++, address++) {
		psum += *address;
	}

	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	psum += sum;

	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	return (psum);
}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * The main loop sums 32 halfwords (64 bytes) per iteration using
	 * a carry-propagating adcl chain alternating between %edx and
	 * %eax.  The tail (fewer than 32 halfwords remaining) is handled
	 * by jumping into the middle of the chain via .ip_ocsum_jmptbl.
	 * The two 32-bit accumulators are finally folded into a 16-bit
	 * ones-complement checksum.
	 */
	ENTRY(ip_ocsum)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jnb	1f
	xorl	%eax, %eax
	movq	%rdi, %rsi
	leaq	.ip_ocsum_panic_msg(%rip), %rdi
	call	panic
	/*NOTREACHED*/
.ip_ocsum_panic_msg:
	.string	"ip_ocsum: address 0x%p below kernelbase\n"
1:
#endif
	movl	%esi, %ecx	/* halfword_count */
	movq	%rdi, %rsi	/* address */
				/* partial sum in %edx */
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done
	testq	$3, %rsi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:	/* XX64 opportunities for 8-byte operations? */
.next_iter:
	/* XX64 opportunities for prefetch? */
	/* XX64 compute csum with 64 bit quantities? */
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%rsi), %edx
.only60:
	adcl	4(%rsi), %eax
.only56:
	adcl	8(%rsi), %edx
.only52:
	adcl	12(%rsi), %eax
.only48:
	adcl	16(%rsi), %edx
.only44:
	adcl	20(%rsi), %eax
.only40:
	adcl	24(%rsi), %edx
.only36:
	adcl	28(%rsi), %eax
.only32:
	adcl	32(%rsi), %edx
.only28:
	adcl	36(%rsi), %eax
.only24:
	adcl	40(%rsi), %edx
.only20:
	adcl	44(%rsi), %eax
.only16:
	adcl	48(%rsi), %edx
.only12:
	adcl	52(%rsi), %eax
.only8:
	adcl	56(%rsi), %edx
.only4:
	adcl	60(%rsi), %eax	/* could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addq	$64, %rsi
	testl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	leave
	ret

	/* sum the leading unaligned halfword, then rejoin aligned loop */
.ip_csum_notaligned:
	xorl	%edi, %edi
	movw	(%rsi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addq	$2, %rsi
	decl	%ecx
	jmp	.ip_csum_aligned

	/* tail: sum any odd halfword, then jump into the adc chain */
.less_than_32:
	addl	$32, %ecx
	testl	$1, %ecx
	jz	.size_aligned
	andl	$0xfe, %ecx
	movzwl	(%rsi, %rcx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	movl	%ecx, %edi
	shrl	$1, %ecx
	shl	$1, %edi
	subq	$64, %rdi
	addq	%rdi, %rsi
	leaq    .ip_ocsum_jmptbl(%rip), %rdi
	leaq	(%rdi, %rcx, 8), %rdi
	xorl	%ecx, %ecx
	clc
	jmp 	*(%rdi)

	.align	8
.ip_ocsum_jmptbl:
	.quad	.only0, .only4, .only8, .only12, .only16, .only20
	.quad	.only24, .only28, .only32, .only36, .only40, .only44
	.quad	.only48, .only52, .only56, .only60
	SET_SIZE(ip_ocsum)

#elif defined(__i386)

	/* Same algorithm as the amd64 version above; cdecl arguments */
	ENTRY(ip_ocsum)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movl	12(%ebp), %ecx	/* count of half words */
	movl	16(%ebp), %edx	/* partial checksum */
	movl	8(%ebp), %esi
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done

	testl	$3, %esi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:
.next_iter:
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%esi), %edx
.only60:
	adcl	4(%esi), %eax
.only56:
	adcl	8(%esi), %edx
.only52:
	adcl	12(%esi), %eax
.only48:
	adcl	16(%esi), %edx
.only44:
	adcl	20(%esi), %eax
.only40:
	adcl	24(%esi), %edx
.only36:
	adcl	28(%esi), %eax
.only32:
	adcl	32(%esi), %edx
.only28:
	adcl	36(%esi), %eax
.only24:
	adcl	40(%esi), %edx
.only20:
	adcl	44(%esi), %eax
.only16:
	adcl	48(%esi), %edx
.only12:
	adcl	52(%esi), %eax
.only8:
	adcl	56(%esi), %edx
.only4:
	adcl	60(%esi), %eax	/* We could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* we could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addl	$64, %esi
	andl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	popl	%edi		/* restore registers */
	popl	%esi
	popl	%ebx
	leave
	ret

	/* sum the leading unaligned halfword, then rejoin aligned loop */
.ip_csum_notaligned:
	xorl	%edi, %edi
	movw	(%esi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addl	$2, %esi
	decl	%ecx
	jmp	.ip_csum_aligned

	/* tail: sum any odd halfword, then jump into the adc chain */
.less_than_32:
	addl	$32, %ecx
	testl	$1, %ecx
	jz	.size_aligned
	andl	$0xfe, %ecx
	movzwl	(%esi, %ecx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	movl	%ecx, %edi
	shrl	$1, %ecx
	shl	$1, %edi
	subl	$64, %edi
	addl	%edi, %esi
	movl	$.ip_ocsum_jmptbl, %edi
	lea	(%edi, %ecx, 4), %edi
	xorl	%ecx, %ecx
	clc
	jmp 	*(%edi)
	SET_SIZE(ip_ocsum)

	.data
	.align	4

.ip_ocsum_jmptbl:
	.long	.only0, .only4, .only8, .only12, .only16, .only20
	.long	.only24, .only28, .only32, .only36, .only40, .only44
	.long	.only48, .only52, .only56, .only60


#endif	/* __i386 */
#endif	/* __lint */
2689
2690/*
2691 * multiply two long numbers and yield a u_longlong_t result, callable from C.
2692 * Provided to manipulate hrtime_t values.
2693 */
#if defined(__lint)

/* result = a * b; */

/* ARGSUSED */
unsigned long long
mul32(uint_t a, uint_t b)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * unsigned long long mul32(uint_t a, uint_t b)
	 * Return the full 64-bit product of two 32-bit values.
	 */
	ENTRY(mul32)
	xorl	%edx, %edx	/* XX64 joe, paranoia? */
	movl	%edi, %eax
	mull	%esi		/* %edx:%eax = a * b */
	shlq	$32, %rdx
	orq	%rdx, %rax	/* combine the halves into %rax */
	ret
	SET_SIZE(mul32)

#elif defined(__i386)

	ENTRY(mul32)
	movl	8(%esp), %eax
	movl	4(%esp), %ecx
	mull	%ecx		/* 64-bit result returned in %edx:%eax */
	ret
	SET_SIZE(mul32)

#endif	/* __i386 */
#endif	/* __lint */
2727
#if defined(notused)
#if defined(__lint)
/* ARGSUSED */
void
load_pte64(uint64_t *pte, uint64_t pte_value)
{}
#else	/* __lint */
	/*
	 * Store a 64-bit PTE value (i386: the value arrives as two
	 * 32-bit halves on the stack).  The high word is written first.
	 * NOTE(review): the two 32-bit stores are not a single atomic
	 * 64-bit store -- presumably why this is under "notused".
	 */
	.globl load_pte64
load_pte64:
	movl	4(%esp), %eax		/* %eax = pte */
	movl	8(%esp), %ecx		/* %ecx = low half of pte_value */
	movl	12(%esp), %edx		/* %edx = high half of pte_value */
	movl	%edx, 4(%eax)
	movl	%ecx, (%eax)
	ret
#endif	/* __lint */
#endif	/* notused */
2745
#if defined(__lint)

/*ARGSUSED*/
void
scan_memory(caddr_t addr, size_t size)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void scan_memory(caddr_t addr, size_t size)
	 * Read (touch) every word in the range; the values loaded are
	 * discarded.  The size is truncated down to whole words.
	 */
	ENTRY(scan_memory)
	shrq	$3, %rsi	/* convert %rsi from byte to quadword count */
	jz	.scanm_done
	movq	%rsi, %rcx	/* move count into rep control register */
	movq	%rdi, %rsi	/* move addr into lodsq control reg. */
	rep lodsq		/* scan the memory range */
.scanm_done:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(scan_memory)

#elif defined(__i386)

	ENTRY(scan_memory)
	pushl	%ecx
	pushl	%esi
	movl	16(%esp), %ecx	/* move 2nd arg into rep control register */
	shrl	$2, %ecx	/* convert from byte count to word count */
	jz	.scanm_done
	movl	12(%esp), %esi	/* move 1st arg into lodsw control register */
	.byte	0xf3		/* rep prefix.  lame assembler.  sigh. */
	lodsl
.scanm_done:
	popl	%esi
	popl	%ecx
	ret
	SET_SIZE(scan_memory)

#endif	/* __i386 */
#endif	/* __lint */
2787
2788
#if defined(__lint)

/*ARGSUSED */
int
lowbit(ulong_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int lowbit(ulong_t i)
	 * Return the 1-based position of the least significant set bit
	 * of 'i', or 0 if 'i' is zero.
	 *
	 * The destination of bsf is architecturally undefined when the
	 * source is zero (Intel SDM), so we must not rely on a preloaded
	 * destination surviving; key off ZF with cmovnz instead.
	 */
	ENTRY(lowbit)
	movl	$-1, %eax	/* assume no bits set */
	bsfq	%rdi, %rdi	/* %rdi = bit index; ZF set iff i == 0 */
	cmovnz	%edi, %eax	/* take the index only if one was found */
	incl	%eax		/* convert 0-based index to 1-based */
	ret
	SET_SIZE(lowbit)

#elif defined(__i386)

	ENTRY(lowbit)
	bsfl	4(%esp), %eax	/* ZF set (and %eax undefined) iff i == 0 */
	jz	1f
	incl	%eax		/* 0-based index -> 1-based bit position */
	ret
1:
	xorl	%eax, %eax	/* no bits set: return 0 */
	ret
	SET_SIZE(lowbit)

#endif	/* __i386 */
#endif	/* __lint */
2818
#if defined(__lint)

/*ARGSUSED*/
int
highbit(ulong_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int highbit(ulong_t i)
	 * Return the 1-based position of the most significant set bit
	 * of 'i', or 0 if 'i' is zero.
	 *
	 * The destination of bsr is architecturally undefined when the
	 * source is zero (Intel SDM), so we must not rely on a preloaded
	 * destination surviving; key off ZF with cmovnz instead.
	 */
	ENTRY(highbit)
	movl	$-1, %eax	/* assume no bits set */
	bsrq	%rdi, %rdi	/* %rdi = bit index; ZF set iff i == 0 */
	cmovnz	%edi, %eax	/* take the index only if one was found */
	incl	%eax		/* convert 0-based index to 1-based */
	ret
	SET_SIZE(highbit)

#elif defined(__i386)

	ENTRY(highbit)
	bsrl	4(%esp), %eax	/* ZF set (and %eax undefined) iff i == 0 */
	jz	1f
	incl	%eax		/* 0-based index -> 1-based bit position */
	ret
1:
	xorl	%eax, %eax	/* no bits set: return 0 */
	ret
	SET_SIZE(highbit)

#endif	/* __i386 */
#endif	/* __lint */
2848
#if defined(__lint)

/*ARGSUSED*/
uint64_t
rdmsr(uint_t r)
{ return (0); }

/*ARGSUSED*/
void
wrmsr(uint_t r, const uint64_t val)
{}

/*ARGSUSED*/
uint64_t
xrdmsr(uint_t r)
{ return (0); }

/*ARGSUSED*/
void
xwrmsr(uint_t r, const uint64_t val)
{}

void
invalidate_cache(void)
{}

/*ARGSUSED*/
uint64_t
get_xcr(uint_t r)
{ return (0); }

/*ARGSUSED*/
void
set_xcr(uint_t r, const uint64_t val)
{}

#else  /* __lint */

#define	XMSR_ACCESS_VAL		$0x9c5a203a

#if defined(__amd64)

	/* Read MSR 'r'; return its 64-bit value */
	ENTRY(rdmsr)
	movl	%edi, %ecx
	rdmsr
	shlq	$32, %rdx	/* combine %edx:%eax into %rax */
	orq	%rdx, %rax
	ret
	SET_SIZE(rdmsr)

	/* Write the 64-bit value 'val' to MSR 'r' */
	ENTRY(wrmsr)
	movq	%rsi, %rdx
	shrq	$32, %rdx	/* %edx = high half, %eax = low half */
	movl	%esi, %eax
	movl	%edi, %ecx
	wrmsr
	ret
	SET_SIZE(wrmsr)

	/* Like rdmsr(), but with the access key some MSRs need in %edi */
	ENTRY(xrdmsr)
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%edi, %ecx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	rdmsr
	shlq	$32, %rdx
	orq	%rdx, %rax
	leave
	ret
	SET_SIZE(xrdmsr)

	/* Like wrmsr(), but with the access key some MSRs need in %edi */
	ENTRY(xwrmsr)
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%edi, %ecx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	movq	%rsi, %rdx
	shrq	$32, %rdx
	movl	%esi, %eax
	wrmsr
	leave
	ret
	SET_SIZE(xwrmsr)

	/* Read extended control register 'r' (xgetbv hand-encoded) */
	ENTRY(get_xcr)
	movl	%edi, %ecx
	#xgetbv
	.byte	0x0f,0x01,0xd0
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	SET_SIZE(get_xcr)

	/* Write extended control register 'r' (xsetbv hand-encoded) */
	ENTRY(set_xcr)
	movq	%rsi, %rdx
	shrq	$32, %rdx
	movl	%esi, %eax
	movl	%edi, %ecx
	#xsetbv
	.byte	0x0f,0x01,0xd1
	ret
	SET_SIZE(set_xcr)

#elif defined(__i386)

	ENTRY(rdmsr)
	movl	4(%esp), %ecx
	rdmsr			/* 64-bit result returned in %edx:%eax */
	ret
	SET_SIZE(rdmsr)

	ENTRY(wrmsr)
	movl	4(%esp), %ecx
	movl	8(%esp), %eax
	movl	12(%esp), %edx
	wrmsr
	ret
	SET_SIZE(wrmsr)

	ENTRY(xrdmsr)
	pushl	%ebp
	movl	%esp, %ebp
	movl	8(%esp), %ecx
	pushl	%edi
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	rdmsr
	popl	%edi
	leave
	ret
	SET_SIZE(xrdmsr)

	ENTRY(xwrmsr)
	pushl	%ebp
	movl	%esp, %ebp
	movl	8(%esp), %ecx
	movl	12(%esp), %eax
	movl	16(%esp), %edx
	pushl	%edi
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	wrmsr
	popl	%edi
	leave
	ret
	SET_SIZE(xwrmsr)

	ENTRY(get_xcr)
	movl	4(%esp), %ecx
	#xgetbv
	.byte	0x0f,0x01,0xd0
	ret
	SET_SIZE(get_xcr)

	ENTRY(set_xcr)
	movl	4(%esp), %ecx
	movl	8(%esp), %eax
	movl	12(%esp), %edx
	#xsetbv
	.byte	0x0f,0x01,0xd1
	ret
	SET_SIZE(set_xcr)

#endif	/* __i386 */

	/* Write back and invalidate all processor caches */
	ENTRY(invalidate_cache)
	wbinvd
	ret
	SET_SIZE(invalidate_cache)

#endif	/* __lint */
3018
3019#if defined(__lint)
3020
3021/*ARGSUSED*/
3022void
3023getcregs(struct cregs *crp)
3024{}
3025
3026#else	/* __lint */
3027
3028#if defined(__amd64)
3029
	/*
	 * void getcregs(struct cregs *crp)
	 * Capture the control registers and descriptor-table registers
	 * into *crp (amd64 version).
	 */
	ENTRY_NP(getcregs)
#if defined(__xpv)
	/*
	 * Only a few of the hardware control registers or descriptor tables
	 * are directly accessible to us, so just zero the structure.
	 *
	 * XXPV	Perhaps it would be helpful for the hypervisor to return
	 *	virtualized versions of these for post-mortem use.
	 *	(Need to reevaluate - perhaps it already does!)
	 */
	pushq	%rdi		/* save *crp */
	movq	$CREGSZ, %rsi
	call	bzero
	popq	%rdi

	/*
	 * Dump what limited information we can
	 */
	movq	%cr0, %rax
	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
	movq	%cr2, %rax
	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
	movq	%cr3, %rax
	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
	movq	%cr4, %rax
	movq	%rax, CREG_CR4(%rdi)	/* cr4 */

#else	/* __xpv */

#define	GETMSR(r, off, d)	\
	movl	$r, %ecx;	\
	rdmsr;			\
	movl	%eax, off(d);	\
	movl	%edx, off+4(d)

	/* zero-pad each field first: the s*dt stores write < 8 bytes */
	xorl	%eax, %eax
	movq	%rax, CREG_GDT+8(%rdi)
	sgdt	CREG_GDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_IDT+8(%rdi)
	sidt	CREG_IDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_LDT(%rdi)
	sldt	CREG_LDT(%rdi)		/* 2 bytes */
	movq	%rax, CREG_TASKR(%rdi)
	str	CREG_TASKR(%rdi)	/* 2 bytes */
	movq	%cr0, %rax
	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
	movq	%cr2, %rax
	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
	movq	%cr3, %rax
	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
	movq	%cr4, %rax
	movq	%rax, CREG_CR4(%rdi)	/* cr4 */
	movq	%cr8, %rax
	movq	%rax, CREG_CR8(%rdi)	/* cr8 */
	GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
	GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
#endif	/* __xpv */
	ret
	SET_SIZE(getcregs)

#undef GETMSR
3091
3092#elif defined(__i386)
3093
	/*
	 * void getcregs(struct cregs *crp)
	 *
	 * i386 version: snapshot control registers and descriptor-table
	 * registers into *crp (passed on the stack).
	 */
	ENTRY_NP(getcregs)
#if defined(__xpv)
	/*
	 * Only a few of the hardware control registers or descriptor tables
	 * are directly accessible to us, so just zero the structure.
	 *
	 * XXPV	Perhaps it would be helpful for the hypervisor to return
	 *	virtualized versions of these for post-mortem use.
	 *	(Need to reevaluate - perhaps it already does!)
	 */
	movl	4(%esp), %edx
	pushl	$CREGSZ
	pushl	%edx
	call	bzero
	addl	$8, %esp
	movl	4(%esp), %edx

	/*
	 * Dump what limited information we can
	 */
	movl	%cr0, %eax
	movl	%eax, CREG_CR0(%edx)	/* cr0 */
	movl	%cr2, %eax
	movl	%eax, CREG_CR2(%edx)	/* cr2 */
	movl	%cr3, %eax
	movl	%eax, CREG_CR3(%edx)	/* cr3 */
	movl	%cr4, %eax
	movl	%eax, CREG_CR4(%edx)	/* cr4 */

#else	/* __xpv */

	movl	4(%esp), %edx
	/* sgdt/sidt store only 6 bytes here; pre-zero the slot tails */
	movw	$0, CREG_GDT+6(%edx)
	movw	$0, CREG_IDT+6(%edx)
	sgdt	CREG_GDT(%edx)		/* gdt */
	sidt	CREG_IDT(%edx)		/* idt */
	sldt	CREG_LDT(%edx)		/* ldt */
	str	CREG_TASKR(%edx)	/* task */
	movl	%cr0, %eax
	movl	%eax, CREG_CR0(%edx)	/* cr0 */
	movl	%cr2, %eax
	movl	%eax, CREG_CR2(%edx)	/* cr2 */
	movl	%cr3, %eax
	movl	%eax, CREG_CR3(%edx)	/* cr3 */
	/*
	 * Only read %cr4 if the CPU has it; the largepage feature bit is
	 * used as the proxy here (presumably largepage support implies
	 * %cr4 exists - predates %cr4 on very old CPUs).
	 */
	bt	$X86FSET_LARGEPAGE, x86_featureset
	jnc	.nocr4
	movl	%cr4, %eax
	movl	%eax, CREG_CR4(%edx)	/* cr4 */
	jmp	.skip
.nocr4:
	movl	$0, CREG_CR4(%edx)
.skip:
#endif
	ret
	SET_SIZE(getcregs)
3149
3150#endif	/* __i386 */
3151#endif	/* __lint */
3152
3153
3154/*
3155 * A panic trigger is a word which is updated atomically and can only be set
3156 * once.  We atomically store 0xDEFACEDD and load the old value.  If the
3157 * previous value was 0, we succeed and return 1; otherwise return 0.
3158 * This allows a partially corrupt trigger to still trigger correctly.  DTrace
3159 * has its own version of this function to allow it to panic correctly from
3160 * probe context.
3161 */
3162#if defined(__lint)
3163
/* Lint-only stubs; the real trigger routines are in assembly below. */
/*ARGSUSED*/
int
panic_trigger(int *tp)
{ return (0); }

/*ARGSUSED*/
int
dtrace_panic_trigger(int *tp)
{ return (0); }
3173
3174#else	/* __lint */
3175
3176#if defined(__amd64)
3177
	/*
	 * int panic_trigger(int *tp)
	 *
	 * Atomically exchange *tp with 0xdefacedd; return 1 if the old
	 * value was 0 (we fired the trigger), else 0 (already fired).
	 * Note xchgl with a memory operand is implicitly locked; the
	 * explicit lock prefix is retained for clarity.
	 */
	ENTRY_NP(panic_trigger)
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	lock
	  xchgl	%edx, (%rdi)		/* %edx = previous trigger value */
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax		/* already triggered: return 0 */
	ret
0:	movl	$1, %eax		/* we fired it: return 1 */
	ret
	SET_SIZE(panic_trigger)
3190
	/*
	 * int dtrace_panic_trigger(int *tp)
	 *
	 * Identical logic to panic_trigger(); kept as a separate copy so
	 * DTrace can panic safely from probe context (see block comment
	 * above these routines).
	 */
	ENTRY_NP(dtrace_panic_trigger)
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	lock
	  xchgl	%edx, (%rdi)		/* %edx = previous trigger value */
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax		/* already triggered: return 0 */
	ret
0:	movl	$1, %eax		/* we fired it: return 1 */
	ret
	SET_SIZE(dtrace_panic_trigger)
3203
3204#elif defined(__i386)
3205
	/*
	 * int panic_trigger(int *tp)
	 *
	 * i386 version: atomically exchange *tp with 0xdefacedd and
	 * return 1 iff the previous value was 0.
	 */
	ENTRY_NP(panic_trigger)
	movl	4(%esp), %edx		/ %edx = address of trigger
	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
	lock				/ assert lock
	xchgl %eax, (%edx)		/ exchange %eax and the trigger
	cmpl	$0, %eax		/ if (%eax == 0x0)
	je	0f			/   return (1);
	movl	$0, %eax		/ else
	ret				/   return (0);
0:	movl	$1, %eax
	ret
	SET_SIZE(panic_trigger)
3218
	/*
	 * int dtrace_panic_trigger(int *tp)
	 *
	 * Separate copy of panic_trigger() for use from DTrace probe
	 * context (see block comment above these routines).
	 */
	ENTRY_NP(dtrace_panic_trigger)
	movl	4(%esp), %edx		/ %edx = address of trigger
	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
	lock				/ assert lock
	xchgl %eax, (%edx)		/ exchange %eax and the trigger
	cmpl	$0, %eax		/ if (%eax == 0x0)
	je	0f			/   return (1);
	movl	$0, %eax		/ else
	ret				/   return (0);
0:	movl	$1, %eax
	ret
	SET_SIZE(dtrace_panic_trigger)
3231
3232#endif	/* __i386 */
3233#endif	/* __lint */
3234
3235/*
3236 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
3237 * into the panic code implemented in panicsys().  vpanic() is responsible
3238 * for passing through the format string and arguments, and constructing a
3239 * regs structure on the stack into which it saves the current register
3240 * values.  If we are not dying due to a fatal trap, these registers will
3241 * then be preserved in panicbuf as the current processor state.  Before
3242 * invoking panicsys(), vpanic() activates the first panic trigger (see
3243 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
3244 * DTrace takes a slightly different panic path if it must panic from probe
3245 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
3246 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
3247 * branches back into vpanic().
3248 */
3249#if defined(__lint)
3250
/* Lint-only stubs; the real panic entry points are in assembly below. */
/*ARGSUSED*/
void
vpanic(const char *format, va_list alist)
{}

/*ARGSUSED*/
void
dtrace_vpanic(const char *format, va_list alist)
{}
3260
3261#else	/* __lint */
3262
3263#if defined(__amd64)
3264
	/*
	 * void vpanic(const char *format, va_list alist)
	 *
	 * Common panic entry: push the caller-visible register state,
	 * fire the panic trigger, switch to panic_stack if we won the
	 * trigger, build a struct regs on the stack reflecting state at
	 * entry, then call panicsys(format, alist, &regs, on_panic_stack).
	 * dtrace_vpanic() below joins at vpanic_common with its trigger
	 * result already in %eax.
	 */
	ENTRY_NP(vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip | 	0x60	*/
	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
	pushfq					/* | rfl  |	0x50	*/
	pushq	%r11				/* | %r11 |	0x48	*/
	pushq	%r10				/* | %r10 |	0x40	*/
	pushq	%rbx				/* | %rbx |	0x38	*/
	pushq	%rax				/* | %rax |	0x30	*/
	pushq	%r9				/* | %r9  |	0x28	*/
	pushq	%r8				/* | %r8  |	0x20	*/
	pushq	%rcx				/* | %rcx |	0x18	*/
	pushq	%rdx				/* | %rdx |	0x10	*/
	pushq	%rsi				/* | %rsi |	0x8 alist */
	pushq	%rdi				/* | %rdi |	0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	panic_trigger			/* %eax = panic_trigger() */

vpanic_common:
	/*
	 * The panic_trigger result is in %eax from the call above, and
	 * dtrace_panic places it in %eax before branching here.
	 * The rdmsr instructions that follow below will clobber %eax so
	 * we stash the panic_trigger result in %r11d.
	 */
	movl	%eax, %r11d
	cmpl	$0, %r11d
	je	0f

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	leaq	panic_stack(%rip), %rsp
	addq	$PANICSTKSIZE, %rsp
0:	subq	$REGSIZE, %rsp
	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
	movq	0x0(%rbx), %rcx
	movq	%rcx, REGOFF_RDI(%rsp)
	movq	0x8(%rbx), %rcx
	movq	%rcx, REGOFF_RSI(%rsp)
	movq	0x10(%rbx), %rcx
	movq	%rcx, REGOFF_RDX(%rsp)
	movq	0x18(%rbx), %rcx
	movq	%rcx, REGOFF_RCX(%rsp)
	movq	0x20(%rbx), %rcx

	movq	%rcx, REGOFF_R8(%rsp)
	movq	0x28(%rbx), %rcx
	movq	%rcx, REGOFF_R9(%rsp)
	movq	0x30(%rbx), %rcx
	movq	%rcx, REGOFF_RAX(%rsp)
	movq	0x38(%rbx), %rcx
	movq	%rcx, REGOFF_RBX(%rsp)
	movq	0x58(%rbx), %rcx		/* saved %rbp (see layout above) */

	movq	%rcx, REGOFF_RBP(%rsp)
	movq	0x40(%rbx), %rcx
	movq	%rcx, REGOFF_R10(%rsp)
	movq	0x48(%rbx), %rcx
	movq	%rcx, REGOFF_R11(%rsp)
	movq	%r12, REGOFF_R12(%rsp)

	movq	%r13, REGOFF_R13(%rsp)
	movq	%r14, REGOFF_R14(%rsp)
	movq	%r15, REGOFF_R15(%rsp)

	xorl	%ecx, %ecx
	movw	%ds, %cx
	movq	%rcx, REGOFF_DS(%rsp)
	movw	%es, %cx
	movq	%rcx, REGOFF_ES(%rsp)
	movw	%fs, %cx
	movq	%rcx, REGOFF_FS(%rsp)
	movw	%gs, %cx
	movq	%rcx, REGOFF_GS(%rsp)

	movq	$0, REGOFF_TRAPNO(%rsp)

	movq	$0, REGOFF_ERR(%rsp)
	leaq	vpanic(%rip), %rcx
	movq	%rcx, REGOFF_RIP(%rsp)
	movw	%cs, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_CS(%rsp)
	movq	0x50(%rbx), %rcx		/* saved rflags */
	movq	%rcx, REGOFF_RFL(%rsp)
	movq	%rbx, %rcx
	addq	$0x60, %rcx			/* %rsp value at entry */
	movq	%rcx, REGOFF_RSP(%rsp)
	movw	%ss, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_SS(%rsp)

	/*
	 * panicsys(format, alist, rp, on_panic_stack)
	 */
	movq	REGOFF_RDI(%rsp), %rdi		/* format */
	movq	REGOFF_RSI(%rsp), %rsi		/* alist */
	movq	%rsp, %rdx			/* struct regs */
	movl	%r11d, %ecx			/* on_panic_stack */
	call	panicsys
	addq	$REGSIZE, %rsp
	popq	%rdi
	popq	%rsi
	popq	%rdx
	popq	%rcx
	popq	%r8
	popq	%r9
	popq	%rax
	popq	%rbx
	popq	%r10
	popq	%r11
	popfq
	leave
	ret
	SET_SIZE(vpanic)
3389
	/*
	 * void dtrace_vpanic(const char *format, va_list alist)
	 *
	 * DTrace probe-context panic entry: identical stack setup to
	 * vpanic(), but uses dtrace_panic_trigger() and then joins the
	 * common path at vpanic_common.
	 */
	ENTRY_NP(dtrace_vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip | 	0x60	*/
	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
	pushfq					/* | rfl  |	0x50	*/
	pushq	%r11				/* | %r11 |	0x48	*/
	pushq	%r10				/* | %r10 |	0x40	*/
	pushq	%rbx				/* | %rbx |	0x38	*/
	pushq	%rax				/* | %rax |	0x30	*/
	pushq	%r9				/* | %r9  |	0x28	*/
	pushq	%r8				/* | %r8  |	0x20	*/
	pushq	%rcx				/* | %rcx |	0x18	*/
	pushq	%rdx				/* | %rdx |	0x10	*/
	pushq	%rsi				/* | %rsi |	0x8 alist */
	pushq	%rdi				/* | %rdi |	0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	dtrace_panic_trigger	/* %eax = dtrace_panic_trigger() */
	jmp	vpanic_common

	SET_SIZE(dtrace_vpanic)
3413
3414#elif defined(__i386)
3415
	/*
	 * void vpanic(const char *format, va_list alist)
	 *
	 * i386 version of the common panic entry: save registers, fire
	 * the panic trigger, switch to panic_stack if we won, build a
	 * struct regs, then call panicsys(format, alist, &regs,
	 * on_panic_stack).  dtrace_vpanic() joins at vpanic_common.
	 */
	ENTRY_NP(vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip | 20
	movl	%esp, %ebp			/ | %ebp | 16
	pushl	%eax				/ | %eax | 12
	pushl	%ebx				/ | %ebx |  8
	pushl	%ecx				/ | %ecx |  4
	pushl	%edx				/ | %edx |  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	panic_trigger			/ %eax = panic_trigger()
	addl	$4, %esp			/ reset stack pointer

vpanic_common:
	cmpl	$0, %eax			/ if (%eax == 0)
	je	0f				/   goto 0f;

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	lea	panic_stack, %esp		/ %esp  = panic_stack
	addl	$PANICSTKSIZE, %esp		/ %esp += PANICSTKSIZE

0:	subl	$REGSIZE, %esp			/ allocate struct regs

	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
#if !defined(__GNUC_AS__)
	movw	%gs, %edx
	movl	%edx, REGOFF_GS(%esp)
	movw	%fs, %edx
	movl	%edx, REGOFF_FS(%esp)
	movw	%es, %edx
	movl	%edx, REGOFF_ES(%esp)
	movw	%ds, %edx
	movl	%edx, REGOFF_DS(%esp)
#else	/* __GNUC_AS__ */
	mov	%gs, %edx
	mov	%edx, REGOFF_GS(%esp)
	mov	%fs, %edx
	mov	%edx, REGOFF_FS(%esp)
	mov	%es, %edx
	mov	%edx, REGOFF_ES(%esp)
	mov	%ds, %edx
	mov	%edx, REGOFF_DS(%esp)
#endif	/* __GNUC_AS__ */
	movl	%edi, REGOFF_EDI(%esp)
	movl	%esi, REGOFF_ESI(%esp)
	movl	16(%ebx), %ecx			/ saved %ebp (see layout above)
	movl	%ecx, REGOFF_EBP(%esp)
	movl	%ebx, %ecx
	addl	$20, %ecx			/ %esp value at entry
	movl	%ecx, REGOFF_ESP(%esp)
	movl	8(%ebx), %ecx
	movl	%ecx, REGOFF_EBX(%esp)
	movl	0(%ebx), %ecx
	movl	%ecx, REGOFF_EDX(%esp)
	movl	4(%ebx), %ecx
	movl	%ecx, REGOFF_ECX(%esp)
	movl	12(%ebx), %ecx
	movl	%ecx, REGOFF_EAX(%esp)
	movl	$0, REGOFF_TRAPNO(%esp)
	movl	$0, REGOFF_ERR(%esp)
	lea	vpanic, %ecx
	movl	%ecx, REGOFF_EIP(%esp)
#if !defined(__GNUC_AS__)
	movw	%cs, %edx
#else	/* __GNUC_AS__ */
	mov	%cs, %edx
#endif	/* __GNUC_AS__ */
	movl	%edx, REGOFF_CS(%esp)
	pushfl
	popl	%ecx
#if defined(__xpv)
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	CURTHREAD(%edx)
	KPREEMPT_DISABLE(%edx)
	EVENT_MASK_TO_IE(%edx, %ecx)
	CURTHREAD(%edx)
	KPREEMPT_ENABLE_NOKP(%edx)
#endif
	movl	%ecx, REGOFF_EFL(%esp)
	movl	$0, REGOFF_UESP(%esp)
#if !defined(__GNUC_AS__)
	movw	%ss, %edx
#else	/* __GNUC_AS__ */
	mov	%ss, %edx
#endif	/* __GNUC_AS__ */
	movl	%edx, REGOFF_SS(%esp)

	movl	%esp, %ecx			/ %ecx = &regs
	pushl	%eax				/ push on_panic_stack
	pushl	%ecx				/ push &regs
	movl	12(%ebp), %ecx			/ %ecx = alist
	pushl	%ecx				/ push alist
	movl	8(%ebp), %ecx			/ %ecx = format
	pushl	%ecx				/ push format
	call	panicsys			/ panicsys();
	addl	$16, %esp			/ pop arguments

	addl	$REGSIZE, %esp
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	leave
	ret
	SET_SIZE(vpanic)
3533
	/*
	 * void dtrace_vpanic(const char *format, va_list alist)
	 *
	 * DTrace probe-context panic entry: same stack setup as vpanic(),
	 * but calls dtrace_panic_trigger() and joins at vpanic_common.
	 */
	ENTRY_NP(dtrace_vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip | 20
	movl	%esp, %ebp			/ | %ebp | 16
	pushl	%eax				/ | %eax | 12
	pushl	%ebx				/ | %ebx |  8
	pushl	%ecx				/ | %ecx |  4
	pushl	%edx				/ | %edx |  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	dtrace_panic_trigger		/ %eax = dtrace_panic_trigger()
	addl	$4, %esp			/ reset stack pointer
	jmp	vpanic_common			/ jump back to common code

	SET_SIZE(dtrace_vpanic)
3552
3553#endif	/* __i386 */
3554#endif	/* __lint */
3555
3556#if defined(__lint)
3557
/* Lint-only prototypes: hres_tick() and the time globals it maintains. */
void
hres_tick(void)
{}

int64_t timedelta;
hrtime_t hres_last_tick;
volatile timestruc_t hrestime;
int64_t hrestime_adj;
volatile int hres_lock;
hrtime_t hrtime_base;
3568
3569#else	/* __lint */
3570
	/*
	 * Time-of-day / high-resolution-time state maintained by
	 * hres_tick() below and consumed by the C clock code.
	 * DGDEF3 presumably takes (name, size, alignment) - confirm in
	 * the asm linkage headers.
	 */
	DGDEF3(hrestime, _MUL(2, CLONGSIZE), 8)
	.NWORD	0, 0

	DGDEF3(hrestime_adj, 8, 8)
	.long	0, 0

	DGDEF3(hres_last_tick, 8, 8)
	.long	0, 0

	DGDEF3(timedelta, 8, 8)
	.long	0, 0

	DGDEF3(hres_lock, 4, 8)
	.long	0

	/*
	 * initialized to a non zero value to make pc_gethrtime()
	 * work correctly even before clock is initialized
	 */
	DGDEF3(hrtime_base, 8, 8)
	.long	_MUL(NSEC_PER_CLOCK_TICK, 6), 0

	DGDEF3(adj_shift, 4, 4)
	.long	ADJ_SHIFT
3595
3596#if defined(__amd64)
3597
	/*
	 * void hres_tick(void)
	 *
	 * Per-clock-tick update of hrtime_base, hrestime and
	 * hres_last_tick, performed under CLOCK_LOCK (the low byte of
	 * hres_lock, acquired below with an xchgb spin loop and released
	 * by incrementing hres_lock).
	 */
	ENTRY_NP(hres_tick)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef(%rip)
	movq	%rax, %r8		/* %r8 = current hrtime */

	leaq	hres_lock(%rip), %rax
	movb	$-1, %dl
.CL1:
	xchgb	%dl, (%rax)		/* try to grab the lock byte */
	testb	%dl, %dl
	jz	.CL3			/* got it */
.CL2:
	cmpb	$0, (%rax)		/* possible to get lock? */
	pause
	jne	.CL2
	jmp	.CL1			/* yes, try again */
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * a timestruc_t (sec, nsec)
	 */
	leaq	hres_last_tick(%rip), %rax
	movq	%r8, %r11
	subq	(%rax), %r8		/* %r8 = interval since last tick */
	addq	%r8, hrtime_base(%rip)	/* add interval to hrtime_base */
	addq	%r8, hrestime+8(%rip)	/* add interval to hrestime.tv_nsec */
	/*
	 * Now that we have CLOCK_LOCK, we can update hres_last_tick
	 */
	movq	%r11, (%rax)

	call	__adj_hrestime

	/*
	 * release the hres_lock
	 */
	incl	hres_lock(%rip)
	leave
	ret
	SET_SIZE(hres_tick)
3648
3649#elif defined(__i386)
3650
	/*
	 * void hres_tick(void)
	 *
	 * i386 version: per-clock-tick update of hrtime_base, hrestime
	 * and hres_last_tick under CLOCK_LOCK, with 64-bit arithmetic
	 * done in 32-bit register pairs.  Falls through into
	 * __adj_hrestime, which applies any pending hrestime_adj and
	 * normalizes hrestime.tv_nsec.
	 */
	ENTRY_NP(hres_tick)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%esi
	pushl	%ebx

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef
	movl	%eax, %ebx		/ %esi:%ebx = current hrtime
	movl	%edx, %esi

	movl	$hres_lock, %eax
	movl	$-1, %edx
.CL1:
	xchgb	%dl, (%eax)
	testb	%dl, %dl
	jz	.CL3			/ got it
.CL2:
	cmpb	$0, (%eax)		/ possible to get lock?
	pause
	jne	.CL2
	jmp	.CL1			/ yes, try again
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * timestruc_t (sec, nsec)
	 */

	lea	hres_last_tick, %eax

	movl	%ebx, %edx
	movl	%esi, %ecx

	subl	(%eax), %edx		/ %ecx:%edx = interval
	sbbl	4(%eax), %ecx

	addl	%edx, hrtime_base	/ add interval to hrtime_base
	adcl	%ecx, hrtime_base+4

	addl	%edx, hrestime+4	/ add interval to hrestime.tv_nsec

	/
	/ Now that we have CLOCK_LOCK, we can update hres_last_tick.
	/
	movl	%ebx, (%eax)
	movl	%esi,  4(%eax)

	/ get hrestime at this moment. used as base for pc_gethrestime
	/
	/ Apply adjustment, if any
	/
	/ #define HRES_ADJ	(NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
	/ (max_hres_adj)
	/
	/ void
	/ adj_hrestime()
	/ {
	/	long long adj;
	/
	/	if (hrestime_adj == 0)
	/		adj = 0;
	/	else if (hrestime_adj > 0) {
	/		if (hrestime_adj < HRES_ADJ)
	/			adj = hrestime_adj;
	/		else
	/			adj = HRES_ADJ;
	/	}
	/	else {
	/		if (hrestime_adj < -(HRES_ADJ))
	/			adj = -(HRES_ADJ);
	/		else
	/			adj = hrestime_adj;
	/	}
	/
	/	timedelta -= adj;
	/	hrestime_adj = timedelta;
	/	hrestime.tv_nsec += adj;
	/
	/	while (hrestime.tv_nsec >= NANOSEC) {
	/		one_sec++;
	/		hrestime.tv_sec++;
	/		hrestime.tv_nsec -= NANOSEC;
	/	}
	/ }
__adj_hrestime:
	movl	hrestime_adj, %esi	/ if (hrestime_adj == 0)
	movl	hrestime_adj+4, %edx
	andl	%esi, %esi
	jne	.CL4			/ no
	andl	%edx, %edx
	jne	.CL4			/ no
	subl	%ecx, %ecx		/ yes, adj = 0;
	subl	%edx, %edx
	jmp	.CL5
.CL4:
	subl	%ecx, %ecx
	subl	%eax, %eax
	subl	%esi, %ecx
	sbbl	%edx, %eax
	andl	%eax, %eax		/ if (hrestime_adj > 0)
	jge	.CL6

	/ In the following comments, HRES_ADJ is used, while in the code
	/ max_hres_adj is used.
	/
	/ The test for "hrestime_adj < HRES_ADJ" is complicated because
	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
	/ on the logical equivalence of:
	/
	/	!(hrestime_adj < HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
	/
	/ which computes whether or not the least significant 32-bits
	/ of hrestime_adj is greater than HRES_ADJ, followed by:
	/
	/	Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry whenever step 1 is true or the most
	/ significant long of the longlong hrestime_adj is non-zero.

	movl	max_hres_adj, %ecx	/ hrestime_adj is positive
	subl	%esi, %ecx
	movl	%edx, %eax
	adcl	$-1, %eax
	jnc	.CL7
	movl	max_hres_adj, %ecx	/ adj = HRES_ADJ;
	subl	%edx, %edx
	jmp	.CL5

	/ The following computation is similar to the one above.
	/
	/ The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
	/ on the logical equivalence of:
	/
	/	(hrestime_adj > -HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ + lsw(hrestime_adj)) generates a Carry
	/
	/ which means the least significant 32-bits of hrestime_adj is
	/ greater than -HRES_ADJ, followed by:
	/
	/	Previous Carry + 0 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry only when step 1 is true and the most
	/ significant long of the longlong hrestime_adj is -1.

.CL6:					/ hrestime_adj is negative
	movl	%esi, %ecx
	addl	max_hres_adj, %ecx
	movl	%edx, %eax
	adcl	$0, %eax
	jc	.CL7
	xor	%ecx, %ecx
	subl	max_hres_adj, %ecx	/ adj = -(HRES_ADJ);
	movl	$-1, %edx
	jmp	.CL5
.CL7:
	movl	%esi, %ecx		/ adj = hrestime_adj;
.CL5:
	movl	timedelta, %esi
	subl	%ecx, %esi
	movl	timedelta+4, %eax
	sbbl	%edx, %eax
	movl	%esi, timedelta
	movl	%eax, timedelta+4	/ timedelta -= adj;
	movl	%esi, hrestime_adj
	movl	%eax, hrestime_adj+4	/ hrestime_adj = timedelta;
	addl	hrestime+4, %ecx

	movl	%ecx, %eax		/ eax = tv_nsec
1:
	cmpl	$NANOSEC, %eax		/ if ((unsigned long)tv_nsec >= NANOSEC)
	jb	.CL8			/ no
	incl	one_sec			/ yes,  one_sec++;
	incl	hrestime		/ hrestime.tv_sec++;
	addl	$-NANOSEC, %eax		/ tv_nsec -= NANOSEC
	jmp	1b			/ check for more seconds

.CL8:
	movl	%eax, hrestime+4	/ store final into hrestime.tv_nsec
	incl	hres_lock		/ release the hres_lock

	popl	%ebx
	popl	%esi
	leave
	ret
	SET_SIZE(hres_tick)
3851
3852#endif	/* __i386 */
3853#endif	/* __lint */
3854
3855/*
3856 * void prefetch_smap_w(void *)
3857 *
3858 * Prefetch ahead within a linear list of smap structures.
3859 * Not implemented for ia32.  Stub for compatibility.
3860 */
3861
3862#if defined(__lint)
3863
/* Lint-only stub; the assembly version below is a no-op on x86. */
/*ARGSUSED*/
void prefetch_smap_w(void *smp)
{}
3867
3868#else	/* __lint */
3869
	/*
	 * void prefetch_smap_w(void *smp)
	 *
	 * Compatibility no-op on x86 (see comment above).
	 */
	ENTRY(prefetch_smap_w)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_smap_w)
3874
3875#endif	/* __lint */
3876
3877/*
3878 * prefetch_page_r(page_t *)
3879 * issue prefetch instructions for a page_t
3880 */
3881#if defined(__lint)
3882
/* Lint-only stub; the assembly version below is a no-op on x86. */
/*ARGSUSED*/
void
prefetch_page_r(void *pp)
{}
3887
3888#else	/* __lint */
3889
	/*
	 * void prefetch_page_r(page_t *pp)
	 *
	 * Compatibility no-op on x86.
	 */
	ENTRY(prefetch_page_r)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_page_r)
3894
3895#endif	/* __lint */
3896
3897#if defined(__lint)
3898
/* Lint-only stub; real bcmp() (0 == equal, nonzero == differ) is below. */
/*ARGSUSED*/
int
bcmp(const void *s1, const void *s2, size_t count)
{ return (0); }
3903
3904#else   /* __lint */
3905
3906#if defined(__amd64)
3907
	/*
	 * int bcmp(const void *s1, const void *s2, size_t count)
	 *
	 * Defers to memcmp() and collapses its signed result to 0
	 * (equal) or 1 (differ).  Under DEBUG, panics if either pointer
	 * is below postbootkernelbase.
	 */
	ENTRY(bcmp)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %r11
	cmpq	%r11, %rdi
	jb	0f			/* s1 below kernelbase: panic */
	cmpq	%r11, %rsi
	jnb	1f
0:	leaq	.bcmp_panic_msg(%rip), %rdi
	xorl	%eax, %eax		/* no FP args to variadic panic() */
	call	panic
1:
#endif	/* DEBUG */
	call	memcmp
	testl	%eax, %eax
	setne	%dl			/* %dl = 1 iff memcmp() != 0 */
	leave
	movzbl	%dl, %eax
	ret
	SET_SIZE(bcmp)
3929
3930#elif defined(__i386)
3931
/* Stack offsets of the three bcmp() arguments, relative to %ebp. */
#define	ARG_S1		8
#define	ARG_S2		12
#define	ARG_LENGTH	16

	/*
	 * int bcmp(const void *s1, const void *s2, size_t count)
	 *
	 * i386 version: compare word-at-a-time, falling back to a byte
	 * loop for the tail (or for short buffers).  Returns 0 if equal,
	 * 1 otherwise.  Under DEBUG, panics if either pointer is below
	 * postbootkernelbase.
	 */
	ENTRY(bcmp)
	pushl	%ebp
	movl	%esp, %ebp	/ create new stack frame
#ifdef DEBUG
	movl    postbootkernelbase, %eax
	cmpl    %eax, ARG_S1(%ebp)
	jb	0f
	cmpl    %eax, ARG_S2(%ebp)
	jnb	1f
0:	pushl   $.bcmp_panic_msg
	call    panic
1:
#endif	/* DEBUG */

	pushl	%edi		/ save register variable
	movl	ARG_S1(%ebp), %eax	/ %eax = address of string 1
	movl	ARG_S2(%ebp), %ecx	/ %ecx = address of string 2
	cmpl	%eax, %ecx	/ if the same string
	je	.equal		/ goto .equal
	movl	ARG_LENGTH(%ebp), %edi	/ %edi = length in bytes
	cmpl	$4, %edi	/ if %edi < 4
	jb	.byte_check	/ goto .byte_check
	.align	4
.word_loop:
	movl	(%ecx), %edx	/ move 1 word from (%ecx) to %edx
	leal	-4(%edi), %edi	/ %edi -= 4
	cmpl	(%eax), %edx	/ compare 1 word from (%eax) with %edx
	jne	.word_not_equal	/ if not equal, goto .word_not_equal
	leal	4(%ecx), %ecx	/ %ecx += 4 (next word)
	leal	4(%eax), %eax	/ %eax += 4 (next word)
	cmpl	$4, %edi	/ if %edi >= 4
	jae	.word_loop	/ goto .word_loop
.byte_check:
	cmpl	$0, %edi	/ if %edi == 0
	je	.equal		/ goto .equal
	jmp	.byte_loop	/ goto .byte_loop (checks in bytes)
.word_not_equal:
	leal	4(%edi), %edi	/ %edi += 4 (post-decremented)
	.align	4
.byte_loop:
	movb	(%ecx),	%dl	/ move 1 byte from (%ecx) to %dl
	cmpb	%dl, (%eax)	/ compare %dl with 1 byte from (%eax)
	jne	.not_equal	/ if not equal, goto .not_equal
	incl	%ecx		/ %ecx++ (next byte)
	incl	%eax		/ %eax++ (next byte)
	decl	%edi		/ %edi--
	jnz	.byte_loop	/ if not zero, goto .byte_loop
.equal:
	xorl	%eax, %eax	/ %eax = 0
	popl	%edi		/ restore register variable
	leave			/ restore old stack frame
	ret			/ return (NULL)
	.align	4
.not_equal:
	movl	$1, %eax	/ return 1
	popl	%edi		/ restore register variable
	leave			/ restore old stack frame
	ret			/ return (NULL)
	SET_SIZE(bcmp)
3995
3996#endif	/* __i386 */
3997
3998#ifdef DEBUG
	.text
/* Panic message used by the DEBUG-only kernelbase checks in bcmp(). */
.bcmp_panic_msg:
	.string "bcmp: arguments below kernelbase"
4002#endif	/* DEBUG */
4003
4004#endif	/* __lint */
4005
4006#if defined(__lint)
4007
/*
 * Lint-visible reference implementation: index of the highest set bit
 * in a non-zero 16-bit mask (matches the bsrw-based assembly below).
 */
uint_t
bsrw_insn(uint16_t mask)
{
	uint_t index = sizeof (mask) * NBBY - 1;

	while ((mask & (1 << index)) == 0)
		index--;
	return (index);
}
4017
4018#else	/* __lint */
4019
4020#if defined(__amd64)
4021
	/*
	 * uint_t bsrw_insn(uint16_t mask)
	 *
	 * Index of the highest set bit in mask.  mask must be non-zero:
	 * bsr's destination is undefined for a zero source.
	 */
	ENTRY_NP(bsrw_insn)
	xorl	%eax, %eax		/* clear upper bits of result */
	bsrw	%di, %ax
	ret
	SET_SIZE(bsrw_insn)
4027
4028#elif defined(__i386)
4029
	/*
	 * uint_t bsrw_insn(uint16_t mask)
	 *
	 * i386 version; mask must be non-zero (see amd64 note).
	 */
	ENTRY_NP(bsrw_insn)
	movw	4(%esp), %cx
	xorl	%eax, %eax		/* clear upper bits of result */
	bsrw	%cx, %ax
	ret
	SET_SIZE(bsrw_insn)
4036
4037#endif	/* __i386 */
4038#endif	/* __lint */
4039
4040#if defined(__lint)
4041
/*
 * Lint-only model.  NOTE(review): this model returns the updated word,
 * while the assembly below returns the prior state of the bit (carry
 * from btrl) - confirm which contract callers rely on.
 */
uint_t
atomic_btr32(uint32_t *pending, uint_t pil)
{
	return (*pending &= ~(1 << pil));
}
4047
4048#else	/* __lint */
4049
4050#if defined(__i386)
4051
	/*
	 * uint_t atomic_btr32(uint32_t *pending, uint_t pil)
	 *
	 * Atomically clear bit 'pil' in *pending; return the bit's
	 * previous value (0 or 1, via the carry flag from btrl).
	 */
	ENTRY_NP(atomic_btr32)
	movl	4(%esp), %ecx		/* %ecx = pending */
	movl	8(%esp), %edx		/* %edx = bit number */
	xorl	%eax, %eax
	lock
	btrl	%edx, (%ecx)		/* CF = old bit value */
	setc	%al
	ret
	SET_SIZE(atomic_btr32)
4061
4062#endif	/* __i386 */
4063#endif	/* __lint */
4064
4065#if defined(__lint)
4066
/* Lint-only stub; real stack-switching trampoline is in assembly below. */
/*ARGSUSED*/
void
switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
	    uint_t arg2)
{}
4072
4073#else	/* __lint */
4074
4075#if defined(__amd64)
4076
	/*
	 * void switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t),
	 *     uint_t arg1, uint_t arg2)
	 *
	 * Switch to newsp, call func(arg1, arg2), then restore the
	 * original stack via the saved frame pointer.
	 */
	ENTRY_NP(switch_sp_and_call)
	pushq	%rbp
	movq	%rsp, %rbp		/* set up stack frame */
	movq	%rdi, %rsp		/* switch stack pointer */
	movq	%rdx, %rdi		/* pass func arg 1 */
	movq	%rsi, %r11		/* save function to call */
	movq	%rcx, %rsi		/* pass func arg 2 */
	call	*%r11			/* call function */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)
4088
4089#elif defined(__i386)
4090
	/*
	 * void switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t),
	 *     uint_t arg1, uint_t arg2)
	 *
	 * i386 version of the stack-switching trampoline.
	 */
	ENTRY_NP(switch_sp_and_call)
	pushl	%ebp
	mov	%esp, %ebp		/* set up stack frame */
	movl	8(%ebp), %esp		/* switch stack pointer */
	pushl	20(%ebp)		/* push func arg 2 */
	pushl	16(%ebp)		/* push func arg 1 */
	call	*12(%ebp)		/* call function */
	addl	$8, %esp		/* pop arguments */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)
4102
4103#endif	/* __i386 */
4104#endif	/* __lint */
4105
4106#if defined(__lint)
4107
/* Lint-only stub; the real debugger-entry trap is in assembly below. */
void
kmdb_enter(void)
{}
4111
4112#else	/* __lint */
4113
4114#if defined(__amd64)
4115
	/*
	 * void kmdb_enter(void)
	 *
	 * Enter the kernel debugger via the T_DBGENTR software trap,
	 * with interrupts disabled across the trap.
	 */
	ENTRY_NP(kmdb_enter)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * Save flags, do a 'cli' then return the saved flags
	 */
	call	intr_clear

	int	$T_DBGENTR		/* trap into the debugger */

	/*
	 * Restore the saved flags
	 */
	movq	%rax, %rdi
	call	intr_restore

	leave
	ret
	SET_SIZE(kmdb_enter)
4136
4137#elif defined(__i386)
4138
	/*
	 * void kmdb_enter(void)
	 *
	 * i386 version: trap into the debugger via T_DBGENTR with
	 * interrupts disabled across the trap.
	 */
	ENTRY_NP(kmdb_enter)
	pushl	%ebp
	movl	%esp, %ebp

	/*
	 * Save flags, do a 'cli' then return the saved flags
	 */
	call	intr_clear

	int	$T_DBGENTR		/* trap into the debugger */

	/*
	 * Restore the saved flags
	 */
	pushl	%eax
	call	intr_restore
	addl	$4, %esp

	leave
	ret
	SET_SIZE(kmdb_enter)
4160
4161#endif	/* __i386 */
4162#endif	/* __lint */
4163
4164#if defined(__lint)
4165
/* Lint-only stub; the real single-`ret' routine is in assembly below. */
void
return_instr(void)
{}
4169
4170#else	/* __lint */
4171
	/*
	 * void return_instr(void)
	 *
	 * A bare return, usable as a harmless indirect-call target.
	 */
	ENTRY_NP(return_instr)
	rep;	ret	/* use 2 byte instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(return_instr)
4176
4177#endif	/* __lint */
4178
4179#if defined(__lint)
4180
/* Lint-only stub; the real flags-register reader is in assembly below. */
ulong_t
getflags(void)
{
	return (0);
}
4186
4187#else	/* __lint */
4188
4189#if defined(__amd64)
4190
	/*
	 * ulong_t getflags(void)
	 *
	 * Return the caller's %rflags.  Under the hypervisor, PS_IE is
	 * not meaningful in the hardware flags, so it is synthesized
	 * from the per-vcpu event (upcall) mask instead.
	 */
	ENTRY(getflags)
	pushfq
	popq	%rax
#if defined(__xpv)
	CURTHREAD(%rdi)
	KPREEMPT_DISABLE(%rdi)
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	CURVCPU(%r11)
	andq    $_BITNOT(PS_IE), %rax
	XEN_TEST_UPCALL_MASK(%r11)
	jnz	1f
	orq	$PS_IE, %rax
1:
	KPREEMPT_ENABLE_NOKP(%rdi)
#endif
	ret
	SET_SIZE(getflags)
4210
4211#elif defined(__i386)
4212
	/*
	 * ulong_t getflags(void)
	 *
	 * i386 version; under the hypervisor PS_IE is synthesized from
	 * the per-vcpu event mask (see amd64 version above).
	 */
	ENTRY(getflags)
	pushfl
	popl	%eax
#if defined(__xpv)
	CURTHREAD(%ecx)
	KPREEMPT_DISABLE(%ecx)
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	CURVCPU(%edx)
	andl    $_BITNOT(PS_IE), %eax
	XEN_TEST_UPCALL_MASK(%edx)
	jnz	1f
	orl	$PS_IE, %eax
1:
	KPREEMPT_ENABLE_NOKP(%ecx)
#endif
	ret
	SET_SIZE(getflags)
4232
4233#endif	/* __i386 */
4234
4235#endif	/* __lint */
4236
4237#if defined(__lint)
4238
/* Lint-only stub; returns saved flags, disables interrupts (asm below). */
ftrace_icookie_t
ftrace_interrupt_disable(void)
{ return (0); }
4242
4243#else   /* __lint */
4244
4245#if defined(__amd64)
4246
	/*
	 * ftrace_icookie_t ftrace_interrupt_disable(void)
	 *
	 * Return the current flags as a cookie, then disable interrupts.
	 */
	ENTRY(ftrace_interrupt_disable)
	pushfq
	popq	%rax			/* cookie = saved flags */
	CLI(%rdx)
	ret
	SET_SIZE(ftrace_interrupt_disable)
4253
4254#elif defined(__i386)
4255
	/*
	 * ftrace_icookie_t ftrace_interrupt_disable(void)
	 *
	 * i386 version: return flags as a cookie, disable interrupts.
	 */
	ENTRY(ftrace_interrupt_disable)
	pushfl
	popl	%eax			/* cookie = saved flags */
	CLI(%edx)
	ret
	SET_SIZE(ftrace_interrupt_disable)
4262
4263#endif	/* __i386 */
4264#endif	/* __lint */
4265
4266#if defined(__lint)
4267
/* Lint-only stub; restores flags from the cookie (asm below). */
/*ARGSUSED*/
void
ftrace_interrupt_enable(ftrace_icookie_t cookie)
{}
4272
4273#else	/* __lint */
4274
4275#if defined(__amd64)
4276
	/*
	 * void ftrace_interrupt_enable(ftrace_icookie_t cookie)
	 *
	 * Restore the flags saved by ftrace_interrupt_disable().
	 */
	ENTRY(ftrace_interrupt_enable)
	pushq	%rdi
	popfq
	ret
	SET_SIZE(ftrace_interrupt_enable)
4282
4283#elif defined(__i386)
4284
	/*
	 * void ftrace_interrupt_enable(ftrace_icookie_t cookie)
	 *
	 * i386 version: restore the saved flags cookie.
	 */
	ENTRY(ftrace_interrupt_enable)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	SET_SIZE(ftrace_interrupt_enable)
4291
4292#endif	/* __i386 */
4293#endif	/* __lint */
4294
4295#if defined (__lint)
4296
/*
 * Lint stub for clflush_insn(); the real assembly implementation below
 * flushes the cache line containing addr.  Seen only by lint.
 */
/*ARGSUSED*/
void
clflush_insn(caddr_t addr)
{}
4301
4302#else /* __lint */
4303
4304#if defined (__amd64)
	/*
	 * void clflush_insn(caddr_t addr)
	 *
	 * Flush and invalidate the cache line containing addr (%rdi)
	 * from every level of the processor cache hierarchy.
	 */
	ENTRY(clflush_insn)
	clflush (%rdi)
	ret
	SET_SIZE(clflush_insn)
4309#elif defined (__i386)
	/*
	 * void clflush_insn(caddr_t addr) — i386 variant.
	 *
	 * Flush and invalidate the cache line containing addr (first
	 * stack argument) from every level of the cache hierarchy.
	 */
	ENTRY(clflush_insn)
	movl	4(%esp), %eax		/* %eax = addr */
	clflush (%eax)
	ret
	SET_SIZE(clflush_insn)
4315
4316#endif /* __i386 */
4317#endif /* __lint */
4318
4319#if defined (__lint)
/*
 * Lint stub for mfence_insn(); the real assembly implementation below
 * executes a memory fence.  Seen only by lint.
 */
/*ARGSUSED*/
void
mfence_insn(void)
{}
4324
4325#else /* __lint */
4326
4327#if defined (__amd64)
	/*
	 * void mfence_insn(void)
	 *
	 * Memory fence: serializes all load and store operations issued
	 * before the fence with respect to those issued after it.
	 */
	ENTRY(mfence_insn)
	mfence
	ret
	SET_SIZE(mfence_insn)
4332#elif defined (__i386)
	/*
	 * void mfence_insn(void) — i386 variant.
	 *
	 * Memory fence: serializes all load and store operations issued
	 * before the fence with respect to those issued after it.
	 */
	ENTRY(mfence_insn)
	mfence
	ret
	SET_SIZE(mfence_insn)
4337
4338#endif /* __i386 */
4339#endif /* __lint */
4340
4341/*
4342 * This is how VMware lets the guests figure that they are running
4343 * on top of VMWare platform :
4344 * Write 0xA in the ECX register and put the I/O port address value of
4345 * 0x564D5868 in the EAX register. Then read a word from port 0x5658.
4346 * If VMWare is installed than this code will be executed correctly and
4347 * the EBX register will contain the same I/O port address value of 0x564D5868.
4348 * If VMWare is not installed then OS will return an exception on port access.
4349 */
4350#if defined(__lint)
4351
/*
 * Lint stub for vmware_platform(); the real assembly implementation
 * below probes the VMware backdoor port.  Seen only by lint.
 */
int
vmware_platform(void)
{
	return (1);
}
4354
4355#else
4356
4357#if defined(__amd64)
4358
	/*
	 * int vmware_platform(void)
	 *
	 * Probe the VMware backdoor: load the magic value into %eax, the
	 * command into %ecx, and the backdoor port into %dx, then perform
	 * an inl.  On VMware the access is intercepted and %ebx comes
	 * back holding the magic value; otherwise %ebx keeps the zero we
	 * preloaded.  Returns 1 in %eax if VMware was detected, else 0.
	 * %rbx is callee-saved in the amd64 ABI, so preserve it.
	 */
	ENTRY(vmware_platform)
	pushq	%rbx			/* %rbx is callee-saved */
	xorl	%ebx, %ebx		/* preload 0: reads as "not VMware" */
	movl	$0x564d5868, %eax	/* backdoor magic ('VMXh') */
	movl	$0xa, %ecx		/* backdoor command */
	movl	$0x5658, %edx		/* backdoor I/O port */
	inl	(%dx)
	movl	$0x564d5868, %ecx	/* expected echo of the magic */
	xorl	%eax, %eax		/* assume not VMware */
	cmpl	%ecx, %ebx		/* did %ebx come back with the magic? */
	jne	1f
	incl	%eax			/* yes: return 1 */
1:
	popq	%rbx
	ret
	SET_SIZE(vmware_platform)
4375
4376#elif defined(__i386)
4377
	/*
	 * int vmware_platform(void) — i386 variant.
	 *
	 * Probe the VMware backdoor: load the magic value into %eax, the
	 * command into %ecx, and the backdoor port into %dx, then perform
	 * an inl.  On VMware the access is intercepted and %ebx comes
	 * back holding the magic value; otherwise %ebx keeps the zero we
	 * preloaded.  Returns 1 in %eax if VMware was detected, else 0.
	 * %ebx, %ecx and %edx are saved and restored around the probe.
	 */
	ENTRY(vmware_platform)
	pushl	%ebx			/* %ebx is callee-saved */
	pushl	%ecx
	pushl	%edx
	xorl	%ebx, %ebx		/* preload 0: reads as "not VMware" */
	movl	$0x564d5868, %eax	/* backdoor magic ('VMXh') */
	movl	$0xa, %ecx		/* backdoor command */
	movl	$0x5658, %edx		/* backdoor I/O port */
	inl	(%dx)
	movl	$0x564d5868, %ecx	/* expected echo of the magic */
	xorl	%eax, %eax		/* assume not VMware */
	cmpl	%ecx, %ebx		/* did %ebx come back with the magic? */
	jne	1f
	incl	%eax			/* yes: return 1 */
1:
	popl	%edx
	popl	%ecx
	popl	%ebx
	ret
	SET_SIZE(vmware_platform)
4398
4399#endif /* __i386 */
4400#endif /* __lint */
4401