xref: /titanic_52/usr/src/uts/intel/ia32/ml/i86_subr.s (revision e1d3217b9afde782c4d3e946fda0e6ef36a61306)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
25 * Copyright (c) 2014 by Delphix. All rights reserved.
26 */
27
28/*
29 *  Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
30 *  Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
31 *    All Rights Reserved
32 */
33
34/*
35 * Copyright (c) 2009, Intel Corporation.
36 * All rights reserved.
37 */
38
39/*
40 * General assembly language routines.
41 * It is the intent of this file to contain routines that are
42 * independent of the specific kernel architecture, and those that are
43 * common across kernel architectures.
44 * As architectures diverge, and implementations of specific
45 * architecture-dependent routines change, the routines should be moved
46 * from this file into the respective ../`arch -k`/subr.s file.
47 */
48
49#include <sys/asm_linkage.h>
50#include <sys/asm_misc.h>
51#include <sys/panic.h>
52#include <sys/ontrap.h>
53#include <sys/regset.h>
54#include <sys/privregs.h>
55#include <sys/reboot.h>
56#include <sys/psw.h>
57#include <sys/x86_archext.h>
58
59#if defined(__lint)
60#include <sys/types.h>
61#include <sys/systm.h>
62#include <sys/thread.h>
63#include <sys/archsystm.h>
64#include <sys/byteorder.h>
65#include <sys/dtrace.h>
66#include <sys/ftrace.h>
67#else	/* __lint */
68#include "assym.h"
69#endif	/* __lint */
70#include <sys/dditypes.h>
71
72/*
73 * on_fault()
74 * Catch lofault faults. Like setjmp except it returns one
75 * if code following causes uncorrectable fault. Turned off
76 * by calling no_fault().
77 */
78
79#if defined(__lint)
80
81/* ARGSUSED */
82int
83on_fault(label_t *ljb)
84{ return (0); }
85
86void
87no_fault(void)
88{}
89
90#else	/* __lint */
91
92#if defined(__amd64)
93
	/*
	 * int on_fault(label_t *ljb)
	 * Arm curthread->t_onfault with the caller's jumpbuf and point
	 * t_lofault at catch_fault, then tail-jump to setjmp so the
	 * initial call returns 0 with %rdi still holding the jumpbuf.
	 */
94	ENTRY(on_fault)
95	movq	%gs:CPU_THREAD, %rsi
96	leaq	catch_fault(%rip), %rdx
97	movq	%rdi, T_ONFAULT(%rsi)		/* jumpbuf in t_onfault */
98	movq	%rdx, T_LOFAULT(%rsi)		/* catch_fault in t_lofault */
99	jmp	setjmp				/* let setjmp do the rest */
100
	/*
	 * Entered via t_lofault when a fault occurs: disarm both handlers
	 * and longjmp back to the saved jumpbuf (on_fault returns 1).
	 */
101catch_fault:
102	movq	%gs:CPU_THREAD, %rsi
103	movq	T_ONFAULT(%rsi), %rdi		/* address of save area */
104	xorl	%eax, %eax
105	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
106	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
107	jmp	longjmp				/* let longjmp do the rest */
108	SET_SIZE(on_fault)
109
	/* void no_fault(void): clear t_onfault/t_lofault for curthread. */
110	ENTRY(no_fault)
111	movq	%gs:CPU_THREAD, %rsi
112	xorl	%eax, %eax
113	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
114	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
115	ret
116	SET_SIZE(no_fault)
117
118#elif defined(__i386)
119
	/* i386 variant: arg on the stack, otherwise identical logic. */
120	ENTRY(on_fault)
121	movl	%gs:CPU_THREAD, %edx
122	movl	4(%esp), %eax			/* jumpbuf address */
123	leal	catch_fault, %ecx
124	movl	%eax, T_ONFAULT(%edx)		/* jumpbuf in t_onfault */
125	movl	%ecx, T_LOFAULT(%edx)		/* catch_fault in t_lofault */
126	jmp	setjmp				/* let setjmp do the rest */
127
128catch_fault:
129	movl	%gs:CPU_THREAD, %edx
130	xorl	%eax, %eax
131	movl	T_ONFAULT(%edx), %ecx		/* address of save area */
132	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
133	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
134	pushl	%ecx
135	call	longjmp				/* let longjmp do the rest */
136	SET_SIZE(on_fault)
137
138	ENTRY(no_fault)
139	movl	%gs:CPU_THREAD, %edx
140	xorl	%eax, %eax
141	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
142	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
143	ret
144	SET_SIZE(no_fault)
145
146#endif	/* __i386 */
147#endif	/* __lint */
148
149/*
150 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
151 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
152 */
153
/*
 * Fix: use defined(__lint) to match the "#else / #endif __lint" comments
 * and every other lint guard in this file (was "defined(lint)").
 */
154#if defined(__lint)
155
156void
157on_trap_trampoline(void)
158{}
159
160#else	/* __lint */
161
162#if defined(__amd64)
163
	/*
	 * Tail-call longjmp(&curthread->t_ontrap->ot_jmpbuf); control never
	 * returns here (the matching on_trap() setjmp site returns 1).
	 */
164	ENTRY(on_trap_trampoline)
165	movq	%gs:CPU_THREAD, %rsi
166	movq	T_ONTRAP(%rsi), %rdi
167	addq	$OT_JMPBUF, %rdi
168	jmp	longjmp
169	SET_SIZE(on_trap_trampoline)
170
171#elif defined(__i386)
172
173	ENTRY(on_trap_trampoline)
174	movl	%gs:CPU_THREAD, %eax
175	movl	T_ONTRAP(%eax), %eax
176	addl	$OT_JMPBUF, %eax
177	pushl	%eax
178	call	longjmp
179	SET_SIZE(on_trap_trampoline)
180
181#endif	/* __i386 */
182#endif	/* __lint */
183
184/*
185 * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
186 * more information about the on_trap() mechanism.  If the on_trap_data is the
187 * same as the topmost stack element, we just modify that element.
188 */
/*
 * Fix: use defined(__lint) to match the "#else / #endif __lint" comments
 * and every other lint guard in this file (was "defined(lint)").
 */
189#if defined(__lint)
190
191/*ARGSUSED*/
192int
193on_trap(on_trap_data_t *otp, uint_t prot)
194{ return (0); }
195
196#else	/* __lint */
197
198#if defined(__amd64)
199
	/*
	 * int on_trap(on_trap_data_t *otp, uint_t prot)
	 * Initialize *otp (default trampoline, NULL handle), push it on
	 * curthread->t_ontrap unless it is already topmost, then setjmp on
	 * its ot_jmpbuf: returns 0 now, 1 if the trampoline fires later.
	 */
200	ENTRY(on_trap)
201	movw	%si, OT_PROT(%rdi)		/* ot_prot = prot */
202	movw	$0, OT_TRAP(%rdi)		/* ot_trap = 0 */
203	leaq	on_trap_trampoline(%rip), %rdx	/* rdx = &on_trap_trampoline */
204	movq	%rdx, OT_TRAMPOLINE(%rdi)	/* ot_trampoline = rdx */
205	xorl	%ecx, %ecx
206	movq	%rcx, OT_HANDLE(%rdi)		/* ot_handle = NULL */
207	movq	%rcx, OT_PAD1(%rdi)		/* ot_pad1 = NULL */
208	movq	%gs:CPU_THREAD, %rdx		/* rdx = curthread */
209	movq	T_ONTRAP(%rdx), %rcx		/* rcx = curthread->t_ontrap */
210	cmpq	%rdi, %rcx			/* if (otp == %rcx)	*/
211	je	0f				/*	don't modify t_ontrap */
212
213	movq	%rcx, OT_PREV(%rdi)		/* ot_prev = t_ontrap */
214	movq	%rdi, T_ONTRAP(%rdx)		/* curthread->t_ontrap = otp */
215
2160:	addq	$OT_JMPBUF, %rdi		/* &ot_jmpbuf */
217	jmp	setjmp
218	SET_SIZE(on_trap)
219
220#elif defined(__i386)
221
	/* i386 variant: args on the stack, otherwise identical logic. */
222	ENTRY(on_trap)
223	movl	4(%esp), %eax			/* %eax = otp */
224	movl	8(%esp), %edx			/* %edx = prot */
225
226	movw	%dx, OT_PROT(%eax)		/* ot_prot = prot */
227	movw	$0, OT_TRAP(%eax)		/* ot_trap = 0 */
228	leal	on_trap_trampoline, %edx	/* %edx = &on_trap_trampoline */
229	movl	%edx, OT_TRAMPOLINE(%eax)	/* ot_trampoline = %edx */
230	movl	$0, OT_HANDLE(%eax)		/* ot_handle = NULL */
231	movl	$0, OT_PAD1(%eax)		/* ot_pad1 = NULL */
232	movl	%gs:CPU_THREAD, %edx		/* %edx = curthread */
233	movl	T_ONTRAP(%edx), %ecx		/* %ecx = curthread->t_ontrap */
234	cmpl	%eax, %ecx			/* if (otp == %ecx) */
235	je	0f				/*    don't modify t_ontrap */
236
237	movl	%ecx, OT_PREV(%eax)		/* ot_prev = t_ontrap */
238	movl	%eax, T_ONTRAP(%edx)		/* curthread->t_ontrap = otp */
239
2400:	addl	$OT_JMPBUF, %eax		/* %eax = &ot_jmpbuf */
241	movl	%eax, 4(%esp)			/* put %eax back on the stack */
242	jmp	setjmp				/* let setjmp do the rest */
243	SET_SIZE(on_trap)
244
245#endif	/* __i386 */
246#endif	/* __lint */
247
248/*
249 * Setjmp and longjmp implement non-local gotos using state vectors
250 * type label_t.
251 */
252
253#if defined(__lint)
254
255/* ARGSUSED */
256int
257setjmp(label_t *lp)
258{ return (0); }
259
260/* ARGSUSED */
261void
262longjmp(label_t *lp)
263{}
264
265#else	/* __lint */
266
267#if LABEL_PC != 0
268#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
269#endif	/* LABEL_PC != 0 */
270
271#if defined(__amd64)
272
	/*
	 * int setjmp(label_t *lp)
	 * Save callee-saved registers, %rsp, and the return address into
	 * *lp; returns 0 on this direct call (longjmp makes it "return" 1).
	 */
273	ENTRY(setjmp)
274	movq	%rsp, LABEL_SP(%rdi)
275	movq	%rbp, LABEL_RBP(%rdi)
276	movq	%rbx, LABEL_RBX(%rdi)
277	movq	%r12, LABEL_R12(%rdi)
278	movq	%r13, LABEL_R13(%rdi)
279	movq	%r14, LABEL_R14(%rdi)
280	movq	%r15, LABEL_R15(%rdi)
281	movq	(%rsp), %rdx		/* return address */
282	movq	%rdx, (%rdi)		/* LABEL_PC is 0 */
283	xorl	%eax, %eax		/* return 0 */
284	ret
285	SET_SIZE(setjmp)
286
	/*
	 * void longjmp(label_t *lp)
	 * Restore the saved register state, plant the saved PC as the
	 * return address, and return 1 at the original setjmp site.
	 */
287	ENTRY(longjmp)
288	movq	LABEL_SP(%rdi), %rsp
289	movq	LABEL_RBP(%rdi), %rbp
290	movq	LABEL_RBX(%rdi), %rbx
291	movq	LABEL_R12(%rdi), %r12
292	movq	LABEL_R13(%rdi), %r13
293	movq	LABEL_R14(%rdi), %r14
294	movq	LABEL_R15(%rdi), %r15
295	movq	(%rdi), %rdx		/* return address; LABEL_PC is 0 */
296	movq	%rdx, (%rsp)
297	xorl	%eax, %eax
298	incl	%eax			/* return 1 */
299	ret
300	SET_SIZE(longjmp)
301
302#elif defined(__i386)
303
304	ENTRY(setjmp)
305	movl	4(%esp), %edx		/* address of save area */
306	movl	%ebp, LABEL_EBP(%edx)
307	movl	%ebx, LABEL_EBX(%edx)
308	movl	%esi, LABEL_ESI(%edx)
309	movl	%edi, LABEL_EDI(%edx)
310	movl	%esp, 4(%edx)
311	movl	(%esp), %ecx		/* %eip (return address) */
312	movl	%ecx, (%edx)		/* LABEL_PC is 0 */
313	subl	%eax, %eax		/* return 0 */
314	ret
315	SET_SIZE(setjmp)
316
	/*
	 * i386 longjmp: restores state, pops its own return address and
	 * jumps directly to the saved %eip with %eax = 1.
	 */
317	ENTRY(longjmp)
318	movl	4(%esp), %edx		/* address of save area */
319	movl	LABEL_EBP(%edx), %ebp
320	movl	LABEL_EBX(%edx), %ebx
321	movl	LABEL_ESI(%edx), %esi
322	movl	LABEL_EDI(%edx), %edi
323	movl	4(%edx), %esp
324	movl	(%edx), %ecx		/* %eip (return addr); LABEL_PC is 0 */
325	movl	$1, %eax
326	addl	$4, %esp		/* pop ret adr */
327	jmp	*%ecx			/* indirect */
328	SET_SIZE(longjmp)
329
330#endif	/* __i386 */
331#endif	/* __lint */
332
333/*
334 * if a() calls b() calls caller(),
335 * caller() returns return address in a().
336 * (Note: We assume a() and b() are C routines which do the normal entry/exit
337 *  sequence.)
338 */
339
340#if defined(__lint)
341
342caddr_t
343caller(void)
344{ return (0); }
345
346#else	/* __lint */
347
348#if defined(__amd64)
349
	/*
	 * Relies on the caller having a standard frame: %rbp points at the
	 * saved frame pointer, so 8(%rbp) is the caller's return address.
	 */
350	ENTRY(caller)
351	movq	8(%rbp), %rax		/* b()'s return pc, in a() */
352	ret
353	SET_SIZE(caller)
354
355#elif defined(__i386)
356
	/* Same frame-pointer assumption; the slot above %ebp is the PC. */
357	ENTRY(caller)
358	movl	4(%ebp), %eax		/* b()'s return pc, in a() */
359	ret
360	SET_SIZE(caller)
361
362#endif	/* __i386 */
363#endif	/* __lint */
364
365/*
366 * if a() calls callee(), callee() returns the
367 * return address in a();
368 */
369
370#if defined(__lint)
371
372caddr_t
373callee(void)
374{ return (0); }
375
376#else	/* __lint */
377
378#if defined(__amd64)
379
	/* Our own return address is at the top of the stack on entry. */
380	ENTRY(callee)
381	movq	(%rsp), %rax		/* callee()'s return pc, in a() */
382	ret
383	SET_SIZE(callee)
384
385#elif defined(__i386)
386
387	ENTRY(callee)
388	movl	(%esp), %eax		/* callee()'s return pc, in a() */
389	ret
390	SET_SIZE(callee)
391
392#endif	/* __i386 */
393#endif	/* __lint */
394
395/*
396 * return the current frame pointer
397 */
398
399#if defined(__lint)
400
401greg_t
402getfp(void)
403{ return (0); }
404
405#else	/* __lint */
406
407#if defined(__amd64)
408
	/* greg_t getfp(void): returns the caller's %rbp unchanged. */
409	ENTRY(getfp)
410	movq	%rbp, %rax
411	ret
412	SET_SIZE(getfp)
413
414#elif defined(__i386)
415
416	ENTRY(getfp)
417	movl	%ebp, %eax
418	ret
419	SET_SIZE(getfp)
420
421#endif	/* __i386 */
422#endif	/* __lint */
423
424/*
425 * Invalidate a single page table entry in the TLB
426 */
427
428#if defined(__lint)
429
430/* ARGSUSED */
431void
432mmu_tlbflush_entry(caddr_t m)
433{}
434
435#else	/* __lint */
436
437#if defined(__amd64)
438
	/* invlpg flushes only the TLB entry covering the given address. */
439	ENTRY(mmu_tlbflush_entry)
440	invlpg	(%rdi)
441	ret
442	SET_SIZE(mmu_tlbflush_entry)
443
444#elif defined(__i386)
445
446	ENTRY(mmu_tlbflush_entry)
447	movl	4(%esp), %eax
448	invlpg	(%eax)
449	ret
450	SET_SIZE(mmu_tlbflush_entry)
451
452#endif	/* __i386 */
453#endif	/* __lint */
454
455
456/*
457 * Get/Set the value of various control registers
458 */
459
460#if defined(__lint)
461
462ulong_t
463getcr0(void)
464{ return (0); }
465
466/* ARGSUSED */
467void
468setcr0(ulong_t value)
469{}
470
471ulong_t
472getcr2(void)
473{ return (0); }
474
475ulong_t
476getcr3(void)
477{ return (0); }
478
479#if !defined(__xpv)
480/* ARGSUSED */
481void
482setcr3(ulong_t val)
483{}
484
485void
486reload_cr3(void)
487{}
488#endif
489
490ulong_t
491getcr4(void)
492{ return (0); }
493
494/* ARGSUSED */
495void
496setcr4(ulong_t val)
497{}
498
499#if defined(__amd64)
500
501ulong_t
502getcr8(void)
503{ return (0); }
504
505/* ARGSUSED */
506void
507setcr8(ulong_t val)
508{}
509
510#endif	/* __amd64 */
511
512#else	/* __lint */
513
514#if defined(__amd64)
515
516	ENTRY(getcr0)
517	movq	%cr0, %rax
518	ret
519	SET_SIZE(getcr0)
520
521	ENTRY(setcr0)
522	movq	%rdi, %cr0
523	ret
524	SET_SIZE(setcr0)
525
	/*
	 * Under Xen (__xpv) %cr2 is not readable directly; the hypervisor
	 * supplies the fault address via the per-cpu vcpu_info structure.
	 */
526        ENTRY(getcr2)
527#if defined(__xpv)
528	movq	%gs:CPU_VCPU_INFO, %rax
529	movq	VCPU_INFO_ARCH_CR2(%rax), %rax
530#else
531        movq    %cr2, %rax
532#endif
533        ret
534	SET_SIZE(getcr2)
535
536	ENTRY(getcr3)
537	movq    %cr3, %rax
538	ret
539	SET_SIZE(getcr3)
540
	/* cr3 writes are hypervisor-managed under __xpv, hence omitted. */
541#if !defined(__xpv)
542
543        ENTRY(setcr3)
544        movq    %rdi, %cr3
545        ret
546	SET_SIZE(setcr3)
547
	/* Rewrite %cr3 with its current value (flushes non-global TLB). */
548	ENTRY(reload_cr3)
549	movq	%cr3, %rdi
550	movq	%rdi, %cr3
551	ret
552	SET_SIZE(reload_cr3)
553
554#endif	/* __xpv */
555
556	ENTRY(getcr4)
557	movq	%cr4, %rax
558	ret
559	SET_SIZE(getcr4)
560
561	ENTRY(setcr4)
562	movq	%rdi, %cr4
563	ret
564	SET_SIZE(setcr4)
565
566	ENTRY(getcr8)
567	movq	%cr8, %rax
568	ret
569	SET_SIZE(getcr8)
570
571	ENTRY(setcr8)
572	movq	%rdi, %cr8
573	ret
574	SET_SIZE(setcr8)
575
576#elif defined(__i386)
577
578        ENTRY(getcr0)
579        movl    %cr0, %eax
580        ret
581	SET_SIZE(getcr0)
582
583        ENTRY(setcr0)
584        movl    4(%esp), %eax
585        movl    %eax, %cr0
586        ret
587	SET_SIZE(setcr0)
588
589	/*
590	 * "lock mov %cr0" is used on processors which indicate it is
591	 * supported via CPUID. Normally the 32 bit TPR is accessed via
592	 * the local APIC.
593	 */
594	ENTRY(getcr8)
595	lock
596	movl	%cr0, %eax
597	ret
598	SET_SIZE(getcr8)
599
600	ENTRY(setcr8)
601        movl    4(%esp), %eax
602	lock
603        movl    %eax, %cr0
604	ret
605	SET_SIZE(setcr8)
606
607        ENTRY(getcr2)
608#if defined(__xpv)
609	movl	%gs:CPU_VCPU_INFO, %eax
610	movl	VCPU_INFO_ARCH_CR2(%eax), %eax
611#else
612        movl    %cr2, %eax
613#endif
614        ret
615	SET_SIZE(getcr2)
616
617	ENTRY(getcr3)
618	movl    %cr3, %eax
619	ret
620	SET_SIZE(getcr3)
621
622#if !defined(__xpv)
623
624        ENTRY(setcr3)
625        movl    4(%esp), %eax
626        movl    %eax, %cr3
627        ret
628	SET_SIZE(setcr3)
629
630	ENTRY(reload_cr3)
631	movl    %cr3, %eax
632	movl    %eax, %cr3
633	ret
634	SET_SIZE(reload_cr3)
635
636#endif	/* __xpv */
637
638	ENTRY(getcr4)
639	movl    %cr4, %eax
640	ret
641	SET_SIZE(getcr4)
642
643        ENTRY(setcr4)
644        movl    4(%esp), %eax
645        movl    %eax, %cr4
646        ret
647	SET_SIZE(setcr4)
648
649#endif	/* __i386 */
650#endif	/* __lint */
651
652#if defined(__lint)
653
654/*ARGSUSED*/
655uint32_t
656__cpuid_insn(struct cpuid_regs *regs)
657{ return (0); }
658
659#else	/* __lint */
660
661#if defined(__amd64)
662
	/*
	 * uint32_t __cpuid_insn(struct cpuid_regs *regs)
	 * Execute cpuid with eax/ebx/ecx/edx loaded from *regs and store
	 * the results back into *regs; returns cp_eax in %eax.  %rbx is
	 * callee-saved, so it is preserved via %r8.
	 */
663	ENTRY(__cpuid_insn)
664	movq	%rbx, %r8
665	movq	%rcx, %r9
666	movq	%rdx, %r11
667	movl	(%rdi), %eax		/* %eax = regs->cp_eax */
668	movl	0x4(%rdi), %ebx		/* %ebx = regs->cp_ebx */
669	movl	0x8(%rdi), %ecx		/* %ecx = regs->cp_ecx */
670	movl	0xc(%rdi), %edx		/* %edx = regs->cp_edx */
671	cpuid
672	movl	%eax, (%rdi)		/* regs->cp_eax = %eax */
673	movl	%ebx, 0x4(%rdi)		/* regs->cp_ebx = %ebx */
674	movl	%ecx, 0x8(%rdi)		/* regs->cp_ecx = %ecx */
675	movl	%edx, 0xc(%rdi)		/* regs->cp_edx = %edx */
676	movq	%r8, %rbx
677	movq	%r9, %rcx
678	movq	%r11, %rdx
679	ret
680	SET_SIZE(__cpuid_insn)
681
682#elif defined(__i386)
683
	/* i386 variant: preserves %ebx/%ecx/%edx on the stack instead. */
684        ENTRY(__cpuid_insn)
685	pushl	%ebp
686	movl	0x8(%esp), %ebp		/* %ebp = regs */
687	pushl	%ebx
688	pushl	%ecx
689	pushl	%edx
690	movl	(%ebp), %eax		/* %eax = regs->cp_eax */
691	movl	0x4(%ebp), %ebx		/* %ebx = regs->cp_ebx */
692	movl	0x8(%ebp), %ecx		/* %ecx = regs->cp_ecx */
693	movl	0xc(%ebp), %edx		/* %edx = regs->cp_edx */
694	cpuid
695	movl	%eax, (%ebp)		/* regs->cp_eax = %eax */
696	movl	%ebx, 0x4(%ebp)		/* regs->cp_ebx = %ebx */
697	movl	%ecx, 0x8(%ebp)		/* regs->cp_ecx = %ecx */
698	movl	%edx, 0xc(%ebp)		/* regs->cp_edx = %edx */
699	popl	%edx
700	popl	%ecx
701	popl	%ebx
702	popl	%ebp
703	ret
704	SET_SIZE(__cpuid_insn)
705
706#endif	/* __i386 */
707#endif	/* __lint */
708
709#if defined(__lint)
710
711/*ARGSUSED*/
712void
713i86_monitor(volatile uint32_t *addr, uint32_t extensions, uint32_t hints)
714{}
715
716#else   /* __lint */
717
718#if defined(__amd64)
719
	/*
	 * Arm address monitoring for mwait: monitor takes %rax = address,
	 * %rcx = extensions, %rdx = hints.  The clflush forces the line
	 * out of cache before monitoring begins.  The instruction is
	 * emitted as raw bytes for older assemblers.
	 */
720	ENTRY_NP(i86_monitor)
721	pushq	%rbp
722	movq	%rsp, %rbp
723	movq	%rdi, %rax		/* addr */
724	movq	%rsi, %rcx		/* extensions */
725	/* rdx contains input arg3: hints */
726	clflush	(%rax)
727	.byte	0x0f, 0x01, 0xc8	/* monitor */
728	leave
729	ret
730	SET_SIZE(i86_monitor)
731
732#elif defined(__i386)
733
734ENTRY_NP(i86_monitor)
735	pushl	%ebp
736	movl	%esp, %ebp
737	movl	0x8(%ebp),%eax		/* addr */
738	movl	0xc(%ebp),%ecx		/* extensions */
739	movl	0x10(%ebp),%edx		/* hints */
740	clflush	(%eax)
741	.byte	0x0f, 0x01, 0xc8	/* monitor */
742	leave
743	ret
744	SET_SIZE(i86_monitor)
745
746#endif	/* __i386 */
747#endif	/* __lint */
748
749#if defined(__lint)
750
751/*ARGSUSED*/
752void
753i86_mwait(uint32_t data, uint32_t extensions)
754{}
755
756#else	/* __lint */
757
758#if defined(__amd64)
759
	/*
	 * Wait on a previously i86_monitor()ed line: mwait takes
	 * %rax = hints/data, %rcx = extensions (raw opcode bytes for
	 * assembler compatibility, as with monitor above).
	 */
760	ENTRY_NP(i86_mwait)
761	pushq	%rbp
762	movq	%rsp, %rbp
763	movq	%rdi, %rax		/* data */
764	movq	%rsi, %rcx		/* extensions */
765	.byte	0x0f, 0x01, 0xc9	/* mwait */
766	leave
767	ret
768	SET_SIZE(i86_mwait)
769
770#elif defined(__i386)
771
772	ENTRY_NP(i86_mwait)
773	pushl	%ebp
774	movl	%esp, %ebp
775	movl	0x8(%ebp),%eax		/* data */
776	movl	0xc(%ebp),%ecx		/* extensions */
777	.byte	0x0f, 0x01, 0xc9	/* mwait */
778	leave
779	ret
780	SET_SIZE(i86_mwait)
781
782#endif	/* __i386 */
783#endif	/* __lint */
784
785#if defined(__xpv)
786	/*
787	 * Defined in C
788	 */
789#else
790
791#if defined(__lint)
792
793hrtime_t
794tsc_read(void)
795{
796	return (0);
797}
798
799#else	/* __lint */
800
801#if defined(__amd64)
802
	/*
	 * hrtime_t tsc_read(void)
	 * Default body serializes with cpuid (leaf 0, preserving the
	 * callee-saved %rbx) before rdtsc, then assembles edx:eax into a
	 * 64-bit result.  The _tsc_mfence/_tscp/_no_rdtsc/_tsc_lfence
	 * start/end label pairs delimit alternative bodies
	 * (mfence+rdtsc, rdtscp, stub returning 0, lfence+rdtsc);
	 * NOTE(review): presumably one of these is patched over the
	 * default at boot based on CPU capabilities -- confirm with the
	 * code that references these labels.
	 */
803	ENTRY_NP(tsc_read)
804	movq	%rbx, %r11
805	movl	$0, %eax
806	cpuid
807	rdtsc
808	movq	%r11, %rbx
809	shlq	$32, %rdx
810	orq	%rdx, %rax
811	ret
812	.globl _tsc_mfence_start
813_tsc_mfence_start:
814	mfence
815	rdtsc
816	shlq	$32, %rdx
817	orq	%rdx, %rax
818	ret
819	.globl _tsc_mfence_end
820_tsc_mfence_end:
821	.globl _tscp_start
822_tscp_start:
823	.byte	0x0f, 0x01, 0xf9	/* rdtscp instruction */
824	shlq	$32, %rdx
825	orq	%rdx, %rax
826	ret
827	.globl _tscp_end
828_tscp_end:
829	.globl _no_rdtsc_start
830_no_rdtsc_start:
831	xorl	%edx, %edx
832	xorl	%eax, %eax
833	ret
834	.globl _no_rdtsc_end
835_no_rdtsc_end:
836	.globl _tsc_lfence_start
837_tsc_lfence_start:
838	lfence
839	rdtsc
840	shlq	$32, %rdx
841	orq	%rdx, %rax
842	ret
843	.globl _tsc_lfence_end
844_tsc_lfence_end:
845	SET_SIZE(tsc_read)
846
847#else /* __i386 */
848
	/*
	 * i386 variant: rdtsc already leaves the 64-bit result in
	 * edx:eax, which is the 32-bit ABI's 64-bit return convention,
	 * so no shifting is needed.
	 */
849	ENTRY_NP(tsc_read)
850	pushl	%ebx
851	movl	$0, %eax
852	cpuid
853	rdtsc
854	popl	%ebx
855	ret
856	.globl _tsc_mfence_start
857_tsc_mfence_start:
858	mfence
859	rdtsc
860	ret
861	.globl _tsc_mfence_end
862_tsc_mfence_end:
863	.globl	_tscp_start
864_tscp_start:
865	.byte	0x0f, 0x01, 0xf9	/* rdtscp instruction */
866	ret
867	.globl _tscp_end
868_tscp_end:
869	.globl _no_rdtsc_start
870_no_rdtsc_start:
871	xorl	%edx, %edx
872	xorl	%eax, %eax
873	ret
874	.globl _no_rdtsc_end
875_no_rdtsc_end:
876	.globl _tsc_lfence_start
877_tsc_lfence_start:
878	lfence
879	rdtsc
880	ret
881	.globl _tsc_lfence_end
882_tsc_lfence_end:
883	SET_SIZE(tsc_read)
884
885#endif	/* __i386 */
886
887#endif	/* __lint */
888
889
890#endif	/* __xpv */
891
892#ifdef __lint
893/*
894 * Do not use this function for obtaining clock tick.  This
895 * is called by callers who do not need to have a guaranteed
896 * correct tick value.  The proper routine to use is tsc_read().
897 */
898u_longlong_t
899randtick(void)
900{
901	return (0);
902}
903#else
904#if defined(__amd64)
	/* Unserialized rdtsc: fast but unordered w.r.t. other insns. */
905	ENTRY_NP(randtick)
906	rdtsc
907	shlq    $32, %rdx
908	orq     %rdx, %rax
909	ret
910	SET_SIZE(randtick)
911#else
	/* i386: edx:eax from rdtsc is already the 64-bit return value. */
912	ENTRY_NP(randtick)
913	rdtsc
914	ret
915	SET_SIZE(randtick)
916#endif /* __i386 */
917#endif /* __lint */
918/*
919 * Insert entryp after predp in a doubly linked list.
920 */
921
922#if defined(__lint)
923
924/*ARGSUSED*/
925void
926_insque(caddr_t entryp, caddr_t predp)
927{}
928
929#else	/* __lint */
930
931#if defined(__amd64)
932
	/*
	 * Links are laid out as { forw at offset 0, back at CPTRSIZE }.
	 * %rdi = entryp, %rsi = predp.
	 */
933	ENTRY(_insque)
934	movq	(%rsi), %rax		/* predp->forw			*/
935	movq	%rsi, CPTRSIZE(%rdi)	/* entryp->back = predp		*/
936	movq	%rax, (%rdi)		/* entryp->forw = predp->forw	*/
937	movq	%rdi, (%rsi)		/* predp->forw = entryp		*/
938	movq	%rdi, CPTRSIZE(%rax)	/* predp->forw->back = entryp	*/
939	ret
940	SET_SIZE(_insque)
941
942#elif defined(__i386)
943
	/* %ecx = entryp, %edx = predp (from the stack). */
944	ENTRY(_insque)
945	movl	8(%esp), %edx
946	movl	4(%esp), %ecx
947	movl	(%edx), %eax		/* predp->forw			*/
948	movl	%edx, CPTRSIZE(%ecx)	/* entryp->back = predp		*/
949	movl	%eax, (%ecx)		/* entryp->forw = predp->forw	*/
950	movl	%ecx, (%edx)		/* predp->forw = entryp		*/
951	movl	%ecx, CPTRSIZE(%eax)	/* predp->forw->back = entryp	*/
952	ret
953	SET_SIZE(_insque)
954
955#endif	/* __i386 */
956#endif	/* __lint */
957
958/*
959 * Remove entryp from a doubly linked list
960 */
961
962#if defined(__lint)
963
964/*ARGSUSED*/
965void
966_remque(caddr_t entryp)
967{}
968
969#else	/* __lint */
970
971#if defined(__amd64)
972
	/* Unlink: cross-link entry's neighbors; *entryp itself untouched. */
973	ENTRY(_remque)
974	movq	(%rdi), %rax		/* entry->forw */
975	movq	CPTRSIZE(%rdi), %rdx	/* entry->back */
976	movq	%rax, (%rdx)		/* entry->back->forw = entry->forw */
977	movq	%rdx, CPTRSIZE(%rax)	/* entry->forw->back = entry->back */
978	ret
979	SET_SIZE(_remque)
980
981#elif defined(__i386)
982
983	ENTRY(_remque)
984	movl	4(%esp), %ecx
985	movl	(%ecx), %eax		/* entry->forw */
986	movl	CPTRSIZE(%ecx), %edx	/* entry->back */
987	movl	%eax, (%edx)		/* entry->back->forw = entry->forw */
988	movl	%edx, CPTRSIZE(%eax)	/* entry->forw->back = entry->back */
989	ret
990	SET_SIZE(_remque)
991
992#endif	/* __i386 */
993#endif	/* __lint */
994
995/*
996 * Returns the number of
997 * non-NULL bytes in string argument.
998 */
999
1000#if defined(__lint)
1001
1002/* ARGSUSED */
1003size_t
1004strlen(const char *str)
1005{ return (0); }
1006
1007#else	/* __lint */
1008
1009#if defined(__amd64)
1010
1011/*
1012 * This is close to a simple transliteration of a C version of this
1013 * routine.  We should either just -make- this be a C version, or
1014 * justify having it in assembler by making it significantly faster.
1015 *
1016 * size_t
1017 * strlen(const char *s)
1018 * {
1019 *	const char *s0;
1020 * #if defined(DEBUG)
1021 *	if ((uintptr_t)s < KERNELBASE)
1022 *		panic(.str_panic_msg);
1023 * #endif
1024 *	for (s0 = s; *s; s++)
1025 *		;
1026 *	return (s - s0);
1027 * }
1028 */
1029
1030	ENTRY(strlen)
	/* DEBUG kernels panic on a pointer below the kernel base. */
1031#ifdef DEBUG
1032	movq	postbootkernelbase(%rip), %rax
1033	cmpq	%rax, %rdi
1034	jae	str_valid
1035	pushq	%rbp
1036	movq	%rsp, %rbp
1037	leaq	.str_panic_msg(%rip), %rdi
1038	xorl	%eax, %eax
1039	call	panic
1040#endif	/* DEBUG */
1041str_valid:
	/* %rax remembers the start; %rdi walks to the NUL terminator. */
1042	cmpb	$0, (%rdi)
1043	movq	%rdi, %rax
1044	je	.null_found
1045	.align	4
1046.strlen_loop:
1047	incq	%rdi
1048	cmpb	$0, (%rdi)
1049	jne	.strlen_loop
1050.null_found:
1051	subq	%rax, %rdi
1052	movq	%rdi, %rax
1053	ret
1054	SET_SIZE(strlen)
1055
1056#elif defined(__i386)
1057
	/*
	 * i386 variant scans a 32-bit word at a time, using the classic
	 * (x & 0x7f7f7f7f) + 0x7f7f7f7f carry trick to detect a zero
	 * byte within the word, with byte-at-a-time handling until the
	 * pointer is word aligned.
	 */
1058	ENTRY(strlen)
1059#ifdef DEBUG
1060	movl	postbootkernelbase, %eax
1061	cmpl	%eax, 4(%esp)
1062	jae	str_valid
1063	pushl	%ebp
1064	movl	%esp, %ebp
1065	pushl	$.str_panic_msg
1066	call	panic
1067#endif /* DEBUG */
1068
1069str_valid:
1070	movl	4(%esp), %eax		/* %eax = string address */
1071	testl	$3, %eax		/* if %eax not word aligned */
1072	jnz	.not_word_aligned	/* goto .not_word_aligned */
1073	.align	4
1074.word_aligned:
1075	movl	(%eax), %edx		/* move 1 word from (%eax) to %edx */
1076	movl	$0x7f7f7f7f, %ecx
1077	andl	%edx, %ecx		/* %ecx = %edx & 0x7f7f7f7f */
1078	addl	$4, %eax		/* next word */
1079	addl	$0x7f7f7f7f, %ecx	/* %ecx += 0x7f7f7f7f */
1080	orl	%edx, %ecx		/* %ecx |= %edx */
1081	andl	$0x80808080, %ecx	/* %ecx &= 0x80808080 */
1082	cmpl	$0x80808080, %ecx	/* if no null byte in this word */
1083	je	.word_aligned		/* goto .word_aligned */
1084	subl	$4, %eax		/* post-incremented */
1085.not_word_aligned:
1086	cmpb	$0, (%eax)		/* if a byte in (%eax) is null */
1087	je	.null_found		/* goto .null_found */
1088	incl	%eax			/* next byte */
1089	testl	$3, %eax		/* if %eax not word aligned */
1090	jnz	.not_word_aligned	/* goto .not_word_aligned */
1091	jmp	.word_aligned		/* goto .word_aligned */
1092	.align	4
1093.null_found:
1094	subl	4(%esp), %eax		/* %eax -= string address */
1095	ret
1096	SET_SIZE(strlen)
1097
1098#endif	/* __i386 */
1099
1100#ifdef DEBUG
1101	.text
1102.str_panic_msg:
1103	.string "strlen: argument below kernelbase"
1104#endif /* DEBUG */
1105
1106#endif	/* __lint */
1107
1108	/*
1109	 * Berkeley 4.3 introduced symbolically named interrupt levels
1110	 * as a way deal with priority in a machine independent fashion.
1111	 * Numbered priorities are machine specific, and should be
1112	 * discouraged where possible.
1113	 *
1114	 * Note, for the machine specific priorities there are
1115	 * examples listed for devices that use a particular priority.
1116	 * It should not be construed that all devices of that
1117	 * type should be at that priority.  It is currently were
1118	 * the current devices fit into the priority scheme based
1119	 * upon time criticalness.
1120	 *
1121	 * The underlying assumption of these assignments is that
1122	 * IPL 10 is the highest level from which a device
1123	 * routine can call wakeup.  Devices that interrupt from higher
1124	 * levels are restricted in what they can do.  If they need
1125	 * kernels services they should schedule a routine at a lower
1126	 * level (via software interrupt) to do the required
1127	 * processing.
1128	 *
1129	 * Examples of this higher usage:
1130	 *	Level	Usage
1131	 *	14	Profiling clock (and PROM uart polling clock)
1132	 *	12	Serial ports
1133	 *
1134	 * The serial ports request lower level processing on level 6.
1135	 *
1136	 * Also, almost all splN routines (where N is a number or a
1137	 * mnemonic) will do a RAISE(), on the assumption that they are
1138	 * never used to lower our priority.
1139	 * The exceptions are:
1140	 *	spl8()		Because you can't be above 15 to begin with!
1141	 *	splzs()		Because this is used at boot time to lower our
1142	 *			priority, to allow the PROM to poll the uart.
1143	 *	spl0()		Used to lower priority to 0.
1144	 */
1145
1146#if defined(__lint)
1147
1148int spl0(void)		{ return (0); }
1149int spl6(void)		{ return (0); }
1150int spl7(void)		{ return (0); }
1151int spl8(void)		{ return (0); }
1152int splhigh(void)	{ return (0); }
1153int splhi(void)		{ return (0); }
1154int splzs(void)		{ return (0); }
1155
1156/* ARGSUSED */
1157void
1158splx(int level)
1159{}
1160
1161#else	/* __lint */
1162
1163#if defined(__amd64)
1164
	/*
	 * amd64: pass the new priority in %edi and tail-jump to the
	 * common do_splx/splr code, which also supplies the return value.
	 */
1165#define	SETPRI(level) \
1166	movl	$/**/level, %edi;	/* new priority */		\
1167	jmp	do_splx			/* redirect to do_splx */
1168
1169#define	RAISE(level) \
1170	movl	$/**/level, %edi;	/* new priority */		\
1171	jmp	splr			/* redirect to splr */
1172
1173#elif defined(__i386)
1174
	/* i386: same idea, but the priority is passed on the stack. */
1175#define	SETPRI(level) \
1176	pushl	$/**/level;	/* new priority */			\
1177	call	do_splx;	/* invoke common splx code */		\
1178	addl	$4, %esp;	/* unstack arg */			\
1179	ret
1180
1181#define	RAISE(level) \
1182	pushl	$/**/level;	/* new priority */			\
1183	call	splr;		/* invoke common splr code */		\
1184	addl	$4, %esp;	/* unstack args */			\
1185	ret
1186
1187#endif	/* __i386 */
1188
1189	/* locks out all interrupts, including memory errors */
1190	ENTRY(spl8)
1191	SETPRI(15)
1192	SET_SIZE(spl8)
1193
1194	/* just below the level that profiling runs */
1195	ENTRY(spl7)
1196	RAISE(13)
1197	SET_SIZE(spl7)
1198
1199	/* sun specific - highest priority onboard serial i/o asy ports */
1200	ENTRY(splzs)
1201	SETPRI(12)	/* Can't be a RAISE, as it's used to lower us */
1202	SET_SIZE(splzs)
1203
	/* splhi and friends are all aliases for raising to DISP_LEVEL. */
1204	ENTRY(splhi)
1205	ALTENTRY(splhigh)
1206	ALTENTRY(spl6)
1207	ALTENTRY(i_ddi_splhigh)
1208
1209	RAISE(DISP_LEVEL)
1210
1211	SET_SIZE(i_ddi_splhigh)
1212	SET_SIZE(spl6)
1213	SET_SIZE(splhigh)
1214	SET_SIZE(splhi)
1215
1216	/* allow all interrupts */
1217	ENTRY(spl0)
1218	SETPRI(0)
1219	SET_SIZE(spl0)
1220
1221
1222	/* splx implementation */
1223	ENTRY(splx)
1224	jmp	do_splx		/* redirect to common splx code */
1225	SET_SIZE(splx)
1226
1227#endif	/* __lint */
1228
1229#if defined(__i386)
1230
1231/*
1232 * Read and write the %gs register
1233 */
1234
1235#if defined(__lint)
1236
1237/*ARGSUSED*/
1238uint16_t
1239getgs(void)
1240{ return (0); }
1241
1242/*ARGSUSED*/
1243void
1244setgs(uint16_t sel)
1245{}
1246
1247#else	/* __lint */
1248
	/* uint16_t getgs(void): %eax cleared first, selector in %ax. */
1249	ENTRY(getgs)
1250	clr	%eax
1251	movw	%gs, %ax
1252	ret
1253	SET_SIZE(getgs)
1254
	/* void setgs(uint16_t sel): load %gs from the stack argument. */
1255	ENTRY(setgs)
1256	movw	4(%esp), %gs
1257	ret
1258	SET_SIZE(setgs)
1259
1260#endif	/* __lint */
1261#endif	/* __i386 */
1262
1263#if defined(__lint)
1264
1265void
1266pc_reset(void)
1267{}
1268
1269void
1270efi_reset(void)
1271{}
1272
1273#else	/* __lint */
1274
	/* Busy-wait ~500ms: 50000 iterations of tenmicrosec(). */
1275	ENTRY(wait_500ms)
1276#if defined(__amd64)
1277	pushq	%rbx
1278#elif defined(__i386)
1279	push	%ebx
1280#endif
1281	movl	$50000, %ebx
12821:
1283	call	tenmicrosec
1284	decl	%ebx
1285	jnz	1b
1286#if defined(__amd64)
1287	popq	%rbx
1288#elif defined(__i386)
1289	pop	%ebx
1290#endif
1291	ret
1292	SET_SIZE(wait_500ms)
1293
	/* Bitmask of reset methods pc_reset is allowed to attempt. */
1294#define	RESET_METHOD_KBC	1
1295#define	RESET_METHOD_PORT92	2
1296#define RESET_METHOD_PCI	4
1297
1298	DGDEF3(pc_reset_methods, 4, 8)
1299	.long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;
1300
	/*
	 * void pc_reset(void)
	 * Attempt each enabled reset method in turn (keyboard controller,
	 * port 0x92, PCI 0xcf9), waiting 500ms after each; falls through
	 * into efi_reset()'s triple-fault as the last resort.
	 */
1301	ENTRY(pc_reset)
1302
1303#if defined(__i386)
1304	testl	$RESET_METHOD_KBC, pc_reset_methods
1305#elif defined(__amd64)
1306	testl	$RESET_METHOD_KBC, pc_reset_methods(%rip)
1307#endif
1308	jz	1f
1309
1310	/
1311	/ Try the classic keyboard controller-triggered reset.
1312	/
1313	movw	$0x64, %dx
1314	movb	$0xfe, %al
1315	outb	(%dx)
1316
1317	/ Wait up to 500 milliseconds here for the keyboard controller
1318	/ to pull the reset line.  On some systems where the keyboard
1319	/ controller is slow to pull the reset line, the next reset method
1320	/ may be executed (which may be bad if those systems hang when the
1321	/ next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
1322	/ and Ferrari 4000 (doesn't like the cf9 reset method))
1323
1324	call	wait_500ms
1325
13261:
1327#if defined(__i386)
1328	testl	$RESET_METHOD_PORT92, pc_reset_methods
1329#elif defined(__amd64)
1330	testl	$RESET_METHOD_PORT92, pc_reset_methods(%rip)
1331#endif
1332	jz	3f
1333
1334	/
1335	/ Try port 0x92 fast reset
1336	/
1337	movw	$0x92, %dx
1338	inb	(%dx)
1339	cmpb	$0xff, %al	/ If port's not there, we should get back 0xFF
1340	je	1f
1341	testb	$1, %al		/ If bit 0
1342	jz	2f		/ is clear, jump to perform the reset
1343	andb	$0xfe, %al	/ otherwise,
1344	outb	(%dx)		/ clear bit 0 first, then
13452:
1346	orb	$1, %al		/ Set bit 0
1347	outb	(%dx)		/ and reset the system
13481:
1349
1350	call	wait_500ms
1351
13523:
1353#if defined(__i386)
1354	testl	$RESET_METHOD_PCI, pc_reset_methods
1355#elif defined(__amd64)
1356	testl	$RESET_METHOD_PCI, pc_reset_methods(%rip)
1357#endif
1358	jz	4f
1359
1360	/ Try the PCI (soft) reset vector (should work on all modern systems,
1361	/ but has been shown to cause problems on 450NX systems, and some newer
1362	/ systems (e.g. ATI IXP400-equipped systems))
1363	/ When resetting via this method, 2 writes are required.  The first
1364	/ targets bit 1 (0=hard reset without power cycle, 1=hard reset with
1365	/ power cycle).
1366	/ The reset occurs on the second write, during bit 2's transition from
1367	/ 0->1.
1368	movw	$0xcf9, %dx
1369	movb	$0x2, %al	/ Reset mode = hard, no power cycle
1370	outb	(%dx)
1371	movb	$0x6, %al
1372	outb	(%dx)
1373
1374	call	wait_500ms
1375
13764:
1377	/
1378	/ port 0xcf9 failed also.  Last-ditch effort is to
1379	/ triple-fault the CPU.
1380	/ Also, use triple fault for EFI firmware
1381	/
	/* Load a zero-limit IDT, then int $0 triple-faults the CPU. */
1382	ENTRY(efi_reset)
1383#if defined(__amd64)
1384	pushq	$0x0
1385	pushq	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
1386	lidt	(%rsp)
1387#elif defined(__i386)
1388	pushl	$0x0
1389	pushl	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
1390	lidt	(%esp)
1391#endif
1392	int	$0x0		/ Trigger interrupt, generate triple-fault
1393
1394	cli
1395	hlt			/ Wait forever
1396	/*NOTREACHED*/
1397	SET_SIZE(efi_reset)
1398	SET_SIZE(pc_reset)
1399
1400#endif	/* __lint */
1401
/*
 * C-callable port I/O routines (byte/word/dword in and out, plus the
 * rep string forms below).
 */
1405
#if defined(__lint)

/* ARGSUSED */
void
outl(int port_address, uint32_t val)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void outl(int port_address, uint32_t val)
	 *
	 * Write the 32-bit value `val' to I/O port `port_address'.
	 * In: %di = port, %esi = val.  Clobbers %eax, %edx.
	 */
	ENTRY(outl)
	movw	%di, %dx		/* %dx = port */
	movl	%esi, %eax		/* %eax = value */
	outl	(%dx)
	ret
	SET_SIZE(outl)

#elif defined(__i386)

	/* Stack offsets of the arguments (no registers are saved). */
	.set	PORT, 4
	.set	VAL, 8

	ENTRY(outl)
	movw	PORT(%esp), %dx
	movl	VAL(%esp), %eax
	outl	(%dx)
	ret
	SET_SIZE(outl)

#endif	/* __i386 */
#endif	/* __lint */
1438
#if defined(__lint)

/* ARGSUSED */
void
outw(int port_address, uint16_t val)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void outw(int port_address, uint16_t val)
	 *
	 * Write the 16-bit value `val' to I/O port `port_address'.
	 * The D16 (0x66) prefix turns outl into a 16-bit out.
	 */
	ENTRY(outw)
	movw	%di, %dx
	movw	%si, %ax
	D16 outl (%dx)		/* XX64 why not outw? */
	ret
	SET_SIZE(outw)

#elif defined(__i386)

	/* PORT/VAL stack offsets are inherited from outl above. */
	ENTRY(outw)
	movw	PORT(%esp), %dx
	movw	VAL(%esp), %ax
	D16 outl (%dx)		/* 0x66 prefix: 16-bit out */
	ret
	SET_SIZE(outw)

#endif	/* __i386 */
#endif	/* __lint */
1468
#if defined(__lint)

/* ARGSUSED */
void
outb(int port_address, uint8_t val)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void outb(int port_address, uint8_t val)
	 * Write the byte `val' (%sil) to I/O port `port_address' (%di).
	 */
	ENTRY(outb)
	movw	%di, %dx
	movb	%sil, %al
	outb	(%dx)
	ret
	SET_SIZE(outb)

#elif defined(__i386)

	/* PORT/VAL stack offsets are inherited from outl above. */
	ENTRY(outb)
	movw	PORT(%esp), %dx
	movb	VAL(%esp), %al
	outb	(%dx)
	ret
	SET_SIZE(outb)

#endif	/* __i386 */
#endif	/* __lint */
1498
#if defined(__lint)

/* ARGSUSED */
uint32_t
inl(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * uint32_t inl(int port_address)
	 * Read and return a 32-bit value from I/O port `port_address'.
	 */
	ENTRY(inl)
	xorl	%eax, %eax
	movw	%di, %dx
	inl	(%dx)
	ret
	SET_SIZE(inl)

#elif defined(__i386)

	ENTRY(inl)
	movw	PORT(%esp), %dx
	inl	(%dx)
	ret
	SET_SIZE(inl)

#endif	/* __i386 */
#endif	/* __lint */
1527
#if defined(__lint)

/* ARGSUSED */
uint16_t
inw(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * uint16_t inw(int port_address)
	 *
	 * Read and return a 16-bit value from I/O port `port_address'.
	 * %eax is cleared first so the 16-bit result comes back
	 * zero-extended; D16 inl executes as a 16-bit in.
	 */
	ENTRY(inw)
	xorl	%eax, %eax
	movw	%di, %dx
	D16 inl	(%dx)
	ret
	SET_SIZE(inw)

#elif defined(__i386)

	ENTRY(inw)
	subl	%eax, %eax	/* zero %eax so the result is zero-extended */
	movw	PORT(%esp), %dx
	D16 inl	(%dx)
	ret
	SET_SIZE(inw)

#endif	/* __i386 */
#endif	/* __lint */
1557
1558
#if defined(__lint)

/* ARGSUSED */
uint8_t
inb(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * uint8_t inb(int port_address)
	 * Read and return one byte from I/O port `port_address';
	 * %eax is cleared first so the result is zero-extended.
	 */
	ENTRY(inb)
	xorl	%eax, %eax
	movw	%di, %dx
	inb	(%dx)
	ret
	SET_SIZE(inb)

#elif defined(__i386)

	ENTRY(inb)
	subl    %eax, %eax	/* zero %eax so the result is zero-extended */
	movw	PORT(%esp), %dx
	inb	(%dx)
	ret
	SET_SIZE(inb)

#endif	/* __i386 */
#endif	/* __lint */
1588
1589
#if defined(__lint)

/* ARGSUSED */
void
repoutsw(int port, uint16_t *addr, int cnt)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repoutsw(int port, uint16_t *addr, int cnt)
	 *
	 * Write `cnt' 16-bit words from `addr' to I/O port `port'.
	 * `rep outsw' reads from %ds:(%rsi), which already holds the
	 * buffer (arg 2); D16 makes the string op 16-bit wide.
	 */
	ENTRY(repoutsw)
	movl	%edx, %ecx	/* %ecx = word count */
	movw	%di, %dx	/* %dx = port */
	rep
	  D16 outsl
	ret
	SET_SIZE(repoutsw)

#elif defined(__i386)

	/*
	 * The arguments and saved registers are on the stack in the
	 *  following order:
	 *      |  cnt  |  +16
	 *      | *addr |  +12
	 *      | port  |  +8
	 *      |  eip  |  +4
	 *      |  esi  |  <-- %esp
	 * If additional values are pushed onto the stack, make sure
	 * to adjust the following constants accordingly.
	 */
	/* Note: these redefine PORT (4 above) to account for saved %esi. */
	.set	PORT, 8
	.set	ADDR, 12
	.set	COUNT, 16

	ENTRY(repoutsw)
	pushl	%esi
	movl	PORT(%esp), %edx
	movl	ADDR(%esp), %esi
	movl	COUNT(%esp), %ecx
	rep
	  D16 outsl
	popl	%esi
	ret
	SET_SIZE(repoutsw)

#endif	/* __i386 */
#endif	/* __lint */
1639
1640
#if defined(__lint)

/* ARGSUSED */
void
repinsw(int port_addr, uint16_t *addr, int cnt)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repinsw(int port_addr, uint16_t *addr, int cnt)
	 *
	 * Read `cnt' 16-bit words from I/O port `port_addr' into the
	 * buffer at `addr'.  `rep insw' stores through %es:(%rdi), so
	 * the buffer pointer (arg 2, in %rsi) must be moved into %rdi
	 * first, exactly as repinsb and repinsd do below; %rdi is
	 * call-clobbered and the port has already been copied to %dx.
	 */
	ENTRY(repinsw)
	movl	%edx, %ecx	/* %ecx = word count */
	movw	%di, %dx	/* %dx = port */
	movq	%rsi, %rdi	/* %rdi = destination buffer for insw */
	rep
	  D16 insl		/* 0x66 prefix: rep insw */
	ret
	SET_SIZE(repinsw)

#elif defined(__i386)

	/* PORT/ADDR/COUNT stack offsets are defined above by repoutsw. */
	ENTRY(repinsw)
	pushl	%edi
	movl	PORT(%esp), %edx
	movl	ADDR(%esp), %edi
	movl	COUNT(%esp), %ecx
	rep
	  D16 insl
	popl	%edi
	ret
	SET_SIZE(repinsw)

#endif	/* __i386 */
#endif	/* __lint */
1675
1676
#if defined(__lint)

/* ARGSUSED */
void
repinsb(int port, uint8_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repinsb(int port, uint8_t *addr, int count)
	 *
	 * Read `count' bytes from I/O port `port' into the buffer at
	 * `addr'.  `rep insb' stores through %es:(%rdi), hence the move
	 * of the buffer pointer from %rsi to %rdi.
	 */
	ENTRY(repinsb)
	movl	%edx, %ecx	/* %ecx = byte count */
	movw	%di, %dx	/* %dx = port */
	movq	%rsi, %rdi	/* %rdi = destination buffer for insb */
	rep
	  insb
	ret
	SET_SIZE(repinsb)

#elif defined(__i386)

	/*
	 * The arguments and saved registers are on the stack in the
	 *  following order:
	 *      |  cnt  |  +16
	 *      | *addr |  +12
	 *      | port  |  +8
	 *      |  eip  |  +4
	 *      |  esi  |  <-- %esp
	 * If additional values are pushed onto the stack, make sure
	 * to adjust the following constants accordingly.
	 */
	.set	IO_PORT, 8
	.set	IO_ADDR, 12
	.set	IO_COUNT, 16

	ENTRY(repinsb)
	pushl	%edi
	movl	IO_ADDR(%esp), %edi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  insb
	popl	%edi
	ret
	SET_SIZE(repinsb)

#endif	/* __i386 */
#endif	/* __lint */
1727
1728
/*
 * Input a stream of 32-bit words.
 * NOTE: count is a DWORD count.
 */
#if defined(__lint)

/* ARGSUSED */
void
repinsd(int port, uint32_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repinsd(int port, uint32_t *addr, int count)
	 * Read `count' 32-bit words from `port' into `addr'; `rep insl'
	 * stores through %es:(%rdi), hence the %rsi -> %rdi move.
	 */
	ENTRY(repinsd)
	movl	%edx, %ecx	/* %ecx = dword count */
	movw	%di, %dx	/* %dx = port */
	movq	%rsi, %rdi	/* %rdi = destination buffer for insl */
	rep
	  insl
	ret
	SET_SIZE(repinsd)

#elif defined(__i386)

	/* IO_PORT/IO_ADDR/IO_COUNT stack offsets defined by repinsb above. */
	ENTRY(repinsd)
	pushl	%edi
	movl	IO_ADDR(%esp), %edi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  insl
	popl	%edi
	ret
	SET_SIZE(repinsd)

#endif	/* __i386 */
#endif	/* __lint */
1768
/*
 * Output a stream of bytes
 * NOTE: count is a byte count
 */
#if defined(__lint)

/* ARGSUSED */
void
repoutsb(int port, uint8_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repoutsb(int port, uint8_t *addr, int count)
	 * Write `count' bytes from `addr' to `port'; `rep outsb' reads
	 * from %ds:(%rsi), which already holds the buffer (arg 2).
	 */
	ENTRY(repoutsb)
	movl	%edx, %ecx	/* %ecx = byte count */
	movw	%di, %dx	/* %dx = port */
	rep
	  outsb
	ret
	SET_SIZE(repoutsb)

#elif defined(__i386)

	/* IO_PORT/IO_ADDR/IO_COUNT stack offsets defined by repinsb above. */
	ENTRY(repoutsb)
	pushl	%esi
	movl	IO_ADDR(%esp), %esi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  outsb
	popl	%esi
	ret
	SET_SIZE(repoutsb)

#endif	/* __i386 */
#endif	/* __lint */
1807
/*
 * Output a stream of 32-bit words
 * NOTE: count is a DWORD count
 */
#if defined(__lint)

/* ARGSUSED */
void
repoutsd(int port, uint32_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repoutsd(int port, uint32_t *addr, int count)
	 * Write `count' 32-bit words from `addr' to `port'; `rep outsl'
	 * reads from %ds:(%rsi), which already holds the buffer (arg 2).
	 */
	ENTRY(repoutsd)
	movl	%edx, %ecx	/* %ecx = dword count */
	movw	%di, %dx	/* %dx = port */
	rep
	  outsl
	ret
	SET_SIZE(repoutsd)

#elif defined(__i386)

	/* IO_PORT/IO_ADDR/IO_COUNT stack offsets defined by repinsb above. */
	ENTRY(repoutsd)
	pushl	%esi
	movl	IO_ADDR(%esp), %esi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  outsl
	popl	%esi
	ret
	SET_SIZE(repoutsd)

#endif	/* __i386 */
#endif	/* __lint */
1846
/*
 * void int3(void)
 * void int18(void)
 * void int20(void)
 * void int_cmci(void)
 */

#if defined(__lint)

void
int3(void)
{}

void
int18(void)
{}

void
int20(void)
{}

void
int_cmci(void)
{}

#else	/* __lint */

	/* Trigger a breakpoint trap (vector T_BPTFLT). */
	ENTRY(int3)
	int	$T_BPTFLT
	ret
	SET_SIZE(int3)

	/* Raise a machine-check exception (vector T_MCE) by hand. */
	ENTRY(int18)
	int	$T_MCE
	ret
	SET_SIZE(int18)

	/*
	 * Enter the kernel debugger (vector T_DBGENTR), but only if the
	 * system was booted with the debug flag (RB_DEBUG in boothowto);
	 * otherwise this is a no-op.
	 */
	ENTRY(int20)
	movl	boothowto, %eax
	andl	$RB_DEBUG, %eax
	jz	1f

	int	$T_DBGENTR
1:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(int20)

	/*
	 * NOTE(review): raises vector T_ENOEXTFLT; presumably reused as
	 * the CMCI injection vector -- confirm against the trap table.
	 */
	ENTRY(int_cmci)
	int	$T_ENOEXTFLT
	ret
	SET_SIZE(int_cmci)

#endif	/* __lint */
1901
#if defined(__lint)

/* ARGSUSED */
int
scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
	 *
	 * Scan cp[0..size-1] for the first byte c whose table entry
	 * matches, i.e. (table[c] & mask) != 0, and return the number of
	 * bytes remaining from that byte to the end of the buffer
	 * (0 if no byte matches).
	 */
	ENTRY(scanc)
					/* rdi == size */
					/* rsi == cp */
					/* rdx == table */
					/* rcx == mask */
	addq	%rsi, %rdi		/* end = &cp[size] */
.scanloop:
	cmpq	%rdi, %rsi		/* while (cp < end */
	jnb	.scandone
	movzbq	(%rsi), %r8		/* %r8 = *cp */
	incq	%rsi			/* cp++ */
	testb	%cl, (%r8, %rdx)
	jz	.scanloop		/*  && (table[*cp] & mask) == 0) */
	decq	%rsi			/* (fix post-increment) */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	ret
	SET_SIZE(scanc)

#elif defined(__i386)

	/* Same contract as the amd64 version; args on the stack. */
	ENTRY(scanc)
	pushl	%edi
	pushl	%esi
	movb	24(%esp), %cl		/* mask = %cl */
	movl	16(%esp), %esi		/* cp = %esi */
	movl	20(%esp), %edx		/* table = %edx */
	movl	%esi, %edi
	addl	12(%esp), %edi		/* end = &cp[size]; */
.scanloop:
	cmpl	%edi, %esi		/* while (cp < end */
	jnb	.scandone
	movzbl	(%esi),  %eax		/* %al = *cp */
	incl	%esi			/* cp++ */
	movb	(%edx,  %eax), %al	/* %al = table[*cp] */
	testb	%al, %cl
	jz	.scanloop		/*   && (table[*cp] & mask) == 0) */
	dec	%esi			/* post-incremented */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	popl	%esi
	popl	%edi
	ret
	SET_SIZE(scanc)

#endif	/* __i386 */
#endif	/* __lint */
1962
/*
 * Replacement functions for ones that are normally inlined.
 * In addition to the copy in i86.il, they are defined here just in case.
 */

#if defined(__lint)

ulong_t
intr_clear(void)
{ return (0); }

ulong_t
clear_int_flag(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Disable interrupts and return the previous flags register value
	 * (suitable for a later intr_restore()).  Under Xen (__xpv),
	 * interrupts are virtualized as event delivery: unless the
	 * hypervisor is panicking we mask events instead, and synthesize
	 * PS_IE in the returned flags from the previous event mask.
	 */
	ENTRY(intr_clear)
	ENTRY(clear_int_flag)
	pushfq
	popq	%rax			/* %rax = current %rflags */
#if defined(__xpv)
	leaq	xpv_panicking, %rdi
	movl	(%rdi), %edi
	cmpl	$0, %edi
	jne	2f			/* panicking: use real cli below */
	CLIRET(%rdi, %dl)	/* returns event mask in %dl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andq    $_BITNOT(PS_IE), %rax
	testb	$1, %dl
	jnz	1f			/* events were masked: leave PS_IE clear */
	orq	$PS_IE, %rax
1:
	ret
2:
#endif
	CLI(%rdi)
	ret
	SET_SIZE(clear_int_flag)
	SET_SIZE(intr_clear)

#elif defined(__i386)

	/* Same contract as the amd64 version; flags returned in %eax. */
	ENTRY(intr_clear)
	ENTRY(clear_int_flag)
	pushfl
	popl	%eax
#if defined(__xpv)
	leal	xpv_panicking, %edx
	movl	(%edx), %edx
	cmpl	$0, %edx
	jne	2f
	CLIRET(%edx, %cl)	/* returns event mask in %cl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andl    $_BITNOT(PS_IE), %eax
	testb	$1, %cl
	jnz	1f
	orl	$PS_IE, %eax
1:
	ret
2:
#endif
	CLI(%edx)
	ret
	SET_SIZE(clear_int_flag)
	SET_SIZE(intr_clear)

#endif	/* __i386 */
#endif	/* __lint */
2038
#if defined(__lint)

struct cpu *
curcpup(void)
{ return 0; }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Return the current CPU's cpu_t pointer, read from the
	 * %gs-based per-CPU area (CPU_SELF slot).
	 */
	ENTRY(curcpup)
	movq	%gs:CPU_SELF, %rax
	ret
	SET_SIZE(curcpup)

#elif defined(__i386)

	ENTRY(curcpup)
	movl	%gs:CPU_SELF, %eax
	ret
	SET_SIZE(curcpup)

#endif	/* __i386 */
#endif	/* __lint */
2063
/*
 * htonll(), ntohll(), htonl(), ntohl(), htons(), ntohs()
 *
 * These functions reverse the byte order of the input parameter and return
 * the result.  This is to convert the byte order from host byte order
 * (little endian) to network byte order (big endian), or vice versa.
 */

#if defined(__lint)

uint64_t
htonll(uint64_t i)
{ return (i); }

uint64_t
ntohll(uint64_t i)
{ return (i); }

uint32_t
htonl(uint32_t i)
{ return (i); }

uint32_t
ntohl(uint32_t i)
{ return (i); }

uint16_t
htons(uint16_t i)
{ return (i); }

uint16_t
ntohs(uint16_t i)
{ return (i); }

#else	/* __lint */

#if defined(__amd64)

	/* 64-bit byte swap: bswapq reverses all 8 bytes of %rax. */
	ENTRY(htonll)
	ALTENTRY(ntohll)
	movq	%rdi, %rax
	bswapq	%rax
	ret
	SET_SIZE(ntohll)
	SET_SIZE(htonll)

	/* XX64 there must be shorter sequences for this */
	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	%edi, %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

	/* XX64 there must be better sequences for this */
	/* 32-bit swap then shift right leaves the swapped low 16 bits. */
	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	%edi, %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)

#elif defined(__i386)

	/*
	 * 64-bit swap: the two input halves swap positions (low 32 bits
	 * from 4(%esp) become the high half of the %edx:%eax result) and
	 * each half is byte-reversed.
	 */
	ENTRY(htonll)
	ALTENTRY(ntohll)
	movl	4(%esp), %edx
	movl	8(%esp), %eax
	bswap	%edx
	bswap	%eax
	ret
	SET_SIZE(ntohll)
	SET_SIZE(htonll)

	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	4(%esp), %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	4(%esp), %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)

#endif	/* __i386 */
#endif	/* __lint */
2158
2159
#if defined(__lint)

/* ARGSUSED */
void
intr_restore(ulong_t i)
{ return; }

/* ARGSUSED */
void
restore_int_flag(ulong_t i)
{ return; }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Re-enable interrupts iff PS_IE is set in `i', the flags value
	 * previously returned by intr_clear()/clear_int_flag().  Under
	 * Xen (__xpv) the event mask is adjusted instead of using sti,
	 * unless the hypervisor is panicking.
	 */
	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	testq	$PS_IE, %rdi
	jz	1f			/* interrupts were disabled: nothing to do */
#if defined(__xpv)
	leaq	xpv_panicking, %rsi
	movl	(%rsi), %esi
	cmpl	$0, %esi
	jne	1f
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored.
	 * The virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%rsi, %rdi)
#else
	sti
#endif
1:
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

#elif defined(__i386)

	/* Same contract as the amd64 version; `i' is on the stack. */
	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	testl	$PS_IE, 4(%esp)
	jz	1f
#if defined(__xpv)
	leal	xpv_panicking, %edx
	movl	(%edx), %edx
	cmpl	$0, %edx
	jne	1f
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored.
	 * The virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%edx, 4(%esp))
#else
	sti
#endif
1:
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

#endif	/* __i386 */
#endif	/* __lint */
2226
#if defined(__lint)

void
sti(void)
{}

void
cli(void)
{}

#else	/* __lint */

	/* Unconditionally enable interrupts (STI macro handles __xpv). */
	ENTRY(sti)
	STI
	ret
	SET_SIZE(sti)

	/*
	 * Unconditionally disable interrupts.  The CLI macro needs a
	 * scratch register (used for the Xen event-mask path).
	 */
	ENTRY(cli)
#if defined(__amd64)
	CLI(%rax)
#elif defined(__i386)
	CLI(%eax)
#endif	/* __i386 */
	ret
	SET_SIZE(cli)

#endif	/* __lint */
2254
#if defined(__lint)

dtrace_icookie_t
dtrace_interrupt_disable(void)
{ return (0); }

#else   /* __lint */

#if defined(__amd64)

	/*
	 * Disable interrupts and return the previous flags register as a
	 * dtrace_icookie_t for dtrace_interrupt_enable().  Mirrors
	 * intr_clear(), including the Xen event-mask/PS_IE synthesis.
	 */
	ENTRY(dtrace_interrupt_disable)
	pushfq
	popq	%rax			/* %rax = current %rflags */
#if defined(__xpv)
	leaq	xpv_panicking, %rdi
	movl	(%rdi), %edi
	cmpl	$0, %edi
	jne	.dtrace_interrupt_disable_done
	CLIRET(%rdi, %dl)	/* returns event mask in %dl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andq    $_BITNOT(PS_IE), %rax
	testb	$1, %dl
	jnz	.dtrace_interrupt_disable_done
	orq	$PS_IE, %rax
#else
	CLI(%rdx)
#endif
.dtrace_interrupt_disable_done:
	ret
	SET_SIZE(dtrace_interrupt_disable)

#elif defined(__i386)

	/* Same contract as the amd64 version; cookie returned in %eax. */
	ENTRY(dtrace_interrupt_disable)
	pushfl
	popl	%eax
#if defined(__xpv)
	leal	xpv_panicking, %edx
	movl	(%edx), %edx
	cmpl	$0, %edx
	jne	.dtrace_interrupt_disable_done
	CLIRET(%edx, %cl)	/* returns event mask in %cl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andl    $_BITNOT(PS_IE), %eax
	testb	$1, %cl
	jnz	.dtrace_interrupt_disable_done
	orl	$PS_IE, %eax
#else
	CLI(%edx)
#endif
.dtrace_interrupt_disable_done:
	ret
	SET_SIZE(dtrace_interrupt_disable)

#endif	/* __i386 */
#endif	/* __lint */
2315
#if defined(__lint)

/*ARGSUSED*/
void
dtrace_interrupt_enable(dtrace_icookie_t cookie)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Restore the interrupt state saved by dtrace_interrupt_disable():
	 * the whole saved flags word (the cookie) is loaded back into
	 * %rflags via popfq.  Under Xen the event mask is derived from the
	 * cookie's PS_IE bit instead, unless the hypervisor is panicking.
	 */
	ENTRY(dtrace_interrupt_enable)
	pushq	%rdi
	popfq				/* %rflags = cookie */
#if defined(__xpv)
	leaq	xpv_panicking, %rdx
	movl	(%rdx), %edx
	cmpl	$0, %edx
	jne	.dtrace_interrupt_enable_done
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored. The
	 * virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%rdx, %rdi)
#endif
.dtrace_interrupt_enable_done:
	ret
	SET_SIZE(dtrace_interrupt_enable)

#elif defined(__i386)

	/* Same contract as the amd64 version; cookie is on the stack. */
	ENTRY(dtrace_interrupt_enable)
	movl	4(%esp), %eax
	pushl	%eax
	popfl				/* %eflags = cookie */
#if defined(__xpv)
	leal	xpv_panicking, %edx
	movl	(%edx), %edx
	cmpl	$0, %edx
	jne	.dtrace_interrupt_enable_done
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored. The
	 * virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%edx, %eax)
#endif
.dtrace_interrupt_enable_done:
	ret
	SET_SIZE(dtrace_interrupt_enable)

#endif	/* __i386 */
#endif	/* __lint */
2370
2371
/*
 * DTrace memory barriers.  No fence instruction is emitted: on x86 the
 * ordering these callers require comes for free, so both routines reduce
 * to a bare return.  (Note this section tests plain `lint', not `__lint'
 * as the rest of the file does.)
 */
#if defined(lint)

void
dtrace_membar_producer(void)
{}

void
dtrace_membar_consumer(void)
{}

#else	/* __lint */

	ENTRY(dtrace_membar_producer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_producer)

	ENTRY(dtrace_membar_consumer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_consumer)

#endif	/* __lint */
2395
#if defined(__lint)

kthread_id_t
threadp(void)
{ return ((kthread_id_t)0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Return the current thread pointer, read from the %gs-based
	 * per-CPU area (CPU_THREAD slot).
	 */
	ENTRY(threadp)
	movq	%gs:CPU_THREAD, %rax
	ret
	SET_SIZE(threadp)

#elif defined(__i386)

	ENTRY(threadp)
	movl	%gs:CPU_THREAD, %eax
	ret
	SET_SIZE(threadp)

#endif	/* __i386 */
#endif	/* __lint */
2420
/*
 *   Checksum routine for Internet Protocol Headers
 */

#if defined(__lint)

/* ARGSUSED */
unsigned int
ip_ocsum(
	ushort_t *address,	/* ptr to 1st message buffer */
	int halfword_count,	/* length of data */
	unsigned int sum)	/* partial checksum */
{
	int		i;
	unsigned int	psum = 0;	/* partial sum */

	for (i = 0; i < halfword_count; i++, address++) {
		psum += *address;
	}

	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	psum += sum;

	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	return (psum);
}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Sum `halfword_count' 16-bit words starting at `address' into
	 * the partial checksum `sum' and return the result folded to 16
	 * bits (see the lint stub above for the reference algorithm).
	 *
	 * The main loop adds 32 halfwords (64 bytes) per iteration with
	 * 32-bit add-with-carry, alternating between the two accumulators
	 * %edx and %eax; a computed jump into the chain via the table at
	 * .ip_ocsum_jmptbl handles the remainder.  An odd leading
	 * halfword (unaligned buffer) and an odd trailing halfword are
	 * each added separately.
	 */
	ENTRY(ip_ocsum)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	/* Sanity: the buffer must be a kernel address. */
	movq	postbootkernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jnb	1f
	xorl	%eax, %eax
	movq	%rdi, %rsi
	leaq	.ip_ocsum_panic_msg(%rip), %rdi
	call	panic
	/*NOTREACHED*/
.ip_ocsum_panic_msg:
	.string	"ip_ocsum: address 0x%p below kernelbase\n"
1:
#endif
	movl	%esi, %ecx	/* halfword_count */
	movq	%rdi, %rsi	/* address */
				/* partial sum in %edx */
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done
	testq	$3, %rsi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:	/* XX64 opportunities for 8-byte operations? */
.next_iter:
	/* XX64 opportunities for prefetch? */
	/* XX64 compute csum with 64 bit quantities? */
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%rsi), %edx
.only60:
	adcl	4(%rsi), %eax
.only56:
	adcl	8(%rsi), %edx
.only52:
	adcl	12(%rsi), %eax
.only48:
	adcl	16(%rsi), %edx
.only44:
	adcl	20(%rsi), %eax
.only40:
	adcl	24(%rsi), %edx
.only36:
	adcl	28(%rsi), %eax
.only32:
	adcl	32(%rsi), %edx
.only28:
	adcl	36(%rsi), %eax
.only24:
	adcl	40(%rsi), %edx
.only20:
	adcl	44(%rsi), %eax
.only16:
	adcl	48(%rsi), %edx
.only12:
	adcl	52(%rsi), %eax
.only8:
	adcl	56(%rsi), %edx
.only4:
	adcl	60(%rsi), %eax	/* could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addq	$64, %rsi
	testl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	/* Merge the two accumulators and fold to 16 bits. */
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	leave
	ret

.ip_csum_notaligned:
	/* Consume one leading halfword to reach 4-byte alignment. */
	xorl	%edi, %edi
	movw	(%rsi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addq	$2, %rsi
	decl	%ecx
	jmp	.ip_csum_aligned

.less_than_32:
	addl	$32, %ecx	/* %ecx = remaining halfwords (0..31) */
	testl	$1, %ecx
	jz	.size_aligned
	/* Odd count: add the final halfword separately. */
	andl	$0xfe, %ecx
	movzwl	(%rsi, %rcx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	/* Jump into the add chain to consume the remaining dwords. */
	movl	%ecx, %edi
	shrl	$1, %ecx	/* %ecx = remaining dwords */
	shl	$1, %edi
	subq	$64, %rdi	/* bias %rsi so .onlyN offsets line up */
	addq	%rdi, %rsi
	leaq    .ip_ocsum_jmptbl(%rip), %rdi
	leaq	(%rdi, %rcx, 8), %rdi
	xorl	%ecx, %ecx
	clc			/* entering mid-chain: no carry pending */
	jmp 	*(%rdi)

	.align	8
.ip_ocsum_jmptbl:
	.quad	.only0, .only4, .only8, .only12, .only16, .only20
	.quad	.only24, .only28, .only32, .only36, .only40, .only44
	.quad	.only48, .only52, .only56, .only60
	SET_SIZE(ip_ocsum)

#elif defined(__i386)

	/* Same algorithm as the amd64 version; args on the stack. */
	ENTRY(ip_ocsum)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movl	12(%ebp), %ecx	/* count of half words */
	movl	16(%ebp), %edx	/* partial checksum */
	movl	8(%ebp), %esi
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done

	testl	$3, %esi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:
.next_iter:
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%esi), %edx
.only60:
	adcl	4(%esi), %eax
.only56:
	adcl	8(%esi), %edx
.only52:
	adcl	12(%esi), %eax
.only48:
	adcl	16(%esi), %edx
.only44:
	adcl	20(%esi), %eax
.only40:
	adcl	24(%esi), %edx
.only36:
	adcl	28(%esi), %eax
.only32:
	adcl	32(%esi), %edx
.only28:
	adcl	36(%esi), %eax
.only24:
	adcl	40(%esi), %edx
.only20:
	adcl	44(%esi), %eax
.only16:
	adcl	48(%esi), %edx
.only12:
	adcl	52(%esi), %eax
.only8:
	adcl	56(%esi), %edx
.only4:
	adcl	60(%esi), %eax	/* We could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* we could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addl	$64, %esi
	andl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	popl	%edi		/* restore registers */
	popl	%esi
	popl	%ebx
	leave
	ret

.ip_csum_notaligned:
	/* Consume one leading halfword to reach 4-byte alignment. */
	xorl	%edi, %edi
	movw	(%esi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addl	$2, %esi
	decl	%ecx
	jmp	.ip_csum_aligned

.less_than_32:
	addl	$32, %ecx	/* %ecx = remaining halfwords (0..31) */
	testl	$1, %ecx
	jz	.size_aligned
	andl	$0xfe, %ecx
	movzwl	(%esi, %ecx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	movl	%ecx, %edi
	shrl	$1, %ecx	/* %ecx = remaining dwords */
	shl	$1, %edi
	subl	$64, %edi	/* bias %esi so .onlyN offsets line up */
	addl	%edi, %esi
	movl	$.ip_ocsum_jmptbl, %edi
	lea	(%edi, %ecx, 4), %edi
	xorl	%ecx, %ecx
	clc			/* entering mid-chain: no carry pending */
	jmp 	*(%edi)
	SET_SIZE(ip_ocsum)

	.data
	.align	4

.ip_ocsum_jmptbl:
	.long	.only0, .only4, .only8, .only12, .only16, .only20
	.long	.only24, .only28, .only32, .only36, .only40, .only44
	.long	.only48, .only52, .only56, .only60


#endif	/* __i386 */
#endif	/* __lint */
2691
/*
 * multiply two long numbers and yield a u_longlong_t result, callable from C.
 * Provided to manipulate hrtime_t values.
 */
#if defined(__lint)

/* result = a * b; */

/* ARGSUSED */
unsigned long long
mul32(uint_t a, uint_t b)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * unsigned long long mul32(uint_t a, uint_t b)
	 * 32x32 -> 64-bit unsigned multiply; mull leaves the product in
	 * %edx:%eax, which is merged into %rax for the 64-bit return.
	 */
	ENTRY(mul32)
	xorl	%edx, %edx	/* XX64 joe, paranoia? */
	movl	%edi, %eax
	mull	%esi
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	SET_SIZE(mul32)

#elif defined(__i386)

	/* 64-bit result is returned in %edx:%eax, as mull produces it. */
	ENTRY(mul32)
	movl	8(%esp), %eax
	movl	4(%esp), %ecx
	mull	%ecx
	ret
	SET_SIZE(mul32)

#endif	/* __i386 */
#endif	/* __lint */
2729
#if defined(notused)
#if defined(__lint)
/* ARGSUSED */
void
load_pte64(uint64_t *pte, uint64_t pte_value)
{}
#else	/* __lint */
	/*
	 * Store the 64-bit value pte_value into *pte (i386 stack-args
	 * version only; the whole routine is compiled out by `notused').
	 * The high word is written before the low word.
	 */
	.globl load_pte64
load_pte64:
	movl	4(%esp), %eax	/* %eax = pte */
	movl	8(%esp), %ecx	/* %ecx = low 32 bits of pte_value */
	movl	12(%esp), %edx	/* %edx = high 32 bits of pte_value */
	movl	%edx, 4(%eax)
	movl	%ecx, (%eax)
	ret
#endif	/* __lint */
#endif	/* notused */
2747
#if defined(__lint)

/*ARGSUSED*/
void
scan_memory(caddr_t addr, size_t size)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void scan_memory(caddr_t addr, size_t size)
	 *
	 * Read every quadword in [addr, addr + size) with rep lods,
	 * discarding the values.  Any tail smaller than 8 bytes
	 * (4 on i386) is not touched.
	 */
	ENTRY(scan_memory)
	shrq	$3, %rsi	/* convert %rsi from byte to quadword count */
	jz	.scanm_done
	movq	%rsi, %rcx	/* move count into rep control register */
	movq	%rdi, %rsi	/* move addr into lodsq control reg. */
	rep lodsq		/* scan the memory range */
.scanm_done:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(scan_memory)

#elif defined(__i386)

	ENTRY(scan_memory)
	pushl	%ecx
	pushl	%esi
	movl	16(%esp), %ecx	/* move 2nd arg into rep control register */
	shrl	$2, %ecx	/* convert from byte count to word count */
	jz	.scanm_done
	movl	12(%esp), %esi	/* move 1st arg into lodsw control register */
	.byte	0xf3		/* rep prefix.  lame assembler.  sigh. */
	lodsl
.scanm_done:
	popl	%esi
	popl	%ecx
	ret
	SET_SIZE(scan_memory)

#endif	/* __i386 */
#endif	/* __lint */
2789
2790
#if defined(__lint)

/*ARGSUSED */
int
lowbit(ulong_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int lowbit(ulong_t i)
	 *
	 * Return the 1-based position of the lowest set bit of `i',
	 * or 0 if i == 0.  Relies on bsf leaving the destination
	 * unchanged (the preloaded -1) when the source is zero, so that
	 * incl yields 0; in-practice behavior of both vendors -- the
	 * architectural definition calls it undefined.
	 */
	ENTRY(lowbit)
	movl	$-1, %eax
	bsfq	%rdi, %rax
	incl	%eax
	ret
	SET_SIZE(lowbit)

#elif defined(__i386)

	ENTRY(lowbit)
	movl	$-1, %eax	/* -1 survives bsf if the source is 0 */
	bsfl	4(%esp), %eax
	incl	%eax
	ret
	SET_SIZE(lowbit)

#endif	/* __i386 */
#endif	/* __lint */
2820
#if defined(__lint)

/*ARGSUSED*/
int
highbit(ulong_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int highbit(ulong_t i)
	 *
	 * Return the 1-based position of the highest set bit of `i',
	 * or 0 if i == 0 (same bsr-leaves-dest-intact trick as lowbit).
	 */
	ENTRY(highbit)
	movl	$-1, %eax
	bsrq	%rdi, %rax
	incl	%eax
	ret
	SET_SIZE(highbit)

#elif defined(__i386)

	ENTRY(highbit)
	movl	$-1, %eax	/* -1 survives bsr if the source is 0 */
	bsrl	4(%esp), %eax
	incl	%eax
	ret
	SET_SIZE(highbit)

#endif	/* __i386 */
#endif	/* __lint */
2850
#if defined(__lint)

/*ARGSUSED*/
int
highbit64(uint64_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int highbit64(uint64_t i)
	 * Identical to highbit() on amd64, where ulong_t is 64 bits.
	 */
	ENTRY(highbit64)
	movl	$-1, %eax
	bsrq	%rdi, %rax
	incl	%eax
	ret
	SET_SIZE(highbit64)

#elif defined(__i386)

	/*
	 * Scan the high 32 bits first; if they are zero (bsr sets ZF),
	 * fall back to the low 32 bits with the -1 preload trick so an
	 * all-zero input returns 0.
	 */
	ENTRY(highbit64)
	bsrl	8(%esp), %eax	/* high word of i */
	jz	.lowbit
	addl	$32, %eax	/* bias bit index into the high word */
	jmp	.done

.lowbit:
	movl	$-1, %eax
	bsrl	4(%esp), %eax	/* low word of i */
.done:
	incl	%eax		/* convert 0-based index to 1-based */
	ret
	SET_SIZE(highbit64)

#endif	/* __i386 */
#endif	/* __lint */
2887
2888#if defined(__lint)
2889
2890/*ARGSUSED*/
2891uint64_t
2892rdmsr(uint_t r)
2893{ return (0); }
2894
2895/*ARGSUSED*/
2896void
2897wrmsr(uint_t r, const uint64_t val)
2898{}
2899
2900/*ARGSUSED*/
2901uint64_t
2902xrdmsr(uint_t r)
2903{ return (0); }
2904
2905/*ARGSUSED*/
2906void
2907xwrmsr(uint_t r, const uint64_t val)
2908{}
2909
2910void
2911invalidate_cache(void)
2912{}
2913
2914/*ARGSUSED*/
2915uint64_t
2916get_xcr(uint_t r)
2917{ return (0); }
2918
2919/*ARGSUSED*/
2920void
2921set_xcr(uint_t r, const uint64_t val)
2922{}
2923
2924#else  /* __lint */
2925
2926#define	XMSR_ACCESS_VAL		$0x9c5a203a
2927
2928#if defined(__amd64)
2929
2930	ENTRY(rdmsr)
2931	movl	%edi, %ecx
2932	rdmsr
2933	shlq	$32, %rdx
2934	orq	%rdx, %rax
2935	ret
2936	SET_SIZE(rdmsr)
2937
2938	ENTRY(wrmsr)
2939	movq	%rsi, %rdx
2940	shrq	$32, %rdx
2941	movl	%esi, %eax
2942	movl	%edi, %ecx
2943	wrmsr
2944	ret
2945	SET_SIZE(wrmsr)
2946
2947	ENTRY(xrdmsr)
2948	pushq	%rbp
2949	movq	%rsp, %rbp
2950	movl	%edi, %ecx
2951	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
2952	rdmsr
2953	shlq	$32, %rdx
2954	orq	%rdx, %rax
2955	leave
2956	ret
2957	SET_SIZE(xrdmsr)
2958
2959	ENTRY(xwrmsr)
2960	pushq	%rbp
2961	movq	%rsp, %rbp
2962	movl	%edi, %ecx
2963	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
2964	movq	%rsi, %rdx
2965	shrq	$32, %rdx
2966	movl	%esi, %eax
2967	wrmsr
2968	leave
2969	ret
2970	SET_SIZE(xwrmsr)
2971
2972	ENTRY(get_xcr)
2973	movl	%edi, %ecx
2974	#xgetbv
2975	.byte	0x0f,0x01,0xd0
2976	shlq	$32, %rdx
2977	orq	%rdx, %rax
2978	ret
2979	SET_SIZE(get_xcr)
2980
2981	ENTRY(set_xcr)
2982	movq	%rsi, %rdx
2983	shrq	$32, %rdx
2984	movl	%esi, %eax
2985	movl	%edi, %ecx
2986	#xsetbv
2987	.byte	0x0f,0x01,0xd1
2988	ret
2989	SET_SIZE(set_xcr)
2990
2991#elif defined(__i386)
2992
2993	ENTRY(rdmsr)
2994	movl	4(%esp), %ecx
2995	rdmsr
2996	ret
2997	SET_SIZE(rdmsr)
2998
2999	ENTRY(wrmsr)
3000	movl	4(%esp), %ecx
3001	movl	8(%esp), %eax
3002	movl	12(%esp), %edx
3003	wrmsr
3004	ret
3005	SET_SIZE(wrmsr)
3006
3007	ENTRY(xrdmsr)
3008	pushl	%ebp
3009	movl	%esp, %ebp
3010	movl	8(%esp), %ecx
3011	pushl	%edi
3012	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
3013	rdmsr
3014	popl	%edi
3015	leave
3016	ret
3017	SET_SIZE(xrdmsr)
3018
	/*
	 * 32-bit xwrmsr: as wrmsr but with the XMSR_ACCESS_VAL passcode
	 * in %edi (callee-saved, so preserved around the wrmsr).
	 */
	ENTRY(xwrmsr)
	pushl	%ebp
	movl	%esp, %ebp
	movl	8(%esp), %ecx		/ MSR number
	movl	12(%esp), %eax		/ low 32 bits of value
	movl	16(%esp), %edx		/ high 32 bits of value
	pushl	%edi
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	wrmsr
	popl	%edi
	leave
	ret
	SET_SIZE(xwrmsr)
3032
	/*
	 * 32-bit get_xcr: XCR number at 4(%esp); xgetbv leaves the
	 * 64-bit result in %edx:%eax, the 32-bit ABI return convention.
	 */
	ENTRY(get_xcr)
	movl	4(%esp), %ecx
	#xgetbv
	.byte	0x0f,0x01,0xd0
	ret
	SET_SIZE(get_xcr)
3039
	/*
	 * 32-bit set_xcr: args are (xcr, value-low, value-high);
	 * xsetbv consumes %ecx and %edx:%eax.
	 */
	ENTRY(set_xcr)
	movl	4(%esp), %ecx		/ XCR number
	movl	8(%esp), %eax		/ low 32 bits of value
	movl	12(%esp), %edx		/ high 32 bits of value
	#xsetbv
	.byte	0x0f,0x01,0xd1
	ret
	SET_SIZE(set_xcr)
3048
3049#endif	/* __i386 */
3050
	/*
	 * void invalidate_cache(void)
	 *
	 * Write back and invalidate this CPU's caches (wbinvd).  This is
	 * a very heavyweight, serializing operation.
	 */
	ENTRY(invalidate_cache)
	wbinvd
	ret
	SET_SIZE(invalidate_cache)
3055
3056#endif	/* __lint */
3057
3058#if defined(__lint)
3059
/* Lint-only stub; the real getcregs() is the assembly below. */
/*ARGSUSED*/
void
getcregs(struct cregs *crp)
{}
3064
3065#else	/* __lint */
3066
3067#if defined(__amd64)
3068
	/*
	 * void getcregs(struct cregs *crp)
	 *
	 * Snapshot the control registers and descriptor-table registers
	 * into *crp (%rdi), primarily for panic/crash-dump use.
	 */
	ENTRY_NP(getcregs)
#if defined(__xpv)
	/*
	 * Only a few of the hardware control registers or descriptor tables
	 * are directly accessible to us, so just zero the structure.
	 *
	 * XXPV	Perhaps it would be helpful for the hypervisor to return
	 *	virtualized versions of these for post-mortem use.
	 *	(Need to reevaluate - perhaps it already does!)
	 */
	pushq	%rdi		/* save *crp */
	movq	$CREGSZ, %rsi
	call	bzero
	popq	%rdi

	/*
	 * Dump what limited information we can
	 */
	movq	%cr0, %rax
	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
	movq	%cr2, %rax
	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
	movq	%cr3, %rax
	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
	movq	%cr4, %rax
	movq	%rax, CREG_CR4(%rdi)	/* cr4 */

#else	/* __xpv */

/*
 * Read MSR 'r' and store its 64-bit value at offset 'off' from
 * pointer register 'd' as two 32-bit halves.  Clobbers %eax/%ecx/%edx.
 */
#define	GETMSR(r, off, d)	\
	movl	$r, %ecx;	\
	rdmsr;			\
	movl	%eax, off(d);	\
	movl	%edx, off+4(d)

	/*
	 * sgdt/sidt store only 10 bytes and sldt/str only 2; pre-zero
	 * the fields so the unwritten tail bytes are deterministic.
	 */
	xorl	%eax, %eax
	movq	%rax, CREG_GDT+8(%rdi)
	sgdt	CREG_GDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_IDT+8(%rdi)
	sidt	CREG_IDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_LDT(%rdi)
	sldt	CREG_LDT(%rdi)		/* 2 bytes */
	movq	%rax, CREG_TASKR(%rdi)
	str	CREG_TASKR(%rdi)	/* 2 bytes */
	movq	%cr0, %rax
	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
	movq	%cr2, %rax
	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
	movq	%cr3, %rax
	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
	movq	%cr4, %rax
	movq	%rax, CREG_CR4(%rdi)	/* cr4 */
	movq	%cr8, %rax
	movq	%rax, CREG_CR8(%rdi)	/* cr8 */
	GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
	GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
#endif	/* __xpv */
	ret
	SET_SIZE(getcregs)
3128
3129#undef GETMSR
3130
3131#elif defined(__i386)
3132
	/*
	 * 32-bit getcregs(): snapshot control/descriptor registers into
	 * the struct cregs whose address is at 4(%esp).
	 */
	ENTRY_NP(getcregs)
#if defined(__xpv)
	/*
	 * Only a few of the hardware control registers or descriptor tables
	 * are directly accessible to us, so just zero the structure.
	 *
	 * XXPV	Perhaps it would be helpful for the hypervisor to return
	 *	virtualized versions of these for post-mortem use.
	 *	(Need to reevaluate - perhaps it already does!)
	 */
	movl	4(%esp), %edx
	pushl	$CREGSZ
	pushl	%edx
	call	bzero
	addl	$8, %esp
	movl	4(%esp), %edx		/ reload crp (stack restored above)

	/*
	 * Dump what limited information we can
	 */
	movl	%cr0, %eax
	movl	%eax, CREG_CR0(%edx)	/* cr0 */
	movl	%cr2, %eax
	movl	%eax, CREG_CR2(%edx)	/* cr2 */
	movl	%cr3, %eax
	movl	%eax, CREG_CR3(%edx)	/* cr3 */
	movl	%cr4, %eax
	movl	%eax, CREG_CR4(%edx)	/* cr4 */

#else	/* __xpv */

	movl	4(%esp), %edx
	/ sgdt/sidt store 6 bytes here; pre-zero the trailing word
	movw	$0, CREG_GDT+6(%edx)
	movw	$0, CREG_IDT+6(%edx)
	sgdt	CREG_GDT(%edx)		/* gdt */
	sidt	CREG_IDT(%edx)		/* idt */
	sldt	CREG_LDT(%edx)		/* ldt */
	str	CREG_TASKR(%edx)	/* task */
	movl	%cr0, %eax
	movl	%eax, CREG_CR0(%edx)	/* cr0 */
	movl	%cr2, %eax
	movl	%eax, CREG_CR2(%edx)	/* cr2 */
	movl	%cr3, %eax
	movl	%eax, CREG_CR3(%edx)	/* cr3 */
	/ %cr4 only exists on CPUs with the large-page feature; reading
	/ it elsewhere would #UD, so gate on x86_featureset.
	bt	$X86FSET_LARGEPAGE, x86_featureset
	jnc	.nocr4
	movl	%cr4, %eax
	movl	%eax, CREG_CR4(%edx)	/* cr4 */
	jmp	.skip
.nocr4:
	movl	$0, CREG_CR4(%edx)
.skip:
#endif
	ret
	SET_SIZE(getcregs)
3188
3189#endif	/* __i386 */
3190#endif	/* __lint */
3191
3192
3193/*
3194 * A panic trigger is a word which is updated atomically and can only be set
3195 * once.  We atomically store 0xDEFACEDD and load the old value.  If the
3196 * previous value was 0, we succeed and return 1; otherwise return 0.
3197 * This allows a partially corrupt trigger to still trigger correctly.  DTrace
3198 * has its own version of this function to allow it to panic correctly from
3199 * probe context.
3200 */
3201#if defined(__lint)
3202
/* Lint-only stub; the real panic_trigger() is the assembly below. */
/*ARGSUSED*/
int
panic_trigger(int *tp)
{ return (0); }
3207
/* Lint-only stub; the real dtrace_panic_trigger() is the assembly below. */
/*ARGSUSED*/
int
dtrace_panic_trigger(int *tp)
{ return (0); }
3212
3213#else	/* __lint */
3214
3215#if defined(__amd64)
3216
	/*
	 * int panic_trigger(int *tp)
	 *
	 * Atomically xchg 0xdefacedd into *tp; return 1 if the previous
	 * value was 0 (we won the race to panic), else 0.  See the block
	 * comment above for the rationale.
	 * (The initial xorl of %eax is overwritten on both exit paths.)
	 */
	ENTRY_NP(panic_trigger)
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	lock
	  xchgl	%edx, (%rdi)		/* %edx = old trigger value */
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax		/* already triggered: return 0 */
	ret
0:	movl	$1, %eax		/* we set it first: return 1 */
	ret
	SET_SIZE(panic_trigger)
3229
	/*
	 * int dtrace_panic_trigger(int *tp)
	 *
	 * Identical logic to panic_trigger(); kept as a separate entry
	 * point so DTrace can panic safely from probe context.
	 */
	ENTRY_NP(dtrace_panic_trigger)
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	lock
	  xchgl	%edx, (%rdi)		/* %edx = old trigger value */
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax		/* already triggered: return 0 */
	ret
0:	movl	$1, %eax		/* we set it first: return 1 */
	ret
	SET_SIZE(dtrace_panic_trigger)
3242
3243#elif defined(__i386)
3244
	/*
	 * 32-bit panic_trigger(): same atomic-xchg protocol as the
	 * 64-bit version above; argument taken from the stack.
	 */
	ENTRY_NP(panic_trigger)
	movl	4(%esp), %edx		/ %edx = address of trigger
	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
	lock				/ assert lock
	xchgl %eax, (%edx)		/ exchange %eax and the trigger
	cmpl	$0, %eax		/ if (%eax == 0x0)
	je	0f			/   return (1);
	movl	$0, %eax		/ else
	ret				/   return (0);
0:	movl	$1, %eax
	ret
	SET_SIZE(panic_trigger)
3257
	/*
	 * 32-bit dtrace_panic_trigger(): identical to panic_trigger()
	 * above; separate entry point for DTrace probe context.
	 */
	ENTRY_NP(dtrace_panic_trigger)
	movl	4(%esp), %edx		/ %edx = address of trigger
	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
	lock				/ assert lock
	xchgl %eax, (%edx)		/ exchange %eax and the trigger
	cmpl	$0, %eax		/ if (%eax == 0x0)
	je	0f			/   return (1);
	movl	$0, %eax		/ else
	ret				/   return (0);
0:	movl	$1, %eax
	ret
	SET_SIZE(dtrace_panic_trigger)
3270
3271#endif	/* __i386 */
3272#endif	/* __lint */
3273
3274/*
3275 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
3276 * into the panic code implemented in panicsys().  vpanic() is responsible
3277 * for passing through the format string and arguments, and constructing a
3278 * regs structure on the stack into which it saves the current register
3279 * values.  If we are not dying due to a fatal trap, these registers will
3280 * then be preserved in panicbuf as the current processor state.  Before
3281 * invoking panicsys(), vpanic() activates the first panic trigger (see
3282 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
3283 * DTrace takes a slightly different panic path if it must panic from probe
3284 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
3285 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
3286 * branches back into vpanic().
3287 */
3288#if defined(__lint)
3289
/* Lint-only stub; the real vpanic() is the assembly below. */
/*ARGSUSED*/
void
vpanic(const char *format, va_list alist)
{}
3294
/* Lint-only stub; the real dtrace_vpanic() is the assembly below. */
/*ARGSUSED*/
void
dtrace_vpanic(const char *format, va_list alist)
{}
3299
3300#else	/* __lint */
3301
3302#if defined(__amd64)
3303
	/*
	 * void vpanic(const char *format, va_list alist)
	 *
	 * Push the volatile registers (preserving entry-time values),
	 * arm the panic trigger, optionally switch to panic_stack, build
	 * a struct regs from the saved values, and call panicsys().  The
	 * offsets in the comments are relative to %rsp after all pushes,
	 * i.e. relative to %rbx below.
	 */
	ENTRY_NP(vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip | 	0x60	*/
	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
	pushfq					/* | rfl  |	0x50	*/
	pushq	%r11				/* | %r11 |	0x48	*/
	pushq	%r10				/* | %r10 |	0x40	*/
	pushq	%rbx				/* | %rbx |	0x38	*/
	pushq	%rax				/* | %rax |	0x30	*/
	pushq	%r9				/* | %r9  |	0x28	*/
	pushq	%r8				/* | %r8  |	0x20	*/
	pushq	%rcx				/* | %rcx |	0x18	*/
	pushq	%rdx				/* | %rdx |	0x10	*/
	pushq	%rsi				/* | %rsi |	0x8 alist */
	pushq	%rdi				/* | %rdi |	0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	panic_trigger			/* %eax = panic_trigger() */

vpanic_common:
	/*
	 * The panic_trigger result is in %eax from the call above, and
	 * dtrace_panic places it in %eax before branching here.
	 * The rdmsr instructions that follow below will clobber %eax so
	 * we stash the panic_trigger result in %r11d.
	 */
	movl	%eax, %r11d
	cmpl	$0, %r11d
	je	0f

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	leaq	panic_stack(%rip), %rsp
	addq	$PANICSTKSIZE, %rsp
0:	subq	$REGSIZE, %rsp			/* allocate struct regs */
	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
	movq	0x0(%rbx), %rcx
	movq	%rcx, REGOFF_RDI(%rsp)
	movq	0x8(%rbx), %rcx
	movq	%rcx, REGOFF_RSI(%rsp)
	movq	0x10(%rbx), %rcx
	movq	%rcx, REGOFF_RDX(%rsp)
	movq	0x18(%rbx), %rcx
	movq	%rcx, REGOFF_RCX(%rsp)
	movq	0x20(%rbx), %rcx

	movq	%rcx, REGOFF_R8(%rsp)
	movq	0x28(%rbx), %rcx
	movq	%rcx, REGOFF_R9(%rsp)
	movq	0x30(%rbx), %rcx
	movq	%rcx, REGOFF_RAX(%rsp)
	movq	0x38(%rbx), %rcx
	movq	%rcx, REGOFF_RBX(%rsp)
	movq	0x58(%rbx), %rcx

	movq	%rcx, REGOFF_RBP(%rsp)
	movq	0x40(%rbx), %rcx
	movq	%rcx, REGOFF_R10(%rsp)
	movq	0x48(%rbx), %rcx
	movq	%rcx, REGOFF_R11(%rsp)
	/* callee-saved registers are still live; store them directly */
	movq	%r12, REGOFF_R12(%rsp)

	movq	%r13, REGOFF_R13(%rsp)
	movq	%r14, REGOFF_R14(%rsp)
	movq	%r15, REGOFF_R15(%rsp)

	xorl	%ecx, %ecx
	movw	%ds, %cx
	movq	%rcx, REGOFF_DS(%rsp)
	movw	%es, %cx
	movq	%rcx, REGOFF_ES(%rsp)
	movw	%fs, %cx
	movq	%rcx, REGOFF_FS(%rsp)
	movw	%gs, %cx
	movq	%rcx, REGOFF_GS(%rsp)

	movq	$0, REGOFF_TRAPNO(%rsp)

	movq	$0, REGOFF_ERR(%rsp)
	/* record vpanic itself as the faulting %rip */
	leaq	vpanic(%rip), %rcx
	movq	%rcx, REGOFF_RIP(%rsp)
	movw	%cs, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_CS(%rsp)
	movq	0x50(%rbx), %rcx
	movq	%rcx, REGOFF_RFL(%rsp)
	movq	%rbx, %rcx
	addq	$0x60, %rcx			/* entry-time %rsp */
	movq	%rcx, REGOFF_RSP(%rsp)
	movw	%ss, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_SS(%rsp)

	/*
	 * panicsys(format, alist, rp, on_panic_stack)
	 */
	movq	REGOFF_RDI(%rsp), %rdi		/* format */
	movq	REGOFF_RSI(%rsp), %rsi		/* alist */
	movq	%rsp, %rdx			/* struct regs */
	movl	%r11d, %ecx			/* on_panic_stack */
	call	panicsys
	addq	$REGSIZE, %rsp
	popq	%rdi
	popq	%rsi
	popq	%rdx
	popq	%rcx
	popq	%r8
	popq	%r9
	popq	%rax
	popq	%rbx
	popq	%r10
	popq	%r11
	popfq
	leave
	ret
	SET_SIZE(vpanic)
3428
	/*
	 * void dtrace_vpanic(const char *format, va_list alist)
	 *
	 * DTrace's panic entry point: builds the same initial stack
	 * layout as vpanic(), calls dtrace_panic_trigger(), then joins
	 * the common vpanic path at vpanic_common.
	 */
	ENTRY_NP(dtrace_vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip | 	0x60	*/
	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
	pushfq					/* | rfl  |	0x50	*/
	pushq	%r11				/* | %r11 |	0x48	*/
	pushq	%r10				/* | %r10 |	0x40	*/
	pushq	%rbx				/* | %rbx |	0x38	*/
	pushq	%rax				/* | %rax |	0x30	*/
	pushq	%r9				/* | %r9  |	0x28	*/
	pushq	%r8				/* | %r8  |	0x20	*/
	pushq	%rcx				/* | %rcx |	0x18	*/
	pushq	%rdx				/* | %rdx |	0x10	*/
	pushq	%rsi				/* | %rsi |	0x8 alist */
	pushq	%rdi				/* | %rdi |	0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	dtrace_panic_trigger	/* %eax = dtrace_panic_trigger() */
	jmp	vpanic_common

	SET_SIZE(dtrace_vpanic)
3452
3453#elif defined(__i386)
3454
	/*
	 * 32-bit vpanic(): same scheme as the 64-bit version — save
	 * the volatile registers, arm the panic trigger, switch to
	 * panic_stack if we won, build a struct regs and call
	 * panicsys(format, alist, &regs, on_panic_stack).
	 */
	ENTRY_NP(vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip | 20
	movl	%esp, %ebp			/ | %ebp | 16
	pushl	%eax				/ | %eax | 12
	pushl	%ebx				/ | %ebx |  8
	pushl	%ecx				/ | %ecx |  4
	pushl	%edx				/ | %edx |  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	panic_trigger			/ %eax = panic_trigger()
	addl	$4, %esp			/ reset stack pointer

vpanic_common:
	cmpl	$0, %eax			/ if (%eax == 0)
	je	0f				/   goto 0f;

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	lea	panic_stack, %esp		/ %esp  = panic_stack
	addl	$PANICSTKSIZE, %esp		/ %esp += PANICSTKSIZE

0:	subl	$REGSIZE, %esp			/ allocate struct regs

	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
#if !defined(__GNUC_AS__)
	movw	%gs, %edx
	movl	%edx, REGOFF_GS(%esp)
	movw	%fs, %edx
	movl	%edx, REGOFF_FS(%esp)
	movw	%es, %edx
	movl	%edx, REGOFF_ES(%esp)
	movw	%ds, %edx
	movl	%edx, REGOFF_DS(%esp)
#else	/* __GNUC_AS__ */
	mov	%gs, %edx
	mov	%edx, REGOFF_GS(%esp)
	mov	%fs, %edx
	mov	%edx, REGOFF_FS(%esp)
	mov	%es, %edx
	mov	%edx, REGOFF_ES(%esp)
	mov	%ds, %edx
	mov	%edx, REGOFF_DS(%esp)
#endif	/* __GNUC_AS__ */
	/ %edi/%esi are callee-saved and still hold entry-time values
	movl	%edi, REGOFF_EDI(%esp)
	movl	%esi, REGOFF_ESI(%esp)
	movl	16(%ebx), %ecx
	movl	%ecx, REGOFF_EBP(%esp)
	movl	%ebx, %ecx
	addl	$20, %ecx			/ entry-time %esp
	movl	%ecx, REGOFF_ESP(%esp)
	movl	8(%ebx), %ecx
	movl	%ecx, REGOFF_EBX(%esp)
	movl	0(%ebx), %ecx
	movl	%ecx, REGOFF_EDX(%esp)
	movl	4(%ebx), %ecx
	movl	%ecx, REGOFF_ECX(%esp)
	movl	12(%ebx), %ecx
	movl	%ecx, REGOFF_EAX(%esp)
	movl	$0, REGOFF_TRAPNO(%esp)
	movl	$0, REGOFF_ERR(%esp)
	lea	vpanic, %ecx			/ record vpanic as faulting %eip
	movl	%ecx, REGOFF_EIP(%esp)
#if !defined(__GNUC_AS__)
	movw	%cs, %edx
#else	/* __GNUC_AS__ */
	mov	%cs, %edx
#endif	/* __GNUC_AS__ */
	movl	%edx, REGOFF_CS(%esp)
	pushfl
	popl	%ecx
#if defined(__xpv)
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	CURTHREAD(%edx)
	KPREEMPT_DISABLE(%edx)
	EVENT_MASK_TO_IE(%edx, %ecx)
	CURTHREAD(%edx)
	KPREEMPT_ENABLE_NOKP(%edx)
#endif
	movl	%ecx, REGOFF_EFL(%esp)
	movl	$0, REGOFF_UESP(%esp)
#if !defined(__GNUC_AS__)
	movw	%ss, %edx
#else	/* __GNUC_AS__ */
	mov	%ss, %edx
#endif	/* __GNUC_AS__ */
	movl	%edx, REGOFF_SS(%esp)

	movl	%esp, %ecx			/ %ecx = &regs
	pushl	%eax				/ push on_panic_stack
	pushl	%ecx				/ push &regs
	movl	12(%ebp), %ecx			/ %ecx = alist
	pushl	%ecx				/ push alist
	movl	8(%ebp), %ecx			/ %ecx = format
	pushl	%ecx				/ push format
	call	panicsys			/ panicsys();
	addl	$16, %esp			/ pop arguments

	addl	$REGSIZE, %esp
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	leave
	ret
	SET_SIZE(vpanic)
3572
	/*
	 * 32-bit dtrace_vpanic(): builds the same initial stack layout
	 * as vpanic(), calls dtrace_panic_trigger(), then joins
	 * vpanic_common with the trigger result in %eax.
	 */
	ENTRY_NP(dtrace_vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip | 20
	movl	%esp, %ebp			/ | %ebp | 16
	pushl	%eax				/ | %eax | 12
	pushl	%ebx				/ | %ebx |  8
	pushl	%ecx				/ | %ecx |  4
	pushl	%edx				/ | %edx |  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	dtrace_panic_trigger		/ %eax = dtrace_panic_trigger()
	addl	$4, %esp			/ reset stack pointer
	jmp	vpanic_common			/ jump back to common code

	SET_SIZE(dtrace_vpanic)
3591
3592#endif	/* __i386 */
3593#endif	/* __lint */
3594
3595#if defined(__lint)
3596
/* Lint-only stub and global declarations; real definitions follow below. */
void
hres_tick(void)
{}

int64_t timedelta;		/* adjustment remaining, in nsec */
hrtime_t hres_last_tick;	/* hrtime at last hres_tick() */
volatile timestruc_t hrestime;	/* wall clock (sec, nsec) */
int64_t hrestime_adj;		/* per-tick adjustment in progress */
volatile int hres_lock;		/* CLOCK_LOCK byte + generation counter */
hrtime_t hrtime_base;		/* accumulated hrtime, in nsec */
3607
3608#else	/* __lint */
3609
	/* Time-keeping globals; DGDEF3 is (name, size, alignment). */
	DGDEF3(hrestime, _MUL(2, CLONGSIZE), 8)
	.NWORD	0, 0

	DGDEF3(hrestime_adj, 8, 8)
	.long	0, 0

	DGDEF3(hres_last_tick, 8, 8)
	.long	0, 0

	DGDEF3(timedelta, 8, 8)
	.long	0, 0

	DGDEF3(hres_lock, 4, 8)
	.long	0

	/*
	 * initialized to a non zero value to make pc_gethrtime()
	 * work correctly even before clock is initialized
	 */
	DGDEF3(hrtime_base, 8, 8)
	.long	_MUL(NSEC_PER_CLOCK_TICK, 6), 0

	DGDEF3(adj_shift, 4, 4)
	.long	ADJ_SHIFT
3634
3635#if defined(__amd64)
3636
	/*
	 * void hres_tick(void)
	 *
	 * Clock-tick handler: under CLOCK_LOCK (the low byte of
	 * hres_lock, acquired with an xchg spinloop), advance
	 * hrtime_base and hrestime by the interval since the previous
	 * tick, then apply any pending adjtime adjustment via
	 * __adj_hrestime.  Releasing the lock by incrementing hres_lock
	 * also bumps the generation number readers use.
	 */
	ENTRY_NP(hres_tick)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef(%rip)
	movq	%rax, %r8		/* %r8 = current hrtime */

	leaq	hres_lock(%rip), %rax
	movb	$-1, %dl
.CL1:
	xchgb	%dl, (%rax)		/* try to grab the lock byte */
	testb	%dl, %dl
	jz	.CL3			/* got it */
.CL2:
	cmpb	$0, (%rax)		/* possible to get lock? */
	pause
	jne	.CL2
	jmp	.CL1			/* yes, try again */
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * a timestruc_t (sec, nsec)
	 */
	leaq	hres_last_tick(%rip), %rax
	movq	%r8, %r11		/* keep current hrtime for store below */
	subq	(%rax), %r8		/* %r8 = interval since last tick */
	addq	%r8, hrtime_base(%rip)	/* add interval to hrtime_base */
	addq	%r8, hrestime+8(%rip)	/* add interval to hrestime.tv_nsec */
	/*
	 * Now that we have CLOCK_LOCK, we can update hres_last_tick
	 */
	movq	%r11, (%rax)

	call	__adj_hrestime

	/*
	 * release the hres_lock
	 */
	incl	hres_lock(%rip)
	leave
	ret
	SET_SIZE(hres_tick)
3687
3688#elif defined(__i386)
3689
	/*
	 * 32-bit hres_tick(): same protocol as the 64-bit version, but
	 * all the 64-bit time arithmetic is done in register pairs
	 * (low/high) with add/adc and sub/sbb.  The adjustment logic
	 * (__adj_hrestime) is inlined below rather than a separate
	 * function, and falls through to the lock release.
	 */
	ENTRY_NP(hres_tick)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%esi
	pushl	%ebx

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef
	movl	%eax, %ebx		/ %esi:%ebx = current hrtime
	movl	%edx, %esi

	movl	$hres_lock, %eax
	movl	$-1, %edx
.CL1:
	xchgb	%dl, (%eax)
	testb	%dl, %dl
	jz	.CL3			/ got it
.CL2:
	cmpb	$0, (%eax)		/ possible to get lock?
	pause
	jne	.CL2
	jmp	.CL1			/ yes, try again
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * timestruc_t (sec, nsec)
	 */

	lea	hres_last_tick, %eax

	movl	%ebx, %edx
	movl	%esi, %ecx

	subl 	(%eax), %edx		/ %ecx:%edx = interval since last tick
	sbbl 	4(%eax), %ecx

	addl	%edx, hrtime_base	/ add interval to hrtime_base
	adcl	%ecx, hrtime_base+4

	addl 	%edx, hrestime+4	/ add interval to hrestime.tv_nsec

	/
	/ Now that we have CLOCK_LOCK, we can update hres_last_tick.
	/
	movl	%ebx, (%eax)
	movl	%esi,  4(%eax)

	/ get hrestime at this moment. used as base for pc_gethrestime
	/
	/ Apply adjustment, if any
	/
	/ #define HRES_ADJ	(NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
	/ (max_hres_adj)
	/
	/ void
	/ adj_hrestime()
	/ {
	/	long long adj;
	/
	/	if (hrestime_adj == 0)
	/		adj = 0;
	/	else if (hrestime_adj > 0) {
	/		if (hrestime_adj < HRES_ADJ)
	/			adj = hrestime_adj;
	/		else
	/			adj = HRES_ADJ;
	/	}
	/	else {
	/		if (hrestime_adj < -(HRES_ADJ))
	/			adj = -(HRES_ADJ);
	/		else
	/			adj = hrestime_adj;
	/	}
	/
	/	timedelta -= adj;
	/	hrestime_adj = timedelta;
	/	hrestime.tv_nsec += adj;
	/
	/	while (hrestime.tv_nsec >= NANOSEC) {
	/		one_sec++;
	/		hrestime.tv_sec++;
	/		hrestime.tv_nsec -= NANOSEC;
	/	}
	/ }
__adj_hrestime:
	/ Throughout: %edx:%esi = hrestime_adj, %edx:%ecx = adj (high:low)
	movl	hrestime_adj, %esi	/ if (hrestime_adj == 0)
	movl	hrestime_adj+4, %edx
	andl	%esi, %esi
	jne	.CL4			/ no
	andl	%edx, %edx
	jne	.CL4			/ no
	subl	%ecx, %ecx		/ yes, adj = 0;
	subl	%edx, %edx
	jmp	.CL5
.CL4:
	/ negate hrestime_adj into %eax:%ecx just to test its sign
	subl	%ecx, %ecx
	subl	%eax, %eax
	subl	%esi, %ecx
	sbbl	%edx, %eax
	andl	%eax, %eax		/ if (hrestime_adj > 0)
	jge	.CL6

	/ In the following comments, HRES_ADJ is used, while in the code
	/ max_hres_adj is used.
	/
	/ The test for "hrestime_adj < HRES_ADJ" is complicated because
	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
	/ on the logical equivalence of:
	/
	/	!(hrestime_adj < HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
	/
	/ which computes whether or not the least significant 32-bits
	/ of hrestime_adj is greater than HRES_ADJ, followed by:
	/
	/	Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry whenever step 1 is true or the most
	/ significant long of the longlong hrestime_adj is non-zero.

	movl	max_hres_adj, %ecx	/ hrestime_adj is positive
	subl	%esi, %ecx
	movl	%edx, %eax
	adcl	$-1, %eax
	jnc	.CL7			/ hrestime_adj < HRES_ADJ: use it as-is
	movl	max_hres_adj, %ecx	/ adj = HRES_ADJ;
	subl	%edx, %edx
	jmp	.CL5

	/ The following computation is similar to the one above.
	/
	/ The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
	/ on the logical equivalence of:
	/
	/	(hrestime_adj > -HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ + lsw(hrestime_adj)) generates a Carry
	/
	/ which means the least significant 32-bits of hrestime_adj is
	/ greater than -HRES_ADJ, followed by:
	/
	/	Previous Carry + 0 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry only when step 1 is true and the most
	/ significant long of the longlong hrestime_adj is -1.

.CL6:					/ hrestime_adj is negative
	movl	%esi, %ecx
	addl	max_hres_adj, %ecx
	movl	%edx, %eax
	adcl	$0, %eax
	jc	.CL7			/ hrestime_adj > -HRES_ADJ: use as-is
	xor	%ecx, %ecx
	subl	max_hres_adj, %ecx	/ adj = -(HRES_ADJ);
	movl	$-1, %edx
	jmp	.CL5
.CL7:
	movl	%esi, %ecx		/ adj = hrestime_adj;
.CL5:
	movl	timedelta, %esi
	subl	%ecx, %esi
	movl	timedelta+4, %eax
	sbbl	%edx, %eax
	movl	%esi, timedelta
	movl	%eax, timedelta+4	/ timedelta -= adj;
	movl	%esi, hrestime_adj
	movl	%eax, hrestime_adj+4	/ hrestime_adj = timedelta;
	addl	hrestime+4, %ecx	/ %ecx = tv_nsec + adj

	movl	%ecx, %eax		/ eax = tv_nsec
1:
	cmpl	$NANOSEC, %eax		/ if ((unsigned long)tv_nsec >= NANOSEC)
	jb	.CL8			/ no
	incl	one_sec			/ yes,  one_sec++;
	incl	hrestime		/ hrestime.tv_sec++;
	addl	$-NANOSEC, %eax		/ tv_nsec -= NANOSEC
	jmp	1b			/ check for more seconds

.CL8:
	movl	%eax, hrestime+4	/ store final into hrestime.tv_nsec
	incl	hres_lock		/ release the hres_lock

	popl	%ebx
	popl	%esi
	leave
	ret
	SET_SIZE(hres_tick)
3890
3891#endif	/* __i386 */
3892#endif	/* __lint */
3893
3894/*
3895 * void prefetch_smap_w(void *)
3896 *
3897 * Prefetch ahead within a linear list of smap structures.
3898 * Not implemented for ia32.  Stub for compatibility.
3899 */
3900
3901#if defined(__lint)
3902
/* Lint-only stub; the real prefetch_smap_w() is the assembly below. */
/*ARGSUSED*/
void prefetch_smap_w(void *smp)
{}
3906
3907#else	/* __lint */
3908
	/* No-op stub on x86; see the comment above. */
	ENTRY(prefetch_smap_w)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_smap_w)
3913
3914#endif	/* __lint */
3915
3916/*
3917 * prefetch_page_r(page_t *)
3918 * issue prefetch instructions for a page_t
3919 */
3920#if defined(__lint)
3921
/* Lint-only stub; the real prefetch_page_r() is the assembly below. */
/*ARGSUSED*/
void
prefetch_page_r(void *pp)
{}
3926
3927#else	/* __lint */
3928
	/* No-op stub on x86; see the comment above. */
	ENTRY(prefetch_page_r)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_page_r)
3933
3934#endif	/* __lint */
3935
3936#if defined(__lint)
3937
/* Lint-only stub; the real bcmp() is the assembly below. */
/*ARGSUSED*/
int
bcmp(const void *s1, const void *s2, size_t count)
{ return (0); }
3942
3943#else   /* __lint */
3944
3945#if defined(__amd64)
3946
	/*
	 * int bcmp(const void *s1, const void *s2, size_t count)
	 *
	 * Returns 0 if the two byte ranges are equal, 1 otherwise.
	 * Implemented as a thin wrapper over memcmp() with the result
	 * collapsed to 0/1.  DEBUG builds panic if either pointer is
	 * below postbootkernelbase (i.e. a userland address).
	 */
	ENTRY(bcmp)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	testq	%rdx,%rdx		/* zero length compares are OK */
	je	1f
	movq	postbootkernelbase(%rip), %r11
	cmpq	%r11, %rdi
	jb	0f
	cmpq	%r11, %rsi
	jnb	1f
0:	leaq	.bcmp_panic_msg(%rip), %rdi
	xorl	%eax, %eax		/* no FP args to variadic panic() */
	call	panic
1:
#endif	/* DEBUG */
	call	memcmp
	testl	%eax, %eax		/* collapse memcmp result to 0/1 */
	setne	%dl
	leave
	movzbl	%dl, %eax
	ret
	SET_SIZE(bcmp)
3970
3971#elif defined(__i386)
3972
/* Stack offsets (from %ebp) of bcmp's three arguments. */
#define	ARG_S1		8
#define	ARG_S2		12
#define	ARG_LENGTH	16

	/*
	 * 32-bit bcmp(): compares word-at-a-time while >= 4 bytes
	 * remain, then falls back to byte-at-a-time for the tail (and
	 * to pinpoint a differing word).  Returns 0 if equal, 1 if not.
	 */
	ENTRY(bcmp)
	pushl	%ebp
	movl	%esp, %ebp	/ create new stack frame
#ifdef DEBUG
	cmpl	$0, ARG_LENGTH(%ebp)
	je	1f
	movl    postbootkernelbase, %eax
	cmpl    %eax, ARG_S1(%ebp)
	jb	0f
	cmpl    %eax, ARG_S2(%ebp)
	jnb	1f
0:	pushl   $.bcmp_panic_msg
	call    panic
1:
#endif	/* DEBUG */

	pushl	%edi		/ save register variable
	movl	ARG_S1(%ebp), %eax	/ %eax = address of string 1
	movl	ARG_S2(%ebp), %ecx	/ %ecx = address of string 2
	cmpl	%eax, %ecx	/ if the same string
	je	.equal		/ goto .equal
	movl	ARG_LENGTH(%ebp), %edi	/ %edi = length in bytes
	cmpl	$4, %edi	/ if %edi < 4
	jb	.byte_check	/ goto .byte_check
	.align	4
.word_loop:
	movl	(%ecx), %edx	/ move 1 word from (%ecx) to %edx
	leal	-4(%edi), %edi	/ %edi -= 4
	cmpl	(%eax), %edx	/ compare 1 word from (%eax) with %edx
	jne	.word_not_equal	/ if not equal, goto .word_not_equal
	leal	4(%ecx), %ecx	/ %ecx += 4 (next word)
	leal	4(%eax), %eax	/ %eax += 4 (next word)
	cmpl	$4, %edi	/ if %edi >= 4
	jae	.word_loop	/ goto .word_loop
.byte_check:
	cmpl	$0, %edi	/ if %edi == 0
	je	.equal		/ goto .equal
	jmp	.byte_loop	/ goto .byte_loop (checks in bytes)
.word_not_equal:
	/ rewind so the byte loop re-examines the differing word
	leal	4(%edi), %edi	/ %edi += 4 (post-decremented)
	.align	4
.byte_loop:
	movb	(%ecx),	%dl	/ move 1 byte from (%ecx) to %dl
	cmpb	%dl, (%eax)	/ compare %dl with 1 byte from (%eax)
	jne	.not_equal	/ if not equal, goto .not_equal
	incl	%ecx		/ %ecx++ (next byte)
	incl	%eax		/ %eax++ (next byte)
	decl	%edi		/ %edi--
	jnz	.byte_loop	/ if not zero, goto .byte_loop
.equal:
	xorl	%eax, %eax	/ %eax = 0
	popl	%edi		/ restore register variable
	leave			/ restore old stack frame
	ret			/ return (NULL)
	.align	4
.not_equal:
	movl	$1, %eax	/ return 1
	popl	%edi		/ restore register variable
	leave			/ restore old stack frame
	ret			/ return (NULL)
	SET_SIZE(bcmp)
4038
4039#endif	/* __i386 */
4040
#ifdef DEBUG
	.text
/* Panic message used by the DEBUG pointer checks in bcmp() above. */
.bcmp_panic_msg:
	.string "bcmp: arguments below kernelbase"
#endif	/* DEBUG */
4046
4047#endif	/* __lint */
4048
4049#if defined(__lint)
4050
/*
 * Lint-only reference implementation of bsrw: index of the highest
 * set bit in a 16-bit mask (mask must be non-zero or the loop is
 * unbounded, matching bsr's undefined result for a zero source).
 */
uint_t
bsrw_insn(uint16_t mask)
{
	uint_t index = sizeof (mask) * NBBY - 1;

	while ((mask & (1 << index)) == 0)
		index--;
	return (index);
}
4060
4061#else	/* __lint */
4062
4063#if defined(__amd64)
4064
	/*
	 * uint_t bsrw_insn(uint16_t mask)
	 *
	 * Bit-scan-reverse: index of highest set bit in %di.
	 * %eax pre-zeroed because bsrw writes only 16 bits.
	 */
	ENTRY_NP(bsrw_insn)
	xorl	%eax, %eax
	bsrw	%di, %ax
	ret
	SET_SIZE(bsrw_insn)
4070
4071#elif defined(__i386)
4072
	/*
	 * 32-bit bsrw_insn(): mask at 4(%esp); result in %eax
	 * (pre-zeroed since bsrw writes only the low 16 bits).
	 */
	ENTRY_NP(bsrw_insn)
	movw	4(%esp), %cx
	xorl	%eax, %eax
	bsrw	%cx, %ax
	ret
	SET_SIZE(bsrw_insn)
4079
4080#endif	/* __i386 */
4081#endif	/* __lint */
4082
4083#if defined(__lint)
4084
/*
 * Lint-only stub; the assembly version atomically clears bit 'pil'
 * and returns whether it was previously set (the C expression here
 * only approximates that contract for lint's benefit).
 */
uint_t
atomic_btr32(uint32_t *pending, uint_t pil)
{
	return (*pending &= ~(1 << pil));
}
4090
4091#else	/* __lint */
4092
4093#if defined(__i386)
4094
	/*
	 * uint_t atomic_btr32(uint32_t *pending, uint_t pil)
	 *
	 * Atomically test-and-clear bit 'pil' of *pending; return the
	 * bit's previous value (0 or 1) via the carry flag.
	 */
	ENTRY_NP(atomic_btr32)
	movl	4(%esp), %ecx		/ %ecx = pending
	movl	8(%esp), %edx		/ %edx = bit number
	xorl	%eax, %eax
	lock
	btrl	%edx, (%ecx)		/ CF = old bit value
	setc	%al
	ret
	SET_SIZE(atomic_btr32)
4104
4105#endif	/* __i386 */
4106#endif	/* __lint */
4107
4108#if defined(__lint)
4109
/* Lint-only stub; the real switch_sp_and_call() is the assembly below. */
/*ARGSUSED*/
void
switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
	    uint_t arg2)
{}
4115
4116#else	/* __lint */
4117
4118#if defined(__amd64)
4119
	/*
	 * void switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t),
	 *     uint_t arg1, uint_t arg2)
	 *
	 * Call func(arg1, arg2) on the stack 'newsp' (e.g. an interrupt
	 * thread stack), then restore the original stack via the saved
	 * frame pointer.
	 */
	ENTRY_NP(switch_sp_and_call)
	pushq	%rbp
	movq	%rsp, %rbp		/* set up stack frame */
	movq	%rdi, %rsp		/* switch stack pointer */
	movq	%rdx, %rdi		/* pass func arg 1 */
	movq	%rsi, %r11		/* save function to call */
	movq	%rcx, %rsi		/* pass func arg 2 */
	call	*%r11			/* call function */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)
4131
4132#elif defined(__i386)
4133
	/*
	 * 32-bit switch_sp_and_call(): call *12(%ebp) with args
	 * 16/20(%ebp) on stack 8(%ebp); 'leave' restores the original
	 * stack via the saved frame pointer.
	 */
	ENTRY_NP(switch_sp_and_call)
	pushl	%ebp
	mov	%esp, %ebp		/* set up stack frame */
	movl	8(%ebp), %esp		/* switch stack pointer */
	pushl	20(%ebp)		/* push func arg 2 */
	pushl	16(%ebp)		/* push func arg 1 */
	call	*12(%ebp)		/* call function */
	addl	$8, %esp		/* pop arguments */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)
4145
4146#endif	/* __i386 */
4147#endif	/* __lint */
4148
4149#if defined(__lint)
4150
/* Lint-only stub; the real kmdb_enter() is the assembly below. */
void
kmdb_enter(void)
{}
4154
4155#else	/* __lint */
4156
4157#if defined(__amd64)
4158
	/*
	 * void kmdb_enter(void)
	 *
	 * Enter the kernel debugger via the debugger-entry trap, with
	 * interrupts disabled around the trap and the prior interrupt
	 * state restored afterwards.
	 */
	ENTRY_NP(kmdb_enter)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * Save flags, do a 'cli' then return the saved flags
	 */
	call	intr_clear

	int	$T_DBGENTR		/* trap into the debugger */

	/*
	 * Restore the saved flags
	 */
	movq	%rax, %rdi
	call	intr_restore

	leave
	ret
	SET_SIZE(kmdb_enter)
4179
4180#elif defined(__i386)
4181
	/*
	 * 32-bit kmdb_enter(): as above, with the saved flags passed
	 * to intr_restore() on the stack.
	 */
	ENTRY_NP(kmdb_enter)
	pushl	%ebp
	movl	%esp, %ebp

	/*
	 * Save flags, do a 'cli' then return the saved flags
	 */
	call	intr_clear

	int	$T_DBGENTR		/* trap into the debugger */

	/*
	 * Restore the saved flags
	 */
	pushl	%eax
	call	intr_restore
	addl	$4, %esp

	leave
	ret
	SET_SIZE(kmdb_enter)
4203
4204#endif	/* __i386 */
4205#endif	/* __lint */
4206
4207#if defined(__lint)
4208
/* Lint-only stub; the real return_instr() is the assembly below. */
void
return_instr(void)
{}
4212
4213#else	/* __lint */
4214
	/* A bare return, used where a no-op function pointer is needed. */
	ENTRY_NP(return_instr)
	rep;	ret	/* use 2 byte instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(return_instr)
4219
4220#endif	/* __lint */
4221
4222#if defined(__lint)
4223
/* Lint-only stub; the real getflags() is the assembly below. */
ulong_t
getflags(void)
{
	return (0);
}
4229
4230#else	/* __lint */
4231
4232#if defined(__amd64)
4233
	/*
	 * ulong_t getflags(void)
	 *
	 * Return the processor flags register.  Under Xen the hardware
	 * IF bit is not meaningful, so PS_IE is synthesized from the
	 * vcpu event mask instead (with preemption disabled so the
	 * vcpu pointer stays valid).
	 */
	ENTRY(getflags)
	pushfq
	popq	%rax
#if defined(__xpv)
	CURTHREAD(%rdi)
	KPREEMPT_DISABLE(%rdi)
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	CURVCPU(%r11)
	andq    $_BITNOT(PS_IE), %rax
	XEN_TEST_UPCALL_MASK(%r11)
	jnz	1f
	orq	$PS_IE, %rax
1:
	KPREEMPT_ENABLE_NOKP(%rdi)
#endif
	ret
	SET_SIZE(getflags)
4253
4254#elif defined(__i386)
4255
	/*
	 * 32-bit getflags(): as above; PS_IE synthesized from the Xen
	 * event mask when running paravirtualized.
	 */
	ENTRY(getflags)
	pushfl
	popl	%eax
#if defined(__xpv)
	CURTHREAD(%ecx)
	KPREEMPT_DISABLE(%ecx)
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	CURVCPU(%edx)
	andl    $_BITNOT(PS_IE), %eax
	XEN_TEST_UPCALL_MASK(%edx)
	jnz	1f
	orl	$PS_IE, %eax
1:
	KPREEMPT_ENABLE_NOKP(%ecx)
#endif
	ret
	SET_SIZE(getflags)
4275
4276#endif	/* __i386 */
4277
4278#endif	/* __lint */
4279
4280#if defined(__lint)
4281
/* Lint-only stub; the real ftrace_interrupt_disable() is below. */
ftrace_icookie_t
ftrace_interrupt_disable(void)
{ return (0); }
4285
4286#else   /* __lint */
4287
4288#if defined(__amd64)
4289
	/*
	 * ftrace_icookie_t ftrace_interrupt_disable(void)
	 *
	 * Disable interrupts; return the prior flags as a cookie for
	 * ftrace_interrupt_enable().
	 */
	ENTRY(ftrace_interrupt_disable)
	pushfq
	popq	%rax			/* %rax = saved flags (cookie) */
	CLI(%rdx)
	ret
	SET_SIZE(ftrace_interrupt_disable)
4296
4297#elif defined(__i386)
4298
4299	ENTRY(ftrace_interrupt_disable)
4300	pushfl
4301	popl	%eax
4302	CLI(%edx)
4303	ret
4304	SET_SIZE(ftrace_interrupt_disable)
4305
4306#endif	/* __i386 */
4307#endif	/* __lint */
4308
#if defined(__lint)

/*ARGSUSED*/
void
ftrace_interrupt_enable(ftrace_icookie_t cookie)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void ftrace_interrupt_enable(ftrace_icookie_t cookie)
	 *
	 * Restore the flags register saved by ftrace_interrupt_disable(),
	 * re-enabling interrupts iff they were enabled at disable time.
	 */
	ENTRY(ftrace_interrupt_enable)
	pushq	%rdi			/* cookie (arg 0) ... */
	popfq				/* ... becomes the flags register */
	ret
	SET_SIZE(ftrace_interrupt_enable)

#elif defined(__i386)

	/*
	 * i386 variant; cookie is the sole stack argument.
	 */
	ENTRY(ftrace_interrupt_enable)
	movl	4(%esp), %eax		/* cookie */
	pushl	%eax
	popfl				/* restore saved flags */
	ret
	SET_SIZE(ftrace_interrupt_enable)

#endif	/* __i386 */
#endif	/* __lint */
4337
#if defined (__lint)

/*ARGSUSED*/
void
clflush_insn(caddr_t addr)
{}

#else /* __lint */

#if defined (__amd64)
	/*
	 * void clflush_insn(caddr_t addr)
	 *
	 * Flush (invalidate and write back) the cache line containing
	 * addr from every level of the cache hierarchy, via CLFLUSH.
	 * Caller is responsible for ensuring the CPU supports CLFLUSH.
	 */
	ENTRY(clflush_insn)
	clflush (%rdi)			/* addr in %rdi */
	ret
	SET_SIZE(clflush_insn)
#elif defined (__i386)
	/*
	 * i386 variant; addr is the sole stack argument.
	 */
	ENTRY(clflush_insn)
	movl	4(%esp), %eax		/* addr */
	clflush (%eax)
	ret
	SET_SIZE(clflush_insn)

#endif /* __i386 */
#endif /* __lint */
4361
#if defined (__lint)
/*ARGSUSED*/
void
mfence_insn(void)
{}

#else /* __lint */

#if defined (__amd64)
	/*
	 * void mfence_insn(void)
	 *
	 * Execute a full memory fence (MFENCE): all prior loads and
	 * stores are globally visible before any subsequent ones.
	 */
	ENTRY(mfence_insn)
	mfence
	ret
	SET_SIZE(mfence_insn)
#elif defined (__i386)
	/*
	 * i386 variant; identical to the amd64 one.
	 */
	ENTRY(mfence_insn)
	mfence
	ret
	SET_SIZE(mfence_insn)

#endif /* __i386 */
#endif /* __lint */
4383
4384/*
4385 * VMware implements an I/O port that programs can query to detect if software
4386 * is running in a VMware hypervisor. This hypervisor port behaves differently
4387 * depending on magic values in certain registers and modifies some registers
4388 * as a side effect.
4389 *
4390 * References: http://kb.vmware.com/kb/1009458
4391 */
4392
#if defined(__lint)

/* ARGSUSED */
void
vmware_port(int cmd, uint32_t *regs) { return; }

#else

#if defined(__amd64)

	/*
	 * void vmware_port(int cmd, uint32_t *regs)
	 *
	 * Issue hypervisor-port command 'cmd' and store the resulting
	 * %eax/%ebx/%ecx/%edx values into regs[0..3].  Per the backdoor
	 * protocol, %eax carries the magic cookie, %ebx all-ones, %ecx
	 * the command, and %dx selects the hypervisor I/O port; the
	 * hypervisor may modify any of the four registers.
	 */
	ENTRY(vmware_port)
	pushq	%rbx			/* %rbx is callee-saved */
	movl	$VMWARE_HVMAGIC, %eax	/* magic cookie */
	movl	$0xffffffff, %ebx
	movl	%edi, %ecx		/* cmd */
	movl	$VMWARE_HVPORT, %edx	/* hypervisor port */
	inl	(%dx)			/* hypervisor updates eax..edx */
	movl	%eax, (%rsi)		/* regs[0] */
	movl	%ebx, 4(%rsi)		/* regs[1] */
	movl	%ecx, 8(%rsi)		/* regs[2] */
	movl	%edx, 12(%rsi)		/* regs[3] */
	popq	%rbx
	ret
	SET_SIZE(vmware_port)

#elif defined(__i386)

	/*
	 * i386 variant of vmware_port(); see the amd64 comment above.
	 * Two registers are pushed, so the stack args sit at 12(%esp)
	 * (cmd) and 16(%esp) (regs).
	 */
	ENTRY(vmware_port)
	pushl	%ebx			/* callee-saved */
	pushl	%esi			/* callee-saved */
	movl	$VMWARE_HVMAGIC, %eax	/* magic cookie */
	movl	$0xffffffff, %ebx
	movl	12(%esp), %ecx		/* cmd */
	movl	$VMWARE_HVPORT, %edx	/* hypervisor port */
	inl	(%dx)			/* hypervisor updates eax..edx */
	movl	16(%esp), %esi		/* regs */
	movl	%eax, (%esi)		/* regs[0] */
	movl	%ebx, 4(%esi)		/* regs[1] */
	movl	%ecx, 8(%esi)		/* regs[2] */
	movl	%edx, 12(%esi)		/* regs[3] */
	popl	%esi
	popl	%ebx
	ret
	SET_SIZE(vmware_port)

#endif /* __i386 */
#endif /* __lint */
4440