xref: /titanic_44/usr/src/uts/intel/ia32/ml/i86_subr.s (revision a60349c89adffc0902b2353230891d8e7f2b24d9)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
25 * Copyright (c) 2014 by Delphix. All rights reserved.
26 * Copyright 2016 Joyent, Inc.
27 */
28
29/*
30 *  Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
31 *  Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
32 *    All Rights Reserved
33 */
34
35/*
36 * Copyright (c) 2009, Intel Corporation.
37 * All rights reserved.
38 */
39
40/*
41 * General assembly language routines.
42 * It is the intent of this file to contain routines that are
43 * independent of the specific kernel architecture, and those that are
44 * common across kernel architectures.
45 * As architectures diverge, and implementations of specific
46 * architecture-dependent routines change, the routines should be moved
47 * from this file into the respective ../`arch -k`/subr.s file.
48 */
49
50#include <sys/asm_linkage.h>
51#include <sys/asm_misc.h>
52#include <sys/panic.h>
53#include <sys/ontrap.h>
54#include <sys/regset.h>
55#include <sys/privregs.h>
56#include <sys/reboot.h>
57#include <sys/psw.h>
58#include <sys/x86_archext.h>
59
60#if defined(__lint)
61#include <sys/types.h>
62#include <sys/systm.h>
63#include <sys/thread.h>
64#include <sys/archsystm.h>
65#include <sys/byteorder.h>
66#include <sys/dtrace.h>
67#include <sys/ftrace.h>
68#else	/* __lint */
69#include "assym.h"
70#endif	/* __lint */
71#include <sys/dditypes.h>
72
73/*
74 * on_fault()
75 * Catch lofault faults. Like setjmp, except that it returns one
76 * if the code that follows causes an uncorrectable fault. Turned off
77 * by calling no_fault().
78 */
79
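/*
 * A minimal usage sketch (hypothetical caller, not taken from this file).
 * If the protected code faults, control resumes at the on_fault() call
 * with a non-zero return value:
 *
 *	label_t ljb;
 *
 *	if (on_fault(&ljb)) {
 *		no_fault();
 *		return (EFAULT);
 *	}
 *	... perform the access that may fault ...
 *	no_fault();
 */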
80#if defined(__lint)
81
82/* ARGSUSED */
83int
84on_fault(label_t *ljb)
85{ return (0); }
86
87void
88no_fault(void)
89{}
90
91#else	/* __lint */
92
93#if defined(__amd64)
94
95	ENTRY(on_fault)
96	movq	%gs:CPU_THREAD, %rsi
97	leaq	catch_fault(%rip), %rdx
98	movq	%rdi, T_ONFAULT(%rsi)		/* jumpbuf in t_onfault */
99	movq	%rdx, T_LOFAULT(%rsi)		/* catch_fault in t_lofault */
100	jmp	setjmp				/* let setjmp do the rest */
101
102catch_fault:
103	movq	%gs:CPU_THREAD, %rsi
104	movq	T_ONFAULT(%rsi), %rdi		/* address of save area */
105	xorl	%eax, %eax
106	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
107	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
108	jmp	longjmp				/* let longjmp do the rest */
109	SET_SIZE(on_fault)
110
111	ENTRY(no_fault)
112	movq	%gs:CPU_THREAD, %rsi
113	xorl	%eax, %eax
114	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
115	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
116	ret
117	SET_SIZE(no_fault)
118
119#elif defined(__i386)
120
121	ENTRY(on_fault)
122	movl	%gs:CPU_THREAD, %edx
123	movl	4(%esp), %eax			/* jumpbuf address */
124	leal	catch_fault, %ecx
125	movl	%eax, T_ONFAULT(%edx)		/* jumpbuf in t_onfault */
126	movl	%ecx, T_LOFAULT(%edx)		/* catch_fault in t_lofault */
127	jmp	setjmp				/* let setjmp do the rest */
128
129catch_fault:
130	movl	%gs:CPU_THREAD, %edx
131	xorl	%eax, %eax
132	movl	T_ONFAULT(%edx), %ecx		/* address of save area */
133	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
134	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
135	pushl	%ecx
136	call	longjmp				/* let longjmp do the rest */
137	SET_SIZE(on_fault)
138
139	ENTRY(no_fault)
140	movl	%gs:CPU_THREAD, %edx
141	xorl	%eax, %eax
142	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
143	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
144	ret
145	SET_SIZE(no_fault)
146
147#endif	/* __i386 */
148#endif	/* __lint */
149
150/*
151 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
152 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
153 */
154
155#if defined(__lint)
156
157void
158on_trap_trampoline(void)
159{}
160
161#else	/* __lint */
162
163#if defined(__amd64)
164
165	ENTRY(on_trap_trampoline)
166	movq	%gs:CPU_THREAD, %rsi
167	movq	T_ONTRAP(%rsi), %rdi
168	addq	$OT_JMPBUF, %rdi
169	jmp	longjmp
170	SET_SIZE(on_trap_trampoline)
171
172#elif defined(__i386)
173
174	ENTRY(on_trap_trampoline)
175	movl	%gs:CPU_THREAD, %eax
176	movl	T_ONTRAP(%eax), %eax
177	addl	$OT_JMPBUF, %eax
178	pushl	%eax
179	call	longjmp
180	SET_SIZE(on_trap_trampoline)
181
182#endif	/* __i386 */
183#endif	/* __lint */
184
185/*
186 * Push a new element onto the t_ontrap stack.  Refer to <sys/ontrap.h> for
187 * more information about the on_trap() mechanism.  If the on_trap_data is the
188 * same as the topmost stack element, we just modify that element.
189 */
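/*
 * A minimal usage sketch (hypothetical caller; protection flags such as
 * OT_DATA_ACCESS are described in <sys/ontrap.h>):
 *
 *	on_trap_data_t otd;
 *
 *	if (on_trap(&otd, OT_DATA_ACCESS) == 0) {
 *		... access that may trap ...
 *	}
 *	no_trap();
 */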
190#if defined(__lint)
191
192/*ARGSUSED*/
193int
194on_trap(on_trap_data_t *otp, uint_t prot)
195{ return (0); }
196
197#else	/* __lint */
198
199#if defined(__amd64)
200
201	ENTRY(on_trap)
202	movw	%si, OT_PROT(%rdi)		/* ot_prot = prot */
203	movw	$0, OT_TRAP(%rdi)		/* ot_trap = 0 */
204	leaq	on_trap_trampoline(%rip), %rdx	/* rdx = &on_trap_trampoline */
205	movq	%rdx, OT_TRAMPOLINE(%rdi)	/* ot_trampoline = rdx */
206	xorl	%ecx, %ecx
207	movq	%rcx, OT_HANDLE(%rdi)		/* ot_handle = NULL */
208	movq	%rcx, OT_PAD1(%rdi)		/* ot_pad1 = NULL */
209	movq	%gs:CPU_THREAD, %rdx		/* rdx = curthread */
210	movq	T_ONTRAP(%rdx), %rcx		/* rcx = curthread->t_ontrap */
211	cmpq	%rdi, %rcx			/* if (otp == %rcx)	*/
212	je	0f				/*	don't modify t_ontrap */
213
214	movq	%rcx, OT_PREV(%rdi)		/* ot_prev = t_ontrap */
215	movq	%rdi, T_ONTRAP(%rdx)		/* curthread->t_ontrap = otp */
216
2170:	addq	$OT_JMPBUF, %rdi		/* &ot_jmpbuf */
218	jmp	setjmp
219	SET_SIZE(on_trap)
220
221#elif defined(__i386)
222
223	ENTRY(on_trap)
224	movl	4(%esp), %eax			/* %eax = otp */
225	movl	8(%esp), %edx			/* %edx = prot */
226
227	movw	%dx, OT_PROT(%eax)		/* ot_prot = prot */
228	movw	$0, OT_TRAP(%eax)		/* ot_trap = 0 */
229	leal	on_trap_trampoline, %edx	/* %edx = &on_trap_trampoline */
230	movl	%edx, OT_TRAMPOLINE(%eax)	/* ot_trampoline = %edx */
231	movl	$0, OT_HANDLE(%eax)		/* ot_handle = NULL */
232	movl	$0, OT_PAD1(%eax)		/* ot_pad1 = NULL */
233	movl	%gs:CPU_THREAD, %edx		/* %edx = curthread */
234	movl	T_ONTRAP(%edx), %ecx		/* %ecx = curthread->t_ontrap */
235	cmpl	%eax, %ecx			/* if (otp == %ecx) */
236	je	0f				/*    don't modify t_ontrap */
237
238	movl	%ecx, OT_PREV(%eax)		/* ot_prev = t_ontrap */
239	movl	%eax, T_ONTRAP(%edx)		/* curthread->t_ontrap = otp */
240
2410:	addl	$OT_JMPBUF, %eax		/* %eax = &ot_jmpbuf */
242	movl	%eax, 4(%esp)			/* put %eax back on the stack */
243	jmp	setjmp				/* let setjmp do the rest */
244	SET_SIZE(on_trap)
245
246#endif	/* __i386 */
247#endif	/* __lint */
248
249/*
250 * Setjmp and longjmp implement non-local gotos using state vectors
251 * of type label_t.
252 */
253
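/*
 * A minimal sketch of the calling pattern (hypothetical caller):
 *
 *	label_t jb;
 *
 *	if (setjmp(&jb) == 0) {
 *		... normal path; a later longjmp(&jb) resumes below ...
 *	} else {
 *		... reached with a return value of 1 after longjmp(&jb) ...
 *	}
 */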
254#if defined(__lint)
255
256/* ARGSUSED */
257int
258setjmp(label_t *lp)
259{ return (0); }
260
261/* ARGSUSED */
262void
263longjmp(label_t *lp)
264{}
265
266#else	/* __lint */
267
268#if LABEL_PC != 0
269#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
270#endif	/* LABEL_PC != 0 */
271
272#if defined(__amd64)
273
274	ENTRY(setjmp)
275	movq	%rsp, LABEL_SP(%rdi)
276	movq	%rbp, LABEL_RBP(%rdi)
277	movq	%rbx, LABEL_RBX(%rdi)
278	movq	%r12, LABEL_R12(%rdi)
279	movq	%r13, LABEL_R13(%rdi)
280	movq	%r14, LABEL_R14(%rdi)
281	movq	%r15, LABEL_R15(%rdi)
282	movq	(%rsp), %rdx		/* return address */
283	movq	%rdx, (%rdi)		/* LABEL_PC is 0 */
284	xorl	%eax, %eax		/* return 0 */
285	ret
286	SET_SIZE(setjmp)
287
288	ENTRY(longjmp)
289	movq	LABEL_SP(%rdi), %rsp
290	movq	LABEL_RBP(%rdi), %rbp
291	movq	LABEL_RBX(%rdi), %rbx
292	movq	LABEL_R12(%rdi), %r12
293	movq	LABEL_R13(%rdi), %r13
294	movq	LABEL_R14(%rdi), %r14
295	movq	LABEL_R15(%rdi), %r15
296	movq	(%rdi), %rdx		/* return address; LABEL_PC is 0 */
297	movq	%rdx, (%rsp)
298	xorl	%eax, %eax
299	incl	%eax			/* return 1 */
300	ret
301	SET_SIZE(longjmp)
302
303#elif defined(__i386)
304
305	ENTRY(setjmp)
306	movl	4(%esp), %edx		/* address of save area */
307	movl	%ebp, LABEL_EBP(%edx)
308	movl	%ebx, LABEL_EBX(%edx)
309	movl	%esi, LABEL_ESI(%edx)
310	movl	%edi, LABEL_EDI(%edx)
311	movl	%esp, 4(%edx)
312	movl	(%esp), %ecx		/* %eip (return address) */
313	movl	%ecx, (%edx)		/* LABEL_PC is 0 */
314	subl	%eax, %eax		/* return 0 */
315	ret
316	SET_SIZE(setjmp)
317
318	ENTRY(longjmp)
319	movl	4(%esp), %edx		/* address of save area */
320	movl	LABEL_EBP(%edx), %ebp
321	movl	LABEL_EBX(%edx), %ebx
322	movl	LABEL_ESI(%edx), %esi
323	movl	LABEL_EDI(%edx), %edi
324	movl	4(%edx), %esp
325	movl	(%edx), %ecx		/* %eip (return addr); LABEL_PC is 0 */
326	movl	$1, %eax
327	addl	$4, %esp		/* pop ret adr */
328	jmp	*%ecx			/* indirect */
329	SET_SIZE(longjmp)
330
331#endif	/* __i386 */
332#endif	/* __lint */
333
334/*
335 * If a() calls b(), and b() calls caller(),
336 * caller() returns the return address in a().
337 * (Note: We assume a() and b() are C routines which do the normal entry/exit
338 *  sequence.)
339 */
340
341#if defined(__lint)
342
343caddr_t
344caller(void)
345{ return (0); }
346
347#else	/* __lint */
348
349#if defined(__amd64)
350
351	ENTRY(caller)
352	movq	8(%rbp), %rax		/* b()'s return pc, in a() */
353	ret
354	SET_SIZE(caller)
355
356#elif defined(__i386)
357
358	ENTRY(caller)
359	movl	4(%ebp), %eax		/* b()'s return pc, in a() */
360	ret
361	SET_SIZE(caller)
362
363#endif	/* __i386 */
364#endif	/* __lint */
365
366/*
367 * If a() calls callee(), callee() returns the
368 * return address in a().
369 */
370
371#if defined(__lint)
372
373caddr_t
374callee(void)
375{ return (0); }
376
377#else	/* __lint */
378
379#if defined(__amd64)
380
381	ENTRY(callee)
382	movq	(%rsp), %rax		/* callee()'s return pc, in a() */
383	ret
384	SET_SIZE(callee)
385
386#elif defined(__i386)
387
388	ENTRY(callee)
389	movl	(%esp), %eax		/* callee()'s return pc, in a() */
390	ret
391	SET_SIZE(callee)
392
393#endif	/* __i386 */
394#endif	/* __lint */
395
396/*
397 * return the current frame pointer
398 */
399
400#if defined(__lint)
401
402greg_t
403getfp(void)
404{ return (0); }
405
406#else	/* __lint */
407
408#if defined(__amd64)
409
410	ENTRY(getfp)
411	movq	%rbp, %rax
412	ret
413	SET_SIZE(getfp)
414
415#elif defined(__i386)
416
417	ENTRY(getfp)
418	movl	%ebp, %eax
419	ret
420	SET_SIZE(getfp)
421
422#endif	/* __i386 */
423#endif	/* __lint */
424
425/*
426 * Invalidate a single page table entry in the TLB
427 */
428
429#if defined(__lint)
430
431/* ARGSUSED */
432void
433mmu_tlbflush_entry(caddr_t m)
434{}
435
436#else	/* __lint */
437
438#if defined(__amd64)
439
440	ENTRY(mmu_tlbflush_entry)
441	invlpg	(%rdi)
442	ret
443	SET_SIZE(mmu_tlbflush_entry)
444
445#elif defined(__i386)
446
447	ENTRY(mmu_tlbflush_entry)
448	movl	4(%esp), %eax
449	invlpg	(%eax)
450	ret
451	SET_SIZE(mmu_tlbflush_entry)
452
453#endif	/* __i386 */
454#endif	/* __lint */
455
456
457/*
458 * Get/Set the value of various control registers
459 */
460
461#if defined(__lint)
462
463ulong_t
464getcr0(void)
465{ return (0); }
466
467/* ARGSUSED */
468void
469setcr0(ulong_t value)
470{}
471
472ulong_t
473getcr2(void)
474{ return (0); }
475
476ulong_t
477getcr3(void)
478{ return (0); }
479
480#if !defined(__xpv)
481/* ARGSUSED */
482void
483setcr3(ulong_t val)
484{}
485
486void
487reload_cr3(void)
488{}
489#endif
490
491ulong_t
492getcr4(void)
493{ return (0); }
494
495/* ARGSUSED */
496void
497setcr4(ulong_t val)
498{}
499
500#if defined(__amd64)
501
502ulong_t
503getcr8(void)
504{ return (0); }
505
506/* ARGSUSED */
507void
508setcr8(ulong_t val)
509{}
510
511#endif	/* __amd64 */
512
513#else	/* __lint */
514
515#if defined(__amd64)
516
517	ENTRY(getcr0)
518	movq	%cr0, %rax
519	ret
520	SET_SIZE(getcr0)
521
522	ENTRY(setcr0)
523	movq	%rdi, %cr0
524	ret
525	SET_SIZE(setcr0)
526
527        ENTRY(getcr2)
528#if defined(__xpv)
529	movq	%gs:CPU_VCPU_INFO, %rax
530	movq	VCPU_INFO_ARCH_CR2(%rax), %rax
531#else
532        movq    %cr2, %rax
533#endif
534        ret
535	SET_SIZE(getcr2)
536
537	ENTRY(getcr3)
538	movq    %cr3, %rax
539	ret
540	SET_SIZE(getcr3)
541
542#if !defined(__xpv)
543
544        ENTRY(setcr3)
545        movq    %rdi, %cr3
546        ret
547	SET_SIZE(setcr3)
548
549	ENTRY(reload_cr3)
550	movq	%cr3, %rdi
551	movq	%rdi, %cr3
552	ret
553	SET_SIZE(reload_cr3)
554
555#endif	/* __xpv */
556
557	ENTRY(getcr4)
558	movq	%cr4, %rax
559	ret
560	SET_SIZE(getcr4)
561
562	ENTRY(setcr4)
563	movq	%rdi, %cr4
564	ret
565	SET_SIZE(setcr4)
566
567	ENTRY(getcr8)
568	movq	%cr8, %rax
569	ret
570	SET_SIZE(getcr8)
571
572	ENTRY(setcr8)
573	movq	%rdi, %cr8
574	ret
575	SET_SIZE(setcr8)
576
577#elif defined(__i386)
578
579        ENTRY(getcr0)
580        movl    %cr0, %eax
581        ret
582	SET_SIZE(getcr0)
583
584        ENTRY(setcr0)
585        movl    4(%esp), %eax
586        movl    %eax, %cr0
587        ret
588	SET_SIZE(setcr0)
589
590	/*
591	 * "lock mov %cr0" is used on processors which indicate via CPUID
592	 * that it is supported.  Normally the 32-bit TPR is accessed via
593	 * the local APIC.
594	 */
595	ENTRY(getcr8)
596	lock
597	movl	%cr0, %eax
598	ret
599	SET_SIZE(getcr8)
600
601	ENTRY(setcr8)
602        movl    4(%esp), %eax
603	lock
604        movl    %eax, %cr0
605	ret
606	SET_SIZE(setcr8)
607
608        ENTRY(getcr2)
609#if defined(__xpv)
610	movl	%gs:CPU_VCPU_INFO, %eax
611	movl	VCPU_INFO_ARCH_CR2(%eax), %eax
612#else
613        movl    %cr2, %eax
614#endif
615        ret
616	SET_SIZE(getcr2)
617
618	ENTRY(getcr3)
619	movl    %cr3, %eax
620	ret
621	SET_SIZE(getcr3)
622
623#if !defined(__xpv)
624
625        ENTRY(setcr3)
626        movl    4(%esp), %eax
627        movl    %eax, %cr3
628        ret
629	SET_SIZE(setcr3)
630
631	ENTRY(reload_cr3)
632	movl    %cr3, %eax
633	movl    %eax, %cr3
634	ret
635	SET_SIZE(reload_cr3)
636
637#endif	/* __xpv */
638
639	ENTRY(getcr4)
640	movl    %cr4, %eax
641	ret
642	SET_SIZE(getcr4)
643
644        ENTRY(setcr4)
645        movl    4(%esp), %eax
646        movl    %eax, %cr4
647        ret
648	SET_SIZE(setcr4)
649
650#endif	/* __i386 */
651#endif	/* __lint */
652
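/*
 * __cpuid_insn() executes the cpuid instruction with the register values
 * supplied in *regs and writes the resulting register values back into
 * *regs.  A minimal usage sketch (hypothetical caller) for basic leaf 1
 * (feature information):
 *
 *	struct cpuid_regs cp;
 *
 *	cp.cp_eax = 1;
 *	cp.cp_ecx = 0;
 *	(void) __cpuid_insn(&cp);
 *	... feature flags are now in cp.cp_edx and cp.cp_ecx ...
 */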
653#if defined(__lint)
654
655/*ARGSUSED*/
656uint32_t
657__cpuid_insn(struct cpuid_regs *regs)
658{ return (0); }
659
660#else	/* __lint */
661
662#if defined(__amd64)
663
664	ENTRY(__cpuid_insn)
665	movq	%rbx, %r8
666	movq	%rcx, %r9
667	movq	%rdx, %r11
668	movl	(%rdi), %eax		/* %eax = regs->cp_eax */
669	movl	0x4(%rdi), %ebx		/* %ebx = regs->cp_ebx */
670	movl	0x8(%rdi), %ecx		/* %ecx = regs->cp_ecx */
671	movl	0xc(%rdi), %edx		/* %edx = regs->cp_edx */
672	cpuid
673	movl	%eax, (%rdi)		/* regs->cp_eax = %eax */
674	movl	%ebx, 0x4(%rdi)		/* regs->cp_ebx = %ebx */
675	movl	%ecx, 0x8(%rdi)		/* regs->cp_ecx = %ecx */
676	movl	%edx, 0xc(%rdi)		/* regs->cp_edx = %edx */
677	movq	%r8, %rbx
678	movq	%r9, %rcx
679	movq	%r11, %rdx
680	ret
681	SET_SIZE(__cpuid_insn)
682
683#elif defined(__i386)
684
685        ENTRY(__cpuid_insn)
686	pushl	%ebp
687	movl	0x8(%esp), %ebp		/* %ebp = regs */
688	pushl	%ebx
689	pushl	%ecx
690	pushl	%edx
691	movl	(%ebp), %eax		/* %eax = regs->cp_eax */
692	movl	0x4(%ebp), %ebx		/* %ebx = regs->cp_ebx */
693	movl	0x8(%ebp), %ecx		/* %ecx = regs->cp_ecx */
694	movl	0xc(%ebp), %edx		/* %edx = regs->cp_edx */
695	cpuid
696	movl	%eax, (%ebp)		/* regs->cp_eax = %eax */
697	movl	%ebx, 0x4(%ebp)		/* regs->cp_ebx = %ebx */
698	movl	%ecx, 0x8(%ebp)		/* regs->cp_ecx = %ecx */
699	movl	%edx, 0xc(%ebp)		/* regs->cp_edx = %edx */
700	popl	%edx
701	popl	%ecx
702	popl	%ebx
703	popl	%ebp
704	ret
705	SET_SIZE(__cpuid_insn)
706
707#endif	/* __i386 */
708#endif	/* __lint */
709
710#if defined(__lint)
711
712/*ARGSUSED*/
713void
714i86_monitor(volatile uint32_t *addr, uint32_t extensions, uint32_t hints)
715{}
716
717#else   /* __lint */
718
719#if defined(__amd64)
720
721	ENTRY_NP(i86_monitor)
722	pushq	%rbp
723	movq	%rsp, %rbp
724	movq	%rdi, %rax		/* addr */
725	movq	%rsi, %rcx		/* extensions */
726	/* rdx contains input arg3: hints */
727	clflush	(%rax)
728	.byte	0x0f, 0x01, 0xc8	/* monitor */
729	leave
730	ret
731	SET_SIZE(i86_monitor)
732
733#elif defined(__i386)
734
735ENTRY_NP(i86_monitor)
736	pushl	%ebp
737	movl	%esp, %ebp
738	movl	0x8(%ebp),%eax		/* addr */
739	movl	0xc(%ebp),%ecx		/* extensions */
740	movl	0x10(%ebp),%edx		/* hints */
741	clflush	(%eax)
742	.byte	0x0f, 0x01, 0xc8	/* monitor */
743	leave
744	ret
745	SET_SIZE(i86_monitor)
746
747#endif	/* __i386 */
748#endif	/* __lint */
749
750#if defined(__lint)
751
752/*ARGSUSED*/
753void
754i86_mwait(uint32_t data, uint32_t extensions)
755{}
756
757#else	/* __lint */
758
759#if defined(__amd64)
760
761	ENTRY_NP(i86_mwait)
762	pushq	%rbp
763	movq	%rsp, %rbp
764	movq	%rdi, %rax		/* data */
765	movq	%rsi, %rcx		/* extensions */
766	.byte	0x0f, 0x01, 0xc9	/* mwait */
767	leave
768	ret
769	SET_SIZE(i86_mwait)
770
771#elif defined(__i386)
772
773	ENTRY_NP(i86_mwait)
774	pushl	%ebp
775	movl	%esp, %ebp
776	movl	0x8(%ebp),%eax		/* data */
777	movl	0xc(%ebp),%ecx		/* extensions */
778	.byte	0x0f, 0x01, 0xc9	/* mwait */
779	leave
780	ret
781	SET_SIZE(i86_mwait)
782
783#endif	/* __i386 */
784#endif	/* __lint */
785
786#if defined(__xpv)
787	/*
788	 * Defined in C
789	 */
790#else
791
792#if defined(__lint)
793
794hrtime_t
795tsc_read(void)
796{
797	return (0);
798}
799
800#else	/* __lint */
801
802#if defined(__amd64)
803
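	/*
	 * The sequences bracketed by the _tsc_mfence_start/_end,
	 * _tscp_start/_end, _no_rdtsc_start/_end and _tsc_lfence_start/_end
	 * labels below are alternative bodies; boot-time TSC setup code is
	 * expected to patch the appropriate one over the default entry
	 * sequence according to the capabilities of the CPU.
	 */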
804	ENTRY_NP(tsc_read)
805	movq	%rbx, %r11
806	movl	$0, %eax
807	cpuid
808	rdtsc
809	movq	%r11, %rbx
810	shlq	$32, %rdx
811	orq	%rdx, %rax
812	ret
813	.globl _tsc_mfence_start
814_tsc_mfence_start:
815	mfence
816	rdtsc
817	shlq	$32, %rdx
818	orq	%rdx, %rax
819	ret
820	.globl _tsc_mfence_end
821_tsc_mfence_end:
822	.globl _tscp_start
823_tscp_start:
824	.byte	0x0f, 0x01, 0xf9	/* rdtscp instruction */
825	shlq	$32, %rdx
826	orq	%rdx, %rax
827	ret
828	.globl _tscp_end
829_tscp_end:
830	.globl _no_rdtsc_start
831_no_rdtsc_start:
832	xorl	%edx, %edx
833	xorl	%eax, %eax
834	ret
835	.globl _no_rdtsc_end
836_no_rdtsc_end:
837	.globl _tsc_lfence_start
838_tsc_lfence_start:
839	lfence
840	rdtsc
841	shlq	$32, %rdx
842	orq	%rdx, %rax
843	ret
844	.globl _tsc_lfence_end
845_tsc_lfence_end:
846	SET_SIZE(tsc_read)
847
848#elif defined(__i386)
849
850	ENTRY_NP(tsc_read)
851	pushl	%ebx
852	movl	$0, %eax
853	cpuid
854	rdtsc
855	popl	%ebx
856	ret
857	.globl _tsc_mfence_start
858_tsc_mfence_start:
859	mfence
860	rdtsc
861	ret
862	.globl _tsc_mfence_end
863_tsc_mfence_end:
864	.globl	_tscp_start
865_tscp_start:
866	.byte	0x0f, 0x01, 0xf9	/* rdtscp instruction */
867	ret
868	.globl _tscp_end
869_tscp_end:
870	.globl _no_rdtsc_start
871_no_rdtsc_start:
872	xorl	%edx, %edx
873	xorl	%eax, %eax
874	ret
875	.globl _no_rdtsc_end
876_no_rdtsc_end:
877	.globl _tsc_lfence_start
878_tsc_lfence_start:
879	lfence
880	rdtsc
881	ret
882	.globl _tsc_lfence_end
883_tsc_lfence_end:
884	SET_SIZE(tsc_read)
885
886#endif	/* __i386 */
887
888#endif	/* __lint */
889
890
891#endif	/* __xpv */
892
893#ifdef __lint
894/*
895 * Do not use this function for obtaining the clock tick.  It
896 * is called by callers who do not need a guaranteed-correct
897 * tick value.  The proper routine to use is tsc_read().
898 */
899u_longlong_t
900randtick(void)
901{
902	return (0);
903}
904#else
905#if defined(__amd64)
906	ENTRY_NP(randtick)
907	rdtsc
908	shlq    $32, %rdx
909	orq     %rdx, %rax
910	ret
911	SET_SIZE(randtick)
912#else
913	ENTRY_NP(randtick)
914	rdtsc
915	ret
916	SET_SIZE(randtick)
917#endif /* __i386 */
918#endif /* __lint */
919/*
920 * Insert entryp after predp in a doubly linked list.
921 */
922
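/*
 * C equivalent of the operation performed below (sketch; forw is at
 * offset 0 and back is at offset CPTRSIZE within each element):
 *
 *	succ = predp->forw;
 *	entryp->back = predp;
 *	entryp->forw = succ;
 *	predp->forw = entryp;
 *	succ->back = entryp;
 */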
923#if defined(__lint)
924
925/*ARGSUSED*/
926void
927_insque(caddr_t entryp, caddr_t predp)
928{}
929
930#else	/* __lint */
931
932#if defined(__amd64)
933
934	ENTRY(_insque)
935	movq	(%rsi), %rax		/* predp->forw			*/
936	movq	%rsi, CPTRSIZE(%rdi)	/* entryp->back = predp		*/
937	movq	%rax, (%rdi)		/* entryp->forw = predp->forw	*/
938	movq	%rdi, (%rsi)		/* predp->forw = entryp		*/
939	movq	%rdi, CPTRSIZE(%rax)	/* predp->forw->back = entryp	*/
940	ret
941	SET_SIZE(_insque)
942
943#elif defined(__i386)
944
945	ENTRY(_insque)
946	movl	8(%esp), %edx
947	movl	4(%esp), %ecx
948	movl	(%edx), %eax		/* predp->forw			*/
949	movl	%edx, CPTRSIZE(%ecx)	/* entryp->back = predp		*/
950	movl	%eax, (%ecx)		/* entryp->forw = predp->forw	*/
951	movl	%ecx, (%edx)		/* predp->forw = entryp		*/
952	movl	%ecx, CPTRSIZE(%eax)	/* predp->forw->back = entryp	*/
953	ret
954	SET_SIZE(_insque)
955
956#endif	/* __i386 */
957#endif	/* __lint */
958
959/*
960 * Remove entryp from a doubly linked list
961 */
962
963#if defined(__lint)
964
965/*ARGSUSED*/
966void
967_remque(caddr_t entryp)
968{}
969
970#else	/* __lint */
971
972#if defined(__amd64)
973
974	ENTRY(_remque)
975	movq	(%rdi), %rax		/* entry->forw */
976	movq	CPTRSIZE(%rdi), %rdx	/* entry->back */
977	movq	%rax, (%rdx)		/* entry->back->forw = entry->forw */
978	movq	%rdx, CPTRSIZE(%rax)	/* entry->forw->back = entry->back */
979	ret
980	SET_SIZE(_remque)
981
982#elif defined(__i386)
983
984	ENTRY(_remque)
985	movl	4(%esp), %ecx
986	movl	(%ecx), %eax		/* entry->forw */
987	movl	CPTRSIZE(%ecx), %edx	/* entry->back */
988	movl	%eax, (%edx)		/* entry->back->forw = entry->forw */
989	movl	%edx, CPTRSIZE(%eax)	/* entry->forw->back = entry->back */
990	ret
991	SET_SIZE(_remque)
992
993#endif	/* __i386 */
994#endif	/* __lint */
995
996/*
997 * Returns the number of
998 * non-null bytes in the string argument.
999 */
1000
1001#if defined(__lint)
1002
1003/* ARGSUSED */
1004size_t
1005strlen(const char *str)
1006{ return (0); }
1007
1008#else	/* __lint */
1009
1010#if defined(__amd64)
1011
1012/*
1013 * This is close to a simple transliteration of a C version of this
1014 * routine.  We should either just -make- this be a C version, or
1015 * justify having it in assembler by making it significantly faster.
1016 *
1017 * size_t
1018 * strlen(const char *s)
1019 * {
1020 *	const char *s0;
1021 * #if defined(DEBUG)
1022 *	if ((uintptr_t)s < KERNELBASE)
1023 *		panic(.str_panic_msg);
1024 * #endif
1025 *	for (s0 = s; *s; s++)
1026 *		;
1027 *	return (s - s0);
1028 * }
1029 */
1030
1031	ENTRY(strlen)
1032#ifdef DEBUG
1033	movq	postbootkernelbase(%rip), %rax
1034	cmpq	%rax, %rdi
1035	jae	str_valid
1036	pushq	%rbp
1037	movq	%rsp, %rbp
1038	leaq	.str_panic_msg(%rip), %rdi
1039	xorl	%eax, %eax
1040	call	panic
1041#endif	/* DEBUG */
1042str_valid:
1043	cmpb	$0, (%rdi)
1044	movq	%rdi, %rax
1045	je	.null_found
1046	.align	4
1047.strlen_loop:
1048	incq	%rdi
1049	cmpb	$0, (%rdi)
1050	jne	.strlen_loop
1051.null_found:
1052	subq	%rax, %rdi
1053	movq	%rdi, %rax
1054	ret
1055	SET_SIZE(strlen)
1056
1057#elif defined(__i386)
1058
1059	ENTRY(strlen)
1060#ifdef DEBUG
1061	movl	postbootkernelbase, %eax
1062	cmpl	%eax, 4(%esp)
1063	jae	str_valid
1064	pushl	%ebp
1065	movl	%esp, %ebp
1066	pushl	$.str_panic_msg
1067	call	panic
1068#endif /* DEBUG */
1069
1070str_valid:
1071	movl	4(%esp), %eax		/* %eax = string address */
1072	testl	$3, %eax		/* if %eax not word aligned */
1073	jnz	.not_word_aligned	/* goto .not_word_aligned */
1074	.align	4
1075.word_aligned:
1076	movl	(%eax), %edx		/* move 1 word from (%eax) to %edx */
1077	movl	$0x7f7f7f7f, %ecx
1078	andl	%edx, %ecx		/* %ecx = %edx & 0x7f7f7f7f */
1079	addl	$4, %eax		/* next word */
1080	addl	$0x7f7f7f7f, %ecx	/* %ecx += 0x7f7f7f7f */
1081	orl	%edx, %ecx		/* %ecx |= %edx */
1082	andl	$0x80808080, %ecx	/* %ecx &= 0x80808080 */
1083	cmpl	$0x80808080, %ecx	/* if no null byte in this word */
1084	je	.word_aligned		/* goto .word_aligned */
1085	subl	$4, %eax		/* post-incremented */
1086.not_word_aligned:
1087	cmpb	$0, (%eax)		/* if a byte in (%eax) is null */
1088	je	.null_found		/* goto .null_found */
1089	incl	%eax			/* next byte */
1090	testl	$3, %eax		/* if %eax not word aligned */
1091	jnz	.not_word_aligned	/* goto .not_word_aligned */
1092	jmp	.word_aligned		/* goto .word_aligned */
1093	.align	4
1094.null_found:
1095	subl	4(%esp), %eax		/* %eax -= string address */
1096	ret
1097	SET_SIZE(strlen)
1098
1099#endif	/* __i386 */
1100
1101#ifdef DEBUG
1102	.text
1103.str_panic_msg:
1104	.string "strlen: argument below kernelbase"
1105#endif /* DEBUG */
1106
1107#endif	/* __lint */
1108
1109	/*
1110	 * Berkeley 4.3 introduced symbolically named interrupt levels
1111	 * as a way to deal with priority in a machine-independent fashion.
1112	 * Numbered priorities are machine specific, and their use should be
1113	 * discouraged where possible.
1114	 *
1115	 * Note, for the machine specific priorities there are
1116	 * examples listed for devices that use a particular priority.
1117	 * It should not be construed that all devices of that
1118	 * type should be at that priority.  It is currently where
1119	 * the current devices fit into the priority scheme based
1120	 * upon time criticality.
1121	 *
1122	 * The underlying assumption of these assignments is that
1123	 * IPL 10 is the highest level from which a device
1124	 * routine can call wakeup.  Devices that interrupt from higher
1125	 * levels are restricted in what they can do.  If they need
1126	 * kernel services, they should schedule a routine at a lower
1127	 * level (via software interrupt) to do the required
1128	 * processing.
1129	 *
1130	 * Examples of this higher usage:
1131	 *	Level	Usage
1132	 *	14	Profiling clock (and PROM uart polling clock)
1133	 *	12	Serial ports
1134	 *
1135	 * The serial ports request lower level processing on level 6.
1136	 *
1137	 * Also, almost all splN routines (where N is a number or a
1138	 * mnemonic) will do a RAISE(), on the assumption that they are
1139	 * never used to lower our priority.
1140	 * The exceptions are:
1141	 *	spl8()		Because you can't be above 15 to begin with!
1142	 *	splzs()		Because this is used at boot time to lower our
1143	 *			priority, to allow the PROM to poll the uart.
1144	 *	spl0()		Used to lower priority to 0.
1145	 */
1146
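/*
 * A minimal sketch of the usual raise/restore pattern (hypothetical caller):
 *
 *	int s;
 *
 *	s = splhi();
 *	... short critical section ...
 *	splx(s);
 */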
1147#if defined(__lint)
1148
1149int spl0(void)		{ return (0); }
1150int spl6(void)		{ return (0); }
1151int spl7(void)		{ return (0); }
1152int spl8(void)		{ return (0); }
1153int splhigh(void)	{ return (0); }
1154int splhi(void)		{ return (0); }
1155int splzs(void)		{ return (0); }
1156
1157/* ARGSUSED */
1158void
1159splx(int level)
1160{}
1161
1162#else	/* __lint */
1163
1164#if defined(__amd64)
1165
1166#define	SETPRI(level) \
1167	movl	$/**/level, %edi;	/* new priority */		\
1168	jmp	do_splx			/* redirect to do_splx */
1169
1170#define	RAISE(level) \
1171	movl	$/**/level, %edi;	/* new priority */		\
1172	jmp	splr			/* redirect to splr */
1173
1174#elif defined(__i386)
1175
1176#define	SETPRI(level) \
1177	pushl	$/**/level;	/* new priority */			\
1178	call	do_splx;	/* invoke common splx code */		\
1179	addl	$4, %esp;	/* unstack arg */			\
1180	ret
1181
1182#define	RAISE(level) \
1183	pushl	$/**/level;	/* new priority */			\
1184	call	splr;		/* invoke common splr code */		\
1185	addl	$4, %esp;	/* unstack args */			\
1186	ret
1187
1188#endif	/* __i386 */
1189
1190	/* locks out all interrupts, including memory errors */
1191	ENTRY(spl8)
1192	SETPRI(15)
1193	SET_SIZE(spl8)
1194
1195	/* just below the level that profiling runs */
1196	ENTRY(spl7)
1197	RAISE(13)
1198	SET_SIZE(spl7)
1199
1200	/* sun specific - highest priority onboard serial i/o asy ports */
1201	ENTRY(splzs)
1202	SETPRI(12)	/* Can't be a RAISE, as it's used to lower us */
1203	SET_SIZE(splzs)
1204
1205	ENTRY(splhi)
1206	ALTENTRY(splhigh)
1207	ALTENTRY(spl6)
1208	ALTENTRY(i_ddi_splhigh)
1209
1210	RAISE(DISP_LEVEL)
1211
1212	SET_SIZE(i_ddi_splhigh)
1213	SET_SIZE(spl6)
1214	SET_SIZE(splhigh)
1215	SET_SIZE(splhi)
1216
1217	/* allow all interrupts */
1218	ENTRY(spl0)
1219	SETPRI(0)
1220	SET_SIZE(spl0)
1221
1222
1223	/* splx implementation */
1224	ENTRY(splx)
1225	jmp	do_splx		/* redirect to common splx code */
1226	SET_SIZE(splx)
1227
1228#endif	/* __lint */
1229
1230#if defined(__i386)
1231
1232/*
1233 * Read and write the %gs register
1234 */
1235
1236#if defined(__lint)
1237
1238/*ARGSUSED*/
1239uint16_t
1240getgs(void)
1241{ return (0); }
1242
1243/*ARGSUSED*/
1244void
1245setgs(uint16_t sel)
1246{}
1247
1248#else	/* __lint */
1249
1250	ENTRY(getgs)
1251	clr	%eax
1252	movw	%gs, %ax
1253	ret
1254	SET_SIZE(getgs)
1255
1256	ENTRY(setgs)
1257	movw	4(%esp), %gs
1258	ret
1259	SET_SIZE(setgs)
1260
1261#endif	/* __lint */
1262#endif	/* __i386 */
1263
1264#if defined(__lint)
1265
1266void
1267pc_reset(void)
1268{}
1269
1270void
1271efi_reset(void)
1272{}
1273
1274#else	/* __lint */
1275
1276	ENTRY(wait_500ms)
1277#if defined(__amd64)
1278	pushq	%rbx
1279#elif defined(__i386)
1280	push	%ebx
1281#endif
1282	movl	$50000, %ebx
12831:
1284	call	tenmicrosec
1285	decl	%ebx
1286	jnz	1b
1287#if defined(__amd64)
1288	popq	%rbx
1289#elif defined(__i386)
1290	pop	%ebx
1291#endif
1292	ret
1293	SET_SIZE(wait_500ms)
1294
1295#define	RESET_METHOD_KBC	1
1296#define	RESET_METHOD_PORT92	2
1297#define RESET_METHOD_PCI	4
1298
1299	DGDEF3(pc_reset_methods, 4, 8)
1300	.long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;
1301
1302	ENTRY(pc_reset)
1303
1304#if defined(__i386)
1305	testl	$RESET_METHOD_KBC, pc_reset_methods
1306#elif defined(__amd64)
1307	testl	$RESET_METHOD_KBC, pc_reset_methods(%rip)
1308#endif
1309	jz	1f
1310
1311	/
1312	/ Try the classic keyboard controller-triggered reset.
1313	/
1314	movw	$0x64, %dx
1315	movb	$0xfe, %al
1316	outb	(%dx)
1317
1318	/ Wait up to 500 milliseconds here for the keyboard controller
1319	/ to pull the reset line.  On some systems where the keyboard
1320	/ controller is slow to pull the reset line, the next reset method
1321	/ may be executed (which may be bad if those systems hang when the
1322	/ next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
1323	/ and Ferrari 4000 (doesn't like the cf9 reset method))
1324
1325	call	wait_500ms
1326
13271:
1328#if defined(__i386)
1329	testl	$RESET_METHOD_PORT92, pc_reset_methods
1330#elif defined(__amd64)
1331	testl	$RESET_METHOD_PORT92, pc_reset_methods(%rip)
1332#endif
1333	jz	3f
1334
1335	/
1336	/ Try port 0x92 fast reset
1337	/
1338	movw	$0x92, %dx
1339	inb	(%dx)
1340	cmpb	$0xff, %al	/ If port's not there, we should get back 0xFF
1341	je	1f
1342	testb	$1, %al		/ If bit 0
1343	jz	2f		/ is clear, jump to perform the reset
1344	andb	$0xfe, %al	/ otherwise,
1345	outb	(%dx)		/ clear bit 0 first, then
13462:
1347	orb	$1, %al		/ Set bit 0
1348	outb	(%dx)		/ and reset the system
13491:
1350
1351	call	wait_500ms
1352
13533:
1354#if defined(__i386)
1355	testl	$RESET_METHOD_PCI, pc_reset_methods
1356#elif defined(__amd64)
1357	testl	$RESET_METHOD_PCI, pc_reset_methods(%rip)
1358#endif
1359	jz	4f
1360
1361	/ Try the PCI (soft) reset vector (should work on all modern systems,
1362	/ but has been shown to cause problems on 450NX systems, and some newer
1363	/ systems (e.g. ATI IXP400-equipped systems))
1364	/ When resetting via this method, 2 writes are required.  The first
1365	/ targets bit 1 (0=hard reset without power cycle, 1=hard reset with
1366	/ power cycle).
1367	/ The reset occurs on the second write, during bit 2's transition from
1368	/ 0->1.
1369	movw	$0xcf9, %dx
1370	movb	$0x2, %al	/ Reset mode = hard, no power cycle
1371	outb	(%dx)
1372	movb	$0x6, %al
1373	outb	(%dx)
1374
1375	call	wait_500ms
1376
13774:
1378	/
1379	/ port 0xcf9 failed also.  Last-ditch effort is to
1380	/ triple-fault the CPU.
1381	/ Also, use triple fault for EFI firmware
1382	/
1383	ENTRY(efi_reset)
1384#if defined(__amd64)
1385	pushq	$0x0
1386	pushq	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
1387	lidt	(%rsp)
1388#elif defined(__i386)
1389	pushl	$0x0
1390	pushl	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
1391	lidt	(%esp)
1392#endif
1393	int	$0x0		/ Trigger interrupt, generate triple-fault
1394
1395	cli
1396	hlt			/ Wait forever
1397	/*NOTREACHED*/
1398	SET_SIZE(efi_reset)
1399	SET_SIZE(pc_reset)
1400
1401#endif	/* __lint */
1402
1403/*
1404 * C callable in and out routines
1405 */
1406
1407#if defined(__lint)
1408
1409/* ARGSUSED */
1410void
1411outl(int port_address, uint32_t val)
1412{}
1413
1414#else	/* __lint */
1415
1416#if defined(__amd64)
1417
1418	ENTRY(outl)
1419	movw	%di, %dx
1420	movl	%esi, %eax
1421	outl	(%dx)
1422	ret
1423	SET_SIZE(outl)
1424
1425#elif defined(__i386)
1426
1427	.set	PORT, 4
1428	.set	VAL, 8
1429
1430	ENTRY(outl)
1431	movw	PORT(%esp), %dx
1432	movl	VAL(%esp), %eax
1433	outl	(%dx)
1434	ret
1435	SET_SIZE(outl)
1436
1437#endif	/* __i386 */
1438#endif	/* __lint */
1439
1440#if defined(__lint)
1441
1442/* ARGSUSED */
1443void
1444outw(int port_address, uint16_t val)
1445{}
1446
1447#else	/* __lint */
1448
1449#if defined(__amd64)
1450
1451	ENTRY(outw)
1452	movw	%di, %dx
1453	movw	%si, %ax
1454	D16 outl (%dx)		/* XX64 why not outw? */
1455	ret
1456	SET_SIZE(outw)
1457
1458#elif defined(__i386)
1459
1460	ENTRY(outw)
1461	movw	PORT(%esp), %dx
1462	movw	VAL(%esp), %ax
1463	D16 outl (%dx)
1464	ret
1465	SET_SIZE(outw)
1466
1467#endif	/* __i386 */
1468#endif	/* __lint */
1469
1470#if defined(__lint)
1471
1472/* ARGSUSED */
1473void
1474outb(int port_address, uint8_t val)
1475{}
1476
1477#else	/* __lint */
1478
1479#if defined(__amd64)
1480
1481	ENTRY(outb)
1482	movw	%di, %dx
1483	movb	%sil, %al
1484	outb	(%dx)
1485	ret
1486	SET_SIZE(outb)
1487
1488#elif defined(__i386)
1489
1490	ENTRY(outb)
1491	movw	PORT(%esp), %dx
1492	movb	VAL(%esp), %al
1493	outb	(%dx)
1494	ret
1495	SET_SIZE(outb)
1496
1497#endif	/* __i386 */
1498#endif	/* __lint */
1499
1500#if defined(__lint)
1501
1502/* ARGSUSED */
1503uint32_t
1504inl(int port_address)
1505{ return (0); }
1506
1507#else	/* __lint */
1508
1509#if defined(__amd64)
1510
1511	ENTRY(inl)
1512	xorl	%eax, %eax
1513	movw	%di, %dx
1514	inl	(%dx)
1515	ret
1516	SET_SIZE(inl)
1517
1518#elif defined(__i386)
1519
1520	ENTRY(inl)
1521	movw	PORT(%esp), %dx
1522	inl	(%dx)
1523	ret
1524	SET_SIZE(inl)
1525
1526#endif	/* __i386 */
1527#endif	/* __lint */
1528
1529#if defined(__lint)
1530
1531/* ARGSUSED */
1532uint16_t
1533inw(int port_address)
1534{ return (0); }
1535
1536#else	/* __lint */
1537
1538#if defined(__amd64)
1539
1540	ENTRY(inw)
1541	xorl	%eax, %eax
1542	movw	%di, %dx
1543	D16 inl	(%dx)
1544	ret
1545	SET_SIZE(inw)
1546
1547#elif defined(__i386)
1548
1549	ENTRY(inw)
1550	subl	%eax, %eax
1551	movw	PORT(%esp), %dx
1552	D16 inl	(%dx)
1553	ret
1554	SET_SIZE(inw)
1555
1556#endif	/* __i386 */
1557#endif	/* __lint */
1558
1559
1560#if defined(__lint)
1561
1562/* ARGSUSED */
1563uint8_t
1564inb(int port_address)
1565{ return (0); }
1566
1567#else	/* __lint */
1568
1569#if defined(__amd64)
1570
1571	ENTRY(inb)
1572	xorl	%eax, %eax
1573	movw	%di, %dx
1574	inb	(%dx)
1575	ret
1576	SET_SIZE(inb)
1577
1578#elif defined(__i386)
1579
1580	ENTRY(inb)
1581	subl    %eax, %eax
1582	movw	PORT(%esp), %dx
1583	inb	(%dx)
1584	ret
1585	SET_SIZE(inb)
1586
1587#endif	/* __i386 */
1588#endif	/* __lint */
1589
1590
1591#if defined(__lint)
1592
1593/* ARGSUSED */
1594void
1595repoutsw(int port, uint16_t *addr, int cnt)
1596{}
1597
1598#else	/* __lint */
1599
1600#if defined(__amd64)
1601
1602	ENTRY(repoutsw)
1603	movl	%edx, %ecx
1604	movw	%di, %dx
1605	rep
1606	  D16 outsl
1607	ret
1608	SET_SIZE(repoutsw)
1609
1610#elif defined(__i386)
1611
1612	/*
1613	 * The arguments and saved registers are on the stack in the
1614	 *  following order:
1615	 *      |  cnt  |  +16
1616	 *      | *addr |  +12
1617	 *      | port  |  +8
1618	 *      |  eip  |  +4
1619	 *      |  esi  |  <-- %esp
1620	 * If additional values are pushed onto the stack, make sure
1621	 * to adjust the following constants accordingly.
1622	 */
1623	.set	PORT, 8
1624	.set	ADDR, 12
1625	.set	COUNT, 16
1626
1627	ENTRY(repoutsw)
1628	pushl	%esi
1629	movl	PORT(%esp), %edx
1630	movl	ADDR(%esp), %esi
1631	movl	COUNT(%esp), %ecx
1632	rep
1633	  D16 outsl
1634	popl	%esi
1635	ret
1636	SET_SIZE(repoutsw)
1637
1638#endif	/* __i386 */
1639#endif	/* __lint */
1640
1641
1642#if defined(__lint)
1643
1644/* ARGSUSED */
1645void
1646repinsw(int port_addr, uint16_t *addr, int cnt)
1647{}
1648
1649#else	/* __lint */
1650
1651#if defined(__amd64)
1652
1653	ENTRY(repinsw)
1654	movl	%edx, %ecx
1655	movw	%di, %dx
1656	rep
1657	  D16 insl
1658	ret
1659	SET_SIZE(repinsw)
1660
1661#elif defined(__i386)
1662
1663	ENTRY(repinsw)
1664	pushl	%edi
1665	movl	PORT(%esp), %edx
1666	movl	ADDR(%esp), %edi
1667	movl	COUNT(%esp), %ecx
1668	rep
1669	  D16 insl
1670	popl	%edi
1671	ret
1672	SET_SIZE(repinsw)
1673
1674#endif	/* __i386 */
1675#endif	/* __lint */
1676
1677
1678#if defined(__lint)
1679
1680/* ARGSUSED */
1681void
1682repinsb(int port, uint8_t *addr, int count)
1683{}
1684
1685#else	/* __lint */
1686
1687#if defined(__amd64)
1688
1689	ENTRY(repinsb)
1690	movl	%edx, %ecx
1691	movw	%di, %dx
1692	movq	%rsi, %rdi
1693	rep
1694	  insb
1695	ret
1696	SET_SIZE(repinsb)
1697
1698#elif defined(__i386)
1699
1700	/*
1701	 * The arguments and saved registers are on the stack in the
1702	 *  following order:
1703	 *      |  cnt  |  +16
1704	 *      | *addr |  +12
1705	 *      | port  |  +8
1706	 *      |  eip  |  +4
1707	 *      |  esi  |  <-- %esp
1708	 * If additional values are pushed onto the stack, make sure
1709	 * to adjust the following constants accordingly.
1710	 */
1711	.set	IO_PORT, 8
1712	.set	IO_ADDR, 12
1713	.set	IO_COUNT, 16
1714
1715	ENTRY(repinsb)
1716	pushl	%edi
1717	movl	IO_ADDR(%esp), %edi
1718	movl	IO_COUNT(%esp), %ecx
1719	movl	IO_PORT(%esp), %edx
1720	rep
1721	  insb
1722	popl	%edi
1723	ret
1724	SET_SIZE(repinsb)
1725
1726#endif	/* __i386 */
1727#endif	/* __lint */
1728
1729
1730/*
1731 * Input a stream of 32-bit words.
1732 * NOTE: count is a DWORD count.
1733 */
1734#if defined(__lint)
1735
1736/* ARGSUSED */
1737void
1738repinsd(int port, uint32_t *addr, int count)
1739{}
1740
1741#else	/* __lint */
1742
1743#if defined(__amd64)
1744
1745	ENTRY(repinsd)
1746	movl	%edx, %ecx
1747	movw	%di, %dx
1748	movq	%rsi, %rdi
1749	rep
1750	  insl
1751	ret
1752	SET_SIZE(repinsd)
1753
1754#elif defined(__i386)
1755
1756	ENTRY(repinsd)
1757	pushl	%edi
1758	movl	IO_ADDR(%esp), %edi
1759	movl	IO_COUNT(%esp), %ecx
1760	movl	IO_PORT(%esp), %edx
1761	rep
1762	  insl
1763	popl	%edi
1764	ret
1765	SET_SIZE(repinsd)
1766
1767#endif	/* __i386 */
1768#endif	/* __lint */
1769
1770/*
1771 * Output a stream of bytes
1772 * NOTE: count is a byte count
1773 */
1774#if defined(__lint)
1775
1776/* ARGSUSED */
1777void
1778repoutsb(int port, uint8_t *addr, int count)
1779{}
1780
1781#else	/* __lint */
1782
1783#if defined(__amd64)
1784
1785	ENTRY(repoutsb)
1786	movl	%edx, %ecx
1787	movw	%di, %dx
1788	rep
1789	  outsb
1790	ret
1791	SET_SIZE(repoutsb)
1792
1793#elif defined(__i386)
1794
1795	ENTRY(repoutsb)
1796	pushl	%esi
1797	movl	IO_ADDR(%esp), %esi
1798	movl	IO_COUNT(%esp), %ecx
1799	movl	IO_PORT(%esp), %edx
1800	rep
1801	  outsb
1802	popl	%esi
1803	ret
1804	SET_SIZE(repoutsb)
1805
1806#endif	/* __i386 */
1807#endif	/* __lint */
1808
1809/*
1810 * Output a stream of 32-bit words
1811 * NOTE: count is a DWORD count
1812 */
1813#if defined(__lint)
1814
1815/* ARGSUSED */
1816void
1817repoutsd(int port, uint32_t *addr, int count)
1818{}
1819
1820#else	/* __lint */
1821
1822#if defined(__amd64)
1823
1824	ENTRY(repoutsd)
1825	movl	%edx, %ecx
1826	movw	%di, %dx
1827	rep
1828	  outsl
1829	ret
1830	SET_SIZE(repoutsd)
1831
1832#elif defined(__i386)
1833
1834	ENTRY(repoutsd)
1835	pushl	%esi
1836	movl	IO_ADDR(%esp), %esi
1837	movl	IO_COUNT(%esp), %ecx
1838	movl	IO_PORT(%esp), %edx
1839	rep
1840	  outsl
1841	popl	%esi
1842	ret
1843	SET_SIZE(repoutsd)
1844
1845#endif	/* __i386 */
1846#endif	/* __lint */
1847
1848/*
1849 * void int3(void)
1850 * void int18(void)
1851 * void int20(void)
1852 * void int_cmci(void)
1853 */
1854
1855#if defined(__lint)
1856
1857void
1858int3(void)
1859{}
1860
1861void
1862int18(void)
1863{}
1864
1865void
1866int20(void)
1867{}
1868
1869void
1870int_cmci(void)
1871{}
1872
1873#else	/* __lint */
1874
1875	ENTRY(int3)
1876	int	$T_BPTFLT
1877	ret
1878	SET_SIZE(int3)
1879
1880	ENTRY(int18)
1881	int	$T_MCE
1882	ret
1883	SET_SIZE(int18)
1884
1885	ENTRY(int20)
1886	movl	boothowto, %eax
1887	andl	$RB_DEBUG, %eax
1888	jz	1f
1889
1890	int	$T_DBGENTR
18911:
1892	rep;	ret	/* use 2 byte return instruction when branch target */
1893			/* AMD Software Optimization Guide - Section 6.2 */
1894	SET_SIZE(int20)
1895
1896	ENTRY(int_cmci)
1897	int	$T_ENOEXTFLT
1898	ret
1899	SET_SIZE(int_cmci)
1900
1901#endif	/* __lint */
1902
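/*
 * scanc(size, cp, table, mask): scan the size bytes starting at cp and
 * stop at the first byte c for which (table[c] & mask) != 0.  Returns the
 * number of bytes left unscanned (counting the matching byte), or 0 if no
 * byte matched.
 */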
1903#if defined(__lint)
1904
1905/* ARGSUSED */
1906int
1907scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
1908{ return (0); }
1909
1910#else	/* __lint */
1911
1912#if defined(__amd64)
1913
1914	ENTRY(scanc)
1915					/* rdi == size */
1916					/* rsi == cp */
1917					/* rdx == table */
1918					/* rcx == mask */
1919	addq	%rsi, %rdi		/* end = &cp[size] */
1920.scanloop:
1921	cmpq	%rdi, %rsi		/* while (cp < end */
1922	jnb	.scandone
1923	movzbq	(%rsi), %r8		/* %r8 = *cp */
1924	incq	%rsi			/* cp++ */
1925	testb	%cl, (%r8, %rdx)
1926	jz	.scanloop		/*  && (table[*cp] & mask) == 0) */
1927	decq	%rsi			/* (fix post-increment) */
1928.scandone:
1929	movl	%edi, %eax
1930	subl	%esi, %eax		/* return (end - cp) */
1931	ret
1932	SET_SIZE(scanc)
1933
1934#elif defined(__i386)
1935
1936	ENTRY(scanc)
1937	pushl	%edi
1938	pushl	%esi
1939	movb	24(%esp), %cl		/* mask = %cl */
1940	movl	16(%esp), %esi		/* cp = %esi */
1941	movl	20(%esp), %edx		/* table = %edx */
1942	movl	%esi, %edi
1943	addl	12(%esp), %edi		/* end = &cp[size]; */
1944.scanloop:
1945	cmpl	%edi, %esi		/* while (cp < end */
1946	jnb	.scandone
1947	movzbl	(%esi),  %eax		/* %al = *cp */
1948	incl	%esi			/* cp++ */
1949	movb	(%edx,  %eax), %al	/* %al = table[*cp] */
1950	testb	%al, %cl
1951	jz	.scanloop		/*   && (table[*cp] & mask) == 0) */
1952	dec	%esi			/* post-incremented */
1953.scandone:
1954	movl	%edi, %eax
1955	subl	%esi, %eax		/* return (end - cp) */
1956	popl	%esi
1957	popl	%edi
1958	ret
1959	SET_SIZE(scanc)
1960
1961#endif	/* __i386 */
1962#endif	/* __lint */
1963
1964/*
1965 * Replacement functions for ones that are normally inlined.
1966 * In addition to the copy in i86.il, they are defined here just in case.
1967 */
1968
1969#if defined(__lint)
1970
1971ulong_t
1972intr_clear(void)
1973{ return (0); }
1974
1975ulong_t
1976clear_int_flag(void)
1977{ return (0); }
1978
1979#else	/* __lint */
1980
1981#if defined(__amd64)
1982
1983	ENTRY(intr_clear)
1984	ENTRY(clear_int_flag)
1985	pushfq
1986	popq	%rax
1987#if defined(__xpv)
1988	leaq	xpv_panicking, %rdi
1989	movl	(%rdi), %edi
1990	cmpl	$0, %edi
1991	jne	2f
1992	CLIRET(%rdi, %dl)	/* returns event mask in %dl */
1993	/*
1994	 * Synthesize the PS_IE bit from the event mask bit
1995	 */
1996	andq    $_BITNOT(PS_IE), %rax
1997	testb	$1, %dl
1998	jnz	1f
1999	orq	$PS_IE, %rax
20001:
2001	ret
20022:
2003#endif
2004	CLI(%rdi)
2005	ret
2006	SET_SIZE(clear_int_flag)
2007	SET_SIZE(intr_clear)
2008
2009#elif defined(__i386)
2010
2011	ENTRY(intr_clear)
2012	ENTRY(clear_int_flag)
2013	pushfl
2014	popl	%eax
2015#if defined(__xpv)
2016	leal	xpv_panicking, %edx
2017	movl	(%edx), %edx
2018	cmpl	$0, %edx
2019	jne	2f
2020	CLIRET(%edx, %cl)	/* returns event mask in %cl */
2021	/*
2022	 * Synthesize the PS_IE bit from the event mask bit
2023	 */
2024	andl    $_BITNOT(PS_IE), %eax
2025	testb	$1, %cl
2026	jnz	1f
2027	orl	$PS_IE, %eax
20281:
2029	ret
20302:
2031#endif
2032	CLI(%edx)
2033	ret
2034	SET_SIZE(clear_int_flag)
2035	SET_SIZE(intr_clear)
2036
2037#endif	/* __i386 */
2038#endif	/* __lint */
2039
2040#if defined(__lint)
2041
2042struct cpu *
2043curcpup(void)
2044{ return (0); }
2045
2046#else	/* __lint */
2047
2048#if defined(__amd64)
2049
2050	ENTRY(curcpup)
2051	movq	%gs:CPU_SELF, %rax
2052	ret
2053	SET_SIZE(curcpup)
2054
2055#elif defined(__i386)
2056
2057	ENTRY(curcpup)
2058	movl	%gs:CPU_SELF, %eax
2059	ret
2060	SET_SIZE(curcpup)
2061
2062#endif	/* __i386 */
2063#endif	/* __lint */
2064
2065/* htonll(), ntohll(), htonl(), ntohl(), htons(), ntohs()
2066 * These functions reverse the byte order of the input parameter and return
2067 * the result.  This is to convert the byte order from host byte order
2068 * (little endian) to network byte order (big endian), or vice versa.
2069 */
2070
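/*
 * Illustrative values: on this (little-endian) architecture,
 * htons(0x1234) yields 0x3412 and htonl(0x12345678) yields 0x78563412;
 * the ntoh* forms perform the identical swap in the other direction.
 */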
2071#if defined(__lint)
2072
2073uint64_t
2074htonll(uint64_t i)
2075{ return (i); }
2076
2077uint64_t
2078ntohll(uint64_t i)
2079{ return (i); }
2080
2081uint32_t
2082htonl(uint32_t i)
2083{ return (i); }
2084
2085uint32_t
2086ntohl(uint32_t i)
2087{ return (i); }
2088
2089uint16_t
2090htons(uint16_t i)
2091{ return (i); }
2092
2093uint16_t
2094ntohs(uint16_t i)
2095{ return (i); }
2096
2097#else	/* __lint */
2098
2099#if defined(__amd64)
2100
2101	ENTRY(htonll)
2102	ALTENTRY(ntohll)
2103	movq	%rdi, %rax
2104	bswapq	%rax
2105	ret
2106	SET_SIZE(ntohll)
2107	SET_SIZE(htonll)
2108
2109	/* XX64 there must be shorter sequences for this */
2110	ENTRY(htonl)
2111	ALTENTRY(ntohl)
2112	movl	%edi, %eax
2113	bswap	%eax
2114	ret
2115	SET_SIZE(ntohl)
2116	SET_SIZE(htonl)
2117
2118	/* XX64 there must be better sequences for this */
2119	ENTRY(htons)
2120	ALTENTRY(ntohs)
2121	movl	%edi, %eax
2122	bswap	%eax
2123	shrl	$16, %eax
2124	ret
2125	SET_SIZE(ntohs)
2126	SET_SIZE(htons)
2127
2128#elif defined(__i386)
2129
2130	ENTRY(htonll)
2131	ALTENTRY(ntohll)
2132	movl	4(%esp), %edx
2133	movl	8(%esp), %eax
2134	bswap	%edx
2135	bswap	%eax
2136	ret
2137	SET_SIZE(ntohll)
2138	SET_SIZE(htonll)
2139
2140	ENTRY(htonl)
2141	ALTENTRY(ntohl)
2142	movl	4(%esp), %eax
2143	bswap	%eax
2144	ret
2145	SET_SIZE(ntohl)
2146	SET_SIZE(htonl)
2147
2148	ENTRY(htons)
2149	ALTENTRY(ntohs)
2150	movl	4(%esp), %eax
2151	bswap	%eax
2152	shrl	$16, %eax
2153	ret
2154	SET_SIZE(ntohs)
2155	SET_SIZE(htons)
2156
2157#endif	/* __i386 */
2158#endif	/* __lint */
2159
2160
2161#if defined(__lint)
2162
2163/* ARGSUSED */
2164void
2165intr_restore(ulong_t i)
2166{ return; }
2167
2168/* ARGSUSED */
2169void
2170restore_int_flag(ulong_t i)
2171{ return; }
2172
2173#else	/* __lint */
2174
2175#if defined(__amd64)
2176
2177	ENTRY(intr_restore)
2178	ENTRY(restore_int_flag)
2179	testq	$PS_IE, %rdi
2180	jz	1f
2181#if defined(__xpv)
2182	leaq	xpv_panicking, %rsi
2183	movl	(%rsi), %esi
2184	cmpl	$0, %esi
2185	jne	1f
2186	/*
2187	 * Since we're -really- running unprivileged, our attempt
2188	 * to change the state of the IF bit will be ignored.
2189	 * The virtual IF bit is tweaked by CLI and STI.
2190	 */
2191	IE_TO_EVENT_MASK(%rsi, %rdi)
2192#else
2193	sti
2194#endif
21951:
2196	ret
2197	SET_SIZE(restore_int_flag)
2198	SET_SIZE(intr_restore)
2199
2200#elif defined(__i386)
2201
2202	ENTRY(intr_restore)
2203	ENTRY(restore_int_flag)
2204	testl	$PS_IE, 4(%esp)
2205	jz	1f
2206#if defined(__xpv)
2207	leal	xpv_panicking, %edx
2208	movl	(%edx), %edx
2209	cmpl	$0, %edx
2210	jne	1f
2211	/*
2212	 * Since we're -really- running unprivileged, our attempt
2213	 * to change the state of the IF bit will be ignored.
2214	 * The virtual IF bit is tweaked by CLI and STI.
2215	 */
2216	IE_TO_EVENT_MASK(%edx, 4(%esp))
2217#else
2218	sti
2219#endif
22201:
2221	ret
2222	SET_SIZE(restore_int_flag)
2223	SET_SIZE(intr_restore)
2224
2225#endif	/* __i386 */
2226#endif	/* __lint */
2227
2228#if defined(__lint)
2229
2230void
2231sti(void)
2232{}
2233
2234void
2235cli(void)
2236{}
2237
2238#else	/* __lint */
2239
2240	ENTRY(sti)
2241	STI
2242	ret
2243	SET_SIZE(sti)
2244
2245	ENTRY(cli)
2246#if defined(__amd64)
2247	CLI(%rax)
2248#elif defined(__i386)
2249	CLI(%eax)
2250#endif	/* __i386 */
2251	ret
2252	SET_SIZE(cli)
2253
2254#endif	/* __lint */
2255
2256#if defined(__lint)
2257
2258dtrace_icookie_t
2259dtrace_interrupt_disable(void)
2260{ return (0); }
2261
2262#else   /* __lint */
2263
2264#if defined(__amd64)
2265
2266	ENTRY(dtrace_interrupt_disable)
2267	pushfq
2268	popq	%rax
2269#if defined(__xpv)
2270	leaq	xpv_panicking, %rdi
2271	movl	(%rdi), %edi
2272	cmpl	$0, %edi
2273	jne	.dtrace_interrupt_disable_done
2274	CLIRET(%rdi, %dl)	/* returns event mask in %dl */
2275	/*
2276	 * Synthesize the PS_IE bit from the event mask bit
2277	 */
2278	andq    $_BITNOT(PS_IE), %rax
2279	testb	$1, %dl
2280	jnz	.dtrace_interrupt_disable_done
2281	orq	$PS_IE, %rax
2282#else
2283	CLI(%rdx)
2284#endif
2285.dtrace_interrupt_disable_done:
2286	ret
2287	SET_SIZE(dtrace_interrupt_disable)
2288
2289#elif defined(__i386)
2290
2291	ENTRY(dtrace_interrupt_disable)
2292	pushfl
2293	popl	%eax
2294#if defined(__xpv)
2295	leal	xpv_panicking, %edx
2296	movl	(%edx), %edx
2297	cmpl	$0, %edx
2298	jne	.dtrace_interrupt_disable_done
2299	CLIRET(%edx, %cl)	/* returns event mask in %cl */
2300	/*
2301	 * Synthesize the PS_IE bit from the event mask bit
2302	 */
2303	andl    $_BITNOT(PS_IE), %eax
2304	testb	$1, %cl
2305	jnz	.dtrace_interrupt_disable_done
2306	orl	$PS_IE, %eax
2307#else
2308	CLI(%edx)
2309#endif
2310.dtrace_interrupt_disable_done:
2311	ret
2312	SET_SIZE(dtrace_interrupt_disable)
2313
2314#endif	/* __i386 */
2315#endif	/* __lint */
2316
2317#if defined(__lint)
2318
2319/*ARGSUSED*/
2320void
2321dtrace_interrupt_enable(dtrace_icookie_t cookie)
2322{}
2323
2324#else	/* __lint */
2325
2326#if defined(__amd64)
2327
2328	ENTRY(dtrace_interrupt_enable)
2329	pushq	%rdi
2330	popfq
2331#if defined(__xpv)
2332	leaq	xpv_panicking, %rdx
2333	movl	(%rdx), %edx
2334	cmpl	$0, %edx
2335	jne	.dtrace_interrupt_enable_done
2336	/*
2337	 * Since we're -really- running unprivileged, our attempt
2338	 * to change the state of the IF bit will be ignored. The
2339	 * virtual IF bit is tweaked by CLI and STI.
2340	 */
2341	IE_TO_EVENT_MASK(%rdx, %rdi)
2342#endif
2343.dtrace_interrupt_enable_done:
2344	ret
2345	SET_SIZE(dtrace_interrupt_enable)
2346
2347#elif defined(__i386)
2348
2349	ENTRY(dtrace_interrupt_enable)
2350	movl	4(%esp), %eax
2351	pushl	%eax
2352	popfl
2353#if defined(__xpv)
2354	leal	xpv_panicking, %edx
2355	movl	(%edx), %edx
2356	cmpl	$0, %edx
2357	jne	.dtrace_interrupt_enable_done
2358	/*
2359	 * Since we're -really- running unprivileged, our attempt
2360	 * to change the state of the IF bit will be ignored. The
2361	 * virtual IF bit is tweaked by CLI and STI.
2362	 */
2363	IE_TO_EVENT_MASK(%edx, %eax)
2364#endif
2365.dtrace_interrupt_enable_done:
2366	ret
2367	SET_SIZE(dtrace_interrupt_enable)
2368
2369#endif	/* __i386 */
2370#endif	/* __lint */
2371
2372
2373#if defined(__lint)
2374
2375void
2376dtrace_membar_producer(void)
2377{}
2378
2379void
2380dtrace_membar_consumer(void)
2381{}
2382
2383#else	/* __lint */
2384
2385	ENTRY(dtrace_membar_producer)
2386	rep;	ret	/* use 2 byte return instruction when branch target */
2387			/* AMD Software Optimization Guide - Section 6.2 */
2388	SET_SIZE(dtrace_membar_producer)
2389
2390	ENTRY(dtrace_membar_consumer)
2391	rep;	ret	/* use 2 byte return instruction when branch target */
2392			/* AMD Software Optimization Guide - Section 6.2 */
2393	SET_SIZE(dtrace_membar_consumer)
2394
2395#endif	/* __lint */
2396
2397#if defined(__lint)
2398
2399kthread_id_t
2400threadp(void)
2401{ return ((kthread_id_t)0); }
2402
2403#else	/* __lint */
2404
2405#if defined(__amd64)
2406
2407	ENTRY(threadp)
2408	movq	%gs:CPU_THREAD, %rax
2409	ret
2410	SET_SIZE(threadp)
2411
2412#elif defined(__i386)
2413
2414	ENTRY(threadp)
2415	movl	%gs:CPU_THREAD, %eax
2416	ret
2417	SET_SIZE(threadp)
2418
2419#endif	/* __i386 */
2420#endif	/* __lint */
2421
2422/*
2423 *   Checksum routine for Internet Protocol Headers
2424 */
2425
2426#if defined(__lint)
2427
2428/* ARGSUSED */
2429unsigned int
2430ip_ocsum(
2431	ushort_t *address,	/* ptr to 1st message buffer */
2432	int halfword_count,	/* length of data */
2433	unsigned int sum)	/* partial checksum */
2434{
2435	int		i;
2436	unsigned int	psum = 0;	/* partial sum */
2437
2438	for (i = 0; i < halfword_count; i++, address++) {
2439		psum += *address;
2440	}
2441
2442	while ((psum >> 16) != 0) {
2443		psum = (psum & 0xffff) + (psum >> 16);
2444	}
2445
2446	psum += sum;
2447
2448	while ((psum >> 16) != 0) {
2449		psum = (psum & 0xffff) + (psum >> 16);
2450	}
2451
2452	return (psum);
2453}
2454
2455#else	/* __lint */
2456
2457#if defined(__amd64)
2458
2459	ENTRY(ip_ocsum)
2460	pushq	%rbp
2461	movq	%rsp, %rbp
2462#ifdef DEBUG
2463	movq	postbootkernelbase(%rip), %rax
2464	cmpq	%rax, %rdi
2465	jnb	1f
2466	xorl	%eax, %eax
2467	movq	%rdi, %rsi
2468	leaq	.ip_ocsum_panic_msg(%rip), %rdi
2469	call	panic
2470	/*NOTREACHED*/
2471.ip_ocsum_panic_msg:
2472	.string	"ip_ocsum: address 0x%p below kernelbase\n"
24731:
2474#endif
2475	movl	%esi, %ecx	/* halfword_count */
2476	movq	%rdi, %rsi	/* address */
2477				/* partial sum in %edx */
2478	xorl	%eax, %eax
2479	testl	%ecx, %ecx
2480	jz	.ip_ocsum_done
2481	testq	$3, %rsi
2482	jnz	.ip_csum_notaligned
2483.ip_csum_aligned:	/* XX64 opportunities for 8-byte operations? */
2484.next_iter:
2485	/* XX64 opportunities for prefetch? */
2486	/* XX64 compute csum with 64 bit quantities? */
2487	subl	$32, %ecx
2488	jl	.less_than_32
2489
2490	addl	0(%rsi), %edx
2491.only60:
2492	adcl	4(%rsi), %eax
2493.only56:
2494	adcl	8(%rsi), %edx
2495.only52:
2496	adcl	12(%rsi), %eax
2497.only48:
2498	adcl	16(%rsi), %edx
2499.only44:
2500	adcl	20(%rsi), %eax
2501.only40:
2502	adcl	24(%rsi), %edx
2503.only36:
2504	adcl	28(%rsi), %eax
2505.only32:
2506	adcl	32(%rsi), %edx
2507.only28:
2508	adcl	36(%rsi), %eax
2509.only24:
2510	adcl	40(%rsi), %edx
2511.only20:
2512	adcl	44(%rsi), %eax
2513.only16:
2514	adcl	48(%rsi), %edx
2515.only12:
2516	adcl	52(%rsi), %eax
2517.only8:
2518	adcl	56(%rsi), %edx
2519.only4:
2520	adcl	60(%rsi), %eax	/* could be adding -1 and -1 with a carry */
2521.only0:
2522	adcl	$0, %eax	/* could be adding -1 in eax with a carry */
2523	adcl	$0, %eax
2524
2525	addq	$64, %rsi
2526	testl	%ecx, %ecx
2527	jnz	.next_iter
2528
2529.ip_ocsum_done:
2530	addl	%eax, %edx
2531	adcl	$0, %edx
2532	movl	%edx, %eax	/* form a 16 bit checksum by */
2533	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
2534	addw	%dx, %ax
2535	adcw	$0, %ax
2536	andl	$0xffff, %eax
2537	leave
2538	ret
2539
2540.ip_csum_notaligned:
2541	xorl	%edi, %edi
2542	movw	(%rsi), %di
2543	addl	%edi, %edx
2544	adcl	$0, %edx
2545	addq	$2, %rsi
2546	decl	%ecx
2547	jmp	.ip_csum_aligned
2548
2549.less_than_32:
2550	addl	$32, %ecx
2551	testl	$1, %ecx
2552	jz	.size_aligned
2553	andl	$0xfe, %ecx
2554	movzwl	(%rsi, %rcx, 2), %edi
2555	addl	%edi, %edx
2556	adcl	$0, %edx
2557.size_aligned:
2558	movl	%ecx, %edi
2559	shrl	$1, %ecx
2560	shl	$1, %edi
2561	subq	$64, %rdi
2562	addq	%rdi, %rsi
2563	leaq    .ip_ocsum_jmptbl(%rip), %rdi
2564	leaq	(%rdi, %rcx, 8), %rdi
2565	xorl	%ecx, %ecx
2566	clc
2567	jmp 	*(%rdi)
2568
2569	.align	8
2570.ip_ocsum_jmptbl:
2571	.quad	.only0, .only4, .only8, .only12, .only16, .only20
2572	.quad	.only24, .only28, .only32, .only36, .only40, .only44
2573	.quad	.only48, .only52, .only56, .only60
2574	SET_SIZE(ip_ocsum)
2575
2576#elif defined(__i386)
2577
2578	ENTRY(ip_ocsum)
2579	pushl	%ebp
2580	movl	%esp, %ebp
2581	pushl	%ebx
2582	pushl	%esi
2583	pushl	%edi
2584	movl	12(%ebp), %ecx	/* count of half words */
2585	movl	16(%ebp), %edx	/* partial checksum */
2586	movl	8(%ebp), %esi
2587	xorl	%eax, %eax
2588	testl	%ecx, %ecx
2589	jz	.ip_ocsum_done
2590
2591	testl	$3, %esi
2592	jnz	.ip_csum_notaligned
2593.ip_csum_aligned:
2594.next_iter:
2595	subl	$32, %ecx
2596	jl	.less_than_32
2597
2598	addl	0(%esi), %edx
2599.only60:
2600	adcl	4(%esi), %eax
2601.only56:
2602	adcl	8(%esi), %edx
2603.only52:
2604	adcl	12(%esi), %eax
2605.only48:
2606	adcl	16(%esi), %edx
2607.only44:
2608	adcl	20(%esi), %eax
2609.only40:
2610	adcl	24(%esi), %edx
2611.only36:
2612	adcl	28(%esi), %eax
2613.only32:
2614	adcl	32(%esi), %edx
2615.only28:
2616	adcl	36(%esi), %eax
2617.only24:
2618	adcl	40(%esi), %edx
2619.only20:
2620	adcl	44(%esi), %eax
2621.only16:
2622	adcl	48(%esi), %edx
2623.only12:
2624	adcl	52(%esi), %eax
2625.only8:
2626	adcl	56(%esi), %edx
2627.only4:
2628	adcl	60(%esi), %eax	/* We could be adding -1 and -1 with a carry */
2629.only0:
2630	adcl	$0, %eax	/* we could be adding -1 in eax with a carry */
2631	adcl	$0, %eax
2632
2633	addl	$64, %esi
2634	andl	%ecx, %ecx
2635	jnz	.next_iter
2636
2637.ip_ocsum_done:
2638	addl	%eax, %edx
2639	adcl	$0, %edx
2640	movl	%edx, %eax	/* form a 16 bit checksum by */
2641	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
2642	addw	%dx, %ax
2643	adcw	$0, %ax
2644	andl	$0xffff, %eax
2645	popl	%edi		/* restore registers */
2646	popl	%esi
2647	popl	%ebx
2648	leave
2649	ret
2650
2651.ip_csum_notaligned:
2652	xorl	%edi, %edi
2653	movw	(%esi), %di
2654	addl	%edi, %edx
2655	adcl	$0, %edx
2656	addl	$2, %esi
2657	decl	%ecx
2658	jmp	.ip_csum_aligned
2659
2660.less_than_32:
2661	addl	$32, %ecx
2662	testl	$1, %ecx
2663	jz	.size_aligned
2664	andl	$0xfe, %ecx
2665	movzwl	(%esi, %ecx, 2), %edi
2666	addl	%edi, %edx
2667	adcl	$0, %edx
2668.size_aligned:
2669	movl	%ecx, %edi
2670	shrl	$1, %ecx
2671	shl	$1, %edi
2672	subl	$64, %edi
2673	addl	%edi, %esi
2674	movl	$.ip_ocsum_jmptbl, %edi
2675	lea	(%edi, %ecx, 4), %edi
2676	xorl	%ecx, %ecx
2677	clc
2678	jmp 	*(%edi)
2679	SET_SIZE(ip_ocsum)
2680
2681	.data
2682	.align	4
2683
2684.ip_ocsum_jmptbl:
2685	.long	.only0, .only4, .only8, .only12, .only16, .only20
2686	.long	.only24, .only28, .only32, .only36, .only40, .only44
2687	.long	.only48, .only52, .only56, .only60
2688
2689
2690#endif	/* __i386 */
2691#endif	/* __lint */
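
/*
 * The .ip_ocsum_done code above folds the 32-bit accumulator down to a
 * 16-bit value by adding the two halves and then re-adding any carry.
 * As a rough C sketch of that fold (the function name and types here are
 * purely illustrative, not part of the kernel interface):
 *
 *	static uint16_t
 *	fold_csum32(uint32_t sum)
 *	{
 *		sum = (sum >> 16) + (sum & 0xffff);
 *		sum += sum >> 16;
 *		return ((uint16_t)sum);
 *	}
 */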
2692
2693/*
2694 * multiply two long numbers and yield a u_longlong_t result, callable from C.
2695 * Provided to manipulate hrtime_t values.
2696 */
2697#if defined(__lint)
2698
2699/* result = a * b; */
2700
2701/* ARGSUSED */
2702unsigned long long
2703mul32(uint_t a, uint_t b)
2704{ return (0); }
2705
2706#else	/* __lint */
2707
2708#if defined(__amd64)
2709
2710	ENTRY(mul32)
2711	xorl	%edx, %edx	/* XX64 joe, paranoia? */
2712	movl	%edi, %eax
2713	mull	%esi
2714	shlq	$32, %rdx
2715	orq	%rdx, %rax
2716	ret
2717	SET_SIZE(mul32)
2718
2719#elif defined(__i386)
2720
2721	ENTRY(mul32)
2722	movl	8(%esp), %eax
2723	movl	4(%esp), %ecx
2724	mull	%ecx
2725	ret
2726	SET_SIZE(mul32)
2727
2728#endif	/* __i386 */
2729#endif	/* __lint */
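
/*
 * In C, mul32() amounts to the widening multiply below (a sketch mirroring
 * the lint stub above; the hand-written assembly simply guarantees that a
 * single mull instruction is used):
 *
 *	unsigned long long
 *	mul32(uint_t a, uint_t b)
 *	{
 *		return ((unsigned long long)a * b);
 *	}
 */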
2730
2731#if defined(notused)
2732#if defined(__lint)
2733/* ARGSUSED */
2734void
2735load_pte64(uint64_t *pte, uint64_t pte_value)
2736{}
2737#else	/* __lint */
2738	.globl load_pte64
2739load_pte64:
2740	movl	4(%esp), %eax
2741	movl	8(%esp), %ecx
2742	movl	12(%esp), %edx
2743	movl	%edx, 4(%eax)
2744	movl	%ecx, (%eax)
2745	ret
2746#endif	/* __lint */
2747#endif	/* notused */
2748
2749#if defined(__lint)
2750
2751/*ARGSUSED*/
2752void
2753scan_memory(caddr_t addr, size_t size)
2754{}
2755
2756#else	/* __lint */
2757
2758#if defined(__amd64)
2759
2760	ENTRY(scan_memory)
2761	shrq	$3, %rsi	/* convert %rsi from byte to quadword count */
2762	jz	.scanm_done
2763	movq	%rsi, %rcx	/* move count into rep control register */
2764	movq	%rdi, %rsi	/* move addr into lodsq control reg. */
2765	rep lodsq		/* scan the memory range */
2766.scanm_done:
2767	rep;	ret	/* use 2 byte return instruction when branch target */
2768			/* AMD Software Optimization Guide - Section 6.2 */
2769	SET_SIZE(scan_memory)
2770
2771#elif defined(__i386)
2772
2773	ENTRY(scan_memory)
2774	pushl	%ecx
2775	pushl	%esi
2776	movl	16(%esp), %ecx	/* move 2nd arg into rep control register */
2777	shrl	$2, %ecx	/* convert from byte count to word count */
2778	jz	.scanm_done
2779	movl	12(%esp), %esi	/* move 1st arg into lodsw control register */
2780	.byte	0xf3		/* rep prefix.  lame assembler.  sigh. */
2781	lodsl
2782.scanm_done:
2783	popl	%esi
2784	popl	%ecx
2785	ret
2786	SET_SIZE(scan_memory)
2787
2788#endif	/* __i386 */
2789#endif	/* __lint */
2790
2791
2792#if defined(__lint)
2793
2794/*ARGSUSED */
2795int
2796lowbit(ulong_t i)
2797{ return (0); }
2798
2799#else	/* __lint */
2800
2801#if defined(__amd64)
2802
2803	ENTRY(lowbit)
2804	movl	$-1, %eax
2805	bsfq	%rdi, %rdi
2806	cmovnz	%edi, %eax
2807	incl	%eax
2808	ret
2809	SET_SIZE(lowbit)
2810
2811#elif defined(__i386)
2812
2813	ENTRY(lowbit)
2814	bsfl	4(%esp), %eax
2815	jz	0f
2816	incl	%eax
2817	ret
28180:
2819	xorl	%eax, %eax
2820	ret
2821	SET_SIZE(lowbit)
2822
2823#endif	/* __i386 */
2824#endif	/* __lint */
2825
2826#if defined(__lint)
2827
2828/*ARGSUSED*/
2829int
2830highbit(ulong_t i)
2831{ return (0); }
2832
2833/*ARGSUSED*/
2834int
2835highbit64(uint64_t i)
2836{ return (0); }
2837
2838#else	/* __lint */
2839
2840#if defined(__amd64)
2841
2842	ENTRY(highbit)
2843	ALTENTRY(highbit64)
2844	movl	$-1, %eax
2845	bsrq	%rdi, %rdi
2846	cmovnz	%edi, %eax
2847	incl	%eax
2848	ret
2849	SET_SIZE(highbit64)
2850	SET_SIZE(highbit)
2851
2852#elif defined(__i386)
2853
2854	ENTRY(highbit)
2855	bsrl	4(%esp), %eax
2856	jz	0f
2857	incl	%eax
2858	ret
28590:
2860	xorl	%eax, %eax
2861	ret
2862	SET_SIZE(highbit)
2863
2864	ENTRY(highbit64)
2865	bsrl	8(%esp), %eax
2866	jz	highbit
2867	addl	$33, %eax
2868	ret
2869	SET_SIZE(highbit64)
2870
2871#endif	/* __i386 */
2872#endif	/* __lint */
2873
2874#if defined(__lint)
2875
2876/*ARGSUSED*/
2877uint64_t
2878rdmsr(uint_t r)
2879{ return (0); }
2880
2881/*ARGSUSED*/
2882void
2883wrmsr(uint_t r, const uint64_t val)
2884{}
2885
2886/*ARGSUSED*/
2887uint64_t
2888xrdmsr(uint_t r)
2889{ return (0); }
2890
2891/*ARGSUSED*/
2892void
2893xwrmsr(uint_t r, const uint64_t val)
2894{}
2895
2896void
2897invalidate_cache(void)
2898{}
2899
2900/*ARGSUSED*/
2901uint64_t
2902get_xcr(uint_t r)
2903{ return (0); }
2904
2905/*ARGSUSED*/
2906void
2907set_xcr(uint_t r, const uint64_t val)
2908{}
2909
2910#else  /* __lint */
2911
2912#define	XMSR_ACCESS_VAL		$0x9c5a203a
2913
2914#if defined(__amd64)
2915
2916	ENTRY(rdmsr)
2917	movl	%edi, %ecx
2918	rdmsr
2919	shlq	$32, %rdx
2920	orq	%rdx, %rax
2921	ret
2922	SET_SIZE(rdmsr)
2923
2924	ENTRY(wrmsr)
2925	movq	%rsi, %rdx
2926	shrq	$32, %rdx
2927	movl	%esi, %eax
2928	movl	%edi, %ecx
2929	wrmsr
2930	ret
2931	SET_SIZE(wrmsr)
2932
2933	ENTRY(xrdmsr)
2934	pushq	%rbp
2935	movq	%rsp, %rbp
2936	movl	%edi, %ecx
2937	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
2938	rdmsr
2939	shlq	$32, %rdx
2940	orq	%rdx, %rax
2941	leave
2942	ret
2943	SET_SIZE(xrdmsr)
2944
2945	ENTRY(xwrmsr)
2946	pushq	%rbp
2947	movq	%rsp, %rbp
2948	movl	%edi, %ecx
2949	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
2950	movq	%rsi, %rdx
2951	shrq	$32, %rdx
2952	movl	%esi, %eax
2953	wrmsr
2954	leave
2955	ret
2956	SET_SIZE(xwrmsr)
2957
2958	ENTRY(get_xcr)
2959	movl	%edi, %ecx
2960	#xgetbv
2961	.byte	0x0f,0x01,0xd0
2962	shlq	$32, %rdx
2963	orq	%rdx, %rax
2964	ret
2965	SET_SIZE(get_xcr)
2966
2967	ENTRY(set_xcr)
2968	movq	%rsi, %rdx
2969	shrq	$32, %rdx
2970	movl	%esi, %eax
2971	movl	%edi, %ecx
2972	#xsetbv
2973	.byte	0x0f,0x01,0xd1
2974	ret
2975	SET_SIZE(set_xcr)
2976
2977#elif defined(__i386)
2978
2979	ENTRY(rdmsr)
2980	movl	4(%esp), %ecx
2981	rdmsr
2982	ret
2983	SET_SIZE(rdmsr)
2984
2985	ENTRY(wrmsr)
2986	movl	4(%esp), %ecx
2987	movl	8(%esp), %eax
2988	movl	12(%esp), %edx
2989	wrmsr
2990	ret
2991	SET_SIZE(wrmsr)
2992
2993	ENTRY(xrdmsr)
2994	pushl	%ebp
2995	movl	%esp, %ebp
2996	movl	8(%esp), %ecx
2997	pushl	%edi
2998	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
2999	rdmsr
3000	popl	%edi
3001	leave
3002	ret
3003	SET_SIZE(xrdmsr)
3004
3005	ENTRY(xwrmsr)
3006	pushl	%ebp
3007	movl	%esp, %ebp
3008	movl	8(%esp), %ecx
3009	movl	12(%esp), %eax
3010	movl	16(%esp), %edx
3011	pushl	%edi
3012	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
3013	wrmsr
3014	popl	%edi
3015	leave
3016	ret
3017	SET_SIZE(xwrmsr)
3018
3019	ENTRY(get_xcr)
3020	movl	4(%esp), %ecx
3021	#xgetbv
3022	.byte	0x0f,0x01,0xd0
3023	ret
3024	SET_SIZE(get_xcr)
3025
3026	ENTRY(set_xcr)
3027	movl	4(%esp), %ecx
3028	movl	8(%esp), %eax
3029	movl	12(%esp), %edx
3030	#xsetbv
3031	.byte	0x0f,0x01,0xd1
3032	ret
3033	SET_SIZE(set_xcr)
3034
3035#endif	/* __i386 */
3036
3037	ENTRY(invalidate_cache)
3038	wbinvd
3039	ret
3040	SET_SIZE(invalidate_cache)
3041
3042#endif	/* __lint */
3043
3044#if defined(__lint)
3045
3046/*ARGSUSED*/
3047void
3048getcregs(struct cregs *crp)
3049{}
3050
3051#else	/* __lint */
3052
3053#if defined(__amd64)
3054
3055	ENTRY_NP(getcregs)
3056#if defined(__xpv)
3057	/*
3058	 * Only a few of the hardware control registers or descriptor tables
3059	 * are directly accessible to us, so just zero the structure.
3060	 *
3061	 * XXPV	Perhaps it would be helpful for the hypervisor to return
3062	 *	virtualized versions of these for post-mortem use.
3063	 *	(Need to reevaluate - perhaps it already does!)
3064	 */
3065	pushq	%rdi		/* save *crp */
3066	movq	$CREGSZ, %rsi
3067	call	bzero
3068	popq	%rdi
3069
3070	/*
3071	 * Dump what limited information we can
3072	 */
3073	movq	%cr0, %rax
3074	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
3075	movq	%cr2, %rax
3076	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
3077	movq	%cr3, %rax
3078	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
3079	movq	%cr4, %rax
3080	movq	%rax, CREG_CR4(%rdi)	/* cr4 */
3081
3082#else	/* __xpv */
3083
3084#define	GETMSR(r, off, d)	\
3085	movl	$r, %ecx;	\
3086	rdmsr;			\
3087	movl	%eax, off(d);	\
3088	movl	%edx, off+4(d)
3089
3090	xorl	%eax, %eax
3091	movq	%rax, CREG_GDT+8(%rdi)
3092	sgdt	CREG_GDT(%rdi)		/* 10 bytes */
3093	movq	%rax, CREG_IDT+8(%rdi)
3094	sidt	CREG_IDT(%rdi)		/* 10 bytes */
3095	movq	%rax, CREG_LDT(%rdi)
3096	sldt	CREG_LDT(%rdi)		/* 2 bytes */
3097	movq	%rax, CREG_TASKR(%rdi)
3098	str	CREG_TASKR(%rdi)	/* 2 bytes */
3099	movq	%cr0, %rax
3100	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
3101	movq	%cr2, %rax
3102	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
3103	movq	%cr3, %rax
3104	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
3105	movq	%cr4, %rax
3106	movq	%rax, CREG_CR4(%rdi)	/* cr4 */
3107	movq	%cr8, %rax
3108	movq	%rax, CREG_CR8(%rdi)	/* cr8 */
3109	GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
3110	GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
3111#endif	/* __xpv */
3112	ret
3113	SET_SIZE(getcregs)
3114
3115#undef GETMSR
3116
3117#elif defined(__i386)
3118
3119	ENTRY_NP(getcregs)
3120#if defined(__xpv)
3121	/*
3122	 * Only a few of the hardware control registers or descriptor tables
3123	 * are directly accessible to us, so just zero the structure.
3124	 *
3125	 * XXPV	Perhaps it would be helpful for the hypervisor to return
3126	 *	virtualized versions of these for post-mortem use.
3127	 *	(Need to reevaluate - perhaps it already does!)
3128	 */
3129	movl	4(%esp), %edx
3130	pushl	$CREGSZ
3131	pushl	%edx
3132	call	bzero
3133	addl	$8, %esp
3134	movl	4(%esp), %edx
3135
3136	/*
3137	 * Dump what limited information we can
3138	 */
3139	movl	%cr0, %eax
3140	movl	%eax, CREG_CR0(%edx)	/* cr0 */
3141	movl	%cr2, %eax
3142	movl	%eax, CREG_CR2(%edx)	/* cr2 */
3143	movl	%cr3, %eax
3144	movl	%eax, CREG_CR3(%edx)	/* cr3 */
3145	movl	%cr4, %eax
3146	movl	%eax, CREG_CR4(%edx)	/* cr4 */
3147
3148#else	/* __xpv */
3149
3150	movl	4(%esp), %edx
3151	movw	$0, CREG_GDT+6(%edx)
3152	movw	$0, CREG_IDT+6(%edx)
3153	sgdt	CREG_GDT(%edx)		/* gdt */
3154	sidt	CREG_IDT(%edx)		/* idt */
3155	sldt	CREG_LDT(%edx)		/* ldt */
3156	str	CREG_TASKR(%edx)	/* task */
3157	movl	%cr0, %eax
3158	movl	%eax, CREG_CR0(%edx)	/* cr0 */
3159	movl	%cr2, %eax
3160	movl	%eax, CREG_CR2(%edx)	/* cr2 */
3161	movl	%cr3, %eax
3162	movl	%eax, CREG_CR3(%edx)	/* cr3 */
3163	bt	$X86FSET_LARGEPAGE, x86_featureset
3164	jnc	.nocr4
3165	movl	%cr4, %eax
3166	movl	%eax, CREG_CR4(%edx)	/* cr4 */
3167	jmp	.skip
3168.nocr4:
3169	movl	$0, CREG_CR4(%edx)
3170.skip:
3171#endif
3172	ret
3173	SET_SIZE(getcregs)
3174
3175#endif	/* __i386 */
3176#endif	/* __lint */
3177
3178
3179/*
3180 * A panic trigger is a word which is updated atomically and can only be set
3181 * once.  We atomically store 0xDEFACEDD and load the old value.  If the
3182 * previous value was 0, we succeed and return 1; otherwise return 0.
3183 * This allows a partially corrupt trigger to still trigger correctly.  DTrace
3184 * has its own version of this function to allow it to panic correctly from
3185 * probe context.
3186 */
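
/*
 * In C terms the trigger is a single atomic exchange.  A sketch only,
 * assuming atomic_swap_uint() from <sys/atomic.h>; the assembly below uses
 * a locked xchgl directly:
 *
 *	int
 *	panic_trigger(int *tp)
 *	{
 *		uint_t old = atomic_swap_uint((uint_t *)tp, 0xdefacedd);
 *
 *		return (old == 0);
 *	}
 */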
3187#if defined(__lint)
3188
3189/*ARGSUSED*/
3190int
3191panic_trigger(int *tp)
3192{ return (0); }
3193
3194/*ARGSUSED*/
3195int
3196dtrace_panic_trigger(int *tp)
3197{ return (0); }
3198
3199#else	/* __lint */
3200
3201#if defined(__amd64)
3202
3203	ENTRY_NP(panic_trigger)
3204	xorl	%eax, %eax
3205	movl	$0xdefacedd, %edx
3206	lock
3207	  xchgl	%edx, (%rdi)
3208	cmpl	$0, %edx
3209	je	0f
3210	movl	$0, %eax
3211	ret
32120:	movl	$1, %eax
3213	ret
3214	SET_SIZE(panic_trigger)
3215
3216	ENTRY_NP(dtrace_panic_trigger)
3217	xorl	%eax, %eax
3218	movl	$0xdefacedd, %edx
3219	lock
3220	  xchgl	%edx, (%rdi)
3221	cmpl	$0, %edx
3222	je	0f
3223	movl	$0, %eax
3224	ret
32250:	movl	$1, %eax
3226	ret
3227	SET_SIZE(dtrace_panic_trigger)
3228
3229#elif defined(__i386)
3230
3231	ENTRY_NP(panic_trigger)
3232	movl	4(%esp), %edx		/ %edx = address of trigger
3233	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
3234	lock				/ assert lock
3235	xchgl %eax, (%edx)		/ exchange %eax and the trigger
3236	cmpl	$0, %eax		/ if (%eax == 0x0)
3237	je	0f			/   return (1);
3238	movl	$0, %eax		/ else
3239	ret				/   return (0);
32400:	movl	$1, %eax
3241	ret
3242	SET_SIZE(panic_trigger)
3243
3244	ENTRY_NP(dtrace_panic_trigger)
3245	movl	4(%esp), %edx		/ %edx = address of trigger
3246	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
3247	lock				/ assert lock
3248	xchgl %eax, (%edx)		/ exchange %eax and the trigger
3249	cmpl	$0, %eax		/ if (%eax == 0x0)
3250	je	0f			/   return (1);
3251	movl	$0, %eax		/ else
3252	ret				/   return (0);
32530:	movl	$1, %eax
3254	ret
3255	SET_SIZE(dtrace_panic_trigger)
3256
3257#endif	/* __i386 */
3258#endif	/* __lint */
3259
3260/*
3261 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
3262 * into the panic code implemented in panicsys().  vpanic() is responsible
3263 * for passing through the format string and arguments, and constructing a
3264 * regs structure on the stack into which it saves the current register
3265 * values.  If we are not dying due to a fatal trap, these registers will
3266 * then be preserved in panicbuf as the current processor state.  Before
3267 * invoking panicsys(), vpanic() activates the first panic trigger (see
3268 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
3269 * DTrace takes a slightly different panic path if it must panic from probe
3270 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
3271 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
3272 * branches back into vpanic().
3273 */
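
/*
 * Stripped of the register capture and the switch onto panic_stack (both of
 * which can only be done in assembly), the common path reduces to roughly
 * the sketch below; dtrace_vpanic() differs only in calling
 * dtrace_panic_trigger() instead:
 *
 *	void
 *	vpanic(const char *format, va_list alist)
 *	{
 *		struct regs rp;
 *		int on_panic_stack;
 *
 *		on_panic_stack = panic_trigger(&panic_quiesce);
 *		panicsys(format, alist, &rp, on_panic_stack);
 *	}
 */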
3274#if defined(__lint)
3275
3276/*ARGSUSED*/
3277void
3278vpanic(const char *format, va_list alist)
3279{}
3280
3281/*ARGSUSED*/
3282void
3283dtrace_vpanic(const char *format, va_list alist)
3284{}
3285
3286#else	/* __lint */
3287
3288#if defined(__amd64)
3289
3290	ENTRY_NP(vpanic)			/* Initial stack layout: */
3291
3292	pushq	%rbp				/* | %rip | 	0x60	*/
3293	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
3294	pushfq					/* | rfl  |	0x50	*/
3295	pushq	%r11				/* | %r11 |	0x48	*/
3296	pushq	%r10				/* | %r10 |	0x40	*/
3297	pushq	%rbx				/* | %rbx |	0x38	*/
3298	pushq	%rax				/* | %rax |	0x30	*/
3299	pushq	%r9				/* | %r9  |	0x28	*/
3300	pushq	%r8				/* | %r8  |	0x20	*/
3301	pushq	%rcx				/* | %rcx |	0x18	*/
3302	pushq	%rdx				/* | %rdx |	0x10	*/
3303	pushq	%rsi				/* | %rsi |	0x8 alist */
3304	pushq	%rdi				/* | %rdi |	0x0 format */
3305
3306	movq	%rsp, %rbx			/* %rbx = current %rsp */
3307
3308	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
3309	call	panic_trigger			/* %eax = panic_trigger() */
3310
3311vpanic_common:
3312	/*
3313	 * The panic_trigger result is in %eax from the call above, and
3314	 * dtrace_vpanic() leaves the dtrace_panic_trigger() result there before
3315	 * branching here.  The instructions that follow will clobber %eax, so
3316	 * we stash the panic_trigger result in %r11d.
3317	 */
3318	movl	%eax, %r11d
3319	cmpl	$0, %r11d
3320	je	0f
3321
3322	/*
3323	 * If panic_trigger() was successful, we are the first to initiate a
3324	 * panic: we now switch to the reserved panic_stack before continuing.
3325	 */
3326	leaq	panic_stack(%rip), %rsp
3327	addq	$PANICSTKSIZE, %rsp
33280:	subq	$REGSIZE, %rsp
3329	/*
3330	 * Now that we've got everything set up, store the register values as
3331	 * they were when we entered vpanic() to the designated location in
3332	 * the regs structure we allocated on the stack.
3333	 */
3334	movq	0x0(%rbx), %rcx
3335	movq	%rcx, REGOFF_RDI(%rsp)
3336	movq	0x8(%rbx), %rcx
3337	movq	%rcx, REGOFF_RSI(%rsp)
3338	movq	0x10(%rbx), %rcx
3339	movq	%rcx, REGOFF_RDX(%rsp)
3340	movq	0x18(%rbx), %rcx
3341	movq	%rcx, REGOFF_RCX(%rsp)
3342	movq	0x20(%rbx), %rcx
3343
3344	movq	%rcx, REGOFF_R8(%rsp)
3345	movq	0x28(%rbx), %rcx
3346	movq	%rcx, REGOFF_R9(%rsp)
3347	movq	0x30(%rbx), %rcx
3348	movq	%rcx, REGOFF_RAX(%rsp)
3349	movq	0x38(%rbx), %rcx
3350	movq	%rcx, REGOFF_RBX(%rsp)
3351	movq	0x58(%rbx), %rcx
3352
3353	movq	%rcx, REGOFF_RBP(%rsp)
3354	movq	0x40(%rbx), %rcx
3355	movq	%rcx, REGOFF_R10(%rsp)
3356	movq	0x48(%rbx), %rcx
3357	movq	%rcx, REGOFF_R11(%rsp)
3358	movq	%r12, REGOFF_R12(%rsp)
3359
3360	movq	%r13, REGOFF_R13(%rsp)
3361	movq	%r14, REGOFF_R14(%rsp)
3362	movq	%r15, REGOFF_R15(%rsp)
3363
3364	xorl	%ecx, %ecx
3365	movw	%ds, %cx
3366	movq	%rcx, REGOFF_DS(%rsp)
3367	movw	%es, %cx
3368	movq	%rcx, REGOFF_ES(%rsp)
3369	movw	%fs, %cx
3370	movq	%rcx, REGOFF_FS(%rsp)
3371	movw	%gs, %cx
3372	movq	%rcx, REGOFF_GS(%rsp)
3373
3374	movq	$0, REGOFF_TRAPNO(%rsp)
3375
3376	movq	$0, REGOFF_ERR(%rsp)
3377	leaq	vpanic(%rip), %rcx
3378	movq	%rcx, REGOFF_RIP(%rsp)
3379	movw	%cs, %cx
3380	movzwq	%cx, %rcx
3381	movq	%rcx, REGOFF_CS(%rsp)
3382	movq	0x50(%rbx), %rcx
3383	movq	%rcx, REGOFF_RFL(%rsp)
3384	movq	%rbx, %rcx
3385	addq	$0x60, %rcx
3386	movq	%rcx, REGOFF_RSP(%rsp)
3387	movw	%ss, %cx
3388	movzwq	%cx, %rcx
3389	movq	%rcx, REGOFF_SS(%rsp)
3390
3391	/*
3392	 * panicsys(format, alist, rp, on_panic_stack)
3393	 */
3394	movq	REGOFF_RDI(%rsp), %rdi		/* format */
3395	movq	REGOFF_RSI(%rsp), %rsi		/* alist */
3396	movq	%rsp, %rdx			/* struct regs */
3397	movl	%r11d, %ecx			/* on_panic_stack */
3398	call	panicsys
3399	addq	$REGSIZE, %rsp
3400	popq	%rdi
3401	popq	%rsi
3402	popq	%rdx
3403	popq	%rcx
3404	popq	%r8
3405	popq	%r9
3406	popq	%rax
3407	popq	%rbx
3408	popq	%r10
3409	popq	%r11
3410	popfq
3411	leave
3412	ret
3413	SET_SIZE(vpanic)
3414
3415	ENTRY_NP(dtrace_vpanic)			/* Initial stack layout: */
3416
3417	pushq	%rbp				/* | %rip | 	0x60	*/
3418	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
3419	pushfq					/* | rfl  |	0x50	*/
3420	pushq	%r11				/* | %r11 |	0x48	*/
3421	pushq	%r10				/* | %r10 |	0x40	*/
3422	pushq	%rbx				/* | %rbx |	0x38	*/
3423	pushq	%rax				/* | %rax |	0x30	*/
3424	pushq	%r9				/* | %r9  |	0x28	*/
3425	pushq	%r8				/* | %r8  |	0x20	*/
3426	pushq	%rcx				/* | %rcx |	0x18	*/
3427	pushq	%rdx				/* | %rdx |	0x10	*/
3428	pushq	%rsi				/* | %rsi |	0x8 alist */
3429	pushq	%rdi				/* | %rdi |	0x0 format */
3430
3431	movq	%rsp, %rbx			/* %rbx = current %rsp */
3432
3433	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
3434	call	dtrace_panic_trigger	/* %eax = dtrace_panic_trigger() */
3435	jmp	vpanic_common
3436
3437	SET_SIZE(dtrace_vpanic)
3438
3439#elif defined(__i386)
3440
3441	ENTRY_NP(vpanic)			/ Initial stack layout:
3442
3443	pushl	%ebp				/ | %eip | 20
3444	movl	%esp, %ebp			/ | %ebp | 16
3445	pushl	%eax				/ | %eax | 12
3446	pushl	%ebx				/ | %ebx |  8
3447	pushl	%ecx				/ | %ecx |  4
3448	pushl	%edx				/ | %edx |  0
3449
3450	movl	%esp, %ebx			/ %ebx = current stack pointer
3451
3452	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
3453	pushl	%eax				/ push &panic_quiesce
3454	call	panic_trigger			/ %eax = panic_trigger()
3455	addl	$4, %esp			/ reset stack pointer
3456
3457vpanic_common:
3458	cmpl	$0, %eax			/ if (%eax == 0)
3459	je	0f				/   goto 0f;
3460
3461	/*
3462	 * If panic_trigger() was successful, we are the first to initiate a
3463	 * panic: we now switch to the reserved panic_stack before continuing.
3464	 */
3465	lea	panic_stack, %esp		/ %esp  = panic_stack
3466	addl	$PANICSTKSIZE, %esp		/ %esp += PANICSTKSIZE
3467
34680:	subl	$REGSIZE, %esp			/ allocate struct regs
3469
3470	/*
3471	 * Now that we've got everything set up, store the register values as
3472	 * they were when we entered vpanic() to the designated location in
3473	 * the regs structure we allocated on the stack.
3474	 */
3475#if !defined(__GNUC_AS__)
3476	movw	%gs, %edx
3477	movl	%edx, REGOFF_GS(%esp)
3478	movw	%fs, %edx
3479	movl	%edx, REGOFF_FS(%esp)
3480	movw	%es, %edx
3481	movl	%edx, REGOFF_ES(%esp)
3482	movw	%ds, %edx
3483	movl	%edx, REGOFF_DS(%esp)
3484#else	/* __GNUC_AS__ */
3485	mov	%gs, %edx
3486	mov	%edx, REGOFF_GS(%esp)
3487	mov	%fs, %edx
3488	mov	%edx, REGOFF_FS(%esp)
3489	mov	%es, %edx
3490	mov	%edx, REGOFF_ES(%esp)
3491	mov	%ds, %edx
3492	mov	%edx, REGOFF_DS(%esp)
3493#endif	/* __GNUC_AS__ */
3494	movl	%edi, REGOFF_EDI(%esp)
3495	movl	%esi, REGOFF_ESI(%esp)
3496	movl	16(%ebx), %ecx
3497	movl	%ecx, REGOFF_EBP(%esp)
3498	movl	%ebx, %ecx
3499	addl	$20, %ecx
3500	movl	%ecx, REGOFF_ESP(%esp)
3501	movl	8(%ebx), %ecx
3502	movl	%ecx, REGOFF_EBX(%esp)
3503	movl	0(%ebx), %ecx
3504	movl	%ecx, REGOFF_EDX(%esp)
3505	movl	4(%ebx), %ecx
3506	movl	%ecx, REGOFF_ECX(%esp)
3507	movl	12(%ebx), %ecx
3508	movl	%ecx, REGOFF_EAX(%esp)
3509	movl	$0, REGOFF_TRAPNO(%esp)
3510	movl	$0, REGOFF_ERR(%esp)
3511	lea	vpanic, %ecx
3512	movl	%ecx, REGOFF_EIP(%esp)
3513#if !defined(__GNUC_AS__)
3514	movw	%cs, %edx
3515#else	/* __GNUC_AS__ */
3516	mov	%cs, %edx
3517#endif	/* __GNUC_AS__ */
3518	movl	%edx, REGOFF_CS(%esp)
3519	pushfl
3520	popl	%ecx
3521#if defined(__xpv)
3522	/*
3523	 * Synthesize the PS_IE bit from the event mask bit
3524	 */
3525	CURTHREAD(%edx)
3526	KPREEMPT_DISABLE(%edx)
3527	EVENT_MASK_TO_IE(%edx, %ecx)
3528	CURTHREAD(%edx)
3529	KPREEMPT_ENABLE_NOKP(%edx)
3530#endif
3531	movl	%ecx, REGOFF_EFL(%esp)
3532	movl	$0, REGOFF_UESP(%esp)
3533#if !defined(__GNUC_AS__)
3534	movw	%ss, %edx
3535#else	/* __GNUC_AS__ */
3536	mov	%ss, %edx
3537#endif	/* __GNUC_AS__ */
3538	movl	%edx, REGOFF_SS(%esp)
3539
3540	movl	%esp, %ecx			/ %ecx = &regs
3541	pushl	%eax				/ push on_panic_stack
3542	pushl	%ecx				/ push &regs
3543	movl	12(%ebp), %ecx			/ %ecx = alist
3544	pushl	%ecx				/ push alist
3545	movl	8(%ebp), %ecx			/ %ecx = format
3546	pushl	%ecx				/ push format
3547	call	panicsys			/ panicsys();
3548	addl	$16, %esp			/ pop arguments
3549
3550	addl	$REGSIZE, %esp
3551	popl	%edx
3552	popl	%ecx
3553	popl	%ebx
3554	popl	%eax
3555	leave
3556	ret
3557	SET_SIZE(vpanic)
3558
3559	ENTRY_NP(dtrace_vpanic)			/ Initial stack layout:
3560
3561	pushl	%ebp				/ | %eip | 20
3562	movl	%esp, %ebp			/ | %ebp | 16
3563	pushl	%eax				/ | %eax | 12
3564	pushl	%ebx				/ | %ebx |  8
3565	pushl	%ecx				/ | %ecx |  4
3566	pushl	%edx				/ | %edx |  0
3567
3568	movl	%esp, %ebx			/ %ebx = current stack pointer
3569
3570	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
3571	pushl	%eax				/ push &panic_quiesce
3572	call	dtrace_panic_trigger		/ %eax = dtrace_panic_trigger()
3573	addl	$4, %esp			/ reset stack pointer
3574	jmp	vpanic_common			/ jump back to common code
3575
3576	SET_SIZE(dtrace_vpanic)
3577
3578#endif	/* __i386 */
3579#endif	/* __lint */
3580
3581#if defined(__lint)
3582
3583void
3584hres_tick(void)
3585{}
3586
3587int64_t timedelta;
3588hrtime_t hrtime_base;
3589
3590#else	/* __lint */
3591
3592	DGDEF3(timedelta, 8, 8)
3593	.long	0, 0
3594
3595	/*
3596	 * initialized to a non-zero value so that pc_gethrtime()
3597	 * works correctly even before the clock is initialized
3598	 */
3599	DGDEF3(hrtime_base, 8, 8)
3600	.long	_MUL(NSEC_PER_CLOCK_TICK, 6), 0
3601
3602	DGDEF3(adj_shift, 4, 4)
3603	.long	ADJ_SHIFT
3604
3605#if defined(__amd64)
3606
3607	ENTRY_NP(hres_tick)
3608	pushq	%rbp
3609	movq	%rsp, %rbp
3610
3611	/*
3612	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3613	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
3614	 * At worst, performing this now instead of under CLOCK_LOCK may
3615	 * introduce some jitter in pc_gethrestime().
3616	 */
3617	call	*gethrtimef(%rip)
3618	movq	%rax, %r8
3619
3620	leaq	hres_lock(%rip), %rax
3621	movb	$-1, %dl
3622.CL1:
3623	xchgb	%dl, (%rax)
3624	testb	%dl, %dl
3625	jz	.CL3			/* got it */
3626.CL2:
3627	cmpb	$0, (%rax)		/* possible to get lock? */
3628	pause
3629	jne	.CL2
3630	jmp	.CL1			/* yes, try again */
3631.CL3:
3632	/*
3633	 * compute the interval since last time hres_tick was called
3634	 * and adjust hrtime_base and hrestime accordingly
3635	 * hrtime_base is an 8 byte value (in nsec), hrestime is
3636	 * a timestruc_t (sec, nsec)
3637	 */
3638	leaq	hres_last_tick(%rip), %rax
3639	movq	%r8, %r11
3640	subq	(%rax), %r8
3641	addq	%r8, hrtime_base(%rip)	/* add interval to hrtime_base */
3642	addq	%r8, hrestime+8(%rip)	/* add interval to hrestime.tv_nsec */
3643	/*
3644	 * Now that we have CLOCK_LOCK, we can update hres_last_tick
3645	 */
3646	movq	%r11, (%rax)
3647
3648	call	__adj_hrestime
3649
3650	/*
3651	 * release the hres_lock
3652	 */
3653	incl	hres_lock(%rip)
3654	leave
3655	ret
3656	SET_SIZE(hres_tick)
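
/*
 * With the CLOCK_LOCK handling elided (the lock is the low byte of
 * hres_lock, taken above with a locked xchgb and dropped with incl), the
 * update performed here amounts to the following sketch:
 *
 *	hrtime_t now = (*gethrtimef)();
 *	hrtime_t interval = now - hres_last_tick;
 *
 *	hrtime_base += interval;
 *	hrestime.tv_nsec += interval;
 *	hres_last_tick = now;
 *	__adj_hrestime();
 */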
3657
3658#elif defined(__i386)
3659
3660	ENTRY_NP(hres_tick)
3661	pushl	%ebp
3662	movl	%esp, %ebp
3663	pushl	%esi
3664	pushl	%ebx
3665
3666	/*
3667	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3668	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
3669	 * At worst, performing this now instead of under CLOCK_LOCK may
3670	 * introduce some jitter in pc_gethrestime().
3671	 */
3672	call	*gethrtimef
3673	movl	%eax, %ebx
3674	movl	%edx, %esi
3675
3676	movl	$hres_lock, %eax
3677	movl	$-1, %edx
3678.CL1:
3679	xchgb	%dl, (%eax)
3680	testb	%dl, %dl
3681	jz	.CL3			/ got it
3682.CL2:
3683	cmpb	$0, (%eax)		/ possible to get lock?
3684	pause
3685	jne	.CL2
3686	jmp	.CL1			/ yes, try again
3687.CL3:
3688	/*
3689	 * compute the interval since last time hres_tick was called
3690	 * and adjust hrtime_base and hrestime accordingly
3691	 * hrtime_base is an 8 byte value (in nsec), hrestime is
3692	 * timestruc_t (sec, nsec)
3693	 */
3694
3695	lea	hres_last_tick, %eax
3696
3697	movl	%ebx, %edx
3698	movl	%esi, %ecx
3699
3700	subl 	(%eax), %edx
3701	sbbl 	4(%eax), %ecx
3702
3703	addl	%edx, hrtime_base	/ add interval to hrtime_base
3704	adcl	%ecx, hrtime_base+4
3705
3706	addl 	%edx, hrestime+4	/ add interval to hrestime.tv_nsec
3707
3708	/
3709	/ Now that we have CLOCK_LOCK, we can update hres_last_tick.
3710	/
3711	movl	%ebx, (%eax)
3712	movl	%esi,  4(%eax)
3713
3714	/ get hrestime at this moment. used as base for pc_gethrestime
3715	/
3716	/ Apply adjustment, if any
3717	/
3718	/ #define HRES_ADJ	(NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
3719	/ (max_hres_adj)
3720	/
3721	/ void
3722	/ adj_hrestime()
3723	/ {
3724	/	long long adj;
3725	/
3726	/	if (hrestime_adj == 0)
3727	/		adj = 0;
3728	/	else if (hrestime_adj > 0) {
3729	/		if (hrestime_adj < HRES_ADJ)
3730	/			adj = hrestime_adj;
3731	/		else
3732	/			adj = HRES_ADJ;
3733	/	}
3734	/	else {
3735	/		if (hrestime_adj < -(HRES_ADJ))
3736	/			adj = -(HRES_ADJ);
3737	/		else
3738	/			adj = hrestime_adj;
3739	/	}
3740	/
3741	/	timedelta -= adj;
3742	/	hrestime_adj = timedelta;
3743	/	hrestime.tv_nsec += adj;
3744	/
3745	/	while (hrestime.tv_nsec >= NANOSEC) {
3746	/		one_sec++;
3747	/		hrestime.tv_sec++;
3748	/		hrestime.tv_nsec -= NANOSEC;
3749	/	}
3750	/ }
3751__adj_hrestime:
3752	movl	hrestime_adj, %esi	/ if (hrestime_adj == 0)
3753	movl	hrestime_adj+4, %edx
3754	andl	%esi, %esi
3755	jne	.CL4			/ no
3756	andl	%edx, %edx
3757	jne	.CL4			/ no
3758	subl	%ecx, %ecx		/ yes, adj = 0;
3759	subl	%edx, %edx
3760	jmp	.CL5
3761.CL4:
3762	subl	%ecx, %ecx
3763	subl	%eax, %eax
3764	subl	%esi, %ecx
3765	sbbl	%edx, %eax
3766	andl	%eax, %eax		/ if (hrestime_adj > 0)
3767	jge	.CL6
3768
3769	/ In the following comments, HRES_ADJ is used, while in the code
3770	/ max_hres_adj is used.
3771	/
3772	/ The test for "hrestime_adj < HRES_ADJ" is complicated because
3773	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
3774	/ on the logical equivalence of:
3775	/
3776	/	!(hrestime_adj < HRES_ADJ)
3777	/
3778	/ and the two step sequence:
3779	/
3780	/	(HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
3781	/
3782	/ which computes whether or not the least significant 32-bits
3783	/ of hrestime_adj is greater than HRES_ADJ, followed by:
3784	/
3785	/	Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
3786	/
3787	/ which generates a carry whenever step 1 is true or the most
3788	/ significant long of the longlong hrestime_adj is non-zero.
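	/
	/ A worked example, taking HRES_ADJ == 10 purely for illustration:
	/ for hrestime_adj = 0x00000001_00000005, step 1 is 10 - 5 = 5 with no
	/ borrow, and step 2 is 0xffffffff + 1 + 0, which carries, so the code
	/ below clamps adj to HRES_ADJ.  For hrestime_adj = 7, step 1 is
	/ 10 - 7 = 3 with no borrow and step 2 is 0xffffffff + 0 + 0 with no
	/ carry, so adj = hrestime_adj.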
3789
3790	movl	max_hres_adj, %ecx	/ hrestime_adj is positive
3791	subl	%esi, %ecx
3792	movl	%edx, %eax
3793	adcl	$-1, %eax
3794	jnc	.CL7
3795	movl	max_hres_adj, %ecx	/ adj = HRES_ADJ;
3796	subl	%edx, %edx
3797	jmp	.CL5
3798
3799	/ The following computation is similar to the one above.
3800	/
3801	/ The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
3802	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
3803	/ on the logical equivalence of:
3804	/
3805	/	(hrestime_adj > -HRES_ADJ)
3806	/
3807	/ and the two step sequence:
3808	/
3809	/	(HRES_ADJ + lsw(hrestime_adj)) generates a Carry
3810	/
3811	/ which means the least significant 32-bits of hrestime_adj is
3812	/ greater than -HRES_ADJ, followed by:
3813	/
3814	/	Previous Carry + 0 + msw(hrestime_adj) generates a Carry
3815	/
3816	/ which generates a carry only when step 1 is true and the most
3817	/ significant long of the longlong hrestime_adj is -1.
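	/
	/ A worked example, again with HRES_ADJ == 10: for hrestime_adj = -5
	/ (0xffffffff fffffffb), step 1 is 10 + 0xfffffffb, which carries, and
	/ step 2 is 0xffffffff + 0 + 1, which also carries, so adj stays at
	/ hrestime_adj.  For hrestime_adj = -20 (0xffffffff ffffffec), step 1
	/ is 10 + 0xffffffec with no carry, step 2 has no carry either, and
	/ the code below clamps adj to -(HRES_ADJ).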
3818
3819.CL6:					/ hrestime_adj is negative
3820	movl	%esi, %ecx
3821	addl	max_hres_adj, %ecx
3822	movl	%edx, %eax
3823	adcl	$0, %eax
3824	jc	.CL7
3825	xor	%ecx, %ecx
3826	subl	max_hres_adj, %ecx	/ adj = -(HRES_ADJ);
3827	movl	$-1, %edx
3828	jmp	.CL5
3829.CL7:
3830	movl	%esi, %ecx		/ adj = hrestime_adj;
3831.CL5:
3832	movl	timedelta, %esi
3833	subl	%ecx, %esi
3834	movl	timedelta+4, %eax
3835	sbbl	%edx, %eax
3836	movl	%esi, timedelta
3837	movl	%eax, timedelta+4	/ timedelta -= adj;
3838	movl	%esi, hrestime_adj
3839	movl	%eax, hrestime_adj+4	/ hrestime_adj = timedelta;
3840	addl	hrestime+4, %ecx
3841
3842	movl	%ecx, %eax		/ eax = tv_nsec
38431:
3844	cmpl	$NANOSEC, %eax		/ if ((unsigned long)tv_nsec >= NANOSEC)
3845	jb	.CL8			/ no
3846	incl	one_sec			/ yes,  one_sec++;
3847	incl	hrestime		/ hrestime.tv_sec++;
3848	addl	$-NANOSEC, %eax		/ tv_nsec -= NANOSEC
3849	jmp	1b			/ check for more seconds
3850
3851.CL8:
3852	movl	%eax, hrestime+4	/ store final into hrestime.tv_nsec
3853	incl	hres_lock		/ release the hres_lock
3854
3855	popl	%ebx
3856	popl	%esi
3857	leave
3858	ret
3859	SET_SIZE(hres_tick)
3860
3861#endif	/* __i386 */
3862#endif	/* __lint */
3863
3864/*
3865 * void prefetch_smap_w(void *)
3866 *
3867 * Prefetch ahead within a linear list of smap structures.
3868 * Not implemented for ia32.  Stub for compatibility.
3869 */
3870
3871#if defined(__lint)
3872
3873/*ARGSUSED*/
3874void prefetch_smap_w(void *smp)
3875{}
3876
3877#else	/* __lint */
3878
3879	ENTRY(prefetch_smap_w)
3880	rep;	ret	/* use 2 byte return instruction when branch target */
3881			/* AMD Software Optimization Guide - Section 6.2 */
3882	SET_SIZE(prefetch_smap_w)
3883
3884#endif	/* __lint */
3885
3886/*
3887 * prefetch_page_r(page_t *)
3888 * issue prefetch instructions for a page_t
3889 */
3890#if defined(__lint)
3891
3892/*ARGSUSED*/
3893void
3894prefetch_page_r(void *pp)
3895{}
3896
3897#else	/* __lint */
3898
3899	ENTRY(prefetch_page_r)
3900	rep;	ret	/* use 2 byte return instruction when branch target */
3901			/* AMD Software Optimization Guide - Section 6.2 */
3902	SET_SIZE(prefetch_page_r)
3903
3904#endif	/* __lint */
3905
3906#if defined(__lint)
3907
3908/*ARGSUSED*/
3909int
3910bcmp(const void *s1, const void *s2, size_t count)
3911{ return (0); }
3912
3913#else   /* __lint */
3914
3915#if defined(__amd64)
3916
3917	ENTRY(bcmp)
3918	pushq	%rbp
3919	movq	%rsp, %rbp
3920#ifdef DEBUG
3921	testq	%rdx,%rdx
3922	je	1f
3923	movq	postbootkernelbase(%rip), %r11
3924	cmpq	%r11, %rdi
3925	jb	0f
3926	cmpq	%r11, %rsi
3927	jnb	1f
39280:	leaq	.bcmp_panic_msg(%rip), %rdi
3929	xorl	%eax, %eax
3930	call	panic
39311:
3932#endif	/* DEBUG */
3933	call	memcmp
3934	testl	%eax, %eax
3935	setne	%dl
3936	leave
3937	movzbl	%dl, %eax
3938	ret
3939	SET_SIZE(bcmp)
3940
3941#elif defined(__i386)
3942
3943#define	ARG_S1		8
3944#define	ARG_S2		12
3945#define	ARG_LENGTH	16
3946
3947	ENTRY(bcmp)
3948	pushl	%ebp
3949	movl	%esp, %ebp	/ create new stack frame
3950#ifdef DEBUG
3951	cmpl	$0, ARG_LENGTH(%ebp)
3952	je	1f
3953	movl    postbootkernelbase, %eax
3954	cmpl    %eax, ARG_S1(%ebp)
3955	jb	0f
3956	cmpl    %eax, ARG_S2(%ebp)
3957	jnb	1f
39580:	pushl   $.bcmp_panic_msg
3959	call    panic
39601:
3961#endif	/* DEBUG */
3962
3963	pushl	%edi		/ save register variable
3964	movl	ARG_S1(%ebp), %eax	/ %eax = address of string 1
3965	movl	ARG_S2(%ebp), %ecx	/ %ecx = address of string 2
3966	cmpl	%eax, %ecx	/ if the same string
3967	je	.equal		/ goto .equal
3968	movl	ARG_LENGTH(%ebp), %edi	/ %edi = length in bytes
3969	cmpl	$4, %edi	/ if %edi < 4
3970	jb	.byte_check	/ goto .byte_check
3971	.align	4
3972.word_loop:
3973	movl	(%ecx), %edx	/ move 1 word from (%ecx) to %edx
3974	leal	-4(%edi), %edi	/ %edi -= 4
3975	cmpl	(%eax), %edx	/ compare 1 word from (%eax) with %edx
3976	jne	.word_not_equal	/ if not equal, goto .word_not_equal
3977	leal	4(%ecx), %ecx	/ %ecx += 4 (next word)
3978	leal	4(%eax), %eax	/ %eax += 4 (next word)
3979	cmpl	$4, %edi	/ if %edi >= 4
3980	jae	.word_loop	/ goto .word_loop
3981.byte_check:
3982	cmpl	$0, %edi	/ if %edi == 0
3983	je	.equal		/ goto .equal
3984	jmp	.byte_loop	/ goto .byte_loop (checks in bytes)
3985.word_not_equal:
3986	leal	4(%edi), %edi	/ %edi += 4 (undo the loop's decrement)
3987	.align	4
3988.byte_loop:
3989	movb	(%ecx),	%dl	/ move 1 byte from (%ecx) to %dl
3990	cmpb	%dl, (%eax)	/ compare %dl with 1 byte from (%eax)
3991	jne	.not_equal	/ if not equal, goto .not_equal
3992	incl	%ecx		/ %ecx++ (next byte)
3993	incl	%eax		/ %eax++ (next byte)
3994	decl	%edi		/ %edi--
3995	jnz	.byte_loop	/ if not zero, goto .byte_loop
3996.equal:
3997	xorl	%eax, %eax	/ %eax = 0
3998	popl	%edi		/ restore register variable
3999	leave			/ restore old stack frame
4000	ret			/ return (0)
4001	.align	4
4002.not_equal:
4003	movl	$1, %eax	/ return 1
4004	popl	%edi		/ restore register variable
4005	leave			/ restore old stack frame
4006	ret			/ return (1)
4007	SET_SIZE(bcmp)
4008
4009#endif	/* __i386 */
4010
4011#ifdef DEBUG
4012	.text
4013.bcmp_panic_msg:
4014	.string "bcmp: arguments below kernelbase"
4015#endif	/* DEBUG */
4016
4017#endif	/* __lint */
4018
4019#if defined(__lint)
4020
4021uint_t
4022bsrw_insn(uint16_t mask)
4023{
4024	uint_t index = sizeof (mask) * NBBY - 1;
4025
4026	while ((mask & (1 << index)) == 0)
4027		index--;
4028	return (index);
4029}
4030
4031#else	/* __lint */
4032
4033#if defined(__amd64)
4034
4035	ENTRY_NP(bsrw_insn)
4036	xorl	%eax, %eax
4037	bsrw	%di, %ax
4038	ret
4039	SET_SIZE(bsrw_insn)
4040
4041#elif defined(__i386)
4042
4043	ENTRY_NP(bsrw_insn)
4044	movw	4(%esp), %cx
4045	xorl	%eax, %eax
4046	bsrw	%cx, %ax
4047	ret
4048	SET_SIZE(bsrw_insn)
4049
4050#endif	/* __i386 */
4051#endif	/* __lint */
4052
4053#if defined(__lint)
4054
4055uint_t
4056atomic_btr32(uint32_t *pending, uint_t pil)
4057{
4058	return (*pending &= ~(1 << pil));
4059}
4060
4061#else	/* __lint */
4062
4063#if defined(__i386)
4064
4065	ENTRY_NP(atomic_btr32)
4066	movl	4(%esp), %ecx
4067	movl	8(%esp), %edx
4068	xorl	%eax, %eax
4069	lock
4070	btrl	%edx, (%ecx)
4071	setc	%al
4072	ret
4073	SET_SIZE(atomic_btr32)
4074
4075#endif	/* __i386 */
4076#endif	/* __lint */
4077
4078#if defined(__lint)
4079
4080/*ARGSUSED*/
4081void
4082switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
4083	    uint_t arg2)
4084{}
4085
4086#else	/* __lint */
4087
4088#if defined(__amd64)
4089
4090	ENTRY_NP(switch_sp_and_call)
4091	pushq	%rbp
4092	movq	%rsp, %rbp		/* set up stack frame */
4093	movq	%rdi, %rsp		/* switch stack pointer */
4094	movq	%rdx, %rdi		/* pass func arg 1 */
4095	movq	%rsi, %r11		/* save function to call */
4096	movq	%rcx, %rsi		/* pass func arg 2 */
4097	call	*%r11			/* call function */
4098	leave				/* restore stack */
4099	ret
4100	SET_SIZE(switch_sp_and_call)
4101
4102#elif defined(__i386)
4103
4104	ENTRY_NP(switch_sp_and_call)
4105	pushl	%ebp
4106	mov	%esp, %ebp		/* set up stack frame */
4107	movl	8(%ebp), %esp		/* switch stack pointer */
4108	pushl	20(%ebp)		/* push func arg 2 */
4109	pushl	16(%ebp)		/* push func arg 1 */
4110	call	*12(%ebp)		/* call function */
4111	addl	$8, %esp		/* pop arguments */
4112	leave				/* restore stack */
4113	ret
4114	SET_SIZE(switch_sp_and_call)
4115
4116#endif	/* __i386 */
4117#endif	/* __lint */
4118
4119#if defined(__lint)
4120
4121void
4122kmdb_enter(void)
4123{}
4124
4125#else	/* __lint */
4126
4127#if defined(__amd64)
4128
4129	ENTRY_NP(kmdb_enter)
4130	pushq	%rbp
4131	movq	%rsp, %rbp
4132
4133	/*
4134	 * Save flags, do a 'cli' then return the saved flags
4135	 */
4136	call	intr_clear
4137
4138	int	$T_DBGENTR
4139
4140	/*
4141	 * Restore the saved flags
4142	 */
4143	movq	%rax, %rdi
4144	call	intr_restore
4145
4146	leave
4147	ret
4148	SET_SIZE(kmdb_enter)
4149
4150#elif defined(__i386)
4151
4152	ENTRY_NP(kmdb_enter)
4153	pushl	%ebp
4154	movl	%esp, %ebp
4155
4156	/*
4157	 * Save flags, do a 'cli' then return the saved flags
4158	 */
4159	call	intr_clear
4160
4161	int	$T_DBGENTR
4162
4163	/*
4164	 * Restore the saved flags
4165	 */
4166	pushl	%eax
4167	call	intr_restore
4168	addl	$4, %esp
4169
4170	leave
4171	ret
4172	SET_SIZE(kmdb_enter)
4173
4174#endif	/* __i386 */
4175#endif	/* __lint */
4176
4177#if defined(__lint)
4178
4179void
4180return_instr(void)
4181{}
4182
4183#else	/* __lint */
4184
4185	ENTRY_NP(return_instr)
4186	rep;	ret	/* use 2 byte return instruction when branch target */
4187			/* AMD Software Optimization Guide - Section 6.2 */
4188	SET_SIZE(return_instr)
4189
4190#endif	/* __lint */
4191
4192#if defined(__lint)
4193
4194ulong_t
4195getflags(void)
4196{
4197	return (0);
4198}
4199
4200#else	/* __lint */
4201
4202#if defined(__amd64)
4203
4204	ENTRY(getflags)
4205	pushfq
4206	popq	%rax
4207#if defined(__xpv)
4208	CURTHREAD(%rdi)
4209	KPREEMPT_DISABLE(%rdi)
4210	/*
4211	 * Synthesize the PS_IE bit from the event mask bit
4212	 */
4213	CURVCPU(%r11)
4214	andq    $_BITNOT(PS_IE), %rax
4215	XEN_TEST_UPCALL_MASK(%r11)
4216	jnz	1f
4217	orq	$PS_IE, %rax
42181:
4219	KPREEMPT_ENABLE_NOKP(%rdi)
4220#endif
4221	ret
4222	SET_SIZE(getflags)
4223
4224#elif defined(__i386)
4225
4226	ENTRY(getflags)
4227	pushfl
4228	popl	%eax
4229#if defined(__xpv)
4230	CURTHREAD(%ecx)
4231	KPREEMPT_DISABLE(%ecx)
4232	/*
4233	 * Synthesize the PS_IE bit from the event mask bit
4234	 */
4235	CURVCPU(%edx)
4236	andl    $_BITNOT(PS_IE), %eax
4237	XEN_TEST_UPCALL_MASK(%edx)
4238	jnz	1f
4239	orl	$PS_IE, %eax
42401:
4241	KPREEMPT_ENABLE_NOKP(%ecx)
4242#endif
4243	ret
4244	SET_SIZE(getflags)
4245
4246#endif	/* __i386 */
4247
4248#endif	/* __lint */
4249
4250#if defined(__lint)
4251
4252ftrace_icookie_t
4253ftrace_interrupt_disable(void)
4254{ return (0); }
4255
4256#else   /* __lint */
4257
4258#if defined(__amd64)
4259
4260	ENTRY(ftrace_interrupt_disable)
4261	pushfq
4262	popq	%rax
4263	CLI(%rdx)
4264	ret
4265	SET_SIZE(ftrace_interrupt_disable)
4266
4267#elif defined(__i386)
4268
4269	ENTRY(ftrace_interrupt_disable)
4270	pushfl
4271	popl	%eax
4272	CLI(%edx)
4273	ret
4274	SET_SIZE(ftrace_interrupt_disable)
4275
4276#endif	/* __i386 */
4277#endif	/* __lint */
4278
4279#if defined(__lint)
4280
4281/*ARGSUSED*/
4282void
4283ftrace_interrupt_enable(ftrace_icookie_t cookie)
4284{}
4285
4286#else	/* __lint */
4287
4288#if defined(__amd64)
4289
4290	ENTRY(ftrace_interrupt_enable)
4291	pushq	%rdi
4292	popfq
4293	ret
4294	SET_SIZE(ftrace_interrupt_enable)
4295
4296#elif defined(__i386)
4297
4298	ENTRY(ftrace_interrupt_enable)
4299	movl	4(%esp), %eax
4300	pushl	%eax
4301	popfl
4302	ret
4303	SET_SIZE(ftrace_interrupt_enable)
4304
4305#endif	/* __i386 */
4306#endif	/* __lint */
4307
4308#if defined (__lint)
4309
4310/*ARGSUSED*/
4311void
4312clflush_insn(caddr_t addr)
4313{}
4314
4315#else /* __lint */
4316
4317#if defined (__amd64)
4318	ENTRY(clflush_insn)
4319	clflush (%rdi)
4320	ret
4321	SET_SIZE(clflush_insn)
4322#elif defined (__i386)
4323	ENTRY(clflush_insn)
4324	movl	4(%esp), %eax
4325	clflush (%eax)
4326	ret
4327	SET_SIZE(clflush_insn)
4328
4329#endif /* __i386 */
4330#endif /* __lint */
4331
4332#if defined (__lint)
4333/*ARGSUSED*/
4334void
4335mfence_insn(void)
4336{}
4337
4338#else /* __lint */
4339
4340#if defined (__amd64)
4341	ENTRY(mfence_insn)
4342	mfence
4343	ret
4344	SET_SIZE(mfence_insn)
4345#elif defined (__i386)
4346	ENTRY(mfence_insn)
4347	mfence
4348	ret
4349	SET_SIZE(mfence_insn)
4350
4351#endif /* __i386 */
4352#endif /* __lint */
4353
4354/*
4355 * VMware implements an I/O port that programs can query to detect if software
4356 * is running in a VMware hypervisor. This hypervisor port behaves differently
4357 * depending on magic values in certain registers and modifies some registers
4358 * as a side effect.
4359 *
4360 * References: http://kb.vmware.com/kb/1009458
4361 */
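
/*
 * A hypothetical caller probes for the hypervisor roughly as below.  This
 * is a sketch only: regs[] receives %eax, %ebx, %ecx and %edx in that
 * order, and VMWARE_PORT_CMD_GETVERSION is an assumed command name rather
 * than something defined in this file.
 *
 *	uint32_t regs[4];
 *	int is_vmware;
 *
 *	vmware_port(VMWARE_PORT_CMD_GETVERSION, regs);
 *	is_vmware = (regs[1] == VMWARE_HVMAGIC);
 */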
4362
4363#if defined(__lint)
4364
4365/* ARGSUSED */
4366void
4367vmware_port(int cmd, uint32_t *regs) { return; }
4368
4369#else
4370
4371#if defined(__amd64)
4372
4373	ENTRY(vmware_port)
4374	pushq	%rbx
4375	movl	$VMWARE_HVMAGIC, %eax
4376	movl	$0xffffffff, %ebx
4377	movl	%edi, %ecx
4378	movl	$VMWARE_HVPORT, %edx
4379	inl	(%dx)
4380	movl	%eax, (%rsi)
4381	movl	%ebx, 4(%rsi)
4382	movl	%ecx, 8(%rsi)
4383	movl	%edx, 12(%rsi)
4384	popq	%rbx
4385	ret
4386	SET_SIZE(vmware_port)
4387
4388#elif defined(__i386)
4389
4390	ENTRY(vmware_port)
4391	pushl	%ebx
4392	pushl	%esi
4393	movl	$VMWARE_HVMAGIC, %eax
4394	movl	$0xffffffff, %ebx
4395	movl	12(%esp), %ecx
4396	movl	$VMWARE_HVPORT, %edx
4397	inl	(%dx)
4398	movl	16(%esp), %esi
4399	movl	%eax, (%esi)
4400	movl	%ebx, 4(%esi)
4401	movl	%ecx, 8(%esi)
4402	movl	%edx, 12(%esi)
4403	popl	%esi
4404	popl	%ebx
4405	ret
4406	SET_SIZE(vmware_port)
4407
4408#endif /* __i386 */
4409#endif /* __lint */
4410