xref: /titanic_51/usr/src/uts/intel/ia32/ml/i86_subr.s (revision 7997e108b559ec4bb8a4c39fbfb6ca5606995a08)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*
28 *  Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
29 *  Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
30 *    All Rights Reserved
31 */
32
33/*
34 * General assembly language routines.
35 * It is the intent of this file to contain routines that are
36 * independent of the specific kernel architecture, and those that are
37 * common across kernel architectures.
38 * As architectures diverge, and implementations of specific
39 * architecture-dependent routines change, the routines should be moved
40 * from this file into the respective ../`arch -k`/subr.s file.
41 */
42
43#include <sys/asm_linkage.h>
44#include <sys/asm_misc.h>
45#include <sys/panic.h>
46#include <sys/ontrap.h>
47#include <sys/regset.h>
48#include <sys/privregs.h>
49#include <sys/reboot.h>
50#include <sys/psw.h>
51#include <sys/x86_archext.h>
52
53#if defined(__lint)
54#include <sys/types.h>
55#include <sys/systm.h>
56#include <sys/thread.h>
57#include <sys/archsystm.h>
58#include <sys/byteorder.h>
59#include <sys/dtrace.h>
60#include <sys/ftrace.h>
61#else	/* __lint */
62#include "assym.h"
63#endif	/* __lint */
64#include <sys/dditypes.h>
65
/*
 * on_fault()
 * Catch lofault faults. Like setjmp except it returns one
 * if code following causes uncorrectable fault. Turned off
 * by calling no_fault().
 */

#if defined(__lint)

/* ARGSUSED */
int
on_fault(label_t *ljb)
{ return (0); }

void
no_fault(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(on_fault)
	movq	%gs:CPU_THREAD, %rsi
	leaq	catch_fault(%rip), %rdx
	movq	%rdi, T_ONFAULT(%rsi)		/* jumpbuf in t_onfault */
	movq	%rdx, T_LOFAULT(%rsi)		/* catch_fault in t_lofault */
	jmp	setjmp				/* let setjmp do the rest */

catch_fault:
	movq	%gs:CPU_THREAD, %rsi
	movq	T_ONFAULT(%rsi), %rdi		/* address of save area */
	xorl	%eax, %eax
	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
	jmp	longjmp				/* let longjmp do the rest */
	SET_SIZE(on_fault)

	ENTRY(no_fault)
	movq	%gs:CPU_THREAD, %rsi
	xorl	%eax, %eax
	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
	ret
	SET_SIZE(no_fault)

#elif defined(__i386)

	ENTRY(on_fault)
	movl	%gs:CPU_THREAD, %edx
	movl	4(%esp), %eax			/* jumpbuf address */
	leal	catch_fault, %ecx
	movl	%eax, T_ONFAULT(%edx)		/* jumpbuf in t_onfault */
	movl	%ecx, T_LOFAULT(%edx)		/* catch_fault in t_lofault */
	jmp	setjmp				/* let setjmp do the rest */

catch_fault:
	movl	%gs:CPU_THREAD, %edx
	xorl	%eax, %eax
	movl	T_ONFAULT(%edx), %ecx		/* address of save area */
	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
	pushl	%ecx
	call	longjmp				/* let longjmp do the rest */
	SET_SIZE(on_fault)

	ENTRY(no_fault)
	movl	%gs:CPU_THREAD, %edx
	xorl	%eax, %eax
	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
	ret
	SET_SIZE(no_fault)

#endif	/* __i386 */
#endif	/* __lint */
142
/*
 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
 */

#if defined(lint)

void
on_trap_trampoline(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(on_trap_trampoline)
	movq	%gs:CPU_THREAD, %rsi
	movq	T_ONTRAP(%rsi), %rdi
	addq	$OT_JMPBUF, %rdi
	jmp	longjmp
	SET_SIZE(on_trap_trampoline)

#elif defined(__i386)

	ENTRY(on_trap_trampoline)
	movl	%gs:CPU_THREAD, %eax
	movl	T_ONTRAP(%eax), %eax
	addl	$OT_JMPBUF, %eax
	pushl	%eax
	call	longjmp
	SET_SIZE(on_trap_trampoline)

#endif	/* __i386 */
#endif	/* __lint */
177
/*
 * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
 * more information about the on_trap() mechanism.  If the on_trap_data is the
 * same as the topmost stack element, we just modify that element.
 */
#if defined(lint)

/*ARGSUSED*/
int
on_trap(on_trap_data_t *otp, uint_t prot)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(on_trap)
	movw	%si, OT_PROT(%rdi)		/* ot_prot = prot */
	movw	$0, OT_TRAP(%rdi)		/* ot_trap = 0 */
	leaq	on_trap_trampoline(%rip), %rdx	/* rdx = &on_trap_trampoline */
	movq	%rdx, OT_TRAMPOLINE(%rdi)	/* ot_trampoline = rdx */
	xorl	%ecx, %ecx
	movq	%rcx, OT_HANDLE(%rdi)		/* ot_handle = NULL */
	movq	%rcx, OT_PAD1(%rdi)		/* ot_pad1 = NULL */
	movq	%gs:CPU_THREAD, %rdx		/* rdx = curthread */
	movq	T_ONTRAP(%rdx), %rcx		/* rcx = curthread->t_ontrap */
	cmpq	%rdi, %rcx			/* if (otp == %rcx)	*/
	je	0f				/*	don't modify t_ontrap */

	movq	%rcx, OT_PREV(%rdi)		/* ot_prev = t_ontrap */
	movq	%rdi, T_ONTRAP(%rdx)		/* curthread->t_ontrap = otp */

0:	addq	$OT_JMPBUF, %rdi		/* &ot_jmpbuf */
	jmp	setjmp
	SET_SIZE(on_trap)

#elif defined(__i386)

	ENTRY(on_trap)
	movl	4(%esp), %eax			/* %eax = otp */
	movl	8(%esp), %edx			/* %edx = prot */

	movw	%dx, OT_PROT(%eax)		/* ot_prot = prot */
	movw	$0, OT_TRAP(%eax)		/* ot_trap = 0 */
	leal	on_trap_trampoline, %edx	/* %edx = &on_trap_trampoline */
	movl	%edx, OT_TRAMPOLINE(%eax)	/* ot_trampoline = %edx */
	movl	$0, OT_HANDLE(%eax)		/* ot_handle = NULL */
	movl	$0, OT_PAD1(%eax)		/* ot_pad1 = NULL */
	movl	%gs:CPU_THREAD, %edx		/* %edx = curthread */
	movl	T_ONTRAP(%edx), %ecx		/* %ecx = curthread->t_ontrap */
	cmpl	%eax, %ecx			/* if (otp == %ecx) */
	je	0f				/*    don't modify t_ontrap */

	movl	%ecx, OT_PREV(%eax)		/* ot_prev = t_ontrap */
	movl	%eax, T_ONTRAP(%edx)		/* curthread->t_ontrap = otp */

0:	addl	$OT_JMPBUF, %eax		/* %eax = &ot_jmpbuf */
	movl	%eax, 4(%esp)			/* put %eax back on the stack */
	jmp	setjmp				/* let setjmp do the rest */
	SET_SIZE(on_trap)

#endif	/* __i386 */
#endif	/* __lint */
241
/*
 * Setjmp and longjmp implement non-local gotos using state vectors
 * type label_t.
 */

#if defined(__lint)

/* ARGSUSED */
int
setjmp(label_t *lp)
{ return (0); }

/* ARGSUSED */
void
longjmp(label_t *lp)
{}

#else	/* __lint */

#if LABEL_PC != 0
#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
#endif	/* LABEL_PC != 0 */

#if defined(__amd64)

	ENTRY(setjmp)
	movq	%rsp, LABEL_SP(%rdi)
	movq	%rbp, LABEL_RBP(%rdi)
	movq	%rbx, LABEL_RBX(%rdi)
	movq	%r12, LABEL_R12(%rdi)
	movq	%r13, LABEL_R13(%rdi)
	movq	%r14, LABEL_R14(%rdi)
	movq	%r15, LABEL_R15(%rdi)
	movq	(%rsp), %rdx		/* return address */
	movq	%rdx, (%rdi)		/* LABEL_PC is 0 */
	xorl	%eax, %eax		/* return 0 */
	ret
	SET_SIZE(setjmp)

	ENTRY(longjmp)
	movq	LABEL_SP(%rdi), %rsp
	movq	LABEL_RBP(%rdi), %rbp
	movq	LABEL_RBX(%rdi), %rbx
	movq	LABEL_R12(%rdi), %r12
	movq	LABEL_R13(%rdi), %r13
	movq	LABEL_R14(%rdi), %r14
	movq	LABEL_R15(%rdi), %r15
	movq	(%rdi), %rdx		/* return address; LABEL_PC is 0 */
	movq	%rdx, (%rsp)
	xorl	%eax, %eax
	incl	%eax			/* return 1 */
	ret
	SET_SIZE(longjmp)

#elif defined(__i386)

	ENTRY(setjmp)
	movl	4(%esp), %edx		/* address of save area */
	movl	%ebp, LABEL_EBP(%edx)
	movl	%ebx, LABEL_EBX(%edx)
	movl	%esi, LABEL_ESI(%edx)
	movl	%edi, LABEL_EDI(%edx)
	movl	%esp, 4(%edx)
	movl	(%esp), %ecx		/* %eip (return address) */
	movl	%ecx, (%edx)		/* LABEL_PC is 0 */
	subl	%eax, %eax		/* return 0 */
	ret
	SET_SIZE(setjmp)

	ENTRY(longjmp)
	movl	4(%esp), %edx		/* address of save area */
	movl	LABEL_EBP(%edx), %ebp
	movl	LABEL_EBX(%edx), %ebx
	movl	LABEL_ESI(%edx), %esi
	movl	LABEL_EDI(%edx), %edi
	movl	4(%edx), %esp
	movl	(%edx), %ecx		/* %eip (return addr); LABEL_PC is 0 */
	movl	$1, %eax
	addl	$4, %esp		/* pop ret adr */
	jmp	*%ecx			/* indirect */
	SET_SIZE(longjmp)

#endif	/* __i386 */
#endif	/* __lint */
326
/*
 * if a() calls b() calls caller(),
 * caller() returns return address in a().
 * (Note: We assume a() and b() are C routines which do the normal entry/exit
 *  sequence.)
 */

#if defined(__lint)

caddr_t
caller(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(caller)
	movq	8(%rbp), %rax		/* b()'s return pc, in a() */
	ret
	SET_SIZE(caller)

#elif defined(__i386)

	ENTRY(caller)
	movl	4(%ebp), %eax		/* b()'s return pc, in a() */
	ret
	SET_SIZE(caller)

#endif	/* __i386 */
#endif	/* __lint */
358
/*
 * if a() calls callee(), callee() returns the
 * return address in a();
 */

#if defined(__lint)

caddr_t
callee(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(callee)
	movq	(%rsp), %rax		/* callee()'s return pc, in a() */
	ret
	SET_SIZE(callee)

#elif defined(__i386)

	ENTRY(callee)
	movl	(%esp), %eax		/* callee()'s return pc, in a() */
	ret
	SET_SIZE(callee)

#endif	/* __i386 */
#endif	/* __lint */
388
/*
 * return the current frame pointer
 */

#if defined(__lint)

greg_t
getfp(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(getfp)
	movq	%rbp, %rax
	ret
	SET_SIZE(getfp)

#elif defined(__i386)

	ENTRY(getfp)
	movl	%ebp, %eax
	ret
	SET_SIZE(getfp)

#endif	/* __i386 */
#endif	/* __lint */
417
/*
 * Invalidate a single page table entry in the TLB
 */

#if defined(__lint)

/* ARGSUSED */
void
mmu_tlbflush_entry(caddr_t m)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(mmu_tlbflush_entry)
	invlpg	(%rdi)
	ret
	SET_SIZE(mmu_tlbflush_entry)

#elif defined(__i386)

	ENTRY(mmu_tlbflush_entry)
	movl	4(%esp), %eax
	invlpg	(%eax)
	ret
	SET_SIZE(mmu_tlbflush_entry)

#endif	/* __i386 */
#endif	/* __lint */
448
449
/*
 * Get/Set the value of various control registers
 */

#if defined(__lint)

ulong_t
getcr0(void)
{ return (0); }

/* ARGSUSED */
void
setcr0(ulong_t value)
{}

ulong_t
getcr2(void)
{ return (0); }

ulong_t
getcr3(void)
{ return (0); }

#if !defined(__xpv)
/* ARGSUSED */
void
setcr3(ulong_t val)
{}

void
reload_cr3(void)
{}
#endif

ulong_t
getcr4(void)
{ return (0); }

/* ARGSUSED */
void
setcr4(ulong_t val)
{}

#if defined(__amd64)

ulong_t
getcr8(void)
{ return (0); }

/* ARGSUSED */
void
setcr8(ulong_t val)
{}

#endif	/* __amd64 */

#else	/* __lint */

#if defined(__amd64)

	ENTRY(getcr0)
	movq	%cr0, %rax
	ret
	SET_SIZE(getcr0)

	ENTRY(setcr0)
	movq	%rdi, %cr0
	ret
	SET_SIZE(setcr0)

	ENTRY(getcr2)
#if defined(__xpv)
	/* under Xen, %cr2 is read from the shared vcpu info structure */
	movq	%gs:CPU_VCPU_INFO, %rax
	movq	VCPU_INFO_ARCH_CR2(%rax), %rax
#else
	movq	%cr2, %rax
#endif
	ret
	SET_SIZE(getcr2)

	ENTRY(getcr3)
	movq	%cr3, %rax
	ret
	SET_SIZE(getcr3)

#if !defined(__xpv)

	ENTRY(setcr3)
	movq	%rdi, %cr3
	ret
	SET_SIZE(setcr3)

	ENTRY(reload_cr3)
	movq	%cr3, %rdi
	movq	%rdi, %cr3
	ret
	SET_SIZE(reload_cr3)

#endif	/* __xpv */

	ENTRY(getcr4)
	movq	%cr4, %rax
	ret
	SET_SIZE(getcr4)

	ENTRY(setcr4)
	movq	%rdi, %cr4
	ret
	SET_SIZE(setcr4)

	ENTRY(getcr8)
	movq	%cr8, %rax
	ret
	SET_SIZE(getcr8)

	ENTRY(setcr8)
	movq	%rdi, %cr8
	ret
	SET_SIZE(setcr8)

#elif defined(__i386)

	ENTRY(getcr0)
	movl	%cr0, %eax
	ret
	SET_SIZE(getcr0)

	ENTRY(setcr0)
	movl	4(%esp), %eax
	movl	%eax, %cr0
	ret
	SET_SIZE(setcr0)

	ENTRY(getcr2)
#if defined(__xpv)
	/* under Xen, %cr2 is read from the shared vcpu info structure */
	movl	%gs:CPU_VCPU_INFO, %eax
	movl	VCPU_INFO_ARCH_CR2(%eax), %eax
#else
	movl	%cr2, %eax
#endif
	ret
	SET_SIZE(getcr2)

	ENTRY(getcr3)
	movl	%cr3, %eax
	ret
	SET_SIZE(getcr3)

#if !defined(__xpv)

	ENTRY(setcr3)
	movl	4(%esp), %eax
	movl	%eax, %cr3
	ret
	SET_SIZE(setcr3)

	ENTRY(reload_cr3)
	movl	%cr3, %eax
	movl	%eax, %cr3
	ret
	SET_SIZE(reload_cr3)

#endif	/* __xpv */

	ENTRY(getcr4)
	movl	%cr4, %eax
	ret
	SET_SIZE(getcr4)

	ENTRY(setcr4)
	movl	4(%esp), %eax
	movl	%eax, %cr4
	ret
	SET_SIZE(setcr4)

#endif	/* __i386 */
#endif	/* __lint */
627
#if defined(__lint)

/*ARGSUSED*/
uint32_t
__cpuid_insn(struct cpuid_regs *regs)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Execute cpuid: *regs supplies the input values and receives
	 * the output values.  %rbx/%rcx/%rdx are preserved for the caller.
	 */
	ENTRY(__cpuid_insn)
	movq	%rbx, %r8
	movq	%rcx, %r9
	movq	%rdx, %r11
	movl	(%rdi), %eax		/* %eax = regs->cp_eax */
	movl	0x4(%rdi), %ebx		/* %ebx = regs->cp_ebx */
	movl	0x8(%rdi), %ecx		/* %ecx = regs->cp_ecx */
	movl	0xc(%rdi), %edx		/* %edx = regs->cp_edx */
	cpuid
	movl	%eax, (%rdi)		/* regs->cp_eax = %eax */
	movl	%ebx, 0x4(%rdi)		/* regs->cp_ebx = %ebx */
	movl	%ecx, 0x8(%rdi)		/* regs->cp_ecx = %ecx */
	movl	%edx, 0xc(%rdi)		/* regs->cp_edx = %edx */
	movq	%r8, %rbx
	movq	%r9, %rcx
	movq	%r11, %rdx
	ret
	SET_SIZE(__cpuid_insn)

#elif defined(__i386)

	ENTRY(__cpuid_insn)
	pushl	%ebp
	movl	0x8(%esp), %ebp		/* %ebp = regs */
	pushl	%ebx
	pushl	%ecx
	pushl	%edx
	movl	(%ebp), %eax		/* %eax = regs->cp_eax */
	movl	0x4(%ebp), %ebx		/* %ebx = regs->cp_ebx */
	movl	0x8(%ebp), %ecx		/* %ecx = regs->cp_ecx */
	movl	0xc(%ebp), %edx		/* %edx = regs->cp_edx */
	cpuid
	movl	%eax, (%ebp)		/* regs->cp_eax = %eax */
	movl	%ebx, 0x4(%ebp)		/* regs->cp_ebx = %ebx */
	movl	%ecx, 0x8(%ebp)		/* regs->cp_ecx = %ecx */
	movl	%edx, 0xc(%ebp)		/* regs->cp_edx = %edx */
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%ebp
	ret
	SET_SIZE(__cpuid_insn)

#endif	/* __i386 */
#endif	/* __lint */
684
#if defined(__xpv)
	/*
	 * Defined in C
	 */
#else

#if defined(__lint)

/*ARGSUSED*/
void
i86_monitor(volatile uint32_t *addr, uint32_t extensions, uint32_t hints)
{ return; }

#else   /* __lint */

#if defined(__amd64)

	ENTRY_NP(i86_monitor)
	pushq	%rbp
	movq	%rsp, %rbp
	movq	%rdi, %rax		/* addr */
	movq	%rsi, %rcx		/* extensions */
	/* rdx contains input arg3: hints */
	clflush	(%rax)
	.byte	0x0f, 0x01, 0xc8	/* monitor */
	leave
	ret
	SET_SIZE(i86_monitor)

#elif defined(__i386)

ENTRY_NP(i86_monitor)
	pushl	%ebp
	movl	%esp, %ebp
	movl	0x8(%ebp),%eax		/* addr */
	movl	0xc(%ebp),%ecx		/* extensions */
	movl	0x10(%ebp),%edx		/* hints */
	clflush	(%eax)
	.byte	0x0f, 0x01, 0xc8	/* monitor */
	leave
	ret
	SET_SIZE(i86_monitor)

#endif	/* __i386 */
#endif	/* __lint */
730
#if defined(__lint)

/*ARGSUSED*/
void
i86_mwait(uint32_t data, uint32_t extensions)
{ return; }

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(i86_mwait)
	pushq	%rbp
	movq	%rsp, %rbp
	movq	%rdi, %rax		/* data */
	movq	%rsi, %rcx		/* extensions */
	.byte	0x0f, 0x01, 0xc9	/* mwait */
	leave
	ret
	SET_SIZE(i86_mwait)

#elif defined(__i386)

	ENTRY_NP(i86_mwait)
	pushl	%ebp
	movl	%esp, %ebp
	movl	0x8(%ebp),%eax		/* data */
	movl	0xc(%ebp),%ecx		/* extensions */
	.byte	0x0f, 0x01, 0xc9	/* mwait */
	leave
	ret
	SET_SIZE(i86_mwait)

#endif	/* __i386 */
#endif	/* __lint */
766
#if defined(__lint)

hrtime_t
tsc_read(void)
{
	return (0);
}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Read the time stamp counter, serialized with a cpuid.
	 * NOTE(review): the _*_start/_end global label pairs below appear
	 * to delimit alternate bodies (mfence, rdtscp, lfence, no-rdtsc)
	 * selected/patched in elsewhere at boot -- confirm against callers.
	 */
	ENTRY_NP(tsc_read)
	movq	%rbx, %r11
	movl	$0, %eax
	cpuid
	rdtsc
	movq	%r11, %rbx
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	.globl _tsc_mfence_start
_tsc_mfence_start:
	mfence
	rdtsc
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	.globl _tsc_mfence_end
_tsc_mfence_end:
	.globl _tscp_start
_tscp_start:
	.byte	0x0f, 0x01, 0xf9	/* rdtscp instruction */
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	.globl _tscp_end
_tscp_end:
	.globl _no_rdtsc_start
_no_rdtsc_start:
	xorl	%edx, %edx
	xorl	%eax, %eax
	ret
	.globl _no_rdtsc_end
_no_rdtsc_end:
	.globl _tsc_lfence_start
_tsc_lfence_start:
	lfence
	rdtsc
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	.globl _tsc_lfence_end
_tsc_lfence_end:
	SET_SIZE(tsc_read)

#else /* __i386 */

	ENTRY_NP(tsc_read)
	pushl	%ebx
	movl	$0, %eax
	cpuid
	rdtsc
	popl	%ebx
	ret
	.globl _tsc_mfence_start
_tsc_mfence_start:
	mfence
	rdtsc
	ret
	.globl _tsc_mfence_end
_tsc_mfence_end:
	.globl	_tscp_start
_tscp_start:
	.byte	0x0f, 0x01, 0xf9	/* rdtscp instruction */
	ret
	.globl _tscp_end
_tscp_end:
	.globl _no_rdtsc_start
_no_rdtsc_start:
	xorl	%edx, %edx
	xorl	%eax, %eax
	ret
	.globl _no_rdtsc_end
_no_rdtsc_end:
	.globl _tsc_lfence_start
_tsc_lfence_start:
	lfence
	rdtsc
	ret
	.globl _tsc_lfence_end
_tsc_lfence_end:
	SET_SIZE(tsc_read)

#endif	/* __i386 */

#endif	/* __lint */


#endif	/* __xpv */
867
#ifdef __lint
/*
 * Do not use this function for obtaining clock tick.  This
 * is called by callers who do not need to have a guarenteed
 * correct tick value.  The proper routine to use is tsc_read().
 */
hrtime_t
randtick(void)
{
	return (0);
}
#else
#if defined(__amd64)
	ENTRY_NP(randtick)
	rdtsc
	shlq    $32, %rdx
	orq     %rdx, %rax
	ret
	SET_SIZE(randtick)
#else
	ENTRY_NP(randtick)
	rdtsc
	ret
	SET_SIZE(randtick)
#endif /* __i386 */
#endif /* __lint */
/*
 * Insert entryp after predp in a doubly linked list.
 */

#if defined(__lint)

/*ARGSUSED*/
void
_insque(caddr_t entryp, caddr_t predp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(_insque)
	movq	(%rsi), %rax		/* predp->forw			*/
	movq	%rsi, CPTRSIZE(%rdi)	/* entryp->back = predp		*/
	movq	%rax, (%rdi)		/* entryp->forw = predp->forw	*/
	movq	%rdi, (%rsi)		/* predp->forw = entryp		*/
	movq	%rdi, CPTRSIZE(%rax)	/* predp->forw->back = entryp	*/
	ret
	SET_SIZE(_insque)

#elif defined(__i386)

	ENTRY(_insque)
	movl	8(%esp), %edx
	movl	4(%esp), %ecx
	movl	(%edx), %eax		/* predp->forw			*/
	movl	%edx, CPTRSIZE(%ecx)	/* entryp->back = predp		*/
	movl	%eax, (%ecx)		/* entryp->forw = predp->forw	*/
	movl	%ecx, (%edx)		/* predp->forw = entryp		*/
	movl	%ecx, CPTRSIZE(%eax)	/* predp->forw->back = entryp	*/
	ret
	SET_SIZE(_insque)

#endif	/* __i386 */
#endif	/* __lint */
933
/*
 * Remove entryp from a doubly linked list
 */

#if defined(__lint)

/*ARGSUSED*/
void
_remque(caddr_t entryp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(_remque)
	movq	(%rdi), %rax		/* entry->forw */
	movq	CPTRSIZE(%rdi), %rdx	/* entry->back */
	movq	%rax, (%rdx)		/* entry->back->forw = entry->forw */
	movq	%rdx, CPTRSIZE(%rax)	/* entry->forw->back = entry->back */
	ret
	SET_SIZE(_remque)

#elif defined(__i386)

	ENTRY(_remque)
	movl	4(%esp), %ecx
	movl	(%ecx), %eax		/* entry->forw */
	movl	CPTRSIZE(%ecx), %edx	/* entry->back */
	movl	%eax, (%edx)		/* entry->back->forw = entry->forw */
	movl	%edx, CPTRSIZE(%eax)	/* entry->forw->back = entry->back */
	ret
	SET_SIZE(_remque)

#endif	/* __i386 */
#endif	/* __lint */
970
/*
 * Returns the number of
 * non-NULL bytes in string argument.
 */

#if defined(__lint)

/* ARGSUSED */
size_t
strlen(const char *str)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

/*
 * This is close to a simple transliteration of a C version of this
 * routine.  We should either just -make- this be a C version, or
 * justify having it in assembler by making it significantly faster.
 *
 * size_t
 * strlen(const char *s)
 * {
 *	const char *s0;
 * #if defined(DEBUG)
 *	if ((uintptr_t)s < KERNELBASE)
 *		panic(.str_panic_msg);
 * #endif
 *	for (s0 = s; *s; s++)
 *		;
 *	return (s - s0);
 * }
 */

	ENTRY(strlen)
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jae	str_valid
	pushq	%rbp
	movq	%rsp, %rbp
	leaq	.str_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
#endif	/* DEBUG */
str_valid:
	cmpb	$0, (%rdi)
	movq	%rdi, %rax
	je	.null_found
	.align	4
.strlen_loop:
	incq	%rdi
	cmpb	$0, (%rdi)
	jne	.strlen_loop
.null_found:
	subq	%rax, %rdi
	movq	%rdi, %rax
	ret
	SET_SIZE(strlen)

#elif defined(__i386)

	ENTRY(strlen)
#ifdef DEBUG
	movl	postbootkernelbase, %eax
	cmpl	%eax, 4(%esp)
	jae	str_valid
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.str_panic_msg
	call	panic
#endif /* DEBUG */

str_valid:
	movl	4(%esp), %eax		/* %eax = string address */
	testl	$3, %eax		/* if %eax not word aligned */
	jnz	.not_word_aligned	/* goto .not_word_aligned */
	.align	4
.word_aligned:
	movl	(%eax), %edx		/* move 1 word from (%eax) to %edx */
	movl	$0x7f7f7f7f, %ecx
	andl	%edx, %ecx		/* %ecx = %edx & 0x7f7f7f7f */
	addl	$4, %eax		/* next word */
	addl	$0x7f7f7f7f, %ecx	/* %ecx += 0x7f7f7f7f */
	orl	%edx, %ecx		/* %ecx |= %edx */
	andl	$0x80808080, %ecx	/* %ecx &= 0x80808080 */
	cmpl	$0x80808080, %ecx	/* if no null byte in this word */
	je	.word_aligned		/* goto .word_aligned */
	subl	$4, %eax		/* post-incremented */
.not_word_aligned:
	cmpb	$0, (%eax)		/* if a byte in (%eax) is null */
	je	.null_found		/* goto .null_found */
	incl	%eax			/* next byte */
	testl	$3, %eax		/* if %eax not word aligned */
	jnz	.not_word_aligned	/* goto .not_word_aligned */
	jmp	.word_aligned		/* goto .word_aligned */
	.align	4
.null_found:
	subl	4(%esp), %eax		/* %eax -= string address */
	ret
	SET_SIZE(strlen)

#endif	/* __i386 */

#ifdef DEBUG
	.text
.str_panic_msg:
	.string "strlen: argument below kernelbase"
#endif /* DEBUG */

#endif	/* __lint */
1083
1084	/*
1085	 * Berkeley 4.3 introduced symbolically named interrupt levels
1086	 * as a way deal with priority in a machine independent fashion.
1087	 * Numbered priorities are machine specific, and should be
1088	 * discouraged where possible.
1089	 *
1090	 * Note, for the machine specific priorities there are
1091	 * examples listed for devices that use a particular priority.
1092	 * It should not be construed that all devices of that
1093	 * type should be at that priority.  It is currently were
1094	 * the current devices fit into the priority scheme based
1095	 * upon time criticalness.
1096	 *
1097	 * The underlying assumption of these assignments is that
1098	 * IPL 10 is the highest level from which a device
1099	 * routine can call wakeup.  Devices that interrupt from higher
1100	 * levels are restricted in what they can do.  If they need
1101	 * kernels services they should schedule a routine at a lower
1102	 * level (via software interrupt) to do the required
1103	 * processing.
1104	 *
1105	 * Examples of this higher usage:
1106	 *	Level	Usage
1107	 *	14	Profiling clock (and PROM uart polling clock)
1108	 *	12	Serial ports
1109	 *
1110	 * The serial ports request lower level processing on level 6.
1111	 *
1112	 * Also, almost all splN routines (where N is a number or a
1113	 * mnemonic) will do a RAISE(), on the assumption that they are
1114	 * never used to lower our priority.
1115	 * The exceptions are:
1116	 *	spl8()		Because you can't be above 15 to begin with!
1117	 *	splzs()		Because this is used at boot time to lower our
1118	 *			priority, to allow the PROM to poll the uart.
1119	 *	spl0()		Used to lower priority to 0.
1120	 */
1121
1122#if defined(__lint)
1123
1124int spl0(void)		{ return (0); }
1125int spl6(void)		{ return (0); }
1126int spl7(void)		{ return (0); }
1127int spl8(void)		{ return (0); }
1128int splhigh(void)	{ return (0); }
1129int splhi(void)		{ return (0); }
1130int splzs(void)		{ return (0); }
1131
1132/* ARGSUSED */
1133void
1134splx(int level)
1135{}
1136
1137#else	/* __lint */
1138
1139#if defined(__amd64)
1140
1141#define	SETPRI(level) \
1142	movl	$/**/level, %edi;	/* new priority */		\
1143	jmp	do_splx			/* redirect to do_splx */
1144
1145#define	RAISE(level) \
1146	movl	$/**/level, %edi;	/* new priority */		\
1147	jmp	splr			/* redirect to splr */
1148
1149#elif defined(__i386)
1150
1151#define	SETPRI(level) \
1152	pushl	$/**/level;	/* new priority */			\
1153	call	do_splx;	/* invoke common splx code */		\
1154	addl	$4, %esp;	/* unstack arg */			\
1155	ret
1156
1157#define	RAISE(level) \
1158	pushl	$/**/level;	/* new priority */			\
1159	call	splr;		/* invoke common splr code */		\
1160	addl	$4, %esp;	/* unstack args */			\
1161	ret
1162
1163#endif	/* __i386 */
1164
1165	/* locks out all interrupts, including memory errors */
1166	ENTRY(spl8)
1167	SETPRI(15)
1168	SET_SIZE(spl8)
1169
1170	/* just below the level that profiling runs */
1171	ENTRY(spl7)
1172	RAISE(13)
1173	SET_SIZE(spl7)
1174
1175	/* sun specific - highest priority onboard serial i/o asy ports */
1176	ENTRY(splzs)
1177	SETPRI(12)	/* Can't be a RAISE, as it's used to lower us */
1178	SET_SIZE(splzs)
1179
1180	ENTRY(splhi)
1181	ALTENTRY(splhigh)
1182	ALTENTRY(spl6)
1183	ALTENTRY(i_ddi_splhigh)
1184
1185	RAISE(DISP_LEVEL)
1186
1187	SET_SIZE(i_ddi_splhigh)
1188	SET_SIZE(spl6)
1189	SET_SIZE(splhigh)
1190	SET_SIZE(splhi)
1191
1192	/* allow all interrupts */
1193	ENTRY(spl0)
1194	SETPRI(0)
1195	SET_SIZE(spl0)
1196
1197
1198	/* splx implementation */
1199	ENTRY(splx)
1200	jmp	do_splx		/* redirect to common splx code */
1201	SET_SIZE(splx)
1202
1203#endif	/* __lint */
1204
#if defined(__i386)

/*
 * Read and write the %gs register
 */

#if defined(__lint)

/*ARGSUSED*/
uint16_t
getgs(void)
{ return (0); }

/*ARGSUSED*/
void
setgs(uint16_t sel)
{}

#else	/* __lint */

	ENTRY(getgs)
	clr	%eax
	movw	%gs, %ax
	ret
	SET_SIZE(getgs)

	ENTRY(setgs)
	movw	4(%esp), %gs
	ret
	SET_SIZE(setgs)

#endif	/* __lint */
#endif	/* __i386 */
1238
#if defined(__lint)

void
pc_reset(void)
{}

void
efi_reset(void)
{}

#else	/* __lint */

	/* spin for ~500ms by calling tenmicrosec() 50000 times */
	ENTRY(wait_500ms)
	push	%ebx
	movl	$50000, %ebx
1:
	call	tenmicrosec
	decl	%ebx
	jnz	1b
	pop	%ebx
	ret
	SET_SIZE(wait_500ms)

#define	RESET_METHOD_KBC	1
#define	RESET_METHOD_PORT92	2
#define RESET_METHOD_PCI	4

	DGDEF3(pc_reset_methods, 4, 8)
	.long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;

	ENTRY(pc_reset)

#if defined(__i386)
	testl	$RESET_METHOD_KBC, pc_reset_methods
#elif defined(__amd64)
	testl	$RESET_METHOD_KBC, pc_reset_methods(%rip)
#endif
	jz	1f

	/
	/ Try the classic keyboard controller-triggered reset.
	/
	movw	$0x64, %dx
	movb	$0xfe, %al
	outb	(%dx)

	/ Wait up to 500 milliseconds here for the keyboard controller
	/ to pull the reset line.  On some systems where the keyboard
	/ controller is slow to pull the reset line, the next reset method
	/ may be executed (which may be bad if those systems hang when the
	/ next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
	/ and Ferrari 4000 (doesn't like the cf9 reset method))

	call	wait_500ms

1:
#if defined(__i386)
	testl	$RESET_METHOD_PORT92, pc_reset_methods
#elif defined(__amd64)
	testl	$RESET_METHOD_PORT92, pc_reset_methods(%rip)
#endif
	jz	3f

	/
	/ Try port 0x92 fast reset
	/
	movw	$0x92, %dx
	inb	(%dx)
	cmpb	$0xff, %al	/ If port's not there, we should get back 0xFF
	je	1f
	testb	$1, %al		/ If bit 0
	jz	2f		/ is clear, jump to perform the reset
	andb	$0xfe, %al	/ otherwise,
	outb	(%dx)		/ clear bit 0 first, then
2:
	orb	$1, %al		/ Set bit 0
	outb	(%dx)		/ and reset the system
1:

	call	wait_500ms

3:
#if defined(__i386)
	testl	$RESET_METHOD_PCI, pc_reset_methods
#elif defined(__amd64)
	testl	$RESET_METHOD_PCI, pc_reset_methods(%rip)
#endif
	jz	4f

	/ Try the PCI (soft) reset vector (should work on all modern systems,
	/ but has been shown to cause problems on 450NX systems, and some newer
	/ systems (e.g. ATI IXP400-equipped systems))
	/ When resetting via this method, 2 writes are required.  The first
	/ targets bit 1 (0=hard reset without power cycle, 1=hard reset with
	/ power cycle).
	/ The reset occurs on the second write, during bit 2's transition from
	/ 0->1.
	movw	$0xcf9, %dx
	movb	$0x2, %al	/ Reset mode = hard, no power cycle
	outb	(%dx)
	movb	$0x6, %al
	outb	(%dx)

	call	wait_500ms

4:
	/
	/ port 0xcf9 failed also.  Last-ditch effort is to
	/ triple-fault the CPU.
	/ Also, use triple fault for EFI firmware
	/
	ENTRY(efi_reset)
#if defined(__amd64)
	pushq	$0x0
	pushq	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
	lidt	(%rsp)
#elif defined(__i386)
	pushl	$0x0
	pushl	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
	lidt	(%esp)
#endif
	int	$0x0		/ Trigger interrupt, generate triple-fault

	cli
	hlt			/ Wait forever
	/*NOTREACHED*/
	SET_SIZE(efi_reset)
	SET_SIZE(pc_reset)

#endif	/* __lint */
1369
/*
 * C callable in and out routines
 */

#if defined(__lint)

/* ARGSUSED */
void
outl(int port_address, uint32_t val)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(outl)
	movw	%di, %dx
	movl	%esi, %eax
	outl	(%dx)
	ret
	SET_SIZE(outl)

#elif defined(__i386)

	.set	PORT, 4
	.set	VAL, 8

	ENTRY(outl)
	movw	PORT(%esp), %dx
	movl	VAL(%esp), %eax
	outl	(%dx)
	ret
	SET_SIZE(outl)

#endif	/* __i386 */
#endif	/* __lint */
1406
#if defined(__lint)

/* ARGSUSED */
void
outw(int port_address, uint16_t val)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void outw(int port_address, uint16_t val)
	 * Write a 16-bit value to an I/O port.  The D16 operand-size
	 * prefix turns `outl' into a 16-bit out.
	 */
	ENTRY(outw)
	movw	%di, %dx
	movw	%si, %ax
	D16 outl (%dx)		/* XX64 why not outw? */
	ret
	SET_SIZE(outw)

#elif defined(__i386)

	/* PORT/VAL stack offsets are defined above with outl() */
	ENTRY(outw)
	movw	PORT(%esp), %dx
	movw	VAL(%esp), %ax
	D16 outl (%dx)
	ret
	SET_SIZE(outw)

#endif	/* __i386 */
#endif	/* __lint */
1436
#if defined(__lint)

/* ARGSUSED */
void
outb(int port_address, uint8_t val)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void outb(int port_address, uint8_t val)
	 * Write an 8-bit value to an I/O port.
	 */
	ENTRY(outb)
	movw	%di, %dx
	movb	%sil, %al
	outb	(%dx)
	ret
	SET_SIZE(outb)

#elif defined(__i386)

	/* PORT/VAL stack offsets are defined above with outl() */
	ENTRY(outb)
	movw	PORT(%esp), %dx
	movb	VAL(%esp), %al
	outb	(%dx)
	ret
	SET_SIZE(outb)

#endif	/* __i386 */
#endif	/* __lint */
1466
#if defined(__lint)

/* ARGSUSED */
uint32_t
inl(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * uint32_t inl(int port_address)
	 * Read a 32-bit value from an I/O port; result returned in %eax.
	 */
	ENTRY(inl)
	xorl	%eax, %eax
	movw	%di, %dx
	inl	(%dx)
	ret
	SET_SIZE(inl)

#elif defined(__i386)

	/* PORT stack offset is defined above with outl() */
	ENTRY(inl)
	movw	PORT(%esp), %dx
	inl	(%dx)
	ret
	SET_SIZE(inl)

#endif	/* __i386 */
#endif	/* __lint */
1495
#if defined(__lint)

/* ARGSUSED */
uint16_t
inw(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * uint16_t inw(int port_address)
	 * Read a 16-bit value from an I/O port.  %eax is zeroed first so
	 * the upper bits of the return value are clean; the D16 prefix
	 * turns `inl' into a 16-bit in.
	 */
	ENTRY(inw)
	xorl	%eax, %eax
	movw	%di, %dx
	D16 inl	(%dx)
	ret
	SET_SIZE(inw)

#elif defined(__i386)

	/* PORT stack offset is defined above with outl() */
	ENTRY(inw)
	xorl	%eax, %eax	/* zero %eax; xorl matches the amd64 idiom */
	movw	PORT(%esp), %dx
	D16 inl	(%dx)
	ret
	SET_SIZE(inw)

#endif	/* __i386 */
#endif	/* __lint */
1525
1526
#if defined(__lint)

/* ARGSUSED */
uint8_t
inb(int port_address)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * uint8_t inb(int port_address)
	 * Read an 8-bit value from an I/O port.  %eax is zeroed first so
	 * the upper bits of the return value are clean.
	 */
	ENTRY(inb)
	xorl	%eax, %eax
	movw	%di, %dx
	inb	(%dx)
	ret
	SET_SIZE(inb)

#elif defined(__i386)

	/* PORT stack offset is defined above with outl() */
	ENTRY(inb)
	xorl	%eax, %eax	/* zero %eax; xorl matches the amd64 idiom */
	movw	PORT(%esp), %dx
	inb	(%dx)
	ret
	SET_SIZE(inb)

#endif	/* __i386 */
#endif	/* __lint */
1556
1557
#if defined(__lint)

/* ARGSUSED */
void
repoutsw(int port, uint16_t *addr, int cnt)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repoutsw(int port, uint16_t *addr, int cnt)
	 * Write `cnt' 16-bit words from `addr' to an I/O port.
	 * amd64: port %edi, addr %rsi (already where outs wants it),
	 * count %edx -> %ecx.  D16 outsl is a 16-bit outsw.
	 */
	ENTRY(repoutsw)
	movl	%edx, %ecx
	movw	%di, %dx
	rep
	  D16 outsl
	ret
	SET_SIZE(repoutsw)

#elif defined(__i386)

	/*
	 * The arguments and saved registers are on the stack in the
	 *  following order:
	 *      |  cnt  |  +16
	 *      | *addr |  +12
	 *      | port  |  +8
	 *      |  eip  |  +4
	 *      |  esi  |  <-- %esp
	 * If additional values are pushed onto the stack, make sure
	 * to adjust the following constants accordingly.
	 */
	/* note: redefines PORT (from 4) to account for the saved %esi */
	.set	PORT, 8
	.set	ADDR, 12
	.set	COUNT, 16

	ENTRY(repoutsw)
	pushl	%esi
	movl	PORT(%esp), %edx
	movl	ADDR(%esp), %esi
	movl	COUNT(%esp), %ecx
	rep
	  D16 outsl
	popl	%esi
	ret
	SET_SIZE(repoutsw)

#endif	/* __i386 */
#endif	/* __lint */
1607
1608
#if defined(__lint)

/* ARGSUSED */
void
repinsw(int port_addr, uint16_t *addr, int cnt)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repinsw(int port_addr, uint16_t *addr, int cnt)
	 * Read `cnt' 16-bit words from an I/O port into `addr'.
	 * ins stores through %rdi, which already holds... the port; note
	 * the destination for ins is %rdi per hardware, but here %rdi's
	 * low word becomes the port after addr stays in %rsi -- see the
	 * amd64 ABI arg order: port %edi, addr %rsi, cnt %edx.
	 * NOTE(review): ins writes to (%rdi); presumably D16 insl here
	 * relies on %rdi still holding addr-compatible state -- compare
	 * with repinsb below which does movq %rsi, %rdi.  Confirm.
	 */
	ENTRY(repinsw)
	movl	%edx, %ecx
	movw	%di, %dx
	rep
	  D16 insl
	ret
	SET_SIZE(repinsw)

#elif defined(__i386)

	/* PORT/ADDR/COUNT stack offsets defined above with repoutsw() */
	ENTRY(repinsw)
	pushl	%edi
	movl	PORT(%esp), %edx
	movl	ADDR(%esp), %edi
	movl	COUNT(%esp), %ecx
	rep
	  D16 insl
	popl	%edi
	ret
	SET_SIZE(repinsw)

#endif	/* __i386 */
#endif	/* __lint */
1643
1644
#if defined(__lint)

/* ARGSUSED */
void
repinsb(int port, uint8_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repinsb(int port, uint8_t *addr, int count)
	 * Read `count' bytes from an I/O port into `addr'.
	 * ins stores through %rdi, so move addr (%rsi) there after
	 * extracting the port number from %edi.
	 */
	ENTRY(repinsb)
	movl	%edx, %ecx
	movw	%di, %dx
	movq	%rsi, %rdi
	rep
	  insb
	ret
	SET_SIZE(repinsb)

#elif defined(__i386)

	/*
	 * The arguments and saved registers are on the stack in the
	 *  following order:
	 *      |  cnt  |  +16
	 *      | *addr |  +12
	 *      | port  |  +8
	 *      |  eip  |  +4
	 *      |  esi  |  <-- %esp
	 * If additional values are pushed onto the stack, make sure
	 * to adjust the following constants accordingly.
	 */
	/* IO_* offsets are also used by repinsd/repoutsb/repoutsd below */
	.set	IO_PORT, 8
	.set	IO_ADDR, 12
	.set	IO_COUNT, 16

	ENTRY(repinsb)
	pushl	%edi
	movl	IO_ADDR(%esp), %edi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  insb
	popl	%edi
	ret
	SET_SIZE(repinsb)

#endif	/* __i386 */
#endif	/* __lint */
1695
1696
1697/*
1698 * Input a stream of 32-bit words.
1699 * NOTE: count is a DWORD count.
1700 */
#if defined(__lint)

/* ARGSUSED */
void
repinsd(int port, uint32_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repinsd(int port, uint32_t *addr, int count)
	 * Read `count' 32-bit words from an I/O port into `addr'.
	 * ins stores through %rdi, so move addr (%rsi) there.
	 */
	ENTRY(repinsd)
	movl	%edx, %ecx
	movw	%di, %dx
	movq	%rsi, %rdi
	rep
	  insl
	ret
	SET_SIZE(repinsd)

#elif defined(__i386)

	/* IO_* stack offsets defined above with repinsb() */
	ENTRY(repinsd)
	pushl	%edi
	movl	IO_ADDR(%esp), %edi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  insl
	popl	%edi
	ret
	SET_SIZE(repinsd)

#endif	/* __i386 */
#endif	/* __lint */
1736
1737/*
1738 * Output a stream of bytes
1739 * NOTE: count is a byte count
1740 */
#if defined(__lint)

/* ARGSUSED */
void
repoutsb(int port, uint8_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repoutsb(int port, uint8_t *addr, int count)
	 * Write `count' bytes from `addr' to an I/O port.
	 * outs reads from %rsi, which already holds addr.
	 */
	ENTRY(repoutsb)
	movl	%edx, %ecx
	movw	%di, %dx
	rep
	  outsb
	ret
	SET_SIZE(repoutsb)

#elif defined(__i386)

	/* IO_* stack offsets defined above with repinsb() */
	ENTRY(repoutsb)
	pushl	%esi
	movl	IO_ADDR(%esp), %esi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  outsb
	popl	%esi
	ret
	SET_SIZE(repoutsb)

#endif	/* __i386 */
#endif	/* __lint */
1775
1776/*
1777 * Output a stream of 32-bit words
1778 * NOTE: count is a DWORD count
1779 */
#if defined(__lint)

/* ARGSUSED */
void
repoutsd(int port, uint32_t *addr, int count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void repoutsd(int port, uint32_t *addr, int count)
	 * Write `count' 32-bit words from `addr' to an I/O port.
	 * outs reads from %rsi, which already holds addr.
	 */
	ENTRY(repoutsd)
	movl	%edx, %ecx
	movw	%di, %dx
	rep
	  outsl
	ret
	SET_SIZE(repoutsd)

#elif defined(__i386)

	/* IO_* stack offsets defined above with repinsb() */
	ENTRY(repoutsd)
	pushl	%esi
	movl	IO_ADDR(%esp), %esi
	movl	IO_COUNT(%esp), %ecx
	movl	IO_PORT(%esp), %edx
	rep
	  outsl
	popl	%esi
	ret
	SET_SIZE(repoutsd)

#endif	/* __i386 */
#endif	/* __lint */
1814
1815/*
1816 * void int3(void)
1817 * void int18(void)
1818 * void int20(void)
1819 * void int_cmci(void)
1820 */
1821
#if defined(__lint)

void
int3(void)
{}

void
int18(void)
{}

void
int20(void)
{}

void
int_cmci(void)
{}

#else	/* __lint */

	/* int3: raise a breakpoint trap (vector T_BPTFLT) */
	ENTRY(int3)
	int	$T_BPTFLT
	ret
	SET_SIZE(int3)

	/* int18: raise a machine-check trap (vector T_MCE) */
	ENTRY(int18)
	int	$T_MCE
	ret
	SET_SIZE(int18)

	/*
	 * int20: enter the kernel debugger via vector T_DBGENTR, but only
	 * if the system was booted with a debugger (RB_DEBUG in boothowto);
	 * otherwise do nothing.
	 */
	ENTRY(int20)
	movl	boothowto, %eax
	andl	$RB_DEBUG, %eax
	jz	1f

	int	$T_DBGENTR
1:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(int20)

	/*
	 * int_cmci: raise the corrected machine-check interrupt.
	 * NOTE(review): raises vector T_ENOEXTFLT -- presumably this
	 * vector is reused for CMCI; confirm against the trap tables.
	 */
	ENTRY(int_cmci)
	int	$T_ENOEXTFLT
	ret
	SET_SIZE(int_cmci)

#endif	/* __lint */
1869
#if defined(__lint)

/*
 * scanc: scan cp[0..size-1] for the first byte c for which
 * (table[c] & mask) != 0; return the number of bytes remaining
 * from that byte (inclusive), or 0 if no byte matches.
 */
/* ARGSUSED */
int
scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(scanc)
					/* rdi == size */
					/* rsi == cp */
					/* rdx == table */
					/* rcx == mask */
	addq	%rsi, %rdi		/* end = &cp[size] */
.scanloop:
	cmpq	%rdi, %rsi		/* while (cp < end */
	jnb	.scandone
	movzbq	(%rsi), %r8		/* %r8 = *cp */
	incq	%rsi			/* cp++ */
	testb	%cl, (%r8, %rdx)	/* table[*cp] & mask */
	jz	.scanloop		/*  && (table[*cp] & mask) == 0) */
	decq	%rsi			/* (fix post-increment) */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	ret
	SET_SIZE(scanc)

#elif defined(__i386)

	ENTRY(scanc)
	pushl	%edi
	pushl	%esi
	movb	24(%esp), %cl		/* mask = %cl */
	movl	16(%esp), %esi		/* cp = %esi */
	movl	20(%esp), %edx		/* table = %edx */
	movl	%esi, %edi
	addl	12(%esp), %edi		/* end = &cp[size]; */
.scanloop:
	cmpl	%edi, %esi		/* while (cp < end */
	jnb	.scandone
	movzbl	(%esi),  %eax		/* %al = *cp */
	incl	%esi			/* cp++ */
	movb	(%edx,  %eax), %al	/* %al = table[*cp] */
	testb	%al, %cl
	jz	.scanloop		/*   && (table[*cp] & mask) == 0) */
	dec	%esi			/* post-incremented */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	popl	%esi
	popl	%edi
	ret
	SET_SIZE(scanc)

#endif	/* __i386 */
#endif	/* __lint */
1930
1931/*
1932 * Replacement functions for ones that are normally inlined.
1933 * In addition to the copy in i86.il, they are defined here just in case.
1934 */
1935
#if defined(__lint)

/*
 * intr_clear / clear_int_flag: disable interrupts and return the
 * previous flags-register value (so the caller can later restore
 * the prior interrupt state with intr_restore()).
 */
ulong_t
intr_clear(void)
{ return (0); }

ulong_t
clear_int_flag(void)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(intr_clear)
	ENTRY(clear_int_flag)
	pushfq
	popq	%rax		/* %rax = current flags */
#if defined(__xpv)
	/*
	 * Under the hypervisor, unless we are panicking, interrupts are
	 * virtualized: disable event delivery and synthesize PS_IE in
	 * the returned flags from the previous event mask.
	 */
	leaq	xpv_panicking, %rdi
	movl	(%rdi), %edi
	cmpl	$0, %edi
	jne	2f
	CLIRET(%rdi, %dl)	/* returns event mask in %dl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andq    $_BITNOT(PS_IE), %rax
	testb	$1, %dl
	jnz	1f
	orq	$PS_IE, %rax
1:
	ret
2:
#endif
	CLI(%rdi)		/* disable interrupts */
	ret
	SET_SIZE(clear_int_flag)
	SET_SIZE(intr_clear)

#elif defined(__i386)

	ENTRY(intr_clear)
	ENTRY(clear_int_flag)
	pushfl
	popl	%eax		/* %eax = current flags */
#if defined(__xpv)
	/* see amd64 version above for the rationale */
	leal	xpv_panicking, %edx
	movl	(%edx), %edx
	cmpl	$0, %edx
	jne	2f
	CLIRET(%edx, %cl)	/* returns event mask in %cl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andl    $_BITNOT(PS_IE), %eax
	testb	$1, %cl
	jnz	1f
	orl	$PS_IE, %eax
1:
	ret
2:
#endif
	CLI(%edx)		/* disable interrupts */
	ret
	SET_SIZE(clear_int_flag)
	SET_SIZE(intr_clear)

#endif	/* __i386 */
#endif	/* __lint */
2006
#if defined(__lint)

/* curcpup: return the current CPU's cpu_t, read from %gs */
struct cpu *
curcpup(void)
{ return 0; }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(curcpup)
	movq	%gs:CPU_SELF, %rax	/* cpu_t self-pointer */
	ret
	SET_SIZE(curcpup)

#elif defined(__i386)

	ENTRY(curcpup)
	movl	%gs:CPU_SELF, %eax	/* cpu_t self-pointer */
	ret
	SET_SIZE(curcpup)

#endif	/* __i386 */
#endif	/* __lint */
2031
/* htonll(), ntohll(), htonl(), ntohl(), htons(), ntohs()
 * These functions reverse the byte order of the input parameter and return
 * the result.  This is used to convert the byte order from host byte order
 * (little endian) to network byte order (big endian), or vice versa.
 */
2037
#if defined(__lint)

uint64_t
htonll(uint64_t i)
{ return (i); }

uint64_t
ntohll(uint64_t i)
{ return (i); }

uint32_t
htonl(uint32_t i)
{ return (i); }

uint32_t
ntohl(uint32_t i)
{ return (i); }

uint16_t
htons(uint16_t i)
{ return (i); }

uint16_t
ntohs(uint16_t i)
{ return (i); }

#else	/* __lint */

#if defined(__amd64)

	/* 64-bit byte swap: bswapq reverses all 8 bytes */
	ENTRY(htonll)
	ALTENTRY(ntohll)
	movq	%rdi, %rax
	bswapq	%rax
	ret
	SET_SIZE(ntohll)
	SET_SIZE(htonll)

	/* XX64 there must be shorter sequences for this */
	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	%edi, %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

	/* XX64 there must be better sequences for this */
	/* swap all 4 bytes, then shift the wanted pair into the low word */
	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	%edi, %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)

#elif defined(__i386)

	/*
	 * 64-bit swap on ILP32: load the two halves crossed (low word of
	 * the argument becomes the high word of the %edx:%eax result and
	 * vice versa), then byte-swap each half.
	 */
	ENTRY(htonll)
	ALTENTRY(ntohll)
	movl	4(%esp), %edx
	movl	8(%esp), %eax
	bswap	%edx
	bswap	%eax
	ret
	SET_SIZE(ntohll)
	SET_SIZE(htonll)

	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	4(%esp), %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	4(%esp), %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)

#endif	/* __i386 */
#endif	/* __lint */
2126
2127
#if defined(__lint)

/*
 * intr_restore / restore_int_flag: re-enable interrupts iff PS_IE is
 * set in `i' (a flags value previously returned by intr_clear()).
 * Never disables interrupts.
 */
/* ARGSUSED */
void
intr_restore(ulong_t i)
{ return; }

/* ARGSUSED */
void
restore_int_flag(ulong_t i)
{ return; }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	testq	$PS_IE, %rdi	/* interrupts previously enabled? */
	jz	1f
#if defined(__xpv)
	leaq	xpv_panicking, %rsi
	movl	(%rsi), %esi
	cmpl	$0, %esi
	jne	1f
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored.
	 * The virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%rsi, %rdi)
#else
	sti
#endif
1:
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

#elif defined(__i386)

	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	testl	$PS_IE, 4(%esp)	/* interrupts previously enabled? */
	jz	1f
#if defined(__xpv)
	leal	xpv_panicking, %edx
	movl	(%edx), %edx
	cmpl	$0, %edx
	jne	1f
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored.
	 * The virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%edx, 4(%esp))
#else
	sti
#endif
1:
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

#endif	/* __i386 */
#endif	/* __lint */
2194
#if defined(__lint)

/* sti: unconditionally enable interrupts */
void
sti(void)
{}

/* cli: unconditionally disable interrupts */
void
cli(void)
{}

#else	/* __lint */

	ENTRY(sti)
	STI		/* macro: plain sti, or hypervisor event unmask */
	ret
	SET_SIZE(sti)

	ENTRY(cli)
#if defined(__amd64)
	CLI(%rax)	/* macro arg is a scratch register */
#elif defined(__i386)
	CLI(%eax)	/* macro arg is a scratch register */
#endif	/* __i386 */
	ret
	SET_SIZE(cli)

#endif	/* __lint */
2222
#if defined(__lint)

/*
 * dtrace_interrupt_disable: disable interrupts and return a cookie
 * (the previous flags value) for dtrace_interrupt_enable().
 */
dtrace_icookie_t
dtrace_interrupt_disable(void)
{ return (0); }

#else   /* __lint */

#if defined(__amd64)

	ENTRY(dtrace_interrupt_disable)
	pushfq
	popq	%rax		/* %rax = current flags (the cookie) */
#if defined(__xpv)
	/*
	 * Under the hypervisor (and not panicking), mask event delivery
	 * and synthesize PS_IE in the cookie from the old event mask.
	 */
	leaq	xpv_panicking, %rdi
	movl	(%rdi), %edi
	cmpl	$0, %edi
	jne	.dtrace_interrupt_disable_done
	CLIRET(%rdi, %dl)	/* returns event mask in %dl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andq    $_BITNOT(PS_IE), %rax
	testb	$1, %dl
	jnz	.dtrace_interrupt_disable_done
	orq	$PS_IE, %rax
#else
	CLI(%rdx)
#endif
.dtrace_interrupt_disable_done:
	ret
	SET_SIZE(dtrace_interrupt_disable)

#elif defined(__i386)

	ENTRY(dtrace_interrupt_disable)
	pushfl
	popl	%eax		/* %eax = current flags (the cookie) */
#if defined(__xpv)
	/* see amd64 version above for the rationale */
	leal	xpv_panicking, %edx
	movl	(%edx), %edx
	cmpl	$0, %edx
	jne	.dtrace_interrupt_disable_done
	CLIRET(%edx, %cl)	/* returns event mask in %cl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andl    $_BITNOT(PS_IE), %eax
	testb	$1, %cl
	jnz	.dtrace_interrupt_disable_done
	orl	$PS_IE, %eax
#else
	CLI(%edx)
#endif
.dtrace_interrupt_disable_done:
	ret
	SET_SIZE(dtrace_interrupt_disable)

#endif	/* __i386 */
#endif	/* __lint */
2283
#if defined(__lint)

/*
 * dtrace_interrupt_enable: restore the interrupt state captured in
 * `cookie' (a flags value from dtrace_interrupt_disable()) by
 * reloading the flags register.
 */
/*ARGSUSED*/
void
dtrace_interrupt_enable(dtrace_icookie_t cookie)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(dtrace_interrupt_enable)
	pushq	%rdi
	popfq			/* flags = cookie */
#if defined(__xpv)
	leaq	xpv_panicking, %rdx
	movl	(%rdx), %edx
	cmpl	$0, %edx
	jne	.dtrace_interrupt_enable_done
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored. The
	 * virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%rdx, %rdi)
#endif
.dtrace_interrupt_enable_done:
	ret
	SET_SIZE(dtrace_interrupt_enable)

#elif defined(__i386)

	ENTRY(dtrace_interrupt_enable)
	movl	4(%esp), %eax	/* cookie */
	pushl	%eax
	popfl			/* flags = cookie */
#if defined(__xpv)
	leal	xpv_panicking, %edx
	movl	(%edx), %edx
	cmpl	$0, %edx
	jne	.dtrace_interrupt_enable_done
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored. The
	 * virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%edx, %eax)
#endif
.dtrace_interrupt_enable_done:
	ret
	SET_SIZE(dtrace_interrupt_enable)

#endif	/* __i386 */
#endif	/* __lint */
2338
2339
/*
 * NOTE(review): this guard tests `lint' while the rest of this file
 * tests `__lint' -- presumably both are defined during lint passes;
 * confirm before unifying.
 */
#if defined(lint)

void
dtrace_membar_producer(void)
{}

void
dtrace_membar_consumer(void)
{}

#else	/* __lint */

	/*
	 * Memory barriers for DTrace: on x86's TSO-like memory model
	 * these need no fence instruction; an ordinary (2-byte) return
	 * suffices as a compiler-visible ordering point.
	 */
	ENTRY(dtrace_membar_producer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_producer)

	ENTRY(dtrace_membar_consumer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_consumer)

#endif	/* __lint */
2363
#if defined(__lint)

/* threadp: return the current thread pointer, read from %gs */
kthread_id_t
threadp(void)
{ return ((kthread_id_t)0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(threadp)
	movq	%gs:CPU_THREAD, %rax	/* cpu_t->cpu_thread */
	ret
	SET_SIZE(threadp)

#elif defined(__i386)

	ENTRY(threadp)
	movl	%gs:CPU_THREAD, %eax	/* cpu_t->cpu_thread */
	ret
	SET_SIZE(threadp)

#endif	/* __i386 */
#endif	/* __lint */
2388
2389/*
2390 *   Checksum routine for Internet Protocol Headers
2391 */
2392
#if defined(__lint)

/*
 * Reference (lint) implementation: sum `halfword_count' 16-bit words
 * starting at `address', fold the carries back into 16 bits, add the
 * caller's partial `sum', fold again, and return the 16-bit result.
 */
/* ARGSUSED */
unsigned int
ip_ocsum(
	ushort_t *address,	/* ptr to 1st message buffer */
	int halfword_count,	/* length of data */
	unsigned int sum)	/* partial checksum */
{
	int		i;
	unsigned int	psum = 0;	/* partial sum */

	for (i = 0; i < halfword_count; i++, address++) {
		psum += *address;
	}

	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	psum += sum;

	while ((psum >> 16) != 0) {
		psum = (psum & 0xffff) + (psum >> 16);
	}

	return (psum);
}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Assembly version: sums 64 bytes (32 halfwords) per iteration
	 * as sixteen 32-bit adds chained through the carry flag, split
	 * across two accumulators (%edx and %eax).  A trailing partial
	 * block is handled by jumping into the middle of the chain via
	 * .ip_ocsum_jmptbl.  The running sum is folded to 16 bits at
	 * .ip_ocsum_done.
	 */
	ENTRY(ip_ocsum)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	/* sanity: the buffer must be a kernel address */
	movq	postbootkernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jnb	1f
	xorl	%eax, %eax
	movq	%rdi, %rsi
	leaq	.ip_ocsum_panic_msg(%rip), %rdi
	call	panic
	/*NOTREACHED*/
.ip_ocsum_panic_msg:
	.string	"ip_ocsum: address 0x%p below kernelbase\n"
1:
#endif
	movl	%esi, %ecx	/* halfword_count */
	movq	%rdi, %rsi	/* address */
				/* partial sum in %edx */
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done
	testq	$3, %rsi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:	/* XX64 opportunities for 8-byte operations? */
.next_iter:
	/* XX64 opportunities for prefetch? */
	/* XX64 compute csum with 64 bit quantities? */
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%rsi), %edx
.only60:
	adcl	4(%rsi), %eax
.only56:
	adcl	8(%rsi), %edx
.only52:
	adcl	12(%rsi), %eax
.only48:
	adcl	16(%rsi), %edx
.only44:
	adcl	20(%rsi), %eax
.only40:
	adcl	24(%rsi), %edx
.only36:
	adcl	28(%rsi), %eax
.only32:
	adcl	32(%rsi), %edx
.only28:
	adcl	36(%rsi), %eax
.only24:
	adcl	40(%rsi), %edx
.only20:
	adcl	44(%rsi), %eax
.only16:
	adcl	48(%rsi), %edx
.only12:
	adcl	52(%rsi), %eax
.only8:
	adcl	56(%rsi), %edx
.only4:
	adcl	60(%rsi), %eax	/* could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addq	$64, %rsi
	testl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx	/* combine the two accumulators */
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	leave
	ret

.ip_csum_notaligned:
	/* consume one halfword so the pointer becomes 4-byte aligned */
	xorl	%edi, %edi
	movw	(%rsi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addq	$2, %rsi
	decl	%ecx
	jmp	.ip_csum_aligned

.less_than_32:
	addl	$32, %ecx	/* %ecx = halfwords left (0..31) */
	testl	$1, %ecx
	jz	.size_aligned
	/* odd count: add the final (unpaired) halfword separately */
	andl	$0xfe, %ecx
	movzwl	(%rsi, %rcx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	/* bias %rsi so the .onlyN entry reads the right offsets */
	movl	%ecx, %edi
	shrl	$1, %ecx	/* %ecx = dwords left = jump table index */
	shl	$1, %edi
	subq	$64, %rdi
	addq	%rdi, %rsi
	leaq    .ip_ocsum_jmptbl(%rip), %rdi
	leaq	(%rdi, %rcx, 8), %rdi
	xorl	%ecx, %ecx
	clc			/* enter the adc chain with carry clear */
	jmp 	*(%rdi)

	.align	8
.ip_ocsum_jmptbl:
	.quad	.only0, .only4, .only8, .only12, .only16, .only20
	.quad	.only24, .only28, .only32, .only36, .only40, .only44
	.quad	.only48, .only52, .only56, .only60
	SET_SIZE(ip_ocsum)

#elif defined(__i386)

	/* same structure as the amd64 version above; see its comments */
	ENTRY(ip_ocsum)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movl	12(%ebp), %ecx	/* count of half words */
	movl	16(%ebp), %edx	/* partial checksum */
	movl	8(%ebp), %esi
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done

	testl	$3, %esi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:
.next_iter:
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%esi), %edx
.only60:
	adcl	4(%esi), %eax
.only56:
	adcl	8(%esi), %edx
.only52:
	adcl	12(%esi), %eax
.only48:
	adcl	16(%esi), %edx
.only44:
	adcl	20(%esi), %eax
.only40:
	adcl	24(%esi), %edx
.only36:
	adcl	28(%esi), %eax
.only32:
	adcl	32(%esi), %edx
.only28:
	adcl	36(%esi), %eax
.only24:
	adcl	40(%esi), %edx
.only20:
	adcl	44(%esi), %eax
.only16:
	adcl	48(%esi), %edx
.only12:
	adcl	52(%esi), %eax
.only8:
	adcl	56(%esi), %edx
.only4:
	adcl	60(%esi), %eax	/* We could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* we could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addl	$64, %esi
	andl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	popl	%edi		/* restore registers */
	popl	%esi
	popl	%ebx
	leave
	ret

.ip_csum_notaligned:
	xorl	%edi, %edi
	movw	(%esi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addl	$2, %esi
	decl	%ecx
	jmp	.ip_csum_aligned

.less_than_32:
	addl	$32, %ecx
	testl	$1, %ecx
	jz	.size_aligned
	andl	$0xfe, %ecx
	movzwl	(%esi, %ecx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	movl	%ecx, %edi
	shrl	$1, %ecx
	shl	$1, %edi
	subl	$64, %edi
	addl	%edi, %esi
	movl	$.ip_ocsum_jmptbl, %edi
	lea	(%edi, %ecx, 4), %edi
	xorl	%ecx, %ecx
	clc
	jmp 	*(%edi)
	SET_SIZE(ip_ocsum)

	.data
	.align	4

.ip_ocsum_jmptbl:
	.long	.only0, .only4, .only8, .only12, .only16, .only20
	.long	.only24, .only28, .only32, .only36, .only40, .only44
	.long	.only48, .only52, .only56, .only60


#endif	/* __i386 */
#endif	/* __lint */
2659
2660/*
2661 * multiply two long numbers and yield a u_longlong_t result, callable from C.
2662 * Provided to manipulate hrtime_t values.
2663 */
#if defined(__lint)

/* result = a * b; */

/*
 * mul32: 32x32 -> 64-bit unsigned multiply (for hrtime_t math).
 */
/* ARGSUSED */
unsigned long long
mul32(uint_t a, uint_t b)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	ENTRY(mul32)
	xorl	%edx, %edx	/* XX64 joe, paranoia? */
	movl	%edi, %eax
	mull	%esi		/* %edx:%eax = a * b */
	shlq	$32, %rdx
	orq	%rdx, %rax	/* pack the 64-bit product into %rax */
	ret
	SET_SIZE(mul32)

#elif defined(__i386)

	ENTRY(mul32)
	movl	8(%esp), %eax
	movl	4(%esp), %ecx
	mull	%ecx		/* result returned in %edx:%eax */
	ret
	SET_SIZE(mul32)

#endif	/* __i386 */
#endif	/* __lint */
2697
/* Dead code: only built when `notused' is defined (i.e. never). */
#if defined(notused)
#if defined(__lint)
/* ARGSUSED */
void
load_pte64(uint64_t *pte, uint64_t pte_value)
{}
#else	/* __lint */
	.globl load_pte64
load_pte64:
	movl	4(%esp), %eax	/* pte pointer */
	movl	8(%esp), %ecx	/* low 32 bits of pte_value */
	movl	12(%esp), %edx	/* high 32 bits of pte_value */
	/* high word first; NOTE(review): not atomic as a 64-bit store */
	movl	%edx, 4(%eax)
	movl	%ecx, (%eax)
	ret
#endif	/* __lint */
#endif	/* notused */
2715
#if defined(__lint)

/*
 * scan_memory: read (and discard) `size' bytes starting at `addr'
 * using rep lods, e.g. to touch a memory range.  Any partial final
 * word (size not a multiple of the word size) is not read.
 */
/*ARGSUSED*/
void
scan_memory(caddr_t addr, size_t size)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(scan_memory)
	shrq	$3, %rsi	/* convert %rsi from byte to quadword count */
	jz	.scanm_done
	movq	%rsi, %rcx	/* move count into rep control register */
	movq	%rdi, %rsi	/* move addr into lodsq control reg. */
	rep lodsq		/* scan the memory range */
.scanm_done:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(scan_memory)

#elif defined(__i386)

	ENTRY(scan_memory)
	pushl	%ecx
	pushl	%esi
	movl	16(%esp), %ecx	/* move 2nd arg into rep control register */
	shrl	$2, %ecx	/* convert from byte count to word count */
	jz	.scanm_done
	movl	12(%esp), %esi	/* move 1st arg into lodsw control register */
	.byte	0xf3		/* rep prefix.  lame assembler.  sigh. */
	lodsl
.scanm_done:
	popl	%esi
	popl	%ecx
	ret
	SET_SIZE(scan_memory)

#endif	/* __i386 */
#endif	/* __lint */
2757
2758
#if defined(__lint)

/*
 * lowbit: return the 1-based position of the lowest set bit in `i',
 * or 0 if no bits are set.
 */
/*ARGSUSED */
int
lowbit(ulong_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Preload -1 so that when i == 0 the subsequent incl yields 0.
	 * NOTE(review): relies on bsf leaving the destination unmodified
	 * for a zero source -- AMD documents this; Intel documents the
	 * destination as undefined.  Confirm this remains acceptable for
	 * supported CPUs.
	 */
	ENTRY(lowbit)
	movl	$-1, %eax
	bsfq	%rdi, %rax	/* %rax = index of lowest set bit */
	incl	%eax		/* convert 0-based index to 1-based */
	ret
	SET_SIZE(lowbit)

#elif defined(__i386)

	ENTRY(lowbit)
	movl	$-1, %eax	/* see note in amd64 version above */
	bsfl	4(%esp), %eax
	incl	%eax
	ret
	SET_SIZE(lowbit)

#endif	/* __i386 */
#endif	/* __lint */
2788
#if defined(__lint)

/*
 * highbit: return the 1-based position of the highest set bit in `i',
 * or 0 if no bits are set.
 */
/*ARGSUSED*/
int
highbit(ulong_t i)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Preload -1 so that when i == 0 the subsequent incl yields 0.
	 * NOTE(review): like lowbit(), relies on bsr leaving the
	 * destination unmodified for a zero source (AMD-documented,
	 * Intel-undefined behavior).
	 */
	ENTRY(highbit)
	movl	$-1, %eax
	bsrq	%rdi, %rax	/* %rax = index of highest set bit */
	incl	%eax		/* convert 0-based index to 1-based */
	ret
	SET_SIZE(highbit)

#elif defined(__i386)

	ENTRY(highbit)
	movl	$-1, %eax	/* see note in amd64 version above */
	bsrl	4(%esp), %eax
	incl	%eax
	ret
	SET_SIZE(highbit)

#endif	/* __i386 */
#endif	/* __lint */
2818
#if defined(__lint)

/*
 * rdmsr/wrmsr: read/write model-specific register `r'.
 * xrdmsr/xwrmsr: same, but first load XMSR_ACCESS_VAL into %edi,
 * the key required by some processors for extended MSR access.
 * invalidate_cache: write back and invalidate all caches (wbinvd).
 */

/*ARGSUSED*/
uint64_t
rdmsr(uint_t r)
{ return (0); }

/*ARGSUSED*/
void
wrmsr(uint_t r, const uint64_t val)
{}

/*ARGSUSED*/
uint64_t
xrdmsr(uint_t r)
{ return (0); }

/*ARGSUSED*/
void
xwrmsr(uint_t r, const uint64_t val)
{}

void
invalidate_cache(void)
{}

#else  /* __lint */

/* magic value enabling extended MSR access on certain processors */
#define	XMSR_ACCESS_VAL		$0x9c5a203a

#if defined(__amd64)

	ENTRY(rdmsr)
	movl	%edi, %ecx	/* MSR number */
	rdmsr			/* result in %edx:%eax */
	shlq	$32, %rdx
	orq	%rdx, %rax	/* pack 64-bit result into %rax */
	ret
	SET_SIZE(rdmsr)

	ENTRY(wrmsr)
	movq	%rsi, %rdx
	shrq	$32, %rdx	/* %edx = high 32 bits of val */
	movl	%esi, %eax	/* %eax = low 32 bits of val */
	movl	%edi, %ecx	/* MSR number */
	wrmsr
	ret
	SET_SIZE(wrmsr)

	ENTRY(xrdmsr)
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%edi, %ecx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	rdmsr
	shlq	$32, %rdx
	orq	%rdx, %rax
	leave
	ret
	SET_SIZE(xrdmsr)

	ENTRY(xwrmsr)
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%edi, %ecx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	movq	%rsi, %rdx
	shrq	$32, %rdx
	movl	%esi, %eax
	wrmsr
	leave
	ret
	SET_SIZE(xwrmsr)

#elif defined(__i386)

	ENTRY(rdmsr)
	movl	4(%esp), %ecx	/* MSR number */
	rdmsr			/* result returned in %edx:%eax */
	ret
	SET_SIZE(rdmsr)

	ENTRY(wrmsr)
	movl	4(%esp), %ecx	/* MSR number */
	movl	8(%esp), %eax	/* low 32 bits of val */
	movl	12(%esp), %edx	/* high 32 bits of val */
	wrmsr
	ret
	SET_SIZE(wrmsr)

	ENTRY(xrdmsr)
	pushl	%ebp
	movl	%esp, %ebp
	movl	8(%esp), %ecx
	pushl	%edi
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	rdmsr
	popl	%edi
	leave
	ret
	SET_SIZE(xrdmsr)

	ENTRY(xwrmsr)
	pushl	%ebp
	movl	%esp, %ebp
	movl	8(%esp), %ecx
	movl	12(%esp), %eax
	movl	16(%esp), %edx
	pushl	%edi
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	wrmsr
	popl	%edi
	leave
	ret
	SET_SIZE(xwrmsr)

#endif	/* __i386 */

	ENTRY(invalidate_cache)
	wbinvd
	ret
	SET_SIZE(invalidate_cache)

#endif	/* __lint */
2943
#if defined(__lint)

/*
 * getcregs: capture the control registers and descriptor-table
 * registers into *crp, typically for post-mortem/crash-dump use.
 */
/*ARGSUSED*/
void
getcregs(struct cregs *crp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(getcregs)
#if defined(__xpv)
	/*
	 * Only a few of the hardware control registers or descriptor tables
	 * are directly accessible to us, so just zero the structure.
	 *
	 * XXPV	Perhaps it would be helpful for the hypervisor to return
	 *	virtualized versions of these for post-mortem use.
	 *	(Need to reevaluate - perhaps it already does!)
	 */
	pushq	%rdi		/* save *crp */
	movq	$CREGSZ, %rsi
	call	bzero
	popq	%rdi

	/*
	 * Dump what limited information we can
	 */
	movq	%cr0, %rax
	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
	movq	%cr2, %rax
	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
	movq	%cr3, %rax
	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
	movq	%cr4, %rax
	movq	%rax, CREG_CR4(%rdi)	/* cr4 */

#else	/* __xpv */

/* read MSR `r' and store its 64 bits at off(d) */
#define	GETMSR(r, off, d)	\
	movl	$r, %ecx;	\
	rdmsr;			\
	movl	%eax, off(d);	\
	movl	%edx, off+4(d)

	/*
	 * Pre-zero each field so the bytes the store instructions don't
	 * cover (sgdt/sidt write 10 bytes, sldt/str only 2) are clean.
	 */
	xorl	%eax, %eax
	movq	%rax, CREG_GDT+8(%rdi)
	sgdt	CREG_GDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_IDT+8(%rdi)
	sidt	CREG_IDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_LDT(%rdi)
	sldt	CREG_LDT(%rdi)		/* 2 bytes */
	movq	%rax, CREG_TASKR(%rdi)
	str	CREG_TASKR(%rdi)	/* 2 bytes */
	movq	%cr0, %rax
	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
	movq	%cr2, %rax
	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
	movq	%cr3, %rax
	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
	movq	%cr4, %rax
	movq	%rax, CREG_CR4(%rdi)	/* cr4 */
	movq	%cr8, %rax
	movq	%rax, CREG_CR8(%rdi)	/* cr8 */
	GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
	GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
#endif	/* __xpv */
	ret
	SET_SIZE(getcregs)

#undef GETMSR

#elif defined(__i386)

	ENTRY_NP(getcregs)
#if defined(__xpv)
	/*
	 * Only a few of the hardware control registers or descriptor tables
	 * are directly accessible to us, so just zero the structure.
	 *
	 * XXPV	Perhaps it would be helpful for the hypervisor to return
	 *	virtualized versions of these for post-mortem use.
	 *	(Need to reevaluate - perhaps it already does!)
	 */
	movl	4(%esp), %edx
	pushl	$CREGSZ
	pushl	%edx
	call	bzero
	addl	$8, %esp
	movl	4(%esp), %edx

	/*
	 * Dump what limited information we can
	 */
	movl	%cr0, %eax
	movl	%eax, CREG_CR0(%edx)	/* cr0 */
	movl	%cr2, %eax
	movl	%eax, CREG_CR2(%edx)	/* cr2 */
	movl	%cr3, %eax
	movl	%eax, CREG_CR3(%edx)	/* cr3 */
	movl	%cr4, %eax
	movl	%eax, CREG_CR4(%edx)	/* cr4 */

#else	/* __xpv */

	movl	4(%esp), %edx
	/* zero the pad bytes beyond sgdt/sidt's 6-byte stores */
	movw	$0, CREG_GDT+6(%edx)
	movw	$0, CREG_IDT+6(%edx)
	sgdt	CREG_GDT(%edx)		/* gdt */
	sidt	CREG_IDT(%edx)		/* idt */
	sldt	CREG_LDT(%edx)		/* ldt */
	str	CREG_TASKR(%edx)	/* task */
	movl	%cr0, %eax
	movl	%eax, CREG_CR0(%edx)	/* cr0 */
	movl	%cr2, %eax
	movl	%eax, CREG_CR2(%edx)	/* cr2 */
	movl	%cr3, %eax
	movl	%eax, CREG_CR3(%edx)	/* cr3 */
	/* %cr4 exists only on CPUs with large-page support; else store 0 */
	testl	$X86_LARGEPAGE, x86_feature
	jz	.nocr4
	movl	%cr4, %eax
	movl	%eax, CREG_CR4(%edx)	/* cr4 */
	jmp	.skip
.nocr4:
	movl	$0, CREG_CR4(%edx)
.skip:
#endif
	ret
	SET_SIZE(getcregs)

#endif	/* __i386 */
#endif	/* __lint */
3077
3078
3079/*
3080 * A panic trigger is a word which is updated atomically and can only be set
3081 * once.  We atomically store 0xDEFACEDD and load the old value.  If the
3082 * previous value was 0, we succeed and return 1; otherwise return 0.
3083 * This allows a partially corrupt trigger to still trigger correctly.  DTrace
3084 * has its own version of this function to allow it to panic correctly from
3085 * probe context.
3086 */
3087#if defined(__lint)
3088
/*
 * Lint stubs: the real panic_trigger()/dtrace_panic_trigger() are the
 * assembly implementations below (see the block comment above for the
 * trigger semantics).
 */
/*ARGSUSED*/
int
panic_trigger(int *tp)
{ return (0); }

/*ARGSUSED*/
int
dtrace_panic_trigger(int *tp)
{ return (0); }
3098
3099#else	/* __lint */
3100
3101#if defined(__amd64)
3102
3103	ENTRY_NP(panic_trigger)
3104	xorl	%eax, %eax
3105	movl	$0xdefacedd, %edx
3106	lock
3107	  xchgl	%edx, (%rdi)
3108	cmpl	$0, %edx
3109	je	0f
3110	movl	$0, %eax
3111	ret
31120:	movl	$1, %eax
3113	ret
3114	SET_SIZE(panic_trigger)
3115
3116	ENTRY_NP(dtrace_panic_trigger)
3117	xorl	%eax, %eax
3118	movl	$0xdefacedd, %edx
3119	lock
3120	  xchgl	%edx, (%rdi)
3121	cmpl	$0, %edx
3122	je	0f
3123	movl	$0, %eax
3124	ret
31250:	movl	$1, %eax
3126	ret
3127	SET_SIZE(dtrace_panic_trigger)
3128
3129#elif defined(__i386)
3130
3131	ENTRY_NP(panic_trigger)
3132	movl	4(%esp), %edx		/ %edx = address of trigger
3133	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
3134	lock				/ assert lock
3135	xchgl %eax, (%edx)		/ exchange %eax and the trigger
3136	cmpl	$0, %eax		/ if (%eax == 0x0)
3137	je	0f			/   return (1);
3138	movl	$0, %eax		/ else
3139	ret				/   return (0);
31400:	movl	$1, %eax
3141	ret
3142	SET_SIZE(panic_trigger)
3143
3144	ENTRY_NP(dtrace_panic_trigger)
3145	movl	4(%esp), %edx		/ %edx = address of trigger
3146	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
3147	lock				/ assert lock
3148	xchgl %eax, (%edx)		/ exchange %eax and the trigger
3149	cmpl	$0, %eax		/ if (%eax == 0x0)
3150	je	0f			/   return (1);
3151	movl	$0, %eax		/ else
3152	ret				/   return (0);
31530:	movl	$1, %eax
3154	ret
3155	SET_SIZE(dtrace_panic_trigger)
3156
3157#endif	/* __i386 */
3158#endif	/* __lint */
3159
3160/*
3161 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
3162 * into the panic code implemented in panicsys().  vpanic() is responsible
3163 * for passing through the format string and arguments, and constructing a
3164 * regs structure on the stack into which it saves the current register
3165 * values.  If we are not dying due to a fatal trap, these registers will
3166 * then be preserved in panicbuf as the current processor state.  Before
3167 * invoking panicsys(), vpanic() activates the first panic trigger (see
3168 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
3169 * DTrace takes a slightly different panic path if it must panic from probe
3170 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
3171 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
3172 * branches back into vpanic().
3173 */
3174#if defined(__lint)
3175
/*
 * Lint stubs: the real vpanic()/dtrace_vpanic() are the assembly
 * implementations below (see the block comment above for how they
 * feed panicsys()).
 */
/*ARGSUSED*/
void
vpanic(const char *format, va_list alist)
{}

/*ARGSUSED*/
void
dtrace_vpanic(const char *format, va_list alist)
{}
3185
3186#else	/* __lint */
3187
3188#if defined(__amd64)
3189
	/*
	 * void vpanic(const char *format, va_list alist)
	 *
	 * Push the caller's registers, fire the panic trigger, switch to
	 * panic_stack if we won the trigger, build a struct regs on the
	 * stack from the pushed values, and call
	 * panicsys(format, alist, &regs, on_panic_stack).
	 *
	 * NOTE: vpanic_common is also a jump target from dtrace_vpanic()
	 * below, which enters with the trigger result already in %eax.
	 */
	ENTRY_NP(vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip | 	0x60	*/
	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
	pushfq					/* | rfl  |	0x50	*/
	pushq	%r11				/* | %r11 |	0x48	*/
	pushq	%r10				/* | %r10 |	0x40	*/
	pushq	%rbx				/* | %rbx |	0x38	*/
	pushq	%rax				/* | %rax |	0x30	*/
	pushq	%r9				/* | %r9  |	0x28	*/
	pushq	%r8				/* | %r8  |	0x20	*/
	pushq	%rcx				/* | %rcx |	0x18	*/
	pushq	%rdx				/* | %rdx |	0x10	*/
	pushq	%rsi				/* | %rsi |	0x8 alist */
	pushq	%rdi				/* | %rdi |	0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	panic_trigger			/* %eax = panic_trigger() */

vpanic_common:
	/*
	 * The panic_trigger result is in %eax from the call above, and
	 * dtrace_panic places it in %eax before branching here.
	 * The rdmsr instructions that follow below will clobber %eax so
	 * we stash the panic_trigger result in %r11d.
	 */
	movl	%eax, %r11d
	cmpl	$0, %r11d
	je	0f

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	leaq	panic_stack(%rip), %rsp
	addq	$PANICSTKSIZE, %rsp
0:	subq	$REGSIZE, %rsp
	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 * (Offsets from %rbx below match the push sequence in the prologue.)
	 */
	movq	0x0(%rbx), %rcx
	movq	%rcx, REGOFF_RDI(%rsp)
	movq	0x8(%rbx), %rcx
	movq	%rcx, REGOFF_RSI(%rsp)
	movq	0x10(%rbx), %rcx
	movq	%rcx, REGOFF_RDX(%rsp)
	movq	0x18(%rbx), %rcx
	movq	%rcx, REGOFF_RCX(%rsp)
	movq	0x20(%rbx), %rcx

	movq	%rcx, REGOFF_R8(%rsp)
	movq	0x28(%rbx), %rcx
	movq	%rcx, REGOFF_R9(%rsp)
	movq	0x30(%rbx), %rcx
	movq	%rcx, REGOFF_RAX(%rsp)
	movq	0x38(%rbx), %rcx
	movq	%rcx, REGOFF_RBX(%rsp)
	movq	0x58(%rbx), %rcx

	movq	%rcx, REGOFF_RBP(%rsp)
	movq	0x40(%rbx), %rcx
	movq	%rcx, REGOFF_R10(%rsp)
	movq	0x48(%rbx), %rcx
	movq	%rcx, REGOFF_R11(%rsp)
	movq	%r12, REGOFF_R12(%rsp)

	movq	%r13, REGOFF_R13(%rsp)
	movq	%r14, REGOFF_R14(%rsp)
	movq	%r15, REGOFF_R15(%rsp)

	xorl	%ecx, %ecx
	movw	%ds, %cx
	movq	%rcx, REGOFF_DS(%rsp)
	movw	%es, %cx
	movq	%rcx, REGOFF_ES(%rsp)
	movw	%fs, %cx
	movq	%rcx, REGOFF_FS(%rsp)
	movw	%gs, %cx
	movq	%rcx, REGOFF_GS(%rsp)

	movq	$0, REGOFF_TRAPNO(%rsp)

	movq	$0, REGOFF_ERR(%rsp)
	leaq	vpanic(%rip), %rcx
	movq	%rcx, REGOFF_RIP(%rsp)
	movw	%cs, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_CS(%rsp)
	movq	0x50(%rbx), %rcx
	movq	%rcx, REGOFF_RFL(%rsp)
	movq	%rbx, %rcx
	addq	$0x60, %rcx
	movq	%rcx, REGOFF_RSP(%rsp)
	movw	%ss, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_SS(%rsp)

	/*
	 * panicsys(format, alist, rp, on_panic_stack)
	 */
	movq	REGOFF_RDI(%rsp), %rdi		/* format */
	movq	REGOFF_RSI(%rsp), %rsi		/* alist */
	movq	%rsp, %rdx			/* struct regs */
	movl	%r11d, %ecx			/* on_panic_stack */
	call	panicsys
	addq	$REGSIZE, %rsp
	popq	%rdi
	popq	%rsi
	popq	%rdx
	popq	%rcx
	popq	%r8
	popq	%r9
	popq	%rax
	popq	%rbx
	popq	%r10
	popq	%r11
	popfq
	leave
	ret
	SET_SIZE(vpanic)
3314
	/*
	 * void dtrace_vpanic(const char *format, va_list alist)
	 *
	 * DTrace probe-context panic entry: sets up the identical initial
	 * stack as vpanic(), calls dtrace_panic_trigger() instead of
	 * panic_trigger(), then joins vpanic() at vpanic_common with the
	 * trigger result in %eax.
	 */
	ENTRY_NP(dtrace_vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip | 	0x60	*/
	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
	pushfq					/* | rfl  |	0x50	*/
	pushq	%r11				/* | %r11 |	0x48	*/
	pushq	%r10				/* | %r10 |	0x40	*/
	pushq	%rbx				/* | %rbx |	0x38	*/
	pushq	%rax				/* | %rax |	0x30	*/
	pushq	%r9				/* | %r9  |	0x28	*/
	pushq	%r8				/* | %r8  |	0x20	*/
	pushq	%rcx				/* | %rcx |	0x18	*/
	pushq	%rdx				/* | %rdx |	0x10	*/
	pushq	%rsi				/* | %rsi |	0x8 alist */
	pushq	%rdi				/* | %rdi |	0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	dtrace_panic_trigger	/* %eax = dtrace_panic_trigger() */
	jmp	vpanic_common

	SET_SIZE(dtrace_vpanic)
3338
3339#elif defined(__i386)
3340
	/*
	 * void vpanic(const char *format, va_list alist)
	 *
	 * i386 version: push the caller's registers, fire the panic
	 * trigger, switch to panic_stack if we won, build a struct regs
	 * on the stack and call panicsys(format, alist, &regs,
	 * on_panic_stack).  vpanic_common is also the join point for
	 * dtrace_vpanic() below (trigger result arrives in %eax).
	 */
	ENTRY_NP(vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip | 20
	movl	%esp, %ebp			/ | %ebp | 16
	pushl	%eax				/ | %eax | 12
	pushl	%ebx				/ | %ebx |  8
	pushl	%ecx				/ | %ecx |  4
	pushl	%edx				/ | %edx |  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	panic_trigger			/ %eax = panic_trigger()
	addl	$4, %esp			/ reset stack pointer

vpanic_common:
	cmpl	$0, %eax			/ if (%eax == 0)
	je	0f				/   goto 0f;

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	lea	panic_stack, %esp		/ %esp  = panic_stack
	addl	$PANICSTKSIZE, %esp		/ %esp += PANICSTKSIZE

0:	subl	$REGSIZE, %esp			/ allocate struct regs

	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
#if !defined(__GNUC_AS__)
	movw	%gs, %edx
	movl	%edx, REGOFF_GS(%esp)
	movw	%fs, %edx
	movl	%edx, REGOFF_FS(%esp)
	movw	%es, %edx
	movl	%edx, REGOFF_ES(%esp)
	movw	%ds, %edx
	movl	%edx, REGOFF_DS(%esp)
#else	/* __GNUC_AS__ */
	mov	%gs, %edx
	mov	%edx, REGOFF_GS(%esp)
	mov	%fs, %edx
	mov	%edx, REGOFF_FS(%esp)
	mov	%es, %edx
	mov	%edx, REGOFF_ES(%esp)
	mov	%ds, %edx
	mov	%edx, REGOFF_DS(%esp)
#endif	/* __GNUC_AS__ */
	movl	%edi, REGOFF_EDI(%esp)
	movl	%esi, REGOFF_ESI(%esp)
	movl	16(%ebx), %ecx
	movl	%ecx, REGOFF_EBP(%esp)
	movl	%ebx, %ecx
	addl	$20, %ecx
	movl	%ecx, REGOFF_ESP(%esp)
	movl	8(%ebx), %ecx
	movl	%ecx, REGOFF_EBX(%esp)
	movl	0(%ebx), %ecx
	movl	%ecx, REGOFF_EDX(%esp)
	movl	4(%ebx), %ecx
	movl	%ecx, REGOFF_ECX(%esp)
	movl	12(%ebx), %ecx
	movl	%ecx, REGOFF_EAX(%esp)
	movl	$0, REGOFF_TRAPNO(%esp)
	movl	$0, REGOFF_ERR(%esp)
	lea	vpanic, %ecx
	movl	%ecx, REGOFF_EIP(%esp)
#if !defined(__GNUC_AS__)
	movw	%cs, %edx
#else	/* __GNUC_AS__ */
	mov	%cs, %edx
#endif	/* __GNUC_AS__ */
	movl	%edx, REGOFF_CS(%esp)
	pushfl
	popl	%ecx
#if defined(__xpv)
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	CURTHREAD(%edx)
	KPREEMPT_DISABLE(%edx)
	EVENT_MASK_TO_IE(%edx, %ecx)
	CURTHREAD(%edx)
	KPREEMPT_ENABLE_NOKP(%edx)
#endif
	movl	%ecx, REGOFF_EFL(%esp)
	movl	$0, REGOFF_UESP(%esp)
#if !defined(__GNUC_AS__)
	movw	%ss, %edx
#else	/* __GNUC_AS__ */
	mov	%ss, %edx
#endif	/* __GNUC_AS__ */
	movl	%edx, REGOFF_SS(%esp)

	movl	%esp, %ecx			/ %ecx = &regs
	pushl	%eax				/ push on_panic_stack
	pushl	%ecx				/ push &regs
	movl	12(%ebp), %ecx			/ %ecx = alist
	pushl	%ecx				/ push alist
	movl	8(%ebp), %ecx			/ %ecx = format
	pushl	%ecx				/ push format
	call	panicsys			/ panicsys();
	addl	$16, %esp			/ pop arguments

	addl	$REGSIZE, %esp
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	leave
	ret
	SET_SIZE(vpanic)
3458
	/*
	 * void dtrace_vpanic(const char *format, va_list alist)
	 *
	 * DTrace probe-context panic entry (i386): identical prologue to
	 * vpanic(), but calls dtrace_panic_trigger() and then joins
	 * vpanic() at vpanic_common with the result in %eax.
	 */
	ENTRY_NP(dtrace_vpanic)			/ Initial stack layout:

	pushl	%ebp				/ | %eip | 20
	movl	%esp, %ebp			/ | %ebp | 16
	pushl	%eax				/ | %eax | 12
	pushl	%ebx				/ | %ebx |  8
	pushl	%ecx				/ | %ecx |  4
	pushl	%edx				/ | %edx |  0

	movl	%esp, %ebx			/ %ebx = current stack pointer

	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
	pushl	%eax				/ push &panic_quiesce
	call	dtrace_panic_trigger		/ %eax = dtrace_panic_trigger()
	addl	$4, %esp			/ reset stack pointer
	jmp	vpanic_common			/ jump back to common code

	SET_SIZE(dtrace_vpanic)
3477
3478#endif	/* __i386 */
3479#endif	/* __lint */
3480
3481#if defined(__lint)
3482
/*
 * Lint stubs for hres_tick() and the high-resolution clock globals;
 * the real definitions are the assembly/DGDEF3 versions below.
 */
void
hres_tick(void)
{}

int64_t timedelta;
hrtime_t hres_last_tick;
volatile timestruc_t hrestime;
int64_t hrestime_adj;
volatile int hres_lock;
hrtime_t hrtime_base;
3493
3494#else	/* __lint */
3495
	/*
	 * High-resolution clock global data; sizes and alignments must
	 * match the C declarations in the lint section above.
	 */
	DGDEF3(hrestime, _MUL(2, CLONGSIZE), 8)
	.NWORD	0, 0

	DGDEF3(hrestime_adj, 8, 8)
	.long	0, 0

	DGDEF3(hres_last_tick, 8, 8)
	.long	0, 0

	DGDEF3(timedelta, 8, 8)
	.long	0, 0

	DGDEF3(hres_lock, 4, 8)
	.long	0

	/*
	 * initialized to a non zero value to make pc_gethrtime()
	 * work correctly even before clock is initialized
	 */
	DGDEF3(hrtime_base, 8, 8)
	.long	_MUL(NSEC_PER_CLOCK_TICK, 6), 0

	DGDEF3(adj_shift, 4, 4)
	.long	ADJ_SHIFT
3520
3521#if defined(__amd64)
3522
	/*
	 * void hres_tick(void)
	 *
	 * Per-clock-tick update: read the current hrtime, take CLOCK_LOCK
	 * (the low byte of hres_lock, acquired with xchgb and a
	 * test-and-test-and-set spin), advance hrtime_base and
	 * hrestime.tv_nsec by the interval since the last tick, record
	 * the new hres_last_tick, apply any pending adjustment via
	 * __adj_hrestime, then release the lock by incrementing hres_lock.
	 */
	ENTRY_NP(hres_tick)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef(%rip)
	movq	%rax, %r8

	leaq	hres_lock(%rip), %rax
	movb	$-1, %dl
.CL1:
	xchgb	%dl, (%rax)
	testb	%dl, %dl
	jz	.CL3			/* got it */
.CL2:
	cmpb	$0, (%rax)		/* possible to get lock? */
	pause
	jne	.CL2
	jmp	.CL1			/* yes, try again */
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * a timestruc_t (sec, nsec)
	 */
	leaq	hres_last_tick(%rip), %rax
	movq	%r8, %r11
	subq	(%rax), %r8
	addq	%r8, hrtime_base(%rip)	/* add interval to hrtime_base */
	addq	%r8, hrestime+8(%rip)	/* add interval to hrestime.tv_nsec */
	/*
	 * Now that we have CLOCK_LOCK, we can update hres_last_tick
	 */
	movq	%r11, (%rax)

	call	__adj_hrestime

	/*
	 * release the hres_lock
	 */
	incl	hres_lock(%rip)
	leave
	ret
	SET_SIZE(hres_tick)
3573
3574#elif defined(__i386)
3575
	/*
	 * void hres_tick(void)
	 *
	 * i386 per-clock-tick update.  Reads the current 64-bit hrtime
	 * (in %edx:%esi pairs below), takes CLOCK_LOCK (low byte of
	 * hres_lock) with an xchgb spin, advances hrtime_base and
	 * hrestime.tv_nsec by the interval since the last tick, then
	 * falls through into __adj_hrestime, which applies the clamped
	 * hrestime_adj adjustment (see the pseudo-code comment below),
	 * normalizes tv_nsec and releases the lock.
	 */
	ENTRY_NP(hres_tick)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%esi
	pushl	%ebx

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	call	*gethrtimef
	movl	%eax, %ebx
	movl	%edx, %esi

	movl	$hres_lock, %eax
	movl	$-1, %edx
.CL1:
	xchgb	%dl, (%eax)
	testb	%dl, %dl
	jz	.CL3			/ got it
.CL2:
	cmpb	$0, (%eax)		/ possible to get lock?
	pause
	jne	.CL2
	jmp	.CL1			/ yes, try again
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * timestruc_t (sec, nsec)
	 */

	lea	hres_last_tick, %eax

	movl	%ebx, %edx
	movl	%esi, %ecx

	subl 	(%eax), %edx
	sbbl 	4(%eax), %ecx

	addl	%edx, hrtime_base	/ add interval to hrtime_base
	adcl	%ecx, hrtime_base+4

	addl 	%edx, hrestime+4	/ add interval to hrestime.tv_nsec

	/
	/ Now that we have CLOCK_LOCK, we can update hres_last_tick.
	/
	movl	%ebx, (%eax)
	movl	%esi,  4(%eax)

	/ get hrestime at this moment. used as base for pc_gethrestime
	/
	/ Apply adjustment, if any
	/
	/ #define HRES_ADJ	(NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
	/ (max_hres_adj)
	/
	/ void
	/ adj_hrestime()
	/ {
	/	long long adj;
	/
	/	if (hrestime_adj == 0)
	/		adj = 0;
	/	else if (hrestime_adj > 0) {
	/		if (hrestime_adj < HRES_ADJ)
	/			adj = hrestime_adj;
	/		else
	/			adj = HRES_ADJ;
	/	}
	/	else {
	/		if (hrestime_adj < -(HRES_ADJ))
	/			adj = -(HRES_ADJ);
	/		else
	/			adj = hrestime_adj;
	/	}
	/
	/	timedelta -= adj;
	/	hrestime_adj = timedelta;
	/	hrestime.tv_nsec += adj;
	/
	/	while (hrestime.tv_nsec >= NANOSEC) {
	/		one_sec++;
	/		hrestime.tv_sec++;
	/		hrestime.tv_nsec -= NANOSEC;
	/	}
	/ }
__adj_hrestime:
	movl	hrestime_adj, %esi	/ if (hrestime_adj == 0)
	movl	hrestime_adj+4, %edx
	andl	%esi, %esi
	jne	.CL4			/ no
	andl	%edx, %edx
	jne	.CL4			/ no
	subl	%ecx, %ecx		/ yes, adj = 0;
	subl	%edx, %edx
	jmp	.CL5
.CL4:
	subl	%ecx, %ecx
	subl	%eax, %eax
	subl	%esi, %ecx
	sbbl	%edx, %eax
	andl	%eax, %eax		/ if (hrestime_adj > 0)
	jge	.CL6

	/ In the following comments, HRES_ADJ is used, while in the code
	/ max_hres_adj is used.
	/
	/ The test for "hrestime_adj < HRES_ADJ" is complicated because
	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
	/ on the logical equivalence of:
	/
	/	!(hrestime_adj < HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
	/
	/ which computes whether or not the least significant 32-bits
	/ of hrestime_adj is greater than HRES_ADJ, followed by:
	/
	/	Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry whenever step 1 is true or the most
	/ significant long of the longlong hrestime_adj is non-zero.

	movl	max_hres_adj, %ecx	/ hrestime_adj is positive
	subl	%esi, %ecx
	movl	%edx, %eax
	adcl	$-1, %eax
	jnc	.CL7
	movl	max_hres_adj, %ecx	/ adj = HRES_ADJ;
	subl	%edx, %edx
	jmp	.CL5

	/ The following computation is similar to the one above.
	/
	/ The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
	/ on the logical equivalence of:
	/
	/	(hrestime_adj > -HRES_ADJ)
	/
	/ and the two step sequence:
	/
	/	(HRES_ADJ + lsw(hrestime_adj)) generates a Carry
	/
	/ which means the least significant 32-bits of hrestime_adj is
	/ greater than -HRES_ADJ, followed by:
	/
	/	Previous Carry + 0 + msw(hrestime_adj) generates a Carry
	/
	/ which generates a carry only when step 1 is true and the most
	/ significant long of the longlong hrestime_adj is -1.

.CL6:					/ hrestime_adj is negative
	movl	%esi, %ecx
	addl	max_hres_adj, %ecx
	movl	%edx, %eax
	adcl	$0, %eax
	jc	.CL7
	xor	%ecx, %ecx
	subl	max_hres_adj, %ecx	/ adj = -(HRES_ADJ);
	movl	$-1, %edx
	jmp	.CL5
.CL7:
	movl	%esi, %ecx		/ adj = hrestime_adj;
.CL5:
	movl	timedelta, %esi
	subl	%ecx, %esi
	movl	timedelta+4, %eax
	sbbl	%edx, %eax
	movl	%esi, timedelta
	movl	%eax, timedelta+4	/ timedelta -= adj;
	movl	%esi, hrestime_adj
	movl	%eax, hrestime_adj+4	/ hrestime_adj = timedelta;
	addl	hrestime+4, %ecx

	movl	%ecx, %eax		/ eax = tv_nsec
1:
	cmpl	$NANOSEC, %eax		/ if ((unsigned long)tv_nsec >= NANOSEC)
	jb	.CL8			/ no
	incl	one_sec			/ yes,  one_sec++;
	incl	hrestime		/ hrestime.tv_sec++;
	addl	$-NANOSEC, %eax		/ tv_nsec -= NANOSEC
	jmp	1b			/ check for more seconds

.CL8:
	movl	%eax, hrestime+4	/ store final into hrestime.tv_nsec
	incl	hres_lock		/ release the hres_lock

	popl	%ebx
	popl	%esi
	leave
	ret
	SET_SIZE(hres_tick)
3776
3777#endif	/* __i386 */
3778#endif	/* __lint */
3779
3780/*
3781 * void prefetch_smap_w(void *)
3782 *
3783 * Prefetch ahead within a linear list of smap structures.
3784 * Not implemented for ia32.  Stub for compatibility.
3785 */
3786
3787#if defined(__lint)
3788
/*
 * No-op on x86 (see block comment above); the two-byte "rep; ret"
 * form is used because this routine can be a branch target.
 */
/*ARGSUSED*/
void prefetch_smap_w(void *smp)
{}

#else	/* __lint */

	ENTRY(prefetch_smap_w)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_smap_w)
3799
3800#endif	/* __lint */
3801
3802/*
3803 * prefetch_page_r(page_t *)
3804 * issue prefetch instructions for a page_t
3805 */
3806#if defined(__lint)
3807
/*
 * No-op stub on x86; the two-byte "rep; ret" form is used because
 * this routine can be a branch target.
 */
/*ARGSUSED*/
void
prefetch_page_r(void *pp)
{}

#else	/* __lint */

	ENTRY(prefetch_page_r)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_page_r)
3819
3820#endif	/* __lint */
3821
3822#if defined(__lint)
3823
/*
 * int bcmp(const void *s1, const void *s2, size_t count)
 *
 * Return 0 if the two byte ranges are equal, non-zero otherwise.
 * The amd64 version delegates to memcmp() and normalizes the result
 * to 0/1; under DEBUG it panics if either pointer is below
 * postbootkernelbase.
 */
/*ARGSUSED*/
int
bcmp(const void *s1, const void *s2, size_t count)
{ return (0); }

#else   /* __lint */

#if defined(__amd64)

	ENTRY(bcmp)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %r11
	cmpq	%r11, %rdi
	jb	0f
	cmpq	%r11, %rsi
	jnb	1f
0:	leaq	.bcmp_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif	/* DEBUG */
	call	memcmp
	testl	%eax, %eax
	setne	%dl
	leave
	movzbl	%dl, %eax
	ret
	SET_SIZE(bcmp)
3854
3855#elif defined(__i386)
3856
#define	ARG_S1		8
#define	ARG_S2		12
#define	ARG_LENGTH	16

	/*
	 * int bcmp(const void *s1, const void *s2, size_t count)
	 *
	 * i386 version: compare a word (4 bytes) at a time, falling back
	 * to a byte loop for the tail or once a differing word is found.
	 * Returns 0 if equal, 1 if not.  Under DEBUG, panics if either
	 * pointer is below postbootkernelbase.
	 */
	ENTRY(bcmp)
	pushl	%ebp
	movl	%esp, %ebp	/ create new stack frame
#ifdef DEBUG
	movl    postbootkernelbase, %eax
	cmpl    %eax, ARG_S1(%ebp)
	jb	0f
	cmpl    %eax, ARG_S2(%ebp)
	jnb	1f
0:	pushl   $.bcmp_panic_msg
	call    panic
1:
#endif	/* DEBUG */

	pushl	%edi		/ save register variable
	movl	ARG_S1(%ebp), %eax	/ %eax = address of string 1
	movl	ARG_S2(%ebp), %ecx	/ %ecx = address of string 2
	cmpl	%eax, %ecx	/ if the same string
	je	.equal		/ goto .equal
	movl	ARG_LENGTH(%ebp), %edi	/ %edi = length in bytes
	cmpl	$4, %edi	/ if %edi < 4
	jb	.byte_check	/ goto .byte_check
	.align	4
.word_loop:
	movl	(%ecx), %edx	/ move 1 word from (%ecx) to %edx
	leal	-4(%edi), %edi	/ %edi -= 4
	cmpl	(%eax), %edx	/ compare 1 word from (%eax) with %edx
	jne	.word_not_equal	/ if not equal, goto .word_not_equal
	leal	4(%ecx), %ecx	/ %ecx += 4 (next word)
	leal	4(%eax), %eax	/ %eax += 4 (next word)
	cmpl	$4, %edi	/ if %edi >= 4
	jae	.word_loop	/ goto .word_loop
.byte_check:
	cmpl	$0, %edi	/ if %edi == 0
	je	.equal		/ goto .equal
	jmp	.byte_loop	/ goto .byte_loop (checks in bytes)
.word_not_equal:
	leal	4(%edi), %edi	/ %edi += 4 (post-decremented)
	.align	4
.byte_loop:
	movb	(%ecx),	%dl	/ move 1 byte from (%ecx) to %dl
	cmpb	%dl, (%eax)	/ compare %dl with 1 byte from (%eax)
	jne	.not_equal	/ if not equal, goto .not_equal
	incl	%ecx		/ %ecx++ (next byte)
	incl	%eax		/ %eax++ (next byte)
	decl	%edi		/ %edi--
	jnz	.byte_loop	/ if not zero, goto .byte_loop
.equal:
	xorl	%eax, %eax	/ %eax = 0
	popl	%edi		/ restore register variable
	leave			/ restore old stack frame
	ret			/ return (NULL)
	.align	4
.not_equal:
	movl	$1, %eax	/ return 1
	popl	%edi		/ restore register variable
	leave			/ restore old stack frame
	ret			/ return (NULL)
	SET_SIZE(bcmp)

#endif	/* __i386 */

#ifdef DEBUG
	.text
.bcmp_panic_msg:
	.string "bcmp: arguments below kernelbase"
#endif	/* DEBUG */
3928
3929#endif	/* __lint */
3930
3931#if defined(__lint)
3932
3933uint_t
3934bsrw_insn(uint16_t mask)
3935{
3936	uint_t index = sizeof (mask) * NBBY - 1;
3937
3938	while ((mask & (1 << index)) == 0)
3939		index--;
3940	return (index);
3941}
3942
3943#else	/* __lint */
3944
3945#if defined(__amd64)
3946
	/*
	 * uint_t bsrw_insn(uint16_t mask)
	 *
	 * Bit-scan-reverse: return the index of the highest set bit in
	 * mask.  %eax is pre-zeroed; bsrw's result is undefined when the
	 * source is zero, so callers must pass a non-zero mask.
	 */
	ENTRY_NP(bsrw_insn)
	xorl	%eax, %eax
	bsrw	%di, %ax
	ret
	SET_SIZE(bsrw_insn)

#elif defined(__i386)

	ENTRY_NP(bsrw_insn)
	movw	4(%esp), %cx
	xorl	%eax, %eax
	bsrw	%cx, %ax
	ret
	SET_SIZE(bsrw_insn)
3961
3962#endif	/* __i386 */
3963#endif	/* __lint */
3964
3965#if defined(__lint)
3966
/*
 * uint_t atomic_btr32(uint32_t *pending, uint_t pil)
 *
 * Atomically clear bit 'pil' in *pending.
 *
 * NOTE(review): the assembly below returns the PREVIOUS value of the
 * bit (0 or 1, via the carry flag from lock btrl), whereas this lint
 * stub returns the new word value -- the stub is only type-checking
 * scaffolding; confirm against callers before relying on the return.
 */
uint_t
atomic_btr32(uint32_t *pending, uint_t pil)
{
	return (*pending &= ~(1 << pil));
}

#else	/* __lint */

#if defined(__i386)

	ENTRY_NP(atomic_btr32)
	movl	4(%esp), %ecx
	movl	8(%esp), %edx
	xorl	%eax, %eax
	lock
	btrl	%edx, (%ecx)
	setc	%al
	ret
	SET_SIZE(atomic_btr32)
3986
3987#endif	/* __i386 */
3988#endif	/* __lint */
3989
3990#if defined(__lint)
3991
/*
 * switch_sp_and_call(newsp, func, arg1, arg2)
 *
 * Switch the stack pointer to newsp, call func(arg1, arg2), then
 * restore the original stack via the saved frame pointer.
 */
/*ARGSUSED*/
void
switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
	    uint_t arg2)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(switch_sp_and_call)
	pushq	%rbp
	movq	%rsp, %rbp		/* set up stack frame */
	movq	%rdi, %rsp		/* switch stack pointer */
	movq	%rdx, %rdi		/* pass func arg 1 */
	movq	%rsi, %r11		/* save function to call */
	movq	%rcx, %rsi		/* pass func arg 2 */
	call	*%r11			/* call function */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)

#elif defined(__i386)

	ENTRY_NP(switch_sp_and_call)
	pushl	%ebp
	mov	%esp, %ebp		/* set up stack frame */
	movl	8(%ebp), %esp		/* switch stack pointer */
	pushl	20(%ebp)		/* push func arg 2 */
	pushl	16(%ebp)		/* push func arg 1 */
	call	*12(%ebp)		/* call function */
	addl	$8, %esp		/* pop arguments */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)
4027
4028#endif	/* __i386 */
4029#endif	/* __lint */
4030
4031#if defined(__lint)
4032
/*
 * void kmdb_enter(void)
 *
 * Trap into the kernel debugger via int $T_DBGENTR, with interrupts
 * disabled across the trap (intr_clear / intr_restore).
 */
void
kmdb_enter(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(kmdb_enter)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * Save flags, do a 'cli' then return the saved flags
	 */
	call	intr_clear

	int	$T_DBGENTR

	/*
	 * Restore the saved flags
	 */
	movq	%rax, %rdi
	call	intr_restore

	leave
	ret
	SET_SIZE(kmdb_enter)

#elif defined(__i386)

	ENTRY_NP(kmdb_enter)
	pushl	%ebp
	movl	%esp, %ebp

	/*
	 * Save flags, do a 'cli' then return the saved flags
	 */
	call	intr_clear

	int	$T_DBGENTR

	/*
	 * Restore the saved flags
	 */
	pushl	%eax
	call	intr_restore
	addl	$4, %esp

	leave
	ret
	SET_SIZE(kmdb_enter)
4085
4086#endif	/* __i386 */
4087#endif	/* __lint */
4088
4089#if defined(__lint)
4090
/*
 * void return_instr(void)
 *
 * A bare return, usable as a no-op function pointer target.
 */
void
return_instr(void)
{}

#else	/* __lint */

	ENTRY_NP(return_instr)
	rep;	ret	/* use 2 byte instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(return_instr)
4101
4102#endif	/* __lint */
4103
4104#if defined(__lint)
4105
/*
 * ulong_t getflags(void)
 *
 * Return the processor flags register.  Under the Xen hypervisor
 * (__xpv) the PS_IE bit is synthesized from the vcpu's event
 * (upcall) mask rather than taken from the pushed flags.
 */
ulong_t
getflags(void)
{
	return (0);
}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(getflags)
	pushfq
	popq	%rax
#if defined(__xpv)
	CURTHREAD(%rdi)
	KPREEMPT_DISABLE(%rdi)
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	CURVCPU(%r11)
	andq    $_BITNOT(PS_IE), %rax
	XEN_TEST_UPCALL_MASK(%r11)
	jnz	1f
	orq	$PS_IE, %rax
1:
	KPREEMPT_ENABLE_NOKP(%rdi)
#endif
	ret
	SET_SIZE(getflags)

#elif defined(__i386)

	ENTRY(getflags)
	pushfl
	popl	%eax
#if defined(__xpv)
	CURTHREAD(%ecx)
	KPREEMPT_DISABLE(%ecx)
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	CURVCPU(%edx)
	andl    $_BITNOT(PS_IE), %eax
	XEN_TEST_UPCALL_MASK(%edx)
	jnz	1f
	orl	$PS_IE, %eax
1:
	KPREEMPT_ENABLE_NOKP(%ecx)
#endif
	ret
	SET_SIZE(getflags)
4157
4158#endif	/* __i386 */
4159
4160#endif	/* __lint */
4161
4162#if defined(__lint)
4163
/*
 * ftrace_icookie_t ftrace_interrupt_disable(void)
 *
 * Return the current flags register as a cookie, then disable
 * interrupts (CLI); pair with ftrace_interrupt_enable().
 */
ftrace_icookie_t
ftrace_interrupt_disable(void)
{ return (0); }

#else   /* __lint */

#if defined(__amd64)

	ENTRY(ftrace_interrupt_disable)
	pushfq
	popq	%rax
	CLI(%rdx)
	ret
	SET_SIZE(ftrace_interrupt_disable)

#elif defined(__i386)

	ENTRY(ftrace_interrupt_disable)
	pushfl
	popl	%eax
	CLI(%edx)
	ret
	SET_SIZE(ftrace_interrupt_disable)
4187
4188#endif	/* __i386 */
4189#endif	/* __lint */
4190
4191#if defined(__lint)
4192
/*
 * void ftrace_interrupt_enable(ftrace_icookie_t cookie)
 *
 * Restore the flags register saved by ftrace_interrupt_disable().
 */
/*ARGSUSED*/
void
ftrace_interrupt_enable(ftrace_icookie_t cookie)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(ftrace_interrupt_enable)
	pushq	%rdi
	popfq
	ret
	SET_SIZE(ftrace_interrupt_enable)

#elif defined(__i386)

	ENTRY(ftrace_interrupt_enable)
	movl	4(%esp), %eax
	pushl	%eax
	popfl
	ret
	SET_SIZE(ftrace_interrupt_enable)
4216
4217#endif	/* __i386 */
4218#endif	/* __lint */
4219
4220#if defined (__lint)
4221
/*
 * void iommu_cpu_nop(void)
 *
 * Spin-wait hint: "rep; nop" is the encoding of the PAUSE instruction.
 */
/*ARGSUSED*/
void
iommu_cpu_nop(void)
{}

#else /* __lint */

	ENTRY(iommu_cpu_nop)
	rep;	nop
	ret
	SET_SIZE(iommu_cpu_nop)
4233
4234#endif /* __lint */
4235
4236#if defined (__lint)
4237
/*
 * void clflush_insn(caddr_t addr)
 *
 * Flush the cache line containing addr from all cache levels.
 */
/*ARGSUSED*/
void
clflush_insn(caddr_t addr)
{}

#else /* __lint */

#if defined (__amd64)
	ENTRY(clflush_insn)
	clflush (%rdi)
	ret
	SET_SIZE(clflush_insn)
#elif defined (__i386)
	ENTRY(clflush_insn)
	movl	4(%esp), %eax
	clflush (%eax)
	ret
	SET_SIZE(clflush_insn)
4256
4257#endif /* __i386 */
4258#endif /* __lint */
4259
4260#if defined (__lint)
/*
 * void mfence_insn(void)
 *
 * Full memory fence: order all prior loads and stores before any
 * subsequent ones.
 */
/*ARGSUSED*/
void
mfence_insn(void)
{}

#else /* __lint */

#if defined (__amd64)
	ENTRY(mfence_insn)
	mfence
	ret
	SET_SIZE(mfence_insn)
#elif defined (__i386)
	ENTRY(mfence_insn)
	mfence
	ret
	SET_SIZE(mfence_insn)
4278
4279#endif /* __i386 */
4280#endif /* __lint */
4281
4282/*
4283 * This is how VMware lets the guests figure that they are running
4284 * on top of VMWare platform :
4285 * Write 0xA in the ECX register and put the I/O port address value of
4286 * 0x564D5868 in the EAX register. Then read a word from port 0x5658.
4287 * If VMWare is installed than this code will be executed correctly and
4288 * the EBX register will contain the same I/O port address value of 0x564D5868.
4289 * If VMWare is not installed then OS will return an exception on port access.
4290 */
4291#if defined(__lint)
4292
/*
 * int vmware_platform(void)
 *
 * Probe the VMware backdoor (see the block comment above): load the
 * magic value into %eax, command 0xa into %ecx, and port 0x5658 into
 * %edx, then do an inl.  Return 1 if %ebx comes back holding the
 * magic value (running under VMware), 0 otherwise.
 */
int
vmware_platform(void) { return (1); }

#else

#if defined(__amd64)

	ENTRY(vmware_platform)
	pushq	%rbx
	xorl	%ebx, %ebx
	movl	$0x564d5868, %eax
	movl	$0xa, %ecx
	movl	$0x5658, %edx
	inl	(%dx)
	movl	$0x564d5868, %ecx
	xorl	%eax, %eax
	cmpl	%ecx, %ebx
	jne	1f
	incl	%eax
1:
	popq	%rbx
	ret
	SET_SIZE(vmware_platform)

#elif defined(__i386)

	ENTRY(vmware_platform)
	pushl	%ebx
	pushl	%ecx
	pushl	%edx
	xorl	%ebx, %ebx
	movl	$0x564d5868, %eax
	movl	$0xa, %ecx
	movl	$0x5658, %edx
	inl	(%dx)
	movl	$0x564d5868, %ecx
	xorl	%eax, %eax
	cmpl	%ecx, %ebx
	jne	1f
	incl	%eax
1:
	popl	%edx
	popl	%ecx
	popl	%ebx
	ret
	SET_SIZE(vmware_platform)
4339
4340#endif /* __i386 */
4341#endif /* __lint */
4342