1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26/*
27 *  Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
28 *  Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
29 *    All Rights Reserved
30 */
31
32#pragma ident	"%Z%%M%	%I%	%E% SMI"
33
34/*
35 * General assembly language routines.
36 * It is the intent of this file to contain routines that are
37 * independent of the specific kernel architecture, and those that are
38 * common across kernel architectures.
39 * As architectures diverge, and implementations of specific
40 * architecture-dependent routines change, the routines should be moved
41 * from this file into the respective ../`arch -k`/subr.s file.
42 */
43
44#include <sys/asm_linkage.h>
45#include <sys/asm_misc.h>
46#include <sys/panic.h>
47#include <sys/ontrap.h>
48#include <sys/regset.h>
49#include <sys/privregs.h>
50#include <sys/reboot.h>
51#include <sys/psw.h>
52#include <sys/x86_archext.h>
53
54#if defined(__lint)
55#include <sys/types.h>
56#include <sys/systm.h>
57#include <sys/thread.h>
58#include <sys/archsystm.h>
59#include <sys/byteorder.h>
60#include <sys/dtrace.h>
61#else	/* __lint */
62#include "assym.h"
63#endif	/* __lint */
64#include <sys/dditypes.h>
65
66/*
67 * on_fault()
68 * Catch lofault faults. Like setjmp, except that it returns 1
69 * if the code that follows causes an uncorrectable fault. Turned off
70 * by calling no_fault().
71 */
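/*
 * Illustrative usage sketch (not part of this file; names other than
 * on_fault/no_fault are hypothetical).  A caller typically brackets a
 * potentially faulting access like this:
 *
 *	label_t	ljb;
 *
 *	if (on_fault(&ljb)) {
 *		no_fault();
 *		return (EFAULT);
 *	}
 *	value = *addr;
 *	no_fault();
 */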
72
73#if defined(__lint)
74
75/* ARGSUSED */
76int
77on_fault(label_t *ljb)
78{ return (0); }
79
80void
81no_fault(void)
82{}
83
84#else	/* __lint */
85
86#if defined(__amd64)
87
88	ENTRY(on_fault)
89	movq	%gs:CPU_THREAD, %rsi
90	leaq	catch_fault(%rip), %rdx
91	movq	%rdi, T_ONFAULT(%rsi)		/* jumpbuf in t_onfault */
92	movq	%rdx, T_LOFAULT(%rsi)		/* catch_fault in t_lofault */
93	jmp	setjmp				/* let setjmp do the rest */
94
95catch_fault:
96	movq	%gs:CPU_THREAD, %rsi
97	movq	T_ONFAULT(%rsi), %rdi		/* address of save area */
98	xorl	%eax, %eax
99	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
100	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
101	jmp	longjmp				/* let longjmp do the rest */
102	SET_SIZE(on_fault)
103
104	ENTRY(no_fault)
105	movq	%gs:CPU_THREAD, %rsi
106	xorl	%eax, %eax
107	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
108	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
109	ret
110	SET_SIZE(no_fault)
111
112#elif defined(__i386)
113
114	ENTRY(on_fault)
115	movl	%gs:CPU_THREAD, %edx
116	movl	4(%esp), %eax			/* jumpbuf address */
117	leal	catch_fault, %ecx
118	movl	%eax, T_ONFAULT(%edx)		/* jumpbuf in t_onfault */
119	movl	%ecx, T_LOFAULT(%edx)		/* catch_fault in t_lofault */
120	jmp	setjmp				/* let setjmp do the rest */
121
122catch_fault:
123	movl	%gs:CPU_THREAD, %edx
124	xorl	%eax, %eax
125	movl	T_ONFAULT(%edx), %ecx		/* address of save area */
126	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
127	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
128	pushl	%ecx
129	call	longjmp				/* let longjmp do the rest */
130	SET_SIZE(on_fault)
131
132	ENTRY(no_fault)
133	movl	%gs:CPU_THREAD, %edx
134	xorl	%eax, %eax
135	movl	%eax, T_ONFAULT(%edx)		/* turn off onfault */
136	movl	%eax, T_LOFAULT(%edx)		/* turn off lofault */
137	ret
138	SET_SIZE(no_fault)
139
140#endif	/* __i386 */
141#endif	/* __lint */
142
143/*
144 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
145 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
146 */
147
148#if defined(lint)
149
150void
151on_trap_trampoline(void)
152{}
153
154#else	/* __lint */
155
156#if defined(__amd64)
157
158	ENTRY(on_trap_trampoline)
159	movq	%gs:CPU_THREAD, %rsi
160	movq	T_ONTRAP(%rsi), %rdi
161	addq	$OT_JMPBUF, %rdi
162	jmp	longjmp
163	SET_SIZE(on_trap_trampoline)
164
165#elif defined(__i386)
166
167	ENTRY(on_trap_trampoline)
168	movl	%gs:CPU_THREAD, %eax
169	movl	T_ONTRAP(%eax), %eax
170	addl	$OT_JMPBUF, %eax
171	pushl	%eax
172	call	longjmp
173	SET_SIZE(on_trap_trampoline)
174
175#endif	/* __i386 */
176#endif	/* __lint */
177
178/*
179 * Push a new element onto the t_ontrap stack.  Refer to <sys/ontrap.h> for
180 * more information about the on_trap() mechanism.  If the on_trap_data is the
181 * same as the topmost stack element, we just modify that element.
182 */
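/*
 * Illustrative usage sketch (not part of this file).  Callers pass a
 * protection mask from <sys/ontrap.h>; OT_DATA_ACCESS below is one such
 * mask, and no_trap() pops the entry again:
 *
 *	on_trap_data_t	otd;
 *
 *	if (on_trap(&otd, OT_DATA_ACCESS) != 0) {
 *		no_trap();
 *		return (EFAULT);
 *	}
 *	value = *addr;
 *	no_trap();
 */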
183#if defined(lint)
184
185/*ARGSUSED*/
186int
187on_trap(on_trap_data_t *otp, uint_t prot)
188{ return (0); }
189
190#else	/* __lint */
191
192#if defined(__amd64)
193
194	ENTRY(on_trap)
195	movw	%si, OT_PROT(%rdi)		/* ot_prot = prot */
196	movw	$0, OT_TRAP(%rdi)		/* ot_trap = 0 */
197	leaq	on_trap_trampoline(%rip), %rdx	/* rdx = &on_trap_trampoline */
198	movq	%rdx, OT_TRAMPOLINE(%rdi)	/* ot_trampoline = rdx */
199	xorl	%ecx, %ecx
200	movq	%rcx, OT_HANDLE(%rdi)		/* ot_handle = NULL */
201	movq	%rcx, OT_PAD1(%rdi)		/* ot_pad1 = NULL */
202	movq	%gs:CPU_THREAD, %rdx		/* rdx = curthread */
203	movq	T_ONTRAP(%rdx), %rcx		/* rcx = curthread->t_ontrap */
204	cmpq	%rdi, %rcx			/* if (otp == %rcx)	*/
205	je	0f				/*	don't modify t_ontrap */
206
207	movq	%rcx, OT_PREV(%rdi)		/* ot_prev = t_ontrap */
208	movq	%rdi, T_ONTRAP(%rdx)		/* curthread->t_ontrap = otp */
209
2100:	addq	$OT_JMPBUF, %rdi		/* &ot_jmpbuf */
211	jmp	setjmp
212	SET_SIZE(on_trap)
213
214#elif defined(__i386)
215
216	ENTRY(on_trap)
217	movl	4(%esp), %eax			/* %eax = otp */
218	movl	8(%esp), %edx			/* %edx = prot */
219
220	movw	%dx, OT_PROT(%eax)		/* ot_prot = prot */
221	movw	$0, OT_TRAP(%eax)		/* ot_trap = 0 */
222	leal	on_trap_trampoline, %edx	/* %edx = &on_trap_trampoline */
223	movl	%edx, OT_TRAMPOLINE(%eax)	/* ot_trampoline = %edx */
224	movl	$0, OT_HANDLE(%eax)		/* ot_handle = NULL */
225	movl	$0, OT_PAD1(%eax)		/* ot_pad1 = NULL */
226	movl	%gs:CPU_THREAD, %edx		/* %edx = curthread */
227	movl	T_ONTRAP(%edx), %ecx		/* %ecx = curthread->t_ontrap */
228	cmpl	%eax, %ecx			/* if (otp == %ecx) */
229	je	0f				/*    don't modify t_ontrap */
230
231	movl	%ecx, OT_PREV(%eax)		/* ot_prev = t_ontrap */
232	movl	%eax, T_ONTRAP(%edx)		/* curthread->t_ontrap = otp */
233
2340:	addl	$OT_JMPBUF, %eax		/* %eax = &ot_jmpbuf */
235	movl	%eax, 4(%esp)			/* put %eax back on the stack */
236	jmp	setjmp				/* let setjmp do the rest */
237	SET_SIZE(on_trap)
238
239#endif	/* __i386 */
240#endif	/* __lint */
241
242/*
243 * Setjmp and longjmp implement non-local gotos using state vectors
244 * of type label_t.
245 */
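/*
 * Illustrative sketch (not part of this file; do_work() and recover()
 * are hypothetical):
 *
 *	label_t	jb;
 *
 *	if (setjmp(&jb) == 0) {
 *		do_work();
 *	} else {
 *		recover();
 *	}
 *
 * A longjmp(&jb) issued anywhere inside do_work() resumes execution in
 * the else branch, with setjmp() appearing to return 1.
 */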
246
247#if defined(__lint)
248
249/* ARGSUSED */
250int
251setjmp(label_t *lp)
252{ return (0); }
253
254/* ARGSUSED */
255void
256longjmp(label_t *lp)
257{}
258
259#else	/* __lint */
260
261#if LABEL_PC != 0
262#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
263#endif	/* LABEL_PC != 0 */
264
265#if defined(__amd64)
266
267	ENTRY(setjmp)
268	movq	%rsp, LABEL_SP(%rdi)
269	movq	%rbp, LABEL_RBP(%rdi)
270	movq	%rbx, LABEL_RBX(%rdi)
271	movq	%r12, LABEL_R12(%rdi)
272	movq	%r13, LABEL_R13(%rdi)
273	movq	%r14, LABEL_R14(%rdi)
274	movq	%r15, LABEL_R15(%rdi)
275	movq	(%rsp), %rdx		/* return address */
276	movq	%rdx, (%rdi)		/* LABEL_PC is 0 */
277	xorl	%eax, %eax		/* return 0 */
278	ret
279	SET_SIZE(setjmp)
280
281	ENTRY(longjmp)
282	movq	LABEL_SP(%rdi), %rsp
283	movq	LABEL_RBP(%rdi), %rbp
284	movq	LABEL_RBX(%rdi), %rbx
285	movq	LABEL_R12(%rdi), %r12
286	movq	LABEL_R13(%rdi), %r13
287	movq	LABEL_R14(%rdi), %r14
288	movq	LABEL_R15(%rdi), %r15
289	movq	(%rdi), %rdx		/* return address; LABEL_PC is 0 */
290	movq	%rdx, (%rsp)
291	xorl	%eax, %eax
292	incl	%eax			/* return 1 */
293	ret
294	SET_SIZE(longjmp)
295
296#elif defined(__i386)
297
298	ENTRY(setjmp)
299	movl	4(%esp), %edx		/* address of save area */
300	movl	%ebp, LABEL_EBP(%edx)
301	movl	%ebx, LABEL_EBX(%edx)
302	movl	%esi, LABEL_ESI(%edx)
303	movl	%edi, LABEL_EDI(%edx)
304	movl	%esp, 4(%edx)
305	movl	(%esp), %ecx		/* %eip (return address) */
306	movl	%ecx, (%edx)		/* LABEL_PC is 0 */
307	subl	%eax, %eax		/* return 0 */
308	ret
309	SET_SIZE(setjmp)
310
311	ENTRY(longjmp)
312	movl	4(%esp), %edx		/* address of save area */
313	movl	LABEL_EBP(%edx), %ebp
314	movl	LABEL_EBX(%edx), %ebx
315	movl	LABEL_ESI(%edx), %esi
316	movl	LABEL_EDI(%edx), %edi
317	movl	4(%edx), %esp
318	movl	(%edx), %ecx		/* %eip (return addr); LABEL_PC is 0 */
319	movl	$1, %eax
320	addl	$4, %esp		/* pop ret adr */
321	jmp	*%ecx			/* indirect */
322	SET_SIZE(longjmp)
323
324#endif	/* __i386 */
325#endif	/* __lint */
326
327/*
328 * If a() calls b() and b() calls caller(), then caller() returns the
329 * return address in a().
330 * (Note: We assume a() and b() are C routines which do the normal entry/exit
331 *  sequence.)
332 */
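/*
 * Illustrative sketch (hypothetical functions):
 *
 *	void a(void) { b(); }
 *	void b(void) { caddr_t ra = caller(); }
 *
 * Here ra is the address in a() to which b() will return.
 */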
333
334#if defined(__lint)
335
336caddr_t
337caller(void)
338{ return (0); }
339
340#else	/* __lint */
341
342#if defined(__amd64)
343
344	ENTRY(caller)
345	movq	8(%rbp), %rax		/* b()'s return pc, in a() */
346	ret
347	SET_SIZE(caller)
348
349#elif defined(__i386)
350
351	ENTRY(caller)
352	movl	4(%ebp), %eax		/* b()'s return pc, in a() */
353	ret
354	SET_SIZE(caller)
355
356#endif	/* __i386 */
357#endif	/* __lint */
358
359/*
360 * If a() calls callee(), callee() returns the
361 * return address in a().
362 */
363
364#if defined(__lint)
365
366caddr_t
367callee(void)
368{ return (0); }
369
370#else	/* __lint */
371
372#if defined(__amd64)
373
374	ENTRY(callee)
375	movq	(%rsp), %rax		/* callee()'s return pc, in a() */
376	ret
377	SET_SIZE(callee)
378
379#elif defined(__i386)
380
381	ENTRY(callee)
382	movl	(%esp), %eax		/* callee()'s return pc, in a() */
383	ret
384	SET_SIZE(callee)
385
386#endif	/* __i386 */
387#endif	/* __lint */
388
389/*
390 * return the current frame pointer
391 */
392
393#if defined(__lint)
394
395greg_t
396getfp(void)
397{ return (0); }
398
399#else	/* __lint */
400
401#if defined(__amd64)
402
403	ENTRY(getfp)
404	movq	%rbp, %rax
405	ret
406	SET_SIZE(getfp)
407
408#elif defined(__i386)
409
410	ENTRY(getfp)
411	movl	%ebp, %eax
412	ret
413	SET_SIZE(getfp)
414
415#endif	/* __i386 */
416#endif	/* __lint */
417
418/*
419 * Invalidate a single page table entry in the TLB
420 */
421
422#if defined(__lint)
423
424/* ARGSUSED */
425void
426mmu_tlbflush_entry(caddr_t m)
427{}
428
429#else	/* __lint */
430
431#if defined(__amd64)
432
433	ENTRY(mmu_tlbflush_entry)
434	invlpg	(%rdi)
435	ret
436	SET_SIZE(mmu_tlbflush_entry)
437
438#elif defined(__i386)
439
440	ENTRY(mmu_tlbflush_entry)
441	movl	4(%esp), %eax
442	invlpg	(%eax)
443	ret
444	SET_SIZE(mmu_tlbflush_entry)
445
446#endif	/* __i386 */
447#endif	/* __lint */
448
449
450/*
451 * Get/Set the value of various control registers
452 */
453
454#if defined(__lint)
455
456ulong_t
457getcr0(void)
458{ return (0); }
459
460/* ARGSUSED */
461void
462setcr0(ulong_t value)
463{}
464
465ulong_t
466getcr2(void)
467{ return (0); }
468
469ulong_t
470getcr3(void)
471{ return (0); }
472
473/* ARGSUSED */
474void
475setcr3(ulong_t val)
476{}
477
478void
479reload_cr3(void)
480{}
481
482ulong_t
483getcr4(void)
484{ return (0); }
485
486/* ARGSUSED */
487void
488setcr4(ulong_t val)
489{}
490
491#if defined(__amd64)
492
493ulong_t
494getcr8(void)
495{ return (0); }
496
497/* ARGSUSED */
498void
499setcr8(ulong_t val)
500{}
501
502#endif	/* __amd64 */
503
504#else	/* __lint */
505
506#if defined(__amd64)
507
508	ENTRY(getcr0)
509	movq	%cr0, %rax
510	ret
511	SET_SIZE(getcr0)
512
513	ENTRY(setcr0)
514	movq	%rdi, %cr0
515	ret
516	SET_SIZE(setcr0)
517
518	ENTRY(getcr2)
519	movq	%cr2, %rax
520	ret
521	SET_SIZE(getcr2)
522
523	ENTRY(getcr3)
524	movq	%cr3, %rax
525	ret
526	SET_SIZE(getcr3)
527
528	ENTRY(setcr3)
529	movq	%rdi, %cr3
530	ret
531	SET_SIZE(setcr3)
532
533	ENTRY(reload_cr3)
534	movq	%cr3, %rdi
535	movq	%rdi, %cr3
536	ret
537	SET_SIZE(reload_cr3)
538
539	ENTRY(getcr4)
540	movq	%cr4, %rax
541	ret
542	SET_SIZE(getcr4)
543
544	ENTRY(setcr4)
545	movq	%rdi, %cr4
546	ret
547	SET_SIZE(setcr4)
548
549	ENTRY(getcr8)
550	movq	%cr8, %rax
551	ret
552	SET_SIZE(getcr8)
553
554	ENTRY(setcr8)
555	movq	%rdi, %cr8
556	ret
557	SET_SIZE(setcr8)
558
559#elif defined(__i386)
560
561        ENTRY(getcr0)
562        movl    %cr0, %eax
563        ret
564	SET_SIZE(getcr0)
565
566        ENTRY(setcr0)
567        movl    4(%esp), %eax
568        movl    %eax, %cr0
569        ret
570	SET_SIZE(setcr0)
571
572        ENTRY(getcr2)
573        movl    %cr2, %eax
574        ret
575	SET_SIZE(getcr2)
576
577	ENTRY(getcr3)
578	movl    %cr3, %eax
579	ret
580	SET_SIZE(getcr3)
581
582        ENTRY(setcr3)
583        movl    4(%esp), %eax
584        movl    %eax, %cr3
585        ret
586	SET_SIZE(setcr3)
587
588	ENTRY(reload_cr3)
589	movl    %cr3, %eax
590	movl    %eax, %cr3
591	ret
592	SET_SIZE(reload_cr3)
593
594	ENTRY(getcr4)
595	movl    %cr4, %eax
596	ret
597	SET_SIZE(getcr4)
598
599        ENTRY(setcr4)
600        movl    4(%esp), %eax
601        movl    %eax, %cr4
602        ret
603	SET_SIZE(setcr4)
604
605#endif	/* __i386 */
606#endif	/* __lint */
607
608#if defined(__lint)
609
610/*ARGSUSED*/
611uint32_t
612__cpuid_insn(struct cpuid_regs *regs)
613{ return (0); }
614
615#else	/* __lint */
616
617#if defined(__amd64)
618
619	ENTRY(__cpuid_insn)
620	movq	%rbx, %r8
621	movq	%rcx, %r9
622	movq	%rdx, %r11
623	movl	(%rdi), %eax		/* %eax = regs->cp_eax */
624	movl	0x4(%rdi), %ebx		/* %ebx = regs->cp_ebx */
625	movl	0x8(%rdi), %ecx		/* %ecx = regs->cp_ecx */
626	movl	0xc(%rdi), %edx		/* %edx = regs->cp_edx */
627	cpuid
628	movl	%eax, (%rdi)		/* regs->cp_eax = %eax */
629	movl	%ebx, 0x4(%rdi)		/* regs->cp_ebx = %ebx */
630	movl	%ecx, 0x8(%rdi)		/* regs->cp_ecx = %ecx */
631	movl	%edx, 0xc(%rdi)		/* regs->cp_edx = %edx */
632	movq	%r8, %rbx
633	movq	%r9, %rcx
634	movq	%r11, %rdx
635	ret
636	SET_SIZE(__cpuid_insn)
637
638#elif defined(__i386)
639
640        ENTRY(__cpuid_insn)
641	pushl	%ebp
642	movl	0x8(%esp), %ebp		/* %ebp = regs */
643	pushl	%ebx
644	pushl	%ecx
645	pushl	%edx
646	movl	(%ebp), %eax		/* %eax = regs->cp_eax */
647	movl	0x4(%ebp), %ebx		/* %ebx = regs->cp_ebx */
648	movl	0x8(%ebp), %ecx		/* %ecx = regs->cp_ecx */
649	movl	0xc(%ebp), %edx		/* %edx = regs->cp_edx */
650	cpuid
651	movl	%eax, (%ebp)		/* regs->cp_eax = %eax */
652	movl	%ebx, 0x4(%ebp)		/* regs->cp_ebx = %ebx */
653	movl	%ecx, 0x8(%ebp)		/* regs->cp_ecx = %ecx */
654	movl	%edx, 0xc(%ebp)		/* regs->cp_edx = %edx */
655	popl	%edx
656	popl	%ecx
657	popl	%ebx
658	popl	%ebp
659	ret
660	SET_SIZE(__cpuid_insn)
661
662#endif	/* __i386 */
663#endif	/* __lint */
664
665/*
666 * Insert entryp after predp in a doubly linked list.
667 */
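/*
 * C equivalent sketch of the routine below.  The element layout is
 * implied by the CPTRSIZE offsets: the first pointer is the forward
 * link, the second is the back link (struct and member names here are
 * illustrative only):
 *
 *	struct qelem {
 *		struct qelem	*q_forw;
 *		struct qelem	*q_back;
 *	};
 *
 *	void
 *	_insque(struct qelem *entryp, struct qelem *predp)
 *	{
 *		struct qelem *forw = predp->q_forw;
 *
 *		entryp->q_back = predp;
 *		entryp->q_forw = forw;
 *		predp->q_forw = entryp;
 *		forw->q_back = entryp;
 *	}
 */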
668
669#if defined(__lint)
670
671/*ARGSUSED*/
672void
673_insque(caddr_t entryp, caddr_t predp)
674{}
675
676#else	/* __lint */
677
678#if defined(__amd64)
679
680	ENTRY(_insque)
681	movq	(%rsi), %rax		/* predp->forw 			*/
682	movq	%rsi, CPTRSIZE(%rdi)	/* entryp->back = predp		*/
683	movq	%rax, (%rdi)		/* entryp->forw = predp->forw	*/
684	movq	%rdi, (%rsi)		/* predp->forw = entryp		*/
685	movq	%rdi, CPTRSIZE(%rax)	/* predp->forw->back = entryp	*/
686	ret
687	SET_SIZE(_insque)
688
689#elif defined(__i386)
690
691	ENTRY(_insque)
692	movl	8(%esp), %edx
693	movl	4(%esp), %ecx
694	movl	(%edx), %eax		/* predp->forw			*/
695	movl	%edx, CPTRSIZE(%ecx)	/* entryp->back = predp		*/
696	movl	%eax, (%ecx)		/* entryp->forw = predp->forw	*/
697	movl	%ecx, (%edx)		/* predp->forw = entryp		*/
698	movl	%ecx, CPTRSIZE(%eax)	/* predp->forw->back = entryp	*/
699	ret
700	SET_SIZE(_insque)
701
702#endif	/* __i386 */
703#endif	/* __lint */
704
705/*
706 * Remove entryp from a doubly linked list
707 */
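/*
 * C equivalent sketch (same illustrative struct qelem as above):
 *
 *	void
 *	_remque(struct qelem *entryp)
 *	{
 *		entryp->q_back->q_forw = entryp->q_forw;
 *		entryp->q_forw->q_back = entryp->q_back;
 *	}
 */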
708
709#if defined(__lint)
710
711/*ARGSUSED*/
712void
713_remque(caddr_t entryp)
714{}
715
716#else	/* __lint */
717
718#if defined(__amd64)
719
720	ENTRY(_remque)
721	movq	(%rdi), %rax		/* entry->forw */
722	movq	CPTRSIZE(%rdi), %rdx	/* entry->back */
723	movq	%rax, (%rdx)		/* entry->back->forw = entry->forw */
724	movq	%rdx, CPTRSIZE(%rax)	/* entry->forw->back = entry->back */
725	ret
726	SET_SIZE(_remque)
727
728#elif defined(__i386)
729
730	ENTRY(_remque)
731	movl	4(%esp), %ecx
732	movl	(%ecx), %eax		/* entry->forw */
733	movl	CPTRSIZE(%ecx), %edx	/* entry->back */
734	movl	%eax, (%edx)		/* entry->back->forw = entry->forw */
735	movl	%edx, CPTRSIZE(%eax)	/* entry->forw->back = entry->back */
736	ret
737	SET_SIZE(_remque)
738
739#endif	/* __i386 */
740#endif	/* __lint */
741
742/*
743 * Returns the number of
744 * non-null bytes in the string argument.
745 */
746
747#if defined(__lint)
748
749/* ARGSUSED */
750size_t
751strlen(const char *str)
752{ return (0); }
753
754#else	/* __lint */
755
756#if defined(__amd64)
757
758/*
759 * This is close to a simple transliteration of a C version of this
760 * routine.  We should either just -make- this be a C version, or
761 * justify having it in assembler by making it significantly faster.
762 *
763 * size_t
764 * strlen(const char *s)
765 * {
766 *	const char *s0;
767 * #if defined(DEBUG)
768 *	if ((uintptr_t)s < KERNELBASE)
769 *		panic(.str_panic_msg);
770 * #endif
771 *	for (s0 = s; *s; s++)
772 *		;
773 *	return (s - s0);
774 * }
775 */
776
777	ENTRY(strlen)
778#ifdef DEBUG
779	movq	kernelbase(%rip), %rax
780	cmpq	%rax, %rdi
781	jae	str_valid
782	pushq	%rbp
783	movq	%rsp, %rbp
784	leaq	.str_panic_msg(%rip), %rdi
785	xorl	%eax, %eax
786	call	panic
787#endif	/* DEBUG */
788str_valid:
789	cmpb	$0, (%rdi)
790	movq	%rdi, %rax
791	je	.null_found
792	.align	4
793.strlen_loop:
794	incq	%rdi
795	cmpb	$0, (%rdi)
796	jne	.strlen_loop
797.null_found:
798	subq	%rax, %rdi
799	movq	%rdi, %rax
800	ret
801	SET_SIZE(strlen)
802
803#elif defined(__i386)
804
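/*
 * The word-aligned loop below uses the classic "does this word contain
 * a zero byte?" bit trick.  In C terms (illustrative sketch only):
 *
 *	uint32_t w = *(uint32_t *)p;
 *	uint32_t t = ((w & 0x7f7f7f7f) + 0x7f7f7f7f) | w;
 *
 * The masking with 0x7f7f7f7f guarantees the per-byte additions cannot
 * carry into the neighboring byte.  For each byte of w, the corresponding
 * bit 7 of t is set unless that byte is 0x00, so
 * (t & 0x80808080) != 0x80808080 exactly when the word contains a null
 * byte, and the loop can advance a full word at a time otherwise.
 */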
805	ENTRY(strlen)
806#ifdef DEBUG
807	movl	kernelbase, %eax
808	cmpl	%eax, 4(%esp)
809	jae	str_valid
810	pushl	%ebp
811	movl	%esp, %ebp
812	pushl	$.str_panic_msg
813	call	panic
814#endif /* DEBUG */
815
816str_valid:
817	movl	4(%esp), %eax		/* %eax = string address */
818	testl	$3, %eax		/* if %eax not word aligned */
819	jnz	.not_word_aligned	/* goto .not_word_aligned */
820	.align	4
821.word_aligned:
822	movl	(%eax), %edx		/* move 1 word from (%eax) to %edx */
823	movl	$0x7f7f7f7f, %ecx
824	andl	%edx, %ecx		/* %ecx = %edx & 0x7f7f7f7f */
825	addl	$4, %eax		/* next word */
826	addl	$0x7f7f7f7f, %ecx	/* %ecx += 0x7f7f7f7f */
827	orl	%edx, %ecx		/* %ecx |= %edx */
828	andl	$0x80808080, %ecx	/* %ecx &= 0x80808080 */
829	cmpl	$0x80808080, %ecx	/* if no null byte in this word */
830	je	.word_aligned		/* goto .word_aligned */
831	subl	$4, %eax		/* post-incremented */
832.not_word_aligned:
833	cmpb	$0, (%eax)		/* if a byte in (%eax) is null */
834	je	.null_found		/* goto .null_found */
835	incl	%eax			/* next byte */
836	testl	$3, %eax		/* if %eax not word aligned */
837	jnz	.not_word_aligned	/* goto .not_word_aligned */
838	jmp	.word_aligned		/* goto .word_aligned */
839	.align	4
840.null_found:
841	subl	4(%esp), %eax		/* %eax -= string address */
842	ret
843	SET_SIZE(strlen)
844
845#endif	/* __i386 */
846
847#ifdef DEBUG
848	.text
849.str_panic_msg:
850	.string "strlen: argument below kernelbase"
851#endif /* DEBUG */
852
853#endif	/* __lint */
854
855	/*
856	 * Berkeley 4.3 introduced symbolically named interrupt levels
857	 * as a way to deal with priority in a machine-independent fashion.
858	 * Numbered priorities are machine specific, and should be
859	 * discouraged where possible.
860	 *
861	 * Note, for the machine specific priorities there are
862	 * examples listed for devices that use a particular priority.
863	 * It should not be construed that all devices of that
864	 * type should be at that priority.  It is currently where
865	 * the current devices fit into the priority scheme based
866	 * upon time criticality.
867	 *
868	 * The underlying assumption of these assignments is that
869	 * IPL 10 is the highest level from which a device
870	 * routine can call wakeup.  Devices that interrupt from higher
871	 * levels are restricted in what they can do.  If they need
872	 * kernel services they should schedule a routine at a lower
873	 * level (via software interrupt) to do the required
874	 * processing.
875	 *
876	 * Examples of this higher usage:
877	 *	Level	Usage
878	 *	14	Profiling clock (and PROM uart polling clock)
879	 *	12	Serial ports
880	 *
881	 * The serial ports request lower level processing on level 6.
882	 *
883	 * Also, almost all splN routines (where N is a number or a
884	 * mnemonic) will do a RAISE(), on the assumption that they are
885	 * never used to lower our priority.
886	 * The exceptions are:
887	 *	spl8()		Because you can't be above 15 to begin with!
888	 *	splzs()		Because this is used at boot time to lower our
889	 *			priority, to allow the PROM to poll the uart.
890	 *	spl0()		Used to lower priority to 0.
891	 */
892
893#if defined(__lint)
894
895int spl0(void)		{ return (0); }
896int spl6(void)		{ return (0); }
897int spl7(void)		{ return (0); }
898int spl8(void)		{ return (0); }
899int splhigh(void)	{ return (0); }
900int splhi(void)		{ return (0); }
901int splzs(void)		{ return (0); }
902
903#else	/* __lint */
904
905/* reg = cpu->cpu_m.cpu_pri; */
906#define	GETIPL_NOGS(reg, cpup)	\
907	movl	CPU_PRI(cpup), reg;
908
909/* cpu->cpu_m.cpu_pri; */
910#define	SETIPL_NOGS(val, cpup)	\
911	movl	val, CPU_PRI(cpup);
912
913/* reg = cpu->cpu_m.cpu_pri; */
914#define	GETIPL(reg)	\
915	movl	%gs:CPU_PRI, reg;
916
917/* cpu->cpu_m.cpu_pri; */
918#define	SETIPL(val)	\
919	movl	val, %gs:CPU_PRI;
920
921/*
922 * Macro to raise processor priority level.
923 * Avoid dropping processor priority if already at high level.
924 * Also avoid going below CPU->cpu_base_spl, which could've just been set by
925 * a higher-level interrupt thread that just blocked.
926 */
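/*
 * Expressed as pseudocode (see also the spl algorithm described in the
 * comment above splx() further below):
 *
 *	turn off interrupts
 *	if (level > CPU->cpu_pri)
 *		join spl to install the new level
 *	else
 *		re-enable interrupts (via the patched sti) and return
 *		the current level
 */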
927#if defined(__amd64)
928
929#define	RAISE(level) \
930	cli;			\
931	LOADCPU(%rcx);		\
932	movl	$/**/level, %edi;\
933	GETIPL_NOGS(%eax, %rcx);\
934	cmpl 	%eax, %edi;	\
935	jg	spl;		\
936	jmp	setsplhisti
937
938#elif defined(__i386)
939
940#define	RAISE(level) \
941	cli;			\
942	LOADCPU(%ecx);		\
943	movl	$/**/level, %edx;\
944	GETIPL_NOGS(%eax, %ecx);\
945	cmpl 	%eax, %edx;	\
946	jg	spl;		\
947	jmp	setsplhisti
948
949#endif	/* __i386 */
950
951/*
952 * Macro to set the priority to a specified level.
953 * Avoid dropping the priority below CPU->cpu_base_spl.
954 */
955#if defined(__amd64)
956
957#define	SETPRI(level) \
958	cli;				\
959	LOADCPU(%rcx);			\
960	movl	$/**/level, %edi;	\
961	jmp	spl
962
963#elif defined(__i386)
964
965#define SETPRI(level) \
966	cli;				\
967	LOADCPU(%ecx);			\
968	movl	$/**/level, %edx;	\
969	jmp	spl
970
971#endif	/* __i386 */
972
973	/* locks out all interrupts, including memory errors */
974	ENTRY(spl8)
975	SETPRI(15)
976	SET_SIZE(spl8)
977
978	/* just below the level that profiling runs */
979	ENTRY(spl7)
980	RAISE(13)
981	SET_SIZE(spl7)
982
983	/* sun specific - highest priority onboard serial i/o asy ports */
984	ENTRY(splzs)
985	SETPRI(12)	/* Can't be a RAISE, as it's used to lower us */
986	SET_SIZE(splzs)
987
988	/*
989	 * should lock out clocks and all interrupts,
990	 * as you can see, there are exceptions
991	 */
992
993#if defined(__amd64)
994
995	.align	16
996	ENTRY(splhi)
997	ALTENTRY(splhigh)
998	ALTENTRY(spl6)
999	ALTENTRY(i_ddi_splhigh)
1000	cli
1001	LOADCPU(%rcx)
1002	movl	$DISP_LEVEL, %edi
1003	movl	CPU_PRI(%rcx), %eax
1004	cmpl	%eax, %edi
1005	jle	setsplhisti
1006	SETIPL_NOGS(%edi, %rcx)
1007	/*
1008	 * If we aren't using cr8 to control ipl then we patch this
1009	 * with a jump to slow_setsplhi
1010	 */
1011	ALTENTRY(setsplhi_patch)
1012	movq	CPU_PRI_DATA(%rcx), %r11 /* get pri data ptr */
1013	movzb	(%r11, %rdi, 1), %rdx	/* get apic mask for this ipl */
1014	movq	%rdx, %cr8		/* set new apic priority */
1015	/*
1016	 * enable interrupts
1017	 */
1018setsplhisti:
1019	nop	/* patch this to a sti when a proper setspl routine appears */
1020	ret
1021
1022	ALTENTRY(slow_setsplhi)
1023	pushq	%rbp
1024	movq	%rsp, %rbp
1025	subq	$16, %rsp
1026	movl	%eax, -4(%rbp)		/* save old ipl */
1027	call	*setspl(%rip)
1028	movl	-4(%rbp), %eax		/* return old ipl */
1029	leave
1030	jmp	setsplhisti
1031
1032	SET_SIZE(i_ddi_splhigh)
1033	SET_SIZE(spl6)
1034	SET_SIZE(splhigh)
1035	SET_SIZE(splhi)
1036
1037#elif defined(__i386)
1038
1039	.align	16
1040	ENTRY(splhi)
1041	ALTENTRY(splhigh)
1042	ALTENTRY(spl6)
1043	ALTENTRY(i_ddi_splhigh)
1044	cli
1045	LOADCPU(%ecx)
1046	movl	$DISP_LEVEL, %edx
1047	movl	CPU_PRI(%ecx), %eax
1048	cmpl	%eax, %edx
1049	jle	setsplhisti
1050	SETIPL_NOGS(%edx, %ecx)		/* set new ipl */
1051
1052	pushl   %eax                    /* save old ipl */
1053	pushl	%edx			/* pass new ipl */
1054	call	*setspl
1055	popl	%ecx			/* dummy pop */
1056	popl    %eax                    /* return old ipl */
1057	/*
1058	 * enable interrupts
1059	 *
1060	 * (we patch this to an sti once a proper setspl routine
1061	 * is installed)
1062	 */
1063setsplhisti:
1064	nop	/* patch this to a sti when a proper setspl routine appears */
1065	ret
1066	SET_SIZE(i_ddi_splhigh)
1067	SET_SIZE(spl6)
1068	SET_SIZE(splhigh)
1069	SET_SIZE(splhi)
1070
1071#endif	/* __i386 */
1072
1073	/* allow all interrupts */
1074	ENTRY(spl0)
1075	SETPRI(0)
1076	SET_SIZE(spl0)
1077
1078#endif	/* __lint */
1079
1080/*
1081 * splr is like splx but will only raise the priority and never drop it
1082 */
1083#if defined(__lint)
1084
1085/* ARGSUSED */
1086int
1087splr(int level)
1088{ return (0); }
1089
1090#else	/* __lint */
1091
1092#if defined(__amd64)
1093
1094	ENTRY(splr)
1095	cli
1096	LOADCPU(%rcx)
1097	GETIPL_NOGS(%eax, %rcx)
1098	cmpl	%eax, %edi		/* if new level > current level */
1099	jg	spl			/* then set ipl to new level */
1100splr_setsti:
1101	nop	/* patch this to a sti when a proper setspl routine appears */
1102	ret				/* else return the current level */
1103	SET_SIZE(splr)
1104
1105#elif defined(__i386)
1106
1107	ENTRY(splr)
1108	cli
1109	LOADCPU(%ecx)
1110	movl	4(%esp), %edx		/* get new spl level */
1111	GETIPL_NOGS(%eax, %ecx)
1112	cmpl 	%eax, %edx		/* if new level > current level */
1113	jg	spl			/* then set ipl to new level */
1114splr_setsti:
1115	nop	/* patch this to a sti when a proper setspl routine appears */
1116	ret				/* else return the current level */
1117	SET_SIZE(splr)
1118
1119#endif	/* __i386 */
1120#endif	/* __lint */
1121
1122
1123
1124/*
1125 * splx - set PIL back to that indicated by the level passed as an argument,
1126 * or to the CPU's base priority, whichever is higher.
1127 * Needs to fall through to spl to save cycles.
1128 * Algorithm for spl:
1129 *
1130 *      turn off interrupts
1131 *
1132 *	if (CPU->cpu_base_spl > newipl)
1133 *		newipl = CPU->cpu_base_spl;
1134 *      oldipl = CPU->cpu_pridata->c_ipl;
1135 *      CPU->cpu_pridata->c_ipl = newipl;
1136 *
1137 *	indirectly call the function that sets spl values (usually setpicmasks):
1138 *      setspl();  // load new masks into pics
1139 *
1140 * Be careful not to set the priority lower than CPU->cpu_base_spl:
1141 * even though it seems we're raising the priority, the base level could
1142 * have been raised at any time by an interrupt routine, so we must block
1143 * interrupts and look at CPU->cpu_base_spl.
1144 */
1145#if defined(__lint)
1146
1147/* ARGSUSED */
1148void
1149splx(int level)
1150{}
1151
1152#else	/* __lint */
1153
1154#if defined(__amd64)
1155
1156	ENTRY(splx)
1157	ALTENTRY(i_ddi_splx)
1158	cli				/* disable interrupts */
1159	LOADCPU(%rcx)
1160	/*FALLTHRU*/
1161	.align	4
1162spl:
1163	/*
1164	 * New priority level is in %edi, cpu struct pointer is in %rcx
1165	 */
1166	GETIPL_NOGS(%eax, %rcx)		/* get current ipl */
1167	cmpl   %edi, CPU_BASE_SPL(%rcx) /* if (base spl > new ipl) */
1168	ja     set_to_base_spl		/* then use base_spl */
1169
1170setprilev:
1171	SETIPL_NOGS(%edi, %rcx)		/* set new ipl */
1172	/*
1173	 * If we aren't using cr8 to control ipl then we patch this
1174	 * with a jump to slow_spl
1175	 */
1176	ALTENTRY(spl_patch)
1177	movq	CPU_PRI_DATA(%rcx), %r11 /* get pri data ptr */
1178	movzb	(%r11, %rdi, 1), %rdx	/* get apic mask for this ipl */
1179	movq	%rdx, %cr8		/* set new apic priority */
1180	xorl	%edx, %edx
1181	bsrl	CPU_SOFTINFO(%rcx), %edx /* fls(cpu->cpu_softinfo.st_pending) */
1182	cmpl	%edi, %edx		/* new ipl vs. st_pending */
1183	jle	setsplsti
1184
1185	pushq	%rbp
1186	movq	%rsp, %rbp
1187	/* stack now 16-byte aligned */
1188	pushq	%rax			/* save old spl */
1189	pushq	%rdi			/* save new ipl too */
1190	jmp	fakesoftint
1191
1192setsplsti:
1193	nop	/* patch this to a sti when a proper setspl routine appears */
1194	ret
1195
1196	ALTENTRY(slow_spl)
1197	pushq	%rbp
1198	movq	%rsp, %rbp
1199	/* stack now 16-byte aligned */
1200
1201	pushq	%rax			/* save old spl */
1202	pushq	%rdi			/* save new ipl too */
1203
1204	call	*setspl(%rip)
1205
1206	LOADCPU(%rcx)
1207	movl	CPU_SOFTINFO(%rcx), %eax
1208	orl	%eax, %eax
1209	jz	slow_setsplsti
1210
1211	bsrl	%eax, %edx		/* fls(cpu->cpu_softinfo.st_pending) */
1212	cmpl	0(%rsp), %edx		/* new ipl vs. st_pending */
1213	jg	fakesoftint
1214
1215	ALTENTRY(fakesoftint_return)
1216	/*
1217	 * enable interrupts
1218	 */
1219slow_setsplsti:
1220	nop	/* patch this to a sti when a proper setspl routine appears */
1221	popq	%rdi
1222	popq	%rax			/* return old ipl */
1223	leave
1224	ret
1225	SET_SIZE(fakesoftint_return)
1226
1227set_to_base_spl:
1228	movl	CPU_BASE_SPL(%rcx), %edi
1229	jmp	setprilev
1230	SET_SIZE(spl)
1231	SET_SIZE(i_ddi_splx)
1232	SET_SIZE(splx)
1233
1234#elif defined(__i386)
1235
1236	ENTRY(splx)
1237	ALTENTRY(i_ddi_splx)
1238	cli                             /* disable interrupts */
1239	LOADCPU(%ecx)
1240	movl	4(%esp), %edx		/* get new spl level */
1241	/*FALLTHRU*/
1242
1243	.align	4
1244	ALTENTRY(spl)
1245	/*
1246	 * New priority level is in %edx
1247	 * (doing this early to avoid an AGI in the next instruction)
1248	 */
1249	GETIPL_NOGS(%eax, %ecx)		/* get current ipl */
1250	cmpl	%edx, CPU_BASE_SPL(%ecx) /* if ( base spl > new ipl) */
1251	ja	set_to_base_spl		/* then use base_spl */
1252
1253setprilev:
1254	SETIPL_NOGS(%edx, %ecx)		/* set new ipl */
1255
1256	pushl   %eax                    /* save old ipl */
1257	pushl	%edx			/* pass new ipl */
1258	call	*setspl
1259
1260	LOADCPU(%ecx)
1261	movl	CPU_SOFTINFO(%ecx), %eax
1262	orl	%eax, %eax
1263	jz	setsplsti
1264
1265	/*
1266	 * Before dashing off, check that setsplsti has been patched.
1267	 */
1268	cmpl	$NOP_INSTR, setsplsti
1269	je	setsplsti
1270
1271	bsrl	%eax, %edx
1272	cmpl	0(%esp), %edx
1273	jg	fakesoftint
1274
1275	ALTENTRY(fakesoftint_return)
1276	/*
1277	 * enable interrupts
1278	 */
1279setsplsti:
1280	nop	/* patch this to a sti when a proper setspl routine appears */
1281	popl	%eax
1282	popl    %eax			/ return old ipl
1283	ret
1284	SET_SIZE(fakesoftint_return)
1285
1286set_to_base_spl:
1287	movl	CPU_BASE_SPL(%ecx), %edx
1288	jmp	setprilev
1289	SET_SIZE(spl)
1290	SET_SIZE(i_ddi_splx)
1291	SET_SIZE(splx)
1292
1293#endif	/* __i386 */
1294#endif	/* __lint */
1295
1296#if defined(__lint)
1297
1298void
1299install_spl(void)
1300{}
1301
1302#else	/* __lint */
1303
1304#if defined(__amd64)
1305
1306	ENTRY_NP(install_spl)
1307	movq	%cr0, %rax
1308	movq	%rax, %rdx
1309	movl	$_BITNOT(CR0_WP), %ecx
1310	movslq	%ecx, %rcx
1311	andq	%rcx, %rax		/* we don't want to take a fault */
1312	movq	%rax, %cr0
1313	jmp	1f
13141:	movb	$STI_INSTR, setsplsti(%rip)
1315	movb	$STI_INSTR, slow_setsplsti(%rip)
1316	movb	$STI_INSTR, setsplhisti(%rip)
1317	movb	$STI_INSTR, splr_setsti(%rip)
1318	testl	$1, intpri_use_cr8(%rip)	/* are using %cr8 ? */
1319	jz	2f				/* no, go patch more */
1320	movq	%rdx, %cr0
1321	ret
13222:
1323	/*
1324	 * Patch spl functions to use slow spl method
1325	 */
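	/*
	 * Each patch point receives a two-byte short jump.  The
	 * displacement below is (target - patch point - 2), i.e. relative
	 * to the end of the two-byte instruction; it is shifted into the
	 * high byte and combined with the jump opcode (JMP_INSTR) in the
	 * low byte, then stored as one 16-bit word (little-endian, so the
	 * opcode byte lands at the patch point).
	 */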
1326	leaq	setsplhi_patch(%rip), %rdi	/* get patch point addr */
1327	leaq	slow_setsplhi(%rip), %rax	/* jmp target */
1328	subq	%rdi, %rax			/* calculate jmp distance */
1329	subq	$2, %rax			/* minus size of jmp instr */
1330	shlq	$8, %rax			/* construct jmp instr */
1331	addq	$JMP_INSTR, %rax
1332	movw	%ax, setsplhi_patch(%rip)	/* patch in the jmp */
1333	leaq	spl_patch(%rip), %rdi		/* get patch point addr */
1334	leaq	slow_spl(%rip), %rax		/* jmp target */
1335	subq	%rdi, %rax			/* calculate jmp distance */
1336	subq	$2, %rax			/* minus size of jmp instr */
1337	shlq	$8, %rax			/* construct jmp instr */
1338	addq	$JMP_INSTR, %rax
1339	movw	%ax, spl_patch(%rip)		/* patch in the jmp */
1340	/*
1341	 * Ensure %cr8 is zero since we aren't using it
1342	 */
1343	xorl	%eax, %eax
1344	movq	%rax, %cr8
1345	movq	%rdx, %cr0
1346	ret
1347	SET_SIZE(install_spl)
1348
1349#elif defined(__i386)
1350
1351	ENTRY_NP(install_spl)
1352	movl	%cr0, %eax
1353	movl	%eax, %edx
1354	andl	$_BITNOT(CR0_WP), %eax	/* we don't want to take a fault */
1355	movl	%eax, %cr0
1356	jmp	1f
13571:	movb	$STI_INSTR, setsplsti
1358	movb	$STI_INSTR, setsplhisti
1359	movb	$STI_INSTR, splr_setsti
1360	movl	%edx, %cr0
1361	ret
1362	SET_SIZE(install_spl)
1363
1364#endif	/* __i386 */
1365#endif	/* __lint */
1366
1367
1368/*
1369 * Get current processor interrupt level
1370 */
1371
1372#if defined(__lint)
1373
1374int
1375getpil(void)
1376{ return (0); }
1377
1378#else	/* __lint */
1379
1380#if defined(__amd64)
1381
1382	ENTRY(getpil)
1383	GETIPL(%eax)			/* priority level into %eax */
1384	ret
1385	SET_SIZE(getpil)
1386
1387#elif defined(__i386)
1388
1389	ENTRY(getpil)
1390	GETIPL(%eax)			/* priority level into %eax */
1391	ret
1392	SET_SIZE(getpil)
1393
1394#endif	/* __i386 */
1395#endif	/* __lint */
1396
1397#if defined(__i386)
1398
1399/*
1400 * Read and write the %gs register
1401 */
1402
1403#if defined(__lint)
1404
1405/*ARGSUSED*/
1406uint16_t
1407getgs(void)
1408{ return (0); }
1409
1410/*ARGSUSED*/
1411void
1412setgs(uint16_t sel)
1413{}
1414
1415#else	/* __lint */
1416
1417	ENTRY(getgs)
1418	clr	%eax
1419	movw	%gs, %ax
1420	ret
1421	SET_SIZE(getgs)
1422
1423	ENTRY(setgs)
1424	movw	4(%esp), %gs
1425	ret
1426	SET_SIZE(setgs)
1427
1428#endif	/* __lint */
1429#endif	/* __i386 */
1430
1431#if defined(__lint)
1432
1433void
1434pc_reset(void)
1435{}
1436
1437void
1438efi_reset(void)
1439{}
1440
1441#else	/* __lint */
1442
1443	ENTRY(wait_500ms)
1444	push	%ebx
1445	movl	$50000, %ebx
14461:
1447	call	tenmicrosec
1448	decl	%ebx
1449	jnz	1b
1450	pop	%ebx
1451	ret
1452	SET_SIZE(wait_500ms)
1453
1454#define	RESET_METHOD_KBC	1
1455#define	RESET_METHOD_PORT92	2
1456#define RESET_METHOD_PCI	4
1457
1458	DGDEF3(pc_reset_methods, 4, 8)
1459	.long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;
1460
1461	ENTRY(pc_reset)
1462
1463#if defined(__i386)
1464	testl	$RESET_METHOD_KBC, pc_reset_methods
1465#elif defined(__amd64)
1466	testl	$RESET_METHOD_KBC, pc_reset_methods(%rip)
1467#endif
1468	jz	1f
1469
1470	/
1471	/ Try the classic keyboard controller-triggered reset.
1472	/
1473	movw	$0x64, %dx
1474	movb	$0xfe, %al
1475	outb	(%dx)
1476
1477	/ Wait up to 500 milliseconds here for the keyboard controller
1478	/ to pull the reset line.  On some systems where the keyboard
1479	/ controller is slow to pull the reset line, the next reset method
1480	/ may be executed (which may be bad if those systems hang when the
1481	/ next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
1482	/ and Ferrari 4000 (doesn't like the cf9 reset method))
1483
1484	call	wait_500ms
1485
14861:
1487#if defined(__i386)
1488	testl	$RESET_METHOD_PORT92, pc_reset_methods
1489#elif defined(__amd64)
1490	testl	$RESET_METHOD_PORT92, pc_reset_methods(%rip)
1491#endif
1492	jz	3f
1493
1494	/
1495	/ Try port 0x92 fast reset
1496	/
1497	movw	$0x92, %dx
1498	inb	(%dx)
1499	cmpb	$0xff, %al	/ If port's not there, we should get back 0xFF
1500	je	1f
1501	testb	$1, %al		/ If bit 0
1502	jz	2f		/ is clear, jump to perform the reset
1503	andb	$0xfe, %al	/ otherwise,
1504	outb	(%dx)		/ clear bit 0 first, then
15052:
1506	orb	$1, %al		/ Set bit 0
1507	outb	(%dx)		/ and reset the system
15081:
1509
1510	call	wait_500ms
1511
15123:
1513#if defined(__i386)
1514	testl	$RESET_METHOD_PCI, pc_reset_methods
1515#elif defined(__amd64)
1516	testl	$RESET_METHOD_PCI, pc_reset_methods(%rip)
1517#endif
1518	jz	4f
1519
1520	/ Try the PCI (soft) reset vector (should work on all modern systems,
1521	/ but has been shown to cause problems on 450NX systems, and some newer
1522	/ systems (e.g. ATI IXP400-equipped systems))
1523	/ When resetting via this method, 2 writes are required.  The first
1524	/ targets bit 1 (0=hard reset without power cycle, 1=hard reset with
1525	/ power cycle).
1526	/ The reset occurs on the second write, during bit 2's transition from
1527	/ 0->1.
1528	movw	$0xcf9, %dx
1529	movb	$0x2, %al	/ Reset mode = hard, no power cycle
1530	outb	(%dx)
1531	movb	$0x6, %al
1532	outb	(%dx)
1533
1534	call	wait_500ms
1535
15364:
1537	/
1538	/ port 0xcf9 failed also.  Last-ditch effort is to
1539	/ triple-fault the CPU.
1540	/ Also, use triple fault for EFI firmware
1541	/
1542	ENTRY(efi_reset)
1543#if defined(__amd64)
1544	pushq	$0x0
1545	pushq	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
1546	lidt	(%rsp)
1547#elif defined(__i386)
1548	pushl	$0x0
1549	pushl	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
1550	lidt	(%esp)
1551#endif
1552	int	$0x0		/ Trigger interrupt, generate triple-fault
1553
1554	cli
1555	hlt			/ Wait forever
1556	/*NOTREACHED*/
1557	SET_SIZE(efi_reset)
1558	SET_SIZE(pc_reset)
1559
1560#endif	/* __lint */
1561
1562/*
1563 * C callable in and out routines
1564 */
1565
1566#if defined(__lint)
1567
1568/* ARGSUSED */
1569void
1570outl(int port_address, uint32_t val)
1571{}
1572
1573#else	/* __lint */
1574
1575#if defined(__amd64)
1576
1577	ENTRY(outl)
1578	movw	%di, %dx
1579	movl	%esi, %eax
1580	outl	(%dx)
1581	ret
1582	SET_SIZE(outl)
1583
1584#elif defined(__i386)
1585
1586	.set	PORT, 4
1587	.set	VAL, 8
1588
1589	ENTRY(outl)
1590	movw	PORT(%esp), %dx
1591	movl	VAL(%esp), %eax
1592	outl	(%dx)
1593	ret
1594	SET_SIZE(outl)
1595
1596#endif	/* __i386 */
1597#endif	/* __lint */
1598
1599#if defined(__lint)
1600
1601/* ARGSUSED */
1602void
1603outw(int port_address, uint16_t val)
1604{}
1605
1606#else	/* __lint */
1607
1608#if defined(__amd64)
1609
1610	ENTRY(outw)
1611	movw	%di, %dx
1612	movw	%si, %ax
1613	D16 outl (%dx)		/* XX64 why not outw? */
1614	ret
1615	SET_SIZE(outw)
1616
1617#elif defined(__i386)
1618
1619	ENTRY(outw)
1620	movw	PORT(%esp), %dx
1621	movw	VAL(%esp), %ax
1622	D16 outl (%dx)
1623	ret
1624	SET_SIZE(outw)
1625
1626#endif	/* __i386 */
1627#endif	/* __lint */
1628
1629#if defined(__lint)
1630
1631/* ARGSUSED */
1632void
1633outb(int port_address, uint8_t val)
1634{}
1635
1636#else	/* __lint */
1637
1638#if defined(__amd64)
1639
1640	ENTRY(outb)
1641	movw	%di, %dx
1642	movb	%sil, %al
1643	outb	(%dx)
1644	ret
1645	SET_SIZE(outb)
1646
1647#elif defined(__i386)
1648
1649	ENTRY(outb)
1650	movw	PORT(%esp), %dx
1651	movb	VAL(%esp), %al
1652	outb	(%dx)
1653	ret
1654	SET_SIZE(outb)
1655
1656#endif	/* __i386 */
1657#endif	/* __lint */
1658
1659#if defined(__lint)
1660
1661/* ARGSUSED */
1662uint32_t
1663inl(int port_address)
1664{ return (0); }
1665
1666#else	/* __lint */
1667
1668#if defined(__amd64)
1669
1670	ENTRY(inl)
1671	xorl	%eax, %eax
1672	movw	%di, %dx
1673	inl	(%dx)
1674	ret
1675	SET_SIZE(inl)
1676
1677#elif defined(__i386)
1678
1679	ENTRY(inl)
1680	movw	PORT(%esp), %dx
1681	inl	(%dx)
1682	ret
1683	SET_SIZE(inl)
1684
1685#endif	/* __i386 */
1686#endif	/* __lint */
1687
1688#if defined(__lint)
1689
1690/* ARGSUSED */
1691uint16_t
1692inw(int port_address)
1693{ return (0); }
1694
1695#else	/* __lint */
1696
1697#if defined(__amd64)
1698
1699	ENTRY(inw)
1700	xorl	%eax, %eax
1701	movw	%di, %dx
1702	D16 inl	(%dx)
1703	ret
1704	SET_SIZE(inw)
1705
1706#elif defined(__i386)
1707
1708	ENTRY(inw)
1709	subl	%eax, %eax
1710	movw	PORT(%esp), %dx
1711	D16 inl	(%dx)
1712	ret
1713	SET_SIZE(inw)
1714
1715#endif	/* __i386 */
1716#endif	/* __lint */
1717
1718
1719#if defined(__lint)
1720
1721/* ARGSUSED */
1722uint8_t
1723inb(int port_address)
1724{ return (0); }
1725
1726#else	/* __lint */
1727
1728#if defined(__amd64)
1729
1730	ENTRY(inb)
1731	xorl	%eax, %eax
1732	movw	%di, %dx
1733	inb	(%dx)
1734	ret
1735	SET_SIZE(inb)
1736
1737#elif defined(__i386)
1738
1739	ENTRY(inb)
1740	subl    %eax, %eax
1741	movw	PORT(%esp), %dx
1742	inb	(%dx)
1743	ret
1744	SET_SIZE(inb)
1745
1746#endif	/* __i386 */
1747#endif	/* __lint */
1748
1749
1750#if defined(__lint)
1751
1752/* ARGSUSED */
1753void
1754repoutsw(int port, uint16_t *addr, int cnt)
1755{}
1756
1757#else	/* __lint */
1758
1759#if defined(__amd64)
1760
1761	ENTRY(repoutsw)
1762	movl	%edx, %ecx
1763	movw	%di, %dx
1764	rep
1765	  D16 outsl
1766	ret
1767	SET_SIZE(repoutsw)
1768
1769#elif defined(__i386)
1770
1771	/*
1772	 * The arguments and saved registers are on the stack in the
1773	 *  following order:
1774	 *      |  cnt  |  +16
1775	 *      | *addr |  +12
1776	 *      | port  |  +8
1777	 *      |  eip  |  +4
1778	 *      |  esi  |  <-- %esp
1779	 * If additional values are pushed onto the stack, make sure
1780	 * to adjust the following constants accordingly.
1781	 */
1782	.set	PORT, 8
1783	.set	ADDR, 12
1784	.set	COUNT, 16
1785
1786	ENTRY(repoutsw)
1787	pushl	%esi
1788	movl	PORT(%esp), %edx
1789	movl	ADDR(%esp), %esi
1790	movl	COUNT(%esp), %ecx
1791	rep
1792	  D16 outsl
1793	popl	%esi
1794	ret
1795	SET_SIZE(repoutsw)
1796
1797#endif	/* __i386 */
1798#endif	/* __lint */
1799
1800
1801#if defined(__lint)
1802
1803/* ARGSUSED */
1804void
1805repinsw(int port_addr, uint16_t *addr, int cnt)
1806{}
1807
1808#else	/* __lint */
1809
1810#if defined(__amd64)
1811
1812	ENTRY(repinsw)
1813	movl	%edx, %ecx
1814	movw	%di, %dx
1815	rep
1816	  D16 insl
1817	ret
1818	SET_SIZE(repinsw)
1819
1820#elif defined(__i386)
1821
1822	ENTRY(repinsw)
1823	pushl	%edi
1824	movl	PORT(%esp), %edx
1825	movl	ADDR(%esp), %edi
1826	movl	COUNT(%esp), %ecx
1827	rep
1828	  D16 insl
1829	popl	%edi
1830	ret
1831	SET_SIZE(repinsw)
1832
1833#endif	/* __i386 */
1834#endif	/* __lint */
1835
1836
1837#if defined(__lint)
1838
1839/* ARGSUSED */
1840void
1841repinsb(int port, uint8_t *addr, int count)
1842{}
1843
1844#else	/* __lint */
1845
1846#if defined(__amd64)
1847
1848	ENTRY(repinsb)
1849	movl	%edx, %ecx
1850	movw	%di, %dx
1851	movq	%rsi, %rdi
1852	rep
1853	  insb
1854	ret
1855	SET_SIZE(repinsb)
1856
1857#elif defined(__i386)
1858
1859	/*
1860	 * The arguments and saved registers are on the stack in the
1861	 *  following order:
1862	 *      |  cnt  |  +16
1863	 *      | *addr |  +12
1864	 *      | port  |  +8
1865	 *      |  eip  |  +4
1866	 *      |  edi  |  <-- %esp
1867	 * If additional values are pushed onto the stack, make sure
1868	 * to adjust the following constants accordingly.
1869	 */
1870	.set	IO_PORT, 8
1871	.set	IO_ADDR, 12
1872	.set	IO_COUNT, 16
1873
1874	ENTRY(repinsb)
1875	pushl	%edi
1876	movl	IO_ADDR(%esp), %edi
1877	movl	IO_COUNT(%esp), %ecx
1878	movl	IO_PORT(%esp), %edx
1879	rep
1880	  insb
1881	popl	%edi
1882	ret
1883	SET_SIZE(repinsb)
1884
1885#endif	/* __i386 */
1886#endif	/* __lint */
1887
1888
1889/*
1890 * Input a stream of 32-bit words.
1891 * NOTE: count is a DWORD count.
1892 */
1893#if defined(__lint)
1894
1895/* ARGSUSED */
1896void
1897repinsd(int port, uint32_t *addr, int count)
1898{}
1899
1900#else	/* __lint */
1901
1902#if defined(__amd64)
1903
1904	ENTRY(repinsd)
1905	movl	%edx, %ecx
1906	movw	%di, %dx
1907	movq	%rsi, %rdi
1908	rep
1909	  insl
1910	ret
1911	SET_SIZE(repinsd)
1912
1913#elif defined(__i386)
1914
1915	ENTRY(repinsd)
1916	pushl	%edi
1917	movl	IO_ADDR(%esp), %edi
1918	movl	IO_COUNT(%esp), %ecx
1919	movl	IO_PORT(%esp), %edx
1920	rep
1921	  insl
1922	popl	%edi
1923	ret
1924	SET_SIZE(repinsd)
1925
1926#endif	/* __i386 */
1927#endif	/* __lint */
1928
1929/*
1930 * Output a stream of bytes
1931 * NOTE: count is a byte count
1932 */
1933#if defined(__lint)
1934
1935/* ARGSUSED */
1936void
1937repoutsb(int port, uint8_t *addr, int count)
1938{}
1939
1940#else	/* __lint */
1941
1942#if defined(__amd64)
1943
1944	ENTRY(repoutsb)
1945	movl	%edx, %ecx
1946	movw	%di, %dx
1947	rep
1948	  outsb
1949	ret
1950	SET_SIZE(repoutsb)
1951
1952#elif defined(__i386)
1953
1954	ENTRY(repoutsb)
1955	pushl	%esi
1956	movl	IO_ADDR(%esp), %esi
1957	movl	IO_COUNT(%esp), %ecx
1958	movl	IO_PORT(%esp), %edx
1959	rep
1960	  outsb
1961	popl	%esi
1962	ret
1963	SET_SIZE(repoutsb)
1964
1965#endif	/* __i386 */
1966#endif	/* __lint */
1967
1968/*
1969 * Output a stream of 32-bit words
1970 * NOTE: count is a DWORD count
1971 */
1972#if defined(__lint)
1973
1974/* ARGSUSED */
1975void
1976repoutsd(int port, uint32_t *addr, int count)
1977{}
1978
1979#else	/* __lint */
1980
1981#if defined(__amd64)
1982
1983	ENTRY(repoutsd)
1984	movl	%edx, %ecx
1985	movw	%di, %dx
1986	rep
1987	  outsl
1988	ret
1989	SET_SIZE(repoutsd)
1990
1991#elif defined(__i386)
1992
1993	ENTRY(repoutsd)
1994	pushl	%esi
1995	movl	IO_ADDR(%esp), %esi
1996	movl	IO_COUNT(%esp), %ecx
1997	movl	IO_PORT(%esp), %edx
1998	rep
1999	  outsl
2000	popl	%esi
2001	ret
2002	SET_SIZE(repoutsd)
2003
2004#endif	/* __i386 */
2005#endif	/* __lint */
2006
2007/*
2008 * void int3(void)
2009 * void int18(void)
2010 * void int20(void)
2011 */
2012
2013#if defined(__lint)
2014
2015void
2016int3(void)
2017{}
2018
2019void
2020int18(void)
2021{}
2022
2023void
2024int20(void)
2025{}
2026
2027#else	/* __lint */
2028
2029	ENTRY(int3)
2030	int	$T_BPTFLT
2031	ret
2032	SET_SIZE(int3)
2033
2034	ENTRY(int18)
2035	int	$T_MCE
2036	ret
2037	SET_SIZE(int18)
2038
2039	ENTRY(int20)
2040	movl	boothowto, %eax
2041	andl	$RB_DEBUG, %eax
2042	jz	1f
2043
2044	int	$T_DBGENTR
20451:
2046	rep;	ret	/* use 2 byte return instruction when branch target */
2047			/* AMD Software Optimization Guide - Section 6.2 */
2048	SET_SIZE(int20)
2049
2050#endif	/* __lint */
2051
2052#if defined(__lint)
2053
2054/* ARGSUSED */
2055int
2056scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
2057{ return (0); }
2058
2059#else	/* __lint */
2060
2061#if defined(__amd64)
2062
2063	ENTRY(scanc)
2064					/* rdi == size */
2065					/* rsi == cp */
2066					/* rdx == table */
2067					/* rcx == mask */
2068	addq	%rsi, %rdi		/* end = &cp[size] */
2069.scanloop:
2070	cmpq	%rdi, %rsi		/* while (cp < end */
2071	jnb	.scandone
2072	movzbq	(%rsi), %r8		/* %r8 = *cp */
2073	incq	%rsi			/* cp++ */
2074	testb	%cl, (%r8, %rdx)
2075	jz	.scanloop		/*  && (table[*cp] & mask) == 0) */
2076	decq	%rsi			/* (fix post-increment) */
2077.scandone:
2078	movl	%edi, %eax
2079	subl	%esi, %eax		/* return (end - cp) */
2080	ret
2081	SET_SIZE(scanc)
2082
2083#elif defined(__i386)
2084
2085	ENTRY(scanc)
2086	pushl	%edi
2087	pushl	%esi
2088	movb	24(%esp), %cl		/* mask = %cl */
2089	movl	16(%esp), %esi		/* cp = %esi */
2090	movl	20(%esp), %edx		/* table = %edx */
2091	movl	%esi, %edi
2092	addl	12(%esp), %edi		/* end = &cp[size]; */
2093.scanloop:
2094	cmpl	%edi, %esi		/* while (cp < end */
2095	jnb	.scandone
2096	movzbl	(%esi),  %eax		/* %al = *cp */
2097	incl	%esi			/* cp++ */
2098	movb	(%edx,  %eax), %al	/* %al = table[*cp] */
2099	testb	%al, %cl
2100	jz	.scanloop		/*   && (table[*cp] & mask) == 0) */
2101	dec	%esi			/* post-incremented */
2102.scandone:
2103	movl	%edi, %eax
2104	subl	%esi, %eax		/* return (end - cp) */
2105	popl	%esi
2106	popl	%edi
2107	ret
2108	SET_SIZE(scanc)
2109
2110#endif	/* __i386 */
2111#endif	/* __lint */
2112
2113/*
2114 * Replacement functions for ones that are normally inlined.
2115 * In addition to the copy in i86.il, they are defined here just in case.
2116 */
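/*
 * For example, the interrupt-flag pair defined below is typically used
 * as a save/restore bracket (illustrative sketch only):
 *
 *	int	flags;
 *
 *	flags = intr_clear();
 *	...critical section with interrupts disabled...
 *	intr_restore(flags);
 */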
2117
2118#if defined(__lint)
2119
2120int
2121intr_clear(void)
2122{ return 0; }
2123
2124int
2125clear_int_flag(void)
2126{ return 0; }
2127
2128#else	/* __lint */
2129
2130#if defined(__amd64)
2131
2132	ENTRY(intr_clear)
2133	ENTRY(clear_int_flag)
2134	pushfq
2135	cli
2136	popq	%rax
2137	ret
2138	SET_SIZE(clear_int_flag)
2139	SET_SIZE(intr_clear)
2140
2141#elif defined(__i386)
2142
2143	ENTRY(intr_clear)
2144	ENTRY(clear_int_flag)
2145	pushfl
2146	cli
2147	popl	%eax
2148	ret
2149	SET_SIZE(clear_int_flag)
2150	SET_SIZE(intr_clear)
2151
2152#endif	/* __i386 */
2153#endif	/* __lint */
2154
2155#if defined(__lint)
2156
2157struct cpu *
2158curcpup(void)
2159{ return 0; }
2160
2161#else	/* __lint */
2162
2163#if defined(__amd64)
2164
2165	ENTRY(curcpup)
2166	movq	%gs:CPU_SELF, %rax
2167	ret
2168	SET_SIZE(curcpup)
2169
2170#elif defined(__i386)
2171
2172	ENTRY(curcpup)
2173	movl	%gs:CPU_SELF, %eax
2174	ret
2175	SET_SIZE(curcpup)
2176
2177#endif	/* __i386 */
2178#endif	/* __lint */
2179
2180#if defined(__lint)
2181
2182/* ARGSUSED */
2183uint32_t
2184htonl(uint32_t i)
2185{ return (0); }
2186
2187/* ARGSUSED */
2188uint32_t
2189ntohl(uint32_t i)
2190{ return (0); }
2191
2192#else	/* __lint */
2193
2194#if defined(__amd64)
2195
2196	/* XX64 there must be shorter sequences for this */
2197	ENTRY(htonl)
2198	ALTENTRY(ntohl)
2199	movl	%edi, %eax
2200	bswap	%eax
2201	ret
2202	SET_SIZE(ntohl)
2203	SET_SIZE(htonl)
2204
2205#elif defined(__i386)
2206
2207	ENTRY(htonl)
2208	ALTENTRY(ntohl)
2209	movl	4(%esp), %eax
2210	bswap	%eax
2211	ret
2212	SET_SIZE(ntohl)
2213	SET_SIZE(htonl)
2214
2215#endif	/* __i386 */
2216#endif	/* __lint */
2217
2218#if defined(__lint)
2219
2220/* ARGSUSED */
2221uint16_t
2222htons(uint16_t i)
2223{ return (0); }
2224
2225/* ARGSUSED */
2226uint16_t
2227ntohs(uint16_t i)
2228{ return (0); }
2229
2230
2231#else	/* __lint */
2232
2233#if defined(__amd64)
2234
2235	/* XX64 there must be better sequences for this */
2236	ENTRY(htons)
2237	ALTENTRY(ntohs)
2238	movl	%edi, %eax
2239	bswap	%eax
2240	shrl	$16, %eax
2241	ret
2242	SET_SIZE(ntohs)
2243	SET_SIZE(htons)
2244
2245#elif defined(__i386)
2246
2247	ENTRY(htons)
2248	ALTENTRY(ntohs)
2249	movl	4(%esp), %eax
2250	bswap	%eax
2251	shrl	$16, %eax
2252	ret
2253	SET_SIZE(ntohs)
2254	SET_SIZE(htons)
2255
2256#endif	/* __i386 */
2257#endif	/* __lint */
2258
2259
2260#if defined(__lint)
2261
2262/* ARGSUSED */
2263void
2264intr_restore(uint_t i)
2265{ return; }
2266
2267/* ARGSUSED */
2268void
2269restore_int_flag(int i)
2270{ return; }
2271
2272#else	/* __lint */
2273
2274#if defined(__amd64)
2275
2276	ENTRY(intr_restore)
2277	ENTRY(restore_int_flag)
2278	pushq	%rdi
2279	popfq
2280	ret
2281	SET_SIZE(restore_int_flag)
2282	SET_SIZE(intr_restore)
2283
2284#elif defined(__i386)
2285
2286	ENTRY(intr_restore)
2287	ENTRY(restore_int_flag)
2288	pushl	4(%esp)
2289	popfl
2290	ret
2291	SET_SIZE(restore_int_flag)
2292	SET_SIZE(intr_restore)
2293
2294#endif	/* __i386 */
2295#endif	/* __lint */
2296
2297#if defined(__lint)
2298
2299void
2300sti(void)
2301{}
2302
2303#else	/* __lint */
2304
2305	ENTRY(sti)
2306	sti
2307	ret
2308	SET_SIZE(sti)
2309
2310#endif	/* __lint */
2311
2312#if defined(__lint)
2313
2314dtrace_icookie_t
2315dtrace_interrupt_disable(void)
2316{ return (0); }
2317
2318#else   /* __lint */
2319
2320#if defined(__amd64)
2321
2322	ENTRY(dtrace_interrupt_disable)
2323	pushfq
2324	popq	%rax
2325	cli
2326	ret
2327	SET_SIZE(dtrace_interrupt_disable)
2328
2329#elif defined(__i386)
2330
2331	ENTRY(dtrace_interrupt_disable)
2332	pushfl
2333	popl	%eax
2334	cli
2335	ret
2336	SET_SIZE(dtrace_interrupt_disable)
2337
2338#endif	/* __i386 */
2339#endif	/* __lint */
2340
2341#if defined(__lint)
2342
2343/*ARGSUSED*/
2344void
2345dtrace_interrupt_enable(dtrace_icookie_t cookie)
2346{}
2347
2348#else	/* __lint */
2349
2350#if defined(__amd64)
2351
2352	ENTRY(dtrace_interrupt_enable)
2353	pushq	%rdi
2354	popfq
2355	ret
2356	SET_SIZE(dtrace_interrupt_enable)
2357
2358#elif defined(__i386)
2359
2360	ENTRY(dtrace_interrupt_enable)
2361	movl	4(%esp), %eax
2362	pushl	%eax
2363	popfl
2364	ret
2365	SET_SIZE(dtrace_interrupt_enable)
2366
2367#endif	/* __i386 */
2368#endif	/* __lint */
2369
2370
2371#if defined(lint)
2372
2373void
2374dtrace_membar_producer(void)
2375{}
2376
2377void
2378dtrace_membar_consumer(void)
2379{}
2380
2381#else	/* __lint */
2382
2383	ENTRY(dtrace_membar_producer)
2384	rep;	ret	/* use 2 byte return instruction when branch target */
2385			/* AMD Software Optimization Guide - Section 6.2 */
2386	SET_SIZE(dtrace_membar_producer)
2387
2388	ENTRY(dtrace_membar_consumer)
2389	rep;	ret	/* use 2 byte return instruction when branch target */
2390			/* AMD Software Optimization Guide - Section 6.2 */
2391	SET_SIZE(dtrace_membar_consumer)
2392
2393#endif	/* __lint */
2394
2395#if defined(__lint)
2396
2397kthread_id_t
2398threadp(void)
2399{ return ((kthread_id_t)0); }
2400
2401#else	/* __lint */
2402
2403#if defined(__amd64)
2404
2405	ENTRY(threadp)
2406	movq	%gs:CPU_THREAD, %rax
2407	ret
2408	SET_SIZE(threadp)
2409
2410#elif defined(__i386)
2411
2412	ENTRY(threadp)
2413	movl	%gs:CPU_THREAD, %eax
2414	ret
2415	SET_SIZE(threadp)
2416
2417#endif	/* __i386 */
2418#endif	/* __lint */
2419
2420/*
2421 *   Checksum routine for Internet Protocol Headers
2422 */
2423
2424#if defined(__lint)
2425
2426/* ARGSUSED */
2427unsigned int
2428ip_ocsum(
2429	ushort_t *address,	/* ptr to 1st message buffer */
2430	int halfword_count,	/* length of data */
2431	unsigned int sum)	/* partial checksum */
2432{
2433	int		i;
2434	unsigned int	psum = 0;	/* partial sum */
2435
2436	for (i = 0; i < halfword_count; i++, address++) {
2437		psum += *address;
2438	}
2439
2440	while ((psum >> 16) != 0) {
2441		psum = (psum & 0xffff) + (psum >> 16);
2442	}
2443
2444	psum += sum;
2445
2446	while ((psum >> 16) != 0) {
2447		psum = (psum & 0xffff) + (psum >> 16);
2448	}
2449
2450	return (psum);
2451}
2452
2453#else	/* __lint */
2454
2455#if defined(__amd64)
2456
2457	ENTRY(ip_ocsum)
2458	pushq	%rbp
2459	movq	%rsp, %rbp
2460#ifdef DEBUG
2461	movq	kernelbase(%rip), %rax
2462	cmpq	%rax, %rdi
2463	jnb	1f
2464	xorl	%eax, %eax
2465	movq	%rdi, %rsi
2466	leaq	.ip_ocsum_panic_msg(%rip), %rdi
2467	call	panic
2468	/*NOTREACHED*/
2469.ip_ocsum_panic_msg:
2470	.string	"ip_ocsum: address 0x%p below kernelbase\n"
24711:
2472#endif
2473	movl	%esi, %ecx	/* halfword_count */
2474	movq	%rdi, %rsi	/* address */
2475				/* partial sum in %edx */
2476	xorl	%eax, %eax
2477	testl	%ecx, %ecx
2478	jz	.ip_ocsum_done
2479	testq	$3, %rsi
2480	jnz	.ip_csum_notaligned
2481.ip_csum_aligned:	/* XX64 opportunities for 8-byte operations? */
2482.next_iter:
2483	/* XX64 opportunities for prefetch? */
2484	/* XX64 compute csum with 64 bit quantities? */
2485	subl	$32, %ecx
2486	jl	.less_than_32
2487
2488	addl	0(%rsi), %edx
2489.only60:
2490	adcl	4(%rsi), %eax
2491.only56:
2492	adcl	8(%rsi), %edx
2493.only52:
2494	adcl	12(%rsi), %eax
2495.only48:
2496	adcl	16(%rsi), %edx
2497.only44:
2498	adcl	20(%rsi), %eax
2499.only40:
2500	adcl	24(%rsi), %edx
2501.only36:
2502	adcl	28(%rsi), %eax
2503.only32:
2504	adcl	32(%rsi), %edx
2505.only28:
2506	adcl	36(%rsi), %eax
2507.only24:
2508	adcl	40(%rsi), %edx
2509.only20:
2510	adcl	44(%rsi), %eax
2511.only16:
2512	adcl	48(%rsi), %edx
2513.only12:
2514	adcl	52(%rsi), %eax
2515.only8:
2516	adcl	56(%rsi), %edx
2517.only4:
2518	adcl	60(%rsi), %eax	/* could be adding -1 and -1 with a carry */
2519.only0:
2520	adcl	$0, %eax	/* could be adding -1 in eax with a carry */
2521	adcl	$0, %eax
2522
2523	addq	$64, %rsi
2524	testl	%ecx, %ecx
2525	jnz	.next_iter
2526
2527.ip_ocsum_done:
2528	addl	%eax, %edx
2529	adcl	$0, %edx
2530	movl	%edx, %eax	/* form a 16 bit checksum by */
2531	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
2532	addw	%dx, %ax
2533	adcw	$0, %ax
2534	andl	$0xffff, %eax
2535	leave
2536	ret
2537
2538.ip_csum_notaligned:
2539	xorl	%edi, %edi
2540	movw	(%rsi), %di
2541	addl	%edi, %edx
2542	adcl	$0, %edx
2543	addq	$2, %rsi
2544	decl	%ecx
2545	jmp	.ip_csum_aligned
2546
2547.less_than_32:
2548	addl	$32, %ecx
2549	testl	$1, %ecx
2550	jz	.size_aligned
2551	andl	$0xfe, %ecx
2552	movzwl	(%rsi, %rcx, 2), %edi
2553	addl	%edi, %edx
2554	adcl	$0, %edx
2555.size_aligned:
2556	movl	%ecx, %edi
2557	shrl	$1, %ecx
2558	shl	$1, %edi
2559	subq	$64, %rdi
2560	addq	%rdi, %rsi
2561	leaq    .ip_ocsum_jmptbl(%rip), %rdi
2562	leaq	(%rdi, %rcx, 8), %rdi
2563	xorl	%ecx, %ecx
2564	clc
2565	jmp 	*(%rdi)
2566
2567	.align	8
2568.ip_ocsum_jmptbl:
2569	.quad	.only0, .only4, .only8, .only12, .only16, .only20
2570	.quad	.only24, .only28, .only32, .only36, .only40, .only44
2571	.quad	.only48, .only52, .only56, .only60
2572	SET_SIZE(ip_ocsum)
2573
2574#elif defined(__i386)
2575
2576	ENTRY(ip_ocsum)
2577	pushl	%ebp
2578	movl	%esp, %ebp
2579	pushl	%ebx
2580	pushl	%esi
2581	pushl	%edi
2582	movl	12(%ebp), %ecx	/* count of half words */
2583	movl	16(%ebp), %edx	/* partial checksum */
2584	movl	8(%ebp), %esi
2585	xorl	%eax, %eax
2586	testl	%ecx, %ecx
2587	jz	.ip_ocsum_done
2588
2589	testl	$3, %esi
2590	jnz	.ip_csum_notaligned
2591.ip_csum_aligned:
2592.next_iter:
2593	subl	$32, %ecx
2594	jl	.less_than_32
2595
2596	addl	0(%esi), %edx
2597.only60:
2598	adcl	4(%esi), %eax
2599.only56:
2600	adcl	8(%esi), %edx
2601.only52:
2602	adcl	12(%esi), %eax
2603.only48:
2604	adcl	16(%esi), %edx
2605.only44:
2606	adcl	20(%esi), %eax
2607.only40:
2608	adcl	24(%esi), %edx
2609.only36:
2610	adcl	28(%esi), %eax
2611.only32:
2612	adcl	32(%esi), %edx
2613.only28:
2614	adcl	36(%esi), %eax
2615.only24:
2616	adcl	40(%esi), %edx
2617.only20:
2618	adcl	44(%esi), %eax
2619.only16:
2620	adcl	48(%esi), %edx
2621.only12:
2622	adcl	52(%esi), %eax
2623.only8:
2624	adcl	56(%esi), %edx
2625.only4:
2626	adcl	60(%esi), %eax	/* We could be adding -1 and -1 with a carry */
2627.only0:
2628	adcl	$0, %eax	/* we could be adding -1 in eax with a carry */
2629	adcl	$0, %eax
2630
2631	addl	$64, %esi
2632	andl	%ecx, %ecx
2633	jnz	.next_iter
2634
2635.ip_ocsum_done:
2636	addl	%eax, %edx
2637	adcl	$0, %edx
2638	movl	%edx, %eax	/* form a 16 bit checksum by */
2639	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
2640	addw	%dx, %ax
2641	adcw	$0, %ax
2642	andl	$0xffff, %eax
2643	popl	%edi		/* restore registers */
2644	popl	%esi
2645	popl	%ebx
2646	leave
2647	ret
2648
2649.ip_csum_notaligned:
2650	xorl	%edi, %edi
2651	movw	(%esi), %di
2652	addl	%edi, %edx
2653	adcl	$0, %edx
2654	addl	$2, %esi
2655	decl	%ecx
2656	jmp	.ip_csum_aligned
2657
2658.less_than_32:
2659	addl	$32, %ecx
2660	testl	$1, %ecx
2661	jz	.size_aligned
2662	andl	$0xfe, %ecx
2663	movzwl	(%esi, %ecx, 2), %edi
2664	addl	%edi, %edx
2665	adcl	$0, %edx
2666.size_aligned:
2667	movl	%ecx, %edi
2668	shrl	$1, %ecx
2669	shl	$1, %edi
2670	subl	$64, %edi
2671	addl	%edi, %esi
2672	movl	$.ip_ocsum_jmptbl, %edi
2673	lea	(%edi, %ecx, 4), %edi
2674	xorl	%ecx, %ecx
2675	clc
2676	jmp 	*(%edi)
2677	SET_SIZE(ip_ocsum)
2678
2679	.data
2680	.align	4
2681
2682.ip_ocsum_jmptbl:
2683	.long	.only0, .only4, .only8, .only12, .only16, .only20
2684	.long	.only24, .only28, .only32, .only36, .only40, .only44
2685	.long	.only48, .only52, .only56, .only60
2686
2687
2688#endif	/* __i386 */
2689#endif	/* __lint */
2690
2691/*
2692 * Multiply two 32-bit unsigned values and yield a 64-bit (u_longlong_t) result, callable from C.
2693 * Provided to manipulate hrtime_t values.
2694 */
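/*
 * Conceptually equivalent C (a sketch only; the real entry points are the
 * assembly routines below):
 *
 *	unsigned long long
 *	mul32(uint_t a, uint_t b)
 *	{
 *		return ((unsigned long long)a * b);
 *	}
 */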
2695#if defined(__lint)
2696
2697/* result = a * b; */
2698
2699/* ARGSUSED */
2700unsigned long long
2701mul32(uint_t a, uint_t b)
2702{ return (0); }
2703
2704#else	/* __lint */
2705
2706#if defined(__amd64)
2707
2708	ENTRY(mul32)
2709	xorl	%edx, %edx	/* XX64 joe, paranoia? */
2710	movl	%edi, %eax
2711	mull	%esi
2712	shlq	$32, %rdx
2713	orq	%rdx, %rax
2714	ret
2715	SET_SIZE(mul32)
2716
2717#elif defined(__i386)
2718
2719	ENTRY(mul32)
2720	movl	8(%esp), %eax
2721	movl	4(%esp), %ecx
2722	mull	%ecx
2723	ret
2724	SET_SIZE(mul32)
2725
2726#endif	/* __i386 */
2727#endif	/* __lint */
2728
2729#if defined(notused)
2730#if defined(__lint)
2731/* ARGSUSED */
2732void
2733load_pte64(uint64_t *pte, uint64_t pte_value)
2734{}
2735#else	/* __lint */
2736	.globl load_pte64
2737load_pte64:
2738	movl	4(%esp), %eax
2739	movl	8(%esp), %ecx
2740	movl	12(%esp), %edx
2741	movl	%edx, 4(%eax)
2742	movl	%ecx, (%eax)
2743	ret
2744#endif	/* __lint */
2745#endif	/* notused */
2746
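/*
 * scan_memory(caddr_t addr, size_t size)
 *
 * Read through the range [addr, addr + size) one word at a time, discarding
 * the data; the loads are done purely for the side effect of referencing
 * the memory.
 */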
2747#if defined(__lint)
2748
2749/*ARGSUSED*/
2750void
2751scan_memory(caddr_t addr, size_t size)
2752{}
2753
2754#else	/* __lint */
2755
2756#if defined(__amd64)
2757
2758	ENTRY(scan_memory)
2759	shrq	$3, %rsi	/* convert %rsi from byte to quadword count */
2760	jz	.scanm_done
2761	movq	%rsi, %rcx	/* move count into rep control register */
2762	movq	%rdi, %rsi	/* move addr into lodsq control reg. */
2763	rep lodsq		/* scan the memory range */
2764.scanm_done:
2765	rep;	ret	/* use 2 byte return instruction when branch target */
2766			/* AMD Software Optimization Guide - Section 6.2 */
2767	SET_SIZE(scan_memory)
2768
2769#elif defined(__i386)
2770
2771	ENTRY(scan_memory)
2772	pushl	%ecx
2773	pushl	%esi
2774	movl	16(%esp), %ecx	/* move 2nd arg into rep control register */
2775	shrl	$2, %ecx	/* convert from byte count to word count */
2776	jz	.scanm_done
2777	movl	12(%esp), %esi	/* move 1st arg into lodsw control register */
2778	.byte	0xf3		/* rep prefix.  lame assembler.  sigh. */
2779	lodsl
2780.scanm_done:
2781	popl	%esi
2782	popl	%ecx
2783	ret
2784	SET_SIZE(scan_memory)
2785
2786#endif	/* __i386 */
2787#endif	/* __lint */
2788
2789
2790#if defined(__lint)
2791
2792/*ARGSUSED */
2793int
2794lowbit(ulong_t i)
2795{ return (0); }
2796
2797#else	/* __lint */
2798
2799#if defined(__amd64)
2800
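	/*
	 * lowbit(i) returns the 1-based position of the least significant
	 * set bit of i, or 0 if no bits are set: %eax starts at -1 and the
	 * code relies on bsf leaving it unmodified when the source is zero,
	 * so the final increment yields 0.  highbit() below is the analogous
	 * routine, using bsr to find the most significant set bit.
	 */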
2801	ENTRY(lowbit)
2802	movl	$-1, %eax
2803	bsfq	%rdi, %rax
2804	incl	%eax
2805	ret
2806	SET_SIZE(lowbit)
2807
2808#elif defined(__i386)
2809
2810	ENTRY(lowbit)
2811	movl	$-1, %eax
2812	bsfl	4(%esp), %eax
2813	incl	%eax
2814	ret
2815	SET_SIZE(lowbit)
2816
2817#endif	/* __i386 */
2818#endif	/* __lint */
2819
2820#if defined(__lint)
2821
2822/*ARGSUSED*/
2823int
2824highbit(ulong_t i)
2825{ return (0); }
2826
2827#else	/* __lint */
2828
2829#if defined(__amd64)
2830
2831	ENTRY(highbit)
2832	movl	$-1, %eax
2833	bsrq	%rdi, %rax
2834	incl	%eax
2835	ret
2836	SET_SIZE(highbit)
2837
2838#elif defined(__i386)
2839
2840	ENTRY(highbit)
2841	movl	$-1, %eax
2842	bsrl	4(%esp), %eax
2843	incl	%eax
2844	ret
2845	SET_SIZE(highbit)
2846
2847#endif	/* __i386 */
2848#endif	/* __lint */
2849
2850#if defined(__lint)
2851
2852/*ARGSUSED*/
2853uint64_t
2854rdmsr(uint_t r)
2855{ return (0); }
2856
2857/*ARGSUSED*/
2858void
2859wrmsr(uint_t r, const uint64_t val)
2860{}
2861
2862/*ARGSUSED*/
2863uint64_t
2864xrdmsr(uint_t r)
2865{ return (0); }
2866
2867/*ARGSUSED*/
2868void
2869xwrmsr(uint_t r, const uint64_t val)
2870{}
2871
2872void
2873invalidate_cache(void)
2874{}
2875
2876#else  /* __lint */
2877
2878#define	XMSR_ACCESS_VAL		$0x9c5a203a
2879
2880#if defined(__amd64)
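	/*
	 * rdmsr returns the MSR contents in %edx:%eax; combine the two
	 * halves into a single 64-bit result in %rax.
	 */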
2881	ENTRY(rdmsr)
2882	movl	%edi, %ecx
2883	rdmsr
2884	shlq	$32, %rdx
2885	orq	%rdx, %rax
2886	ret
2887	SET_SIZE(rdmsr)
2888
2889	ENTRY(wrmsr)
2890	movq	%rsi, %rdx
2891	shrq	$32, %rdx
2892	movl	%esi, %eax
2893	movl	%edi, %ecx
2894	wrmsr
2895	ret
2896	SET_SIZE(wrmsr)
2897
2898	ENTRY(xrdmsr)
2899	movl	%edi, %ecx
2900	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
2901	rdmsr
2902	shlq	$32, %rdx
2903	orq	%rdx, %rax
2904	ret
2905	SET_SIZE(xrdmsr)
2906
2907	ENTRY(xwrmsr)
2908	movl	%edi, %ecx
2909	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
2910	movq	%rsi, %rdx
2911	shrq	$32, %rdx
2912	movl	%esi, %eax
2913	wrmsr
2914	ret
2915	SET_SIZE(xwrmsr)
2916
2917#elif defined(__i386)
2918
2919	ENTRY(rdmsr)
2920	movl	4(%esp), %ecx
2921	rdmsr
2922	ret
2923	SET_SIZE(rdmsr)
2924
2925	ENTRY(wrmsr)
2926	movl	4(%esp), %ecx
2927	movl	8(%esp), %eax
2928	movl	12(%esp), %edx
2929	wrmsr
2930	ret
2931	SET_SIZE(wrmsr)
2932
2933	ENTRY(xrdmsr)
	pushl	%edi			/* %edi is callee-saved in the i386 ABI */
	movl	8(%esp), %ecx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	rdmsr
	popl	%edi
	ret
2938	SET_SIZE(xrdmsr)
2939
2940	ENTRY(xwrmsr)
	pushl	%edi			/* %edi is callee-saved in the i386 ABI */
	movl	8(%esp), %ecx
	movl	12(%esp), %eax
	movl	16(%esp), %edx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	wrmsr
	popl	%edi
	ret
2947	SET_SIZE(xwrmsr)
2948
2949#endif	/* __i386 */
2950
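	/*
	 * invalidate_cache(): write back and invalidate the processor
	 * caches (wbinvd).
	 */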
2951	ENTRY(invalidate_cache)
2952	wbinvd
2953	ret
2954	SET_SIZE(invalidate_cache)
2955
2956#endif	/* __lint */
2957
2958#if defined(__lint)
2959
2960/*ARGSUSED*/
2961void getcregs(struct cregs *crp)
2962{}
2963
2964#else	/* __lint */
2965
2966#if defined(__amd64)
2967
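/*
 * GETMSR(r, off, d): read MSR r and store the 64-bit result as two 32-bit
 * halves at off(d) (low half, from %eax) and off+4(d) (high half, from %edx).
 */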
2968#define	GETMSR(r, off, d)	\
2969	movl	$r, %ecx;	\
2970	rdmsr;			\
2971	movl	%eax, off(d);	\
2972	movl	%edx, off+4(d)
2973
2974	ENTRY_NP(getcregs)
2975	xorl	%eax, %eax
2976	movq	%rax, CREG_GDT+8(%rdi)
2977	sgdt	CREG_GDT(%rdi)		/* 10 bytes */
2978	movq	%rax, CREG_IDT+8(%rdi)
2979	sidt	CREG_IDT(%rdi)		/* 10 bytes */
2980	movq	%rax, CREG_LDT(%rdi)
2981	sldt	CREG_LDT(%rdi)		/* 2 bytes */
2982	movq	%rax, CREG_TASKR(%rdi)
2983	str	CREG_TASKR(%rdi)	/* 2 bytes */
2984	movq	%cr0, %rax
2985	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
2986	movq	%cr2, %rax
2987	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
2988	movq	%cr3, %rax
2989	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
2990	movq	%cr4, %rax
2991	movq	%rax, CREG_CR4(%rdi)	/* cr4 */
2992	movq	%cr8, %rax
2993	movq	%rax, CREG_CR8(%rdi)	/* cr8 */
2994	GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
2995	GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
2996	SET_SIZE(getcregs)
2997
2998#undef GETMSR
2999
3000#elif defined(__i386)
3001
3002	ENTRY_NP(getcregs)
3003	movl	4(%esp), %edx
3004	movw	$0, CREG_GDT+6(%edx)
3005	movw	$0, CREG_IDT+6(%edx)
3006	sgdt	CREG_GDT(%edx)		/* gdt */
3007	sidt	CREG_IDT(%edx)		/* idt */
3008	sldt	CREG_LDT(%edx)		/* ldt */
3009	str	CREG_TASKR(%edx)	/* task */
3010	movl	%cr0, %eax
3011	movl	%eax, CREG_CR0(%edx)	/* cr0 */
3012	movl	%cr2, %eax
3013	movl	%eax, CREG_CR2(%edx)	/* cr2 */
3014	movl	%cr3, %eax
3015	movl	%eax, CREG_CR3(%edx)	/* cr3 */
3016	testl	$X86_LARGEPAGE, x86_feature
3017	jz	.nocr4
3018	movl	%cr4, %eax
3019	movl	%eax, CREG_CR4(%edx)	/* cr4 */
3020	jmp	.skip
3021.nocr4:
3022	movl	$0, CREG_CR4(%edx)
3023.skip:
3024	rep;	ret	/* use 2 byte return instruction when branch target */
3025			/* AMD Software Optimization Guide - Section 6.2 */
3026	SET_SIZE(getcregs)
3027
3028#endif	/* __i386 */
3029#endif	/* __lint */
3030
3031
3032/*
3033 * A panic trigger is a word which is updated atomically and can only be set
3034 * once.  We atomically store 0xDEFACEDD and load the old value.  If the
3035 * previous value was 0, we succeed and return 1; otherwise return 0.
3036 * This allows a partially corrupt trigger to still trigger correctly.  DTrace
3037 * has its own version of this function to allow it to panic correctly from
3038 * probe context.
3039 */
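/*
 * The return value is consumed by vpanic() below: it decides whether the
 * caller switches onto panic_stack and is also passed to panicsys() as the
 * on_panic_stack argument.
 */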
3040#if defined(__lint)
3041
3042/*ARGSUSED*/
3043int
3044panic_trigger(int *tp)
3045{ return (0); }
3046
3047/*ARGSUSED*/
3048int
3049dtrace_panic_trigger(int *tp)
3050{ return (0); }
3051
3052#else	/* __lint */
3053
3054#if defined(__amd64)
3055
3056	ENTRY_NP(panic_trigger)
3057	xorl	%eax, %eax
3058	movl	$0xdefacedd, %edx
3059	lock
3060	  xchgl	%edx, (%rdi)
3061	cmpl	$0, %edx
3062	je	0f
3063	movl	$0, %eax
3064	ret
30650:	movl	$1, %eax
3066	ret
3067	SET_SIZE(panic_trigger)
3068
3069	ENTRY_NP(dtrace_panic_trigger)
3070	xorl	%eax, %eax
3071	movl	$0xdefacedd, %edx
3072	lock
3073	  xchgl	%edx, (%rdi)
3074	cmpl	$0, %edx
3075	je	0f
3076	movl	$0, %eax
3077	ret
30780:	movl	$1, %eax
3079	ret
3080	SET_SIZE(dtrace_panic_trigger)
3081
3082#elif defined(__i386)
3083
3084	ENTRY_NP(panic_trigger)
3085	movl	4(%esp), %edx		/ %edx = address of trigger
3086	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
3087	lock				/ assert lock
3088	xchgl %eax, (%edx)		/ exchange %eax and the trigger
3089	cmpl	$0, %eax		/ if (%eax == 0x0)
3090	je	0f			/   return (1);
3091	movl	$0, %eax		/ else
3092	ret				/   return (0);
30930:	movl	$1, %eax
3094	ret
3095	SET_SIZE(panic_trigger)
3096
3097	ENTRY_NP(dtrace_panic_trigger)
3098	movl	4(%esp), %edx		/ %edx = address of trigger
3099	movl	$0xdefacedd, %eax	/ %eax = 0xdefacedd
3100	lock				/ assert lock
3101	xchgl %eax, (%edx)		/ exchange %eax and the trigger
3102	cmpl	$0, %eax		/ if (%eax == 0x0)
3103	je	0f			/   return (1);
3104	movl	$0, %eax		/ else
3105	ret				/   return (0);
31060:	movl	$1, %eax
3107	ret
3108	SET_SIZE(dtrace_panic_trigger)
3109
3110#endif	/* __i386 */
3111#endif	/* __lint */
3112
3113/*
3114 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
3115 * into the panic code implemented in panicsys().  vpanic() is responsible
3116 * for passing through the format string and arguments, and constructing a
3117 * regs structure on the stack into which it saves the current register
3118 * values.  If we are not dying due to a fatal trap, these registers will
3119 * then be preserved in panicbuf as the current processor state.  Before
3120 * invoking panicsys(), vpanic() activates the first panic trigger (see
3121 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
3122 * DTrace takes a slightly different panic path if it must panic from probe
3123 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
3124 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
3125 * branches back into vpanic().
3126 */
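/*
 * Both vpanic() and dtrace_vpanic() push a fixed set of registers (the
 * layout is annotated below) and record the resulting stack pointer; the
 * common code then builds the struct regs passed to panicsys() from that
 * snapshot.
 */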
3127#if defined(__lint)
3128
3129/*ARGSUSED*/
3130void
3131vpanic(const char *format, va_list alist)
3132{}
3133
3134/*ARGSUSED*/
3135void
3136dtrace_vpanic(const char *format, va_list alist)
3137{}
3138
3139#else	/* __lint */
3140
3141#if defined(__amd64)
3142
3143	ENTRY_NP(vpanic)			/* Initial stack layout: */
3144
3145	pushq	%rbp				/* | %rip | 	0x60	*/
3146	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
3147	pushfq					/* | rfl  |	0x50	*/
3148	pushq	%r11				/* | %r11 |	0x48	*/
3149	pushq	%r10				/* | %r10 |	0x40	*/
3150	pushq	%rbx				/* | %rbx |	0x38	*/
3151	pushq	%rax				/* | %rax |	0x30	*/
3152	pushq	%r9				/* | %r9  |	0x28	*/
3153	pushq	%r8				/* | %r8  |	0x20	*/
3154	pushq	%rcx				/* | %rcx |	0x18	*/
3155	pushq	%rdx				/* | %rdx |	0x10	*/
3156	pushq	%rsi				/* | %rsi |	0x8 alist */
3157	pushq	%rdi				/* | %rdi |	0x0 format */
3158
3159	movq	%rsp, %rbx			/* %rbx = current %rsp */
3160
3161	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
3162	call	panic_trigger			/* %eax = panic_trigger() */
3163
3164vpanic_common:
3165	/*
3166	 * The panic_trigger result is in %eax from the call above, and
3167	 * dtrace_panic places it in %eax before branching here.
3168	 * The rdmsr instructions that follow below will clobber %eax so
3169	 * we stash the panic_trigger result in %r11d.
3170	 */
3171	movl	%eax, %r11d
3172	cmpl	$0, %r11d
3173	je	0f
3174
3175	/*
3176	 * If panic_trigger() was successful, we are the first to initiate a
3177	 * panic: we now switch to the reserved panic_stack before continuing.
3178	 */
3179	leaq	panic_stack(%rip), %rsp
3180	addq	$PANICSTKSIZE, %rsp
31810:	subq	$REGSIZE, %rsp
3182	/*
3183	 * Now that we've got everything set up, store the register values as
3184	 * they were when we entered vpanic() to the designated location in
3185	 * the regs structure we allocated on the stack.
3186	 */
3187	movq	0x0(%rbx), %rcx
3188	movq	%rcx, REGOFF_RDI(%rsp)
3189	movq	0x8(%rbx), %rcx
3190	movq	%rcx, REGOFF_RSI(%rsp)
3191	movq	0x10(%rbx), %rcx
3192	movq	%rcx, REGOFF_RDX(%rsp)
3193	movq	0x18(%rbx), %rcx
3194	movq	%rcx, REGOFF_RCX(%rsp)
3195	movq	0x20(%rbx), %rcx
3196
3197	movq	%rcx, REGOFF_R8(%rsp)
3198	movq	0x28(%rbx), %rcx
3199	movq	%rcx, REGOFF_R9(%rsp)
3200	movq	0x30(%rbx), %rcx
3201	movq	%rcx, REGOFF_RAX(%rsp)
3202	movq	0x38(%rbx), %rcx
3203	movq	%rcx, REGOFF_RBX(%rsp)
3204	movq	0x58(%rbx), %rcx
3205
3206	movq	%rcx, REGOFF_RBP(%rsp)
3207	movq	0x40(%rbx), %rcx
3208	movq	%rcx, REGOFF_R10(%rsp)
3209	movq	0x48(%rbx), %rcx
3210	movq	%rcx, REGOFF_R11(%rsp)
3211	movq	%r12, REGOFF_R12(%rsp)
3212
3213	movq	%r13, REGOFF_R13(%rsp)
3214	movq	%r14, REGOFF_R14(%rsp)
3215	movq	%r15, REGOFF_R15(%rsp)
3216
3217	movl	$MSR_AMD_FSBASE, %ecx
3218	rdmsr
3219	movl	%eax, REGOFF_FSBASE(%rsp)
3220	movl	%edx, REGOFF_FSBASE+4(%rsp)
3221
3222	movl	$MSR_AMD_GSBASE, %ecx
3223	rdmsr
3224	movl	%eax, REGOFF_GSBASE(%rsp)
3225	movl	%edx, REGOFF_GSBASE+4(%rsp)
3226
3227	xorl	%ecx, %ecx
3228	movw	%ds, %cx
3229	movq	%rcx, REGOFF_DS(%rsp)
3230	movw	%es, %cx
3231	movq	%rcx, REGOFF_ES(%rsp)
3232	movw	%fs, %cx
3233	movq	%rcx, REGOFF_FS(%rsp)
3234	movw	%gs, %cx
3235	movq	%rcx, REGOFF_GS(%rsp)
3236
3237	movq	$0, REGOFF_TRAPNO(%rsp)
3238
3239	movq	$0, REGOFF_ERR(%rsp)
3240	leaq	vpanic(%rip), %rcx
3241	movq	%rcx, REGOFF_RIP(%rsp)
3242	movw	%cs, %cx
3243	movzwq	%cx, %rcx
3244	movq	%rcx, REGOFF_CS(%rsp)
3245	movq	0x50(%rbx), %rcx
3246	movq	%rcx, REGOFF_RFL(%rsp)
3247	movq	%rbx, %rcx
3248	addq	$0x60, %rcx
3249	movq	%rcx, REGOFF_RSP(%rsp)
3250	movw	%ss, %cx
3251	movzwq	%cx, %rcx
3252	movq	%rcx, REGOFF_SS(%rsp)
3253
3254	/*
3255	 * panicsys(format, alist, rp, on_panic_stack)
3256	 */
3257	movq	REGOFF_RDI(%rsp), %rdi		/* format */
3258	movq	REGOFF_RSI(%rsp), %rsi		/* alist */
3259	movq	%rsp, %rdx			/* struct regs */
3260	movl	%r11d, %ecx			/* on_panic_stack */
3261	call	panicsys
3262	addq	$REGSIZE, %rsp
3263	popq	%rdi
3264	popq	%rsi
3265	popq	%rdx
3266	popq	%rcx
3267	popq	%r8
3268	popq	%r9
3269	popq	%rax
3270	popq	%rbx
3271	popq	%r10
3272	popq	%r11
3273	popfq
3274	leave
3275	ret
3276	SET_SIZE(vpanic)
3277
3278	ENTRY_NP(dtrace_vpanic)			/* Initial stack layout: */
3279
3280	pushq	%rbp				/* | %rip | 	0x60	*/
3281	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
3282	pushfq					/* | rfl  |	0x50	*/
3283	pushq	%r11				/* | %r11 |	0x48	*/
3284	pushq	%r10				/* | %r10 |	0x40	*/
3285	pushq	%rbx				/* | %rbx |	0x38	*/
3286	pushq	%rax				/* | %rax |	0x30	*/
3287	pushq	%r9				/* | %r9  |	0x28	*/
3288	pushq	%r8				/* | %r8  |	0x20	*/
3289	pushq	%rcx				/* | %rcx |	0x18	*/
3290	pushq	%rdx				/* | %rdx |	0x10	*/
3291	pushq	%rsi				/* | %rsi |	0x8 alist */
3292	pushq	%rdi				/* | %rdi |	0x0 format */
3293
3294	movq	%rsp, %rbx			/* %rbx = current %rsp */
3295
3296	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
3297	call	dtrace_panic_trigger	/* %eax = dtrace_panic_trigger() */
3298	jmp	vpanic_common
3299
3300	SET_SIZE(dtrace_vpanic)
3301
3302#elif defined(__i386)
3303
3304	ENTRY_NP(vpanic)			/ Initial stack layout:
3305
3306	pushl	%ebp				/ | %eip | 20
3307	movl	%esp, %ebp			/ | %ebp | 16
3308	pushl	%eax				/ | %eax | 12
3309	pushl	%ebx				/ | %ebx |  8
3310	pushl	%ecx				/ | %ecx |  4
3311	pushl	%edx				/ | %edx |  0
3312
3313	movl	%esp, %ebx			/ %ebx = current stack pointer
3314
3315	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
3316	pushl	%eax				/ push &panic_quiesce
3317	call	panic_trigger			/ %eax = panic_trigger()
3318	addl	$4, %esp			/ reset stack pointer
3319
3320vpanic_common:
3321	cmpl	$0, %eax			/ if (%eax == 0)
3322	je	0f				/   goto 0f;
3323
3324	/*
3325	 * If panic_trigger() was successful, we are the first to initiate a
3326	 * panic: we now switch to the reserved panic_stack before continuing.
3327	 */
3328	lea	panic_stack, %esp		/ %esp  = panic_stack
3329	addl	$PANICSTKSIZE, %esp		/ %esp += PANICSTKSIZE
3330
33310:	subl	$REGSIZE, %esp			/ allocate struct regs
3332
3333	/*
3334	 * Now that we've got everything set up, store the register values as
3335	 * they were when we entered vpanic() to the designated location in
3336	 * the regs structure we allocated on the stack.
3337	 */
3338#if !defined(__GNUC_AS__)
3339	movw	%gs, %edx
3340	movl	%edx, REGOFF_GS(%esp)
3341	movw	%fs, %edx
3342	movl	%edx, REGOFF_FS(%esp)
3343	movw	%es, %edx
3344	movl	%edx, REGOFF_ES(%esp)
3345	movw	%ds, %edx
3346	movl	%edx, REGOFF_DS(%esp)
3347#else	/* __GNUC_AS__ */
3348	mov	%gs, %edx
3349	mov	%edx, REGOFF_GS(%esp)
3350	mov	%fs, %edx
3351	mov	%edx, REGOFF_FS(%esp)
3352	mov	%es, %edx
3353	mov	%edx, REGOFF_ES(%esp)
3354	mov	%ds, %edx
3355	mov	%edx, REGOFF_DS(%esp)
3356#endif	/* __GNUC_AS__ */
3357	movl	%edi, REGOFF_EDI(%esp)
3358	movl	%esi, REGOFF_ESI(%esp)
3359	movl	16(%ebx), %ecx
3360	movl	%ecx, REGOFF_EBP(%esp)
3361	movl	%ebx, %ecx
3362	addl	$20, %ecx
3363	movl	%ecx, REGOFF_ESP(%esp)
3364	movl	8(%ebx), %ecx
3365	movl	%ecx, REGOFF_EBX(%esp)
3366	movl	0(%ebx), %ecx
3367	movl	%ecx, REGOFF_EDX(%esp)
3368	movl	4(%ebx), %ecx
3369	movl	%ecx, REGOFF_ECX(%esp)
3370	movl	12(%ebx), %ecx
3371	movl	%ecx, REGOFF_EAX(%esp)
3372	movl	$0, REGOFF_TRAPNO(%esp)
3373	movl	$0, REGOFF_ERR(%esp)
3374	lea	vpanic, %ecx
3375	movl	%ecx, REGOFF_EIP(%esp)
3376#if !defined(__GNUC_AS__)
3377	movw	%cs, %edx
3378#else	/* __GNUC_AS__ */
3379	mov	%cs, %edx
3380#endif	/* __GNUC_AS__ */
3381	movl	%edx, REGOFF_CS(%esp)
3382	pushfl
3383	popl	%ecx
3384	movl	%ecx, REGOFF_EFL(%esp)
3385	movl	$0, REGOFF_UESP(%esp)
3386#if !defined(__GNUC_AS__)
3387	movw	%ss, %edx
3388#else	/* __GNUC_AS__ */
3389	mov	%ss, %edx
3390#endif	/* __GNUC_AS__ */
3391	movl	%edx, REGOFF_SS(%esp)
3392
3393	movl	%esp, %ecx			/ %ecx = &regs
3394	pushl	%eax				/ push on_panic_stack
3395	pushl	%ecx				/ push &regs
3396	movl	12(%ebp), %ecx			/ %ecx = alist
3397	pushl	%ecx				/ push alist
3398	movl	8(%ebp), %ecx			/ %ecx = format
3399	pushl	%ecx				/ push format
3400	call	panicsys			/ panicsys();
3401	addl	$16, %esp			/ pop arguments
3402
3403	addl	$REGSIZE, %esp
3404	popl	%edx
3405	popl	%ecx
3406	popl	%ebx
3407	popl	%eax
3408	leave
3409	ret
3410	SET_SIZE(vpanic)
3411
3412	ENTRY_NP(dtrace_vpanic)			/ Initial stack layout:
3413
3414	pushl	%ebp				/ | %eip | 20
3415	movl	%esp, %ebp			/ | %ebp | 16
3416	pushl	%eax				/ | %eax | 12
3417	pushl	%ebx				/ | %ebx |  8
3418	pushl	%ecx				/ | %ecx |  4
3419	pushl	%edx				/ | %edx |  0
3420
3421	movl	%esp, %ebx			/ %ebx = current stack pointer
3422
3423	lea	panic_quiesce, %eax		/ %eax = &panic_quiesce
3424	pushl	%eax				/ push &panic_quiesce
3425	call	dtrace_panic_trigger		/ %eax = dtrace_panic_trigger()
3426	addl	$4, %esp			/ reset stack pointer
3427	jmp	vpanic_common			/ jump back to common code
3428
3429	SET_SIZE(dtrace_vpanic)
3430
3431#endif	/* __i386 */
3432#endif	/* __lint */
3433
3434#if defined(__lint)
3435
3436void
3437hres_tick(void)
3438{}
3439
3440int64_t timedelta;
3441hrtime_t hres_last_tick;
3442timestruc_t hrestime;
3443int64_t hrestime_adj;
3444volatile int hres_lock;
3445uint_t nsec_scale;
3446hrtime_t hrtime_base;
3447
3448#else	/* __lint */
3449
3450	DGDEF3(hrestime, _MUL(2, CLONGSIZE), 8)
3451	.NWORD	0, 0
3452
3453	DGDEF3(hrestime_adj, 8, 8)
3454	.long	0, 0
3455
3456	DGDEF3(hres_last_tick, 8, 8)
3457	.long	0, 0
3458
3459	DGDEF3(timedelta, 8, 8)
3460	.long	0, 0
3461
3462	DGDEF3(hres_lock, 4, 8)
3463	.long	0
3464
3465	/*
3466	 * initialized to a nonzero value to make pc_gethrtime()
3467	 * work correctly even before the clock is initialized
3468	 */
3469	DGDEF3(hrtime_base, 8, 8)
3470	.long	_MUL(NSEC_PER_CLOCK_TICK, 6), 0
3471
3472	DGDEF3(adj_shift, 4, 4)
3473	.long	ADJ_SHIFT
3474
3475#if defined(__amd64)
3476
3477	ENTRY_NP(hres_tick)
3478	pushq	%rbp
3479	movq	%rsp, %rbp
3480
3481	/*
3482	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3483	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
3484	 * At worst, performing this now instead of under CLOCK_LOCK may
3485	 * introduce some jitter in pc_gethrestime().
3486	 */
3487	call	*gethrtimef(%rip)
3488	movq	%rax, %r8
3489
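	/*
	 * Acquire CLOCK_LOCK: spin until we atomically swap -1 into the
	 * low byte of hres_lock and observe a zero previous value.
	 */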
3490	leaq	hres_lock(%rip), %rax
3491	movb	$-1, %dl
3492.CL1:
3493	xchgb	%dl, (%rax)
3494	testb	%dl, %dl
3495	jz	.CL3			/* got it */
3496.CL2:
3497	cmpb	$0, (%rax)		/* possible to get lock? */
3498	pause
3499	jne	.CL2
3500	jmp	.CL1			/* yes, try again */
3501.CL3:
3502	/*
3503	 * compute the interval since last time hres_tick was called
3504	 * and adjust hrtime_base and hrestime accordingly.
3505	 * hrtime_base is an 8-byte value (in nsec); hrestime is
3506	 * a timestruc_t (sec, nsec)
3507	 */
3508	leaq	hres_last_tick(%rip), %rax
3509	movq	%r8, %r11
3510	subq	(%rax), %r8
3511	addq	%r8, hrtime_base(%rip)	/* add interval to hrtime_base */
3512	addq	%r8, hrestime+8(%rip)	/* add interval to hrestime.tv_nsec */
3513	/*
3514	 * Now that we have CLOCK_LOCK, we can update hres_last_tick
3515	 */
3516	movq	%r11, (%rax)
3517
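	/*
	 * Apply any pending clock adjustment (timedelta / hrestime_adj) to
	 * hrestime; see the adj_hrestime() pseudocode in the 32-bit
	 * implementation below.
	 */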
3518	call	__adj_hrestime
3519
3520	/*
3521	 * release the hres_lock
3522	 */
3523	incl	hres_lock(%rip)
3524	leave
3525	ret
3526	SET_SIZE(hres_tick)
3527
3528#elif defined(__i386)
3529
3530	ENTRY_NP(hres_tick)
3531	pushl	%ebp
3532	movl	%esp, %ebp
3533	pushl	%esi
3534	pushl	%ebx
3535
3536	/*
3537	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3538	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
3539	 * At worst, performing this now instead of under CLOCK_LOCK may
3540	 * introduce some jitter in pc_gethrestime().
3541	 */
3542	call	*gethrtimef
3543	movl	%eax, %ebx
3544	movl	%edx, %esi
3545
3546	movl	$hres_lock, %eax
3547	movl	$-1, %edx
3548.CL1:
3549	xchgb	%dl, (%eax)
3550	testb	%dl, %dl
3551	jz	.CL3			/ got it
3552.CL2:
3553	cmpb	$0, (%eax)		/ possible to get lock?
3554	pause
3555	jne	.CL2
3556	jmp	.CL1			/ yes, try again
3557.CL3:
3558	/*
3559	 * compute the interval since last time hres_tick was called
3560	 * and adjust hrtime_base and hrestime accordingly.
3561	 * hrtime_base is an 8-byte value (in nsec); hrestime is
3562	 * timestruc_t (sec, nsec)
3563	 */
3564
3565	lea	hres_last_tick, %eax
3566
3567	movl	%ebx, %edx
3568	movl	%esi, %ecx
3569
3570	subl 	(%eax), %edx
3571	sbbl 	4(%eax), %ecx
3572
3573	addl	%edx, hrtime_base	/ add interval to hrtime_base
3574	adcl	%ecx, hrtime_base+4
3575
3576	addl 	%edx, hrestime+4	/ add interval to hrestime.tv_nsec
3577
3578	/
3579	/ Now that we have CLOCK_LOCK, we can update hres_last_tick.
3580	/
3581	movl	%ebx, (%eax)
3582	movl	%esi,  4(%eax)
3583
3584	/ get hrestime at this moment. used as base for pc_gethrestime
3585	/
3586	/ Apply adjustment, if any
3587	/
3588	/ #define HRES_ADJ	(NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
3589	/ (max_hres_adj)
3590	/
3591	/ void
3592	/ adj_hrestime()
3593	/ {
3594	/	long long adj;
3595	/
3596	/	if (hrestime_adj == 0)
3597	/		adj = 0;
3598	/	else if (hrestime_adj > 0) {
3599	/		if (hrestime_adj < HRES_ADJ)
3600	/			adj = hrestime_adj;
3601	/		else
3602	/			adj = HRES_ADJ;
3603	/	}
3604	/	else {
3605	/		if (hrestime_adj < -(HRES_ADJ))
3606	/			adj = -(HRES_ADJ);
3607	/		else
3608	/			adj = hrestime_adj;
3609	/	}
3610	/
3611	/	timedelta -= adj;
3612	/	hrestime_adj = timedelta;
3613	/	hrestime.tv_nsec += adj;
3614	/
3615	/	while (hrestime.tv_nsec >= NANOSEC) {
3616	/		one_sec++;
3617	/		hrestime.tv_sec++;
3618	/		hrestime.tv_nsec -= NANOSEC;
3619	/	}
3620	/ }
3621__adj_hrestime:
3622	movl	hrestime_adj, %esi	/ if (hrestime_adj == 0)
3623	movl	hrestime_adj+4, %edx
3624	andl	%esi, %esi
3625	jne	.CL4			/ no
3626	andl	%edx, %edx
3627	jne	.CL4			/ no
3628	subl	%ecx, %ecx		/ yes, adj = 0;
3629	subl	%edx, %edx
3630	jmp	.CL5
3631.CL4:
3632	subl	%ecx, %ecx
3633	subl	%eax, %eax
3634	subl	%esi, %ecx
3635	sbbl	%edx, %eax
3636	andl	%eax, %eax		/ if (hrestime_adj > 0)
3637	jge	.CL6
3638
3639	/ In the following comments, HRES_ADJ is used, while in the code
3640	/ max_hres_adj is used.
3641	/
3642	/ The test for "hrestime_adj < HRES_ADJ" is complicated because
3643	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
3644	/ on the logical equivalence of:
3645	/
3646	/	!(hrestime_adj < HRES_ADJ)
3647	/
3648	/ and the two step sequence:
3649	/
3650	/	(HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
3651	/
3652	/ which computes whether or not the least significant 32-bits
3653	/ of hrestime_adj is greater than HRES_ADJ, followed by:
3654	/
3655	/	Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
3656	/
3657	/ which generates a carry whenever step 1 is true or the most
3658	/ significant long of the longlong hrestime_adj is non-zero.
3659
3660	movl	max_hres_adj, %ecx	/ hrestime_adj is positive
3661	subl	%esi, %ecx
3662	movl	%edx, %eax
3663	adcl	$-1, %eax
3664	jnc	.CL7
3665	movl	max_hres_adj, %ecx	/ adj = HRES_ADJ;
3666	subl	%edx, %edx
3667	jmp	.CL5
3668
3669	/ The following computation is similar to the one above.
3670	/
3671	/ The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
3672	/ hrestime_adj is 64-bits, while HRES_ADJ is 32-bits.  We rely
3673	/ on the logical equivalence of:
3674	/
3675	/	(hrestime_adj > -HRES_ADJ)
3676	/
3677	/ and the two step sequence:
3678	/
3679	/	(HRES_ADJ + lsw(hrestime_adj)) generates a Carry
3680	/
3681	/ which means the least significant 32-bits of hrestime_adj is
3682	/ greater than -HRES_ADJ, followed by:
3683	/
3684	/	Previous Carry + 0 + msw(hrestime_adj) generates a Carry
3685	/
3686	/ which generates a carry only when step 1 is true and the most
3687	/ significant long of the longlong hrestime_adj is -1.
3688
3689.CL6:					/ hrestime_adj is negative
3690	movl	%esi, %ecx
3691	addl	max_hres_adj, %ecx
3692	movl	%edx, %eax
3693	adcl	$0, %eax
3694	jc	.CL7
3695	xor	%ecx, %ecx
3696	subl	max_hres_adj, %ecx	/ adj = -(HRES_ADJ);
3697	movl	$-1, %edx
3698	jmp	.CL5
3699.CL7:
3700	movl	%esi, %ecx		/ adj = hrestime_adj;
3701.CL5:
3702	movl	timedelta, %esi
3703	subl	%ecx, %esi
3704	movl	timedelta+4, %eax
3705	sbbl	%edx, %eax
3706	movl	%esi, timedelta
3707	movl	%eax, timedelta+4	/ timedelta -= adj;
3708	movl	%esi, hrestime_adj
3709	movl	%eax, hrestime_adj+4	/ hrestime_adj = timedelta;
3710	addl	hrestime+4, %ecx
3711
3712	movl	%ecx, %eax		/ eax = tv_nsec
37131:
3714	cmpl	$NANOSEC, %eax		/ if ((unsigned long)tv_nsec >= NANOSEC)
3715	jb	.CL8			/ no
3716	incl	one_sec			/ yes,  one_sec++;
3717	incl	hrestime		/ hrestime.tv_sec++;
3718	addl	$-NANOSEC, %eax		/ tv_nsec -= NANOSEC
3719	jmp	1b			/ check for more seconds
3720
3721.CL8:
3722	movl	%eax, hrestime+4	/ store final into hrestime.tv_nsec
3723	incl	hres_lock		/ release the hres_lock
3724
3725	popl	%ebx
3726	popl	%esi
3727	leave
3728	ret
3729	SET_SIZE(hres_tick)
3730
3731#endif	/* __i386 */
3732#endif	/* __lint */
3733
3734/*
3735 * void prefetch_smap_w(void *)
3736 *
3737 * Prefetch ahead within a linear list of smap structures.
3738 * Not implemented for ia32.  Stub for compatibility.
3739 */
3740
3741#if defined(__lint)
3742
3743/*ARGSUSED*/
3744void prefetch_smap_w(void *smp)
3745{}
3746
3747#else	/* __lint */
3748
3749	ENTRY(prefetch_smap_w)
3750	rep;	ret	/* use 2 byte return instruction when branch target */
3751			/* AMD Software Optimization Guide - Section 6.2 */
3752	SET_SIZE(prefetch_smap_w)
3753
3754#endif	/* __lint */
3755
3756/*
3757 * prefetch_page_r(page_t *)
3758 * issue prefetch instructions for a page_t
3759 */
3760#if defined(__lint)
3761
3762/*ARGSUSED*/
3763void
3764prefetch_page_r(void *pp)
3765{}
3766
3767#else	/* __lint */
3768
3769	ENTRY(prefetch_page_r)
3770	rep;	ret	/* use 2 byte return instruction when branch target */
3771			/* AMD Software Optimization Guide - Section 6.2 */
3772	SET_SIZE(prefetch_page_r)
3773
3774#endif	/* __lint */
3775
3776#if defined(__lint)
3777
3778/*ARGSUSED*/
3779int
3780bcmp(const void *s1, const void *s2, size_t count)
3781{ return (0); }
3782
3783#else   /* __lint */
3784
3785#if defined(__amd64)
3786
3787	ENTRY(bcmp)
3788	pushq	%rbp
3789	movq	%rsp, %rbp
3790#ifdef DEBUG
3791	movq	kernelbase(%rip), %r11
3792	cmpq	%r11, %rdi
3793	jb	0f
3794	cmpq	%r11, %rsi
3795	jnb	1f
37960:	leaq	.bcmp_panic_msg(%rip), %rdi
3797	xorl	%eax, %eax
3798	call	panic
37991:
3800#endif	/* DEBUG */
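	/*
	 * memcmp() returns a signed three-way result; bcmp() only reports
	 * equal / not-equal, so collapse the result to 0 or 1.
	 */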
3801	call	memcmp
3802	testl	%eax, %eax
3803	setne	%dl
3804	leave
3805	movzbl	%dl, %eax
3806	ret
3807	SET_SIZE(bcmp)
3808
3809#elif defined(__i386)
3810
3811#define	ARG_S1		8
3812#define	ARG_S2		12
3813#define	ARG_LENGTH	16
3814
3815	ENTRY(bcmp)
3816	pushl	%ebp
3817	movl	%esp, %ebp	/ create new stack frame
3818#ifdef DEBUG
3819	movl    kernelbase, %eax
3820	cmpl    %eax, ARG_S1(%ebp)
3821	jb	0f
3822	cmpl    %eax, ARG_S2(%ebp)
3823	jnb	1f
38240:	pushl   $.bcmp_panic_msg
3825	call    panic
38261:
3827#endif	/* DEBUG */
3828
3829	pushl	%edi		/ save register variable
3830	movl	ARG_S1(%ebp), %eax	/ %eax = address of string 1
3831	movl	ARG_S2(%ebp), %ecx	/ %ecx = address of string 2
3832	cmpl	%eax, %ecx	/ if the same string
3833	je	.equal		/ goto .equal
3834	movl	ARG_LENGTH(%ebp), %edi	/ %edi = length in bytes
3835	cmpl	$4, %edi	/ if %edi < 4
3836	jb	.byte_check	/ goto .byte_check
3837	.align	4
3838.word_loop:
3839	movl	(%ecx), %edx	/ move 1 word from (%ecx) to %edx
3840	leal	-4(%edi), %edi	/ %edi -= 4
3841	cmpl	(%eax), %edx	/ compare 1 word from (%eax) with %edx
3842	jne	.word_not_equal	/ if not equal, goto .word_not_equal
3843	leal	4(%ecx), %ecx	/ %ecx += 4 (next word)
3844	leal	4(%eax), %eax	/ %eax += 4 (next word)
3845	cmpl	$4, %edi	/ if %edi >= 4
3846	jae	.word_loop	/ goto .word_loop
3847.byte_check:
3848	cmpl	$0, %edi	/ if %edi == 0
3849	je	.equal		/ goto .equal
3850	jmp	.byte_loop	/ goto .byte_loop (checks in bytes)
3851.word_not_equal:
3852	leal	4(%edi), %edi	/ %edi += 4 (post-decremented)
3853	.align	4
3854.byte_loop:
3855	movb	(%ecx),	%dl	/ move 1 byte from (%ecx) to %dl
3856	cmpb	%dl, (%eax)	/ compare %dl with 1 byte from (%eax)
3857	jne	.not_equal	/ if not equal, goto .not_equal
3858	incl	%ecx		/ %ecx++ (next byte)
3859	incl	%eax		/ %eax++ (next byte)
3860	decl	%edi		/ %edi--
3861	jnz	.byte_loop	/ if not zero, goto .byte_loop
3862.equal:
3863	xorl	%eax, %eax	/ %eax = 0
3864	popl	%edi		/ restore register variable
3865	leave			/ restore old stack frame
3866	ret			/ return (NULL)
3867	.align	4
3868.not_equal:
3869	movl	$1, %eax	/ return 1
3870	popl	%edi		/ restore register variable
3871	leave			/ restore old stack frame
3872	ret			/ return (NULL)
3873	SET_SIZE(bcmp)
3874
3875#endif	/* __i386 */
3876
3877#ifdef DEBUG
3878	.text
3879.bcmp_panic_msg:
3880	.string "bcmp: arguments below kernelbase"
3881#endif	/* DEBUG */
3882
3883#endif	/* __lint */
3884