/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2014 by Delphix. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

/*
 *  Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
 *  Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
 *    All Rights Reserved
 */

/*
 * Copyright (c) 2009, Intel Corporation.
 * All rights reserved.
 */

/*
 * General assembly language routines.
 * It is the intent of this file to contain routines that are
 * independent of the specific kernel architecture, and those that are
 * common across kernel architectures.
 * As architectures diverge, and implementations of specific
 * architecture-dependent routines change, the routines should be moved
 * from this file into the respective ../`arch -k`/subr.s file.
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/panic.h>
#include <sys/ontrap.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/reboot.h>
#include <sys/psw.h>
#include <sys/x86_archext.h>

#include "assym.h"
#include <sys/dditypes.h>

/*
 * on_fault()
 *
 * Catch lofault faults.  Like setjmp(), except that it returns 1 if the
 * code that follows causes an uncorrectable fault.  Turned off by
 * calling no_fault().  Note that while under on_fault(), SMAP is
 * disabled.  For more information see uts/intel/ml/copy.s.
 */
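
/*
 * Illustrative sketch only (not taken from this file): typical C usage
 * of on_fault()/no_fault() to guard an access that may take a lofault
 * fault.  The variable names below are hypothetical.
 *
 *	label_t ljb;
 *
 *	if (on_fault(&ljb)) {
 *		no_fault();
 *		return (EFAULT);
 *	}
 *	value = *addr;
 *	no_fault();
 *	return (0);
 */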

	ENTRY(on_fault)
	movq	%gs:CPU_THREAD, %rsi
	leaq	catch_fault(%rip), %rdx
	movq	%rdi, T_ONFAULT(%rsi)		/* jumpbuf in t_onfault */
	movq	%rdx, T_LOFAULT(%rsi)		/* catch_fault in t_lofault */
	call	smap_disable			/* allow user accesses */
	jmp	setjmp				/* let setjmp do the rest */

catch_fault:
	movq	%gs:CPU_THREAD, %rsi
	movq	T_ONFAULT(%rsi), %rdi		/* address of save area */
	xorl	%eax, %eax
	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
	call	smap_enable			/* disallow user accesses */
	jmp	longjmp				/* let longjmp do the rest */
	SET_SIZE(on_fault)

	ENTRY(no_fault)
	movq	%gs:CPU_THREAD, %rsi
	xorl	%eax, %eax
	movq	%rax, T_ONFAULT(%rsi)		/* turn off onfault */
	movq	%rax, T_LOFAULT(%rsi)		/* turn off lofault */
	call	smap_enable			/* disallow user accesses */
	ret
	SET_SIZE(no_fault)

/*
 * Default trampoline code for on_trap() (see <sys/ontrap.h>).  We just
 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
 */

	ENTRY(on_trap_trampoline)
	movq	%gs:CPU_THREAD, %rsi
	movq	T_ONTRAP(%rsi), %rdi
	addq	$OT_JMPBUF, %rdi
	jmp	longjmp
	SET_SIZE(on_trap_trampoline)

/*
 * Push a new element on to the t_ontrap stack.  Refer to <sys/ontrap.h> for
 * more information about the on_trap() mechanism.  If the on_trap_data is the
 * same as the topmost stack element, we just modify that element.
 */
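
/*
 * Illustrative sketch only (not taken from this file): typical C usage
 * of on_trap() to protect a region of code.  The protection flag and
 * variable names are assumptions for the example; see <sys/ontrap.h>
 * for the real interface.
 *
 *	on_trap_data_t otd;
 *
 *	if (!on_trap(&otd, OT_DATA_ACCESS)) {
 *		value = *addr;
 *	}
 *	no_trap();
 */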

	ENTRY(on_trap)
	movw	%si, OT_PROT(%rdi)		/* ot_prot = prot */
	movw	$0, OT_TRAP(%rdi)		/* ot_trap = 0 */
	leaq	on_trap_trampoline(%rip), %rdx	/* rdx = &on_trap_trampoline */
	movq	%rdx, OT_TRAMPOLINE(%rdi)	/* ot_trampoline = rdx */
	xorl	%ecx, %ecx
	movq	%rcx, OT_HANDLE(%rdi)		/* ot_handle = NULL */
	movq	%rcx, OT_PAD1(%rdi)		/* ot_pad1 = NULL */
	movq	%gs:CPU_THREAD, %rdx		/* rdx = curthread */
	movq	T_ONTRAP(%rdx), %rcx		/* rcx = curthread->t_ontrap */
	cmpq	%rdi, %rcx			/* if (otp == %rcx)	*/
	je	0f				/*	don't modify t_ontrap */

	movq	%rcx, OT_PREV(%rdi)		/* ot_prev = t_ontrap */
	movq	%rdi, T_ONTRAP(%rdx)		/* curthread->t_ontrap = otp */

0:	addq	$OT_JMPBUF, %rdi		/* &ot_jmpbuf */
	jmp	setjmp
	SET_SIZE(on_trap)

/*
 * Setjmp and longjmp implement non-local gotos using state vectors of
 * type label_t.
 */
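
/*
 * Illustrative sketch only (not taken from this file): the kernel
 * setjmp()/longjmp() pair below operates on a label_t.  setjmp() returns
 * 0 when called directly and 1 when control arrives via longjmp().  The
 * function names in the example are hypothetical.
 *
 *	label_t jb;
 *
 *	if (setjmp(&jb) == 0)
 *		do_work();
 *	else
 *		recover();
 */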

#if LABEL_PC != 0
#error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
#endif	/* LABEL_PC != 0 */

	ENTRY(setjmp)
	movq	%rsp, LABEL_SP(%rdi)
	movq	%rbp, LABEL_RBP(%rdi)
	movq	%rbx, LABEL_RBX(%rdi)
	movq	%r12, LABEL_R12(%rdi)
	movq	%r13, LABEL_R13(%rdi)
	movq	%r14, LABEL_R14(%rdi)
	movq	%r15, LABEL_R15(%rdi)
	movq	(%rsp), %rdx		/* return address */
	movq	%rdx, (%rdi)		/* LABEL_PC is 0 */
	xorl	%eax, %eax		/* return 0 */
	ret
	SET_SIZE(setjmp)

	ENTRY(longjmp)
	movq	LABEL_SP(%rdi), %rsp
	movq	LABEL_RBP(%rdi), %rbp
	movq	LABEL_RBX(%rdi), %rbx
	movq	LABEL_R12(%rdi), %r12
	movq	LABEL_R13(%rdi), %r13
	movq	LABEL_R14(%rdi), %r14
	movq	LABEL_R15(%rdi), %r15
	movq	(%rdi), %rdx		/* return address; LABEL_PC is 0 */
	movq	%rdx, (%rsp)
	xorl	%eax, %eax
	incl	%eax			/* return 1 */
	ret
	SET_SIZE(longjmp)

/*
 * If a() calls b(), which calls caller(), then caller() returns the
 * return address in a().
 * (Note: We assume a() and b() are C routines which do the normal entry/exit
 *  sequence.)
 */

	ENTRY(caller)
	movq	8(%rbp), %rax		/* b()'s return pc, in a() */
	ret
	SET_SIZE(caller)

/*
 * if a() calls callee(), callee() returns the
 * return address in a();
 */

	ENTRY(callee)
	movq	(%rsp), %rax		/* callee()'s return pc, in a() */
	ret
	SET_SIZE(callee)

/*
 * return the current frame pointer
 */

	ENTRY(getfp)
	movq	%rbp, %rax
	ret
	SET_SIZE(getfp)

/*
 * Invalidate a single page table entry in the TLB
 */

	ENTRY(mmu_invlpg)
	invlpg	(%rdi)
	ret
	SET_SIZE(mmu_invlpg)


/*
 * Get/Set the value of various control registers
 */

	ENTRY(getcr0)
	movq	%cr0, %rax
	ret
	SET_SIZE(getcr0)

	ENTRY(setcr0)
	movq	%rdi, %cr0
	ret
	SET_SIZE(setcr0)

        ENTRY(getcr2)
#if defined(__xpv)
	movq	%gs:CPU_VCPU_INFO, %rax
	movq	VCPU_INFO_ARCH_CR2(%rax), %rax
#else
        movq    %cr2, %rax
#endif
        ret
	SET_SIZE(getcr2)

	ENTRY(getcr3)
	movq    %cr3, %rax
	ret
	SET_SIZE(getcr3)

#if !defined(__xpv)

        ENTRY(setcr3)
        movq    %rdi, %cr3
        ret
	SET_SIZE(setcr3)

	ENTRY(reload_cr3)
	movq	%cr3, %rdi
	movq	%rdi, %cr3
	ret
	SET_SIZE(reload_cr3)

#endif	/* __xpv */

	ENTRY(getcr4)
	movq	%cr4, %rax
	ret
	SET_SIZE(getcr4)

	ENTRY(setcr4)
	movq	%rdi, %cr4
	ret
	SET_SIZE(setcr4)

	ENTRY(getcr8)
	movq	%cr8, %rax
	ret
	SET_SIZE(getcr8)

	ENTRY(setcr8)
	movq	%rdi, %cr8
	ret
	SET_SIZE(setcr8)

	ENTRY(__cpuid_insn)
	movq	%rbx, %r8
	movq	%rcx, %r9
	movq	%rdx, %r11
	movl	(%rdi), %eax		/* %eax = regs->cp_eax */
	movl	0x4(%rdi), %ebx		/* %ebx = regs->cp_ebx */
	movl	0x8(%rdi), %ecx		/* %ecx = regs->cp_ecx */
	movl	0xc(%rdi), %edx		/* %edx = regs->cp_edx */
	cpuid
	movl	%eax, (%rdi)		/* regs->cp_eax = %eax */
	movl	%ebx, 0x4(%rdi)		/* regs->cp_ebx = %ebx */
	movl	%ecx, 0x8(%rdi)		/* regs->cp_ecx = %ecx */
	movl	%edx, 0xc(%rdi)		/* regs->cp_edx = %edx */
	movq	%r8, %rbx
	movq	%r9, %rcx
	movq	%r11, %rdx
	ret
	SET_SIZE(__cpuid_insn)

	ENTRY_NP(i86_monitor)
	pushq	%rbp
	movq	%rsp, %rbp
	movq	%rdi, %rax		/* addr */
	movq	%rsi, %rcx		/* extensions */
	/* rdx contains input arg3: hints */
	clflush	(%rax)
	.byte	0x0f, 0x01, 0xc8	/* monitor */
	leave
	ret
	SET_SIZE(i86_monitor)

	ENTRY_NP(i86_mwait)
	pushq	%rbp
	call	x86_md_clear
	movq	%rsp, %rbp
	movq	%rdi, %rax		/* data */
	movq	%rsi, %rcx		/* extensions */
	.byte	0x0f, 0x01, 0xc9	/* mwait */
	leave
	ret
	SET_SIZE(i86_mwait)

#if defined(__xpv)
	/*
	 * Defined in C
	 */
#else

	ENTRY_NP(tsc_read)
	movq	%rbx, %r11
	movl	$0, %eax
	cpuid
	rdtsc
	movq	%r11, %rbx
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	.globl _tsc_mfence_start
_tsc_mfence_start:
	mfence
	rdtsc
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	.globl _tsc_mfence_end
_tsc_mfence_end:
	.globl _tscp_start
_tscp_start:
	.byte	0x0f, 0x01, 0xf9	/* rdtscp instruction */
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	.globl _tscp_end
_tscp_end:
	.globl _no_rdtsc_start
_no_rdtsc_start:
	xorl	%edx, %edx
	xorl	%eax, %eax
	ret
	.globl _no_rdtsc_end
_no_rdtsc_end:
	.globl _tsc_lfence_start
_tsc_lfence_start:
	lfence
	rdtsc
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	.globl _tsc_lfence_end
_tsc_lfence_end:
	SET_SIZE(tsc_read)


#endif	/* __xpv */

	ENTRY_NP(randtick)
	rdtsc
	shlq    $32, %rdx
	orq     %rdx, %rax
	ret
	SET_SIZE(randtick)
/*
 * Insert entryp after predp in a doubly linked list.
 */

	ENTRY(_insque)
	movq	(%rsi), %rax		/* predp->forw			*/
	movq	%rsi, CPTRSIZE(%rdi)	/* entryp->back = predp		*/
	movq	%rax, (%rdi)		/* entryp->forw = predp->forw	*/
	movq	%rdi, (%rsi)		/* predp->forw = entryp		*/
	movq	%rdi, CPTRSIZE(%rax)	/* predp->forw->back = entryp	*/
	ret
	SET_SIZE(_insque)

/*
 * Remove entryp from a doubly linked list
 */

	ENTRY(_remque)
	movq	(%rdi), %rax		/* entry->forw */
	movq	CPTRSIZE(%rdi), %rdx	/* entry->back */
	movq	%rax, (%rdx)		/* entry->back->forw = entry->forw */
	movq	%rdx, CPTRSIZE(%rax)	/* entry->forw->back = entry->back */
	ret
	SET_SIZE(_remque)

/*
 * Returns the number of non-null bytes in the string argument.
 */

/*
 * This is close to a simple transliteration of a C version of this
 * routine.  We should either just -make- this be a C version, or
 * justify having it in assembler by making it significantly faster.
 *
 * size_t
 * strlen(const char *s)
 * {
 *	const char *s0;
 * #if defined(DEBUG)
 *	if ((uintptr_t)s < KERNELBASE)
 *		panic(.str_panic_msg);
 * #endif
 *	for (s0 = s; *s; s++)
 *		;
 *	return (s - s0);
 * }
 */

	ENTRY(strlen)
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jae	str_valid
	pushq	%rbp
	movq	%rsp, %rbp
	leaq	.str_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
#endif	/* DEBUG */
str_valid:
	cmpb	$0, (%rdi)
	movq	%rdi, %rax
	je	.null_found
	.align	4
.strlen_loop:
	incq	%rdi
	cmpb	$0, (%rdi)
	jne	.strlen_loop
.null_found:
	subq	%rax, %rdi
	movq	%rdi, %rax
	ret
	SET_SIZE(strlen)

#ifdef DEBUG
	.text
.str_panic_msg:
	.string "strlen: argument below kernelbase"
#endif /* DEBUG */

	/*
	 * Berkeley 4.3 introduced symbolically named interrupt levels
	 * as a way to deal with priority in a machine independent fashion.
	 * Numbered priorities are machine specific, and should be
	 * discouraged where possible.
	 *
	 * Note, for the machine specific priorities there are
	 * examples listed for devices that use a particular priority.
	 * It should not be construed that all devices of that
	 * type should be at that priority.  It simply reflects where
	 * the current devices fit into the priority scheme based
	 * upon time criticality.
	 *
	 * The underlying assumption of these assignments is that
	 * IPL 10 is the highest level from which a device
	 * routine can call wakeup.  Devices that interrupt from higher
	 * levels are restricted in what they can do.  If they need
	 * kernel services they should schedule a routine at a lower
	 * level (via software interrupt) to do the required
	 * processing.
	 *
	 * Examples of this higher usage:
	 *	Level	Usage
	 *	14	Profiling clock (and PROM uart polling clock)
	 *	12	Serial ports
	 *
	 * The serial ports request lower level processing on level 6.
	 *
	 * Also, almost all splN routines (where N is a number or a
	 * mnemonic) will do a RAISE(), on the assumption that they are
	 * never used to lower our priority.
	 * The exceptions are:
	 *	spl8()		Because you can't be above 15 to begin with!
	 *	splzs()		Because this is used at boot time to lower our
	 *			priority, to allow the PROM to poll the uart.
	 *	spl0()		Used to lower priority to 0.
	 */

#define	SETPRI(level) \
	movl	$##level, %edi;	/* new priority */		\
	jmp	do_splx			/* redirect to do_splx */

#define	RAISE(level) \
	movl	$##level, %edi;	/* new priority */		\
	jmp	splr			/* redirect to splr */

	/* locks out all interrupts, including memory errors */
	ENTRY(spl8)
	SETPRI(15)
	SET_SIZE(spl8)

	/* just below the level that profiling runs */
	ENTRY(spl7)
	RAISE(13)
	SET_SIZE(spl7)

	/* sun specific - highest priority onboard serial i/o asy ports */
	ENTRY(splzs)
	SETPRI(12)	/* Can't be a RAISE, as it's used to lower us */
	SET_SIZE(splzs)

	ENTRY(splhi)
	ALTENTRY(splhigh)
	ALTENTRY(spl6)
	ALTENTRY(i_ddi_splhigh)

	RAISE(DISP_LEVEL)

	SET_SIZE(i_ddi_splhigh)
	SET_SIZE(spl6)
	SET_SIZE(splhigh)
	SET_SIZE(splhi)

	/* allow all interrupts */
	ENTRY(spl0)
	SETPRI(0)
	SET_SIZE(spl0)


	/* splx implementation */
	ENTRY(splx)
	jmp	do_splx		/* redirect to common splx code */
	SET_SIZE(splx)

	ENTRY(wait_500ms)
	pushq	%rbx
	movl	$50000, %ebx
1:
	call	tenmicrosec
	decl	%ebx
	jnz	1b
	popq	%rbx
	ret
	SET_SIZE(wait_500ms)

#define	RESET_METHOD_KBC	1
#define	RESET_METHOD_PORT92	2
#define RESET_METHOD_PCI	4

	DGDEF3(pc_reset_methods, 4, 8)
	.long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;

	ENTRY(pc_reset)

	testl	$RESET_METHOD_KBC, pc_reset_methods(%rip)
	jz	1f

	/
	/ Try the classic keyboard controller-triggered reset.
	/
	movw	$0x64, %dx
	movb	$0xfe, %al
	outb	(%dx)

	/ Wait up to 500 milliseconds here for the keyboard controller
	/ to pull the reset line.  On some systems where the keyboard
	/ controller is slow to pull the reset line, the next reset method
	/ may be executed (which may be bad if those systems hang when the
	/ next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
	/ and Ferrari 4000 (doesn't like the cf9 reset method))

	call	wait_500ms

1:
	testl	$RESET_METHOD_PORT92, pc_reset_methods(%rip)
	jz	3f

	/
	/ Try port 0x92 fast reset
	/
	movw	$0x92, %dx
	inb	(%dx)
	cmpb	$0xff, %al	/ If port's not there, we should get back 0xFF
	je	1f
	testb	$1, %al		/ If bit 0
	jz	2f		/ is clear, jump to perform the reset
	andb	$0xfe, %al	/ otherwise,
	outb	(%dx)		/ clear bit 0 first, then
2:
	orb	$1, %al		/ Set bit 0
	outb	(%dx)		/ and reset the system
1:

	call	wait_500ms

3:
	testl	$RESET_METHOD_PCI, pc_reset_methods(%rip)
	jz	4f

	/ Try the PCI (soft) reset vector (should work on all modern systems,
	/ but has been shown to cause problems on 450NX systems, and some newer
	/ systems (e.g. ATI IXP400-equipped systems))
	/ When resetting via this method, 2 writes are required.  The first
	/ targets bit 1 (0=hard reset without power cycle, 1=hard reset with
	/ power cycle).
	/ The reset occurs on the second write, during bit 2's transition from
	/ 0->1.
	movw	$0xcf9, %dx
	movb	$0x2, %al	/ Reset mode = hard, no power cycle
	outb	(%dx)
	movb	$0x6, %al
	outb	(%dx)

	call	wait_500ms

4:
	/
	/ port 0xcf9 failed also.  Last-ditch effort is to
	/ triple-fault the CPU.
	/ Also, use triple fault for EFI firmware
	/
	ENTRY(efi_reset)
	pushq	$0x0
	pushq	$0x0		/ IDT base of 0, limit of 0 + 2 unused bytes
	lidt	(%rsp)
	int	$0x0		/ Trigger interrupt, generate triple-fault

	cli
	hlt			/ Wait forever
	/*NOTREACHED*/
	SET_SIZE(efi_reset)
	SET_SIZE(pc_reset)

/*
 * C callable in and out routines
 */

	ENTRY(outl)
	movw	%di, %dx
	movl	%esi, %eax
	outl	(%dx)
	ret
	SET_SIZE(outl)

	ENTRY(outw)
	movw	%di, %dx
	movw	%si, %ax
	D16 outl (%dx)		/* XX64 why not outw? */
	ret
	SET_SIZE(outw)

	ENTRY(outb)
	movw	%di, %dx
	movb	%sil, %al
	outb	(%dx)
	ret
	SET_SIZE(outb)

	ENTRY(inl)
	xorl	%eax, %eax
	movw	%di, %dx
	inl	(%dx)
	ret
	SET_SIZE(inl)

	ENTRY(inw)
	xorl	%eax, %eax
	movw	%di, %dx
	D16 inl	(%dx)
	ret
	SET_SIZE(inw)


	ENTRY(inb)
	xorl	%eax, %eax
	movw	%di, %dx
	inb	(%dx)
	ret
	SET_SIZE(inb)

/*
 * void int3(void)
 * void int18(void)
 * void int20(void)
 * void int_cmci(void)
 */

	ENTRY(int3)
	int	$T_BPTFLT
	ret
	SET_SIZE(int3)

	ENTRY(int18)
	int	$T_MCE
	ret
	SET_SIZE(int18)

	ENTRY(int20)
	movl	boothowto, %eax
	andl	$RB_DEBUG, %eax
	jz	1f

	int	$T_DBGENTR
1:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(int20)

	ENTRY(int_cmci)
	int	$T_ENOEXTFLT
	ret
	SET_SIZE(int_cmci)

	ENTRY(scanc)
					/* rdi == size */
					/* rsi == cp */
					/* rdx == table */
					/* rcx == mask */
	addq	%rsi, %rdi		/* end = &cp[size] */
.scanloop:
	cmpq	%rdi, %rsi		/* while (cp < end */
	jnb	.scandone
	movzbq	(%rsi), %r8		/* %r8 = *cp */
	incq	%rsi			/* cp++ */
	testb	%cl, (%r8, %rdx)
	jz	.scanloop		/*  && (table[*cp] & mask) == 0) */
	decq	%rsi			/* (fix post-increment) */
.scandone:
	movl	%edi, %eax
	subl	%esi, %eax		/* return (end - cp) */
	ret
	SET_SIZE(scanc)
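
/*
 * Illustrative C transliteration of scanc() above (a sketch, not an
 * authoritative prototype; parameter types are approximate):
 *
 *	size_t
 *	scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
 *	{
 *		uchar_t *end = &cp[size];
 *
 *		while (cp < end && (table[*cp] & mask) == 0)
 *			cp++;
 *		return (end - cp);
 *	}
 */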

/*
 * Replacement functions for ones that are normally inlined.
 * In addition to the inline copies, they are defined here just in case.
 */

	ENTRY(intr_clear)
	ENTRY(clear_int_flag)
	pushfq
	popq	%rax
#if defined(__xpv)
	leaq	xpv_panicking, %rdi
	movl	(%rdi), %edi
	cmpl	$0, %edi
	jne	2f
	CLIRET(%rdi, %dl)	/* returns event mask in %dl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andq    $_BITNOT(PS_IE), %rax
	testb	$1, %dl
	jnz	1f
	orq	$PS_IE, %rax
1:
	ret
2:
#endif
	CLI(%rdi)
	ret
	SET_SIZE(clear_int_flag)
	SET_SIZE(intr_clear)

	ENTRY(curcpup)
	movq	%gs:CPU_SELF, %rax
	ret
	SET_SIZE(curcpup)

/*
 * htonll(), ntohll(), htonl(), ntohl(), htons(), ntohs()
 * These functions reverse the byte order of the input parameter and return
 * the result.  This is to convert the byte order from host byte order
 * (little endian) to network byte order (big endian), or vice versa.
 */
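
/*
 * For illustration: on a little-endian host, htonl(0x12345678) yields
 * 0x78563412 and htons(0x1234) yields 0x3412; htonll() swaps all eight
 * bytes of its argument in the same fashion.
 */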

	ENTRY(htonll)
	ALTENTRY(ntohll)
	movq	%rdi, %rax
	bswapq	%rax
	ret
	SET_SIZE(ntohll)
	SET_SIZE(htonll)

	/* XX64 there must be shorter sequences for this */
	ENTRY(htonl)
	ALTENTRY(ntohl)
	movl	%edi, %eax
	bswap	%eax
	ret
	SET_SIZE(ntohl)
	SET_SIZE(htonl)

	/* XX64 there must be better sequences for this */
	ENTRY(htons)
	ALTENTRY(ntohs)
	movl	%edi, %eax
	bswap	%eax
	shrl	$16, %eax
	ret
	SET_SIZE(ntohs)
	SET_SIZE(htons)


	ENTRY(intr_restore)
	ENTRY(restore_int_flag)
	testq	$PS_IE, %rdi
	jz	1f
#if defined(__xpv)
	leaq	xpv_panicking, %rsi
	movl	(%rsi), %esi
	cmpl	$0, %esi
	jne	1f
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored.
	 * The virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%rsi, %rdi)
#else
	sti
#endif
1:
	ret
	SET_SIZE(restore_int_flag)
	SET_SIZE(intr_restore)

	ENTRY(sti)
	STI
	ret
	SET_SIZE(sti)

	ENTRY(cli)
	CLI(%rax)
	ret
	SET_SIZE(cli)

	ENTRY(dtrace_interrupt_disable)
	pushfq
	popq	%rax
#if defined(__xpv)
	leaq	xpv_panicking, %rdi
	movl	(%rdi), %edi
	cmpl	$0, %edi
	jne	.dtrace_interrupt_disable_done
	CLIRET(%rdi, %dl)	/* returns event mask in %dl */
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	andq    $_BITNOT(PS_IE), %rax
	testb	$1, %dl
	jnz	.dtrace_interrupt_disable_done
	orq	$PS_IE, %rax
#else
	CLI(%rdx)
#endif
.dtrace_interrupt_disable_done:
	ret
	SET_SIZE(dtrace_interrupt_disable)

	ENTRY(dtrace_interrupt_enable)
	pushq	%rdi
	popfq
#if defined(__xpv)
	leaq	xpv_panicking, %rdx
	movl	(%rdx), %edx
	cmpl	$0, %edx
	jne	.dtrace_interrupt_enable_done
	/*
	 * Since we're -really- running unprivileged, our attempt
	 * to change the state of the IF bit will be ignored. The
	 * virtual IF bit is tweaked by CLI and STI.
	 */
	IE_TO_EVENT_MASK(%rdx, %rdi)
#endif
.dtrace_interrupt_enable_done:
	ret
	SET_SIZE(dtrace_interrupt_enable)


	ENTRY(dtrace_membar_producer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_producer)

	ENTRY(dtrace_membar_consumer)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(dtrace_membar_consumer)

	ENTRY(threadp)
	movq	%gs:CPU_THREAD, %rax
	ret
	SET_SIZE(threadp)

/*
 *   Checksum routine for Internet Protocol Headers
 */
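
/*
 * Illustrative sketch (not an authoritative prototype; types are
 * approximate and the unrolling, alignment handling and very large
 * lengths are ignored): the routine below computes a 16-bit ones'
 * complement sum over halfword_count 16-bit words starting at address,
 * folded into the partial sum passed in.
 *
 *	unsigned int
 *	ip_ocsum(ushort_t *address, int halfword_count, unsigned int sum)
 *	{
 *		while (halfword_count-- > 0)
 *			sum += *address++;
 *		sum = (sum & 0xffff) + (sum >> 16);
 *		sum = (sum & 0xffff) + (sum >> 16);
 *		return (sum);
 *	}
 */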

	ENTRY(ip_ocsum)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	postbootkernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jnb	1f
	xorl	%eax, %eax
	movq	%rdi, %rsi
	leaq	.ip_ocsum_panic_msg(%rip), %rdi
	call	panic
	/*NOTREACHED*/
.ip_ocsum_panic_msg:
	.string	"ip_ocsum: address 0x%p below kernelbase\n"
1:
#endif
	movl	%esi, %ecx	/* halfword_count */
	movq	%rdi, %rsi	/* address */
				/* partial sum in %edx */
	xorl	%eax, %eax
	testl	%ecx, %ecx
	jz	.ip_ocsum_done
	testq	$3, %rsi
	jnz	.ip_csum_notaligned
.ip_csum_aligned:	/* XX64 opportunities for 8-byte operations? */
.next_iter:
	/* XX64 opportunities for prefetch? */
	/* XX64 compute csum with 64 bit quantities? */
	subl	$32, %ecx
	jl	.less_than_32

	addl	0(%rsi), %edx
.only60:
	adcl	4(%rsi), %eax
.only56:
	adcl	8(%rsi), %edx
.only52:
	adcl	12(%rsi), %eax
.only48:
	adcl	16(%rsi), %edx
.only44:
	adcl	20(%rsi), %eax
.only40:
	adcl	24(%rsi), %edx
.only36:
	adcl	28(%rsi), %eax
.only32:
	adcl	32(%rsi), %edx
.only28:
	adcl	36(%rsi), %eax
.only24:
	adcl	40(%rsi), %edx
.only20:
	adcl	44(%rsi), %eax
.only16:
	adcl	48(%rsi), %edx
.only12:
	adcl	52(%rsi), %eax
.only8:
	adcl	56(%rsi), %edx
.only4:
	adcl	60(%rsi), %eax	/* could be adding -1 and -1 with a carry */
.only0:
	adcl	$0, %eax	/* could be adding -1 in eax with a carry */
	adcl	$0, %eax

	addq	$64, %rsi
	testl	%ecx, %ecx
	jnz	.next_iter

.ip_ocsum_done:
	addl	%eax, %edx
	adcl	$0, %edx
	movl	%edx, %eax	/* form a 16 bit checksum by */
	shrl	$16, %eax	/* adding two halves of 32 bit checksum */
	addw	%dx, %ax
	adcw	$0, %ax
	andl	$0xffff, %eax
	leave
	ret

.ip_csum_notaligned:
	xorl	%edi, %edi
	movw	(%rsi), %di
	addl	%edi, %edx
	adcl	$0, %edx
	addq	$2, %rsi
	decl	%ecx
	jmp	.ip_csum_aligned

.less_than_32:
	addl	$32, %ecx
	testl	$1, %ecx
	jz	.size_aligned
	andl	$0xfe, %ecx
	movzwl	(%rsi, %rcx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	movl	%ecx, %edi
	shrl	$1, %ecx
	shl	$1, %edi
	subq	$64, %rdi
	addq	%rdi, %rsi
	leaq    .ip_ocsum_jmptbl(%rip), %rdi
	leaq	(%rdi, %rcx, 8), %rdi
	xorl	%ecx, %ecx
	clc
	movq	(%rdi), %rdi
	INDIRECT_JMP_REG(rdi)

	.align	8
.ip_ocsum_jmptbl:
	.quad	.only0, .only4, .only8, .only12, .only16, .only20
	.quad	.only24, .only28, .only32, .only36, .only40, .only44
	.quad	.only48, .only52, .only56, .only60
	SET_SIZE(ip_ocsum)

/*
 * multiply two long numbers and yield a u_longlong_t result, callable from C.
 * Provided to manipulate hrtime_t values.
 */
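
/*
 * Equivalent C, as a sketch (the exact prototype is defined elsewhere):
 *
 *	uint64_t
 *	mul32(uint_t a, uint_t b)
 *	{
 *		return ((uint64_t)a * b);
 *	}
 */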

	ENTRY(mul32)
	xorl	%edx, %edx	/* XX64 joe, paranoia? */
	movl	%edi, %eax
	mull	%esi
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	SET_SIZE(mul32)

	ENTRY(scan_memory)
	shrq	$3, %rsi	/* convert %rsi from byte to quadword count */
	jz	.scanm_done
	movq	%rsi, %rcx	/* move count into rep control register */
	movq	%rdi, %rsi	/* move addr into lodsq control reg. */
	rep lodsq		/* scan the memory range */
.scanm_done:
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(scan_memory)


	ENTRY(lowbit)
	movl	$-1, %eax
	bsfq	%rdi, %rdi
	cmovnz	%edi, %eax
	incl	%eax
	ret
	SET_SIZE(lowbit)

	ENTRY(highbit)
	ALTENTRY(highbit64)
	movl	$-1, %eax
	bsrq	%rdi, %rdi
	cmovnz	%edi, %eax
	incl	%eax
	ret
	SET_SIZE(highbit64)
	SET_SIZE(highbit)

#define	XMSR_ACCESS_VAL		$0x9c5a203a

	ENTRY(rdmsr)
	movl	%edi, %ecx
	rdmsr
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	SET_SIZE(rdmsr)

	ENTRY(wrmsr)
	movq	%rsi, %rdx
	shrq	$32, %rdx
	movl	%esi, %eax
	movl	%edi, %ecx
	wrmsr
	ret
	SET_SIZE(wrmsr)

	ENTRY(xrdmsr)
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%edi, %ecx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	rdmsr
	shlq	$32, %rdx
	orq	%rdx, %rax
	leave
	ret
	SET_SIZE(xrdmsr)

	ENTRY(xwrmsr)
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%edi, %ecx
	movl	XMSR_ACCESS_VAL, %edi	/* this value is needed to access MSR */
	movq	%rsi, %rdx
	shrq	$32, %rdx
	movl	%esi, %eax
	wrmsr
	leave
	ret
	SET_SIZE(xwrmsr)

	ENTRY(get_xcr)
	movl	%edi, %ecx
	#xgetbv
	.byte	0x0f,0x01,0xd0
	shlq	$32, %rdx
	orq	%rdx, %rax
	ret
	SET_SIZE(get_xcr)

	ENTRY(set_xcr)
	movq	%rsi, %rdx
	shrq	$32, %rdx
	movl	%esi, %eax
	movl	%edi, %ecx
	#xsetbv
	.byte	0x0f,0x01,0xd1
	ret
	SET_SIZE(set_xcr)

	ENTRY(invalidate_cache)
	wbinvd
	ret
	SET_SIZE(invalidate_cache)

	ENTRY_NP(getcregs)
#if defined(__xpv)
	/*
	 * Only a few of the hardware control registers or descriptor tables
	 * are directly accessible to us, so just zero the structure.
	 *
	 * XXPV	Perhaps it would be helpful for the hypervisor to return
	 *	virtualized versions of these for post-mortem use.
	 *	(Need to reevaluate - perhaps it already does!)
	 */
	pushq	%rdi		/* save *crp */
	movq	$CREGSZ, %rsi
	call	bzero
	popq	%rdi

	/*
	 * Dump what limited information we can
	 */
	movq	%cr0, %rax
	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
	movq	%cr2, %rax
	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
	movq	%cr3, %rax
	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
	movq	%cr4, %rax
	movq	%rax, CREG_CR4(%rdi)	/* cr4 */

#else	/* __xpv */

#define	GETMSR(r, off, d)	\
	movl	$r, %ecx;	\
	rdmsr;			\
	movl	%eax, off(d);	\
	movl	%edx, off+4(d)

	xorl	%eax, %eax
	movq	%rax, CREG_GDT+8(%rdi)
	sgdt	CREG_GDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_IDT+8(%rdi)
	sidt	CREG_IDT(%rdi)		/* 10 bytes */
	movq	%rax, CREG_LDT(%rdi)
	sldt	CREG_LDT(%rdi)		/* 2 bytes */
	movq	%rax, CREG_TASKR(%rdi)
	str	CREG_TASKR(%rdi)	/* 2 bytes */
	movq	%cr0, %rax
	movq	%rax, CREG_CR0(%rdi)	/* cr0 */
	movq	%cr2, %rax
	movq	%rax, CREG_CR2(%rdi)	/* cr2 */
	movq	%cr3, %rax
	movq	%rax, CREG_CR3(%rdi)	/* cr3 */
	movq	%cr4, %rax
	movq	%rax, CREG_CR4(%rdi)	/* cr4 */
	movq	%cr8, %rax
	movq	%rax, CREG_CR8(%rdi)	/* cr8 */
	GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
	GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
#endif	/* __xpv */
	ret
	SET_SIZE(getcregs)

#undef GETMSR


/*
 * A panic trigger is a word which is updated atomically and can only be set
 * once.  We atomically store 0xDEFACEDD and load the old value.  If the
 * previous value was 0, we succeed and return 1; otherwise return 0.
 * This allows a partially corrupt trigger to still trigger correctly.  DTrace
 * has its own version of this function to allow it to panic correctly from
 * probe context.
 */
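
/*
 * Illustrative C sketch of the logic implemented below (not an
 * authoritative definition; atomic_swap_32() from <sys/atomic.h> is used
 * here only to model the lock/xchgl pair):
 *
 *	int
 *	panic_trigger(int *tp)
 *	{
 *		uint32_t old = atomic_swap_32((uint32_t *)tp, 0xdefacedd);
 *		return (old == 0);
 *	}
 */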

	ENTRY_NP(panic_trigger)
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	lock
	  xchgl	%edx, (%rdi)
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax
	ret
0:	movl	$1, %eax
	ret
	SET_SIZE(panic_trigger)

	ENTRY_NP(dtrace_panic_trigger)
	xorl	%eax, %eax
	movl	$0xdefacedd, %edx
	lock
	  xchgl	%edx, (%rdi)
	cmpl	$0, %edx
	je	0f
	movl	$0, %eax
	ret
0:	movl	$1, %eax
	ret
	SET_SIZE(dtrace_panic_trigger)

/*
 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
 * into the panic code implemented in panicsys().  vpanic() is responsible
 * for passing through the format string and arguments, and constructing a
 * regs structure on the stack into which it saves the current register
 * values.  If we are not dying due to a fatal trap, these registers will
 * then be preserved in panicbuf as the current processor state.  Before
 * invoking panicsys(), vpanic() activates the first panic trigger (see
 * common/os/panic.c) and switches to the panic_stack if successful.  Note that
 * DTrace takes a slightly different panic path if it must panic from probe
 * context.  Instead of calling panic, it calls into dtrace_vpanic(), which
 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
 * branches back into vpanic().
 */

	ENTRY_NP(vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip |	0x60	*/
	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
	pushfq					/* | rfl  |	0x50	*/
	pushq	%r11				/* | %r11 |	0x48	*/
	pushq	%r10				/* | %r10 |	0x40	*/
	pushq	%rbx				/* | %rbx |	0x38	*/
	pushq	%rax				/* | %rax |	0x30	*/
	pushq	%r9				/* | %r9  |	0x28	*/
	pushq	%r8				/* | %r8  |	0x20	*/
	pushq	%rcx				/* | %rcx |	0x18	*/
	pushq	%rdx				/* | %rdx |	0x10	*/
	pushq	%rsi				/* | %rsi |	0x8 alist */
	pushq	%rdi				/* | %rdi |	0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	panic_trigger			/* %eax = panic_trigger() */

vpanic_common:
	/*
	 * The panic_trigger result is in %eax from the call above, and
	 * dtrace_panic places it in %eax before branching here.
	 * The rdmsr instructions that follow below will clobber %eax so
	 * we stash the panic_trigger result in %r11d.
	 */
	movl	%eax, %r11d
	cmpl	$0, %r11d
	je	0f

	/*
	 * If panic_trigger() was successful, we are the first to initiate a
	 * panic: we now switch to the reserved panic_stack before continuing.
	 */
	leaq	panic_stack(%rip), %rsp
	addq	$PANICSTKSIZE, %rsp
0:	subq	$REGSIZE, %rsp
	/*
	 * Now that we've got everything set up, store the register values as
	 * they were when we entered vpanic() to the designated location in
	 * the regs structure we allocated on the stack.
	 */
	movq	0x0(%rbx), %rcx
	movq	%rcx, REGOFF_RDI(%rsp)
	movq	0x8(%rbx), %rcx
	movq	%rcx, REGOFF_RSI(%rsp)
	movq	0x10(%rbx), %rcx
	movq	%rcx, REGOFF_RDX(%rsp)
	movq	0x18(%rbx), %rcx
	movq	%rcx, REGOFF_RCX(%rsp)
	movq	0x20(%rbx), %rcx

	movq	%rcx, REGOFF_R8(%rsp)
	movq	0x28(%rbx), %rcx
	movq	%rcx, REGOFF_R9(%rsp)
	movq	0x30(%rbx), %rcx
	movq	%rcx, REGOFF_RAX(%rsp)
	movq	0x38(%rbx), %rcx
	movq	%rcx, REGOFF_RBX(%rsp)
	movq	0x58(%rbx), %rcx

	movq	%rcx, REGOFF_RBP(%rsp)
	movq	0x40(%rbx), %rcx
	movq	%rcx, REGOFF_R10(%rsp)
	movq	0x48(%rbx), %rcx
	movq	%rcx, REGOFF_R11(%rsp)
	movq	%r12, REGOFF_R12(%rsp)

	movq	%r13, REGOFF_R13(%rsp)
	movq	%r14, REGOFF_R14(%rsp)
	movq	%r15, REGOFF_R15(%rsp)

	xorl	%ecx, %ecx
	movw	%ds, %cx
	movq	%rcx, REGOFF_DS(%rsp)
	movw	%es, %cx
	movq	%rcx, REGOFF_ES(%rsp)
	movw	%fs, %cx
	movq	%rcx, REGOFF_FS(%rsp)
	movw	%gs, %cx
	movq	%rcx, REGOFF_GS(%rsp)

	movq	$0, REGOFF_TRAPNO(%rsp)

	movq	$0, REGOFF_ERR(%rsp)
	leaq	vpanic(%rip), %rcx
	movq	%rcx, REGOFF_RIP(%rsp)
	movw	%cs, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_CS(%rsp)
	movq	0x50(%rbx), %rcx
	movq	%rcx, REGOFF_RFL(%rsp)
	movq	%rbx, %rcx
	addq	$0x60, %rcx
	movq	%rcx, REGOFF_RSP(%rsp)
	movw	%ss, %cx
	movzwq	%cx, %rcx
	movq	%rcx, REGOFF_SS(%rsp)

	/*
	 * panicsys(format, alist, rp, on_panic_stack)
	 */
	movq	REGOFF_RDI(%rsp), %rdi		/* format */
	movq	REGOFF_RSI(%rsp), %rsi		/* alist */
	movq	%rsp, %rdx			/* struct regs */
	movl	%r11d, %ecx			/* on_panic_stack */
	call	panicsys
	addq	$REGSIZE, %rsp
	popq	%rdi
	popq	%rsi
	popq	%rdx
	popq	%rcx
	popq	%r8
	popq	%r9
	popq	%rax
	popq	%rbx
	popq	%r10
	popq	%r11
	popfq
	leave
	ret
	SET_SIZE(vpanic)

	ENTRY_NP(dtrace_vpanic)			/* Initial stack layout: */

	pushq	%rbp				/* | %rip |	0x60	*/
	movq	%rsp, %rbp			/* | %rbp |	0x58	*/
	pushfq					/* | rfl  |	0x50	*/
	pushq	%r11				/* | %r11 |	0x48	*/
	pushq	%r10				/* | %r10 |	0x40	*/
	pushq	%rbx				/* | %rbx |	0x38	*/
	pushq	%rax				/* | %rax |	0x30	*/
	pushq	%r9				/* | %r9  |	0x28	*/
	pushq	%r8				/* | %r8  |	0x20	*/
	pushq	%rcx				/* | %rcx |	0x18	*/
	pushq	%rdx				/* | %rdx |	0x10	*/
	pushq	%rsi				/* | %rsi |	0x8 alist */
	pushq	%rdi				/* | %rdi |	0x0 format */

	movq	%rsp, %rbx			/* %rbx = current %rsp */

	leaq	panic_quiesce(%rip), %rdi	/* %rdi = &panic_quiesce */
	call	dtrace_panic_trigger	/* %eax = dtrace_panic_trigger() */
	jmp	vpanic_common

	SET_SIZE(dtrace_vpanic)

	DGDEF3(timedelta, 8, 8)
	.long	0, 0

	/*
	 * initialized to a non zero value to make pc_gethrtime()
	 * work correctly even before clock is initialized
	 */
	DGDEF3(hrtime_base, 8, 8)
	.long	_MUL(NSEC_PER_CLOCK_TICK, 6), 0

	DGDEF3(adj_shift, 4, 4)
	.long	ADJ_SHIFT

	ENTRY_NP(hres_tick)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	movq	gethrtimef(%rip), %rsi
	INDIRECT_CALL_REG(rsi)
	movq	%rax, %r8

	leaq	hres_lock(%rip), %rax
	movb	$-1, %dl
.CL1:
	xchgb	%dl, (%rax)
	testb	%dl, %dl
	jz	.CL3			/* got it */
.CL2:
	cmpb	$0, (%rax)		/* possible to get lock? */
	pause
	jne	.CL2
	jmp	.CL1			/* yes, try again */
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * a timestruc_t (sec, nsec)
	 */
	leaq	hres_last_tick(%rip), %rax
	movq	%r8, %r11
	subq	(%rax), %r8
	addq	%r8, hrtime_base(%rip)	/* add interval to hrtime_base */
	addq	%r8, hrestime+8(%rip)	/* add interval to hrestime.tv_nsec */
	/*
	 * Now that we have CLOCK_LOCK, we can update hres_last_tick
	 */
	movq	%r11, (%rax)

	call	__adj_hrestime

	/*
	 * release the hres_lock
	 */
	incl	hres_lock(%rip)
	leave
	ret
	SET_SIZE(hres_tick)

/*
 * void prefetch_smap_w(void *)
 *
 * Prefetch ahead within a linear list of smap structures.
 * Not implemented for ia32.  Stub for compatibility.
 */

	ENTRY(prefetch_smap_w)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_smap_w)

/*
 * prefetch_page_r(page_t *)
 * issue prefetch instructions for a page_t
 */

	ENTRY(prefetch_page_r)
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(prefetch_page_r)

	ENTRY(bcmp)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	testq	%rdx,%rdx
	je	1f
	movq	postbootkernelbase(%rip), %r11
	cmpq	%r11, %rdi
	jb	0f
	cmpq	%r11, %rsi
	jnb	1f
0:	leaq	.bcmp_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif	/* DEBUG */
	call	memcmp
	testl	%eax, %eax
	setne	%dl
	leave
	movzbl	%dl, %eax
	ret
	SET_SIZE(bcmp)

#ifdef DEBUG
	.text
.bcmp_panic_msg:
	.string "bcmp: arguments below kernelbase"
#endif	/* DEBUG */

	ENTRY_NP(bsrw_insn)
	xorl	%eax, %eax
	bsrw	%di, %ax
	ret
	SET_SIZE(bsrw_insn)

	ENTRY_NP(switch_sp_and_call)
	pushq	%rbp
	movq	%rsp, %rbp		/* set up stack frame */
	movq	%rdi, %rsp		/* switch stack pointer */
	movq	%rdx, %rdi		/* pass func arg 1 */
	movq	%rsi, %r11		/* save function to call */
	movq	%rcx, %rsi		/* pass func arg 2 */
	INDIRECT_CALL_REG(r11)		/* call function */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)

	ENTRY_NP(kmdb_enter)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * Save flags, do a 'cli' then return the saved flags
	 */
	call	intr_clear

	int	$T_DBGENTR

	/*
	 * Restore the saved flags
	 */
	movq	%rax, %rdi
	call	intr_restore

	leave
	ret
	SET_SIZE(kmdb_enter)

	ENTRY_NP(return_instr)
	rep;	ret	/* use 2 byte instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(return_instr)

	ENTRY(getflags)
	pushfq
	popq	%rax
#if defined(__xpv)
	CURTHREAD(%rdi)
	KPREEMPT_DISABLE(%rdi)
	/*
	 * Synthesize the PS_IE bit from the event mask bit
	 */
	CURVCPU(%r11)
	andq    $_BITNOT(PS_IE), %rax
	XEN_TEST_UPCALL_MASK(%r11)
	jnz	1f
	orq	$PS_IE, %rax
1:
	KPREEMPT_ENABLE_NOKP(%rdi)
#endif
	ret
	SET_SIZE(getflags)

	ENTRY(ftrace_interrupt_disable)
	pushfq
	popq	%rax
	CLI(%rdx)
	ret
	SET_SIZE(ftrace_interrupt_disable)

	ENTRY(ftrace_interrupt_enable)
	pushq	%rdi
	popfq
	ret
	SET_SIZE(ftrace_interrupt_enable)

	ENTRY(clflush_insn)
	clflush (%rdi)
	ret
	SET_SIZE(clflush_insn)

	ENTRY(mfence_insn)
	mfence
	ret
	SET_SIZE(mfence_insn)

/*
 * VMware implements an I/O port that programs can query to detect if software
 * is running in a VMware hypervisor. This hypervisor port behaves differently
 * depending on magic values in certain registers and modifies some registers
 * as a side effect.
 *
 * References: http://kb.vmware.com/kb/1009458
 */
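
/*
 * Illustrative usage sketch (the command value and variable names are
 * assumptions): the caller passes a hypervisor-port command and a buffer
 * of four 32-bit words that receives %eax, %ebx, %ecx and %edx; on VMware
 * the returned %ebx (regs[1]) holds the magic value.
 *
 *	uint32_t regs[4];
 *
 *	vmware_port(cmd, regs);
 *	if (regs[1] == VMWARE_HVMAGIC)
 *		detected = B_TRUE;
 */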

	ENTRY(vmware_port)
	pushq	%rbx
	movl	$VMWARE_HVMAGIC, %eax
	movl	$0xffffffff, %ebx
	movl	%edi, %ecx
	movl	$VMWARE_HVPORT, %edx
	inl	(%dx)
	movl	%eax, (%rsi)
	movl	%ebx, 4(%rsi)
	movl	%ecx, 8(%rsi)
	movl	%edx, 12(%rsi)
	popq	%rbx
	ret
	SET_SIZE(vmware_port)
