/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Functions to provide access to special amd64 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

struct region_descriptor;

#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))
#define readq(va)	(*(volatile uint64_t *) (va))

#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile uint64_t *) (va) = (d))

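/*
 * Usage sketch (illustrative only): the read*()/write*() macros do
 * single volatile loads and stores through a kernel virtual address,
 * as for memory-mapped device registers.  The mapping and register
 * offsets below are hypothetical:
 *
 *	char *regs = ...;			(mapped device BAR)
 *	uint32_t status = readl(regs + 0x04);	(read a status register)
 *	writel(regs + 0x08, status | 0x1);	(write a control register)
 */
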
#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

#define	bsfl(mask)	__builtin_ctz(mask)

#define	bsfq(mask)	__builtin_ctzl(mask)

#define	bsrl(mask)	(__builtin_clz(mask) ^ 0x1f)

#define	bsrq(mask)	(__builtin_clzl(mask) ^ 0x3f)

static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}

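/*
 * Usage sketch (illustrative only): query CPUID leaf 7, subleaf 0 and
 * test a structured extended feature bit in EBX.  CPUID_STDEXT_SMEP
 * stands in for the definition in <machine/specialreg.h>:
 *
 *	u_int regs[4];
 *
 *	cpuid_count(0x7, 0x0, regs);
 *	if (regs[1] & CPUID_STDEXT_SMEP)
 *		...feature is present...
 */
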
static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS
#define	ffs(x)		__builtin_ffs(x)

#define	HAVE_INLINE_FFSL
#define	ffsl(x)		__builtin_ffsl(x)

#define	HAVE_INLINE_FFSLL
#define	ffsll(x)	__builtin_ffsll(x)

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{
	return (flsl((long)mask));
}

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

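/*
 * Usage sketch (illustrative only): the ins*() routines transfer
 * "count" items from an I/O port into a buffer, e.g. a legacy ATA PIO
 * read of one 512-byte sector through the (hypothetical here) data
 * port 0x1f0:
 *
 *	uint16_t buf[256];
 *
 *	insw(0x1f0, buf, nitems(buf));
 */
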
static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtscp_aux(uint32_t *aux)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high), "=c" (*aux));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}

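/*
 * Usage sketch (illustrative only): bracket a region with TSC reads.
 * The lfence calls are one common convention for keeping the reads
 * from being reordered around the measured work; they are not the
 * only valid serialization choice:
 *
 *	uint64_t t0, t1;
 *
 *	lfence();
 *	t0 = rdtsc();
 *	...measured work...
 *	lfence();
 *	t1 = rdtsc();
 *	...t1 - t0 is the elapsed cycle count...
 */
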
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}

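/*
 * Usage sketch (illustrative only): a read-modify-write of an MSR.
 * MSR_EFER and EFER_NXE match <machine/specialreg.h>; a real caller
 * must know the MSR exists on the current CPU:
 *
 *	uint64_t efer;
 *
 *	efer = rdmsr(MSR_EFER);
 *	wrmsr(MSR_EFER, efer | EFER_NXE);
 */
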
static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for those for pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works both on non-PCID CPUs and CPUs
 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
 * Operations that Invalidate TLBs and Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#define	INVPCID_ADDR	0
#define	INVPCID_CTX	1
#define	INVPCID_CTXGLOB	2
#define	INVPCID_ALLCTX	3

struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}

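/*
 * Usage sketch (illustrative only): invalidate a single address in a
 * single PCID.  INVPCID support must have been verified via CPUID;
 * the PCID value and "va" are hypothetical:
 *
 *	struct invpcid_descr d;
 *
 *	d.pcid = 1;
 *	d.pad = 0;
 *	d.addr = va;
 *	invpcid(&d, INVPCID_ADDR);
 */
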
static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" :  "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" :  : "a" (mask),  "c" (0), "d" (0));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}

static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}

static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}

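/*
 * Usage sketch (illustrative only): the canonical save/disable/restore
 * pattern around a short critical section:
 *
 *	register_t rf;
 *
 *	rf = intr_disable();
 *	...code that must not take interrupts...
 *	intr_restore(rf);
 */
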
static __inline void
stac(void)
{

	__asm __volatile("stac" : : : "cc");
}

static __inline void
clac(void)
{

	__asm __volatile("clac" : : : "cc");
}

enum {
	SGX_ECREATE	= 0x0,
	SGX_EADD	= 0x1,
	SGX_EINIT	= 0x2,
	SGX_EREMOVE	= 0x3,
	SGX_EDGBRD	= 0x4,
	SGX_EDGBWR	= 0x5,
	SGX_EEXTEND	= 0x6,
	SGX_ELDU	= 0x8,
	SGX_EBLOCK	= 0x9,
	SGX_EPA		= 0xA,
	SGX_EWB		= 0xB,
	SGX_ETRACK	= 0xC,
};

enum {
	SGX_PT_SECS = 0x00,
	SGX_PT_TCS  = 0x01,
	SGX_PT_REG  = 0x02,
	SGX_PT_VA   = 0x03,
	SGX_PT_TRIM = 0x04,
};

int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx);

static __inline int
sgx_ecreate(void *pginfo, void *secs)
{

	return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo,
	    (uint64_t)secs, 0));
}

static __inline int
sgx_eadd(void *pginfo, void *epc)
{

	return (sgx_encls(SGX_EADD, (uint64_t)pginfo,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_einit(void *sigstruct, void *secs, void *einittoken)
{

	return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct,
	    (uint64_t)secs, (uint64_t)einittoken));
}

static __inline int
sgx_eextend(void *secs, void *epc)
{

	return (sgx_encls(SGX_EEXTEND, (uint64_t)secs,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_epa(void *epc)
{

	return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0));
}

static __inline int
sgx_eldu(uint64_t rbx, uint64_t rcx,
    uint64_t rdx)
{

	return (sgx_encls(SGX_ELDU, rbx, rcx, rdx));
}

static __inline int
sgx_eremove(void *epc)
{

	return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0));
}

#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */

int	breakpoint(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	clflush(u_long addr);
void	clts(void);
void	cpuid_count(u_int ax, u_int cx, u_int *p);
void	disable_intr(void);
void	do_cpuid(u_int ax, u_int *p);
void	enable_intr(void);
void	halt(void);
void	ia32_pause(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t count);
void	insl(u_int port, void *addr, size_t count);
void	insw(u_int port, void *addr, size_t count);
register_t	intr_disable(void);
void	intr_restore(register_t rf);
void	invd(void);
void	invlpg(u_long addr);
void	invltlb(void);
u_short	inw(u_int port);
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	load_cr0(u_long cr0);
void	load_cr3(u_long cr3);
void	load_cr4(u_long cr4);
void	load_dr0(uint64_t dr0);
void	load_dr1(uint64_t dr1);
void	load_dr2(uint64_t dr2);
void	load_dr3(uint64_t dr3);
void	load_dr6(uint64_t dr6);
void	load_dr7(uint64_t dr7);
void	load_fs(u_short sel);
void	load_gs(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, const void *addr, size_t count);
void	outsl(u_int port, const void *addr, size_t count);
void	outsw(u_int port, const void *addr, size_t count);
void	outw(u_int port, u_short data);
u_long	rcr0(void);
u_long	rcr2(void);
u_long	rcr3(void);
u_long	rcr4(void);
uint64_t rdmsr(u_int msr);
uint32_t rdmsr32(u_int msr);
uint64_t rdpmc(u_int pmc);
uint64_t rdr0(void);
uint64_t rdr1(void);
uint64_t rdr2(void);
uint64_t rdr3(void);
uint64_t rdr6(void);
uint64_t rdr7(void);
uint64_t rdtsc(void);
u_long	read_rflags(void);
u_short	rfs(void);
u_short	rgs(void);
void	wbinvd(void);
void	write_rflags(u_long rf);
void	wrmsr(u_int msr, uint64_t newval);

#endif	/* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */

void	reset_dbregs(void);

#ifdef _KERNEL
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */