/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Functions to provide access to special amd64 instructions.
 * This file is included by <sys/systm.h>, which should be used in
 * preference to including this header directly.
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

struct region_descriptor;

#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))
#define readq(va)	(*(volatile uint64_t *) (va))

#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile uint64_t *) (va) = (d))

#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)
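
/*
 * Illustrative sketch, not part of the kernel API: the read*()/write*()
 * accessor macros above dereference a kernel virtual address as a
 * volatile object, so each access happens exactly once and is neither
 * combined nor elided.  The function name and the ENABLE bit below are
 * invented for the example.
 */
static __inline void
example_mmio_set_enable(volatile void *reg)
{
	uint32_t v;

	v = readl(reg);
	writel(reg, v | 0x1);		/* hypothetical ENABLE bit */
}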

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

#define	bsfl(mask)	__builtin_ctz(mask)

#define	bsfq(mask)	__builtin_ctzl(mask)

#define	bsrl(mask)	(__builtin_clz(mask) ^ 0x1f)

#define	bsrq(mask)	(__builtin_clzl(mask) ^ 0x3f)
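
/*
 * Note: as with the underlying __builtin_ctz*() and __builtin_clz*(),
 * the result of bsf*()/bsr*() is undefined for a zero mask, so callers
 * must check for zero first.  For example, bsfl(0x8) == 3 and
 * bsrl(0x8) == 3.
 */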

static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clflushopt(u_long addr)
{

	/*
	 * The 0x66 operand-size prefix turns clflush into clflushopt;
	 * it is spelled out in bytes for the benefit of older assemblers.
	 */
	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}
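
/*
 * Illustrative sketch, not part of the kernel API: CPUID leaf 0 returns
 * the highest supported standard leaf in %eax and the 12-byte vendor
 * string in %ebx, %edx, %ecx, in that order.  The function name is
 * invented for this example.
 */
static __inline u_int
example_cpu_vendor(u_int vend[3])
{
	u_int regs[4];

	do_cpuid(0, regs);
	vend[0] = regs[1];		/* %ebx: e.g. "Genu" */
	vend[1] = regs[3];		/* %edx: e.g. "ineI" */
	vend[2] = regs[2];		/* %ecx: e.g. "ntel" */
	return (regs[0]);
}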

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS
#define	ffs(x)		__builtin_ffs(x)

#define	HAVE_INLINE_FFSL
#define	ffsl(x)		__builtin_ffsl(x)

#define	HAVE_INLINE_FFSLL
#define	ffsll(x)	__builtin_ffsll(x)

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{
	/* amd64 is LP64, so long long and long are both 64 bits wide. */
	return (flsl((long)mask));
}

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}
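
/*
 * Illustrative sketch, not part of the kernel API: write a buffer back
 * from the cache with clflushopt() and order the flushes with sfence().
 * A 64-byte cache line is assumed for the example; real code derives
 * the line size from CPUID and must check for CLFLUSHOPT support.
 */
static __inline void
example_flush_range(const void *p, size_t len)
{
	u_long va;

	for (va = (u_long)p & ~(u_long)63; va < (u_long)p + len; va += 64)
		clflushopt(va);
	sfence();		/* complete the flushes before later stores */
}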

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}
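
/*
 * Illustrative sketch, not part of the kernel API: rdtsc is not a
 * serializing instruction, so measurements typically pair it with
 * lfence() to keep earlier instructions from drifting past the read.
 */
static __inline uint64_t
example_rdtsc_ordered(void)
{

	lfence();		/* retire preceding instructions first */
	return (rdtsc());
}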

static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
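
/*
 * Illustrative sketch, not part of the kernel API: a read-modify-write
 * of an MSR built from rdmsr() and wrmsr().  The MSR number and bits
 * come from the caller; real MSR definitions live in
 * <machine/specialreg.h>.
 */
static __inline void
example_msr_set_bits(u_int msr, uint64_t bits)
{

	wrmsr(msr, rdmsr(msr) | bits);
}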

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works on CPUs without PCID support as
 * well as on CPUs with PCID enabled or disabled.  See IA-32 SDM Vol. 3a
 * 4.10.4.1 Operations that Invalidate TLBs and Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE, flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#define	INVPCID_ADDR	0
#define	INVPCID_CTX	1
#define	INVPCID_CTXGLOB	2
#define	INVPCID_ALLCTX	3

struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}
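
/*
 * Illustrative sketch, not part of the kernel API: invalidate the TLB
 * entry for a single address in a given PCID.  Only the low 12 bits of
 * the PCID are significant; the type operand selects the scope.
 */
static __inline void
example_invpcid_addr(uint64_t pcid, u_long va)
{
	struct invpcid_descr d;

	d.pcid = pcid;			/* truncated to 12 bits */
	d.pad = 0;
	d.addr = va;
	invpcid(&d, INVPCID_ADDR);
}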

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" :  "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" :  : "a" (mask),  "c" (0), "d" (0));
}
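
/*
 * Illustrative sketch, not part of the kernel API: PKRU holds two bits
 * per protection key, AD (access disable) at bit 2 * key and WD (write
 * disable) at bit 2 * key + 1.  Setting WD makes pages tagged with the
 * key read-only for the current thread.
 */
static __inline void
example_pkey_deny_write(u_int pkey)
{

	wrpkru(rdpkru() | (2u << (pkey * 2)));
}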

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

/*
 * The rd/wr fsbase/gsbase instructions below require CR4.FSGSBASE to
 * be set, otherwise they raise #UD.
 */
static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}

static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}

static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
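
/*
 * Illustrative sketch, not part of the kernel API: the usual pairing of
 * intr_disable() and intr_restore() brackets a short critical section
 * and restores the caller's previous interrupt state, so it nests
 * safely.
 */
static __inline void
example_critical_section(void)
{
	register_t rf;

	rf = intr_disable();
	/* ... work that must not be interrupted ... */
	intr_restore(rf);
}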

/*
 * stac/clac set and clear RFLAGS.AC, temporarily permitting or
 * forbidding supervisor access to user pages when SMAP is enabled.
 */
static __inline void
stac(void)
{

	__asm __volatile("stac" : : : "cc");
}

static __inline void
clac(void)
{

	__asm __volatile("clac" : : : "cc");
}

enum {
	SGX_ECREATE	= 0x0,
	SGX_EADD	= 0x1,
	SGX_EINIT	= 0x2,
	SGX_EREMOVE	= 0x3,
	SGX_EDGBRD	= 0x4,
	SGX_EDGBWR	= 0x5,
	SGX_EEXTEND	= 0x6,
	SGX_ELDU	= 0x8,
	SGX_EBLOCK	= 0x9,
	SGX_EPA		= 0xA,
	SGX_EWB		= 0xB,
	SGX_ETRACK	= 0xC,
};

enum {
	SGX_PT_SECS = 0x00,
	SGX_PT_TCS  = 0x01,
	SGX_PT_REG  = 0x02,
	SGX_PT_VA   = 0x03,
	SGX_PT_TRIM = 0x04,
};

int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx);

static __inline int
sgx_ecreate(void *pginfo, void *secs)
{

	return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo,
	    (uint64_t)secs, 0));
}

static __inline int
sgx_eadd(void *pginfo, void *epc)
{

	return (sgx_encls(SGX_EADD, (uint64_t)pginfo,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_einit(void *sigstruct, void *secs, void *einittoken)
{

	return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct,
	    (uint64_t)secs, (uint64_t)einittoken));
}

static __inline int
sgx_eextend(void *secs, void *epc)
{

	return (sgx_encls(SGX_EEXTEND, (uint64_t)secs,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_epa(void *epc)
{

	return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0));
}

static __inline int
sgx_eldu(uint64_t rbx, uint64_t rcx,
    uint64_t rdx)
{

	return (sgx_encls(SGX_ELDU, rbx, rcx, rdx));
}

static __inline int
sgx_eremove(void *epc)
{

	return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0));
}

#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */

int	breakpoint(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	clflush(u_long addr);
void	clts(void);
void	cpuid_count(u_int ax, u_int cx, u_int *p);
void	disable_intr(void);
void	do_cpuid(u_int ax, u_int *p);
void	enable_intr(void);
void	halt(void);
void	ia32_pause(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t count);
void	insl(u_int port, void *addr, size_t count);
void	insw(u_int port, void *addr, size_t count);
register_t	intr_disable(void);
void	intr_restore(register_t rf);
void	invd(void);
void	invlpg(u_long addr);
void	invltlb(void);
u_short	inw(u_int port);
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	load_cr0(u_long cr0);
void	load_cr3(u_long cr3);
void	load_cr4(u_long cr4);
void	load_dr0(uint64_t dr0);
void	load_dr1(uint64_t dr1);
void	load_dr2(uint64_t dr2);
void	load_dr3(uint64_t dr3);
void	load_dr6(uint64_t dr6);
void	load_dr7(uint64_t dr7);
void	load_fs(u_short sel);
void	load_gs(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, const void *addr, size_t count);
void	outsl(u_int port, const void *addr, size_t count);
void	outsw(u_int port, const void *addr, size_t count);
void	outw(u_int port, u_short data);
u_long	rcr0(void);
u_long	rcr2(void);
u_long	rcr3(void);
u_long	rcr4(void);
uint64_t rdmsr(u_int msr);
uint32_t rdmsr32(u_int msr);
uint64_t rdpmc(u_int pmc);
uint64_t rdr0(void);
uint64_t rdr1(void);
uint64_t rdr2(void);
uint64_t rdr3(void);
uint64_t rdr6(void);
uint64_t rdr7(void);
uint64_t rdtsc(void);
u_long	read_rflags(void);
u_short	rfs(void);
u_short	rgs(void);
void	wbinvd(void);
void	write_rflags(u_long rf);
void	wrmsr(u_int msr, uint64_t newval);

#endif	/* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */

void	reset_dbregs(void);

#ifdef _KERNEL
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */