/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Functions to provide access to special amd64 instructions.
 * This is included by sys/systm.h, and that file should be
 * used in preference to this one.
 */

#ifdef __i386__
#include <i386/cpufunc.h>
#else /* !__i386__ */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

struct region_descriptor;

#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))
#define readq(va)	(*(volatile uint64_t *) (va))

#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile uint64_t *) (va) = (d))

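/*
 * Illustrative sketch, not part of the FreeBSD API: a minimal use of
 * the MMIO accessors above against a hypothetical device.  The
 * register offsets and the "bar_va" mapping are invented.  The
 * volatile casts in the macros keep the compiler from caching or
 * reordering the accesses, but do not order them against other CPUs;
 * pair them with lfence()/sfence()/mfence() where the device needs it.
 */
static __inline uint32_t
example_mmio_ack(volatile char *bar_va)
{
	uint32_t status;

	status = readl(bar_va + 0x10);	/* hypothetical status register */
	writel(bar_va + 0x14, status);	/* hypothetical ack register */
	return (status);
}
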
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

#define	bsfl(mask)	__builtin_ctz(mask)

#define	bsfq(mask)	__builtin_ctzl(mask)

#define	bsrl(mask)	(__builtin_clz(mask) ^ 0x1f)

#define	bsrq(mask)	(__builtin_clzl(mask) ^ 0x3f)

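/*
 * Illustrative sketch, not part of the FreeBSD API: walk every set
 * bit in a 64-bit mask with bsfq().  Like the BSF instruction, the
 * builtin-based macros above are undefined for a zero mask, so the
 * loop tests the mask before each scan.
 */
static __inline int
example_count_set_bits(u_long mask)
{
	int bit, n;

	n = 0;
	while (mask != 0) {
		bit = bsfq(mask);	/* index of the lowest set bit */
		mask &= ~(1UL << bit);	/* clear it and continue */
		n++;
	}
	return (n);
}
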
static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clflushopt(u_long addr)
{

	/*
	 * The 0x66 prefix turns CLFLUSH into CLFLUSHOPT (66 0F AE /7),
	 * emitted as raw bytes for assemblers that lack the mnemonic.
	 */
	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax), "c" (cx));
}

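/*
 * Illustrative sketch, not part of the FreeBSD API: CPUID leaf 0
 * returns the vendor string in EBX:EDX:ECX order; read as 12 bytes,
 * the three words below spell e.g. "GenuineIntel" or "AuthenticAMD".
 */
static __inline void
example_cpu_vendor(u_int vend[3])
{
	u_int regs[4];

	do_cpuid(0, regs);
	vend[0] = regs[1];	/* EBX: first four vendor bytes */
	vend[1] = regs[3];	/* EDX: middle four */
	vend[2] = regs[2];	/* ECX: last four */
}
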
static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

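/*
 * Illustrative sketch, not part of the FreeBSD API: a classic use of
 * the programmed-I/O helpers above is reading the CMOS/RTC through
 * its index/data port pair (0x70/0x71).  Real code would also honor
 * the NMI-disable bit in port 0x70 and serialize against other users.
 */
static __inline u_char
example_cmos_read(u_char reg)
{

	outb(0x70, reg);	/* select the CMOS register */
	return (inb(0x71));	/* fetch its contents */
}
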
static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc_ordered_lfence(void)
{
	lfence();
	return (rdtsc());
}

static __inline uint64_t
rdtsc_ordered_mfence(void)
{
	mfence();
	return (rdtsc());
}

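/*
 * Illustrative sketch, not part of the FreeBSD API: measuring the TSC
 * ticks spent in a code region.  The ordered reads keep the timestamp
 * from being hoisted above (or sunk below) the measured instructions;
 * whether LFENCE or MFENCE is the right fence is CPU-dependent, which
 * is why both ordered variants exist above.
 */
static __inline uint64_t
example_tsc_delta(void)
{
	uint64_t start, end;

	start = rdtsc_ordered_lfence();
	ia32_pause();		/* stand-in for the work being measured */
	end = rdtsc_ordered_lfence();
	return (end - start);
}
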
static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtscp_aux(uint32_t *aux)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high), "=c" (*aux));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}

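/*
 * Illustrative sketch, not part of the FreeBSD API: setting a bit in
 * an MSR with a read-modify-write.  The MSR number and bit index are
 * whatever the caller supplies; kernel code would normally prefer
 * rdmsr_safe()/wrmsr_safe() (declared at the bottom of this file)
 * when the MSR might not exist on the running CPU.
 */
static __inline void
example_msr_set_bit(u_int msr, int bit)
{
	uint64_t v;

	v = rdmsr(msr);
	v |= 1ULL << bit;
	wrmsr(msr, v);
}
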
static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for entries for pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works both on non-PCID CPUs and CPUs
 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
 * Operations that Invalidate TLBs and Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#define	INVPCID_ADDR	0
#define	INVPCID_CTX	1
#define	INVPCID_CTXGLOB	2
#define	INVPCID_ALLCTX	3

struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}

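/*
 * Illustrative sketch, not part of the FreeBSD API: invalidate the
 * TLB entry for a single address in a single PCID with INVPCID_ADDR.
 * Real callers take the PCID from the pmap rather than a parameter.
 */
static __inline void
example_invpcid_addr(uint64_t pcid, u_long va)
{
	struct invpcid_descr d;

	d.pcid = pcid;
	d.pad = 0;
	d.addr = va;
	invpcid(&d, INVPCID_ADDR);
}
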
static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

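/*
 * Illustrative sketch, not part of the FreeBSD API: wait for a flag
 * to change using MONITOR/MWAIT.  The re-check between cpu_monitor()
 * and cpu_mwait() closes the race where the store lands before the
 * wait starts; extensions and hints of 0 request default behavior.
 */
static __inline void
example_mwait_for(volatile const int *flag)
{

	while (*flag == 0) {
		cpu_monitor(__DEVOLATILE(const void *, flag), 0, 0);
		if (*flag == 0)
			cpu_mwait(0, 0);
	}
}
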
static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" :  "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" :  : "a" (mask),  "c" (0), "d" (0));
}

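/*
 * Illustrative sketch, not part of the FreeBSD API: deny all access
 * to one protection key.  PKRU holds two bits per key: AD (access
 * disable) in the even position and WD (write disable) in the odd
 * one, so setting both for key i means ORing 3 into bits 2i..2i+1.
 */
static __inline void
example_pkey_deny(u_int pkey)
{
	uint32_t reg;

	reg = rdpkru();
	reg |= (uint32_t)3 << (pkey * 2);	/* set AD and WD for the key */
	wrpkru(reg);
}
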
#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}

static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}

static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}

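/*
 * Illustrative sketch, not part of the FreeBSD API: the canonical
 * save/disable/restore pattern for a short critical section.  Saving
 * RFLAGS first means the section nests correctly whether or not the
 * caller already ran with interrupts disabled.
 */
static __inline void
example_critical(volatile int *counter)
{
	register_t saved;

	saved = intr_disable();
	(*counter)++;		/* work that must not be interrupted */
	intr_restore(saved);
}
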
static __inline void
stac(void)
{

	__asm __volatile("stac" : : : "cc");
}

static __inline void
clac(void)
{

	__asm __volatile("clac" : : : "cc");
}

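/*
 * Illustrative sketch, not part of the FreeBSD API: with SMAP enabled
 * the kernel may touch user pages only inside a stac()/clac() bracket,
 * which toggles RFLAGS.AC.  Real kernel code hides this behind
 * copyin()/copyout() and friends, which also handle faults; this
 * sketch does not.
 */
static __inline int
example_read_user_int(const int *uaddr)
{
	int v;

	stac();			/* open the user-access window */
	v = *uaddr;		/* real code would catch faults here */
	clac();			/* close it again */
	return (v);
}
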
enum {
	SGX_ECREATE	= 0x0,
	SGX_EADD	= 0x1,
	SGX_EINIT	= 0x2,
	SGX_EREMOVE	= 0x3,
	SGX_EDGBRD	= 0x4,
	SGX_EDGBWR	= 0x5,
	SGX_EEXTEND	= 0x6,
	SGX_ELDU	= 0x8,
	SGX_EBLOCK	= 0x9,
	SGX_EPA		= 0xA,
	SGX_EWB		= 0xB,
	SGX_ETRACK	= 0xC,
};

enum {
	SGX_PT_SECS = 0x00,
	SGX_PT_TCS  = 0x01,
	SGX_PT_REG  = 0x02,
	SGX_PT_VA   = 0x03,
	SGX_PT_TRIM = 0x04,
};

int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx);

static __inline int
sgx_ecreate(void *pginfo, void *secs)
{

	return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo,
	    (uint64_t)secs, 0));
}

static __inline int
sgx_eadd(void *pginfo, void *epc)
{

	return (sgx_encls(SGX_EADD, (uint64_t)pginfo,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_einit(void *sigstruct, void *secs, void *einittoken)
{

	return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct,
	    (uint64_t)secs, (uint64_t)einittoken));
}

static __inline int
sgx_eextend(void *secs, void *epc)
{

	return (sgx_encls(SGX_EEXTEND, (uint64_t)secs,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_epa(void *epc)
{

	return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0));
}

static __inline int
sgx_eldu(uint64_t rbx, uint64_t rcx,
    uint64_t rdx)
{

	return (sgx_encls(SGX_ELDU, rbx, rcx, rdx));
}

static __inline int
sgx_eremove(void *epc)
{

	return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0));
}

void	reset_dbregs(void);

#ifdef _KERNEL
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */

#endif /* __i386__ */