/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Functions to provide access to special amd64 instructions.
 * This file is included by sys/systm.h, and that header should be
 * used in preference to this one.
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

struct region_descriptor;

#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))
#define readq(va)	(*(volatile uint64_t *) (va))

#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile uint64_t *) (va) = (d))
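
/*
 * Usage sketch (illustration only; the register names below are
 * hypothetical): these macros perform volatile loads and stores on
 * kernel virtual addresses, e.g. device registers mapped with
 * pmap_mapdev(9):
 *
 *	uint32_t sts;
 *
 *	sts = readl(va + FOO_REG_STATUS);
 *	writel(va + FOO_REG_CTRL, sts | FOO_CTRL_ENABLE);
 */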

#if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline __pure2 u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline __pure2 u_long
bsfq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline __pure2 u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline __pure2 u_long
bsrq(u_long mask)
{
	u_long	result;

	__asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
			 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
			 :  "0" (ax), "c" (cx));
}
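
/*
 * Usage sketch (illustration only): CPUID leaf 0 returns the 12-byte
 * vendor identification string in %ebx, %edx, %ecx, in that order, so
 * the vendor can be recovered from the do_cpuid() output array:
 *
 *	u_int regs[4];
 *	char vendor[13];
 *
 *	do_cpuid(0, regs);
 *	memcpy(vendor + 0, &regs[1], 4);
 *	memcpy(vendor + 4, &regs[3], 4);
 *	memcpy(vendor + 8, &regs[2], 4);
 *	vendor[12] = '\0';
 */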

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS
#define	ffs(x)	__builtin_ffs(x)

#define	HAVE_INLINE_FFSL

static __inline __pure2 int
ffsl(long mask)
{

	return (__builtin_ffsl(mask));
}

#define	HAVE_INLINE_FFSLL

static __inline __pure2 int
ffsll(long long mask)
{
	return (ffsl((long)mask));
}

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{
	return (flsl((long)mask));
}

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}

static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}

static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}

static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}
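
/*
 * Example (a sketch, not a kernel interface): RDTSC is not a
 * serializing instruction, so cycle measurements are commonly
 * bracketed with lfence() to keep the time-stamp reads from being
 * reordered around the measured code:
 *
 *	uint64_t start, end;
 *
 *	lfence();
 *	start = rdtsc();
 *	... code under measurement ...
 *	lfence();
 *	end = rdtsc();
 */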

static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;
	high = newval >> 32;
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}

static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}
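
/*
 * Usage sketch (assumes XSAVE support and CR4.OSXSAVE set; not a
 * policy statement): register 0 is XCR0, whose low three bits enable
 * x87, SSE and AVX state management, so AVX could be enabled with:
 *
 *	load_xcr(0, rxcr(0) | 0x7);
 */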

/*
 * Global TLB flush (except for entries for pages marked PG_G)
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef CR4_PGE
#define	CR4_PGE	0x00000080	/* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works both on non-PCID CPUs and CPUs
 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
 * Operations that Invalidate TLBs and Paging-Structure Caches.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

#define	INVPCID_ADDR	0
#define	INVPCID_CTX	1
#define	INVPCID_CTXGLOB	2
#define	INVPCID_ALLCTX	3

struct invpcid_descr {
	uint64_t	pcid:12 __packed;
	uint64_t	pad:52 __packed;
	uint64_t	addr;
} __packed;

static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}
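
/*
 * Usage sketch (the PCID value and address are illustrative):
 * invalidating the mapping of a single address va within PCID 1 uses
 * an INVPCID_ADDR-type descriptor:
 *
 *	struct invpcid_descr d;
 *
 *	d.pcid = 1;
 *	d.pad = 0;
 *	d.addr = va;
 *	invpcid(&d, INVPCID_ADDR);
 */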

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" :  "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" :  : "a" (mask),  "c" (0), "d" (0));
}

#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef	MSR_FSBASE
#define	MSR_FSBASE	0xc0000100
#endif
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef	MSR_GSBASE
#define	MSR_GSBASE	0xc0000101
#endif
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}

static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}

static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}

static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
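
/*
 * Typical pattern (illustration only): intr_disable() returns the
 * previous %rflags so a short critical section can run with
 * interrupts off and then restore the original interrupt state:
 *
 *	register_t rf;
 *
 *	rf = intr_disable();
 *	... code that must not be interrupted ...
 *	intr_restore(rf);
 */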

static __inline void
stac(void)
{

	__asm __volatile("stac" : : : "cc");
}

static __inline void
clac(void)
{

	__asm __volatile("clac" : : : "cc");
}

enum {
	SGX_ECREATE	= 0x0,
	SGX_EADD	= 0x1,
	SGX_EINIT	= 0x2,
	SGX_EREMOVE	= 0x3,
	SGX_EDGBRD	= 0x4,
	SGX_EDGBWR	= 0x5,
	SGX_EEXTEND	= 0x6,
	SGX_ELDU	= 0x8,
	SGX_EBLOCK	= 0x9,
	SGX_EPA		= 0xA,
	SGX_EWB		= 0xB,
	SGX_ETRACK	= 0xC,
};

enum {
	SGX_PT_SECS = 0x00,
	SGX_PT_TCS  = 0x01,
	SGX_PT_REG  = 0x02,
	SGX_PT_VA   = 0x03,
	SGX_PT_TRIM = 0x04,
};

int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx);

static __inline int
sgx_ecreate(void *pginfo, void *secs)
{

	return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo,
	    (uint64_t)secs, 0));
}

static __inline int
sgx_eadd(void *pginfo, void *epc)
{

	return (sgx_encls(SGX_EADD, (uint64_t)pginfo,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_einit(void *sigstruct, void *secs, void *einittoken)
{

	return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct,
	    (uint64_t)secs, (uint64_t)einittoken));
}

static __inline int
sgx_eextend(void *secs, void *epc)
{

	return (sgx_encls(SGX_EEXTEND, (uint64_t)secs,
	    (uint64_t)epc, 0));
}

static __inline int
sgx_epa(void *epc)
{

	return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0));
}

static __inline int
sgx_eldu(uint64_t rbx, uint64_t rcx,
    uint64_t rdx)
{

	return (sgx_encls(SGX_ELDU, rbx, rcx, rdx));
}

static __inline int
sgx_eremove(void *epc)
{

	return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0));
}
#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */

int	breakpoint(void);
u_int	bsfl(u_int mask);
u_int	bsrl(u_int mask);
void	clflush(u_long addr);
void	clts(void);
void	cpuid_count(u_int ax, u_int cx, u_int *p);
void	disable_intr(void);
void	do_cpuid(u_int ax, u_int *p);
void	enable_intr(void);
void	halt(void);
void	ia32_pause(void);
u_char	inb(u_int port);
u_int	inl(u_int port);
void	insb(u_int port, void *addr, size_t count);
void	insl(u_int port, void *addr, size_t count);
void	insw(u_int port, void *addr, size_t count);
register_t	intr_disable(void);
void	intr_restore(register_t rf);
void	invd(void);
void	invlpg(u_long addr);
void	invltlb(void);
u_short	inw(u_int port);
void	lidt(struct region_descriptor *addr);
void	lldt(u_short sel);
void	load_cr0(u_long cr0);
void	load_cr3(u_long cr3);
void	load_cr4(u_long cr4);
void	load_dr0(uint64_t dr0);
void	load_dr1(uint64_t dr1);
void	load_dr2(uint64_t dr2);
void	load_dr3(uint64_t dr3);
void	load_dr6(uint64_t dr6);
void	load_dr7(uint64_t dr7);
void	load_fs(u_short sel);
void	load_gs(u_short sel);
void	ltr(u_short sel);
void	outb(u_int port, u_char data);
void	outl(u_int port, u_int data);
void	outsb(u_int port, const void *addr, size_t count);
void	outsl(u_int port, const void *addr, size_t count);
void	outsw(u_int port, const void *addr, size_t count);
void	outw(u_int port, u_short data);
u_long	rcr0(void);
u_long	rcr2(void);
u_long	rcr3(void);
u_long	rcr4(void);
uint64_t rdmsr(u_int msr);
uint32_t rdmsr32(u_int msr);
uint64_t rdpmc(u_int pmc);
uint64_t rdr0(void);
uint64_t rdr1(void);
uint64_t rdr2(void);
uint64_t rdr3(void);
uint64_t rdr6(void);
uint64_t rdr7(void);
uint64_t rdtsc(void);
u_long	read_rflags(void);
u_short	rfs(void);
u_short	rgs(void);
void	wbinvd(void);
void	write_rflags(u_long rf);
void	wrmsr(u_int msr, uint64_t newval);

#endif	/* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */

void	reset_dbregs(void);

#ifdef _KERNEL
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */