/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

struct region_descriptor;

#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))

#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))
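
/*
 * Illustrative only: a read-modify-write of a 32-bit memory-mapped
 * device register at a kernel virtual address "va" (a hypothetical
 * variable) using the accessors above.
 *
 *	uint32_t v;
 *
 *	v = readl(va);
 *	writel(va, v | 0x01);
 */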

static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

static __inline __pure2 u_int
bsfl(u_int mask)
{
	u_int	result;

	__asm("bsfl %1,%0" : "=r" (result) : "rm" (mask) : "cc");
	return (result);
}

static __inline __pure2 u_int
bsrl(u_int mask)
{
	u_int	result;

	__asm("bsrl %1,%0" : "=r" (result) : "rm" (mask) : "cc");
	return (result);
}

static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

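/*
 * CLFLUSHOPT is CLFLUSH (0f ae /7) with a 0x66 operand-size prefix; the
 * .byte below spells that out for assemblers that lack the mnemonic.
 */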
static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

static __inline void
clts(void)
{

	__asm __volatile("clts");
}

static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

#ifdef _KERNEL
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax), "c" (cx));
}
#else
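/*
 * Userland may be compiled with -fPIC, in which case %ebx is reserved as
 * the GOT pointer and cannot be named as a CPUID output directly; save
 * and restore it by hand and copy the value out through another register
 * ("=DS" lets the compiler pick %edi or %esi).
 */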
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile(
	    "pushl\t%%ebx\n\t"
	    "cpuid\n\t"
	    "movl\t%%ebx,%1\n\t"
	    "popl\t%%ebx"
	    : "=a" (p[0]), "=DS" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax));
}

static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile(
	    "pushl\t%%ebx\n\t"
	    "cpuid\n\t"
	    "movl\t%%ebx,%1\n\t"
	    "popl\t%%ebx"
	    : "=a" (p[0]), "=DS" (p[1]), "=c" (p[2]), "=d" (p[3])
	    :  "0" (ax), "c" (cx));
}
#endif

static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{
	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

static __inline void
cpu_mwait(u_long extensions, u_int hints)
{
	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}
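
/*
 * Illustrative only: the usual MONITOR/MWAIT pairing.  Arm the monitor
 * on an address, re-check the wakeup condition, then wait; "wchan" and
 * "hints" are hypothetical names.
 *
 *	cpu_monitor(&wchan, 0, 0);
 *	if (*(volatile int *)&wchan == 0)
 *		cpu_mwait(0, hints);
 */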

static __inline void
lfence(void)
{
	__asm __volatile("lfence" : : : "memory");
}

static __inline void
mfence(void)
{
	__asm __volatile("mfence" : : : "memory");
}

static __inline void
sfence(void)
{
	__asm __volatile("sfence" : : : "memory");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline __pure2 int
ffs(int mask)
{
	/*
	 * Note that gcc-2's builtin ffs would be used if we didn't declare
	 * this inline or turn off the builtin.  The builtin is faster but
	 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
	 * versions.
	 */
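	/* bsfl()'s result is undefined for a zero mask, hence the check. */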
	return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FFSL

static __inline __pure2 int
ffsl(long mask)
{
	return (ffs((int)mask));
}

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{
	return (fls((int)mask));
}

#endif /* _KERNEL */

static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

static __inline void
invd(void)
{
	__asm __volatile("invd");
}

static __inline u_short
inw(u_int port)
{
	u_short	data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}

static __inline u_int
read_eflags(void)
{
	u_int	ef;

	__asm __volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}

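/*
 * The "=A" constraint binds the 64-bit result to the %edx:%eax register
 * pair, which is where RDMSR, RDPMC and RDTSC deliver their values.
 */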
static __inline uint64_t
rdmsr(u_int msr)
{
	uint64_t rv;

	__asm __volatile("rdmsr" : "=A" (rv) : "c" (msr));
	return (rv);
}

static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "edx");
	return (low);
}

static __inline uint64_t
rdpmc(u_int pmc)
{
	uint64_t rv;

	__asm __volatile("rdpmc" : "=A" (rv) : "c" (pmc));
	return (rv);
}

static __inline uint64_t
rdtsc(void)
{
	uint64_t rv;

	__asm __volatile("rdtsc" : "=A" (rv));
	return (rv);
}

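/*
 * RDTSC is not ordered with respect to surrounding instructions; issuing
 * a fence first keeps earlier memory operations from being reordered past
 * the timestamp read.  Which fence suffices depends on the CPU vendor, so
 * both flavors are provided.
 */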
static __inline uint64_t
rdtsc_ordered_lfence(void)
{
	lfence();
	return (rdtsc());
}

static __inline uint64_t
rdtsc_ordered_mfence(void)
{
	mfence();
	return (rdtsc());
}

static __inline uint64_t
rdtscp(void)
{
	uint64_t rv;

	__asm __volatile("rdtscp" : "=A" (rv) : : "ecx");
	return (rv);
}

static __inline uint64_t
rdtscp_aux(uint32_t *aux)
{
	uint64_t rv;

	__asm __volatile("rdtscp" : "=A" (rv), "=c" (*aux));
	return (rv);
}

static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}

static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

static __inline void
write_eflags(u_int ef)
{
	__asm __volatile("pushl %0; popfl" : : "r" (ef));
}

static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	__asm __volatile("wrmsr" : : "A" (newval), "c" (msr));
}

static __inline void
load_cr0(u_int data)
{

	__asm __volatile("movl %0,%%cr0" : : "r" (data));
}

static __inline u_int
rcr0(void)
{
	u_int	data;

	__asm __volatile("movl %%cr0,%0" : "=r" (data));
	return (data);
}

static __inline u_int
rcr2(void)
{
	u_int	data;

	__asm __volatile("movl %%cr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr3(u_int data)
{

	__asm __volatile("movl %0,%%cr3" : : "r" (data) : "memory");
}

static __inline u_int
rcr3(void)
{
	u_int	data;

	__asm __volatile("movl %%cr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_cr4(u_int data)
{
	__asm __volatile("movl %0,%%cr4" : : "r" (data));
}

static __inline u_int
rcr4(void)
{
	u_int	data;

	__asm __volatile("movl %%cr4,%0" : "=r" (data));
	return (data);
}

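/*
 * XGETBV/XSETBV access the extended control registers: %ecx selects the
 * register (0 is XCR0, the XSAVE feature-enable mask) and the value is
 * carried in %edx:%eax.
 */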
static __inline uint64_t
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

static __inline void
load_xcr(u_int reg, uint64_t val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}

/*
 * Global TLB flush (except for entries for pages marked PG_G).
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_int addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}

static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

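/*
 * SGDT/SIDT store a 6-byte pseudo-descriptor (16-bit limit plus 32-bit
 * base), so only the low 48 bits of the values returned below are
 * meaningful.
 */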
static __inline uint64_t
rgdt(void)
{
	uint64_t gdtr;
	__asm __volatile("sgdt %0" : "=m" (gdtr));
	return (gdtr);
}

static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

static __inline uint64_t
ridt(void)
{
	uint64_t idtr;
	__asm __volatile("sidt %0" : "=m" (idtr));
	return (idtr);
}

static __inline u_short
rldt(void)
{
	u_short ldtr;
	__asm __volatile("sldt %0" : "=g" (ldtr));
	return (ldtr);
}

static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

static __inline u_short
rtr(void)
{
	u_short tr;
	__asm __volatile("str %0" : "=g" (tr));
	return (tr);
}

static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}

static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

static __inline u_int
rdr0(void)
{
	u_int	data;
	__asm __volatile("movl %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int dr0)
{
	__asm __volatile("movl %0,%%dr0" : : "r" (dr0));
}

static __inline u_int
rdr1(void)
{
	u_int	data;
	__asm __volatile("movl %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int dr1)
{
	__asm __volatile("movl %0,%%dr1" : : "r" (dr1));
}

static __inline u_int
rdr2(void)
{
	u_int	data;
	__asm __volatile("movl %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int dr2)
{
	__asm __volatile("movl %0,%%dr2" : : "r" (dr2));
}

static __inline u_int
rdr3(void)
{
	u_int	data;
	__asm __volatile("movl %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int dr3)
{
	__asm __volatile("movl %0,%%dr3" : : "r" (dr3));
}

static __inline u_int
rdr6(void)
{
	u_int	data;
	__asm __volatile("movl %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int dr6)
{
	__asm __volatile("movl %0,%%dr6" : : "r" (dr6));
}

static __inline u_int
rdr7(void)
{
	u_int	data;
	__asm __volatile("movl %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int dr7)
{
	__asm __volatile("movl %0,%%dr7" : : "r" (dr7));
}

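/*
 * Cyrix configuration registers are reached through the index/data port
 * pair 0x22/0x23: write the register index to port 0x22, then read or
 * write the register contents at port 0x23.
 */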
static __inline u_char
read_cyrix_reg(u_char reg)
{
	outb(0x22, reg);
	return inb(0x23);
}

static __inline void
write_cyrix_reg(u_char reg, u_char data)
{
	outb(0x22, reg);
	outb(0x23, data);
}

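/*
 * Illustrative only: the usual pattern for a short critical section
 * built from the two helpers below.
 *
 *	register_t saved;
 *
 *	saved = intr_disable();
 *	... code that must not be interrupted ...
 *	intr_restore(saved);
 */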
static __inline register_t
intr_disable(void)
{
	register_t eflags;

	eflags = read_eflags();
	disable_intr();
	return (eflags);
}

static __inline void
intr_restore(register_t eflags)
{
	write_eflags(eflags);
}

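/*
 * RDPKRU and WRPKRU require %ecx to be zero, and WRPKRU additionally
 * requires %edx to be zero; PKRU holds the user-mode protection-key
 * access rights.
 */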
static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" :  "=a" (res) : "c" (0) : "edx");
	return (res);
}

static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" :  : "a" (mask),  "c" (0), "d" (0));
}

void    reset_dbregs(void);

#ifdef _KERNEL
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */