/*
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#if !defined(CPU_ENABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif
#if defined(CPU_DISABLE_SSE)
#undef CPU_ENABLE_SSE
#endif

void initializecpu(void);
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
void	enable_K5_wt_alloc(void);
void	enable_K6_wt_alloc(void);
void	enable_K6_2_wt_alloc(void);
#endif

#ifdef I486_CPU
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#ifdef I686_CPU
static void	init_6x86MX(void);
static void	init_ppro(void);
static void	init_mendocino(void);
#endif

static int	hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");

/* Must *NOT* be BSS or locore will bzero these after setting them */
int	cpu = 0;		/* Are we 386, 386sx, 486, etc? */
u_int	cpu_feature = 0;	/* Feature flags */
u_int	cpu_high = 0;		/* Highest arg to CPUID */
u_int	cpu_id = 0;		/* Stepping ID */
u_int	cpu_procinfo = 0;	/* HyperThreading Info / Brand Index / CLFLUSH */
char	cpu_vendor[20] = "";	/* CPU Origin code */

#ifdef CPU_ENABLE_SSE
u_int	cpu_fxsr;		/* SSE enabled */
#endif

#ifdef I486_CPU
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	write_eflags(eflags);
}

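/*
 * The Cyrix configuration control registers (CCRs) used below are
 * reached through the index/data port pair 0x22/0x23; read_cyrix_reg()
 * and write_cyrix_reg() wrap that two-step access.
 */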
/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
	u_long	eflags;
	u_char	ccr0;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;			/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	write_eflags(eflags);
}

/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	u_long	eflags;
	u_char	ccr2;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
	/* Enables WB cache interface pin and Lock NW bit in CR0. */
	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
#endif

	write_cyrix_reg(CCR2, ccr2);
	write_eflags(eflags);
}

/*
 * Cyrix 5x86
 */
static void
init_5x86(void)
{
	u_long	eflags;
	u_char	ccr2, ccr3, ccr4, pcr0;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	(void)read_cyrix_reg(CCR3);		/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#else
	ccr2 &= ~CCR2_SUSP_HLT;
#endif
	ccr2 |= CCR2_WT1;
	write_cyrix_reg(CCR2, ccr2);

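	/*
	 * CCR4 and PCR0 sit behind the MAPEN bits in CCR3: writing
	 * CCR3_MAPEN0 exposes them, and the saved CCR3 value written
	 * back below hides them again.
	 */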
	/* Initialize CCR4. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
#else
	ccr4 &= ~CCR4_FASTFPE;
#endif
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
	 * should be 0 for errata fix.
	 ********************************************************************/
#ifdef CPU_IORT
	ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
	pcr0 |= PCR0_RSTK;
#else
	pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
	pcr0 |= PCR0_BTB;
#else
	pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
	pcr0 |= PCR0_LOOP;
#else
	pcr0 &= ~PCR0_LOOP;
#endif

	/****************************************************************
	 * WARNING: if you use a memory mapped I/O device, don't use
	 * DISABLE_5X86_LSSER option, which may reorder memory mapped
	 * I/O access.
	 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	pcr0 &= ~PCR0_LSSER;
#else
	pcr0 |= PCR0_LSSER;
#endif
	write_cyrix_reg(PCR0, pcr0);

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	(void)read_cyrix_reg(0x80);		/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}

#ifdef CPU_I486_ON_386
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	write_eflags(eflags);
}
#endif

/*
 * Cyrix 6x86
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/*
	 * Earlier revisions of the 6x86 CPU could crash the system if
	 * the L1 cache is in write-back mode.
	 */
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	else {
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#else
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
#endif
	}

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
#endif /* I486_CPU */

#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}

static void
init_ppro(void)
{
#ifndef SMP
	u_int64_t	apicbase;

	/*
	 * The local APIC should be disabled in a UP kernel.
	 */
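	/*
	 * MSR 0x1b is the APIC base MSR; bit 11 (0x800) is the APIC
	 * global enable bit.
	 */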
	apicbase = rdmsr(0x1b);
	apicbase &= ~0x800LL;
	wrmsr(0x1b, apicbase);
#endif
}

/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	u_long	eflags;
	u_int64_t	bbl_cr_ctl3;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

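	/*
	 * MSR 0x11e is BBL_CR_CTL3; bit 0 reads as 1 once the L2
	 * cache configuration has been latched.
	 */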
	bbl_cr_ctl3 = rdmsr(0x11e);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef	CPU_CELERON_L2_LATENCY
#if CPU_CELERON_L2_LATENCY > 15
#error invalid CPU_CELERON_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_CELERON_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(0x11e, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	write_eflags(eflags);
#endif /* CPU_PPRO2CELERON */
}

#endif /* I686_CPU */

/*
 * Initialize CR4 (Control register 4) to enable SSE instructions.
 */
void
enable_sse(void)
{
#if defined(CPU_ENABLE_SSE)
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
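		/*
		 * CR4_FXSR (OSFXSR) tells the CPU that the OS uses
		 * fxsave/fxrstor, and CR4_XMM (OSXMMEXCPT) unmasks the
		 * SIMD floating-point exception; both are prerequisites
		 * for using SSE.
		 */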
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#endif
}

void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
#if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4, and the Athlon MP
			 * support SSE.  These correspond to cpu_id 0x66X,
			 * 0x67X, or 0x68X.
			 */
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];
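				/*
				 * Bit 15 of MSR 0xC0010015 (the AMD HWCR)
				 * hides SSE support on these parts; clear
				 * it and re-read the feature flags.
				 */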
				wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
#endif
		}
		break;
#endif
	default:
		break;
	}
	enable_sse();

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * OS should flush L1 cache by itself because no PC-98 supports
	 * non-Intel CPUs.  Use wbinvd instruction before DMA transfer
	 * when need_pre_dma_flush = 1, use invd instruction after DMA
	 * transfer when need_post_dma_flush = 1.  If your CPU upgrade
	 * product supports hardware cache control, you can add the
	 * CPU_UPGRADE_HW_CACHE option in your kernel configuration file.
	 * This option eliminates unneeded cache flush instruction(s).
	 */
	if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
		case CPU_CY486DX:
			need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
			need_post_dma_flush = 1;
#endif
			break;
#endif
		default:
			break;
		}
	} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (strcmp(cpu_vendor, "IBM") == 0) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write allocate feature of AMD processors.
 * The following two functions require the Maxmem variable to be set.
 */
void
enable_K5_wt_alloc(void)
{
	u_int64_t	msr;
	register_t	savecrit;

	/*
	 * Write allocate is supported only on models 1, 2, and 3, with
	 * a stepping of 4 or greater.
	 */
	if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
		savecrit = intr_disable();
		msr = rdmsr(0x83);		/* HWCR */
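		/*
		 * Bit 4 of the K5 HWCR gates write allocate; keep it
		 * clear while the limits are reprogrammed and set it
		 * again below.
		 */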
		wrmsr(0x83, msr & ~0x10);

		/*
		 * We have to tell the chip where the top of memory is,
		 * since video cards could have frame buffers there,
		 * memory-mapped I/O could be there, etc.
		 */
		if (Maxmem > 0)
		  msr = Maxmem / 16;
		else
		  msr = 0;
		msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
#ifdef PC98
		if (!(inb(0x43b) & 4)) {
			wrmsr(0x86, 0x0ff00f0);
			msr |= AMD_WT_ALLOC_PRE;
		}
#else
		/*
		 * There is no way to know whether the 15-16M hole exists
		 * or not.  Therefore, we disable write allocate for this
		 * range.
		 */
			wrmsr(0x86, 0x0ff00f0);
			msr |= AMD_WT_ALLOC_PRE;
#endif
		wrmsr(0x85, msr);

		msr = rdmsr(0x83);
		wrmsr(0x83, msr | 0x10);	/* enable write allocate */
		intr_restore(savecrit);
	}
}

void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's
	 * operation.  The I/O Trap Restart function (bit 9 of TR12) is
	 * always enabled on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned to 4MB. */
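	/*
	 * Maxmem is in 4KB pages, so ">> 8" converts it to megabytes
	 * and the "+ 3 ... >> 2" rounds up to the 4MB units used by
	 * the write-allocate limit.
	 */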
	if (Maxmem > 0)
	  size = ((Maxmem >> 8) + 3) >> 2;
	else
	  size = 0;

	/* Limit is 508M bytes. */
	if (size > 0x7f)
		size = 0x7f;
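	/*
	 * As programmed here, WHCR (MSR 0xc0000082) on the original K6
	 * keeps the write-allocate limit in bits 1-7 (4MB units), with
	 * bit 0 covering the 15-16M range.
	 */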
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate
		 * for the 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~0x0001LL;
		else
#endif
			whcr |= 0x0001LL;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or
	 * not.  Therefore, we disable write allocate for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0x0c0000082, whcr);

	write_eflags(eflags);
}

void
enable_K6_2_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's
	 * operation.  The I/O Trap Restart function (bit 9 of TR12) is
	 * always enabled on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned to 4MB. */
	if (Maxmem > 0)
	  size = ((Maxmem >> 8) + 3) >> 2;
	else
	  size = 0;

	/* Limit is 4092M bytes. */
	if (size > 0x3ff)
		size = 0x3ff;
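	/*
	 * Later K6-2 and K6-III cores move the WHCR fields: the
	 * write-allocate limit sits in bits 22-31 (4MB units) and
	 * bit 16 covers the 15-16M range.
	 */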
	whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x3ffLL << 22)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate
		 * for the 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~(1LL << 16);
		else
#endif
			whcr |= 1LL << 16;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or
	 * not.  Therefore, we disable write allocate for this range.
	 */
	whcr &= ~(1LL << 16);
#endif
	wrmsr(0x0c0000082, whcr);

	write_eflags(eflags);
}
#endif /* I586_CPU && CPU_WT_ALLOC */

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

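/*
 * "show cyrixreg" ddb command: dump the Cyrix configuration registers
 * and CR0.
 */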
DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	u_long	eflags;
	u_int	cr0;
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
		eflags = read_eflags();
		disable_intr();

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);		/* Restore CCR3. */
		}
		write_eflags(eflags);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);

		printf("CCR1=%x, CCR2=%x, CCR3=%x",
			(u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				printf("PCR0=%x\n", pcr0);
			else
				printf("CCR5=%x\n", ccr5);
		}
	}
	printf("CR0=%x\n", cr0);
}
#endif /* DDB */