xref: /freebsd/sys/amd64/amd64/initcpu.c (revision b52b9d56d4e96089873a75f9e29062eec19fabba)
/*
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

void initializecpu(void);
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
void	enable_K5_wt_alloc(void);
void	enable_K6_wt_alloc(void);
void	enable_K6_2_wt_alloc(void);
#endif

#ifdef I486_CPU
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#ifdef I686_CPU
static void	init_6x86MX(void);
static void	init_ppro(void);
static void	init_mendocino(void);
#endif
void	enable_sse(void);

int	hw_instruction_sse = 0;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
	   &hw_instruction_sse, 0,
	   "SIMD/MMX2 instructions available in CPU");

/* Must *NOT* be BSS or locore will bzero these after setting them */
int	cpu = 0;		/* Are we 386, 386sx, 486, etc? */
u_int	cpu_id = 0;		/* Stepping ID */
u_int	cpu_feature = 0;	/* Feature flags */
u_int	cpu_high = 0;		/* Highest arg to CPUID */
#ifdef CPU_ENABLE_SSE
u_int	cpu_fxsr = 0;		/* SSE enabled */
#endif
char	cpu_vendor[20] = "";	/* CPU Origin code */

#ifdef I486_CPU
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	write_eflags(eflags);
}

/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
	u_long	eflags;
	u_char	ccr0;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;			/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	write_eflags(eflags);
}


/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	u_long	eflags;
	u_char	ccr2;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
	/* Enables WB cache interface pin and Lock NW bit in CR0. */
	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
#endif

	write_cyrix_reg(CCR2, ccr2);
	write_eflags(eflags);
}


/*
 * Cyrix 5x86
 */
static void
init_5x86(void)
{
	u_long	eflags;
	u_char	ccr2, ccr3, ccr4, pcr0;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	(void)read_cyrix_reg(CCR3);		/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#else
	ccr2 &= ~CCR2_SUSP_HLT;
#endif
	ccr2 |= CCR2_WT1;
	write_cyrix_reg(CCR2, ccr2);

	/* Initialize CCR4. */
	ccr3 = read_cyrix_reg(CCR3);
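	/*
	 * Setting MAPEN0 in CCR3 maps in the extended configuration
	 * registers (CCR4 and PCR0 used below); the saved CCR3 value
	 * is restored once they have been programmed.
	 */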
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
#else
	ccr4 &= ~CCR4_FASTFPE;
#endif
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that the I/O recovery
	 * time should be 0 as an errata workaround.
	 ********************************************************************/
#ifdef CPU_IORT
	ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
	pcr0 |= PCR0_RSTK;
#else
	pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
	pcr0 |= PCR0_BTB;
#else
	pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
	pcr0 |= PCR0_LOOP;
#else
	pcr0 &= ~PCR0_LOOP;
#endif

	/****************************************************************
	 * WARNING: if you use a memory-mapped I/O device, don't use the
	 * CPU_DISABLE_5X86_LSSER option, which may reorder memory-mapped
	 * I/O accesses.
	 * IF YOUR MOTHERBOARD HAS A PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	pcr0 &= ~PCR0_LSSER;
#else
	pcr0 |= PCR0_LSSER;
#endif
	write_cyrix_reg(PCR0, pcr0);

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	(void)read_cyrix_reg(0x80);		/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}

#ifdef CPU_I486_ON_386
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
void
init_i486_on_386(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	write_eflags(eflags);
}
#endif

/*
 * Cyrix 6x86
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/*
	 * Earlier revisions of the 6x86 CPU could crash the system if the
	 * L1 cache is in write-back mode.
	 */
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	else {
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#else
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
#endif
	}

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
#endif /* I486_CPU */

#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}

static void
init_ppro(void)
{
#ifndef SMP
	u_int64_t	apicbase;

	/*
	 * The local APIC should be disabled in a UP kernel.
	 */
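	/*
	 * MSR 0x1b is IA32_APIC_BASE; bit 11 is the APIC global enable
	 * bit, so clearing it switches the local APIC off.
	 */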
	apicbase = rdmsr(0x1b);
	apicbase &= ~0x800LL;
	wrmsr(0x1b, apicbase);
#endif
}

/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	u_long	eflags;
	u_int64_t	bbl_cr_ctl3;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(0x11e);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef	CPU_CELERON_L2_LATENCY
#if CPU_CELERON_L2_LATENCY > 15
#error invalid CPU_CELERON_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_CELERON_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(0x11e, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	write_eflags(eflags);
#endif /* CPU_PPRO2CELERON */
}

#endif /* I686_CPU */

/*
 * Initialize CR4 (Control register 4) to enable SSE instructions.
 */
void
enable_sse(void)
{
#if defined(CPU_ENABLE_SSE)
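	/*
	 * CR4_FXSR (CR4.OSFXSR) tells the CPU that the OS saves FPU/SSE
	 * state with fxsave/fxrstor, and CR4_XMM (CR4.OSXMMEXCPT) that
	 * it handles SIMD floating-point exceptions; both must be set
	 * before SSE instructions can be executed.
	 */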
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#endif
}

void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
#if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE.  These correspond to cpu_id 0x66X
			 * or 0x67X.
			 */
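			/*
			 * MSR 0xC0010015 is the AMD K7 HWCR register;
			 * clearing the bit below unhides the SSE feature
			 * flag, which is then re-read via CPUID.
			 */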
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670)) {
				u_int regs[4];
				wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
#endif
		}
		break;
#endif
	default:
		break;
	}
	enable_sse();

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * The OS should flush the L1 cache by itself because no PC-98
	 * machine supports non-Intel CPUs.  Use the wbinvd instruction
	 * before a DMA transfer when need_pre_dma_flush = 1, and the
	 * invd instruction after a DMA transfer when
	 * need_post_dma_flush = 1.  If your CPU upgrade product supports
	 * hardware cache control, you can add the CPU_UPGRADE_HW_CACHE
	 * option to your kernel configuration file.  This option
	 * eliminates unneeded cache flush instruction(s).
	 */
	if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
		case CPU_CY486DX:
			need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
			need_post_dma_flush = 1;
#endif
			break;
#endif
		default:
			break;
		}
	} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (strcmp(cpu_vendor, "IBM") == 0) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write allocate feature of AMD processors.
 * The following two functions require the Maxmem variable to be set.
 */
void
enable_K5_wt_alloc(void)
{
	u_int64_t	msr;
	register_t	savecrit;

	/*
	 * Write allocate is supported only on models 1, 2, and 3, with
	 * a stepping of 4 or greater.
	 */
	if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
		savecrit = intr_disable();
		msr = rdmsr(0x83);		/* HWCR */
		wrmsr(0x83, msr & ~0x10);	/* clear the write allocate enable bit */

		/*
		 * We have to tell the chip where the top of memory is,
		 * since video cards could have frame buffers there,
		 * memory-mapped I/O could be there, etc.
		 */
		if (Maxmem > 0)
			msr = Maxmem / 16;
		else
			msr = 0;
		msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
#ifdef PC98
		if (!(inb(0x43b) & 4)) {
			wrmsr(0x86, 0x0ff00f0);
			msr |= AMD_WT_ALLOC_PRE;
		}
#else
		/*
		 * There is no way to know whether the 15-16M hole exists or not.
		 * Therefore, we disable write allocate for this range.
		 */
		wrmsr(0x86, 0x0ff00f0);
		msr |= AMD_WT_ALLOC_PRE;
#endif
		wrmsr(0x85, msr);

		msr = rdmsr(0x83);
		wrmsr(0x83, msr | 0x10);	/* enable write allocate */
		intr_restore(savecrit);
	}
}

void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

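	/*
	 * Per the mask used below, WHCR (MSR 0xc0000082) on the AMD-K6
	 * keeps the write allocate limit as a count of 4M pages in bits
	 * 1-7, so the limit tops out at 0x7f * 4M = 508M.
	 */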
	/* Limit is 508M bytes. */
	if (size > 0x7f)
		size = 0x7f;
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for the
		 * 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~0x0001LL;
		else
#endif
			whcr |= 0x0001LL;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0xc0000082, whcr);

	write_eflags(eflags);
}

void
enable_K6_2_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

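	/*
	 * The K6-2 CXT and later cores use a different WHCR layout: per
	 * the masks used below, the write allocate limit sits in bits
	 * 22-31 (again in 4M pages, hence the 4092M maximum) and bit 16
	 * controls write allocation for the 15-16M hole.
	 */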
	/* Limit is 4092M bytes. */
	if (size > 0x3ff)
		size = 0x3ff;
	whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x3ffLL << 22)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for the
		 * 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~(1LL << 16);
		else
#endif
			whcr |= 1LL << 16;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~(1LL << 16);
#endif
	wrmsr(0xc0000082, whcr);

	write_eflags(eflags);
}
#endif /* I586_CPU && CPU_WT_ALLOC */

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	u_long	eflags;
	u_int	cr0;
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
		eflags = read_eflags();
		disable_intr();

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);		/* Restore CCR3. */
		}
		write_eflags(eflags);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);

		printf("CCR1=%x, CCR2=%x, CCR3=%x",
			(u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				printf("PCR0=%x\n", pcr0);
			else
				printf("CCR5=%x\n", ccr5);
		}
	}
	printf("CR0=%x\n", cr0);
}
#endif /* DDB */