xref: /freebsd/sys/amd64/amd64/initcpu.c (revision 5521ff5a4d1929056e7ffc982fac3341ca54df7c)
1 /*
2  * Copyright (c) KATO Takenori, 1997, 1998.
3  *
4  * All rights reserved.  Unpublished rights reserved under the copyright
5  * laws of Japan.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer as
13  *    the first lines of this file unmodified.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 
32 #include "opt_cpu.h"
33 
34 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/systm.h>
37 
38 #include <machine/cputypes.h>
39 #include <machine/md_var.h>
40 #include <machine/specialreg.h>
41 
42 void initializecpu(void);
43 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
44 void	enable_K5_wt_alloc(void);
45 void	enable_K6_wt_alloc(void);
46 void	enable_K6_2_wt_alloc(void);
47 #endif
48 
49 #ifdef I486_CPU
50 static void init_5x86(void);
51 static void init_bluelightning(void);
52 static void init_486dlc(void);
53 static void init_cy486dx(void);
54 #ifdef CPU_I486_ON_386
55 static void init_i486_on_386(void);
56 #endif
57 static void init_6x86(void);
58 #endif /* I486_CPU */
59 
60 #ifdef I686_CPU
61 static void	init_6x86MX(void);
62 static void	init_ppro(void);
63 static void	init_mendocino(void);
64 #endif
65 
66 #ifdef I486_CPU
67 /*
68  * IBM Blue Lightning
69  */
70 static void
71 init_bluelightning(void)
72 {
73 	u_long	eflags;
74 
75 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
76 	need_post_dma_flush = 1;
77 #endif
78 
79 	eflags = read_eflags();
80 	disable_intr();
81 
82 	load_cr0(rcr0() | CR0_CD | CR0_NW);
83 	invd();
84 
85 #ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
86 	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
87 #else
88 	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
89 #endif
90 	/* Enables 13MB and 0-640KB cache. */
91 	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
92 #ifdef CPU_BLUELIGHTNING_3X
93 	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
94 #else
95 	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
96 #endif
97 
98 	/* Enable caching in CR0. */
99 	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
100 	invd();
101 	write_eflags(eflags);
102 }
103 
104 /*
105  * Cyrix 486SLC/DLC/SR/DR series
106  */
107 static void
108 init_486dlc(void)
109 {
110 	u_long	eflags;
111 	u_char	ccr0;
112 
113 	eflags = read_eflags();
114 	disable_intr();
115 	invd();
116 
117 	ccr0 = read_cyrix_reg(CCR0);
118 #ifndef CYRIX_CACHE_WORKS
119 	ccr0 |= CCR0_NC1 | CCR0_BARB;
120 	write_cyrix_reg(CCR0, ccr0);
121 	invd();
122 #else
123 	ccr0 &= ~CCR0_NC0;
124 #ifndef CYRIX_CACHE_REALLY_WORKS
125 	ccr0 |= CCR0_NC1 | CCR0_BARB;
126 #else
127 	ccr0 |= CCR0_NC1;
128 #endif
129 #ifdef CPU_DIRECT_MAPPED_CACHE
130 	ccr0 |= CCR0_CO;			/* Direct mapped mode. */
131 #endif
132 	write_cyrix_reg(CCR0, ccr0);
133 
134 	/* Clear non-cacheable region. */
135 	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
136 	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
137 	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
138 	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);
139 
140 	write_cyrix_reg(0, 0);	/* dummy write */
141 
142 	/* Enable caching in CR0. */
143 	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
144 	invd();
145 #endif /* !CYRIX_CACHE_WORKS */
146 	write_eflags(eflags);
147 }
148 
149 
150 /*
151  * Cyrix 486S/DX series
152  */
153 static void
154 init_cy486dx(void)
155 {
156 	u_long	eflags;
157 	u_char	ccr2;
158 
159 	eflags = read_eflags();
160 	disable_intr();
161 	invd();
162 
163 	ccr2 = read_cyrix_reg(CCR2);
164 #ifdef CPU_SUSP_HLT
165 	ccr2 |= CCR2_SUSP_HLT;
166 #endif
167 
168 #ifdef PC98
169 	/* Enables WB cache interface pin and Lock NW bit in CR0. */
170 	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
171 	/* Unlock NW bit in CR0. */
172 	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
173 	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
174 #endif
175 
176 	write_cyrix_reg(CCR2, ccr2);
177 	write_eflags(eflags);
178 }
179 
180 
181 /*
182  * Cyrix 5x86
183  */
184 static void
185 init_5x86(void)
186 {
187 	u_long	eflags;
188 	u_char	ccr2, ccr3, ccr4, pcr0;
189 
190 	eflags = read_eflags();
191 	disable_intr();
192 
193 	load_cr0(rcr0() | CR0_CD | CR0_NW);
194 	wbinvd();
195 
196 	(void)read_cyrix_reg(CCR3);		/* dummy */
197 
198 	/* Initialize CCR2. */
199 	ccr2 = read_cyrix_reg(CCR2);
200 	ccr2 |= CCR2_WB;
201 #ifdef CPU_SUSP_HLT
202 	ccr2 |= CCR2_SUSP_HLT;
203 #else
204 	ccr2 &= ~CCR2_SUSP_HLT;
205 #endif
206 	ccr2 |= CCR2_WT1;
207 	write_cyrix_reg(CCR2, ccr2);
208 
209 	/* Initialize CCR4. */
210 	ccr3 = read_cyrix_reg(CCR3);
211 	write_cyrix_reg(CCR3, CCR3_MAPEN0);
212 
213 	ccr4 = read_cyrix_reg(CCR4);
214 	ccr4 |= CCR4_DTE;
215 	ccr4 |= CCR4_MEM;
216 #ifdef CPU_FASTER_5X86_FPU
217 	ccr4 |= CCR4_FASTFPE;
218 #else
219 	ccr4 &= ~CCR4_FASTFPE;
220 #endif
221 	ccr4 &= ~CCR4_IOMASK;
222 	/********************************************************************
223 	 * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
224 	 * should be 0 for errata fix.
225 	 ********************************************************************/
226 #ifdef CPU_IORT
227 	ccr4 |= CPU_IORT & CCR4_IOMASK;
228 #endif
229 	write_cyrix_reg(CCR4, ccr4);
230 
231 	/* Initialize PCR0. */
232 	/****************************************************************
233 	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
234 	 * BTB_EN might make your system unstable.
235 	 ****************************************************************/
236 	pcr0 = read_cyrix_reg(PCR0);
237 #ifdef CPU_RSTK_EN
238 	pcr0 |= PCR0_RSTK;
239 #else
240 	pcr0 &= ~PCR0_RSTK;
241 #endif
242 #ifdef CPU_BTB_EN
243 	pcr0 |= PCR0_BTB;
244 #else
245 	pcr0 &= ~PCR0_BTB;
246 #endif
247 #ifdef CPU_LOOP_EN
248 	pcr0 |= PCR0_LOOP;
249 #else
250 	pcr0 &= ~PCR0_LOOP;
251 #endif
252 
253 	/****************************************************************
254 	 * WARNING: if you use a memory mapped I/O device, don't use
255 	 * DISABLE_5X86_LSSER option, which may reorder memory mapped
256 	 * I/O access.
257 	 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
258 	 ****************************************************************/
259 #ifdef CPU_DISABLE_5X86_LSSER
260 	pcr0 &= ~PCR0_LSSER;
261 #else
262 	pcr0 |= PCR0_LSSER;
263 #endif
264 	write_cyrix_reg(PCR0, pcr0);
265 
266 	/* Restore CCR3. */
267 	write_cyrix_reg(CCR3, ccr3);
268 
269 	(void)read_cyrix_reg(0x80);		/* dummy */
270 
271 	/* Unlock NW bit in CR0. */
272 	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
273 	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
274 	/* Lock NW bit in CR0. */
275 	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
276 
277 	write_eflags(eflags);
278 }
279 
280 #ifdef CPU_I486_ON_386
281 /*
282  * There are i486 based upgrade products for i386 machines.
283  * In this case, BIOS doesn't enables CPU cache.
284  */
285 void
286 init_i486_on_386(void)
287 {
288 	u_long	eflags;
289 
290 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
291 	need_post_dma_flush = 1;
292 #endif
293 
294 	eflags = read_eflags();
295 	disable_intr();
296 
297 	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */
298 
299 	write_eflags(eflags);
300 }
301 #endif
302 
303 /*
304  * Cyrix 6x86
305  *
306  * XXX - What should I do here?  Please let me know.
307  */
308 static void
309 init_6x86(void)
310 {
311 	u_long	eflags;
312 	u_char	ccr3, ccr4;
313 
314 	eflags = read_eflags();
315 	disable_intr();
316 
317 	load_cr0(rcr0() | CR0_CD | CR0_NW);
318 	wbinvd();
319 
320 	/* Initialize CCR0. */
321 	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);
322 
323 	/* Initialize CCR1. */
324 #ifdef CPU_CYRIX_NO_LOCK
325 	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
326 #else
327 	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
328 #endif
329 
330 	/* Initialize CCR2. */
331 #ifdef CPU_SUSP_HLT
332 	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
333 #else
334 	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
335 #endif
336 
337 	ccr3 = read_cyrix_reg(CCR3);
338 	write_cyrix_reg(CCR3, CCR3_MAPEN0);
339 
340 	/* Initialize CCR4. */
341 	ccr4 = read_cyrix_reg(CCR4);
342 	ccr4 |= CCR4_DTE;
343 	ccr4 &= ~CCR4_IOMASK;
344 #ifdef CPU_IORT
345 	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
346 #else
347 	write_cyrix_reg(CCR4, ccr4 | 7);
348 #endif
349 
350 	/* Initialize CCR5. */
351 #ifdef CPU_WT_ALLOC
352 	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
353 #endif
354 
355 	/* Restore CCR3. */
356 	write_cyrix_reg(CCR3, ccr3);
357 
358 	/* Unlock NW bit in CR0. */
359 	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
360 
361 	/*
362 	 * Earlier revision of the 6x86 CPU could crash the system if
363 	 * L1 cache is in write-back mode.
364 	 */
365 	if ((cyrix_did & 0xff00) > 0x1600)
366 		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
367 	else {
368 		/* Revision 2.6 and lower. */
369 #ifdef CYRIX_CACHE_REALLY_WORKS
370 		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
371 #else
372 		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
373 #endif
374 	}
375 
376 	/* Lock NW bit in CR0. */
377 	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
378 
379 	write_eflags(eflags);
380 }
381 #endif /* I486_CPU */
382 
383 #ifdef I686_CPU
384 /*
385  * Cyrix 6x86MX (code-named M2)
386  *
387  * XXX - What should I do here?  Please let me know.
388  */
389 static void
390 init_6x86MX(void)
391 {
392 	u_long	eflags;
393 	u_char	ccr3, ccr4;
394 
395 	eflags = read_eflags();
396 	disable_intr();
397 
398 	load_cr0(rcr0() | CR0_CD | CR0_NW);
399 	wbinvd();
400 
401 	/* Initialize CCR0. */
402 	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);
403 
404 	/* Initialize CCR1. */
405 #ifdef CPU_CYRIX_NO_LOCK
406 	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
407 #else
408 	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
409 #endif
410 
411 	/* Initialize CCR2. */
412 #ifdef CPU_SUSP_HLT
413 	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
414 #else
415 	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
416 #endif
417 
418 	ccr3 = read_cyrix_reg(CCR3);
419 	write_cyrix_reg(CCR3, CCR3_MAPEN0);
420 
421 	/* Initialize CCR4. */
422 	ccr4 = read_cyrix_reg(CCR4);
423 	ccr4 &= ~CCR4_IOMASK;
424 #ifdef CPU_IORT
425 	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
426 #else
427 	write_cyrix_reg(CCR4, ccr4 | 7);
428 #endif
429 
430 	/* Initialize CCR5. */
431 #ifdef CPU_WT_ALLOC
432 	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
433 #endif
434 
435 	/* Restore CCR3. */
436 	write_cyrix_reg(CCR3, ccr3);
437 
438 	/* Unlock NW bit in CR0. */
439 	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
440 
441 	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
442 
443 	/* Lock NW bit in CR0. */
444 	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);
445 
446 	write_eflags(eflags);
447 }
448 
static void
init_ppro(void)
{
#ifndef SMP
	u_int64_t	apicbase;

	/*
	 * Local APIC should be disabled in UP kernel.
	 * MSR 0x1b is the APIC base MSR; clearing bit 11 (0x800)
	 * clears the APIC global enable bit.
	 */
	apicbase = rdmsr(0x1b);
	apicbase &= ~0x800LL;
	wrmsr(0x1b, apicbase);
#endif
}
463 
464 /*
465  * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
466  * L2 cache).
467  */
468 void
469 init_mendocino(void)
470 {
471 #ifdef CPU_PPRO2CELERON
472 	u_long	eflags;
473 	u_int64_t	bbl_cr_ctl3;
474 
475 	eflags = read_eflags();
476 	disable_intr();
477 
478 	load_cr0(rcr0() | CR0_CD | CR0_NW);
479 	wbinvd();
480 
481 	bbl_cr_ctl3 = rdmsr(0x11e);
482 
483 	/* If the L2 cache is configured, do nothing. */
484 	if (!(bbl_cr_ctl3 & 1)) {
485 		bbl_cr_ctl3 = 0x134052bLL;
486 
487 		/* Set L2 Cache Latency (Default: 5). */
488 #ifdef	CPU_CELERON_L2_LATENCY
489 #if CPU_L2_LATENCY > 15
490 #error invalid CPU_L2_LATENCY.
491 #endif
492 		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
493 #else
494 		bbl_cr_ctl3 |= 5 << 1;
495 #endif
496 		wrmsr(0x11e, bbl_cr_ctl3);
497 	}
498 
499 	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
500 	write_eflags(eflags);
501 #endif /* CPU_PPRO2CELERON */
502 }
503 
504 #endif /* I686_CPU */
505 
/*
 * Dispatch to the per-CPU-type initialization routine selected by the
 * global `cpu' identifier, then (on PC-98 without hardware cache
 * control) decide which DMA cache-flush workarounds are required.
 */
void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		/* Intel family 6: model selects PPro vs. Mendocino setup. */
		if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		}
		break;
#endif
	default:
		break;
	}

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * OS should flush L1 cache by itself because no PC-98 supports
	 * non-Intel CPUs.  Use wbinvd instruction before DMA transfer
	 * when need_pre_dma_flush = 1, use invd instruction after DMA
	 * transfer when need_post_dma_flush = 1.  If your CPU upgrade
	 * product supports hardware cache control, you can add the
	 * CPU_UPGRADE_HW_CACHE option in your kernel configuration file.
	 * This option eliminates unneeded cache flush instruction(s).
	 */
	if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
		case CPU_CY486DX:
			need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
			need_post_dma_flush = 1;
#endif
			break;
#endif
		default:
			break;
		}
	} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (strcmp(cpu_vendor, "IBM") == 0) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}
600 
601 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
602 /*
603  * Enable write allocate feature of AMD processors.
604  * Following two functions require the Maxmem variable being set.
605  */
606 void
607 enable_K5_wt_alloc(void)
608 {
609 	u_int64_t	msr;
610 	critical_t	savecrit;
611 
612 	/*
613 	 * Write allocate is supported only on models 1, 2, and 3, with
614 	 * a stepping of 4 or greater.
615 	 */
616 	if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
617 		savecrit = critical_enter();
618 		msr = rdmsr(0x83);		/* HWCR */
619 		wrmsr(0x83, msr & !(0x10));
620 
621 		/*
622 		 * We have to tell the chip where the top of memory is,
623 		 * since video cards could have frame bufferes there,
624 		 * memory-mapped I/O could be there, etc.
625 		 */
626 		if(Maxmem > 0)
627 		  msr = Maxmem / 16;
628 		else
629 		  msr = 0;
630 		msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
631 #ifdef PC98
632 		if (!(inb(0x43b) & 4)) {
633 			wrmsr(0x86, 0x0ff00f0);
634 			msr |= AMD_WT_ALLOC_PRE;
635 		}
636 #else
637 		/*
638 		 * There is no way to know wheter 15-16M hole exists or not.
639 		 * Therefore, we disable write allocate for this range.
640 		 */
641 			wrmsr(0x86, 0x0ff00f0);
642 			msr |= AMD_WT_ALLOC_PRE;
643 #endif
644 		wrmsr(0x85, msr);
645 
646 		msr=rdmsr(0x83);
647 		wrmsr(0x83, msr|0x10); /* enable write allocate */
648 
649 		critical_exit(savecrit);
650 	}
651 }
652 
/*
 * Enable the write allocate feature of the AMD K6 via the WHCR MSR
 * (0xc0000082): the memory size in 4M units goes into bits 7..1, and
 * bit 0 controls write allocation for the 15-16M hole.  Requires
 * Maxmem to be set; runs with interrupts disabled.
 */
void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 box becomes unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12(TR12),
	 * but only the Cache Inhibit(CI) (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
	if (Maxmem > 0)
	  size = ((Maxmem >> 8) + 3) >> 2;
	else
	  size = 0;

	/* Limit is 508M bytes. */
	if (size > 0x7f)
		size = 0x7f;
	/* Merge the new 4M-unit count into WHCR bits 7..1. */
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for the
		 * 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~0x0001LL;
		else
#endif
			whcr |=  0x0001LL;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0x0c0000082, whcr);

	write_eflags(eflags);
}
713 
714 void
715 enable_K6_2_wt_alloc(void)
716 {
717 	quad_t	size;
718 	u_int64_t	whcr;
719 	u_long	eflags;
720 
721 	eflags = read_eflags();
722 	disable_intr();
723 	wbinvd();
724 
725 #ifdef CPU_DISABLE_CACHE
726 	/*
727 	 * Certain K6-2 box becomes unstable when write allocation is
728 	 * enabled.
729 	 */
730 	/*
731 	 * The AMD-K6 processer provides the 64-bit Test Register 12(TR12),
732 	 * but only the Cache Inhibit(CI) (bit 3 of TR12) is suppported.
733 	 * All other bits in TR12 have no effect on the processer's operation.
734 	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
735 	 * on the AMD-K6.
736 	 */
737 	wrmsr(0x0000000e, (u_int64_t)0x0008);
738 #endif
739 	/* Don't assume that memory size is aligned with 4M. */
740 	if (Maxmem > 0)
741 	  size = ((Maxmem >> 8) + 3) >> 2;
742 	else
743 	  size = 0;
744 
745 	/* Limit is 4092M bytes. */
746 	if (size > 0x3fff)
747 		size = 0x3ff;
748 	whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
749 
750 #if defined(PC98) || defined(NO_MEMORY_HOLE)
751 	if (whcr & (0x3ffLL << 22)) {
752 #ifdef PC98
753 		/*
754 		 * If bit 2 of port 0x43b is 0, disable wrte allocate for the
755 		 * 15-16M range.
756 		 */
757 		if (!(inb(0x43b) & 4))
758 			whcr &= ~(1LL << 16);
759 		else
760 #endif
761 			whcr |=  1LL << 16;
762 	}
763 #else
764 	/*
765 	 * There is no way to know wheter 15-16M hole exists or not.
766 	 * Therefore, we disable write allocate for this range.
767 	 */
768 	whcr &= ~(1LL << 16);
769 #endif
770 	wrmsr(0x0c0000082, whcr);
771 
772 	write_eflags(eflags);
773 }
#endif /* I586_CPU && CPU_WT_ALLOC */
775 
776 #include "opt_ddb.h"
777 #ifdef DDB
778 #include <ddb/ddb.h>
779 
/*
 * DDB command: dump the Cyrix configuration registers and CR0.
 * CCR4, CCR5, and PCR0 exist only behind the CCR3 MAPEN0 gate and
 * only on certain models, so they are read (and printed) selectively.
 */
DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	u_long	eflags;
	u_int	cr0;
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (strcmp(cpu_vendor,"CyrixInstead") == 0) {
		eflags = read_eflags();
		disable_intr();


		/* CCR0 is not present on the 5x86 (M1SC) or 486DX parts. */
		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			/* Set MAPEN0 to reach CCR4, then CCR5 or PCR0. */
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);		/* Restore CCR3. */
		}
		write_eflags(eflags);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);

		printf("CCR1=%x, CCR2=%x, CCR3=%x",
			(u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				printf("PCR0=%x\n", pcr0);
			else
				printf("CCR5=%x\n", ccr5);
		}
	}
	printf("CR0=%x\n", cr0);
}
825 #endif /* DDB */
826