xref: /freebsd/sys/amd64/amd64/initcpu.c (revision b601c69bdbe8755d26570261d7fd4c02ee4eff74)
/*
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

void initializecpu(void);
#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
void	enable_K5_wt_alloc(void);
void	enable_K6_wt_alloc(void);
void	enable_K6_2_wt_alloc(void);
#endif

#ifdef I486_CPU
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#ifdef I686_CPU
static void	init_6x86MX(void);
static void	init_ppro(void);
static void	init_mendocino(void);
#endif

#ifdef I486_CPU
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

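	/* Disable caching (CD = 1, NW = 1) and invalidate the on-chip cache before reprogramming it. */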
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	write_eflags(eflags);
}

/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
	u_long	eflags;
	u_char	ccr0;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
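	/* NC1 makes the 640K-1M region non-cacheable; BARB flushes the internal cache on bus hold. */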
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;			/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	write_eflags(eflags);
}


/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	u_long	eflags;
	u_char	ccr2;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif
	write_cyrix_reg(CCR2, ccr2);
	write_eflags(eflags);
}


/*
 * Cyrix 5x86
 */
static void
init_5x86(void)
{
	u_long	eflags;
	u_char	ccr2, ccr3, ccr4, pcr0;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	(void)read_cyrix_reg(CCR3);		/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#else
	ccr2 &= ~CCR2_SUSP_HLT;
#endif
	ccr2 |= CCR2_WT1;
	write_cyrix_reg(CCR2, ccr2);

	/* Initialize CCR4. */
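	/* Setting MAPEN0 in CCR3 exposes the extended configuration registers (CCR4, PCR0). */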
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
#else
	ccr4 &= ~CCR4_FASTFPE;
#endif
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that the I/O recovery
	 * time should be 0 as an errata fix.
	 ********************************************************************/
#ifdef CPU_IORT
	ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: CPU_RSTK_EN and CPU_LOOP_EN could make your system
	 * unstable.  CPU_BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
	pcr0 |= PCR0_RSTK;
#else
	pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
	pcr0 |= PCR0_BTB;
#else
	pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
	pcr0 |= PCR0_LOOP;
#else
	pcr0 &= ~PCR0_LOOP;
#endif

	/****************************************************************
	 * WARNING: if you use a memory-mapped I/O device, don't use the
	 * CPU_DISABLE_5X86_LSSER option, which may reorder memory-mapped
	 * I/O accesses.
	 * IF YOUR MOTHERBOARD HAS A PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	pcr0 &= ~PCR0_LSSER;
#else
	pcr0 |= PCR0_LSSER;
#endif
	write_cyrix_reg(PCR0, pcr0);

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	(void)read_cyrix_reg(0x80);		/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}

#ifdef CPU_I486_ON_386
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	write_eflags(eflags);
}
#endif

/*
 * Cyrix 6x86
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/*
	 * Earlier revisions of the 6x86 CPU could crash the system if
	 * the L1 cache is in write-back mode.
	 */
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	else {
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#else
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
#endif
	}

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
#endif /* I486_CPU */

#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}

static void
init_ppro(void)
{
#ifndef SMP
	u_int64_t	apicbase;

	/*
	 * The local APIC should be disabled in a UP kernel.
	 */
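	/* Bit 11 of the APIC base MSR (0x1b) is the APIC global enable bit. */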
	apicbase = rdmsr(0x1b);
	apicbase &= ~0x800LL;
	wrmsr(0x1b, apicbase);
#endif
}

/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	u_long	eflags;
	u_int64_t	bbl_cr_ctl3;

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(0x11e);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef	CPU_CELERON_L2_LATENCY
#if CPU_CELERON_L2_LATENCY > 15
#error invalid CPU_CELERON_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_CELERON_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(0x11e, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	write_eflags(eflags);
#endif /* CPU_PPRO2CELERON */
}

#endif /* I686_CPU */

void
initializecpu(void)
{

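	/* cpu, cpu_id and cpu_vendor are set earlier by the CPU identification code. */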
	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		}
		break;
#endif
	default:
		break;
	}

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * The OS should flush the L1 cache by itself because no PC-98
	 * machine supports non-Intel CPUs.  Use the wbinvd instruction
	 * before a DMA transfer when need_pre_dma_flush = 1, and the invd
	 * instruction after a DMA transfer when need_post_dma_flush = 1.
	 * If your CPU upgrade product supports hardware cache control, you
	 * can add the CPU_UPGRADE_HW_CACHE option to your kernel
	 * configuration file.  This option eliminates unneeded cache flush
	 * instructions.
	 */
	if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
#endif
		default:
			break;
		}
	} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (strcmp(cpu_vendor, "IBM") == 0) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write allocate feature of AMD processors.
 * The following functions require the Maxmem variable to be set.
 */
void
enable_K5_wt_alloc(void)
{
	u_int64_t	msr;

	/*
	 * Write allocate is supported only on models 1, 2, and 3, with
	 * a stepping of 4 or greater.
	 */
	if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
		disable_intr();
		msr = rdmsr(0x83);		/* HWCR */
		wrmsr(0x83, msr & ~0x10);

		/*
		 * We have to tell the chip where the top of memory is,
		 * since video cards could have frame buffers there,
		 * memory-mapped I/O could be there, etc.
		 */
		if (Maxmem > 0)
			msr = Maxmem / 16;
		else
			msr = 0;
		msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
#ifdef PC98
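		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for
		 * the 15-16M range.
		 */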
		if (!(inb(0x43b) & 4)) {
			wrmsr(0x86, 0x0ff00f0);
			msr |= AMD_WT_ALLOC_PRE;
		}
#else
		/*
		 * There is no way to know whether the 15-16M hole exists
		 * or not.  Therefore, we disable write allocate for this
		 * range.
		 */
		wrmsr(0x86, 0x0ff00f0);
		msr |= AMD_WT_ALLOC_PRE;
#endif
		wrmsr(0x85, msr);

		msr = rdmsr(0x83);
		wrmsr(0x83, msr | 0x10); /* Enable write allocate. */

		enable_intr();
	}
}

void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
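	/*
	 * Maxmem is in 4 KB pages: ">> 8" converts it to MB, and the final
	 * rounded ">> 2" gives the size in 4 MB units (e.g. 64 MB of RAM ->
	 * Maxmem = 16384 -> size = 16).
	 */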
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 508M bytes. */
	if (size > 0x7f)
		size = 0x7f;
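	/* WHCR (MSR 0xc0000082): bits 7:1 hold the write allocate limit in 4 MB units. */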
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for
		 * the 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~0x0001LL;
		else
#endif
			whcr |= 0x0001LL;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0xc0000082, whcr);

	write_eflags(eflags);
	enable_intr();
}

void
enable_K6_2_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
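	/* As above: convert Maxmem (4 KB pages) to 4 MB units, rounding up. */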
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 4092M bytes. */
	if (size > 0x3ff)
		size = 0x3ff;
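	/* Here WHCR keeps the write allocate limit in bits 31:22, again in 4 MB units. */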
	whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x3ffLL << 22)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for
		 * the 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~(1LL << 16);
		else
#endif
			whcr |= 1LL << 16;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~(1LL << 16);
#endif
	wrmsr(0xc0000082, whcr);

	write_eflags(eflags);
	enable_intr();
}
#endif /* I586_CPU && CPU_WT_ALLOC */

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	u_long	eflags;
	u_int	cr0;
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
		eflags = read_eflags();
		disable_intr();

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
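		/* CCR4, CCR5 and PCR0 are accessed with MAPEN0 set in CCR3, as in the init routines above. */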
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);		/* Restore CCR3. */
		}
		write_eflags(eflags);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);

		printf("CCR1=%x, CCR2=%x, CCR3=%x",
			(u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				printf("PCR0=%x\n", (u_int)pcr0);
			else
				printf("CCR5=%x\n", (u_int)ccr5);
		}
	}
	printf("CR0=%x\n", cr0);
}
#endif /* DDB */