xref: /freebsd/sys/amd64/amd64/initcpu.c (revision daf1cffce2e07931f27c6c6998652e90df6ba87e)
1 /*
2  * Copyright (c) KATO Takenori, 1997, 1998.
3  *
4  * All rights reserved.  Unpublished rights reserved under the copyright
5  * laws of Japan.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer as
13  *    the first lines of this file unmodified.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 
32 #include "opt_cpu.h"
33 
34 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/systm.h>
37 
38 #include <machine/cputypes.h>
39 #include <machine/md_var.h>
40 #include <machine/specialreg.h>
41 
42 void initializecpu(void);
43 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
44 void	enable_K5_wt_alloc(void);
45 void	enable_K6_wt_alloc(void);
46 void	enable_K6_2_wt_alloc(void);
47 #endif
48 
49 #ifdef I486_CPU
50 static void init_5x86(void);
51 static void init_bluelightning(void);
52 static void init_486dlc(void);
53 static void init_cy486dx(void);
54 #ifdef CPU_I486_ON_386
55 static void init_i486_on_386(void);
56 #endif
57 static void init_6x86(void);
58 #endif /* I486_CPU */
59 
60 #ifdef I686_CPU
61 static void	init_6x86MX(void);
62 static void	init_ppro(void);
63 #endif
64 
65 #ifdef I486_CPU
/*
 * IBM Blue Lightning
 *
 * Program the Blue Lightning's model-specific registers to enable the
 * on-chip cache and select the clock-multiplier mode, then re-enable
 * caching in CR0.  Interrupts are disabled around the whole sequence
 * and the caller's eflags are restored on exit.
 */
static void
init_bluelightning(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * PC-98: the kernel must flush the L1 cache itself after DMA
	 * transfers (see the comment in initializecpu()).
	 */
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	/* Disable caching (CD=1, NW=1) while the MSRs are reprogrammed. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	write_eflags(eflags);
}
102 
/*
 * Cyrix 486SLC/DLC/SR/DR series
 *
 * Configure the Cyrix configuration register CCR0 and, when the cache
 * is trusted (CYRIX_CACHE_WORKS), clear the four non-cacheable regions
 * and re-enable caching in CR0.  Without CYRIX_CACHE_WORKS the cache
 * stays disabled via CR0 (CD/NW are never cleared here).
 */
static void
init_486dlc(void)
{
	u_long	eflags;
	u_char	ccr0;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	/* NOTE(review): NC1/BARB presumably work around cache-coherency
	 * problems on these parts -- confirm against Cyrix CCR0 docs. */
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;			/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	write_eflags(eflags);
}
147 
148 
/*
 * Cyrix 486S/DX series
 *
 * Optionally set the "suspend on HLT" bit (CCR2_SUSP_HLT) so the CPU
 * enters its low-power suspend state when the kernel executes HLT.
 * With CPU_SUSP_HLT unset this rewrites CCR2 unchanged.
 */
static void
init_cy486dx(void)
{
	u_long	eflags;
	u_char	ccr2;

	eflags = read_eflags();
	disable_intr();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif
	write_cyrix_reg(CCR2, ccr2);
	write_eflags(eflags);
}
169 
170 
/*
 * Cyrix 5x86
 *
 * Program the Cyrix configuration registers (CCR2, CCR4, PCR0) for the
 * 5x86.  CCR4 and PCR0 are only accessible while CCR3_MAPEN0 is set in
 * CCR3; CCR3 is saved before the window is opened and restored after.
 * Finishes by setting CR0 to CD=0/NW=1 with the NW bit locked via
 * CCR2_LOCK_NW.
 */
static void
init_5x86(void)
{
	u_long	eflags;
	u_char	ccr2, ccr3, ccr4, pcr0;

	eflags = read_eflags();
	disable_intr();

	/* Disable caching (CD=1, NW=1) while the registers are changed. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	(void)read_cyrix_reg(CCR3);		/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#else
	ccr2 &= ~CCR2_SUSP_HLT;
#endif
	ccr2 |= CCR2_WT1;
	write_cyrix_reg(CCR2, ccr2);

	/* Initialize CCR4.  MAPEN0 unlocks access to CCR4/PCR0. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
#else
	ccr4 &= ~CCR4_FASTFPE;
#endif
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
	 * should be 0 for errata fix.
	 ********************************************************************/
#ifdef CPU_IORT
	ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
	pcr0 |= PCR0_RSTK;
#else
	pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
	pcr0 |= PCR0_BTB;
#else
	pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
	pcr0 |= PCR0_LOOP;
#else
	pcr0 &= ~PCR0_LOOP;
#endif

	/****************************************************************
	 * WARNING: if you use a memory mapped I/O device, don't use
	 * DISABLE_5X86_LSSER option, which may reorder memory mapped
	 * I/O access.
	 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	pcr0 &= ~PCR0_LSSER;
#else
	pcr0 |= PCR0_LSSER;
#endif
	write_cyrix_reg(PCR0, pcr0);

	/* Restore CCR3 (closes the MAPEN0 window again). */
	write_cyrix_reg(CCR3, ccr3);

	(void)read_cyrix_reg(0x80);		/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
269 
#ifdef CPU_I486_ON_386
/*
 * There are i486 based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache, so turn it on
 * here by clearing CD and NW in CR0.
 *
 * Note: declared `static' in the prototype above; the definition must
 * agree, so it is marked static here as well.
 */
static void
init_i486_on_386(void)
{
	u_long	eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/* PC-98 needs an explicit cache flush after DMA transfers. */
	need_post_dma_flush = 1;
#endif

	eflags = read_eflags();
	disable_intr();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	write_eflags(eflags);
}
#endif
292 
/*
 * Cyrix 6x86
 *
 * Program CCR0-CCR5 for the 6x86 (M1).  CCR4/CCR5 are only accessible
 * while CCR3_MAPEN0 is set; CCR3 is saved and restored around that
 * window.  Because early 6x86 revisions could crash with the L1 cache
 * in write-back mode, CR0.NW is left set (write-through) on revisions
 * at or below 2.6 unless CYRIX_CACHE_REALLY_WORKS is defined.
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	/* Disable caching (CD=1, NW=1) while the registers are changed. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	/* Open the MAPEN0 window for CCR4/CCR5 access. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	/* Default I/O recovery time field to 7 when not configured. */
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/*
	 * Earlier revision of the 6x86 CPU could crash the system if
	 * L1 cache is in write-back mode.
	 */
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	else {
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#else
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
#endif
	}

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
371 #endif /* I486_CPU */
372 
373 #ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * Same structure as init_6x86(): program CCR0-CCR5, using the
 * CCR3_MAPEN0 window for CCR4/CCR5, then enable caching (CD=0, NW=0)
 * with the NW bit locked.  Unlike the 6x86, no write-back cache
 * revision workaround is applied here.
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
	u_long	eflags;
	u_char	ccr3, ccr4;

	eflags = read_eflags();
	disable_intr();

	/* Disable caching (CD=1, NW=1) while the registers are changed. */
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	/* Open the MAPEN0 window for CCR4/CCR5 access. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	/* Default I/O recovery time field to 7 when not configured. */
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	write_eflags(eflags);
}
438 
/*
 * Pentium Pro: in a uniprocessor kernel, turn off the local APIC by
 * clearing the APIC global enable bit in the APIC base MSR (0x1b).
 */
static void
init_ppro(void)
{
#ifndef SMP
	u_int64_t	apicbase;

	/*
	 * Local APIC should be disabled in UP kernel.
	 */
	apicbase = rdmsr(0x1b);		/* IA32_APIC_BASE MSR */
	apicbase &= ~0x800LL;		/* clear APIC global enable (bit 11) */
	wrmsr(0x1b, apicbase);
#endif
}
453 #endif /* I686_CPU */
454 
/*
 * Dispatch to the per-CPU-class initialization routine selected by the
 * global `cpu' variable, then (PC-98 only, without hardware cache
 * control) set the pre/post DMA cache-flush flags based on the CPU
 * vendor and model.
 */
void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		/* Pentium Pro is family 6, model 1 (cpu_id 0x61x). */
		if (strcmp(cpu_vendor, "GenuineIntel") == 0 &&
			(cpu_id & 0xff0) == 0x610)
			init_ppro();
		break;
#endif
	default:
		break;
	}

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * The OS should flush the L1 cache by itself because no PC-98
	 * supports non-Intel CPUs.  Use the wbinvd instruction before DMA
	 * transfer when need_pre_dma_flush = 1, use the invd instruction
	 * after DMA transfer when need_post_dma_flush = 1.  If your CPU
	 * upgrade product supports hardware cache control, you can add
	 * the CPU_UPGRADE_HW_CACHE option to your kernel configuration
	 * file.  This option eliminates unneeded cache flush instructions.
	 */
	if (strcmp(cpu_vendor, "CyrixInstead") == 0) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
#endif
		default:
			break;
		}
	} else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (strcmp(cpu_vendor, "IBM") == 0) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !UPGRADE_CPU_HW_CACHE */
}
536 
537 #if defined(I586_CPU) && defined(CPU_WT_ALLOC)
538 /*
539  * Enable write allocate feature of AMD processors.
540  * Following two functions require the Maxmem variable being set.
541  */
542 void
543 enable_K5_wt_alloc(void)
544 {
545 	u_int64_t	msr;
546 
547 	/*
548 	 * Write allocate is supported only on models 1, 2, and 3, with
549 	 * a stepping of 4 or greater.
550 	 */
551 	if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
552 		disable_intr();
553 		msr = rdmsr(0x83);		/* HWCR */
554 		wrmsr(0x83, msr & !(0x10));
555 
556 		/*
557 		 * We have to tell the chip where the top of memory is,
558 		 * since video cards could have frame bufferes there,
559 		 * memory-mapped I/O could be there, etc.
560 		 */
561 		if(Maxmem > 0)
562 		  msr = Maxmem / 16;
563 		else
564 		  msr = 0;
565 		msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;
566 #ifdef PC98
567 		if (!(inb(0x43b) & 4)) {
568 			wrmsr(0x86, 0x0ff00f0);
569 			msr |= AMD_WT_ALLOC_PRE;
570 		}
571 #else
572 		/*
573 		 * There is no way to know wheter 15-16M hole exists or not.
574 		 * Therefore, we disable write allocate for this range.
575 		 */
576 			wrmsr(0x86, 0x0ff00f0);
577 			msr |= AMD_WT_ALLOC_PRE;
578 #endif
579 		wrmsr(0x85, msr);
580 
581 		msr=rdmsr(0x83);
582 		wrmsr(0x83, msr|0x10); /* enable write allocate */
583 
584 		enable_intr();
585 	}
586 }
587 
/*
 * Enable the write allocate feature of the AMD K6 by programming the
 * WHCR MSR (0xc0000082): the write-allocate size field (bits 1-7, in
 * 4M units, 508M max) is derived from Maxmem, and bit 0 controls write
 * allocation for the 15-16M ISA hole.  Requires Maxmem to be set.
 */
void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	u_long	eflags;

	eflags = read_eflags();
	disable_intr();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 box becomes unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's
	 * operation.  The I/O Trap Restart function (bit 9 of TR12) is
	 * always enabled on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that memory size is aligned with 4M. */
	if (Maxmem > 0)
	  size = ((Maxmem >> 8) + 3) >> 2;	/* pages -> 4M units, rounded up */
	else
	  size = 0;

	/* Limit is 508M bytes (7-bit field, 4M units). */
	if (size > 0x7f)
		size = 0x7f;
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(PC98) || defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1)) {
#ifdef PC98
		/*
		 * If bit 2 of port 0x43b is 0, disable write allocate for
		 * the 15-16M range.
		 */
		if (!(inb(0x43b) & 4))
			whcr &= ~0x0001LL;
		else
#endif
			whcr |=  0x0001LL;
	}
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0x0c0000082, whcr);

	/*
	 * NOTE(review): write_eflags() already restores the saved IF; the
	 * following enable_intr() forces interrupts on regardless of the
	 * caller's state -- confirm this is intentional.
	 */
	write_eflags(eflags);
	enable_intr();
}
649 
650 void
651 enable_K6_2_wt_alloc(void)
652 {
653 	quad_t	size;
654 	u_int64_t	whcr;
655 	u_long	eflags;
656 
657 	eflags = read_eflags();
658 	disable_intr();
659 	wbinvd();
660 
661 #ifdef CPU_DISABLE_CACHE
662 	/*
663 	 * Certain K6-2 box becomes unstable when write allocation is
664 	 * enabled.
665 	 */
666 	/*
667 	 * The AMD-K6 processer provides the 64-bit Test Register 12(TR12),
668 	 * but only the Cache Inhibit(CI) (bit 3 of TR12) is suppported.
669 	 * All other bits in TR12 have no effect on the processer's operation.
670 	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
671 	 * on the AMD-K6.
672 	 */
673 	wrmsr(0x0000000e, (u_int64_t)0x0008);
674 #endif
675 	/* Don't assume that memory size is aligned with 4M. */
676 	if (Maxmem > 0)
677 	  size = ((Maxmem >> 8) + 3) >> 2;
678 	else
679 	  size = 0;
680 
681 	/* Limit is 4092M bytes. */
682 	if (size > 0x3fff)
683 		size = 0x3ff;
684 	whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);
685 
686 #if defined(PC98) || defined(NO_MEMORY_HOLE)
687 	if (whcr & (0x3ffLL << 22)) {
688 #ifdef PC98
689 		/*
690 		 * If bit 2 of port 0x43b is 0, disable wrte allocate for the
691 		 * 15-16M range.
692 		 */
693 		if (!(inb(0x43b) & 4))
694 			whcr &= ~(1LL << 16);
695 		else
696 #endif
697 			whcr |=  1LL << 16;
698 	}
699 #else
700 	/*
701 	 * There is no way to know wheter 15-16M hole exists or not.
702 	 * Therefore, we disable write allocate for this range.
703 	 */
704 	whcr &= ~(1LL << 16);
705 #endif
706 	wrmsr(0x0c0000082, whcr);
707 
708 	write_eflags(eflags);
709 	enable_intr();
710 }
#endif /* I586_CPU && CPU_WT_ALLOC */
712 
713 #include "opt_ddb.h"
714 #ifdef DDB
715 #include <ddb/ddb.h>
716 
/*
 * DDB "show cyrixreg" command: dump the Cyrix configuration registers
 * and CR0.  CCR0 is skipped on the 5x86 (M1SC) and 486S/DX; CCR4 and
 * CCR5/PCR0 are read on M1SC/M1/M2 only, through the temporary
 * CCR3_MAPEN0 window (CCR3 is restored afterwards).
 */
DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	u_long	eflags;
	u_int	cr0;
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (strcmp(cpu_vendor,"CyrixInstead") == 0) {
		/* Read the registers with interrupts disabled. */
		eflags = read_eflags();
		disable_intr();


		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			/* Unlock CCR4/CCR5/PCR0 via the MAPEN0 window. */
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);		/* Restore CCR3. */
		}
		write_eflags(eflags);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);

		printf("CCR1=%x, CCR2=%x, CCR3=%x",
			(u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				printf("PCR0=%x\n", pcr0);
			else
				printf("CCR5=%x\n", ccr5);
		}
	}
	printf("CR0=%x\n", cr0);
}
762 #endif /* DDB */
763