xref: /linux/arch/x86/kernel/cpu/mtrr/cyrix.c (revision 643d1f7fe3aa12c8bdea6fa5b4ba874ff6dd601d)
1 #include <linux/init.h>
2 #include <linux/mm.h>
3 #include <asm/mtrr.h>
4 #include <asm/msr.h>
5 #include <asm/io.h>
6 #include <asm/processor-cyrix.h>
7 #include <asm/processor-flags.h>
8 #include "mtrr.h"
9 
/*
 * Non-zero when ARR3 is locked down for SMM use (CCR3 SMI_LOCK set) and
 * must not be reprogrammed; checked by cyrix_get_free_region() below.
 * NOTE(review): within this file it is only written by the #if 0'd
 * cyrix_arr_init(), so other files may also set it -- confirm callers.
 */
int arr3_protected;
11 
/*
 * cyrix_get_arr - read back one Address Range Register (ARR).
 * @reg:  ARR index, 0-7.
 * @base: out - region base in pages (physical address >> PAGE_SHIFT).
 * @size: out - region size in pages; 0 means the ARR is disabled.
 * @type: out - MTRR memory type decoded from the matching RCR.
 *
 * The ARRs live behind the Cyrix configuration-register interface
 * (getCx86/setCx86) and are only visible while MAPEN is set in CCR3,
 * so the whole access sequence runs with interrupts disabled.
 */
static void
cyrix_get_arr(unsigned int reg, unsigned long *base,
	      unsigned long *size, mtrr_type * type)
{
	unsigned long flags;
	unsigned char arr, ccr3, rcr, shift;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/* Save flags and disable interrupts */
	local_irq_save(flags);

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
	/*
	 * Each ARR is three consecutive byte registers holding base bits
	 * 31-24, 23-16 and 15-12; the size code occupies the low nibble
	 * of the third byte.  Assemble the bytes directly into *base.
	 * NOTE(review): byte 0 of *base is never written; its bits are
	 * discarded by the PAGE_SHIFT shift below, so this is harmless
	 * on 32-bit, the only place this code runs.
	 */
	((unsigned char *) base)[3] = getCx86(arr);
	((unsigned char *) base)[2] = getCx86(arr + 1);
	((unsigned char *) base)[1] = getCx86(arr + 2);
	rcr = getCx86(CX86_RCR_BASE + reg);
	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */

	/* Enable interrupts if it was enabled previously */
	local_irq_restore(flags);
	/* extract the size code before it is shifted out of *base */
	shift = ((unsigned char *) base)[1] & 0x0f;
	*base >>= PAGE_SHIFT;

	/* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
	 * Note: shift==0xf means 4G, this is unsupported.
	 */
	if (shift)
		*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
	else
		*size = 0;

	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6,
	 * hence the two different RCR decodings below. */
	if (reg < 7) {
		switch (rcr) {
		case 1:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 9:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 24:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	} else {
		switch (rcr) {
		case 0:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 9:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 25:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	}
}
80 
/*
 * cyrix_get_free_region - find a usable ARR for a new region.
 * @base: starting (base) address of the region, in pages.
 * @size: size of the region, in pages.
 * @replace_reg: ARR index the caller wants to reuse, or negative for none.
 *
 * Returns the index of a usable ARR, or -ENOSPC when none is available.
 * (The old [SUMMARY] comment claimed -1 on error; the code actually
 * returns -ENOSPC.)
 */
static int
cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	int i;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	/*
	 * Honour an explicit replacement request when the target ARR can
	 * hold the region: ARR7 needs at least 256K (0x40 pages), and
	 * ARR3 may be locked down for SMM use.
	 */
	switch (replace_reg) {
	case 7:
		if (size < 0x40)
			break;
		/* fall through */
	case 6:
	case 5:
	case 4:
		return replace_reg;
	case 3:
		if (arr3_protected)
			break;
		/* fall through */
	case 2:
	case 1:
	case 0:
		return replace_reg;
	}
	/* If we are to set up a region >32M (0x2000 pages of 4K) then
	 * look at ARR7 immediately */
	if (size > 0x2000) {
		cyrix_get_arr(7, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return 7;
		/*  Else try ARR0-ARR6 first  */
	} else {
		for (i = 0; i < 7; i++) {
			cyrix_get_arr(i, &lbase, &lsize, &ltype);
			if ((i == 3) && arr3_protected)
				continue;
			if (lsize == 0)
				return i;
		}
		/* ARR0-ARR6 isn't free, try ARR7 but its size must be at
		 * least 256K; the loop above left i == 7 here. */
		cyrix_get_arr(i, &lbase, &lsize, &ltype);
		if ((lsize == 0) && (size >= 0x40))
			return i;
	}
	return -ENOSPC;
}
130 
/*
 * Register state saved by prepare_set() and restored by post_set();
 * every ARR/CCR update must be bracketed by that pair.
 */
static u32 cr4 = 0;	/* saved CR4, only valid when the CPU has PGE */
static u32 ccr3;	/* saved CCR3, restored to clear MAPEN again */
133 
/*
 * prepare_set - make the Cyrix configuration registers writable.
 *
 * Clears PGE in CR4 (so no stale global TLB entries survive), puts the
 * caches into no-fill mode via CR0.CD, flushes caches and TLBs with
 * wbinvd, then sets MAPEN in CCR3 so the ARRs/RCRs become accessible.
 * Must be paired with post_set().
 */
static void prepare_set(void)
{
	u32 cr0;

	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
	if ( cpu_has_pge ) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/*  Disable and flush caches. Note that wbinvd flushes the TLBs as
	    a side-effect  */
	cr0 = read_cr0() | X86_CR0_CD;
	wbinvd();
	write_cr0(cr0);
	wbinvd();

	/* Cyrix ARRs - everything else was excluded at the top */
	ccr3 = getCx86(CX86_CCR3);

	/* enable MAPEN so the configuration registers can be written */
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);

}
158 
159 static void post_set(void)
160 {
161 	/*  Flush caches and TLBs  */
162 	wbinvd();
163 
164 	/* Cyrix ARRs - everything else was excluded at the top */
165 	setCx86(CX86_CCR3, ccr3);
166 
167 	/*  Enable caches  */
168 	write_cr0(read_cr0() & 0xbfffffff);
169 
170 	/*  Restore value of CR4  */
171 	if ( cpu_has_pge )
172 		write_cr4(cr4);
173 }
174 
/*
 * cyrix_set_arr - program one ARR and its matching RCR.
 * @reg:  ARR index, 0-7.
 * @base: region base in pages.
 * @size: region size in pages; 0 disables the register.
 * @type: MTRR memory type to encode into the RCR.
 */
static void cyrix_set_arr(unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
	unsigned char arr, arr_type, arr_size;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/* ARR7's size granule is 256K (0x40 pages) rather than 4K, so
	 * convert to 256K units first */
	if (reg >= 7)
		size >>= 6;

	size &= 0x7fff;		/* make sure arr_size <= 14 */
	/* arr_size ends up as the bit length of size, i.e. the hardware
	 * shift code (matches shift-1 decoding in cyrix_get_arr()) */
	for (arr_size = 0; size; arr_size++, size >>= 1) ;

	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6,
	 * hence the two different RCR encodings below */
	if (reg < 7) {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 1;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 9;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 24;
			break;
		default:
			arr_type = 8;
			break;
		}
	} else {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 0;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 8;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 25;
			break;
		default:
			arr_type = 9;
			break;
		}
	}

	prepare_set();

	base <<= PAGE_SHIFT;
	/* write base bits 31-24, 23-16, then 15-12 merged with the size
	 * code in the low nibble */
	setCx86(arr, ((unsigned char *) &base)[3]);
	setCx86(arr + 1, ((unsigned char *) &base)[2]);
	setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
	setCx86(CX86_RCR_BASE + reg, arr_type);

	post_set();
}
231 
232 typedef struct {
233 	unsigned long base;
234 	unsigned long size;
235 	mtrr_type type;
236 } arr_state_t;
237 
238 static arr_state_t arr_state[8] = {
239 	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
240 	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
241 };
242 
243 static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 };
244 
245 static void cyrix_set_all(void)
246 {
247 	int i;
248 
249 	prepare_set();
250 
251 	/* the CCRs are not contiguous */
252 	for (i = 0; i < 4; i++)
253 		setCx86(CX86_CCR0 + i, ccr_state[i]);
254 	for (; i < 7; i++)
255 		setCx86(CX86_CCR4 + i, ccr_state[i]);
256 	for (i = 0; i < 8; i++)
257 		cyrix_set_arr(i, arr_state[i].base,
258 			      arr_state[i].size, arr_state[i].type);
259 
260 	post_set();
261 }
262 
#if 0
/* NOTE(review): this whole init path is compiled out (and the .init
 * hook below is commented); it is kept for reference only. */
/*
 * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
 * with the SMM (System Management Mode) mode. So we need the following:
 * Check whether SMI_LOCK (CCR3 bit 0) is set
 *   if it is set, write a warning message: ARR3 cannot be changed!
 *     (it cannot be changed until the next processor reset)
 *   if it is reset, then we can change it, set all the needed bits:
 *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
 *   - disable access to SMM memory (CCR1 bit 2 reset)
 *   - disable SMM mode (CCR1 bit 1 reset)
 *   - disable write protection of ARR3 (CCR6 bit 1 reset)
 *   - (maybe) disable ARR3
 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
 */
static void __init
cyrix_arr_init(void)
{
	struct set_mtrr_context ctxt;
	unsigned char ccr[7];
	int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef CONFIG_SMP
	int i;
#endif

	/* flush cache and enable MAPEN */
	set_mtrr_prepare_save(&ctxt);
	set_mtrr_cache_disable(&ctxt);

	/* Save all CCRs locally */
	ccr[0] = getCx86(CX86_CCR0);
	ccr[1] = getCx86(CX86_CCR1);
	ccr[2] = getCx86(CX86_CCR2);
	ccr[3] = ctxt.ccr3;
	ccr[4] = getCx86(CX86_CCR4);
	ccr[5] = getCx86(CX86_CCR5);
	ccr[6] = getCx86(CX86_CCR6);

	/* SMI_LOCK (CCR3 bit 0): ARR3 is frozen until reset */
	if (ccr[3] & 1) {
		ccrc[3] = 1;
		arr3_protected = 1;
	} else {
		/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
		 * access to SMM memory through ARR3 (bit 7).
		 */
		if (ccr[1] & 0x80) {
			ccr[1] &= 0x7f;
			ccrc[1] |= 0x80;
		}
		if (ccr[1] & 0x04) {
			ccr[1] &= 0xfb;
			ccrc[1] |= 0x04;
		}
		if (ccr[1] & 0x02) {
			ccr[1] &= 0xfd;
			ccrc[1] |= 0x02;
		}
		arr3_protected = 0;
		if (ccr[6] & 0x02) {
			ccr[6] &= 0xfd;
			ccrc[6] = 1;	/* Disable write protection of ARR3 */
			setCx86(CX86_CCR6, ccr[6]);
		}
		/* Disable ARR3. This is safe now that we disabled SMM. */
		/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
	}
	/* If we changed CCR1 in memory, change it in the processor, too. */
	if (ccrc[1])
		setCx86(CX86_CCR1, ccr[1]);

	/* Enable ARR usage by the processor */
	if (!(ccr[5] & 0x20)) {
		ccr[5] |= 0x20;
		ccrc[5] = 1;
		setCx86(CX86_CCR5, ccr[5]);
	}
#ifdef CONFIG_SMP
	/* Seed the shadow state that cyrix_set_all() replays on other CPUs */
	for (i = 0; i < 7; i++)
		ccr_state[i] = ccr[i];
	for (i = 0; i < 8; i++)
		cyrix_get_arr(i,
			      &arr_state[i].base, &arr_state[i].size,
			      &arr_state[i].type);
#endif

	set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */

	if (ccrc[5])
		printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
	if (ccrc[3])
		printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
/*
    if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
    if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
    if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
*/
	if (ccrc[6])
		printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
}
#endif
363 
/*
 * mtrr_ops implementation for Cyrix CPUs.  The .init hook stays
 * commented out together with the #if 0'd cyrix_arr_init() above;
 * only the runtime set/get paths are wired up.
 */
static struct mtrr_ops cyrix_mtrr_ops = {
	.vendor            = X86_VENDOR_CYRIX,
//	.init              = cyrix_arr_init,
	.set_all	   = cyrix_set_all,
	.set               = cyrix_set_arr,
	.get               = cyrix_get_arr,
	.get_free_region   = cyrix_get_free_region,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = positive_have_wrcomb,
};
374 
/*
 * cyrix_init_mtrr - register the Cyrix mtrr_ops with the MTRR core.
 * Always returns 0.
 */
int __init cyrix_init_mtrr(void)
{
	set_mtrr_ops(&cyrix_mtrr_ops);
	return 0;
}
380 
381 //arch_initcall(cyrix_init_mtrr);
382