xref: /linux/arch/x86/kernel/cpu/mtrr/generic.c (revision 2d2ee8de5f6d26ef2942e0b449aa68d9236d5777)
/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include "mtrr.h"

struct mtrr_state {
	struct mtrr_var_range *var_ranges;
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;
	unsigned char have_fixed;
	mtrr_type def_type;
};

struct fixed_range_block {
	int base_msr; /* start address of an MTRR block */
	int ranges;   /* number of MTRRs in this block  */
};

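/*
 * Each fixed-range MSR packs eight one-byte MTRR types, so the eleven MSRs
 * described below cover all NUM_FIXED_RANGES (88) fixed ranges under 1MB:
 * 8 x 64K, 16 x 16K and 64 x 4K ranges.
 */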
static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 }, /* one  64k MTRR  */
	{ MTRRfix16K_80000_MSR, 2 }, /* two  16k MTRRs */
	{ MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
	{}
};

static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."

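/*
 * With MODULE_PARAM_PREFIX set to "mtrr." above, booting with "mtrr.show=1"
 * on the kernel command line makes get_mtrr_state() print the fixed and
 * variable MTRR layout at boot.
 */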
static int mtrr_show;
module_param_named(show, mtrr_show, bool, 0);

/*  Get the MSR pair relating to a var range  */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

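	/*
	 * frs points at NUM_FIXED_RANGES (88) one-byte types, read here as
	 * eleven lo/hi MSR pairs: p[0..1] for the 64K MSR, p[2..5] for the
	 * two 16K MSRs and p[6..21] for the eight 4K MSRs.
	 */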
	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step)
		printk(KERN_INFO "MTRR %05X-%05X %s\n",
			base, base + step - 1, mtrr_attrib_to_str(*types));
}

/*  Grab all of the MTRR state for this CPU into *state  */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;

	if (!mtrr_state.var_ranges) {
		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
						GFP_KERNEL);
		if (!mtrr_state.var_ranges)
			return;
	}
	vrs = mtrr_state.var_ranges;

	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (mtrr_show) {
		int high_width;

		printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
		if (mtrr_state.have_fixed) {
			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
			       mtrr_state.enabled & 1 ? "en" : "dis");
			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
			for (i = 0; i < 2; ++i)
				print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
			for (i = 0; i < 8; ++i)
				print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
		}
		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
		       mtrr_state.enabled & 2 ? "en" : "dis");
		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
		for (i = 0; i < num_var_ranges; ++i) {
			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				       i,
				       high_width,
				       mtrr_state.var_ranges[i].base_hi,
				       mtrr_state.var_ranges[i].base_lo >> 12,
				       high_width,
				       mtrr_state.var_ranges[i].mask_hi,
				       mtrr_state.var_ranges[i].mask_lo >> 12,
				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
			else
				printk(KERN_INFO "MTRR %u disabled\n", i);
		}
	}
}

/*  Some BIOSes are broken and don't set all MTRRs the same!  */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it because the best error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}

/**
 * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
 * see AMD publication no. 24593, chapter 3.2.1 for more information
 */
static inline void k8_enable_fixed_iorrs(void)
{
	unsigned lo, hi;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	mtrr_wrmsr(MSR_K8_SYSCFG, lo
				| K8_MTRRFIXRANGE_DRAM_ENABLE
				| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
}

/**
 * Checks and updates a fixed-range MTRR if it differs from the value it
 * should have. If K8 extensions are wanted, update the K8 SYSCFG MSR also.
 * see AMD publication no. 24593, chapter 7.8.1, page 233 for more information
 * \param msr MSR address of the MTRR which should be checked and updated
 * \param changed pointer which indicates whether the MTRR needed to be changed
 * \param msrwords pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 15 &&
		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
			k8_enable_fixed_iorrs();
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/*  [SUMMARY] Get a free MTRR.
    <base> The starting (base) address of the region.
    <size> The size (in bytes) of the region.
    [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/*  Invalid (i.e. free) range  */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
	    | mask_lo >> PAGE_SHIFT;

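	/* size_or_mask sets every bit above the CPU's physical address width,
	   so mask_lo now has 1s everywhere except the low bits that select
	   pages inside the range; negating it yields the size in pages. */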
	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}

/**
 * Checks and updates the fixed-range MTRRs if they differ from the saved set
 * \param frs pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	bool changed = false;
	int block = -1, range;

	while (fixed_range_blocks[++block].ranges)
	    for (range = 0; range < fixed_range_blocks[block].ranges; range++)
		set_fixed_range(fixed_range_blocks[block].base_msr + range,
		    &changed, (unsigned int *) saved++);

	return changed;
}

/*  Set the MSR pair relating to a var range. Returns true if
    changes are made  */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

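/* MTRRdefType contents saved by prepare_set() and written back by post_set() */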
static u32 deftype_lo, deftype_hi;

static unsigned long set_mtrr_state(void)
/*  [SUMMARY] Set the MTRR state for this CPU.
    [NOTE] The CPU must already be in a safe state for MTRR changes.
    [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

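	/* In MTRRdefType, bits 7:0 hold the default memory type, bit 10 is
	   the fixed-range enable and bit 11 the global MTRR enable;
	   mtrr_state.enabled caches bits 11:10, hence the 0xff, 0xc00 and
	   0xcff masks below. */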
	/*  Set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value  */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}


static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain.  The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */

static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*  Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots  */

	spin_lock(&set_atomicity_lock);

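	/* This mirrors the MTRR programming sequence recommended in Intel's
	   documentation: disable caching via CR0.CD, flush caches and TLBs
	   and disable the MTRRs here; the caller then updates the MTRRs and
	   post_set() re-enables them and caching. */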
	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
	write_cr0(cr0);
	wbinvd();

	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/*  Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/*  Disable MTRRs, and set the default type to uncached  */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/*  Flush TLBs (no need to flush caches - they are disabled)  */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/*  Enable caches  */
	write_cr0(read_cr0() & 0xbfffffff);

	/*  Restore value of CR4  */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	post_set();
	local_irq_restore(flags);

	/*  Use the atomic bitops to update the global mask  */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    [RETURNS] Nothing.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
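		/* base and size are in pages; shift them into the
		   PHYSBASE/PHYSMASK layout, keep the memory type in the low
		   byte of PHYSBASE and set the valid bit (bit 11, 0x800) in
		   PHYSMASK. */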
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
	    and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*  Check upper bits of base and last are equal and lower bits are 0
	    for base and 1 for last  */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}

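/* Bit 10 of MTRRcap is the WC flag: write-combining memory type supported. */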
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}

/* generic structure...
 */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
	.set_all	   = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};