xref: /linux/arch/x86/kernel/cpu/mtrr/generic.c (revision 3ff42da5048649503e343a32be37b14a6a4e8aaf)
/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
32ec1df41SThomas Gleixner #include <linux/init.h>
42ec1df41SThomas Gleixner #include <linux/slab.h>
52ec1df41SThomas Gleixner #include <linux/mm.h>
62ec1df41SThomas Gleixner #include <linux/module.h>
72ec1df41SThomas Gleixner #include <asm/io.h>
82ec1df41SThomas Gleixner #include <asm/mtrr.h>
92ec1df41SThomas Gleixner #include <asm/msr.h>
102ec1df41SThomas Gleixner #include <asm/system.h>
112ec1df41SThomas Gleixner #include <asm/cpufeature.h>
127ebad705SDave Jones #include <asm/processor-flags.h>
132ec1df41SThomas Gleixner #include <asm/tlbflush.h>
142e5d9c85Svenkatesh.pallipadi@intel.com #include <asm/pat.h>
152ec1df41SThomas Gleixner #include "mtrr.h"
162ec1df41SThomas Gleixner 
struct fixed_range_block {
	int base_msr; /* start address of an MTRR block */
	int ranges;   /* number of MTRRs in this block  */
};

/*
 * Layout of the fixed-range MTRRs: one 64K register covering 0-512K,
 * two 16K-granular registers covering 512K-768K, and eight 4K-granular
 * registers covering 768K-1M.  The list is terminated by a zero-filled
 * sentinel entry (ranges == 0).
 */
static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 }, /* one  64k MTRR  */
	{ MTRRfix16K_80000_MSR, 2 }, /* two  16k MTRRs */
	{ MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
	{}
};
282ec1df41SThomas Gleixner 
/* MTRR_CHANGE_MASK_* bits recording what generic_set_all() had to fix up. */
static unsigned long smp_changes_mask;
/* Non-zero once get_mtrr_state() has populated mtrr_state. */
static int mtrr_state_set;
/* Saved AMD TOP_MEM2 MSR value (see get_mtrr_state()); 0 when unused. */
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state = {};
EXPORT_SYMBOL_GPL(mtrr_state);
35932d27a7SSheng Yang 
/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 *
 * If the BIOS left the bit set, warn about the firmware bug and clear
 * it before the fixed-range MTRRs are read or written (this is called
 * from get_fixed_ranges() and set_fixed_ranges()).
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	/* The K8 SYSCFG MSR only exists on AMD family 0x0f and later. */
	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}
61*3ff42da5SAndreas Herrmann 
/*
 * Returns the effective MTRR type for the region
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	if (!mtrr_state_set)
		return 0xFF;

	if (!mtrr_state.enabled)
		return 0xFF;

	/* Make end inclusive end, instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state.have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			/* 0x00000-0x7FFFF: 64K granularity, entries 0-7 */
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			/* 0x80000-0xBFFFF: 16K granularity, entries 8-23 */
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0x1000000) {
			/*
			 * 0xC0000-0xFFFFF: 4K granularity, entries 24-87.
			 * NOTE(review): the outer (start < 0x100000) check
			 * already bounds start below 1M, so the 0x1000000
			 * literal here is never the deciding condition.
			 */
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look of multiple ranges matching this address and pick type
	 * as per MTRR precedence
	 */
	if (!(mtrr_state.enabled & 2)) {
		/* Variable-range MTRRs disabled: default type applies. */
		return mtrr_state.def_type;
	}

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		/* Bit 11 of PhysMask is the valid bit; skip disabled entries */
		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		/* Queried range straddles this MTRR's boundary: give up. */
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask)) {
			continue;
		}

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			/* First matching range. */
			prev_match = curr_match;
			continue;
		}

		/* Overlapping ranges: UC always wins. */
		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE) {
			return MTRR_TYPE_UNCACHABLE;
		}

		/* WB overlapping WT resolves to WT. */
		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		/* Any other conflicting overlap is treated as UC. */
		if (prev_match != curr_match) {
			return MTRR_TYPE_UNCACHABLE;
		}
	}

	/* AMD TOM2: [4GB, mtrr_tom2) is write-back. */
	if (mtrr_tom2) {
		if (start >= (1ULL<<32) && (end < mtrr_tom2))
			return MTRR_TYPE_WRBACK;
	}

	if (prev_match != 0xFF)
		return prev_match;

	/* Not covered by any variable range: default type applies. */
	return mtrr_state.def_type;
}
1662e5d9c85Svenkatesh.pallipadi@intel.com 
/*  Get the MSR pair relating to a var range  */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	/* Read the PhysBase/PhysMask MSR pair for variable range 'index'. */
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
1742ec1df41SThomas Gleixner 
17595ffa243SYinghai Lu /*  fill the MSR pair relating to a var range  */
17695ffa243SYinghai Lu void fill_mtrr_var_range(unsigned int index,
17795ffa243SYinghai Lu 		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
17895ffa243SYinghai Lu {
17995ffa243SYinghai Lu 	struct mtrr_var_range *vr;
18095ffa243SYinghai Lu 
18195ffa243SYinghai Lu 	vr = mtrr_state.var_ranges;
18295ffa243SYinghai Lu 
18395ffa243SYinghai Lu 	vr[index].base_lo = base_lo;
18495ffa243SYinghai Lu 	vr[index].base_hi = base_hi;
18595ffa243SYinghai Lu 	vr[index].mask_lo = mask_lo;
18695ffa243SYinghai Lu 	vr[index].mask_hi = mask_hi;
18795ffa243SYinghai Lu }
18895ffa243SYinghai Lu 
1892ec1df41SThomas Gleixner static void
1902ec1df41SThomas Gleixner get_fixed_ranges(mtrr_type * frs)
1912ec1df41SThomas Gleixner {
1922ec1df41SThomas Gleixner 	unsigned int *p = (unsigned int *) frs;
1932ec1df41SThomas Gleixner 	int i;
1942ec1df41SThomas Gleixner 
195*3ff42da5SAndreas Herrmann 	k8_check_syscfg_dram_mod_en();
196*3ff42da5SAndreas Herrmann 
1972ec1df41SThomas Gleixner 	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
1982ec1df41SThomas Gleixner 
1992ec1df41SThomas Gleixner 	for (i = 0; i < 2; i++)
2002ec1df41SThomas Gleixner 		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
2012ec1df41SThomas Gleixner 	for (i = 0; i < 8; i++)
2022ec1df41SThomas Gleixner 		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
2032ec1df41SThomas Gleixner }
2042ec1df41SThomas Gleixner 
2052ec1df41SThomas Gleixner void mtrr_save_fixed_ranges(void *info)
2062ec1df41SThomas Gleixner {
2072ec1df41SThomas Gleixner 	if (cpu_has_mtrr)
2082ec1df41SThomas Gleixner 		get_fixed_ranges(mtrr_state.fixed_ranges);
2092ec1df41SThomas Gleixner }
2102ec1df41SThomas Gleixner 
2112ec1df41SThomas Gleixner static void print_fixed(unsigned base, unsigned step, const mtrr_type*types)
2122ec1df41SThomas Gleixner {
2132ec1df41SThomas Gleixner 	unsigned i;
2142ec1df41SThomas Gleixner 
2152ec1df41SThomas Gleixner 	for (i = 0; i < 8; ++i, ++types, base += step)
2168ad97905SYinghai Lu 		printk(KERN_INFO "  %05X-%05X %s\n",
2172ec1df41SThomas Gleixner 			base, base + step - 1, mtrr_attrib_to_str(*types));
2182ec1df41SThomas Gleixner }
2192ec1df41SThomas Gleixner 
/* Defined below; get_mtrr_state() needs them for its PAT init sequence. */
static void prepare_set(void);
static void post_set(void);
2222e5d9c85Svenkatesh.pallipadi@intel.com 
/* Dump the saved MTRR state (fixed ranges, variable ranges, TOM2) to the log. */
static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
		       mtrr_state.enabled & 1 ? "en" : "dis");
		/* One 64K range, two 16K blocks, eight 4K blocks. */
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
	}
	printk(KERN_INFO "MTRR variable ranges %sabled:\n",
	       mtrr_state.enabled & 2 ? "en" : "dis");
	/*
	 * Hex digits needed for address bits above the low 12 that don't fit
	 * in the %05X field — presumably derived from the physical address
	 * width encoded in size_or_mask; TODO confirm against mtrr_bp_init().
	 */
	high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
	for (i = 0; i < num_var_ranges; ++i) {
		/* Bit 11 of PhysMask is the valid bit. */
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			printk(KERN_INFO "  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
			       i,
			       high_width,
			       mtrr_state.var_ranges[i].base_hi,
			       mtrr_state.var_ranges[i].base_lo >> 12,
			       high_width,
			       mtrr_state.var_ranges[i].mask_hi,
			       mtrr_state.var_ranges[i].mask_lo >> 12,
			       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			printk(KERN_INFO "   %u disabled\n", i);
	}
	if (mtrr_tom2) {
		printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
				  mtrr_tom2, mtrr_tom2>>20);
	}
}
2608ad97905SYinghai Lu 
/*  Grab all of the MTRR state for this CPU into *state  */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned long flags;

	vrs = mtrr_state.var_ranges;

	/* MTRRcap: bit 8 indicates fixed-range MTRR support. */
	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	/* MTRRdefType: bits 7:0 default type, bits 11:10 enable bits. */
	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;
		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		/* Mask off bits below the 8MB (0x800000) granularity. */
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	print_mtrr_state();

	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);

}
3072ec1df41SThomas Gleixner 
3082ec1df41SThomas Gleixner /*  Some BIOS's are fucked and don't set all MTRRs the same!  */
3092ec1df41SThomas Gleixner void __init mtrr_state_warn(void)
3102ec1df41SThomas Gleixner {
3112ec1df41SThomas Gleixner 	unsigned long mask = smp_changes_mask;
3122ec1df41SThomas Gleixner 
3132ec1df41SThomas Gleixner 	if (!mask)
3142ec1df41SThomas Gleixner 		return;
3152ec1df41SThomas Gleixner 	if (mask & MTRR_CHANGE_MASK_FIXED)
3162ec1df41SThomas Gleixner 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
3172ec1df41SThomas Gleixner 	if (mask & MTRR_CHANGE_MASK_VARIABLE)
3182ec1df41SThomas Gleixner 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
3192ec1df41SThomas Gleixner 	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
3202ec1df41SThomas Gleixner 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
3212ec1df41SThomas Gleixner 	printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
3222ec1df41SThomas Gleixner 	printk(KERN_INFO "mtrr: corrected configuration.\n");
3232ec1df41SThomas Gleixner }
3242ec1df41SThomas Gleixner 
/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it because the best error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	/* wrmsr_safe() returns < 0 instead of faulting on a bad MSR write. */
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}
3352ec1df41SThomas Gleixner 
3362ec1df41SThomas Gleixner /**
3371d3381ebSRandy Dunlap  * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
3381d3381ebSRandy Dunlap  * @msr: MSR address of the MTTR which should be checked and updated
3391d3381ebSRandy Dunlap  * @changed: pointer which indicates whether the MTRR needed to be changed
3401d3381ebSRandy Dunlap  * @msrwords: pointer to the MSR values which the MSR should have
3412ec1df41SThomas Gleixner  */
3422d2ee8deSPaul Jimenez static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
3432ec1df41SThomas Gleixner {
3442ec1df41SThomas Gleixner 	unsigned lo, hi;
3452ec1df41SThomas Gleixner 
3462ec1df41SThomas Gleixner 	rdmsr(msr, lo, hi);
3472ec1df41SThomas Gleixner 
3482ec1df41SThomas Gleixner 	if (lo != msrwords[0] || hi != msrwords[1]) {
3492ec1df41SThomas Gleixner 		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
3502d2ee8deSPaul Jimenez 		*changed = true;
3512ec1df41SThomas Gleixner 	}
3522ec1df41SThomas Gleixner }
3532ec1df41SThomas Gleixner 
3541d3381ebSRandy Dunlap /**
3551d3381ebSRandy Dunlap  * generic_get_free_region - Get a free MTRR.
3561d3381ebSRandy Dunlap  * @base: The starting (base) address of the region.
3571d3381ebSRandy Dunlap  * @size: The size (in bytes) of the region.
3581d3381ebSRandy Dunlap  * @replace_reg: mtrr index to be replaced; set to invalid value if none.
3591d3381ebSRandy Dunlap  *
3601d3381ebSRandy Dunlap  * Returns: The index of the region on success, else negative on error.
3612ec1df41SThomas Gleixner  */
3621d3381ebSRandy Dunlap int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
3632ec1df41SThomas Gleixner {
3642ec1df41SThomas Gleixner 	int i, max;
3652ec1df41SThomas Gleixner 	mtrr_type ltype;
3662ec1df41SThomas Gleixner 	unsigned long lbase, lsize;
3672ec1df41SThomas Gleixner 
3682ec1df41SThomas Gleixner 	max = num_var_ranges;
3692ec1df41SThomas Gleixner 	if (replace_reg >= 0 && replace_reg < max)
3702ec1df41SThomas Gleixner 		return replace_reg;
3712ec1df41SThomas Gleixner 	for (i = 0; i < max; ++i) {
3722ec1df41SThomas Gleixner 		mtrr_if->get(i, &lbase, &lsize, &ltype);
3732ec1df41SThomas Gleixner 		if (lsize == 0)
3742ec1df41SThomas Gleixner 			return i;
3752ec1df41SThomas Gleixner 	}
3762ec1df41SThomas Gleixner 	return -ENOSPC;
3772ec1df41SThomas Gleixner }
3782ec1df41SThomas Gleixner 
/* Read variable MTRR @reg from hardware into *base/*size (pages) and *type. */
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;
	unsigned int tmp, hi;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if ((mask_lo & 0x800) == 0) {
		/*  Invalid (i.e. free) range  */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask_lo = size_or_mask | tmp;
	/* Expand tmp with high bits to all 1s*/
	hi = fls(tmp);
	if (hi > 0) {
		/*
		 * Force all bits above the highest set bit of tmp to 1 and
		 * warn if that differs from the BIOS-programmed mask (which
		 * would mean the mask wasn't contiguous ones from the top).
		 */
		tmp |= ~((1<<(hi - 1)) - 1);

		if (tmp != mask_lo) {
			WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
			mask_lo = tmp;
		}
	}

	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;

	printk(KERN_DEBUG "  get_mtrr: cpu%d reg%02d base=%010lx size=%010lx %s\n",
			smp_processor_id(), reg, *base, *size,
			mtrr_attrib_to_str(*type & 0xff));
}
4252ec1df41SThomas Gleixner 
4262ec1df41SThomas Gleixner /**
4271d3381ebSRandy Dunlap  * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
4281d3381ebSRandy Dunlap  * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
4292ec1df41SThomas Gleixner  */
4302ec1df41SThomas Gleixner static int set_fixed_ranges(mtrr_type * frs)
4312ec1df41SThomas Gleixner {
4322ec1df41SThomas Gleixner 	unsigned long long *saved = (unsigned long long *) frs;
4332d2ee8deSPaul Jimenez 	bool changed = false;
4342ec1df41SThomas Gleixner 	int block=-1, range;
4352ec1df41SThomas Gleixner 
436*3ff42da5SAndreas Herrmann 	k8_check_syscfg_dram_mod_en();
437*3ff42da5SAndreas Herrmann 
4382ec1df41SThomas Gleixner 	while (fixed_range_blocks[++block].ranges)
4392ec1df41SThomas Gleixner 	    for (range=0; range < fixed_range_blocks[block].ranges; range++)
4402ec1df41SThomas Gleixner 		set_fixed_range(fixed_range_blocks[block].base_msr + range,
4412ec1df41SThomas Gleixner 		    &changed, (unsigned int *) saved++);
4422ec1df41SThomas Gleixner 
4432ec1df41SThomas Gleixner 	return changed;
4442ec1df41SThomas Gleixner }
4452ec1df41SThomas Gleixner 
/*  Set the MSR pair relating to a var range. Returns TRUE if
    changes are made  */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	/* Only rewrite PhysBase if the compared bits actually differ. */
	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	/* Likewise for PhysMask (bits 10:0 are not compared). */
	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}
4712ec1df41SThomas Gleixner 
/* MTRRdefType MSR contents, saved by prepare_set() and restored by post_set(). */
static u32 deftype_lo, deftype_hi;
4732ec1df41SThomas Gleixner 
/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*  Set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value  */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		/* Patch default type (bits 7:0) and enable bits (11:10). */
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
5022ec1df41SThomas Gleixner 
5032ec1df41SThomas Gleixner 
/* CR4 value saved by prepare_set() (PGE cleared) and restored by post_set(). */
static unsigned long cr4 = 0;
/* Serializes MTRR updates; held from prepare_set() until post_set(). */
static DEFINE_SPINLOCK(set_atomicity_lock);
5062ec1df41SThomas Gleixner 
/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain.  The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */

static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*  Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots  */

	spin_lock(&set_atomicity_lock);

	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
	/*  (so the __flush_tlb() below also drops global TLB entries)  */
	if ( cpu_has_pge ) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/*  Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/*  Disable MTRRs, and set the default type to uncached  */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}
5442ec1df41SThomas Gleixner 
5452ec1df41SThomas Gleixner static void post_set(void) __releases(set_atomicity_lock)
5462ec1df41SThomas Gleixner {
5472ec1df41SThomas Gleixner 	/*  Flush TLBs (no need to flush caches - they are disabled)  */
5482ec1df41SThomas Gleixner 	__flush_tlb();
5492ec1df41SThomas Gleixner 
5502ec1df41SThomas Gleixner 	/* Intel (P6) standard MTRRs */
5512ec1df41SThomas Gleixner 	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
5522ec1df41SThomas Gleixner 
5532ec1df41SThomas Gleixner 	/*  Enable caches  */
5542ec1df41SThomas Gleixner 	write_cr0(read_cr0() & 0xbfffffff);
5552ec1df41SThomas Gleixner 
5562ec1df41SThomas Gleixner 	/*  Restore value of CR4  */
5572ec1df41SThomas Gleixner 	if ( cpu_has_pge )
5582ec1df41SThomas Gleixner 		write_cr4(cr4);
5592ec1df41SThomas Gleixner 	spin_unlock(&set_atomicity_lock);
5602ec1df41SThomas Gleixner }
5612ec1df41SThomas Gleixner 
5622ec1df41SThomas Gleixner static void generic_set_all(void)
5632ec1df41SThomas Gleixner {
5642ec1df41SThomas Gleixner 	unsigned long mask, count;
5652ec1df41SThomas Gleixner 	unsigned long flags;
5662ec1df41SThomas Gleixner 
5672ec1df41SThomas Gleixner 	local_irq_save(flags);
5682ec1df41SThomas Gleixner 	prepare_set();
5692ec1df41SThomas Gleixner 
5702ec1df41SThomas Gleixner 	/* Actually set the state */
5712ec1df41SThomas Gleixner 	mask = set_mtrr_state();
5722ec1df41SThomas Gleixner 
5732e5d9c85Svenkatesh.pallipadi@intel.com 	/* also set PAT */
5742e5d9c85Svenkatesh.pallipadi@intel.com 	pat_init();
5752e5d9c85Svenkatesh.pallipadi@intel.com 
5762ec1df41SThomas Gleixner 	post_set();
5772ec1df41SThomas Gleixner 	local_irq_restore(flags);
5782ec1df41SThomas Gleixner 
5792ec1df41SThomas Gleixner 	/*  Use the atomic bitops to update the global mask  */
5802ec1df41SThomas Gleixner 	for (count = 0; count < sizeof mask * 8; ++count) {
5812ec1df41SThomas Gleixner 		if (mask & 0x01)
5822ec1df41SThomas Gleixner 			set_bit(count, &smp_changes_mask);
5832ec1df41SThomas Gleixner 		mask >>= 1;
5842ec1df41SThomas Gleixner 	}
5852ec1df41SThomas Gleixner 
5862ec1df41SThomas Gleixner }
5872ec1df41SThomas Gleixner 
/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 * @reg: The register to set.
 * @base: The base address of the region (in pages).
 * @size: The size of the region (in pages). If this is 0 the region is
 *	  disabled.
 * @type: The type of the region.
 *
 * Programs the PhysBase/PhysMask MSR pair for @reg and mirrors the new
 * values into mtrr_state.var_ranges[] so the cached copy stays in sync
 * with hardware. Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		/* Build the MSR images: 0x800 is the "valid" bit (bit 11)
		   in PhysMask; size_and_mask limits the physical address
		   width. */
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
6242ec1df41SThomas Gleixner 
6252ec1df41SThomas Gleixner int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
6262ec1df41SThomas Gleixner {
6272ec1df41SThomas Gleixner 	unsigned long lbase, last;
6282ec1df41SThomas Gleixner 
6292ec1df41SThomas Gleixner 	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
6302ec1df41SThomas Gleixner 	    and not touch 0x70000000->0x7003FFFF */
6312ec1df41SThomas Gleixner 	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
6322ec1df41SThomas Gleixner 	    boot_cpu_data.x86_model == 1 &&
6332ec1df41SThomas Gleixner 	    boot_cpu_data.x86_mask <= 7) {
6342ec1df41SThomas Gleixner 		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
6352ec1df41SThomas Gleixner 			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
6362ec1df41SThomas Gleixner 			return -EINVAL;
6372ec1df41SThomas Gleixner 		}
6382ec1df41SThomas Gleixner 		if (!(base + size < 0x70000 || base > 0x7003F) &&
6392ec1df41SThomas Gleixner 		    (type == MTRR_TYPE_WRCOMB
6402ec1df41SThomas Gleixner 		     || type == MTRR_TYPE_WRBACK)) {
6412ec1df41SThomas Gleixner 			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
6422ec1df41SThomas Gleixner 			return -EINVAL;
6432ec1df41SThomas Gleixner 		}
6442ec1df41SThomas Gleixner 	}
6452ec1df41SThomas Gleixner 
6462ec1df41SThomas Gleixner 	/*  Check upper bits of base and last are equal and lower bits are 0
6472ec1df41SThomas Gleixner 	    for base and 1 for last  */
6482ec1df41SThomas Gleixner 	last = base + size - 1;
6492ec1df41SThomas Gleixner 	for (lbase = base; !(lbase & 1) && (last & 1);
6502ec1df41SThomas Gleixner 	     lbase = lbase >> 1, last = last >> 1) ;
6512ec1df41SThomas Gleixner 	if (lbase != last) {
6522ec1df41SThomas Gleixner 		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
6532ec1df41SThomas Gleixner 		       base, size);
6542ec1df41SThomas Gleixner 		return -EINVAL;
6552ec1df41SThomas Gleixner 	}
6562ec1df41SThomas Gleixner 	return 0;
6572ec1df41SThomas Gleixner }
6582ec1df41SThomas Gleixner 
6592ec1df41SThomas Gleixner 
/* Report write-combining support by reading the MTRRcap MSR:
   bit 10 (WC) is set when the CPU supports the write-combining
   memory type.  Returns non-zero (the raw bit) if supported. */
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}
6662ec1df41SThomas Gleixner 
/* Always claim write-combining support — presumably installed as the
   .have_wrcomb hook for CPUs known to support WC without an MTRRcap
   MSR; verify against the mtrr_ops users elsewhere in this driver. */
int positive_have_wrcomb(void)
{
	return 1;
}
6712ec1df41SThomas Gleixner 
/* Operations table for the generic (Intel-compatible) MTRR
 * implementation: wires the generic_* helpers in this file into the
 * common mtrr_ops interface used by the MTRR core.
 */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
	.set_all	   = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};
683