xref: /linux/arch/x86/kernel/cpu/mtrr/generic.c (revision ba5673ff1ff5f428256db4cedd4b05b7be008bb6)
/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
32ec1df41SThomas Gleixner #include <linux/init.h>
42ec1df41SThomas Gleixner #include <linux/slab.h>
52ec1df41SThomas Gleixner #include <linux/mm.h>
62ec1df41SThomas Gleixner #include <linux/module.h>
72ec1df41SThomas Gleixner #include <asm/io.h>
82ec1df41SThomas Gleixner #include <asm/mtrr.h>
92ec1df41SThomas Gleixner #include <asm/msr.h>
102ec1df41SThomas Gleixner #include <asm/system.h>
112ec1df41SThomas Gleixner #include <asm/cpufeature.h>
127ebad705SDave Jones #include <asm/processor-flags.h>
132ec1df41SThomas Gleixner #include <asm/tlbflush.h>
142e5d9c85Svenkatesh.pallipadi@intel.com #include <asm/pat.h>
152ec1df41SThomas Gleixner #include "mtrr.h"
162ec1df41SThomas Gleixner 
/*
 * One contiguous block of fixed-range MTRR MSRs; the fixed ranges are
 * grouped into 64K, 16K and 4K blocks of consecutive MSRs.
 */
struct fixed_range_block {
	int base_msr; /* start address of an MTRR block */
	int ranges;   /* number of MTRRs in this block  */
};
212ec1df41SThomas Gleixner 
/* The three fixed-range MSR blocks, terminated by an all-zero entry. */
static struct fixed_range_block fixed_range_blocks[] = {
	{ MSR_MTRRfix64K_00000, 1 }, /* one  64k MTRR  */
	{ MSR_MTRRfix16K_80000, 2 }, /* two  16k MTRRs */
	{ MSR_MTRRfix4K_C0000,  8 }, /* eight 4k MTRRs */
	{}
};
282ec1df41SThomas Gleixner 
/* MTRR_CHANGE_MASK_* bits recording per-CPU inconsistencies seen at boot. */
static unsigned long smp_changes_mask;
/* Non-zero once get_mtrr_state() has populated mtrr_state. */
static int mtrr_state_set;
/* AMD TOP_MEM2 boundary (read from MSR_K8_TOP_MEM2); 0 if not applicable. */
u64 mtrr_tom2;

/* Cached software copy of the hardware MTRR configuration. */
struct mtrr_state_type mtrr_state = {};
EXPORT_SYMBOL_GPL(mtrr_state);
35932d27a7SSheng Yang 
/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initalization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	/* Only relevant on AMD family 0x0f (K8) and later. */
	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		/* Firmware bug: warn, then clear the bit ourselves. */
		printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}
613ff42da5SAndreas Herrmann 
622e5d9c85Svenkatesh.pallipadi@intel.com /*
632e5d9c85Svenkatesh.pallipadi@intel.com  * Returns the effective MTRR type for the region
642e5d9c85Svenkatesh.pallipadi@intel.com  * Error returns:
652e5d9c85Svenkatesh.pallipadi@intel.com  * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
662e5d9c85Svenkatesh.pallipadi@intel.com  * - 0xFF - when MTRR is not enabled
672e5d9c85Svenkatesh.pallipadi@intel.com  */
682e5d9c85Svenkatesh.pallipadi@intel.com u8 mtrr_type_lookup(u64 start, u64 end)
692e5d9c85Svenkatesh.pallipadi@intel.com {
702e5d9c85Svenkatesh.pallipadi@intel.com 	int i;
712e5d9c85Svenkatesh.pallipadi@intel.com 	u64 base, mask;
722e5d9c85Svenkatesh.pallipadi@intel.com 	u8 prev_match, curr_match;
732e5d9c85Svenkatesh.pallipadi@intel.com 
742e5d9c85Svenkatesh.pallipadi@intel.com 	if (!mtrr_state_set)
752e5d9c85Svenkatesh.pallipadi@intel.com 		return 0xFF;
762e5d9c85Svenkatesh.pallipadi@intel.com 
772e5d9c85Svenkatesh.pallipadi@intel.com 	if (!mtrr_state.enabled)
782e5d9c85Svenkatesh.pallipadi@intel.com 		return 0xFF;
792e5d9c85Svenkatesh.pallipadi@intel.com 
802e5d9c85Svenkatesh.pallipadi@intel.com 	/* Make end inclusive end, instead of exclusive */
812e5d9c85Svenkatesh.pallipadi@intel.com 	end--;
822e5d9c85Svenkatesh.pallipadi@intel.com 
832e5d9c85Svenkatesh.pallipadi@intel.com 	/* Look in fixed ranges. Just return the type as per start */
842e5d9c85Svenkatesh.pallipadi@intel.com 	if (mtrr_state.have_fixed && (start < 0x100000)) {
852e5d9c85Svenkatesh.pallipadi@intel.com 		int idx;
862e5d9c85Svenkatesh.pallipadi@intel.com 
872e5d9c85Svenkatesh.pallipadi@intel.com 		if (start < 0x80000) {
882e5d9c85Svenkatesh.pallipadi@intel.com 			idx = 0;
892e5d9c85Svenkatesh.pallipadi@intel.com 			idx += (start >> 16);
902e5d9c85Svenkatesh.pallipadi@intel.com 			return mtrr_state.fixed_ranges[idx];
912e5d9c85Svenkatesh.pallipadi@intel.com 		} else if (start < 0xC0000) {
922e5d9c85Svenkatesh.pallipadi@intel.com 			idx = 1 * 8;
932e5d9c85Svenkatesh.pallipadi@intel.com 			idx += ((start - 0x80000) >> 14);
942e5d9c85Svenkatesh.pallipadi@intel.com 			return mtrr_state.fixed_ranges[idx];
952e5d9c85Svenkatesh.pallipadi@intel.com 		} else if (start < 0x1000000) {
962e5d9c85Svenkatesh.pallipadi@intel.com 			idx = 3 * 8;
972e5d9c85Svenkatesh.pallipadi@intel.com 			idx += ((start - 0xC0000) >> 12);
982e5d9c85Svenkatesh.pallipadi@intel.com 			return mtrr_state.fixed_ranges[idx];
992e5d9c85Svenkatesh.pallipadi@intel.com 		}
1002e5d9c85Svenkatesh.pallipadi@intel.com 	}
1012e5d9c85Svenkatesh.pallipadi@intel.com 
1022e5d9c85Svenkatesh.pallipadi@intel.com 	/*
1032e5d9c85Svenkatesh.pallipadi@intel.com 	 * Look in variable ranges
1042e5d9c85Svenkatesh.pallipadi@intel.com 	 * Look of multiple ranges matching this address and pick type
1052e5d9c85Svenkatesh.pallipadi@intel.com 	 * as per MTRR precedence
1062e5d9c85Svenkatesh.pallipadi@intel.com 	 */
107e686d341SHarvey Harrison 	if (!(mtrr_state.enabled & 2)) {
1082e5d9c85Svenkatesh.pallipadi@intel.com 		return mtrr_state.def_type;
1092e5d9c85Svenkatesh.pallipadi@intel.com 	}
1102e5d9c85Svenkatesh.pallipadi@intel.com 
1112e5d9c85Svenkatesh.pallipadi@intel.com 	prev_match = 0xFF;
1122e5d9c85Svenkatesh.pallipadi@intel.com 	for (i = 0; i < num_var_ranges; ++i) {
1132e5d9c85Svenkatesh.pallipadi@intel.com 		unsigned short start_state, end_state;
1142e5d9c85Svenkatesh.pallipadi@intel.com 
1152e5d9c85Svenkatesh.pallipadi@intel.com 		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
1162e5d9c85Svenkatesh.pallipadi@intel.com 			continue;
1172e5d9c85Svenkatesh.pallipadi@intel.com 
1182e5d9c85Svenkatesh.pallipadi@intel.com 		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
1192e5d9c85Svenkatesh.pallipadi@intel.com 		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
1202e5d9c85Svenkatesh.pallipadi@intel.com 		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
1212e5d9c85Svenkatesh.pallipadi@intel.com 		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
1222e5d9c85Svenkatesh.pallipadi@intel.com 
1232e5d9c85Svenkatesh.pallipadi@intel.com 		start_state = ((start & mask) == (base & mask));
1242e5d9c85Svenkatesh.pallipadi@intel.com 		end_state = ((end & mask) == (base & mask));
1252e5d9c85Svenkatesh.pallipadi@intel.com 		if (start_state != end_state)
1262e5d9c85Svenkatesh.pallipadi@intel.com 			return 0xFE;
1272e5d9c85Svenkatesh.pallipadi@intel.com 
1282e5d9c85Svenkatesh.pallipadi@intel.com 		if ((start & mask) != (base & mask)) {
1292e5d9c85Svenkatesh.pallipadi@intel.com 			continue;
1302e5d9c85Svenkatesh.pallipadi@intel.com 		}
1312e5d9c85Svenkatesh.pallipadi@intel.com 
1322e5d9c85Svenkatesh.pallipadi@intel.com 		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
1332e5d9c85Svenkatesh.pallipadi@intel.com 		if (prev_match == 0xFF) {
1342e5d9c85Svenkatesh.pallipadi@intel.com 			prev_match = curr_match;
1352e5d9c85Svenkatesh.pallipadi@intel.com 			continue;
1362e5d9c85Svenkatesh.pallipadi@intel.com 		}
1372e5d9c85Svenkatesh.pallipadi@intel.com 
1382e5d9c85Svenkatesh.pallipadi@intel.com 		if (prev_match == MTRR_TYPE_UNCACHABLE ||
1392e5d9c85Svenkatesh.pallipadi@intel.com 		    curr_match == MTRR_TYPE_UNCACHABLE) {
1402e5d9c85Svenkatesh.pallipadi@intel.com 			return MTRR_TYPE_UNCACHABLE;
1412e5d9c85Svenkatesh.pallipadi@intel.com 		}
1422e5d9c85Svenkatesh.pallipadi@intel.com 
1432e5d9c85Svenkatesh.pallipadi@intel.com 		if ((prev_match == MTRR_TYPE_WRBACK &&
1442e5d9c85Svenkatesh.pallipadi@intel.com 		     curr_match == MTRR_TYPE_WRTHROUGH) ||
1452e5d9c85Svenkatesh.pallipadi@intel.com 		    (prev_match == MTRR_TYPE_WRTHROUGH &&
1462e5d9c85Svenkatesh.pallipadi@intel.com 		     curr_match == MTRR_TYPE_WRBACK)) {
1472e5d9c85Svenkatesh.pallipadi@intel.com 			prev_match = MTRR_TYPE_WRTHROUGH;
1482e5d9c85Svenkatesh.pallipadi@intel.com 			curr_match = MTRR_TYPE_WRTHROUGH;
1492e5d9c85Svenkatesh.pallipadi@intel.com 		}
1502e5d9c85Svenkatesh.pallipadi@intel.com 
1512e5d9c85Svenkatesh.pallipadi@intel.com 		if (prev_match != curr_match) {
1522e5d9c85Svenkatesh.pallipadi@intel.com 			return MTRR_TYPE_UNCACHABLE;
1532e5d9c85Svenkatesh.pallipadi@intel.com 		}
1542e5d9c85Svenkatesh.pallipadi@intel.com 	}
1552e5d9c85Svenkatesh.pallipadi@intel.com 
15695ffa243SYinghai Lu 	if (mtrr_tom2) {
15795ffa243SYinghai Lu 		if (start >= (1ULL<<32) && (end < mtrr_tom2))
15835605a10SYinghai Lu 			return MTRR_TYPE_WRBACK;
15935605a10SYinghai Lu 	}
16035605a10SYinghai Lu 
1612e5d9c85Svenkatesh.pallipadi@intel.com 	if (prev_match != 0xFF)
1622e5d9c85Svenkatesh.pallipadi@intel.com 		return prev_match;
1632e5d9c85Svenkatesh.pallipadi@intel.com 
1642e5d9c85Svenkatesh.pallipadi@intel.com 	return mtrr_state.def_type;
1652e5d9c85Svenkatesh.pallipadi@intel.com }
1662e5d9c85Svenkatesh.pallipadi@intel.com 
/*  Get the MSR pair relating to a var range  */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	/* Read PHYSBASEn and PHYSMASKn for variable range @index. */
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}
1742ec1df41SThomas Gleixner 
17595ffa243SYinghai Lu /*  fill the MSR pair relating to a var range  */
17695ffa243SYinghai Lu void fill_mtrr_var_range(unsigned int index,
17795ffa243SYinghai Lu 		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
17895ffa243SYinghai Lu {
17995ffa243SYinghai Lu 	struct mtrr_var_range *vr;
18095ffa243SYinghai Lu 
18195ffa243SYinghai Lu 	vr = mtrr_state.var_ranges;
18295ffa243SYinghai Lu 
18395ffa243SYinghai Lu 	vr[index].base_lo = base_lo;
18495ffa243SYinghai Lu 	vr[index].base_hi = base_hi;
18595ffa243SYinghai Lu 	vr[index].mask_lo = mask_lo;
18695ffa243SYinghai Lu 	vr[index].mask_hi = mask_hi;
18795ffa243SYinghai Lu }
18895ffa243SYinghai Lu 
/*
 * Read all eleven fixed-range MTRR MSRs into @frs, treated as an array
 * of 32-bit words (each MSR contributes two consecutive words).
 */
static void
get_fixed_ranges(mtrr_type * frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	/* On AMD K8+, make sure BIOS has not left MtrrFixDramModEn set. */
	k8_check_syscfg_dram_mod_en();

	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);

	/* Two 16K-block MSRs follow the single 64K MSR... */
	for (i = 0; i < 2; i++)
		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
	/* ...then eight 4K-block MSRs. */
	for (i = 0; i < 8; i++)
		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
}
2042ec1df41SThomas Gleixner 
/*
 * Snapshot the fixed-range MTRRs into mtrr_state; no-op when the CPU
 * lacks MTRR support.  @info is unused.
 */
void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}
2102ec1df41SThomas Gleixner 
/* State for coalescing adjacent same-type fixed ranges when printing. */
static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;	/* 0 => no segment pending */
static mtrr_type __initdata last_fixed_type;
214d4c90e37SYinghai Lu 
215d4c90e37SYinghai Lu static void __init print_fixed_last(void)
216d4c90e37SYinghai Lu {
217d4c90e37SYinghai Lu 	if (!last_fixed_end)
218d4c90e37SYinghai Lu 		return;
219d4c90e37SYinghai Lu 
220d4c90e37SYinghai Lu 	printk(KERN_DEBUG "  %05X-%05X %s\n", last_fixed_start,
221d4c90e37SYinghai Lu 		last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
222d4c90e37SYinghai Lu 
223d4c90e37SYinghai Lu 	last_fixed_end = 0;
224d4c90e37SYinghai Lu }
225d4c90e37SYinghai Lu 
226d4c90e37SYinghai Lu static void __init update_fixed_last(unsigned base, unsigned end,
227d4c90e37SYinghai Lu 				       mtrr_type type)
228d4c90e37SYinghai Lu {
229d4c90e37SYinghai Lu 	last_fixed_start = base;
230d4c90e37SYinghai Lu 	last_fixed_end = end;
231d4c90e37SYinghai Lu 	last_fixed_type = type;
232d4c90e37SYinghai Lu }
233d4c90e37SYinghai Lu 
234d4c90e37SYinghai Lu static void __init print_fixed(unsigned base, unsigned step,
235d4c90e37SYinghai Lu 			       const mtrr_type *types)
2362ec1df41SThomas Gleixner {
2372ec1df41SThomas Gleixner 	unsigned i;
2382ec1df41SThomas Gleixner 
239d4c90e37SYinghai Lu 	for (i = 0; i < 8; ++i, ++types, base += step) {
240d4c90e37SYinghai Lu 		if (last_fixed_end == 0) {
241d4c90e37SYinghai Lu 			update_fixed_last(base, base + step, *types);
242d4c90e37SYinghai Lu 			continue;
243d4c90e37SYinghai Lu 		}
244d4c90e37SYinghai Lu 		if (last_fixed_end == base && last_fixed_type == *types) {
245d4c90e37SYinghai Lu 			last_fixed_end = base + step;
246d4c90e37SYinghai Lu 			continue;
247d4c90e37SYinghai Lu 		}
248d4c90e37SYinghai Lu 		/* new segments: gap or different type */
249d4c90e37SYinghai Lu 		print_fixed_last();
250d4c90e37SYinghai Lu 		update_fixed_last(base, base + step, *types);
251d4c90e37SYinghai Lu 	}
2522ec1df41SThomas Gleixner }
2532ec1df41SThomas Gleixner 
2542e5d9c85Svenkatesh.pallipadi@intel.com static void prepare_set(void);
2552e5d9c85Svenkatesh.pallipadi@intel.com static void post_set(void);
2562e5d9c85Svenkatesh.pallipadi@intel.com 
/* Dump the cached MTRR configuration (fixed, variable, TOM2) to the log. */
static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	printk(KERN_DEBUG "MTRR default type: %s\n",
			 mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n",
		       mtrr_state.enabled & 1 ? "en" : "dis");
		/* eight 64K entries, then 2 blocks of 16K, then 8 of 4K */
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
	       mtrr_state.enabled & 2 ? "en" : "dis");
	/* Hex digits needed for the address bits above bit 31 (for %0*X). */
	high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
	for (i = 0; i < num_var_ranges; ++i) {
		/* Bit 11 of PHYSMASK is the valid bit. */
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			printk(KERN_DEBUG "  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
			       i,
			       high_width,
			       mtrr_state.var_ranges[i].base_hi,
			       mtrr_state.var_ranges[i].base_lo >> 12,
			       high_width,
			       mtrr_state.var_ranges[i].mask_hi,
			       mtrr_state.var_ranges[i].mask_lo >> 12,
			       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			printk(KERN_DEBUG "  %u disabled\n", i);
	}
	if (mtrr_tom2) {
		printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n",
				  mtrr_tom2, mtrr_tom2>>20);
	}
}
2988ad97905SYinghai Lu 
/*  Grab all of the MTRR state for this CPU into *state  */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned long flags;

	vrs = mtrr_state.var_ranges;

	/* MTRRcap bit 8: fixed-range MTRRs supported. */
	rdmsr(MSR_MTRRcap, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	/* MTRRdefType: bits 0-7 default type, bits 10-11 enable flags. */
	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;
		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		/* Mask off bits below 8MB (reserved per the register layout). */
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	print_mtrr_state();

	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);

}
3452ec1df41SThomas Gleixner 
3462ec1df41SThomas Gleixner /*  Some BIOS's are fucked and don't set all MTRRs the same!  */
3472ec1df41SThomas Gleixner void __init mtrr_state_warn(void)
3482ec1df41SThomas Gleixner {
3492ec1df41SThomas Gleixner 	unsigned long mask = smp_changes_mask;
3502ec1df41SThomas Gleixner 
3512ec1df41SThomas Gleixner 	if (!mask)
3522ec1df41SThomas Gleixner 		return;
3532ec1df41SThomas Gleixner 	if (mask & MTRR_CHANGE_MASK_FIXED)
3542ec1df41SThomas Gleixner 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
3552ec1df41SThomas Gleixner 	if (mask & MTRR_CHANGE_MASK_VARIABLE)
3562ec1df41SThomas Gleixner 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
3572ec1df41SThomas Gleixner 	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
3582ec1df41SThomas Gleixner 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
3592ec1df41SThomas Gleixner 	printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
3602ec1df41SThomas Gleixner 	printk(KERN_INFO "mtrr: corrected configuration.\n");
3612ec1df41SThomas Gleixner }
3622ec1df41SThomas Gleixner 
3632ec1df41SThomas Gleixner /* Doesn't attempt to pass an error out to MTRR users
3642ec1df41SThomas Gleixner    because it's quite complicated in some cases and probably not
3652ec1df41SThomas Gleixner    worth it because the best error handling is to ignore it. */
3662ec1df41SThomas Gleixner void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
3672ec1df41SThomas Gleixner {
3682ec1df41SThomas Gleixner 	if (wrmsr_safe(msr, a, b) < 0)
3692ec1df41SThomas Gleixner 		printk(KERN_ERR
3702ec1df41SThomas Gleixner 			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
3712ec1df41SThomas Gleixner 			smp_processor_id(), msr, a, b);
3722ec1df41SThomas Gleixner }
3732ec1df41SThomas Gleixner 
3742ec1df41SThomas Gleixner /**
3751d3381ebSRandy Dunlap  * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
3761d3381ebSRandy Dunlap  * @msr: MSR address of the MTTR which should be checked and updated
3771d3381ebSRandy Dunlap  * @changed: pointer which indicates whether the MTRR needed to be changed
3781d3381ebSRandy Dunlap  * @msrwords: pointer to the MSR values which the MSR should have
3792ec1df41SThomas Gleixner  */
3802d2ee8deSPaul Jimenez static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
3812ec1df41SThomas Gleixner {
3822ec1df41SThomas Gleixner 	unsigned lo, hi;
3832ec1df41SThomas Gleixner 
3842ec1df41SThomas Gleixner 	rdmsr(msr, lo, hi);
3852ec1df41SThomas Gleixner 
3862ec1df41SThomas Gleixner 	if (lo != msrwords[0] || hi != msrwords[1]) {
3872ec1df41SThomas Gleixner 		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
3882d2ee8deSPaul Jimenez 		*changed = true;
3892ec1df41SThomas Gleixner 	}
3902ec1df41SThomas Gleixner }
3912ec1df41SThomas Gleixner 
3921d3381ebSRandy Dunlap /**
3931d3381ebSRandy Dunlap  * generic_get_free_region - Get a free MTRR.
3941d3381ebSRandy Dunlap  * @base: The starting (base) address of the region.
3951d3381ebSRandy Dunlap  * @size: The size (in bytes) of the region.
3961d3381ebSRandy Dunlap  * @replace_reg: mtrr index to be replaced; set to invalid value if none.
3971d3381ebSRandy Dunlap  *
3981d3381ebSRandy Dunlap  * Returns: The index of the region on success, else negative on error.
3992ec1df41SThomas Gleixner  */
4001d3381ebSRandy Dunlap int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
4012ec1df41SThomas Gleixner {
4022ec1df41SThomas Gleixner 	int i, max;
4032ec1df41SThomas Gleixner 	mtrr_type ltype;
4042ec1df41SThomas Gleixner 	unsigned long lbase, lsize;
4052ec1df41SThomas Gleixner 
4062ec1df41SThomas Gleixner 	max = num_var_ranges;
4072ec1df41SThomas Gleixner 	if (replace_reg >= 0 && replace_reg < max)
4082ec1df41SThomas Gleixner 		return replace_reg;
4092ec1df41SThomas Gleixner 	for (i = 0; i < max; ++i) {
4102ec1df41SThomas Gleixner 		mtrr_if->get(i, &lbase, &lsize, &ltype);
4112ec1df41SThomas Gleixner 		if (lsize == 0)
4122ec1df41SThomas Gleixner 			return i;
4132ec1df41SThomas Gleixner 	}
4142ec1df41SThomas Gleixner 	return -ENOSPC;
4152ec1df41SThomas Gleixner }
4162ec1df41SThomas Gleixner 
/*
 * Read back variable MTRR @reg into *base/*size (in pages) and *type.
 * A cleared valid bit yields base = size = type = 0.
 */
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;
	unsigned int tmp, hi;
	int cpu;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	cpu = get_cpu();

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	/* Bit 11 of PHYSMASK is the valid bit. */
	if ((mask_lo & 0x800) == 0) {
		/*  Invalid (i.e. free) range  */
		*base = 0;
		*size = 0;
		*type = 0;
		goto out_put_cpu;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask_lo = size_or_mask | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls(tmp);
	if (hi > 0) {
		tmp |= ~((1<<(hi - 1)) - 1);

		/* Compare the repaired mask against the raw one. */
		if (tmp != mask_lo) {
			WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
			mask_lo = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;

out_put_cpu:
	put_cpu();
}
4682ec1df41SThomas Gleixner 
/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type * frs)
{
	/* View the type array as 64-bit words: one word per fixed-range MSR. */
	unsigned long long *saved = (unsigned long long *) frs;
	bool changed = false;
	int block=-1, range;

	/* On AMD K8+, make sure BIOS has not left MtrrFixDramModEn set. */
	k8_check_syscfg_dram_mod_en();

	/* Walk the 64K/16K/4K MSR blocks, writing only MSRs that differ. */
	while (fixed_range_blocks[++block].ranges)
	    for (range=0; range < fixed_range_blocks[block].ranges; range++)
		set_fixed_range(fixed_range_blocks[block].base_msr + range,
		    &changed, (unsigned int *) saved++);

	return changed;
}
4882ec1df41SThomas Gleixner 
/*  Set the MSR pair relating to a var range. Returns TRUE if
    changes are made  */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	/*
	 * PHYSBASE: 0xfffff0ff keeps address bits 12-31 plus the type
	 * field (bits 0-7); the high word is limited to the implemented
	 * physical address bits via size_and_mask.
	 */
	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	/* PHYSMASK: 0xfffff800 keeps the valid bit (11) and mask bits 12-31 */
	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}
5142ec1df41SThomas Gleixner 
5152ec1df41SThomas Gleixner static u32 deftype_lo, deftype_hi;
5162ec1df41SThomas Gleixner 
/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*  Set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value  */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		/* Splice def_type (bits 0-7) and enable bits (10-11) in. */
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
5452ec1df41SThomas Gleixner 
5462ec1df41SThomas Gleixner 
/* CR4 saved across prepare_set()/post_set() when PGE is supported. */
static unsigned long cr4 = 0;
/* Serializes the prepare_set()/post_set() MTRR update sequence. */
static DEFINE_SPINLOCK(set_atomicity_lock);
5492ec1df41SThomas Gleixner 
/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain.  The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */

/* Acquires set_atomicity_lock; released by the matching post_set(). */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*  Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots  */

	spin_lock(&set_atomicity_lock);

	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
	if ( cpu_has_pge ) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/*  Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/*  Disable MTRRs, and set the default type to uncached  */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}
5872ec1df41SThomas Gleixner 
5882ec1df41SThomas Gleixner static void post_set(void) __releases(set_atomicity_lock)
5892ec1df41SThomas Gleixner {
5902ec1df41SThomas Gleixner 	/*  Flush TLBs (no need to flush caches - they are disabled)  */
5912ec1df41SThomas Gleixner 	__flush_tlb();
5922ec1df41SThomas Gleixner 
5932ec1df41SThomas Gleixner 	/* Intel (P6) standard MTRRs */
5942ec1df41SThomas Gleixner 	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
5952ec1df41SThomas Gleixner 
5962ec1df41SThomas Gleixner 	/*  Enable caches  */
5972ec1df41SThomas Gleixner 	write_cr0(read_cr0() & 0xbfffffff);
5982ec1df41SThomas Gleixner 
5992ec1df41SThomas Gleixner 	/*  Restore value of CR4  */
6002ec1df41SThomas Gleixner 	if ( cpu_has_pge )
6012ec1df41SThomas Gleixner 		write_cr4(cr4);
6022ec1df41SThomas Gleixner 	spin_unlock(&set_atomicity_lock);
6032ec1df41SThomas Gleixner }
6042ec1df41SThomas Gleixner 
6052ec1df41SThomas Gleixner static void generic_set_all(void)
6062ec1df41SThomas Gleixner {
6072ec1df41SThomas Gleixner 	unsigned long mask, count;
6082ec1df41SThomas Gleixner 	unsigned long flags;
6092ec1df41SThomas Gleixner 
6102ec1df41SThomas Gleixner 	local_irq_save(flags);
6112ec1df41SThomas Gleixner 	prepare_set();
6122ec1df41SThomas Gleixner 
6132ec1df41SThomas Gleixner 	/* Actually set the state */
6142ec1df41SThomas Gleixner 	mask = set_mtrr_state();
6152ec1df41SThomas Gleixner 
6162e5d9c85Svenkatesh.pallipadi@intel.com 	/* also set PAT */
6172e5d9c85Svenkatesh.pallipadi@intel.com 	pat_init();
6182e5d9c85Svenkatesh.pallipadi@intel.com 
6192ec1df41SThomas Gleixner 	post_set();
6202ec1df41SThomas Gleixner 	local_irq_restore(flags);
6212ec1df41SThomas Gleixner 
6222ec1df41SThomas Gleixner 	/*  Use the atomic bitops to update the global mask  */
6232ec1df41SThomas Gleixner 	for (count = 0; count < sizeof mask * 8; ++count) {
6242ec1df41SThomas Gleixner 		if (mask & 0x01)
6252ec1df41SThomas Gleixner 			set_bit(count, &smp_changes_mask);
6262ec1df41SThomas Gleixner 		mask >>= 1;
6272ec1df41SThomas Gleixner 	}
6282ec1df41SThomas Gleixner 
6292ec1df41SThomas Gleixner }
6302ec1df41SThomas Gleixner 
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region, in units of PAGE_SIZE pages
	   (it is shifted left by PAGE_SHIFT below).
    <size> The size of the region, in pages. If this is 0 the region is
	   disabled.
    <type> The memory type of the region.
    [RETURNS] Nothing.
    Also updates the cached copy in mtrr_state.var_ranges[reg] so the
    saved software state stays in sync with the hardware registers.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	/* Interrupts stay off from prepare_set() until post_set() has
	   re-enabled caching. */
	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		/* Low word: physical base address | memory type. */
		vr->base_lo = base << PAGE_SHIFT | type;
		/* High word: address bits above 32, clipped to the CPU's
		   physical address width via size_and_mask. */
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		/* -size yields the alignment mask for a power-of-two sized
		   region; 0x800 sets the valid bit (bit 11) of the mask
		   register. */
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
6672ec1df41SThomas Gleixner 
6682ec1df41SThomas Gleixner int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
6692ec1df41SThomas Gleixner {
6702ec1df41SThomas Gleixner 	unsigned long lbase, last;
6712ec1df41SThomas Gleixner 
6722ec1df41SThomas Gleixner 	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
6732ec1df41SThomas Gleixner 	    and not touch 0x70000000->0x7003FFFF */
6742ec1df41SThomas Gleixner 	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
6752ec1df41SThomas Gleixner 	    boot_cpu_data.x86_model == 1 &&
6762ec1df41SThomas Gleixner 	    boot_cpu_data.x86_mask <= 7) {
6772ec1df41SThomas Gleixner 		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
6782ec1df41SThomas Gleixner 			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
6792ec1df41SThomas Gleixner 			return -EINVAL;
6802ec1df41SThomas Gleixner 		}
6812ec1df41SThomas Gleixner 		if (!(base + size < 0x70000 || base > 0x7003F) &&
6822ec1df41SThomas Gleixner 		    (type == MTRR_TYPE_WRCOMB
6832ec1df41SThomas Gleixner 		     || type == MTRR_TYPE_WRBACK)) {
6842ec1df41SThomas Gleixner 			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
6852ec1df41SThomas Gleixner 			return -EINVAL;
6862ec1df41SThomas Gleixner 		}
6872ec1df41SThomas Gleixner 	}
6882ec1df41SThomas Gleixner 
6892ec1df41SThomas Gleixner 	/*  Check upper bits of base and last are equal and lower bits are 0
6902ec1df41SThomas Gleixner 	    for base and 1 for last  */
6912ec1df41SThomas Gleixner 	last = base + size - 1;
6922ec1df41SThomas Gleixner 	for (lbase = base; !(lbase & 1) && (last & 1);
6932ec1df41SThomas Gleixner 	     lbase = lbase >> 1, last = last >> 1) ;
6942ec1df41SThomas Gleixner 	if (lbase != last) {
6952ec1df41SThomas Gleixner 		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
6962ec1df41SThomas Gleixner 		       base, size);
6972ec1df41SThomas Gleixner 		return -EINVAL;
6982ec1df41SThomas Gleixner 	}
6992ec1df41SThomas Gleixner 	return 0;
7002ec1df41SThomas Gleixner }
7012ec1df41SThomas Gleixner 
7022ec1df41SThomas Gleixner 
7032ec1df41SThomas Gleixner static int generic_have_wrcomb(void)
7042ec1df41SThomas Gleixner {
7052ec1df41SThomas Gleixner 	unsigned long config, dummy;
706d9bcc01dSJaswinder Singh Rajput 	rdmsr(MSR_MTRRcap, config, dummy);
7072ec1df41SThomas Gleixner 	return (config & (1 << 10));
7082ec1df41SThomas Gleixner }
7092ec1df41SThomas Gleixner 
/* have_wrcomb hook for CPUs known to always support write-combining. */
int positive_have_wrcomb(void)
{
	return 1;
}
7142ec1df41SThomas Gleixner 
/* Operations table for CPUs with the generic (Intel-style) MTRR
 * interface; the hooks are defined above in this file. */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
	.set_all	   = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};
726