xref: /linux/arch/x86/kernel/cpu/mtrr/generic.c (revision 01c97c7303580682751b5aaae043b639bdcbacb3)
1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2a1a499a3SJaswinder Singh Rajput /*
3a1a499a3SJaswinder Singh Rajput  * This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
4a1a499a3SJaswinder Singh Rajput  * because MTRRs can span up to 40 bits (36 bits on most modern x86).
5a1a499a3SJaswinder Singh Rajput  */
6a1a499a3SJaswinder Singh Rajput 
7186f4360SPaul Gortmaker #include <linux/export.h>
82ec1df41SThomas Gleixner #include <linux/init.h>
9a1a499a3SJaswinder Singh Rajput #include <linux/io.h>
102ec1df41SThomas Gleixner #include <linux/mm.h>
11a1a499a3SJaswinder Singh Rajput 
12a1a499a3SJaswinder Singh Rajput #include <asm/processor-flags.h>
13a1a499a3SJaswinder Singh Rajput #include <asm/cpufeature.h>
14a1a499a3SJaswinder Singh Rajput #include <asm/tlbflush.h>
152ec1df41SThomas Gleixner #include <asm/mtrr.h>
162ec1df41SThomas Gleixner #include <asm/msr.h>
17eb243d1dSIngo Molnar #include <asm/memtype.h>
18a1a499a3SJaswinder Singh Rajput 
192ec1df41SThomas Gleixner #include "mtrr.h"
202ec1df41SThomas Gleixner 
212ec1df41SThomas Gleixner struct fixed_range_block {
222ec1df41SThomas Gleixner 	int base_msr;		/* start address of an MTRR block */
232ec1df41SThomas Gleixner 	int ranges;		/* number of MTRRs in this block  */
242ec1df41SThomas Gleixner };
252ec1df41SThomas Gleixner 
262ec1df41SThomas Gleixner static struct fixed_range_block fixed_range_blocks[] = {
27a036c7a3SJaswinder Singh Rajput 	{ MSR_MTRRfix64K_00000, 1 }, /* one   64k MTRR  */
287d9d55e4SJaswinder Singh Rajput 	{ MSR_MTRRfix16K_80000, 2 }, /* two   16k MTRRs */
29ba5673ffSJaswinder Singh Rajput 	{ MSR_MTRRfix4K_C0000,  8 }, /* eight  4k MTRRs */
302ec1df41SThomas Gleixner 	{}
312ec1df41SThomas Gleixner };
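/*
 * Descriptive note (added for clarity): together these blocks describe the
 * eleven fixed-range MTRR MSRs that cover the first 1 MiB of physical
 * address space: one 64K MSR for 0x00000-0x7FFFF, two 16K MSRs for
 * 0x80000-0xBFFFF and eight 4K MSRs for 0xC0000-0xFFFFF.  Each 64-bit MSR
 * packs eight one-byte memory types, giving 88 fixed-range entries in total.
 */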
322ec1df41SThomas Gleixner 
332ec1df41SThomas Gleixner static unsigned long smp_changes_mask;
342e5d9c85Svenkatesh.pallipadi@intel.com static int mtrr_state_set;
3595ffa243SYinghai Lu u64 mtrr_tom2;
362ec1df41SThomas Gleixner 
37a1a499a3SJaswinder Singh Rajput struct mtrr_state_type mtrr_state;
38932d27a7SSheng Yang EXPORT_SYMBOL_GPL(mtrr_state);
39932d27a7SSheng Yang 
40a1a499a3SJaswinder Singh Rajput /*
413ff42da5SAndreas Herrmann  * BIOS is expected to clear MtrrFixDramModEn bit, see for example
423ff42da5SAndreas Herrmann  * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
433ff42da5SAndreas Herrmann  * Opteron Processors" (26094 Rev. 3.30 February 2006), section
443ff42da5SAndreas Herrmann  * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
456a6256f9SAdam Buchbinder  * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
463ff42da5SAndreas Herrmann  * 0 for operation."
473ff42da5SAndreas Herrmann  */
483ff42da5SAndreas Herrmann static inline void k8_check_syscfg_dram_mod_en(void)
493ff42da5SAndreas Herrmann {
503ff42da5SAndreas Herrmann 	u32 lo, hi;
513ff42da5SAndreas Herrmann 
523ff42da5SAndreas Herrmann 	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
533ff42da5SAndreas Herrmann 	      (boot_cpu_data.x86 >= 0x0f)))
543ff42da5SAndreas Herrmann 		return;
553ff42da5SAndreas Herrmann 
56059e5c32SBrijesh Singh 	rdmsr(MSR_AMD64_SYSCFG, lo, hi);
573ff42da5SAndreas Herrmann 	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
581b74dde7SChen Yucong 		pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
593ff42da5SAndreas Herrmann 		       " not cleared by BIOS, clearing this bit\n",
603ff42da5SAndreas Herrmann 		       smp_processor_id());
613ff42da5SAndreas Herrmann 		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
62059e5c32SBrijesh Singh 		mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi);
633ff42da5SAndreas Herrmann 	}
643ff42da5SAndreas Herrmann }
653ff42da5SAndreas Herrmann 
66351e5a70SVenkatesh Pallipadi /* Get the size of a contiguous MTRR range */
67351e5a70SVenkatesh Pallipadi static u64 get_mtrr_size(u64 mask)
68351e5a70SVenkatesh Pallipadi {
69351e5a70SVenkatesh Pallipadi 	u64 size;
70351e5a70SVenkatesh Pallipadi 
71351e5a70SVenkatesh Pallipadi 	mask >>= PAGE_SHIFT;
72351e5a70SVenkatesh Pallipadi 	mask |= size_or_mask;
73351e5a70SVenkatesh Pallipadi 	size = -mask;
74351e5a70SVenkatesh Pallipadi 	size <<= PAGE_SHIFT;
75351e5a70SVenkatesh Pallipadi 	return size;
76351e5a70SVenkatesh Pallipadi }
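/*
 * Worked example for get_mtrr_size() (illustrative values only, assuming a
 * 36-bit physical address width, so size_or_mask == 0xffffffffff000000):
 * a variable MTRR covering 8 MiB has PHYSMASK == 0xfff800000.  Then
 * mask >> PAGE_SHIFT == 0xfff800, OR-ing in size_or_mask fills the upper
 * bits with ones, and size = -mask == 0x800 pages, i.e. 8 MiB after the
 * final shift left by PAGE_SHIFT.
 */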
77351e5a70SVenkatesh Pallipadi 
782e5d9c85Svenkatesh.pallipadi@intel.com /*
79a7f07cfbSVenkatesh Pallipadi  * Check and return the effective type for MTRR-MTRR type overlap.
80a7f07cfbSVenkatesh Pallipadi  * Returns 1 if the effective type is UNCACHEABLE, else returns 0
81a7f07cfbSVenkatesh Pallipadi  */
82a7f07cfbSVenkatesh Pallipadi static int check_type_overlap(u8 *prev, u8 *curr)
83a7f07cfbSVenkatesh Pallipadi {
84a7f07cfbSVenkatesh Pallipadi 	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
85a7f07cfbSVenkatesh Pallipadi 		*prev = MTRR_TYPE_UNCACHABLE;
86a7f07cfbSVenkatesh Pallipadi 		*curr = MTRR_TYPE_UNCACHABLE;
87a7f07cfbSVenkatesh Pallipadi 		return 1;
88a7f07cfbSVenkatesh Pallipadi 	}
89a7f07cfbSVenkatesh Pallipadi 
90a7f07cfbSVenkatesh Pallipadi 	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
91a7f07cfbSVenkatesh Pallipadi 	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
92a7f07cfbSVenkatesh Pallipadi 		*prev = MTRR_TYPE_WRTHROUGH;
93a7f07cfbSVenkatesh Pallipadi 		*curr = MTRR_TYPE_WRTHROUGH;
94a7f07cfbSVenkatesh Pallipadi 	}
95a7f07cfbSVenkatesh Pallipadi 
96a7f07cfbSVenkatesh Pallipadi 	if (*prev != *curr) {
97a7f07cfbSVenkatesh Pallipadi 		*prev = MTRR_TYPE_UNCACHABLE;
98a7f07cfbSVenkatesh Pallipadi 		*curr = MTRR_TYPE_UNCACHABLE;
99a7f07cfbSVenkatesh Pallipadi 		return 1;
100a7f07cfbSVenkatesh Pallipadi 	}
101a7f07cfbSVenkatesh Pallipadi 
102a7f07cfbSVenkatesh Pallipadi 	return 0;
103a7f07cfbSVenkatesh Pallipadi }
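/*
 * Example of the precedence rules implemented above: if one MTRR maps a
 * region write-back and another maps it write-through, the effective type
 * is write-through; if either one is uncacheable, the result is
 * uncacheable; any other combination of differing types is conservatively
 * treated as uncacheable here as well.
 */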
104a7f07cfbSVenkatesh Pallipadi 
1050cc705f5SToshi Kani /**
1060cc705f5SToshi Kani  * mtrr_type_lookup_fixed - look up memory type in MTRR fixed entries
1070cc705f5SToshi Kani  *
1080cc705f5SToshi Kani  * Return the MTRR fixed memory type of 'start'.
1090cc705f5SToshi Kani  *
1100cc705f5SToshi Kani  * MTRR fixed entries are divided into the following address ranges:
1110cc705f5SToshi Kani  *  0x00000 - 0x7FFFF : This range is divided into eight 64KB sub-ranges
1120cc705f5SToshi Kani  *  0x80000 - 0xBFFFF : This range is divided into sixteen 16KB sub-ranges
1130cc705f5SToshi Kani  *  0xC0000 - 0xFFFFF : This range is divided into sixty-four 4KB sub-ranges
1140cc705f5SToshi Kani  *
1150cc705f5SToshi Kani  * Return Values:
1160cc705f5SToshi Kani  * MTRR_TYPE_(type)  - Matched memory type
1170cc705f5SToshi Kani  * MTRR_TYPE_INVALID - Unmatched
1182e5d9c85Svenkatesh.pallipadi@intel.com  */
1190cc705f5SToshi Kani static u8 mtrr_type_lookup_fixed(u64 start, u64 end)
1200cc705f5SToshi Kani {
1210cc705f5SToshi Kani 	int idx;
1220cc705f5SToshi Kani 
1230cc705f5SToshi Kani 	if (start >= 0x100000)
1240cc705f5SToshi Kani 		return MTRR_TYPE_INVALID;
1250cc705f5SToshi Kani 
1260cc705f5SToshi Kani 	/* 0x0 - 0x7FFFF */
1270cc705f5SToshi Kani 	if (start < 0x80000) {
1280cc705f5SToshi Kani 		idx = 0;
1290cc705f5SToshi Kani 		idx += (start >> 16);
1300cc705f5SToshi Kani 		return mtrr_state.fixed_ranges[idx];
1310cc705f5SToshi Kani 	/* 0x80000 - 0xBFFFF */
1320cc705f5SToshi Kani 	} else if (start < 0xC0000) {
1330cc705f5SToshi Kani 		idx = 1 * 8;
1340cc705f5SToshi Kani 		idx += ((start - 0x80000) >> 14);
1350cc705f5SToshi Kani 		return mtrr_state.fixed_ranges[idx];
1360cc705f5SToshi Kani 	}
1370cc705f5SToshi Kani 
1380cc705f5SToshi Kani 	/* 0xC0000 - 0xFFFFF */
1390cc705f5SToshi Kani 	idx = 3 * 8;
1400cc705f5SToshi Kani 	idx += ((start - 0xC0000) >> 12);
1410cc705f5SToshi Kani 	return mtrr_state.fixed_ranges[idx];
1420cc705f5SToshi Kani }
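/*
 * Index calculation example (illustrative only): a lookup of
 * start == 0xA5000 falls into the 16K block, so
 * idx = 8 + ((0xA5000 - 0x80000) >> 14) = 8 + 9 = 17, i.e. the 18th of
 * the 88 fixed-range type bytes.
 */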
1430cc705f5SToshi Kani 
1440cc705f5SToshi Kani /**
1450cc705f5SToshi Kani  * mtrr_type_lookup_variable - look up memory type in MTRR variable entries
1460cc705f5SToshi Kani  *
1470cc705f5SToshi Kani  * Return Value:
1480cc705f5SToshi Kani  * MTRR_TYPE_(type) - Matched memory type or default memory type (unmatched)
1490cc705f5SToshi Kani  *
150b73522e0SToshi Kani  * Output Arguments:
1510cc705f5SToshi Kani  * repeat - Set to 1 when [start:end] spans across an MTRR range and the
1520cc705f5SToshi Kani  *	    type returned corresponds only to [start:*partial_end].  The
1530cc705f5SToshi Kani  *	    caller has to look up again for [*partial_end:end].
154b73522e0SToshi Kani  *
155b73522e0SToshi Kani  * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
156b73522e0SToshi Kani  *	     region is fully covered by a single MTRR entry or the default
157b73522e0SToshi Kani  *	     type.
1580cc705f5SToshi Kani  */
1590cc705f5SToshi Kani static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
160b73522e0SToshi Kani 				    int *repeat, u8 *uniform)
1612e5d9c85Svenkatesh.pallipadi@intel.com {
1622e5d9c85Svenkatesh.pallipadi@intel.com 	int i;
1632e5d9c85Svenkatesh.pallipadi@intel.com 	u64 base, mask;
1642e5d9c85Svenkatesh.pallipadi@intel.com 	u8 prev_match, curr_match;
1652e5d9c85Svenkatesh.pallipadi@intel.com 
166351e5a70SVenkatesh Pallipadi 	*repeat = 0;
167b73522e0SToshi Kani 	*uniform = 1;
1682e5d9c85Svenkatesh.pallipadi@intel.com 
1693d3ca416SToshi Kani 	prev_match = MTRR_TYPE_INVALID;
1702e5d9c85Svenkatesh.pallipadi@intel.com 	for (i = 0; i < num_var_ranges; ++i) {
1717f0431e3SToshi Kani 		unsigned short start_state, end_state, inclusive;
1722e5d9c85Svenkatesh.pallipadi@intel.com 
1732e5d9c85Svenkatesh.pallipadi@intel.com 		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
1742e5d9c85Svenkatesh.pallipadi@intel.com 			continue;
1752e5d9c85Svenkatesh.pallipadi@intel.com 
1762e5d9c85Svenkatesh.pallipadi@intel.com 		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
1772e5d9c85Svenkatesh.pallipadi@intel.com 		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
1782e5d9c85Svenkatesh.pallipadi@intel.com 		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
1792e5d9c85Svenkatesh.pallipadi@intel.com 		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
1802e5d9c85Svenkatesh.pallipadi@intel.com 
1812e5d9c85Svenkatesh.pallipadi@intel.com 		start_state = ((start & mask) == (base & mask));
1822e5d9c85Svenkatesh.pallipadi@intel.com 		end_state = ((end & mask) == (base & mask));
1837f0431e3SToshi Kani 		inclusive = ((start < base) && (end > base));
184351e5a70SVenkatesh Pallipadi 
1857f0431e3SToshi Kani 		if ((start_state != end_state) || inclusive) {
186351e5a70SVenkatesh Pallipadi 			/*
187351e5a70SVenkatesh Pallipadi 			 * We have start:end spanning across an MTRR.
1887f0431e3SToshi Kani 			 * We split the region into either
1897f0431e3SToshi Kani 			 *
1907f0431e3SToshi Kani 			 * - start_state:1
191351e5a70SVenkatesh Pallipadi 			 * (start:mtrr_end)(mtrr_end:end)
1927f0431e3SToshi Kani 			 * - end_state:1
193351e5a70SVenkatesh Pallipadi 			 * (start:mtrr_start)(mtrr_start:end)
1947f0431e3SToshi Kani 			 * - inclusive:1
1957f0431e3SToshi Kani 			 * (start:mtrr_start)(mtrr_start:mtrr_end)(mtrr_end:end)
1967f0431e3SToshi Kani 			 *
197351e5a70SVenkatesh Pallipadi 			 * depending on the kind of overlap.
1987f0431e3SToshi Kani 			 *
1997f0431e3SToshi Kani 			 * Return the type of the first region and a pointer
2007f0431e3SToshi Kani 			 * to the start of the next region, so that the caller
2017f0431e3SToshi Kani 			 * is advised to look up again after having adjusted
2027f0431e3SToshi Kani 			 * start and end.
2037f0431e3SToshi Kani 			 *
2040cc705f5SToshi Kani 			 * Note: This way we handle overlaps with multiple
2050cc705f5SToshi Kani 			 * entries and the default type properly.
206351e5a70SVenkatesh Pallipadi 			 */
207351e5a70SVenkatesh Pallipadi 			if (start_state)
208351e5a70SVenkatesh Pallipadi 				*partial_end = base + get_mtrr_size(mask);
209351e5a70SVenkatesh Pallipadi 			else
210351e5a70SVenkatesh Pallipadi 				*partial_end = base;
211351e5a70SVenkatesh Pallipadi 
212351e5a70SVenkatesh Pallipadi 			if (unlikely(*partial_end <= start)) {
213351e5a70SVenkatesh Pallipadi 				WARN_ON(1);
214351e5a70SVenkatesh Pallipadi 				*partial_end = start + PAGE_SIZE;
215351e5a70SVenkatesh Pallipadi 			}
216351e5a70SVenkatesh Pallipadi 
217351e5a70SVenkatesh Pallipadi 			end = *partial_end - 1; /* end is inclusive */
218351e5a70SVenkatesh Pallipadi 			*repeat = 1;
219b73522e0SToshi Kani 			*uniform = 0;
220351e5a70SVenkatesh Pallipadi 		}
2212e5d9c85Svenkatesh.pallipadi@intel.com 
222a1a499a3SJaswinder Singh Rajput 		if ((start & mask) != (base & mask))
2232e5d9c85Svenkatesh.pallipadi@intel.com 			continue;
2242e5d9c85Svenkatesh.pallipadi@intel.com 
2252e5d9c85Svenkatesh.pallipadi@intel.com 		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
2263d3ca416SToshi Kani 		if (prev_match == MTRR_TYPE_INVALID) {
2272e5d9c85Svenkatesh.pallipadi@intel.com 			prev_match = curr_match;
2282e5d9c85Svenkatesh.pallipadi@intel.com 			continue;
2292e5d9c85Svenkatesh.pallipadi@intel.com 		}
2302e5d9c85Svenkatesh.pallipadi@intel.com 
231b73522e0SToshi Kani 		*uniform = 0;
232a7f07cfbSVenkatesh Pallipadi 		if (check_type_overlap(&prev_match, &curr_match))
233a7f07cfbSVenkatesh Pallipadi 			return curr_match;
2342e5d9c85Svenkatesh.pallipadi@intel.com 	}
2352e5d9c85Svenkatesh.pallipadi@intel.com 
2363d3ca416SToshi Kani 	if (prev_match != MTRR_TYPE_INVALID)
2372e5d9c85Svenkatesh.pallipadi@intel.com 		return prev_match;
2382e5d9c85Svenkatesh.pallipadi@intel.com 
2392e5d9c85Svenkatesh.pallipadi@intel.com 	return mtrr_state.def_type;
2402e5d9c85Svenkatesh.pallipadi@intel.com }
2412e5d9c85Svenkatesh.pallipadi@intel.com 
2420cc705f5SToshi Kani /**
2430cc705f5SToshi Kani  * mtrr_type_lookup - look up memory type in MTRR
2440cc705f5SToshi Kani  *
2450cc705f5SToshi Kani  * Return Values:
2460cc705f5SToshi Kani  * MTRR_TYPE_(type)  - The effective MTRR type for the region
2470cc705f5SToshi Kani  * MTRR_TYPE_INVALID - MTRR is disabled
248b73522e0SToshi Kani  *
249b73522e0SToshi Kani  * Output Argument:
250b73522e0SToshi Kani  * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
251b73522e0SToshi Kani  *	     region is fully covered by a single MTRR entry or the default
252b73522e0SToshi Kani  *	     type.
253351e5a70SVenkatesh Pallipadi  */
254b73522e0SToshi Kani u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
255351e5a70SVenkatesh Pallipadi {
256b73522e0SToshi Kani 	u8 type, prev_type, is_uniform = 1, dummy;
257351e5a70SVenkatesh Pallipadi 	int repeat;
258351e5a70SVenkatesh Pallipadi 	u64 partial_end;
259351e5a70SVenkatesh Pallipadi 
260cb7f4a8bSYing-Tsun Huang 	/* Make end inclusive instead of exclusive */
261cb7f4a8bSYing-Tsun Huang 	end--;
262cb7f4a8bSYing-Tsun Huang 
2630cc705f5SToshi Kani 	if (!mtrr_state_set)
2640cc705f5SToshi Kani 		return MTRR_TYPE_INVALID;
2650cc705f5SToshi Kani 
2660cc705f5SToshi Kani 	if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
2670cc705f5SToshi Kani 		return MTRR_TYPE_INVALID;
2680cc705f5SToshi Kani 
2690cc705f5SToshi Kani 	/*
2700cc705f5SToshi Kani 	 * Look up the fixed ranges first, which take priority over
2710cc705f5SToshi Kani 	 * the variable ranges.
2720cc705f5SToshi Kani 	 */
2730cc705f5SToshi Kani 	if ((start < 0x100000) &&
2740cc705f5SToshi Kani 	    (mtrr_state.have_fixed) &&
275b73522e0SToshi Kani 	    (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
276b73522e0SToshi Kani 		is_uniform = 0;
277b73522e0SToshi Kani 		type = mtrr_type_lookup_fixed(start, end);
278b73522e0SToshi Kani 		goto out;
279b73522e0SToshi Kani 	}
2800cc705f5SToshi Kani 
2810cc705f5SToshi Kani 	/*
2820cc705f5SToshi Kani 	 * Look up the variable ranges.  Look for multiple ranges matching
2830cc705f5SToshi Kani 	 * this address and pick the type as per MTRR precedence.
2840cc705f5SToshi Kani 	 */
285b73522e0SToshi Kani 	type = mtrr_type_lookup_variable(start, end, &partial_end,
286b73522e0SToshi Kani 					 &repeat, &is_uniform);
287351e5a70SVenkatesh Pallipadi 
288351e5a70SVenkatesh Pallipadi 	/*
289351e5a70SVenkatesh Pallipadi 	 * Common path is with repeat = 0.
290351e5a70SVenkatesh Pallipadi 	 * However, we can have cases where [start:end] spans across some
2910cc705f5SToshi Kani 	 * MTRR ranges and/or the default type.  Do repeated lookups for
2920cc705f5SToshi Kani 	 * that case here.
293351e5a70SVenkatesh Pallipadi 	 */
294351e5a70SVenkatesh Pallipadi 	while (repeat) {
295351e5a70SVenkatesh Pallipadi 		prev_type = type;
296351e5a70SVenkatesh Pallipadi 		start = partial_end;
297b73522e0SToshi Kani 		is_uniform = 0;
298b73522e0SToshi Kani 		type = mtrr_type_lookup_variable(start, end, &partial_end,
299b73522e0SToshi Kani 						 &repeat, &dummy);
300351e5a70SVenkatesh Pallipadi 
301351e5a70SVenkatesh Pallipadi 		if (check_type_overlap(&prev_type, &type))
302b73522e0SToshi Kani 			goto out;
303351e5a70SVenkatesh Pallipadi 	}
304351e5a70SVenkatesh Pallipadi 
3050cc705f5SToshi Kani 	if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2))
306b73522e0SToshi Kani 		type = MTRR_TYPE_WRBACK;
3070cc705f5SToshi Kani 
308b73522e0SToshi Kani out:
309b73522e0SToshi Kani 	*uniform = is_uniform;
310351e5a70SVenkatesh Pallipadi 	return type;
311351e5a70SVenkatesh Pallipadi }
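/*
 * Usage sketch (illustrative, not part of this file): callers such as the
 * huge-page mapping code use the 'uniform' output to decide whether a
 * large mapping is safe, roughly along these lines:
 *
 *	u8 uniform;
 *	u8 type = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
 *
 *	if (type != MTRR_TYPE_INVALID && !uniform)
 *		...  mixed MTRR types inside the range - use smaller pages
 *
 * See pud_set_huge() for the real decision logic.
 */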
312351e5a70SVenkatesh Pallipadi 
3132ec1df41SThomas Gleixner /* Get the MSR pair relating to a var range */
3142ec1df41SThomas Gleixner static void
3152ec1df41SThomas Gleixner get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
3162ec1df41SThomas Gleixner {
3172ec1df41SThomas Gleixner 	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
3182ec1df41SThomas Gleixner 	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
3192ec1df41SThomas Gleixner }
3202ec1df41SThomas Gleixner 
321a1a499a3SJaswinder Singh Rajput /* Fill the MSR pair relating to a var range */
32295ffa243SYinghai Lu void fill_mtrr_var_range(unsigned int index,
32395ffa243SYinghai Lu 		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
32495ffa243SYinghai Lu {
32595ffa243SYinghai Lu 	struct mtrr_var_range *vr;
32695ffa243SYinghai Lu 
32795ffa243SYinghai Lu 	vr = mtrr_state.var_ranges;
32895ffa243SYinghai Lu 
32995ffa243SYinghai Lu 	vr[index].base_lo = base_lo;
33095ffa243SYinghai Lu 	vr[index].base_hi = base_hi;
33195ffa243SYinghai Lu 	vr[index].mask_lo = mask_lo;
33295ffa243SYinghai Lu 	vr[index].mask_hi = mask_hi;
33395ffa243SYinghai Lu }
33495ffa243SYinghai Lu 
335a1a499a3SJaswinder Singh Rajput static void get_fixed_ranges(mtrr_type *frs)
3362ec1df41SThomas Gleixner {
3372ec1df41SThomas Gleixner 	unsigned int *p = (unsigned int *)frs;
3382ec1df41SThomas Gleixner 	int i;
3392ec1df41SThomas Gleixner 
3403ff42da5SAndreas Herrmann 	k8_check_syscfg_dram_mod_en();
3413ff42da5SAndreas Herrmann 
342a036c7a3SJaswinder Singh Rajput 	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);
3432ec1df41SThomas Gleixner 
3442ec1df41SThomas Gleixner 	for (i = 0; i < 2; i++)
3457d9d55e4SJaswinder Singh Rajput 		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
3462ec1df41SThomas Gleixner 	for (i = 0; i < 8; i++)
347ba5673ffSJaswinder Singh Rajput 		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
3482ec1df41SThomas Gleixner }
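/*
 * Layout note: 'frs' is the 88-byte fixed_ranges array viewed as 22
 * 32-bit words, so the 64K MSR fills p[0..1], the two 16K MSRs fill
 * p[2..5] and the eight 4K MSRs fill p[6..21]; each rdmsr() above reads
 * eight one-byte memory types at once.
 */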
3492ec1df41SThomas Gleixner 
3502ec1df41SThomas Gleixner void mtrr_save_fixed_ranges(void *info)
3512ec1df41SThomas Gleixner {
352362f924bSBorislav Petkov 	if (boot_cpu_has(X86_FEATURE_MTRR))
3532ec1df41SThomas Gleixner 		get_fixed_ranges(mtrr_state.fixed_ranges);
3542ec1df41SThomas Gleixner }
3552ec1df41SThomas Gleixner 
356d4c90e37SYinghai Lu static unsigned __initdata last_fixed_start;
357d4c90e37SYinghai Lu static unsigned __initdata last_fixed_end;
358d4c90e37SYinghai Lu static mtrr_type __initdata last_fixed_type;
359d4c90e37SYinghai Lu 
360d4c90e37SYinghai Lu static void __init print_fixed_last(void)
361d4c90e37SYinghai Lu {
362d4c90e37SYinghai Lu 	if (!last_fixed_end)
363d4c90e37SYinghai Lu 		return;
364d4c90e37SYinghai Lu 
365a1a499a3SJaswinder Singh Rajput 	pr_debug("  %05X-%05X %s\n", last_fixed_start,
366d4c90e37SYinghai Lu 		 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
367d4c90e37SYinghai Lu 
368d4c90e37SYinghai Lu 	last_fixed_end = 0;
369d4c90e37SYinghai Lu }
370d4c90e37SYinghai Lu 
371d4c90e37SYinghai Lu static void __init update_fixed_last(unsigned base, unsigned end,
372d4c90e37SYinghai Lu 				     mtrr_type type)
373d4c90e37SYinghai Lu {
374d4c90e37SYinghai Lu 	last_fixed_start = base;
375d4c90e37SYinghai Lu 	last_fixed_end = end;
376d4c90e37SYinghai Lu 	last_fixed_type = type;
377d4c90e37SYinghai Lu }
378d4c90e37SYinghai Lu 
379a1a499a3SJaswinder Singh Rajput static void __init
380a1a499a3SJaswinder Singh Rajput print_fixed(unsigned base, unsigned step, const mtrr_type *types)
3812ec1df41SThomas Gleixner {
3822ec1df41SThomas Gleixner 	unsigned i;
3832ec1df41SThomas Gleixner 
384d4c90e37SYinghai Lu 	for (i = 0; i < 8; ++i, ++types, base += step) {
385d4c90e37SYinghai Lu 		if (last_fixed_end == 0) {
386d4c90e37SYinghai Lu 			update_fixed_last(base, base + step, *types);
387d4c90e37SYinghai Lu 			continue;
388d4c90e37SYinghai Lu 		}
389d4c90e37SYinghai Lu 		if (last_fixed_end == base && last_fixed_type == *types) {
390d4c90e37SYinghai Lu 			last_fixed_end = base + step;
391d4c90e37SYinghai Lu 			continue;
392d4c90e37SYinghai Lu 		}
393d4c90e37SYinghai Lu 		/* new segments: gap or different type */
394d4c90e37SYinghai Lu 		print_fixed_last();
395d4c90e37SYinghai Lu 		update_fixed_last(base, base + step, *types);
396d4c90e37SYinghai Lu 	}
3972ec1df41SThomas Gleixner }
3982ec1df41SThomas Gleixner 
3992e5d9c85Svenkatesh.pallipadi@intel.com static void prepare_set(void);
4002e5d9c85Svenkatesh.pallipadi@intel.com static void post_set(void);
4012e5d9c85Svenkatesh.pallipadi@intel.com 
4028ad97905SYinghai Lu static void __init print_mtrr_state(void)
4038ad97905SYinghai Lu {
4048ad97905SYinghai Lu 	unsigned int i;
4058ad97905SYinghai Lu 	int high_width;
4068ad97905SYinghai Lu 
407a1a499a3SJaswinder Singh Rajput 	pr_debug("MTRR default type: %s\n",
408d4c90e37SYinghai Lu 		 mtrr_attrib_to_str(mtrr_state.def_type));
4098ad97905SYinghai Lu 	if (mtrr_state.have_fixed) {
410a1a499a3SJaswinder Singh Rajput 		pr_debug("MTRR fixed ranges %sabled:\n",
4119b3aca62SToshi Kani 			((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
4129b3aca62SToshi Kani 			 (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
4139b3aca62SToshi Kani 			 "en" : "dis");
4148ad97905SYinghai Lu 		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
4158ad97905SYinghai Lu 		for (i = 0; i < 2; ++i)
416a1a499a3SJaswinder Singh Rajput 			print_fixed(0x80000 + i * 0x20000, 0x04000,
417a1a499a3SJaswinder Singh Rajput 				    mtrr_state.fixed_ranges + (i + 1) * 8);
4188ad97905SYinghai Lu 		for (i = 0; i < 8; ++i)
419a1a499a3SJaswinder Singh Rajput 			print_fixed(0xC0000 + i * 0x08000, 0x01000,
420a1a499a3SJaswinder Singh Rajput 				    mtrr_state.fixed_ranges + (i + 3) * 8);
421d4c90e37SYinghai Lu 
422d4c90e37SYinghai Lu 		/* tail */
423d4c90e37SYinghai Lu 		print_fixed_last();
4248ad97905SYinghai Lu 	}
425a1a499a3SJaswinder Singh Rajput 	pr_debug("MTRR variable ranges %sabled:\n",
4269b3aca62SToshi Kani 		 mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
427a7101d15SJan Beulich 	high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;
428a1a499a3SJaswinder Singh Rajput 
4298ad97905SYinghai Lu 	for (i = 0; i < num_var_ranges; ++i) {
4308ad97905SYinghai Lu 		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
431a1a499a3SJaswinder Singh Rajput 			pr_debug("  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
4328ad97905SYinghai Lu 				 i,
4338ad97905SYinghai Lu 				 high_width,
4348ad97905SYinghai Lu 				 mtrr_state.var_ranges[i].base_hi,
4358ad97905SYinghai Lu 				 mtrr_state.var_ranges[i].base_lo >> 12,
4368ad97905SYinghai Lu 				 high_width,
4378ad97905SYinghai Lu 				 mtrr_state.var_ranges[i].mask_hi,
4388ad97905SYinghai Lu 				 mtrr_state.var_ranges[i].mask_lo >> 12,
4398ad97905SYinghai Lu 				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
4408ad97905SYinghai Lu 		else
441a1a499a3SJaswinder Singh Rajput 			pr_debug("  %u disabled\n", i);
4428ad97905SYinghai Lu 	}
443a1a499a3SJaswinder Singh Rajput 	if (mtrr_tom2)
444a1a499a3SJaswinder Singh Rajput 		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
4458ad97905SYinghai Lu }
4468ad97905SYinghai Lu 
447ad025a73SToshi Kani /* PAT setup for the boot CPU (BP). We need to go through the sync steps here. */
448ad025a73SToshi Kani void __init mtrr_bp_pat_init(void)
449ad025a73SToshi Kani {
450ad025a73SToshi Kani 	unsigned long flags;
451ad025a73SToshi Kani 
452ad025a73SToshi Kani 	local_irq_save(flags);
453ad025a73SToshi Kani 	prepare_set();
454ad025a73SToshi Kani 
455ad025a73SToshi Kani 	pat_init();
456ad025a73SToshi Kani 
457ad025a73SToshi Kani 	post_set();
458ad025a73SToshi Kani 	local_irq_restore(flags);
459ad025a73SToshi Kani }
460ad025a73SToshi Kani 
4612ec1df41SThomas Gleixner /* Grab all of the MTRR state for this CPU into *state */
462f9626104SLuis R. Rodriguez bool __init get_mtrr_state(void)
4632ec1df41SThomas Gleixner {
4642ec1df41SThomas Gleixner 	struct mtrr_var_range *vrs;
465a1a499a3SJaswinder Singh Rajput 	unsigned lo, dummy;
466a1a499a3SJaswinder Singh Rajput 	unsigned int i;
4672ec1df41SThomas Gleixner 
4682ec1df41SThomas Gleixner 	vrs = mtrr_state.var_ranges;
4692ec1df41SThomas Gleixner 
470d9bcc01dSJaswinder Singh Rajput 	rdmsr(MSR_MTRRcap, lo, dummy);
4712ec1df41SThomas Gleixner 	mtrr_state.have_fixed = (lo >> 8) & 1;
4722ec1df41SThomas Gleixner 
4732ec1df41SThomas Gleixner 	for (i = 0; i < num_var_ranges; i++)
4742ec1df41SThomas Gleixner 		get_mtrr_var_range(i, &vrs[i]);
4752ec1df41SThomas Gleixner 	if (mtrr_state.have_fixed)
4762ec1df41SThomas Gleixner 		get_fixed_ranges(mtrr_state.fixed_ranges);
4772ec1df41SThomas Gleixner 
47852650257SJaswinder Singh Rajput 	rdmsr(MSR_MTRRdefType, lo, dummy);
4792ec1df41SThomas Gleixner 	mtrr_state.def_type = (lo & 0xff);
4802ec1df41SThomas Gleixner 	mtrr_state.enabled = (lo & 0xc00) >> 10;
4812ec1df41SThomas Gleixner 
48235605a10SYinghai Lu 	if (amd_special_default_mtrr()) {
4830da72a4aSThomas Gleixner 		unsigned low, high;
484a1a499a3SJaswinder Singh Rajput 
48535605a10SYinghai Lu 		/* TOP_MEM2 */
4860da72a4aSThomas Gleixner 		rdmsr(MSR_K8_TOP_MEM2, low, high);
48795ffa243SYinghai Lu 		mtrr_tom2 = high;
48895ffa243SYinghai Lu 		mtrr_tom2 <<= 32;
48995ffa243SYinghai Lu 		mtrr_tom2 |= low;
4908004dd96SYinghai Lu 		mtrr_tom2 &= 0xffffff800000ULL;
49135605a10SYinghai Lu 	}
4922ec1df41SThomas Gleixner 
4938ad97905SYinghai Lu 	print_mtrr_state();
4948ad97905SYinghai Lu 
4952e5d9c85Svenkatesh.pallipadi@intel.com 	mtrr_state_set = 1;
4962e5d9c85Svenkatesh.pallipadi@intel.com 
497f9626104SLuis R. Rodriguez 	return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
4982ec1df41SThomas Gleixner }
4992ec1df41SThomas Gleixner 
500a1a499a3SJaswinder Singh Rajput /* Some BIOSes are messed up and don't set all MTRRs the same! */
5012ec1df41SThomas Gleixner void __init mtrr_state_warn(void)
5022ec1df41SThomas Gleixner {
5032ec1df41SThomas Gleixner 	unsigned long mask = smp_changes_mask;
5042ec1df41SThomas Gleixner 
5052ec1df41SThomas Gleixner 	if (!mask)
5062ec1df41SThomas Gleixner 		return;
5072ec1df41SThomas Gleixner 	if (mask & MTRR_CHANGE_MASK_FIXED)
5081b74dde7SChen Yucong 		pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
5092ec1df41SThomas Gleixner 	if (mask & MTRR_CHANGE_MASK_VARIABLE)
5101b74dde7SChen Yucong 		pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
5112ec1df41SThomas Gleixner 	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
5121b74dde7SChen Yucong 		pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
513a1a499a3SJaswinder Singh Rajput 
5141b74dde7SChen Yucong 	pr_info("mtrr: probably your BIOS does not setup all CPUs.\n");
5151b74dde7SChen Yucong 	pr_info("mtrr: corrected configuration.\n");
5162ec1df41SThomas Gleixner }
5172ec1df41SThomas Gleixner 
518a1a499a3SJaswinder Singh Rajput /*
519a1a499a3SJaswinder Singh Rajput  * Doesn't attempt to pass an error out to MTRR users
520a1a499a3SJaswinder Singh Rajput  * because it's quite complicated in some cases and probably not
521a1a499a3SJaswinder Singh Rajput  * worth it; the best error handling is to ignore it.
522a1a499a3SJaswinder Singh Rajput  */
5232ec1df41SThomas Gleixner void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
5242ec1df41SThomas Gleixner {
525a1a499a3SJaswinder Singh Rajput 	if (wrmsr_safe(msr, a, b) < 0) {
5261b74dde7SChen Yucong 		pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
5272ec1df41SThomas Gleixner 			smp_processor_id(), msr, a, b);
5282ec1df41SThomas Gleixner 	}
529a1a499a3SJaswinder Singh Rajput }
5302ec1df41SThomas Gleixner 
5312ec1df41SThomas Gleixner /**
532a1a499a3SJaswinder Singh Rajput  * set_fixed_range - checks & updates a fixed-range MTRR if it
533a1a499a3SJaswinder Singh Rajput  *		     differs from the value it should have
5341d3381ebSRandy Dunlap  * @msr: MSR address of the MTRR which should be checked and updated
5351d3381ebSRandy Dunlap  * @changed: pointer which indicates whether the MTRR needed to be changed
5361d3381ebSRandy Dunlap  * @msrwords: pointer to the MSR values which the MSR should have
5372ec1df41SThomas Gleixner  */
5382d2ee8deSPaul Jimenez static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
5392ec1df41SThomas Gleixner {
5402ec1df41SThomas Gleixner 	unsigned lo, hi;
5412ec1df41SThomas Gleixner 
5422ec1df41SThomas Gleixner 	rdmsr(msr, lo, hi);
5432ec1df41SThomas Gleixner 
5442ec1df41SThomas Gleixner 	if (lo != msrwords[0] || hi != msrwords[1]) {
5452ec1df41SThomas Gleixner 		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
5462d2ee8deSPaul Jimenez 		*changed = true;
5472ec1df41SThomas Gleixner 	}
5482ec1df41SThomas Gleixner }
5492ec1df41SThomas Gleixner 
5501d3381ebSRandy Dunlap /**
5511d3381ebSRandy Dunlap  * generic_get_free_region - Get a free MTRR.
5521d3381ebSRandy Dunlap  * @base: The starting (base) address of the region.
5531d3381ebSRandy Dunlap  * @size: The size (in bytes) of the region.
5541d3381ebSRandy Dunlap  * @replace_reg: mtrr index to be replaced; set to invalid value if none.
5551d3381ebSRandy Dunlap  *
5561d3381ebSRandy Dunlap  * Returns: The index of the region on success, else negative on error.
5572ec1df41SThomas Gleixner  */
558a1a499a3SJaswinder Singh Rajput int
559a1a499a3SJaswinder Singh Rajput generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
5602ec1df41SThomas Gleixner {
5612ec1df41SThomas Gleixner 	unsigned long lbase, lsize;
562a1a499a3SJaswinder Singh Rajput 	mtrr_type ltype;
563a1a499a3SJaswinder Singh Rajput 	int i, max;
5642ec1df41SThomas Gleixner 
5652ec1df41SThomas Gleixner 	max = num_var_ranges;
5662ec1df41SThomas Gleixner 	if (replace_reg >= 0 && replace_reg < max)
5672ec1df41SThomas Gleixner 		return replace_reg;
568a1a499a3SJaswinder Singh Rajput 
5692ec1df41SThomas Gleixner 	for (i = 0; i < max; ++i) {
5702ec1df41SThomas Gleixner 		mtrr_if->get(i, &lbase, &lsize, &ltype);
5712ec1df41SThomas Gleixner 		if (lsize == 0)
5722ec1df41SThomas Gleixner 			return i;
5732ec1df41SThomas Gleixner 	}
574a1a499a3SJaswinder Singh Rajput 
5752ec1df41SThomas Gleixner 	return -ENOSPC;
5762ec1df41SThomas Gleixner }
5772ec1df41SThomas Gleixner 
5782ec1df41SThomas Gleixner static void generic_get_mtrr(unsigned int reg, unsigned long *base,
5792ec1df41SThomas Gleixner 			     unsigned long *size, mtrr_type *type)
5802ec1df41SThomas Gleixner {
581d5c78673SYinghai Lu 	u32 mask_lo, mask_hi, base_lo, base_hi;
582d5c78673SYinghai Lu 	unsigned int hi;
583d5c78673SYinghai Lu 	u64 tmp, mask;
5842ec1df41SThomas Gleixner 
5858ad97905SYinghai Lu 	/*
5868ad97905SYinghai Lu 	 * get_mtrr doesn't need to update mtrr_state; it can also be called
5878ad97905SYinghai Lu 	 * from any CPU, so try to print it out directly.
5888ad97905SYinghai Lu 	 */
589fa10ba64SAndi Kleen 	get_cpu();
59063516ef6SYinghai Lu 
5912ec1df41SThomas Gleixner 	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
5928ad97905SYinghai Lu 
5932ec1df41SThomas Gleixner 	if ((mask_lo & 0x800) == 0) {
5942ec1df41SThomas Gleixner 		/*  Invalid (i.e. free) range */
5952ec1df41SThomas Gleixner 		*base = 0;
5962ec1df41SThomas Gleixner 		*size = 0;
5972ec1df41SThomas Gleixner 		*type = 0;
59863516ef6SYinghai Lu 		goto out_put_cpu;
5992ec1df41SThomas Gleixner 	}
6002ec1df41SThomas Gleixner 
6012ec1df41SThomas Gleixner 	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
6022ec1df41SThomas Gleixner 
60363516ef6SYinghai Lu 	/* Work out the shifted address mask: */
604d5c78673SYinghai Lu 	tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
605d5c78673SYinghai Lu 	mask = size_or_mask | tmp;
60663516ef6SYinghai Lu 
60763516ef6SYinghai Lu 	/* Expand tmp with high bits to all 1s: */
608d5c78673SYinghai Lu 	hi = fls64(tmp);
60938cc1c3dSYinghai Lu 	if (hi > 0) {
610d5c78673SYinghai Lu 		tmp |= ~((1ULL<<(hi - 1)) - 1);
61138cc1c3dSYinghai Lu 
612d5c78673SYinghai Lu 		if (tmp != mask) {
6131b74dde7SChen Yucong 			pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
614373d4d09SRusty Russell 			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
615d5c78673SYinghai Lu 			mask = tmp;
61638cc1c3dSYinghai Lu 		}
61738cc1c3dSYinghai Lu 	}
6182ec1df41SThomas Gleixner 
61963516ef6SYinghai Lu 	/*
62063516ef6SYinghai Lu 	 * This works correctly if size is a power of two, i.e. a
62163516ef6SYinghai Lu 	 * contiguous range:
62263516ef6SYinghai Lu 	 */
623d5c78673SYinghai Lu 	*size = -mask;
624d5c78673SYinghai Lu 	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
6252ec1df41SThomas Gleixner 	*type = base_lo & 0xff;
6268ad97905SYinghai Lu 
62763516ef6SYinghai Lu out_put_cpu:
62863516ef6SYinghai Lu 	put_cpu();
6292ec1df41SThomas Gleixner }
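/*
 * Decoding example for generic_get_mtrr() (illustrative values, assuming a
 * 36-bit physical address width): PHYSMASK == 0xfff000000 gives
 * mask_hi == 0xf and mask_lo == 0xff000000, so
 * tmp = (0xf << 20) | (0xff000000 >> 12) == 0xfff000 page frames.
 * After OR-ing in size_or_mask, the negation yields *size == 0x1000 pages,
 * i.e. a contiguous 16 MiB range.
 */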
6302ec1df41SThomas Gleixner 
6312ec1df41SThomas Gleixner /**
632a1a499a3SJaswinder Singh Rajput  * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
633a1a499a3SJaswinder Singh Rajput  *		      differ from the saved set
6341d3381ebSRandy Dunlap  * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
6352ec1df41SThomas Gleixner  */
6362ec1df41SThomas Gleixner static int set_fixed_ranges(mtrr_type *frs)
6372ec1df41SThomas Gleixner {
6382ec1df41SThomas Gleixner 	unsigned long long *saved = (unsigned long long *)frs;
6392d2ee8deSPaul Jimenez 	bool changed = false;
6402ec1df41SThomas Gleixner 	int block = -1, range;
6412ec1df41SThomas Gleixner 
6423ff42da5SAndreas Herrmann 	k8_check_syscfg_dram_mod_en();
6433ff42da5SAndreas Herrmann 
644a1a499a3SJaswinder Singh Rajput 	while (fixed_range_blocks[++block].ranges) {
6452ec1df41SThomas Gleixner 		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
6462ec1df41SThomas Gleixner 			set_fixed_range(fixed_range_blocks[block].base_msr + range,
6472ec1df41SThomas Gleixner 					&changed, (unsigned int *)saved++);
648a1a499a3SJaswinder Singh Rajput 	}
6492ec1df41SThomas Gleixner 
6502ec1df41SThomas Gleixner 	return changed;
6512ec1df41SThomas Gleixner }
6522ec1df41SThomas Gleixner 
653a1a499a3SJaswinder Singh Rajput /*
654a1a499a3SJaswinder Singh Rajput  * Set the MSR pair relating to a var range.
655a1a499a3SJaswinder Singh Rajput  * Returns true if changes are made.
656a1a499a3SJaswinder Singh Rajput  */
6572d2ee8deSPaul Jimenez static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
6582ec1df41SThomas Gleixner {
6592ec1df41SThomas Gleixner 	unsigned int lo, hi;
6602d2ee8deSPaul Jimenez 	bool changed = false;
6612ec1df41SThomas Gleixner 
6622ec1df41SThomas Gleixner 	rdmsr(MTRRphysBase_MSR(index), lo, hi);
6632ec1df41SThomas Gleixner 	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
6642ec1df41SThomas Gleixner 	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
6652ec1df41SThomas Gleixner 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
666a1a499a3SJaswinder Singh Rajput 
6672ec1df41SThomas Gleixner 		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
6682d2ee8deSPaul Jimenez 		changed = true;
6692ec1df41SThomas Gleixner 	}
6702ec1df41SThomas Gleixner 
6712ec1df41SThomas Gleixner 	rdmsr(MTRRphysMask_MSR(index), lo, hi);
6722ec1df41SThomas Gleixner 
6732ec1df41SThomas Gleixner 	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
6742ec1df41SThomas Gleixner 	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
6752ec1df41SThomas Gleixner 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
6762ec1df41SThomas Gleixner 		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
6772d2ee8deSPaul Jimenez 		changed = true;
6782ec1df41SThomas Gleixner 	}
6792ec1df41SThomas Gleixner 	return changed;
6802ec1df41SThomas Gleixner }
6812ec1df41SThomas Gleixner 
6822ec1df41SThomas Gleixner static u32 deftype_lo, deftype_hi;
6832ec1df41SThomas Gleixner 
6841d3381ebSRandy Dunlap /**
6851d3381ebSRandy Dunlap  * set_mtrr_state - Set the MTRR state for this CPU.
6861d3381ebSRandy Dunlap  *
687*01c97c73SJuergen Gross  * NOTE: The CPU must already be in a safe state for MTRR changes, including
688*01c97c73SJuergen Gross  *       measures that only a single CPU can be active in set_mtrr_state() in
689*01c97c73SJuergen Gross  *       order to not be subject to races for usage of deftype_lo. This is
690*01c97c73SJuergen Gross  *       accomplished by taking set_atomicity_lock.
6911d3381ebSRandy Dunlap  * RETURNS: 0 if no changes made, else a mask indicating what was changed.
6922ec1df41SThomas Gleixner  */
6931d3381ebSRandy Dunlap static unsigned long set_mtrr_state(void)
6942ec1df41SThomas Gleixner {
6952ec1df41SThomas Gleixner 	unsigned long change_mask = 0;
696a1a499a3SJaswinder Singh Rajput 	unsigned int i;
6972ec1df41SThomas Gleixner 
698a1a499a3SJaswinder Singh Rajput 	for (i = 0; i < num_var_ranges; i++) {
6992ec1df41SThomas Gleixner 		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
7002ec1df41SThomas Gleixner 			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
701a1a499a3SJaswinder Singh Rajput 	}
7022ec1df41SThomas Gleixner 
7032ec1df41SThomas Gleixner 	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
7042ec1df41SThomas Gleixner 		change_mask |= MTRR_CHANGE_MASK_FIXED;
7052ec1df41SThomas Gleixner 
706a1a499a3SJaswinder Singh Rajput 	/*
707a1a499a3SJaswinder Singh Rajput 	 * Set_mtrr_restore restores the old value of MTRRdefType,
708a1a499a3SJaswinder Singh Rajput 	 * so to set it we fiddle with the saved value:
709a1a499a3SJaswinder Singh Rajput 	 */
7102ec1df41SThomas Gleixner 	if ((deftype_lo & 0xff) != mtrr_state.def_type
7112ec1df41SThomas Gleixner 	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
712a1a499a3SJaswinder Singh Rajput 
713a1a499a3SJaswinder Singh Rajput 		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
714a1a499a3SJaswinder Singh Rajput 			     (mtrr_state.enabled << 10);
7152ec1df41SThomas Gleixner 		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
7162ec1df41SThomas Gleixner 	}
7172ec1df41SThomas Gleixner 
7182ec1df41SThomas Gleixner 	return change_mask;
7192ec1df41SThomas Gleixner }
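/*
 * MTRRdefType layout used by the masking above: bits 7:0 hold the default
 * memory type, bit 10 is the fixed-range enable and bit 11 the global
 * MTRR enable, hence the 0xcff mask and the 'enabled << 10' shift.
 */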
7202ec1df41SThomas Gleixner 
7212ec1df41SThomas Gleixner 
722a1a499a3SJaswinder Singh Rajput static unsigned long cr4;
72340d6753eSThomas Gleixner static DEFINE_RAW_SPINLOCK(set_atomicity_lock);
7242ec1df41SThomas Gleixner 
7252ec1df41SThomas Gleixner /*
726a1a499a3SJaswinder Singh Rajput  * Since we are disabling the cache don't allow any interrupts,
727a1a499a3SJaswinder Singh Rajput  * they would run extremely slow and would only increase the pain.
728a1a499a3SJaswinder Singh Rajput  *
729a1a499a3SJaswinder Singh Rajput  * The caller must ensure that local interrupts are disabled and
730a1a499a3SJaswinder Singh Rajput  * are reenabled after post_set() has been called.
7312ec1df41SThomas Gleixner  */
7322ec1df41SThomas Gleixner static void prepare_set(void) __acquires(set_atomicity_lock)
7332ec1df41SThomas Gleixner {
7342ec1df41SThomas Gleixner 	unsigned long cr0;
7352ec1df41SThomas Gleixner 
736a1a499a3SJaswinder Singh Rajput 	/*
737a1a499a3SJaswinder Singh Rajput 	 * Note that this is not ideal
738a1a499a3SJaswinder Singh Rajput 	 * since the cache is only flushed/disabled for this CPU while the
739a1a499a3SJaswinder Singh Rajput 	 * MTRRs are changed, but changing this requires more invasive
740a1a499a3SJaswinder Singh Rajput 	 * changes to the way the kernel boots
741a1a499a3SJaswinder Singh Rajput 	 */
7422ec1df41SThomas Gleixner 
74340d6753eSThomas Gleixner 	raw_spin_lock(&set_atomicity_lock);
7442ec1df41SThomas Gleixner 
7452ec1df41SThomas Gleixner 	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
7467ebad705SDave Jones 	cr0 = read_cr0() | X86_CR0_CD;
7472ec1df41SThomas Gleixner 	write_cr0(cr0);
748fd329f27SRicardo Neri 
749fd329f27SRicardo Neri 	/*
750fd329f27SRicardo Neri 	 * Cache flushing is the most time-consuming step when programming
751fd329f27SRicardo Neri 	 * the MTRRs. Fortunately, as per the Intel Software Development
752fd329f27SRicardo Neri 	 * Manual, we can skip it if the processor supports cache self-
753fd329f27SRicardo Neri 	 * snooping.
754fd329f27SRicardo Neri 	 */
755fd329f27SRicardo Neri 	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
7562ec1df41SThomas Gleixner 		wbinvd();
7572ec1df41SThomas Gleixner 
7582ec1df41SThomas Gleixner 	/* Save value of CR4 and clear Page Global Enable (bit 7) */
759c109bf95SBorislav Petkov 	if (boot_cpu_has(X86_FEATURE_PGE)) {
7601e02ce4cSAndy Lutomirski 		cr4 = __read_cr4();
7611e02ce4cSAndy Lutomirski 		__write_cr4(cr4 & ~X86_CR4_PGE);
7622ec1df41SThomas Gleixner 	}
7632ec1df41SThomas Gleixner 
7642ec1df41SThomas Gleixner 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
765ec659934SMel Gorman 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
7662faf153bSThomas Gleixner 	flush_tlb_local();
7672ec1df41SThomas Gleixner 
7682ec1df41SThomas Gleixner 	/* Save MTRR state */
76952650257SJaswinder Singh Rajput 	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
7702ec1df41SThomas Gleixner 
7712ec1df41SThomas Gleixner 	/* Disable MTRRs, and set the default type to uncached */
77252650257SJaswinder Singh Rajput 	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
773fd329f27SRicardo Neri 
774fd329f27SRicardo Neri 	/* Again, only flush caches if we have to. */
775fd329f27SRicardo Neri 	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
7768dbf4a30SAjaykumar Hotchandani 		wbinvd();
7772ec1df41SThomas Gleixner }
7782ec1df41SThomas Gleixner 
7792ec1df41SThomas Gleixner static void post_set(void) __releases(set_atomicity_lock)
7802ec1df41SThomas Gleixner {
7812ec1df41SThomas Gleixner 	/* Flush TLBs (no need to flush caches - they are disabled) */
782ec659934SMel Gorman 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
7832faf153bSThomas Gleixner 	flush_tlb_local();
7842ec1df41SThomas Gleixner 
7852ec1df41SThomas Gleixner 	/* Intel (P6) standard MTRRs */
78652650257SJaswinder Singh Rajput 	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
7872ec1df41SThomas Gleixner 
7882ec1df41SThomas Gleixner 	/* Enable caches */
789a3d7b7ddSH. Peter Anvin 	write_cr0(read_cr0() & ~X86_CR0_CD);
7902ec1df41SThomas Gleixner 
7912ec1df41SThomas Gleixner 	/* Restore value of CR4 */
792c109bf95SBorislav Petkov 	if (boot_cpu_has(X86_FEATURE_PGE))
7931e02ce4cSAndy Lutomirski 		__write_cr4(cr4);
79440d6753eSThomas Gleixner 	raw_spin_unlock(&set_atomicity_lock);
7952ec1df41SThomas Gleixner }
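/*
 * Together prepare_set() and post_set() bracket an MTRR update with the
 * sequence recommended in the Intel SDM: interrupts off (by the caller),
 * enter no-fill cache mode (CR0.CD=1), flush caches unless the CPU is
 * self-snooping, clear CR4.PGE and flush the TLB, disable MTRRs via
 * MTRRdefType, change the MTRRs, then undo the steps in reverse order.
 */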
7962ec1df41SThomas Gleixner 
7972ec1df41SThomas Gleixner static void generic_set_all(void)
7982ec1df41SThomas Gleixner {
7992ec1df41SThomas Gleixner 	unsigned long mask, count;
8002ec1df41SThomas Gleixner 	unsigned long flags;
8012ec1df41SThomas Gleixner 
8022ec1df41SThomas Gleixner 	local_irq_save(flags);
8032ec1df41SThomas Gleixner 	prepare_set();
8042ec1df41SThomas Gleixner 
8052ec1df41SThomas Gleixner 	/* Actually set the state */
8062ec1df41SThomas Gleixner 	mask = set_mtrr_state();
8072ec1df41SThomas Gleixner 
8082e5d9c85Svenkatesh.pallipadi@intel.com 	/* also set PAT */
8092e5d9c85Svenkatesh.pallipadi@intel.com 	pat_init();
8102e5d9c85Svenkatesh.pallipadi@intel.com 
8112ec1df41SThomas Gleixner 	post_set();
8122ec1df41SThomas Gleixner 	local_irq_restore(flags);
8132ec1df41SThomas Gleixner 
8142ec1df41SThomas Gleixner 	/* Use the atomic bitops to update the global mask */
8150e96f31eSJordan Borgner 	for (count = 0; count < sizeof(mask) * 8; ++count) {
8162ec1df41SThomas Gleixner 		if (mask & 0x01)
8172ec1df41SThomas Gleixner 			set_bit(count, &smp_changes_mask);
8182ec1df41SThomas Gleixner 		mask >>= 1;
8192ec1df41SThomas Gleixner 	}
8202ec1df41SThomas Gleixner 
8212ec1df41SThomas Gleixner }
8222ec1df41SThomas Gleixner 
823a1a499a3SJaswinder Singh Rajput /**
824a1a499a3SJaswinder Singh Rajput  * generic_set_mtrr - set variable MTRR register on the local CPU.
825a1a499a3SJaswinder Singh Rajput  *
826a1a499a3SJaswinder Singh Rajput  * @reg: The register to set.
827a1a499a3SJaswinder Singh Rajput  * @base: The base address of the region.
828a1a499a3SJaswinder Singh Rajput  * @size: The size of the region. If this is 0 the region is disabled.
829a1a499a3SJaswinder Singh Rajput  * @type: The type of the region.
830a1a499a3SJaswinder Singh Rajput  *
831a1a499a3SJaswinder Singh Rajput  * Returns nothing.
832a1a499a3SJaswinder Singh Rajput  */
8332ec1df41SThomas Gleixner static void generic_set_mtrr(unsigned int reg, unsigned long base,
8342ec1df41SThomas Gleixner 			     unsigned long size, mtrr_type type)
8352ec1df41SThomas Gleixner {
8362ec1df41SThomas Gleixner 	unsigned long flags;
8372ec1df41SThomas Gleixner 	struct mtrr_var_range *vr;
8382ec1df41SThomas Gleixner 
8392ec1df41SThomas Gleixner 	vr = &mtrr_state.var_ranges[reg];
8402ec1df41SThomas Gleixner 
8412ec1df41SThomas Gleixner 	local_irq_save(flags);
8422ec1df41SThomas Gleixner 	prepare_set();
8432ec1df41SThomas Gleixner 
8442ec1df41SThomas Gleixner 	if (size == 0) {
845a1a499a3SJaswinder Singh Rajput 		/*
846a1a499a3SJaswinder Singh Rajput 		 * The invalid bit is kept in the mask, so we simply
847a1a499a3SJaswinder Singh Rajput 		 * clear the relevant mask register to disable a range.
848a1a499a3SJaswinder Singh Rajput 		 */
8492ec1df41SThomas Gleixner 		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
8502ec1df41SThomas Gleixner 		memset(vr, 0, sizeof(struct mtrr_var_range));
8512ec1df41SThomas Gleixner 	} else {
8522ec1df41SThomas Gleixner 		vr->base_lo = base << PAGE_SHIFT | type;
8532ec1df41SThomas Gleixner 		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
8542ec1df41SThomas Gleixner 		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
8552ec1df41SThomas Gleixner 		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
8562ec1df41SThomas Gleixner 
8572ec1df41SThomas Gleixner 		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
8582ec1df41SThomas Gleixner 		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
8592ec1df41SThomas Gleixner 	}
8602ec1df41SThomas Gleixner 
8612ec1df41SThomas Gleixner 	post_set();
8622ec1df41SThomas Gleixner 	local_irq_restore(flags);
8632ec1df41SThomas Gleixner }
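/*
 * Encoding example for generic_set_mtrr() (illustrative values, assuming a
 * 36-bit physical address width): a 16 MiB write-combining region at
 * physical 0xd0000000 is passed in as base == 0xd0000 pages and
 * size == 0x1000 pages, giving PHYSBASE == 0xd0000001 (type WC in the low
 * byte) and PHYSMASK == 0xfff000800 (mask bits 35:24 plus the valid bit).
 */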
8642ec1df41SThomas Gleixner 
865a1a499a3SJaswinder Singh Rajput int generic_validate_add_page(unsigned long base, unsigned long size,
866a1a499a3SJaswinder Singh Rajput 			      unsigned int type)
8672ec1df41SThomas Gleixner {
8682ec1df41SThomas Gleixner 	unsigned long lbase, last;
8692ec1df41SThomas Gleixner 
870a1a499a3SJaswinder Singh Rajput 	/*
871a1a499a3SJaswinder Singh Rajput 	 * For Intel PPro stepping <= 7, the base
872a1a499a3SJaswinder Singh Rajput 	 * must be 4 MiB aligned and must not touch 0x70000000 -> 0x7003FFFF.
873a1a499a3SJaswinder Singh Rajput 	 */
8742ec1df41SThomas Gleixner 	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
8752ec1df41SThomas Gleixner 	    boot_cpu_data.x86_model == 1 &&
876b399151cSJia Zhang 	    boot_cpu_data.x86_stepping <= 7) {
8772ec1df41SThomas Gleixner 		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
8781b74dde7SChen Yucong 			pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
8792ec1df41SThomas Gleixner 			return -EINVAL;
8802ec1df41SThomas Gleixner 		}
8812ec1df41SThomas Gleixner 		if (!(base + size < 0x70000 || base > 0x7003F) &&
8822ec1df41SThomas Gleixner 		    (type == MTRR_TYPE_WRCOMB
8832ec1df41SThomas Gleixner 		     || type == MTRR_TYPE_WRBACK)) {
8841b74dde7SChen Yucong 			pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
8852ec1df41SThomas Gleixner 			return -EINVAL;
8862ec1df41SThomas Gleixner 		}
8872ec1df41SThomas Gleixner 	}
8882ec1df41SThomas Gleixner 
889a1a499a3SJaswinder Singh Rajput 	/*
890a1a499a3SJaswinder Singh Rajput 	 * Check that the upper bits of base and last are equal, and that the
891a1a499a3SJaswinder Singh Rajput 	 * lower bits are 0 for base and 1 for last.
892a1a499a3SJaswinder Singh Rajput 	 */
8932ec1df41SThomas Gleixner 	last = base + size - 1;
8942ec1df41SThomas Gleixner 	for (lbase = base; !(lbase & 1) && (last & 1);
895a1a499a3SJaswinder Singh Rajput 	     lbase = lbase >> 1, last = last >> 1)
896a1a499a3SJaswinder Singh Rajput 		;
8972ec1df41SThomas Gleixner 	if (lbase != last) {
8981b74dde7SChen Yucong 		pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
8992ec1df41SThomas Gleixner 		return -EINVAL;
9002ec1df41SThomas Gleixner 	}
9012ec1df41SThomas Gleixner 	return 0;
9022ec1df41SThomas Gleixner }
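/*
 * Example of the alignment check above (page-frame numbers, illustrative
 * only): base == 0x100, size == 0x80 gives last == 0x17f; shifting both
 * right while base ends in 0 and last ends in 1 terminates with
 * lbase == last == 0x2, so the request is accepted.  With size == 0x60,
 * last == 0x15f and the loop stops at lbase == 0x8, last == 0xa, so the
 * request is rejected as unaligned.
 */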
9032ec1df41SThomas Gleixner 
9042ec1df41SThomas Gleixner static int generic_have_wrcomb(void)
9052ec1df41SThomas Gleixner {
9062ec1df41SThomas Gleixner 	unsigned long config, dummy;
907d9bcc01dSJaswinder Singh Rajput 	rdmsr(MSR_MTRRcap, config, dummy);
908a1a499a3SJaswinder Singh Rajput 	return config & (1 << 10);
9092ec1df41SThomas Gleixner }
9102ec1df41SThomas Gleixner 
9112ec1df41SThomas Gleixner int positive_have_wrcomb(void)
9122ec1df41SThomas Gleixner {
9132ec1df41SThomas Gleixner 	return 1;
9142ec1df41SThomas Gleixner }
9152ec1df41SThomas Gleixner 
916a1a499a3SJaswinder Singh Rajput /*
917a1a499a3SJaswinder Singh Rajput  * Generic structure...
9182ec1df41SThomas Gleixner  */
9193b9cfc0aSEmese Revfy const struct mtrr_ops generic_mtrr_ops = {
9202ec1df41SThomas Gleixner 	.use_intel_if		= 1,
9212ec1df41SThomas Gleixner 	.set_all		= generic_set_all,
9222ec1df41SThomas Gleixner 	.get			= generic_get_mtrr,
9232ec1df41SThomas Gleixner 	.get_free_region	= generic_get_free_region,
9242ec1df41SThomas Gleixner 	.set			= generic_set_mtrr,
9252ec1df41SThomas Gleixner 	.validate_add_page	= generic_validate_add_page,
9262ec1df41SThomas Gleixner 	.have_wrcomb		= generic_have_wrcomb,
9272ec1df41SThomas Gleixner };
928