xref: /linux/arch/x86/kernel/cpu/mtrr/generic.c (revision 2e5d9c857d4e6c9e7b7d8c8c86a68a7842d213d6)
1 /* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
2    because MTRRs can span up to 40 bits (36 bits on most modern x86) */
3 #include <linux/init.h>
4 #include <linux/slab.h>
5 #include <linux/mm.h>
6 #include <linux/module.h>
7 #include <asm/io.h>
8 #include <asm/mtrr.h>
9 #include <asm/msr.h>
10 #include <asm/system.h>
11 #include <asm/cpufeature.h>
12 #include <asm/processor-flags.h>
13 #include <asm/tlbflush.h>
14 #include <asm/pat.h>
15 #include "mtrr.h"
16 
17 struct mtrr_state {
18 	struct mtrr_var_range var_ranges[MAX_VAR_RANGES];
19 	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
20 	unsigned char enabled;
21 	unsigned char have_fixed;
22 	mtrr_type def_type;
23 };
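/*
 * In mtrr_state above, def_type holds the MTRRdefType memory type
 * (bits 7:0 of the MSR) and enabled mirrors MTRRdefType bits 11:10
 * shifted down: bit 0 is the fixed-range enable (FE) and bit 1 is the
 * global MTRR enable (E), as read back in get_mtrr_state() below.
 */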
24 
25 struct fixed_range_block {
26 	int base_msr; /* start address of an MTRR block */
27 	int ranges;   /* number of MTRRs in this block  */
28 };
29 
30 static struct fixed_range_block fixed_range_blocks[] = {
31 	{ MTRRfix64K_00000_MSR, 1 }, /* one  64k MTRR  */
32 	{ MTRRfix16K_80000_MSR, 2 }, /* two  16k MTRRs */
33 	{ MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
34 	{}
35 };
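/*
 * The 11 fixed-range MSRs listed above each pack eight one-byte types:
 * the 64K MSR covers 0x00000-0x7FFFF in 64K steps, the two 16K MSRs
 * cover 0x80000-0xBFFFF in 16K steps, and the eight 4K MSRs cover
 * 0xC0000-0xFFFFF in 4K steps -- 88 ranges for the first 1MB in total.
 */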
36 
37 static unsigned long smp_changes_mask;
38 static struct mtrr_state mtrr_state = {};
39 static int mtrr_state_set;
40 
41 #undef MODULE_PARAM_PREFIX
42 #define MODULE_PARAM_PREFIX "mtrr."
43 
44 static int mtrr_show;
45 module_param_named(show, mtrr_show, bool, 0);
46 
47 /*
48  * Returns the effective MTRR type for the region
49  * Error returns:
50  * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
51  * - 0xFF - when MTRR is not enabled
52  */
53 u8 mtrr_type_lookup(u64 start, u64 end)
54 {
55 	int i;
56 	u64 base, mask;
57 	u8 prev_match, curr_match;
58 
59 	if (!mtrr_state_set)
60 		return 0xFF;
61 
62 	if (!mtrr_state.enabled)
63 		return 0xFF;
64 
65 	/* Make end inclusive, instead of exclusive */
66 	end--;
67 
68 	/* Look in fixed ranges. Just return the type as per start */
69 	if (mtrr_state.have_fixed && (start < 0x100000)) {
70 		int idx;
71 
72 		if (start < 0x80000) {
73 			idx = 0;
74 			idx += (start >> 16);
75 			return mtrr_state.fixed_ranges[idx];
76 		} else if (start < 0xC0000) {
77 			idx = 1 * 8;
78 			idx += ((start - 0x80000) >> 14);
79 			return mtrr_state.fixed_ranges[idx];
80 		} else if (start < 0x100000) {
81 			idx = 3 * 8;
82 			idx += ((start - 0xC0000) >> 12);
83 			return mtrr_state.fixed_ranges[idx];
84 		}
85 	}
86 
87 	/*
88 	 * Look in variable ranges
89 	 * Look for multiple ranges matching this address and pick the type
90 	 * as per MTRR precedence
91 	 */
92 	if (!(mtrr_state.enabled & 2)) {
93 		return mtrr_state.def_type;
94 	}
95 
96 	prev_match = 0xFF;
97 	for (i = 0; i < num_var_ranges; ++i) {
98 		unsigned short start_state, end_state;
99 
100 		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
101 			continue;
102 
103 		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
104 		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
105 		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
106 		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
107 
108 		start_state = ((start & mask) == (base & mask));
109 		end_state = ((end & mask) == (base & mask));
110 		if (start_state != end_state)
111 			return 0xFE;
112 
113 		if ((start & mask) != (base & mask)) {
114 			continue;
115 		}
116 
117 		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
118 		if (prev_match == 0xFF) {
119 			prev_match = curr_match;
120 			continue;
121 		}
122 
123 		if (prev_match == MTRR_TYPE_UNCACHABLE ||
124 		    curr_match == MTRR_TYPE_UNCACHABLE) {
125 			return MTRR_TYPE_UNCACHABLE;
126 		}
127 
128 		if ((prev_match == MTRR_TYPE_WRBACK &&
129 		     curr_match == MTRR_TYPE_WRTHROUGH) ||
130 		    (prev_match == MTRR_TYPE_WRTHROUGH &&
131 		     curr_match == MTRR_TYPE_WRBACK)) {
132 			prev_match = MTRR_TYPE_WRTHROUGH;
133 			curr_match = MTRR_TYPE_WRTHROUGH;
134 		}
135 
136 		if (prev_match != curr_match) {
137 			return MTRR_TYPE_UNCACHABLE;
138 		}
139 	}
140 
141 	if (prev_match != 0xFF)
142 		return prev_match;
143 
144 	return mtrr_state.def_type;
145 }
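/*
 * Illustrative use (a sketch, not an actual caller in this file): code
 * that needs one effective type for a physical range could do
 *
 *	u8 type = mtrr_type_lookup(start, end);
 *	if (type == 0xFE)
 *		return split_and_retry();	range spans differing MTRRs
 *	if (type == 0xFF)
 *		return fallback_type();		MTRRs disabled / state unread
 *
 * where split_and_retry()/fallback_type() are made-up names for this
 * sketch; the PAT code consults this helper in roughly this way.
 */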
146 
147 /*  Get the MSR pair relating to a var range  */
148 static void
149 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
150 {
151 	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
152 	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
153 }
154 
155 static void
156 get_fixed_ranges(mtrr_type *frs)
157 {
158 	unsigned int *p = (unsigned int *) frs;
159 	int i;
160 
161 	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
162 
163 	for (i = 0; i < 2; i++)
164 		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
165 	for (i = 0; i < 8; i++)
166 		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
167 }
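/*
 * Each fixed-range MSR carries eight one-byte memory types, so the
 * reads above fill the 88-byte fixed_ranges[] array as 22 32-bit
 * halves: p[0..1] from the 64K MSR, p[2..5] from the two 16K MSRs and
 * p[6..21] from the eight 4K MSRs.
 */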
168 
169 void mtrr_save_fixed_ranges(void *info)
170 {
171 	if (cpu_has_mtrr)
172 		get_fixed_ranges(mtrr_state.fixed_ranges);
173 }
174 
175 static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
176 {
177 	unsigned i;
178 
179 	for (i = 0; i < 8; ++i, ++types, base += step)
180 		printk(KERN_INFO "MTRR %05X-%05X %s\n",
181 			base, base + step - 1, mtrr_attrib_to_str(*types));
182 }
183 
184 static void prepare_set(void);
185 static void post_set(void);
186 
187 /*  Grab all of the MTRR state for this CPU into *state  */
188 void __init get_mtrr_state(void)
189 {
190 	unsigned int i;
191 	struct mtrr_var_range *vrs;
192 	unsigned lo, dummy;
193 	unsigned long flags;
194 
195 	vrs = mtrr_state.var_ranges;
196 
197 	rdmsr(MTRRcap_MSR, lo, dummy);
198 	mtrr_state.have_fixed = (lo >> 8) & 1;
199 
200 	for (i = 0; i < num_var_ranges; i++)
201 		get_mtrr_var_range(i, &vrs[i]);
202 	if (mtrr_state.have_fixed)
203 		get_fixed_ranges(mtrr_state.fixed_ranges);
204 
205 	rdmsr(MTRRdefType_MSR, lo, dummy);
206 	mtrr_state.def_type = (lo & 0xff);
207 	mtrr_state.enabled = (lo & 0xc00) >> 10;
208 
209 	if (mtrr_show) {
210 		int high_width;
211 
212 		printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
213 		if (mtrr_state.have_fixed) {
214 			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
215 			       mtrr_state.enabled & 1 ? "en" : "dis");
216 			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
217 			for (i = 0; i < 2; ++i)
218 				print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
219 			for (i = 0; i < 8; ++i)
220 				print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
221 		}
222 		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
223 		       mtrr_state.enabled & 2 ? "en" : "dis");
224 		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
225 		for (i = 0; i < num_var_ranges; ++i) {
226 			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
227 				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
228 				       i,
229 				       high_width,
230 				       mtrr_state.var_ranges[i].base_hi,
231 				       mtrr_state.var_ranges[i].base_lo >> 12,
232 				       high_width,
233 				       mtrr_state.var_ranges[i].mask_hi,
234 				       mtrr_state.var_ranges[i].mask_lo >> 12,
235 				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
236 			else
237 				printk(KERN_INFO "MTRR %u disabled\n", i);
238 		}
239 	}
240 	mtrr_state_set = 1;
241 
242 	/* PAT setup for BP. We need to go through sync steps here */
243 	local_irq_save(flags);
244 	prepare_set();
245 
246 	pat_init();
247 
248 	post_set();
249 	local_irq_restore(flags);
250 
251 }
252 
253 /*  Some BIOSes are broken and don't set all MTRRs the same!  */
254 void __init mtrr_state_warn(void)
255 {
256 	unsigned long mask = smp_changes_mask;
257 
258 	if (!mask)
259 		return;
260 	if (mask & MTRR_CHANGE_MASK_FIXED)
261 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
262 	if (mask & MTRR_CHANGE_MASK_VARIABLE)
263 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
264 	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
265 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
266 	printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
267 	printk(KERN_INFO "mtrr: corrected configuration.\n");
268 }
269 
270 /* Doesn't attempt to pass an error out to MTRR users
271    because it's quite complicated in some cases and probably not
272    worth it; the best error handling here is to ignore the failure. */
273 void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
274 {
275 	if (wrmsr_safe(msr, a, b) < 0)
276 		printk(KERN_ERR
277 			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
278 			smp_processor_id(), msr, a, b);
279 }
280 
281 /**
282  * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs.
283  * See AMD publication no. 24593, chapter 3.2.1 for more information.
284  */
285 static inline void k8_enable_fixed_iorrs(void)
286 {
287 	unsigned lo, hi;
288 
289 	rdmsr(MSR_K8_SYSCFG, lo, hi);
290 	mtrr_wrmsr(MSR_K8_SYSCFG, lo
291 				| K8_MTRRFIXRANGE_DRAM_ENABLE
292 				| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
293 }
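/*
 * The two SYSCFG bits OR-ed in above correspond to AMD's MtrrFixDramEn
 * and MtrrFixDramModEn: the former makes the RdMem/WrMem extension bits
 * in the fixed-range MTRRs take effect, the latter allows software to
 * read and modify them.
 */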
294 
295 /**
296  * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
297  * @msr: MSR address of the MTRR which should be checked and updated
298  * @changed: pointer which indicates whether the MTRR needed to be changed
299  * @msrwords: pointer to the MSR values which the MSR should have
300  *
301  * If K8 extensions are wanted, update the K8 SYSCFG MSR as well.
302  * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
303  */
304 static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
305 {
306 	unsigned lo, hi;
307 
308 	rdmsr(msr, lo, hi);
309 
310 	if (lo != msrwords[0] || hi != msrwords[1]) {
311 		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
312 		    boot_cpu_data.x86 == 15 &&
313 		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
314 			k8_enable_fixed_iorrs();
315 		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
316 		*changed = true;
317 	}
318 }
319 
320 /**
321  * generic_get_free_region - Get a free MTRR.
322  * @base: The starting (base) address of the region.
323  * @size: The size (in bytes) of the region.
324  * @replace_reg: mtrr index to be replaced; set to invalid value if none.
325  *
326  * Returns: The index of the region on success, else negative on error.
327  */
328 int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
329 {
330 	int i, max;
331 	mtrr_type ltype;
332 	unsigned long lbase, lsize;
333 
334 	max = num_var_ranges;
335 	if (replace_reg >= 0 && replace_reg < max)
336 		return replace_reg;
337 	for (i = 0; i < max; ++i) {
338 		mtrr_if->get(i, &lbase, &lsize, &ltype);
339 		if (lsize == 0)
340 			return i;
341 	}
342 	return -ENOSPC;
343 }
344 
345 static void generic_get_mtrr(unsigned int reg, unsigned long *base,
346 			     unsigned long *size, mtrr_type *type)
347 {
348 	unsigned int mask_lo, mask_hi, base_lo, base_hi;
349 
350 	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
351 	if ((mask_lo & 0x800) == 0) {
352 		/*  Invalid (i.e. free) range  */
353 		*base = 0;
354 		*size = 0;
355 		*type = 0;
356 		return;
357 	}
358 
359 	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
360 
361 	/* Work out the shifted address mask. */
362 	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
363 	    | mask_lo >> PAGE_SHIFT;
364 
365 	/* This works correctly if size is a power of two, i.e. a
366 	   contiguous range. */
367 	*size = -mask_lo;
368 	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
369 	*type = base_lo & 0xff;
370 }
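/*
 * Worked example (illustrative): a valid variable MTRR describing a
 * 4 MiB region at 0x40000000 ends up, once shifted to page units above,
 * with *base = 0x40000 and mask_lo = 0xFFFFFC00 (the upper bits forced
 * on by size_or_mask), so *size = -mask_lo = 0x400 pages = 4 MiB.
 */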
371 
372 /**
373  * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
374  * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
375  */
376 static int set_fixed_ranges(mtrr_type *frs)
377 {
378 	unsigned long long *saved = (unsigned long long *) frs;
379 	bool changed = false;
380 	int block=-1, range;
381 
382 	while (fixed_range_blocks[++block].ranges)
383 	    for (range=0; range < fixed_range_blocks[block].ranges; range++)
384 		set_fixed_range(fixed_range_blocks[block].base_msr + range,
385 		    &changed, (unsigned int *) saved++);
386 
387 	return changed;
388 }
389 
390 /*  Set the MSR pair relating to a var range. Returns TRUE if
391     changes are made  */
392 static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
393 {
394 	unsigned int lo, hi;
395 	bool changed = false;
396 
397 	rdmsr(MTRRphysBase_MSR(index), lo, hi);
398 	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
399 	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
400 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
401 		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
402 		changed = true;
403 	}
404 
405 	rdmsr(MTRRphysMask_MSR(index), lo, hi);
406 
407 	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
408 	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
409 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
410 		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
411 		changed = true;
412 	}
413 	return changed;
414 }
415 
416 static u32 deftype_lo, deftype_hi;
417 
418 /**
419  * set_mtrr_state - Set the MTRR state for this CPU.
420  *
421  * NOTE: The CPU must already be in a safe state for MTRR changes.
422  * RETURNS: 0 if no changes made, else a mask indicating what was changed.
423  */
424 static unsigned long set_mtrr_state(void)
425 {
426 	unsigned int i;
427 	unsigned long change_mask = 0;
428 
429 	for (i = 0; i < num_var_ranges; i++)
430 		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
431 			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
432 
433 	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
434 		change_mask |= MTRR_CHANGE_MASK_FIXED;
435 
436 	/*  post_set() restores the saved value of MTRRdefType, so to
437 	    change it here we modify the saved copy instead  */
438 	if ((deftype_lo & 0xff) != mtrr_state.def_type
439 	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
440 		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
441 		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
442 	}
443 
444 	return change_mask;
445 }
446 
447 
448 static unsigned long cr4 = 0;
449 static DEFINE_SPINLOCK(set_atomicity_lock);
450 
451 /*
452  * Since we are disabling the cache don't allow any interrupts - they
453  * would run extremely slow and would only increase the pain.  The caller must
454  * ensure that local interrupts are disabled and are reenabled after post_set()
455  * has been called.
456  */
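/*
 * prepare_set()/post_set() together implement the MTRR update sequence
 * described in the Intel manuals: with interrupts off, enter no-fill
 * cache mode (CD=1, NW=0), WBINVD, flush the TLB, disable the MTRRs,
 * make the changes, then flush the TLB again, restore MTRRdefType,
 * re-enable caching and restore CR4.PGE.
 */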
457 
458 static void prepare_set(void) __acquires(set_atomicity_lock)
459 {
460 	unsigned long cr0;
461 
462 	/*  Note that this is not ideal, since the cache is only flushed/disabled
463 	   for this CPU while the MTRRs are changed, but changing this requires
464 	   more invasive changes to the way the kernel boots  */
465 
466 	spin_lock(&set_atomicity_lock);
467 
468 	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
469 	cr0 = read_cr0() | X86_CR0_CD;
470 	write_cr0(cr0);
471 	wbinvd();
472 
473 	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
474 	if (cpu_has_pge) {
475 		cr4 = read_cr4();
476 		write_cr4(cr4 & ~X86_CR4_PGE);
477 	}
478 
479 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
480 	__flush_tlb();
481 
482 	/*  Save MTRR state */
483 	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
484 
485 	/*  Disable MTRRs, and set the default type to uncached  */
486 	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
487 }
488 
489 static void post_set(void) __releases(set_atomicity_lock)
490 {
491 	/*  Flush TLBs (no need to flush caches - they are disabled)  */
492 	__flush_tlb();
493 
494 	/* Intel (P6) standard MTRRs */
495 	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
496 
497 	/*  Enable caches  */
498 	write_cr0(read_cr0() & 0xbfffffff);
499 
500 	/*  Restore value of CR4  */
501 	if (cpu_has_pge)
502 		write_cr4(cr4);
503 	spin_unlock(&set_atomicity_lock);
504 }
505 
506 static void generic_set_all(void)
507 {
508 	unsigned long mask, count;
509 	unsigned long flags;
510 
511 	local_irq_save(flags);
512 	prepare_set();
513 
514 	/* Actually set the state */
515 	mask = set_mtrr_state();
516 
517 	/* also set PAT */
518 	pat_init();
519 
520 	post_set();
521 	local_irq_restore(flags);
522 
523 	/*  Use the atomic bitops to update the global mask  */
524 	for (count = 0; count < sizeof mask * 8; ++count) {
525 		if (mask & 0x01)
526 			set_bit(count, &smp_changes_mask);
527 		mask >>= 1;
528 	}
529 
530 }
531 
532 /**
533  * generic_set_mtrr - set a variable MTRR register on the local CPU
534  * @reg: The register to set.
535  * @base: The base address of the region.
536  * @size: The size of the region. If this is 0 the region is disabled.
537  * @type: The type of the region.
538  */
539 static void generic_set_mtrr(unsigned int reg, unsigned long base,
540 			     unsigned long size, mtrr_type type)
541 {
542 	unsigned long flags;
543 	struct mtrr_var_range *vr;
544 
545 	vr = &mtrr_state.var_ranges[reg];
546 
547 	local_irq_save(flags);
548 	prepare_set();
549 
550 	if (size == 0) {
551 		/* The invalid bit is kept in the mask, so we simply clear the
552 		   relevant mask register to disable a range. */
553 		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
554 		memset(vr, 0, sizeof(struct mtrr_var_range));
555 	} else {
556 		vr->base_lo = base << PAGE_SHIFT | type;
557 		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
558 		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
559 		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
560 
561 		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
562 		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
563 	}
564 
565 	post_set();
566 	local_irq_restore(flags);
567 }
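/*
 * Illustrative example (assuming 36-bit physical addressing): a 2 MiB
 * write-combining region at 0xD0000000 arrives here as base = 0xD0000
 * and size = 0x200 (in pages), giving base_lo = 0xD0000001
 * (address | MTRR_TYPE_WRCOMB), base_hi = 0, mask_lo = 0xFFE00800
 * ((-size << PAGE_SHIFT) | the valid bit) and mask_hi = 0xF (the
 * address-mask bits above 4GB).
 */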
568 
569 int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
570 {
571 	unsigned long lbase, last;
572 
573 	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
574 	    and not touch 0x70000000->0x7003FFFF */
575 	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
576 	    boot_cpu_data.x86_model == 1 &&
577 	    boot_cpu_data.x86_mask <= 7) {
578 		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
579 			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
580 			return -EINVAL;
581 		}
582 		if (!(base + size < 0x70000 || base > 0x7003F) &&
583 		    (type == MTRR_TYPE_WRCOMB
584 		     || type == MTRR_TYPE_WRBACK)) {
585 			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
586 			return -EINVAL;
587 		}
588 	}
589 
590 	/*  Check upper bits of base and last are equal and lower bits are 0
591 	    for base and 1 for last  */
592 	last = base + size - 1;
593 	for (lbase = base; !(lbase & 1) && (last & 1);
594 	     lbase = lbase >> 1, last = last >> 1) ;
595 	if (lbase != last) {
596 		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
597 		       base, size);
598 		return -EINVAL;
599 	}
600 	return 0;
601 }
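/*
 * The shift loop above accepts a range only when base is aligned to its
 * (power-of-two) size: e.g. base 0x100, size 0x100 pages reduces to
 * lbase == last == 1 and passes, while base 0x100, size 0x180 stops at
 * lbase 0x2 vs last 0x4 and is rejected.
 */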
602 
603 
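/* MTRRcap bit 10 (WC) reports whether the write-combining type is supported. */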
604 static int generic_have_wrcomb(void)
605 {
606 	unsigned long config, dummy;
607 	rdmsr(MTRRcap_MSR, config, dummy);
608 	return (config & (1 << 10));
609 }
610 
611 int positive_have_wrcomb(void)
612 {
613 	return 1;
614 }
615 
616 /* Generic MTRR operations, used on CPUs implementing the standard
617    Intel MTRR interface. */
618 struct mtrr_ops generic_mtrr_ops = {
619 	.use_intel_if      = 1,
620 	.set_all	   = generic_set_all,
621 	.get               = generic_get_mtrr,
622 	.get_free_region   = generic_get_free_region,
623 	.set               = generic_set_mtrr,
624 	.validate_add_page = generic_validate_add_page,
625 	.have_wrcomb       = generic_have_wrcomb,
626 };
627