/*
 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36 bits on most modern x86).
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/tlbflush.h>
#include <asm/pat.h>
#include "mtrr.h"

struct mtrr_state {
        struct mtrr_var_range var_ranges[MAX_VAR_RANGES];
        mtrr_type fixed_ranges[NUM_FIXED_RANGES];
        unsigned char enabled;
        unsigned char have_fixed;
        mtrr_type def_type;
};

struct fixed_range_block {
        int base_msr;   /* start address of an MTRR block */
        int ranges;     /* number of MTRRs in this block */
};

static struct fixed_range_block fixed_range_blocks[] = {
        { MTRRfix64K_00000_MSR, 1 },    /* one 64k MTRR */
        { MTRRfix16K_80000_MSR, 2 },    /* two 16k MTRRs */
        { MTRRfix4K_C0000_MSR,  8 },    /* eight 4k MTRRs */
        {}
};

static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};
static int mtrr_state_set;
u64 mtrr_tom2;

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."

static int mtrr_show;
module_param_named(show, mtrr_show, bool, 0);

/*
 * Returns the effective MTRR type for the region.
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
        int i;
        u64 base, mask;
        u8 prev_match, curr_match;

        if (!mtrr_state_set)
                return 0xFF;

        if (!mtrr_state.enabled)
                return 0xFF;

        /* Make end inclusive instead of exclusive */
        end--;
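        /*
         * Fixed-range layout, for reference (a sketch of the index math
         * used below):
         *
         *   0x00000-0x7FFFF:  8 x 64K ranges, idx  0..7,  idx = start >> 16
         *   0x80000-0xBFFFF: 16 x 16K ranges, idx  8..23, idx = 8 + ((start - 0x80000) >> 14)
         *   0xC0000-0xFFFFF: 64 x  4K ranges, idx 24..87, idx = 24 + ((start - 0xC0000) >> 12)
         *
         * Worked example: start = 0xA4000 falls in the 16K block, so
         * idx = 8 + (0x24000 >> 14) = 8 + 9 = 17.
         */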
        /* Look in fixed ranges. Just return the type as per start. */
        if (mtrr_state.have_fixed && (start < 0x100000)) {
                int idx;

                if (start < 0x80000) {
                        idx = 0;
                        idx += (start >> 16);
                        return mtrr_state.fixed_ranges[idx];
                } else if (start < 0xC0000) {
                        idx = 1 * 8;
                        idx += ((start - 0x80000) >> 14);
                        return mtrr_state.fixed_ranges[idx];
                } else if (start < 0x1000000) {
                        idx = 3 * 8;
                        idx += ((start - 0xC0000) >> 12);
                        return mtrr_state.fixed_ranges[idx];
                }
        }

        /*
         * Look in variable ranges.
         * Look for multiple ranges matching this address and pick the type
         * as per MTRR precedence.
         */
        if (!(mtrr_state.enabled & 2))
                return mtrr_state.def_type;

        prev_match = 0xFF;
        for (i = 0; i < num_var_ranges; ++i) {
                unsigned short start_state, end_state;

                if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
                        continue;

                base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
                       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
                mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
                       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

                start_state = ((start & mask) == (base & mask));
                end_state = ((end & mask) == (base & mask));
                if (start_state != end_state)
                        return 0xFE;

                if ((start & mask) != (base & mask))
                        continue;

                curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
                if (prev_match == 0xFF) {
                        prev_match = curr_match;
                        continue;
                }

                if (prev_match == MTRR_TYPE_UNCACHABLE ||
                    curr_match == MTRR_TYPE_UNCACHABLE)
                        return MTRR_TYPE_UNCACHABLE;

                if ((prev_match == MTRR_TYPE_WRBACK &&
                     curr_match == MTRR_TYPE_WRTHROUGH) ||
                    (prev_match == MTRR_TYPE_WRTHROUGH &&
                     curr_match == MTRR_TYPE_WRBACK)) {
                        prev_match = MTRR_TYPE_WRTHROUGH;
                        curr_match = MTRR_TYPE_WRTHROUGH;
                }

                if (prev_match != curr_match)
                        return MTRR_TYPE_UNCACHABLE;
        }

        if (mtrr_tom2) {
                if (start >= (1ULL << 32) && (end < mtrr_tom2))
                        return MTRR_TYPE_WRBACK;
        }

        if (prev_match != 0xFF)
                return prev_match;

        return mtrr_state.def_type;
}

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
        rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
        rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
                u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
        struct mtrr_var_range *vr;

        vr = mtrr_state.var_ranges;

        vr[index].base_lo = base_lo;
        vr[index].base_hi = base_hi;
        vr[index].mask_lo = mask_lo;
        vr[index].mask_hi = mask_hi;
}

static void
get_fixed_ranges(mtrr_type *frs)
{
        unsigned int *p = (unsigned int *)frs;
        int i;

        rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

        for (i = 0; i < 2; i++)
                rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
        for (i = 0; i < 8; i++)
                rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
        if (cpu_has_mtrr)
                get_fixed_ranges(mtrr_state.fixed_ranges);
}

static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
        unsigned i;

        for (i = 0; i < 8; ++i, ++types, base += step)
                printk(KERN_INFO "MTRR %05X-%05X %s\n",
                        base, base + step - 1, mtrr_attrib_to_str(*types));
}
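/*
 * For illustration (a hypothetical example, not values read from hardware):
 * a 256MB write-back range at 4GB on a CPU with 40 physical address bits
 * would be encoded in the variable-range MSR pair as
 *
 *   PhysBase = 0x0000000100000006   (base = 1ULL << 32, type 6 = WB in bits 7:0)
 *   PhysMask = 0x000000FFF0000800   (mask covers bits 39..28, bit 11 = valid)
 *
 * An address falls in the range iff (addr & mask) == (base & mask), which
 * is exactly the test mtrr_type_lookup() performs above.
 */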
static void prepare_set(void);
static void post_set(void);

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
        unsigned int i;
        struct mtrr_var_range *vrs;
        unsigned lo, dummy;
        unsigned long flags;

        vrs = mtrr_state.var_ranges;

        rdmsr(MTRRcap_MSR, lo, dummy);
        mtrr_state.have_fixed = (lo >> 8) & 1;

        for (i = 0; i < num_var_ranges; i++)
                get_mtrr_var_range(i, &vrs[i]);
        if (mtrr_state.have_fixed)
                get_fixed_ranges(mtrr_state.fixed_ranges);

        rdmsr(MTRRdefType_MSR, lo, dummy);
        mtrr_state.def_type = (lo & 0xff);
        mtrr_state.enabled = (lo & 0xc00) >> 10;

        if (amd_special_default_mtrr()) {
                unsigned low, high;
                /* TOP_MEM2 */
                rdmsr(MSR_K8_TOP_MEM2, low, high);
                mtrr_tom2 = high;
                mtrr_tom2 <<= 32;
                mtrr_tom2 |= low;
                mtrr_tom2 &= 0xffffff800000ULL;
        }
        if (mtrr_show) {
                int high_width;

                printk(KERN_INFO "MTRR default type: %s\n",
                        mtrr_attrib_to_str(mtrr_state.def_type));
                if (mtrr_state.have_fixed) {
                        printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
                                mtrr_state.enabled & 1 ? "en" : "dis");
                        print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
                        for (i = 0; i < 2; ++i)
                                print_fixed(0x80000 + i * 0x20000, 0x04000,
                                        mtrr_state.fixed_ranges + (i + 1) * 8);
                        for (i = 0; i < 8; ++i)
                                print_fixed(0xC0000 + i * 0x08000, 0x01000,
                                        mtrr_state.fixed_ranges + (i + 3) * 8);
                }
                printk(KERN_INFO "MTRR variable ranges %sabled:\n",
                        mtrr_state.enabled & 2 ? "en" : "dis");
                high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) -
                              (32 - PAGE_SHIFT) + 3) / 4;
                for (i = 0; i < num_var_ranges; ++i) {
                        if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
                                printk(KERN_INFO
                                        "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
                                        i,
                                        high_width,
                                        mtrr_state.var_ranges[i].base_hi,
                                        mtrr_state.var_ranges[i].base_lo >> 12,
                                        high_width,
                                        mtrr_state.var_ranges[i].mask_hi,
                                        mtrr_state.var_ranges[i].mask_lo >> 12,
                                        mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
                        else
                                printk(KERN_INFO "MTRR %u disabled\n", i);
                }
                if (mtrr_tom2) {
                        printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
                                mtrr_tom2, mtrr_tom2 >> 20);
                }
        }
        mtrr_state_set = 1;

        /* PAT setup for BP. We need to go through sync steps here */
        local_irq_save(flags);
        prepare_set();

        pat_init();

        post_set();
        local_irq_restore(flags);
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
        unsigned long mask = smp_changes_mask;

        if (!mask)
                return;
        if (mask & MTRR_CHANGE_MASK_FIXED)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_VARIABLE)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_DEFTYPE)
                printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
        printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
        printk(KERN_INFO "mtrr: corrected configuration.\n");
}
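/*
 * MTRRdefType layout, as the masks in this file assume it (see the 0xff,
 * 0xc00 and 0xcff masks used in get_mtrr_state() and set_mtrr_state()):
 *
 *   bits 7:0   default memory type
 *   bit  10    FE - fixed-range MTRRs enabled
 *   bit  11    E  - MTRRs enabled
 */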
/*
 * Doesn't attempt to pass an error out to MTRR users because it's quite
 * complicated in some cases and probably not worth it because the best
 * error handling is to ignore it.
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
        if (wrmsr_safe(msr, a, b) < 0)
                printk(KERN_ERR
                        "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
                        smp_processor_id(), msr, a, b);
}

/**
 * k8_enable_fixed_iorrs - enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
 *
 * See AMD publication no. 24593, chapter 3.2.1 for more information.
 */
static inline void k8_enable_fixed_iorrs(void)
{
        unsigned lo, hi;

        rdmsr(MSR_K8_SYSCFG, lo, hi);
        mtrr_wrmsr(MSR_K8_SYSCFG, lo
                        | K8_MTRRFIXRANGE_DRAM_ENABLE
                        | K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
}

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 *
 * If K8 extensions are wanted, update the K8 SYSCFG MSR also.
 * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
        unsigned lo, hi;

        rdmsr(msr, lo, hi);

        if (lo != msrwords[0] || hi != msrwords[1]) {
                if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
                    (boot_cpu_data.x86 >= 0x0f && boot_cpu_data.x86 <= 0x11) &&
                    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
                        k8_enable_fixed_iorrs();
                mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
                *changed = true;
        }
}

/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
        int i, max;
        mtrr_type ltype;
        unsigned long lbase, lsize;

        max = num_var_ranges;
        if (replace_reg >= 0 && replace_reg < max)
                return replace_reg;
        for (i = 0; i < max; ++i) {
                mtrr_if->get(i, &lbase, &lsize, &ltype);
                if (lsize == 0)
                        return i;
        }
        return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
                             unsigned long *size, mtrr_type *type)
{
        unsigned int mask_lo, mask_hi, base_lo, base_hi;
        unsigned int tmp, hi;

        rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
        if ((mask_lo & 0x800) == 0) {
                /* Invalid (i.e. free) range */
                *base = 0;
                *size = 0;
                *type = 0;
                return;
        }

        rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

        /* Work out the shifted address mask. */
        tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
        mask_lo = size_or_mask | tmp;
        /* Expand tmp with high bits to all 1s */
        hi = fls(tmp);
        if (hi > 0) {
                tmp |= ~((1 << (hi - 1)) - 1);

                if (tmp != mask_lo) {
                        WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
                        mask_lo = tmp;
                }
        }
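        /*
         * Worked example (hypothetical CPU with 36 physical address bits,
         * for which size_or_mask would be 0xff000000 in page units): a
         * 256MB range has a page mask of 0x00ff0000, so mask_lo becomes
         * 0xff000000 | 0x00ff0000 = 0xffff0000, and -mask_lo below yields
         * 0x00010000 pages = 256MB.
         */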
        /*
         * This works correctly if size is a power of two, i.e. a
         * contiguous range.
         */
        *size = -mask_lo;
        *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
        *type = base_lo & 0xff;
}

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
        unsigned long long *saved = (unsigned long long *)frs;
        bool changed = false;
        int block = -1, range;

        while (fixed_range_blocks[++block].ranges)
                for (range = 0; range < fixed_range_blocks[block].ranges; range++)
                        set_fixed_range(fixed_range_blocks[block].base_msr + range,
                                        &changed, (unsigned int *)saved++);

        return changed;
}

/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
        unsigned int lo, hi;
        bool changed = false;

        rdmsr(MTRRphysBase_MSR(index), lo, hi);
        if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
            || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
               (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
                mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
                changed = true;
        }

        rdmsr(MTRRphysMask_MSR(index), lo, hi);

        if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
            || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
               (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
                mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
                changed = true;
        }
        return changed;
}

static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
        unsigned int i;
        unsigned long change_mask = 0;

        for (i = 0; i < num_var_ranges; i++)
                if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
                        change_mask |= MTRR_CHANGE_MASK_VARIABLE;

        if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
                change_mask |= MTRR_CHANGE_MASK_FIXED;

        /*
         * set_mtrr_restore restores the old value of MTRRdefType,
         * so to set it we fiddle with the saved value.
         */
        if ((deftype_lo & 0xff) != mtrr_state.def_type
            || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
                deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
                             (mtrr_state.enabled << 10);
                change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
        }

        return change_mask;
}

static unsigned long cr4;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slowly and would only increase the pain. The caller
 * must ensure that local interrupts are disabled and are reenabled after
 * post_set() has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
        unsigned long cr0;

        /*
         * Note that this is not ideal, since the cache is only
         * flushed/disabled for this CPU while the MTRRs are changed, but
         * changing this requires more invasive changes to the way the
         * kernel boots.
         */
        spin_lock(&set_atomicity_lock);
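        /*
         * What follows is roughly the MTRR/PAT update sequence from Intel's
         * documentation: enter no-fill cache mode (CD=1, NW=0), flush the
         * caches, flush the TLBs, disable the MTRRs, make the changes, then
         * undo each step in post_set().
         */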
        /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
        cr0 = read_cr0() | X86_CR0_CD;
        write_cr0(cr0);
        wbinvd();

        /* Save value of CR4 and clear Page Global Enable (bit 7) */
        if (cpu_has_pge) {
                cr4 = read_cr4();
                write_cr4(cr4 & ~X86_CR4_PGE);
        }

        /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
        __flush_tlb();

        /* Save MTRR state */
        rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

        /* Disable MTRRs, and set the default type to uncached */
        mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
        /* Flush TLBs (no need to flush caches - they are disabled) */
        __flush_tlb();

        /* Intel (P6) standard MTRRs */
        mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

        /* Enable caches */
        write_cr0(read_cr0() & 0xbfffffff);

        /* Restore value of CR4 */
        if (cpu_has_pge)
                write_cr4(cr4);
        spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
        unsigned long mask, count;
        unsigned long flags;

        local_irq_save(flags);
        prepare_set();

        /* Actually set the state */
        mask = set_mtrr_state();

        /* Also set PAT */
        pat_init();

        post_set();
        local_irq_restore(flags);

        /* Use the atomic bitops to update the global mask */
        for (count = 0; count < sizeof mask * 8; ++count) {
                if (mask & 0x01)
                        set_bit(count, &smp_changes_mask);
                mask >>= 1;
        }
}

/**
 * generic_set_mtrr - set a variable MTRR register on the local CPU.
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
                             unsigned long size, mtrr_type type)
{
        unsigned long flags;
        struct mtrr_var_range *vr;

        vr = &mtrr_state.var_ranges[reg];

        local_irq_save(flags);
        prepare_set();
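        /*
         * Worked example for the size != 0 path below (hypothetical
         * values): a 64MB write-combining range at 0xD0000000 arrives here
         * as base = 0xD0000 and size = 0x4000 (both in 4K pages), giving
         *
         *   base_lo = (0xD0000 << 12) | 1      = 0xD0000001  (type 1 = WC)
         *   mask_lo = (-0x4000 << 12) | 0x800  = 0xFC000800  (bit 11 = valid)
         *
         * The _hi halves carry the physical address bits above 32, cut down
         * to the CPU's address width by size_and_mask.
         */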
        if (size == 0) {
                /*
                 * The invalid bit is kept in the mask, so we simply clear
                 * the relevant mask register to disable a range.
                 */
                mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
                memset(vr, 0, sizeof(struct mtrr_var_range));
        } else {
                vr->base_lo = base << PAGE_SHIFT | type;
                vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
                vr->mask_lo = -size << PAGE_SHIFT | 0x800;
                vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

                mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
                mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
        }

        post_set();
        local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
        unsigned long lbase, last;

        /*
         * For Intel PPro stepping <= 7, must be 4 MiB aligned
         * and not touch 0x70000000->0x7003FFFF.
         */
        if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask <= 7) {
                if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
                        printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
                        return -EINVAL;
                }
                if (!(base + size < 0x70000 || base > 0x7003F) &&
                    (type == MTRR_TYPE_WRCOMB
                     || type == MTRR_TYPE_WRBACK)) {
                        printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
                        return -EINVAL;
                }
        }

        /*
         * Check that the upper bits of base and last are equal and that the
         * lower bits are 0 for base and 1 for last, i.e. that base is
         * aligned to a power-of-two size.
         */
        last = base + size - 1;
        for (lbase = base; !(lbase & 1) && (last & 1);
             lbase = lbase >> 1, last = last >> 1)
                ;
        if (lbase != last) {
                printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
                        base, size);
                return -EINVAL;
        }
        return 0;
}

static int generic_have_wrcomb(void)
{
        unsigned long config, dummy;

        rdmsr(MTRRcap_MSR, config, dummy);
        return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
        return 1;
}

/* Generic structure... */
struct mtrr_ops generic_mtrr_ops = {
        .use_intel_if           = 1,
        .set_all                = generic_set_all,
        .get                    = generic_get_mtrr,
        .get_free_region        = generic_get_free_region,
        .set                    = generic_set_mtrr,
        .validate_add_page      = generic_validate_add_page,
        .have_wrcomb            = generic_have_wrcomb,
};
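/*
 * A minimal sketch of how this ops table is consumed (hypothetical call
 * sites; the actual dispatch lives in mtrr/main.c):
 *
 *      mtrr_if = &generic_mtrr_ops;
 *      ...
 *      mtrr_if->get(reg, &base, &size, &type);    read back one register
 *      mtrr_if->set(reg, base, size, type);       program it on this CPU
 *      mtrr_if->set_all();                        replay the full saved state
 */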