/*
 * This only handles 32-bit MTRR on 32-bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36 bits on most modern x86).
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/tlbflush.h>
#include <asm/pat.h>
#include "mtrr.h"

struct mtrr_state {
	struct mtrr_var_range var_ranges[MAX_VAR_RANGES];
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;
	unsigned char have_fixed;
	mtrr_type def_type;
};

struct fixed_range_block {
	int base_msr;		/* start address of an MTRR block */
	int ranges;		/* number of MTRRs in this block */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 },	/* one 64k MTRR */
	{ MTRRfix16K_80000_MSR, 2 },	/* two 16k MTRRs */
	{ MTRRfix4K_C0000_MSR, 8 },	/* eight 4k MTRRs */
	{}
};

static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};
static int mtrr_state_set;
u64 mtrr_tom2;

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."

static int mtrr_show;
module_param_named(show, mtrr_show, bool, 0);

/*
 * Returns the effective MTRR type for the region.
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	if (!mtrr_state_set)
		return 0xFF;

	if (!mtrr_state.enabled)
		return 0xFF;

	/* Make end inclusive instead of exclusive */
	end--;
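	/*
	 * The fixed_ranges[] array holds 88 entries: 8 x 64K types for
	 * 0x00000-0x7FFFF, 16 x 16K types for 0x80000-0xBFFFF and
	 * 64 x 4K types for 0xC0000-0xFFFFF. The index arithmetic below
	 * maps an address to its slot; e.g. start == 0xA0000 gives
	 * idx = 8 + ((0xA0000 - 0x80000) >> 14) = 8 + 8 = 16, the first
	 * entry of the second 16K register (MTRRfix16K_80000_MSR + 1).
	 */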
	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state.have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges.
	 * Look for multiple ranges matching this address and pick the type
	 * as per MTRR precedence.
	 */
	if (!(mtrr_state.enabled & 2)) {
		return mtrr_state.def_type;
	}

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask)) {
			continue;
		}

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE) {
			return MTRR_TYPE_UNCACHABLE;
		}

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match) {
			return MTRR_TYPE_UNCACHABLE;
		}
	}

	if (mtrr_tom2) {
		if (start >= (1ULL<<32) && (end < mtrr_tom2))
			return MTRR_TYPE_WRBACK;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state.def_type;
}

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}

static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step)
		printk(KERN_INFO "MTRR %05X-%05X %s\n",
			base, base + step - 1, mtrr_attrib_to_str(*types));
}
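/*
 * Layout note (per the Intel SDM's MTRRphysBasen/MTRRphysMaskn description):
 * in each variable-range MSR pair, PhysBase keeps the memory type in bits
 * 7:0 and the base address from bit 12 up, while PhysMask keeps the valid
 * bit in bit 11 and the address mask from bit 12 up. An address A falls in
 * range i iff (A & mask_i) == (base_i & mask_i), which is what
 * mtrr_type_lookup() above and the set/get helpers below rely on.
 */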
static void prepare_set(void);
static void post_set(void);

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned long flags;

	vrs = mtrr_state.var_ranges;

	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;
		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}
	if (mtrr_show) {
		int high_width;

		printk(KERN_INFO "MTRR default type: %s\n",
		       mtrr_attrib_to_str(mtrr_state.def_type));
		if (mtrr_state.have_fixed) {
			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
			       mtrr_state.enabled & 1 ? "en" : "dis");
			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
			for (i = 0; i < 2; ++i)
				print_fixed(0x80000 + i * 0x20000, 0x04000,
					    mtrr_state.fixed_ranges + (i + 1) * 8);
			for (i = 0; i < 8; ++i)
				print_fixed(0xC0000 + i * 0x08000, 0x01000,
					    mtrr_state.fixed_ranges + (i + 3) * 8);
		}
		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
		       mtrr_state.enabled & 2 ? "en" : "dis");
		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32)
			      - (32 - PAGE_SHIFT) + 3) / 4;
		for (i = 0; i < num_var_ranges; ++i) {
			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
				printk(KERN_INFO
				       "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				       i,
				       high_width,
				       mtrr_state.var_ranges[i].base_hi,
				       mtrr_state.var_ranges[i].base_lo >> 12,
				       high_width,
				       mtrr_state.var_ranges[i].mask_hi,
				       mtrr_state.var_ranges[i].mask_lo >> 12,
				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
			else
				printk(KERN_INFO "MTRR %u disabled\n", i);
		}
		if (mtrr_tom2) {
			printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
			       mtrr_tom2, mtrr_tom2>>20);
		}
	}
	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}
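/*
 * A note on wrmsr_safe(): unlike a plain wrmsr, it catches the #GP fault
 * that a bad MSR write would raise and reports it as a negative return
 * value, so a broken write below is logged rather than taking the CPU down.
 */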
/*
 * Doesn't attempt to pass an error out to MTRR users
 * because it's quite complicated in some cases and probably not
 * worth it because the best error handling is to ignore it.
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}

/**
 * k8_enable_fixed_iorrs - enable extended fixed-range MTRR bits on K8 CPUs
 *
 * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs.
 * See AMD publication no. 24593, chapter 3.2.1 for more information.
 */
static inline void k8_enable_fixed_iorrs(void)
{
	unsigned lo, hi;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	mtrr_wrmsr(MSR_K8_SYSCFG, lo
			| K8_MTRRFIXRANGE_DRAM_ENABLE
			| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
}

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 *
 * If K8 extensions are wanted, update the K8 SYSCFG MSR also.
 * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    (boot_cpu_data.x86 >= 0x0f && boot_cpu_data.x86 <= 0x11) &&
		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
			k8_enable_fixed_iorrs();
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;
	unsigned int tmp, hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask_lo = size_or_mask | tmp;
	/* Expand tmp with high bits to all 1s */
	hi = fls(tmp);
	if (hi > 0) {
		tmp |= ~((1<<(hi - 1)) - 1);

		if (tmp != mask_lo) {
			static int once = 1;

			if (once) {
				printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
				once = 0;
			}
			mask_lo = tmp;
		}
	}
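	/*
	 * Worked example (assuming 4K pages): a 128MB range on a 36-bit
	 * machine ends up with a page-granular mask of mask_lo == 0xFFFF8000
	 * once the high bits are filled in, and -0xFFFF8000 == 0x8000 pages
	 * == 128MB, so negating the mask yields the size directly, provided
	 * the range really is a power of two as the comment below requires.
	 */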
	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range.
	 */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	bool changed = false;
	int block = -1, range;

	while (fixed_range_blocks[++block].ranges)
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *) saved++);

	return changed;
}

/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
	       (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
	       (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*
	 * Set_mtrr_restore restores the old value of MTRRdefType,
	 * so to set it we fiddle with the saved value.
	 */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}

static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain. The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*
	 * Note that this is not ideal, since the cache is only flushed/disabled
	 * for this CPU while the MTRRs are changed, but changing this requires
	 * more invasive changes to the way the kernel boots.
	 */
	spin_lock(&set_atomicity_lock);
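	/*
	 * The steps below follow the MTRR update procedure from the Intel
	 * SDM: disable caching (CD=1, NW=0) and flush with wbinvd, disable
	 * global pages and flush the TLB, then save and disable the MTRRs
	 * with the default type forced to uncached. post_set() undoes each
	 * step in reverse once the new settings are in place.
	 */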
	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* Also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}
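/*
 * Encoding example for generic_set_mtrr() below (illustrative, assuming
 * 4K pages and a 36-bit physical address space): a 256MB write-combining
 * region at 3GB arrives as base == 0xC0000 pages and size == 0x10000
 * pages, giving base_lo = (0xC0000 << 12) | MTRR_TYPE_WRCOMB = 0xC0000001
 * and mask_lo = (-0x10000 << 12) | 0x800 = 0xF0000800, while mask_hi
 * carries the remaining mask bits for physical bits above 4GB (0xF here).
 */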
/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/*
		 * The invalid bit is kept in the mask, so we simply clear the
		 * relevant mask register to disable a range.
		 */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/*
	 * For Intel PPro stepping <= 7, must be 4 MiB aligned
	 * and not touch 0x70000000->0x7003FFFF.
	 */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*
	 * Check that the upper bits of base and last are equal and the lower
	 * bits are 0 for base and 1 for last; e.g. base 0x40000 and size
	 * 0x10000 (in pages) both reduce to 0x4, so that range is aligned.
	 */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}

static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;

	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}

/* Generic structure... */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if = 1,
	.set_all = generic_set_all,
	.get = generic_get_mtrr,
	.get_free_region = generic_get_free_region,
	.set = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb = generic_have_wrcomb,
};