/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include "mtrr.h"

struct mtrr_state {
	struct mtrr_var_range *var_ranges;
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;
	unsigned char have_fixed;
	mtrr_type def_type;
};

struct fixed_range_block {
	int base_msr;	/* start address of an MTRR block */
	int ranges;	/* number of MTRRs in this block */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 },	/* one 64k MTRR */
	{ MTRRfix16K_80000_MSR, 2 },	/* two 16k MTRRs */
	{ MTRRfix4K_C0000_MSR, 8 },	/* eight 4k MTRRs */
	{}
};
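/*
 * Layout note (Intel SDM, "Fixed Range MTRRs"): each fixed-range MSR
 * packs eight one-byte memory types, so the 11 MSRs described above
 * provide 8 * 11 = 88 type fields (NUM_FIXED_RANGES).  MTRRfix64K_00000
 * covers 0x00000-0x7FFFF in eight 64K pieces, the two 16K MSRs cover
 * 0x80000-0xBFFFF, and the eight 4K MSRs cover 0xC0000-0xFFFFF.
 */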
static unsigned long smp_changes_mask;
static struct mtrr_state mtrr_state = {};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "mtrr."

static int mtrr_show;
module_param_named(show, mtrr_show, bool, 0);

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step)
		printk(KERN_INFO "MTRR %05X-%05X %s\n",
		       base, base + step - 1, mtrr_attrib_to_str(*types));
}

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;

	if (!mtrr_state.var_ranges) {
		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof(struct mtrr_var_range),
						GFP_KERNEL);
		if (!mtrr_state.var_ranges)
			return;
	}
	vrs = mtrr_state.var_ranges;

	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;
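	/*
	 * MTRRdefType layout (Intel SDM): bits 7:0 hold the default
	 * memory type, bit 10 (FE) enables the fixed-range MTRRs and
	 * bit 11 (E) enables MTRRs altogether, hence the 0xc00 mask
	 * and two-bit "enabled" value decoded above.
	 */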
	if (mtrr_show) {
		int high_width;

		printk(KERN_INFO "MTRR default type: %s\n",
		       mtrr_attrib_to_str(mtrr_state.def_type));
		if (mtrr_state.have_fixed) {
			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
			       mtrr_state.enabled & 1 ? "en" : "dis");
			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
			for (i = 0; i < 2; ++i)
				print_fixed(0x80000 + i * 0x20000, 0x04000,
					    mtrr_state.fixed_ranges + (i + 1) * 8);
			for (i = 0; i < 8; ++i)
				print_fixed(0xC0000 + i * 0x08000, 0x01000,
					    mtrr_state.fixed_ranges + (i + 3) * 8);
		}
		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
		       mtrr_state.enabled & 2 ? "en" : "dis");
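		/*
		 * size_or_mask has (phys address bits - PAGE_SHIFT) low
		 * bits clear, so ffs(size_or_mask) - 1 recovers that
		 * count; subtracting (32 - PAGE_SHIFT) leaves the number
		 * of physical address bits above 32, and dividing by 4
		 * (rounding up) turns that into the hex digits needed
		 * to print base_hi/mask_hi.  E.g. a 36-bit machine
		 * prints one high digit.
		 */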
		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
		for (i = 0; i < num_var_ranges; ++i) {
			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				       i,
				       high_width,
				       mtrr_state.var_ranges[i].base_hi,
				       mtrr_state.var_ranges[i].base_lo >> 12,
				       high_width,
				       mtrr_state.var_ranges[i].mask_hi,
				       mtrr_state.var_ranges[i].mask_lo >> 12,
				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
			else
				printk(KERN_INFO "MTRR %u disabled\n", i);
		}
	}
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it because the best error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}

/**
 * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
 * see AMD publication no. 24593, chapter 3.2.1 for more information
 */
static inline void k8_enable_fixed_iorrs(void)
{
	unsigned lo, hi;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	mtrr_wrmsr(MSR_K8_SYSCFG, lo
			| K8_MTRRFIXRANGE_DRAM_ENABLE
			| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
}
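/*
 * Note for set_fixed_range() below: on K8, each one-byte fixed-range
 * type field carries two extension bits, RdMem and WrMem, which steer
 * accesses to DRAM instead of memory-mapped I/O (AMD publication
 * no. 24593).  K8_MTRR_RDMEM_WRMEM_MASK selects those bits in all
 * eight type fields of an MSR, so a value that sets any of them must
 * first be unlocked via k8_enable_fixed_iorrs() above.
 */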
/**
 * Checks and updates a fixed-range MTRR if it differs from the value it
 * should have. If K8 extensions are wanted, update the K8 SYSCFG MSR also.
 * see AMD publication no. 24593, chapter 7.8.1, page 233 for more information
 * \param msr MSR address of the MTRR which should be checked and updated
 * \param changed pointer which indicates whether the MTRR needed to be changed
 * \param msrwords pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, int *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 15 &&
		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
			k8_enable_fixed_iorrs();
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = TRUE;
	}
}

int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/* [SUMMARY] Get a free MTRR.
   <base> The starting (base) address of the region.
   <size> The size (in bytes) of the region.
   [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}
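/*
 * Worked example for the decode in generic_get_mtrr() below (values
 * assumed for illustration): on a 36-bit machine with PAGE_SHIFT == 12
 * (size_or_mask == 0xff000000), a 256MB region at 0x100000000 is held
 * as base_hi:base_lo = 0x1:0x00000000 and mask_hi:mask_lo =
 * 0xf:0xf0000800.  Folding the mask into page units gives 0xffff0000,
 * so *size = -mask_lo = 0x10000 pages = 256MB, which is only valid
 * because a well-formed MTRR mask makes the range a power of two.
 */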
static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
		  | mask_lo >> PAGE_SHIFT;

	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}

/**
 * Checks and updates the fixed-range MTRRs if they differ from the saved set
 * \param frs pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	int changed = FALSE;
	int block = -1, range;

	while (fixed_range_blocks[++block].ranges)
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *) saved++);

	return changed;
}
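/*
 * In set_mtrr_var_ranges() below, 0xfffff0ff keeps the type field
 * (bits 7:0) and the page-frame address (bits 31:12) of PhysBase while
 * ignoring the reserved bits 11:8; 0xfffff800 likewise keeps the
 * valid bit (bit 11) and the address bits of PhysMask.
 */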
/* Set the MSR pair relating to a var range.
   Returns TRUE if changes are made. */
static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	int changed = FALSE;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
	       (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = TRUE;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
	       (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = TRUE;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

static unsigned long set_mtrr_state(void)
/* [SUMMARY] Set the MTRR state for this CPU.
   [NOTE] The CPU must already be in a safe state for MTRR changes.
   [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/* post_set() restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
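/*
 * prepare_set()/post_set() below follow the MTRR update protocol that
 * Intel documents (SDM, "MTRR Considerations"): disable caching via
 * CR0.CD, flush caches and TLBs, disable MTRRs, make the change, then
 * undo each step in reverse order.
 */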
static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain. The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */

static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */

	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}
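/*
 * Worked example for the encode in generic_set_mtrr() below, the
 * inverse of the generic_get_mtrr() example above (again assuming a
 * 36-bit machine where size_and_mask == 0x00f00000): base = 0x100000
 * pages and size = 0x10000 pages give mask_lo = -size << 12 | 0x800 =
 * 0xf0000800 and mask_hi = (-size & size_and_mask) >> 20 = 0xf,
 * matching the register values decoded earlier.
 */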
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
   <reg> The register to set.
   <base> The base address of the region.
   <size> The size of the region. If this is 0 the region is disabled.
   <type> The type of the region.
   [RETURNS] Nothing.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/* Check upper bits of base and last are equal and lower bits are 0
	   for base and 1 for last */
	last = base + size - 1;
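	/*
	 * Strip common trailing bits: the loop halts once base has a set
	 * low bit or last has a clear one.  For an aligned power-of-two
	 * range, e.g. base = 0x100000 and last = 0x10ffff (pages), both
	 * reduce to 0x10 and the test passes; any misalignment or
	 * non-power-of-two size leaves lbase != last.
	 */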
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}


static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}

/* Generic (Intel-compatible) MTRR operations */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
	.set_all           = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};