/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

/* GART flush words cached from each misc device, see amd_cache_gart() */
static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        {}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
        {}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
        { 0x00, 0x18, 0x20 },
        { 0xff, 0x00, 0x20 },
        { 0xfe, 0x00, 0x20 },
        { }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

/* Return the next PCI device after @dev that matches one of @ids. */
static struct pci_dev *next_northbridge(struct pci_dev *dev,
                                        const struct pci_device_id *ids)
{
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
        } while (!pci_match_id(ids, dev));
        return dev;
}

int amd_cache_northbridges(void)
{
        u16 i = 0;
        struct amd_northbridge *nb;
        struct pci_dev *misc, *link;

        if (amd_nb_num())
                return 0;

        misc = NULL;
        while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
                i++;

        if (i == 0)
                return 0;

        nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        amd_northbridges.nb = nb;
        amd_northbridges.num = i;

        link = misc = NULL;
        for (i = 0; i != amd_nb_num(); i++) {
                node_to_amd_nb(i)->misc = misc =
                        next_northbridge(misc, amd_nb_misc_ids);
                node_to_amd_nb(i)->link = link =
                        next_northbridge(link, amd_nb_link_ids);
        }

        if (amd_gart_present())
                amd_northbridges.flags |= AMD_NB_GART;

        /*
         * Check for L3 cache presence.
         */
        if (!cpuid_edx(0x80000006))
                return 0;

        /*
         * Some CPU families support L3 Cache Index Disable. There are some
         * limitations because of E382 and E388 on family 0x10.
         */
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
             boot_cpu_data.x86_mask >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        /* L3 cache partitioning is supported on family 0x15 */
        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

        return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
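
/*
 * Example (illustrative sketch, not part of this file): the intended
 * calling pattern for the enumeration interface above. A consumer fills
 * the cache once, then walks the cached devices by node id.
 * nb_dump_example() is a hypothetical name; the calls it makes are the
 * real exported interfaces.
 *
 *	static int nb_dump_example(void)
 *	{
 *		u16 i;
 *
 *		if (amd_cache_northbridges() < 0 || !amd_nb_num())
 *			return -ENODEV;
 *
 *		for (i = 0; i < amd_nb_num(); i++)
 *			pr_info("NB %u: misc %s\n", i,
 *				pci_name(node_to_amd_nb(i)->misc));
 *		return 0;
 *	}
 */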

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway
 */
bool __init early_is_amd_nb(u32 device)
{
        const struct pci_device_id *id;
        u32 vendor = device & 0xffff;

        device >>= 16;
        for (id = amd_nb_misc_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return true;
        return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
        u32 address;
        u64 base, msr;
        unsigned segn_busn_bits;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return NULL;

        /* assume all cpus from fam10h have mmconfig */
        if (boot_cpu_data.x86 < 0x10)
                return NULL;

        address = MSR_FAM10H_MMIO_CONF_BASE;
        rdmsrl(address, msr);

        /* mmconfig is not enabled */
        if (!(msr & FAM10H_MMIO_CONF_ENABLE))
                return NULL;

        base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

        segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
                         FAM10H_MMIO_CONF_BUSRANGE_MASK;

        res->flags = IORESOURCE_MEM;
        res->start = base;
        res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
        return res;
}

/* Return the mask of enabled L3 subcaches for @cpu's compute unit. */
int amd_get_subcaches(int cpu)
{
        struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
        unsigned int mask;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return 0;

        pci_read_config_dword(link, 0x1d4, &mask);

        return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

/* Enable exactly the L3 subcaches in @mask (bits 0-3) for @cpu's compute unit. */
int amd_set_subcaches(int cpu, unsigned long mask)
{
        static unsigned int reset, ban;
        struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
        unsigned int reg;
        int cuid;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
                return -EINVAL;

        /* if necessary, collect reset state of L3 partitioning and BAN mode */
        if (reset == 0) {
                pci_read_config_dword(nb->link, 0x1d4, &reset);
                pci_read_config_dword(nb->misc, 0x1b8, &ban);
                ban &= 0x180000;
        }

        /* deactivate BAN mode if any subcaches are to be disabled */
        if (mask != 0xf) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
        }

        cuid = cpu_data(cpu).cpu_core_id;
        mask <<= 4 * cuid;
        mask |= (0xf ^ (1 << cuid)) << 26;

        pci_write_config_dword(nb->link, 0x1d4, mask);

        /* reset BAN mode if L3 partitioning returned to reset state */
        pci_read_config_dword(nb->link, 0x1d4, &reg);
        if (reg == reset) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                reg &= ~0x180000;
                pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
        }

        return 0;
}
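
/*
 * Example (illustrative sketch): restrict a CPU to a single L3 subcache
 * and restore the previous mask afterwards. subcache_demo() is a
 * hypothetical caller; mask 0x1 keeps only subcache 0 enabled, 0xf all
 * four. Only the two exported calls above are real interfaces.
 *
 *	static void subcache_demo(int cpu)
 *	{
 *		int old = amd_get_subcaches(cpu);
 *
 *		if (amd_set_subcaches(cpu, 0x1))
 *			return;
 *		... run the cache-sensitive workload here ...
 *		amd_set_subcaches(cpu, old);
 *	}
 */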

/* Cache each northbridge's GART flush word (config offset 0x9c) for amd_flush_garts(). */
static int amd_cache_gart(void)
{
        u16 i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return 0;

        flush_words = kmalloc_array(amd_nb_num(), sizeof(u32), GFP_KERNEL);
        if (!flush_words) {
                amd_northbridges.flags &= ~AMD_NB_GART;
                return -ENOMEM;
        }

        for (i = 0; i != amd_nb_num(); i++)
                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                      &flush_words[i]);

        return 0;
}

/* Flush the GART TLB on every northbridge and wait for completion. */
void amd_flush_garts(void)
{
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        /*
         * Avoid races between AGP and IOMMU. In theory it's not needed
         * but I'm not sure if the hardware won't lose flush requests
         * when another is pending. This whole thing is so expensive anyway
         * that it doesn't matter to serialize more. -AK
         */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
        for (i = 0; i < amd_nb_num(); i++) {
                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                       flush_words[i] | 1);
                flushed++;
        }
        for (i = 0; i < amd_nb_num(); i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush. */
                for (;;) {
                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);
        if (!flushed)
                pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
        int err = 0;

        err = amd_cache_northbridges();

        if (err < 0)
                pr_notice("Cannot enumerate AMD northbridges\n");

        if (amd_cache_gart() < 0)
                pr_notice("Cannot initialize GART flush words, GART support disabled\n");

        return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
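
/*
 * Example (illustrative sketch): the call pattern expected from a GART
 * owner (AGP or the GART IOMMU). The gatt/idx names and the zero
 * "unmapped" encoding are invented for illustration; the real point is
 * that amd_flush_garts() runs after the PTE update so that every
 * northbridge drops its stale translation.
 *
 *	static void gart_unmap_example(u32 *gatt, unsigned long idx)
 *	{
 *		gatt[idx] = 0;
 *		amd_flush_garts();
 *	}
 */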