/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4	0x1704

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{}
};
EXPORT_SYMBOL_GPL(amd_nb_misc_ids);

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);
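/*
 * Example (illustrative only): how a caller typically maps a CPU to its
 * northbridge and checks a feature flag before touching the hardware.
 * This mirrors what amd_get_subcaches() below does:
 *
 *	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
 *	u32 mask;
 *
 *	if (nb && amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
 *		pci_read_config_dword(nb->link, 0x1d4, &mask);
 */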
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
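/*
 * Example (illustrative only): reading one SMN register on node 0 through
 * the root device's index/data register pair programmed above. The SMN
 * address used here is a made-up placeholder, not a documented register:
 *
 *	u32 val;
 *
 *	if (!amd_smn_read(0, 0x5a000, &val))
 *		pr_info("SMN 0x5a000, node 0: 0x%x\n", val);
 */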
/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers, but so far we only need the LO register.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

	ficaa  = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);
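/*
 * Example (illustrative only): for func 0, reg 0x104 and instance_id 5,
 * the FICAA word built above decomposes as
 *
 *	bit        0 = 1              (indirect access enable)
 *	bits     9:2 = 0x104 & 0x3FC  (dword-aligned register offset)
 *	bits   13:11 = 0              (PCI function)
 *	bits   23:16 = 5              (instance ID)
 *
 * and a read of that register would look like this (placeholder values,
 * not a documented register):
 *
 *	u32 lo;
 *
 *	if (!amd_df_indirect_read(0, 0, 0x104, 5, &lo))
 *		pr_info("DF F0x104, instance 5: 0x%x\n", lo);
 */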
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *root, *misc, *link;

	if (amd_northbridges.num)
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (!i)
		return -ENODEV;

	nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = root = NULL;
	for (i = 0; i != amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, amd_root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of errata E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return NULL;

	/* Assume all CPUs from family 0x10 onwards have mmconfig. */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK << FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end   = base + (1ULL << (segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
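/*
 * Example (illustrative only): restricting the compute unit of CPU 0 to
 * subcaches 0 and 1, then restoring the previous assignment. The mask uses
 * one bit per subcache, in the same format amd_get_subcaches() returns:
 *
 *	int old = amd_get_subcaches(0);
 *
 *	if (!amd_set_subcaches(0, 0x3)) {
 *		(run the workload)
 *		amd_set_subcaches(0, old);
 *	}
 */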
pr_notice("Cannot initialize GART flush words, GART support disabled\n"); 363 return; 364 } 365 366 for (i = 0; i != amd_northbridges.num; i++) 367 pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]); 368 } 369 370 void amd_flush_garts(void) 371 { 372 int flushed, i; 373 unsigned long flags; 374 static DEFINE_SPINLOCK(gart_lock); 375 376 if (!amd_nb_has_feature(AMD_NB_GART)) 377 return; 378 379 /* 380 * Avoid races between AGP and IOMMU. In theory it's not needed 381 * but I'm not sure if the hardware won't lose flush requests 382 * when another is pending. This whole thing is so expensive anyways 383 * that it doesn't matter to serialize more. -AK 384 */ 385 spin_lock_irqsave(&gart_lock, flags); 386 flushed = 0; 387 for (i = 0; i < amd_northbridges.num; i++) { 388 pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c, 389 flush_words[i] | 1); 390 flushed++; 391 } 392 for (i = 0; i < amd_northbridges.num; i++) { 393 u32 w; 394 /* Make sure the hardware actually executed the flush*/ 395 for (;;) { 396 pci_read_config_dword(node_to_amd_nb(i)->misc, 397 0x9c, &w); 398 if (!(w & 1)) 399 break; 400 cpu_relax(); 401 } 402 } 403 spin_unlock_irqrestore(&gart_lock, flags); 404 if (!flushed) 405 pr_notice("nothing to flush?\n"); 406 } 407 EXPORT_SYMBOL_GPL(amd_flush_garts); 408 409 static void __fix_erratum_688(void *info) 410 { 411 #define MSR_AMD64_IC_CFG 0xC0011021 412 413 msr_set_bit(MSR_AMD64_IC_CFG, 3); 414 msr_set_bit(MSR_AMD64_IC_CFG, 14); 415 } 416 417 /* Apply erratum 688 fix so machines without a BIOS fix work. */ 418 static __init void fix_erratum_688(void) 419 { 420 struct pci_dev *F4; 421 u32 val; 422 423 if (boot_cpu_data.x86 != 0x14) 424 return; 425 426 if (!amd_northbridges.num) 427 return; 428 429 F4 = node_to_amd_nb(0)->link; 430 if (!F4) 431 return; 432 433 if (pci_read_config_dword(F4, 0x164, &val)) 434 return; 435 436 if (val & BIT(2)) 437 return; 438 439 on_each_cpu(__fix_erratum_688, NULL, 0); 440 441 pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n"); 442 } 443 444 static __init int init_amd_nbs(void) 445 { 446 amd_cache_northbridges(); 447 amd_cache_gart(); 448 449 fix_erratum_688(); 450 451 return 0; 452 } 453 454 /* This has to go after the PCI subsystem */ 455 fs_initcall(init_amd_nbs); 456