// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT		0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT		0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT		0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT		0x1630
#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT		0x14a4
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT		0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT		0x14e8
#define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4	0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4	0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4	0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4	0x1444
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4	0x1728
#define PCI_DEVICE_ID_AMD_19H_DF_F4		0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4	0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4	0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4	0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4	0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4	0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4	0x12fc

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4	0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
	{}
};
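
/*
 * The "misc" devices above are PCI function 3 (miscellaneous/data fabric
 * control) of each northbridge; the "link" devices below are function 4
 * (link/extended control). The number of misc devices found is what
 * determines amd_northbridges.num.
 */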

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);
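
/*
 * Illustrative (not compiled) sketch of how consumers such as EDAC or
 * hwmon drivers typically walk the cached northbridges using the
 * accessors exported above:
 *
 *	u16 i;
 *	u32 val;
 *
 *	for (i = 0; i < amd_nb_num(); i++) {
 *		struct amd_northbridge *nb = node_to_amd_nb(i);
 *
 *		if (nb && nb->misc)
 *			pci_read_config_dword(nb->misc, 0x9c, &val);
 *	}
 *
 * (0x9c is the GART flush word register read later in this file; any
 * config space offset would do for the illustration.)
 */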

/* Iterate to the next PCI device matching any entry in @ids. */
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
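
/*
 * Illustrative (not compiled) example of the SMN accessors; the offset
 * 0x12345 is a made-up placeholder, not a real SMN register:
 *
 *	u32 val;
 *	int err;
 *
 *	err = amd_smn_read(0, 0x12345, &val);
 *	if (!err)
 *		err = amd_smn_write(0, 0x12345, val | BIT(0));
 *
 * Both helpers serialize on smn_mutex and may therefore sleep, so they
 * must not be called from atomic context.
 */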

static int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)))
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)))
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant. N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of errata E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15. */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}

/*
 * Ignores subdevice/subvendor; as far as can be determined they are
 * not needed here anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* Assume all CPUs from family 0x10 onwards have MMCONFIG. */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* MMCONFIG is not enabled. */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* If necessary, collect reset state of L3 partitioning and BAN mode. */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* Deactivate BAN mode if any subcaches are to be disabled. */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* Re-enable BAN mode if L3 partitioning returned to reset state. */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
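
/*
 * Illustrative (not compiled) sketch of how the two L3 partitioning
 * helpers above pair up; "cpu" stands for any online CPU on a part with
 * AMD_NB_L3_PARTITIONING set:
 *
 *	int cur = amd_get_subcaches(cpu);	// save current mask
 *
 *	amd_set_subcaches(cpu, 0x3);		// enable only subcaches 0 and 1
 *	...
 *	amd_set_subcaches(cpu, cur);		// restore the original mask
 */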

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
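
/*
 * Illustrative note on initcall ordering: a hypothetical consumer that
 * needs the caches built here must use an initcall level that runs after
 * fs_initcall(), e.g.:
 *
 *	static int __init my_consumer_init(void)
 *	{
 *		if (!amd_nb_num())
 *			return -ENODEV;
 *		...
 *	}
 *	device_initcall(my_consumer_init);	// level 6, after fs_initcall (5)
 */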