// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT		0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT		0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT		0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT		0x1630
#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT		0x14a4
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT		0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT		0x14e8
#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT		0x153a
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT		0x1507
#define PCI_DEVICE_ID_AMD_MI200_ROOT		0x14bb
#define PCI_DEVICE_ID_AMD_MI300_ROOT		0x14f8

#define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4	0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4	0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4	0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4	0x1444
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4	0x1728
#define PCI_DEVICE_ID_AMD_19H_DF_F4		0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4	0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4	0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4	0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4	0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4	0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4	0x12fc
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4	0x12c4
#define PCI_DEVICE_ID_AMD_MI200_DF_F4		0x14d4
#define PCI_DEVICE_ID_AMD_MI300_DF_F4		0x152c

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4	0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

/* Each entry is { bus, dev_base, dev_limit }. */
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}
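
/*
 * Illustrative sketch (not part of the original file): how an in-kernel
 * consumer typically uses the exported accessors above. The helper name
 * consume_nb_misc() is made up for the example.
 *
 *	u16 node;
 *
 *	for (node = 0; node < amd_nb_num(); node++) {
 *		struct amd_northbridge *nb = node_to_amd_nb(node);
 *
 *		if (nb && nb->misc)
 *			consume_nb_misc(nb->misc);
 *	}
 */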

/*
 * SMN accesses may fail in ways that are difficult to detect here in the called
 * functions amd_smn_read() and amd_smn_write(). Therefore, callers must do
 * their own checking based on what behavior they expect.
 *
 * For SMN reads, the returned value may be zero if the register is Read-as-Zero.
 * Or it may be a "PCI Error Response", e.g. all 0xFFs. The "PCI Error Response"
 * can be checked here, and a proper error code can be returned.
 *
 * But the Read-as-Zero response cannot be verified here. A value of 0 may be
 * correct in some cases, so callers must check that this is correct for the
 * register/fields they need.
 *
 * For SMN writes, success can be determined through a "write and read back".
 * However, this is not robust when done here.
 *
 * Possible issues:
 *
 * 1) Bits that are "Write-1-to-Clear". In this case, the read value should
 *    *not* match the write value.
 *
 * 2) Bits that are "Read-as-Zero"/"Writes-Ignored". This information cannot be
 *    known here.
 *
 * 3) Bits that are "Reserved / Set to 1". Ditto above.
 *
 * Callers of amd_smn_write() should do the "write and read back" check
 * themselves, if needed.
 *
 * For #1, they can see if their target bits got cleared.
 *
 * For #2 and #3, they can check if their target bits got set as intended.
 *
 * This matches what is done for RDMSR/WRMSR. As long as there's no #GP, then
 * the operation is considered a success, and the caller does their own
 * checking.
 */
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	/* Program the SMN address into the index register of the pair. */
	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	/* Move the data through the data register of the pair. */
	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int __must_check amd_smn_read(u16 node, u32 address, u32 *value)
{
	int err = __amd_smn_rw(node, address, value, false);

	if (PCI_POSSIBLE_ERROR(*value)) {
		err = -ENODEV;
		*value = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int __must_check amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

static int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)))
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)))
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant. N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	/* The device ID is packed into the upper 16 bits, the vendor ID into the lower 16. */
	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	/* 1 MB of config space per bus: e.g. segn_busn_bits == 8 covers 256 buses in 256 MB. */
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
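
/*
 * Worked example (illustrative, not part of the original file) of the mask
 * arithmetic in amd_set_subcaches() above, for cuid == 2 and mask == 0x3:
 *
 *	mask <<= 4 * cuid;			// 0x3 << 8 == 0x300, CU2's 4-bit field
 *	mask |= (0xf ^ (1 << cuid)) << 26;	// (0xf ^ 0x4) << 26 == 0xb << 26,
 *						// one bit for every CU except CU2
 *
 * i.e. each compute unit owns a 4-bit subcache field in register 0x1d4, plus
 * one per-CU bit in the 26-29 range that is kept set for the other compute
 * units while CU2's field is rewritten.
 */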

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);