// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT		0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT		0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT		0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT		0x1630
#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT		0x14a4
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT		0x14b5
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT		0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT		0x14e8
#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT		0x153a
#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT		0x1507
#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT		0x1122
#define PCI_DEVICE_ID_AMD_MI200_ROOT		0x14bb
#define PCI_DEVICE_ID_AMD_MI300_ROOT		0x14f8

#define PCI_DEVICE_ID_AMD_17H_DF_F4		0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4	0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4	0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4	0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4	0x1444
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4	0x1728
#define PCI_DEVICE_ID_AMD_19H_DF_F4		0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4	0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4	0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4	0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4	0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4	0x14f4
#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F4	0x12fc
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4	0x12c4
#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F4	0x16fc
#define PCI_DEVICE_ID_AMD_1AH_M60H_DF_F4	0x124c
#define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F4	0x12bc
#define PCI_DEVICE_ID_AMD_MI200_DF_F4		0x14d4
#define PCI_DEVICE_ID_AMD_MI300_DF_F4		0x152c

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

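/*
 * Each node is represented by up to three cached PCI functions: the PCI
 * root device (used below for indirect SMN accesses), the "misc" device
 * (northbridge/Data Fabric function 3), and the "link" device (function 4).
 * The following tables enumerate the known device IDs for each role, per
 * family/model.
 */
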
static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4	0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI300_DF_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

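/*
 * Walk the PCI device list: return the next device after @dev that matches
 * one of the IDs in @ids, or NULL when the list is exhausted. Pass NULL to
 * start from the beginning; pci_get_device() drops the reference on the
 * previously returned device.
 */
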
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

/*
 * SMN accesses may fail in ways that are difficult to detect here in the called
 * functions amd_smn_read() and amd_smn_write(). Therefore, callers must do
 * their own checking based on what behavior they expect.
 *
 * For SMN reads, the returned value may be zero if the register is Read-as-Zero.
 * Or it may be a "PCI Error Response", e.g. all 0xFFs. The "PCI Error Response"
 * can be checked here, and a proper error code can be returned.
 *
 * But the Read-as-Zero response cannot be verified here. A value of 0 may be
 * correct in some cases, so callers must check that this value is correct for
 * the register/fields they need.
 *
 * For SMN writes, success could be determined through a "write and read back"
 * check. However, this is not robust when done here.
 *
 * Possible issues:
 *
 * 1) Bits that are "Write-1-to-Clear". In this case, the read value should
 *    *not* match the write value.
 *
 * 2) Bits that are "Read-as-Zero"/"Writes-Ignored". This information cannot be
 *    known here.
 *
 * 3) Bits that are "Reserved / Set to 1". Ditto above.
 *
 * Callers of amd_smn_write() should do the "write and read back" check
 * themselves, if needed.
 *
 * For #1, they can see if their target bits got cleared.
 *
 * For #2 and #3, they can check if their target bits got set as intended.
 *
 * This matches what is done for RDMSR/WRMSR. As long as there's no #GP, then
 * the operation is considered a success, and the caller does their own
 * checking.
 */
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

int __must_check amd_smn_read(u16 node, u32 address, u32 *value)
{
	int err = __amd_smn_rw(node, address, value, false);

	if (PCI_POSSIBLE_ERROR(*value)) {
		err = -ENODEV;
		*value = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int __must_check amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

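/*
 * Illustrative sketch only (hypothetical caller, register and bit): a user
 * of amd_smn_write() that wants the "write and read back" check described
 * above could do something like:
 *
 *	u32 val;
 *
 *	if (amd_smn_read(node, MY_SMN_REG, &val))
 *		return -ENODEV;
 *	val |= MY_FEATURE_BIT;
 *	if (amd_smn_write(node, MY_SMN_REG, val))
 *		return -ENODEV;
 *	if (amd_smn_read(node, MY_SMN_REG, &val) || !(val & MY_FEATURE_BIT))
 *		return -EIO;	/- write ignored or read back wrong -/
 *
 * MY_SMN_REG and MY_FEATURE_BIT are placeholders, and whether the read-back
 * comparison is meaningful depends on the register's Read-as-Zero /
 * Write-1-to-Clear semantics, as noted above.
 */
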
static int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)))
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)))
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant. N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}

/*
 * Ignores subdevice/subvendor, but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

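/*
 * amd_get_mmconfig_range() below derives the MMCONFIG window size from the
 * bus-range field of MSR_FAM10H_MMIO_CONF_BASE: PCI ECAM allots 1 MiB of
 * config space per bus (32 devices * 8 functions * 4 KiB), hence the
 * window spans 1ULL << (segn_busn_bits + 20) bytes.
 */
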
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* assume all cpus from fam10h have mmconfig */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}

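/*
 * L3 partitioning state lives in two config registers (described here from
 * the code below, not from a spec reference): offset 0x1d4 of the "link"
 * (F4) device holds a 4-bit subcache-enable mask per compute unit, and
 * bits 20:19 at offset 0x1b8 of the "misc" (F3) device gate BAN mode.
 */
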
int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

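/*
 * Erratum 688 workaround for family 0x14: set bits 3 and 14 of
 * MSR C001_1021 (IC_CFG) on every CPU, but only when the BIOS has not
 * already applied the fix, which is indicated by bit 2 at offset 0x164
 * of the node 0 link device (see the check below).
 */
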
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);