// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);
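/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * in-kernel consumer might use the exported helpers above to walk the
 * cached northbridges. The function name my_walk_nb_misc() and the choice
 * of register 0x9c (the GART flush word, also read by amd_cache_gart()
 * below) are assumptions for illustration only. Note that amd_nb_num()
 * returns 0 and node_to_amd_nb() returns NULL until
 * amd_cache_northbridges() has populated the cache.
 *
 *	static void my_walk_nb_misc(void)
 *	{
 *		u16 node;
 *
 *		for (node = 0; node < amd_nb_num(); node++) {
 *			struct amd_northbridge *nb = node_to_amd_nb(node);
 *			u32 val;
 *
 *			if (!nb || !nb->misc)
 *				continue;
 *			pci_read_config_dword(nb->misc, 0x9c, &val);
 *		}
 *	}
 */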
static int amd_cache_northbridges(void)
{
	struct amd_northbridge *nb;
	u16 i;

	if (amd_northbridges.num)
		return 0;

	amd_northbridges.num = amd_num_nodes();

	nb = kcalloc(amd_northbridges.num, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;

	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = amd_node_get_root(i);
		node_to_amd_nb(i)->misc = amd_node_get_func(i, 3);

		/*
		 * Each northbridge must have a 'misc' device.
		 * If not, then uninitialize everything.
		 */
		if (!node_to_amd_nb(i)->misc) {
			amd_northbridges.num = 0;
			kfree(nb);
			return -ENODEV;
		}

		node_to_amd_nb(i)->link = amd_node_get_func(i, 4);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of errata E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15. */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (cpu_feature_enabled(X86_FEATURE_ZEN))
		return false;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* Assume all CPUs from family 0x10 onwards have MMCONFIG. */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* MMCONFIG is not enabled. */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK << FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	/* Each PCI bus decodes 1 MB of ECAM space (256 functions * 4 KB). */
	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL << (segn_busn_bits + 20)) - 1;
	return res;
}
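/*
 * Worked example (illustrative, values assumed): with a BUSRANGE encoding
 * of segn_busn_bits = 8, i.e. 2^8 = 256 buses, the window spans
 * 1ULL << (8 + 20) = 256 MB, so a base of 0xe0000000 yields the resource
 * [0xe0000000, 0xefffffff]. This matches ECAM's 1 MB per bus:
 * 32 devices * 8 functions * 4 KB of config space per function.
 */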
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	/* Each compute unit owns a 4-bit subcache-enable field. */
	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* If necessary, collect the reset state of L3 partitioning and BAN mode. */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* Deactivate BAN mode if any subcaches are to be disabled. */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* Reset BAN mode if L3 partitioning returned to its reset state. */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i < amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed,
	 * but it's unclear whether the hardware might lose a flush request
	 * when another one is already pending. This whole thing is so
	 * expensive anyway that a bit of extra serialization doesn't
	 * matter. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;

		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply the erratum 688 fix so that machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return 0;

	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
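/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * consumer that depends on the cached northbridge state should be ordered
 * after the fs_initcall above and must tolerate an empty cache. The name
 * my_consumer_init() is an assumption for illustration only.
 *
 *	static int __init my_consumer_init(void)
 *	{
 *		if (!amd_nb_num())
 *			return -ENODEV;	// cache was never populated
 *
 *		// Safe: amd_cache_gart() clears AMD_NB_GART on failure,
 *		// so the flag implies flush_words is valid.
 *		if (amd_nb_has_feature(AMD_NB_GART))
 *			amd_flush_garts();
 *		return 0;
 *	}
 *	device_initcall(my_consumer_init);
 */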