// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

/* Per-northbridge GART flush words, cached by amd_cache_gart() */
static u32 *flush_words;

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

static int amd_cache_northbridges(void)
{
	struct amd_northbridge *nb;
	u16 i;

	if (amd_northbridges.num)
		return 0;

	amd_northbridges.num = amd_num_nodes();

	nb = kcalloc(amd_northbridges.num, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;

	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->misc = amd_node_get_func(i, 3);

		/*
		 * Each Northbridge must have a 'misc' device.
		 * If not, then uninitialize everything.
		 */
		if (!node_to_amd_nb(i)->misc) {
			amd_northbridges.num = 0;
			kfree(nb);
			return -ENODEV;
		}

		node_to_amd_nb(i)->link = amd_node_get_func(i, 4);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
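/*
 * Illustrative sketch only (a hypothetical consumer, not part of this
 * file): users of the cache built above typically iterate with
 * amd_nb_num()/node_to_amd_nb() and access the misc (function 3)
 * device, e.g.:
 *
 *	u16 i;
 *	u32 val;
 *
 *	for (i = 0; i < amd_nb_num(); i++)
 *		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &val);
 *
 * Offset 0x9c is just an example here (it is the GART flush word used
 * later in this file); real consumers pick whatever register they need.
 */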
/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (cpu_feature_enabled(X86_FEATURE_ZEN))
		return false;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* Assume CPUs from Fam10h have mmconfig, although not all VMs do */
	if (boot_cpu_data.x86 < 0x10 ||
	    rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
		return NULL;

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	/* Four subcache enable bits per compute unit */
	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}
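/*
 * Flush the GART TLB on every northbridge: set bit 0 of the flush word
 * register (offset 0x9c on each misc device), then poll each device
 * until the hardware clears the bit again, indicating the flush has
 * completed.
 */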
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return 0;

	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
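/*
 * Minimal usage sketch for the L3 partitioning helpers above (a
 * hypothetical caller; "cpu" is assumed to be a valid, online CPU):
 *
 *	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) {
 *		int mask = amd_get_subcaches(cpu);	// 4-bit enable bitmap
 *
 *		amd_set_subcaches(cpu, mask & 0x3);	// keep at most two subcaches
 *	}
 *
 * The feature check is optional: amd_set_subcaches() already returns
 * -EINVAL when the feature is absent or the mask is wider than 4 bits.
 */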