// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);
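/*
 * Typical consumer pattern (an illustrative sketch, not code from this
 * file): drivers iterate the cached northbridges with amd_nb_num() and
 * reach each node's PCI functions through node_to_amd_nb(). The
 * register offset SOME_F3_REG is hypothetical and stands in for
 * whatever register a caller actually needs:
 *
 *	u16 i;
 *	u32 val;
 *
 *	for (i = 0; i < amd_nb_num(); i++) {
 *		struct amd_northbridge *nb = node_to_amd_nb(i);
 *
 *		if (nb && nb->misc)
 *			pci_read_config_dword(nb->misc, SOME_F3_REG, &val);
 *	}
 */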
static int amd_cache_northbridges(void)
{
	struct amd_northbridge *nb;
	u16 i;

	if (amd_northbridges.num)
		return 0;

	amd_northbridges.num = amd_num_nodes();

	nb = kcalloc(amd_northbridges.num, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;

	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = amd_node_get_root(i);
		node_to_amd_nb(i)->misc = amd_node_get_func(i, 3);

		/*
		 * Each northbridge must have a 'misc' device.
		 * If not, then uninitialize everything.
		 */
		if (!node_to_amd_nb(i)->misc) {
			amd_northbridges.num = 0;
			kfree(nb);
			return -ENODEV;
		}

		node_to_amd_nb(i)->link = amd_node_get_func(i, 4);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/* Check for L3 cache presence. */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}

/*
 * Ignores subdevice/subvendor; as far as I can tell they're
 * useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (cpu_feature_enabled(X86_FEATURE_ZEN))
		return false;

	/* 'device' packs the device ID in the high 16 bits, vendor in the low 16. */
	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* Assume CPUs from Fam10h have mmconfig, although not all VMs do */
	if (boot_cpu_data.x86 < 0x10 ||
	    rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
		return NULL;

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
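/*
 * Worked example for the subcache mask layout used above (the register
 * value is illustrative): register 0x1d4 keeps one 4-bit subcache mask
 * per compute unit. For a CPU on compute unit 2 reading back 0x5ff,
 * amd_get_subcaches() returns
 *
 *	(0x5ff >> (4 * 2)) & 0xf == 0x5
 *
 * and amd_set_subcaches(cpu, 0xf) shifts the new mask into the same
 * slot, 0xf << (4 * 2) == 0xf00, before writing the register back.
 */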
static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed,
	 * but I'm not sure the hardware won't lose flush requests when
	 * another is pending. This whole thing is so expensive anyway
	 * that extra serialization doesn't matter. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		/* Bit 0 requests a flush; the hardware clears it when done. */
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	/* Bit 2 set here indicates the fix is already in place. */
	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return 0;

	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
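/*
 * Example use of the exported GART helpers (a minimal sketch; the
 * helper update_gatt_entry() is hypothetical): after rewriting a GART
 * page-table entry, a caller flushes the northbridge GART TLBs before
 * relying on the new mapping:
 *
 *	if (amd_nb_has_feature(AMD_NB_GART)) {
 *		update_gatt_entry(gatt, idx, phys_addr);
 *		amd_flush_garts();
 *	}
 */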