// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

static int amd_cache_northbridges(void)
{
	struct amd_northbridge *nb;
	u16 i;

	if (amd_northbridges.num)
		return 0;

	amd_northbridges.num = amd_num_nodes();

	nb = kcalloc(amd_northbridges.num, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;

	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->misc = amd_node_get_func(i, 3);

		/*
		 * Each Northbridge must have a 'misc' device.
		 * If not, then uninitialize everything.
		 */
		if (!node_to_amd_nb(i)->misc) {
			amd_northbridges.num = 0;
			kfree(nb);
			return -ENODEV;
		}

		node_to_amd_nb(i)->link = amd_node_get_func(i, 4);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of errata #382 and #388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (cpu_feature_enabled(X86_FEATURE_ZEN))
		return false;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* Assume all CPUs from family 0x10 onward have MMCONFIG. */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* MMCONFIG is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

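/*
 * L3 partitioning, family 0x15 (AMD_NB_L3_PARTITIONING): each compute unit
 * owns a four-bit subcache-enable field at config offset 0x1d4 of the
 * 'link' (function 4) device, and the BAN mode bits live in mask 0x180000
 * at offset 0x1b8 of the 'misc' (function 3) device. This summary is
 * inferred from the accessors below; the family 15h BKDG remains the
 * authoritative reference for these registers.
 */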
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

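/*
 * GART TLB flushing: config offset 0x9c of each node's 'misc' (function 3)
 * device holds the GART cache control word. amd_cache_gart() caches one
 * copy per node at init time; amd_flush_garts() writes the cached word
 * back with bit 0 set to request a flush, then spins until the hardware
 * clears bit 0 to signal completion. This summary is inferred from the
 * code below; see the BKDG for the official register definition.
 */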
static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/*
 * Apply the erratum 688 fix so machines without a BIOS fix work.
 * F4x164 bit 2 set means the BIOS has already applied it.
 */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return 0;

	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);

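/*
 * Illustrative consumer sketch (not code from this file; assumes a valid
 * 'cpu' and omits error handling):
 *
 *	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
 *	u32 word;
 *
 *	if (nb && amd_nb_has_feature(AMD_NB_GART))
 *		pci_read_config_dword(nb->misc, 0x9c, &word);
 */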