// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>

#include <asm/amd/nb.h>
#include <asm/cpuid/api.h>

static u32 *flush_words;

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

static int amd_cache_northbridges(void)
{
	struct amd_northbridge *nb;
	u16 i;

	if (amd_northbridges.num)
		return 0;

	amd_northbridges.num = amd_num_nodes();

	nb = kcalloc(amd_northbridges.num, sizeof(struct amd_northbridge),
		     GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;

	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->misc = amd_node_get_func(i, 3);

		/*
		 * Each Northbridge must have a 'misc' device.
		 * If not, then uninitialize everything.
		 */
		if (!node_to_amd_nb(i)->misc) {
			amd_northbridges.num = 0;
			kfree(nb);
			return -ENODEV;
		}

		node_to_amd_nb(i)->link = amd_node_get_func(i, 4);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	if (!cpuid_amd_hygon_has_l3_cache())
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
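
/*
 * Illustrative sketch (not part of the original file): once the table is
 * cached, a consumer would typically walk the nodes through the exported
 * helpers rather than touching amd_northbridges directly, e.g.:
 *
 *	u16 i;
 *	u32 val;
 *
 *	for (i = 0; i < amd_nb_num(); i++) {
 *		struct amd_northbridge *nb = node_to_amd_nb(i);
 *
 *		if (nb && nb->misc)
 *			pci_read_config_dword(nb->misc, 0x9c, &val);
 *	}
 *
 * node_to_amd_nb() returns NULL for an out-of-range node, so the NULL
 * check keeps the loop safe even if the cache was never populated.
 */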
/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (cpu_feature_enabled(X86_FEATURE_ZEN))
		return false;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* Assume CPUs from Fam10h have mmconfig, although not all VMs do */
	if (boot_cpu_data.x86 < 0x10 ||
	    rdmsrq_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr))
		return NULL;

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_amd_node_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).topo.core_id)) & 0xf;
}

int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).topo.core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
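
/*
 * Worked example (illustrative, not part of the original file): each
 * compute unit owns a 4-bit field in the link device's 0x1d4 register,
 * so amd_set_subcaches(cpu, 0x3) on a CPU whose compute unit id is 1
 * shifts the mask to 0x30, leaving only subcaches 0 and 1 enabled for
 * that compute unit; amd_get_subcaches() reads the same 4-bit field
 * back by shifting right 4 * core_id and masking with 0xf.
 */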
static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return 0;

	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);
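
/*
 * Illustrative usage (not part of the original file): after updating GART
 * PTEs, a GART user such as the AGP driver or the GART IOMMU would
 * typically do
 *
 *	if (amd_nb_has_feature(AMD_NB_GART))
 *		amd_flush_garts();
 *
 * relying on the write-1-then-poll protocol implemented above: setting
 * bit 0 of register 0x9c triggers the flush, and the bit reads back as 0
 * once the hardware has completed it.
 */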