// SPDX-License-Identifier: GPL-2.0+
/*
 * Hygon Processor Support for Linux
 *
 * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
 *
 * Author: Pu Wen <puwen@hygon.cn>
 */
#include <linux/io.h>

#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/cacheinfo.h>
#include <asm/spec-ctrl.h>
#include <asm/delay.h>

#include "cpu.h"

#define APICID_SOCKET_ID_BIT 6

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

#ifdef CONFIG_NUMA
/*
 * To work around broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

static void hygon_get_topology_early(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}

/*
 * Fixup core topology information for
 *  (1) Hygon multi-node processors
 *      Assumption: Number of cores in each internal node is the same.
 *  (2) Hygon processors supporting compute units
 */
static void hygon_get_topology(struct cpuinfo_x86 *c)
{
	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		c->topo.die_id = ecx & 0xff;

		c->topo.core_id = ebx & 0xff;

		if (smp_num_siblings > 1)
			c->x86_max_cores /= smp_num_siblings;

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		/*
		 * Socket ID is ApicId[6] for the processors with model <= 0x3
		 * when running on host.
		 */
		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
			c->topo.pkg_id = c->topo.apicid >> APICID_SOCKET_ID_BIT;

		cacheinfo_hygon_init_llc_id(c);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		c->topo.die_id = value & 7;
		c->topo.llc_id = c->topo.die_id;
	} else
		return;

	if (nodes_per_socket > 1)
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
}

/*
 * On Hygon setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
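 *
 * For example, with c->x86_coreid_bits == 3 an initial APIC ID of 0x2e
 * (0b101110) decodes to core_id 0b110 = 6 and pkg_id 0b101 = 5 below
 * (illustrative numbers, not taken from any particular part).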
 */
static void hygon_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned int bits;

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->topo.core_id = c->topo.initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->topo.pkg_id = c->topo.initial_apicid >> bits;
	/* Use package ID also for last level cache */
	c->topo.llc_id = c->topo.die_id = c->topo.pkg_id;
}

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned int apicid = c->topo.apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = c->topo.llc_id;

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs.
		 *   Assume they are all increased by a constant offset, but
		 *   in the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->topo.initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_hygon_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_hygon(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
		u64 val;

		rdmsrl(MSR_K7_HWCR, val);
		if (!(val & BIT(24)))
			pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
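		 *
		 * Bit 10 of MSR_AMD64_LS_CFG serves as the SSBD control bit
		 * on these parts; the corresponding mask is stored in
		 * x86_amd_ls_cfg_ssbd_mask below.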
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
		}
	}
}

static void early_init_hygon(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_hygon_mc(c);

	set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#endif

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for Hygon APIC, so
	 * we can safely set X86_FEATURE_EXTD_APICID unconditionally.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC))
		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	hygon_get_topology_early(c);
}

static void init_hygon(struct cpuinfo_x86 *c)
{
	u64 vm_cr;

	early_init_hygon(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->topo.apicid = read_apic_id();

	/*
	 * XXX someone from Hygon needs to confirm this DTRT
	 *
	init_spectral_chicken(c);
	 */

	set_cpu_cap(c, X86_FEATURE_ZEN);
	set_cpu_cap(c, X86_FEATURE_CPB);

	cpu_detect_cache_sizes(c);

	hygon_detect_cmp(c);
	hygon_get_topology(c);
	srat_detect_node(c);

	init_hygon_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_SVM)) {
		rdmsrl(MSR_VM_CR, vm_cr);
		if (vm_cr & SVM_VM_CR_SVM_DIS_MASK) {
			pr_notice_once("SVM disabled (by BIOS) in MSR_VM_CR\n");
			clear_cpu_cap(c, X86_FEATURE_SVM);
		}
	}

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Hygon processors have APIC timer running in deep C states.
	 */
	set_cpu_cap(c, X86_FEATURE_ARAT);

	/* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	check_null_seg_clears_base(c);

	/* Hygon CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes.
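	 * Clearing X86_FEATURE_APIC_MSRS_FENCE lets those MSR write paths
	 * skip the serializing fence sequence.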
	 */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
}

static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		tlb_lli_2m[ENTRIES] = eax & 0xff;
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev hygon_cpu_dev = {
	.c_vendor	= "Hygon",
	.c_ident	= { "HygonGenuine" },
	.c_early_init	= early_init_hygon,
	.c_detect_tlb	= cpu_detect_tlb_hygon,
	.c_bsp_init	= bsp_init_hygon,
	.c_init		= init_hygon,
	.c_x86_vendor	= X86_VENDOR_HYGON,
};

cpu_dev_register(hygon_cpu_dev);
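
/*
 * cpu_dev_register() places a pointer to hygon_cpu_dev in the
 * .x86_cpu_dev.init section; early CPU identification walks that section
 * and selects this entry when CPUID reports the "HygonGenuine" vendor
 * string listed in c_ident above.
 */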