// SPDX-License-Identifier: GPL-2.0
/*
 * Common x86 CPU topology enumeration: turns CPUID-provided information
 * into per-CPU topology IDs and the system wide domain shift/size tables.
 */
#include <linux/cpu.h>

#include <xen/xen.h>

#include <asm/intel-family.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/smp.h>

#include "cpu.h"

/* System wide APIC ID domain shifts and per-domain unit sizes. */
struct x86_topology_system x86_topo_system __ro_after_init;
EXPORT_SYMBOL_GPL(x86_topo_system);

/* AMD nodes per package; cannot be derived from the APIC ID (see below). */
unsigned int __amd_nodes_per_pkg __ro_after_init;
EXPORT_SYMBOL_GPL(__amd_nodes_per_pkg);

/* CPUs which are the primary SMT threads */
struct cpumask __cpu_primary_thread_mask __read_mostly;

/*
 * topology_set_dom - Set shift and CPU count for a topology domain and
 *		      propagate them to all higher domains
 * @tscan:	Topology scan state of the CPU being evaluated
 * @dom:	Domain level to update
 * @shift:	APIC ID shift for this domain
 * @ncpus:	Number of logical CPUs covered by this domain
 *
 * The higher domains inherit the values so that a subsequent, more
 * specific enumeration (e.g. from a deeper CPUID subleaf) only has to
 * overwrite the levels it actually knows about.
 */
void topology_set_dom(struct topo_scan *tscan, enum x86_topology_domains dom,
		      unsigned int shift, unsigned int ncpus)
{
	topology_update_dom(tscan, dom, shift, ncpus);

	/* Propagate to the upper levels */
	for (dom++; dom < TOPO_MAX_DOMAIN; dom++) {
		tscan->dom_shifts[dom] = tscan->dom_shifts[dom - 1];
		tscan->dom_ncpus[dom] = tscan->dom_ncpus[dom - 1];
	}
}

/*
 * Translate the vendor specific hybrid core type into the generic
 * TOPO_CPU_TYPE_* classification. Returns TOPO_CPU_TYPE_UNKNOWN for
 * unrecognized vendors or types.
 */
enum x86_topology_cpu_type get_topology_cpu_type(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		switch (c->topo.intel_type) {
		case INTEL_CPU_TYPE_ATOM: return TOPO_CPU_TYPE_EFFICIENCY;
		case INTEL_CPU_TYPE_CORE: return TOPO_CPU_TYPE_PERFORMANCE;
		}
	}
	if (c->x86_vendor == X86_VENDOR_AMD) {
		switch (c->topo.amd_type) {
		/* Raw AMD core types: 0 = performance, 1 = efficiency */
		case 0: return TOPO_CPU_TYPE_PERFORMANCE;
		case 1: return TOPO_CPU_TYPE_EFFICIENCY;
		}
	}

	return TOPO_CPU_TYPE_UNKNOWN;
}

/* Human readable name for the generic CPU type, e.g. for sysfs/proc output. */
const char *get_topology_cpu_type_name(struct cpuinfo_x86 *c)
{
	switch (get_topology_cpu_type(c)) {
	case TOPO_CPU_TYPE_PERFORMANCE:
		return "performance";
	case TOPO_CPU_TYPE_EFFICIENCY:
		return "efficiency";
	default:
		return "unknown";
	}
}

/*
 * Legacy core count enumeration via CPUID leaf 4, subleaf 0: the top
 * bits of EAX hold the maximum core count minus one. Returns 1 (single
 * core) when leaf 4 is not available or reports no cache.
 */
static unsigned int __maybe_unused parse_num_cores_legacy(struct cpuinfo_x86 *c)
{
	/* Layout of CPUID(4, 0) EAX */
	struct {
		u32	cache_type	:  5,
			unused		: 21,
			ncores		:  6;
	} eax;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_subleaf_reg(4, 0, CPUID_EAX, &eax);
	if (!eax.cache_type)
		return 1;

	/* Field is "cores - 1" */
	return eax.ncores + 1;
}

/*
 * Fallback enumeration for CPUs which do not provide topology leaf
 * 0xb/0x1f: derive the SMT and CORE domain shifts from the legacy leaf
 * 1/leaf 4 information and store them in leaf 0xb/0x1f compatible form.
 */
static void parse_legacy(struct topo_scan *tscan)
{
	unsigned int cores, core_shift, smt_shift = 0;
	struct cpuinfo_x86 *c = tscan->c;

	cores = parse_num_cores_legacy(c);
	core_shift = get_count_order(cores);

	if (cpu_has(c, X86_FEATURE_HT)) {
		/*
		 * SMT shift is the remainder of the leaf 1 logical
		 * processor shift after subtracting the core shift.
		 * A firmware provided nproc smaller than the core count
		 * would underflow; warn and treat it as no SMT.
		 */
		if (!WARN_ON_ONCE(tscan->ebx1_nproc_shift < core_shift))
			smt_shift = tscan->ebx1_nproc_shift - core_shift;
		/*
		 * The parser expects leaf 0xb/0x1f format, which means
		 * the number of logical processors at core level is
		 * counting threads.
		 */
		core_shift += smt_shift;
		cores <<= smt_shift;
	}

	topology_set_dom(tscan, TOPO_SMT_DOMAIN, smt_shift, 1U << smt_shift);
	topology_set_dom(tscan, TOPO_CORE_DOMAIN, core_shift, cores);
}

/*
 * Establish a minimal single-thread, single-core topology. Returns true
 * when real enumeration is impossible (no CPUID leaf 1), in which case
 * the caller must stop parsing.
 */
static bool fake_topology(struct topo_scan *tscan)
{
	/*
	 * Preset the CORE level shift for CPUID less systems and XEN_PV,
	 * which has useless CPUID information.
	 */
	topology_set_dom(tscan, TOPO_SMT_DOMAIN, 0, 1);
	topology_set_dom(tscan, TOPO_CORE_DOMAIN, 0, 1);

	return tscan->c->cpuid_level < 1;
}

/*
 * parse_topology - Evaluate the CPUID topology information for a CPU
 * @tscan:	Scan state, with @tscan->c pointing to the cpuinfo to fill in
 * @early:	True for the early invocation before the APIC is usable
 *
 * Initializes the cpuinfo topology to safe defaults, reads the initial
 * APIC ID from leaf 1 and dispatches to the vendor specific parser.
 */
static void parse_topology(struct topo_scan *tscan, bool early)
{
	const struct cpuinfo_topology topo_defaults = {
		.cu_id			= 0xff,
		.llc_id			= BAD_APICID,
		.l2c_id			= BAD_APICID,
		.cpu_type		= TOPO_CPU_TYPE_UNKNOWN,
	};
	struct cpuinfo_x86 *c = tscan->c;
	/* Layout of CPUID(1) EBX */
	struct {
		u32	unused0		: 16,
			nproc		:  8,
			apicid		:  8;
	} ebx;

	c->topo = topo_defaults;

	if (fake_topology(tscan))
		return;

	/* Preset Initial APIC ID from CPUID leaf 1 */
	cpuid_leaf_reg(1, CPUID_EBX, &ebx);
	c->topo.initial_apicid = ebx.apicid;

	/*
	 * The initial invocation from early_identify_cpu() happens before
	 * the APIC is mapped or X2APIC enabled. For establishing the
	 * topology, that's not required. Use the initial APIC ID.
	 */
	if (early)
		c->topo.apicid = c->topo.initial_apicid;
	else
		c->topo.apicid = read_apic_id();

	/* The above is sufficient for UP */
	if (!IS_ENABLED(CONFIG_SMP))
		return;

	/* Needed by parse_legacy() to compute the SMT shift */
	tscan->ebx1_nproc_shift = get_count_order(ebx.nproc);

	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_HYGON:
		cpu_parse_topology_amd(tscan);
		break;
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		parse_legacy(tscan);
		break;
	case X86_VENDOR_INTEL:
		if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan))
			parse_legacy(tscan);
		/* Leaf 0x1a EAX carries the hybrid core type information */
		if (c->cpuid_level >= 0x1a)
			c->topo.cpu_type = cpuid_eax(0x1a);
		break;
	}
}

/*
 * Derive the per-CPU topology IDs from the APIC ID using the domain
 * shifts/masks established in x86_topo_system. The logical IDs require
 * the topology bitmaps, which are not available early, hence @early.
 */
static void topo_set_ids(struct topo_scan *tscan, bool early)
{
	struct cpuinfo_x86 *c = tscan->c;
	u32 apicid = c->topo.apicid;

	c->topo.pkg_id = topo_shift_apicid(apicid, TOPO_PKG_DOMAIN);
	c->topo.die_id = topo_shift_apicid(apicid, TOPO_DIE_DOMAIN);

	if (!early) {
		c->topo.logical_pkg_id = topology_get_logical_id(apicid, TOPO_PKG_DOMAIN);
		c->topo.logical_die_id = topology_get_logical_id(apicid, TOPO_DIE_DOMAIN);
		c->topo.logical_core_id = topology_get_logical_id(apicid, TOPO_CORE_DOMAIN);
	}

	/* Package relative core ID */
	c->topo.core_id = (apicid & topo_domain_mask(TOPO_PKG_DOMAIN)) >>
		x86_topo_system.dom_shifts[TOPO_SMT_DOMAIN];

	c->topo.amd_node_id = tscan->amd_node_id;

	if (c->x86_vendor == X86_VENDOR_AMD)
		cpu_topology_fixup_amd(tscan);
}

/*
 * cpu_parse_topology - Parse the topology of a secondary CPU
 * @c:		cpuinfo of the CPU being brought up
 *
 * Runs on the CPU itself, after the APIC is usable. Cross-checks the
 * enumerated APIC IDs and domain shifts against the firmware provided
 * and boot CPU established values and complains about mismatches
 * (firmware bugs) before setting the topology IDs.
 */
void cpu_parse_topology(struct cpuinfo_x86 *c)
{
	unsigned int dom, cpu = smp_processor_id();
	struct topo_scan tscan = { .c = c, };

	parse_topology(&tscan, false);

	if (IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
		/* CPUID leaf 1 initial APIC ID vs. the APIC's own ID */
		if (c->topo.initial_apicid != c->topo.apicid) {
			pr_err(FW_BUG "CPU%4u: APIC ID mismatch. CPUID: 0x%04x APIC: 0x%04x\n",
			       cpu, c->topo.initial_apicid, c->topo.apicid);
		}

		/* Firmware (ACPI/MP table) provided APIC ID vs. the APIC's own ID */
		if (c->topo.apicid != cpuid_to_apicid[cpu]) {
			pr_err(FW_BUG "CPU%4u: APIC ID mismatch. Firmware: 0x%04x APIC: 0x%04x\n",
			       cpu, cpuid_to_apicid[cpu], c->topo.apicid);
		}
	}

	/* All CPUs must agree on the shifts established by the boot CPU */
	for (dom = TOPO_SMT_DOMAIN; dom < TOPO_MAX_DOMAIN; dom++) {
		if (tscan.dom_shifts[dom] == x86_topo_system.dom_shifts[dom])
			continue;
		pr_err(FW_BUG "CPU%d: Topology domain %u shift %u != %u\n", cpu, dom,
		       tscan.dom_shifts[dom], x86_topo_system.dom_shifts[dom]);
	}

	topo_set_ids(&tscan, false);
}

/*
 * cpu_init_topology - Establish system wide topology from the boot CPU
 * @c:		cpuinfo of the boot CPU
 *
 * Boot time only: populates x86_topo_system (domain shifts and unit
 * sizes) and the AMD nodes-per-package value which all other CPUs are
 * later validated against in cpu_parse_topology().
 */
void __init cpu_init_topology(struct cpuinfo_x86 *c)
{
	struct topo_scan tscan = { .c = c, };
	unsigned int dom, sft;

	parse_topology(&tscan, true);

	/* Copy the shift values and calculate the unit sizes. */
	memcpy(x86_topo_system.dom_shifts, tscan.dom_shifts, sizeof(x86_topo_system.dom_shifts));

	dom = TOPO_SMT_DOMAIN;
	x86_topo_system.dom_size[dom] = 1U << x86_topo_system.dom_shifts[dom];

	/* Each higher domain's size is the delta to the previous shift */
	for (dom++; dom < TOPO_MAX_DOMAIN; dom++) {
		sft = x86_topo_system.dom_shifts[dom] - x86_topo_system.dom_shifts[dom - 1];
		x86_topo_system.dom_size[dom] = 1U << sft;
	}

	topo_set_ids(&tscan, true);

	/*
	 * AMD systems have Nodes per package which cannot be mapped to
	 * APIC ID.
	 */
	__amd_nodes_per_pkg = tscan.amd_nodes_per_pkg;
}