// SPDX-License-Identifier: GPL-2.0+
/*
 * Hygon Processor Support for Linux
 *
 * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
 *
 * Author: Pu Wen <puwen@hygon.cn>
 */
#include <linux/io.h>

#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/cacheinfo.h>
#include <asm/spec-ctrl.h>
#include <asm/delay.h>
#ifdef CONFIG_X86_64
# include <asm/set_memory.h>
#endif

#include "cpu.h"

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

static void hygon_get_topology_early(struct cpuinfo_x86 *c)
{
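	/* CPUID Fn8000_001E EBX[15:8] is the number of threads per core - 1. */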
	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}

/*
 * Fixup core topology information for
 * (1) Hygon multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) Hygon processors supporting compute units
 */
static void hygon_get_topology(struct cpuinfo_x86 *c)
{
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

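		/*
		 * CPUID Fn8000_001E: ECX[7:0] is the node ID, EBX[7:0] the
		 * core ID.  ECX[10:8] (nodes per processor) is read in
		 * bsp_init_hygon() to fill nodes_per_socket.
		 */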
		node_id  = ecx & 0xff;

		c->cpu_core_id = ebx & 0xff;

		if (smp_num_siblings > 1)
			c->x86_max_cores /= smp_num_siblings;

		/*
		 * In case CPUID leaf 0xB is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		cacheinfo_hygon_init_llc_id(c, cpu, node_id);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

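		/* NODE_ID MSR bits [2:0] hold this CPU's node ID. */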
		rdmsrl(MSR_FAM10H_NODE_ID, value);
		node_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = node_id;
	} else
		return;

	if (nodes_per_socket > 1)
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
}

/*
 * On Hygon setups the lower bits of the APIC ID distinguish the cores.
 * Assumes the number of cores is a power of two.
 */
static void hygon_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned int bits;
	int cpu = smp_processor_id();

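	/*
	 * Example: with x86_coreid_bits == 3, initial APIC ID 0x1a (0b11010)
	 * decodes to core 2 in socket 3.
	 */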
	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
}

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned int apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platforms (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fix up some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs.
		 *   Assume they are all increased by a constant offset, but
		 *   in the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node, fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through the CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_hygon_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

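	/* ECX bits [7:0] give the core count (minus 1), bits [15:12] the core ID width. */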
	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_hygon(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	unsigned long long tseg;

	/*
	 * Split up the direct mapping around the TSEG SMM area.
	 * Don't do it for gbpages because there seems to be very little
	 * benefit in doing so.
	 */
	if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
		unsigned long pfn = tseg >> PAGE_SHIFT;

		pr_debug("tseg: %010llx\n", tseg);
		if (pfn_range_is_mapped(pfn, pfn + 1))
			set_memory_4k((unsigned long)__va(tseg), 1);
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
		u64 val;

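		/* HWCR bit 24 (TscFreqSel) set means the TSC counts at the P0 frequency. */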
		rdmsrl(MSR_K7_HWCR, val);
		if (!(val & BIT(24)))
			pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
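		/* ECX[10:8]: number of nodes per processor - 1. */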
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

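		/* NODE_ID MSR bits [5:3]: number of nodes per processor - 1. */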
		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
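			/* Bit 10 of LS_CFG controls SSBD, as on AMD family 17h parts. */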
			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
		}
	}
}

static void early_init_hygon(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_hygon_mc(c);

	set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 indicates that the TSC runs
	 * at a constant rate with P/T states and does not stop in deep
	 * C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#endif

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for Hygon APIC,
	 * so we can safely set X86_FEATURE_EXTD_APICID unconditionally.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC))
		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	hygon_get_topology_early(c);
}

static void init_hygon(struct cpuinfo_x86 *c)
{
	early_init_hygon(c);

	/*
	 * Bit 31 in normal CPUID is used for the nonstandard 3DNow ID;
	 * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway.
	 */
	clear_cpu_cap(c, 0*32+31);

	set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	set_cpu_cap(c, X86_FEATURE_ZEN);
	set_cpu_cap(c, X86_FEATURE_CPB);

	cpu_detect_cache_sizes(c);

	hygon_detect_cmp(c);
	hygon_get_topology(c);
	srat_detect_node(c);

	init_hygon_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		unsigned long long val;
		int ret;

		/*
		 * A serializing LFENCE has less overhead than MFENCE, so
		 * use it for execution serialization.  On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_F10H_DECFG,
			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

		/*
		 * Verify that the MSR write was successful (could be running
		 * under a hypervisor) and only then assume that LFENCE is
		 * serializing.
		 */
		ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
		if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
			/* A serializing LFENCE stops RDTSC speculation */
			set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
		} else {
			/* MFENCE stops RDTSC speculation */
			set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
		}
	}

	/*
	 * Hygon processors have the APIC timer running even in deep C-states.
	 */
	set_cpu_cap(c, X86_FEATURE_ARAT);

	/* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

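	/*
	 * L2 TLB info in EBX (4K pages) and EAX (2M/4M pages):
	 * bits [27:16] hold the data TLB entry count, bits [11:0] the
	 * instruction TLB entry count.
	 */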
	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		tlb_lli_2m[ENTRIES] = eax & 0xff;
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev hygon_cpu_dev = {
	.c_vendor	= "Hygon",
	.c_ident	= { "HygonGenuine" },
	.c_early_init   = early_init_hygon,
	.c_detect_tlb	= cpu_detect_tlb_hygon,
	.c_bsp_init	= bsp_init_hygon,
	.c_init		= init_hygon,
	.c_x86_vendor	= X86_VENDOR_HYGON,
};

cpu_dev_register(hygon_cpu_dev);