// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/pgtable.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/semaphore.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/cpuhotplug.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>
#include <asm/cpu_device_id.h>
#include <asm/cmdline.h>
#include <asm/traps.h>
#include <asm/resctrl.h>
#include <asm/numa.h>
#include <asm/thermal.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

enum split_lock_detect_state {
	sld_off = 0,
	sld_warn,
	sld_fatal,
	sld_ratelimit,
};
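
/*
 * In brief (sld_state_show() near the bottom of this file prints the
 * details): sld_warn warns on and throttles offending tasks, sld_fatal
 * sends SIGBUS to user space, and sld_ratelimit caps the system-wide
 * rate of bus locks per second.
 */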

/*
 * Default to sld_off because most systems do not support split lock detection.
 * sld_state_setup() will switch this to sld_warn on systems that support
 * split lock/bus lock detect, unless there is a command line override.
 */
static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
static u64 msr_test_ctrl_cache __ro_after_init;

/*
 * With a name like MSR_TEST_CTL it should go without saying, but don't touch
 * MSR_TEST_CTRL unless the CPU is one of the whitelisted models.  Writing it
 * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
 */
static bool cpu_model_supports_sld __ro_after_init;

/*
 * Processors which have self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own cache. However, there
 * exist CPU models in which having conflicting memory types still leads
 * to unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
	switch (c->x86_vfm) {
	case INTEL_CORE_YONAH:
	case INTEL_CORE2_MEROM:
	case INTEL_CORE2_MEROM_L:
	case INTEL_CORE2_PENRYN:
	case INTEL_CORE2_DUNNINGTON:
	case INTEL_NEHALEM:
	case INTEL_NEHALEM_G:
	case INTEL_NEHALEM_EP:
	case INTEL_NEHALEM_EX:
	case INTEL_WESTMERE:
	case INTEL_WESTMERE_EP:
	case INTEL_SANDYBRIDGE:
		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
	}
}
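
/*
 * Note that setup_clear_cpu_cap(), unlike clear_cpu_cap(c, ...), clears
 * the feature in boot_cpu_data and keeps it cleared on CPUs brought up
 * later, so matching one affected model disables self-snoop system-wide.
 */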

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * The Ring 3 MONITOR/MWAIT feature is not enumerated via CPUID,
	 * so it can only be detected by comparing the CPU family and model.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_vfm) {
	case INTEL_XEON_PHI_KNL:
	case INTEL_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from the 20180108 microcode release
 */
struct sku_microcode {
	u32 vfm;
	u8 stepping;
	u32 microcode;
};
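
/*
 * Each entry records the newest known-bad microcode revision for a
 * given model/stepping; bad_spectre_microcode() below treats any
 * revision at or below that value as broken.
 */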
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_KABYLAKE,	0x0B,	0x80 },
	{ INTEL_KABYLAKE,	0x0A,	0x80 },
	{ INTEL_KABYLAKE,	0x09,	0x80 },
	{ INTEL_KABYLAKE_L,	0x0A,	0x80 },
	{ INTEL_KABYLAKE_L,	0x09,	0x80 },
	{ INTEL_SKYLAKE_X,	0x03,	0x0100013e },
	{ INTEL_SKYLAKE_X,	0x04,	0x0200003c },
	{ INTEL_BROADWELL,	0x04,	0x28 },
	{ INTEL_BROADWELL_G,	0x01,	0x1b },
	{ INTEL_BROADWELL_D,	0x02,	0x14 },
	{ INTEL_BROADWELL_D,	0x03,	0x07000011 },
	{ INTEL_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_HASWELL_L,	0x01,	0x21 },
	{ INTEL_HASWELL_G,	0x01,	0x18 },
	{ INTEL_HASWELL,	0x03,	0x23 },
	{ INTEL_HASWELL_X,	0x02,	0x3b },
	{ INTEL_HASWELL_X,	0x04,	0x10 },
	{ INTEL_IVYBRIDGE_X,	0x04,	0x42a },
	/* Observed in the wild */
	{ INTEL_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_SANDYBRIDGE_X,	0x07,	0x712 },
};

static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * We know that hypervisors lie to us about the microcode version,
	 * so we may as well hope that they are running the correct version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_vfm == spectre_bad_microcodes[i].vfm &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}

#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)

#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */

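/*
 * Worked example (illustrative value, not from real hardware): with
 * tme_activate = 0x0000000400000003, bits 0 (lock) and 1 (enable) are
 * set and bits 35:32 hold 0x4, so detect_tme_early() below reports TME
 * enabled by the BIOS and shrinks x86_phys_bits by four KeyID bits.
 */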
static void detect_tme_early(struct cpuinfo_x86 *c)
{
	u64 tme_activate;
	int keyid_bits;

	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
		pr_info_once("x86/tme: not enabled by BIOS\n");
		clear_cpu_cap(c, X86_FEATURE_TME);
		return;
	}
	pr_info_once("x86/tme: enabled by BIOS\n");
	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	if (!keyid_bits)
		return;

	/*
	 * KeyID bits are set by BIOS and can be present regardless
	 * of whether the kernel is using them. They effectively lower
	 * the number of physical address bits.
	 *
	 * Update cpuinfo_x86::x86_phys_bits accordingly.
	 */
	c->x86_phys_bits -= keyid_bits;
	pr_info_once("x86/mktme: BIOS enabled: x86_phys_bits reduced by %d\n",
		     keyid_bits);
}

void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	if (c->x86 < 6 || (c->x86 == 6 && c->x86_model < 0xd))
		return;

	/*
	 * The BIOS may have limited CPUID to leaf 2, which breaks feature
	 * enumeration. Unlock it and update the maximum leaf info.
	 */
	if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
		c->cpuid_level = cpuid_eax(0);
}
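
/*
 * msr_clear_bit() returns > 0 only if the bit was set and actually had
 * to be cleared, so cpuid_level is re-read only when a BIOS limit was
 * in effect.
 */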

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/* Now if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86_vfm == INTEL_ATOM_BONNELL && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 indicates that the TSC runs
	 * at a constant rate with P/T states and does not stop in deep
	 * C-states.
	 *
	 * It is also reliable across cores and sockets (but not across
	 * cabinets - we turn it off in that case explicitly).
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Penwell and Cloverview have a TSC that does not stop across S3 */
	switch (c->x86_vfm) {
	case INTEL_ATOM_SALTWELL_MID:
	case INTEL_ATOM_SALTWELL_TABLET:
	case INTEL_ATOM_SILVERMONT_MID:
	case INTEL_ATOM_AIRMONT_NP:
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
		break;
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86_vfm == INTEL_QUARK_X1000) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	check_memory_type_self_snoop_errata(c);

	/*
	 * Adjust the number of physical bits early because it affects the
	 * valid bits of the MTRR mask registers.
	 */
	if (cpu_has(c, X86_FEATURE_TME))
		detect_tme_early(c);
}

static void bsp_init_intel(struct cpuinfo_x86 *c)
{
	resctrl_cpu_detect(c);
}

#ifdef CONFIG_X86_32
/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do CPU identification work.
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Only check secondary CPUs; cpu_index 0 is the boot CPU */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3, stepping 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}
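
/*
 * msr_misc_features_shadow is the kernel's per-CPU mirror of
 * MSR_MISC_FEATURES_ENABLES: the probes above only set bits in the
 * shadow, and the final wrmsrl() pushes the accumulated value to the
 * hardware in one go.
 */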

static void split_lock_init(void);
static void bus_lock_init(void);

static void init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	intel_workarounds(c);

	init_intel_cacheinfo(c);

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_vfm == INTEL_CORE2_DUNNINGTON ||
	     c->x86_vfm == INTEL_NEHALEM_EX ||
	     c->x86_vfm == INTEL_WESTMERE_EX))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (boot_cpu_has(X86_FEATURE_MWAIT) && c->x86_vfm == INTEL_ATOM_GOLDMONT)
		set_cpu_bug(c, X86_BUG_MONITOR);


#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	init_ia32_feat_ctl(c);

	init_intel_misc_features(c);

	split_lock_init();
	bus_lock_init();

	intel_init_thermal(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256KB of cache, the other 512KB. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512KB model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif

#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way, 4 entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
	     intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}
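
/*
 * Worked example (illustrative leaf 2 output): if CPUID(2) returns
 * EAX = 0x76036301, the low byte 0x01 says the leaf only needs to be
 * read once, and the remaining bytes are descriptors 0x63, 0x03 and
 * 0x76, each fed to intel_tlb_lookup() above. A register whose bit 31
 * is set carries no valid descriptors and is zeroed out beforehand.
 */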

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init   = early_init_intel,
	.c_bsp_init	= bsp_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);

#undef pr_fmt
#define pr_fmt(fmt) "x86/split lock detection: " fmt

static const struct {
	const char			*option;
	enum split_lock_detect_state	state;
} sld_options[] __initconst = {
	{ "off",	sld_off   },
	{ "warn",	sld_warn  },
	{ "fatal",	sld_fatal },
	{ "ratelimit:", sld_ratelimit },
};
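
/*
 * These map the "split_lock_detect=" kernel command line values, e.g.
 * "split_lock_detect=warn" or "split_lock_detect=ratelimit:10" for at
 * most 10 bus locks/sec; match_option() below accepts ratelimit values
 * between 1 and 1000.
 */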

static struct ratelimit_state bld_ratelimit;

static unsigned int sysctl_sld_mitigate = 1;
static DEFINE_SEMAPHORE(buslock_sem, 1);

#ifdef CONFIG_PROC_SYSCTL
static struct ctl_table sld_sysctls[] = {
	{
		.procname       = "split_lock_mitigate",
		.data           = &sysctl_sld_mitigate,
		.maxlen         = sizeof(unsigned int),
		.mode           = 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1         = SYSCTL_ZERO,
		.extra2         = SYSCTL_ONE,
	},
};

static int __init sld_mitigate_sysctl_init(void)
{
	register_sysctl_init("kernel", sld_sysctls);
	return 0;
}

late_initcall(sld_mitigate_sysctl_init);
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt), ratelimit;

	if (strncmp(arg, opt, len))
		return false;

	/*
	 * Min ratelimit is 1 bus lock/sec.
	 * Max ratelimit is 1000 bus locks/sec.
	 */
	if (sscanf(arg, "ratelimit:%d", &ratelimit) == 1 &&
	    ratelimit > 0 && ratelimit <= 1000) {
		ratelimit_state_init(&bld_ratelimit, HZ, ratelimit);
		ratelimit_set_flags(&bld_ratelimit, RATELIMIT_MSG_ON_RELEASE);
		return true;
	}

	return len == arglen;
}

static bool split_lock_verify_msr(bool on)
{
	u64 ctrl, tmp;

	if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
		return false;
	if (on)
		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	else
		ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
		return false;
	rdmsrl(MSR_TEST_CTRL, tmp);
	return ctrl == tmp;
}
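
/*
 * The write/read-back pair above acts as a probe: if the SLD bit cannot
 * be toggled and read back intact, the MSR (or the bit) is not actually
 * functional and split lock detection is left disabled.
 */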

static void __init sld_state_setup(void)
{
	enum split_lock_detect_state state = sld_warn;
	char arg[20];
	int i, ret;

	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
	    !boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
		return;

	ret = cmdline_find_option(boot_command_line, "split_lock_detect",
				  arg, sizeof(arg));
	if (ret >= 0) {
		for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
			if (match_option(arg, ret, sld_options[i].option)) {
				state = sld_options[i].state;
				break;
			}
		}
	}
	sld_state = state;
}

static void __init __split_lock_setup(void)
{
	if (!split_lock_verify_msr(false)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	if (!split_lock_verify_msr(true)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	/* Restore the MSR to its cached value. */
	wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
}

/*
 * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
 * is not implemented as one thread could undo the setting of the other
 * thread immediately after dropping the lock anyway.
 */
static void sld_update_msr(bool on)
{
	u64 test_ctrl_val = msr_test_ctrl_cache;

	if (on)
		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
}

static void split_lock_init(void)
{
	/*
	 * In sld_ratelimit mode, bus locks are rate limited via #DB and
	 * #AC for split lock is left disabled.
	 */
	if (sld_state == sld_ratelimit) {
		split_lock_verify_msr(false);
		return;
	}

	if (cpu_model_supports_sld)
		split_lock_verify_msr(sld_state != sld_off);
}

static void __split_lock_reenable_unlock(struct work_struct *work)
{
	sld_update_msr(true);
	up(&buslock_sem);
}

static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock);

static void __split_lock_reenable(struct work_struct *work)
{
	sld_update_msr(true);
}
static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);

/*
 * If a CPU goes offline with pending delayed work to re-enable split lock
 * detection then the delayed work will be executed on some other CPU. That
 * handles releasing the buslock_sem, but because it executes on a
 * different CPU probably won't re-enable split lock detection. This is a
 * problem on HT systems since the sibling CPU on the same core may then be
 * left running with split lock detection disabled.
 *
 * Unconditionally re-enable detection here.
 */
static int splitlock_cpu_offline(unsigned int cpu)
{
	sld_update_msr(true);

	return 0;
}

static void split_lock_warn(unsigned long ip)
{
	struct delayed_work *work;
	int cpu;

	if (!current->reported_split_lock)
		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, ip);
	current->reported_split_lock = 1;

	if (sysctl_sld_mitigate) {
		/*
		 * Misery factor #1:
		 * sleep 10ms before trying to execute split lock.
		 */
		if (msleep_interruptible(10) > 0)
			return;
		/*
		 * Misery factor #2:
		 * only allow one core at a time to have split lock
		 * detection disabled.
		 */
		if (down_interruptible(&buslock_sem) == -EINTR)
			return;
		work = &sl_reenable_unlock;
	} else {
		work = &sl_reenable;
	}

	cpu = get_cpu();
	schedule_delayed_work_on(cpu, work, 2);

	/* Disable split lock detection on this CPU to make progress */
	sld_update_msr(false);
	put_cpu();
}
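
/*
 * The two-jiffy delay above gives the faulting task a window to retry
 * its split-locked access with detection disabled before the delayed
 * work re-enables detection on this CPU.
 */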

bool handle_guest_split_lock(unsigned long ip)
{
	if (sld_state == sld_warn) {
		split_lock_warn(ip);
		return true;
	}

	pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n",
		     current->comm, current->pid,
		     sld_state == sld_fatal ? "fatal" : "bogus", ip);

	current->thread.error_code = 0;
	current->thread.trap_nr = X86_TRAP_AC;
	force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
	return false;
}
EXPORT_SYMBOL_GPL(handle_guest_split_lock);

static void bus_lock_init(void)
{
	u64 val;

	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
		return;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, val);

	if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
	    (sld_state == sld_warn || sld_state == sld_fatal)) ||
	    sld_state == sld_off) {
		/*
		 * Warn and fatal are handled by #AC for split lock if #AC for
		 * split lock is supported.
		 */
		val &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
	} else {
		val |= DEBUGCTLMSR_BUS_LOCK_DETECT;
	}

	wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
}

bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
	if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
		return false;
	split_lock_warn(regs->ip);
	return true;
}

void handle_bus_lock(struct pt_regs *regs)
{
	switch (sld_state) {
	case sld_off:
		break;
	case sld_ratelimit:
		/* Enforce no more than bld_ratelimit bus locks/sec. */
		while (!__ratelimit(&bld_ratelimit))
			msleep(20);
		/* Warn on the bus lock. */
		fallthrough;
	case sld_warn:
		pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, regs->ip);
		break;
	case sld_fatal:
		force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
		break;
	}
}

/*
 * CPU models that are known to have the per-core split-lock detection
 * feature even though they do not enumerate IA32_CORE_CAPABILITIES.
 */
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
	X86_MATCH_VFM(INTEL_ICELAKE_X,	0),
	X86_MATCH_VFM(INTEL_ICELAKE_L,	0),
	X86_MATCH_VFM(INTEL_ICELAKE_D,	0),
	{}
};

static void __init split_lock_setup(struct cpuinfo_x86 *c)
{
	const struct x86_cpu_id *m;
	u64 ia32_core_caps;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	/* Check for CPUs that have support but do not enumerate it: */
	m = x86_match_cpu(split_lock_cpu_ids);
	if (m)
		goto supported;

	if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
		return;

	/*
	 * Not all bits in MSR_IA32_CORE_CAPS are architectural, but
	 * MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is.  All CPUs that set
	 * it have split lock detection.
	 */
	rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
	if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
		goto supported;

	/* CPU is not in the model list and does not have the MSR bit: */
	return;

supported:
	cpu_model_supports_sld = true;
	__split_lock_setup();
}

static void sld_state_show(void)
{
	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
	    !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
		return;

	switch (sld_state) {
	case sld_off:
		pr_info("disabled\n");
		break;
	case sld_warn:
		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
			pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n");
			if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					      "x86/splitlock", NULL, splitlock_cpu_offline) < 0)
				pr_warn("No splitlock CPU offline handler\n");
		} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
			pr_info("#DB: warning on user-space bus_locks\n");
		}
		break;
	case sld_fatal:
		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
			pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n");
		} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
			pr_info("#DB: sending SIGBUS on user-space bus_locks%s\n",
				boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) ?
				" from non-WB" : "");
		}
		break;
	case sld_ratelimit:
		if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
			pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst);
		break;
	}
}

void __init sld_setup(struct cpuinfo_x86 *c)
{
	split_lock_setup(c);
	sld_state_setup();
	sld_state_show();
}

#define X86_HYBRID_CPU_TYPE_ID_SHIFT	24

/**
 * get_this_hybrid_cpu_type() - Get the type of this hybrid CPU
 *
 * Returns the CPU type [31:24] (i.e., Atom or Core) of a CPU in
 * a hybrid processor. If the processor is not hybrid, returns 0.
 */
u8 get_this_hybrid_cpu_type(void)
{
	if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		return 0;

	return cpuid_eax(0x0000001a) >> X86_HYBRID_CPU_TYPE_ID_SHIFT;
}
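
/*
 * On existing hybrid parts the native model ID in CPUID.1AH:EAX[31:24]
 * reads 0x20 for an Atom (efficiency) core and 0x40 for a Core
 * (performance) core, per the SDM's hybrid enumeration.
 */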