common.c: diff of arch/x86/kernel/cpu/common.c between commit 50501936288d6a29d7ef78f25d00e33240fad45f (old) and commit 8974eb588283b7d44a7c91fa09fcbaf380339f3a (new)
 // SPDX-License-Identifier: GPL-2.0-only
 /* cpu_feature_enabled() cannot be used this early */
 #define USE_EARLY_PGTABLE_L5

 #include <linux/memblock.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
 #include <linux/kernel.h>

[... 4 unchanged lines hidden ...]

 #include <linux/delay.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/clock.h>
 #include <linux/sched/task.h>
 #include <linux/sched/smt.h>
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/kgdb.h>
+#include <linux/mem_encrypt.h>
 #include <linux/smp.h>
+#include <linux/cpu.h>
 #include <linux/io.h>
 #include <linux/syscore_ops.h>
 #include <linux/pgtable.h>
 #include <linux/stackprotector.h>
+#include <linux/utsname.h>

+#include <asm/alternative.h>
 #include <asm/cmdline.h>
 #include <asm/perf_event.h>
 #include <asm/mmu_context.h>
 #include <asm/doublefault.h>
 #include <asm/archrandom.h>
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>

[... 19 unchanged lines hidden ...]

 #include <asm/msr.h>
 #include <asm/cacheinfo.h>
 #include <asm/memtype.h>
 #include <asm/microcode.h>
 #include <asm/microcode_intel.h>
 #include <asm/intel-family.h>
 #include <asm/cpu_device_id.h>
 #include <asm/uv/uv.h>
-#include <asm/sigframe.h>
+#include <asm/set_memory.h>
 #include <asm/traps.h>
 #include <asm/sev.h>

 #include "cpu.h"

 u32 elf_hwcap2 __read_mostly;

-/* all of these masks are initialized in setup_cpu_local_masks() */
-cpumask_var_t cpu_initialized_mask;
-cpumask_var_t cpu_callout_mask;
-cpumask_var_t cpu_callin_mask;
-
-/* representing cpus for which sibling maps can be computed */
-cpumask_var_t cpu_sibling_setup_mask;
-
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);

 /* Last level cache ID of each logical CPU */
 DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

 u16 get_llc_id(unsigned int cpu)

[... 78 unchanged lines hidden ...]

         set_cpu_cap(c, info->feature);
         return;
     }

 clear_ppin:
     clear_cpu_cap(c, info->feature);
 }

-/* correctly size the local cpu masks */
-void __init setup_cpu_local_masks(void)
-{
-    alloc_bootmem_cpumask_var(&cpu_initialized_mask);
-    alloc_bootmem_cpumask_var(&cpu_callin_mask);
-    alloc_bootmem_cpumask_var(&cpu_callout_mask);
-    alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
-}
-
 static void default_init(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
     cpu_detect_cache_sizes(c);
 #else
     /* Not much we can do here... */
     /* Check if at least it has cpuid */
     if (c->cpuid_level == -1) {

[... 1069 unchanged lines hidden ...]

 /* CPU is affected by X86_BUG_MMIO_STALE_DATA */
 #define MMIO        BIT(1)
 /* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
 #define MMIO_SBDS   BIT(2)
 /* CPU is affected by RETbleed, speculating where you would not expect it */
 #define RETBLEED    BIT(3)
 /* CPU is affected by SMT (cross-thread) return predictions */
 #define SMT_RSB     BIT(4)
+/* CPU is affected by GDS */
+#define GDS         BIT(5)

 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
     VULNBL_INTEL_STEPPINGS(IVYBRIDGE,      X86_STEPPING_ANY,        SRBDS),
     VULNBL_INTEL_STEPPINGS(HASWELL,        X86_STEPPING_ANY,        SRBDS),
     VULNBL_INTEL_STEPPINGS(HASWELL_L,      X86_STEPPING_ANY,        SRBDS),
     VULNBL_INTEL_STEPPINGS(HASWELL_G,      X86_STEPPING_ANY,        SRBDS),
     VULNBL_INTEL_STEPPINGS(HASWELL_X,      X86_STEPPING_ANY,        MMIO),
     VULNBL_INTEL_STEPPINGS(BROADWELL_D,    X86_STEPPING_ANY,        MMIO),
     VULNBL_INTEL_STEPPINGS(BROADWELL_G,    X86_STEPPING_ANY,        SRBDS),
     VULNBL_INTEL_STEPPINGS(BROADWELL_X,    X86_STEPPING_ANY,        MMIO),
     VULNBL_INTEL_STEPPINGS(BROADWELL,      X86_STEPPING_ANY,        SRBDS),
     VULNBL_INTEL_STEPPINGS(SKYLAKE_L,      X86_STEPPING_ANY,        SRBDS | MMIO | RETBLEED),
-    VULNBL_INTEL_STEPPINGS(SKYLAKE_X,      X86_STEPPING_ANY,        MMIO | RETBLEED),
+    VULNBL_INTEL_STEPPINGS(SKYLAKE_X,      X86_STEPPING_ANY,        MMIO | RETBLEED | GDS),
     VULNBL_INTEL_STEPPINGS(SKYLAKE,        X86_STEPPING_ANY,        SRBDS | MMIO | RETBLEED),
-    VULNBL_INTEL_STEPPINGS(KABYLAKE_L,     X86_STEPPING_ANY,        SRBDS | MMIO | RETBLEED),
-    VULNBL_INTEL_STEPPINGS(KABYLAKE,       X86_STEPPING_ANY,        SRBDS | MMIO | RETBLEED),
+    VULNBL_INTEL_STEPPINGS(KABYLAKE_L,     X86_STEPPING_ANY,        SRBDS | MMIO | RETBLEED | GDS),
+    VULNBL_INTEL_STEPPINGS(KABYLAKE,       X86_STEPPING_ANY,        SRBDS | MMIO | RETBLEED | GDS),
     VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,   X86_STEPPING_ANY,        RETBLEED),
-    VULNBL_INTEL_STEPPINGS(ICELAKE_L,      X86_STEPPING_ANY,        MMIO | MMIO_SBDS | RETBLEED),
-    VULNBL_INTEL_STEPPINGS(ICELAKE_D,      X86_STEPPING_ANY,        MMIO),
-    VULNBL_INTEL_STEPPINGS(ICELAKE_X,      X86_STEPPING_ANY,        MMIO),
-    VULNBL_INTEL_STEPPINGS(COMETLAKE,      X86_STEPPING_ANY,        MMIO | MMIO_SBDS | RETBLEED),
+    VULNBL_INTEL_STEPPINGS(ICELAKE_L,      X86_STEPPING_ANY,        MMIO | MMIO_SBDS | RETBLEED | GDS),
+    VULNBL_INTEL_STEPPINGS(ICELAKE_D,      X86_STEPPING_ANY,        MMIO | GDS),
+    VULNBL_INTEL_STEPPINGS(ICELAKE_X,      X86_STEPPING_ANY,        MMIO | GDS),
+    VULNBL_INTEL_STEPPINGS(COMETLAKE,      X86_STEPPING_ANY,        MMIO | MMIO_SBDS | RETBLEED | GDS),
     VULNBL_INTEL_STEPPINGS(COMETLAKE_L,    X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED),
-    VULNBL_INTEL_STEPPINGS(COMETLAKE_L,    X86_STEPPING_ANY,        MMIO | MMIO_SBDS | RETBLEED),
+    VULNBL_INTEL_STEPPINGS(COMETLAKE_L,    X86_STEPPING_ANY,        MMIO | MMIO_SBDS | RETBLEED | GDS),
+    VULNBL_INTEL_STEPPINGS(TIGERLAKE_L,    X86_STEPPING_ANY,        GDS),
+    VULNBL_INTEL_STEPPINGS(TIGERLAKE,      X86_STEPPING_ANY,        GDS),
     VULNBL_INTEL_STEPPINGS(LAKEFIELD,      X86_STEPPING_ANY,        MMIO | MMIO_SBDS | RETBLEED),
-    VULNBL_INTEL_STEPPINGS(ROCKETLAKE,     X86_STEPPING_ANY,        MMIO | RETBLEED),
+    VULNBL_INTEL_STEPPINGS(ROCKETLAKE,     X86_STEPPING_ANY,        MMIO | RETBLEED | GDS),
     VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,   X86_STEPPING_ANY,        MMIO | MMIO_SBDS),
     VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY,        MMIO),
     VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY,        MMIO | MMIO_SBDS),

     VULNBL_AMD(0x15, RETBLEED),
     VULNBL_AMD(0x16, RETBLEED),
     VULNBL_AMD(0x17, RETBLEED | SMT_RSB),
     VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),

[... 115 unchanged lines hidden ...]

     if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
         if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
             setup_force_cpu_bug(X86_BUG_RETBLEED);
     }

     if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
         setup_force_cpu_bug(X86_BUG_SMT_RSB);

+    /*
+     * Check if CPU is vulnerable to GDS. If running in a virtual machine on
+     * an affected processor, the VMM may have disabled the use of GATHER by
+     * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
+     * which means that AVX will be disabled.
+     */
+    if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+        boot_cpu_has(X86_FEATURE_AVX))
+        setup_force_cpu_bug(X86_BUG_GDS);
+
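Each VULNBL_*() entry above pairs a CPU model (and stepping range) with a bitmask of the bugs that model is affected by; cpu_set_bug_bits() then probes individual bits via cpu_matches(), which wraps x86_match_cpu(). A minimal, self-contained userspace model of that lookup (the table entries, names, and plain family/model matching are simplified illustrations, not the kernel's struct x86_cpu_id machinery):

    #include <stdio.h>

    /* Bug bits, mirroring the #defines in the hunk above. */
    #define MMIO     (1UL << 1)
    #define RETBLEED (1UL << 3)
    #define GDS      (1UL << 5)

    /* Simplified stand-in for struct x86_cpu_id: match family/model only. */
    struct vuln_entry {
        unsigned int family;
        unsigned int model;
        unsigned long bugs;    /* plays the role of ->driver_data */
    };

    static const struct vuln_entry blacklist[] = {
        { 6, 0x55, MMIO | RETBLEED | GDS },    /* SKYLAKE_X (illustrative) */
        { 6, 0xa7, MMIO | RETBLEED | GDS },    /* ROCKETLAKE (illustrative) */
        { 0, 0, 0 }                            /* terminator */
    };

    /* Analogue of cpu_matches(): is a matching entry marked with these bugs? */
    static int cpu_matches(unsigned int family, unsigned int model,
                           unsigned long which)
    {
        const struct vuln_entry *e;

        for (e = blacklist; e->family; e++)
            if (e->family == family && e->model == model)
                return (e->bugs & which) != 0;
        return 0;
    }

    int main(void)
    {
        /* Pretend identification found a family 6, model 0x55 part. */
        if (cpu_matches(6, 0x55, GDS))
            printf("affected: would setup_force_cpu_bug(X86_BUG_GDS)\n");
        return 0;
    }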
     if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
         return;

     /* Rogue Data Cache Load? No! */
     if (ia32_cap & ARCH_CAP_RDCL_NO)
         return;

     setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

[... 67 unchanged lines hidden ...]


         /*
          * Handle naked numbers first for feature flags which don't
          * have names.
          */
         if (!kstrtouint(opt, 10, &bit)) {
             if (bit < NCAPINTS * 32) {

-#ifdef CONFIG_X86_FEATURE_NAMES
                 /* empty-string, i.e., ""-defined feature flags */
                 if (!x86_cap_flags[bit])
                     pr_cont(" " X86_CAP_FMT_NUM, x86_cap_flag_num(bit));
                 else
-#endif
                     pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));

                 setup_clear_cpu_cap(bit);
                 taint++;
             }
             /*
              * The assumption is that there are no feature names with only
              * numbers in the name, thus go to the next argument.
              */
             continue;
         }

-#ifdef CONFIG_X86_FEATURE_NAMES
         for (bit = 0; bit < 32 * NCAPINTS; bit++) {
             if (!x86_cap_flag(bit))
                 continue;

             if (strcmp(x86_cap_flag(bit), opt))
                 continue;

             pr_cont(" %s", opt);
             setup_clear_cpu_cap(bit);
             taint++;
             found = true;
             break;
         }

         if (!found)
             pr_cont(" (unknown: %s)", opt);
-#endif
     }
     pr_cont("\n");

     if (taint)
         add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
 }

 /*

[... 46 unchanged lines hidden ...]

     }

     setup_force_cpu_cap(X86_FEATURE_ALWAYS);

     cpu_set_bug_bits(c);

     sld_setup(c);

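Returning to the clearcpuid= parser in the hunk above: with the CONFIG_X86_FEATURE_NAMES guards gone, the parser always tries a raw bit number first and falls back to a name lookup. A rough userspace model of that two-stage parse (the tiny name table and printed output are illustrative only; the kernel uses kstrtouint() and x86_cap_flags[]):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NCAPINTS 21    /* illustrative stand-in for the kernel's constant */

    /* Hypothetical miniature name table standing in for x86_cap_flags[]. */
    static const char *cap_names[NCAPINTS * 32] = {
        [0] = "fpu", [4] = "tsc", [25] = "sse",
    };

    int main(void)
    {
        char arg[] = "sse,300,notaflag";    /* e.g. clearcpuid=sse,300,notaflag */
        int taint = 0;
        char *opt;

        printf("Clearing CPUID bits:");
        for (opt = strtok(arg, ","); opt; opt = strtok(NULL, ",")) {
            char *end;
            unsigned long bit = strtoul(opt, &end, 10);
            int found = 0;

            /* Naked numbers first, for flags which don't have names. */
            if (*opt && !*end) {
                if (bit < NCAPINTS * 32) {
                    printf(" %lu", bit);    /* setup_clear_cpu_cap(bit) */
                    taint++;
                }
                continue;
            }

            /* Otherwise, search the name table. */
            for (bit = 0; bit < NCAPINTS * 32; bit++) {
                if (cap_names[bit] && !strcmp(cap_names[bit], opt)) {
                    printf(" %s", opt);     /* setup_clear_cpu_cap(bit) */
                    taint++;
                    found = 1;
                    break;
                }
            }
            if (!found)
                printf(" (unknown: %s)", opt);
        }
        printf("\n");
        if (taint)
            printf("would add_taint(TAINT_CPU_OUT_OF_SPEC)\n");
        return 0;
    }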
-    fpu__init_system(c);
-
-    init_sigframe_size();
-
 #ifdef CONFIG_X86_32
     /*
      * Regardless of whether PCID is enumerated, the SDM says
      * that it can't be enabled in 32-bit mode.
      */
     setup_clear_cpu_cap(X86_FEATURE_PCID);
 #endif


[... 363 unchanged lines hidden ...]

     BUG_ON(c == &boot_cpu_data);
     identify_cpu(c);
 #ifdef CONFIG_X86_32
     enable_sep_cpu();
 #endif
     validate_apic_and_package_id(c);
     x86_spec_ctrl_setup_ap();
     update_srbds_msr();
+    if (boot_cpu_has_bug(X86_BUG_GDS))
+        update_gds_msr();

     tsx_ap_init();
 }

 void print_cpu_info(struct cpuinfo_x86 *c)
 {
     const char *vendor = NULL;

[... 124 unchanged lines hidden ...]

 {
     if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
         arch_kgdb_ops.correct_hw_break();
 }
 #else /* ! CONFIG_KGDB */
 #define dbg_restore_debug_regs()
 #endif /* ! CONFIG_KGDB */

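The update_gds_msr() call added above keeps each AP's mitigation MSR in sync with the boot CPU's decision. Whether the running kernel considers the machine affected can be checked from userspace; a small sketch, assuming the gather_data_sampling sysfs entry that the GDS mitigation series introduces alongside this change:

    #include <stdio.h>

    int main(void)
    {
        /* Entry only exists on kernels carrying the GDS series. */
        const char *path =
            "/sys/devices/system/cpu/vulnerabilities/gather_data_sampling";
        char buf[256];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("GDS: %s", buf);    /* e.g. "Mitigation: Microcode" */
        fclose(f);
        return 0;
    }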
-static void wait_for_master_cpu(int cpu)
-{
-#ifdef CONFIG_SMP
-    /*
-     * wait for ACK from master CPU before continuing
-     * with AP initialization
-     */
-    WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
-    while (!cpumask_test_cpu(cpu, cpu_callout_mask))
-        cpu_relax();
-#endif
-}
-
 static inline void setup_getcpu(int cpu)
 {
     unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
     struct desc_struct d = { };

     if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
         wrmsr(MSR_TSC_AUX, cpudata, 0);

[... 6 unchanged lines hidden ...]

     d.s = 1;    /* Not a system segment */
     d.p = 1;    /* Present */
     d.d = 1;    /* 32-bit */

     write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
 }

 #ifdef CONFIG_X86_64
-static inline void ucode_cpu_init(int cpu)
-{
-    if (cpu)
-        load_ucode_ap();
-}
+static inline void ucode_cpu_init(int cpu) { }
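setup_getcpu() above publishes the CPU and node numbers through MSR_TSC_AUX (and mirrors them in the limit of the GDT_ENTRY_CPUNODE descriptor) so they can be recovered without a syscall. A sketch of the consuming side, assuming the vDSO encoding from vdso_encode_cpunode() (node in the upper bits, cpu in the low 12 bits) and a CPU with RDTSCP:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed layout from vdso_encode_cpunode(): low 12 bits cpu, rest node. */
    #define VDSO_CPUNODE_BITS    12
    #define VDSO_CPUNODE_MASK    0xfff

    int main(void)
    {
        uint32_t lo, hi, aux;

        /* RDTSCP: TSC in EDX:EAX, MSR_TSC_AUX in ECX. */
        __asm__ volatile("rdtscp" : "=a"(lo), "=d"(hi), "=c"(aux));

        printf("running on cpu %u, node %u\n",
               aux & VDSO_CPUNODE_MASK, aux >> VDSO_CPUNODE_BITS);
        return 0;
    }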

 static inline void tss_setup_ist(struct tss_struct *tss)
 {
     /* Set up the per-CPU TSS IST stacks */
     tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
     tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
     tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
     tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);

[... 60 unchanged lines hidden ...]

  * reload it nevertheless, this function acts as a 'CPU state barrier',
  * nothing should get across.
  */
 void cpu_init(void)
 {
     struct task_struct *cur = current;
     int cpu = raw_smp_processor_id();

-    wait_for_master_cpu(cpu);
-
     ucode_cpu_init(cpu);

 #ifdef CONFIG_NUMA
     if (this_cpu_read(numa_node) == 0 &&
         early_cpu_to_node(cpu) != NUMA_NO_NODE)
         set_numa_node(early_cpu_to_node(cpu));
 #endif
     pr_debug("Initializing CPU#%d\n", cpu);

[... 28 unchanged lines hidden ...]


     load_mm_ldt(&init_mm);

     clear_all_debug_regs();
     dbg_restore_debug_regs();

     doublefault_init_cpu_tss();

-    fpu__init_cpu();
-
     if (is_uv_system())
         uv_cpu_init();

     load_fixmap_gdt(cpu);
 }

-#ifdef CONFIG_SMP
-void cpu_init_secondary(void)
-{
-    /*
-     * Relies on the BP having set-up the IDT tables, which are loaded
-     * on this CPU in cpu_init_exception_handling().
-     */
-    cpu_init_exception_handling();
-    cpu_init();
-}
-#endif
-
 #ifdef CONFIG_MICROCODE_LATE_LOADING
 /**
  * store_cpu_caps() - Store a snapshot of CPU capabilities
  * @curr_info: Pointer where to store it
  *
  * Returns: None
  */
 void store_cpu_caps(struct cpuinfo_x86 *curr_info)

[... 41 unchanged lines hidden ...]

  */
 void arch_smt_update(void)
 {
     /* Handle the speculative execution misfeatures */
     cpu_bugs_smt_update();
     /* Check whether IPI broadcasting can be enabled */
     apic_smt_update();
 }
+
+void __init arch_cpu_finalize_init(void)
+{
+    identify_boot_cpu();
+
+    /*
+     * identify_boot_cpu() initialized SMT support information, let the
+     * core code know.
+     */
+    cpu_smt_check_topology();
+
+    if (!IS_ENABLED(CONFIG_SMP)) {
+        pr_info("CPU: ");
+        print_cpu_info(&boot_cpu_data);
+    }
+
+    cpu_select_mitigations();
+
+    arch_smt_update();
+
+    if (IS_ENABLED(CONFIG_X86_32)) {
+        /*
+         * Check whether this is a real i386 which is no longer
+         * supported and fixup the utsname.
+         */
+        if (boot_cpu_data.x86 < 4)
+            panic("Kernel requires i486+ for 'invlpg' and other features");
+
+        init_utsname()->machine[1] =
+            '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+    }
+
+    /*
+     * Must be before alternatives because it might set or clear
+     * feature bits.
+     */
+    fpu__init_system();
+    fpu__init_cpu();
+
+    alternative_instructions();
+
+    if (IS_ENABLED(CONFIG_X86_64)) {
+        /*
+         * Make sure the first 2MB area is not mapped by huge pages.
+         * There are typically fixed size MTRRs in there and overlapping
+         * MTRRs into large pages causes slow downs.
+         *
+         * Right now we don't do that with gbpages because there seems
+         * very little benefit for that case.
+         */
+        if (!direct_gbpages)
+            set_memory_4k((unsigned long)__va(0), 1);
+    } else {
+        fpu__init_check_bugs();
+    }
+
+    /*
+     * This needs to be called before any devices perform DMA
+     * operations that might use the SWIOTLB bounce buffers. It will
+     * mark the bounce buffers as decrypted so that their usage will
+     * not cause "plain-text" data to be decrypted when accessed. It
+     * must be called after late_time_init() so that Hyper-V x86/x64
+     * hypercalls work when the SWIOTLB bounce buffers are decrypted.
+     */
+    mem_encrypt_init();
+}
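arch_cpu_finalize_init() keys its i486 check on boot_cpu_data.x86, the displayed CPUID family. For reference, a self-contained userspace sketch computing the same value with the GCC/Clang <cpuid.h> helper (the extended-family adjustment follows the SDM rule the kernel applies when the base family is 0xf):

    #include <stdio.h>
    #include <cpuid.h>    /* GCC/Clang CPUID helpers */

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx, family;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;

        /* Displayed family, as the kernel computes boot_cpu_data.x86. */
        family = (eax >> 8) & 0xf;
        if (family == 0xf)
            family += (eax >> 20) & 0xff;

        printf("CPUID family: %u (kernel would %s)\n", family,
               family < 4 ? "panic: i486+ required" : "boot");
        return 0;
    }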