xref: /linux/arch/x86/kernel/cpu/zhaoxin.c (revision b8d312aa075f33282565467662c4628dae0a2aff)
// SPDX-License-Identifier: GPL-2.0
#include <linux/sched.h>
#include <linux/sched/clock.h>

#include <asm/cpufeature.h>

#include "cpu.h"

#define MSR_ZHAOXIN_FCR57 0x00001257

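/* ACE and RNG present/enabled flags live in CPUID leaf 0xC0000001 EDX. */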
#define ACE_PRESENT	(1 << 6)
#define ACE_ENABLED	(1 << 7)
#define ACE_FCR		(1 << 7)	/* MSR_ZHAOXIN_FCR */

#define RNG_PRESENT	(1 << 2)
#define RNG_ENABLED	(1 << 3)
#define RNG_ENABLE	(1 << 8)	/* MSR_ZHAOXIN_RNG */

#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

static void init_zhaoxin_cap(struct cpuinfo_x86 *c)
{
	u32  lo, hi;

	/* Test for Extended Feature Flags presence */
	if (cpuid_eax(0xC0000000) >= 0xC0000001) {
		u32 tmp = cpuid_edx(0xC0000001);

		/* Enable ACE unit, if present and disabled */
		if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
			rdmsr(MSR_ZHAOXIN_FCR57, lo, hi);
			/* Enable ACE unit */
			lo |= ACE_FCR;
			wrmsr(MSR_ZHAOXIN_FCR57, lo, hi);
			pr_info("CPU: Enabled ACE h/w crypto\n");
		}

		/* Enable RNG unit, if present and disabled */
		if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
			rdmsr(MSR_ZHAOXIN_FCR57, lo, hi);
			/* Enable RNG unit */
			lo |= RNG_ENABLE;
			wrmsr(MSR_ZHAOXIN_FCR57, lo, hi);
			pr_info("CPU: Enabled h/w RNG\n");
		}

		/*
		 * Store Extended Feature Flags as word 5 of the CPU
		 * capability bit array
		 */
		c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
	}

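	/* Family 6 and newer have well-behaved REP string microcode. */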
	if (c->x86 >= 0x6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	cpu_detect_cache_sizes(c);
}

static void early_init_zhaoxin(struct cpuinfo_x86 *c)
{
	if (c->x86 >= 0x6)
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#endif
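	/*
	 * x86_power bit 8 (CPUID 0x80000007 EDX[8]) advertises an
	 * invariant TSC.
	 */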
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set, EBX[23:16] contains the number of
		 * APIC IDs reserved per package. Store the resulting shift
		 * value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
	}
}

static void zhaoxin_detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

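	/*
	 * The high word of each VMX capability MSR lists the controls that
	 * may be set to 1; OR in the low word (controls fixed to 1) to see
	 * everything the CPU can expose.
	 */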
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;

	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}

static void init_zhaoxin(struct cpuinfo_x86 *c)
{
	early_init_zhaoxin(c);
	init_intel_cacheinfo(c);
	detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	if (c->cpuid_level > 9) {
		unsigned int eax = cpuid_eax(10);

		/*
		 * Check the perfmon version and counter count:
		 * the version (eax[7:0]) must be non-zero and there
		 * must be more than one counter (eax[15:8]).
		 */
		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (c->x86 >= 0x6)
		init_zhaoxin_cap(c);
#ifdef CONFIG_X86_64
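	/* Advertise that LFENCE is sufficient to order RDTSC reads. */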
	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
#endif

	if (cpu_has(c, X86_FEATURE_VMX))
		zhaoxin_detect_vmx_virtcap(c);
}

#ifdef CONFIG_X86_32
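/*
 * Processor-specific L2 size fixup used by cpu_detect_cache_sizes() on
 * 32-bit kernels; no adjustment is needed here, so the size is returned
 * unchanged.
 */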
static unsigned int
zhaoxin_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	return size;
}
#endif

static const struct cpu_dev zhaoxin_cpu_dev = {
	.c_vendor	= "zhaoxin",
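	/* Matches the CPUID vendor ID string, including the padding spaces. */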
	.c_ident	= { "  Shanghai  " },
	.c_early_init	= early_init_zhaoxin,
	.c_init		= init_zhaoxin,
#ifdef CONFIG_X86_32
	.legacy_cache_size = zhaoxin_size_cache,
#endif
	.c_x86_vendor	= X86_VENDOR_ZHAOXIN,
};

cpu_dev_register(zhaoxin_cpu_dev);