/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * Copyright (c) 2014-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: FreeBSD: src/sys/i386/include/cpu.h,v 1.62 2001/06/29
 */

#ifdef __arm__
#include <arm/cpu.h>
#else /* !__arm__ */

#ifndef _MACHINE_CPU_H_
#define _MACHINE_CPU_H_

#if !defined(__ASSEMBLER__)
#include <machine/atomic.h>
#include <machine/frame.h>
#endif
#include <machine/armreg.h>

#define TRAPF_PC(tfp) ((tfp)->tf_elr)
#define TRAPF_USERMODE(tfp) (((tfp)->tf_spsr & PSR_M_MASK) == PSR_M_EL0t)

#define cpu_getstack(td) ((td)->td_frame->tf_sp)
#define cpu_setstack(td, sp) ((td)->td_frame->tf_sp = (sp))
#define cpu_spinwait() __asm __volatile("yield" ::: "memory")
#define cpu_lock_delay() DELAY(1)
/* Extract CPU affinity levels 0-3 */
#define CPU_AFF0(mpidr) (u_int)(((mpidr) >> 0) & 0xff)
#define CPU_AFF1(mpidr) (u_int)(((mpidr) >> 8) & 0xff)
#define CPU_AFF2(mpidr) (u_int)(((mpidr) >> 16) & 0xff)
#define CPU_AFF3(mpidr) (u_int)(((mpidr) >> 32) & 0xff)
#define CPU_AFF0_MASK 0xffUL
#define CPU_AFF1_MASK 0xff00UL
#define CPU_AFF2_MASK 0xff0000UL
#define CPU_AFF3_MASK 0xff00000000UL
#define CPU_AFF_MASK (CPU_AFF0_MASK | CPU_AFF1_MASK | \
    CPU_AFF2_MASK | CPU_AFF3_MASK) /* Mask affinity fields in MPIDR_EL1 */
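
/*
 * Illustrative usage sketch (hypothetical variable names): the accessors
 * above decode the affinity fields of an MPIDR_EL1 value, e.g. the core
 * number within a cluster (level 0) and the cluster number (level 1).
 *
 *	uint64_t mpidr = READ_SPECIALREG(mpidr_el1);
 *	u_int core = CPU_AFF0(mpidr);
 *	u_int cluster = CPU_AFF1(mpidr);
 */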

#ifdef _KERNEL

#define CPU_IMPL_ARM 0x41
#define CPU_IMPL_BROADCOM 0x42
#define CPU_IMPL_CAVIUM 0x43
#define CPU_IMPL_DEC 0x44
#define CPU_IMPL_FUJITSU 0x46
#define CPU_IMPL_HISILICON 0x48
#define CPU_IMPL_INFINEON 0x49
#define CPU_IMPL_FREESCALE 0x4D
#define CPU_IMPL_NVIDIA 0x4E
#define CPU_IMPL_APM 0x50
#define CPU_IMPL_QUALCOMM 0x51
#define CPU_IMPL_MARVELL 0x56
#define CPU_IMPL_APPLE 0x61
#define CPU_IMPL_INTEL 0x69
#define CPU_IMPL_AMPERE 0xC0
#define CPU_IMPL_MICROSOFT 0x6D

/* ARM Part numbers */
#define CPU_PART_FOUNDATION 0xD00
#define CPU_PART_CORTEX_A34 0xD02
#define CPU_PART_CORTEX_A53 0xD03
#define CPU_PART_CORTEX_A35 0xD04
#define CPU_PART_CORTEX_A55 0xD05
#define CPU_PART_CORTEX_A65 0xD06
#define CPU_PART_CORTEX_A57 0xD07
#define CPU_PART_CORTEX_A72 0xD08
#define CPU_PART_CORTEX_A73 0xD09
#define CPU_PART_CORTEX_A75 0xD0A
#define CPU_PART_CORTEX_A76 0xD0B
#define CPU_PART_NEOVERSE_N1 0xD0C
#define CPU_PART_CORTEX_A77 0xD0D
#define CPU_PART_CORTEX_A76AE 0xD0E
#define CPU_PART_AEM_V8 0xD0F
#define CPU_PART_NEOVERSE_V1 0xD40
#define CPU_PART_CORTEX_A78 0xD41
#define CPU_PART_CORTEX_A78AE 0xD42
#define CPU_PART_CORTEX_A65AE 0xD43
#define CPU_PART_CORTEX_X1 0xD44
#define CPU_PART_CORTEX_A510 0xD46
#define CPU_PART_CORTEX_A710 0xD47
#define CPU_PART_CORTEX_X2 0xD48
#define CPU_PART_NEOVERSE_N2 0xD49
#define CPU_PART_NEOVERSE_E1 0xD4A
#define CPU_PART_CORTEX_A78C 0xD4B
#define CPU_PART_CORTEX_X1C 0xD4C
#define CPU_PART_CORTEX_A715 0xD4D
#define CPU_PART_CORTEX_X3 0xD4E
#define CPU_PART_NEOVERSE_V2 0xD4F
#define CPU_PART_CORTEX_A520 0xD80
#define CPU_PART_CORTEX_A720 0xD81
#define CPU_PART_CORTEX_X4 0xD82
#define CPU_PART_NEOVERSE_V3AE 0xD83
#define CPU_PART_NEOVERSE_V3 0xD84
#define CPU_PART_CORTEX_X925 0xD85
#define CPU_PART_CORTEX_A725 0xD87
#define CPU_PART_NEOVERSE_N3 0xD8E

/* Cavium Part numbers */
#define CPU_PART_THUNDERX 0x0A1
#define CPU_PART_THUNDERX_81XX 0x0A2
#define CPU_PART_THUNDERX_83XX 0x0A3
#define CPU_PART_THUNDERX2 0x0AF

#define CPU_REV_THUNDERX_1_0 0x00
#define CPU_REV_THUNDERX_1_1 0x01

#define CPU_REV_THUNDERX2_0 0x00

/* APM (now Ampere) Part number */
#define CPU_PART_EMAG8180 0x000

/* Ampere Part numbers */
#define CPU_PART_AMPERE1 0xAC3
#define CPU_PART_AMPERE1A 0xAC4

/* Microsoft Part numbers */
#define CPU_PART_AZURE_COBALT_100 0xD49

/* Qualcomm */
#define CPU_PART_KRYO400_GOLD 0x804
#define CPU_PART_KRYO400_SILVER 0x805

/* Apple part numbers */
#define CPU_PART_M1_ICESTORM 0x022
#define CPU_PART_M1_FIRESTORM 0x023
#define CPU_PART_M1_ICESTORM_PRO 0x024
#define CPU_PART_M1_FIRESTORM_PRO 0x025
#define CPU_PART_M1_ICESTORM_MAX 0x028
#define CPU_PART_M1_FIRESTORM_MAX 0x029
#define CPU_PART_M2_BLIZZARD 0x032
#define CPU_PART_M2_AVALANCHE 0x033
#define CPU_PART_M2_BLIZZARD_PRO 0x034
#define CPU_PART_M2_AVALANCHE_PRO 0x035
#define CPU_PART_M2_BLIZZARD_MAX 0x038
#define CPU_PART_M2_AVALANCHE_MAX 0x039

#define CPU_IMPL(midr) (((midr) >> 24) & 0xff)
#define CPU_PART(midr) (((midr) >> 4) & 0xfff)
#define CPU_VAR(midr) (((midr) >> 20) & 0xf)
#define CPU_ARCH(midr) (((midr) >> 16) & 0xf)
#define CPU_REV(midr) (((midr) >> 0) & 0xf)

#define CPU_IMPL_TO_MIDR(val) (((val) & 0xff) << 24)
#define CPU_PART_TO_MIDR(val) (((val) & 0xfff) << 4)
#define CPU_VAR_TO_MIDR(val) (((val) & 0xf) << 20)
#define CPU_ARCH_TO_MIDR(val) (((val) & 0xf) << 16)
#define CPU_REV_TO_MIDR(val) (((val) & 0xf) << 0)

#define CPU_IMPL_MASK (0xff << 24)
#define CPU_PART_MASK (0xfff << 4)
#define CPU_VAR_MASK (0xf << 20)
#define CPU_ARCH_MASK (0xf << 16)
#define CPU_REV_MASK (0xf << 0)

#define CPU_ID_RAW(impl, part, var, rev) \
    (CPU_IMPL_TO_MIDR((impl)) | \
    CPU_PART_TO_MIDR((part)) | CPU_VAR_TO_MIDR((var)) | \
    CPU_REV_TO_MIDR((rev)))

#define CPU_MATCH(mask, impl, part, var, rev) \
    (((mask) & PCPU_GET(midr)) == \
    ((mask) & CPU_ID_RAW((impl), (part), (var), (rev))))
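
/*
 * Usage sketch (hypothetical example): match the current CPU against a
 * specific implementer and part number while ignoring the variant and
 * revision fields.
 *
 *	if (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK,
 *	    CPU_IMPL_ARM, CPU_PART_CORTEX_A72, 0, 0))
 *		... Cortex-A72 specific handling ...
 */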

#if !defined(__ASSEMBLER__)
static inline bool
midr_check_var_part_range(u_int midr, u_int impl, u_int part, u_int var_low,
    u_int part_low, u_int var_high, u_int part_high)
{
	/* Check for the correct part */
	if (CPU_IMPL(midr) != impl || CPU_PART(midr) != part)
		return (false);

	/* Check if the variant is between var_low and var_high inclusive */
	if (CPU_VAR(midr) < var_low || CPU_VAR(midr) > var_high)
		return (false);

	/* If the variant is the low value, check if the part is high enough */
	if (CPU_VAR(midr) == var_low && CPU_PART(midr) < part_low)
		return (false);

	/* If the variant is the high value, check if the part is low enough */
	if (CPU_VAR(midr) == var_high && CPU_PART(midr) > part_high)
		return (false);

	return (true);
}
#endif
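
/*
 * Illustrative call (hypothetical bound values): check that the running CPU
 * is a given implementer/part and that its MIDR lies within the inclusive
 * range described by the (var_low, part_low) .. (var_high, part_high)
 * arguments.
 *
 *	if (midr_check_var_part_range(PCPU_GET(midr), CPU_IMPL_ARM,
 *	    CPU_PART_CORTEX_A76, 0, 0, 4, 1))
 *		... affected CPU ...
 */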

/*
 * Chip-specific errata. These defines are intended to be used as
 * booleans within if statements. When an appropriate kernel option
 * is disabled, these defines must be defined as 0 to allow the
 * compiler to remove the dead code and thus produce a better
 * optimized kernel image.
 */
/*
 * Vendor: Cavium
 * Chip: ThunderX
 * Revision(s): Pass 1.0, Pass 1.1
 */
#ifdef THUNDERX_PASS_1_1_ERRATA
#define CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 \
    (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_REV_MASK, \
    CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, CPU_REV_THUNDERX_1_0) || \
    CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_REV_MASK, \
    CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, CPU_REV_THUNDERX_1_1))
#else
#define CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 0
#endif
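
/*
 * Usage sketch (hypothetical caller and helper name): the define above is a
 * plain boolean, so when THUNDERX_PASS_1_1_ERRATA is not configured it
 * evaluates to a constant 0 and the compiler can discard the branch.
 *
 *	if (CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1)
 *		thunderx_pass1_workaround();
 */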

#if !defined(__ASSEMBLER__)
extern char btext[];
extern char etext[];

extern uint64_t __cpu_affinity[];

struct arm64_addr_mask;
extern struct arm64_addr_mask elf64_addr_mask;
#ifdef COMPAT_FREEBSD14
extern struct arm64_addr_mask elf64_addr_mask_14;
#endif

typedef void (*cpu_reset_hook_t)(void);
extern cpu_reset_hook_t cpu_reset_hook;

void cpu_halt(void) __dead2;
void cpu_reset(void) __dead2;
void fork_trampoline(void);
void identify_cache(uint64_t);
void identify_cpu(u_int);
void install_cpu_errata(void);

/* Pointer Authentication Code (PAC) support */
void ptrauth_init(void);
void ptrauth_fork(struct thread *, struct thread *);
void ptrauth_exec(struct thread *);
void ptrauth_copy_thread(struct thread *, struct thread *);
void ptrauth_thread_alloc(struct thread *);
void ptrauth_thread0(struct thread *);
#ifdef SMP
void ptrauth_mp_start(uint64_t);
#endif

/* Functions to read the sanitised view of the special registers */
void update_special_regs(u_int);
void update_special_reg_iss(u_int, uint64_t, uint64_t);
#define update_special_reg(reg, clear, set) \
    update_special_reg_iss(reg ## _ISS, clear, set)
bool get_kernel_reg_iss(u_int, uint64_t *);
#define get_kernel_reg(reg, valp) \
    get_kernel_reg_iss(reg ## _ISS, valp)
bool get_kernel_reg_iss_masked(u_int, uint64_t *, uint64_t);
#define get_kernel_reg_masked(reg, valp, mask) \
    get_kernel_reg_iss_masked(reg ## _ISS, valp, mask)
bool get_user_reg_iss(u_int, uint64_t *, bool);
#define get_user_reg(reg, valp, fbsd) \
    get_user_reg_iss(reg ## _ISS, valp, fbsd)
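
/*
 * Usage sketch (hypothetical): read the sanitised view of an ID register.
 * The wrapper macros append the "_ISS" suffix to the register name, so the
 * register must have an ISS encoding defined in armreg.h.
 *
 *	uint64_t pfr0;
 *
 *	if (get_kernel_reg(ID_AA64PFR0_EL1, &pfr0))
 *		... pfr0 holds the system-wide sanitised value ...
 */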

void cpu_desc_init(void);

#define CPU_AFFINITY(cpu) __cpu_affinity[(cpu)]
#define CPU_CURRENT_SOCKET \
    (CPU_AFF2(CPU_AFFINITY(PCPU_GET(cpuid))))

static __inline uint64_t
get_cyclecount(void)
{
	uint64_t ret;

	ret = READ_SPECIALREG(cntvct_el0);

	return (ret);
}

#define ADDRESS_TRANSLATE_FUNC(stage) \
static inline uint64_t \
arm64_address_translate_ ##stage (uint64_t addr) \
{ \
	uint64_t ret; \
 \
	__asm __volatile( \
	    "at " __STRING(stage) ", %1 \n" \
	    "isb \n" \
	    "mrs %0, par_el1" : "=r"(ret) : "r"(addr)); \
 \
	return (ret); \
}

ADDRESS_TRANSLATE_FUNC(s1e0r)
ADDRESS_TRANSLATE_FUNC(s1e0w)
ADDRESS_TRANSLATE_FUNC(s1e1r)
ADDRESS_TRANSLATE_FUNC(s1e1w)
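
/*
 * Usage sketch (illustrative): translate a virtual address using the stage 1
 * EL1 read translation and check the result in PAR_EL1, whose bit 0 (F) is
 * set when the translation faulted.
 *
 *	uint64_t par;
 *
 *	par = arm64_address_translate_s1e1r(va);
 *	if ((par & 0x1) == 0)
 *		... translation succeeded; PAR_EL1 holds the physical address ...
 */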

#endif /* !__ASSEMBLER__ */
#endif /* _KERNEL */

#endif /* !_MACHINE_CPU_H_ */

#endif /* !__arm__ */