1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2012,2013 - ARM Ltd
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 *
6 * Derived from arch/arm/kvm/coproc.c:
7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
8 * Authors: Rusty Russell <rusty@rustcorp.com.au>
9 * Christoffer Dall <c.dall@virtualopensystems.com>
10 */
11
12 #include <linux/bitfield.h>
13 #include <linux/bsearch.h>
14 #include <linux/cacheinfo.h>
15 #include <linux/debugfs.h>
16 #include <linux/kvm_host.h>
17 #include <linux/mm.h>
18 #include <linux/printk.h>
19 #include <linux/uaccess.h>
20 #include <linux/irqchip/arm-gic-v3.h>
21
22 #include <asm/arm_pmuv3.h>
23 #include <asm/cacheflush.h>
24 #include <asm/cputype.h>
25 #include <asm/debug-monitors.h>
26 #include <asm/esr.h>
27 #include <asm/kvm_arm.h>
28 #include <asm/kvm_emulate.h>
29 #include <asm/kvm_hyp.h>
30 #include <asm/kvm_mmu.h>
31 #include <asm/kvm_nested.h>
32 #include <asm/perf_event.h>
33 #include <asm/sysreg.h>
34
35 #include <trace/events/kvm.h>
36
37 #include "sys_regs.h"
38 #include "vgic/vgic.h"
39
40 #include "trace.h"
41
42 /*
43 * For AArch32, we only take care of what is being trapped. Anything
44 * that has to do with init and userspace access has to go via the
45 * 64bit interface.
46 */
47
48 static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
49 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
50 u64 val);
51
52 static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
53 const struct sys_reg_desc *r)
54 {
55 kvm_inject_undefined(vcpu);
56 return false;
57 }
58
59 static bool bad_trap(struct kvm_vcpu *vcpu,
60 struct sys_reg_params *params,
61 const struct sys_reg_desc *r,
62 const char *msg)
63 {
64 WARN_ONCE(1, "Unexpected %s\n", msg);
65 print_sys_reg_instr(params);
66 return undef_access(vcpu, params, r);
67 }
68
69 static bool read_from_write_only(struct kvm_vcpu *vcpu,
70 struct sys_reg_params *params,
71 const struct sys_reg_desc *r)
72 {
73 return bad_trap(vcpu, params, r,
74 "sys_reg read to write-only register");
75 }
76
77 static bool write_to_read_only(struct kvm_vcpu *vcpu,
78 struct sys_reg_params *params,
79 const struct sys_reg_desc *r)
80 {
81 return bad_trap(vcpu, params, r,
82 "sys_reg write to read-only register");
83 }
84
85 enum sr_loc_attr {
86 SR_LOC_MEMORY = 0, /* Register definitely in memory */
87 SR_LOC_LOADED = BIT(0), /* Register on CPU, unless it cannot */
88 SR_LOC_MAPPED = BIT(1), /* Register in a different CPU register */
89 SR_LOC_XLATED = BIT(2), /* Register translated to fit another reg */
90 SR_LOC_SPECIAL = BIT(3), /* Demanding register, implies loaded */
91 };
92
93 struct sr_loc {
94 enum sr_loc_attr loc;
95 enum vcpu_sysreg map_reg;
96 u64 (*xlate)(u64);
97 };
98
99 static enum sr_loc_attr locate_direct_register(const struct kvm_vcpu *vcpu,
100 enum vcpu_sysreg reg)
101 {
102 switch (reg) {
103 case SCTLR_EL1:
104 case CPACR_EL1:
105 case TTBR0_EL1:
106 case TTBR1_EL1:
107 case TCR_EL1:
108 case TCR2_EL1:
109 case PIR_EL1:
110 case PIRE0_EL1:
111 case POR_EL1:
112 case ESR_EL1:
113 case AFSR0_EL1:
114 case AFSR1_EL1:
115 case FAR_EL1:
116 case MAIR_EL1:
117 case VBAR_EL1:
118 case CONTEXTIDR_EL1:
119 case AMAIR_EL1:
120 case CNTKCTL_EL1:
121 case ELR_EL1:
122 case SPSR_EL1:
123 case ZCR_EL1:
124 case SCTLR2_EL1:
125 /*
126 * EL1 registers which have an ELx2 mapping are loaded if
127 * we're not in hypervisor context.
128 */
129 return is_hyp_ctxt(vcpu) ? SR_LOC_MEMORY : SR_LOC_LOADED;
130
131 case TPIDR_EL0:
132 case TPIDRRO_EL0:
133 case TPIDR_EL1:
134 case PAR_EL1:
135 case DACR32_EL2:
136 case IFSR32_EL2:
137 case DBGVCR32_EL2:
138 /* These registers are always loaded, no matter what */
139 return SR_LOC_LOADED;
140
141 default:
142 /* Non-mapped EL2 registers are by definition in memory. */
143 return SR_LOC_MEMORY;
144 }
145 }
146
147 static void locate_mapped_el2_register(const struct kvm_vcpu *vcpu,
148 enum vcpu_sysreg reg,
149 enum vcpu_sysreg map_reg,
150 u64 (*xlate)(u64),
151 struct sr_loc *loc)
152 {
153 if (!is_hyp_ctxt(vcpu)) {
154 loc->loc = SR_LOC_MEMORY;
155 return;
156 }
157
158 loc->loc = SR_LOC_LOADED | SR_LOC_MAPPED;
159 loc->map_reg = map_reg;
160
161 WARN_ON(locate_direct_register(vcpu, map_reg) != SR_LOC_MEMORY);
162
163 if (xlate != NULL && !vcpu_el2_e2h_is_set(vcpu)) {
164 loc->loc |= SR_LOC_XLATED;
165 loc->xlate = xlate;
166 }
167 }
168
169 #define MAPPED_EL2_SYSREG(r, m, t) \
170 case r: { \
171 locate_mapped_el2_register(vcpu, r, m, t, loc); \
172 break; \
173 }
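/*
 * For illustration (an added sketch, not upstream text): with the macro
 * above, MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1, NULL) expands to the
 * following case body inside locate_register() below:
 *
 *	case TTBR1_EL2: {
 *		locate_mapped_el2_register(vcpu, TTBR1_EL2, TTBR1_EL1, NULL, loc);
 *		break;
 *	}
 *
 * i.e. each EL2 register with an EL1 counterpart is redirected to that
 * counterpart, with an optional translation function (NULL here).
 */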
174
175 static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
176 struct sr_loc *loc)
177 {
178 if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU)) {
179 loc->loc = SR_LOC_MEMORY;
180 return;
181 }
182
183 switch (reg) {
184 MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1,
185 translate_sctlr_el2_to_sctlr_el1 );
186 MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1,
187 translate_cptr_el2_to_cpacr_el1 );
188 MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1,
189 translate_ttbr0_el2_to_ttbr0_el1 );
190 MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1, NULL );
191 MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1,
192 translate_tcr_el2_to_tcr_el1 );
193 MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1, NULL );
194 MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1, NULL );
195 MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1, NULL );
196 MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1, NULL );
197 MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1, NULL );
198 MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1, NULL );
199 MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1, NULL );
200 MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1, NULL );
201 MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1, NULL );
202 MAPPED_EL2_SYSREG(POR_EL2, POR_EL1, NULL );
203 MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL );
204 MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL );
205 MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL );
206 MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
207 MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL );
208 case CNTHCTL_EL2:
209 /* CNTHCTL_EL2 is super special, until we support NV2.1 */
210 loc->loc = ((is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) ?
211 SR_LOC_SPECIAL : SR_LOC_MEMORY);
212 break;
213 default:
214 loc->loc = locate_direct_register(vcpu, reg);
215 }
216 }
217
218 static u64 read_sr_from_cpu(enum vcpu_sysreg reg)
219 {
220 u64 val = 0x8badf00d8badf00d;
221
222 switch (reg) {
223 case SCTLR_EL1: val = read_sysreg_s(SYS_SCTLR_EL12); break;
224 case CPACR_EL1: val = read_sysreg_s(SYS_CPACR_EL12); break;
225 case TTBR0_EL1: val = read_sysreg_s(SYS_TTBR0_EL12); break;
226 case TTBR1_EL1: val = read_sysreg_s(SYS_TTBR1_EL12); break;
227 case TCR_EL1: val = read_sysreg_s(SYS_TCR_EL12); break;
228 case TCR2_EL1: val = read_sysreg_s(SYS_TCR2_EL12); break;
229 case PIR_EL1: val = read_sysreg_s(SYS_PIR_EL12); break;
230 case PIRE0_EL1: val = read_sysreg_s(SYS_PIRE0_EL12); break;
231 case POR_EL1: val = read_sysreg_s(SYS_POR_EL12); break;
232 case ESR_EL1: val = read_sysreg_s(SYS_ESR_EL12); break;
233 case AFSR0_EL1: val = read_sysreg_s(SYS_AFSR0_EL12); break;
234 case AFSR1_EL1: val = read_sysreg_s(SYS_AFSR1_EL12); break;
235 case FAR_EL1: val = read_sysreg_s(SYS_FAR_EL12); break;
236 case MAIR_EL1: val = read_sysreg_s(SYS_MAIR_EL12); break;
237 case VBAR_EL1: val = read_sysreg_s(SYS_VBAR_EL12); break;
238 case CONTEXTIDR_EL1: val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
239 case AMAIR_EL1: val = read_sysreg_s(SYS_AMAIR_EL12); break;
240 case CNTKCTL_EL1: val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
241 case ELR_EL1: val = read_sysreg_s(SYS_ELR_EL12); break;
242 case SPSR_EL1: val = read_sysreg_s(SYS_SPSR_EL12); break;
243 case ZCR_EL1: val = read_sysreg_s(SYS_ZCR_EL12); break;
244 case SCTLR2_EL1: val = read_sysreg_s(SYS_SCTLR2_EL12); break;
245 case TPIDR_EL0: val = read_sysreg_s(SYS_TPIDR_EL0); break;
246 case TPIDRRO_EL0: val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
247 case TPIDR_EL1: val = read_sysreg_s(SYS_TPIDR_EL1); break;
248 case PAR_EL1: val = read_sysreg_par(); break;
249 case DACR32_EL2: val = read_sysreg_s(SYS_DACR32_EL2); break;
250 case IFSR32_EL2: val = read_sysreg_s(SYS_IFSR32_EL2); break;
251 case DBGVCR32_EL2: val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
252 default: WARN_ON_ONCE(1);
253 }
254
255 return val;
256 }
257
258 static void write_sr_to_cpu(enum vcpu_sysreg reg, u64 val)
259 {
260 switch (reg) {
261 case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
262 case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
263 case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
264 case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
265 case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
266 case TCR2_EL1: write_sysreg_s(val, SYS_TCR2_EL12); break;
267 case PIR_EL1: write_sysreg_s(val, SYS_PIR_EL12); break;
268 case PIRE0_EL1: write_sysreg_s(val, SYS_PIRE0_EL12); break;
269 case POR_EL1: write_sysreg_s(val, SYS_POR_EL12); break;
270 case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
271 case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
272 case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
273 case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break;
274 case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break;
275 case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
276 case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
277 case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
278 case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
279 case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
280 case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break;
281 case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
282 case SCTLR2_EL1: write_sysreg_s(val, SYS_SCTLR2_EL12); break;
283 case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
284 case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
285 case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
286 case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
287 case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
288 case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
289 case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
290 default: WARN_ON_ONCE(1);
291 }
292 }
293
294 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
295 {
296 struct sr_loc loc = {};
297
298 locate_register(vcpu, reg, &loc);
299
300 WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
301
302 if (loc.loc & SR_LOC_SPECIAL) {
303 u64 val;
304
305 WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
306
307 /*
308 * CNTHCTL_EL2 requires some special treatment to account
309 * for the bits that can be set via CNTKCTL_EL1 when E2H==1.
310 */
311 switch (reg) {
312 case CNTHCTL_EL2:
313 val = read_sysreg_el1(SYS_CNTKCTL);
314 val &= CNTKCTL_VALID_BITS;
315 val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
316 return val;
317 default:
318 WARN_ON_ONCE(1);
319 }
320 }
321
322 if (loc.loc & SR_LOC_LOADED) {
323 enum vcpu_sysreg map_reg = reg;
324
325 if (loc.loc & SR_LOC_MAPPED)
326 map_reg = loc.map_reg;
327
328 if (!(loc.loc & SR_LOC_XLATED)) {
329 u64 val = read_sr_from_cpu(map_reg);
330
331 if (reg >= __SANITISED_REG_START__)
332 val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
333
334 return val;
335 }
336 }
337
338 return __vcpu_sys_reg(vcpu, reg);
339 }
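/*
 * A sketch of the read path above for one concrete case (added for
 * illustration): reading SCTLR_EL1 in a non-hyp context while
 * SYSREGS_ON_CPU is set yields loc.loc == SR_LOC_LOADED, so the value
 * comes straight from hardware via read_sr_from_cpu(SCTLR_EL1), i.e. a
 * read of SCTLR_EL12 under VHE. If the vcpu's registers are not loaded,
 * or we are in hyp context, the same call falls through to the in-memory
 * copy via __vcpu_sys_reg().
 */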
340
341 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, enum vcpu_sysreg reg)
342 {
343 struct sr_loc loc = {};
344
345 locate_register(vcpu, reg, &loc);
346
347 WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
348
349 if (loc.loc & SR_LOC_SPECIAL) {
350
351 WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
352
353 switch (reg) {
354 case CNTHCTL_EL2:
355 /*
356 * If E2H=1, some of the bits are backed by
357 * CNTKCTL_EL1, while the rest is kept in memory.
358 * Yes, this is fun stuff.
359 */
360 write_sysreg_el1(val, SYS_CNTKCTL);
361 break;
362 default:
363 WARN_ON_ONCE(1);
364 }
365 }
366
367 if (loc.loc & SR_LOC_LOADED) {
368 enum vcpu_sysreg map_reg = reg;
369 u64 xlated_val;
370
371 if (reg >= __SANITISED_REG_START__)
372 val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
373
374 if (loc.loc & SR_LOC_MAPPED)
375 map_reg = loc.map_reg;
376
377 if (loc.loc & SR_LOC_XLATED)
378 xlated_val = loc.xlate(val);
379 else
380 xlated_val = val;
381
382 write_sr_to_cpu(map_reg, xlated_val);
383
384 /*
385 * Fall through to write the backing store anyway, which
386 * allows translated registers to be directly read without a
387 * reverse translation.
388 */
389 }
390
391 __vcpu_assign_sys_reg(vcpu, reg, val);
392 }
393
394 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
395 #define CSSELR_MAX 14
396
397 /*
398 * Returns the minimum line size for the selected cache, expressed as
399 * Log2(bytes).
400 */
401 static u8 get_min_cache_line_size(bool icache)
402 {
403 u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
404 u8 field;
405
406 if (icache)
407 field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
408 else
409 field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);
410
411 /*
412 * Cache line size is represented as Log2(words) in CTR_EL0.
413 * Log2(bytes) can be derived with the following:
414 *
415 * Log2(words) + 2 = Log2(bytes / 4) + 2
416 * = Log2(bytes) - 2 + 2
417 * = Log2(bytes)
418 */
419 return field + 2;
420 }
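/*
 * Worked example (added, assuming a common configuration): a CPU with
 * 64-byte D-cache lines reports CTR_EL0.DminLine = 4, i.e. Log2(16 words).
 * get_min_cache_line_size(false) then returns 4 + 2 = 6 = Log2(64 bytes).
 */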
421
422 /* Which cache CCSIDR represents depends on CSSELR value. */
423 static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
424 {
425 u8 line_size;
426
427 if (vcpu->arch.ccsidr)
428 return vcpu->arch.ccsidr[csselr];
429
430 line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);
431
432 /*
433 * Fabricate a CCSIDR value as the overriding value does not exist.
434 * The real CCSIDR value will not be used, as it can vary with the
435 * physical CPU on which the vcpu currently resides.
436 *
437 * The line size is determined with get_min_cache_line_size(), which
438 * should be valid for all CPUs even if they have different cache
439 * configuration.
440 *
441 * The associativity bits are cleared, meaning the geometry of all data
442 * and unified caches (which are guaranteed to be PIPT and thus
443 * non-aliasing) is 1 set and 1 way.
444 * Guests should not be doing cache operations by set/way at all, and
445 * for this reason, we trap them and attempt to infer the intent, so
446 * that we can flush the entire guest's address space at the appropriate
447 * time. The exposed geometry minimizes the number of traps.
448 * [If guests should attempt to infer aliasing properties from the
449 * geometry (which is not permitted by the architecture), they would
450 * only do so for virtually indexed caches.]
451 *
452 * We don't check if the cache level exists as it is allowed to return
453 * an UNKNOWN value if not.
454 */
455 return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
456 }
457
458 static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
459 {
460 u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
461 u32 *ccsidr = vcpu->arch.ccsidr;
462 u32 i;
463
464 if ((val & CCSIDR_EL1_RES0) ||
465 line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
466 return -EINVAL;
467
468 if (!ccsidr) {
469 if (val == get_ccsidr(vcpu, csselr))
470 return 0;
471
472 ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
473 if (!ccsidr)
474 return -ENOMEM;
475
476 for (i = 0; i < CSSELR_MAX; i++)
477 ccsidr[i] = get_ccsidr(vcpu, i);
478
479 vcpu->arch.ccsidr = ccsidr;
480 }
481
482 ccsidr[csselr] = val;
483
484 return 0;
485 }
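/*
 * Usage note (added): the line-size check above rejects attempts by
 * userspace to claim smaller cache lines than the hardware minimum. For
 * example, if CTR_EL0.DminLine implies 64-byte lines (Log2 = 6), a CCSIDR
 * value with LineSize = 1 (i.e. 2^(1+4) = 32-byte lines) fails with
 * -EINVAL, since 5 < 6.
 */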
486
487 static bool access_rw(struct kvm_vcpu *vcpu,
488 struct sys_reg_params *p,
489 const struct sys_reg_desc *r)
490 {
491 if (p->is_write)
492 vcpu_write_sys_reg(vcpu, p->regval, r->reg);
493 else
494 p->regval = vcpu_read_sys_reg(vcpu, r->reg);
495
496 return true;
497 }
498
499 /*
500 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
501 */
502 static bool access_dcsw(struct kvm_vcpu *vcpu,
503 struct sys_reg_params *p,
504 const struct sys_reg_desc *r)
505 {
506 if (!p->is_write)
507 return read_from_write_only(vcpu, p, r);
508
509 /*
510 * Only track S/W ops if we don't have FWB. It still indicates
511 * that the guest is a bit broken (S/W operations should only
512 * be done by firmware, knowing that there is only a single
513 * CPU left in the system, and certainly not from non-secure
514 * software).
515 */
516 if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
517 kvm_set_way_flush(vcpu);
518
519 return true;
520 }
521
522 static bool access_dcgsw(struct kvm_vcpu *vcpu,
523 struct sys_reg_params *p,
524 const struct sys_reg_desc *r)
525 {
526 if (!kvm_has_mte(vcpu->kvm))
527 return undef_access(vcpu, p, r);
528
529 /* Treat MTE S/W ops as we treat the classic ones: with contempt */
530 return access_dcsw(vcpu, p, r);
531 }
532
533 static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
534 {
535 switch (r->aarch32_map) {
536 case AA32_LO:
537 *mask = GENMASK_ULL(31, 0);
538 *shift = 0;
539 break;
540 case AA32_HI:
541 *mask = GENMASK_ULL(63, 32);
542 *shift = 32;
543 break;
544 default:
545 *mask = GENMASK_ULL(63, 0);
546 *shift = 0;
547 break;
548 }
549 }
550
551 /*
552 * Generic accessor for VM registers. Only called as long as HCR_TVM
553 * is set. If the guest enables the MMU, we stop trapping the VM
554 * sys_regs and leave it in complete control of the caches.
555 */
556 static bool access_vm_reg(struct kvm_vcpu *vcpu,
557 struct sys_reg_params *p,
558 const struct sys_reg_desc *r)
559 {
560 bool was_enabled = vcpu_has_cache_enabled(vcpu);
561 u64 val, mask, shift;
562
563 BUG_ON(!p->is_write);
564
565 get_access_mask(r, &mask, &shift);
566
567 if (~mask) {
568 val = vcpu_read_sys_reg(vcpu, r->reg);
569 val &= ~mask;
570 } else {
571 val = 0;
572 }
573
574 val |= (p->regval & (mask >> shift)) << shift;
575 vcpu_write_sys_reg(vcpu, val, r->reg);
576
577 kvm_toggle_cache(vcpu, was_enabled);
578 return true;
579 }
580
581 static bool access_actlr(struct kvm_vcpu *vcpu,
582 struct sys_reg_params *p,
583 const struct sys_reg_desc *r)
584 {
585 u64 mask, shift;
586
587 if (p->is_write)
588 return ignore_write(vcpu, p);
589
590 get_access_mask(r, &mask, &shift);
591 p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;
592
593 return true;
594 }
595
596 /*
597 * Trap handler for the GICv3 SGI generation system register.
598 * Forward the request to the VGIC emulation.
599 * The cp15_64 code makes sure this automatically works
600 * for both AArch64 and AArch32 accesses.
601 */
602 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
603 struct sys_reg_params *p,
604 const struct sys_reg_desc *r)
605 {
606 bool g1;
607
608 if (!kvm_has_gicv3(vcpu->kvm))
609 return undef_access(vcpu, p, r);
610
611 if (!p->is_write)
612 return read_from_write_only(vcpu, p, r);
613
614 /*
615 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
616 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
617 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
618 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
619 * group.
620 */
621 if (p->Op0 == 0) { /* AArch32 */
622 switch (p->Op1) {
623 default: /* Keep GCC quiet */
624 case 0: /* ICC_SGI1R */
625 g1 = true;
626 break;
627 case 1: /* ICC_ASGI1R */
628 case 2: /* ICC_SGI0R */
629 g1 = false;
630 break;
631 }
632 } else { /* AArch64 */
633 switch (p->Op2) {
634 default: /* Keep GCC quiet */
635 case 5: /* ICC_SGI1R_EL1 */
636 g1 = true;
637 break;
638 case 6: /* ICC_ASGI1R_EL1 */
639 case 7: /* ICC_SGI0R_EL1 */
640 g1 = false;
641 break;
642 }
643 }
644
645 vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
646
647 return true;
648 }
649
650 static bool access_gic_sre(struct kvm_vcpu *vcpu,
651 struct sys_reg_params *p,
652 const struct sys_reg_desc *r)
653 {
654 if (!kvm_has_gicv3(vcpu->kvm))
655 return undef_access(vcpu, p, r);
656
657 if (p->is_write)
658 return ignore_write(vcpu, p);
659
660 if (p->Op1 == 4) { /* ICC_SRE_EL2 */
661 p->regval = KVM_ICC_SRE_EL2;
662 } else { /* ICC_SRE_EL1 */
663 p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
664 }
665
666 return true;
667 }
668
669 static bool access_gic_dir(struct kvm_vcpu *vcpu,
670 struct sys_reg_params *p,
671 const struct sys_reg_desc *r)
672 {
673 if (!kvm_has_gicv3(vcpu->kvm))
674 return undef_access(vcpu, p, r);
675
676 if (!p->is_write)
677 return undef_access(vcpu, p, r);
678
679 vgic_v3_deactivate(vcpu, p->regval);
680
681 return true;
682 }
683
684 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
685 struct sys_reg_params *p,
686 const struct sys_reg_desc *r)
687 {
688 if (p->is_write)
689 return ignore_write(vcpu, p);
690 else
691 return read_zero(vcpu, p);
692 }
693
694 /*
695 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
696 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
697 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
698 * treat it separately.
699 */
700 static bool trap_loregion(struct kvm_vcpu *vcpu,
701 struct sys_reg_params *p,
702 const struct sys_reg_desc *r)
703 {
704 u32 sr = reg_to_encoding(r);
705
706 if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP))
707 return undef_access(vcpu, p, r);
708
709 if (p->is_write && sr == SYS_LORID_EL1)
710 return write_to_read_only(vcpu, p, r);
711
712 return trap_raz_wi(vcpu, p, r);
713 }
714
715 static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
716 struct sys_reg_params *p,
717 const struct sys_reg_desc *r)
718 {
719 if (!p->is_write)
720 return read_from_write_only(vcpu, p, r);
721
722 kvm_debug_handle_oslar(vcpu, p->regval);
723 return true;
724 }
725
726 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
727 struct sys_reg_params *p,
728 const struct sys_reg_desc *r)
729 {
730 if (p->is_write)
731 return write_to_read_only(vcpu, p, r);
732
733 p->regval = __vcpu_sys_reg(vcpu, r->reg);
734 return true;
735 }
736
737 static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
738 u64 val)
739 {
740 /*
741 * The only modifiable bit is the OSLK bit. Refuse the write if
742 * userspace attempts to change any other bit in the register.
743 */
744 if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
745 return -EINVAL;
746
747 __vcpu_assign_sys_reg(vcpu, rd->reg, val);
748 return 0;
749 }
750
751 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
752 struct sys_reg_params *p,
753 const struct sys_reg_desc *r)
754 {
755 if (p->is_write) {
756 return ignore_write(vcpu, p);
757 } else {
758 p->regval = read_sysreg(dbgauthstatus_el1);
759 return true;
760 }
761 }
762
763 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
764 struct sys_reg_params *p,
765 const struct sys_reg_desc *r)
766 {
767 access_rw(vcpu, p, r);
768
769 kvm_debug_set_guest_ownership(vcpu);
770 return true;
771 }
772
773 /*
774 * reg_to_dbg/dbg_to_reg
775 *
776 * A 32-bit write to a debug register leaves the top bits alone;
777 * a 32-bit read from a debug register returns only the bottom bits.
778 */
779 static void reg_to_dbg(struct kvm_vcpu *vcpu,
780 struct sys_reg_params *p,
781 const struct sys_reg_desc *rd,
782 u64 *dbg_reg)
783 {
784 u64 mask, shift, val;
785
786 get_access_mask(rd, &mask, &shift);
787
788 val = *dbg_reg;
789 val &= ~mask;
790 val |= (p->regval & (mask >> shift)) << shift;
791 *dbg_reg = val;
792 }
793
794 static void dbg_to_reg(struct kvm_vcpu *vcpu,
795 struct sys_reg_params *p,
796 const struct sys_reg_desc *rd,
797 u64 *dbg_reg)
798 {
799 u64 mask, shift;
800
801 get_access_mask(rd, &mask, &shift);
802 p->regval = (*dbg_reg & mask) >> shift;
803 }
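/*
 * Example of the AArch32 access rules above (added for illustration): for
 * a register described with aarch32_map == AA32_LO, get_access_mask()
 * yields mask = GENMASK_ULL(31, 0) and shift = 0, so reg_to_dbg()
 * rewrites only bits [31:0] of the 64-bit shadow value (a 32-bit write
 * leaves the top bits alone), and dbg_to_reg() returns only bits [31:0]
 * (a 32-bit read sees only the bottom bits).
 */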
804
805 static u64 *demux_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
806 {
807 struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state;
808
809 switch (rd->Op2) {
810 case 0b100:
811 return &dbg->dbg_bvr[rd->CRm];
812 case 0b101:
813 return &dbg->dbg_bcr[rd->CRm];
814 case 0b110:
815 return &dbg->dbg_wvr[rd->CRm];
816 case 0b111:
817 return &dbg->dbg_wcr[rd->CRm];
818 default:
819 KVM_BUG_ON(1, vcpu->kvm);
820 return NULL;
821 }
822 }
823
824 static bool trap_dbg_wb_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
825 const struct sys_reg_desc *rd)
826 {
827 u64 *reg = demux_wb_reg(vcpu, rd);
828
829 if (!reg)
830 return false;
831
832 if (p->is_write)
833 reg_to_dbg(vcpu, p, rd, reg);
834 else
835 dbg_to_reg(vcpu, p, rd, reg);
836
837 kvm_debug_set_guest_ownership(vcpu);
838 return true;
839 }
840
841 static int set_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
842 u64 val)
843 {
844 u64 *reg = demux_wb_reg(vcpu, rd);
845
846 if (!reg)
847 return -EINVAL;
848
849 *reg = val;
850 return 0;
851 }
852
853 static int get_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
854 u64 *val)
855 {
856 u64 *reg = demux_wb_reg(vcpu, rd);
857
858 if (!reg)
859 return -EINVAL;
860
861 *val = *reg;
862 return 0;
863 }
864
865 static u64 reset_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
866 {
867 u64 *reg = demux_wb_reg(vcpu, rd);
868
869 /*
870 * Bail early if we couldn't find storage for the register; the
871 * KVM_BUG_ON() in demux_wb_reg() will prevent this VM from ever
872 * being run.
873 */
874 if (!reg)
875 return 0;
876
877 *reg = rd->val;
878 return rd->val;
879 }
880
881 static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
882 {
883 u64 amair = read_sysreg(amair_el1);
884 vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
885 return amair;
886 }
887
888 static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
889 {
890 u64 actlr = read_sysreg(actlr_el1);
891 vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
892 return actlr;
893 }
894
895 static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
896 {
897 u64 mpidr;
898
899 /*
900 * Map the vcpu_id into the first three affinity level fields of
901 * the MPIDR. We limit the number of VCPUs in level 0 due to a
902 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
903 * of the GICv3 to be able to address each CPU directly when
904 * sending IPIs.
905 */
906 mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
907 mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
908 mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
909 mpidr |= (1ULL << 31);
910 vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);
911
912 return mpidr;
913 }
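/*
 * Worked example (added): vcpu_id 300 maps to Aff0 = 300 & 0xf = 12,
 * Aff1 = (300 >> 4) & 0xff = 18, Aff2 = (300 >> 12) & 0xff = 0, so the
 * guest sees MPIDR_EL1 = (1ULL << 31) | (18 << 8) | 12, bit 31 being RES1.
 */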
914
915 static unsigned int hidden_visibility(const struct kvm_vcpu *vcpu,
916 const struct sys_reg_desc *r)
917 {
918 return REG_HIDDEN;
919 }
920
921 static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
922 const struct sys_reg_desc *r)
923 {
924 if (kvm_vcpu_has_pmu(vcpu))
925 return 0;
926
927 return REG_HIDDEN;
928 }
929
930 static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
931 {
932 u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
933 u8 n = vcpu->kvm->arch.nr_pmu_counters;
934
935 if (n)
936 mask |= GENMASK(n - 1, 0);
937
938 reset_unknown(vcpu, r);
939 __vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);
940
941 return __vcpu_sys_reg(vcpu, r->reg);
942 }
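/*
 * Example (added, assuming nr_pmu_counters == 4): the mask above becomes
 * BIT(31) | GENMASK(3, 0), ARMV8_PMU_CYCLE_IDX being 31, so the UNKNOWN
 * reset value is constrained to the cycle counter bit plus one bit per
 * implemented event counter.
 */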
943
944 static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
945 {
946 reset_unknown(vcpu, r);
947 __vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));
948
949 return __vcpu_sys_reg(vcpu, r->reg);
950 }
951
952 static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
953 {
954 /* This thing will UNDEF, who cares about the reset value? */
955 if (!kvm_vcpu_has_pmu(vcpu))
956 return 0;
957
958 reset_unknown(vcpu, r);
959 __vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));
960
961 return __vcpu_sys_reg(vcpu, r->reg);
962 }
963
964 static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
965 {
966 reset_unknown(vcpu, r);
967 __vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);
968
969 return __vcpu_sys_reg(vcpu, r->reg);
970 }
971
972 static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
973 {
974 u64 pmcr = 0;
975
976 if (!kvm_supports_32bit_el0())
977 pmcr |= ARMV8_PMU_PMCR_LC;
978
979 /*
980 * The value of the PMCR.N field is included when the
981 * vCPU register is read via kvm_vcpu_read_pmcr().
982 */
983 __vcpu_assign_sys_reg(vcpu, r->reg, pmcr);
984
985 return __vcpu_sys_reg(vcpu, r->reg);
986 }
987
988 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
989 {
990 u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
991 bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
992
993 if (!enabled)
994 kvm_inject_undefined(vcpu);
995
996 return !enabled;
997 }
998
999 static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
1000 {
1001 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
1002 }
1003
1004 static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
1005 {
1006 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
1007 }
1008
1009 static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
1010 {
1011 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
1012 }
1013
1014 static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
1015 {
1016 return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
1017 }
1018
1019 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1020 const struct sys_reg_desc *r)
1021 {
1022 u64 val;
1023
1024 if (pmu_access_el0_disabled(vcpu))
1025 return false;
1026
1027 if (p->is_write) {
1028 /*
1029 * Only update writeable bits of PMCR (continuing into
1030 * kvm_pmu_handle_pmcr() as well)
1031 */
1032 val = kvm_vcpu_read_pmcr(vcpu);
1033 val &= ~ARMV8_PMU_PMCR_MASK;
1034 val |= p->regval & ARMV8_PMU_PMCR_MASK;
1035 if (!kvm_supports_32bit_el0())
1036 val |= ARMV8_PMU_PMCR_LC;
1037 kvm_pmu_handle_pmcr(vcpu, val);
1038 } else {
1039 /* PMCR.P & PMCR.C are RAZ */
1040 val = kvm_vcpu_read_pmcr(vcpu)
1041 & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
1042 p->regval = val;
1043 }
1044
1045 return true;
1046 }
1047
1048 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1049 const struct sys_reg_desc *r)
1050 {
1051 if (pmu_access_event_counter_el0_disabled(vcpu))
1052 return false;
1053
1054 if (p->is_write)
1055 __vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
1056 else
1057 /* return PMSELR.SEL field */
1058 p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
1059 & PMSELR_EL0_SEL_MASK;
1060
1061 return true;
1062 }
1063
1064 static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1065 const struct sys_reg_desc *r)
1066 {
1067 u64 pmceid, mask, shift;
1068
1069 BUG_ON(p->is_write);
1070
1071 if (pmu_access_el0_disabled(vcpu))
1072 return false;
1073
1074 get_access_mask(r, &mask, &shift);
1075
1076 pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
1077 pmceid &= mask;
1078 pmceid >>= shift;
1079
1080 p->regval = pmceid;
1081
1082 return true;
1083 }
1084
1085 static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
1086 {
1087 u64 pmcr, val;
1088
1089 pmcr = kvm_vcpu_read_pmcr(vcpu);
1090 val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
1091 if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
1092 kvm_inject_undefined(vcpu);
1093 return false;
1094 }
1095
1096 return true;
1097 }
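/*
 * Example (added): if PMCR_EL0.N == 6, event counter indices 0..5 are
 * valid, as is ARMV8_PMU_CYCLE_IDX (31) for the cycle counter; an access
 * to any index in 6..30 takes the UNDEF path above.
 */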
1098
1099 static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1100 u64 *val)
1101 {
1102 u64 idx;
1103
1104 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
1105 /* PMCCNTR_EL0 */
1106 idx = ARMV8_PMU_CYCLE_IDX;
1107 else
1108 /* PMEVCNTRn_EL0 */
1109 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1110
1111 *val = kvm_pmu_get_counter_value(vcpu, idx);
1112 return 0;
1113 }
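/*
 * Note on the decode above (added): PMEVCNTRn_EL0 packs n into the
 * encoding as CRm[1:0]:Op2[2:0]. For example, PMEVCNTR10_EL0 has
 * CRm = 0b1001 and Op2 = 0b010, so idx = (1 << 3) | 2 = 10.
 */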
1114
1115 static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1116 u64 val)
1117 {
1118 u64 idx;
1119
1120 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
1121 /* PMCCNTR_EL0 */
1122 idx = ARMV8_PMU_CYCLE_IDX;
1123 else
1124 /* PMEVCNTRn_EL0 */
1125 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1126
1127 kvm_pmu_set_counter_value_user(vcpu, idx, val);
1128 return 0;
1129 }
1130
1131 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
1132 struct sys_reg_params *p,
1133 const struct sys_reg_desc *r)
1134 {
1135 u64 idx = ~0UL;
1136
1137 if (r->CRn == 9 && r->CRm == 13) {
1138 if (r->Op2 == 2) {
1139 /* PMXEVCNTR_EL0 */
1140 if (pmu_access_event_counter_el0_disabled(vcpu))
1141 return false;
1142
1143 idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
1144 __vcpu_sys_reg(vcpu, PMSELR_EL0));
1145 } else if (r->Op2 == 0) {
1146 /* PMCCNTR_EL0 */
1147 if (pmu_access_cycle_counter_el0_disabled(vcpu))
1148 return false;
1149
1150 idx = ARMV8_PMU_CYCLE_IDX;
1151 }
1152 } else if (r->CRn == 0 && r->CRm == 9) {
1153 /* PMCCNTR */
1154 if (pmu_access_event_counter_el0_disabled(vcpu))
1155 return false;
1156
1157 idx = ARMV8_PMU_CYCLE_IDX;
1158 } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
1159 /* PMEVCNTRn_EL0 */
1160 if (pmu_access_event_counter_el0_disabled(vcpu))
1161 return false;
1162
1163 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1164 }
1165
1166 /* Catch any decoding mistake */
1167 WARN_ON(idx == ~0UL);
1168
1169 if (!pmu_counter_idx_valid(vcpu, idx))
1170 return false;
1171
1172 if (p->is_write) {
1173 if (pmu_access_el0_disabled(vcpu))
1174 return false;
1175
1176 kvm_pmu_set_counter_value(vcpu, idx, p->regval);
1177 } else {
1178 p->regval = kvm_pmu_get_counter_value(vcpu, idx);
1179 }
1180
1181 return true;
1182 }
1183
1184 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1185 const struct sys_reg_desc *r)
1186 {
1187 u64 idx, reg;
1188
1189 if (pmu_access_el0_disabled(vcpu))
1190 return false;
1191
1192 if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
1193 /* PMXEVTYPER_EL0 */
1194 idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
1195 reg = PMEVTYPER0_EL0 + idx;
1196 } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
1197 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1198 if (idx == ARMV8_PMU_CYCLE_IDX)
1199 reg = PMCCFILTR_EL0;
1200 else
1201 /* PMEVTYPERn_EL0 */
1202 reg = PMEVTYPER0_EL0 + idx;
1203 } else {
1204 BUG();
1205 }
1206
1207 if (!pmu_counter_idx_valid(vcpu, idx))
1208 return false;
1209
1210 if (p->is_write) {
1211 kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
1212 kvm_vcpu_pmu_restore_guest(vcpu);
1213 } else {
1214 p->regval = __vcpu_sys_reg(vcpu, reg);
1215 }
1216
1217 return true;
1218 }
1219
1220 static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
1221 {
1222 u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1223
1224 __vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
1225 kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
1226
1227 return 0;
1228 }
1229
1230 static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
1231 {
1232 u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1233
1234 *val = __vcpu_sys_reg(vcpu, r->reg) & mask;
1235 return 0;
1236 }
1237
1238 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1239 const struct sys_reg_desc *r)
1240 {
1241 u64 val, mask;
1242
1243 if (pmu_access_el0_disabled(vcpu))
1244 return false;
1245
1246 mask = kvm_pmu_accessible_counter_mask(vcpu);
1247 if (p->is_write) {
1248 val = p->regval & mask;
1249 if (r->Op2 & 0x1)
1250 /* accessing PMCNTENSET_EL0 */
1251 __vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
1252 else
1253 /* accessing PMCNTENCLR_EL0 */
1254 __vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
1255
1256 kvm_pmu_reprogram_counter_mask(vcpu, val);
1257 } else {
1258 p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
1259 }
1260
1261 return true;
1262 }
1263
1264 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1265 const struct sys_reg_desc *r)
1266 {
1267 u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1268
1269 if (check_pmu_access_disabled(vcpu, 0))
1270 return false;
1271
1272 if (p->is_write) {
1273 u64 val = p->regval & mask;
1274
1275 if (r->Op2 & 0x1)
1276 /* accessing PMINTENSET_EL1 */
1277 __vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
1278 else
1279 /* accessing PMINTENCLR_EL1 */
1280 __vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
1281 } else {
1282 p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
1283 }
1284
1285 return true;
1286 }
1287
1288 static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1289 const struct sys_reg_desc *r)
1290 {
1291 u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1292
1293 if (pmu_access_el0_disabled(vcpu))
1294 return false;
1295
1296 if (p->is_write) {
1297 if (r->CRm & 0x2)
1298 /* accessing PMOVSSET_EL0 */
1299 __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
1300 else
1301 /* accessing PMOVSCLR_EL0 */
1302 __vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
1303 } else {
1304 p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
1305 }
1306
1307 return true;
1308 }
1309
1310 static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1311 const struct sys_reg_desc *r)
1312 {
1313 u64 mask;
1314
1315 if (!p->is_write)
1316 return read_from_write_only(vcpu, p, r);
1317
1318 if (pmu_write_swinc_el0_disabled(vcpu))
1319 return false;
1320
1321 mask = kvm_pmu_accessible_counter_mask(vcpu);
1322 kvm_pmu_software_increment(vcpu, p->regval & mask);
1323 return true;
1324 }
1325
1326 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1327 const struct sys_reg_desc *r)
1328 {
1329 if (p->is_write) {
1330 if (!vcpu_mode_priv(vcpu))
1331 return undef_access(vcpu, p, r);
1332
1333 __vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
1334 (p->regval & ARMV8_PMU_USERENR_MASK));
1335 } else {
1336 p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
1337 & ARMV8_PMU_USERENR_MASK;
1338 }
1339
1340 return true;
1341 }
1342
1343 static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1344 u64 *val)
1345 {
1346 *val = kvm_vcpu_read_pmcr(vcpu);
1347 return 0;
1348 }
1349
1350 static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1351 u64 val)
1352 {
1353 u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
1354 struct kvm *kvm = vcpu->kvm;
1355
1356 mutex_lock(&kvm->arch.config_lock);
1357
1358 /*
1359 * The vCPU can't have more counters than the PMU hardware
1360 * implements. Ignore this error to maintain compatibility
1361 * with the existing KVM behavior.
1362 */
1363 if (!kvm_vm_has_ran_once(kvm) &&
1364 !vcpu_has_nv(vcpu) &&
1365 new_n <= kvm_arm_pmu_get_max_counters(kvm))
1366 kvm->arch.nr_pmu_counters = new_n;
1367
1368 mutex_unlock(&kvm->arch.config_lock);
1369
1370 /*
1371 * Ignore writes to RES0 bits, read only bits that are cleared on
1372 * vCPU reset, and writable bits that KVM doesn't support yet.
1373 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
1374 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
1375 * But, we leave the bit as it is here, as the vCPU's PMUver might
1376 * be changed later (NOTE: the bit will be cleared on first vCPU run
1377 * if necessary).
1378 */
1379 val &= ARMV8_PMU_PMCR_MASK;
1380
1381 /* The LC bit is RES1 when AArch32 is not supported */
1382 if (!kvm_supports_32bit_el0())
1383 val |= ARMV8_PMU_PMCR_LC;
1384
1385 __vcpu_assign_sys_reg(vcpu, r->reg, val);
1386 kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
1387
1388 return 0;
1389 }
1390
1391 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
1392 #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
1393 { SYS_DESC(SYS_DBGBVRn_EL1(n)), \
1394 trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1395 get_dbg_wb_reg, set_dbg_wb_reg }, \
1396 { SYS_DESC(SYS_DBGBCRn_EL1(n)), \
1397 trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1398 get_dbg_wb_reg, set_dbg_wb_reg }, \
1399 { SYS_DESC(SYS_DBGWVRn_EL1(n)), \
1400 trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1401 get_dbg_wb_reg, set_dbg_wb_reg }, \
1402 { SYS_DESC(SYS_DBGWCRn_EL1(n)), \
1403 trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1404 get_dbg_wb_reg, set_dbg_wb_reg }
1405
1406 #define PMU_SYS_REG(name) \
1407 SYS_DESC(SYS_##name), .reset = reset_pmu_reg, \
1408 .visibility = pmu_visibility
1409
1410 /* Macro to expand the PMEVCNTRn_EL0 register */
1411 #define PMU_PMEVCNTR_EL0(n) \
1412 { PMU_SYS_REG(PMEVCNTRn_EL0(n)), \
1413 .reset = reset_pmevcntr, .get_user = get_pmu_evcntr, \
1414 .set_user = set_pmu_evcntr, \
1415 .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
1416
1417 /* Macro to expand the PMEVTYPERn_EL0 register */
1418 #define PMU_PMEVTYPER_EL0(n) \
1419 { PMU_SYS_REG(PMEVTYPERn_EL0(n)), \
1420 .reset = reset_pmevtyper, \
1421 .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
1422
1423 /* Macro to expand the AMU counter and type registers */
1424 #define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
1425 #define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
1426 #define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
1427 #define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
1428
1429 static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1430 const struct sys_reg_desc *rd)
1431 {
1432 return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
1433 }
1434
1435 /*
1436 * If we land here on a PtrAuth access, that is because we didn't
1437 * fixup the access on exit by allowing the PtrAuth sysregs. The only
1438 * way this happens is when the guest does not have PtrAuth support
1439 * enabled.
1440 */
1441 #define __PTRAUTH_KEY(k) \
1442 { SYS_DESC(SYS_## k), undef_access, reset_unknown, k, \
1443 .visibility = ptrauth_visibility}
1444
1445 #define PTRAUTH_KEY(k) \
1446 __PTRAUTH_KEY(k ## KEYLO_EL1), \
1447 __PTRAUTH_KEY(k ## KEYHI_EL1)
1448
1449 static bool access_arch_timer(struct kvm_vcpu *vcpu,
1450 struct sys_reg_params *p,
1451 const struct sys_reg_desc *r)
1452 {
1453 enum kvm_arch_timers tmr;
1454 enum kvm_arch_timer_regs treg;
1455 u64 reg = reg_to_encoding(r);
1456
1457 switch (reg) {
1458 case SYS_CNTP_TVAL_EL0:
1459 if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1460 tmr = TIMER_HPTIMER;
1461 else
1462 tmr = TIMER_PTIMER;
1463 treg = TIMER_REG_TVAL;
1464 break;
1465
1466 case SYS_CNTV_TVAL_EL0:
1467 if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1468 tmr = TIMER_HVTIMER;
1469 else
1470 tmr = TIMER_VTIMER;
1471 treg = TIMER_REG_TVAL;
1472 break;
1473
1474 case SYS_AARCH32_CNTP_TVAL:
1475 case SYS_CNTP_TVAL_EL02:
1476 tmr = TIMER_PTIMER;
1477 treg = TIMER_REG_TVAL;
1478 break;
1479
1480 case SYS_CNTV_TVAL_EL02:
1481 tmr = TIMER_VTIMER;
1482 treg = TIMER_REG_TVAL;
1483 break;
1484
1485 case SYS_CNTHP_TVAL_EL2:
1486 tmr = TIMER_HPTIMER;
1487 treg = TIMER_REG_TVAL;
1488 break;
1489
1490 case SYS_CNTHV_TVAL_EL2:
1491 tmr = TIMER_HVTIMER;
1492 treg = TIMER_REG_TVAL;
1493 break;
1494
1495 case SYS_CNTP_CTL_EL0:
1496 if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1497 tmr = TIMER_HPTIMER;
1498 else
1499 tmr = TIMER_PTIMER;
1500 treg = TIMER_REG_CTL;
1501 break;
1502
1503 case SYS_CNTV_CTL_EL0:
1504 if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1505 tmr = TIMER_HVTIMER;
1506 else
1507 tmr = TIMER_VTIMER;
1508 treg = TIMER_REG_CTL;
1509 break;
1510
1511 case SYS_AARCH32_CNTP_CTL:
1512 case SYS_CNTP_CTL_EL02:
1513 tmr = TIMER_PTIMER;
1514 treg = TIMER_REG_CTL;
1515 break;
1516
1517 case SYS_CNTV_CTL_EL02:
1518 tmr = TIMER_VTIMER;
1519 treg = TIMER_REG_CTL;
1520 break;
1521
1522 case SYS_CNTHP_CTL_EL2:
1523 tmr = TIMER_HPTIMER;
1524 treg = TIMER_REG_CTL;
1525 break;
1526
1527 case SYS_CNTHV_CTL_EL2:
1528 tmr = TIMER_HVTIMER;
1529 treg = TIMER_REG_CTL;
1530 break;
1531
1532 case SYS_CNTP_CVAL_EL0:
1533 if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1534 tmr = TIMER_HPTIMER;
1535 else
1536 tmr = TIMER_PTIMER;
1537 treg = TIMER_REG_CVAL;
1538 break;
1539
1540 case SYS_CNTV_CVAL_EL0:
1541 if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1542 tmr = TIMER_HVTIMER;
1543 else
1544 tmr = TIMER_VTIMER;
1545 treg = TIMER_REG_CVAL;
1546 break;
1547
1548 case SYS_AARCH32_CNTP_CVAL:
1549 case SYS_CNTP_CVAL_EL02:
1550 tmr = TIMER_PTIMER;
1551 treg = TIMER_REG_CVAL;
1552 break;
1553
1554 case SYS_CNTV_CVAL_EL02:
1555 tmr = TIMER_VTIMER;
1556 treg = TIMER_REG_CVAL;
1557 break;
1558
1559 case SYS_CNTHP_CVAL_EL2:
1560 tmr = TIMER_HPTIMER;
1561 treg = TIMER_REG_CVAL;
1562 break;
1563
1564 case SYS_CNTHV_CVAL_EL2:
1565 tmr = TIMER_HVTIMER;
1566 treg = TIMER_REG_CVAL;
1567 break;
1568
1569 case SYS_CNTPCT_EL0:
1570 case SYS_CNTPCTSS_EL0:
1571 if (is_hyp_ctxt(vcpu))
1572 tmr = TIMER_HPTIMER;
1573 else
1574 tmr = TIMER_PTIMER;
1575 treg = TIMER_REG_CNT;
1576 break;
1577
1578 case SYS_AARCH32_CNTPCT:
1579 case SYS_AARCH32_CNTPCTSS:
1580 tmr = TIMER_PTIMER;
1581 treg = TIMER_REG_CNT;
1582 break;
1583
1584 case SYS_CNTVCT_EL0:
1585 case SYS_CNTVCTSS_EL0:
1586 if (is_hyp_ctxt(vcpu))
1587 tmr = TIMER_HVTIMER;
1588 else
1589 tmr = TIMER_VTIMER;
1590 treg = TIMER_REG_CNT;
1591 break;
1592
1593 case SYS_AARCH32_CNTVCT:
1594 case SYS_AARCH32_CNTVCTSS:
1595 tmr = TIMER_VTIMER;
1596 treg = TIMER_REG_CNT;
1597 break;
1598
1599 default:
1600 print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
1601 return undef_access(vcpu, p, r);
1602 }
1603
1604 if (p->is_write)
1605 kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
1606 else
1607 p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
1608
1609 return true;
1610 }
1611
1612 static int arch_timer_set_user(struct kvm_vcpu *vcpu,
1613 const struct sys_reg_desc *rd,
1614 u64 val)
1615 {
1616 switch (reg_to_encoding(rd)) {
1617 case SYS_CNTV_CTL_EL0:
1618 case SYS_CNTP_CTL_EL0:
1619 case SYS_CNTHV_CTL_EL2:
1620 case SYS_CNTHP_CTL_EL2:
1621 val &= ~ARCH_TIMER_CTRL_IT_STAT;
1622 break;
1623 case SYS_CNTVCT_EL0:
1624 if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
1625 timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read() - val);
1626 return 0;
1627 case SYS_CNTPCT_EL0:
1628 if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
1629 timer_set_offset(vcpu_ptimer(vcpu), kvm_phys_timer_read() - val);
1630 return 0;
1631 }
1632
1633 __vcpu_assign_sys_reg(vcpu, rd->reg, val);
1634 return 0;
1635 }
1636
1637 static int arch_timer_get_user(struct kvm_vcpu *vcpu,
1638 const struct sys_reg_desc *rd,
1639 u64 *val)
1640 {
1641 switch (reg_to_encoding(rd)) {
1642 case SYS_CNTVCT_EL0:
1643 *val = kvm_phys_timer_read() - timer_get_offset(vcpu_vtimer(vcpu));
1644 break;
1645 case SYS_CNTPCT_EL0:
1646 *val = kvm_phys_timer_read() - timer_get_offset(vcpu_ptimer(vcpu));
1647 break;
1648 default:
1649 *val = __vcpu_sys_reg(vcpu, rd->reg);
1650 }
1651
1652 return 0;
1653 }
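/*
 * Sketch of the counter save/restore convention above (added): userspace
 * reads and writes CNTVCT_EL0/CNTPCT_EL0 as absolute counter values.
 * Writing val stores an offset of (now - val), unless the VM manages a
 * global counter offset, so that a later read, (now' - offset), returns
 * val plus however far the physical counter has advanced in between.
 * This keeps the guest's view of the counter continuous across
 * save/restore.
 */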
1654
1655 static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
1656 s64 new, s64 cur)
1657 {
1658 struct arm64_ftr_bits kvm_ftr = *ftrp;
1659
1660 /* Some features have a different safe value type in KVM than the host */
1661 switch (id) {
1662 case SYS_ID_AA64DFR0_EL1:
1663 switch (kvm_ftr.shift) {
1664 case ID_AA64DFR0_EL1_PMUVer_SHIFT:
1665 kvm_ftr.type = FTR_LOWER_SAFE;
1666 break;
1667 case ID_AA64DFR0_EL1_DebugVer_SHIFT:
1668 kvm_ftr.type = FTR_LOWER_SAFE;
1669 break;
1670 }
1671 break;
1672 case SYS_ID_DFR0_EL1:
1673 if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
1674 kvm_ftr.type = FTR_LOWER_SAFE;
1675 break;
1676 }
1677
1678 return arm64_ftr_safe_value(&kvm_ftr, new, cur);
1679 }
1680
1681 /*
1682 * arm64_check_features() - Check if a feature register value constitutes
1683 * a subset of features indicated by the idreg's KVM sanitised limit.
1684 *
1685 * This function checks whether each feature field of @val is the "safe" value
1686 * against the idreg's KVM sanitised limit returned from the reset() callback.
1687 * If a field value in @val is the same as the one in limit, it is always
1688 * considered the safe value regardless. For register fields that are not
1689 * writable, only the value in limit is considered the safe value.
1690 *
1691 * Return: 0 if all the fields are safe. Otherwise, return negative errno.
1692 */
1693 static int arm64_check_features(struct kvm_vcpu *vcpu,
1694 const struct sys_reg_desc *rd,
1695 u64 val)
1696 {
1697 const struct arm64_ftr_reg *ftr_reg;
1698 const struct arm64_ftr_bits *ftrp = NULL;
1699 u32 id = reg_to_encoding(rd);
1700 u64 writable_mask = rd->val;
1701 u64 limit = rd->reset(vcpu, rd);
1702 u64 mask = 0;
1703
1704 /*
1705 * Hidden and unallocated ID registers may not have a corresponding
1706 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
1707 * only safe value is 0.
1708 */
1709 if (sysreg_visible_as_raz(vcpu, rd))
1710 return val ? -E2BIG : 0;
1711
1712 ftr_reg = get_arm64_ftr_reg(id);
1713 if (!ftr_reg)
1714 return -EINVAL;
1715
1716 ftrp = ftr_reg->ftr_bits;
1717
1718 for (; ftrp && ftrp->width; ftrp++) {
1719 s64 f_val, f_lim, safe_val;
1720 u64 ftr_mask;
1721
1722 ftr_mask = arm64_ftr_mask(ftrp);
1723 if ((ftr_mask & writable_mask) != ftr_mask)
1724 continue;
1725
1726 f_val = arm64_ftr_value(ftrp, val);
1727 f_lim = arm64_ftr_value(ftrp, limit);
1728 mask |= ftr_mask;
1729
1730 if (f_val == f_lim)
1731 safe_val = f_val;
1732 else
1733 safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);
1734
1735 if (safe_val != f_val)
1736 return -E2BIG;
1737 }
1738
1739 /* For fields that are not writable, values in limit are the safe values. */
1740 if ((val & ~mask) != (limit & ~mask))
1741 return -E2BIG;
1742
1743 return 0;
1744 }
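/*
 * Example of the subset check above (added): for ID_AA64DFR0_EL1,
 * kvm_arm64_ftr_safe_value() treats PMUVer and DebugVer as
 * FTR_LOWER_SAFE, so userspace may write any value lower than or equal
 * to KVM's sanitised limit for those fields; writing a higher value makes
 * safe_val != f_val and the check fails with -E2BIG.
 */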
1745
1746 static u8 pmuver_to_perfmon(u8 pmuver)
1747 {
1748 switch (pmuver) {
1749 case ID_AA64DFR0_EL1_PMUVer_IMP:
1750 return ID_DFR0_EL1_PerfMon_PMUv3;
1751 case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
1752 return ID_DFR0_EL1_PerfMon_IMPDEF;
1753 default:
1754 /* Anything ARMv8.1+ and NI have the same value. For now. */
1755 return pmuver;
1756 }
1757 }
1758
1759 static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
1760 static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val);
1761 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
1762
1763 /* Read a sanitised cpufeature ID register by sys_reg_desc */
1764 static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
1765 const struct sys_reg_desc *r)
1766 {
1767 u32 id = reg_to_encoding(r);
1768 u64 val;
1769
1770 if (sysreg_visible_as_raz(vcpu, r))
1771 return 0;
1772
1773 val = read_sanitised_ftr_reg(id);
1774
1775 switch (id) {
1776 case SYS_ID_AA64DFR0_EL1:
1777 val = sanitise_id_aa64dfr0_el1(vcpu, val);
1778 break;
1779 case SYS_ID_AA64PFR0_EL1:
1780 val = sanitise_id_aa64pfr0_el1(vcpu, val);
1781 break;
1782 case SYS_ID_AA64PFR1_EL1:
1783 val = sanitise_id_aa64pfr1_el1(vcpu, val);
1784 break;
1785 case SYS_ID_AA64PFR2_EL1:
1786 val &= ID_AA64PFR2_EL1_FPMR |
1787 (kvm_has_mte(vcpu->kvm) ?
1788 ID_AA64PFR2_EL1_MTEFAR | ID_AA64PFR2_EL1_MTESTOREONLY :
1789 0);
1790 break;
1791 case SYS_ID_AA64ISAR1_EL1:
1792 if (!vcpu_has_ptrauth(vcpu))
1793 val &= ~(ID_AA64ISAR1_EL1_APA |
1794 ID_AA64ISAR1_EL1_API |
1795 ID_AA64ISAR1_EL1_GPA |
1796 ID_AA64ISAR1_EL1_GPI);
1797 break;
1798 case SYS_ID_AA64ISAR2_EL1:
1799 if (!vcpu_has_ptrauth(vcpu))
1800 val &= ~(ID_AA64ISAR2_EL1_APA3 |
1801 ID_AA64ISAR2_EL1_GPA3);
1802 if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
1803 has_broken_cntvoff())
1804 val &= ~ID_AA64ISAR2_EL1_WFxT;
1805 break;
1806 case SYS_ID_AA64ISAR3_EL1:
1807 val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_LSFE |
1808 ID_AA64ISAR3_EL1_FAMINMAX;
1809 break;
1810 case SYS_ID_AA64MMFR2_EL1:
1811 val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
1812 val &= ~ID_AA64MMFR2_EL1_NV;
1813 break;
1814 case SYS_ID_AA64MMFR3_EL1:
1815 val &= ID_AA64MMFR3_EL1_TCRX |
1816 ID_AA64MMFR3_EL1_SCTLRX |
1817 ID_AA64MMFR3_EL1_S1POE |
1818 ID_AA64MMFR3_EL1_S1PIE;
1819
1820 if (!system_supports_poe())
1821 val &= ~ID_AA64MMFR3_EL1_S1POE;
1822 break;
1823 case SYS_ID_MMFR4_EL1:
1824 val &= ~ID_MMFR4_EL1_CCIDX;
1825 break;
1826 }
1827
1828 if (vcpu_has_nv(vcpu))
1829 val = limit_nv_id_reg(vcpu->kvm, id, val);
1830
1831 return val;
1832 }
1833
1834 static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
1835 const struct sys_reg_desc *r)
1836 {
1837 return __kvm_read_sanitised_id_reg(vcpu, r);
1838 }
1839
1840 static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1841 {
1842 return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r));
1843 }
1844
1845 static bool is_feature_id_reg(u32 encoding)
1846 {
1847 return (sys_reg_Op0(encoding) == 3 &&
1848 (sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
1849 sys_reg_CRn(encoding) == 0 &&
1850 sys_reg_CRm(encoding) <= 7);
1851 }
1852
1853 /*
1854 * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
1855 * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
1856 * registers KVM maintains on a per-VM basis.
1857 *
1858 * Additionally, the implementation ID registers and CTR_EL0 are handled as
1859 * per-VM registers.
1860 */
1861 static inline bool is_vm_ftr_id_reg(u32 id)
1862 {
1863 switch (id) {
1864 case SYS_CTR_EL0:
1865 case SYS_MIDR_EL1:
1866 case SYS_REVIDR_EL1:
1867 case SYS_AIDR_EL1:
1868 return true;
1869 default:
1870 return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1871 sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1872 sys_reg_CRm(id) < 8);
1874 }
1875 }
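
/*
 * A concrete instance of the rule above, assuming the usual encodings from
 * <asm/sysreg.h>: ID_AA64PFR0_EL1 is (3, 0, 0, 4, 0) and thus a per-VM ID
 * register, while PMCR_EL0 (3, 3, 9, 12, 0) is not. Hypothetical helper,
 * for illustration only.
 */
static inline void example_vm_ftr_id_checks(void)
{
	WARN_ON(!is_vm_ftr_id_reg(SYS_ID_AA64PFR0_EL1));	/* CRm == 4 */
	WARN_ON(is_vm_ftr_id_reg(SYS_PMCR_EL0));		/* CRn != 0 */
}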
1876
1877 static inline bool is_vcpu_ftr_id_reg(u32 id)
1878 {
1879 return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
1880 }
1881
1882 static inline bool is_aa32_id_reg(u32 id)
1883 {
1884 return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1885 sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1886 sys_reg_CRm(id) <= 3);
1887 }
1888
1889 static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
1890 const struct sys_reg_desc *r)
1891 {
1892 u32 id = reg_to_encoding(r);
1893
1894 switch (id) {
1895 case SYS_ID_AA64ZFR0_EL1:
1896 if (!vcpu_has_sve(vcpu))
1897 return REG_RAZ;
1898 break;
1899 }
1900
1901 return 0;
1902 }
1903
1904 static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
1905 const struct sys_reg_desc *r)
1906 {
1907 /*
1908 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
1909 * EL. Promote to RAZ/WI in order to guarantee consistency between
1910 * systems.
1911 */
1912 if (!kvm_supports_32bit_el0())
1913 return REG_RAZ | REG_USER_WI;
1914
1915 return id_visibility(vcpu, r);
1916 }
1917
1918 static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
1919 const struct sys_reg_desc *r)
1920 {
1921 return REG_RAZ;
1922 }
1923
1924 /* cpufeature ID register access trap handlers */
1925
1926 static bool access_id_reg(struct kvm_vcpu *vcpu,
1927 struct sys_reg_params *p,
1928 const struct sys_reg_desc *r)
1929 {
1930 if (p->is_write)
1931 return write_to_read_only(vcpu, p, r);
1932
1933 p->regval = read_id_reg(vcpu, r);
1934
1935 return true;
1936 }
1937
1938 /* Visibility overrides for SVE-specific control registers */
1939 static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
1940 const struct sys_reg_desc *rd)
1941 {
1942 if (vcpu_has_sve(vcpu))
1943 return 0;
1944
1945 return REG_HIDDEN;
1946 }
1947
1948 static unsigned int sme_visibility(const struct kvm_vcpu *vcpu,
1949 const struct sys_reg_desc *rd)
1950 {
1951 if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP))
1952 return 0;
1953
1954 return REG_HIDDEN;
1955 }
1956
1957 static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
1958 const struct sys_reg_desc *rd)
1959 {
1960 if (kvm_has_fpmr(vcpu->kvm))
1961 return 0;
1962
1963 return REG_HIDDEN;
1964 }
1965
1966 static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
1967 {
1968 if (!vcpu_has_sve(vcpu))
1969 val &= ~ID_AA64PFR0_EL1_SVE_MASK;
1970
1971 /*
1972 * The default is to expose CSV2 == 1 if the HW isn't affected.
1973 * Although this is a per-CPU feature, we make it global because
1974 * asymmetric systems are just a nuisance.
1975 *
1976 * Userspace can override this as long as it doesn't promise
1977 * the impossible.
1978 */
1979 if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
1980 val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
1981 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
1982 }
1983 if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
1984 val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
1985 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
1986 }
1987
1988 if (vgic_is_v3(vcpu->kvm)) {
1989 val &= ~ID_AA64PFR0_EL1_GIC_MASK;
1990 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
1991 }
1992
1993 val &= ~ID_AA64PFR0_EL1_AMU_MASK;
1994
1995 /*
1996 * MPAM is disabled by default as KVM also needs a set of PARTID to
1997 * program the MPAMVPMx_EL2 PARTID remapping registers with. But some
1998 * older kernels let the guest see the ID bit.
1999 */
2000 val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
2001
2002 return val;
2003 }
2004
2005 static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
2006 {
2007 u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2008
2009 if (!kvm_has_mte(vcpu->kvm)) {
2010 val &= ~ID_AA64PFR1_EL1_MTE;
2011 val &= ~ID_AA64PFR1_EL1_MTE_frac;
2012 }
2013
2014 if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
2015 SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
2016 val &= ~ID_AA64PFR1_EL1_RAS_frac;
2017
2018 val &= ~ID_AA64PFR1_EL1_SME;
2019 val &= ~ID_AA64PFR1_EL1_RNDR_trap;
2020 val &= ~ID_AA64PFR1_EL1_NMI;
2021 val &= ~ID_AA64PFR1_EL1_GCS;
2022 val &= ~ID_AA64PFR1_EL1_THE;
2023 val &= ~ID_AA64PFR1_EL1_MTEX;
2024 val &= ~ID_AA64PFR1_EL1_PFAR;
2025 val &= ~ID_AA64PFR1_EL1_MPAM_frac;
2026
2027 return val;
2028 }
2029
2030 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
2031 {
2032 val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
2033
2034 /*
2035 * Only initialize the PMU version if the vCPU was configured with one.
2036 */
2037 val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
2038 if (kvm_vcpu_has_pmu(vcpu))
2039 val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
2040 kvm_arm_pmu_get_pmuver_limit());
2041
2042 /* Hide SPE from guests */
2043 val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
2044
2045 /* Hide BRBE from guests */
2046 val &= ~ID_AA64DFR0_EL1_BRBE_MASK;
2047
2048 return val;
2049 }
2050
2051 /*
2052 * Older versions of KVM erroneously claim support for FEAT_DoubleLock with
2053 * NV-enabled VMs on hardware that doesn't support it. Silently ignore
2054 * the incorrect value if it is consistent with the bug.
2055 */
2056 static bool ignore_feat_doublelock(struct kvm_vcpu *vcpu, u64 val)
2057 {
2058 u8 host, user;
2059
2060 if (!vcpu_has_nv(vcpu))
2061 return false;
2062
2063 host = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock,
2064 read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1));
2065 user = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock, val);
2066
2067 return host == ID_AA64DFR0_EL1_DoubleLock_NI &&
2068 user == ID_AA64DFR0_EL1_DoubleLock_IMP;
2069 }
2070
2071 static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
2072 const struct sys_reg_desc *rd,
2073 u64 val)
2074 {
2075 u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
2076 u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
2077
2078 /*
2079 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
2080 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
2081 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
2082 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
2083 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
2084 *
2085 * At minimum, we're on the hook to allow values that were given to
2086 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
2087 * with a more sensible NI. The value of an ID register changing under
2088 * the nose of the guest is unfortunate, but is certainly no more
2089 * surprising than an ill-guided PMU driver poking at impdef system
2090 * registers that end in an UNDEF...
2091 */
2092 if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
2093 val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
2094
2095 /*
2096 * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
2097 * nonzero minimum safe value.
2098 */
2099 if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
2100 return -EINVAL;
2101
2102 if (ignore_feat_doublelock(vcpu, val)) {
2103 val &= ~ID_AA64DFR0_EL1_DoubleLock;
2104 val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DoubleLock, NI);
2105 }
2106
2107 return set_id_reg(vcpu, rd, val);
2108 }
2109
2110 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
2111 const struct sys_reg_desc *rd)
2112 {
2113 u8 perfmon;
2114 u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
2115
2116 val &= ~ID_DFR0_EL1_PerfMon_MASK;
2117 if (kvm_vcpu_has_pmu(vcpu)) {
2118 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
2119 val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
2120 }
2121
2122 val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);
2123
2124 return val;
2125 }
2126
2127 static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
2128 const struct sys_reg_desc *rd,
2129 u64 val)
2130 {
2131 u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
2132 u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);
2133
2134 if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
2135 val &= ~ID_DFR0_EL1_PerfMon_MASK;
2136 perfmon = 0;
2137 }
2138
2139 /*
2140 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
2141 * it doesn't promise more than what the HW gives us on the
2142 * AArch64 side (as everything is emulated with that), and
2143 * as long as this is a PMUv3.
2144 */
2145 if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
2146 return -EINVAL;
2147
2148 if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
2149 return -EINVAL;
2150
2151 return set_id_reg(vcpu, rd, val);
2152 }
2153
2154 static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
2155 const struct sys_reg_desc *rd, u64 user_val)
2156 {
2157 u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2158 u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
2159
2160 /*
2161 * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
2162 * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
2163 * guests, but didn't add trap handling. KVM doesn't support MPAM and
2164 * always returns an UNDEF for these registers. The guest must see 0
2165 * for this field.
2166 *
2167 * But KVM must also accept values from user-space that were provided
2168 * by KVM. On CPUs that support MPAM, permit user-space to write
2169 * the sanitised value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
2170 */
2171 if ((hw_val & mpam_mask) == (user_val & mpam_mask))
2172 user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
2173
2174 /* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
2175 if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
2176 !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
2177 (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
2178 return -EINVAL;
2179
2180 /*
2181 * If we are running on a GICv5 host and support FEAT_GCIE_LEGACY, then
2182 * we support GICv3. Fail attempts to do anything but set that to IMP.
2183 */
2184 if (vgic_is_v3_compat(vcpu->kvm) &&
2185 FIELD_GET(ID_AA64PFR0_EL1_GIC_MASK, user_val) != ID_AA64PFR0_EL1_GIC_IMP)
2186 return -EINVAL;
2187
2188 return set_id_reg(vcpu, rd, user_val);
2189 }
2190
2191 static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
2192 const struct sys_reg_desc *rd, u64 user_val)
2193 {
2194 u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
2195 u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
2196 u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val);
2197 u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val);
2198 u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val);
2199
2200 /* See set_id_aa64pfr0_el1 for comment about MPAM */
2201 if ((hw_val & mpam_mask) == (user_val & mpam_mask))
2202 user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
2203
2204 /*
2205 * Previously MTE_frac was hidden from the guest. However, if the
2206 * hardware supports MTE2 but not MTE_ASYM_FAULT, then a value
2207 * of 0 for this field indicates that the hardware supports
2208 * MTE_ASYNC, whereas 0xf indicates MTE_ASYNC is not supported.
2209 *
2210 * As KVM must accept values it previously provided to user-space,
2211 * allow user-space to set ID_AA64PFR1_EL1.MTE_frac to 0 when
2212 * ID_AA64PFR1_EL1.MTE is 2. However, ignore the field to avoid
2213 * incorrectly claiming hardware support for MTE_ASYNC in the
2214 * guest.
2215 */
2216
2217 if (mte == ID_AA64PFR1_EL1_MTE_MTE2 &&
2218 hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI &&
2219 user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) {
2220 user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
2221 user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK;
2222 }
2223
2224 return set_id_reg(vcpu, rd, user_val);
2225 }
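
/*
 * The MTE_frac special case above, restated as a standalone predicate (a
 * sketch assuming the same field encodings): it only fires when the
 * hardware is MTE2-without-ASYNC and userspace asks for ASYNC.
 * Hypothetical helper, for illustration only.
 */
static inline bool example_mte_frac_ignored(u64 hw_val, u64 user_val)
{
	return SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val) == ID_AA64PFR1_EL1_MTE_MTE2 &&
	       SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val) == ID_AA64PFR1_EL1_MTE_frac_NI &&
	       SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val) == ID_AA64PFR1_EL1_MTE_frac_ASYNC;
}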
2226
2227 /*
2228 * Allow userspace to de-feature a stage-2 translation granule but prevent it
2229 * from claiming the impossible.
2230 */
2231 #define tgran2_val_allowed(tg, safe, user) \
2232 ({ \
2233 u8 __s = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, safe); \
2234 u8 __u = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, user); \
2235 \
2236 __s == __u || __u == ID_AA64MMFR0_EL1_##tg##_NI; \
2237 })
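
/*
 * How tgran2_val_allowed() plays out in practice (a sketch assuming the
 * TGRAN4_2 enum values from <asm/sysreg.h>): keeping the sanitised value
 * or de-featuring to NI is fine, claiming support the host lacks is not.
 * Hypothetical helper, for illustration only.
 */
static inline void example_tgran2_checks(void)
{
	u64 imp = SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP);
	u64 ni = SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, NI);

	WARN_ON(!tgran2_val_allowed(TGRAN4_2, imp, imp));	/* unchanged */
	WARN_ON(!tgran2_val_allowed(TGRAN4_2, imp, ni));	/* de-feature */
	WARN_ON(tgran2_val_allowed(TGRAN4_2, ni, imp));		/* impossible */
}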
2238
2239 static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
2240 const struct sys_reg_desc *rd, u64 user_val)
2241 {
2242 u64 sanitized_val = kvm_read_sanitised_id_reg(vcpu, rd);
2243
2244 if (!vcpu_has_nv(vcpu))
2245 return set_id_reg(vcpu, rd, user_val);
2246
2247 if (!tgran2_val_allowed(TGRAN4_2, sanitized_val, user_val) ||
2248 !tgran2_val_allowed(TGRAN16_2, sanitized_val, user_val) ||
2249 !tgran2_val_allowed(TGRAN64_2, sanitized_val, user_val))
2250 return -EINVAL;
2251
2252 return set_id_reg(vcpu, rd, user_val);
2253 }
2254
2255 static int set_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu,
2256 const struct sys_reg_desc *rd, u64 user_val)
2257 {
2258 u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
2259 u64 nv_mask = ID_AA64MMFR2_EL1_NV_MASK;
2260
2261 /*
2262 * We made the mistake of exposing the now deprecated NV field,
2263 * so allow userspace to write it, but silently ignore it.
2264 */
2265 if ((hw_val & nv_mask) == (user_val & nv_mask))
2266 user_val &= ~nv_mask;
2267
2268 return set_id_reg(vcpu, rd, user_val);
2269 }
2270
2271 static int set_ctr_el0(struct kvm_vcpu *vcpu,
2272 const struct sys_reg_desc *rd, u64 user_val)
2273 {
2274 u8 user_L1Ip = SYS_FIELD_GET(CTR_EL0, L1Ip, user_val);
2275
2276 /*
2277 * Both AIVIVT (0b01) and VPIPT (0b00) are documented as reserved.
2278 * Hence only allow to set VIPT(0b10) or PIPT(0b11) for L1Ip based
2279 * on what hardware reports.
2280 *
2281 * Using a VIPT software model on PIPT will lead to over-invalidation,
2282 * but is still correct. Hence, we can allow downgrading PIPT to VIPT,
2283 * but not the other way around. This is handled via arm64_ftr_safe_value()
2284 * as CTR_EL0 ftr_bits has L1Ip field with type FTR_EXACT and safe value
2285 * set as VIPT.
2286 */
2287 switch (user_L1Ip) {
2288 case CTR_EL0_L1Ip_RESERVED_VPIPT:
2289 case CTR_EL0_L1Ip_RESERVED_AIVIVT:
2290 return -EINVAL;
2291 case CTR_EL0_L1Ip_VIPT:
2292 case CTR_EL0_L1Ip_PIPT:
2293 return set_id_reg(vcpu, rd, user_val);
2294 default:
2295 return -ENOENT;
2296 }
2297 }
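
/*
 * The L1Ip policy above from the caller's side (a sketch assuming PIPT
 * hardware): downgrading to VIPT is accepted, while both reserved
 * encodings are refused before set_id_reg() is even reached.
 * Hypothetical helper, for illustration only.
 */
static inline int example_l1ip_downgrade(struct kvm_vcpu *vcpu,
					 const struct sys_reg_desc *rd)
{
	u64 val = read_sanitised_ftr_reg(SYS_CTR_EL0);

	val &= ~CTR_EL0_L1Ip_MASK;
	val |= SYS_FIELD_PREP_ENUM(CTR_EL0, L1Ip, VIPT);

	return set_ctr_el0(vcpu, rd, val);	/* 0 on PIPT hosts */
}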
2298
2299 /*
2300 * cpufeature ID register user accessors
2301 *
2302 * For now, these registers are immutable for userspace, so no values
2303 * are stored, and for set_id_reg() we don't allow the effective value
2304 * to be changed.
2305 */
2306 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2307 u64 *val)
2308 {
2309 /*
2310 * Avoid locking if the VM has already started, as the ID registers are
2311 * guaranteed to be invariant at that point.
2312 */
2313 if (kvm_vm_has_ran_once(vcpu->kvm)) {
2314 *val = read_id_reg(vcpu, rd);
2315 return 0;
2316 }
2317
2318 mutex_lock(&vcpu->kvm->arch.config_lock);
2319 *val = read_id_reg(vcpu, rd);
2320 mutex_unlock(&vcpu->kvm->arch.config_lock);
2321
2322 return 0;
2323 }
2324
2325 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2326 u64 val)
2327 {
2328 u32 id = reg_to_encoding(rd);
2329 int ret;
2330
2331 mutex_lock(&vcpu->kvm->arch.config_lock);
2332
2333 /*
2334 * Once the VM has started the ID registers are immutable. Reject any
2335 * write that does not match the final register value.
2336 */
2337 if (kvm_vm_has_ran_once(vcpu->kvm)) {
2338 if (val != read_id_reg(vcpu, rd))
2339 ret = -EBUSY;
2340 else
2341 ret = 0;
2342
2343 mutex_unlock(&vcpu->kvm->arch.config_lock);
2344 return ret;
2345 }
2346
2347 ret = arm64_check_features(vcpu, rd, val);
2348 if (!ret)
2349 kvm_set_vm_id_reg(vcpu->kvm, id, val);
2350
2351 mutex_unlock(&vcpu->kvm->arch.config_lock);
2352
2353 /*
2354 * arm64_check_features() returns -E2BIG to indicate the register's
2355 * feature set is a superset of the maximally-allowed register value.
2356 * While it would be nice to precisely describe this to userspace, the
2357 * existing UAPI for KVM_SET_ONE_REG has it that invalid register
2358 * writes return -EINVAL.
2359 */
2360 if (ret == -E2BIG)
2361 ret = -EINVAL;
2362 return ret;
2363 }
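
/*
 * The userspace view of the rules above (a minimal sketch using the
 * <linux/kvm.h> UAPI, assuming an already-created vCPU fd): shrinking a
 * feature field is accepted, a superset of the limit fails with EINVAL,
 * and any change after the first KVM_RUN fails with EBUSY.
 */
#if 0	/* illustration only, never built */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_id_aa64isar0(int vcpu_fd, __u64 val)
{
	struct kvm_one_reg reg = {
		/* ID_AA64ISAR0_EL1 is Op0=3, Op1=0, CRn=0, CRm=6, Op2=0 */
		.id = ARM64_SYS_REG(3, 0, 0, 6, 0),
		.addr = (__u64)&val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
#endif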
2364
2365 void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
2366 {
2367 u64 *p = __vm_id_reg(&kvm->arch, reg);
2368
2369 lockdep_assert_held(&kvm->arch.config_lock);
2370
2371 if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm))
2372 return;
2373
2374 *p = val;
2375 }
2376
2377 static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2378 u64 *val)
2379 {
2380 *val = 0;
2381 return 0;
2382 }
2383
2384 static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2385 u64 val)
2386 {
2387 return 0;
2388 }
2389
2390 static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2391 const struct sys_reg_desc *r)
2392 {
2393 if (p->is_write)
2394 return write_to_read_only(vcpu, p, r);
2395
2396 p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0);
2397 return true;
2398 }
2399
2400 static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2401 const struct sys_reg_desc *r)
2402 {
2403 if (p->is_write)
2404 return write_to_read_only(vcpu, p, r);
2405
2406 p->regval = __vcpu_sys_reg(vcpu, r->reg);
2407 return true;
2408 }
2409
2410 /*
2411 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
2412 * by the physical CPU which the vcpu currently resides in.
2413 */
2414 static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2415 {
2416 u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
2417 u64 clidr;
2418 u8 loc;
2419
2420 if ((ctr_el0 & CTR_EL0_IDC)) {
2421 /*
2422 * Data cache clean to the PoU is not required so LoUU and LoUIS
2423 * will not be set and a unified cache, which will be marked as
2424 * LoC, will be added.
2425 *
2426 * If not DIC, make the unified cache L2 so that an instruction
2427 * cache can be added as L1 later.
2428 */
2429 loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
2430 clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
2431 } else {
2432 /*
2433 * Data cache clean to the PoU is required so let L1 have a data
2434 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
2435 * it can be marked as LoC too.
2436 */
2437 loc = 1;
2438 clidr = 1 << CLIDR_LOUU_SHIFT;
2439 clidr |= 1 << CLIDR_LOUIS_SHIFT;
2440 clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
2441 }
2442
2443 /*
2444 * Instruction cache invalidation to the PoU is required so let L1 have
2445 * an instruction cache. If L1 already has a data cache, it will be
2446 * CACHE_TYPE_SEPARATE.
2447 */
2448 if (!(ctr_el0 & CTR_EL0_DIC))
2449 clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);
2450
2451 clidr |= loc << CLIDR_LOC_SHIFT;
2452
2453 /*
2454 * Add a tag cache unified with the data cache. Allocation tags and data
2455 * are unified in a cache line, so the layout looks valid even if there
2456 * is only one cache line.
2457 */
2458 if (kvm_has_mte(vcpu->kvm))
2459 clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);
2460
2461 __vcpu_assign_sys_reg(vcpu, r->reg, clidr);
2462
2463 return __vcpu_sys_reg(vcpu, r->reg);
2464 }
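
/*
 * A worked example of the fabrication above, assuming a host with
 * CTR_EL0.IDC == 1 and CTR_EL0.DIC == 0 and no MTE: the unified cache
 * lands at L2 (LoC == 2), an instruction cache is added at L1, and
 * LoUU/LoUIS stay zero. Hypothetical helper, for illustration only.
 */
static inline u64 example_clidr_idc_only(void)
{
	u64 clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(2);

	clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);
	clidr |= 2UL << CLIDR_LOC_SHIFT;

	return clidr;
}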
2465
2466 static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2467 u64 val)
2468 {
2469 u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
2470 u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));
2471
2472 if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
2473 return -EINVAL;
2474
2475 __vcpu_assign_sys_reg(vcpu, rd->reg, val);
2476
2477 return 0;
2478 }
2479
2480 static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2481 const struct sys_reg_desc *r)
2482 {
2483 int reg = r->reg;
2484
2485 if (p->is_write)
2486 vcpu_write_sys_reg(vcpu, p->regval, reg);
2487 else
2488 p->regval = vcpu_read_sys_reg(vcpu, reg);
2489 return true;
2490 }
2491
2492 static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2493 const struct sys_reg_desc *r)
2494 {
2495 u32 csselr;
2496
2497 if (p->is_write)
2498 return write_to_read_only(vcpu, p, r);
2499
2500 csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
2501 csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
2502 if (csselr < CSSELR_MAX)
2503 p->regval = get_ccsidr(vcpu, csselr);
2504
2505 return true;
2506 }
2507
2508 static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
2509 const struct sys_reg_desc *rd)
2510 {
2511 if (kvm_has_mte(vcpu->kvm))
2512 return 0;
2513
2514 return REG_HIDDEN;
2515 }
2516
2517 #define MTE_REG(name) { \
2518 SYS_DESC(SYS_##name), \
2519 .access = undef_access, \
2520 .reset = reset_unknown, \
2521 .reg = name, \
2522 .visibility = mte_visibility, \
2523 }
2524
2525 static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
2526 const struct sys_reg_desc *rd)
2527 {
2528 if (vcpu_has_nv(vcpu))
2529 return 0;
2530
2531 return REG_HIDDEN;
2532 }
2533
2534 static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
2535 struct sys_reg_params *p,
2536 const struct sys_reg_desc *r)
2537 {
2538 /*
2539 * We really shouldn't be here, and this is likely the result
2540 * of a misconfigured trap, as this register should target the
2541 * VNCR page, and nothing else.
2542 */
2543 return bad_trap(vcpu, p, r,
2544 "trap of VNCR-backed register");
2545 }
2546
2547 static bool bad_redir_trap(struct kvm_vcpu *vcpu,
2548 struct sys_reg_params *p,
2549 const struct sys_reg_desc *r)
2550 {
2551 /*
2552 * We really shouldn't be here, and this is likely the result
2553 * of a misconfigured trap, as this register should target the
2554 * corresponding EL1, and nothing else.
2555 */
2556 return bad_trap(vcpu, p, r,
2557 "trap of EL2 register redirected to EL1");
2558 }
2559
2560 #define SYS_REG_USER_FILTER(name, acc, rst, v, gu, su, filter) { \
2561 SYS_DESC(SYS_##name), \
2562 .access = acc, \
2563 .reset = rst, \
2564 .reg = name, \
2565 .get_user = gu, \
2566 .set_user = su, \
2567 .visibility = filter, \
2568 .val = v, \
2569 }
2570
2571 #define EL2_REG_FILTERED(name, acc, rst, v, filter) \
2572 SYS_REG_USER_FILTER(name, acc, rst, v, NULL, NULL, filter)
2573
2574 #define EL2_REG(name, acc, rst, v) \
2575 EL2_REG_FILTERED(name, acc, rst, v, el2_visibility)
2576
2577 #define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v)
2578 #define EL2_REG_VNCR_FILT(name, vis) \
2579 EL2_REG_FILTERED(name, bad_vncr_trap, reset_val, 0, vis)
2580 #define EL2_REG_VNCR_GICv3(name) \
2581 EL2_REG_VNCR_FILT(name, hidden_visibility)
2582 #define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v)
2583
2584 #define TIMER_REG(name, vis) \
2585 SYS_REG_USER_FILTER(name, access_arch_timer, reset_val, 0, \
2586 arch_timer_get_user, arch_timer_set_user, vis)
2587
2588 /*
2589 * The reset() callback and the val field are not otherwise used for idregs,
2590 * so they are repurposed:
2591 * reset() returns the KVM sanitised register value, which is the same as the
2592 * host kernel sanitised value if there is no additional KVM sanitisation.
2593 * val is used as a mask indicating the writable fields of the idreg; only
2594 * bits set to 1 are writable from userspace. This mask may become
2595 * unnecessary in the future once all ID registers are writable from
2596 * userspace.
2597 */
2598
2599 #define ID_DESC_DEFAULT_CALLBACKS \
2600 .access = access_id_reg, \
2601 .get_user = get_id_reg, \
2602 .set_user = set_id_reg, \
2603 .visibility = id_visibility, \
2604 .reset = kvm_read_sanitised_id_reg
2605
2606 #define ID_DESC(name) \
2607 SYS_DESC(SYS_##name), \
2608 ID_DESC_DEFAULT_CALLBACKS
2609
2610 /* sys_reg_desc initialiser for known cpufeature ID registers */
2611 #define ID_SANITISED(name) { \
2612 ID_DESC(name), \
2613 .val = 0, \
2614 }
2615
2616 /* sys_reg_desc initialiser for writable ID registers */
2617 #define ID_WRITABLE(name, mask) { \
2618 ID_DESC(name), \
2619 .val = mask, \
2620 }
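
/*
 * As an example of the convention described above, the
 * ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac) entry used in
 * the table below is roughly equivalent to this initialiser (a sketch of
 * the macro expansion):
 */
#if 0	/* illustration only, never built */
{
	SYS_DESC(SYS_ID_AA64MMFR4_EL1),
	.access = access_id_reg,
	.get_user = get_id_reg,
	.set_user = set_id_reg,
	.visibility = id_visibility,
	.reset = kvm_read_sanitised_id_reg,
	.val = ID_AA64MMFR4_EL1_NV_frac,	/* only NV_frac is writable */
},
#endif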
2621
2622 /*
2623 * 32bit ID regs are fully writable when the guest is 32bit
2624 * capable. Nothing in the KVM code should rely on 32bit features
2625 * anyway, only 64bit, so let the VMM do its worst.
2626 */
2627 #define AA32_ID_WRITABLE(name) { \
2628 ID_DESC(name), \
2629 .visibility = aa32_id_visibility, \
2630 .val = GENMASK(31, 0), \
2631 }
2632
2633 /* sys_reg_desc initialiser for cpufeature ID registers that need filtering */
2634 #define ID_FILTERED(sysreg, name, mask) { \
2635 ID_DESC(sysreg), \
2636 .set_user = set_##name, \
2637 .val = (mask), \
2638 }
2639
2640 /*
2641 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
2642 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
2643 * (1 <= crm < 8, 0 <= Op2 < 8).
2644 */
2645 #define ID_UNALLOCATED(crm, op2) { \
2646 .name = "S3_0_0_" #crm "_" #op2, \
2647 Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
2648 ID_DESC_DEFAULT_CALLBACKS, \
2649 .visibility = raz_visibility, \
2650 .val = 0, \
2651 }
2652
2653 /*
2654 * sys_reg_desc initialiser for known ID registers that we hide from guests.
2655 * For now, these are exposed just like unallocated ID regs: they appear
2656 * RAZ for the guest.
2657 */
2658 #define ID_HIDDEN(name) { \
2659 ID_DESC(name), \
2660 .visibility = raz_visibility, \
2661 .val = 0, \
2662 }
2663
2664 static bool access_sp_el1(struct kvm_vcpu *vcpu,
2665 struct sys_reg_params *p,
2666 const struct sys_reg_desc *r)
2667 {
2668 if (p->is_write)
2669 __vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval);
2670 else
2671 p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
2672
2673 return true;
2674 }
2675
2676 static bool access_elr(struct kvm_vcpu *vcpu,
2677 struct sys_reg_params *p,
2678 const struct sys_reg_desc *r)
2679 {
2680 if (p->is_write)
2681 vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
2682 else
2683 p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);
2684
2685 return true;
2686 }
2687
2688 static bool access_spsr(struct kvm_vcpu *vcpu,
2689 struct sys_reg_params *p,
2690 const struct sys_reg_desc *r)
2691 {
2692 if (p->is_write)
2693 __vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval);
2694 else
2695 p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
2696
2697 return true;
2698 }
2699
2700 static bool access_cntkctl_el12(struct kvm_vcpu *vcpu,
2701 struct sys_reg_params *p,
2702 const struct sys_reg_desc *r)
2703 {
2704 if (p->is_write)
2705 __vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval);
2706 else
2707 p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);
2708
2709 return true;
2710 }
2711
2712 static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2713 {
2714 u64 val = r->val;
2715
2716 if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
2717 val |= HCR_E2H;
2718
2719 __vcpu_assign_sys_reg(vcpu, r->reg, val);
2720
2721 return __vcpu_sys_reg(vcpu, r->reg);
2722 }
2723
2724 static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu,
2725 const struct sys_reg_desc *rd,
2726 unsigned int (*fn)(const struct kvm_vcpu *,
2727 const struct sys_reg_desc *))
2728 {
2729 return el2_visibility(vcpu, rd) ?: fn(vcpu, rd);
2730 }
2731
2732 static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
2733 const struct sys_reg_desc *rd)
2734 {
2735 return __el2_visibility(vcpu, rd, sve_visibility);
2736 }
2737
2738 static unsigned int vncr_el2_visibility(const struct kvm_vcpu *vcpu,
2739 const struct sys_reg_desc *rd)
2740 {
2741 if (el2_visibility(vcpu, rd) == 0 &&
2742 kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
2743 return 0;
2744
2745 return REG_HIDDEN;
2746 }
2747
2748 static unsigned int sctlr2_visibility(const struct kvm_vcpu *vcpu,
2749 const struct sys_reg_desc *rd)
2750 {
2751 if (kvm_has_sctlr2(vcpu->kvm))
2752 return 0;
2753
2754 return REG_HIDDEN;
2755 }
2756
2757 static unsigned int sctlr2_el2_visibility(const struct kvm_vcpu *vcpu,
2758 const struct sys_reg_desc *rd)
2759 {
2760 return __el2_visibility(vcpu, rd, sctlr2_visibility);
2761 }
2762
2763 static bool access_zcr_el2(struct kvm_vcpu *vcpu,
2764 struct sys_reg_params *p,
2765 const struct sys_reg_desc *r)
2766 {
2767 unsigned int vq;
2768
2769 if (guest_hyp_sve_traps_enabled(vcpu)) {
2770 kvm_inject_nested_sve_trap(vcpu);
2771 return false;
2772 }
2773
2774 if (!p->is_write) {
2775 p->regval = __vcpu_sys_reg(vcpu, ZCR_EL2);
2776 return true;
2777 }
2778
2779 vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
2780 vq = min(vq, vcpu_sve_max_vq(vcpu));
2781 __vcpu_assign_sys_reg(vcpu, ZCR_EL2, vq - 1);
2782 return true;
2783 }
2784
2785 static bool access_gic_vtr(struct kvm_vcpu *vcpu,
2786 struct sys_reg_params *p,
2787 const struct sys_reg_desc *r)
2788 {
2789 if (p->is_write)
2790 return write_to_read_only(vcpu, p, r);
2791
2792 p->regval = kvm_get_guest_vtr_el2();
2793
2794 return true;
2795 }
2796
2797 static bool access_gic_misr(struct kvm_vcpu *vcpu,
2798 struct sys_reg_params *p,
2799 const struct sys_reg_desc *r)
2800 {
2801 if (p->is_write)
2802 return write_to_read_only(vcpu, p, r);
2803
2804 p->regval = vgic_v3_get_misr(vcpu);
2805
2806 return true;
2807 }
2808
2809 static bool access_gic_eisr(struct kvm_vcpu *vcpu,
2810 struct sys_reg_params *p,
2811 const struct sys_reg_desc *r)
2812 {
2813 if (p->is_write)
2814 return write_to_read_only(vcpu, p, r);
2815
2816 p->regval = vgic_v3_get_eisr(vcpu);
2817
2818 return true;
2819 }
2820
2821 static bool access_gic_elrsr(struct kvm_vcpu *vcpu,
2822 struct sys_reg_params *p,
2823 const struct sys_reg_desc *r)
2824 {
2825 if (p->is_write)
2826 return write_to_read_only(vcpu, p, r);
2827
2828 p->regval = vgic_v3_get_elrsr(vcpu);
2829
2830 return true;
2831 }
2832
2833 static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu,
2834 const struct sys_reg_desc *rd)
2835 {
2836 if (kvm_has_s1poe(vcpu->kvm))
2837 return 0;
2838
2839 return REG_HIDDEN;
2840 }
2841
2842 static unsigned int s1poe_el2_visibility(const struct kvm_vcpu *vcpu,
2843 const struct sys_reg_desc *rd)
2844 {
2845 return __el2_visibility(vcpu, rd, s1poe_visibility);
2846 }
2847
2848 static unsigned int tcr2_visibility(const struct kvm_vcpu *vcpu,
2849 const struct sys_reg_desc *rd)
2850 {
2851 if (kvm_has_tcr2(vcpu->kvm))
2852 return 0;
2853
2854 return REG_HIDDEN;
2855 }
2856
2857 static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu,
2858 const struct sys_reg_desc *rd)
2859 {
2860 return __el2_visibility(vcpu, rd, tcr2_visibility);
2861 }
2862
2863 static unsigned int fgt2_visibility(const struct kvm_vcpu *vcpu,
2864 const struct sys_reg_desc *rd)
2865 {
2866 if (el2_visibility(vcpu, rd) == 0 &&
2867 kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, FGT2))
2868 return 0;
2869
2870 return REG_HIDDEN;
2871 }
2872
2873 static unsigned int fgt_visibility(const struct kvm_vcpu *vcpu,
2874 const struct sys_reg_desc *rd)
2875 {
2876 if (el2_visibility(vcpu, rd) == 0 &&
2877 kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, IMP))
2878 return 0;
2879
2880 return REG_HIDDEN;
2881 }
2882
2883 static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu,
2884 const struct sys_reg_desc *rd)
2885 {
2886 if (kvm_has_s1pie(vcpu->kvm))
2887 return 0;
2888
2889 return REG_HIDDEN;
2890 }
2891
2892 static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
2893 const struct sys_reg_desc *rd)
2894 {
2895 return __el2_visibility(vcpu, rd, s1pie_visibility);
2896 }
2897
2898 static unsigned int cnthv_visibility(const struct kvm_vcpu *vcpu,
2899 const struct sys_reg_desc *rd)
2900 {
2901 if (vcpu_has_nv(vcpu) &&
2902 !vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2_E2H0))
2903 return 0;
2904
2905 return REG_HIDDEN;
2906 }
2907
2908 static bool access_mdcr(struct kvm_vcpu *vcpu,
2909 struct sys_reg_params *p,
2910 const struct sys_reg_desc *r)
2911 {
2912 u64 hpmn, val, old = __vcpu_sys_reg(vcpu, MDCR_EL2);
2913
2914 if (!p->is_write) {
2915 p->regval = old;
2916 return true;
2917 }
2918
2919 val = p->regval;
2920 hpmn = FIELD_GET(MDCR_EL2_HPMN, val);
2921
2922 /*
2923 * If HPMN is out of bounds, limit it to what we actually
2924 * support. This matches the UNKNOWN definition of the field
2925 * in that case, and keeps the emulation simple. Sort of.
2926 */
2927 if (hpmn > vcpu->kvm->arch.nr_pmu_counters) {
2928 hpmn = vcpu->kvm->arch.nr_pmu_counters;
2929 u64p_replace_bits(&val, hpmn, MDCR_EL2_HPMN);
2930 }
2931
2932 __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
2933
2934 /*
2935 * Request a reload of the PMU to enable/disable the counters
2936 * affected by HPME.
2937 */
2938 if ((old ^ val) & MDCR_EL2_HPME)
2939 kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
2940
2941 return true;
2942 }
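
/*
 * The HPMN clamp above in isolation (a sketch, assuming a VM configured
 * with fewer counters than the guest asks for): a write with HPMN == 10
 * on a VM with 6 counters is stored back with HPMN == 6. Hypothetical
 * helper, for illustration only.
 */
static inline u64 example_clamp_hpmn(u64 mdcr, u64 nr_counters)
{
	if (FIELD_GET(MDCR_EL2_HPMN, mdcr) > nr_counters)
		u64p_replace_bits(&mdcr, nr_counters, MDCR_EL2_HPMN);

	return mdcr;
}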
2943
2944 static bool access_ras(struct kvm_vcpu *vcpu,
2945 struct sys_reg_params *p,
2946 const struct sys_reg_desc *r)
2947 {
2948 struct kvm *kvm = vcpu->kvm;
2949
2950 switch (reg_to_encoding(r)) {
2951 case SYS_ERXPFGCDN_EL1:
2952 case SYS_ERXPFGCTL_EL1:
2953 case SYS_ERXPFGF_EL1:
2954 case SYS_ERXMISC2_EL1:
2955 case SYS_ERXMISC3_EL1:
2956 if (!(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) ||
2957 (kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, RAS, IMP) &&
2958 kvm_has_feat(kvm, ID_AA64PFR1_EL1, RAS_frac, RASv1p1)))) {
2959 kvm_inject_undefined(vcpu);
2960 return false;
2961 }
2962 break;
2963 default:
2964 if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
2965 kvm_inject_undefined(vcpu);
2966 return false;
2967 }
2968 }
2969
2970 return trap_raz_wi(vcpu, p, r);
2971 }
2972
2973 /*
2974 * For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and
2975 * AIDR_EL1 as "invariant" registers, meaning userspace cannot change them.
2976 * The values made visible to userspace were the register values of the boot
2977 * CPU.
2978 *
2979 * At the same time, reads from these registers at EL1 previously were not
2980 * trapped, allowing the guest to read the actual hardware value. On big-little
2981 * machines, this means the VM can see different values depending on where a
2982 * given vCPU got scheduled.
2983 *
2984 * These registers are now trapped as collateral damage from SME, and what
2985 * follows attempts to give a user / guest view consistent with the existing
2986 * ABI.
2987 */
2988 static bool access_imp_id_reg(struct kvm_vcpu *vcpu,
2989 struct sys_reg_params *p,
2990 const struct sys_reg_desc *r)
2991 {
2992 if (p->is_write)
2993 return write_to_read_only(vcpu, p, r);
2994
2995 /*
2996 * Return the VM-scoped implementation ID register values if userspace
2997 * has made them writable.
2998 */
2999 if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags))
3000 return access_id_reg(vcpu, p, r);
3001
3002 /*
3003 * Otherwise, fall back to the old behavior of returning the value of
3004 * the current CPU.
3005 */
3006 switch (reg_to_encoding(r)) {
3007 case SYS_REVIDR_EL1:
3008 p->regval = read_sysreg(revidr_el1);
3009 break;
3010 case SYS_AIDR_EL1:
3011 p->regval = read_sysreg(aidr_el1);
3012 break;
3013 default:
3014 WARN_ON_ONCE(1);
3015 }
3016
3017 return true;
3018 }
3019
3020 static u64 __ro_after_init boot_cpu_midr_val;
3021 static u64 __ro_after_init boot_cpu_revidr_val;
3022 static u64 __ro_after_init boot_cpu_aidr_val;
3023
3024 static void init_imp_id_regs(void)
3025 {
3026 boot_cpu_midr_val = read_sysreg(midr_el1);
3027 boot_cpu_revidr_val = read_sysreg(revidr_el1);
3028 boot_cpu_aidr_val = read_sysreg(aidr_el1);
3029 }
3030
3031 static u64 reset_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
3032 {
3033 switch (reg_to_encoding(r)) {
3034 case SYS_MIDR_EL1:
3035 return boot_cpu_midr_val;
3036 case SYS_REVIDR_EL1:
3037 return boot_cpu_revidr_val;
3038 case SYS_AIDR_EL1:
3039 return boot_cpu_aidr_val;
3040 default:
3041 KVM_BUG_ON(1, vcpu->kvm);
3042 return 0;
3043 }
3044 }
3045
3046 static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
3047 u64 val)
3048 {
3049 struct kvm *kvm = vcpu->kvm;
3050 u64 expected;
3051
3052 guard(mutex)(&kvm->arch.config_lock);
3053
3054 expected = read_id_reg(vcpu, r);
3055 if (expected == val)
3056 return 0;
3057
3058 if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags))
3059 return -EINVAL;
3060
3061 /*
3062 * Once the VM has started the ID registers are immutable. Reject the
3063 * write if userspace tries to change it.
3064 */
3065 if (kvm_vm_has_ran_once(kvm))
3066 return -EBUSY;
3067
3068 /*
3069 * Any value is allowed for the implementation ID registers so long as
3070 * it is within the writable mask.
3071 */
3072 if ((val & r->val) != val)
3073 return -EINVAL;
3074
3075 kvm_set_vm_id_reg(kvm, reg_to_encoding(r), val);
3076 return 0;
3077 }
3078
3079 #define IMPLEMENTATION_ID(reg, mask) { \
3080 SYS_DESC(SYS_##reg), \
3081 .access = access_imp_id_reg, \
3082 .get_user = get_id_reg, \
3083 .set_user = set_imp_id_reg, \
3084 .reset = reset_imp_id_reg, \
3085 .val = mask, \
3086 }
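
/*
 * The writable-mask rule from set_imp_id_reg() in practice, given the
 * MIDR_EL1 entry below (mask GENMASK_ULL(31, 0)): values confined to the
 * low 32 bits pass, anything touching the upper bits is rejected with
 * -EINVAL. Hypothetical helper, for illustration only.
 */
static inline bool example_imp_id_val_allowed(const struct sys_reg_desc *r,
					      u64 val)
{
	return (val & r->val) == val;
}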
3087
3088 static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
3089 {
3090 __vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters);
3091 return vcpu->kvm->arch.nr_pmu_counters;
3092 }
3093
3094 /*
3095 * Architected system registers.
3096 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
3097 *
3098 * Debug handling: We do trap most, if not all, debug-related system
3099 * registers. The implementation is good enough to ensure that a guest
3100 * can use these with minimal performance degradation. The drawback is
3101 * that we don't implement any of the external debug architecture.
3102 * This should be revisited if we ever encounter a more demanding
3103 * guest...
3104 */
3105 static const struct sys_reg_desc sys_reg_descs[] = {
3106 DBG_BCR_BVR_WCR_WVR_EL1(0),
3107 DBG_BCR_BVR_WCR_WVR_EL1(1),
3108 { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
3109 { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
3110 DBG_BCR_BVR_WCR_WVR_EL1(2),
3111 DBG_BCR_BVR_WCR_WVR_EL1(3),
3112 DBG_BCR_BVR_WCR_WVR_EL1(4),
3113 DBG_BCR_BVR_WCR_WVR_EL1(5),
3114 DBG_BCR_BVR_WCR_WVR_EL1(6),
3115 DBG_BCR_BVR_WCR_WVR_EL1(7),
3116 DBG_BCR_BVR_WCR_WVR_EL1(8),
3117 DBG_BCR_BVR_WCR_WVR_EL1(9),
3118 DBG_BCR_BVR_WCR_WVR_EL1(10),
3119 DBG_BCR_BVR_WCR_WVR_EL1(11),
3120 DBG_BCR_BVR_WCR_WVR_EL1(12),
3121 DBG_BCR_BVR_WCR_WVR_EL1(13),
3122 DBG_BCR_BVR_WCR_WVR_EL1(14),
3123 DBG_BCR_BVR_WCR_WVR_EL1(15),
3124
3125 { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
3126 { SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
3127 { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
3128 OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
3129 { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
3130 { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
3131 { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
3132 { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
3133 { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
3134
3135 { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
3136 { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
3137 // DBGDTR[TR]X_EL0 share the same encoding
3138 { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
3139
3140 { SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 },
3141
3142 IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)),
3143 { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
3144 IMPLEMENTATION_ID(REVIDR_EL1, GENMASK_ULL(63, 0)),
3145
3146 /*
3147 * ID regs: all ID_SANITISED() entries here must have corresponding
3148 * entries in arm64_ftr_regs[].
3149 */
3150
3151 /* AArch64 mappings of the AArch32 ID registers */
3152 /* CRm=1 */
3153 AA32_ID_WRITABLE(ID_PFR0_EL1),
3154 AA32_ID_WRITABLE(ID_PFR1_EL1),
3155 { SYS_DESC(SYS_ID_DFR0_EL1),
3156 .access = access_id_reg,
3157 .get_user = get_id_reg,
3158 .set_user = set_id_dfr0_el1,
3159 .visibility = aa32_id_visibility,
3160 .reset = read_sanitised_id_dfr0_el1,
3161 .val = GENMASK(31, 0) },
3162 ID_HIDDEN(ID_AFR0_EL1),
3163 AA32_ID_WRITABLE(ID_MMFR0_EL1),
3164 AA32_ID_WRITABLE(ID_MMFR1_EL1),
3165 AA32_ID_WRITABLE(ID_MMFR2_EL1),
3166 AA32_ID_WRITABLE(ID_MMFR3_EL1),
3167
3168 /* CRm=2 */
3169 AA32_ID_WRITABLE(ID_ISAR0_EL1),
3170 AA32_ID_WRITABLE(ID_ISAR1_EL1),
3171 AA32_ID_WRITABLE(ID_ISAR2_EL1),
3172 AA32_ID_WRITABLE(ID_ISAR3_EL1),
3173 AA32_ID_WRITABLE(ID_ISAR4_EL1),
3174 AA32_ID_WRITABLE(ID_ISAR5_EL1),
3175 AA32_ID_WRITABLE(ID_MMFR4_EL1),
3176 AA32_ID_WRITABLE(ID_ISAR6_EL1),
3177
3178 /* CRm=3 */
3179 AA32_ID_WRITABLE(MVFR0_EL1),
3180 AA32_ID_WRITABLE(MVFR1_EL1),
3181 AA32_ID_WRITABLE(MVFR2_EL1),
3182 ID_UNALLOCATED(3,3),
3183 AA32_ID_WRITABLE(ID_PFR2_EL1),
3184 ID_HIDDEN(ID_DFR1_EL1),
3185 AA32_ID_WRITABLE(ID_MMFR5_EL1),
3186 ID_UNALLOCATED(3,7),
3187
3188 /* AArch64 ID registers */
3189 /* CRm=4 */
3190 ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1,
3191 ~(ID_AA64PFR0_EL1_AMU |
3192 ID_AA64PFR0_EL1_MPAM |
3193 ID_AA64PFR0_EL1_SVE |
3194 ID_AA64PFR0_EL1_AdvSIMD |
3195 ID_AA64PFR0_EL1_FP)),
3196 ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
3197 ~(ID_AA64PFR1_EL1_PFAR |
3198 ID_AA64PFR1_EL1_MTEX |
3199 ID_AA64PFR1_EL1_THE |
3200 ID_AA64PFR1_EL1_GCS |
3201 ID_AA64PFR1_EL1_MTE_frac |
3202 ID_AA64PFR1_EL1_NMI |
3203 ID_AA64PFR1_EL1_RNDR_trap |
3204 ID_AA64PFR1_EL1_SME |
3205 ID_AA64PFR1_EL1_RES0 |
3206 ID_AA64PFR1_EL1_MPAM_frac |
3207 ID_AA64PFR1_EL1_MTE)),
3208 ID_WRITABLE(ID_AA64PFR2_EL1,
3209 ID_AA64PFR2_EL1_FPMR |
3210 ID_AA64PFR2_EL1_MTEFAR |
3211 ID_AA64PFR2_EL1_MTESTOREONLY),
3212 ID_UNALLOCATED(4,3),
3213 ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
3214 ID_HIDDEN(ID_AA64SMFR0_EL1),
3215 ID_UNALLOCATED(4,6),
3216 ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
3217
3218 /* CRm=5 */
3219 /*
3220 * Prior to FEAT_Debugv8.9, the architecture defines context-aware
3221 * breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs).
3222 * KVM does not trap + emulate the breakpoint registers, and as such
3223 * cannot support a layout that misaligns with the underlying hardware.
3224 * While it may be possible to describe a subset that aligns with
3225 * hardware, just prevent changes to BRPs and CTX_CMPs altogether for
3226 * simplicity.
3227 *
3228 * See DDI0487K.a, section D2.8.3 Breakpoint types and linking
3229 * of breakpoints for more details.
3230 */
3231 ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1,
3232 ID_AA64DFR0_EL1_DoubleLock_MASK |
3233 ID_AA64DFR0_EL1_WRPs_MASK |
3234 ID_AA64DFR0_EL1_PMUVer_MASK |
3235 ID_AA64DFR0_EL1_DebugVer_MASK),
3236 ID_SANITISED(ID_AA64DFR1_EL1),
3237 ID_UNALLOCATED(5,2),
3238 ID_UNALLOCATED(5,3),
3239 ID_HIDDEN(ID_AA64AFR0_EL1),
3240 ID_HIDDEN(ID_AA64AFR1_EL1),
3241 ID_UNALLOCATED(5,6),
3242 ID_UNALLOCATED(5,7),
3243
3244 /* CRm=6 */
3245 ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
3246 ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
3247 ID_AA64ISAR1_EL1_GPA |
3248 ID_AA64ISAR1_EL1_API |
3249 ID_AA64ISAR1_EL1_APA)),
3250 ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
3251 ID_AA64ISAR2_EL1_APA3 |
3252 ID_AA64ISAR2_EL1_GPA3)),
3253 ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT |
3254 ID_AA64ISAR3_EL1_LSFE |
3255 ID_AA64ISAR3_EL1_FAMINMAX)),
3256 ID_UNALLOCATED(6,4),
3257 ID_UNALLOCATED(6,5),
3258 ID_UNALLOCATED(6,6),
3259 ID_UNALLOCATED(6,7),
3260
3261 /* CRm=7 */
3262 ID_FILTERED(ID_AA64MMFR0_EL1, id_aa64mmfr0_el1,
3263 ~(ID_AA64MMFR0_EL1_RES0 |
3264 ID_AA64MMFR0_EL1_ASIDBITS)),
3265 ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
3266 ID_AA64MMFR1_EL1_XNX |
3267 ID_AA64MMFR1_EL1_VH |
3268 ID_AA64MMFR1_EL1_VMIDBits)),
3269 ID_FILTERED(ID_AA64MMFR2_EL1,
3270 id_aa64mmfr2_el1, ~(ID_AA64MMFR2_EL1_RES0 |
3271 ID_AA64MMFR2_EL1_EVT |
3272 ID_AA64MMFR2_EL1_FWB |
3273 ID_AA64MMFR2_EL1_IDS |
3274 ID_AA64MMFR2_EL1_NV |
3275 ID_AA64MMFR2_EL1_CCIDX)),
3276 ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX |
3277 ID_AA64MMFR3_EL1_SCTLRX |
3278 ID_AA64MMFR3_EL1_S1PIE |
3279 ID_AA64MMFR3_EL1_S1POE)),
3280 ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac),
3281 ID_UNALLOCATED(7,5),
3282 ID_UNALLOCATED(7,6),
3283 ID_UNALLOCATED(7,7),
3284
3285 { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
3286 { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
3287 { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
3288 { SYS_DESC(SYS_SCTLR2_EL1), access_vm_reg, reset_val, SCTLR2_EL1, 0,
3289 .visibility = sctlr2_visibility },
3290
3291 MTE_REG(RGSR_EL1),
3292 MTE_REG(GCR_EL1),
3293
3294 { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
3295 { SYS_DESC(SYS_TRFCR_EL1), undef_access },
3296 { SYS_DESC(SYS_SMPRI_EL1), undef_access },
3297 { SYS_DESC(SYS_SMCR_EL1), undef_access },
3298 { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
3299 { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
3300 { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
3301 { SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0,
3302 .visibility = tcr2_visibility },
3303
3304 PTRAUTH_KEY(APIA),
3305 PTRAUTH_KEY(APIB),
3306 PTRAUTH_KEY(APDA),
3307 PTRAUTH_KEY(APDB),
3308 PTRAUTH_KEY(APGA),
3309
3310 { SYS_DESC(SYS_SPSR_EL1), access_spsr },
3311 { SYS_DESC(SYS_ELR_EL1), access_elr },
3312
3313 { SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
3314
3315 { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
3316 { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
3317 { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
3318
3319 { SYS_DESC(SYS_ERRIDR_EL1), access_ras },
3320 { SYS_DESC(SYS_ERRSELR_EL1), access_ras },
3321 { SYS_DESC(SYS_ERXFR_EL1), access_ras },
3322 { SYS_DESC(SYS_ERXCTLR_EL1), access_ras },
3323 { SYS_DESC(SYS_ERXSTATUS_EL1), access_ras },
3324 { SYS_DESC(SYS_ERXADDR_EL1), access_ras },
3325 { SYS_DESC(SYS_ERXPFGF_EL1), access_ras },
3326 { SYS_DESC(SYS_ERXPFGCTL_EL1), access_ras },
3327 { SYS_DESC(SYS_ERXPFGCDN_EL1), access_ras },
3328 { SYS_DESC(SYS_ERXMISC0_EL1), access_ras },
3329 { SYS_DESC(SYS_ERXMISC1_EL1), access_ras },
3330 { SYS_DESC(SYS_ERXMISC2_EL1), access_ras },
3331 { SYS_DESC(SYS_ERXMISC3_EL1), access_ras },
3332
3333 MTE_REG(TFSR_EL1),
3334 MTE_REG(TFSRE0_EL1),
3335
3336 { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
3337 { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
3338
3339 { SYS_DESC(SYS_PMSCR_EL1), undef_access },
3340 { SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
3341 { SYS_DESC(SYS_PMSICR_EL1), undef_access },
3342 { SYS_DESC(SYS_PMSIRR_EL1), undef_access },
3343 { SYS_DESC(SYS_PMSFCR_EL1), undef_access },
3344 { SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
3345 { SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
3346 { SYS_DESC(SYS_PMSIDR_EL1), undef_access },
3347 { SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
3348 { SYS_DESC(SYS_PMBPTR_EL1), undef_access },
3349 { SYS_DESC(SYS_PMBSR_EL1), undef_access },
3350 { SYS_DESC(SYS_PMSDSFR_EL1), undef_access },
3351 /* PMBIDR_EL1 is not trapped */
3352
3353 { PMU_SYS_REG(PMINTENSET_EL1),
3354 .access = access_pminten, .reg = PMINTENSET_EL1,
3355 .get_user = get_pmreg, .set_user = set_pmreg },
3356 { PMU_SYS_REG(PMINTENCLR_EL1),
3357 .access = access_pminten, .reg = PMINTENSET_EL1,
3358 .get_user = get_pmreg, .set_user = set_pmreg },
3359 { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
3360
3361 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
3362 { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1,
3363 .visibility = s1pie_visibility },
3364 { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1,
3365 .visibility = s1pie_visibility },
3366 { SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1,
3367 .visibility = s1poe_visibility },
3368 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
3369
3370 { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
3371 { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
3372 { SYS_DESC(SYS_LORN_EL1), trap_loregion },
3373 { SYS_DESC(SYS_LORC_EL1), trap_loregion },
3374 { SYS_DESC(SYS_MPAMIDR_EL1), undef_access },
3375 { SYS_DESC(SYS_LORID_EL1), trap_loregion },
3376
3377 { SYS_DESC(SYS_MPAM1_EL1), undef_access },
3378 { SYS_DESC(SYS_MPAM0_EL1), undef_access },
3379 { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
3380 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
3381
3382 { SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
3383 { SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
3384 { SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
3385 { SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
3386 { SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
3387 { SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
3388 { SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
3389 { SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
3390 { SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
3391 { SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
3392 { SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
3393 { SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
3394 { SYS_DESC(SYS_ICC_DIR_EL1), access_gic_dir },
3395 { SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
3396 { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
3397 { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
3398 { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
3399 { SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
3400 { SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
3401 { SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
3402 { SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
3403 { SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
3404 { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
3405 { SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
3406 { SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
3407
3408 { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
3409 { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
3410
3411 { SYS_DESC(SYS_ACCDATA_EL1), undef_access },
3412
3413 { SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
3414
3415 { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0 },
3416
3417 { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
3418 { SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
3419 .set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
3420 IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)),
3421 { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
3422 ID_FILTERED(CTR_EL0, ctr_el0,
3423 CTR_EL0_DIC_MASK |
3424 CTR_EL0_IDC_MASK |
3425 CTR_EL0_DminLine_MASK |
3426 CTR_EL0_L1Ip_MASK |
3427 CTR_EL0_IminLine_MASK),
3428 { SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility },
3429 { SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
3430
3431 { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
3432 .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
3433 { PMU_SYS_REG(PMCNTENSET_EL0),
3434 .access = access_pmcnten, .reg = PMCNTENSET_EL0,
3435 .get_user = get_pmreg, .set_user = set_pmreg },
3436 { PMU_SYS_REG(PMCNTENCLR_EL0),
3437 .access = access_pmcnten, .reg = PMCNTENSET_EL0,
3438 .get_user = get_pmreg, .set_user = set_pmreg },
3439 { PMU_SYS_REG(PMOVSCLR_EL0),
3440 .access = access_pmovs, .reg = PMOVSSET_EL0,
3441 .get_user = get_pmreg, .set_user = set_pmreg },
3442 /*
3443 * PMSWINC_EL0 is exposed to userspace as RAZ/WI, as it was
3444 * (pointlessly) advertised in the past...
3445 */
3446 { PMU_SYS_REG(PMSWINC_EL0),
3447 .get_user = get_raz_reg, .set_user = set_wi_reg,
3448 .access = access_pmswinc, .reset = NULL },
3449 { PMU_SYS_REG(PMSELR_EL0),
3450 .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
3451 { PMU_SYS_REG(PMCEID0_EL0),
3452 .access = access_pmceid, .reset = NULL },
3453 { PMU_SYS_REG(PMCEID1_EL0),
3454 .access = access_pmceid, .reset = NULL },
3455 { PMU_SYS_REG(PMCCNTR_EL0),
3456 .access = access_pmu_evcntr, .reset = reset_unknown,
3457 .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr,
3458 .set_user = set_pmu_evcntr },
3459 { PMU_SYS_REG(PMXEVTYPER_EL0),
3460 .access = access_pmu_evtyper, .reset = NULL },
3461 { PMU_SYS_REG(PMXEVCNTR_EL0),
3462 .access = access_pmu_evcntr, .reset = NULL },
3463 /*
3464 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
3465 * in 32bit mode. Here we choose to reset it as zero for consistency.
3466 */
3467 { PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
3468 .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
3469 { PMU_SYS_REG(PMOVSSET_EL0),
3470 .access = access_pmovs, .reg = PMOVSSET_EL0,
3471 .get_user = get_pmreg, .set_user = set_pmreg },
3472
3473 { SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0,
3474 .visibility = s1poe_visibility },
3475 { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
3476 { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
3477 { SYS_DESC(SYS_TPIDR2_EL0), undef_access },
3478
3479 { SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
3480
3481 { SYS_DESC(SYS_AMCR_EL0), undef_access },
3482 { SYS_DESC(SYS_AMCFGR_EL0), undef_access },
3483 { SYS_DESC(SYS_AMCGCR_EL0), undef_access },
3484 { SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
3485 { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
3486 { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
3487 { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
3488 { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
3489 AMU_AMEVCNTR0_EL0(0),
3490 AMU_AMEVCNTR0_EL0(1),
3491 AMU_AMEVCNTR0_EL0(2),
3492 AMU_AMEVCNTR0_EL0(3),
3493 AMU_AMEVCNTR0_EL0(4),
3494 AMU_AMEVCNTR0_EL0(5),
3495 AMU_AMEVCNTR0_EL0(6),
3496 AMU_AMEVCNTR0_EL0(7),
3497 AMU_AMEVCNTR0_EL0(8),
3498 AMU_AMEVCNTR0_EL0(9),
3499 AMU_AMEVCNTR0_EL0(10),
3500 AMU_AMEVCNTR0_EL0(11),
3501 AMU_AMEVCNTR0_EL0(12),
3502 AMU_AMEVCNTR0_EL0(13),
3503 AMU_AMEVCNTR0_EL0(14),
3504 AMU_AMEVCNTR0_EL0(15),
3505 AMU_AMEVTYPER0_EL0(0),
3506 AMU_AMEVTYPER0_EL0(1),
3507 AMU_AMEVTYPER0_EL0(2),
3508 AMU_AMEVTYPER0_EL0(3),
3509 AMU_AMEVTYPER0_EL0(4),
3510 AMU_AMEVTYPER0_EL0(5),
3511 AMU_AMEVTYPER0_EL0(6),
3512 AMU_AMEVTYPER0_EL0(7),
3513 AMU_AMEVTYPER0_EL0(8),
3514 AMU_AMEVTYPER0_EL0(9),
3515 AMU_AMEVTYPER0_EL0(10),
3516 AMU_AMEVTYPER0_EL0(11),
3517 AMU_AMEVTYPER0_EL0(12),
3518 AMU_AMEVTYPER0_EL0(13),
3519 AMU_AMEVTYPER0_EL0(14),
3520 AMU_AMEVTYPER0_EL0(15),
3521 AMU_AMEVCNTR1_EL0(0),
3522 AMU_AMEVCNTR1_EL0(1),
3523 AMU_AMEVCNTR1_EL0(2),
3524 AMU_AMEVCNTR1_EL0(3),
3525 AMU_AMEVCNTR1_EL0(4),
3526 AMU_AMEVCNTR1_EL0(5),
3527 AMU_AMEVCNTR1_EL0(6),
3528 AMU_AMEVCNTR1_EL0(7),
3529 AMU_AMEVCNTR1_EL0(8),
3530 AMU_AMEVCNTR1_EL0(9),
3531 AMU_AMEVCNTR1_EL0(10),
3532 AMU_AMEVCNTR1_EL0(11),
3533 AMU_AMEVCNTR1_EL0(12),
3534 AMU_AMEVCNTR1_EL0(13),
3535 AMU_AMEVCNTR1_EL0(14),
3536 AMU_AMEVCNTR1_EL0(15),
3537 AMU_AMEVTYPER1_EL0(0),
3538 AMU_AMEVTYPER1_EL0(1),
3539 AMU_AMEVTYPER1_EL0(2),
3540 AMU_AMEVTYPER1_EL0(3),
3541 AMU_AMEVTYPER1_EL0(4),
3542 AMU_AMEVTYPER1_EL0(5),
3543 AMU_AMEVTYPER1_EL0(6),
3544 AMU_AMEVTYPER1_EL0(7),
3545 AMU_AMEVTYPER1_EL0(8),
3546 AMU_AMEVTYPER1_EL0(9),
3547 AMU_AMEVTYPER1_EL0(10),
3548 AMU_AMEVTYPER1_EL0(11),
3549 AMU_AMEVTYPER1_EL0(12),
3550 AMU_AMEVTYPER1_EL0(13),
3551 AMU_AMEVTYPER1_EL0(14),
3552 AMU_AMEVTYPER1_EL0(15),
3553
3554 { SYS_DESC(SYS_CNTPCT_EL0), .access = access_arch_timer,
3555 .get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
3556 { SYS_DESC(SYS_CNTVCT_EL0), .access = access_arch_timer,
3557 .get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
3558 { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
3559 { SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer },
3560 { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
3561 TIMER_REG(CNTP_CTL_EL0, NULL),
3562 TIMER_REG(CNTP_CVAL_EL0, NULL),
3563
3564 { SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer },
3565 TIMER_REG(CNTV_CTL_EL0, NULL),
3566 TIMER_REG(CNTV_CVAL_EL0, NULL),
3567
3568 /* PMEVCNTRn_EL0 */
3569 PMU_PMEVCNTR_EL0(0),
3570 PMU_PMEVCNTR_EL0(1),
3571 PMU_PMEVCNTR_EL0(2),
3572 PMU_PMEVCNTR_EL0(3),
3573 PMU_PMEVCNTR_EL0(4),
3574 PMU_PMEVCNTR_EL0(5),
3575 PMU_PMEVCNTR_EL0(6),
3576 PMU_PMEVCNTR_EL0(7),
3577 PMU_PMEVCNTR_EL0(8),
3578 PMU_PMEVCNTR_EL0(9),
3579 PMU_PMEVCNTR_EL0(10),
3580 PMU_PMEVCNTR_EL0(11),
3581 PMU_PMEVCNTR_EL0(12),
3582 PMU_PMEVCNTR_EL0(13),
3583 PMU_PMEVCNTR_EL0(14),
3584 PMU_PMEVCNTR_EL0(15),
3585 PMU_PMEVCNTR_EL0(16),
3586 PMU_PMEVCNTR_EL0(17),
3587 PMU_PMEVCNTR_EL0(18),
3588 PMU_PMEVCNTR_EL0(19),
3589 PMU_PMEVCNTR_EL0(20),
3590 PMU_PMEVCNTR_EL0(21),
3591 PMU_PMEVCNTR_EL0(22),
3592 PMU_PMEVCNTR_EL0(23),
3593 PMU_PMEVCNTR_EL0(24),
3594 PMU_PMEVCNTR_EL0(25),
3595 PMU_PMEVCNTR_EL0(26),
3596 PMU_PMEVCNTR_EL0(27),
3597 PMU_PMEVCNTR_EL0(28),
3598 PMU_PMEVCNTR_EL0(29),
3599 PMU_PMEVCNTR_EL0(30),
3600 /* PMEVTYPERn_EL0 */
3601 PMU_PMEVTYPER_EL0(0),
3602 PMU_PMEVTYPER_EL0(1),
3603 PMU_PMEVTYPER_EL0(2),
3604 PMU_PMEVTYPER_EL0(3),
3605 PMU_PMEVTYPER_EL0(4),
3606 PMU_PMEVTYPER_EL0(5),
3607 PMU_PMEVTYPER_EL0(6),
3608 PMU_PMEVTYPER_EL0(7),
3609 PMU_PMEVTYPER_EL0(8),
3610 PMU_PMEVTYPER_EL0(9),
3611 PMU_PMEVTYPER_EL0(10),
3612 PMU_PMEVTYPER_EL0(11),
3613 PMU_PMEVTYPER_EL0(12),
3614 PMU_PMEVTYPER_EL0(13),
3615 PMU_PMEVTYPER_EL0(14),
3616 PMU_PMEVTYPER_EL0(15),
3617 PMU_PMEVTYPER_EL0(16),
3618 PMU_PMEVTYPER_EL0(17),
3619 PMU_PMEVTYPER_EL0(18),
3620 PMU_PMEVTYPER_EL0(19),
3621 PMU_PMEVTYPER_EL0(20),
3622 PMU_PMEVTYPER_EL0(21),
3623 PMU_PMEVTYPER_EL0(22),
3624 PMU_PMEVTYPER_EL0(23),
3625 PMU_PMEVTYPER_EL0(24),
3626 PMU_PMEVTYPER_EL0(25),
3627 PMU_PMEVTYPER_EL0(26),
3628 PMU_PMEVTYPER_EL0(27),
3629 PMU_PMEVTYPER_EL0(28),
3630 PMU_PMEVTYPER_EL0(29),
3631 PMU_PMEVTYPER_EL0(30),
3632 /*
3633 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
3634 * in 32bit mode. Here we choose to reset it as zero for consistency.
3635 */
3636 { PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
3637 .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
3638
3639 EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
3640 EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
3641 EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
3642 EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
3643 EL2_REG_FILTERED(SCTLR2_EL2, access_vm_reg, reset_val, 0,
3644 sctlr2_el2_visibility),
3645 EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
3646 EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0),
3647 EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
3648 EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
3649 EL2_REG_VNCR_FILT(HFGRTR_EL2, fgt_visibility),
3650 EL2_REG_VNCR_FILT(HFGWTR_EL2, fgt_visibility),
3651 EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
3652 EL2_REG_VNCR(HACR_EL2, reset_val, 0),
3653
3654 EL2_REG_FILTERED(ZCR_EL2, access_zcr_el2, reset_val, 0,
3655 sve_el2_visibility),
3656
3657 EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
3658
3659 EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
3660 EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
3661 EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
3662 EL2_REG_FILTERED(TCR2_EL2, access_rw, reset_val, TCR2_EL2_RES1,
3663 tcr2_el2_visibility),
3664 EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
3665 EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
3666 EL2_REG_FILTERED(VNCR_EL2, bad_vncr_trap, reset_val, 0,
3667 vncr_el2_visibility),
3668
3669 { SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 },
3670 EL2_REG_VNCR_FILT(HDFGRTR2_EL2, fgt2_visibility),
3671 EL2_REG_VNCR_FILT(HDFGWTR2_EL2, fgt2_visibility),
3672 EL2_REG_VNCR_FILT(HFGRTR2_EL2, fgt2_visibility),
3673 EL2_REG_VNCR_FILT(HFGWTR2_EL2, fgt2_visibility),
3674 EL2_REG_VNCR_FILT(HDFGRTR_EL2, fgt_visibility),
3675 EL2_REG_VNCR_FILT(HDFGWTR_EL2, fgt_visibility),
3676 EL2_REG_VNCR_FILT(HAFGRTR_EL2, fgt_visibility),
3677 EL2_REG_VNCR_FILT(HFGITR2_EL2, fgt2_visibility),
3678 EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
3679 EL2_REG_REDIR(ELR_EL2, reset_val, 0),
3680 { SYS_DESC(SYS_SP_EL1), access_sp_el1 },
3681
3682 /* AArch32 SPSR_* are RES0 if trapped from a NV guest */
3683 { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi },
3684 { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi },
3685 { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi },
3686 { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi },
3687
3688 { SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 },
3689 EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
3690 EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
3691 EL2_REG_REDIR(ESR_EL2, reset_val, 0),
3692 EL2_REG_VNCR(VSESR_EL2, reset_unknown, 0),
3693 { SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 },
3694
3695 EL2_REG_REDIR(FAR_EL2, reset_val, 0),
3696 EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
3697
3698 EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
3699 EL2_REG_FILTERED(PIRE0_EL2, access_rw, reset_val, 0,
3700 s1pie_el2_visibility),
3701 EL2_REG_FILTERED(PIR_EL2, access_rw, reset_val, 0,
3702 s1pie_el2_visibility),
3703 EL2_REG_FILTERED(POR_EL2, access_rw, reset_val, 0,
3704 s1poe_el2_visibility),
3705 EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
3706 { SYS_DESC(SYS_MPAMHCR_EL2), undef_access },
3707 { SYS_DESC(SYS_MPAMVPMV_EL2), undef_access },
3708 { SYS_DESC(SYS_MPAM2_EL2), undef_access },
3709 { SYS_DESC(SYS_MPAMVPM0_EL2), undef_access },
3710 { SYS_DESC(SYS_MPAMVPM1_EL2), undef_access },
3711 { SYS_DESC(SYS_MPAMVPM2_EL2), undef_access },
3712 { SYS_DESC(SYS_MPAMVPM3_EL2), undef_access },
3713 { SYS_DESC(SYS_MPAMVPM4_EL2), undef_access },
3714 { SYS_DESC(SYS_MPAMVPM5_EL2), undef_access },
3715 { SYS_DESC(SYS_MPAMVPM6_EL2), undef_access },
3716 { SYS_DESC(SYS_MPAMVPM7_EL2), undef_access },
3717
3718 EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
3719 { SYS_DESC(SYS_RVBAR_EL2), undef_access },
3720 { SYS_DESC(SYS_RMR_EL2), undef_access },
3721 EL2_REG_VNCR(VDISR_EL2, reset_unknown, 0),
3722
3723 EL2_REG_VNCR_GICv3(ICH_AP0R0_EL2),
3724 EL2_REG_VNCR_GICv3(ICH_AP0R1_EL2),
3725 EL2_REG_VNCR_GICv3(ICH_AP0R2_EL2),
3726 EL2_REG_VNCR_GICv3(ICH_AP0R3_EL2),
3727 EL2_REG_VNCR_GICv3(ICH_AP1R0_EL2),
3728 EL2_REG_VNCR_GICv3(ICH_AP1R1_EL2),
3729 EL2_REG_VNCR_GICv3(ICH_AP1R2_EL2),
3730 EL2_REG_VNCR_GICv3(ICH_AP1R3_EL2),
3731
3732 { SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre },
3733
3734 EL2_REG_VNCR_GICv3(ICH_HCR_EL2),
3735 { SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr },
3736 { SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr },
3737 { SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr },
3738 { SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr },
3739 EL2_REG_VNCR_GICv3(ICH_VMCR_EL2),
3740
3741 EL2_REG_VNCR_GICv3(ICH_LR0_EL2),
3742 EL2_REG_VNCR_GICv3(ICH_LR1_EL2),
3743 EL2_REG_VNCR_GICv3(ICH_LR2_EL2),
3744 EL2_REG_VNCR_GICv3(ICH_LR3_EL2),
3745 EL2_REG_VNCR_GICv3(ICH_LR4_EL2),
3746 EL2_REG_VNCR_GICv3(ICH_LR5_EL2),
3747 EL2_REG_VNCR_GICv3(ICH_LR6_EL2),
3748 EL2_REG_VNCR_GICv3(ICH_LR7_EL2),
3749 EL2_REG_VNCR_GICv3(ICH_LR8_EL2),
3750 EL2_REG_VNCR_GICv3(ICH_LR9_EL2),
3751 EL2_REG_VNCR_GICv3(ICH_LR10_EL2),
3752 EL2_REG_VNCR_GICv3(ICH_LR11_EL2),
3753 EL2_REG_VNCR_GICv3(ICH_LR12_EL2),
3754 EL2_REG_VNCR_GICv3(ICH_LR13_EL2),
3755 EL2_REG_VNCR_GICv3(ICH_LR14_EL2),
3756 EL2_REG_VNCR_GICv3(ICH_LR15_EL2),
3757
3758 EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
3759 EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
3760
3761 EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
3762 EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
3763 { SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer },
3764 TIMER_REG(CNTHP_CTL_EL2, el2_visibility),
3765 TIMER_REG(CNTHP_CVAL_EL2, el2_visibility),
3766
3767 { SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer, .visibility = cnthv_visibility },
3768 TIMER_REG(CNTHV_CTL_EL2, cnthv_visibility),
3769 TIMER_REG(CNTHV_CVAL_EL2, cnthv_visibility),
3770
3771 { SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
3772
3773 { SYS_DESC(SYS_CNTP_TVAL_EL02), access_arch_timer },
3774 { SYS_DESC(SYS_CNTP_CTL_EL02), access_arch_timer },
3775 { SYS_DESC(SYS_CNTP_CVAL_EL02), access_arch_timer },
3776
3777 { SYS_DESC(SYS_CNTV_TVAL_EL02), access_arch_timer },
3778 { SYS_DESC(SYS_CNTV_CTL_EL02), access_arch_timer },
3779 { SYS_DESC(SYS_CNTV_CVAL_EL02), access_arch_timer },
3780
3781 EL2_REG(SP_EL2, NULL, reset_unknown, 0),
3782 };
3783
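/*
 * AT S1E{0,1}* address translation handlers. A non-zero return from the
 * __kvm_at_*() helpers means the translation could not be emulated;
 * returning false then leaves the PC untouched so the instruction is not
 * skipped (see perform_access()).
 */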
3784 static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3785 const struct sys_reg_desc *r)
3786 {
3787 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3788
3789 if (__kvm_at_s1e01(vcpu, op, p->regval))
3790 return false;
3791
3792 return true;
3793 }
3794
3795 static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3796 const struct sys_reg_desc *r)
3797 {
3798 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3799
3800 /* There is no FGT associated with AT S1E2A :-( */
3801 if (op == OP_AT_S1E2A &&
3802 !kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) {
3803 kvm_inject_undefined(vcpu);
3804 return false;
3805 }
3806
3807 if (__kvm_at_s1e2(vcpu, op, p->regval))
3808 return false;
3809
3810 return true;
3811 }
3812
3813 static bool handle_at_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3814 const struct sys_reg_desc *r)
3815 {
3816 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3817
3818 if (__kvm_at_s12(vcpu, op, p->regval))
3819 return false;
3820
3821 return true;
3822 }
3823
3824 static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vcpu, u32 instr)
3825 {
3826 struct kvm *kvm = vcpu->kvm;
3827 u8 CRm = sys_reg_CRm(instr);
3828
3829 if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
3830 !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
3831 return false;
3832
3833 if (CRm == TLBI_CRm_nROS &&
3834 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3835 return false;
3836
3837 return true;
3838 }
3839
3840 static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3841 const struct sys_reg_desc *r)
3842 {
3843 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3844
3845 if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
3846 return undef_access(vcpu, p, r);
3847
3848 write_lock(&vcpu->kvm->mmu_lock);
3849
3850 /*
3851 * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
3852 * corresponding VMIDs.
3853 */
3854 kvm_nested_s2_unmap(vcpu->kvm, true);
3855
3856 write_unlock(&vcpu->kvm->mmu_lock);
3857
3858 return true;
3859 }
3860
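/*
 * Validate a TLBI IPAS2 encoding: the nXS forms require FEAT_XS, the
 * range (RIPAS2*) forms FEAT_TLBIRANGE, and the outer-shareable forms
 * FEAT_TLBIOS, all advertised through the ID registers checked below.
 */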
3861 static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vcpu, u32 instr)
3862 {
3863 struct kvm *kvm = vcpu->kvm;
3864 u8 CRm = sys_reg_CRm(instr);
3865 u8 Op2 = sys_reg_Op2(instr);
3866
3867 if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
3868 !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
3869 return false;
3870
3871 if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) &&
3872 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
3873 return false;
3874
3875 if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) &&
3876 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3877 return false;
3878
3879 if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) &&
3880 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
3881 return false;
3882
3883 return true;
3884 }
3885
3886 /* Only defined here as this is an internal "abstraction" */
3887 union tlbi_info {
3888 struct {
3889 u64 start;
3890 u64 size;
3891 } range;
3892
3893 struct {
3894 u64 addr;
3895 } ipa;
3896
3897 struct {
3898 u64 addr;
3899 u32 encoding;
3900 } va;
3901 };
3902
3903 static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
3904 const union tlbi_info *info)
3905 {
3906 /*
3907 * The unmap operation is allowed to drop the MMU lock and block, which
3908 * means that @mmu could be used for a different context than the one
3909 * currently being invalidated.
3910 *
3911 * This behavior is still safe, as:
3912 *
3913 * 1) The vCPU(s) that recycled the MMU are responsible for invalidating
3914 * the entire MMU before reusing it, which still honors the intent
3915 * of a TLBI.
3916 *
3917 * 2) Until the guest TLBI instruction is 'retired' (i.e. increment PC
3918 * and ERET to the guest), other vCPUs are allowed to use stale
3919 * translations.
3920 *
3921 * 3) Accidentally unmapping an unrelated MMU context is nonfatal, and
3922 * at worst may cause more aborts for shadow stage-2 fills.
3923 *
3924 * Dropping the MMU lock also implies that shadow stage-2 fills could
3925 * happen behind the back of the TLBI. This is still safe, though, as
3926 * the L1 needs to put its stage-2 in a consistent state before doing
3927 * the TLBI.
3928 */
3929 kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true);
3930 }
3931
3932 static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3933 const struct sys_reg_desc *r)
3934 {
3935 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3936 u64 limit, vttbr;
3937
3938 if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
3939 return undef_access(vcpu, p, r);
3940
3941 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3942 limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));
3943
3944 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3945 &(union tlbi_info) {
3946 .range = {
3947 .start = 0,
3948 .size = limit,
3949 },
3950 },
3951 s2_mmu_unmap_range);
3952
3953 return true;
3954 }
3955
3956 static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3957 const struct sys_reg_desc *r)
3958 {
3959 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3960 u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3961 u64 base, range;
3962
3963 if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
3964 return undef_access(vcpu, p, r);
3965
3966 /*
3967 * Because the shadow S2 structure doesn't necessarily reflect that
3968 * of the guest's S2 (different base granule size, for example), we
3969 * decide to ignore TTL and only use the described range.
3970 */
3971 base = decode_range_tlbi(p->regval, &range, NULL);
3972
3973 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3974 &(union tlbi_info) {
3975 .range = {
3976 .start = base,
3977 .size = range,
3978 },
3979 },
3980 s2_mmu_unmap_range);
3981
3982 return true;
3983 }
3984
3985 static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
3986 const union tlbi_info *info)
3987 {
3988 unsigned long max_size;
3989 u64 base_addr;
3990
3991 /*
3992 * We drop a number of things from the supplied value:
3993 *
3994 * - NS bit: we're non-secure only.
3995 *
3996 * - IPA[51:48]: We don't support 52bit IPA just yet...
3997 *
3998 * And of course, shift the IPA field back into an actual address.
3999 */
4000 base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12;
4001 max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
4002 base_addr &= ~(max_size - 1);
4003
4004 /*
4005 * See comment in s2_mmu_unmap_range() for why this is allowed to
4006 * reschedule.
4007 */
4008 kvm_stage2_unmap_range(mmu, base_addr, max_size, true);
4009 }
4010
4011 static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
4012 const struct sys_reg_desc *r)
4013 {
4014 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
4015 u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
4016
4017 if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
4018 return undef_access(vcpu, p, r);
4019
4020 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
4021 &(union tlbi_info) {
4022 .ipa = {
4023 .addr = p->regval,
4024 },
4025 },
4026 s2_mmu_unmap_ipa);
4027
4028 return true;
4029 }
4030
4031 static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
4032 const union tlbi_info *info)
4033 {
4034 WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
4035 }
4036
4037 static bool handle_tlbi_el2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
4038 const struct sys_reg_desc *r)
4039 {
4040 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
4041
4042 if (!kvm_supported_tlbi_s1e2_op(vcpu, sys_encoding))
4043 return undef_access(vcpu, p, r);
4044
4045 kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
4046 return true;
4047 }
4048
4049 static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
4050 const struct sys_reg_desc *r)
4051 {
4052 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
4053
4054 /*
4055 * If we're here, this is because we've trapped on an EL1 TLBI
4056 * instruction that affects the EL1 translation regime while
4057 * we're running in a context that doesn't allow us to let the
4058 * HW do its thing (aka vEL2):
4059 *
4060 * - HCR_EL2.E2H == 0 : a non-VHE guest
4061 * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
4062 *
4063 * Another possibility is that we are invalidating the EL2 context
4064 * using EL1 instructions, but that we landed here because we need
4065 * additional invalidation for structures that are not held in the
4066 * CPU TLBs (such as the VNCR pseudo-TLB and its EL2 mapping). In
4067 * that case, we are guaranteed that HCR_EL2.{E2H,TGE} == { 1, 1 }
4068 * as we don't allow an NV-capable L1 in a nVHE configuration.
4069 *
4070 * We don't expect these helpers to ever be called when running
4071 * in a vEL1 context.
4072 */
4073
4074 WARN_ON(!vcpu_is_el2(vcpu));
4075
4076 if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding))
4077 return undef_access(vcpu, p, r);
4078
4079 if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) {
4080 kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
4081 return true;
4082 }
4083
4084 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm,
4085 get_vmid(__vcpu_sys_reg(vcpu, VTTBR_EL2)),
4086 &(union tlbi_info) {
4087 .va = {
4088 .addr = p->regval,
4089 .encoding = sys_encoding,
4090 },
4091 },
4092 s2_mmu_tlbi_s1e1);
4093
4094 return true;
4095 }
4096
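/* Shorthand for a trapped system instruction and its access handler */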
4097 #define SYS_INSN(insn, access_fn) \
4098 { \
4099 SYS_DESC(OP_##insn), \
4100 .access = (access_fn), \
4101 }
4102
4103 static struct sys_reg_desc sys_insn_descs[] = {
4104 { SYS_DESC(SYS_DC_ISW), access_dcsw },
4105 { SYS_DESC(SYS_DC_IGSW), access_dcgsw },
4106 { SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
4107
4108 SYS_INSN(AT_S1E1R, handle_at_s1e01),
4109 SYS_INSN(AT_S1E1W, handle_at_s1e01),
4110 SYS_INSN(AT_S1E0R, handle_at_s1e01),
4111 SYS_INSN(AT_S1E0W, handle_at_s1e01),
4112 SYS_INSN(AT_S1E1RP, handle_at_s1e01),
4113 SYS_INSN(AT_S1E1WP, handle_at_s1e01),
4114
4115 { SYS_DESC(SYS_DC_CSW), access_dcsw },
4116 { SYS_DESC(SYS_DC_CGSW), access_dcgsw },
4117 { SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
4118 { SYS_DESC(SYS_DC_CISW), access_dcsw },
4119 { SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
4120 { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
4121
4122 SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1),
4123 SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1),
4124 SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1),
4125 SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1),
4126 SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1),
4127 SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1),
4128
4129 SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1),
4130 SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1),
4131 SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1),
4132 SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1),
4133
4134 SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1),
4135 SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1),
4136 SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1),
4137 SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1),
4138 SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1),
4139 SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1),
4140
4141 SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1),
4142 SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1),
4143 SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1),
4144 SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1),
4145
4146 SYS_INSN(TLBI_RVAE1, handle_tlbi_el1),
4147 SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1),
4148 SYS_INSN(TLBI_RVALE1, handle_tlbi_el1),
4149 SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1),
4150
4151 SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1),
4152 SYS_INSN(TLBI_VAE1, handle_tlbi_el1),
4153 SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1),
4154 SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
4155 SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
4156 SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),
4157
4158 SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1),
4159 SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1),
4160 SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1),
4161 SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1),
4162 SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1),
4163 SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1),
4164
4165 SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1),
4166 SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1),
4167 SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1),
4168 SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1),
4169
4170 SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1),
4171 SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1),
4172 SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1),
4173 SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1),
4174 SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1),
4175 SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1),
4176
4177 SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1),
4178 SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1),
4179 SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1),
4180 SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1),
4181
4182 SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1),
4183 SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1),
4184 SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1),
4185 SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1),
4186
4187 SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1),
4188 SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1),
4189 SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1),
4190 SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1),
4191 SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1),
4192 SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1),
4193
4194 SYS_INSN(AT_S1E2R, handle_at_s1e2),
4195 SYS_INSN(AT_S1E2W, handle_at_s1e2),
4196 SYS_INSN(AT_S12E1R, handle_at_s12),
4197 SYS_INSN(AT_S12E1W, handle_at_s12),
4198 SYS_INSN(AT_S12E0R, handle_at_s12),
4199 SYS_INSN(AT_S12E0W, handle_at_s12),
4200 SYS_INSN(AT_S1E2A, handle_at_s1e2),
4201
4202 SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is),
4203 SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is),
4204 SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
4205 SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),
4206
4207 SYS_INSN(TLBI_ALLE2OS, handle_tlbi_el2),
4208 SYS_INSN(TLBI_VAE2OS, handle_tlbi_el2),
4209 SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
4210 SYS_INSN(TLBI_VALE2OS, handle_tlbi_el2),
4211 SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),
4212
4213 SYS_INSN(TLBI_RVAE2IS, handle_tlbi_el2),
4214 SYS_INSN(TLBI_RVALE2IS, handle_tlbi_el2),
4215 SYS_INSN(TLBI_ALLE2IS, handle_tlbi_el2),
4216 SYS_INSN(TLBI_VAE2IS, handle_tlbi_el2),
4217
4218 SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
4219
4220 SYS_INSN(TLBI_VALE2IS, handle_tlbi_el2),
4221
4222 SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
4223 SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is),
4224 SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is),
4225 SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is),
4226 SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is),
4227 SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is),
4228 SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
4229 SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
4230 SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
4231 SYS_INSN(TLBI_RVAE2OS, handle_tlbi_el2),
4232 SYS_INSN(TLBI_RVALE2OS, handle_tlbi_el2),
4233 SYS_INSN(TLBI_RVAE2, handle_tlbi_el2),
4234 SYS_INSN(TLBI_RVALE2, handle_tlbi_el2),
4235 SYS_INSN(TLBI_ALLE2, handle_tlbi_el2),
4236 SYS_INSN(TLBI_VAE2, handle_tlbi_el2),
4237
4238 SYS_INSN(TLBI_ALLE1, handle_alle1is),
4239
4240 SYS_INSN(TLBI_VALE2, handle_tlbi_el2),
4241
4242 SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
4243
4244 SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is),
4245 SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is),
4246 SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
4247 SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),
4248
4249 SYS_INSN(TLBI_ALLE2OSNXS, handle_tlbi_el2),
4250 SYS_INSN(TLBI_VAE2OSNXS, handle_tlbi_el2),
4251 SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
4252 SYS_INSN(TLBI_VALE2OSNXS, handle_tlbi_el2),
4253 SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),
4254
4255 SYS_INSN(TLBI_RVAE2ISNXS, handle_tlbi_el2),
4256 SYS_INSN(TLBI_RVALE2ISNXS, handle_tlbi_el2),
4257 SYS_INSN(TLBI_ALLE2ISNXS, handle_tlbi_el2),
4258 SYS_INSN(TLBI_VAE2ISNXS, handle_tlbi_el2),
4259
4260 SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
4261 SYS_INSN(TLBI_VALE2ISNXS, handle_tlbi_el2),
4262 SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
4263 SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
4264 SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
4265 SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is),
4266 SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is),
4267 SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is),
4268 SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
4269 SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
4270 SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
4271 SYS_INSN(TLBI_RVAE2OSNXS, handle_tlbi_el2),
4272 SYS_INSN(TLBI_RVALE2OSNXS, handle_tlbi_el2),
4273 SYS_INSN(TLBI_RVAE2NXS, handle_tlbi_el2),
4274 SYS_INSN(TLBI_RVALE2NXS, handle_tlbi_el2),
4275 SYS_INSN(TLBI_ALLE2NXS, handle_tlbi_el2),
4276 SYS_INSN(TLBI_VAE2NXS, handle_tlbi_el2),
4277 SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
4278 SYS_INSN(TLBI_VALE2NXS, handle_tlbi_el2),
4279 SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
4280 };
4281
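/*
 * Synthesize an AArch32 DBGDIDR from ID_AA64DFR0_EL1: the WRPs, BRPs,
 * CTX_CMPs and DebugVer fields map onto bits [31:16], bit 15 is RES1,
 * and bits 14 and 12 are only set when EL3 is implemented.
 */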
4282 static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
4283 struct sys_reg_params *p,
4284 const struct sys_reg_desc *r)
4285 {
4286 if (p->is_write) {
4287 return ignore_write(vcpu, p);
4288 } else {
4289 u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
4290 u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
4291
4292 p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
4293 (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
4294 (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
4295 (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
4296 (1 << 15) | (el3 << 14) | (el3 << 12));
4297 return true;
4298 }
4299 }
4300
4301 /*
4302 * AArch32 debug register mappings
4303 *
4304 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
4305 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
4306 *
4307 * None of the other registers share their location, so treat them as
4308 * if they were 64bit.
4309 */
4310 #define DBG_BCR_BVR_WCR_WVR(n) \
4311 /* DBGBVRn */ \
4312 { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), \
4313 trap_dbg_wb_reg, NULL, n }, \
4314 /* DBGBCRn */ \
4315 { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_dbg_wb_reg, NULL, n }, \
4316 /* DBGWVRn */ \
4317 { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_dbg_wb_reg, NULL, n }, \
4318 /* DBGWCRn */ \
4319 { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_dbg_wb_reg, NULL, n }
4320
4321 #define DBGBXVR(n) \
4322 { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), \
4323 trap_dbg_wb_reg, NULL, n }
4324
4325 /*
4326 * Trapped cp14 registers. We generally ignore most of the external
4327 * debug registers, on the principle that they don't really make sense
4328 * to a guest. Revisit this one day, should this principle change.
4329 */
4330 static const struct sys_reg_desc cp14_regs[] = {
4331 /* DBGDIDR */
4332 { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
4333 /* DBGDTRRXext */
4334 { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
4335
4336 DBG_BCR_BVR_WCR_WVR(0),
4337 /* DBGDSCRint */
4338 { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
4339 DBG_BCR_BVR_WCR_WVR(1),
4340 /* DBGDCCINT */
4341 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
4342 /* DBGDSCRext */
4343 { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
4344 DBG_BCR_BVR_WCR_WVR(2),
4345 /* DBGDTR[RT]Xint */
4346 { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
4347 /* DBGDTR[RT]Xext */
4348 { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
4349 DBG_BCR_BVR_WCR_WVR(3),
4350 DBG_BCR_BVR_WCR_WVR(4),
4351 DBG_BCR_BVR_WCR_WVR(5),
4352 /* DBGWFAR */
4353 { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
4354 /* DBGOSECCR */
4355 { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
4356 DBG_BCR_BVR_WCR_WVR(6),
4357 /* DBGVCR */
4358 { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
4359 DBG_BCR_BVR_WCR_WVR(7),
4360 DBG_BCR_BVR_WCR_WVR(8),
4361 DBG_BCR_BVR_WCR_WVR(9),
4362 DBG_BCR_BVR_WCR_WVR(10),
4363 DBG_BCR_BVR_WCR_WVR(11),
4364 DBG_BCR_BVR_WCR_WVR(12),
4365 DBG_BCR_BVR_WCR_WVR(13),
4366 DBG_BCR_BVR_WCR_WVR(14),
4367 DBG_BCR_BVR_WCR_WVR(15),
4368
4369 /* DBGDRAR (32bit) */
4370 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
4371
4372 DBGBXVR(0),
4373 /* DBGOSLAR */
4374 { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
4375 DBGBXVR(1),
4376 /* DBGOSLSR */
4377 { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
4378 DBGBXVR(2),
4379 DBGBXVR(3),
4380 /* DBGOSDLR */
4381 { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
4382 DBGBXVR(4),
4383 /* DBGPRCR */
4384 { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
4385 DBGBXVR(5),
4386 DBGBXVR(6),
4387 DBGBXVR(7),
4388 DBGBXVR(8),
4389 DBGBXVR(9),
4390 DBGBXVR(10),
4391 DBGBXVR(11),
4392 DBGBXVR(12),
4393 DBGBXVR(13),
4394 DBGBXVR(14),
4395 DBGBXVR(15),
4396
4397 /* DBGDSAR (32bit) */
4398 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
4399
4400 /* DBGDEVID2 */
4401 { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
4402 /* DBGDEVID1 */
4403 { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
4404 /* DBGDEVID */
4405 { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
4406 /* DBGCLAIMSET */
4407 { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
4408 /* DBGCLAIMCLR */
4409 { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
4410 /* DBGAUTHSTATUS */
4411 { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
4412 };
4413
4414 /* Trapped cp14 64bit registers */
4415 static const struct sys_reg_desc cp14_64_regs[] = {
4416 /* DBGDRAR (64bit) */
4417 { Op1( 0), CRm( 1), .access = trap_raz_wi },
4418
4419 /* DBGDSAR (64bit) */
4420 { Op1( 0), CRm( 2), .access = trap_raz_wi },
4421 };
4422
4423 #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \
4424 AA32(_map), \
4425 Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \
4426 .visibility = pmu_visibility
4427
4428 /* Macro to expand the PMEVCNTRn register */
4429 #define PMU_PMEVCNTR(n) \
4430 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
4431 (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
4432 .access = access_pmu_evcntr }
4433
4434 /* Macro to expand the PMEVTYPERn register */
4435 #define PMU_PMEVTYPER(n) \
4436 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
4437 (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
4438 .access = access_pmu_evtyper }
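
/*
 * In both macros, CRm carries the top two bits of the event index
 * (0b10xx for counters, 0b11xx for typers) and Op2 carries n[2:0].
 */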
4439 /*
4440 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
4441 * depending on the way they are accessed (as a 32bit or a 64bit
4442 * register).
4443 */
4444 static const struct sys_reg_desc cp15_regs[] = {
4445 { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
4446 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
4447 /* ACTLR */
4448 { AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
4449 /* ACTLR2 */
4450 { AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
4451 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
4452 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
4453 /* TTBCR */
4454 { AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
4455 /* TTBCR2 */
4456 { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
4457 { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
4458 { CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
4459 /* DFSR */
4460 { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
4461 { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
4462 /* ADFSR */
4463 { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
4464 /* AIFSR */
4465 { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
4466 /* DFAR */
4467 { AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
4468 /* IFAR */
4469 { AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
4470
4471 /*
4472 * DC{C,I,CI}SW operations:
4473 */
4474 { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
4475 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
4476 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
4477
4478 /* PMU */
4479 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
4480 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
4481 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
4482 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
4483 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
4484 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
4485 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
4486 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
4487 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
4488 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
4489 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
4490 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
4491 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
4492 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
4493 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
4494 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
4495 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
4496 /* PMMIR */
4497 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
4498
4499 /* PRRR/MAIR0 */
4500 { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
4501 /* NMRR/MAIR1 */
4502 { AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
4503 /* AMAIR0 */
4504 { AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
4505 /* AMAIR1 */
4506 { AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
4507
4508 { CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
4509 { CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
4510 { CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
4511 { CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
4512 { CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
4513 { CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
4514 { CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
4515 { CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
4516 { CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
4517 { CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
4518 { CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
4519 { CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
4520 { CP15_SYS_DESC(SYS_ICC_DIR_EL1), access_gic_dir },
4521 { CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
4522 { CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
4523 { CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
4524 { CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
4525 { CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
4526 { CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
4527 { CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
4528 { CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
4529 { CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
4530
4531 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
4532
4533 /* Arch Timers */
4534 { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
4535 { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
4536
4537 /* PMEVCNTRn */
4538 PMU_PMEVCNTR(0),
4539 PMU_PMEVCNTR(1),
4540 PMU_PMEVCNTR(2),
4541 PMU_PMEVCNTR(3),
4542 PMU_PMEVCNTR(4),
4543 PMU_PMEVCNTR(5),
4544 PMU_PMEVCNTR(6),
4545 PMU_PMEVCNTR(7),
4546 PMU_PMEVCNTR(8),
4547 PMU_PMEVCNTR(9),
4548 PMU_PMEVCNTR(10),
4549 PMU_PMEVCNTR(11),
4550 PMU_PMEVCNTR(12),
4551 PMU_PMEVCNTR(13),
4552 PMU_PMEVCNTR(14),
4553 PMU_PMEVCNTR(15),
4554 PMU_PMEVCNTR(16),
4555 PMU_PMEVCNTR(17),
4556 PMU_PMEVCNTR(18),
4557 PMU_PMEVCNTR(19),
4558 PMU_PMEVCNTR(20),
4559 PMU_PMEVCNTR(21),
4560 PMU_PMEVCNTR(22),
4561 PMU_PMEVCNTR(23),
4562 PMU_PMEVCNTR(24),
4563 PMU_PMEVCNTR(25),
4564 PMU_PMEVCNTR(26),
4565 PMU_PMEVCNTR(27),
4566 PMU_PMEVCNTR(28),
4567 PMU_PMEVCNTR(29),
4568 PMU_PMEVCNTR(30),
4569 /* PMEVTYPERn */
4570 PMU_PMEVTYPER(0),
4571 PMU_PMEVTYPER(1),
4572 PMU_PMEVTYPER(2),
4573 PMU_PMEVTYPER(3),
4574 PMU_PMEVTYPER(4),
4575 PMU_PMEVTYPER(5),
4576 PMU_PMEVTYPER(6),
4577 PMU_PMEVTYPER(7),
4578 PMU_PMEVTYPER(8),
4579 PMU_PMEVTYPER(9),
4580 PMU_PMEVTYPER(10),
4581 PMU_PMEVTYPER(11),
4582 PMU_PMEVTYPER(12),
4583 PMU_PMEVTYPER(13),
4584 PMU_PMEVTYPER(14),
4585 PMU_PMEVTYPER(15),
4586 PMU_PMEVTYPER(16),
4587 PMU_PMEVTYPER(17),
4588 PMU_PMEVTYPER(18),
4589 PMU_PMEVTYPER(19),
4590 PMU_PMEVTYPER(20),
4591 PMU_PMEVTYPER(21),
4592 PMU_PMEVTYPER(22),
4593 PMU_PMEVTYPER(23),
4594 PMU_PMEVTYPER(24),
4595 PMU_PMEVTYPER(25),
4596 PMU_PMEVTYPER(26),
4597 PMU_PMEVTYPER(27),
4598 PMU_PMEVTYPER(28),
4599 PMU_PMEVTYPER(29),
4600 PMU_PMEVTYPER(30),
4601 /* PMCCFILTR */
4602 { CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
4603
4604 { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
4605 { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
4606
4607 /* CCSIDR2 */
4608 { Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },
4609
4610 { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
4611 };
4612
4613 static const struct sys_reg_desc cp15_64_regs[] = {
4614 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
4615 { CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
4616 { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
4617 { SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
4618 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
4619 { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
4620 { SYS_DESC(SYS_AARCH32_CNTVCT), access_arch_timer },
4621 { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
4622 { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
4623 { SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
4624 { SYS_DESC(SYS_AARCH32_CNTVCTSS), access_arch_timer },
4625 };
4626
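/*
 * Sanity-check a trap table at init time: entries that shadow vcpu state
 * must have a reset handler (when requested), and the table must be kept
 * sorted, as the lookup code relies on the ordering.
 */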
4627 static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
4628 bool reset_check)
4629 {
4630 unsigned int i;
4631
4632 for (i = 0; i < n; i++) {
4633 if (reset_check && table[i].reg && !table[i].reset) {
4634 kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
4635 &table[i], i, table[i].name);
4636 return false;
4637 }
4638
4639 if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
4640 kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
4641 &table[i], i, table[i - 1].name, table[i].name);
4642 return false;
4643 }
4644 }
4645
4646 return true;
4647 }
4648
4649 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
4650 {
4651 kvm_inject_undefined(vcpu);
4652 return 1;
4653 }
4654
4655 static void perform_access(struct kvm_vcpu *vcpu,
4656 struct sys_reg_params *params,
4657 const struct sys_reg_desc *r)
4658 {
4659 trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
4660
4661 /* Check for regs disabled by runtime config */
4662 if (sysreg_hidden(vcpu, r)) {
4663 kvm_inject_undefined(vcpu);
4664 return;
4665 }
4666
4667 /*
4668 * Not having an accessor means that we have configured a trap
4669 * that we don't know how to handle. This certainly qualifies
4670 * as a gross bug that should be fixed right away.
4671 */
4672 if (!r->access) {
4673 bad_trap(vcpu, params, r, "register access");
4674 return;
4675 }
4676
4677 /* Skip instruction if instructed so */
4678 if (likely(r->access(vcpu, params, r)))
4679 kvm_incr_pc(vcpu);
4680 }
4681
4682 /*
4683 * emulate_cp -- tries to match a sys_reg access in a handling table, and
4684 * calls the corresponding trap handler.
4685 *
4686 * @params: pointer to the descriptor of the access
4687 * @table: array of trap descriptors
4688 * @num: size of the trap descriptor array
4689 *
4690 * Return true if the access has been handled, false if not.
4691 */
4692 static bool emulate_cp(struct kvm_vcpu *vcpu,
4693 struct sys_reg_params *params,
4694 const struct sys_reg_desc *table,
4695 size_t num)
4696 {
4697 const struct sys_reg_desc *r;
4698
4699 if (!table)
4700 return false; /* Not handled */
4701
4702 r = find_reg(params, table, num);
4703
4704 if (r) {
4705 perform_access(vcpu, params, r);
4706 return true;
4707 }
4708
4709 /* Not handled */
4710 return false;
4711 }
4712
4713 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
4714 struct sys_reg_params *params)
4715 {
4716 u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
4717 int cp = -1;
4718
4719 switch (esr_ec) {
4720 case ESR_ELx_EC_CP15_32:
4721 case ESR_ELx_EC_CP15_64:
4722 cp = 15;
4723 break;
4724 case ESR_ELx_EC_CP14_MR:
4725 case ESR_ELx_EC_CP14_64:
4726 cp = 14;
4727 break;
4728 default:
4729 WARN_ON(1);
4730 }
4731
4732 print_sys_reg_msg(params,
4733 "Unsupported guest CP%d access at: %08lx [%08lx]\n",
4734 cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
4735 kvm_inject_undefined(vcpu);
4736 }
4737
4738 /**
4739 * kvm_handle_cp_64 -- handles an mrrc/mcrr trap on a guest CP14/CP15 access
4740 * @vcpu: The VCPU pointer
4741 * @global: &struct sys_reg_desc
4742 * @nr_global: size of the @global array
4743 */
4744 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
4745 const struct sys_reg_desc *global,
4746 size_t nr_global)
4747 {
4748 struct sys_reg_params params;
4749 u64 esr = kvm_vcpu_get_esr(vcpu);
4750 int Rt = kvm_vcpu_sys_get_rt(vcpu);
4751 int Rt2 = (esr >> 10) & 0x1f;
4752
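/*
 * MCRR/MRRC ISS layout: Op1 lives in ESR bits [19:16], Rt2 in [14:10],
 * Rt in [9:5], CRm in [4:1], with bit 0 clear for a write.
 */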
4753 params.CRm = (esr >> 1) & 0xf;
4754 params.is_write = ((esr & 1) == 0);
4755
4756 params.Op0 = 0;
4757 params.Op1 = (esr >> 16) & 0xf;
4758 params.Op2 = 0;
4759 params.CRn = 0;
4760
4761 /*
4762 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
4763 * backends between AArch32 and AArch64, we get away with it.
4764 */
4765 if (params.is_write) {
4766 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
4767 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
4768 }
4769
4770 /*
4771 * If the table contains a handler, handle the
4772 * potential register operation in the case of a read and return
4773 * with success.
4774 */
4775 if (emulate_cp(vcpu, &params, global, nr_global)) {
4776 /* Split up the value between registers for the read side */
4777 if (!params.is_write) {
4778 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
4779 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
4780 }
4781
4782 return 1;
4783 }
4784
4785 unhandled_cp_access(vcpu, &params);
4786 return 1;
4787 }
4788
4789 static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
4790
4791 /*
4792 * The CP10 ID registers are architecturally mapped to AArch64 feature
4793 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
4794 * from AArch32.
4795 */
4796 static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
4797 {
4798 u8 reg_id = (esr >> 10) & 0xf;
4799 bool valid;
4800
4801 params->is_write = ((esr & 1) == 0);
4802 params->Op0 = 3;
4803 params->Op1 = 0;
4804 params->CRn = 0;
4805 params->CRm = 3;
4806
4807 /* CP10 ID registers are read-only */
4808 valid = !params->is_write;
4809
4810 switch (reg_id) {
4811 /* MVFR0 */
4812 case 0b0111:
4813 params->Op2 = 0;
4814 break;
4815 /* MVFR1 */
4816 case 0b0110:
4817 params->Op2 = 1;
4818 break;
4819 /* MVFR2 */
4820 case 0b0101:
4821 params->Op2 = 2;
4822 break;
4823 default:
4824 valid = false;
4825 }
4826
4827 if (valid)
4828 return true;
4829
4830 kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
4831 str_write_read(params->is_write), reg_id);
4832 return false;
4833 }
4834
4835 /**
4836 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
4837 * VFP Register' from AArch32.
4838 * @vcpu: The vCPU pointer
4839 *
4840 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
4841 * Work out the correct AArch64 system register encoding and reroute to the
4842 * AArch64 system register emulation.
4843 */
4844 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
4845 {
4846 int Rt = kvm_vcpu_sys_get_rt(vcpu);
4847 u64 esr = kvm_vcpu_get_esr(vcpu);
4848 struct sys_reg_params params;
4849
4850 /* UNDEF on any unhandled register access */
4851 if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
4852 kvm_inject_undefined(vcpu);
4853 return 1;
4854 }
4855
4856 if (emulate_sys_reg(vcpu, &params))
4857 vcpu_set_reg(vcpu, Rt, params.regval);
4858
4859 return 1;
4860 }
4861
4862 /**
4863 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
4864 * CRn=0, which corresponds to the AArch32 feature
4865 * registers.
4866 * @vcpu: the vCPU pointer
4867 * @params: the system register access parameters.
4868 *
4869 * Our cp15 system register tables do not enumerate the AArch32 feature
4870 * registers. Conveniently, our AArch64 table does, and the AArch32 system
4871 * register encoding can be trivially remapped into the AArch64 for the feature
4872 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
4873 *
4874 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
4875 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
4876 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
4877 * treat undefined registers in this range as RAZ.
4878 */
4879 static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
4880 struct sys_reg_params *params)
4881 {
4882 int Rt = kvm_vcpu_sys_get_rt(vcpu);
4883
4884 /* Treat impossible writes to RO registers as UNDEFINED */
4885 if (params->is_write) {
4886 unhandled_cp_access(vcpu, params);
4887 return 1;
4888 }
4889
4890 params->Op0 = 3;
4891
4892 /*
4893 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
4894 * Avoid conflicting with future expansion of AArch64 feature registers
4895 * and simply treat them as RAZ here.
4896 */
4897 if (params->CRm > 3)
4898 params->regval = 0;
4899 else if (!emulate_sys_reg(vcpu, params))
4900 return 1;
4901
4902 vcpu_set_reg(vcpu, Rt, params->regval);
4903 return 1;
4904 }
4905
4906 /**
4907 * kvm_handle_cp_32 -- handles an mrc/mcr trap on a guest CP14/CP15 access
4908 * @vcpu: The VCPU pointer
4909 * @params: &struct sys_reg_params
4910 * @global: &struct sys_reg_desc
4911 * @nr_global: size of the @global array
4912 */
4913 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
4914 struct sys_reg_params *params,
4915 const struct sys_reg_desc *global,
4916 size_t nr_global)
4917 {
4918 int Rt = kvm_vcpu_sys_get_rt(vcpu);
4919
4920 params->regval = vcpu_get_reg(vcpu, Rt);
4921
4922 if (emulate_cp(vcpu, params, global, nr_global)) {
4923 if (!params->is_write)
4924 vcpu_set_reg(vcpu, Rt, params->regval);
4925 return 1;
4926 }
4927
4928 unhandled_cp_access(vcpu, params);
4929 return 1;
4930 }
4931
4932 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
4933 {
4934 return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
4935 }
4936
4937 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
4938 {
4939 struct sys_reg_params params;
4940
4941 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
4942
4943 /*
4944 * Certain AArch32 ID registers are handled by rerouting to the AArch64
4945 * system register table. Registers in the ID range where CRm=0 are
4946 * excluded from this scheme as they do not trivially map into AArch64
4947 * system register encodings, except for AIDR/REVIDR.
4948 */
4949 if (params.Op1 == 0 && params.CRn == 0 &&
4950 (params.CRm || params.Op2 == 6 /* REVIDR */))
4951 return kvm_emulate_cp15_id_reg(vcpu, &params);
4952 if (params.Op1 == 1 && params.CRn == 0 &&
4953 params.CRm == 0 && params.Op2 == 7 /* AIDR */)
4954 return kvm_emulate_cp15_id_reg(vcpu, &params);
4955
4956 return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
4957 }
4958
4959 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
4960 {
4961 return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
4962 }
4963
4964 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
4965 {
4966 struct sys_reg_params params;
4967
4968 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
4969
4970 return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
4971 }
4972
4973 /**
4974 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
4975 * @vcpu: The VCPU pointer
4976 * @params: Decoded system register parameters
4977 *
4978 * Return: true if the system register access was successful, false otherwise.
4979 */
4980 static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
4981 struct sys_reg_params *params)
4982 {
4983 const struct sys_reg_desc *r;
4984
4985 r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
4986 if (likely(r)) {
4987 perform_access(vcpu, params, r);
4988 return true;
4989 }
4990
4991 print_sys_reg_msg(params,
4992 "Unsupported guest sys_reg access at: %lx [%08lx]\n",
4993 *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
4994 kvm_inject_undefined(vcpu);
4995
4996 return false;
4997 }
4998
static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, loff_t pos)
{
	unsigned long i, idreg_idx = 0;

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *r = &sys_reg_descs[i];

		if (!is_vm_ftr_id_reg(reg_to_encoding(r)))
			continue;

		if (idreg_idx++ == pos)
			return r;
	}

	return NULL;
}

static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
{
	struct kvm *kvm = s->private;

	if (!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
		return NULL;

	return (void *)idregs_debug_find(kvm, *pos);
}

static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kvm *kvm = s->private;

	(*pos)++;

	return (void *)idregs_debug_find(kvm, *pos);
}

static void idregs_debug_stop(struct seq_file *s, void *v)
{
}

static int idregs_debug_show(struct seq_file *s, void *v)
{
	const struct sys_reg_desc *desc = v;
	struct kvm *kvm = s->private;

	if (!desc)
		return 0;

	seq_printf(s, "%20s:\t%016llx\n",
		   desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc)));

	return 0;
}

static const struct seq_operations idregs_debug_sops = {
	.start	= idregs_debug_start,
	.next	= idregs_debug_next,
	.stop	= idregs_debug_stop,
	.show	= idregs_debug_show,
};

DEFINE_SEQ_ATTRIBUTE(idregs_debug);

static const struct sys_reg_desc *sr_resx_find(struct kvm *kvm, loff_t pos)
{
	unsigned long i, sr_idx = 0;

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *r = &sys_reg_descs[i];

		if (r->reg < __SANITISED_REG_START__)
			continue;

		if (sr_idx++ == pos)
			return r;
	}

	return NULL;
}

static void *sr_resx_start(struct seq_file *s, loff_t *pos)
{
	struct kvm *kvm = s->private;

	if (!kvm->arch.sysreg_masks)
		return NULL;

	return (void *)sr_resx_find(kvm, *pos);
}

static void *sr_resx_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kvm *kvm = s->private;

	(*pos)++;

	return (void *)sr_resx_find(kvm, *pos);
}

static void sr_resx_stop(struct seq_file *s, void *v)
{
}

static int sr_resx_show(struct seq_file *s, void *v)
{
	const struct sys_reg_desc *desc = v;
	struct kvm *kvm = s->private;
	struct resx resx;

	if (!desc)
		return 0;

	resx = kvm_get_sysreg_resx(kvm, desc->reg);

	seq_printf(s, "%20s:\tRES0:%016llx\tRES1:%016llx\n",
		   desc->name, resx.res0, resx.res1);

	return 0;
}

static const struct seq_operations sr_resx_sops = {
	.start	= sr_resx_start,
	.next	= sr_resx_next,
	.stop	= sr_resx_stop,
	.show	= sr_resx_show,
};

DEFINE_SEQ_ATTRIBUTE(sr_resx);

void kvm_sys_regs_create_debugfs(struct kvm *kvm)
{
	debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
			    &idregs_debug_fops);
	debugfs_create_file("resx", 0444, kvm->debugfs_dentry, kvm,
			    &sr_resx_fops);
}

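/*
 * Illustrative usage (assuming the usual /sys/kernel/debug mount and KVM's
 * per-VM debugfs directories): reading the "idregs" file in a VM's directory
 * dumps one "<name>: <value>" line per VM-wide feature ID register, while
 * "resx" dumps the computed RES0/RES1 masks for the sanitised registers.
 */
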
static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
{
	u32 id = reg_to_encoding(reg);
	struct kvm *kvm = vcpu->kvm;

	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
		return;

	kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg));
}

static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *reg)
{
	if (kvm_vcpu_initialized(vcpu))
		return;

	reg->reset(vcpu, reg);
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function walks the sys_reg_descs table above and sets each register
 * on the virtual CPU struct to its architecturally defined reset value.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long i;

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *r = &sys_reg_descs[i];

		if (!r->reset)
			continue;

		if (is_vm_ftr_id_reg(reg_to_encoding(r)))
			reset_vm_ftr_id_reg(vcpu, r);
		else if (is_vcpu_ftr_id_reg(reg_to_encoding(r)))
			reset_vcpu_ftr_id_reg(vcpu, r);
		else
			r->reset(vcpu, r);

		if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
			__vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
	}

	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);

	if (kvm_vcpu_has_pmu(vcpu))
		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
}

/**
 * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
 *			 trap taken during guest execution
 * @vcpu: The VCPU pointer
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
	const struct sys_reg_desc *desc = NULL;
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int sr_idx;

	trace_kvm_handle_sys_reg(esr);

	if (triage_sysreg_trap(vcpu, &sr_idx))
		return 1;

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	/* System registers have Op0=={2,3}, as per DDI 0487 J.a C5.1.2 */
	if (params.Op0 == 2 || params.Op0 == 3)
		desc = &sys_reg_descs[sr_idx];
	else
		desc = &sys_insn_descs[sr_idx];

	perform_access(vcpu, &params, desc);

	/* Read from system register? */
	if (!params.is_write &&
	    (params.Op0 == 2 || params.Op0 == 3))
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}

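/*
 * For reference (a sketch of the trap path, assuming a plain register
 * access): a guest executing MRS x3, SCTLR_EL1 with the relevant trap
 * enabled takes an exception to EL2 with EC==0x18. The ESR ISS then carries
 * Op0/Op1/CRn/CRm/Op2, Rt==3 and a direction bit, which
 * esr_sys64_to_params() unpacks into the sys_reg_params used above.
 */
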
/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits mean it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

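/*
 * Worked example (illustrative): SCTLR_EL1 is Op0=3, Op1=0, CRn=1, CRm=0,
 * Op2=0, so its userspace index is
 *
 *	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
 *	(3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
 *	(1 << KVM_REG_ARM64_SYSREG_CRN_SHIFT)
 *
 * and index_to_params() decodes exactly that layout back into the five
 * fields, rejecting any id with bits set outside the masks above.
 */
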
const struct sys_reg_desc *get_reg_by_id(u64 id,
					 const struct sys_reg_desc table[],
					 unsigned int num)
{
	struct sys_reg_params params;

	if (!index_to_params(id, &params))
		return NULL;

	return find_reg(&params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
		   const struct sys_reg_desc table[], unsigned int num)
{
	const struct sys_reg_desc *r;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	r = get_reg_by_id(id, table, num);

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
		r = NULL;

	return r;
}

static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT) - 1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		return put_user(get_ccsidr(vcpu, val), uval);
	default:
		return -ENOENT;
	}
}

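/*
 * Illustrative layout (assuming the documented DEMUX encoding): a CCSIDR
 * index is KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
 * KVM_REG_ARM_DEMUX_ID_CCSIDR | <csselr>, so the low VAL field directly
 * selects which cache level/type the 32-bit CCSIDR value describes.
 */
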
static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT) - 1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		return set_ccsidr(vcpu, val, newval);
	default:
		return -ENOENT;
	}
}

static u64 kvm_one_reg_to_id(const struct kvm_one_reg *reg)
{
	switch (reg->id) {
	case KVM_REG_ARM_TIMER_CVAL:
		return TO_ARM64_SYS_REG(CNTV_CVAL_EL0);
	case KVM_REG_ARM_TIMER_CNT:
		return TO_ARM64_SYS_REG(CNTVCT_EL0);
	default:
		return reg->id;
	}
}

int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 id = kvm_one_reg_to_id(reg);
	u64 val;
	int ret;

	r = id_to_sys_reg_desc(vcpu, id, table, num);
	if (!r || sysreg_hidden(vcpu, r))
		return -ENOENT;

	if (r->get_user) {
		ret = (r->get_user)(vcpu, r, &val);
	} else {
		val = __vcpu_sys_reg(vcpu, r->reg);
		ret = 0;
	}

	if (!ret)
		ret = put_user(val, uaddr);

	return ret;
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(vcpu, reg->id, uaddr);

	return kvm_sys_reg_get_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}

int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 id = kvm_one_reg_to_id(reg);
	u64 val;
	int ret;

	if (get_user(val, uaddr))
		return -EFAULT;

	r = id_to_sys_reg_desc(vcpu, id, table, num);
	if (!r || sysreg_hidden(vcpu, r))
		return -ENOENT;

	if (sysreg_user_write_ignore(vcpu, r))
		return 0;

	if (r->set_user) {
		ret = (r->set_user)(vcpu, r, val);
	} else {
		__vcpu_assign_sys_reg(vcpu, r->reg, val);
		ret = 0;
	}

	return ret;
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(vcpu, reg->id, uaddr);

	return kvm_sys_reg_set_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}

static unsigned int num_demux_regs(void)
{
	return CSSELR_MAX;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	u64 idx;

	if (!*uind)
		return true;

	switch (reg_to_encoding(reg)) {
	case SYS_CNTV_CVAL_EL0:
		idx = KVM_REG_ARM_TIMER_CVAL;
		break;
	case SYS_CNTVCT_EL0:
		idx = KVM_REG_ARM_TIMER_CNT;
		break;
	default:
		idx = sys_reg_to_index(reg);
	}

	if (put_user(idx, *uind))
		return false;

	(*uind)++;
	return true;
}

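/*
 * Background note (summarising the special cases above): CNTV_CVAL_EL0 and
 * CNTVCT_EL0 are reported to userspace under the legacy
 * KVM_REG_ARM_TIMER_CVAL/KVM_REG_ARM_TIMER_CNT indices rather than their
 * generic sysreg encodings, preserving the ABI that existing VMMs rely on;
 * kvm_one_reg_to_id() performs the reverse translation on the way in.
 */
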
static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i2, *end2;
	unsigned int total = 0;
	int err;

	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	while (i2 != end2) {
		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
		if (err)
			return err;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int err;

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

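/*
 * Sketch of the expected userspace flow (hypothetical VMM snippet, using the
 * standard KVM_GET_REG_LIST vcpu ioctl; error handling elided):
 *
 *	struct kvm_reg_list hdr = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &hdr);	    // fails with E2BIG,
 *						    // hdr.n now holds the count
 *	list = malloc(sizeof(*list) + hdr.n * sizeof(__u64));
 *	list->n = hdr.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);	    // fills list->reg[]
 *
 * The count includes kvm_arm_num_sys_reg_descs() plus the other register
 * classes, and the NULL-uindices walk above is what makes counting cheap.
 */
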
#define KVM_ARM_FEATURE_ID_RANGE_INDEX(r)			\
	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r),		\
				     sys_reg_Op1(r),		\
				     sys_reg_CRn(r),		\
				     sys_reg_CRm(r),		\
				     sys_reg_Op2(r))

int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
{
	const void *zero_page = page_to_virt(ZERO_PAGE(0));
	u64 __user *masks = (u64 __user *)range->addr;

	/* Only feature id range is supported, reserved[13] must be zero. */
	if (range->range ||
	    memcmp(range->reserved, zero_page, sizeof(range->reserved)))
		return -EINVAL;

	/* Wipe the whole thing first */
	if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
		return -EFAULT;

	for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *reg = &sys_reg_descs[i];
		u32 encoding = reg_to_encoding(reg);
		u64 val;

		if (!is_feature_id_reg(encoding) || !reg->set_user)
			continue;

		if (!reg->val ||
		    (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) {
			continue;
		}
		val = reg->val;

		if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
			return -EFAULT;
	}

	return 0;
}

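/*
 * Illustrative usage from a VMM (assuming the KVM_ARM_GET_REG_WRITABLE_MASKS
 * VM ioctl): userspace fills a struct reg_mask_range with addr pointing at an
 * array of KVM_ARM_FEATURE_ID_RANGE_SIZE u64s and zeroes range and
 * reserved[]; the handler above then writes one writable-bits mask per
 * feature ID register into that array.
 */
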
static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	if (has_vhe() || has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
	    kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
	else
		vcpu->arch.hcr_el2 |= HCR_TID5;

	/*
	 * In the absence of FGT, we cannot independently trap TLBI
	 * Range instructions. This isn't great, but trapping all
	 * TLBIs would be far worse. Live with it...
	 */
	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		vcpu->arch.hcr_el2 |= HCR_TTLBOS;
}

void kvm_calculate_traps(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);
	vcpu_set_hcr(vcpu);
	vcpu_set_ich_hcr(vcpu);
	vcpu_set_hcrx(vcpu);

	if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
		goto out;

	compute_fgu(kvm, HFGRTR_GROUP);
	compute_fgu(kvm, HFGITR_GROUP);
	compute_fgu(kvm, HDFGRTR_GROUP);
	compute_fgu(kvm, HAFGRTR_GROUP);
	compute_fgu(kvm, HFGRTR2_GROUP);
	compute_fgu(kvm, HFGITR2_GROUP);
	compute_fgu(kvm, HDFGRTR2_GROUP);

	set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
out:
	mutex_unlock(&kvm->arch.config_lock);
}

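/*
 * Note on the FGU computation above (descriptive, not new behaviour): the
 * Fine-Grained-UNDEF masks only depend on the VM-wide ID register
 * configuration, so they are computed once per VM and cached behind
 * KVM_ARCH_FLAG_FGU_INITIALIZED; subsequent vCPUs entering
 * kvm_calculate_traps() take the early-out path.
 */
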
/*
 * Perform last adjustments to the ID registers that are implied by the
 * configuration outside of the ID regs themselves, as well as any
 * initialisation that directly depends on these ID registers (such as
 * RES0/RES1 behaviours). This is not the place to configure traps though.
 *
 * Because this can be called once per CPU, changes must be idempotent.
 */
int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	guard(mutex)(&kvm->arch.config_lock);

	/*
	 * This hacks into the ID registers, so only perform it when the
	 * first vcpu runs, or the kvm_set_vm_id_reg() helper will scream.
	 */
	if (!irqchip_in_kernel(kvm) && !kvm_vm_has_ran_once(kvm)) {
		u64 val;

		val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
		kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
		val = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;
		kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, val);
	}

	if (vcpu_has_nv(vcpu)) {
		int ret = kvm_init_nv_sysregs(vcpu);
		if (ret)
			return ret;
	}

	return 0;
}

int __init kvm_sys_reg_table_init(void)
{
	const struct sys_reg_desc *gicv3_regs;
	bool valid = true;
	unsigned int i, sz;
	int ret = 0;

	/* Make sure tables are unique and in order. */
	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), true);
	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), false);
	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), false);
	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), false);
	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), false);
	valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);

	gicv3_regs = vgic_v3_get_sysreg_table(&sz);
	valid &= check_sysreg_table(gicv3_regs, sz, false);

	if (!valid)
		return -EINVAL;

	init_imp_id_regs();

	ret = populate_nv_trap_config();

	check_feature_map();

	for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
		ret = populate_sysreg_config(sys_reg_descs + i, i);

	for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
		ret = populate_sysreg_config(sys_insn_descs + i, i);

	return ret;
}