// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/debugfs.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/arm_pmuv3.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"
#include "vgic/vgic.h"

#include "trace.h"

/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val);

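/*
 * UNDEF injection helper for traps on registers the guest must not
 * access. Returning false leaves the guest's PC unchanged, so it still
 * points at the faulting instruction when the UNDEF is taken.
 */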
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}

static bool bad_trap(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *params,
		     const struct sys_reg_desc *r,
		     const char *msg)
{
	WARN_ONCE(1, "Unexpected %s\n", msg);
	print_sys_reg_instr(params);
	return undef_access(vcpu, params, r);
}

static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg read to write-only register");
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg write to read-only register");
}

#define PURE_EL2_SYSREG(el2)						\
	case el2: {							\
		*el1r = el2;						\
		return true;						\
	}

#define MAPPED_EL2_SYSREG(el2, el1, fn)					\
	case el2: {							\
		*xlate = fn;						\
		*el1r = el1;						\
		return true;						\
	}

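/*
 * Describe how a virtual EL2 register is handled when the guest runs at
 * vEL2: either it is a pure EL2 register with its own storage, or it
 * maps onto an EL1 register, possibly via a translation function for
 * the non-VHE (HCR_EL2.E2H == 0) register format.
 */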
static bool get_el2_to_el1_mapping(unsigned int reg,
				   unsigned int *el1r, u64 (**xlate)(u64))
{
	switch (reg) {
	PURE_EL2_SYSREG( VPIDR_EL2 );
	PURE_EL2_SYSREG( VMPIDR_EL2 );
	PURE_EL2_SYSREG( ACTLR_EL2 );
	PURE_EL2_SYSREG( HCR_EL2 );
	PURE_EL2_SYSREG( MDCR_EL2 );
	PURE_EL2_SYSREG( HSTR_EL2 );
	PURE_EL2_SYSREG( HACR_EL2 );
	PURE_EL2_SYSREG( VTTBR_EL2 );
	PURE_EL2_SYSREG( VTCR_EL2 );
	PURE_EL2_SYSREG( RVBAR_EL2 );
	PURE_EL2_SYSREG( TPIDR_EL2 );
	PURE_EL2_SYSREG( HPFAR_EL2 );
	PURE_EL2_SYSREG( CNTHCTL_EL2 );
	MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1,
			  translate_sctlr_el2_to_sctlr_el1 );
	MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1,
			  translate_cptr_el2_to_cpacr_el1 );
	MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1,
			  translate_ttbr0_el2_to_ttbr0_el1 );
	MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1, NULL );
	MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1,
			  translate_tcr_el2_to_tcr_el1 );
	MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1, NULL );
	MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1, NULL );
	MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1, NULL );
	MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1, NULL );
	MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1, NULL );
	MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1, NULL );
	MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL );
	MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL );
	MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL );
	MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
	default:
		return false;
	}
}

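/*
 * Read a vcpu register, preferring the copy loaded on the CPU when the
 * register state is resident there and falling back to the in-memory
 * copy otherwise. For a guest running at vEL2, reads of mapped EL2
 * registers are redirected to their EL1 counterparts on the CPU.
 */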
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_read;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_read;

		/*
		 * If this register does not have an EL1 counterpart,
		 * then read the stored EL2 version.
		 */
		if (reg == el1r)
			goto memory_read;

		/*
		 * If we have a non-VHE guest and the sysreg requires
		 * translation to be used at EL1, use the in-memory copy
		 * instead.
		 */
		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			goto memory_read;

		/* Get the current version of the EL1 counterpart. */
		WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
		return val;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_read;

	if (__vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

memory_read:
	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_write;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_write;

		/*
		 * Always store a copy of the write to memory to avoid having
		 * to reverse-translate virtual EL2 system registers for a
		 * non-VHE guest hypervisor.
		 */
		__vcpu_sys_reg(vcpu, reg) = val;

		/* No EL1 counterpart? We're done here. */
		if (reg == el1r)
			return;

		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			val = xlate(val);

		/* Redirect this to the EL1 version of the register. */
		WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r));
		return;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_write;

	if (__vcpu_write_sys_reg_to_cpu(val, reg))
		return;

memory_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14

/*
 * Returns the minimum line size for the selected cache, expressed as
 * Log2(bytes).
 */
static u8 get_min_cache_line_size(bool icache)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u8 field;

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

	/*
	 * Cache line size is represented as Log2(words) in CTR_EL0.
	 * Log2(bytes) can be derived with the following:
	 *
	 * Log2(words) + 2 = Log2(bytes / 4) + 2
	 *		   = Log2(bytes) - 2 + 2
	 *		   = Log2(bytes)
	 */
	return field + 2;
}

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary by the
	 * physical CPU which the vcpu currently resides in.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configuration.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) are 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if not.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}

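/*
 * Apply a userspace override to a CCSIDR value. The shadow array is
 * only allocated on the first write that differs from the generated
 * value, and is seeded with the current (possibly fabricated) CCSIDRs.
 */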
static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}

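/* Generic accessor: forward the trapped access to the vcpu register file. */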
static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);

	return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm))
		return undef_access(vcpu, p, r);

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}

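/*
 * Compute the mask/shift pair selecting which part of the 64bit shadow
 * register an access targets, based on the descriptor's aarch32_map
 * annotation (lower half, upper half, or the whole register).
 */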
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	if (reg_to_encoding(r) == SYS_TCR2_EL1 &&
	    !kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
		return undef_access(vcpu, p, r);

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!kvm_has_gicv3(vcpu->kvm))
		return undef_access(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!kvm_has_gicv3(vcpu->kvm))
		return undef_access(vcpu, p, r);

	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 sr = reg_to_encoding(r);

	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP))
		return undef_access(vcpu, p, r);

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}

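/*
 * OSLAR_EL1 is write-only: emulate the OS lock by mirroring the OSLK
 * bit into the emulated OSLSR_EL1 rather than touching hardware state.
 */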
static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 oslsr;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/* Forward the OSLK bit to OSLSR */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
	if (p->regval & OSLAR_EL1_OSLK)
		oslsr |= OSLSR_EL1_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
	return true;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
	return 0;
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);
	if (p->is_write)
		vcpu_set_flag(vcpu, DEBUG_DIRTY);

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
 * switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;

	vcpu_set_flag(vcpu, DEBUG_DIRTY);
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}

static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
	return 0;
}

static u64 reset_bvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
	return 0;
}

static u64 reset_bcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
	return 0;
}

static u64 reset_wvr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
	return rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, dbg_reg);
	else
		dbg_to_reg(vcpu, p, rd, dbg_reg);

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 val)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   u64 *val)
{
	*val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
	return 0;
}

static u64 reset_wcr(struct kvm_vcpu *vcpu,
		     const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
	return rd->val;
}

static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
	return amair;
}

static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);
	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
	return actlr;
}

static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (1ULL << 31);
	vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);

	return mpidr;
}

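/* PMU registers are only exposed when the vcpu has a PMU configured. */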
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}

static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
	u8 n = vcpu->kvm->arch.pmcr_n;

	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/* This thing will UNDEF, who cares about the reset value? */
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK;

	return __vcpu_sys_reg(vcpu, r->reg);
}

static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr = 0;

	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	/*
	 * The value of the PMCR.N field is included when the
	 * vCPU register is read via kvm_vcpu_read_pmcr().
	 */
	__vcpu_sys_reg(vcpu, r->reg) = pmcr;

	return __vcpu_sys_reg(vcpu, r->reg);
}

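/*
 * Check whether an EL0 PMU access is permitted by PMUSERENR_EL0 (or by
 * the vcpu running at a privileged level), injecting an UNDEF when it
 * is not. Returns true when the access is disabled.
 */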
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = kvm_vcpu_read_pmcr(vcpu);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = kvm_vcpu_read_pmcr(vcpu)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & PMSELR_EL0_SEL_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}

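/*
 * Validate an event counter index against the number of counters
 * advertised in PMCR_EL0.N, injecting an UNDEF for out-of-range
 * indices. The cycle counter index is always valid.
 */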
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = kvm_vcpu_read_pmcr(vcpu);
	val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
					    __vcpu_sys_reg(vcpu, PMSELR_EL0));
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg);
	}

	return true;
}

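/*
 * Userspace accessors for the {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} pairs.
 * Only the SET view is backed by storage; a write through a CLR
 * encoding clears the selected bits instead of setting them.
 */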
static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
{
	bool set;

	val &= kvm_pmu_valid_counter_mask(vcpu);

	switch (r->reg) {
	case PMOVSSET_EL0:
		/* CRm[1] being set indicates a SET register, and CLR otherwise */
		set = r->CRm & 2;
		break;
	default:
		/* Op2[0] being set indicates a SET register, and CLR otherwise */
		set = r->Op2 & 1;
		break;
	}

	if (set)
		__vcpu_sys_reg(vcpu, r->reg) |= val;
	else
		__vcpu_sys_reg(vcpu, r->reg) &= ~val;

	return 0;
}

static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
	return 0;
}

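/*
 * Trap handler for PMCNTEN{SET,CLR}_EL0: both encodings operate on the
 * stored PMCNTENSET_EL0 value, with Op2[0] distinguishing SET from CLR.
 */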
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter_mask(vcpu, val);
			kvm_vcpu_pmu_restore_guest(vcpu);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter_mask(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu))
			return undef_access(vcpu, p, r);

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 *val)
{
	*val = kvm_vcpu_read_pmcr(vcpu);
	return 0;
}

static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 val)
{
	u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);

	/*
	 * The vCPU can't have more counters than the PMU hardware
	 * implements. Ignore this error to maintain compatibility
	 * with the existing KVM behavior.
	 */
	if (!kvm_vm_has_ran_once(kvm) &&
	    new_n <= kvm_arm_pmu_get_max_counters(kvm))
		kvm->arch.pmcr_n = new_n;

	mutex_unlock(&kvm->arch.config_lock);

	/*
	 * Ignore writes to RES0 bits, read-only bits that are cleared on
	 * vCPU reset, and writable bits that KVM doesn't support yet.
	 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
	 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
	 * But, we leave the bit as it is here, as the vCPU's PMUver might
	 * be changed later (NOTE: the bit will be cleared on first vCPU run
	 * if necessary).
	 */
	val &= ARMV8_PMU_PMCR_MASK;

	/* The LC bit is RES1 when AArch32 is not supported */
	if (!kvm_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = val;
	return 0;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

#define PMU_SYS_REG(name)						\
	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

/* Macros to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }

static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	  .visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)

static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;
	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;
	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
	case SYS_AARCH32_CNTPCT:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;
	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		return undef_access(vcpu, p, r);
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}

static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
				    s64 new, s64 cur)
{
	struct arm64_ftr_bits kvm_ftr = *ftrp;

	/* Some features have different safe value type in KVM than host features */
	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		switch (kvm_ftr.shift) {
		case ID_AA64DFR0_EL1_PMUVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		case ID_AA64DFR0_EL1_DebugVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		}
		break;
	case SYS_ID_DFR0_EL1:
		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	}

	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}

/*
 * arm64_check_features() - Check if a feature register value constitutes
 * a subset of features indicated by the idreg's KVM sanitised limit.
 *
 * This function will check if each feature field of @val is the "safe" value
 * against the idreg's KVM sanitised limit returned from the reset() callback.
 * If a field value in @val is the same as the one in limit, it is always
 * considered the safe value regardless. For register fields that are not
 * writable, only the value in limit is considered the safe value.
 *
 * Return: 0 if all the fields are safe. Otherwise, return negative errno.
 */
static int arm64_check_features(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd,
				u64 val)
{
	const struct arm64_ftr_reg *ftr_reg;
	const struct arm64_ftr_bits *ftrp = NULL;
	u32 id = reg_to_encoding(rd);
	u64 writable_mask = rd->val;
	u64 limit = rd->reset(vcpu, rd);
	u64 mask = 0;

	/*
	 * Hidden and unallocated ID registers may not have a corresponding
	 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
	 * only safe value is 0.
	 */
	if (sysreg_visible_as_raz(vcpu, rd))
		return val ? -E2BIG : 0;

	ftr_reg = get_arm64_ftr_reg(id);
	if (!ftr_reg)
		return -EINVAL;

	ftrp = ftr_reg->ftr_bits;

	for (; ftrp && ftrp->width; ftrp++) {
		s64 f_val, f_lim, safe_val;
		u64 ftr_mask;

		ftr_mask = arm64_ftr_mask(ftrp);
		if ((ftr_mask & writable_mask) != ftr_mask)
			continue;

		f_val = arm64_ftr_value(ftrp, val);
		f_lim = arm64_ftr_value(ftrp, limit);
		mask |= ftr_mask;

		if (f_val == f_lim)
			safe_val = f_val;
		else
			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);

		if (safe_val != f_val)
			return -E2BIG;
	}

	/* For fields that are not writable, values in limit are the safe values. */
	if ((val & ~mask) != (limit & ~mask))
		return -E2BIG;

	return 0;
}

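/*
 * Convert an AArch64 ID_AA64DFR0_EL1.PMUVer value to the equivalent
 * AArch32 ID_DFR0_EL1.PerfMon encoding.
 */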
static u8 pmuver_to_perfmon(u8 pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return ID_DFR0_EL1_PerfMon_PMUv3;
	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
		return ID_DFR0_EL1_PerfMon_IMPDEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return pmuver;
	}
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (sysreg_visible_as_raz(vcpu, r))
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
		break;
	case SYS_ID_AA64PFR2_EL1:
		/* We only expose FPMR */
		val &= ID_AA64PFR2_EL1_FPMR;
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
		break;
	case SYS_ID_AA64MMFR2_EL1:
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		break;
	case SYS_ID_AA64MMFR3_EL1:
		val &= ID_AA64MMFR3_EL1_TCRX | ID_AA64MMFR3_EL1_S1POE;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
		break;
	}

	return val;
}

static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
				     const struct sys_reg_desc *r)
{
	return __kvm_read_sanitised_id_reg(vcpu, r);
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r));
}

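/*
 * Match the (Op0, Op1, CRn, CRm) = (3, {0, 1, 3}, 0, 0-7) encoding
 * space, where the architecture places the feature ID registers.
 */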
is_feature_id_reg(u32 encoding)1574 static bool is_feature_id_reg(u32 encoding)
1575 {
1576 return (sys_reg_Op0(encoding) == 3 &&
1577 (sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
1578 sys_reg_CRn(encoding) == 0 &&
1579 sys_reg_CRm(encoding) <= 7);
1580 }
1581
1582 /*
1583 * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
1584 * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
1585 * registers KVM maintains on a per-VM basis.
1586 */
is_vm_ftr_id_reg(u32 id)1587 static inline bool is_vm_ftr_id_reg(u32 id)
1588 {
1589 if (id == SYS_CTR_EL0)
1590 return true;
1591
1592 return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1593 sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1594 sys_reg_CRm(id) < 8);
1595 }
1596
is_vcpu_ftr_id_reg(u32 id)1597 static inline bool is_vcpu_ftr_id_reg(u32 id)
1598 {
1599 return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
1600 }
1601
is_aa32_id_reg(u32 id)1602 static inline bool is_aa32_id_reg(u32 id)
1603 {
1604 return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1605 sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1606 sys_reg_CRm(id) <= 3);
1607 }
1608
id_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1609 static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
1610 const struct sys_reg_desc *r)
1611 {
1612 u32 id = reg_to_encoding(r);
1613
1614 switch (id) {
1615 case SYS_ID_AA64ZFR0_EL1:
1616 if (!vcpu_has_sve(vcpu))
1617 return REG_RAZ;
1618 break;
1619 }
1620
1621 return 0;
1622 }
1623
aa32_id_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1624 static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
1625 const struct sys_reg_desc *r)
1626 {
1627 /*
1628 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
1629 * EL. Promote to RAZ/WI in order to guarantee consistency between
1630 * systems.
1631 */
1632 if (!kvm_supports_32bit_el0())
1633 return REG_RAZ | REG_USER_WI;
1634
1635 return id_visibility(vcpu, r);
1636 }
1637
raz_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1638 static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
1639 const struct sys_reg_desc *r)
1640 {
1641 return REG_RAZ;
1642 }
1643
1644 /* cpufeature ID register access trap handlers */
1645
access_id_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1646 static bool access_id_reg(struct kvm_vcpu *vcpu,
1647 struct sys_reg_params *p,
1648 const struct sys_reg_desc *r)
1649 {
1650 if (p->is_write)
1651 return write_to_read_only(vcpu, p, r);
1652
1653 p->regval = read_id_reg(vcpu, r);
1654
1655 return true;
1656 }
1657
1658 /* Visibility overrides for SVE-specific control registers */
sve_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1659 static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
1660 const struct sys_reg_desc *rd)
1661 {
1662 if (vcpu_has_sve(vcpu))
1663 return 0;
1664
1665 return REG_HIDDEN;
1666 }
1667
sme_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1668 static unsigned int sme_visibility(const struct kvm_vcpu *vcpu,
1669 const struct sys_reg_desc *rd)
1670 {
1671 if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP))
1672 return 0;
1673
1674 return REG_HIDDEN;
1675 }
1676
fp8_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1677 static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
1678 const struct sys_reg_desc *rd)
1679 {
1680 if (kvm_has_fpmr(vcpu->kvm))
1681 return 0;
1682
1683 return REG_HIDDEN;
1684 }
1685
read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1686 static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
1687 const struct sys_reg_desc *rd)
1688 {
1689 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1690
1691 if (!vcpu_has_sve(vcpu))
1692 val &= ~ID_AA64PFR0_EL1_SVE_MASK;
1693
1694 /*
1695 * The default is to expose CSV2 == 1 if the HW isn't affected.
1696 * Although this is a per-CPU feature, we make it global because
1697 * asymmetric systems are just a nuisance.
1698 *
1699 * Userspace can override this as long as it doesn't promise
1700 * the impossible.
1701 */
1702 if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
1703 val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
1704 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
1705 }
1706 if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
1707 val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
1708 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
1709 }
1710
1711 if (kvm_vgic_global_state.type == VGIC_V3) {
1712 val &= ~ID_AA64PFR0_EL1_GIC_MASK;
1713 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
1714 }
1715
1716 val &= ~ID_AA64PFR0_EL1_AMU_MASK;
1717
1718 return val;
1719 }
1720
1721 #define ID_REG_LIMIT_FIELD_ENUM(val, reg, field, limit) \
1722 ({ \
1723 u64 __f_val = FIELD_GET(reg##_##field##_MASK, val); \
1724 (val) &= ~reg##_##field##_MASK; \
1725 (val) |= FIELD_PREP(reg##_##field##_MASK, \
1726 min(__f_val, \
1727 (u64)SYS_FIELD_VALUE(reg, field, limit))); \
1728 (val); \
1729 })
1730
read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1731 static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
1732 const struct sys_reg_desc *rd)
1733 {
1734 u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1735
1736 val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
1737
1738 /*
1739 * Only initialize the PMU version if the vCPU was configured with one.
1740 */
1741 val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
1742 if (kvm_vcpu_has_pmu(vcpu))
1743 val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
1744 kvm_arm_pmu_get_pmuver_limit());
1745
1746 /* Hide SPE from guests */
1747 val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
1748
1749 return val;
1750 }
1751
set_id_aa64dfr0_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)1752 static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
1753 const struct sys_reg_desc *rd,
1754 u64 val)
1755 {
1756 u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
1757 u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
1758
1759 /*
1760 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
1761 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
1762 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
1763 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
1764 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
1765 *
1766 * At minimum, we're on the hook to allow values that were given to
1767 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
1768 * with a more sensible NI. The value of an ID register changing under
1769 * the nose of the guest is unfortunate, but is certainly no more
1770 * surprising than an ill-guided PMU driver poking at impdef system
1771 * registers that end in an UNDEF...
1772 */
1773 if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
1774 val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
1775
1776 /*
1777 * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
1778 * nonzero minimum safe value.
1779 */
1780 if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
1781 return -EINVAL;
1782
1783 return set_id_reg(vcpu, rd, val);
1784 }
1785
1786 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
1787 const struct sys_reg_desc *rd)
1788 {
1789 u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
1790 u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
1791
1792 val &= ~ID_DFR0_EL1_PerfMon_MASK;
1793 if (kvm_vcpu_has_pmu(vcpu))
1794 val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
1795
1796 val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);
1797
1798 return val;
1799 }
1800
1801 static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
1802 const struct sys_reg_desc *rd,
1803 u64 val)
1804 {
1805 u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
1806 u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);
1807
1808 if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
1809 val &= ~ID_DFR0_EL1_PerfMon_MASK;
1810 perfmon = 0;
1811 }
1812
1813 /*
1814 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
1815 * it doesn't promise more than what the HW gives us on the
1816 * AArch64 side (as everything is emulated with that), and as
1817 * long as it describes a PMUv3.
1818 */
1819 if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
1820 return -EINVAL;
1821
1822 if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
1823 return -EINVAL;
1824
1825 return set_id_reg(vcpu, rd, val);
1826 }
1827
1828 /*
1829 * cpufeature ID register user accessors
1830 *
1831 * Writes are only accepted before the VM has run, and only for the
1832 * fields marked writable in the register's mask; once the VM has run,
1833 * set_id_reg() rejects any value that differs from the final one.
1834 */
1835 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1836 u64 *val)
1837 {
1838 /*
1839 * Avoid locking if the VM has already started, as the ID registers are
1840 * guaranteed to be invariant at that point.
1841 */
1842 if (kvm_vm_has_ran_once(vcpu->kvm)) {
1843 *val = read_id_reg(vcpu, rd);
1844 return 0;
1845 }
1846
1847 mutex_lock(&vcpu->kvm->arch.config_lock);
1848 *val = read_id_reg(vcpu, rd);
1849 mutex_unlock(&vcpu->kvm->arch.config_lock);
1850
1851 return 0;
1852 }
1853
1854 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1855 u64 val)
1856 {
1857 u32 id = reg_to_encoding(rd);
1858 int ret;
1859
1860 mutex_lock(&vcpu->kvm->arch.config_lock);
1861
1862 /*
1863 * Once the VM has started the ID registers are immutable. Reject any
1864 * write that does not match the final register value.
1865 */
1866 if (kvm_vm_has_ran_once(vcpu->kvm)) {
1867 if (val != read_id_reg(vcpu, rd))
1868 ret = -EBUSY;
1869 else
1870 ret = 0;
1871
1872 mutex_unlock(&vcpu->kvm->arch.config_lock);
1873 return ret;
1874 }
1875
1876 ret = arm64_check_features(vcpu, rd, val);
1877 if (!ret)
1878 kvm_set_vm_id_reg(vcpu->kvm, id, val);
1879
1880 mutex_unlock(&vcpu->kvm->arch.config_lock);
1881
1882 /*
1883 * arm64_check_features() returns -E2BIG to indicate the register's
1884 * feature set is a superset of the maximally-allowed register value.
1885 * While it would be nice to precisely describe this to userspace, the
1886 * existing UAPI for KVM_SET_ONE_REG has it that invalid register
1887 * writes return -EINVAL.
1888 */
1889 if (ret == -E2BIG)
1890 ret = -EINVAL;
1891 return ret;
1892 }
1893
1894 void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
1895 {
1896 u64 *p = __vm_id_reg(&kvm->arch, reg);
1897
1898 lockdep_assert_held(&kvm->arch.config_lock);
1899
1900 if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm))
1901 return;
1902
1903 *p = val;
1904 }
1905
1906 static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1907 u64 *val)
1908 {
1909 *val = 0;
1910 return 0;
1911 }
1912
1913 static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1914 u64 val)
1915 {
1916 return 0;
1917 }
1918
1919 static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1920 const struct sys_reg_desc *r)
1921 {
1922 if (p->is_write)
1923 return write_to_read_only(vcpu, p, r);
1924
1925 p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0);
1926 return true;
1927 }
1928
1929 static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1930 const struct sys_reg_desc *r)
1931 {
1932 if (p->is_write)
1933 return write_to_read_only(vcpu, p, r);
1934
1935 p->regval = __vcpu_sys_reg(vcpu, r->reg);
1936 return true;
1937 }
1938
1939 /*
1940 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
1941 * by the physical CPU which the vcpu currently resides in.
1942 */
1943 static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1944 {
1945 u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
1946 u64 clidr;
1947 u8 loc;
1948
1949 if ((ctr_el0 & CTR_EL0_IDC)) {
1950 /*
1951 * Data cache clean to the PoU is not required so LoUU and LoUIS
1952 * will not be set and a unified cache, which will be marked as
1953 * LoC, will be added.
1954 *
1955 * If not DIC, make the unified cache L2 so that an instruction
1956 * cache can be added as L1 later.
1957 */
1958 loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
1959 clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
1960 } else {
1961 /*
1962 * Data cache clean to the PoU is required so let L1 have a data
1963 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
1964 * it can be marked as LoC too.
1965 */
1966 loc = 1;
1967 clidr = 1 << CLIDR_LOUU_SHIFT;
1968 clidr |= 1 << CLIDR_LOUIS_SHIFT;
1969 clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
1970 }
1971
1972 /*
1973 * Instruction cache invalidation to the PoU is required so let L1 have
1974 * an instruction cache. If L1 already has a data cache, it will be
1975 * CACHE_TYPE_SEPARATE.
1976 */
1977 if (!(ctr_el0 & CTR_EL0_DIC))
1978 clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);
1979
1980 clidr |= loc << CLIDR_LOC_SHIFT;
1981
1982 /*
1983 * Add a tag cache unified with the data cache. Allocation tags and
1984 * data are unified in a cache line, so this looks valid even if
1985 * there is only one cache line.
1986 */
1987 if (kvm_has_mte(vcpu->kvm))
1988 clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);
1989
1990 __vcpu_sys_reg(vcpu, r->reg) = clidr;
1991
1992 return __vcpu_sys_reg(vcpu, r->reg);
1993 }
1994
1995 static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1996 u64 val)
1997 {
1998 u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
1999 u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));
2000
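/*
 * A CLIDR_EL1 with no LoC, or with neither LoUU nor LoUIS, implies that
 * no D-cache maintenance to the PoU is needed (IDC behaviour). Only
 * accept such a value if the host's CTR_EL0 actually advertises IDC.
 */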
2001 if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
2002 return -EINVAL;
2003
2004 __vcpu_sys_reg(vcpu, rd->reg) = val;
2005
2006 return 0;
2007 }
2008
2009 static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2010 const struct sys_reg_desc *r)
2011 {
2012 int reg = r->reg;
2013
2014 if (p->is_write)
2015 vcpu_write_sys_reg(vcpu, p->regval, reg);
2016 else
2017 p->regval = vcpu_read_sys_reg(vcpu, reg);
2018 return true;
2019 }
2020
2021 static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2022 const struct sys_reg_desc *r)
2023 {
2024 u32 csselr;
2025
2026 if (p->is_write)
2027 return write_to_read_only(vcpu, p, r);
2028
2029 csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
2030 csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
2031 if (csselr < CSSELR_MAX)
2032 p->regval = get_ccsidr(vcpu, csselr);
2033
2034 return true;
2035 }
2036
2037 static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
2038 const struct sys_reg_desc *rd)
2039 {
2040 if (kvm_has_mte(vcpu->kvm))
2041 return 0;
2042
2043 return REG_HIDDEN;
2044 }
2045
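/*
 * MTE registers are hidden from userspace unless the VM has MTE, and a
 * guest access to one of them only traps when MTE isn't exposed, in
 * which case it is treated as UNDEFINED.
 */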
2046 #define MTE_REG(name) { \
2047 SYS_DESC(SYS_##name), \
2048 .access = undef_access, \
2049 .reset = reset_unknown, \
2050 .reg = name, \
2051 .visibility = mte_visibility, \
2052 }
2053
2054 static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
2055 const struct sys_reg_desc *rd)
2056 {
2057 if (vcpu_has_nv(vcpu))
2058 return 0;
2059
2060 return REG_HIDDEN;
2061 }
2062
2063 static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
2064 struct sys_reg_params *p,
2065 const struct sys_reg_desc *r)
2066 {
2067 /*
2068 * We really shouldn't be here, and this is likely the result
2069 * of a misconfigured trap, as this register should target the
2070 * VNCR page, and nothing else.
2071 */
2072 return bad_trap(vcpu, p, r,
2073 "trap of VNCR-backed register");
2074 }
2075
2076 static bool bad_redir_trap(struct kvm_vcpu *vcpu,
2077 struct sys_reg_params *p,
2078 const struct sys_reg_desc *r)
2079 {
2080 /*
2081 * We really shouldn't be here, and this is likely the result
2082 * of a misconfigured trap, as this register should target the
2083 * corresponding EL1, and nothing else.
2084 */
2085 return bad_trap(vcpu, p, r,
2086 "trap of EL2 register redirected to EL1");
2087 }
2088
2089 #define EL2_REG(name, acc, rst, v) { \
2090 SYS_DESC(SYS_##name), \
2091 .access = acc, \
2092 .reset = rst, \
2093 .reg = name, \
2094 .visibility = el2_visibility, \
2095 .val = v, \
2096 }
2097
2098 #define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v)
2099 #define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v)
2100
2101 /*
2102 * The reset() callback and the val field are not otherwise used for
2103 * idregs, so they are repurposed here:
2104 * - reset() returns the KVM-sanitised register value, which is the same
2105 * as the host kernel's sanitised value when KVM applies no extra
2106 * sanitisation.
2107 * - val is a mask of the fields writable from userspace; only bits set
2108 * to 1 can be written. This mask may become unnecessary once all ID
2109 * registers are writable from userspace.
2110 */
2111
2112 #define ID_DESC(name) \
2113 SYS_DESC(SYS_##name), \
2114 .access = access_id_reg, \
2115 .get_user = get_id_reg \
2116
2117 /* sys_reg_desc initialiser for known cpufeature ID registers */
2118 #define ID_SANITISED(name) { \
2119 ID_DESC(name), \
2120 .set_user = set_id_reg, \
2121 .visibility = id_visibility, \
2122 .reset = kvm_read_sanitised_id_reg, \
2123 .val = 0, \
2124 }
2125
2126 /* sys_reg_desc initialiser for known cpufeature ID registers */
2127 #define AA32_ID_SANITISED(name) { \
2128 ID_DESC(name), \
2129 .set_user = set_id_reg, \
2130 .visibility = aa32_id_visibility, \
2131 .reset = kvm_read_sanitised_id_reg, \
2132 .val = 0, \
2133 }
2134
2135 /* sys_reg_desc initialiser for writable ID registers */
2136 #define ID_WRITABLE(name, mask) { \
2137 ID_DESC(name), \
2138 .set_user = set_id_reg, \
2139 .visibility = id_visibility, \
2140 .reset = kvm_read_sanitised_id_reg, \
2141 .val = mask, \
2142 }
2143
2144 /*
2145 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
2146 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
2147 * (1 <= crm < 8, 0 <= Op2 < 8).
2148 */
2149 #define ID_UNALLOCATED(crm, op2) { \
2150 Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
2151 .access = access_id_reg, \
2152 .get_user = get_id_reg, \
2153 .set_user = set_id_reg, \
2154 .visibility = raz_visibility, \
2155 .reset = kvm_read_sanitised_id_reg, \
2156 .val = 0, \
2157 }
2158
2159 /*
2160 * sys_reg_desc initialiser for known ID registers that we hide from guests.
2161 * For now, these are exposed just like unallocated ID regs: they appear
2162 * RAZ for the guest.
2163 */
2164 #define ID_HIDDEN(name) { \
2165 ID_DESC(name), \
2166 .set_user = set_id_reg, \
2167 .visibility = raz_visibility, \
2168 .reset = kvm_read_sanitised_id_reg, \
2169 .val = 0, \
2170 }
2171
2172 static bool access_sp_el1(struct kvm_vcpu *vcpu,
2173 struct sys_reg_params *p,
2174 const struct sys_reg_desc *r)
2175 {
2176 if (p->is_write)
2177 __vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
2178 else
2179 p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
2180
2181 return true;
2182 }
2183
2184 static bool access_elr(struct kvm_vcpu *vcpu,
2185 struct sys_reg_params *p,
2186 const struct sys_reg_desc *r)
2187 {
2188 if (p->is_write)
2189 vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
2190 else
2191 p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);
2192
2193 return true;
2194 }
2195
2196 static bool access_spsr(struct kvm_vcpu *vcpu,
2197 struct sys_reg_params *p,
2198 const struct sys_reg_desc *r)
2199 {
2200 if (p->is_write)
2201 __vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
2202 else
2203 p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
2204
2205 return true;
2206 }
2207
2208 static bool access_cntkctl_el12(struct kvm_vcpu *vcpu,
2209 struct sys_reg_params *p,
2210 const struct sys_reg_desc *r)
2211 {
2212 if (p->is_write)
2213 __vcpu_sys_reg(vcpu, CNTKCTL_EL1) = p->regval;
2214 else
2215 p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);
2216
2217 return true;
2218 }
2219
2220 static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2221 {
2222 u64 val = r->val;
2223
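/*
 * If the CPU cannot run with HCR_EL2.NV1 set, a vEL2 guest can only
 * be VHE, so default HCR_EL2.E2H to 1 (effectively RES1).
 */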
2224 if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
2225 val |= HCR_E2H;
2226
2227 return __vcpu_sys_reg(vcpu, r->reg) = val;
2228 }
2229
2230 static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
2231 const struct sys_reg_desc *rd)
2232 {
2233 unsigned int r;
2234
2235 r = el2_visibility(vcpu, rd);
2236 if (r)
2237 return r;
2238
2239 return sve_visibility(vcpu, rd);
2240 }
2241
2242 static bool access_zcr_el2(struct kvm_vcpu *vcpu,
2243 struct sys_reg_params *p,
2244 const struct sys_reg_desc *r)
2245 {
2246 unsigned int vq;
2247
2248 if (guest_hyp_sve_traps_enabled(vcpu)) {
2249 kvm_inject_nested_sve_trap(vcpu);
2250 return true;
2251 }
2252
2253 if (!p->is_write) {
2254 p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2);
2255 return true;
2256 }
2257
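/*
 * For writes, clamp the requested vector length to what this vCPU
 * supports: ZCR_ELx.LEN encodes the VL in 128-bit increments, minus
 * one, hence the +1/-1 dance around vcpu_sve_max_vq().
 */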
2258 vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
2259 vq = min(vq, vcpu_sve_max_vq(vcpu));
2260 vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2);
2261 return true;
2262 }
2263
2264 static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu,
2265 const struct sys_reg_desc *rd)
2266 {
2267 if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
2268 return 0;
2269
2270 return REG_HIDDEN;
2271 }
2272
2273 /*
2274 * Architected system registers.
2275 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
2276 *
2277 * Debug handling: We do trap most, if not all debug related system
2278 * registers. The implementation is good enough to ensure that a guest
2279 * can use these with minimal performance degradation. The drawback is
2280 * that we don't implement any of the external debug architecture.
2281 * This should be revisited if we ever encounter a more demanding
2282 * guest...
2283 */
2284 static const struct sys_reg_desc sys_reg_descs[] = {
2285 DBG_BCR_BVR_WCR_WVR_EL1(0),
2286 DBG_BCR_BVR_WCR_WVR_EL1(1),
2287 { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
2288 { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
2289 DBG_BCR_BVR_WCR_WVR_EL1(2),
2290 DBG_BCR_BVR_WCR_WVR_EL1(3),
2291 DBG_BCR_BVR_WCR_WVR_EL1(4),
2292 DBG_BCR_BVR_WCR_WVR_EL1(5),
2293 DBG_BCR_BVR_WCR_WVR_EL1(6),
2294 DBG_BCR_BVR_WCR_WVR_EL1(7),
2295 DBG_BCR_BVR_WCR_WVR_EL1(8),
2296 DBG_BCR_BVR_WCR_WVR_EL1(9),
2297 DBG_BCR_BVR_WCR_WVR_EL1(10),
2298 DBG_BCR_BVR_WCR_WVR_EL1(11),
2299 DBG_BCR_BVR_WCR_WVR_EL1(12),
2300 DBG_BCR_BVR_WCR_WVR_EL1(13),
2301 DBG_BCR_BVR_WCR_WVR_EL1(14),
2302 DBG_BCR_BVR_WCR_WVR_EL1(15),
2303
2304 { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
2305 { SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
2306 { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
2307 OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
2308 { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
2309 { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
2310 { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
2311 { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
2312 { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
2313
2314 { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
2315 { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
2316 // DBGDTR[TR]X_EL0 share the same encoding
2317 { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
2318
2319 { SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 },
2320
2321 { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
2322
2323 /*
2324 * ID regs: all ID_SANITISED() entries here must have corresponding
2325 * entries in arm64_ftr_regs[].
2326 */
2327
2328 /* AArch64 mappings of the AArch32 ID registers */
2329 /* CRm=1 */
2330 AA32_ID_SANITISED(ID_PFR0_EL1),
2331 AA32_ID_SANITISED(ID_PFR1_EL1),
2332 { SYS_DESC(SYS_ID_DFR0_EL1),
2333 .access = access_id_reg,
2334 .get_user = get_id_reg,
2335 .set_user = set_id_dfr0_el1,
2336 .visibility = aa32_id_visibility,
2337 .reset = read_sanitised_id_dfr0_el1,
2338 .val = ID_DFR0_EL1_PerfMon_MASK |
2339 ID_DFR0_EL1_CopDbg_MASK, },
2340 ID_HIDDEN(ID_AFR0_EL1),
2341 AA32_ID_SANITISED(ID_MMFR0_EL1),
2342 AA32_ID_SANITISED(ID_MMFR1_EL1),
2343 AA32_ID_SANITISED(ID_MMFR2_EL1),
2344 AA32_ID_SANITISED(ID_MMFR3_EL1),
2345
2346 /* CRm=2 */
2347 AA32_ID_SANITISED(ID_ISAR0_EL1),
2348 AA32_ID_SANITISED(ID_ISAR1_EL1),
2349 AA32_ID_SANITISED(ID_ISAR2_EL1),
2350 AA32_ID_SANITISED(ID_ISAR3_EL1),
2351 AA32_ID_SANITISED(ID_ISAR4_EL1),
2352 AA32_ID_SANITISED(ID_ISAR5_EL1),
2353 AA32_ID_SANITISED(ID_MMFR4_EL1),
2354 AA32_ID_SANITISED(ID_ISAR6_EL1),
2355
2356 /* CRm=3 */
2357 AA32_ID_SANITISED(MVFR0_EL1),
2358 AA32_ID_SANITISED(MVFR1_EL1),
2359 AA32_ID_SANITISED(MVFR2_EL1),
2360 ID_UNALLOCATED(3,3),
2361 AA32_ID_SANITISED(ID_PFR2_EL1),
2362 ID_HIDDEN(ID_DFR1_EL1),
2363 AA32_ID_SANITISED(ID_MMFR5_EL1),
2364 ID_UNALLOCATED(3,7),
2365
2366 /* AArch64 ID registers */
2367 /* CRm=4 */
2368 { SYS_DESC(SYS_ID_AA64PFR0_EL1),
2369 .access = access_id_reg,
2370 .get_user = get_id_reg,
2371 .set_user = set_id_reg,
2372 .reset = read_sanitised_id_aa64pfr0_el1,
2373 .val = ~(ID_AA64PFR0_EL1_AMU |
2374 ID_AA64PFR0_EL1_MPAM |
2375 ID_AA64PFR0_EL1_SVE |
2376 ID_AA64PFR0_EL1_RAS |
2377 ID_AA64PFR0_EL1_AdvSIMD |
2378 ID_AA64PFR0_EL1_FP), },
2379 ID_SANITISED(ID_AA64PFR1_EL1),
2380 ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR),
2381 ID_UNALLOCATED(4,3),
2382 ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
2383 ID_HIDDEN(ID_AA64SMFR0_EL1),
2384 ID_UNALLOCATED(4,6),
2385 ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
2386
2387 /* CRm=5 */
2388 { SYS_DESC(SYS_ID_AA64DFR0_EL1),
2389 .access = access_id_reg,
2390 .get_user = get_id_reg,
2391 .set_user = set_id_aa64dfr0_el1,
2392 .reset = read_sanitised_id_aa64dfr0_el1,
2393 .val = ID_AA64DFR0_EL1_PMUVer_MASK |
2394 ID_AA64DFR0_EL1_DebugVer_MASK, },
2395 ID_SANITISED(ID_AA64DFR1_EL1),
2396 ID_UNALLOCATED(5,2),
2397 ID_UNALLOCATED(5,3),
2398 ID_HIDDEN(ID_AA64AFR0_EL1),
2399 ID_HIDDEN(ID_AA64AFR1_EL1),
2400 ID_UNALLOCATED(5,6),
2401 ID_UNALLOCATED(5,7),
2402
2403 /* CRm=6 */
2404 ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
2405 ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
2406 ID_AA64ISAR1_EL1_GPA |
2407 ID_AA64ISAR1_EL1_API |
2408 ID_AA64ISAR1_EL1_APA)),
2409 ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
2410 ID_AA64ISAR2_EL1_APA3 |
2411 ID_AA64ISAR2_EL1_GPA3)),
2412 ID_UNALLOCATED(6,3),
2413 ID_UNALLOCATED(6,4),
2414 ID_UNALLOCATED(6,5),
2415 ID_UNALLOCATED(6,6),
2416 ID_UNALLOCATED(6,7),
2417
2418 /* CRm=7 */
2419 ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 |
2420 ID_AA64MMFR0_EL1_TGRAN4_2 |
2421 ID_AA64MMFR0_EL1_TGRAN64_2 |
2422 ID_AA64MMFR0_EL1_TGRAN16_2)),
2423 ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
2424 ID_AA64MMFR1_EL1_HCX |
2425 ID_AA64MMFR1_EL1_TWED |
2426 ID_AA64MMFR1_EL1_XNX |
2427 ID_AA64MMFR1_EL1_VH |
2428 ID_AA64MMFR1_EL1_VMIDBits)),
2429 ID_WRITABLE(ID_AA64MMFR2_EL1, ~(ID_AA64MMFR2_EL1_RES0 |
2430 ID_AA64MMFR2_EL1_EVT |
2431 ID_AA64MMFR2_EL1_FWB |
2432 ID_AA64MMFR2_EL1_IDS |
2433 ID_AA64MMFR2_EL1_NV |
2434 ID_AA64MMFR2_EL1_CCIDX)),
2435 ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX |
2436 ID_AA64MMFR3_EL1_S1POE)),
2437 ID_SANITISED(ID_AA64MMFR4_EL1),
2438 ID_UNALLOCATED(7,5),
2439 ID_UNALLOCATED(7,6),
2440 ID_UNALLOCATED(7,7),
2441
2442 { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
2443 { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
2444 { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
2445
2446 MTE_REG(RGSR_EL1),
2447 MTE_REG(GCR_EL1),
2448
2449 { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
2450 { SYS_DESC(SYS_TRFCR_EL1), undef_access },
2451 { SYS_DESC(SYS_SMPRI_EL1), undef_access },
2452 { SYS_DESC(SYS_SMCR_EL1), undef_access },
2453 { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
2454 { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
2455 { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
2456 { SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0 },
2457
2458 PTRAUTH_KEY(APIA),
2459 PTRAUTH_KEY(APIB),
2460 PTRAUTH_KEY(APDA),
2461 PTRAUTH_KEY(APDB),
2462 PTRAUTH_KEY(APGA),
2463
2464 { SYS_DESC(SYS_SPSR_EL1), access_spsr },
2465 { SYS_DESC(SYS_ELR_EL1), access_elr },
2466
2467 { SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
2468
2469 { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
2470 { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
2471 { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
2472
2473 { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
2474 { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
2475 { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
2476 { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
2477 { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
2478 { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
2479 { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
2480 { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
2481
2482 MTE_REG(TFSR_EL1),
2483 MTE_REG(TFSRE0_EL1),
2484
2485 { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
2486 { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
2487
2488 { SYS_DESC(SYS_PMSCR_EL1), undef_access },
2489 { SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
2490 { SYS_DESC(SYS_PMSICR_EL1), undef_access },
2491 { SYS_DESC(SYS_PMSIRR_EL1), undef_access },
2492 { SYS_DESC(SYS_PMSFCR_EL1), undef_access },
2493 { SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
2494 { SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
2495 { SYS_DESC(SYS_PMSIDR_EL1), undef_access },
2496 { SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
2497 { SYS_DESC(SYS_PMBPTR_EL1), undef_access },
2498 { SYS_DESC(SYS_PMBSR_EL1), undef_access },
2499 /* PMBIDR_EL1 is not trapped */
2500
2501 { PMU_SYS_REG(PMINTENSET_EL1),
2502 .access = access_pminten, .reg = PMINTENSET_EL1,
2503 .get_user = get_pmreg, .set_user = set_pmreg },
2504 { PMU_SYS_REG(PMINTENCLR_EL1),
2505 .access = access_pminten, .reg = PMINTENSET_EL1,
2506 .get_user = get_pmreg, .set_user = set_pmreg },
2507 { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
2508
2509 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
2510 { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
2511 { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
2512 { SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1,
2513 .visibility = s1poe_visibility },
2514 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
2515
2516 { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
2517 { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
2518 { SYS_DESC(SYS_LORN_EL1), trap_loregion },
2519 { SYS_DESC(SYS_LORC_EL1), trap_loregion },
2520 { SYS_DESC(SYS_LORID_EL1), trap_loregion },
2521
2522 { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
2523 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
2524
2525 { SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
2526 { SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
2527 { SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
2528 { SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
2529 { SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
2530 { SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
2531 { SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
2532 { SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
2533 { SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
2534 { SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
2535 { SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
2536 { SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
2537 { SYS_DESC(SYS_ICC_DIR_EL1), undef_access },
2538 { SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
2539 { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
2540 { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
2541 { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
2542 { SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
2543 { SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
2544 { SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
2545 { SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
2546 { SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
2547 { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
2548 { SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
2549 { SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
2550
2551 { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
2552 { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
2553
2554 { SYS_DESC(SYS_ACCDATA_EL1), undef_access },
2555
2556 { SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
2557
2558 { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
2559
2560 { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
2561 { SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
2562 .set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
2563 { SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
2564 { SYS_DESC(SYS_SMIDR_EL1), undef_access },
2565 { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
2566 ID_WRITABLE(CTR_EL0, CTR_EL0_DIC_MASK |
2567 CTR_EL0_IDC_MASK |
2568 CTR_EL0_DminLine_MASK |
2569 CTR_EL0_IminLine_MASK),
2570 { SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility },
2571 { SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
2572
2573 { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
2574 .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
2575 { PMU_SYS_REG(PMCNTENSET_EL0),
2576 .access = access_pmcnten, .reg = PMCNTENSET_EL0,
2577 .get_user = get_pmreg, .set_user = set_pmreg },
2578 { PMU_SYS_REG(PMCNTENCLR_EL0),
2579 .access = access_pmcnten, .reg = PMCNTENSET_EL0,
2580 .get_user = get_pmreg, .set_user = set_pmreg },
2581 { PMU_SYS_REG(PMOVSCLR_EL0),
2582 .access = access_pmovs, .reg = PMOVSSET_EL0,
2583 .get_user = get_pmreg, .set_user = set_pmreg },
2584 /*
2585 * PMSWINC_EL0 is exposed to userspace as RAZ/WI, as it was
2586 * previously (and pointlessly) advertised...
2587 */
2588 { PMU_SYS_REG(PMSWINC_EL0),
2589 .get_user = get_raz_reg, .set_user = set_wi_reg,
2590 .access = access_pmswinc, .reset = NULL },
2591 { PMU_SYS_REG(PMSELR_EL0),
2592 .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
2593 { PMU_SYS_REG(PMCEID0_EL0),
2594 .access = access_pmceid, .reset = NULL },
2595 { PMU_SYS_REG(PMCEID1_EL0),
2596 .access = access_pmceid, .reset = NULL },
2597 { PMU_SYS_REG(PMCCNTR_EL0),
2598 .access = access_pmu_evcntr, .reset = reset_unknown,
2599 .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
2600 { PMU_SYS_REG(PMXEVTYPER_EL0),
2601 .access = access_pmu_evtyper, .reset = NULL },
2602 { PMU_SYS_REG(PMXEVCNTR_EL0),
2603 .access = access_pmu_evcntr, .reset = NULL },
2604 /*
2605 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
2606 * in 32bit mode. Here we choose to reset it as zero for consistency.
2607 */
2608 { PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
2609 .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
2610 { PMU_SYS_REG(PMOVSSET_EL0),
2611 .access = access_pmovs, .reg = PMOVSSET_EL0,
2612 .get_user = get_pmreg, .set_user = set_pmreg },
2613
2614 { SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0,
2615 .visibility = s1poe_visibility },
2616 { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
2617 { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
2618 { SYS_DESC(SYS_TPIDR2_EL0), undef_access },
2619
2620 { SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
2621
2622 { SYS_DESC(SYS_AMCR_EL0), undef_access },
2623 { SYS_DESC(SYS_AMCFGR_EL0), undef_access },
2624 { SYS_DESC(SYS_AMCGCR_EL0), undef_access },
2625 { SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
2626 { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
2627 { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
2628 { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
2629 { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
2630 AMU_AMEVCNTR0_EL0(0),
2631 AMU_AMEVCNTR0_EL0(1),
2632 AMU_AMEVCNTR0_EL0(2),
2633 AMU_AMEVCNTR0_EL0(3),
2634 AMU_AMEVCNTR0_EL0(4),
2635 AMU_AMEVCNTR0_EL0(5),
2636 AMU_AMEVCNTR0_EL0(6),
2637 AMU_AMEVCNTR0_EL0(7),
2638 AMU_AMEVCNTR0_EL0(8),
2639 AMU_AMEVCNTR0_EL0(9),
2640 AMU_AMEVCNTR0_EL0(10),
2641 AMU_AMEVCNTR0_EL0(11),
2642 AMU_AMEVCNTR0_EL0(12),
2643 AMU_AMEVCNTR0_EL0(13),
2644 AMU_AMEVCNTR0_EL0(14),
2645 AMU_AMEVCNTR0_EL0(15),
2646 AMU_AMEVTYPER0_EL0(0),
2647 AMU_AMEVTYPER0_EL0(1),
2648 AMU_AMEVTYPER0_EL0(2),
2649 AMU_AMEVTYPER0_EL0(3),
2650 AMU_AMEVTYPER0_EL0(4),
2651 AMU_AMEVTYPER0_EL0(5),
2652 AMU_AMEVTYPER0_EL0(6),
2653 AMU_AMEVTYPER0_EL0(7),
2654 AMU_AMEVTYPER0_EL0(8),
2655 AMU_AMEVTYPER0_EL0(9),
2656 AMU_AMEVTYPER0_EL0(10),
2657 AMU_AMEVTYPER0_EL0(11),
2658 AMU_AMEVTYPER0_EL0(12),
2659 AMU_AMEVTYPER0_EL0(13),
2660 AMU_AMEVTYPER0_EL0(14),
2661 AMU_AMEVTYPER0_EL0(15),
2662 AMU_AMEVCNTR1_EL0(0),
2663 AMU_AMEVCNTR1_EL0(1),
2664 AMU_AMEVCNTR1_EL0(2),
2665 AMU_AMEVCNTR1_EL0(3),
2666 AMU_AMEVCNTR1_EL0(4),
2667 AMU_AMEVCNTR1_EL0(5),
2668 AMU_AMEVCNTR1_EL0(6),
2669 AMU_AMEVCNTR1_EL0(7),
2670 AMU_AMEVCNTR1_EL0(8),
2671 AMU_AMEVCNTR1_EL0(9),
2672 AMU_AMEVCNTR1_EL0(10),
2673 AMU_AMEVCNTR1_EL0(11),
2674 AMU_AMEVCNTR1_EL0(12),
2675 AMU_AMEVCNTR1_EL0(13),
2676 AMU_AMEVCNTR1_EL0(14),
2677 AMU_AMEVCNTR1_EL0(15),
2678 AMU_AMEVTYPER1_EL0(0),
2679 AMU_AMEVTYPER1_EL0(1),
2680 AMU_AMEVTYPER1_EL0(2),
2681 AMU_AMEVTYPER1_EL0(3),
2682 AMU_AMEVTYPER1_EL0(4),
2683 AMU_AMEVTYPER1_EL0(5),
2684 AMU_AMEVTYPER1_EL0(6),
2685 AMU_AMEVTYPER1_EL0(7),
2686 AMU_AMEVTYPER1_EL0(8),
2687 AMU_AMEVTYPER1_EL0(9),
2688 AMU_AMEVTYPER1_EL0(10),
2689 AMU_AMEVTYPER1_EL0(11),
2690 AMU_AMEVTYPER1_EL0(12),
2691 AMU_AMEVTYPER1_EL0(13),
2692 AMU_AMEVTYPER1_EL0(14),
2693 AMU_AMEVTYPER1_EL0(15),
2694
2695 { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
2696 { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
2697 { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
2698 { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
2699 { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
2700
2701 /* PMEVCNTRn_EL0 */
2702 PMU_PMEVCNTR_EL0(0),
2703 PMU_PMEVCNTR_EL0(1),
2704 PMU_PMEVCNTR_EL0(2),
2705 PMU_PMEVCNTR_EL0(3),
2706 PMU_PMEVCNTR_EL0(4),
2707 PMU_PMEVCNTR_EL0(5),
2708 PMU_PMEVCNTR_EL0(6),
2709 PMU_PMEVCNTR_EL0(7),
2710 PMU_PMEVCNTR_EL0(8),
2711 PMU_PMEVCNTR_EL0(9),
2712 PMU_PMEVCNTR_EL0(10),
2713 PMU_PMEVCNTR_EL0(11),
2714 PMU_PMEVCNTR_EL0(12),
2715 PMU_PMEVCNTR_EL0(13),
2716 PMU_PMEVCNTR_EL0(14),
2717 PMU_PMEVCNTR_EL0(15),
2718 PMU_PMEVCNTR_EL0(16),
2719 PMU_PMEVCNTR_EL0(17),
2720 PMU_PMEVCNTR_EL0(18),
2721 PMU_PMEVCNTR_EL0(19),
2722 PMU_PMEVCNTR_EL0(20),
2723 PMU_PMEVCNTR_EL0(21),
2724 PMU_PMEVCNTR_EL0(22),
2725 PMU_PMEVCNTR_EL0(23),
2726 PMU_PMEVCNTR_EL0(24),
2727 PMU_PMEVCNTR_EL0(25),
2728 PMU_PMEVCNTR_EL0(26),
2729 PMU_PMEVCNTR_EL0(27),
2730 PMU_PMEVCNTR_EL0(28),
2731 PMU_PMEVCNTR_EL0(29),
2732 PMU_PMEVCNTR_EL0(30),
2733 /* PMEVTYPERn_EL0 */
2734 PMU_PMEVTYPER_EL0(0),
2735 PMU_PMEVTYPER_EL0(1),
2736 PMU_PMEVTYPER_EL0(2),
2737 PMU_PMEVTYPER_EL0(3),
2738 PMU_PMEVTYPER_EL0(4),
2739 PMU_PMEVTYPER_EL0(5),
2740 PMU_PMEVTYPER_EL0(6),
2741 PMU_PMEVTYPER_EL0(7),
2742 PMU_PMEVTYPER_EL0(8),
2743 PMU_PMEVTYPER_EL0(9),
2744 PMU_PMEVTYPER_EL0(10),
2745 PMU_PMEVTYPER_EL0(11),
2746 PMU_PMEVTYPER_EL0(12),
2747 PMU_PMEVTYPER_EL0(13),
2748 PMU_PMEVTYPER_EL0(14),
2749 PMU_PMEVTYPER_EL0(15),
2750 PMU_PMEVTYPER_EL0(16),
2751 PMU_PMEVTYPER_EL0(17),
2752 PMU_PMEVTYPER_EL0(18),
2753 PMU_PMEVTYPER_EL0(19),
2754 PMU_PMEVTYPER_EL0(20),
2755 PMU_PMEVTYPER_EL0(21),
2756 PMU_PMEVTYPER_EL0(22),
2757 PMU_PMEVTYPER_EL0(23),
2758 PMU_PMEVTYPER_EL0(24),
2759 PMU_PMEVTYPER_EL0(25),
2760 PMU_PMEVTYPER_EL0(26),
2761 PMU_PMEVTYPER_EL0(27),
2762 PMU_PMEVTYPER_EL0(28),
2763 PMU_PMEVTYPER_EL0(29),
2764 PMU_PMEVTYPER_EL0(30),
2765 /*
2766 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
2767 * in 32bit mode. Here we choose to reset it as zero for consistency.
2768 */
2769 { PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
2770 .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
2771
2772 EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
2773 EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
2774 EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
2775 EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
2776 EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
2777 EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
2778 EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
2779 EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
2780 EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
2781 EL2_REG_VNCR(HFGWTR_EL2, reset_val, 0),
2782 EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
2783 EL2_REG_VNCR(HACR_EL2, reset_val, 0),
2784
2785 { SYS_DESC(SYS_ZCR_EL2), .access = access_zcr_el2, .reset = reset_val,
2786 .visibility = sve_el2_visibility, .reg = ZCR_EL2 },
2787
2788 EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
2789
2790 EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
2791 EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
2792 EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
2793 EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
2794 EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
2795
2796 { SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 },
2797 EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0),
2798 EL2_REG_VNCR(HDFGWTR_EL2, reset_val, 0),
2799 EL2_REG_VNCR(HAFGRTR_EL2, reset_val, 0),
2800 EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
2801 EL2_REG_REDIR(ELR_EL2, reset_val, 0),
2802 { SYS_DESC(SYS_SP_EL1), access_sp_el1 },
2803
2804 /* AArch32 SPSR_* are RES0 if trapped from a NV guest */
2805 { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi },
2806 { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi },
2807 { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi },
2808 { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi },
2809
2810 { SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 },
2811 EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
2812 EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
2813 EL2_REG_REDIR(ESR_EL2, reset_val, 0),
2814 { SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 },
2815
2816 EL2_REG_REDIR(FAR_EL2, reset_val, 0),
2817 EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
2818
2819 EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
2820 EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
2821
2822 EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
2823 EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
2824 { SYS_DESC(SYS_RMR_EL2), undef_access },
2825
2826 EL2_REG_VNCR(ICH_HCR_EL2, reset_val, 0),
2827
2828 EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
2829 EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
2830
2831 EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
2832 EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
2833
2834 { SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
2835
2836 EL2_REG(SP_EL2, NULL, reset_unknown, 0),
2837 };
2838
2839 static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2840 const struct sys_reg_desc *r)
2841 {
2842 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
2843
2844 __kvm_at_s1e01(vcpu, op, p->regval);
2845
2846 return true;
2847 }
2848
2849 static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2850 const struct sys_reg_desc *r)
2851 {
2852 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
2853
2854 /* There is no FGT associated with AT S1E2A :-( */
2855 if (op == OP_AT_S1E2A &&
2856 !kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) {
2857 kvm_inject_undefined(vcpu);
2858 return false;
2859 }
2860
2861 __kvm_at_s1e2(vcpu, op, p->regval);
2862
2863 return true;
2864 }
2865
2866 static bool handle_at_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2867 const struct sys_reg_desc *r)
2868 {
2869 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
2870
2871 __kvm_at_s12(vcpu, op, p->regval);
2872
2873 return true;
2874 }
2875
2876 static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vcpu, u32 instr)
2877 {
2878 struct kvm *kvm = vcpu->kvm;
2879 u8 CRm = sys_reg_CRm(instr);
2880
2881 if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
2882 !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
2883 return false;
2884
2885 if (CRm == TLBI_CRm_nROS &&
2886 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
2887 return false;
2888
2889 return true;
2890 }
2891
2892 static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2893 const struct sys_reg_desc *r)
2894 {
2895 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
2896
2897 if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
2898 return undef_access(vcpu, p, r);
2899
2900 write_lock(&vcpu->kvm->mmu_lock);
2901
2902 /*
2903 * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
2904 * corresponding VMIDs.
2905 */
2906 kvm_nested_s2_unmap(vcpu->kvm);
2907
2908 write_unlock(&vcpu->kvm->mmu_lock);
2909
2910 return true;
2911 }
2912
2913 static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vcpu, u32 instr)
2914 {
2915 struct kvm *kvm = vcpu->kvm;
2916 u8 CRm = sys_reg_CRm(instr);
2917 u8 Op2 = sys_reg_Op2(instr);
2918
2919 if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
2920 !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
2921 return false;
2922
2923 if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) &&
2924 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
2925 return false;
2926
2927 if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) &&
2928 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
2929 return false;
2930
2931 if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) &&
2932 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
2933 return false;
2934
2935 return true;
2936 }
2937
2938 /* Only defined here as this is an internal "abstraction" */
2939 union tlbi_info {
2940 struct {
2941 u64 start;
2942 u64 size;
2943 } range;
2944
2945 struct {
2946 u64 addr;
2947 } ipa;
2948
2949 struct {
2950 u64 addr;
2951 u32 encoding;
2952 } va;
2953 };
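/*
 * The TLBI handlers below pass one of these to
 * kvm_s2_mmu_iterate_by_vmid(): 'range' for the unmap-by-range helpers,
 * 'ipa' for IPA-based invalidation, and 'va' for EL1 VA invalidations
 * performed on behalf of a vEL2 guest.
 */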
2954
2955 static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
2956 const union tlbi_info *info)
2957 {
2958 kvm_stage2_unmap_range(mmu, info->range.start, info->range.size);
2959 }
2960
2961 static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2962 const struct sys_reg_desc *r)
2963 {
2964 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
2965 u64 limit, vttbr;
2966
2967 if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
2968 return undef_access(vcpu, p, r);
2969
2970 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
2971 limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));
2972
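/*
 * VMALLS12E1 affects all stage 1+2 entries for the VMID, so drop the
 * shadow S2 mappings over the entire IPA space, [0, 2^PARange).
 */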
2973 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
2974 &(union tlbi_info) {
2975 .range = {
2976 .start = 0,
2977 .size = limit,
2978 },
2979 },
2980 s2_mmu_unmap_range);
2981
2982 return true;
2983 }
2984
2985 static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2986 const struct sys_reg_desc *r)
2987 {
2988 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
2989 u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
2990 u64 base, range, tg, num, scale;
2991 int shift;
2992
2993 if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
2994 return undef_access(vcpu, p, r);
2995
2996 /*
2997 * Because the shadow S2 structure doesn't necessarily reflect that
2998 * of the guest's S2 (different base granule size, for example), we
2999 * decide to ignore TTL and only use the described range.
3000 */
3001 tg = FIELD_GET(GENMASK(47, 46), p->regval);
3002 scale = FIELD_GET(GENMASK(45, 44), p->regval);
3003 num = FIELD_GET(GENMASK(43, 39), p->regval);
3004 base = p->regval & GENMASK(36, 0);
3005
3006 switch (tg) {
3007 case 1:
3008 shift = 12;
3009 break;
3010 case 2:
3011 shift = 14;
3012 break;
3013 case 3:
3014 default: /* IMPDEF: handle tg==0 as 64k */
3015 shift = 16;
3016 break;
3017 }
3018
3019 base <<= shift;
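/*
 * __TLBI_RANGE_PAGES() expands to (NUM + 1) << (5 * SCALE + 1) pages of
 * 2^shift bytes each, matching the ARM ARM encoding of range TLBIs.
 */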
3020 range = __TLBI_RANGE_PAGES(num, scale) << shift;
3021
3022 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3023 &(union tlbi_info) {
3024 .range = {
3025 .start = base,
3026 .size = range,
3027 },
3028 },
3029 s2_mmu_unmap_range);
3030
3031 return true;
3032 }
3033
3034 static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
3035 const union tlbi_info *info)
3036 {
3037 unsigned long max_size;
3038 u64 base_addr;
3039
3040 /*
3041 * We drop a number of things from the supplied value:
3042 *
3043 * - NS bit: we're non-secure only.
3044 *
3045 * - IPA[51:48]: We don't support 52bit IPA just yet...
3046 *
3047 * And of course, shift the IPA field into an actual byte address.
3048 */
3049 base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12;
3050 max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
3051 base_addr &= ~(max_size - 1);
3052
3053 kvm_stage2_unmap_range(mmu, base_addr, max_size);
3054 }
3055
3056 static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3057 const struct sys_reg_desc *r)
3058 {
3059 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3060 u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3061
3062 if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
3063 return undef_access(vcpu, p, r);
3064
3065 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3066 &(union tlbi_info) {
3067 .ipa = {
3068 .addr = p->regval,
3069 },
3070 },
3071 s2_mmu_unmap_ipa);
3072
3073 return true;
3074 }
3075
3076 static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
3077 const union tlbi_info *info)
3078 {
3079 WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
3080 }
3081
3082 static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3083 const struct sys_reg_desc *r)
3084 {
3085 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3086 u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3087
3088 /*
3089 * If we're here, this is because we've trapped on an EL1 TLBI
3090 * instruction that affects the EL1 translation regime while
3091 * we're running in a context that doesn't allow us to let the
3092 * HW do its thing (aka vEL2):
3093 *
3094 * - HCR_EL2.E2H == 0 : a non-VHE guest
3095 * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
3096 *
3097 * We don't expect these helpers to ever be called when running
3098 * in a vEL1 context.
3099 */
3100
3101 WARN_ON(!vcpu_is_el2(vcpu));
3102
3103 if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding))
3104 return undef_access(vcpu, p, r);
3105
3106 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3107 &(union tlbi_info) {
3108 .va = {
3109 .addr = p->regval,
3110 .encoding = sys_encoding,
3111 },
3112 },
3113 s2_mmu_tlbi_s1e1);
3114
3115 return true;
3116 }
3117
3118 #define SYS_INSN(insn, access_fn) \
3119 { \
3120 SYS_DESC(OP_##insn), \
3121 .access = (access_fn), \
3122 }
3123
3124 static struct sys_reg_desc sys_insn_descs[] = {
3125 { SYS_DESC(SYS_DC_ISW), access_dcsw },
3126 { SYS_DESC(SYS_DC_IGSW), access_dcgsw },
3127 { SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
3128
3129 SYS_INSN(AT_S1E1R, handle_at_s1e01),
3130 SYS_INSN(AT_S1E1W, handle_at_s1e01),
3131 SYS_INSN(AT_S1E0R, handle_at_s1e01),
3132 SYS_INSN(AT_S1E0W, handle_at_s1e01),
3133 SYS_INSN(AT_S1E1RP, handle_at_s1e01),
3134 SYS_INSN(AT_S1E1WP, handle_at_s1e01),
3135
3136 { SYS_DESC(SYS_DC_CSW), access_dcsw },
3137 { SYS_DESC(SYS_DC_CGSW), access_dcgsw },
3138 { SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
3139 { SYS_DESC(SYS_DC_CISW), access_dcsw },
3140 { SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
3141 { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
3142
3143 SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1),
3144 SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1),
3145 SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1),
3146 SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1),
3147 SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1),
3148 SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1),
3149
3150 SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1),
3151 SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1),
3152 SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1),
3153 SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1),
3154
3155 SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1),
3156 SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1),
3157 SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1),
3158 SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1),
3159 SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1),
3160 SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1),
3161
3162 SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1),
3163 SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1),
3164 SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1),
3165 SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1),
3166
3167 SYS_INSN(TLBI_RVAE1, handle_tlbi_el1),
3168 SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1),
3169 SYS_INSN(TLBI_RVALE1, handle_tlbi_el1),
3170 SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1),
3171
3172 SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1),
3173 SYS_INSN(TLBI_VAE1, handle_tlbi_el1),
3174 SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1),
3175 SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
3176 SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
3177 SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),
3178
3179 SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1),
3180 SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1),
3181 SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1),
3182 SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1),
3183 SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1),
3184 SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1),
3185
3186 SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1),
3187 SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1),
3188 SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1),
3189 SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1),
3190
3191 SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1),
3192 SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1),
3193 SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1),
3194 SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1),
3195 SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1),
3196 SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1),
3197
3198 SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1),
3199 SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1),
3200 SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1),
3201 SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1),
3202
3203 SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1),
3204 SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1),
3205 SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1),
3206 SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1),
3207
3208 SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1),
3209 SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1),
3210 SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1),
3211 SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1),
3212 SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1),
3213 SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1),
3214
3215 SYS_INSN(AT_S1E2R, handle_at_s1e2),
3216 SYS_INSN(AT_S1E2W, handle_at_s1e2),
3217 SYS_INSN(AT_S12E1R, handle_at_s12),
3218 SYS_INSN(AT_S12E1W, handle_at_s12),
3219 SYS_INSN(AT_S12E0R, handle_at_s12),
3220 SYS_INSN(AT_S12E0W, handle_at_s12),
3221 SYS_INSN(AT_S1E2A, handle_at_s1e2),
3222
3223 SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is),
3224 SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is),
3225 SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
3226 SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),
3227
3228 SYS_INSN(TLBI_ALLE2OS, undef_access),
3229 SYS_INSN(TLBI_VAE2OS, undef_access),
3230 SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
3231 SYS_INSN(TLBI_VALE2OS, undef_access),
3232 SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),
3233
3234 SYS_INSN(TLBI_RVAE2IS, undef_access),
3235 SYS_INSN(TLBI_RVALE2IS, undef_access),
3236
3237 SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
3238 SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
3239 SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is),
3240 SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is),
3241 SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is),
3242 SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is),
3243 SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is),
3244 SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
3245 SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
3246 SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
3247 SYS_INSN(TLBI_RVAE2OS, undef_access),
3248 SYS_INSN(TLBI_RVALE2OS, undef_access),
3249 SYS_INSN(TLBI_RVAE2, undef_access),
3250 SYS_INSN(TLBI_RVALE2, undef_access),
3251 SYS_INSN(TLBI_ALLE1, handle_alle1is),
3252 SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
3253
3254 SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is),
3255 SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is),
3256 SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
3257 SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),
3258
3259 SYS_INSN(TLBI_ALLE2OSNXS, undef_access),
3260 SYS_INSN(TLBI_VAE2OSNXS, undef_access),
3261 SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
3262 SYS_INSN(TLBI_VALE2OSNXS, undef_access),
3263 SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),
3264
3265 SYS_INSN(TLBI_RVAE2ISNXS, undef_access),
3266 SYS_INSN(TLBI_RVALE2ISNXS, undef_access),
3267 SYS_INSN(TLBI_ALLE2ISNXS, undef_access),
3268 SYS_INSN(TLBI_VAE2ISNXS, undef_access),
3269
3270 SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
3271 SYS_INSN(TLBI_VALE2ISNXS, undef_access),
3272 SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
3273 SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
3274 SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
3275 SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is),
3276 SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is),
3277 SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is),
3278 SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
3279 SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
3280 SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
3281 SYS_INSN(TLBI_RVAE2OSNXS, undef_access),
3282 SYS_INSN(TLBI_RVALE2OSNXS, undef_access),
3283 SYS_INSN(TLBI_RVAE2NXS, undef_access),
3284 SYS_INSN(TLBI_RVALE2NXS, undef_access),
3285 SYS_INSN(TLBI_ALLE2NXS, undef_access),
3286 SYS_INSN(TLBI_VAE2NXS, undef_access),
3287 SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
3288 SYS_INSN(TLBI_VALE2NXS, undef_access),
3289 SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
3290 };
3291
3292 static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
3293 struct sys_reg_params *p,
3294 const struct sys_reg_desc *r)
3295 {
3296 if (p->is_write) {
3297 return ignore_write(vcpu, p);
3298 } else {
3299 u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
3300 u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
3301
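/*
 * Assemble a DBGDIDR view from ID_AA64DFR0_EL1: WRPs/BRPs/CTX_CMPs and
 * the debug version in their AArch32 positions, bit 15 as RES1, and
 * nSUHD_imp/SE_imp (bits 14 and 12) reflecting EL3 presence.
 */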
3302 p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
3303 (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
3304 (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
3305 (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
3306 (1 << 15) | (el3 << 14) | (el3 << 12));
3307 return true;
3308 }
3309 }
3310
3311 /*
3312 * AArch32 debug register mappings
3313 *
3314 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
3315 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
3316 *
3317 * None of the other registers share their location, so treat them as
3318 * if they were 64bit.
3319 */
3320 #define DBG_BCR_BVR_WCR_WVR(n) \
3321 /* DBGBVRn */ \
3322 { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
3323 /* DBGBCRn */ \
3324 { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
3325 /* DBGWVRn */ \
3326 { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
3327 /* DBGWCRn */ \
3328 { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
3329
3330 #define DBGBXVR(n) \
3331 { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
3332
3333 /*
3334 * Trapped cp14 registers. We generally ignore most of the external
3335 * debug registers, on the principle that they don't really make sense
3336 * to a guest. Revisit this one day, should this principle change.
3337 */
3338 static const struct sys_reg_desc cp14_regs[] = {
3339 /* DBGDIDR */
3340 { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
3341 /* DBGDTRRXext */
3342 { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
3343
3344 DBG_BCR_BVR_WCR_WVR(0),
3345 /* DBGDSCRint */
3346 { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
3347 DBG_BCR_BVR_WCR_WVR(1),
3348 /* DBGDCCINT */
3349 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
3350 /* DBGDSCRext */
3351 { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
3352 DBG_BCR_BVR_WCR_WVR(2),
3353 /* DBGDTR[RT]Xint */
3354 { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
3355 /* DBGDTR[RT]Xext */
3356 { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
3357 DBG_BCR_BVR_WCR_WVR(3),
3358 DBG_BCR_BVR_WCR_WVR(4),
3359 DBG_BCR_BVR_WCR_WVR(5),
3360 /* DBGWFAR */
3361 { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
3362 /* DBGOSECCR */
3363 { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
3364 DBG_BCR_BVR_WCR_WVR(6),
3365 /* DBGVCR */
3366 { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
3367 DBG_BCR_BVR_WCR_WVR(7),
3368 DBG_BCR_BVR_WCR_WVR(8),
3369 DBG_BCR_BVR_WCR_WVR(9),
3370 DBG_BCR_BVR_WCR_WVR(10),
3371 DBG_BCR_BVR_WCR_WVR(11),
3372 DBG_BCR_BVR_WCR_WVR(12),
3373 DBG_BCR_BVR_WCR_WVR(13),
3374 DBG_BCR_BVR_WCR_WVR(14),
3375 DBG_BCR_BVR_WCR_WVR(15),
3376
3377 /* DBGDRAR (32bit) */
3378 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
3379
3380 DBGBXVR(0),
3381 /* DBGOSLAR */
3382 { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
3383 DBGBXVR(1),
3384 /* DBGOSLSR */
3385 { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
3386 DBGBXVR(2),
3387 DBGBXVR(3),
3388 /* DBGOSDLR */
3389 { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
3390 DBGBXVR(4),
3391 /* DBGPRCR */
3392 { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
3393 DBGBXVR(5),
3394 DBGBXVR(6),
3395 DBGBXVR(7),
3396 DBGBXVR(8),
3397 DBGBXVR(9),
3398 DBGBXVR(10),
3399 DBGBXVR(11),
3400 DBGBXVR(12),
3401 DBGBXVR(13),
3402 DBGBXVR(14),
3403 DBGBXVR(15),
3404
3405 /* DBGDSAR (32bit) */
3406 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
3407
3408 /* DBGDEVID2 */
3409 { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
3410 /* DBGDEVID1 */
3411 { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
3412 /* DBGDEVID */
3413 { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
3414 /* DBGCLAIMSET */
3415 { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
3416 /* DBGCLAIMCLR */
3417 { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
3418 /* DBGAUTHSTATUS */
3419 { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
3420 };
3421
3422 /* Trapped cp14 64bit registers */
3423 static const struct sys_reg_desc cp14_64_regs[] = {
3424 /* DBGDRAR (64bit) */
3425 { Op1( 0), CRm( 1), .access = trap_raz_wi },
3426
3427 /* DBGDSAR (64bit) */
3428 { Op1( 0), CRm( 2), .access = trap_raz_wi },
3429 };
3430
3431 #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \
3432 AA32(_map), \
3433 Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \
3434 .visibility = pmu_visibility
3435
3436 /* Macro to expand the PMEVCNTRn register */
3437 #define PMU_PMEVCNTR(n) \
3438 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
3439 (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
3440 .access = access_pmu_evcntr }
3441
3442 /* Macro to expand the PMEVTYPERn register */
3443 #define PMU_PMEVTYPER(n) \
3444 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
3445 (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
3446 .access = access_pmu_evtyper }
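
/*
 * Illustrative expansion, not part of the original source: for n = 10,
 * PMU_PMEVCNTR(10) yields CRm = (0b1000 | (10 >> 3)) = 0b1001 and
 * Op2 = (10 & 0x7) = 2, i.e. the cp15 encoding (0, c14, c9, 2) of
 * PMEVCNTR10, while PMU_PMEVTYPER(10) lands on (0, c14, c13, 2).
 */
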
3447 /*
3448 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
3449 * depending on the way they are accessed (as a 32bit or a 64bit
3450 * register).
3451 */
3452 static const struct sys_reg_desc cp15_regs[] = {
3453 { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
3454 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
3455 /* ACTLR */
3456 { AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
3457 /* ACTLR2 */
3458 { AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
3459 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
3460 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
3461 /* TTBCR */
3462 { AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
3463 /* TTBCR2 */
3464 { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
3465 { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
3466 { CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
3467 /* DFSR */
3468 { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
3469 { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
3470 /* ADFSR */
3471 { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
3472 /* AIFSR */
3473 { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
3474 /* DFAR */
3475 { AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
3476 /* IFAR */
3477 { AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
3478
3479 /*
3480 * DC{C,I,CI}SW operations:
3481 */
3482 { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
3483 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
3484 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
3485
3486 /* PMU */
3487 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
3488 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
3489 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
3490 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
3491 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
3492 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
3493 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
3494 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
3495 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
3496 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
3497 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
3498 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
3499 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
3500 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
3501 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
3502 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
3503 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
3504 /* PMMIR */
3505 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
3506
3507 /* PRRR/MAIR0 */
3508 { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
3509 /* NMRR/MAIR1 */
3510 { AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
3511 /* AMAIR0 */
3512 { AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
3513 /* AMAIR1 */
3514 { AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
3515
3516 { CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
3517 { CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
3518 { CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
3519 { CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
3520 { CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
3521 { CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
3522 { CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
3523 { CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
3524 { CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
3525 { CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
3526 { CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
3527 { CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
3528 { CP15_SYS_DESC(SYS_ICC_DIR_EL1), undef_access },
3529 { CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
3530 { CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
3531 { CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
3532 { CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
3533 { CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
3534 { CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
3535 { CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
3536 { CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
3537 { CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
3538
3539 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
3540
3541 /* Arch timers */
3542 { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
3543 { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
3544
3545 /* PMEVCNTRn */
3546 PMU_PMEVCNTR(0),
3547 PMU_PMEVCNTR(1),
3548 PMU_PMEVCNTR(2),
3549 PMU_PMEVCNTR(3),
3550 PMU_PMEVCNTR(4),
3551 PMU_PMEVCNTR(5),
3552 PMU_PMEVCNTR(6),
3553 PMU_PMEVCNTR(7),
3554 PMU_PMEVCNTR(8),
3555 PMU_PMEVCNTR(9),
3556 PMU_PMEVCNTR(10),
3557 PMU_PMEVCNTR(11),
3558 PMU_PMEVCNTR(12),
3559 PMU_PMEVCNTR(13),
3560 PMU_PMEVCNTR(14),
3561 PMU_PMEVCNTR(15),
3562 PMU_PMEVCNTR(16),
3563 PMU_PMEVCNTR(17),
3564 PMU_PMEVCNTR(18),
3565 PMU_PMEVCNTR(19),
3566 PMU_PMEVCNTR(20),
3567 PMU_PMEVCNTR(21),
3568 PMU_PMEVCNTR(22),
3569 PMU_PMEVCNTR(23),
3570 PMU_PMEVCNTR(24),
3571 PMU_PMEVCNTR(25),
3572 PMU_PMEVCNTR(26),
3573 PMU_PMEVCNTR(27),
3574 PMU_PMEVCNTR(28),
3575 PMU_PMEVCNTR(29),
3576 PMU_PMEVCNTR(30),
3577 /* PMEVTYPERn */
3578 PMU_PMEVTYPER(0),
3579 PMU_PMEVTYPER(1),
3580 PMU_PMEVTYPER(2),
3581 PMU_PMEVTYPER(3),
3582 PMU_PMEVTYPER(4),
3583 PMU_PMEVTYPER(5),
3584 PMU_PMEVTYPER(6),
3585 PMU_PMEVTYPER(7),
3586 PMU_PMEVTYPER(8),
3587 PMU_PMEVTYPER(9),
3588 PMU_PMEVTYPER(10),
3589 PMU_PMEVTYPER(11),
3590 PMU_PMEVTYPER(12),
3591 PMU_PMEVTYPER(13),
3592 PMU_PMEVTYPER(14),
3593 PMU_PMEVTYPER(15),
3594 PMU_PMEVTYPER(16),
3595 PMU_PMEVTYPER(17),
3596 PMU_PMEVTYPER(18),
3597 PMU_PMEVTYPER(19),
3598 PMU_PMEVTYPER(20),
3599 PMU_PMEVTYPER(21),
3600 PMU_PMEVTYPER(22),
3601 PMU_PMEVTYPER(23),
3602 PMU_PMEVTYPER(24),
3603 PMU_PMEVTYPER(25),
3604 PMU_PMEVTYPER(26),
3605 PMU_PMEVTYPER(27),
3606 PMU_PMEVTYPER(28),
3607 PMU_PMEVTYPER(29),
3608 PMU_PMEVTYPER(30),
3609 /* PMCCFILTR */
3610 { CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
3611
3612 { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
3613 { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
3614
3615 /* CCSIDR2 */
3616 { Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },
3617
3618 { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
3619 };
3620
3621 static const struct sys_reg_desc cp15_64_regs[] = {
3622 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
3623 { CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
3624 { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
3625 { SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
3626 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
3627 { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
3628 { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
3629 { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
3630 { SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
3631 };
3632
3633 static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
3634 bool is_32)
3635 {
3636 unsigned int i;
3637
3638 for (i = 0; i < n; i++) {
3639 if (!is_32 && table[i].reg && !table[i].reset) {
3640 kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
3641 &table[i], i, table[i].name);
3642 return false;
3643 }
3644
3645 if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
3646 kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
3647 &table[i], i, table[i - 1].name, table[i].name);
3648 return false;
3649 }
3650 }
3651
3652 return true;
3653 }
3654
3655 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
3656 {
3657 kvm_inject_undefined(vcpu);
3658 return 1;
3659 }
3660
3661 static void perform_access(struct kvm_vcpu *vcpu,
3662 struct sys_reg_params *params,
3663 const struct sys_reg_desc *r)
3664 {
3665 trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
3666
3667 /* Check for regs disabled by runtime config */
3668 if (sysreg_hidden(vcpu, r)) {
3669 kvm_inject_undefined(vcpu);
3670 return;
3671 }
3672
3673 /*
3674 * Not having an accessor means that we have configured a trap
3675 * that we don't know how to handle. This certainly qualifies
3676 * as a gross bug that should be fixed right away.
3677 */
3678 BUG_ON(!r->access);
3679
3680 /* Skip instruction if instructed so */
3681 if (likely(r->access(vcpu, params, r)))
3682 kvm_incr_pc(vcpu);
3683 }
3684
3685 /*
3686 * emulate_cp -- tries to match a sys_reg access in a handling table, and
3687 * calls the corresponding trap handler.
3688 *
 * @vcpu: The VCPU pointer
3689 * @params: pointer to the descriptor of the access
3690 * @table: array of trap descriptors
3691 * @num: size of the trap descriptor array
3692 *
3693 * Return true if the access has been handled, false if not.
3694 */
3695 static bool emulate_cp(struct kvm_vcpu *vcpu,
3696 struct sys_reg_params *params,
3697 const struct sys_reg_desc *table,
3698 size_t num)
3699 {
3700 const struct sys_reg_desc *r;
3701
3702 if (!table)
3703 return false; /* Not handled */
3704
3705 r = find_reg(params, table, num);
3706
3707 if (r) {
3708 perform_access(vcpu, params, r);
3709 return true;
3710 }
3711
3712 /* Not handled */
3713 return false;
3714 }
3715
3716 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
3717 struct sys_reg_params *params)
3718 {
3719 u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
3720 int cp = -1;
3721
3722 switch (esr_ec) {
3723 case ESR_ELx_EC_CP15_32:
3724 case ESR_ELx_EC_CP15_64:
3725 cp = 15;
3726 break;
3727 case ESR_ELx_EC_CP14_MR:
3728 case ESR_ELx_EC_CP14_64:
3729 cp = 14;
3730 break;
3731 default:
3732 WARN_ON(1);
3733 }
3734
3735 print_sys_reg_msg(params,
3736 "Unsupported guest CP%d access at: %08lx [%08lx]\n",
3737 cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
3738 kvm_inject_undefined(vcpu);
3739 }
3740
3741 /**
3742 * kvm_handle_cp_64 -- handles an MRRC/MCRR trap on a guest CP14/CP15 access
3743 * @vcpu: The VCPU pointer
3744 * @global: array of &struct sys_reg_desc trap descriptors
3745 * @nr_global: size of the @global array
3746 */
3747 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
3748 const struct sys_reg_desc *global,
3749 size_t nr_global)
3750 {
3751 struct sys_reg_params params;
3752 u64 esr = kvm_vcpu_get_esr(vcpu);
3753 int Rt = kvm_vcpu_sys_get_rt(vcpu);
3754 int Rt2 = (esr >> 10) & 0x1f;
3755
3756 params.CRm = (esr >> 1) & 0xf;
3757 params.is_write = ((esr & 1) == 0);
3758
3759 params.Op0 = 0;
3760 params.Op1 = (esr >> 16) & 0xf;
3761 params.Op2 = 0;
3762 params.CRn = 0;
3763
3764 /*
3765 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
3766 * backends between AArch32 and AArch64, we get away with it.
3767 */
3768 if (params.is_write) {
3769 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
3770 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
3771 }
3772
3773 /*
3774 * If the table contains a handler, handle the
3775 * potential register operation in the case of a read and return
3776 * with success.
3777 */
3778 if (emulate_cp(vcpu, &params, global, nr_global)) {
3779 /* Split up the value between registers for the read side */
3780 if (!params.is_write) {
3781 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
3782 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
3783 }
3784
3785 return 1;
3786 }
3787
3788 unhandled_cp_access(vcpu, &params);
3789 return 1;
3790 }
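
/*
 * Worked example, illustrative and not from the original source: for a
 * 64-bit CP14/CP15 access, the decode above pulls Op1 from esr[19:16],
 * Rt2 from esr[14:10], Rt (via kvm_vcpu_sys_get_rt()) from esr[9:5],
 * CRm from esr[4:1] and the direction from esr[0]. An MRRC p15, 0, r0,
 * r1, c14 (a CNTPCT read) thus yields Op1 = 0, CRm = 14 and
 * is_write = false, and the emulated result is split back as
 * r0 = low 32 bits, r1 = high 32 bits.
 */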
3791
3792 static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
3793
3794 /*
3795 * The CP10 ID registers are architecturally mapped to AArch64 feature
3796 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
3797 * from AArch32.
3798 */
3799 static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
3800 {
3801 u8 reg_id = (esr >> 10) & 0xf;
3802 bool valid;
3803
3804 params->is_write = ((esr & 1) == 0);
3805 params->Op0 = 3;
3806 params->Op1 = 0;
3807 params->CRn = 0;
3808 params->CRm = 3;
3809
3810 /* CP10 ID registers are read-only */
3811 valid = !params->is_write;
3812
3813 switch (reg_id) {
3814 /* MVFR0 */
3815 case 0b0111:
3816 params->Op2 = 0;
3817 break;
3818 /* MVFR1 */
3819 case 0b0110:
3820 params->Op2 = 1;
3821 break;
3822 /* MVFR2 */
3823 case 0b0101:
3824 params->Op2 = 2;
3825 break;
3826 default:
3827 valid = false;
3828 }
3829
3830 if (valid)
3831 return true;
3832
3833 kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
3834 params->is_write ? "write" : "read", reg_id);
3835 return false;
3836 }
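
/*
 * Illustrative mapping, not part of the original source: a VMRS read of
 * MVFR1 arrives with reg_id == 0b0110, which the switch above turns
 * into (Op0 = 3, Op1 = 0, CRn = 0, CRm = 3, Op2 = 1), i.e. the AArch64
 * MVFR1_EL1 encoding, so the generic sys_reg emulation can serve it.
 */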
3837
3838 /**
3839 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
3840 * VFP Register' from AArch32.
3841 * @vcpu: The vCPU pointer
3842 *
3843 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
3844 * Work out the correct AArch64 system register encoding and reroute to the
3845 * AArch64 system register emulation.
3846 */
3847 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
3848 {
3849 int Rt = kvm_vcpu_sys_get_rt(vcpu);
3850 u64 esr = kvm_vcpu_get_esr(vcpu);
3851 struct sys_reg_params params;
3852
3853 /* UNDEF on any unhandled register access */
3854 if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
3855 kvm_inject_undefined(vcpu);
3856 return 1;
3857 }
3858
3859 if (emulate_sys_reg(vcpu, &params))
3860 vcpu_set_reg(vcpu, Rt, params.regval);
3861
3862 return 1;
3863 }
3864
3865 /**
3866 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
3867 * CRn=0, which corresponds to the AArch32 feature
3868 * registers.
3869 * @vcpu: the vCPU pointer
3870 * @params: the system register access parameters.
3871 *
3872 * Our cp15 system register tables do not enumerate the AArch32 feature
3873 * registers. Conveniently, our AArch64 table does, and the AArch32 system
3874 * register encoding can be trivially remapped into the AArch64 for the feature
3875 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
3876 *
3877 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
3878 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
3879 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
3880 * treat undefined registers in this range as RAZ.
3881 */
3882 static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
3883 struct sys_reg_params *params)
3884 {
3885 int Rt = kvm_vcpu_sys_get_rt(vcpu);
3886
3887 /* Treat impossible writes to RO registers as UNDEFINED */
3888 if (params->is_write) {
3889 unhandled_cp_access(vcpu, params);
3890 return 1;
3891 }
3892
3893 params->Op0 = 3;
3894
3895 /*
3896 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
3897 * Avoid conflicting with future expansion of AArch64 feature registers
3898 * and simply treat them as RAZ here.
3899 */
3900 if (params->CRm > 3)
3901 params->regval = 0;
3902 else if (!emulate_sys_reg(vcpu, params))
3903 return 1;
3904
3905 vcpu_set_reg(vcpu, Rt, params->regval);
3906 return 1;
3907 }
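
/*
 * Illustrative remap, not part of the original source: an MRC p15, 0,
 * <Rt>, c0, c1, 0 (ID_PFR0) enters with (Op1 = 0, CRn = 0, CRm = 1,
 * Op2 = 0); appending Op0 = 3 as above gives the AArch64 ID_PFR0_EL1
 * encoding, which the sys_reg_descs table already knows how to handle.
 */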
3908
3909 /**
3910 * kvm_handle_cp_32 -- handles an MRC/MCR trap on a guest CP14/CP15 access
3911 * @vcpu: The VCPU pointer
3912 * @params: &struct sys_reg_params describing the decoded access
3913 * @global: array of &struct sys_reg_desc trap descriptors
3914 * @nr_global: size of the @global array
3915 */
3916 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
3917 struct sys_reg_params *params,
3918 const struct sys_reg_desc *global,
3919 size_t nr_global)
3920 {
3921 int Rt = kvm_vcpu_sys_get_rt(vcpu);
3922
3923 params->regval = vcpu_get_reg(vcpu, Rt);
3924
3925 if (emulate_cp(vcpu, params, global, nr_global)) {
3926 if (!params->is_write)
3927 vcpu_set_reg(vcpu, Rt, params->regval);
3928 return 1;
3929 }
3930
3931 unhandled_cp_access(vcpu, params);
3932 return 1;
3933 }
3934
3935 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
3936 {
3937 return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
3938 }
3939
3940 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
3941 {
3942 struct sys_reg_params params;
3943
3944 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
3945
3946 /*
3947 * Certain AArch32 ID registers are handled by rerouting to the AArch64
3948 * system register table. Registers in the ID range where CRm=0 are
3949 * excluded from this scheme as they do not trivially map into AArch64
3950 * system register encodings.
3951 */
3952 if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
3953 return kvm_emulate_cp15_id_reg(vcpu, &params);
3954
3955 return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
3956 }
3957
3958 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
3959 {
3960 return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
3961 }
3962
3963 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
3964 {
3965 struct sys_reg_params params;
3966
3967 params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
3968
3969 return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
3970 }
3971
3972 /**
3973 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
3974 * @vcpu: The VCPU pointer
3975 * @params: Decoded system register parameters
3976 *
3977 * Return: true if the system register access was successful, false otherwise.
3978 */
3979 static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
3980 struct sys_reg_params *params)
3981 {
3982 const struct sys_reg_desc *r;
3983
3984 r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
3985 if (likely(r)) {
3986 perform_access(vcpu, params, r);
3987 return true;
3988 }
3989
3990 print_sys_reg_msg(params,
3991 "Unsupported guest sys_reg access at: %lx [%08lx]\n",
3992 *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
3993 kvm_inject_undefined(vcpu);
3994
3995 return false;
3996 }
3997
3998 static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, u8 pos)
3999 {
4000 unsigned long i, idreg_idx = 0;
4001
4002 for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
4003 const struct sys_reg_desc *r = &sys_reg_descs[i];
4004
4005 if (!is_vm_ftr_id_reg(reg_to_encoding(r)))
4006 continue;
4007
4008 if (idreg_idx == pos)
4009 return r;
4010
4011 idreg_idx++;
4012 }
4013
4014 return NULL;
4015 }
4016
4017 static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
4018 {
4019 struct kvm *kvm = s->private;
4020 u8 *iter;
4021
4022 mutex_lock(&kvm->arch.config_lock);
4023
4024 iter = &kvm->arch.idreg_debugfs_iter;
4025 if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
4026 *iter == (u8)~0) {
4027 *iter = *pos;
4028 if (!idregs_debug_find(kvm, *iter))
4029 iter = NULL;
4030 } else {
4031 iter = ERR_PTR(-EBUSY);
4032 }
4033
4034 mutex_unlock(&kvm->arch.config_lock);
4035
4036 return iter;
4037 }
4038
4039 static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
4040 {
4041 struct kvm *kvm = s->private;
4042
4043 (*pos)++;
4044
4045 if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) {
4046 kvm->arch.idreg_debugfs_iter++;
4047
4048 return &kvm->arch.idreg_debugfs_iter;
4049 }
4050
4051 return NULL;
4052 }
4053
4054 static void idregs_debug_stop(struct seq_file *s, void *v)
4055 {
4056 struct kvm *kvm = s->private;
4057
4058 if (IS_ERR(v))
4059 return;
4060
4061 mutex_lock(&kvm->arch.config_lock);
4062
4063 kvm->arch.idreg_debugfs_iter = ~0;
4064
4065 mutex_unlock(&kvm->arch.config_lock);
4066 }
4067
4068 static int idregs_debug_show(struct seq_file *s, void *v)
4069 {
4070 const struct sys_reg_desc *desc;
4071 struct kvm *kvm = s->private;
4072
4073 desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter);
4074
4075 if (!desc->name)
4076 return 0;
4077
4078 seq_printf(s, "%20s:\t%016llx\n",
4079 desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc)));
4080
4081 return 0;
4082 }
4083
4084 static const struct seq_operations idregs_debug_sops = {
4085 .start = idregs_debug_start,
4086 .next = idregs_debug_next,
4087 .stop = idregs_debug_stop,
4088 .show = idregs_debug_show,
4089 };
4090
4091 DEFINE_SEQ_ATTRIBUTE(idregs_debug);
4092
4093 void kvm_sys_regs_create_debugfs(struct kvm *kvm)
4094 {
4095 kvm->arch.idreg_debugfs_iter = ~0;
4096
4097 debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
4098 &idregs_debug_fops);
4099 }
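
/*
 * Usage sketch (illustrative; the debugfs path layout is an assumption,
 * not taken from this file): with debugfs mounted, the per-VM "idregs"
 * file created above can be dumped from the host to inspect the VM-wide
 * feature ID register values, e.g.:
 *
 *   cat /sys/kernel/debug/kvm/<pid>-<vm-fd>/idregs
 */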
4100
4101 static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
4102 {
4103 u32 id = reg_to_encoding(reg);
4104 struct kvm *kvm = vcpu->kvm;
4105
4106 if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
4107 return;
4108
4109 kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg));
4110 }
4111
4112 static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
4113 const struct sys_reg_desc *reg)
4114 {
4115 if (kvm_vcpu_initialized(vcpu))
4116 return;
4117
4118 reg->reset(vcpu, reg);
4119 }
4120
4121 /**
4122 * kvm_reset_sys_regs - sets system registers to their reset values
4123 * @vcpu: The VCPU pointer
4124 *
4125 * This function finds the right table above and sets the registers on the
4126 * virtual CPU struct to their architecturally defined reset values.
4127 */
4128 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
4129 {
4130 struct kvm *kvm = vcpu->kvm;
4131 unsigned long i;
4132
4133 for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
4134 const struct sys_reg_desc *r = &sys_reg_descs[i];
4135
4136 if (!r->reset)
4137 continue;
4138
4139 if (is_vm_ftr_id_reg(reg_to_encoding(r)))
4140 reset_vm_ftr_id_reg(vcpu, r);
4141 else if (is_vcpu_ftr_id_reg(reg_to_encoding(r)))
4142 reset_vcpu_ftr_id_reg(vcpu, r);
4143 else
4144 r->reset(vcpu, r);
4145 }
4146
4147 set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
4148 }
4149
4150 /**
4151 * kvm_handle_sys_reg -- handles a system instruction or MRS/MSR instruction
4152 * trap taken from a guest
4153 * @vcpu: The VCPU pointer
4154 */
4155 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
4156 {
4157 const struct sys_reg_desc *desc = NULL;
4158 struct sys_reg_params params;
4159 unsigned long esr = kvm_vcpu_get_esr(vcpu);
4160 int Rt = kvm_vcpu_sys_get_rt(vcpu);
4161 int sr_idx;
4162
4163 trace_kvm_handle_sys_reg(esr);
4164
4165 if (triage_sysreg_trap(vcpu, &sr_idx))
4166 return 1;
4167
4168 params = esr_sys64_to_params(esr);
4169 params.regval = vcpu_get_reg(vcpu, Rt);
4170
4171 /* System registers have Op0=={2,3}, as per DDI0487J.a C5.1.2 */
4172 if (params.Op0 == 2 || params.Op0 == 3)
4173 desc = &sys_reg_descs[sr_idx];
4174 else
4175 desc = &sys_insn_descs[sr_idx];
4176
4177 perform_access(vcpu, &params, desc);
4178
4179 /* Read from system register? */
4180 if (!params.is_write &&
4181 (params.Op0 == 2 || params.Op0 == 3))
4182 vcpu_set_reg(vcpu, Rt, params.regval);
4183
4184 return 1;
4185 }
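
/*
 * Illustrative routing, not part of the original source: a trapped
 * MRS x0, SCTLR_EL1 decodes with Op0 == 3 and is looked up in
 * sys_reg_descs[], whereas a trapped TLBI VMALLE1IS decodes with
 * Op0 == 1 and is looked up in sys_insn_descs[]; only the former
 * writes a result back to x0.
 */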
4186
4187 /******************************************************************************
4188 * Userspace API
4189 *****************************************************************************/
4190
4191 static bool index_to_params(u64 id, struct sys_reg_params *params)
4192 {
4193 switch (id & KVM_REG_SIZE_MASK) {
4194 case KVM_REG_SIZE_U64:
4195 /* Any unused index bits means it's not valid. */
4196 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
4197 | KVM_REG_ARM_COPROC_MASK
4198 | KVM_REG_ARM64_SYSREG_OP0_MASK
4199 | KVM_REG_ARM64_SYSREG_OP1_MASK
4200 | KVM_REG_ARM64_SYSREG_CRN_MASK
4201 | KVM_REG_ARM64_SYSREG_CRM_MASK
4202 | KVM_REG_ARM64_SYSREG_OP2_MASK))
4203 return false;
4204 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
4205 >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
4206 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
4207 >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
4208 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
4209 >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
4210 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
4211 >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
4212 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
4213 >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
4214 return true;
4215 default:
4216 return false;
4217 }
4218 }
4219
4220 const struct sys_reg_desc *get_reg_by_id(u64 id,
4221 const struct sys_reg_desc table[],
4222 unsigned int num)
4223 {
4224 struct sys_reg_params params;
4225
4226 if (!index_to_params(id, &params))
4227 return NULL;
4228
4229 return find_reg(&params, table, num);
4230 }
4231
4232 /* Decode an index value, and find the sys_reg_desc entry. */
4233 static const struct sys_reg_desc *
4234 id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
4235 const struct sys_reg_desc table[], unsigned int num)
4236
4237 {
4238 const struct sys_reg_desc *r;
4239
4240 /* We only do sys_reg for now. */
4241 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
4242 return NULL;
4243
4244 r = get_reg_by_id(id, table, num);
4245
4246 /* Not saved in the sys_reg array and not otherwise accessible? */
4247 if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
4248 r = NULL;
4249
4250 return r;
4251 }
4252
4253 /*
4254 * These are the invariant sys_reg registers: we let the guest see the
4255 * host versions of these, so they're part of the guest state.
4256 *
4257 * A future CPU may provide a mechanism to present different values to
4258 * the guest, or a future kvm may trap them.
4259 */
4260
4261 #define FUNCTION_INVARIANT(reg) \
4262 static u64 reset_##reg(struct kvm_vcpu *v, \
4263 const struct sys_reg_desc *r) \
4264 { \
4265 ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \
4266 return ((struct sys_reg_desc *)r)->val; \
4267 }
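
/*
 * Illustrative expansion, not part of the original source:
 * FUNCTION_INVARIANT(midr_el1) defines reset_midr_el1(), which
 * snapshots the host's MIDR_EL1 via read_sysreg() into the
 * descriptor's ->val and returns it, so the "reset" value is simply
 * whatever the host CPU reports.
 */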
4268
4269 FUNCTION_INVARIANT(midr_el1)
4270 FUNCTION_INVARIANT(revidr_el1)
4271 FUNCTION_INVARIANT(aidr_el1)
4272
4273 /* ->val is filled in by kvm_sys_reg_table_init() */
4274 static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
4275 { SYS_DESC(SYS_MIDR_EL1), NULL, reset_midr_el1 },
4276 { SYS_DESC(SYS_REVIDR_EL1), NULL, reset_revidr_el1 },
4277 { SYS_DESC(SYS_AIDR_EL1), NULL, reset_aidr_el1 },
4278 };
4279
4280 static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
4281 {
4282 const struct sys_reg_desc *r;
4283
4284 r = get_reg_by_id(id, invariant_sys_regs,
4285 ARRAY_SIZE(invariant_sys_regs));
4286 if (!r)
4287 return -ENOENT;
4288
4289 return put_user(r->val, uaddr);
4290 }
4291
4292 static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
4293 {
4294 const struct sys_reg_desc *r;
4295 u64 val;
4296
4297 r = get_reg_by_id(id, invariant_sys_regs,
4298 ARRAY_SIZE(invariant_sys_regs));
4299 if (!r)
4300 return -ENOENT;
4301
4302 if (get_user(val, uaddr))
4303 return -EFAULT;
4304
4305 /* This is what we mean by invariant: you can't change it. */
4306 if (r->val != val)
4307 return -EINVAL;
4308
4309 return 0;
4310 }
4311
4312 static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
4313 {
4314 u32 val;
4315 u32 __user *uval = uaddr;
4316
4317 /* Fail if we have unknown bits set. */
4318 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
4319 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
4320 return -ENOENT;
4321
4322 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
4323 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
4324 if (KVM_REG_SIZE(id) != 4)
4325 return -ENOENT;
4326 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
4327 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
4328 if (val >= CSSELR_MAX)
4329 return -ENOENT;
4330
4331 return put_user(get_ccsidr(vcpu, val), uval);
4332 default:
4333 return -ENOENT;
4334 }
4335 }
4336
4337 static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
4338 {
4339 u32 val, newval;
4340 u32 __user *uval = uaddr;
4341
4342 /* Fail if we have unknown bits set. */
4343 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
4344 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
4345 return -ENOENT;
4346
4347 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
4348 case KVM_REG_ARM_DEMUX_ID_CCSIDR:
4349 if (KVM_REG_SIZE(id) != 4)
4350 return -ENOENT;
4351 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
4352 >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
4353 if (val >= CSSELR_MAX)
4354 return -ENOENT;
4355
4356 if (get_user(newval, uval))
4357 return -EFAULT;
4358
4359 return set_ccsidr(vcpu, val, newval);
4360 default:
4361 return -ENOENT;
4362 }
4363 }
4364
4365 int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
4366 const struct sys_reg_desc table[], unsigned int num)
4367 {
4368 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
4369 const struct sys_reg_desc *r;
4370 u64 val;
4371 int ret;
4372
4373 r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
4374 if (!r || sysreg_hidden(vcpu, r))
4375 return -ENOENT;
4376
4377 if (r->get_user) {
4378 ret = (r->get_user)(vcpu, r, &val);
4379 } else {
4380 val = __vcpu_sys_reg(vcpu, r->reg);
4381 ret = 0;
4382 }
4383
4384 if (!ret)
4385 ret = put_user(val, uaddr);
4386
4387 return ret;
4388 }
4389
4390 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
4391 {
4392 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
4393 int err;
4394
4395 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
4396 return demux_c15_get(vcpu, reg->id, uaddr);
4397
4398 err = get_invariant_sys_reg(reg->id, uaddr);
4399 if (err != -ENOENT)
4400 return err;
4401
4402 return kvm_sys_reg_get_user(vcpu, reg,
4403 sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
4404 }
4405
4406 int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
4407 const struct sys_reg_desc table[], unsigned int num)
4408 {
4409 u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
4410 const struct sys_reg_desc *r;
4411 u64 val;
4412 int ret;
4413
4414 if (get_user(val, uaddr))
4415 return -EFAULT;
4416
4417 r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
4418 if (!r || sysreg_hidden(vcpu, r))
4419 return -ENOENT;
4420
4421 if (sysreg_user_write_ignore(vcpu, r))
4422 return 0;
4423
4424 if (r->set_user) {
4425 ret = (r->set_user)(vcpu, r, val);
4426 } else {
4427 __vcpu_sys_reg(vcpu, r->reg) = val;
4428 ret = 0;
4429 }
4430
4431 return ret;
4432 }
4433
4434 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
4435 {
4436 void __user *uaddr = (void __user *)(unsigned long)reg->addr;
4437 int err;
4438
4439 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
4440 return demux_c15_set(vcpu, reg->id, uaddr);
4441
4442 err = set_invariant_sys_reg(reg->id, uaddr);
4443 if (err != -ENOENT)
4444 return err;
4445
4446 return kvm_sys_reg_set_user(vcpu, reg,
4447 sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
4448 }
4449
4450 static unsigned int num_demux_regs(void)
4451 {
4452 return CSSELR_MAX;
4453 }
4454
4455 static int write_demux_regids(u64 __user *uindices)
4456 {
4457 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
4458 unsigned int i;
4459
4460 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
4461 for (i = 0; i < CSSELR_MAX; i++) {
4462 if (put_user(val | i, uindices))
4463 return -EFAULT;
4464 uindices++;
4465 }
4466 return 0;
4467 }
4468
4469 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
4470 {
4471 return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
4472 KVM_REG_ARM64_SYSREG |
4473 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
4474 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
4475 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
4476 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
4477 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
4478 }
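
/*
 * Illustrative round trip, not part of the original source:
 * sys_reg_to_index() packs (Op0, Op1, CRn, CRm, Op2) into a
 * KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG index using
 * the uapi shifts, and index_to_params() above performs the exact
 * inverse, so userspace can name any register in the tables through
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG.
 */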
4479
4480 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
4481 {
4482 if (!*uind)
4483 return true;
4484
4485 if (put_user(sys_reg_to_index(reg), *uind))
4486 return false;
4487
4488 (*uind)++;
4489 return true;
4490 }
4491
4492 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
4493 const struct sys_reg_desc *rd,
4494 u64 __user **uind,
4495 unsigned int *total)
4496 {
4497 /*
4498 * Ignore registers we trap but don't save,
4499 * and for which no custom user accessor is provided.
4500 */
4501 if (!(rd->reg || rd->get_user))
4502 return 0;
4503
4504 if (sysreg_hidden(vcpu, rd))
4505 return 0;
4506
4507 if (!copy_reg_to_user(rd, uind))
4508 return -EFAULT;
4509
4510 (*total)++;
4511 return 0;
4512 }
4513
4514 /* Assumed ordered tables, see kvm_sys_reg_table_init. */
4515 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
4516 {
4517 const struct sys_reg_desc *i2, *end2;
4518 unsigned int total = 0;
4519 int err;
4520
4521 i2 = sys_reg_descs;
4522 end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
4523
4524 while (i2 != end2) {
4525 err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
4526 if (err)
4527 return err;
4528 }
4529 return total;
4530 }
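
/*
 * Note (illustrative, not from the original source): copy_reg_to_user()
 * succeeds without writing anything when the cursor is NULL, so
 * walk_sys_regs(vcpu, NULL) degenerates into a pure counting pass; this
 * is how kvm_arm_num_sys_reg_descs() below sizes the index list before
 * kvm_arm_copy_sys_reg_indices() fills it in.
 */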
4531
4532 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
4533 {
4534 return ARRAY_SIZE(invariant_sys_regs)
4535 + num_demux_regs()
4536 + walk_sys_regs(vcpu, (u64 __user *)NULL);
4537 }
4538
4539 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
4540 {
4541 unsigned int i;
4542 int err;
4543
4544 /* First give them all the invariant registers' indices. */
4545 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
4546 if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
4547 return -EFAULT;
4548 uindices++;
4549 }
4550
4551 err = walk_sys_regs(vcpu, uindices);
4552 if (err < 0)
4553 return err;
4554 uindices += err;
4555
4556 return write_demux_regids(uindices);
4557 }
4558
4559 #define KVM_ARM_FEATURE_ID_RANGE_INDEX(r) \
4560 KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r), \
4561 sys_reg_Op1(r), \
4562 sys_reg_CRn(r), \
4563 sys_reg_CRm(r), \
4564 sys_reg_Op2(r))
4565
4566 int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
4567 {
4568 const void *zero_page = page_to_virt(ZERO_PAGE(0));
4569 u64 __user *masks = (u64 __user *)range->addr;
4570
4571 /* Only feature id range is supported, reserved[13] must be zero. */
4572 if (range->range ||
4573 memcmp(range->reserved, zero_page, sizeof(range->reserved)))
4574 return -EINVAL;
4575
4576 /* Wipe the whole thing first */
4577 if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
4578 return -EFAULT;
4579
4580 for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
4581 const struct sys_reg_desc *reg = &sys_reg_descs[i];
4582 u32 encoding = reg_to_encoding(reg);
4583 u64 val;
4584
4585 if (!is_feature_id_reg(encoding) || !reg->set_user)
4586 continue;
4587
4588 if (!reg->val ||
4589 (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) {
4590 continue;
4591 }
4592 val = reg->val;
4593
4594 if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
4595 return -EFAULT;
4596 }
4597
4598 return 0;
4599 }
4600
4601 static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
4602 {
4603 struct kvm *kvm = vcpu->kvm;
4604
4605 if (has_vhe() || has_hvhe())
4606 vcpu->arch.hcr_el2 |= HCR_E2H;
4607 if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
4608 /* route synchronous external abort exceptions to EL2 */
4609 vcpu->arch.hcr_el2 |= HCR_TEA;
4610 /* trap error record accesses */
4611 vcpu->arch.hcr_el2 |= HCR_TERR;
4612 }
4613
4614 if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
4615 vcpu->arch.hcr_el2 |= HCR_FWB;
4616
4617 if (cpus_have_final_cap(ARM64_HAS_EVT) &&
4618 !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
4619 kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0))
4620 vcpu->arch.hcr_el2 |= HCR_TID4;
4621 else
4622 vcpu->arch.hcr_el2 |= HCR_TID2;
4623
4624 if (vcpu_el1_is_32bit(vcpu))
4625 vcpu->arch.hcr_el2 &= ~HCR_RW;
4626
4627 if (kvm_has_mte(vcpu->kvm))
4628 vcpu->arch.hcr_el2 |= HCR_ATA;
4629
4630 /*
4631 * In the absence of FGT, we cannot independently trap TLBI
4632 * Range instructions. This isn't great, but trapping all
4633 * TLBIs would be far worse. Live with it...
4634 */
4635 if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
4636 vcpu->arch.hcr_el2 |= HCR_TTLBOS;
4637 }
4638
4639 void kvm_calculate_traps(struct kvm_vcpu *vcpu)
4640 {
4641 struct kvm *kvm = vcpu->kvm;
4642
4643 mutex_lock(&kvm->arch.config_lock);
4644 vcpu_set_hcr(vcpu);
4645 vcpu_set_ich_hcr(vcpu);
4646
4647 if (cpus_have_final_cap(ARM64_HAS_HCX)) {
4648 /*
4649 * In general, all HCRX_EL2 bits are gated by a feature.
4650 * The only reason we can set SMPME without checking any
4651 * feature is that its effects are not directly observable
4652 * from the guest.
4653 */
4654 vcpu->arch.hcrx_el2 = HCRX_EL2_SMPME;
4655
4656 if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
4657 vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
4658
4659 if (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
4660 vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
4661
4662 if (kvm_has_fpmr(kvm))
4663 vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;
4664 }
4665
4666 if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
4667 goto out;
4668
4669 kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 |
4670 HFGxTR_EL2_nMAIR2_EL1 |
4671 HFGxTR_EL2_nS2POR_EL1 |
4672 HFGxTR_EL2_nACCDATA_EL1 |
4673 HFGxTR_EL2_nSMPRI_EL1_MASK |
4674 HFGxTR_EL2_nTPIDR2_EL0_MASK);
4675
4676 if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
4677 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1OS|
4678 HFGITR_EL2_TLBIRVALE1OS |
4679 HFGITR_EL2_TLBIRVAAE1OS |
4680 HFGITR_EL2_TLBIRVAE1OS |
4681 HFGITR_EL2_TLBIVAALE1OS |
4682 HFGITR_EL2_TLBIVALE1OS |
4683 HFGITR_EL2_TLBIVAAE1OS |
4684 HFGITR_EL2_TLBIASIDE1OS |
4685 HFGITR_EL2_TLBIVAE1OS |
4686 HFGITR_EL2_TLBIVMALLE1OS);
4687
4688 if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
4689 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1 |
4690 HFGITR_EL2_TLBIRVALE1 |
4691 HFGITR_EL2_TLBIRVAAE1 |
4692 HFGITR_EL2_TLBIRVAE1 |
4693 HFGITR_EL2_TLBIRVAALE1IS|
4694 HFGITR_EL2_TLBIRVALE1IS |
4695 HFGITR_EL2_TLBIRVAAE1IS |
4696 HFGITR_EL2_TLBIRVAE1IS |
4697 HFGITR_EL2_TLBIRVAALE1OS|
4698 HFGITR_EL2_TLBIRVALE1OS |
4699 HFGITR_EL2_TLBIRVAAE1OS |
4700 HFGITR_EL2_TLBIRVAE1OS);
4701
4702 if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP))
4703 kvm->arch.fgu[HFGITR_GROUP] |= HFGITR_EL2_ATS1E1A;
4704
4705 if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2))
4706 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_ATS1E1RP |
4707 HFGITR_EL2_ATS1E1WP);
4708
4709 if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
4710 kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 |
4711 HFGxTR_EL2_nPIR_EL1);
4712
4713 if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
4714 kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPOR_EL1 |
4715 HFGxTR_EL2_nPOR_EL0);
4716
4717 if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
4718 kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
4719 HAFGRTR_EL2_RES1);
4720
4721 set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
4722 out:
4723 mutex_unlock(&kvm->arch.config_lock);
4724 }
4725
4726 /*
4727 * Perform last adjustments to the ID registers that are implied by the
4728 * configuration outside of the ID regs themselves, as well as any
4729 * initialisation that directly depends on these ID registers (such as
4730 * RES0/RES1 behaviours). This is not the place to configure traps though.
4731 *
4732 * Because this can be called once per vCPU, changes must be idempotent.
4733 */
4734 int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
4735 {
4736 struct kvm *kvm = vcpu->kvm;
4737
4738 guard(mutex)(&kvm->arch.config_lock);
4739
4740 if (!(static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
4741 irqchip_in_kernel(kvm) &&
4742 kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)) {
4743 kvm->arch.id_regs[IDREG_IDX(SYS_ID_AA64PFR0_EL1)] &= ~ID_AA64PFR0_EL1_GIC_MASK;
4744 kvm->arch.id_regs[IDREG_IDX(SYS_ID_PFR1_EL1)] &= ~ID_PFR1_EL1_GIC_MASK;
4745 }
4746
4747 if (vcpu_has_nv(vcpu)) {
4748 int ret = kvm_init_nv_sysregs(kvm);
4749 if (ret)
4750 return ret;
4751 }
4752
4753 return 0;
4754 }
4755
4756 int __init kvm_sys_reg_table_init(void)
4757 {
4758 bool valid = true;
4759 unsigned int i;
4760 int ret = 0;
4761
4762 /* Make sure tables are unique and in order. */
4763 valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
4764 valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
4765 valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
4766 valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
4767 valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
4768 valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
4769 valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
4770
4771 if (!valid)
4772 return -EINVAL;
4773
4774 /* We abuse the reset function to overwrite the table itself. */
4775 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
4776 invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
4777
4778 ret = populate_nv_trap_config();
4779
4780 for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
4781 ret = populate_sysreg_config(sys_reg_descs + i, i);
4782
4783 for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
4784 ret = populate_sysreg_config(sys_insn_descs + i, i);
4785
4786 return ret;
4787 }
4788