xref: /linux/arch/arm64/kvm/sys_regs.c (revision 7c31c06e2d2d75859d773ba940e56d1db2bd1fcd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012,2013 - ARM Ltd
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  *
6  * Derived from arch/arm/kvm/coproc.c:
7  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
8  * Authors: Rusty Russell <rusty@rustcorp.com.au>
9  *          Christoffer Dall <c.dall@virtualopensystems.com>
10  */
11 
12 #include <linux/bitfield.h>
13 #include <linux/bsearch.h>
14 #include <linux/cacheinfo.h>
15 #include <linux/debugfs.h>
16 #include <linux/kvm_host.h>
17 #include <linux/mm.h>
18 #include <linux/printk.h>
19 #include <linux/uaccess.h>
20 #include <linux/irqchip/arm-gic-v3.h>
21 
22 #include <asm/arm_pmuv3.h>
23 #include <asm/cacheflush.h>
24 #include <asm/cputype.h>
25 #include <asm/debug-monitors.h>
26 #include <asm/esr.h>
27 #include <asm/kvm_arm.h>
28 #include <asm/kvm_emulate.h>
29 #include <asm/kvm_hyp.h>
30 #include <asm/kvm_mmu.h>
31 #include <asm/kvm_nested.h>
32 #include <asm/perf_event.h>
33 #include <asm/sysreg.h>
34 
35 #include <trace/events/kvm.h>
36 
37 #include "sys_regs.h"
38 #include "vgic/vgic.h"
39 
40 #include "trace.h"
41 
42 /*
43  * For AArch32, we only take care of what is being trapped. Anything
44  * that has to do with init and userspace access has to go via the
45  * 64bit interface.
46  */
47 
48 static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
49 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
50 		      u64 val);
51 
52 static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
53 			 const struct sys_reg_desc *r)
54 {
55 	kvm_inject_undefined(vcpu);
56 	return false;
57 }
58 
59 static bool bad_trap(struct kvm_vcpu *vcpu,
60 		     struct sys_reg_params *params,
61 		     const struct sys_reg_desc *r,
62 		     const char *msg)
63 {
64 	WARN_ONCE(1, "Unexpected %s\n", msg);
65 	print_sys_reg_instr(params);
66 	return undef_access(vcpu, params, r);
67 }
68 
69 static bool read_from_write_only(struct kvm_vcpu *vcpu,
70 				 struct sys_reg_params *params,
71 				 const struct sys_reg_desc *r)
72 {
73 	return bad_trap(vcpu, params, r,
74 			"sys_reg read to write-only register");
75 }
76 
77 static bool write_to_read_only(struct kvm_vcpu *vcpu,
78 			       struct sys_reg_params *params,
79 			       const struct sys_reg_desc *r)
80 {
81 	return bad_trap(vcpu, params, r,
82 			"sys_reg write to read-only register");
83 }
84 
85 enum sr_loc_attr {
86 	SR_LOC_MEMORY	= 0,	  /* Register definitely in memory */
87 	SR_LOC_LOADED	= BIT(0), /* Register on CPU, unless it cannot */
88 	SR_LOC_MAPPED	= BIT(1), /* Register in a different CPU register */
89 	SR_LOC_XLATED	= BIT(2), /* Register translated to fit another reg */
90 	SR_LOC_SPECIAL	= BIT(3), /* Demanding register, implies loaded */
91 };
92 
93 struct sr_loc {
94 	enum sr_loc_attr loc;
95 	enum vcpu_sysreg map_reg;
96 	u64		 (*xlate)(u64);
97 };
98 
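/*
 * Illustrative example (not part of the kernel source): with the vcpu's
 * sysregs loaded, the guest in hyp context and HCR_EL2.E2H == 0, CPTR_EL2
 * resolves to a struct sr_loc of
 * { SR_LOC_LOADED | SR_LOC_MAPPED | SR_LOC_XLATED, CPACR_EL1,
 *   translate_cptr_el2_to_cpacr_el1 }, i.e. the value lives in the CPU's
 * CPACR_EL1 after a format translation.
 */
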
99 static enum sr_loc_attr locate_direct_register(const struct kvm_vcpu *vcpu,
100 					       enum vcpu_sysreg reg)
101 {
102 	switch (reg) {
103 	case SCTLR_EL1:
104 	case CPACR_EL1:
105 	case TTBR0_EL1:
106 	case TTBR1_EL1:
107 	case TCR_EL1:
108 	case TCR2_EL1:
109 	case PIR_EL1:
110 	case PIRE0_EL1:
111 	case POR_EL1:
112 	case ESR_EL1:
113 	case AFSR0_EL1:
114 	case AFSR1_EL1:
115 	case FAR_EL1:
116 	case MAIR_EL1:
117 	case VBAR_EL1:
118 	case CONTEXTIDR_EL1:
119 	case AMAIR_EL1:
120 	case CNTKCTL_EL1:
121 	case ELR_EL1:
122 	case SPSR_EL1:
123 	case ZCR_EL1:
124 	case SCTLR2_EL1:
125 		/*
126 		 * EL1 registers which have an ELx2 mapping are loaded if
127 		 * we're not in hypervisor context.
128 		 */
129 		return is_hyp_ctxt(vcpu) ? SR_LOC_MEMORY : SR_LOC_LOADED;
130 
131 	case TPIDR_EL0:
132 	case TPIDRRO_EL0:
133 	case TPIDR_EL1:
134 	case PAR_EL1:
135 	case DACR32_EL2:
136 	case IFSR32_EL2:
137 	case DBGVCR32_EL2:
138 		/* These registers are always loaded, no matter what */
139 		return SR_LOC_LOADED;
140 
141 	default:
142 		/* Non-mapped EL2 registers are by definition in memory. */
143 		return SR_LOC_MEMORY;
144 	}
145 }
146 
147 static void locate_mapped_el2_register(const struct kvm_vcpu *vcpu,
148 				       enum vcpu_sysreg reg,
149 				       enum vcpu_sysreg map_reg,
150 				       u64 (*xlate)(u64),
151 				       struct sr_loc *loc)
152 {
153 	if (!is_hyp_ctxt(vcpu)) {
154 		loc->loc = SR_LOC_MEMORY;
155 		return;
156 	}
157 
158 	loc->loc = SR_LOC_LOADED | SR_LOC_MAPPED;
159 	loc->map_reg = map_reg;
160 
161 	WARN_ON(locate_direct_register(vcpu, map_reg) != SR_LOC_MEMORY);
162 
163 	if (xlate != NULL && !vcpu_el2_e2h_is_set(vcpu)) {
164 		loc->loc |= SR_LOC_XLATED;
165 		loc->xlate = xlate;
166 	}
167 }
168 
169 #define MAPPED_EL2_SYSREG(r, m, t)					\
170 	case r:	{							\
171 		locate_mapped_el2_register(vcpu, r, m, t, loc);		\
172 		break;							\
173 	}
174 
175 static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
176 			    struct sr_loc *loc)
177 {
178 	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU)) {
179 		loc->loc = SR_LOC_MEMORY;
180 		return;
181 	}
182 
183 	switch (reg) {
184 		MAPPED_EL2_SYSREG(SCTLR_EL2,   SCTLR_EL1,
185 				  translate_sctlr_el2_to_sctlr_el1	     );
186 		MAPPED_EL2_SYSREG(CPTR_EL2,    CPACR_EL1,
187 				  translate_cptr_el2_to_cpacr_el1	     );
188 		MAPPED_EL2_SYSREG(TTBR0_EL2,   TTBR0_EL1,
189 				  translate_ttbr0_el2_to_ttbr0_el1	     );
190 		MAPPED_EL2_SYSREG(TTBR1_EL2,   TTBR1_EL1,   NULL	     );
191 		MAPPED_EL2_SYSREG(TCR_EL2,     TCR_EL1,
192 				  translate_tcr_el2_to_tcr_el1		     );
193 		MAPPED_EL2_SYSREG(VBAR_EL2,    VBAR_EL1,    NULL	     );
194 		MAPPED_EL2_SYSREG(AFSR0_EL2,   AFSR0_EL1,   NULL	     );
195 		MAPPED_EL2_SYSREG(AFSR1_EL2,   AFSR1_EL1,   NULL	     );
196 		MAPPED_EL2_SYSREG(ESR_EL2,     ESR_EL1,     NULL	     );
197 		MAPPED_EL2_SYSREG(FAR_EL2,     FAR_EL1,     NULL	     );
198 		MAPPED_EL2_SYSREG(MAIR_EL2,    MAIR_EL1,    NULL	     );
199 		MAPPED_EL2_SYSREG(TCR2_EL2,    TCR2_EL1,    NULL	     );
200 		MAPPED_EL2_SYSREG(PIR_EL2,     PIR_EL1,     NULL	     );
201 		MAPPED_EL2_SYSREG(PIRE0_EL2,   PIRE0_EL1,   NULL	     );
202 		MAPPED_EL2_SYSREG(POR_EL2,     POR_EL1,     NULL	     );
203 		MAPPED_EL2_SYSREG(AMAIR_EL2,   AMAIR_EL1,   NULL	     );
204 		MAPPED_EL2_SYSREG(ELR_EL2,     ELR_EL1,	    NULL	     );
205 		MAPPED_EL2_SYSREG(SPSR_EL2,    SPSR_EL1,    NULL	     );
206 		MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL	     );
207 		MAPPED_EL2_SYSREG(SCTLR2_EL2,  SCTLR2_EL1,  NULL	     );
208 	case CNTHCTL_EL2:
209 		/* CNTHCTL_EL2 is super special, until we support NV2.1 */
210 		loc->loc = ((is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) ?
211 			    SR_LOC_SPECIAL : SR_LOC_MEMORY);
212 		break;
213 	default:
214 		loc->loc = locate_direct_register(vcpu, reg);
215 	}
216 }
217 
218 static u64 read_sr_from_cpu(enum vcpu_sysreg reg)
219 {
220 	u64 val = 0x8badf00d8badf00d;
221 
222 	switch (reg) {
223 	case SCTLR_EL1:		val = read_sysreg_s(SYS_SCTLR_EL12);	break;
224 	case CPACR_EL1:		val = read_sysreg_s(SYS_CPACR_EL12);	break;
225 	case TTBR0_EL1:		val = read_sysreg_s(SYS_TTBR0_EL12);	break;
226 	case TTBR1_EL1:		val = read_sysreg_s(SYS_TTBR1_EL12);	break;
227 	case TCR_EL1:		val = read_sysreg_s(SYS_TCR_EL12);	break;
228 	case TCR2_EL1:		val = read_sysreg_s(SYS_TCR2_EL12);	break;
229 	case PIR_EL1:		val = read_sysreg_s(SYS_PIR_EL12);	break;
230 	case PIRE0_EL1:		val = read_sysreg_s(SYS_PIRE0_EL12);	break;
231 	case POR_EL1:		val = read_sysreg_s(SYS_POR_EL12);	break;
232 	case ESR_EL1:		val = read_sysreg_s(SYS_ESR_EL12);	break;
233 	case AFSR0_EL1:		val = read_sysreg_s(SYS_AFSR0_EL12);	break;
234 	case AFSR1_EL1:		val = read_sysreg_s(SYS_AFSR1_EL12);	break;
235 	case FAR_EL1:		val = read_sysreg_s(SYS_FAR_EL12);	break;
236 	case MAIR_EL1:		val = read_sysreg_s(SYS_MAIR_EL12);	break;
237 	case VBAR_EL1:		val = read_sysreg_s(SYS_VBAR_EL12);	break;
238 	case CONTEXTIDR_EL1:	val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
239 	case AMAIR_EL1:		val = read_sysreg_s(SYS_AMAIR_EL12);	break;
240 	case CNTKCTL_EL1:	val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
241 	case ELR_EL1:		val = read_sysreg_s(SYS_ELR_EL12);	break;
242 	case SPSR_EL1:		val = read_sysreg_s(SYS_SPSR_EL12);	break;
243 	case ZCR_EL1:		val = read_sysreg_s(SYS_ZCR_EL12);	break;
244 	case SCTLR2_EL1:	val = read_sysreg_s(SYS_SCTLR2_EL12);	break;
245 	case TPIDR_EL0:		val = read_sysreg_s(SYS_TPIDR_EL0);	break;
246 	case TPIDRRO_EL0:	val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
247 	case TPIDR_EL1:		val = read_sysreg_s(SYS_TPIDR_EL1);	break;
248 	case PAR_EL1:		val = read_sysreg_par();		break;
249 	case DACR32_EL2:	val = read_sysreg_s(SYS_DACR32_EL2);	break;
250 	case IFSR32_EL2:	val = read_sysreg_s(SYS_IFSR32_EL2);	break;
251 	case DBGVCR32_EL2:	val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
252 	default:		WARN_ON_ONCE(1);
253 	}
254 
255 	return val;
256 }
257 
258 static void write_sr_to_cpu(enum vcpu_sysreg reg, u64 val)
259 {
260 	switch (reg) {
261 	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
262 	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
263 	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
264 	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
265 	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
266 	case TCR2_EL1:		write_sysreg_s(val, SYS_TCR2_EL12);	break;
267 	case PIR_EL1:		write_sysreg_s(val, SYS_PIR_EL12);	break;
268 	case PIRE0_EL1:		write_sysreg_s(val, SYS_PIRE0_EL12);	break;
269 	case POR_EL1:		write_sysreg_s(val, SYS_POR_EL12);	break;
270 	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
271 	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
272 	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
273 	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
274 	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
275 	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
276 	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
277 	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
278 	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
279 	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
280 	case SPSR_EL1:		write_sysreg_s(val, SYS_SPSR_EL12);	break;
281 	case ZCR_EL1:		write_sysreg_s(val, SYS_ZCR_EL12);	break;
282 	case SCTLR2_EL1:	write_sysreg_s(val, SYS_SCTLR2_EL12);	break;
283 	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
284 	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
285 	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
286 	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
287 	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
288 	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
289 	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
290 	default:		WARN_ON_ONCE(1);
291 	}
292 }
293 
294 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
295 {
296 	struct sr_loc loc = {};
297 
298 	locate_register(vcpu, reg, &loc);
299 
300 	WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
301 
302 	if (loc.loc & SR_LOC_SPECIAL) {
303 		u64 val;
304 
305 		WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
306 
307 		/*
308 		 * CNTHCTL_EL2 requires some special treatment to account
309 		 * for the bits that can be set via CNTKCTL_EL1 when E2H==1.
310 		 */
311 		switch (reg) {
312 		case CNTHCTL_EL2:
313 			val = read_sysreg_el1(SYS_CNTKCTL);
314 			val &= CNTKCTL_VALID_BITS;
315 			val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
316 			return val;
317 		default:
318 			WARN_ON_ONCE(1);
319 		}
320 	}
321 
322 	if (loc.loc & SR_LOC_LOADED) {
323 		enum vcpu_sysreg map_reg = reg;
324 
325 		if (loc.loc & SR_LOC_MAPPED)
326 			map_reg = loc.map_reg;
327 
328 		if (!(loc.loc & SR_LOC_XLATED)) {
329 			u64 val = read_sr_from_cpu(map_reg);
330 
331 			if (reg >= __SANITISED_REG_START__)
332 				val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
333 
334 			return val;
335 		}
336 	}
337 
338 	return __vcpu_sys_reg(vcpu, reg);
339 }
340 
341 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, enum vcpu_sysreg reg)
342 {
343 	struct sr_loc loc = {};
344 
345 	locate_register(vcpu, reg, &loc);
346 
347 	WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
348 
349 	if (loc.loc & SR_LOC_SPECIAL) {
350 
351 		WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
352 
353 		switch (reg) {
354 		case CNTHCTL_EL2:
355 			/*
356 			 * If E2H=1, some of the bits are backed by
357 			 * CNTKCTL_EL1, while the rest is kept in memory.
358 			 * Yes, this is fun stuff.
359 			 */
360 			write_sysreg_el1(val, SYS_CNTKCTL);
361 			break;
362 		default:
363 			WARN_ON_ONCE(1);
364 		}
365 	}
366 
367 	if (loc.loc & SR_LOC_LOADED) {
368 		enum vcpu_sysreg map_reg = reg;
369 		u64 xlated_val;
370 
371 		if (reg >= __SANITISED_REG_START__)
372 			val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
373 
374 		if (loc.loc & SR_LOC_MAPPED)
375 			map_reg = loc.map_reg;
376 
377 		if (loc.loc & SR_LOC_XLATED)
378 			xlated_val = loc.xlate(val);
379 		else
380 			xlated_val = val;
381 
382 		write_sr_to_cpu(map_reg, xlated_val);
383 
384 		/*
385 		 * Fall through to write the backing store anyway, which
386 		 * allows translated registers to be directly read without a
387 		 * reverse translation.
388 		 */
389 	}
390 
391 	__vcpu_assign_sys_reg(vcpu, reg, val);
392 }
393 
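/*
 * A minimal usage sketch (illustrative only; the helper below is not part
 * of this file and is never built). Assuming a VHE host, a vcpu with its
 * sysregs loaded, hyp context with E2H == 0, and no sanitisation mask
 * affecting the value: the write lands in the hardware TCR_EL1 via
 * translate_tcr_el2_to_tcr_el1(), while the untranslated value is kept in
 * the backing store, so reading it back needs no reverse translation.
 */
#if 0	/* example only */
static void example_tcr_el2_roundtrip(struct kvm_vcpu *vcpu, u64 val)
{
	/* Translated copy goes to the CPU, raw copy to memory */
	vcpu_write_sys_reg(vcpu, val, TCR_EL2);

	/* SR_LOC_XLATED makes the read come from the backing store */
	WARN_ON(vcpu_read_sys_reg(vcpu, TCR_EL2) != val);
}
#endif
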
394 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
395 #define CSSELR_MAX 14
396 
397 /*
398  * Returns the minimum line size for the selected cache, expressed as
399  * Log2(bytes).
400  */
401 static u8 get_min_cache_line_size(bool icache)
402 {
403 	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
404 	u8 field;
405 
406 	if (icache)
407 		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
408 	else
409 		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);
410 
411 	/*
412 	 * Cache line size is represented as Log2(words) in CTR_EL0.
413 	 * Log2(bytes) can be derived with the following:
414 	 *
415 	 * Log2(words) + 2 = Log2(bytes / 4) + 2
416 	 * 		   = Log2(bytes) - 2 + 2
417 	 * 		   = Log2(bytes)
418 	 */
419 	return field + 2;
420 }
421 
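/*
 * Worked example (illustrative): a CTR_EL0.DminLine field of 4 means
 * Log2(words) == 4, i.e. 16-word lines, so the function returns
 * 4 + 2 == 6 == Log2(64), for a 64-byte D-cache line.
 */
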
422 /* Which cache CCSIDR represents depends on CSSELR value. */
423 static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
424 {
425 	u8 line_size;
426 
427 	if (vcpu->arch.ccsidr)
428 		return vcpu->arch.ccsidr[csselr];
429 
430 	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);
431 
432 	/*
433 	 * Fabricate a CCSIDR value as the overriding value does not exist.
434 	 * The real CCSIDR value will not be used as it can vary by the
435 	 * physical CPU which the vcpu currently resides in.
436 	 *
437 	 * The line size is determined with get_min_cache_line_size(), which
438 	 * should be valid for all CPUs even if they have different cache
439 	 * configuration.
440 	 *
441 	 * The associativity bits are cleared, meaning the geometry of all data
442 	 * and unified caches (which are guaranteed to be PIPT and thus
443 	 * non-aliasing) is 1 set and 1 way.
444 	 * Guests should not be doing cache operations by set/way at all, and
445 	 * for this reason, we trap them and attempt to infer the intent, so
446 	 * that we can flush the entire guest's address space at the appropriate
447 	 * time. The exposed geometry minimizes the number of traps.
448 	 * [If guests should attempt to infer aliasing properties from the
449 	 * geometry (which is not permitted by the architecture), they would
450 	 * only do so for virtually indexed caches.]
451 	 *
452 	 * We don't check if the cache level exists as it is allowed to return
453 	 * an UNKNOWN value if not.
454 	 */
455 	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
456 }
457 
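/*
 * Example (illustrative): with a 64-byte minimum line size, line_size is 6
 * and the fabricated value has CCSIDR_EL1.LineSize == 6 - 4 == 2, with
 * NumSets and Associativity left as 0, i.e. a 1-set, 1-way geometry.
 */
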
458 static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
459 {
460 	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
461 	u32 *ccsidr = vcpu->arch.ccsidr;
462 	u32 i;
463 
464 	if ((val & CCSIDR_EL1_RES0) ||
465 	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
466 		return -EINVAL;
467 
468 	if (!ccsidr) {
469 		if (val == get_ccsidr(vcpu, csselr))
470 			return 0;
471 
472 		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
473 		if (!ccsidr)
474 			return -ENOMEM;
475 
476 		for (i = 0; i < CSSELR_MAX; i++)
477 			ccsidr[i] = get_ccsidr(vcpu, i);
478 
479 		vcpu->arch.ccsidr = ccsidr;
480 	}
481 
482 	ccsidr[csselr] = val;
483 
484 	return 0;
485 }
486 
487 static bool access_rw(struct kvm_vcpu *vcpu,
488 		      struct sys_reg_params *p,
489 		      const struct sys_reg_desc *r)
490 {
491 	if (p->is_write)
492 		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
493 	else
494 		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
495 
496 	return true;
497 }
498 
499 /*
500  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
501  */
502 static bool access_dcsw(struct kvm_vcpu *vcpu,
503 			struct sys_reg_params *p,
504 			const struct sys_reg_desc *r)
505 {
506 	if (!p->is_write)
507 		return read_from_write_only(vcpu, p, r);
508 
509 	/*
510 	 * Only track S/W ops if we don't have FWB. It still indicates
511 	 * that the guest is a bit broken (S/W operations should only
512 	 * be done by firmware, knowing that there is only a single
513 	 * CPU left in the system, and certainly not from non-secure
514 	 * software).
515 	 */
516 	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
517 		kvm_set_way_flush(vcpu);
518 
519 	return true;
520 }
521 
522 static bool access_dcgsw(struct kvm_vcpu *vcpu,
523 			 struct sys_reg_params *p,
524 			 const struct sys_reg_desc *r)
525 {
526 	if (!kvm_has_mte(vcpu->kvm))
527 		return undef_access(vcpu, p, r);
528 
529 	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
530 	return access_dcsw(vcpu, p, r);
531 }
532 
533 static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
534 {
535 	switch (r->aarch32_map) {
536 	case AA32_LO:
537 		*mask = GENMASK_ULL(31, 0);
538 		*shift = 0;
539 		break;
540 	case AA32_HI:
541 		*mask = GENMASK_ULL(63, 32);
542 		*shift = 32;
543 		break;
544 	default:
545 		*mask = GENMASK_ULL(63, 0);
546 		*shift = 0;
547 		break;
548 	}
549 }
550 
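/*
 * Example (illustrative): a descriptor marked AA32_HI yields
 * mask == GENMASK_ULL(63, 32) and shift == 32, so a 32-bit AArch32 access
 * operates on bits [63:32] of the underlying 64-bit register, while
 * AA32_LO selects bits [31:0] and the default covers the whole register.
 */
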
551 /*
552  * Generic accessor for VM registers. Only called as long as HCR_TVM
553  * is set. If the guest enables the MMU, we stop trapping the VM
554  * sys_regs and leave it in complete control of the caches.
555  */
556 static bool access_vm_reg(struct kvm_vcpu *vcpu,
557 			  struct sys_reg_params *p,
558 			  const struct sys_reg_desc *r)
559 {
560 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
561 	u64 val, mask, shift;
562 
563 	BUG_ON(!p->is_write);
564 
565 	get_access_mask(r, &mask, &shift);
566 
567 	if (~mask) {
568 		val = vcpu_read_sys_reg(vcpu, r->reg);
569 		val &= ~mask;
570 	} else {
571 		val = 0;
572 	}
573 
574 	val |= (p->regval & (mask >> shift)) << shift;
575 	vcpu_write_sys_reg(vcpu, val, r->reg);
576 
577 	kvm_toggle_cache(vcpu, was_enabled);
578 	return true;
579 }
580 
581 static bool access_actlr(struct kvm_vcpu *vcpu,
582 			 struct sys_reg_params *p,
583 			 const struct sys_reg_desc *r)
584 {
585 	u64 mask, shift;
586 
587 	if (p->is_write)
588 		return ignore_write(vcpu, p);
589 
590 	get_access_mask(r, &mask, &shift);
591 	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;
592 
593 	return true;
594 }
595 
596 /*
597  * Trap handler for the GICv3 SGI generation system register.
598  * Forward the request to the VGIC emulation.
599  * The cp15_64 code makes sure this automatically works
600  * for both AArch64 and AArch32 accesses.
601  */
602 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
603 			   struct sys_reg_params *p,
604 			   const struct sys_reg_desc *r)
605 {
606 	bool g1;
607 
608 	if (!kvm_has_gicv3(vcpu->kvm))
609 		return undef_access(vcpu, p, r);
610 
611 	if (!p->is_write)
612 		return read_from_write_only(vcpu, p, r);
613 
614 	/*
615 	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
616 	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
617 	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
618 	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
619 	 * group.
620 	 */
621 	if (p->Op0 == 0) {		/* AArch32 */
622 		switch (p->Op1) {
623 		default:		/* Keep GCC quiet */
624 		case 0:			/* ICC_SGI1R */
625 			g1 = true;
626 			break;
627 		case 1:			/* ICC_ASGI1R */
628 		case 2:			/* ICC_SGI0R */
629 			g1 = false;
630 			break;
631 		}
632 	} else {			/* AArch64 */
633 		switch (p->Op2) {
634 		default:		/* Keep GCC quiet */
635 		case 5:			/* ICC_SGI1R_EL1 */
636 			g1 = true;
637 			break;
638 		case 6:			/* ICC_ASGI1R_EL1 */
639 		case 7:			/* ICC_SGI0R_EL1 */
640 			g1 = false;
641 			break;
642 		}
643 	}
644 
645 	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
646 
647 	return true;
648 }
649 
650 static bool access_gic_sre(struct kvm_vcpu *vcpu,
651 			   struct sys_reg_params *p,
652 			   const struct sys_reg_desc *r)
653 {
654 	if (!kvm_has_gicv3(vcpu->kvm))
655 		return undef_access(vcpu, p, r);
656 
657 	if (p->is_write)
658 		return ignore_write(vcpu, p);
659 
660 	if (p->Op1 == 4) {	/* ICC_SRE_EL2 */
661 		p->regval = KVM_ICC_SRE_EL2;
662 	} else {		/* ICC_SRE_EL1 */
663 		p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
664 	}
665 
666 	return true;
667 }
668 
669 static bool access_gic_dir(struct kvm_vcpu *vcpu,
670 			   struct sys_reg_params *p,
671 			   const struct sys_reg_desc *r)
672 {
673 	if (!kvm_has_gicv3(vcpu->kvm))
674 		return undef_access(vcpu, p, r);
675 
676 	if (!p->is_write)
677 		return undef_access(vcpu, p, r);
678 
679 	vgic_v3_deactivate(vcpu, p->regval);
680 
681 	return true;
682 }
683 
684 static bool access_gicv5_idr0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
685 			      const struct sys_reg_desc *r)
686 {
687 	if (p->is_write)
688 		return undef_access(vcpu, p, r);
689 
690 	/*
691 	 * Expose KVM's priority- and ID-bits to the guest, but not GCIE_LEGACY.
692 	 *
693 	 * Note: for GICv5 we mimic the way that the num_pri_bits and
694 	 * num_id_bits fields are used with GICv3:
695 	 * - num_pri_bits stores the actual number of priority bits, whereas the
696 	 *   register field stores num_pri_bits - 1.
697 	 * - num_id_bits stores the raw field value, which is 0b0000 for 16 bits
698 	 *   and 0b0001 for 24 bits.
699 	 */
700 	p->regval = FIELD_PREP(ICC_IDR0_EL1_PRI_BITS, vcpu->arch.vgic_cpu.num_pri_bits - 1) |
701 		    FIELD_PREP(ICC_IDR0_EL1_ID_BITS, vcpu->arch.vgic_cpu.num_id_bits);
702 
703 	return true;
704 }
705 
706 static bool access_gicv5_iaffid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
707 				const struct sys_reg_desc *r)
708 {
709 	if (p->is_write)
710 		return undef_access(vcpu, p, r);
711 
712 	/*
713 	 * For GICv5 VMs, the IAFFID value is the same as the VPE ID. The VPE ID
714 	 * is the same as the VCPU's ID.
715 	 */
716 	p->regval = FIELD_PREP(ICC_IAFFIDR_EL1_IAFFID, vcpu->vcpu_id);
717 
718 	return true;
719 }
720 
721 static bool access_gicv5_ppi_enabler(struct kvm_vcpu *vcpu,
722 				     struct sys_reg_params *p,
723 				     const struct sys_reg_desc *r)
724 {
725 	unsigned long *mask = vcpu->kvm->arch.vgic.gicv5_vm.vgic_ppi_mask;
726 	struct vgic_v5_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v5;
727 	int i;
728 
729 	/* We never expect to get here with a read! */
730 	if (WARN_ON_ONCE(!p->is_write))
731 		return undef_access(vcpu, p, r);
732 
733 	/*
734 	 * If we're only handling architected PPIs and the guest writes to the
735 	 * enable for the non-architected PPIs, we just return as there's
736 	 * nothing to do at all. We don't even allocate the storage for them in
737 	 * this case.
738 	 */
739 	if (VGIC_V5_NR_PRIVATE_IRQS == 64 && p->Op2 % 2)
740 		return true;
741 
742 	/*
743 	 * Merge the raw guest write into our bitmap at an offset of either 0 or
744 	 * 64, then AND it with our PPI mask.
745 	 */
746 	bitmap_write(cpu_if->vgic_ppi_enabler, p->regval, 64 * (p->Op2 % 2), 64);
747 	bitmap_and(cpu_if->vgic_ppi_enabler, cpu_if->vgic_ppi_enabler, mask,
748 		   VGIC_V5_NR_PRIVATE_IRQS);
749 
750 	/*
751 	 * Sync the change in enable states to the vgic_irqs. We consider all
752 	 * PPIs as we don't expose many to the guest.
753 	 */
754 	for_each_set_bit(i, mask, VGIC_V5_NR_PRIVATE_IRQS) {
755 		u32 intid = vgic_v5_make_ppi(i);
756 		struct vgic_irq *irq;
757 
758 		irq = vgic_get_vcpu_irq(vcpu, intid);
759 
760 		scoped_guard(raw_spinlock_irqsave, &irq->irq_lock)
761 			irq->enabled = test_bit(i, cpu_if->vgic_ppi_enabler);
762 
763 		vgic_put_irq(vcpu->kvm, irq);
764 	}
765 
766 	return true;
767 }
768 
769 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
770 			struct sys_reg_params *p,
771 			const struct sys_reg_desc *r)
772 {
773 	if (p->is_write)
774 		return ignore_write(vcpu, p);
775 	else
776 		return read_zero(vcpu, p);
777 }
778 
779 /*
780  * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
781  * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
782  * system, these registers should UNDEF. LORID_EL1 being a RO register, we
783  * treat it separately.
784  */
785 static bool trap_loregion(struct kvm_vcpu *vcpu,
786 			  struct sys_reg_params *p,
787 			  const struct sys_reg_desc *r)
788 {
789 	u32 sr = reg_to_encoding(r);
790 
791 	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP))
792 		return undef_access(vcpu, p, r);
793 
794 	if (p->is_write && sr == SYS_LORID_EL1)
795 		return write_to_read_only(vcpu, p, r);
796 
797 	return trap_raz_wi(vcpu, p, r);
798 }
799 
800 static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
801 			   struct sys_reg_params *p,
802 			   const struct sys_reg_desc *r)
803 {
804 	if (!p->is_write)
805 		return read_from_write_only(vcpu, p, r);
806 
807 	kvm_debug_handle_oslar(vcpu, p->regval);
808 	return true;
809 }
810 
811 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
812 			   struct sys_reg_params *p,
813 			   const struct sys_reg_desc *r)
814 {
815 	if (p->is_write)
816 		return write_to_read_only(vcpu, p, r);
817 
818 	p->regval = __vcpu_sys_reg(vcpu, r->reg);
819 	return true;
820 }
821 
822 static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
823 			 u64 val)
824 {
825 	/*
826 	 * The only modifiable bit is the OSLK bit. Refuse the write if
827 	 * userspace attempts to change any other bit in the register.
828 	 */
829 	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
830 		return -EINVAL;
831 
832 	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
833 	return 0;
834 }
835 
836 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
837 				   struct sys_reg_params *p,
838 				   const struct sys_reg_desc *r)
839 {
840 	if (p->is_write) {
841 		return ignore_write(vcpu, p);
842 	} else {
843 		p->regval = read_sysreg(dbgauthstatus_el1);
844 		return true;
845 	}
846 }
847 
848 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
849 			    struct sys_reg_params *p,
850 			    const struct sys_reg_desc *r)
851 {
852 	access_rw(vcpu, p, r);
853 
854 	kvm_debug_set_guest_ownership(vcpu);
855 	return true;
856 }
857 
858 /*
859  * reg_to_dbg/dbg_to_reg
860  *
861  * A 32 bit write to a debug register leaves the top bits alone
862  * A 32 bit read from a debug register only returns the bottom bits
863  */
864 static void reg_to_dbg(struct kvm_vcpu *vcpu,
865 		       struct sys_reg_params *p,
866 		       const struct sys_reg_desc *rd,
867 		       u64 *dbg_reg)
868 {
869 	u64 mask, shift, val;
870 
871 	get_access_mask(rd, &mask, &shift);
872 
873 	val = *dbg_reg;
874 	val &= ~mask;
875 	val |= (p->regval & (mask >> shift)) << shift;
876 	*dbg_reg = val;
877 }
878 
879 static void dbg_to_reg(struct kvm_vcpu *vcpu,
880 		       struct sys_reg_params *p,
881 		       const struct sys_reg_desc *rd,
882 		       u64 *dbg_reg)
883 {
884 	u64 mask, shift;
885 
886 	get_access_mask(rd, &mask, &shift);
887 	p->regval = (*dbg_reg & mask) >> shift;
888 }
889 
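/*
 * Worked example (illustrative): for an AA32_LO-mapped debug register
 * holding 0xdeadbeef00c0ffee, dbg_to_reg() returns 0x00c0ffee on a 32-bit
 * read, and reg_to_dbg() turns a 32-bit write of 0x12345678 into
 * 0xdeadbeef12345678, leaving the top bits untouched.
 */
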
890 static u64 *demux_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
891 {
892 	struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state;
893 
894 	switch (rd->Op2) {
895 	case 0b100:
896 		return &dbg->dbg_bvr[rd->CRm];
897 	case 0b101:
898 		return &dbg->dbg_bcr[rd->CRm];
899 	case 0b110:
900 		return &dbg->dbg_wvr[rd->CRm];
901 	case 0b111:
902 		return &dbg->dbg_wcr[rd->CRm];
903 	default:
904 		KVM_BUG_ON(1, vcpu->kvm);
905 		return NULL;
906 	}
907 }
908 
909 static bool trap_dbg_wb_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
910 			    const struct sys_reg_desc *rd)
911 {
912 	u64 *reg = demux_wb_reg(vcpu, rd);
913 
914 	if (!reg)
915 		return false;
916 
917 	if (p->is_write)
918 		reg_to_dbg(vcpu, p, rd, reg);
919 	else
920 		dbg_to_reg(vcpu, p, rd, reg);
921 
922 	kvm_debug_set_guest_ownership(vcpu);
923 	return true;
924 }
925 
926 static int set_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
927 			  u64 val)
928 {
929 	u64 *reg = demux_wb_reg(vcpu, rd);
930 
931 	if (!reg)
932 		return -EINVAL;
933 
934 	*reg = val;
935 	return 0;
936 }
937 
938 static int get_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
939 			  u64 *val)
940 {
941 	u64 *reg = demux_wb_reg(vcpu, rd);
942 
943 	if (!reg)
944 		return -EINVAL;
945 
946 	*val = *reg;
947 	return 0;
948 }
949 
950 static u64 reset_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
951 {
952 	u64 *reg = demux_wb_reg(vcpu, rd);
953 
954 	/*
955 	 * Bail early if we couldn't find storage for the register; the
956 	 * KVM_BUG_ON() in demux_wb_reg() will prevent this VM from ever
957 	 * being run.
958 	 */
959 	if (!reg)
960 		return 0;
961 
962 	*reg = rd->val;
963 	return rd->val;
964 }
965 
966 static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
967 {
968 	u64 amair = read_sysreg(amair_el1);
969 	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
970 	return amair;
971 }
972 
973 static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
974 {
975 	u64 actlr = read_sysreg(actlr_el1);
976 	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
977 	return actlr;
978 }
979 
980 static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
981 {
982 	u64 mpidr;
983 
984 	/*
985 	 * Map the vcpu_id into the first three affinity level fields of
986 	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
987 	 * the MPIDR. We limit the number of VCPUs in level 0 to 16, as
988 	 * the GICv3 ICC_SGIxR registers can only target 16 CPUs at that
989 	 * level, and we want to be able to address each CPU directly
990 	 * when sending IPIs.
991 	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
992 	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
993 	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
994 	mpidr |= (1ULL << 31);
995 	vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);
996 
997 	return mpidr;
998 }
999 
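/*
 * Worked example (illustrative): vcpu_id 300 (0x12c) yields Aff0 == 0xc,
 * Aff1 == 0x12 and Aff2 == 0x0, so the reset value is 0x8000120c once the
 * RES1 bit (31) is set.
 */
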
1000 static unsigned int hidden_visibility(const struct kvm_vcpu *vcpu,
1001 				      const struct sys_reg_desc *r)
1002 {
1003 	return REG_HIDDEN;
1004 }
1005 
1006 static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
1007 				   const struct sys_reg_desc *r)
1008 {
1009 	if (kvm_vcpu_has_pmu(vcpu))
1010 		return 0;
1011 
1012 	return REG_HIDDEN;
1013 }
1014 
1015 static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1016 {
1017 	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
1018 	u8 n = vcpu->kvm->arch.nr_pmu_counters;
1019 
1020 	if (n)
1021 		mask |= GENMASK(n - 1, 0);
1022 
1023 	reset_unknown(vcpu, r);
1024 	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);
1025 
1026 	return __vcpu_sys_reg(vcpu, r->reg);
1027 }
1028 
1029 static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1030 {
1031 	reset_unknown(vcpu, r);
1032 	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));
1033 
1034 	return __vcpu_sys_reg(vcpu, r->reg);
1035 }
1036 
1037 static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1038 {
1039 	/* This thing will UNDEF, who cares about the reset value? */
1040 	if (!kvm_vcpu_has_pmu(vcpu))
1041 		return 0;
1042 
1043 	reset_unknown(vcpu, r);
1044 	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));
1045 
1046 	return __vcpu_sys_reg(vcpu, r->reg);
1047 }
1048 
1049 static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1050 {
1051 	reset_unknown(vcpu, r);
1052 	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);
1053 
1054 	return __vcpu_sys_reg(vcpu, r->reg);
1055 }
1056 
1057 static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1058 {
1059 	u64 pmcr = 0;
1060 
1061 	if (!kvm_supports_32bit_el0())
1062 		pmcr |= ARMV8_PMU_PMCR_LC;
1063 
1064 	/*
1065 	 * The value of the PMCR.N field is included when the
1066 	 * vCPU register is read via kvm_vcpu_read_pmcr().
1067 	 */
1068 	__vcpu_assign_sys_reg(vcpu, r->reg, pmcr);
1069 
1070 	return __vcpu_sys_reg(vcpu, r->reg);
1071 }
1072 
1073 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
1074 {
1075 	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
1076 	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
1077 
1078 	if (!enabled)
1079 		kvm_inject_undefined(vcpu);
1080 
1081 	return !enabled;
1082 }
1083 
1084 static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
1085 {
1086 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
1087 }
1088 
1089 static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
1090 {
1091 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
1092 }
1093 
1094 static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
1095 {
1096 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
1097 }
1098 
1099 static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
1100 {
1101 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
1102 }
1103 
1104 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1105 			const struct sys_reg_desc *r)
1106 {
1107 	u64 val;
1108 
1109 	if (pmu_access_el0_disabled(vcpu))
1110 		return false;
1111 
1112 	if (p->is_write) {
1113 		/*
1114 		 * Only update writeable bits of PMCR (continuing into
1115 		 * kvm_pmu_handle_pmcr() as well)
1116 		 */
1117 		val = kvm_vcpu_read_pmcr(vcpu);
1118 		val &= ~ARMV8_PMU_PMCR_MASK;
1119 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
1120 		if (!kvm_supports_32bit_el0())
1121 			val |= ARMV8_PMU_PMCR_LC;
1122 		kvm_pmu_handle_pmcr(vcpu, val);
1123 	} else {
1124 		/* PMCR.P & PMCR.C are RAZ */
1125 		val = kvm_vcpu_read_pmcr(vcpu)
1126 		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
1127 		p->regval = val;
1128 	}
1129 
1130 	return true;
1131 }
1132 
1133 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1134 			  const struct sys_reg_desc *r)
1135 {
1136 	if (pmu_access_event_counter_el0_disabled(vcpu))
1137 		return false;
1138 
1139 	if (p->is_write)
1140 		__vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
1141 	else
1142 		/* return PMSELR.SEL field */
1143 		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
1144 			    & PMSELR_EL0_SEL_MASK;
1145 
1146 	return true;
1147 }
1148 
1149 static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1150 			  const struct sys_reg_desc *r)
1151 {
1152 	u64 pmceid, mask, shift;
1153 
1154 	BUG_ON(p->is_write);
1155 
1156 	if (pmu_access_el0_disabled(vcpu))
1157 		return false;
1158 
1159 	get_access_mask(r, &mask, &shift);
1160 
1161 	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
1162 	pmceid &= mask;
1163 	pmceid >>= shift;
1164 
1165 	p->regval = pmceid;
1166 
1167 	return true;
1168 }
1169 
1170 static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
1171 {
1172 	u64 pmcr, val;
1173 
1174 	pmcr = kvm_vcpu_read_pmcr(vcpu);
1175 	val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
1176 	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
1177 		kvm_inject_undefined(vcpu);
1178 		return false;
1179 	}
1180 
1181 	return true;
1182 }
1183 
1184 static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1185 			  u64 *val)
1186 {
1187 	u64 idx;
1188 
1189 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
1190 		/* PMCCNTR_EL0 */
1191 		idx = ARMV8_PMU_CYCLE_IDX;
1192 	else
1193 		/* PMEVCNTRn_EL0 */
1194 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1195 
1196 	*val = kvm_pmu_get_counter_value(vcpu, idx);
1197 	return 0;
1198 }
1199 
1200 static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1201 			  u64 val)
1202 {
1203 	u64 idx;
1204 
1205 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
1206 		/* PMCCNTR_EL0 */
1207 		idx = ARMV8_PMU_CYCLE_IDX;
1208 	else
1209 		/* PMEVCNTRn_EL0 */
1210 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1211 
1212 	kvm_pmu_set_counter_value_user(vcpu, idx, val);
1213 	return 0;
1214 }
1215 
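/*
 * Example (illustrative): PMEVCNTR10_EL0 encodes as CRn == 14,
 * CRm == 0b1001, Op2 == 0b010, so the decode above yields
 * idx == ((1 << 3) | 2) == 10.
 */
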
1216 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
1217 			      struct sys_reg_params *p,
1218 			      const struct sys_reg_desc *r)
1219 {
1220 	u64 idx = ~0UL;
1221 
1222 	if (r->CRn == 9 && r->CRm == 13) {
1223 		if (r->Op2 == 2) {
1224 			/* PMXEVCNTR_EL0 */
1225 			if (pmu_access_event_counter_el0_disabled(vcpu))
1226 				return false;
1227 
1228 			idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
1229 					    __vcpu_sys_reg(vcpu, PMSELR_EL0));
1230 		} else if (r->Op2 == 0) {
1231 			/* PMCCNTR_EL0 */
1232 			if (pmu_access_cycle_counter_el0_disabled(vcpu))
1233 				return false;
1234 
1235 			idx = ARMV8_PMU_CYCLE_IDX;
1236 		}
1237 	} else if (r->CRn == 0 && r->CRm == 9) {
1238 		/* PMCCNTR */
1239 		if (pmu_access_event_counter_el0_disabled(vcpu))
1240 			return false;
1241 
1242 		idx = ARMV8_PMU_CYCLE_IDX;
1243 	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
1244 		/* PMEVCNTRn_EL0 */
1245 		if (pmu_access_event_counter_el0_disabled(vcpu))
1246 			return false;
1247 
1248 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1249 	}
1250 
1251 	/* Catch any decoding mistake */
1252 	WARN_ON(idx == ~0UL);
1253 
1254 	if (!pmu_counter_idx_valid(vcpu, idx))
1255 		return false;
1256 
1257 	if (p->is_write) {
1258 		if (pmu_access_el0_disabled(vcpu))
1259 			return false;
1260 
1261 		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
1262 	} else {
1263 		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
1264 	}
1265 
1266 	return true;
1267 }
1268 
1269 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1270 			       const struct sys_reg_desc *r)
1271 {
1272 	u64 idx, reg;
1273 
1274 	if (pmu_access_el0_disabled(vcpu))
1275 		return false;
1276 
1277 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
1278 		/* PMXEVTYPER_EL0 */
1279 		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
1280 		reg = PMEVTYPER0_EL0 + idx;
1281 	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
1282 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1283 		if (idx == ARMV8_PMU_CYCLE_IDX)
1284 			reg = PMCCFILTR_EL0;
1285 		else
1286 			/* PMEVTYPERn_EL0 */
1287 			reg = PMEVTYPER0_EL0 + idx;
1288 	} else {
1289 		BUG();
1290 	}
1291 
1292 	if (!pmu_counter_idx_valid(vcpu, idx))
1293 		return false;
1294 
1295 	if (p->is_write) {
1296 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
1297 		kvm_vcpu_pmu_restore_guest(vcpu);
1298 	} else {
1299 		p->regval = __vcpu_sys_reg(vcpu, reg);
1300 	}
1301 
1302 	return true;
1303 }
1304 
1305 static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
1306 {
1307 	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1308 
1309 	__vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
1310 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
1311 
1312 	return 0;
1313 }
1314 
1315 static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
1316 {
1317 	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1318 
1319 	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
1320 	return 0;
1321 }
1322 
1323 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1324 			   const struct sys_reg_desc *r)
1325 {
1326 	u64 val, mask;
1327 
1328 	if (pmu_access_el0_disabled(vcpu))
1329 		return false;
1330 
1331 	mask = kvm_pmu_accessible_counter_mask(vcpu);
1332 	if (p->is_write) {
1333 		val = p->regval & mask;
1334 		if (r->Op2 & 0x1)
1335 			/* accessing PMCNTENSET_EL0 */
1336 			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
1337 		else
1338 			/* accessing PMCNTENCLR_EL0 */
1339 			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
1340 
1341 		kvm_pmu_reprogram_counter_mask(vcpu, val);
1342 	} else {
1343 		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
1344 	}
1345 
1346 	return true;
1347 }
1348 
1349 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1350 			   const struct sys_reg_desc *r)
1351 {
1352 	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1353 
1354 	if (check_pmu_access_disabled(vcpu, 0))
1355 		return false;
1356 
1357 	if (p->is_write) {
1358 		u64 val = p->regval & mask;
1359 
1360 		if (r->Op2 & 0x1)
1361 			/* accessing PMINTENSET_EL1 */
1362 			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
1363 		else
1364 			/* accessing PMINTENCLR_EL1 */
1365 			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
1366 	} else {
1367 		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
1368 	}
1369 
1370 	return true;
1371 }
1372 
1373 static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1374 			 const struct sys_reg_desc *r)
1375 {
1376 	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1377 
1378 	if (pmu_access_el0_disabled(vcpu))
1379 		return false;
1380 
1381 	if (p->is_write) {
1382 		if (r->CRm & 0x2)
1383 			/* accessing PMOVSSET_EL0 */
1384 			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
1385 		else
1386 			/* accessing PMOVSCLR_EL0 */
1387 			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
1388 	} else {
1389 		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
1390 	}
1391 
1392 	return true;
1393 }
1394 
1395 static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1396 			   const struct sys_reg_desc *r)
1397 {
1398 	u64 mask;
1399 
1400 	if (!p->is_write)
1401 		return read_from_write_only(vcpu, p, r);
1402 
1403 	if (pmu_write_swinc_el0_disabled(vcpu))
1404 		return false;
1405 
1406 	mask = kvm_pmu_accessible_counter_mask(vcpu);
1407 	kvm_pmu_software_increment(vcpu, p->regval & mask);
1408 	return true;
1409 }
1410 
1411 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1412 			     const struct sys_reg_desc *r)
1413 {
1414 	if (p->is_write) {
1415 		if (!vcpu_mode_priv(vcpu))
1416 			return undef_access(vcpu, p, r);
1417 
1418 		__vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
1419 				      (p->regval & ARMV8_PMU_USERENR_MASK));
1420 	} else {
1421 		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
1422 			    & ARMV8_PMU_USERENR_MASK;
1423 	}
1424 
1425 	return true;
1426 }
1427 
1428 static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1429 		    u64 *val)
1430 {
1431 	*val = kvm_vcpu_read_pmcr(vcpu);
1432 	return 0;
1433 }
1434 
1435 static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1436 		    u64 val)
1437 {
1438 	u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
1439 	struct kvm *kvm = vcpu->kvm;
1440 
1441 	mutex_lock(&kvm->arch.config_lock);
1442 
1443 	/*
1444 	 * The vCPU can't have more counters than the PMU hardware
1445 	 * implements. Silently ignore an out-of-range value rather than
1446 	 * failing, to maintain compatibility with the existing KVM behavior.
1447 	 */
1448 	if (!kvm_vm_has_ran_once(kvm) &&
1449 	    !vcpu_has_nv(vcpu)	      &&
1450 	    new_n <= kvm_arm_pmu_get_max_counters(kvm))
1451 		kvm->arch.nr_pmu_counters = new_n;
1452 
1453 	mutex_unlock(&kvm->arch.config_lock);
1454 
1455 	/*
1456 	 * Ignore writes to RES0 bits, read only bits that are cleared on
1457 	 * vCPU reset, and writable bits that KVM doesn't support yet.
1458 	 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
1459 	 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
1460 	 * But, we leave the bit as it is here, as the vCPU's PMUver might
1461 	 * be changed later (NOTE: the bit will be cleared on first vCPU run
1462 	 * if necessary).
1463 	 */
1464 	val &= ARMV8_PMU_PMCR_MASK;
1465 
1466 	/* The LC bit is RES1 when AArch32 is not supported */
1467 	if (!kvm_supports_32bit_el0())
1468 		val |= ARMV8_PMU_PMCR_LC;
1469 
1470 	__vcpu_assign_sys_reg(vcpu, r->reg, val);
1471 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
1472 
1473 	return 0;
1474 }
1475 
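/*
 * Example (illustrative): on a host PMU implementing 6 counters, a
 * userspace write of PMCR_EL0 with N == 2 (before the first vCPU run, on
 * a non-NV vCPU) shrinks kvm->arch.nr_pmu_counters to 2, whereas a write
 * with N == 8 leaves the counter number untouched rather than failing.
 */
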
1476 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
1477 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
1478 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
1479 	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
1480 	  get_dbg_wb_reg, set_dbg_wb_reg },				\
1481 	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
1482 	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
1483 	  get_dbg_wb_reg, set_dbg_wb_reg },				\
1484 	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
1485 	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
1486 	  get_dbg_wb_reg, set_dbg_wb_reg },				\
1487 	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
1488 	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
1489 	  get_dbg_wb_reg, set_dbg_wb_reg }
1490 
1491 #define PMU_SYS_REG(name)						\
1492 	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
1493 	.visibility = pmu_visibility
1494 
1495 /* Macro to expand the PMEVCNTRn_EL0 register */
1496 #define PMU_PMEVCNTR_EL0(n)						\
1497 	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
1498 	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
1499 	  .set_user = set_pmu_evcntr,					\
1500 	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
1501 
1502 /* Macro to expand the PMEVTYPERn_EL0 register */
1503 #define PMU_PMEVTYPER_EL0(n)						\
1504 	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
1505 	  .reset = reset_pmevtyper,					\
1506 	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
1507 
1508 /* Macro to expand the AMU counter and type registers*/
1509 #define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
1510 #define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
1511 #define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
1512 #define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
1513 
1514 static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1515 			const struct sys_reg_desc *rd)
1516 {
1517 	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
1518 }
1519 
1520 /*
1521  * If we land here on a PtrAuth access, that is because we didn't
1522  * fixup the access on exit by allowing the PtrAuth sysregs. The only
1523  * way this happens is when the guest does not have PtrAuth support
1524  * enabled.
1525  */
1526 #define __PTRAUTH_KEY(k)						\
1527 	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
1528 	.visibility = ptrauth_visibility}
1529 
1530 #define PTRAUTH_KEY(k)							\
1531 	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
1532 	__PTRAUTH_KEY(k ## KEYHI_EL1)
1533 
1534 static bool access_arch_timer(struct kvm_vcpu *vcpu,
1535 			      struct sys_reg_params *p,
1536 			      const struct sys_reg_desc *r)
1537 {
1538 	enum kvm_arch_timers tmr;
1539 	enum kvm_arch_timer_regs treg;
1540 	u64 reg = reg_to_encoding(r);
1541 
1542 	switch (reg) {
1543 	case SYS_CNTP_TVAL_EL0:
1544 		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1545 			tmr = TIMER_HPTIMER;
1546 		else
1547 			tmr = TIMER_PTIMER;
1548 		treg = TIMER_REG_TVAL;
1549 		break;
1550 
1551 	case SYS_CNTV_TVAL_EL0:
1552 		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1553 			tmr = TIMER_HVTIMER;
1554 		else
1555 			tmr = TIMER_VTIMER;
1556 		treg = TIMER_REG_TVAL;
1557 		break;
1558 
1559 	case SYS_AARCH32_CNTP_TVAL:
1560 	case SYS_CNTP_TVAL_EL02:
1561 		tmr = TIMER_PTIMER;
1562 		treg = TIMER_REG_TVAL;
1563 		break;
1564 
1565 	case SYS_CNTV_TVAL_EL02:
1566 		tmr = TIMER_VTIMER;
1567 		treg = TIMER_REG_TVAL;
1568 		break;
1569 
1570 	case SYS_CNTHP_TVAL_EL2:
1571 		tmr = TIMER_HPTIMER;
1572 		treg = TIMER_REG_TVAL;
1573 		break;
1574 
1575 	case SYS_CNTHV_TVAL_EL2:
1576 		tmr = TIMER_HVTIMER;
1577 		treg = TIMER_REG_TVAL;
1578 		break;
1579 
1580 	case SYS_CNTP_CTL_EL0:
1581 		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1582 			tmr = TIMER_HPTIMER;
1583 		else
1584 			tmr = TIMER_PTIMER;
1585 		treg = TIMER_REG_CTL;
1586 		break;
1587 
1588 	case SYS_CNTV_CTL_EL0:
1589 		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1590 			tmr = TIMER_HVTIMER;
1591 		else
1592 			tmr = TIMER_VTIMER;
1593 		treg = TIMER_REG_CTL;
1594 		break;
1595 
1596 	case SYS_AARCH32_CNTP_CTL:
1597 	case SYS_CNTP_CTL_EL02:
1598 		tmr = TIMER_PTIMER;
1599 		treg = TIMER_REG_CTL;
1600 		break;
1601 
1602 	case SYS_CNTV_CTL_EL02:
1603 		tmr = TIMER_VTIMER;
1604 		treg = TIMER_REG_CTL;
1605 		break;
1606 
1607 	case SYS_CNTHP_CTL_EL2:
1608 		tmr = TIMER_HPTIMER;
1609 		treg = TIMER_REG_CTL;
1610 		break;
1611 
1612 	case SYS_CNTHV_CTL_EL2:
1613 		tmr = TIMER_HVTIMER;
1614 		treg = TIMER_REG_CTL;
1615 		break;
1616 
1617 	case SYS_CNTP_CVAL_EL0:
1618 		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1619 			tmr = TIMER_HPTIMER;
1620 		else
1621 			tmr = TIMER_PTIMER;
1622 		treg = TIMER_REG_CVAL;
1623 		break;
1624 
1625 	case SYS_CNTV_CVAL_EL0:
1626 		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1627 			tmr = TIMER_HVTIMER;
1628 		else
1629 			tmr = TIMER_VTIMER;
1630 		treg = TIMER_REG_CVAL;
1631 		break;
1632 
1633 	case SYS_AARCH32_CNTP_CVAL:
1634 	case SYS_CNTP_CVAL_EL02:
1635 		tmr = TIMER_PTIMER;
1636 		treg = TIMER_REG_CVAL;
1637 		break;
1638 
1639 	case SYS_CNTV_CVAL_EL02:
1640 		tmr = TIMER_VTIMER;
1641 		treg = TIMER_REG_CVAL;
1642 		break;
1643 
1644 	case SYS_CNTHP_CVAL_EL2:
1645 		tmr = TIMER_HPTIMER;
1646 		treg = TIMER_REG_CVAL;
1647 		break;
1648 
1649 	case SYS_CNTHV_CVAL_EL2:
1650 		tmr = TIMER_HVTIMER;
1651 		treg = TIMER_REG_CVAL;
1652 		break;
1653 
1654 	case SYS_CNTPCT_EL0:
1655 	case SYS_CNTPCTSS_EL0:
1656 		if (is_hyp_ctxt(vcpu))
1657 			tmr = TIMER_HPTIMER;
1658 		else
1659 			tmr = TIMER_PTIMER;
1660 		treg = TIMER_REG_CNT;
1661 		break;
1662 
1663 	case SYS_AARCH32_CNTPCT:
1664 	case SYS_AARCH32_CNTPCTSS:
1665 		tmr = TIMER_PTIMER;
1666 		treg = TIMER_REG_CNT;
1667 		break;
1668 
1669 	case SYS_CNTVCT_EL0:
1670 	case SYS_CNTVCTSS_EL0:
1671 		if (is_hyp_ctxt(vcpu))
1672 			tmr = TIMER_HVTIMER;
1673 		else
1674 			tmr = TIMER_VTIMER;
1675 		treg = TIMER_REG_CNT;
1676 		break;
1677 
1678 	case SYS_AARCH32_CNTVCT:
1679 	case SYS_AARCH32_CNTVCTSS:
1680 		tmr = TIMER_VTIMER;
1681 		treg = TIMER_REG_CNT;
1682 		break;
1683 
1684 	default:
1685 		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
1686 		return undef_access(vcpu, p, r);
1687 	}
1688 
1689 	if (p->is_write)
1690 		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
1691 	else
1692 		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
1693 
1694 	return true;
1695 }
1696 
1697 static int arch_timer_set_user(struct kvm_vcpu *vcpu,
1698 			       const struct sys_reg_desc *rd,
1699 			       u64 val)
1700 {
1701 	switch (reg_to_encoding(rd)) {
1702 	case SYS_CNTV_CTL_EL0:
1703 	case SYS_CNTP_CTL_EL0:
1704 	case SYS_CNTHV_CTL_EL2:
1705 	case SYS_CNTHP_CTL_EL2:
1706 		val &= ~ARCH_TIMER_CTRL_IT_STAT;
1707 		break;
1708 	case SYS_CNTVCT_EL0:
1709 		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
1710 			timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read() - val);
1711 		return 0;
1712 	case SYS_CNTPCT_EL0:
1713 		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
1714 			timer_set_offset(vcpu_ptimer(vcpu), kvm_phys_timer_read() - val);
1715 		return 0;
1716 	}
1717 
1718 	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
1719 	return 0;
1720 }
1721 
1722 static int arch_timer_get_user(struct kvm_vcpu *vcpu,
1723 			       const struct sys_reg_desc *rd,
1724 			       u64 *val)
1725 {
1726 	switch (reg_to_encoding(rd)) {
1727 	case SYS_CNTVCT_EL0:
1728 		*val = kvm_phys_timer_read() - timer_get_offset(vcpu_vtimer(vcpu));
1729 		break;
1730 	case SYS_CNTPCT_EL0:
1731 		*val = kvm_phys_timer_read() - timer_get_offset(vcpu_ptimer(vcpu));
1732 		break;
1733 	default:
1734 		*val = __vcpu_sys_reg(vcpu, rd->reg);
1735 	}
1736 
1737 	return 0;
1738 }
1739 
1740 static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
1741 				    s64 new, s64 cur)
1742 {
1743 	struct arm64_ftr_bits kvm_ftr = *ftrp;
1744 
1745 	/* Some features have a different safe value type in KVM than the host */
1746 	switch (id) {
1747 	case SYS_ID_AA64DFR0_EL1:
1748 		switch (kvm_ftr.shift) {
1749 		case ID_AA64DFR0_EL1_PMUVer_SHIFT:
1750 			kvm_ftr.type = FTR_LOWER_SAFE;
1751 			break;
1752 		case ID_AA64DFR0_EL1_DebugVer_SHIFT:
1753 			kvm_ftr.type = FTR_LOWER_SAFE;
1754 			break;
1755 		}
1756 		break;
1757 	case SYS_ID_DFR0_EL1:
1758 		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
1759 			kvm_ftr.type = FTR_LOWER_SAFE;
1760 		break;
1761 	}
1762 
1763 	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
1764 }
1765 
1766 /*
1767  * arm64_check_features() - Check if a feature register value constitutes
1768  * a subset of features indicated by the idreg's KVM sanitised limit.
1769  *
1770  * This function will check if each feature field of @val is the "safe" value
1771  * against the idreg's KVM sanitised limit returned from the reset() callback.
1772  * If a field value in @val is the same as the one in limit, it is always
1773  * considered the safe value regardless. For register fields that are not
1774  * writable, only the value in limit is considered the safe value.
1775  *
1776  * Return: 0 if all the fields are safe. Otherwise, return negative errno.
1777  */
1778 static int arm64_check_features(struct kvm_vcpu *vcpu,
1779 				const struct sys_reg_desc *rd,
1780 				u64 val)
1781 {
1782 	const struct arm64_ftr_reg *ftr_reg;
1783 	const struct arm64_ftr_bits *ftrp = NULL;
1784 	u32 id = reg_to_encoding(rd);
1785 	u64 writable_mask = rd->val;
1786 	u64 limit = rd->reset(vcpu, rd);
1787 	u64 mask = 0;
1788 
1789 	/*
1790 	 * Hidden and unallocated ID registers may not have a corresponding
1791 	 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
1792 	 * only safe value is 0.
1793 	 */
1794 	if (sysreg_visible_as_raz(vcpu, rd))
1795 		return val ? -E2BIG : 0;
1796 
1797 	ftr_reg = get_arm64_ftr_reg(id);
1798 	if (!ftr_reg)
1799 		return -EINVAL;
1800 
1801 	ftrp = ftr_reg->ftr_bits;
1802 
1803 	for (; ftrp && ftrp->width; ftrp++) {
1804 		s64 f_val, f_lim, safe_val;
1805 		u64 ftr_mask;
1806 
1807 		ftr_mask = arm64_ftr_mask(ftrp);
1808 		if ((ftr_mask & writable_mask) != ftr_mask)
1809 			continue;
1810 
1811 		f_val = arm64_ftr_value(ftrp, val);
1812 		f_lim = arm64_ftr_value(ftrp, limit);
1813 		mask |= ftr_mask;
1814 
1815 		if (f_val == f_lim)
1816 			safe_val = f_val;
1817 		else
1818 			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);
1819 
1820 		if (safe_val != f_val)
1821 			return -E2BIG;
1822 	}
1823 
1824 	/* For fields that are not writable, values in limit are the safe values. */
1825 	if ((val & ~mask) != (limit & ~mask))
1826 		return -E2BIG;
1827 
1828 	return 0;
1829 }
1830 
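/*
 * Example (illustrative, assuming the usual FTR_LOWER_SAFE policy for the
 * field): if the sanitised limit has ID_AA64ISAR0_EL1.Atomic == 2 and
 * userspace writes Atomic == 3, the value exceeds the limit and the check
 * fails with -E2BIG; writing 0 or 2 would be accepted.
 */
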
1831 static u8 pmuver_to_perfmon(u8 pmuver)
1832 {
1833 	switch (pmuver) {
1834 	case ID_AA64DFR0_EL1_PMUVer_IMP:
1835 		return ID_DFR0_EL1_PerfMon_PMUv3;
1836 	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
1837 		return ID_DFR0_EL1_PerfMon_IMPDEF;
1838 	default:
1839 		/* Anything ARMv8.1+ and NI have the same value. For now. */
1840 		return pmuver;
1841 	}
1842 }
1843 
1844 static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
1845 static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val);
1846 static u64 sanitise_id_aa64pfr2_el1(const struct kvm_vcpu *vcpu, u64 val);
1847 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
1848 
1849 /* Read a sanitised cpufeature ID register by sys_reg_desc */
1850 static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
1851 				       const struct sys_reg_desc *r)
1852 {
1853 	u32 id = reg_to_encoding(r);
1854 	u64 val;
1855 
1856 	if (sysreg_visible_as_raz(vcpu, r))
1857 		return 0;
1858 
1859 	val = read_sanitised_ftr_reg(id);
1860 
1861 	switch (id) {
1862 	case SYS_ID_AA64DFR0_EL1:
1863 		val = sanitise_id_aa64dfr0_el1(vcpu, val);
1864 		break;
1865 	case SYS_ID_AA64PFR0_EL1:
1866 		val = sanitise_id_aa64pfr0_el1(vcpu, val);
1867 		break;
1868 	case SYS_ID_AA64PFR1_EL1:
1869 		val = sanitise_id_aa64pfr1_el1(vcpu, val);
1870 		break;
1871 	case SYS_ID_AA64PFR2_EL1:
1872 		val = sanitise_id_aa64pfr2_el1(vcpu, val);
1873 		break;
1874 	case SYS_ID_AA64ISAR1_EL1:
1875 		if (!vcpu_has_ptrauth(vcpu))
1876 			val &= ~(ID_AA64ISAR1_EL1_APA |
1877 				 ID_AA64ISAR1_EL1_API |
1878 				 ID_AA64ISAR1_EL1_GPA |
1879 				 ID_AA64ISAR1_EL1_GPI);
1880 		break;
1881 	case SYS_ID_AA64ISAR2_EL1:
1882 		if (!vcpu_has_ptrauth(vcpu))
1883 			val &= ~(ID_AA64ISAR2_EL1_APA3 |
1884 				 ID_AA64ISAR2_EL1_GPA3);
1885 		if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
1886 		    has_broken_cntvoff())
1887 			val &= ~ID_AA64ISAR2_EL1_WFxT;
1888 		break;
1889 	case SYS_ID_AA64ISAR3_EL1:
1890 		val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_LSFE |
1891 			ID_AA64ISAR3_EL1_FAMINMAX;
1892 		break;
1893 	case SYS_ID_AA64MMFR2_EL1:
1894 		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
1895 		val &= ~ID_AA64MMFR2_EL1_NV;
1896 		break;
1897 	case SYS_ID_AA64MMFR3_EL1:
1898 		val &= ID_AA64MMFR3_EL1_TCRX |
1899 		       ID_AA64MMFR3_EL1_SCTLRX |
1900 		       ID_AA64MMFR3_EL1_S1POE |
1901 		       ID_AA64MMFR3_EL1_S1PIE;
1902 
1903 		if (!system_supports_poe())
1904 			val &= ~ID_AA64MMFR3_EL1_S1POE;
1905 		break;
1906 	case SYS_ID_MMFR4_EL1:
1907 		val &= ~ID_MMFR4_EL1_CCIDX;
1908 		break;
1909 	}
1910 
1911 	if (vcpu_has_nv(vcpu))
1912 		val = limit_nv_id_reg(vcpu->kvm, id, val);
1913 
1914 	return val;
1915 }
1916 
1917 static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
1918 				     const struct sys_reg_desc *r)
1919 {
1920 	return __kvm_read_sanitised_id_reg(vcpu, r);
1921 }
1922 
1923 static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1924 {
1925 	return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r));
1926 }
1927 
1928 static bool is_feature_id_reg(u32 encoding)
1929 {
1930 	return (sys_reg_Op0(encoding) == 3 &&
1931 		(sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
1932 		sys_reg_CRn(encoding) == 0 &&
1933 		sys_reg_CRm(encoding) <= 7);
1934 }
1935 
1936 /*
1937  * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
1938  * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
1939  * registers KVM maintains on a per-VM basis.
1940  *
1941  * Additionally, the implementation ID registers and CTR_EL0 are handled as
1942  * per-VM registers.
1943  */
1944 static inline bool is_vm_ftr_id_reg(u32 id)
1945 {
1946 	switch (id) {
1947 	case SYS_CTR_EL0:
1948 	case SYS_MIDR_EL1:
1949 	case SYS_REVIDR_EL1:
1950 	case SYS_AIDR_EL1:
1951 		return true;
1952 	default:
1953 		return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1954 			sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1955 			sys_reg_CRm(id) < 8);
1956 
1957 	}
1958 }
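
/*
 * For instance, ID_AA64PFR0_EL1 (3, 0, 0, 4, 0) falls in the per-VM range
 * above, while MPIDR_EL1 (3, 0, 0, 0, 5) does not and is therefore handled
 * as a per-vCPU feature ID register by is_vcpu_ftr_id_reg() below.
 */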
1959 
1960 static inline bool is_vcpu_ftr_id_reg(u32 id)
1961 {
1962 	return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
1963 }
1964 
1965 static inline bool is_aa32_id_reg(u32 id)
1966 {
1967 	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1968 		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1969 		sys_reg_CRm(id) <= 3);
1970 }
1971 
1972 static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
1973 				  const struct sys_reg_desc *r)
1974 {
1975 	u32 id = reg_to_encoding(r);
1976 
1977 	switch (id) {
1978 	case SYS_ID_AA64ZFR0_EL1:
1979 		if (!vcpu_has_sve(vcpu))
1980 			return REG_RAZ;
1981 		break;
1982 	}
1983 
1984 	return 0;
1985 }
1986 
1987 static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
1988 				       const struct sys_reg_desc *r)
1989 {
1990 	/*
1991 	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
1992 	 * EL. Promote to RAZ/WI in order to guarantee consistency between
1993 	 * systems.
1994 	 */
1995 	if (!kvm_supports_32bit_el0())
1996 		return REG_RAZ | REG_USER_WI;
1997 
1998 	return id_visibility(vcpu, r);
1999 }
2000 
2001 static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
2002 				   const struct sys_reg_desc *r)
2003 {
2004 	return REG_RAZ;
2005 }
2006 
2007 /* cpufeature ID register access trap handlers */
2008 
2009 static bool access_id_reg(struct kvm_vcpu *vcpu,
2010 			  struct sys_reg_params *p,
2011 			  const struct sys_reg_desc *r)
2012 {
2013 	if (p->is_write)
2014 		return write_to_read_only(vcpu, p, r);
2015 
2016 	p->regval = read_id_reg(vcpu, r);
2017 
2018 	return true;
2019 }
2020 
2021 /* Visibility overrides for SVE-specific control registers */
2022 static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
2023 				   const struct sys_reg_desc *rd)
2024 {
2025 	if (vcpu_has_sve(vcpu))
2026 		return 0;
2027 
2028 	return REG_HIDDEN;
2029 }
2030 
2031 static unsigned int sme_visibility(const struct kvm_vcpu *vcpu,
2032 				   const struct sys_reg_desc *rd)
2033 {
2034 	if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP))
2035 		return 0;
2036 
2037 	return REG_HIDDEN;
2038 }
2039 
2040 static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
2041 				   const struct sys_reg_desc *rd)
2042 {
2043 	if (kvm_has_fpmr(vcpu->kvm))
2044 		return 0;
2045 
2046 	return REG_HIDDEN;
2047 }
2048 
2049 static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
2050 {
2051 	if (!vcpu_has_sve(vcpu))
2052 		val &= ~ID_AA64PFR0_EL1_SVE_MASK;
2053 
2054 	/*
2055 	 * The default is to expose CSV2 == 1 if the HW isn't affected.
2056 	 * Although this is a per-CPU feature, we make it global because
2057 	 * asymmetric systems are just a nuisance.
2058 	 *
2059 	 * Userspace can override this as long as it doesn't promise
2060 	 * the impossible.
2061 	 */
2062 	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
2063 		val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
2064 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
2065 	}
2066 	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
2067 		val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
2068 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
2069 	}
2070 
2071 	if (vgic_host_has_gicv3()) {
2072 		val &= ~ID_AA64PFR0_EL1_GIC_MASK;
2073 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
2074 	}
2075 
2076 	val &= ~ID_AA64PFR0_EL1_AMU_MASK;
2077 
2078 	/*
2079 	 * MPAM is disabled by default as KVM also needs a set of PARTIDs to
2080 	 * program the MPAMVPMx_EL2 PARTID remapping registers with. But some
2081 	 * older kernels let the guest see the ID bit.
2082 	 */
2083 	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
2084 
2085 	return val;
2086 }
2087 
2088 static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
2089 {
2090 	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2091 
2092 	if (!kvm_has_mte(vcpu->kvm)) {
2093 		val &= ~ID_AA64PFR1_EL1_MTE;
2094 		val &= ~ID_AA64PFR1_EL1_MTE_frac;
2095 	}
2096 
2097 	if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
2098 	      SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
2099 		val &= ~ID_AA64PFR1_EL1_RAS_frac;
2100 
2101 	val &= ~ID_AA64PFR1_EL1_SME;
2102 	val &= ~ID_AA64PFR1_EL1_RNDR_trap;
2103 	val &= ~ID_AA64PFR1_EL1_NMI;
2104 	val &= ~ID_AA64PFR1_EL1_GCS;
2105 	val &= ~ID_AA64PFR1_EL1_THE;
2106 	val &= ~ID_AA64PFR1_EL1_MTEX;
2107 	val &= ~ID_AA64PFR1_EL1_PFAR;
2108 	val &= ~ID_AA64PFR1_EL1_MPAM_frac;
2109 
2110 	return val;
2111 }
2112 
2113 static u64 sanitise_id_aa64pfr2_el1(const struct kvm_vcpu *vcpu, u64 val)
2114 {
2115 	val &= ID_AA64PFR2_EL1_FPMR |
2116 	       ID_AA64PFR2_EL1_MTEFAR |
2117 	       ID_AA64PFR2_EL1_MTESTOREONLY;
2118 
2119 	if (!kvm_has_mte(vcpu->kvm)) {
2120 		val &= ~ID_AA64PFR2_EL1_MTEFAR;
2121 		val &= ~ID_AA64PFR2_EL1_MTESTOREONLY;
2122 	}
2123 
2124 	if (vgic_host_has_gicv5())
2125 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR2_EL1, GCIE, IMP);
2126 
2127 	return val;
2128 }
2129 
2130 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
2131 {
2132 	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
2133 
2134 	/*
2135 	 * Only initialize the PMU version if the vCPU was configured with one.
2136 	 */
2137 	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
2138 	if (kvm_vcpu_has_pmu(vcpu))
2139 		val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
2140 				      kvm_arm_pmu_get_pmuver_limit());
2141 
2142 	/* Hide SPE from guests */
2143 	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
2144 
2145 	/* Hide BRBE from guests */
2146 	val &= ~ID_AA64DFR0_EL1_BRBE_MASK;
2147 
2148 	return val;
2149 }
2150 
2151 /*
2152  * Older versions of KVM erroneously claim support for FEAT_DoubleLock with
2153  * NV-enabled VMs on hardware lacking the feature. Silently ignore the incorrect
2154  * value if it is consistent with the bug.
2155  */
2156 static bool ignore_feat_doublelock(struct kvm_vcpu *vcpu, u64 val)
2157 {
2158 	u8 host, user;
2159 
2160 	if (!vcpu_has_nv(vcpu))
2161 		return false;
2162 
2163 	host = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock,
2164 			     read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1));
2165 	user = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock, val);
2166 
2167 	return host == ID_AA64DFR0_EL1_DoubleLock_NI &&
2168 	       user == ID_AA64DFR0_EL1_DoubleLock_IMP;
2169 }
2170 
2171 static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
2172 			       const struct sys_reg_desc *rd,
2173 			       u64 val)
2174 {
2175 	u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
2176 	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
2177 
2178 	/*
2179 	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
2180 	 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
2181 	 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
2182 	 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
2183 	 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
2184 	 *
2185 	 * At minimum, we're on the hook to allow values that were given to
2186 	 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
2187 	 * with a more sensible NI. The value of an ID register changing under
2188 	 * the nose of the guest is unfortunate, but is certainly no more
2189 	 * surprising than an ill-guided PMU driver poking at impdef system
2190 	 * registers that ends in an UNDEF...
2191 	 */
2192 	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
2193 		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
2194 
2195 	/*
2196 	 * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
2197 	 * nonzero minimum safe value.
2198 	 */
2199 	if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
2200 		return -EINVAL;
2201 
2202 	if (ignore_feat_doublelock(vcpu, val)) {
2203 		val &= ~ID_AA64DFR0_EL1_DoubleLock;
2204 		val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DoubleLock, NI);
2205 	}
2206 
2207 	return set_id_reg(vcpu, rd, val);
2208 }
2209 
2210 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
2211 				      const struct sys_reg_desc *rd)
2212 {
2213 	u8 perfmon;
2214 	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
2215 
2216 	val &= ~ID_DFR0_EL1_PerfMon_MASK;
2217 	if (kvm_vcpu_has_pmu(vcpu)) {
2218 		perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
2219 		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
2220 	}
2221 
2222 	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);
2223 
2224 	return val;
2225 }
2226 
2227 static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
2228 			   const struct sys_reg_desc *rd,
2229 			   u64 val)
2230 {
2231 	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
2232 	u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);
2233 
2234 	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
2235 		val &= ~ID_DFR0_EL1_PerfMon_MASK;
2236 		perfmon = 0;
2237 	}
2238 
2239 	/*
2240 	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
2241 	 * it doesn't promise more than what the HW gives us on the
2242 	 * AArch64 side (as everything is emulated with that), and
2243 	 * that this is a PMUv3.
2244 	 */
2245 	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
2246 		return -EINVAL;
2247 
2248 	if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
2249 		return -EINVAL;
2250 
2251 	return set_id_reg(vcpu, rd, val);
2252 }
2253 
2254 static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
2255 			       const struct sys_reg_desc *rd, u64 user_val)
2256 {
2257 	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2258 	u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
2259 
2260 	/*
2261 	 * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
2262 	 * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
2263 	 * guests, but didn't add trap handling. KVM doesn't support MPAM and
2264 	 * always returns an UNDEF for these registers. The guest must see 0
2265 	 * for this field.
2266 	 *
2267 	 * But KVM must also accept values from user-space that were provided
2268 	 * by KVM. On CPUs that support MPAM, permit user-space to write
2269 	 * the sanitised value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
2270 	 */
2271 	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
2272 		user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
2273 
2274 	/* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
2275 	if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
2276 	    !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
2277 	    (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
2278 		return -EINVAL;
2279 
2280 	return set_id_reg(vcpu, rd, user_val);
2281 }
2282 
2283 static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
2284 			       const struct sys_reg_desc *rd, u64 user_val)
2285 {
2286 	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
2287 	u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
2288 	u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val);
2289 	u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val);
2290 	u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val);
2291 
2292 	/* See set_id_aa64pfr0_el1 for comment about MPAM */
2293 	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
2294 		user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
2295 
2296 	/*
2297 	 * Previously MTE_frac was hidden from the guest. However, if the
2298 	 * hardware supports MTE2 but not MTE_ASYM_FAULT, a value of 0 in
2299 	 * this field indicates that the hardware supports MTE_ASYNC,
2300 	 * whereas 0xf indicates MTE_ASYNC is not supported.
2301 	 *
2302 	 * As KVM must accept values it previously provided to user-space,
2303 	 * allow user-space to set ID_AA64PFR1_EL1.MTE_frac to 0 when
2304 	 * ID_AA64PFR1_EL1.MTE is 2. However, ignore it to avoid
2305 	 * incorrectly claiming hardware support for MTE_ASYNC in the
2306 	 * guest.
2307 	 */
2308 
2309 	if (mte == ID_AA64PFR1_EL1_MTE_MTE2 &&
2310 	    hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI &&
2311 	    user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) {
2312 		user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
2313 		user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK;
2314 	}
2315 
2316 	return set_id_reg(vcpu, rd, user_val);
2317 }
2318 
2319 static int set_id_aa64pfr2_el1(struct kvm_vcpu *vcpu,
2320 			       const struct sys_reg_desc *rd, u64 user_val)
2321 {
2322 	return set_id_reg(vcpu, rd, user_val);
2323 }
2324 
2325 /*
2326  * Allow userspace to de-feature a stage-2 translation granule but prevent it
2327  * from claiming the impossible.
2328  */
2329 #define tgran2_val_allowed(tg, safe, user)			\
2330 ({								\
2331 	u8 __s = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, safe);	\
2332 	u8 __u = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, user);	\
2333 								\
2334 	__s == __u || __u == ID_AA64MMFR0_EL1_##tg##_NI;	\
2335 })
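
/*
 * For example (illustrative), with a sanitised ID_AA64MMFR0_EL1.TGRAN4_2
 * of IMP (0b0010):
 *
 *	tgran2_val_allowed(TGRAN4_2, safe, user)
 *		user field == IMP (0b0010)	-> true  (matches hardware)
 *		user field == NI (0b0001)	-> true  (granule de-featured)
 *		user field == 52_BIT (0b0011)	-> false (impossible claim)
 */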
2336 
2337 static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
2338 				const struct sys_reg_desc *rd, u64 user_val)
2339 {
2340 	u64 sanitized_val = kvm_read_sanitised_id_reg(vcpu, rd);
2341 
2342 	if (!vcpu_has_nv(vcpu))
2343 		return set_id_reg(vcpu, rd, user_val);
2344 
2345 	if (!tgran2_val_allowed(TGRAN4_2, sanitized_val, user_val) ||
2346 	    !tgran2_val_allowed(TGRAN16_2, sanitized_val, user_val) ||
2347 	    !tgran2_val_allowed(TGRAN64_2, sanitized_val, user_val))
2348 		return -EINVAL;
2349 
2350 	return set_id_reg(vcpu, rd, user_val);
2351 }
2352 
2353 static int set_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu,
2354 				const struct sys_reg_desc *rd, u64 user_val)
2355 {
2356 	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
2357 	u64 nv_mask = ID_AA64MMFR2_EL1_NV_MASK;
2358 
2359 	/*
2360 	 * We made the mistake of exposing the now deprecated NV field,
2361 	 * so allow userspace to write it, but silently ignore it.
2362 	 */
2363 	if ((hw_val & nv_mask) == (user_val & nv_mask))
2364 		user_val &= ~nv_mask;
2365 
2366 	return set_id_reg(vcpu, rd, user_val);
2367 }
2368 
2369 static int set_ctr_el0(struct kvm_vcpu *vcpu,
2370 		       const struct sys_reg_desc *rd, u64 user_val)
2371 {
2372 	u8 user_L1Ip = SYS_FIELD_GET(CTR_EL0, L1Ip, user_val);
2373 
2374 	/*
2375 	 * Both AIVIVT (0b01) and VPIPT (0b00) are documented as reserved.
2376 	 * Hence only allow setting VIPT (0b10) or PIPT (0b11) for L1Ip, based
2377 	 * on what the hardware reports.
2378 	 *
2379 	 * Using a VIPT software model on PIPT hardware leads to over-invalidation,
2380 	 * but remains correct. Hence, we can allow downgrading PIPT to VIPT,
2381 	 * but not the other way around. This is handled via arm64_ftr_safe_value(),
2382 	 * as the CTR_EL0 ftr_bits describe the L1Ip field as FTR_EXACT with a safe
2383 	 * value of VIPT.
2384 	 */
2385 	switch (user_L1Ip) {
2386 	case CTR_EL0_L1Ip_RESERVED_VPIPT:
2387 	case CTR_EL0_L1Ip_RESERVED_AIVIVT:
2388 		return -EINVAL;
2389 	case CTR_EL0_L1Ip_VIPT:
2390 	case CTR_EL0_L1Ip_PIPT:
2391 		return set_id_reg(vcpu, rd, user_val);
2392 	default:
2393 		return -ENOENT;
2394 	}
2395 }
2396 
2397 /*
2398  * cpufeature ID register user accessors
2399  *
2400  * For now, these registers are immutable for userspace, so no values
2401  * are stored, and for set_id_reg() we don't allow the effective value
2402  * to be changed.
2403  */
2404 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2405 		      u64 *val)
2406 {
2407 	/*
2408 	 * Avoid locking if the VM has already started, as the ID registers are
2409 	 * guaranteed to be invariant at that point.
2410 	 */
2411 	if (kvm_vm_has_ran_once(vcpu->kvm)) {
2412 		*val = read_id_reg(vcpu, rd);
2413 		return 0;
2414 	}
2415 
2416 	mutex_lock(&vcpu->kvm->arch.config_lock);
2417 	*val = read_id_reg(vcpu, rd);
2418 	mutex_unlock(&vcpu->kvm->arch.config_lock);
2419 
2420 	return 0;
2421 }
2422 
2423 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2424 		      u64 val)
2425 {
2426 	u32 id = reg_to_encoding(rd);
2427 	int ret;
2428 
2429 	mutex_lock(&vcpu->kvm->arch.config_lock);
2430 
2431 	/*
2432 	 * Once the VM has started the ID registers are immutable. Reject any
2433 	 * write that does not match the final register value.
2434 	 */
2435 	if (kvm_vm_has_ran_once(vcpu->kvm)) {
2436 		if (val != read_id_reg(vcpu, rd))
2437 			ret = -EBUSY;
2438 		else
2439 			ret = 0;
2440 
2441 		mutex_unlock(&vcpu->kvm->arch.config_lock);
2442 		return ret;
2443 	}
2444 
2445 	ret = arm64_check_features(vcpu, rd, val);
2446 	if (!ret)
2447 		kvm_set_vm_id_reg(vcpu->kvm, id, val);
2448 
2449 	mutex_unlock(&vcpu->kvm->arch.config_lock);
2450 
2451 	/*
2452 	 * arm64_check_features() returns -E2BIG to indicate the register's
2453 	 * feature set is a superset of the maximally-allowed register value.
2454 	 * While it would be nice to precisely describe this to userspace, the
2455 	 * existing UAPI for KVM_SET_ONE_REG has it that invalid register
2456 	 * writes return -EINVAL.
2457 	 */
2458 	if (ret == -E2BIG)
2459 		ret = -EINVAL;
2460 	return ret;
2461 }
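
/*
 * A minimal VMM-side sketch of the flow above (illustrative userspace code,
 * error handling elided; assumes a vcpu_fd obtained via KVM_CREATE_VCPU and
 * a vCPU that has not run yet):
 *
 *	struct kvm_one_reg reg;
 *	__u64 val;
 *
 *	reg.id   = ARM64_SYS_REG(3, 0, 0, 6, 0);	// ID_AA64ISAR0_EL1
 *	reg.addr = (__u64)&val;
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// read the current limit
 *	val &= ~(0xfULL << 4);			// de-feature AES (bits [7:4])
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	// OK: subset of the limit
 *	val |= 0xfULL << 4;			// claim an unsupported level
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	// fails with EINVAL
 */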
2462 
2463 void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
2464 {
2465 	u64 *p = __vm_id_reg(&kvm->arch, reg);
2466 
2467 	lockdep_assert_held(&kvm->arch.config_lock);
2468 
2469 	if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm))
2470 		return;
2471 
2472 	*p = val;
2473 }
2474 
2475 static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2476 		       u64 *val)
2477 {
2478 	*val = 0;
2479 	return 0;
2480 }
2481 
2482 static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2483 		      u64 val)
2484 {
2485 	return 0;
2486 }
2487 
2488 static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2489 		       const struct sys_reg_desc *r)
2490 {
2491 	if (p->is_write)
2492 		return write_to_read_only(vcpu, p, r);
2493 
2494 	p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0);
2495 	return true;
2496 }
2497 
2498 static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2499 			 const struct sys_reg_desc *r)
2500 {
2501 	if (p->is_write)
2502 		return write_to_read_only(vcpu, p, r);
2503 
2504 	p->regval = __vcpu_sys_reg(vcpu, r->reg);
2505 	return true;
2506 }
2507 
2508 /*
2509  * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
2510  * depending on the physical CPU on which the vcpu currently resides.
2511  */
2512 static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2513 {
2514 	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
2515 	u64 clidr;
2516 	u8 loc;
2517 
2518 	if ((ctr_el0 & CTR_EL0_IDC)) {
2519 		/*
2520 		 * Data cache clean to the PoU is not required so LoUU and LoUIS
2521 		 * will not be set and a unified cache, which will be marked as
2522 		 * LoC, will be added.
2523 		 *
2524 		 * If not DIC, make the unified cache L2 so that an instruction
2525 		 * cache can be added as L1 later.
2526 		 */
2527 		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
2528 		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
2529 	} else {
2530 		/*
2531 		 * Data cache clean to the PoU is required so let L1 have a data
2532 		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
2533 		 * it can be marked as LoC too.
2534 		 */
2535 		loc = 1;
2536 		clidr = 1 << CLIDR_LOUU_SHIFT;
2537 		clidr |= 1 << CLIDR_LOUIS_SHIFT;
2538 		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
2539 	}
2540 
2541 	/*
2542 	 * Instruction cache invalidation to the PoU is required so let L1 have
2543 	 * an instruction cache. If L1 already has a data cache, it will be
2544 	 * CACHE_TYPE_SEPARATE.
2545 	 */
2546 	if (!(ctr_el0 & CTR_EL0_DIC))
2547 		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);
2548 
2549 	clidr |= loc << CLIDR_LOC_SHIFT;
2550 
2551 	/*
2552 	 * Advertise a tag cache unified with the data cache. Allocation tags
2553 	 * and data are unified in a cache line, so this looks valid even if
2554 	 * there is only one cache line.
2555 	 */
2556 	if (kvm_has_mte(vcpu->kvm))
2557 		clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);
2558 
2559 	__vcpu_assign_sys_reg(vcpu, r->reg, clidr);
2560 
2561 	return __vcpu_sys_reg(vcpu, r->reg);
2562 }
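
/*
 * Example (illustrative): with CTR_EL0.{IDC,DIC} == {0,0} the fabricated
 * value is
 *
 *	CLIDR_EL1 = LoUU(1) | LoUIS(1) | LoC(1) | Ctype1(SEPARATE)
 *
 * i.e. split I/D caches at L1, which is also the level of unification.
 * With CTR_EL0.{IDC,DIC} == {1,1} it degenerates to a single unified L1:
 *
 *	CLIDR_EL1 = LoC(1) | Ctype1(UNIFIED)
 */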
2563 
2564 static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2565 		      u64 val)
2566 {
2567 	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
2568 	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));
2569 
2570 	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
2571 		return -EINVAL;
2572 
2573 	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
2574 
2575 	return 0;
2576 }
2577 
2578 static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2579 			  const struct sys_reg_desc *r)
2580 {
2581 	int reg = r->reg;
2582 
2583 	if (p->is_write)
2584 		vcpu_write_sys_reg(vcpu, p->regval, reg);
2585 	else
2586 		p->regval = vcpu_read_sys_reg(vcpu, reg);
2587 	return true;
2588 }
2589 
2590 static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2591 			  const struct sys_reg_desc *r)
2592 {
2593 	u32 csselr;
2594 
2595 	if (p->is_write)
2596 		return write_to_read_only(vcpu, p, r);
2597 
2598 	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
2599 	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
2600 	if (csselr < CSSELR_MAX)
2601 		p->regval = get_ccsidr(vcpu, csselr);
2602 
2603 	return true;
2604 }
2605 
2606 static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
2607 				   const struct sys_reg_desc *rd)
2608 {
2609 	if (kvm_has_mte(vcpu->kvm))
2610 		return 0;
2611 
2612 	return REG_HIDDEN;
2613 }
2614 
2615 #define MTE_REG(name) {				\
2616 	SYS_DESC(SYS_##name),			\
2617 	.access = undef_access,			\
2618 	.reset = reset_unknown,			\
2619 	.reg = name,				\
2620 	.visibility = mte_visibility,		\
2621 }
2622 
2623 static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
2624 				   const struct sys_reg_desc *rd)
2625 {
2626 	if (vcpu_has_nv(vcpu))
2627 		return 0;
2628 
2629 	return REG_HIDDEN;
2630 }
2631 
2632 static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
2633 			  struct sys_reg_params *p,
2634 			  const struct sys_reg_desc *r)
2635 {
2636 	/*
2637 	 * We really shouldn't be here, and this is likely the result
2638 	 * of a misconfigured trap, as this register should target the
2639 	 * VNCR page, and nothing else.
2640 	 */
2641 	return bad_trap(vcpu, p, r,
2642 			"trap of VNCR-backed register");
2643 }
2644 
2645 static bool bad_redir_trap(struct kvm_vcpu *vcpu,
2646 			   struct sys_reg_params *p,
2647 			   const struct sys_reg_desc *r)
2648 {
2649 	/*
2650 	 * We really shouldn't be here, and this is likely the result
2651 	 * of a misconfigured trap, as this register should target the
2652 	 * corresponding EL1, and nothing else.
2653 	 */
2654 	return bad_trap(vcpu, p, r,
2655 			"trap of EL2 register redirected to EL1");
2656 }
2657 
2658 #define SYS_REG_USER_FILTER(name, acc, rst, v, gu, su, filter) { \
2659 	SYS_DESC(SYS_##name),			\
2660 	.access = acc,				\
2661 	.reset = rst,				\
2662 	.reg = name,				\
2663 	.get_user = gu,				\
2664 	.set_user = su,				\
2665 	.visibility = filter,			\
2666 	.val = v,				\
2667 }
2668 
2669 #define EL2_REG_FILTERED(name, acc, rst, v, filter)	\
2670 	SYS_REG_USER_FILTER(name, acc, rst, v, NULL, NULL, filter)
2671 
2672 #define EL2_REG(name, acc, rst, v)			\
2673 	EL2_REG_FILTERED(name, acc, rst, v, el2_visibility)
2674 
2675 #define EL2_REG_VNCR(name, rst, v)	EL2_REG(name, bad_vncr_trap, rst, v)
2676 #define EL2_REG_VNCR_FILT(name, vis)			\
2677 	EL2_REG_FILTERED(name, bad_vncr_trap, reset_val, 0, vis)
2678 #define EL2_REG_VNCR_GICv3(name)			\
2679 	EL2_REG_VNCR_FILT(name, hidden_visibility)
2680 #define EL2_REG_REDIR(name, rst, v)	EL2_REG(name, bad_redir_trap, rst, v)
2681 
2682 #define TIMER_REG(name, vis)					   \
2683 	SYS_REG_USER_FILTER(name, access_arch_timer, reset_val, 0, \
2684 			    arch_timer_get_user, arch_timer_set_user, vis)
2685 
2686 /*
2687  * Since the reset() callback and the val field are not otherwise used for
2688  * idregs, they are repurposed for them:
2689  * reset() returns the KVM sanitised register value, which is the same as the
2690  * host kernel sanitised value if there is no KVM-specific sanitisation.
2691  * val is used as a mask indicating the writable fields of the idreg: only
2692  * bits set to 1 are writable from userspace. This mask may become
2693  * unnecessary in the future, once all ID registers are writable from
2694  * userspace.
2695  */
2696 
2697 #define ID_DESC_DEFAULT_CALLBACKS		\
2698 	.access	= access_id_reg,		\
2699 	.get_user = get_id_reg,			\
2700 	.set_user = set_id_reg,			\
2701 	.visibility = id_visibility,		\
2702 	.reset = kvm_read_sanitised_id_reg
2703 
2704 #define ID_DESC(name)				\
2705 	SYS_DESC(SYS_##name),			\
2706 	ID_DESC_DEFAULT_CALLBACKS
2707 
2708 /* sys_reg_desc initialiser for known cpufeature ID registers */
2709 #define ID_SANITISED(name) {			\
2710 	ID_DESC(name),				\
2711 	.val = 0,				\
2712 }
2713 
2714 /* sys_reg_desc initialiser for writable ID registers */
2715 #define ID_WRITABLE(name, mask) {		\
2716 	ID_DESC(name),				\
2717 	.val = mask,				\
2718 }
2719 
2720 /*
2721  * 32bit ID regs are fully writable when the guest is 32bit
2722  * capable. Nothing in the KVM code should rely on 32bit features
2723  * anyway, only 64bit ones, so let the VMM do its worst.
2724  */
2725 #define AA32_ID_WRITABLE(name) {		\
2726 	ID_DESC(name),				\
2727 	.visibility = aa32_id_visibility,	\
2728 	.val = GENMASK(31, 0),			\
2729 }
2730 
2731 /* sys_reg_desc initialiser for cpufeature ID registers that need filtering */
2732 #define ID_FILTERED(sysreg, name, mask) {	\
2733 	ID_DESC(sysreg),				\
2734 	.set_user = set_##name,				\
2735 	.val = (mask),					\
2736 }
2737 
2738 /*
2739  * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
2740  * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
2741  * (1 <= crm < 8, 0 <= Op2 < 8).
2742  */
2743 #define ID_UNALLOCATED(crm, op2) {			\
2744 	.name = "S3_0_0_" #crm "_" #op2,		\
2745 	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
2746 	ID_DESC_DEFAULT_CALLBACKS,			\
2747 	.visibility = raz_visibility,			\
2748 	.val = 0,					\
2749 }
2750 
2751 /*
2752  * sys_reg_desc initialiser for known ID registers that we hide from guests.
2753  * For now, these are exposed just like unallocated ID regs: they appear
2754  * RAZ for the guest.
2755  */
2756 #define ID_HIDDEN(name) {			\
2757 	ID_DESC(name),				\
2758 	.visibility = raz_visibility,		\
2759 	.val = 0,				\
2760 }
2761 
2762 static bool access_sp_el1(struct kvm_vcpu *vcpu,
2763 			  struct sys_reg_params *p,
2764 			  const struct sys_reg_desc *r)
2765 {
2766 	if (p->is_write)
2767 		__vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval);
2768 	else
2769 		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
2770 
2771 	return true;
2772 }
2773 
2774 static bool access_elr(struct kvm_vcpu *vcpu,
2775 		       struct sys_reg_params *p,
2776 		       const struct sys_reg_desc *r)
2777 {
2778 	if (p->is_write)
2779 		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
2780 	else
2781 		p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);
2782 
2783 	return true;
2784 }
2785 
2786 static bool access_spsr(struct kvm_vcpu *vcpu,
2787 			struct sys_reg_params *p,
2788 			const struct sys_reg_desc *r)
2789 {
2790 	if (p->is_write)
2791 		__vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval);
2792 	else
2793 		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
2794 
2795 	return true;
2796 }
2797 
2798 static bool access_cntkctl_el12(struct kvm_vcpu *vcpu,
2799 				struct sys_reg_params *p,
2800 				const struct sys_reg_desc *r)
2801 {
2802 	if (p->is_write)
2803 		__vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval);
2804 	else
2805 		p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);
2806 
2807 	return true;
2808 }
2809 
2810 static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2811 {
2812 	u64 val = r->val;
2813 
2814 	if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
2815 		val |= HCR_E2H;
2816 
2817 	__vcpu_assign_sys_reg(vcpu, r->reg, val);
2818 
2819 	return __vcpu_sys_reg(vcpu, r->reg);
2820 }
2821 
2822 static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu,
2823 				     const struct sys_reg_desc *rd,
2824 				     unsigned int (*fn)(const struct kvm_vcpu *,
2825 							const struct sys_reg_desc *))
2826 {
2827 	return el2_visibility(vcpu, rd) ?: fn(vcpu, rd);
2828 }
2829 
2830 static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
2831 				       const struct sys_reg_desc *rd)
2832 {
2833 	return __el2_visibility(vcpu, rd, sve_visibility);
2834 }
2835 
2836 static unsigned int vncr_el2_visibility(const struct kvm_vcpu *vcpu,
2837 					const struct sys_reg_desc *rd)
2838 {
2839 	if (el2_visibility(vcpu, rd) == 0 &&
2840 	    kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
2841 		return 0;
2842 
2843 	return REG_HIDDEN;
2844 }
2845 
2846 static unsigned int sctlr2_visibility(const struct kvm_vcpu *vcpu,
2847 				      const struct sys_reg_desc *rd)
2848 {
2849 	if (kvm_has_sctlr2(vcpu->kvm))
2850 		return 0;
2851 
2852 	return REG_HIDDEN;
2853 }
2854 
2855 static unsigned int sctlr2_el2_visibility(const struct kvm_vcpu *vcpu,
2856 					  const struct sys_reg_desc *rd)
2857 {
2858 	return __el2_visibility(vcpu, rd, sctlr2_visibility);
2859 }
2860 
2861 static bool access_zcr_el2(struct kvm_vcpu *vcpu,
2862 			   struct sys_reg_params *p,
2863 			   const struct sys_reg_desc *r)
2864 {
2865 	unsigned int vq;
2866 
2867 	if (guest_hyp_sve_traps_enabled(vcpu)) {
2868 		kvm_inject_nested_sve_trap(vcpu);
2869 		return false;
2870 	}
2871 
2872 	if (!p->is_write) {
2873 		p->regval = __vcpu_sys_reg(vcpu, ZCR_EL2);
2874 		return true;
2875 	}
2876 
2877 	vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
2878 	vq = min(vq, vcpu_sve_max_vq(vcpu));
2879 	__vcpu_assign_sys_reg(vcpu, ZCR_EL2, vq - 1);
2880 	return true;
2881 }
2882 
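
/*
 * Example (illustrative): on a vCPU with vcpu_sve_max_vq() == 4 (512-bit
 * vectors), a guest write of ZCR_EL2.LEN == 7 (requesting vq == 8) is
 * clamped and stored as ZCR_EL2 == 3, i.e. vq == 4.
 */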
2883 static bool access_gic_vtr(struct kvm_vcpu *vcpu,
2884 			   struct sys_reg_params *p,
2885 			   const struct sys_reg_desc *r)
2886 {
2887 	if (p->is_write)
2888 		return write_to_read_only(vcpu, p, r);
2889 
2890 	p->regval = kvm_get_guest_vtr_el2();
2891 
2892 	return true;
2893 }
2894 
2895 static bool access_gic_misr(struct kvm_vcpu *vcpu,
2896 			    struct sys_reg_params *p,
2897 			    const struct sys_reg_desc *r)
2898 {
2899 	if (p->is_write)
2900 		return write_to_read_only(vcpu, p, r);
2901 
2902 	p->regval = vgic_v3_get_misr(vcpu);
2903 
2904 	return true;
2905 }
2906 
2907 static bool access_gic_eisr(struct kvm_vcpu *vcpu,
2908 			    struct sys_reg_params *p,
2909 			    const struct sys_reg_desc *r)
2910 {
2911 	if (p->is_write)
2912 		return write_to_read_only(vcpu, p, r);
2913 
2914 	p->regval = vgic_v3_get_eisr(vcpu);
2915 
2916 	return true;
2917 }
2918 
2919 static bool access_gic_elrsr(struct kvm_vcpu *vcpu,
2920 			     struct sys_reg_params *p,
2921 			     const struct sys_reg_desc *r)
2922 {
2923 	if (p->is_write)
2924 		return write_to_read_only(vcpu, p, r);
2925 
2926 	p->regval = vgic_v3_get_elrsr(vcpu);
2927 
2928 	return true;
2929 }
2930 
2931 static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu,
2932 				     const struct sys_reg_desc *rd)
2933 {
2934 	if (kvm_has_s1poe(vcpu->kvm))
2935 		return 0;
2936 
2937 	return REG_HIDDEN;
2938 }
2939 
2940 static unsigned int s1poe_el2_visibility(const struct kvm_vcpu *vcpu,
2941 					 const struct sys_reg_desc *rd)
2942 {
2943 	return __el2_visibility(vcpu, rd, s1poe_visibility);
2944 }
2945 
2946 static unsigned int tcr2_visibility(const struct kvm_vcpu *vcpu,
2947 				    const struct sys_reg_desc *rd)
2948 {
2949 	if (kvm_has_tcr2(vcpu->kvm))
2950 		return 0;
2951 
2952 	return REG_HIDDEN;
2953 }
2954 
2955 static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu,
2956 				    const struct sys_reg_desc *rd)
2957 {
2958 	return __el2_visibility(vcpu, rd, tcr2_visibility);
2959 }
2960 
2961 static unsigned int fgt2_visibility(const struct kvm_vcpu *vcpu,
2962 				    const struct sys_reg_desc *rd)
2963 {
2964 	if (el2_visibility(vcpu, rd) == 0 &&
2965 	    kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, FGT2))
2966 		return 0;
2967 
2968 	return REG_HIDDEN;
2969 }
2970 
2971 static unsigned int fgt_visibility(const struct kvm_vcpu *vcpu,
2972 				   const struct sys_reg_desc *rd)
2973 {
2974 	if (el2_visibility(vcpu, rd) == 0 &&
2975 	    kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, IMP))
2976 		return 0;
2977 
2978 	return REG_HIDDEN;
2979 }
2980 
2981 static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu,
2982 				     const struct sys_reg_desc *rd)
2983 {
2984 	if (kvm_has_s1pie(vcpu->kvm))
2985 		return 0;
2986 
2987 	return REG_HIDDEN;
2988 }
2989 
2990 static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
2991 					 const struct sys_reg_desc *rd)
2992 {
2993 	return __el2_visibility(vcpu, rd, s1pie_visibility);
2994 }
2995 
2996 static unsigned int cnthv_visibility(const struct kvm_vcpu *vcpu,
2997 				     const struct sys_reg_desc *rd)
2998 {
2999 	if (vcpu_has_nv(vcpu) &&
3000 	    !vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2_E2H0))
3001 		return 0;
3002 
3003 	return REG_HIDDEN;
3004 }
3005 
3006 static bool access_mdcr(struct kvm_vcpu *vcpu,
3007 			struct sys_reg_params *p,
3008 			const struct sys_reg_desc *r)
3009 {
3010 	u64 hpmn, val, old = __vcpu_sys_reg(vcpu, MDCR_EL2);
3011 
3012 	if (!p->is_write) {
3013 		p->regval = old;
3014 		return true;
3015 	}
3016 
3017 	val = p->regval;
3018 	hpmn = FIELD_GET(MDCR_EL2_HPMN, val);
3019 
3020 	/*
3021 	 * If HPMN is out of bounds, limit it to what we actually
3022 	 * support. This matches the UNKNOWN definition of the field
3023 	 * in that case, and keeps the emulation simple. Sort of.
3024 	 */
3025 	if (hpmn > vcpu->kvm->arch.nr_pmu_counters) {
3026 		hpmn = vcpu->kvm->arch.nr_pmu_counters;
3027 		u64p_replace_bits(&val, hpmn, MDCR_EL2_HPMN);
3028 	}
3029 
3030 	__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
3031 
3032 	/*
3033 	 * Request a reload of the PMU to enable/disable the counters
3034 	 * affected by HPME.
3035 	 */
3036 	if ((old ^ val) & MDCR_EL2_HPME)
3037 		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
3038 
3039 	return true;
3040 }
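
/*
 * Example (illustrative): with vcpu->kvm->arch.nr_pmu_counters == 6, a
 * guest write of MDCR_EL2 with HPMN == 14 is stored back with HPMN == 6,
 * and a write that also flips HPME triggers KVM_REQ_RELOAD_PMU.
 */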
3041 
3042 static bool access_ras(struct kvm_vcpu *vcpu,
3043 		       struct sys_reg_params *p,
3044 		       const struct sys_reg_desc *r)
3045 {
3046 	struct kvm *kvm = vcpu->kvm;
3047 
3048 	switch (reg_to_encoding(r)) {
3049 	case SYS_ERXPFGCDN_EL1:
3050 	case SYS_ERXPFGCTL_EL1:
3051 	case SYS_ERXPFGF_EL1:
3052 	case SYS_ERXMISC2_EL1:
3053 	case SYS_ERXMISC3_EL1:
3054 		if (!(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) ||
3055 		      (kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, RAS, IMP) &&
3056 		       kvm_has_feat(kvm, ID_AA64PFR1_EL1, RAS_frac, RASv1p1)))) {
3057 			kvm_inject_undefined(vcpu);
3058 			return false;
3059 		}
3060 		break;
3061 	default:
3062 		if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
3063 			kvm_inject_undefined(vcpu);
3064 			return false;
3065 		}
3066 	}
3067 
3068 	return trap_raz_wi(vcpu, p, r);
3069 }
3070 
3071 /*
3072  * For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and
3073  * AIDR_EL1 as "invariant" registers, meaning userspace cannot change them.
3074  * The values made visible to userspace were the register values of the boot
3075  * CPU.
3076  *
3077  * At the same time, reads from these registers at EL1 previously were not
3078  * trapped, allowing the guest to read the actual hardware value. On big-little
3079  * trapped, allowing the guest to read the actual hardware value. On big.LITTLE
3080  * given vCPU got scheduled.
3081  *
3082  * These registers are now trapped as collateral damage from SME, and what
3083  * follows attempts to give a user / guest view consistent with the existing
3084  * ABI.
3085  */
3086 static bool access_imp_id_reg(struct kvm_vcpu *vcpu,
3087 			      struct sys_reg_params *p,
3088 			      const struct sys_reg_desc *r)
3089 {
3090 	if (p->is_write)
3091 		return write_to_read_only(vcpu, p, r);
3092 
3093 	/*
3094 	 * Return the VM-scoped implementation ID register values if userspace
3095 	 * has made them writable.
3096 	 */
3097 	if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags))
3098 		return access_id_reg(vcpu, p, r);
3099 
3100 	/*
3101 	 * Otherwise, fall back to the old behavior of returning the value of
3102 	 * the current CPU.
3103 	 */
3104 	switch (reg_to_encoding(r)) {
3105 	case SYS_REVIDR_EL1:
3106 		p->regval = read_sysreg(revidr_el1);
3107 		break;
3108 	case SYS_AIDR_EL1:
3109 		p->regval = read_sysreg(aidr_el1);
3110 		break;
3111 	default:
3112 		WARN_ON_ONCE(1);
3113 	}
3114 
3115 	return true;
3116 }
3117 
3118 static u64 __ro_after_init boot_cpu_midr_val;
3119 static u64 __ro_after_init boot_cpu_revidr_val;
3120 static u64 __ro_after_init boot_cpu_aidr_val;
3121 
3122 static void init_imp_id_regs(void)
3123 {
3124 	boot_cpu_midr_val = read_sysreg(midr_el1);
3125 	boot_cpu_revidr_val = read_sysreg(revidr_el1);
3126 	boot_cpu_aidr_val = read_sysreg(aidr_el1);
3127 }
3128 
3129 static u64 reset_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
3130 {
3131 	switch (reg_to_encoding(r)) {
3132 	case SYS_MIDR_EL1:
3133 		return boot_cpu_midr_val;
3134 	case SYS_REVIDR_EL1:
3135 		return boot_cpu_revidr_val;
3136 	case SYS_AIDR_EL1:
3137 		return boot_cpu_aidr_val;
3138 	default:
3139 		KVM_BUG_ON(1, vcpu->kvm);
3140 		return 0;
3141 	}
3142 }
3143 
3144 static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
3145 			  u64 val)
3146 {
3147 	struct kvm *kvm = vcpu->kvm;
3148 	u64 expected;
3149 
3150 	guard(mutex)(&kvm->arch.config_lock);
3151 
3152 	expected = read_id_reg(vcpu, r);
3153 	if (expected == val)
3154 		return 0;
3155 
3156 	if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags))
3157 		return -EINVAL;
3158 
3159 	/*
3160 	 * Once the VM has started the ID registers are immutable. Reject the
3161 	 * write if userspace tries to change it.
3162 	 */
3163 	if (kvm_vm_has_ran_once(kvm))
3164 		return -EBUSY;
3165 
3166 	/*
3167 	 * Any value is allowed for the implementation ID registers so long as
3168 	 * it is within the writable mask.
3169 	 */
3170 	if ((val & r->val) != val)
3171 		return -EINVAL;
3172 
3173 	kvm_set_vm_id_reg(kvm, reg_to_encoding(r), val);
3174 	return 0;
3175 }
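
/*
 * VMM-side sketch (illustrative userspace code, error handling elided; the
 * capability can only be enabled before any vCPU is created):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_ARM_WRITABLE_IMP_ID_REGS,
 *	};
 *	__u64 midr = 0x410fd4f0;			// made-up example value
 *	struct kvm_one_reg reg = {
 *		.id   = ARM64_SYS_REG(3, 0, 0, 0, 0),	// MIDR_EL1
 *		.addr = (__u64)&midr,
 *	};
 *
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	// must fit the writable mask
 */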
3176 
3177 #define IMPLEMENTATION_ID(reg, mask) {			\
3178 	SYS_DESC(SYS_##reg),				\
3179 	.access = access_imp_id_reg,			\
3180 	.get_user = get_id_reg,				\
3181 	.set_user = set_imp_id_reg,			\
3182 	.reset = reset_imp_id_reg,			\
3183 	.val = mask,					\
3184 	}
3185 
3186 static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
3187 {
3188 	__vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters);
3189 	return vcpu->kvm->arch.nr_pmu_counters;
3190 }
3191 
3192 /*
3193  * Architected system registers.
3194  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
3195  *
3196  * Debug handling: We do trap most, if not all, debug-related system
3197  * registers. The implementation is good enough to ensure that a guest
3198  * can use these with minimal performance degradation. The drawback is
3199  * that we don't implement any of the external debug architecture.
3200  * This should be revisited if we ever encounter a more demanding
3201  * guest...
3202  */
3203 static const struct sys_reg_desc sys_reg_descs[] = {
3204 	DBG_BCR_BVR_WCR_WVR_EL1(0),
3205 	DBG_BCR_BVR_WCR_WVR_EL1(1),
3206 	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
3207 	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
3208 	DBG_BCR_BVR_WCR_WVR_EL1(2),
3209 	DBG_BCR_BVR_WCR_WVR_EL1(3),
3210 	DBG_BCR_BVR_WCR_WVR_EL1(4),
3211 	DBG_BCR_BVR_WCR_WVR_EL1(5),
3212 	DBG_BCR_BVR_WCR_WVR_EL1(6),
3213 	DBG_BCR_BVR_WCR_WVR_EL1(7),
3214 	DBG_BCR_BVR_WCR_WVR_EL1(8),
3215 	DBG_BCR_BVR_WCR_WVR_EL1(9),
3216 	DBG_BCR_BVR_WCR_WVR_EL1(10),
3217 	DBG_BCR_BVR_WCR_WVR_EL1(11),
3218 	DBG_BCR_BVR_WCR_WVR_EL1(12),
3219 	DBG_BCR_BVR_WCR_WVR_EL1(13),
3220 	DBG_BCR_BVR_WCR_WVR_EL1(14),
3221 	DBG_BCR_BVR_WCR_WVR_EL1(15),
3222 
3223 	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
3224 	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
3225 	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
3226 		OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
3227 	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
3228 	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
3229 	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
3230 	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
3231 	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
3232 
3233 	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
3234 	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
3235 	// DBGDTR[TR]X_EL0 share the same encoding
3236 	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
3237 
3238 	{ SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 },
3239 
3240 	IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)),
3241 	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
3242 	IMPLEMENTATION_ID(REVIDR_EL1, GENMASK_ULL(63, 0)),
3243 
3244 	/*
3245 	 * ID regs: all ID_SANITISED() entries here must have corresponding
3246 	 * entries in arm64_ftr_regs[].
3247 	 */
3248 
3249 	/* AArch64 mappings of the AArch32 ID registers */
3250 	/* CRm=1 */
3251 	AA32_ID_WRITABLE(ID_PFR0_EL1),
3252 	AA32_ID_WRITABLE(ID_PFR1_EL1),
3253 	{ SYS_DESC(SYS_ID_DFR0_EL1),
3254 	  .access = access_id_reg,
3255 	  .get_user = get_id_reg,
3256 	  .set_user = set_id_dfr0_el1,
3257 	  .visibility = aa32_id_visibility,
3258 	  .reset = read_sanitised_id_dfr0_el1,
3259 	  .val = GENMASK(31, 0) },
3260 	ID_HIDDEN(ID_AFR0_EL1),
3261 	AA32_ID_WRITABLE(ID_MMFR0_EL1),
3262 	AA32_ID_WRITABLE(ID_MMFR1_EL1),
3263 	AA32_ID_WRITABLE(ID_MMFR2_EL1),
3264 	AA32_ID_WRITABLE(ID_MMFR3_EL1),
3265 
3266 	/* CRm=2 */
3267 	AA32_ID_WRITABLE(ID_ISAR0_EL1),
3268 	AA32_ID_WRITABLE(ID_ISAR1_EL1),
3269 	AA32_ID_WRITABLE(ID_ISAR2_EL1),
3270 	AA32_ID_WRITABLE(ID_ISAR3_EL1),
3271 	AA32_ID_WRITABLE(ID_ISAR4_EL1),
3272 	AA32_ID_WRITABLE(ID_ISAR5_EL1),
3273 	AA32_ID_WRITABLE(ID_MMFR4_EL1),
3274 	AA32_ID_WRITABLE(ID_ISAR6_EL1),
3275 
3276 	/* CRm=3 */
3277 	AA32_ID_WRITABLE(MVFR0_EL1),
3278 	AA32_ID_WRITABLE(MVFR1_EL1),
3279 	AA32_ID_WRITABLE(MVFR2_EL1),
3280 	ID_UNALLOCATED(3,3),
3281 	AA32_ID_WRITABLE(ID_PFR2_EL1),
3282 	ID_HIDDEN(ID_DFR1_EL1),
3283 	AA32_ID_WRITABLE(ID_MMFR5_EL1),
3284 	ID_UNALLOCATED(3,7),
3285 
3286 	/* AArch64 ID registers */
3287 	/* CRm=4 */
3288 	ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1,
3289 		    ~(ID_AA64PFR0_EL1_AMU |
3290 		      ID_AA64PFR0_EL1_MPAM |
3291 		      ID_AA64PFR0_EL1_SVE |
3292 		      ID_AA64PFR0_EL1_AdvSIMD |
3293 		      ID_AA64PFR0_EL1_FP)),
3294 	ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
3295 				     ~(ID_AA64PFR1_EL1_PFAR |
3296 				       ID_AA64PFR1_EL1_MTEX |
3297 				       ID_AA64PFR1_EL1_THE |
3298 				       ID_AA64PFR1_EL1_GCS |
3299 				       ID_AA64PFR1_EL1_MTE_frac |
3300 				       ID_AA64PFR1_EL1_NMI |
3301 				       ID_AA64PFR1_EL1_RNDR_trap |
3302 				       ID_AA64PFR1_EL1_SME |
3303 				       ID_AA64PFR1_EL1_RES0 |
3304 				       ID_AA64PFR1_EL1_MPAM_frac |
3305 				       ID_AA64PFR1_EL1_MTE)),
3306 	ID_FILTERED(ID_AA64PFR2_EL1, id_aa64pfr2_el1,
3307 		    ~(ID_AA64PFR2_EL1_FPMR |
3308 		      ID_AA64PFR2_EL1_MTEFAR |
3309 		      ID_AA64PFR2_EL1_MTESTOREONLY |
3310 		      ID_AA64PFR2_EL1_GCIE)),
3311 	ID_UNALLOCATED(4,3),
3312 	ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
3313 	ID_HIDDEN(ID_AA64SMFR0_EL1),
3314 	ID_UNALLOCATED(4,6),
3315 	ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
3316 
3317 	/* CRm=5 */
3318 	/*
3319 	 * Prior to FEAT_Debugv8.9, the architecture defines context-aware
3320 	 * breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs).
3321 	 * KVM does not trap + emulate the breakpoint registers, and as such
3322 	 * cannot support a layout that misaligns with the underlying hardware.
3323 	 * While it may be possible to describe a subset that aligns with
3324 	 * hardware, just prevent changes to BRPs and CTX_CMPs altogether for
3325 	 * simplicity.
3326 	 *
3327 	 * See DDI0487K.a, section D2.8.3 Breakpoint types and linking
3328 	 * of breakpoints for more details.
3329 	 */
3330 	ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1,
3331 		    ID_AA64DFR0_EL1_DoubleLock_MASK |
3332 		    ID_AA64DFR0_EL1_WRPs_MASK |
3333 		    ID_AA64DFR0_EL1_PMUVer_MASK |
3334 		    ID_AA64DFR0_EL1_DebugVer_MASK),
3335 	ID_SANITISED(ID_AA64DFR1_EL1),
3336 	ID_UNALLOCATED(5,2),
3337 	ID_UNALLOCATED(5,3),
3338 	ID_HIDDEN(ID_AA64AFR0_EL1),
3339 	ID_HIDDEN(ID_AA64AFR1_EL1),
3340 	ID_UNALLOCATED(5,6),
3341 	ID_UNALLOCATED(5,7),
3342 
3343 	/* CRm=6 */
3344 	ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
3345 	ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
3346 					ID_AA64ISAR1_EL1_GPA |
3347 					ID_AA64ISAR1_EL1_API |
3348 					ID_AA64ISAR1_EL1_APA)),
3349 	ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
3350 					ID_AA64ISAR2_EL1_APA3 |
3351 					ID_AA64ISAR2_EL1_GPA3)),
3352 	ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT |
3353 				       ID_AA64ISAR3_EL1_LSFE |
3354 				       ID_AA64ISAR3_EL1_FAMINMAX)),
3355 	ID_UNALLOCATED(6,4),
3356 	ID_UNALLOCATED(6,5),
3357 	ID_UNALLOCATED(6,6),
3358 	ID_UNALLOCATED(6,7),
3359 
3360 	/* CRm=7 */
3361 	ID_FILTERED(ID_AA64MMFR0_EL1, id_aa64mmfr0_el1,
3362 				      ~(ID_AA64MMFR0_EL1_RES0 |
3363 					ID_AA64MMFR0_EL1_ASIDBITS)),
3364 	ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
3365 					ID_AA64MMFR1_EL1_XNX |
3366 					ID_AA64MMFR1_EL1_VH |
3367 					ID_AA64MMFR1_EL1_VMIDBits)),
3368 	ID_FILTERED(ID_AA64MMFR2_EL1,
3369 		    id_aa64mmfr2_el1, ~(ID_AA64MMFR2_EL1_RES0 |
3370 					ID_AA64MMFR2_EL1_EVT |
3371 					ID_AA64MMFR2_EL1_FWB |
3372 					ID_AA64MMFR2_EL1_IDS |
3373 					ID_AA64MMFR2_EL1_NV |
3374 					ID_AA64MMFR2_EL1_CCIDX)),
3375 	ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX	|
3376 				       ID_AA64MMFR3_EL1_SCTLRX	|
3377 				       ID_AA64MMFR3_EL1_S1PIE   |
3378 				       ID_AA64MMFR3_EL1_S1POE)),
3379 	ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac),
3380 	ID_UNALLOCATED(7,5),
3381 	ID_UNALLOCATED(7,6),
3382 	ID_UNALLOCATED(7,7),
3383 
3384 	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
3385 	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
3386 	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
3387 	{ SYS_DESC(SYS_SCTLR2_EL1), access_vm_reg, reset_val, SCTLR2_EL1, 0,
3388 	  .visibility = sctlr2_visibility },
3389 
3390 	MTE_REG(RGSR_EL1),
3391 	MTE_REG(GCR_EL1),
3392 
3393 	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
3394 	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
3395 	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
3396 	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
3397 	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
3398 	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
3399 	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
3400 	{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0,
3401 	  .visibility = tcr2_visibility },
3402 
3403 	PTRAUTH_KEY(APIA),
3404 	PTRAUTH_KEY(APIB),
3405 	PTRAUTH_KEY(APDA),
3406 	PTRAUTH_KEY(APDB),
3407 	PTRAUTH_KEY(APGA),
3408 
3409 	{ SYS_DESC(SYS_SPSR_EL1), access_spsr },
3410 	{ SYS_DESC(SYS_ELR_EL1), access_elr },
3411 
3412 	{ SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
3413 
3414 	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
3415 	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
3416 	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
3417 
3418 	{ SYS_DESC(SYS_ERRIDR_EL1), access_ras },
3419 	{ SYS_DESC(SYS_ERRSELR_EL1), access_ras },
3420 	{ SYS_DESC(SYS_ERXFR_EL1), access_ras },
3421 	{ SYS_DESC(SYS_ERXCTLR_EL1), access_ras },
3422 	{ SYS_DESC(SYS_ERXSTATUS_EL1), access_ras },
3423 	{ SYS_DESC(SYS_ERXADDR_EL1), access_ras },
3424 	{ SYS_DESC(SYS_ERXPFGF_EL1), access_ras },
3425 	{ SYS_DESC(SYS_ERXPFGCTL_EL1), access_ras },
3426 	{ SYS_DESC(SYS_ERXPFGCDN_EL1), access_ras },
3427 	{ SYS_DESC(SYS_ERXMISC0_EL1), access_ras },
3428 	{ SYS_DESC(SYS_ERXMISC1_EL1), access_ras },
3429 	{ SYS_DESC(SYS_ERXMISC2_EL1), access_ras },
3430 	{ SYS_DESC(SYS_ERXMISC3_EL1), access_ras },
3431 
3432 	MTE_REG(TFSR_EL1),
3433 	MTE_REG(TFSRE0_EL1),
3434 
3435 	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
3436 	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
3437 
3438 	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
3439 	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
3440 	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
3441 	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
3442 	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
3443 	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
3444 	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
3445 	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
3446 	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
3447 	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
3448 	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
3449 	{ SYS_DESC(SYS_PMSDSFR_EL1), undef_access },
3450 	/* PMBIDR_EL1 is not trapped */
3451 
3452 	{ PMU_SYS_REG(PMINTENSET_EL1),
3453 	  .access = access_pminten, .reg = PMINTENSET_EL1,
3454 	  .get_user = get_pmreg, .set_user = set_pmreg },
3455 	{ PMU_SYS_REG(PMINTENCLR_EL1),
3456 	  .access = access_pminten, .reg = PMINTENSET_EL1,
3457 	  .get_user = get_pmreg, .set_user = set_pmreg },
3458 	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
3459 
3460 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
3461 	{ SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1,
3462 	  .visibility = s1pie_visibility },
3463 	{ SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1,
3464 	  .visibility = s1pie_visibility },
3465 	{ SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1,
3466 	  .visibility = s1poe_visibility },
3467 	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
3468 
3469 	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
3470 	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
3471 	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
3472 	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
3473 	{ SYS_DESC(SYS_MPAMIDR_EL1), undef_access },
3474 	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },
3475 
3476 	{ SYS_DESC(SYS_MPAM1_EL1), undef_access },
3477 	{ SYS_DESC(SYS_MPAM0_EL1), undef_access },
3478 	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
3479 	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
3480 
3481 	{ SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
3482 	{ SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
3483 	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
3484 	{ SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
3485 	{ SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
3486 	{ SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
3487 	{ SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
3488 	{ SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
3489 	{ SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
3490 	{ SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
3491 	{ SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
3492 	{ SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
3493 	{ SYS_DESC(SYS_ICC_IDR0_EL1), access_gicv5_idr0 },
3494 	{ SYS_DESC(SYS_ICC_IAFFIDR_EL1), access_gicv5_iaffid },
3495 	{ SYS_DESC(SYS_ICC_PPI_ENABLER0_EL1), access_gicv5_ppi_enabler },
3496 	{ SYS_DESC(SYS_ICC_PPI_ENABLER1_EL1), access_gicv5_ppi_enabler },
3497 	{ SYS_DESC(SYS_ICC_DIR_EL1), access_gic_dir },
3498 	{ SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
3499 	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
3500 	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
3501 	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
3502 	{ SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
3503 	{ SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
3504 	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
3505 	{ SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
3506 	{ SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
3507 	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
3508 	{ SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
3509 	{ SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
3510 
3511 	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
3512 	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
3513 
3514 	{ SYS_DESC(SYS_ACCDATA_EL1), undef_access },
3515 
3516 	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
3517 
3518 	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
3519 
3520 	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
3521 	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
3522 	  .set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
3523 	IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)),
3524 	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
3525 	ID_FILTERED(CTR_EL0, ctr_el0,
3526 		    CTR_EL0_DIC_MASK |
3527 		    CTR_EL0_IDC_MASK |
3528 		    CTR_EL0_DminLine_MASK |
3529 		    CTR_EL0_L1Ip_MASK |
3530 		    CTR_EL0_IminLine_MASK),
3531 	{ SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility  },
3532 	{ SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
3533 
3534 	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
3535 	  .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
3536 	{ PMU_SYS_REG(PMCNTENSET_EL0),
3537 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0,
3538 	  .get_user = get_pmreg, .set_user = set_pmreg },
3539 	{ PMU_SYS_REG(PMCNTENCLR_EL0),
3540 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0,
3541 	  .get_user = get_pmreg, .set_user = set_pmreg },
3542 	{ PMU_SYS_REG(PMOVSCLR_EL0),
3543 	  .access = access_pmovs, .reg = PMOVSSET_EL0,
3544 	  .get_user = get_pmreg, .set_user = set_pmreg },
3545 	/*
3546 	 * PMSWINC_EL0 is exposed to userspace as RAZ/WI, as it was
3547 	 * (pointlessly) advertised in the past...
3548 	 */
3549 	{ PMU_SYS_REG(PMSWINC_EL0),
3550 	  .get_user = get_raz_reg, .set_user = set_wi_reg,
3551 	  .access = access_pmswinc, .reset = NULL },
3552 	{ PMU_SYS_REG(PMSELR_EL0),
3553 	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
3554 	{ PMU_SYS_REG(PMCEID0_EL0),
3555 	  .access = access_pmceid, .reset = NULL },
3556 	{ PMU_SYS_REG(PMCEID1_EL0),
3557 	  .access = access_pmceid, .reset = NULL },
3558 	{ PMU_SYS_REG(PMCCNTR_EL0),
3559 	  .access = access_pmu_evcntr, .reset = reset_unknown,
3560 	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr,
3561 	  .set_user = set_pmu_evcntr },
3562 	{ PMU_SYS_REG(PMXEVTYPER_EL0),
3563 	  .access = access_pmu_evtyper, .reset = NULL },
3564 	{ PMU_SYS_REG(PMXEVCNTR_EL0),
3565 	  .access = access_pmu_evcntr, .reset = NULL },
3566 	/*
3567 	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
3568 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
3569 	 */
3570 	{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
3571 	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
3572 	{ PMU_SYS_REG(PMOVSSET_EL0),
3573 	  .access = access_pmovs, .reg = PMOVSSET_EL0,
3574 	  .get_user = get_pmreg, .set_user = set_pmreg },
3575 
3576 	{ SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0,
3577 	  .visibility = s1poe_visibility },
3578 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
3579 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
3580 	{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },
3581 
3582 	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
3583 
3584 	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
3585 	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
3586 	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
3587 	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
3588 	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
3589 	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
3590 	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
3591 	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
3592 	AMU_AMEVCNTR0_EL0(0),
3593 	AMU_AMEVCNTR0_EL0(1),
3594 	AMU_AMEVCNTR0_EL0(2),
3595 	AMU_AMEVCNTR0_EL0(3),
3596 	AMU_AMEVCNTR0_EL0(4),
3597 	AMU_AMEVCNTR0_EL0(5),
3598 	AMU_AMEVCNTR0_EL0(6),
3599 	AMU_AMEVCNTR0_EL0(7),
3600 	AMU_AMEVCNTR0_EL0(8),
3601 	AMU_AMEVCNTR0_EL0(9),
3602 	AMU_AMEVCNTR0_EL0(10),
3603 	AMU_AMEVCNTR0_EL0(11),
3604 	AMU_AMEVCNTR0_EL0(12),
3605 	AMU_AMEVCNTR0_EL0(13),
3606 	AMU_AMEVCNTR0_EL0(14),
3607 	AMU_AMEVCNTR0_EL0(15),
3608 	AMU_AMEVTYPER0_EL0(0),
3609 	AMU_AMEVTYPER0_EL0(1),
3610 	AMU_AMEVTYPER0_EL0(2),
3611 	AMU_AMEVTYPER0_EL0(3),
3612 	AMU_AMEVTYPER0_EL0(4),
3613 	AMU_AMEVTYPER0_EL0(5),
3614 	AMU_AMEVTYPER0_EL0(6),
3615 	AMU_AMEVTYPER0_EL0(7),
3616 	AMU_AMEVTYPER0_EL0(8),
3617 	AMU_AMEVTYPER0_EL0(9),
3618 	AMU_AMEVTYPER0_EL0(10),
3619 	AMU_AMEVTYPER0_EL0(11),
3620 	AMU_AMEVTYPER0_EL0(12),
3621 	AMU_AMEVTYPER0_EL0(13),
3622 	AMU_AMEVTYPER0_EL0(14),
3623 	AMU_AMEVTYPER0_EL0(15),
3624 	AMU_AMEVCNTR1_EL0(0),
3625 	AMU_AMEVCNTR1_EL0(1),
3626 	AMU_AMEVCNTR1_EL0(2),
3627 	AMU_AMEVCNTR1_EL0(3),
3628 	AMU_AMEVCNTR1_EL0(4),
3629 	AMU_AMEVCNTR1_EL0(5),
3630 	AMU_AMEVCNTR1_EL0(6),
3631 	AMU_AMEVCNTR1_EL0(7),
3632 	AMU_AMEVCNTR1_EL0(8),
3633 	AMU_AMEVCNTR1_EL0(9),
3634 	AMU_AMEVCNTR1_EL0(10),
3635 	AMU_AMEVCNTR1_EL0(11),
3636 	AMU_AMEVCNTR1_EL0(12),
3637 	AMU_AMEVCNTR1_EL0(13),
3638 	AMU_AMEVCNTR1_EL0(14),
3639 	AMU_AMEVCNTR1_EL0(15),
3640 	AMU_AMEVTYPER1_EL0(0),
3641 	AMU_AMEVTYPER1_EL0(1),
3642 	AMU_AMEVTYPER1_EL0(2),
3643 	AMU_AMEVTYPER1_EL0(3),
3644 	AMU_AMEVTYPER1_EL0(4),
3645 	AMU_AMEVTYPER1_EL0(5),
3646 	AMU_AMEVTYPER1_EL0(6),
3647 	AMU_AMEVTYPER1_EL0(7),
3648 	AMU_AMEVTYPER1_EL0(8),
3649 	AMU_AMEVTYPER1_EL0(9),
3650 	AMU_AMEVTYPER1_EL0(10),
3651 	AMU_AMEVTYPER1_EL0(11),
3652 	AMU_AMEVTYPER1_EL0(12),
3653 	AMU_AMEVTYPER1_EL0(13),
3654 	AMU_AMEVTYPER1_EL0(14),
3655 	AMU_AMEVTYPER1_EL0(15),
3656 
3657 	{ SYS_DESC(SYS_CNTPCT_EL0), .access = access_arch_timer,
3658 	  .get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
3659 	{ SYS_DESC(SYS_CNTVCT_EL0), .access = access_arch_timer,
3660 	  .get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
3661 	{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
3662 	{ SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer },
3663 	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
3664 	TIMER_REG(CNTP_CTL_EL0, NULL),
3665 	TIMER_REG(CNTP_CVAL_EL0, NULL),
3666 
3667 	{ SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer },
3668 	TIMER_REG(CNTV_CTL_EL0, NULL),
3669 	TIMER_REG(CNTV_CVAL_EL0, NULL),
3670 
3671 	/* PMEVCNTRn_EL0 */
3672 	PMU_PMEVCNTR_EL0(0),
3673 	PMU_PMEVCNTR_EL0(1),
3674 	PMU_PMEVCNTR_EL0(2),
3675 	PMU_PMEVCNTR_EL0(3),
3676 	PMU_PMEVCNTR_EL0(4),
3677 	PMU_PMEVCNTR_EL0(5),
3678 	PMU_PMEVCNTR_EL0(6),
3679 	PMU_PMEVCNTR_EL0(7),
3680 	PMU_PMEVCNTR_EL0(8),
3681 	PMU_PMEVCNTR_EL0(9),
3682 	PMU_PMEVCNTR_EL0(10),
3683 	PMU_PMEVCNTR_EL0(11),
3684 	PMU_PMEVCNTR_EL0(12),
3685 	PMU_PMEVCNTR_EL0(13),
3686 	PMU_PMEVCNTR_EL0(14),
3687 	PMU_PMEVCNTR_EL0(15),
3688 	PMU_PMEVCNTR_EL0(16),
3689 	PMU_PMEVCNTR_EL0(17),
3690 	PMU_PMEVCNTR_EL0(18),
3691 	PMU_PMEVCNTR_EL0(19),
3692 	PMU_PMEVCNTR_EL0(20),
3693 	PMU_PMEVCNTR_EL0(21),
3694 	PMU_PMEVCNTR_EL0(22),
3695 	PMU_PMEVCNTR_EL0(23),
3696 	PMU_PMEVCNTR_EL0(24),
3697 	PMU_PMEVCNTR_EL0(25),
3698 	PMU_PMEVCNTR_EL0(26),
3699 	PMU_PMEVCNTR_EL0(27),
3700 	PMU_PMEVCNTR_EL0(28),
3701 	PMU_PMEVCNTR_EL0(29),
3702 	PMU_PMEVCNTR_EL0(30),
3703 	/* PMEVTYPERn_EL0 */
3704 	PMU_PMEVTYPER_EL0(0),
3705 	PMU_PMEVTYPER_EL0(1),
3706 	PMU_PMEVTYPER_EL0(2),
3707 	PMU_PMEVTYPER_EL0(3),
3708 	PMU_PMEVTYPER_EL0(4),
3709 	PMU_PMEVTYPER_EL0(5),
3710 	PMU_PMEVTYPER_EL0(6),
3711 	PMU_PMEVTYPER_EL0(7),
3712 	PMU_PMEVTYPER_EL0(8),
3713 	PMU_PMEVTYPER_EL0(9),
3714 	PMU_PMEVTYPER_EL0(10),
3715 	PMU_PMEVTYPER_EL0(11),
3716 	PMU_PMEVTYPER_EL0(12),
3717 	PMU_PMEVTYPER_EL0(13),
3718 	PMU_PMEVTYPER_EL0(14),
3719 	PMU_PMEVTYPER_EL0(15),
3720 	PMU_PMEVTYPER_EL0(16),
3721 	PMU_PMEVTYPER_EL0(17),
3722 	PMU_PMEVTYPER_EL0(18),
3723 	PMU_PMEVTYPER_EL0(19),
3724 	PMU_PMEVTYPER_EL0(20),
3725 	PMU_PMEVTYPER_EL0(21),
3726 	PMU_PMEVTYPER_EL0(22),
3727 	PMU_PMEVTYPER_EL0(23),
3728 	PMU_PMEVTYPER_EL0(24),
3729 	PMU_PMEVTYPER_EL0(25),
3730 	PMU_PMEVTYPER_EL0(26),
3731 	PMU_PMEVTYPER_EL0(27),
3732 	PMU_PMEVTYPER_EL0(28),
3733 	PMU_PMEVTYPER_EL0(29),
3734 	PMU_PMEVTYPER_EL0(30),
3735 	/*
3736 	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
3737 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
3738 	 */
3739 	{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
3740 	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
3741 
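	/*
	 * EL2 registers, only visible to a guest running with nested
	 * virt (NV) enabled. Entries declared with EL2_REG_VNCR are
	 * backed by the VNCR page when FEAT_NV2 is in use; the others
	 * are trapped and emulated.
	 */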
3742 	EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
3743 	EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
3744 	EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
3745 	EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
3746 	EL2_REG_FILTERED(SCTLR2_EL2, access_vm_reg, reset_val, 0,
3747 			 sctlr2_el2_visibility),
3748 	EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
3749 	EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0),
3750 	EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
3751 	EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
3752 	EL2_REG_VNCR_FILT(HFGRTR_EL2, fgt_visibility),
3753 	EL2_REG_VNCR_FILT(HFGWTR_EL2, fgt_visibility),
3754 	EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
3755 	EL2_REG_VNCR(HACR_EL2, reset_val, 0),
3756 
3757 	EL2_REG_FILTERED(ZCR_EL2, access_zcr_el2, reset_val, 0,
3758 			 sve_el2_visibility),
3759 
3760 	EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
3761 
3762 	EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
3763 	EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
3764 	EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
3765 	EL2_REG_FILTERED(TCR2_EL2, access_rw, reset_val, TCR2_EL2_RES1,
3766 			 tcr2_el2_visibility),
3767 	EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
3768 	EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
3769 	EL2_REG_FILTERED(VNCR_EL2, bad_vncr_trap, reset_val, 0,
3770 			 vncr_el2_visibility),
3771 
3772 	{ SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 },
3773 	EL2_REG_VNCR_FILT(HDFGRTR2_EL2, fgt2_visibility),
3774 	EL2_REG_VNCR_FILT(HDFGWTR2_EL2, fgt2_visibility),
3775 	EL2_REG_VNCR_FILT(HFGRTR2_EL2, fgt2_visibility),
3776 	EL2_REG_VNCR_FILT(HFGWTR2_EL2, fgt2_visibility),
3777 	EL2_REG_VNCR_FILT(HDFGRTR_EL2, fgt_visibility),
3778 	EL2_REG_VNCR_FILT(HDFGWTR_EL2, fgt_visibility),
3779 	EL2_REG_VNCR_FILT(HAFGRTR_EL2, fgt_visibility),
3780 	EL2_REG_VNCR_FILT(HFGITR2_EL2, fgt2_visibility),
3781 	EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
3782 	EL2_REG_REDIR(ELR_EL2, reset_val, 0),
3783 	{ SYS_DESC(SYS_SP_EL1), access_sp_el1 },
3784 
3785 	/* AArch32 SPSR_* are RES0 if trapped from an NV guest */
3786 	{ SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi },
3787 	{ SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi },
3788 	{ SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi },
3789 	{ SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi },
3790 
3791 	{ SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 },
3792 	EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
3793 	EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
3794 	EL2_REG_REDIR(ESR_EL2, reset_val, 0),
3795 	EL2_REG_VNCR(VSESR_EL2, reset_unknown, 0),
3796 	{ SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 },
3797 
3798 	EL2_REG_REDIR(FAR_EL2, reset_val, 0),
3799 	EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
3800 
3801 	EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
3802 	EL2_REG_FILTERED(PIRE0_EL2, access_rw, reset_val, 0,
3803 			 s1pie_el2_visibility),
3804 	EL2_REG_FILTERED(PIR_EL2, access_rw, reset_val, 0,
3805 			 s1pie_el2_visibility),
3806 	EL2_REG_FILTERED(POR_EL2, access_rw, reset_val, 0,
3807 			 s1poe_el2_visibility),
3808 	EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
3809 	{ SYS_DESC(SYS_MPAMHCR_EL2), undef_access },
3810 	{ SYS_DESC(SYS_MPAMVPMV_EL2), undef_access },
3811 	{ SYS_DESC(SYS_MPAM2_EL2), undef_access },
3812 	{ SYS_DESC(SYS_MPAMVPM0_EL2), undef_access },
3813 	{ SYS_DESC(SYS_MPAMVPM1_EL2), undef_access },
3814 	{ SYS_DESC(SYS_MPAMVPM2_EL2), undef_access },
3815 	{ SYS_DESC(SYS_MPAMVPM3_EL2), undef_access },
3816 	{ SYS_DESC(SYS_MPAMVPM4_EL2), undef_access },
3817 	{ SYS_DESC(SYS_MPAMVPM5_EL2), undef_access },
3818 	{ SYS_DESC(SYS_MPAMVPM6_EL2), undef_access },
3819 	{ SYS_DESC(SYS_MPAMVPM7_EL2), undef_access },
3820 
3821 	EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
3822 	{ SYS_DESC(SYS_RVBAR_EL2), undef_access },
3823 	{ SYS_DESC(SYS_RMR_EL2), undef_access },
3824 	EL2_REG_VNCR(VDISR_EL2, reset_unknown, 0),
3825 
3826 	EL2_REG_VNCR_GICv3(ICH_AP0R0_EL2),
3827 	EL2_REG_VNCR_GICv3(ICH_AP0R1_EL2),
3828 	EL2_REG_VNCR_GICv3(ICH_AP0R2_EL2),
3829 	EL2_REG_VNCR_GICv3(ICH_AP0R3_EL2),
3830 	EL2_REG_VNCR_GICv3(ICH_AP1R0_EL2),
3831 	EL2_REG_VNCR_GICv3(ICH_AP1R1_EL2),
3832 	EL2_REG_VNCR_GICv3(ICH_AP1R2_EL2),
3833 	EL2_REG_VNCR_GICv3(ICH_AP1R3_EL2),
3834 
3835 	{ SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre },
3836 
3837 	EL2_REG_VNCR_GICv3(ICH_HCR_EL2),
3838 	{ SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr },
3839 	{ SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr },
3840 	{ SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr },
3841 	{ SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr },
3842 	EL2_REG_VNCR_GICv3(ICH_VMCR_EL2),
3843 
3844 	EL2_REG_VNCR_GICv3(ICH_LR0_EL2),
3845 	EL2_REG_VNCR_GICv3(ICH_LR1_EL2),
3846 	EL2_REG_VNCR_GICv3(ICH_LR2_EL2),
3847 	EL2_REG_VNCR_GICv3(ICH_LR3_EL2),
3848 	EL2_REG_VNCR_GICv3(ICH_LR4_EL2),
3849 	EL2_REG_VNCR_GICv3(ICH_LR5_EL2),
3850 	EL2_REG_VNCR_GICv3(ICH_LR6_EL2),
3851 	EL2_REG_VNCR_GICv3(ICH_LR7_EL2),
3852 	EL2_REG_VNCR_GICv3(ICH_LR8_EL2),
3853 	EL2_REG_VNCR_GICv3(ICH_LR9_EL2),
3854 	EL2_REG_VNCR_GICv3(ICH_LR10_EL2),
3855 	EL2_REG_VNCR_GICv3(ICH_LR11_EL2),
3856 	EL2_REG_VNCR_GICv3(ICH_LR12_EL2),
3857 	EL2_REG_VNCR_GICv3(ICH_LR13_EL2),
3858 	EL2_REG_VNCR_GICv3(ICH_LR14_EL2),
3859 	EL2_REG_VNCR_GICv3(ICH_LR15_EL2),
3860 
3861 	EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
3862 	EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
3863 
3864 	EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
3865 	EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
3866 	{ SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer },
3867 	TIMER_REG(CNTHP_CTL_EL2, el2_visibility),
3868 	TIMER_REG(CNTHP_CVAL_EL2, el2_visibility),
3869 
3870 	{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer, .visibility = cnthv_visibility },
3871 	TIMER_REG(CNTHV_CTL_EL2, cnthv_visibility),
3872 	TIMER_REG(CNTHV_CVAL_EL2, cnthv_visibility),
3873 
3874 	{ SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
3875 
3876 	{ SYS_DESC(SYS_CNTP_TVAL_EL02), access_arch_timer },
3877 	{ SYS_DESC(SYS_CNTP_CTL_EL02), access_arch_timer },
3878 	{ SYS_DESC(SYS_CNTP_CVAL_EL02), access_arch_timer },
3879 
3880 	{ SYS_DESC(SYS_CNTV_TVAL_EL02), access_arch_timer },
3881 	{ SYS_DESC(SYS_CNTV_CTL_EL02), access_arch_timer },
3882 	{ SYS_DESC(SYS_CNTV_CVAL_EL02), access_arch_timer },
3883 
3884 	EL2_REG(SP_EL2, NULL, reset_unknown, 0),
3885 };
3886 
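/*
 * Handlers for trapped AT instructions. The trapped operation is
 * re-encoded from its Op0/Op1/CRn/CRm/Op2 fields and forwarded to
 * KVM's AT emulation, which performs the walk on behalf of the guest.
 * Returning false means the instruction must not be retired, typically
 * because an exception was injected instead.
 */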
3887 static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3888 			    const struct sys_reg_desc *r)
3889 {
3890 	u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3891 
3892 	if (__kvm_at_s1e01(vcpu, op, p->regval))
3893 		return false;
3894 
3895 	return true;
3896 }
3897 
3898 static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3899 			   const struct sys_reg_desc *r)
3900 {
3901 	u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3902 
3903 	/* There is no FGT associated with AT S1E2A :-( */
3904 	if (op == OP_AT_S1E2A &&
3905 	    !kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) {
3906 		kvm_inject_undefined(vcpu);
3907 		return false;
3908 	}
3909 
3910 	if (__kvm_at_s1e2(vcpu, op, p->regval))
3911 		return false;
3912 
3913 	return true;
3914 }
3915 
3916 static bool handle_at_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3917 			  const struct sys_reg_desc *r)
3918 {
3919 	u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3920 
3921 	if (__kvm_at_s12(vcpu, op, p->regval))
3922 		return false;
3923 
3924 	return true;
3925 }
3926 
3927 static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vcpu, u32 instr)
3928 {
3929 	struct kvm *kvm = vcpu->kvm;
3930 	u8 CRm = sys_reg_CRm(instr);
3931 
3932 	if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
3933 	    !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
3934 		return false;
3935 
3936 	if (CRm == TLBI_CRm_nROS &&
3937 	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3938 		return false;
3939 
3940 	return true;
3941 }
3942 
3943 static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3944 			   const struct sys_reg_desc *r)
3945 {
3946 	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3947 
3948 	if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
3949 		return undef_access(vcpu, p, r);
3950 
3951 	write_lock(&vcpu->kvm->mmu_lock);
3952 
3953 	/*
3954 	 * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
3955 	 * corresponding VMIDs.
3956 	 */
3957 	kvm_nested_s2_unmap(vcpu->kvm, true);
3958 
3959 	write_unlock(&vcpu->kvm->mmu_lock);
3960 
3961 	return true;
3962 }
3963 
3964 static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vcpu, u32 instr)
3965 {
3966 	struct kvm *kvm = vcpu->kvm;
3967 	u8 CRm = sys_reg_CRm(instr);
3968 	u8 Op2 = sys_reg_Op2(instr);
3969 
3970 	if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
3971 	    !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
3972 		return false;
3973 
3974 	if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) &&
3975 	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
3976 		return false;
3977 
3978 	if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) &&
3979 	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3980 		return false;
3981 
3982 	if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) &&
3983 	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
3984 		return false;
3985 
3986 	return true;
3987 }
3988 
3989 /* Only defined here as this is an internal "abstraction" */
3990 union tlbi_info {
3991 	struct {
3992 		u64	start;
3993 		u64	size;
3994 	} range;
3995 
3996 	struct {
3997 		u64	addr;
3998 	} ipa;
3999 
4000 	struct {
4001 		u64	addr;
4002 		u32	encoding;
4003 	} va;
4004 };
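/*
 * Each TLBI handler packs its operands into a tlbi_info and hands it,
 * together with a per-MMU callback, to kvm_s2_mmu_iterate_by_vmid(),
 * which invokes the callback on every shadow stage-2 MMU tagged with
 * the VMID currently programmed in the guest's VTTBR_EL2.
 */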
4005 
4006 static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
4007 			       const union tlbi_info *info)
4008 {
4009 	/*
4010 	 * The unmap operation is allowed to drop the MMU lock and block, which
4011 	 * means that @mmu could be used for a different context than the one
4012 	 * currently being invalidated.
4013 	 *
4014 	 * This behavior is still safe, as:
4015 	 *
4016 	 *  1) The vCPU(s) that recycled the MMU are responsible for invalidating
4017 	 *     the entire MMU before reusing it, which still honors the intent
4018 	 *     of a TLBI.
4019 	 *
4020 	 *  2) Until the guest TLBI instruction is 'retired' (i.e. increment PC
4021 	 *     and ERET to the guest), other vCPUs are allowed to use stale
4022 	 *     translations.
4023 	 *
4024 	 *  3) Accidentally unmapping an unrelated MMU context is nonfatal, and
4025 	 *     at worst may cause more aborts for shadow stage-2 fills.
4026 	 *
4027 	 * Dropping the MMU lock also implies that shadow stage-2 fills could
4028 	 * happen behind the back of the TLBI. This is still safe, though, as
4029 	 * the L1 needs to put its stage-2 in a consistent state before doing
4030 	 * the TLBI.
4031 	 */
4032 	kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true);
4033 }
4034 
4035 static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
4036 				const struct sys_reg_desc *r)
4037 {
4038 	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
4039 	u64 limit, vttbr;
4040 
4041 	if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
4042 		return undef_access(vcpu, p, r);
4043 
4044 	vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
4045 	limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));
4046 
4047 	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
4048 				   &(union tlbi_info) {
4049 					   .range = {
4050 						   .start = 0,
4051 						   .size = limit,
4052 					   },
4053 				   },
4054 				   s2_mmu_unmap_range);
4055 
4056 	return true;
4057 }
4058 
4059 static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
4060 			      const struct sys_reg_desc *r)
4061 {
4062 	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
4063 	u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
4064 	u64 base, range;
4065 
4066 	if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
4067 		return undef_access(vcpu, p, r);
4068 
4069 	/*
4070 	 * Because the shadow S2 structure doesn't necessarily reflect that
4071 	 * of the guest's S2 (different base granule size, for example), we
4072 	 * decide to ignore TTL and only use the described range.
4073 	 */
4074 	base = decode_range_tlbi(p->regval, &range, NULL);
4075 
4076 	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
4077 				   &(union tlbi_info) {
4078 					   .range = {
4079 						   .start = base,
4080 						   .size = range,
4081 					   },
4082 				   },
4083 				   s2_mmu_unmap_range);
4084 
4085 	return true;
4086 }
4087 
4088 static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
4089 			     const union tlbi_info *info)
4090 {
4091 	unsigned long max_size;
4092 	u64 base_addr;
4093 
4094 	/*
4095 	 * We drop a number of things from the supplied value:
4096 	 *
4097 	 * - NS bit: we're non-secure only.
4098 	 *
4099 	 * - IPA[51:48]: We don't support 52bit IPA just yet...
4100 	 *
4101 	 * And of course, shift the IPA field back into an actual address.
4102 	 */
4103 	base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12;
4104 	max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
4105 	base_addr &= ~(max_size - 1);
4106 
4107 	/*
4108 	 * See comment in s2_mmu_unmap_range() for why this is allowed to
4109 	 * reschedule.
4110 	 */
4111 	kvm_stage2_unmap_range(mmu, base_addr, max_size, true);
4112 }
4113 
4114 static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
4115 			     const struct sys_reg_desc *r)
4116 {
4117 	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
4118 	u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
4119 
4120 	if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
4121 		return undef_access(vcpu, p, r);
4122 
4123 	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
4124 				   &(union tlbi_info) {
4125 					   .ipa = {
4126 						   .addr = p->regval,
4127 					   },
4128 				   },
4129 				   s2_mmu_unmap_ipa);
4130 
4131 	return true;
4132 }
4133 
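/*
 * Replay a trapped guest S1 TLBI in the context of the shadow S2 MMU
 * @mmu. Used as a kvm_s2_mmu_iterate_by_vmid() callback below.
 */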
4134 static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
4135 			     const union tlbi_info *info)
4136 {
4137 	WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
4138 }
4139 
4140 static bool handle_tlbi_el2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
4141 			    const struct sys_reg_desc *r)
4142 {
4143 	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
4144 
4145 	if (!kvm_supported_tlbi_s1e2_op(vcpu, sys_encoding))
4146 		return undef_access(vcpu, p, r);
4147 
4148 	kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
4149 	return true;
4150 }
4151 
4152 static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
4153 			    const struct sys_reg_desc *r)
4154 {
4155 	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
4156 
4157 	/*
4158 	 * If we're here, this is because we've trapped on an EL1 TLBI
4159 	 * instruction that affects the EL1 translation regime while
4160 	 * we're running in a context that doesn't allow us to let the
4161 	 * HW do its thing (aka vEL2):
4162 	 *
4163 	 * - HCR_EL2.E2H == 0 : a non-VHE guest
4164 	 * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
4165 	 *
4166 	 * Another possibility is that we are invalidating the EL2 context
4167 	 * using EL1 instructions, but that we landed here because we need
4168 	 * additional invalidation for structures that are not held in the
4169 	 * CPU TLBs (such as the VNCR pseudo-TLB and its EL2 mapping). In
4170 	 * that case, we are guaranteed that HCR_EL2.{E2H,TGE} == { 1, 1 }
4171 	 * as we don't allow an NV-capable L1 in a nVHE configuration.
4172 	 *
4173 	 * We don't expect these helpers to ever be called when running
4174 	 * in a vEL1 context.
4175 	 */
4176 
4177 	WARN_ON(!vcpu_is_el2(vcpu));
4178 
4179 	if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding))
4180 		return undef_access(vcpu, p, r);
4181 
4182 	if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) {
4183 		kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
4184 		return true;
4185 	}
4186 
4187 	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm,
4188 				   get_vmid(__vcpu_sys_reg(vcpu, VTTBR_EL2)),
4189 				   &(union tlbi_info) {
4190 					   .va = {
4191 						   .addr = p->regval,
4192 						   .encoding = sys_encoding,
4193 					   },
4194 				   },
4195 				   s2_mmu_tlbi_s1e1);
4196 
4197 	return true;
4198 }
4199 
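/*
 * Descriptor for a system *instruction* (AT/TLBI/DC): such entries
 * have no architectural storage behind them, only an access handler.
 */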
4200 #define SYS_INSN(insn, access_fn)					\
4201 	{								\
4202 		SYS_DESC(OP_##insn),					\
4203 		.access = (access_fn),					\
4204 	}
4205 
4206 static struct sys_reg_desc sys_insn_descs[] = {
4207 	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
4208 	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
4209 	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
4210 
4211 	SYS_INSN(AT_S1E1R, handle_at_s1e01),
4212 	SYS_INSN(AT_S1E1W, handle_at_s1e01),
4213 	SYS_INSN(AT_S1E0R, handle_at_s1e01),
4214 	SYS_INSN(AT_S1E0W, handle_at_s1e01),
4215 	SYS_INSN(AT_S1E1RP, handle_at_s1e01),
4216 	SYS_INSN(AT_S1E1WP, handle_at_s1e01),
4217 
4218 	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
4219 	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
4220 	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
4221 	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
4222 	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
4223 	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
4224 
4225 	SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1),
4226 	SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1),
4227 	SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1),
4228 	SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1),
4229 	SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1),
4230 	SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1),
4231 
4232 	SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1),
4233 	SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1),
4234 	SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1),
4235 	SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1),
4236 
4237 	SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1),
4238 	SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1),
4239 	SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1),
4240 	SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1),
4241 	SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1),
4242 	SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1),
4243 
4244 	SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1),
4245 	SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1),
4246 	SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1),
4247 	SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1),
4248 
4249 	SYS_INSN(TLBI_RVAE1, handle_tlbi_el1),
4250 	SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1),
4251 	SYS_INSN(TLBI_RVALE1, handle_tlbi_el1),
4252 	SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1),
4253 
4254 	SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1),
4255 	SYS_INSN(TLBI_VAE1, handle_tlbi_el1),
4256 	SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1),
4257 	SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
4258 	SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
4259 	SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),
4260 
4261 	SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1),
4262 	SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1),
4263 	SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1),
4264 	SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1),
4265 	SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1),
4266 	SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1),
4267 
4268 	SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1),
4269 	SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1),
4270 	SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1),
4271 	SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1),
4272 
4273 	SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1),
4274 	SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1),
4275 	SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1),
4276 	SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1),
4277 	SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1),
4278 	SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1),
4279 
4280 	SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1),
4281 	SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1),
4282 	SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1),
4283 	SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1),
4284 
4285 	SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1),
4286 	SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1),
4287 	SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1),
4288 	SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1),
4289 
4290 	SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1),
4291 	SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1),
4292 	SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1),
4293 	SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1),
4294 	SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1),
4295 	SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1),
4296 
4297 	SYS_INSN(AT_S1E2R, handle_at_s1e2),
4298 	SYS_INSN(AT_S1E2W, handle_at_s1e2),
4299 	SYS_INSN(AT_S12E1R, handle_at_s12),
4300 	SYS_INSN(AT_S12E1W, handle_at_s12),
4301 	SYS_INSN(AT_S12E0R, handle_at_s12),
4302 	SYS_INSN(AT_S12E0W, handle_at_s12),
4303 	SYS_INSN(AT_S1E2A, handle_at_s1e2),
4304 
4305 	SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is),
4306 	SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is),
4307 	SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
4308 	SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),
4309 
4310 	SYS_INSN(TLBI_ALLE2OS, handle_tlbi_el2),
4311 	SYS_INSN(TLBI_VAE2OS, handle_tlbi_el2),
4312 	SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
4313 	SYS_INSN(TLBI_VALE2OS, handle_tlbi_el2),
4314 	SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),
4315 
4316 	SYS_INSN(TLBI_RVAE2IS, handle_tlbi_el2),
4317 	SYS_INSN(TLBI_RVALE2IS, handle_tlbi_el2),
4318 	SYS_INSN(TLBI_ALLE2IS, handle_tlbi_el2),
4319 	SYS_INSN(TLBI_VAE2IS, handle_tlbi_el2),
4320 
4321 	SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
4322 
4323 	SYS_INSN(TLBI_VALE2IS, handle_tlbi_el2),
4324 
4325 	SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
4326 	SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is),
4327 	SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is),
4328 	SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is),
4329 	SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is),
4330 	SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is),
4331 	SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
4332 	SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
4333 	SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
4334 	SYS_INSN(TLBI_RVAE2OS, handle_tlbi_el2),
4335 	SYS_INSN(TLBI_RVALE2OS, handle_tlbi_el2),
4336 	SYS_INSN(TLBI_RVAE2, handle_tlbi_el2),
4337 	SYS_INSN(TLBI_RVALE2, handle_tlbi_el2),
4338 	SYS_INSN(TLBI_ALLE2, handle_tlbi_el2),
4339 	SYS_INSN(TLBI_VAE2, handle_tlbi_el2),
4340 
4341 	SYS_INSN(TLBI_ALLE1, handle_alle1is),
4342 
4343 	SYS_INSN(TLBI_VALE2, handle_tlbi_el2),
4344 
4345 	SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
4346 
4347 	SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is),
4348 	SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is),
4349 	SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
4350 	SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),
4351 
4352 	SYS_INSN(TLBI_ALLE2OSNXS, handle_tlbi_el2),
4353 	SYS_INSN(TLBI_VAE2OSNXS, handle_tlbi_el2),
4354 	SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
4355 	SYS_INSN(TLBI_VALE2OSNXS, handle_tlbi_el2),
4356 	SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),
4357 
4358 	SYS_INSN(TLBI_RVAE2ISNXS, handle_tlbi_el2),
4359 	SYS_INSN(TLBI_RVALE2ISNXS, handle_tlbi_el2),
4360 	SYS_INSN(TLBI_ALLE2ISNXS, handle_tlbi_el2),
4361 	SYS_INSN(TLBI_VAE2ISNXS, handle_tlbi_el2),
4362 
4363 	SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
4364 	SYS_INSN(TLBI_VALE2ISNXS, handle_tlbi_el2),
4365 	SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
4366 	SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
4367 	SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
4368 	SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is),
4369 	SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is),
4370 	SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is),
4371 	SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
4372 	SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
4373 	SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
4374 	SYS_INSN(TLBI_RVAE2OSNXS, handle_tlbi_el2),
4375 	SYS_INSN(TLBI_RVALE2OSNXS, handle_tlbi_el2),
4376 	SYS_INSN(TLBI_RVAE2NXS, handle_tlbi_el2),
4377 	SYS_INSN(TLBI_RVALE2NXS, handle_tlbi_el2),
4378 	SYS_INSN(TLBI_ALLE2NXS, handle_tlbi_el2),
4379 	SYS_INSN(TLBI_VAE2NXS, handle_tlbi_el2),
4380 	SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
4381 	SYS_INSN(TLBI_VALE2NXS, handle_tlbi_el2),
4382 	SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
4383 };
4384 
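/*
 * Synthesize the AArch32 DBGDIDR value from the guest's view of
 * ID_AA64DFR0_EL1, so that the 32bit debug identification matches
 * what is advertised to the 64bit world.
 */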
4385 static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
4386 			struct sys_reg_params *p,
4387 			const struct sys_reg_desc *r)
4388 {
4389 	if (p->is_write) {
4390 		return ignore_write(vcpu, p);
4391 	} else {
4392 		u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
4393 		u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
4394 
4395 		p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
4396 			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
4397 			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
4398 			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
4399 			     (1 << 15) | (el3 << 14) | (el3 << 12));
4400 		return true;
4401 	}
4402 }
4403 
4404 /*
4405  * AArch32 debug register mappings
4406  *
4407  * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
4408  * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
4409  *
4410  * None of the other registers share their location, so treat them as
4411  * if they were 64bit.
4412  */
4413 #define DBG_BCR_BVR_WCR_WVR(n)							\
4414 	/* DBGBVRn */								\
4415 	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4),			\
4416 	  trap_dbg_wb_reg, NULL, n },						\
4417 	/* DBGBCRn */								\
4418 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_dbg_wb_reg, NULL, n },	\
4419 	/* DBGWVRn */								\
4420 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_dbg_wb_reg, NULL, n },	\
4421 	/* DBGWCRn */								\
4422 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_dbg_wb_reg, NULL, n }
4423 
4424 #define DBGBXVR(n)								\
4425 	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1),			\
4426 	  trap_dbg_wb_reg, NULL, n }
4427 
4428 /*
4429  * Trapped cp14 registers. We generally ignore most of the external
4430  * debug registers, on the principle that they don't really make
4431  * sense to a guest. Revisit this one day, should this principle change.
4432  */
4433 static const struct sys_reg_desc cp14_regs[] = {
4434 	/* DBGDIDR */
4435 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
4436 	/* DBGDTRRXext */
4437 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
4438 
4439 	DBG_BCR_BVR_WCR_WVR(0),
4440 	/* DBGDSCRint */
4441 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
4442 	DBG_BCR_BVR_WCR_WVR(1),
4443 	/* DBGDCCINT */
4444 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
4445 	/* DBGDSCRext */
4446 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
4447 	DBG_BCR_BVR_WCR_WVR(2),
4448 	/* DBGDTR[RT]Xint */
4449 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
4450 	/* DBGDTR[RT]Xext */
4451 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
4452 	DBG_BCR_BVR_WCR_WVR(3),
4453 	DBG_BCR_BVR_WCR_WVR(4),
4454 	DBG_BCR_BVR_WCR_WVR(5),
4455 	/* DBGWFAR */
4456 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
4457 	/* DBGOSECCR */
4458 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
4459 	DBG_BCR_BVR_WCR_WVR(6),
4460 	/* DBGVCR */
4461 	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
4462 	DBG_BCR_BVR_WCR_WVR(7),
4463 	DBG_BCR_BVR_WCR_WVR(8),
4464 	DBG_BCR_BVR_WCR_WVR(9),
4465 	DBG_BCR_BVR_WCR_WVR(10),
4466 	DBG_BCR_BVR_WCR_WVR(11),
4467 	DBG_BCR_BVR_WCR_WVR(12),
4468 	DBG_BCR_BVR_WCR_WVR(13),
4469 	DBG_BCR_BVR_WCR_WVR(14),
4470 	DBG_BCR_BVR_WCR_WVR(15),
4471 
4472 	/* DBGDRAR (32bit) */
4473 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
4474 
4475 	DBGBXVR(0),
4476 	/* DBGOSLAR */
4477 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
4478 	DBGBXVR(1),
4479 	/* DBGOSLSR */
4480 	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
4481 	DBGBXVR(2),
4482 	DBGBXVR(3),
4483 	/* DBGOSDLR */
4484 	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
4485 	DBGBXVR(4),
4486 	/* DBGPRCR */
4487 	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
4488 	DBGBXVR(5),
4489 	DBGBXVR(6),
4490 	DBGBXVR(7),
4491 	DBGBXVR(8),
4492 	DBGBXVR(9),
4493 	DBGBXVR(10),
4494 	DBGBXVR(11),
4495 	DBGBXVR(12),
4496 	DBGBXVR(13),
4497 	DBGBXVR(14),
4498 	DBGBXVR(15),
4499 
4500 	/* DBGDSAR (32bit) */
4501 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
4502 
4503 	/* DBGDEVID2 */
4504 	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
4505 	/* DBGDEVID1 */
4506 	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
4507 	/* DBGDEVID */
4508 	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
4509 	/* DBGCLAIMSET */
4510 	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
4511 	/* DBGCLAIMCLR */
4512 	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
4513 	/* DBGAUTHSTATUS */
4514 	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
4515 };
4516 
4517 /* Trapped cp14 64bit registers */
4518 static const struct sys_reg_desc cp14_64_regs[] = {
4519 	/* DBGDRAR (64bit) */
4520 	{ Op1( 0), CRm( 1), .access = trap_raz_wi },
4521 
4522 	/* DBGDSAR (64bit) */
4523 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
4524 };
4525 
4526 #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)			\
4527 	AA32(_map),							\
4528 	Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),			\
4529 	.visibility = pmu_visibility
4530 
4531 /* Macro to expand the PMEVCNTRn register */
4532 #define PMU_PMEVCNTR(n)							\
4533 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
4534 	  (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
4535 	  .access = access_pmu_evcntr }
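/* e.g. n = 10 yields CRm = 0b1001 (0b1000 | ((10 >> 3) & 0x3)), Op2 = 0b010 (10 & 7) */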
4536 
4537 /* Macro to expand the PMEVTYPERn register */
4538 #define PMU_PMEVTYPER(n)						\
4539 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
4540 	  (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
4541 	  .access = access_pmu_evtyper }
4542 /*
4543  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
4544  * depending on the way they are accessed (as a 32bit or a 64bit
4545  * register).
4546  */
4547 static const struct sys_reg_desc cp15_regs[] = {
4548 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
4549 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
4550 	/* ACTLR */
4551 	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
4552 	/* ACTLR2 */
4553 	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
4554 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
4555 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
4556 	/* TTBCR */
4557 	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
4558 	/* TTBCR2 */
4559 	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
4560 	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
4561 	{ CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
4562 	/* DFSR */
4563 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
4564 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
4565 	/* ADFSR */
4566 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
4567 	/* AIFSR */
4568 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
4569 	/* DFAR */
4570 	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
4571 	/* IFAR */
4572 	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
4573 
4574 	/*
4575 	 * DC{C,I,CI}SW operations:
4576 	 */
4577 	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
4578 	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
4579 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
4580 
4581 	/* PMU */
4582 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
4583 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
4584 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
4585 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
4586 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
4587 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
4588 	{ CP15_PMU_SYS_REG(LO,     0, 9, 12, 6), .access = access_pmceid },
4589 	{ CP15_PMU_SYS_REG(LO,     0, 9, 12, 7), .access = access_pmceid },
4590 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
4591 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
4592 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
4593 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
4594 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
4595 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
4596 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
4597 	{ CP15_PMU_SYS_REG(HI,     0, 9, 14, 4), .access = access_pmceid },
4598 	{ CP15_PMU_SYS_REG(HI,     0, 9, 14, 5), .access = access_pmceid },
4599 	/* PMMIR */
4600 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
4601 
4602 	/* PRRR/MAIR0 */
4603 	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
4604 	/* NMRR/MAIR1 */
4605 	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
4606 	/* AMAIR0 */
4607 	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
4608 	/* AMAIR1 */
4609 	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
4610 
4611 	{ CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
4612 	{ CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
4613 	{ CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
4614 	{ CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
4615 	{ CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
4616 	{ CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
4617 	{ CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
4618 	{ CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
4619 	{ CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
4620 	{ CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
4621 	{ CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
4622 	{ CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
4623 	{ CP15_SYS_DESC(SYS_ICC_DIR_EL1), access_gic_dir },
4624 	{ CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
4625 	{ CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
4626 	{ CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
4627 	{ CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
4628 	{ CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
4629 	{ CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
4630 	{ CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
4631 	{ CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
4632 	{ CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
4633 
4634 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
4635 
4636 	/* Arch Timers */
4637 	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
4638 	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
4639 
4640 	/* PMEVCNTRn */
4641 	PMU_PMEVCNTR(0),
4642 	PMU_PMEVCNTR(1),
4643 	PMU_PMEVCNTR(2),
4644 	PMU_PMEVCNTR(3),
4645 	PMU_PMEVCNTR(4),
4646 	PMU_PMEVCNTR(5),
4647 	PMU_PMEVCNTR(6),
4648 	PMU_PMEVCNTR(7),
4649 	PMU_PMEVCNTR(8),
4650 	PMU_PMEVCNTR(9),
4651 	PMU_PMEVCNTR(10),
4652 	PMU_PMEVCNTR(11),
4653 	PMU_PMEVCNTR(12),
4654 	PMU_PMEVCNTR(13),
4655 	PMU_PMEVCNTR(14),
4656 	PMU_PMEVCNTR(15),
4657 	PMU_PMEVCNTR(16),
4658 	PMU_PMEVCNTR(17),
4659 	PMU_PMEVCNTR(18),
4660 	PMU_PMEVCNTR(19),
4661 	PMU_PMEVCNTR(20),
4662 	PMU_PMEVCNTR(21),
4663 	PMU_PMEVCNTR(22),
4664 	PMU_PMEVCNTR(23),
4665 	PMU_PMEVCNTR(24),
4666 	PMU_PMEVCNTR(25),
4667 	PMU_PMEVCNTR(26),
4668 	PMU_PMEVCNTR(27),
4669 	PMU_PMEVCNTR(28),
4670 	PMU_PMEVCNTR(29),
4671 	PMU_PMEVCNTR(30),
4672 	/* PMEVTYPERn */
4673 	PMU_PMEVTYPER(0),
4674 	PMU_PMEVTYPER(1),
4675 	PMU_PMEVTYPER(2),
4676 	PMU_PMEVTYPER(3),
4677 	PMU_PMEVTYPER(4),
4678 	PMU_PMEVTYPER(5),
4679 	PMU_PMEVTYPER(6),
4680 	PMU_PMEVTYPER(7),
4681 	PMU_PMEVTYPER(8),
4682 	PMU_PMEVTYPER(9),
4683 	PMU_PMEVTYPER(10),
4684 	PMU_PMEVTYPER(11),
4685 	PMU_PMEVTYPER(12),
4686 	PMU_PMEVTYPER(13),
4687 	PMU_PMEVTYPER(14),
4688 	PMU_PMEVTYPER(15),
4689 	PMU_PMEVTYPER(16),
4690 	PMU_PMEVTYPER(17),
4691 	PMU_PMEVTYPER(18),
4692 	PMU_PMEVTYPER(19),
4693 	PMU_PMEVTYPER(20),
4694 	PMU_PMEVTYPER(21),
4695 	PMU_PMEVTYPER(22),
4696 	PMU_PMEVTYPER(23),
4697 	PMU_PMEVTYPER(24),
4698 	PMU_PMEVTYPER(25),
4699 	PMU_PMEVTYPER(26),
4700 	PMU_PMEVTYPER(27),
4701 	PMU_PMEVTYPER(28),
4702 	PMU_PMEVTYPER(29),
4703 	PMU_PMEVTYPER(30),
4704 	/* PMCCFILTR */
4705 	{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
4706 
4707 	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
4708 	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
4709 
4710 	/* CCSIDR2 */
4711 	{ Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },
4712 
4713 	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
4714 };
4715 
4716 static const struct sys_reg_desc cp15_64_regs[] = {
4717 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
4718 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
4719 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
4720 	{ SYS_DESC(SYS_AARCH32_CNTPCT),	      access_arch_timer },
4721 	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
4722 	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
4723 	{ SYS_DESC(SYS_AARCH32_CNTVCT),	      access_arch_timer },
4724 	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
4725 	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL),    access_arch_timer },
4726 	{ SYS_DESC(SYS_AARCH32_CNTPCTSS),     access_arch_timer },
4727 	{ SYS_DESC(SYS_AARCH32_CNTVCTSS),     access_arch_timer },
4728 };
4729 
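/*
 * Sanity-check a trap table: entries must be strictly ordered by
 * encoding (lookups rely on binary search), and, when @reset_check is
 * set, any entry backed by vCPU storage must provide a reset handler.
 */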
4730 static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
4731 			       bool reset_check)
4732 {
4733 	unsigned int i;
4734 
4735 	for (i = 0; i < n; i++) {
4736 		if (reset_check && table[i].reg && !table[i].reset) {
4737 			kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
4738 				&table[i], i, table[i].name);
4739 			return false;
4740 		}
4741 
4742 		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
4743 			kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
4744 				&table[i], i, table[i - 1].name, table[i].name);
4745 			return false;
4746 		}
4747 	}
4748 
4749 	return true;
4750 }
4751 
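/* LDC/STC accesses to the cp14 debug registers are not supported */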
4752 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
4753 {
4754 	kvm_inject_undefined(vcpu);
4755 	return 1;
4756 }
4757 
4758 static void perform_access(struct kvm_vcpu *vcpu,
4759 			   struct sys_reg_params *params,
4760 			   const struct sys_reg_desc *r)
4761 {
4762 	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
4763 
4764 	/* Check for regs disabled by runtime config */
4765 	if (sysreg_hidden(vcpu, r)) {
4766 		kvm_inject_undefined(vcpu);
4767 		return;
4768 	}
4769 
4770 	/*
4771 	 * Not having an accessor means that we have configured a trap
4772 	 * that we don't know how to handle. This certainly qualifies
4773 	 * as a gross bug that should be fixed right away.
4774 	 */
4775 	if (!r->access) {
4776 		bad_trap(vcpu, params, r, "register access");
4777 		return;
4778 	}
4779 
4780 	/* Skip instruction if instructed so */
4781 	if (likely(r->access(vcpu, params, r)))
4782 		kvm_incr_pc(vcpu);
4783 }
4784 
4785 /*
4786  * emulate_cp --  tries to match a sys_reg access in a handling table, and
4787  *                call the corresponding trap handler.
4788  *
4789  * @params: pointer to the descriptor of the access
4790  * @table: array of trap descriptors
4791  * @num: size of the trap descriptor array
4792  *
4793  * Return true if the access has been handled, false if not.
4794  */
4795 static bool emulate_cp(struct kvm_vcpu *vcpu,
4796 		       struct sys_reg_params *params,
4797 		       const struct sys_reg_desc *table,
4798 		       size_t num)
4799 {
4800 	const struct sys_reg_desc *r;
4801 
4802 	if (!table)
4803 		return false;	/* Not handled */
4804 
4805 	r = find_reg(params, table, num);
4806 
4807 	if (r) {
4808 		perform_access(vcpu, params, r);
4809 		return true;
4810 	}
4811 
4812 	/* Not handled */
4813 	return false;
4814 }
4815 
4816 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
4817 				struct sys_reg_params *params)
4818 {
4819 	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
4820 	int cp = -1;
4821 
4822 	switch (esr_ec) {
4823 	case ESR_ELx_EC_CP15_32:
4824 	case ESR_ELx_EC_CP15_64:
4825 		cp = 15;
4826 		break;
4827 	case ESR_ELx_EC_CP14_MR:
4828 	case ESR_ELx_EC_CP14_64:
4829 		cp = 14;
4830 		break;
4831 	default:
4832 		WARN_ON(1);
4833 	}
4834 
4835 	print_sys_reg_msg(params,
4836 			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
4837 			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
4838 	kvm_inject_undefined(vcpu);
4839 }
4840 
4841 /**
4842  * kvm_handle_cp_64 -- handles an MRRC/MCRR trap on a guest CP14/CP15 access
4843  * @vcpu: The VCPU pointer
4844  * @global: &struct sys_reg_desc
4845  * @nr_global: size of the @global array
4846  */
4847 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
4848 			    const struct sys_reg_desc *global,
4849 			    size_t nr_global)
4850 {
4851 	struct sys_reg_params params;
4852 	u64 esr = kvm_vcpu_get_esr(vcpu);
4853 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
4854 	int Rt2 = (esr >> 10) & 0x1f;
4855 
4856 	params.CRm = (esr >> 1) & 0xf;
4857 	params.is_write = ((esr & 1) == 0);
4858 
4859 	params.Op0 = 0;
4860 	params.Op1 = (esr >> 16) & 0xf;
4861 	params.Op2 = 0;
4862 	params.CRn = 0;
4863 
4864 	/*
4865 	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
4866 	 * backends between AArch32 and AArch64, we get away with it.
4867 	 */
4868 	if (params.is_write) {
4869 		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
4870 		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
4871 	}
4872 
4873 	/*
4874 	 * If the table contains a handler, handle the access and, for
4875 	 * a read, propagate the result back to the guest registers
4876 	 * before returning with success.
4877 	 */
4878 	if (emulate_cp(vcpu, &params, global, nr_global)) {
4879 		/* Split up the value between registers for the read side */
4880 		if (!params.is_write) {
4881 			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
4882 			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
4883 		}
4884 
4885 		return 1;
4886 	}
4887 
4888 	unhandled_cp_access(vcpu, &params);
4889 	return 1;
4890 }
4891 
4892 static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
4893 
4894 /*
4895  * The CP10 ID registers are architecturally mapped to AArch64 feature
4896  * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
4897  * from AArch32.
4898  */
4899 static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
4900 {
4901 	u8 reg_id = (esr >> 10) & 0xf;
4902 	bool valid;
4903 
4904 	params->is_write = ((esr & 1) == 0);
4905 	params->Op0 = 3;
4906 	params->Op1 = 0;
4907 	params->CRn = 0;
4908 	params->CRm = 3;
4909 
4910 	/* CP10 ID registers are read-only */
4911 	valid = !params->is_write;
4912 
4913 	switch (reg_id) {
4914 	/* MVFR0 */
4915 	case 0b0111:
4916 		params->Op2 = 0;
4917 		break;
4918 	/* MVFR1 */
4919 	case 0b0110:
4920 		params->Op2 = 1;
4921 		break;
4922 	/* MVFR2 */
4923 	case 0b0101:
4924 		params->Op2 = 2;
4925 		break;
4926 	default:
4927 		valid = false;
4928 	}
4929 
4930 	if (valid)
4931 		return true;
4932 
4933 	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
4934 		      str_write_read(params->is_write), reg_id);
4935 	return false;
4936 }
4937 
4938 /**
4939  * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
4940  *			  VFP Register' from AArch32.
4941  * @vcpu: The vCPU pointer
4942  *
4943  * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
4944  * Work out the correct AArch64 system register encoding and reroute to the
4945  * AArch64 system register emulation.
4946  */
4947 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
4948 {
4949 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
4950 	u64 esr = kvm_vcpu_get_esr(vcpu);
4951 	struct sys_reg_params params;
4952 
4953 	/* UNDEF on any unhandled register access */
4954 	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
4955 		kvm_inject_undefined(vcpu);
4956 		return 1;
4957 	}
4958 
4959 	if (emulate_sys_reg(vcpu, &params))
4960 		vcpu_set_reg(vcpu, Rt, params.regval);
4961 
4962 	return 1;
4963 }
4964 
4965 /**
4966  * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
4967  *			       CRn=0, which corresponds to the AArch32 feature
4968  *			       registers.
4969  * @vcpu: the vCPU pointer
4970  * @params: the system register access parameters.
4971  *
4972  * Our cp15 system register tables do not enumerate the AArch32 feature
4973  * registers. Conveniently, our AArch64 table does, and the AArch32 system
4974  * register encoding can be trivially remapped into the AArch64 for the feature
4975  * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
4976  *
4977  * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
4978  * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
4979  * range are either UNKNOWN or RES0. Rerouting remains architectural as we
4980  * treat undefined registers in this range as RAZ.
4981  */
4982 static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
4983 				   struct sys_reg_params *params)
4984 {
4985 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
4986 
4987 	/* Treat impossible writes to RO registers as UNDEFINED */
4988 	if (params->is_write) {
4989 		unhandled_cp_access(vcpu, params);
4990 		return 1;
4991 	}
4992 
4993 	params->Op0 = 3;
4994 
4995 	/*
4996 	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
4997 	 * Avoid conflicting with future expansion of AArch64 feature registers
4998 	 * and simply treat them as RAZ here.
4999 	 */
5000 	if (params->CRm > 3)
5001 		params->regval = 0;
5002 	else if (!emulate_sys_reg(vcpu, params))
5003 		return 1;
5004 
5005 	vcpu_set_reg(vcpu, Rt, params->regval);
5006 	return 1;
5007 }
5008 
5009 /**
5010  * kvm_handle_cp_32 -- handles an MRC/MCR trap on a guest CP14/CP15 access
5011  * @vcpu: The VCPU pointer
5012  * @params: &struct sys_reg_params
5013  * @global: &struct sys_reg_desc
5014  * @nr_global: size of the @global array
5015  */
5016 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
5017 			    struct sys_reg_params *params,
5018 			    const struct sys_reg_desc *global,
5019 			    size_t nr_global)
5020 {
5021 	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
5022 
5023 	params->regval = vcpu_get_reg(vcpu, Rt);
5024 
5025 	if (emulate_cp(vcpu, params, global, nr_global)) {
5026 		if (!params->is_write)
5027 			vcpu_set_reg(vcpu, Rt, params->regval);
5028 		return 1;
5029 	}
5030 
5031 	unhandled_cp_access(vcpu, params);
5032 	return 1;
5033 }
5034 
5035 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
5036 {
5037 	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
5038 }
5039 
5040 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
5041 {
5042 	struct sys_reg_params params;
5043 
5044 	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
5045 
5046 	/*
5047 	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
5048 	 * system register table. Registers in the ID range where CRm=0 are
5049 	 * excluded from this scheme as they do not trivially map into AArch64
5050 	 * system register encodings, except for AIDR/REVIDR.
5051 	 */
5052 	if (params.Op1 == 0 && params.CRn == 0 &&
5053 	    (params.CRm || params.Op2 == 6 /* REVIDR */))
5054 		return kvm_emulate_cp15_id_reg(vcpu, &params);
5055 	if (params.Op1 == 1 && params.CRn == 0 &&
5056 	    params.CRm == 0 && params.Op2 == 7 /* AIDR */)
5057 		return kvm_emulate_cp15_id_reg(vcpu, &params);
5058 
5059 	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
5060 }
5061 
5062 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
5063 {
5064 	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
5065 }
5066 
5067 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
5068 {
5069 	struct sys_reg_params params;
5070 
5071 	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
5072 
5073 	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
5074 }
5075 
5076 /**
5077  * emulate_sys_reg - Emulate a guest access to an AArch64 system register
5078  * @vcpu: The VCPU pointer
5079  * @params: Decoded system register parameters
5080  *
5081  * Return: true if the system register access was successful, false otherwise.
5082  */
5083 static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
5084 			    struct sys_reg_params *params)
5085 {
5086 	const struct sys_reg_desc *r;
5087 
5088 	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
5089 	if (likely(r)) {
5090 		perform_access(vcpu, params, r);
5091 		return true;
5092 	}
5093 
5094 	print_sys_reg_msg(params,
5095 			  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
5096 			  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
5097 	kvm_inject_undefined(vcpu);
5098 
5099 	return false;
5100 }
5101 
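/*
 * Backing for the "idregs" debugfs file: iterate over sys_reg_descs
 * and print each VM-wide feature ID register with its current value,
 * one register per line.
 */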
5102 static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, loff_t pos)
5103 {
5104 	unsigned long i, idreg_idx = 0;
5105 
5106 	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
5107 		const struct sys_reg_desc *r = &sys_reg_descs[i];
5108 
5109 		if (!is_vm_ftr_id_reg(reg_to_encoding(r)))
5110 			continue;
5111 
5112 		if (idreg_idx++ == pos)
5113 			return r;
5114 	}
5115 
5116 	return NULL;
5117 }
5118 
5119 static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
5120 {
5121 	struct kvm *kvm = s->private;
5122 
5123 	if (!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
5124 		return NULL;
5125 
5126 	return (void *)idregs_debug_find(kvm, *pos);
5127 }
5128 
5129 static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
5130 {
5131 	struct kvm *kvm = s->private;
5132 
5133 	(*pos)++;
5134 
5135 	return (void *)idregs_debug_find(kvm, *pos);
5136 }
5137 
5138 static void idregs_debug_stop(struct seq_file *s, void *v)
5139 {
5140 }
5141 
5142 static int idregs_debug_show(struct seq_file *s, void *v)
5143 {
5144 	const struct sys_reg_desc *desc = v;
5145 	struct kvm *kvm = s->private;
5146 
5147 	if (!desc)
5148 		return 0;
5149 
5150 	seq_printf(s, "%20s:\t%016llx\n",
5151 		   desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc)));
5152 
5153 	return 0;
5154 }
5155 
5156 static const struct seq_operations idregs_debug_sops = {
5157 	.start	= idregs_debug_start,
5158 	.next	= idregs_debug_next,
5159 	.stop	= idregs_debug_stop,
5160 	.show	= idregs_debug_show,
5161 };
5162 
5163 DEFINE_SEQ_ATTRIBUTE(idregs_debug);
5164 
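/*
 * Backing for the "resx" debugfs file: for each register subject to
 * RES0/RES1 sanitisation, dump the masks constraining its guest view.
 */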
5165 static const struct sys_reg_desc *sr_resx_find(struct kvm *kvm, loff_t pos)
5166 {
5167 	unsigned long i, sr_idx = 0;
5168 
5169 	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
5170 		const struct sys_reg_desc *r = &sys_reg_descs[i];
5171 
5172 		if (r->reg < __SANITISED_REG_START__)
5173 			continue;
5174 
5175 		if (sr_idx++ == pos)
5176 			return r;
5177 	}
5178 
5179 	return NULL;
5180 }
5181 
5182 static void *sr_resx_start(struct seq_file *s, loff_t *pos)
5183 {
5184 	struct kvm *kvm = s->private;
5185 
5186 	if (!kvm->arch.sysreg_masks)
5187 		return NULL;
5188 
5189 	return (void *)sr_resx_find(kvm, *pos);
5190 }
5191 
5192 static void *sr_resx_next(struct seq_file *s, void *v, loff_t *pos)
5193 {
5194 	struct kvm *kvm = s->private;
5195 
5196 	(*pos)++;
5197 
5198 	return (void *)sr_resx_find(kvm, *pos);
5199 }
5200 
5201 static void sr_resx_stop(struct seq_file *s, void *v)
5202 {
5203 }
5204 
5205 static int sr_resx_show(struct seq_file *s, void *v)
5206 {
5207 	const struct sys_reg_desc *desc = v;
5208 	struct kvm *kvm = s->private;
5209 	struct resx resx;
5210 
5211 	if (!desc)
5212 		return 0;
5213 
5214 	resx = kvm_get_sysreg_resx(kvm, desc->reg);
5215 
5216 	seq_printf(s, "%20s:\tRES0:%016llx\tRES1:%016llx\n",
5217 		   desc->name, resx.res0, resx.res1);
5218 
5219 	return 0;
5220 }
5221 
5222 static const struct seq_operations sr_resx_sops = {
5223 	.start	= sr_resx_start,
5224 	.next	= sr_resx_next,
5225 	.stop	= sr_resx_stop,
5226 	.show	= sr_resx_show,
5227 };
5228 
5229 DEFINE_SEQ_ATTRIBUTE(sr_resx);
5230 
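/*
 * Expose "idregs" and "resx" as read-only files in the VM's debugfs
 * directory (typically under /sys/kernel/debug/kvm/).
 */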
5231 void kvm_sys_regs_create_debugfs(struct kvm *kvm)
5232 {
5233 	debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
5234 			    &idregs_debug_fops);
5235 	debugfs_create_file("resx", 0444, kvm->debugfs_dentry, kvm,
5236 			    &sr_resx_fops);
5237 }
5238 
5239 static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
5240 {
5241 	u32 id = reg_to_encoding(reg);
5242 	struct kvm *kvm = vcpu->kvm;
5243 
5244 	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
5245 		return;
5246 
5247 	kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg));
5248 }
5249 
5250 static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
5251 				  const struct sys_reg_desc *reg)
5252 {
5253 	if (kvm_vcpu_initialized(vcpu))
5254 		return;
5255 
5256 	reg->reset(vcpu, reg);
5257 }
5258 
5259 /**
5260  * kvm_reset_sys_regs - sets system registers to their reset values
5261  * @vcpu: The VCPU pointer
5262  *
5263  * This function finds the right table above and sets the registers on the
5264  * virtual CPU struct to their architecturally defined reset values.
5265  */
5266 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
5267 {
5268 	struct kvm *kvm = vcpu->kvm;
5269 	unsigned long i;
5270 
5271 	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
5272 		const struct sys_reg_desc *r = &sys_reg_descs[i];
5273 
5274 		if (!r->reset)
5275 			continue;
5276 
5277 		if (is_vm_ftr_id_reg(reg_to_encoding(r)))
5278 			reset_vm_ftr_id_reg(vcpu, r);
5279 		else if (is_vcpu_ftr_id_reg(reg_to_encoding(r)))
5280 			reset_vcpu_ftr_id_reg(vcpu, r);
5281 		else
5282 			r->reset(vcpu, r);
5283 
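		/*
		 * For sanitised registers, run the freshly reset value
		 * through a no-op RMW so that it picks up the VM's
		 * RES0/RES1 masks.
		 */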
5284 		if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
5285 			__vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
5286 	}
5287 
5288 	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
5289 
5290 	if (kvm_vcpu_has_pmu(vcpu))
5291 		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
5292 }
5293 
5294 /**
5295  * kvm_handle_sys_reg - handle a trapped system instruction or MRS/MSR
5296  *			 access taken during guest execution
5297  * @vcpu: The VCPU pointer
5298  */
5299 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
5300 {
5301 	const struct sys_reg_desc *desc = NULL;
5302 	struct sys_reg_params params;
5303 	unsigned long esr = kvm_vcpu_get_esr(vcpu);
5304 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
5305 	int sr_idx;
5306 
5307 	trace_kvm_handle_sys_reg(esr);
5308 
5309 	if (triage_sysreg_trap(vcpu, &sr_idx))
5310 		return 1;
5311 
5312 	params = esr_sys64_to_params(esr);
5313 	params.regval = vcpu_get_reg(vcpu, Rt);
5314 
5315 	/* System registers have Op0=={2,3}, as per DDI 0487 J.a C5.1.2 */
5316 	if (params.Op0 == 2 || params.Op0 == 3)
5317 		desc = &sys_reg_descs[sr_idx];
5318 	else
5319 		desc = &sys_insn_descs[sr_idx];
5320 
5321 	perform_access(vcpu, &params, desc);
5322 
5323 	/* Read from system register? */
5324 	if (!params.is_write &&
5325 	    (params.Op0 == 2 || params.Op0 == 3))
5326 		vcpu_set_reg(vcpu, Rt, params.regval);
5327 
5328 	return 1;
5329 }
5330 
5331 /******************************************************************************
5332  * Userspace API
5333  *****************************************************************************/
5334 
5335 static bool index_to_params(u64 id, struct sys_reg_params *params)
5336 {
5337 	switch (id & KVM_REG_SIZE_MASK) {
5338 	case KVM_REG_SIZE_U64:
5339 		/* Any unused index bits mean it's not valid. */
5340 		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
5341 			      | KVM_REG_ARM_COPROC_MASK
5342 			      | KVM_REG_ARM64_SYSREG_OP0_MASK
5343 			      | KVM_REG_ARM64_SYSREG_OP1_MASK
5344 			      | KVM_REG_ARM64_SYSREG_CRN_MASK
5345 			      | KVM_REG_ARM64_SYSREG_CRM_MASK
5346 			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
5347 			return false;
5348 		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
5349 			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
5350 		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
5351 			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
5352 		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
5353 			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
5354 		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
5355 			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
5356 		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
5357 			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
5358 		return true;
5359 	default:
5360 		return false;
5361 	}
5362 }
5363 
5364 const struct sys_reg_desc *get_reg_by_id(u64 id,
5365 					 const struct sys_reg_desc table[],
5366 					 unsigned int num)
5367 {
5368 	struct sys_reg_params params;
5369 
5370 	if (!index_to_params(id, &params))
5371 		return NULL;
5372 
5373 	return find_reg(&params, table, num);
5374 }
5375 
5376 /* Decode an index value, and find the sys_reg_desc entry. */
5377 static const struct sys_reg_desc *
5378 id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
5379 		   const struct sys_reg_desc table[], unsigned int num)
5381 {
5382 	const struct sys_reg_desc *r;
5383 
5384 	/* We only do sys_reg for now. */
5385 	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
5386 		return NULL;
5387 
5388 	r = get_reg_by_id(id, table, num);
5389 
5390 	/* Not saved in the sys_reg array and not otherwise accessible? */
5391 	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
5392 		r = NULL;
5393 
5394 	return r;
5395 }
5396 
5397 static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
5398 {
5399 	u32 val;
5400 	u32 __user *uval = uaddr;
5401 
5402 	/* Fail if we have unknown bits set. */
5403 	if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK
5404 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT) - 1)))
5405 		return -ENOENT;
5406 
5407 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
5408 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
5409 		if (KVM_REG_SIZE(id) != 4)
5410 			return -ENOENT;
5411 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
5412 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
5413 		if (val >= CSSELR_MAX)
5414 			return -ENOENT;
5415 
5416 		return put_user(get_ccsidr(vcpu, val), uval);
5417 	default:
5418 		return -ENOENT;
5419 	}
5420 }
5421 
5422 static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
5423 {
5424 	u32 val, newval;
5425 	u32 __user *uval = uaddr;
5426 
5427 	/* Fail if we have unknown bits set. */
5428 	if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK
5429 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT) - 1)))
5430 		return -ENOENT;
5431 
5432 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
5433 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
5434 		if (KVM_REG_SIZE(id) != 4)
5435 			return -ENOENT;
5436 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
5437 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
5438 		if (val >= CSSELR_MAX)
5439 			return -ENOENT;
5440 
5441 		if (get_user(newval, uval))
5442 			return -EFAULT;
5443 
5444 		return set_ccsidr(vcpu, val, newval);
5445 	default:
5446 		return -ENOENT;
5447 	}
5448 }
5449 
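/*
 * The virtual timer registers are still addressable via the legacy
 * KVM_REG_ARM_TIMER_* indices; translate those onto the equivalent
 * AArch64 sysreg indices.
 */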
5450 static u64 kvm_one_reg_to_id(const struct kvm_one_reg *reg)
5451 {
5452 	switch (reg->id) {
5453 	case KVM_REG_ARM_TIMER_CVAL:
5454 		return TO_ARM64_SYS_REG(CNTV_CVAL_EL0);
5455 	case KVM_REG_ARM_TIMER_CNT:
5456 		return TO_ARM64_SYS_REG(CNTVCT_EL0);
5457 	default:
5458 		return reg->id;
5459 	}
5460 }
5461 
5462 int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
5463 			 const struct sys_reg_desc table[], unsigned int num)
5464 {
5465 	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
5466 	const struct sys_reg_desc *r;
5467 	u64 id = kvm_one_reg_to_id(reg);
5468 	u64 val;
5469 	int ret;
5470 
5471 	r = id_to_sys_reg_desc(vcpu, id, table, num);
5472 	if (!r || sysreg_hidden(vcpu, r))
5473 		return -ENOENT;
5474 
5475 	if (r->get_user) {
5476 		ret = (r->get_user)(vcpu, r, &val);
5477 	} else {
5478 		val = __vcpu_sys_reg(vcpu, r->reg);
5479 		ret = 0;
5480 	}
5481 
5482 	if (!ret)
5483 		ret = put_user(val, uaddr);
5484 
5485 	return ret;
5486 }
5487 
5488 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
5489 {
5490 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
5491 
5492 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
5493 		return demux_c15_get(vcpu, reg->id, uaddr);
5494 
5495 	return kvm_sys_reg_get_user(vcpu, reg,
5496 				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
5497 }
5498 
5499 int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
5500 			 const struct sys_reg_desc table[], unsigned int num)
5501 {
5502 	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
5503 	const struct sys_reg_desc *r;
5504 	u64 id = kvm_one_reg_to_id(reg);
5505 	u64 val;
5506 	int ret;
5507 
5508 	if (get_user(val, uaddr))
5509 		return -EFAULT;
5510 
5511 	r = id_to_sys_reg_desc(vcpu, id, table, num);
5512 	if (!r || sysreg_hidden(vcpu, r))
5513 		return -ENOENT;
5514 
5515 	if (sysreg_user_write_ignore(vcpu, r))
5516 		return 0;
5517 
5518 	if (r->set_user) {
5519 		ret = (r->set_user)(vcpu, r, val);
5520 	} else {
5521 		__vcpu_assign_sys_reg(vcpu, r->reg, val);
5522 		ret = 0;
5523 	}
5524 
5525 	return ret;
5526 }
5527 
5528 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
5529 {
5530 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
5531 
5532 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
5533 		return demux_c15_set(vcpu, reg->id, uaddr);
5534 
5535 	return kvm_sys_reg_set_user(vcpu, reg,
5536 				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
5537 }
5538 
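/* The only demuxed registers are the CCSIDRs, one per CSSELR value. */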
5539 static unsigned int num_demux_regs(void)
5540 {
5541 	return CSSELR_MAX;
5542 }
5543 
5544 static int write_demux_regids(u64 __user *uindices)
5545 {
5546 	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
5547 	unsigned int i;
5548 
5549 	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
5550 	for (i = 0; i < CSSELR_MAX; i++) {
5551 		if (put_user(val | i, uindices))
5552 			return -EFAULT;
5553 		uindices++;
5554 	}
5555 	return 0;
5556 }
5557 
5558 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
5559 {
5560 	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
5561 		KVM_REG_ARM64_SYSREG |
5562 		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
5563 		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
5564 		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
5565 		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
5566 		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
5567 }
5568 
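/*
 * Copy a single register index out to userspace. A NULL *uind means
 * the caller is only counting, so report success without copying.
 * Note that the timer registers are advertised under their legacy
 * indices, matching the translation done in kvm_one_reg_to_id().
 */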
5569 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
5570 {
5571 	u64 idx;
5572 
5573 	if (!*uind)
5574 		return true;
5575 
5576 	switch (reg_to_encoding(reg)) {
5577 	case SYS_CNTV_CVAL_EL0:
5578 		idx = KVM_REG_ARM_TIMER_CVAL;
5579 		break;
5580 	case SYS_CNTVCT_EL0:
5581 		idx = KVM_REG_ARM_TIMER_CNT;
5582 		break;
5583 	default:
5584 		idx = sys_reg_to_index(reg);
5585 	}
5586 
5587 	if (put_user(idx, *uind))
5588 		return false;
5589 
5590 	(*uind)++;
5591 	return true;
5592 }
5593 
5594 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
5595 			    const struct sys_reg_desc *rd,
5596 			    u64 __user **uind,
5597 			    unsigned int *total)
5598 {
5599 	/*
5600 	 * Ignore registers we trap but don't save,
5601 	 * and for which no custom user accessor is provided.
5602 	 */
5603 	if (!(rd->reg || rd->get_user))
5604 		return 0;
5605 
5606 	if (sysreg_hidden(vcpu, rd))
5607 		return 0;
5608 
5609 	if (!copy_reg_to_user(rd, uind))
5610 		return -EFAULT;
5611 
5612 	(*total)++;
5613 	return 0;
5614 }
5615 
5616 /* Assumed ordered tables, see kvm_sys_reg_table_init. */
5617 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
5618 {
5619 	const struct sys_reg_desc *i2, *end2;
5620 	unsigned int total = 0;
5621 	int err;
5622 
5623 	i2 = sys_reg_descs;
5624 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
5625 
5626 	while (i2 != end2) {
5627 		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
5628 		if (err)
5629 			return err;
5630 	}
5631 	return total;
5632 }
5633 
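/* Walking with a NULL userspace pointer simply counts the visible registers. */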
5634 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
5635 {
5636 	return num_demux_regs()
5637 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
5638 }
5639 
5640 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
5641 {
5642 	int err;
5643 
5644 	err = walk_sys_regs(vcpu, uindices);
5645 	if (err < 0)
5646 		return err;
5647 	uindices += err;
5648 
5649 	return write_demux_regids(uindices);
5650 }
5651 
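/*
 * Compute the index of a feature ID register in the writable-masks
 * array handed back to userspace.
 */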
5652 #define KVM_ARM_FEATURE_ID_RANGE_INDEX(r)			\
5653 	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r),		\
5654 		sys_reg_Op1(r),					\
5655 		sys_reg_CRn(r),					\
5656 		sys_reg_CRm(r),					\
5657 		sys_reg_Op2(r))
5658 
5659 int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
5660 {
5661 	const void *zero_page = page_to_virt(ZERO_PAGE(0));
5662 	u64 __user *masks = (u64 __user *)range->addr;
5663 
5664 	/* Only the feature ID range is supported; reserved[13] must be zero. */
5665 	if (range->range ||
5666 	    memcmp(range->reserved, zero_page, sizeof(range->reserved)))
5667 		return -EINVAL;
5668 
5669 	/* Wipe the whole thing first */
5670 	if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
5671 		return -EFAULT;
5672 
5673 	for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
5674 		const struct sys_reg_desc *reg = &sys_reg_descs[i];
5675 		u32 encoding = reg_to_encoding(reg);
5676 		u64 val;
5677 
5678 		if (!is_feature_id_reg(encoding) || !reg->set_user)
5679 			continue;
5680 
5681 		if (!reg->val ||
5682 		    (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) {
5683 			continue;
5684 		}
5685 		val = reg->val;
5686 
5687 		if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
5688 			return -EFAULT;
5689 	}
5690 
5691 	return 0;
5692 }
5693 
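/*
 * Compute the vcpu's baseline HCR_EL2 configuration from the VM-wide
 * features and the host's capabilities.
 */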
5694 static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
5695 {
5696 	struct kvm *kvm = vcpu->kvm;
5697 
5698 	if (has_vhe() || has_hvhe())
5699 		vcpu->arch.hcr_el2 |= HCR_E2H;
5700 	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
5701 		/* route synchronous external abort exceptions to EL2 */
5702 		vcpu->arch.hcr_el2 |= HCR_TEA;
5703 		/* trap error record accesses */
5704 		vcpu->arch.hcr_el2 |= HCR_TERR;
5705 	}
5706 
5707 	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
5708 		vcpu->arch.hcr_el2 |= HCR_FWB;
5709 
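	/*
	 * Trap the cache identification registers. TID4 (FEAT_EVT)
	 * leaves CTR_EL0 untrapped, which is only acceptable when the
	 * guest's view of CTR_EL0 matches the host's; otherwise fall
	 * back to TID2, which also traps CTR_EL0.
	 */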
5710 	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
5711 	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
5712 	    kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0))
5713 		vcpu->arch.hcr_el2 |= HCR_TID4;
5714 	else
5715 		vcpu->arch.hcr_el2 |= HCR_TID2;
5716 
5717 	if (vcpu_el1_is_32bit(vcpu))
5718 		vcpu->arch.hcr_el2 &= ~HCR_RW;
5719 
5720 	if (kvm_has_mte(vcpu->kvm))
5721 		vcpu->arch.hcr_el2 |= HCR_ATA;
5722 	else
5723 		vcpu->arch.hcr_el2 |= HCR_TID5;
5724 
5725 	/*
5726 	 * In the absence of FGT, we cannot independently trap TLBI
5727 	 * Range instructions. This isn't great, but trapping all
5728 	 * TLBIs would be far worse. Live with it...
5729 	 */
5730 	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
5731 		vcpu->arch.hcr_el2 |= HCR_TTLBOS;
5732 }
5733 
5734 void kvm_calculate_traps(struct kvm_vcpu *vcpu)
5735 {
5736 	struct kvm *kvm = vcpu->kvm;
5737 
5738 	mutex_lock(&kvm->arch.config_lock);
5739 	vcpu_set_hcr(vcpu);
5740 	vcpu_set_ich_hcr(vcpu);
5741 	vcpu_set_hcrx(vcpu);
5742 
5743 	if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
5744 		goto out;
5745 
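	/*
	 * The Fine-Grained UNDEF masks only depend on the VM-wide
	 * configuration, so compute them once and flag them as done.
	 */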
5746 	compute_fgu(kvm, HFGRTR_GROUP);
5747 	compute_fgu(kvm, HFGITR_GROUP);
5748 	compute_fgu(kvm, HDFGRTR_GROUP);
5749 	compute_fgu(kvm, HAFGRTR_GROUP);
5750 	compute_fgu(kvm, HFGRTR2_GROUP);
5751 	compute_fgu(kvm, HFGITR2_GROUP);
5752 	compute_fgu(kvm, HDFGRTR2_GROUP);
5753 	compute_fgu(kvm, ICH_HFGRTR_GROUP);
5754 	compute_fgu(kvm, ICH_HFGITR_GROUP);
5755 
5756 	set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
5757 out:
5758 	mutex_unlock(&kvm->arch.config_lock);
5759 }
5760 
5761 /*
5762  * Perform last adjustments to the ID registers that are implied by the
5763  * configuration outside of the ID regs themselves, as well as any
5764  * initialisation that directly depends on these ID registers (such as
5765  * RES0/RES1 behaviours). This is not the place to configure traps though.
5766  *
5767  * Because this can be called once per CPU, changes must be idempotent.
5768  * Because this can be called once per vCPU, changes must be idempotent.
5769 int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
5770 {
5771 	struct kvm *kvm = vcpu->kvm;
5772 
5773 	guard(mutex)(&kvm->arch.config_lock);
5774 
5775 	if (kvm_vm_has_ran_once(kvm))
5776 		return 0;
5777 
5778 	/*
5779 	 * This hacks into the ID registers, so only perform it when the
5780 	 * first vcpu runs, or the kvm_set_vm_id_reg() helper will scream.
5781 	 */
5782 	if (!irqchip_in_kernel(kvm)) {
5783 		u64 val;
5784 
5785 		val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
5786 		kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
5787 		val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR2_EL1) & ~ID_AA64PFR2_EL1_GCIE;
5788 		kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR2_EL1, val);
5789 		val = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;
5790 		kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, val);
5791 	} else {
5792 		/*
5793 		 * Certain userspace software - QEMU - samples the system
5794 		 * register state without creating an irqchip, then blindly
5795 		 * restores the state prior to running the final guest. This
5796 		 * means that it restores the virtualization & emulation
5797 		 * capabilities of the host system, rather than something that
5798 		 * reflects the final guest state. Moreover, it checks that the
5799 		 * state was "correctly" restored (i.e., verbatim), bailing if
5800 		 * it isn't, so masking off invalid state isn't an option.
5801 		 *
5802 		 * On GICv5 hardware that supports FEAT_GCIE_LEGACY we can run
5803 		 * both GICv3- and GICv5-based guests. Therefore, we initially
5804 		 * present both ID_AA64PFR0.GIC and ID_AA64PFR2.GCIE as IMP to
5805 		 * reflect that userspace can create EITHER a vGICv3 or a
5806 		 * vGICv5. This is an architecturally invalid combination, of
5807 		 * course. Once an in-kernel GIC is created, the sysreg state is
5808 		 * updated to reflect the actual, valid configuration.
5809 		 *
5810 		 * Setting both the GIC and GCIE features to IMP unsurprisingly
5811 		 * results in guests falling over, and hence we need to fix up
5812 		 * this mess in KVM. Before running for the first time we yet
5813 		 * again ensure that the GIC and GCIE fields accurately reflect
5814 		 * the actual hardware the guest should see.
5815 		 *
5816 		 * This hack allows legacy QEMU-based GICv3 guests to run
5817 		 * unmodified on compatible GICv5 hosts, and avoids the inverse
5818 		 * problem for GICv5-based guests in the future.
5819 		 */
5820 		kvm_vgic_finalize_idregs(kvm);
5821 	}
5822 
5823 	if (vcpu_has_nv(vcpu)) {
5824 		int ret = kvm_init_nv_sysregs(vcpu);
5825 		if (ret)
5826 			return ret;
5827 	}
5828 
5829 	return 0;
5830 }
5831 
5832 int __init kvm_sys_reg_table_init(void)
5833 {
5834 	const struct sys_reg_desc *gicv3_regs;
5835 	bool valid = true;
5836 	unsigned int i, sz;
5837 	int ret = 0;
5838 
5839 	/* Make sure tables are unique and in order. */
5840 	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), true);
5841 	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), false);
5842 	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), false);
5843 	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), false);
5844 	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), false);
5845 	valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
5846 
5847 	gicv3_regs = vgic_v3_get_sysreg_table(&sz);
5848 	valid &= check_sysreg_table(gicv3_regs, sz, false);
5849 
5850 	if (!valid)
5851 		return -EINVAL;
5852 
5853 	init_imp_id_regs();
5854 
5855 	ret = populate_nv_trap_config();
5856 
5857 	check_feature_map();
5858 
5859 	for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
5860 		ret = populate_sysreg_config(sys_reg_descs + i, i);
5861 
5862 	for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
5863 		ret = populate_sysreg_config(sys_insn_descs + i, i);
5864 
5865 	return ret;
5866 }
5867