xref: /linux/arch/arm64/kvm/sys_regs.c (revision 4ea7c1717f3f2344f7a1cdab4f5875cfa89c87a9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012,2013 - ARM Ltd
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  *
6  * Derived from arch/arm/kvm/coproc.c:
7  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
8  * Authors: Rusty Russell <rusty@rustcorp.com.au>
9  *          Christoffer Dall <c.dall@virtualopensystems.com>
10  */
11 
12 #include <linux/bitfield.h>
13 #include <linux/bsearch.h>
14 #include <linux/cacheinfo.h>
15 #include <linux/debugfs.h>
16 #include <linux/kvm_host.h>
17 #include <linux/mm.h>
18 #include <linux/printk.h>
19 #include <linux/uaccess.h>
20 #include <linux/irqchip/arm-gic-v3.h>
21 
22 #include <asm/arm_pmuv3.h>
23 #include <asm/cacheflush.h>
24 #include <asm/cputype.h>
25 #include <asm/debug-monitors.h>
26 #include <asm/esr.h>
27 #include <asm/kvm_arm.h>
28 #include <asm/kvm_emulate.h>
29 #include <asm/kvm_hyp.h>
30 #include <asm/kvm_mmu.h>
31 #include <asm/kvm_nested.h>
32 #include <asm/perf_event.h>
33 #include <asm/sysreg.h>
34 
35 #include <trace/events/kvm.h>
36 
37 #include "sys_regs.h"
38 #include "vgic/vgic.h"
39 
40 #include "trace.h"
41 
42 /*
43  * For AArch32, we only take care of what is being trapped. Anything
44  * that has to do with init and userspace access has to go via the
45  * 64bit interface.
46  */
47 
48 static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
49 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
50 		      u64 val);
51 
undef_access(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)52 static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
53 			 const struct sys_reg_desc *r)
54 {
55 	kvm_inject_undefined(vcpu);
56 	return false;
57 }
58 
bad_trap(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * r,const char * msg)59 static bool bad_trap(struct kvm_vcpu *vcpu,
60 		     struct sys_reg_params *params,
61 		     const struct sys_reg_desc *r,
62 		     const char *msg)
63 {
64 	WARN_ONCE(1, "Unexpected %s\n", msg);
65 	print_sys_reg_instr(params);
66 	return undef_access(vcpu, params, r);
67 }
68 
read_from_write_only(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * r)69 static bool read_from_write_only(struct kvm_vcpu *vcpu,
70 				 struct sys_reg_params *params,
71 				 const struct sys_reg_desc *r)
72 {
73 	return bad_trap(vcpu, params, r,
74 			"sys_reg read to write-only register");
75 }
76 
write_to_read_only(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * r)77 static bool write_to_read_only(struct kvm_vcpu *vcpu,
78 			       struct sys_reg_params *params,
79 			       const struct sys_reg_desc *r)
80 {
81 	return bad_trap(vcpu, params, r,
82 			"sys_reg write to read-only register");
83 }
84 
85 enum sr_loc_attr {
86 	SR_LOC_MEMORY	= 0,	  /* Register definitely in memory */
87 	SR_LOC_LOADED	= BIT(0), /* Register on CPU, unless it cannot */
88 	SR_LOC_MAPPED	= BIT(1), /* Register in a different CPU register */
89 	SR_LOC_XLATED	= BIT(2), /* Register translated to fit another reg */
90 	SR_LOC_SPECIAL	= BIT(3), /* Demanding register, implies loaded */
91 };
92 
93 struct sr_loc {
94 	enum sr_loc_attr loc;
95 	enum vcpu_sysreg map_reg;
96 	u64		 (*xlate)(u64);
97 };
98 
locate_direct_register(const struct kvm_vcpu * vcpu,enum vcpu_sysreg reg)99 static enum sr_loc_attr locate_direct_register(const struct kvm_vcpu *vcpu,
100 					       enum vcpu_sysreg reg)
101 {
102 	switch (reg) {
103 	case SCTLR_EL1:
104 	case CPACR_EL1:
105 	case TTBR0_EL1:
106 	case TTBR1_EL1:
107 	case TCR_EL1:
108 	case TCR2_EL1:
109 	case PIR_EL1:
110 	case PIRE0_EL1:
111 	case POR_EL1:
112 	case ESR_EL1:
113 	case AFSR0_EL1:
114 	case AFSR1_EL1:
115 	case FAR_EL1:
116 	case MAIR_EL1:
117 	case VBAR_EL1:
118 	case CONTEXTIDR_EL1:
119 	case AMAIR_EL1:
120 	case CNTKCTL_EL1:
121 	case ELR_EL1:
122 	case SPSR_EL1:
123 	case ZCR_EL1:
124 	case SCTLR2_EL1:
125 		/*
126 		 * EL1 registers which have an ELx2 mapping are loaded if
127 		 * we're not in hypervisor context.
128 		 */
129 		return is_hyp_ctxt(vcpu) ? SR_LOC_MEMORY : SR_LOC_LOADED;
130 
131 	case TPIDR_EL0:
132 	case TPIDRRO_EL0:
133 	case TPIDR_EL1:
134 	case PAR_EL1:
135 	case DACR32_EL2:
136 	case IFSR32_EL2:
137 	case DBGVCR32_EL2:
138 		/* These registers are always loaded, no matter what */
139 		return SR_LOC_LOADED;
140 
141 	default:
142 		/* Non-mapped EL2 registers are by definition in memory. */
143 		return SR_LOC_MEMORY;
144 	}
145 }
146 
locate_mapped_el2_register(const struct kvm_vcpu * vcpu,enum vcpu_sysreg reg,enum vcpu_sysreg map_reg,u64 (* xlate)(u64),struct sr_loc * loc)147 static void locate_mapped_el2_register(const struct kvm_vcpu *vcpu,
148 				       enum vcpu_sysreg reg,
149 				       enum vcpu_sysreg map_reg,
150 				       u64 (*xlate)(u64),
151 				       struct sr_loc *loc)
152 {
153 	if (!is_hyp_ctxt(vcpu)) {
154 		loc->loc = SR_LOC_MEMORY;
155 		return;
156 	}
157 
158 	loc->loc = SR_LOC_LOADED | SR_LOC_MAPPED;
159 	loc->map_reg = map_reg;
160 
161 	WARN_ON(locate_direct_register(vcpu, map_reg) != SR_LOC_MEMORY);
162 
163 	if (xlate != NULL && !vcpu_el2_e2h_is_set(vcpu)) {
164 		loc->loc |= SR_LOC_XLATED;
165 		loc->xlate = xlate;
166 	}
167 }
168 
169 #define MAPPED_EL2_SYSREG(r, m, t)					\
170 	case r:	{							\
171 		locate_mapped_el2_register(vcpu, r, m, t, loc);		\
172 		break;							\
173 	}
174 
locate_register(const struct kvm_vcpu * vcpu,enum vcpu_sysreg reg,struct sr_loc * loc)175 static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
176 			    struct sr_loc *loc)
177 {
178 	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU)) {
179 		loc->loc = SR_LOC_MEMORY;
180 		return;
181 	}
182 
183 	switch (reg) {
184 		MAPPED_EL2_SYSREG(SCTLR_EL2,   SCTLR_EL1,
185 				  translate_sctlr_el2_to_sctlr_el1	     );
186 		MAPPED_EL2_SYSREG(CPTR_EL2,    CPACR_EL1,
187 				  translate_cptr_el2_to_cpacr_el1	     );
188 		MAPPED_EL2_SYSREG(TTBR0_EL2,   TTBR0_EL1,
189 				  translate_ttbr0_el2_to_ttbr0_el1	     );
190 		MAPPED_EL2_SYSREG(TTBR1_EL2,   TTBR1_EL1,   NULL	     );
191 		MAPPED_EL2_SYSREG(TCR_EL2,     TCR_EL1,
192 				  translate_tcr_el2_to_tcr_el1		     );
193 		MAPPED_EL2_SYSREG(VBAR_EL2,    VBAR_EL1,    NULL	     );
194 		MAPPED_EL2_SYSREG(AFSR0_EL2,   AFSR0_EL1,   NULL	     );
195 		MAPPED_EL2_SYSREG(AFSR1_EL2,   AFSR1_EL1,   NULL	     );
196 		MAPPED_EL2_SYSREG(ESR_EL2,     ESR_EL1,     NULL	     );
197 		MAPPED_EL2_SYSREG(FAR_EL2,     FAR_EL1,     NULL	     );
198 		MAPPED_EL2_SYSREG(MAIR_EL2,    MAIR_EL1,    NULL	     );
199 		MAPPED_EL2_SYSREG(TCR2_EL2,    TCR2_EL1,    NULL	     );
200 		MAPPED_EL2_SYSREG(PIR_EL2,     PIR_EL1,     NULL	     );
201 		MAPPED_EL2_SYSREG(PIRE0_EL2,   PIRE0_EL1,   NULL	     );
202 		MAPPED_EL2_SYSREG(POR_EL2,     POR_EL1,     NULL	     );
203 		MAPPED_EL2_SYSREG(AMAIR_EL2,   AMAIR_EL1,   NULL	     );
204 		MAPPED_EL2_SYSREG(ELR_EL2,     ELR_EL1,	    NULL	     );
205 		MAPPED_EL2_SYSREG(SPSR_EL2,    SPSR_EL1,    NULL	     );
206 		MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL	     );
207 		MAPPED_EL2_SYSREG(SCTLR2_EL2,  SCTLR2_EL1,  NULL	     );
208 	case CNTHCTL_EL2:
209 		/* CNTHCTL_EL2 is super special, until we support NV2.1 */
210 		loc->loc = ((is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) ?
211 			    SR_LOC_SPECIAL : SR_LOC_MEMORY);
212 		break;
213 	default:
214 		loc->loc = locate_direct_register(vcpu, reg);
215 	}
216 }
217 
read_sr_from_cpu(enum vcpu_sysreg reg)218 static u64 read_sr_from_cpu(enum vcpu_sysreg reg)
219 {
220 	u64 val = 0x8badf00d8badf00d;
221 
222 	switch (reg) {
223 	case SCTLR_EL1:		val = read_sysreg_s(SYS_SCTLR_EL12);	break;
224 	case CPACR_EL1:		val = read_sysreg_s(SYS_CPACR_EL12);	break;
225 	case TTBR0_EL1:		val = read_sysreg_s(SYS_TTBR0_EL12);	break;
226 	case TTBR1_EL1:		val = read_sysreg_s(SYS_TTBR1_EL12);	break;
227 	case TCR_EL1:		val = read_sysreg_s(SYS_TCR_EL12);	break;
228 	case TCR2_EL1:		val = read_sysreg_s(SYS_TCR2_EL12);	break;
229 	case PIR_EL1:		val = read_sysreg_s(SYS_PIR_EL12);	break;
230 	case PIRE0_EL1:		val = read_sysreg_s(SYS_PIRE0_EL12);	break;
231 	case POR_EL1:		val = read_sysreg_s(SYS_POR_EL12);	break;
232 	case ESR_EL1:		val = read_sysreg_s(SYS_ESR_EL12);	break;
233 	case AFSR0_EL1:		val = read_sysreg_s(SYS_AFSR0_EL12);	break;
234 	case AFSR1_EL1:		val = read_sysreg_s(SYS_AFSR1_EL12);	break;
235 	case FAR_EL1:		val = read_sysreg_s(SYS_FAR_EL12);	break;
236 	case MAIR_EL1:		val = read_sysreg_s(SYS_MAIR_EL12);	break;
237 	case VBAR_EL1:		val = read_sysreg_s(SYS_VBAR_EL12);	break;
238 	case CONTEXTIDR_EL1:	val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
239 	case AMAIR_EL1:		val = read_sysreg_s(SYS_AMAIR_EL12);	break;
240 	case CNTKCTL_EL1:	val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
241 	case ELR_EL1:		val = read_sysreg_s(SYS_ELR_EL12);	break;
242 	case SPSR_EL1:		val = read_sysreg_s(SYS_SPSR_EL12);	break;
243 	case ZCR_EL1:		val = read_sysreg_s(SYS_ZCR_EL12);	break;
244 	case SCTLR2_EL1:	val = read_sysreg_s(SYS_SCTLR2_EL12);	break;
245 	case TPIDR_EL0:		val = read_sysreg_s(SYS_TPIDR_EL0);	break;
246 	case TPIDRRO_EL0:	val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
247 	case TPIDR_EL1:		val = read_sysreg_s(SYS_TPIDR_EL1);	break;
248 	case PAR_EL1:		val = read_sysreg_par();		break;
249 	case DACR32_EL2:	val = read_sysreg_s(SYS_DACR32_EL2);	break;
250 	case IFSR32_EL2:	val = read_sysreg_s(SYS_IFSR32_EL2);	break;
251 	case DBGVCR32_EL2:	val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
252 	default:		WARN_ON_ONCE(1);
253 	}
254 
255 	return val;
256 }
257 
write_sr_to_cpu(enum vcpu_sysreg reg,u64 val)258 static void write_sr_to_cpu(enum vcpu_sysreg reg, u64 val)
259 {
260 	switch (reg) {
261 	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
262 	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
263 	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
264 	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
265 	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
266 	case TCR2_EL1:		write_sysreg_s(val, SYS_TCR2_EL12);	break;
267 	case PIR_EL1:		write_sysreg_s(val, SYS_PIR_EL12);	break;
268 	case PIRE0_EL1:		write_sysreg_s(val, SYS_PIRE0_EL12);	break;
269 	case POR_EL1:		write_sysreg_s(val, SYS_POR_EL12);	break;
270 	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
271 	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
272 	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
273 	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
274 	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
275 	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
276 	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
277 	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
278 	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
279 	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
280 	case SPSR_EL1:		write_sysreg_s(val, SYS_SPSR_EL12);	break;
281 	case ZCR_EL1:		write_sysreg_s(val, SYS_ZCR_EL12);	break;
282 	case SCTLR2_EL1:	write_sysreg_s(val, SYS_SCTLR2_EL12);	break;
283 	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
284 	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
285 	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
286 	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
287 	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
288 	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
289 	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
290 	default:		WARN_ON_ONCE(1);
291 	}
292 }
293 
vcpu_read_sys_reg(const struct kvm_vcpu * vcpu,enum vcpu_sysreg reg)294 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
295 {
296 	struct sr_loc loc = {};
297 
298 	locate_register(vcpu, reg, &loc);
299 
300 	WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
301 
302 	if (loc.loc & SR_LOC_SPECIAL) {
303 		u64 val;
304 
305 		WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
306 
307 		/*
308 		 * CNTHCTL_EL2 requires some special treatment to account
309 		 * for the bits that can be set via CNTKCTL_EL1 when E2H==1.
310 		 */
311 		switch (reg) {
312 		case CNTHCTL_EL2:
313 			val = read_sysreg_el1(SYS_CNTKCTL);
314 			val &= CNTKCTL_VALID_BITS;
315 			val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
316 			return val;
317 		default:
318 			WARN_ON_ONCE(1);
319 		}
320 	}
321 
322 	if (loc.loc & SR_LOC_LOADED) {
323 		enum vcpu_sysreg map_reg = reg;
324 
325 		if (loc.loc & SR_LOC_MAPPED)
326 			map_reg = loc.map_reg;
327 
328 		if (!(loc.loc & SR_LOC_XLATED)) {
329 			u64 val = read_sr_from_cpu(map_reg);
330 
331 			if (reg >= __SANITISED_REG_START__)
332 				val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
333 
334 			return val;
335 		}
336 	}
337 
338 	return __vcpu_sys_reg(vcpu, reg);
339 }
340 
vcpu_write_sys_reg(struct kvm_vcpu * vcpu,u64 val,enum vcpu_sysreg reg)341 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, enum vcpu_sysreg reg)
342 {
343 	struct sr_loc loc = {};
344 
345 	locate_register(vcpu, reg, &loc);
346 
347 	WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
348 
349 	if (loc.loc & SR_LOC_SPECIAL) {
350 
351 		WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
352 
353 		switch (reg) {
354 		case CNTHCTL_EL2:
355 			/*
356 			 * If E2H=1, some of the bits are backed by
357 			 * CNTKCTL_EL1, while the rest is kept in memory.
358 			 * Yes, this is fun stuff.
359 			 */
360 			write_sysreg_el1(val, SYS_CNTKCTL);
361 			break;
362 		default:
363 			WARN_ON_ONCE(1);
364 		}
365 	}
366 
367 	if (loc.loc & SR_LOC_LOADED) {
368 		enum vcpu_sysreg map_reg = reg;
369 		u64 xlated_val;
370 
371 		if (reg >= __SANITISED_REG_START__)
372 			val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
373 
374 		if (loc.loc & SR_LOC_MAPPED)
375 			map_reg = loc.map_reg;
376 
377 		if (loc.loc & SR_LOC_XLATED)
378 			xlated_val = loc.xlate(val);
379 		else
380 			xlated_val = val;
381 
382 		write_sr_to_cpu(map_reg, xlated_val);
383 
384 		/*
385 		 * Fall through to write the backing store anyway, which
386 		 * allows translated registers to be directly read without a
387 		 * reverse translation.
388 		 */
389 	}
390 
391 	__vcpu_assign_sys_reg(vcpu, reg, val);
392 }
393 
394 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
395 #define CSSELR_MAX 14
396 
397 /*
398  * Returns the minimum line size for the selected cache, expressed as
399  * Log2(bytes).
400  */
get_min_cache_line_size(bool icache)401 static u8 get_min_cache_line_size(bool icache)
402 {
403 	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
404 	u8 field;
405 
406 	if (icache)
407 		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
408 	else
409 		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);
410 
411 	/*
412 	 * Cache line size is represented as Log2(words) in CTR_EL0.
413 	 * Log2(bytes) can be derived with the following:
414 	 *
415 	 * Log2(words) + 2 = Log2(bytes / 4) + 2
416 	 * 		   = Log2(bytes) - 2 + 2
417 	 * 		   = Log2(bytes)
418 	 */
419 	return field + 2;
420 }
421 
422 /* Which cache CCSIDR represents depends on CSSELR value. */
get_ccsidr(struct kvm_vcpu * vcpu,u32 csselr)423 static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
424 {
425 	u8 line_size;
426 
427 	if (vcpu->arch.ccsidr)
428 		return vcpu->arch.ccsidr[csselr];
429 
430 	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);
431 
432 	/*
433 	 * Fabricate a CCSIDR value as the overriding value does not exist.
434 	 * The real CCSIDR value will not be used as it can vary by the
435 	 * physical CPU which the vcpu currently resides in.
436 	 *
437 	 * The line size is determined with get_min_cache_line_size(), which
438 	 * should be valid for all CPUs even if they have different cache
439 	 * configuration.
440 	 *
441 	 * The associativity bits are cleared, meaning the geometry of all data
442 	 * and unified caches (which are guaranteed to be PIPT and thus
443 	 * non-aliasing) are 1 set and 1 way.
444 	 * Guests should not be doing cache operations by set/way at all, and
445 	 * for this reason, we trap them and attempt to infer the intent, so
446 	 * that we can flush the entire guest's address space at the appropriate
447 	 * time. The exposed geometry minimizes the number of the traps.
448 	 * [If guests should attempt to infer aliasing properties from the
449 	 * geometry (which is not permitted by the architecture), they would
450 	 * only do so for virtually indexed caches.]
451 	 *
452 	 * We don't check if the cache level exists as it is allowed to return
453 	 * an UNKNOWN value if not.
454 	 */
455 	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
456 }
457 
set_ccsidr(struct kvm_vcpu * vcpu,u32 csselr,u32 val)458 static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
459 {
460 	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
461 	u32 *ccsidr = vcpu->arch.ccsidr;
462 	u32 i;
463 
464 	if ((val & CCSIDR_EL1_RES0) ||
465 	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
466 		return -EINVAL;
467 
468 	if (!ccsidr) {
469 		if (val == get_ccsidr(vcpu, csselr))
470 			return 0;
471 
472 		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
473 		if (!ccsidr)
474 			return -ENOMEM;
475 
476 		for (i = 0; i < CSSELR_MAX; i++)
477 			ccsidr[i] = get_ccsidr(vcpu, i);
478 
479 		vcpu->arch.ccsidr = ccsidr;
480 	}
481 
482 	ccsidr[csselr] = val;
483 
484 	return 0;
485 }
486 
access_rw(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)487 static bool access_rw(struct kvm_vcpu *vcpu,
488 		      struct sys_reg_params *p,
489 		      const struct sys_reg_desc *r)
490 {
491 	if (p->is_write)
492 		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
493 	else
494 		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
495 
496 	return true;
497 }
498 
499 /*
500  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
501  */
access_dcsw(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)502 static bool access_dcsw(struct kvm_vcpu *vcpu,
503 			struct sys_reg_params *p,
504 			const struct sys_reg_desc *r)
505 {
506 	if (!p->is_write)
507 		return read_from_write_only(vcpu, p, r);
508 
509 	/*
510 	 * Only track S/W ops if we don't have FWB. It still indicates
511 	 * that the guest is a bit broken (S/W operations should only
512 	 * be done by firmware, knowing that there is only a single
513 	 * CPU left in the system, and certainly not from non-secure
514 	 * software).
515 	 */
516 	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
517 		kvm_set_way_flush(vcpu);
518 
519 	return true;
520 }
521 
access_dcgsw(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)522 static bool access_dcgsw(struct kvm_vcpu *vcpu,
523 			 struct sys_reg_params *p,
524 			 const struct sys_reg_desc *r)
525 {
526 	if (!kvm_has_mte(vcpu->kvm))
527 		return undef_access(vcpu, p, r);
528 
529 	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
530 	return access_dcsw(vcpu, p, r);
531 }
532 
get_access_mask(const struct sys_reg_desc * r,u64 * mask,u64 * shift)533 static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
534 {
535 	switch (r->aarch32_map) {
536 	case AA32_LO:
537 		*mask = GENMASK_ULL(31, 0);
538 		*shift = 0;
539 		break;
540 	case AA32_HI:
541 		*mask = GENMASK_ULL(63, 32);
542 		*shift = 32;
543 		break;
544 	default:
545 		*mask = GENMASK_ULL(63, 0);
546 		*shift = 0;
547 		break;
548 	}
549 }
550 
551 /*
552  * Generic accessor for VM registers. Only called as long as HCR_TVM
553  * is set. If the guest enables the MMU, we stop trapping the VM
554  * sys_regs and leave it in complete control of the caches.
555  */
access_vm_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)556 static bool access_vm_reg(struct kvm_vcpu *vcpu,
557 			  struct sys_reg_params *p,
558 			  const struct sys_reg_desc *r)
559 {
560 	bool was_enabled = vcpu_has_cache_enabled(vcpu);
561 	u64 val, mask, shift;
562 
563 	BUG_ON(!p->is_write);
564 
565 	get_access_mask(r, &mask, &shift);
566 
567 	if (~mask) {
568 		val = vcpu_read_sys_reg(vcpu, r->reg);
569 		val &= ~mask;
570 	} else {
571 		val = 0;
572 	}
573 
574 	val |= (p->regval & (mask >> shift)) << shift;
575 	vcpu_write_sys_reg(vcpu, val, r->reg);
576 
577 	kvm_toggle_cache(vcpu, was_enabled);
578 	return true;
579 }
580 
access_actlr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)581 static bool access_actlr(struct kvm_vcpu *vcpu,
582 			 struct sys_reg_params *p,
583 			 const struct sys_reg_desc *r)
584 {
585 	u64 mask, shift;
586 
587 	if (p->is_write)
588 		return ignore_write(vcpu, p);
589 
590 	get_access_mask(r, &mask, &shift);
591 	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;
592 
593 	return true;
594 }
595 
596 /*
597  * Trap handler for the GICv3 SGI generation system register.
598  * Forward the request to the VGIC emulation.
599  * The cp15_64 code makes sure this automatically works
600  * for both AArch64 and AArch32 accesses.
601  */
access_gic_sgi(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)602 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
603 			   struct sys_reg_params *p,
604 			   const struct sys_reg_desc *r)
605 {
606 	bool g1;
607 
608 	if (!kvm_has_gicv3(vcpu->kvm))
609 		return undef_access(vcpu, p, r);
610 
611 	if (!p->is_write)
612 		return read_from_write_only(vcpu, p, r);
613 
614 	/*
615 	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
616 	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
617 	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
618 	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
619 	 * group.
620 	 */
621 	if (p->Op0 == 0) {		/* AArch32 */
622 		switch (p->Op1) {
623 		default:		/* Keep GCC quiet */
624 		case 0:			/* ICC_SGI1R */
625 			g1 = true;
626 			break;
627 		case 1:			/* ICC_ASGI1R */
628 		case 2:			/* ICC_SGI0R */
629 			g1 = false;
630 			break;
631 		}
632 	} else {			/* AArch64 */
633 		switch (p->Op2) {
634 		default:		/* Keep GCC quiet */
635 		case 5:			/* ICC_SGI1R_EL1 */
636 			g1 = true;
637 			break;
638 		case 6:			/* ICC_ASGI1R_EL1 */
639 		case 7:			/* ICC_SGI0R_EL1 */
640 			g1 = false;
641 			break;
642 		}
643 	}
644 
645 	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
646 
647 	return true;
648 }
649 
access_gic_sre(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)650 static bool access_gic_sre(struct kvm_vcpu *vcpu,
651 			   struct sys_reg_params *p,
652 			   const struct sys_reg_desc *r)
653 {
654 	if (!kvm_has_gicv3(vcpu->kvm))
655 		return undef_access(vcpu, p, r);
656 
657 	if (p->is_write)
658 		return ignore_write(vcpu, p);
659 
660 	if (p->Op1 == 4) {	/* ICC_SRE_EL2 */
661 		p->regval = KVM_ICC_SRE_EL2;
662 	} else {		/* ICC_SRE_EL1 */
663 		p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
664 	}
665 
666 	return true;
667 }
668 
trap_raz_wi(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)669 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
670 			struct sys_reg_params *p,
671 			const struct sys_reg_desc *r)
672 {
673 	if (p->is_write)
674 		return ignore_write(vcpu, p);
675 	else
676 		return read_zero(vcpu, p);
677 }
678 
679 /*
680  * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
681  * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
682  * system, these registers should UNDEF. LORID_EL1 being a RO register, we
683  * treat it separately.
684  */
trap_loregion(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)685 static bool trap_loregion(struct kvm_vcpu *vcpu,
686 			  struct sys_reg_params *p,
687 			  const struct sys_reg_desc *r)
688 {
689 	u32 sr = reg_to_encoding(r);
690 
691 	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP))
692 		return undef_access(vcpu, p, r);
693 
694 	if (p->is_write && sr == SYS_LORID_EL1)
695 		return write_to_read_only(vcpu, p, r);
696 
697 	return trap_raz_wi(vcpu, p, r);
698 }
699 
trap_oslar_el1(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)700 static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
701 			   struct sys_reg_params *p,
702 			   const struct sys_reg_desc *r)
703 {
704 	if (!p->is_write)
705 		return read_from_write_only(vcpu, p, r);
706 
707 	kvm_debug_handle_oslar(vcpu, p->regval);
708 	return true;
709 }
710 
trap_oslsr_el1(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)711 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
712 			   struct sys_reg_params *p,
713 			   const struct sys_reg_desc *r)
714 {
715 	if (p->is_write)
716 		return write_to_read_only(vcpu, p, r);
717 
718 	p->regval = __vcpu_sys_reg(vcpu, r->reg);
719 	return true;
720 }
721 
set_oslsr_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)722 static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
723 			 u64 val)
724 {
725 	/*
726 	 * The only modifiable bit is the OSLK bit. Refuse the write if
727 	 * userspace attempts to change any other bit in the register.
728 	 */
729 	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
730 		return -EINVAL;
731 
732 	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
733 	return 0;
734 }
735 
trap_dbgauthstatus_el1(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)736 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
737 				   struct sys_reg_params *p,
738 				   const struct sys_reg_desc *r)
739 {
740 	if (p->is_write) {
741 		return ignore_write(vcpu, p);
742 	} else {
743 		p->regval = read_sysreg(dbgauthstatus_el1);
744 		return true;
745 	}
746 }
747 
trap_debug_regs(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)748 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
749 			    struct sys_reg_params *p,
750 			    const struct sys_reg_desc *r)
751 {
752 	access_rw(vcpu, p, r);
753 
754 	kvm_debug_set_guest_ownership(vcpu);
755 	return true;
756 }
757 
758 /*
759  * reg_to_dbg/dbg_to_reg
760  *
761  * A 32 bit write to a debug register leave top bits alone
762  * A 32 bit read from a debug register only returns the bottom bits
763  */
reg_to_dbg(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * rd,u64 * dbg_reg)764 static void reg_to_dbg(struct kvm_vcpu *vcpu,
765 		       struct sys_reg_params *p,
766 		       const struct sys_reg_desc *rd,
767 		       u64 *dbg_reg)
768 {
769 	u64 mask, shift, val;
770 
771 	get_access_mask(rd, &mask, &shift);
772 
773 	val = *dbg_reg;
774 	val &= ~mask;
775 	val |= (p->regval & (mask >> shift)) << shift;
776 	*dbg_reg = val;
777 }
778 
dbg_to_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * rd,u64 * dbg_reg)779 static void dbg_to_reg(struct kvm_vcpu *vcpu,
780 		       struct sys_reg_params *p,
781 		       const struct sys_reg_desc *rd,
782 		       u64 *dbg_reg)
783 {
784 	u64 mask, shift;
785 
786 	get_access_mask(rd, &mask, &shift);
787 	p->regval = (*dbg_reg & mask) >> shift;
788 }
789 
demux_wb_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)790 static u64 *demux_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
791 {
792 	struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state;
793 
794 	switch (rd->Op2) {
795 	case 0b100:
796 		return &dbg->dbg_bvr[rd->CRm];
797 	case 0b101:
798 		return &dbg->dbg_bcr[rd->CRm];
799 	case 0b110:
800 		return &dbg->dbg_wvr[rd->CRm];
801 	case 0b111:
802 		return &dbg->dbg_wcr[rd->CRm];
803 	default:
804 		KVM_BUG_ON(1, vcpu->kvm);
805 		return NULL;
806 	}
807 }
808 
trap_dbg_wb_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * rd)809 static bool trap_dbg_wb_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
810 			    const struct sys_reg_desc *rd)
811 {
812 	u64 *reg = demux_wb_reg(vcpu, rd);
813 
814 	if (!reg)
815 		return false;
816 
817 	if (p->is_write)
818 		reg_to_dbg(vcpu, p, rd, reg);
819 	else
820 		dbg_to_reg(vcpu, p, rd, reg);
821 
822 	kvm_debug_set_guest_ownership(vcpu);
823 	return true;
824 }
825 
set_dbg_wb_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)826 static int set_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
827 			  u64 val)
828 {
829 	u64 *reg = demux_wb_reg(vcpu, rd);
830 
831 	if (!reg)
832 		return -EINVAL;
833 
834 	*reg = val;
835 	return 0;
836 }
837 
get_dbg_wb_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 * val)838 static int get_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
839 			  u64 *val)
840 {
841 	u64 *reg = demux_wb_reg(vcpu, rd);
842 
843 	if (!reg)
844 		return -EINVAL;
845 
846 	*val = *reg;
847 	return 0;
848 }
849 
reset_dbg_wb_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)850 static u64 reset_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
851 {
852 	u64 *reg = demux_wb_reg(vcpu, rd);
853 
854 	/*
855 	 * Bail early if we couldn't find storage for the register, the
856 	 * KVM_BUG_ON() in demux_wb_reg() will prevent this VM from ever
857 	 * being run.
858 	 */
859 	if (!reg)
860 		return 0;
861 
862 	*reg = rd->val;
863 	return rd->val;
864 }
865 
reset_amair_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)866 static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
867 {
868 	u64 amair = read_sysreg(amair_el1);
869 	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
870 	return amair;
871 }
872 
reset_actlr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)873 static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
874 {
875 	u64 actlr = read_sysreg(actlr_el1);
876 	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
877 	return actlr;
878 }
879 
reset_mpidr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)880 static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
881 {
882 	u64 mpidr;
883 
884 	/*
885 	 * Map the vcpu_id into the first three affinity level fields of
886 	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
887 	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
888 	 * of the GICv3 to be able to address each CPU directly when
889 	 * sending IPIs.
890 	 */
891 	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
892 	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
893 	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
894 	mpidr |= (1ULL << 31);
895 	vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);
896 
897 	return mpidr;
898 }
899 
hidden_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)900 static unsigned int hidden_visibility(const struct kvm_vcpu *vcpu,
901 				      const struct sys_reg_desc *r)
902 {
903 	return REG_HIDDEN;
904 }
905 
pmu_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)906 static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
907 				   const struct sys_reg_desc *r)
908 {
909 	if (kvm_vcpu_has_pmu(vcpu))
910 		return 0;
911 
912 	return REG_HIDDEN;
913 }
914 
reset_pmu_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)915 static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
916 {
917 	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
918 	u8 n = vcpu->kvm->arch.nr_pmu_counters;
919 
920 	if (n)
921 		mask |= GENMASK(n - 1, 0);
922 
923 	reset_unknown(vcpu, r);
924 	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);
925 
926 	return __vcpu_sys_reg(vcpu, r->reg);
927 }
928 
reset_pmevcntr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)929 static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
930 {
931 	reset_unknown(vcpu, r);
932 	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));
933 
934 	return __vcpu_sys_reg(vcpu, r->reg);
935 }
936 
reset_pmevtyper(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)937 static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
938 {
939 	/* This thing will UNDEF, who cares about the reset value? */
940 	if (!kvm_vcpu_has_pmu(vcpu))
941 		return 0;
942 
943 	reset_unknown(vcpu, r);
944 	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));
945 
946 	return __vcpu_sys_reg(vcpu, r->reg);
947 }
948 
reset_pmselr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)949 static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
950 {
951 	reset_unknown(vcpu, r);
952 	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);
953 
954 	return __vcpu_sys_reg(vcpu, r->reg);
955 }
956 
reset_pmcr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)957 static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
958 {
959 	u64 pmcr = 0;
960 
961 	if (!kvm_supports_32bit_el0())
962 		pmcr |= ARMV8_PMU_PMCR_LC;
963 
964 	/*
965 	 * The value of PMCR.N field is included when the
966 	 * vCPU register is read via kvm_vcpu_read_pmcr().
967 	 */
968 	__vcpu_assign_sys_reg(vcpu, r->reg, pmcr);
969 
970 	return __vcpu_sys_reg(vcpu, r->reg);
971 }
972 
check_pmu_access_disabled(struct kvm_vcpu * vcpu,u64 flags)973 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
974 {
975 	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
976 	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
977 
978 	if (!enabled)
979 		kvm_inject_undefined(vcpu);
980 
981 	return !enabled;
982 }
983 
pmu_access_el0_disabled(struct kvm_vcpu * vcpu)984 static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
985 {
986 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
987 }
988 
pmu_write_swinc_el0_disabled(struct kvm_vcpu * vcpu)989 static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
990 {
991 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
992 }
993 
pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu * vcpu)994 static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
995 {
996 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
997 }
998 
pmu_access_event_counter_el0_disabled(struct kvm_vcpu * vcpu)999 static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
1000 {
1001 	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
1002 }
1003 
access_pmcr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1004 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1005 			const struct sys_reg_desc *r)
1006 {
1007 	u64 val;
1008 
1009 	if (pmu_access_el0_disabled(vcpu))
1010 		return false;
1011 
1012 	if (p->is_write) {
1013 		/*
1014 		 * Only update writeable bits of PMCR (continuing into
1015 		 * kvm_pmu_handle_pmcr() as well)
1016 		 */
1017 		val = kvm_vcpu_read_pmcr(vcpu);
1018 		val &= ~ARMV8_PMU_PMCR_MASK;
1019 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
1020 		if (!kvm_supports_32bit_el0())
1021 			val |= ARMV8_PMU_PMCR_LC;
1022 		kvm_pmu_handle_pmcr(vcpu, val);
1023 	} else {
1024 		/* PMCR.P & PMCR.C are RAZ */
1025 		val = kvm_vcpu_read_pmcr(vcpu)
1026 		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
1027 		p->regval = val;
1028 	}
1029 
1030 	return true;
1031 }
1032 
access_pmselr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1033 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1034 			  const struct sys_reg_desc *r)
1035 {
1036 	if (pmu_access_event_counter_el0_disabled(vcpu))
1037 		return false;
1038 
1039 	if (p->is_write)
1040 		__vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
1041 	else
1042 		/* return PMSELR.SEL field */
1043 		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
1044 			    & PMSELR_EL0_SEL_MASK;
1045 
1046 	return true;
1047 }
1048 
access_pmceid(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1049 static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1050 			  const struct sys_reg_desc *r)
1051 {
1052 	u64 pmceid, mask, shift;
1053 
1054 	BUG_ON(p->is_write);
1055 
1056 	if (pmu_access_el0_disabled(vcpu))
1057 		return false;
1058 
1059 	get_access_mask(r, &mask, &shift);
1060 
1061 	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
1062 	pmceid &= mask;
1063 	pmceid >>= shift;
1064 
1065 	p->regval = pmceid;
1066 
1067 	return true;
1068 }
1069 
pmu_counter_idx_valid(struct kvm_vcpu * vcpu,u64 idx)1070 static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
1071 {
1072 	u64 pmcr, val;
1073 
1074 	pmcr = kvm_vcpu_read_pmcr(vcpu);
1075 	val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
1076 	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
1077 		kvm_inject_undefined(vcpu);
1078 		return false;
1079 	}
1080 
1081 	return true;
1082 }
1083 
get_pmu_evcntr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 * val)1084 static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1085 			  u64 *val)
1086 {
1087 	u64 idx;
1088 
1089 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
1090 		/* PMCCNTR_EL0 */
1091 		idx = ARMV8_PMU_CYCLE_IDX;
1092 	else
1093 		/* PMEVCNTRn_EL0 */
1094 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1095 
1096 	*val = kvm_pmu_get_counter_value(vcpu, idx);
1097 	return 0;
1098 }
1099 
set_pmu_evcntr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 val)1100 static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1101 			  u64 val)
1102 {
1103 	u64 idx;
1104 
1105 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
1106 		/* PMCCNTR_EL0 */
1107 		idx = ARMV8_PMU_CYCLE_IDX;
1108 	else
1109 		/* PMEVCNTRn_EL0 */
1110 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1111 
1112 	kvm_pmu_set_counter_value_user(vcpu, idx, val);
1113 	return 0;
1114 }
1115 
access_pmu_evcntr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1116 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
1117 			      struct sys_reg_params *p,
1118 			      const struct sys_reg_desc *r)
1119 {
1120 	u64 idx = ~0UL;
1121 
1122 	if (r->CRn == 9 && r->CRm == 13) {
1123 		if (r->Op2 == 2) {
1124 			/* PMXEVCNTR_EL0 */
1125 			if (pmu_access_event_counter_el0_disabled(vcpu))
1126 				return false;
1127 
1128 			idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
1129 					    __vcpu_sys_reg(vcpu, PMSELR_EL0));
1130 		} else if (r->Op2 == 0) {
1131 			/* PMCCNTR_EL0 */
1132 			if (pmu_access_cycle_counter_el0_disabled(vcpu))
1133 				return false;
1134 
1135 			idx = ARMV8_PMU_CYCLE_IDX;
1136 		}
1137 	} else if (r->CRn == 0 && r->CRm == 9) {
1138 		/* PMCCNTR */
1139 		if (pmu_access_event_counter_el0_disabled(vcpu))
1140 			return false;
1141 
1142 		idx = ARMV8_PMU_CYCLE_IDX;
1143 	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
1144 		/* PMEVCNTRn_EL0 */
1145 		if (pmu_access_event_counter_el0_disabled(vcpu))
1146 			return false;
1147 
1148 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1149 	}
1150 
1151 	/* Catch any decoding mistake */
1152 	WARN_ON(idx == ~0UL);
1153 
1154 	if (!pmu_counter_idx_valid(vcpu, idx))
1155 		return false;
1156 
1157 	if (p->is_write) {
1158 		if (pmu_access_el0_disabled(vcpu))
1159 			return false;
1160 
1161 		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
1162 	} else {
1163 		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
1164 	}
1165 
1166 	return true;
1167 }
1168 
access_pmu_evtyper(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1169 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1170 			       const struct sys_reg_desc *r)
1171 {
1172 	u64 idx, reg;
1173 
1174 	if (pmu_access_el0_disabled(vcpu))
1175 		return false;
1176 
1177 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
1178 		/* PMXEVTYPER_EL0 */
1179 		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
1180 		reg = PMEVTYPER0_EL0 + idx;
1181 	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
1182 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1183 		if (idx == ARMV8_PMU_CYCLE_IDX)
1184 			reg = PMCCFILTR_EL0;
1185 		else
1186 			/* PMEVTYPERn_EL0 */
1187 			reg = PMEVTYPER0_EL0 + idx;
1188 	} else {
1189 		BUG();
1190 	}
1191 
1192 	if (!pmu_counter_idx_valid(vcpu, idx))
1193 		return false;
1194 
1195 	if (p->is_write) {
1196 		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
1197 		kvm_vcpu_pmu_restore_guest(vcpu);
1198 	} else {
1199 		p->regval = __vcpu_sys_reg(vcpu, reg);
1200 	}
1201 
1202 	return true;
1203 }
1204 
set_pmreg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 val)1205 static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
1206 {
1207 	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1208 
1209 	__vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
1210 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
1211 
1212 	return 0;
1213 }
1214 
get_pmreg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 * val)1215 static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
1216 {
1217 	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1218 
1219 	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
1220 	return 0;
1221 }
1222 
access_pmcnten(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1223 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1224 			   const struct sys_reg_desc *r)
1225 {
1226 	u64 val, mask;
1227 
1228 	if (pmu_access_el0_disabled(vcpu))
1229 		return false;
1230 
1231 	mask = kvm_pmu_accessible_counter_mask(vcpu);
1232 	if (p->is_write) {
1233 		val = p->regval & mask;
1234 		if (r->Op2 & 0x1)
1235 			/* accessing PMCNTENSET_EL0 */
1236 			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
1237 		else
1238 			/* accessing PMCNTENCLR_EL0 */
1239 			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
1240 
1241 		kvm_pmu_reprogram_counter_mask(vcpu, val);
1242 	} else {
1243 		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
1244 	}
1245 
1246 	return true;
1247 }
1248 
access_pminten(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1249 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1250 			   const struct sys_reg_desc *r)
1251 {
1252 	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1253 
1254 	if (check_pmu_access_disabled(vcpu, 0))
1255 		return false;
1256 
1257 	if (p->is_write) {
1258 		u64 val = p->regval & mask;
1259 
1260 		if (r->Op2 & 0x1)
1261 			/* accessing PMINTENSET_EL1 */
1262 			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
1263 		else
1264 			/* accessing PMINTENCLR_EL1 */
1265 			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
1266 	} else {
1267 		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
1268 	}
1269 
1270 	return true;
1271 }
1272 
access_pmovs(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1273 static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1274 			 const struct sys_reg_desc *r)
1275 {
1276 	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1277 
1278 	if (pmu_access_el0_disabled(vcpu))
1279 		return false;
1280 
1281 	if (p->is_write) {
1282 		if (r->CRm & 0x2)
1283 			/* accessing PMOVSSET_EL0 */
1284 			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
1285 		else
1286 			/* accessing PMOVSCLR_EL0 */
1287 			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
1288 	} else {
1289 		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
1290 	}
1291 
1292 	return true;
1293 }
1294 
access_pmswinc(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1295 static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1296 			   const struct sys_reg_desc *r)
1297 {
1298 	u64 mask;
1299 
1300 	if (!p->is_write)
1301 		return read_from_write_only(vcpu, p, r);
1302 
1303 	if (pmu_write_swinc_el0_disabled(vcpu))
1304 		return false;
1305 
1306 	mask = kvm_pmu_accessible_counter_mask(vcpu);
1307 	kvm_pmu_software_increment(vcpu, p->regval & mask);
1308 	return true;
1309 }
1310 
access_pmuserenr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1311 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1312 			     const struct sys_reg_desc *r)
1313 {
1314 	if (p->is_write) {
1315 		if (!vcpu_mode_priv(vcpu))
1316 			return undef_access(vcpu, p, r);
1317 
1318 		__vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
1319 				      (p->regval & ARMV8_PMU_USERENR_MASK));
1320 	} else {
1321 		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
1322 			    & ARMV8_PMU_USERENR_MASK;
1323 	}
1324 
1325 	return true;
1326 }
1327 
get_pmcr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 * val)1328 static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1329 		    u64 *val)
1330 {
1331 	*val = kvm_vcpu_read_pmcr(vcpu);
1332 	return 0;
1333 }
1334 
set_pmcr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 val)1335 static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1336 		    u64 val)
1337 {
1338 	u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
1339 	struct kvm *kvm = vcpu->kvm;
1340 
1341 	mutex_lock(&kvm->arch.config_lock);
1342 
1343 	/*
1344 	 * The vCPU can't have more counters than the PMU hardware
1345 	 * implements. Ignore this error to maintain compatibility
1346 	 * with the existing KVM behavior.
1347 	 */
1348 	if (!kvm_vm_has_ran_once(kvm) &&
1349 	    !vcpu_has_nv(vcpu)	      &&
1350 	    new_n <= kvm_arm_pmu_get_max_counters(kvm))
1351 		kvm->arch.nr_pmu_counters = new_n;
1352 
1353 	mutex_unlock(&kvm->arch.config_lock);
1354 
1355 	/*
1356 	 * Ignore writes to RES0 bits, read only bits that are cleared on
1357 	 * vCPU reset, and writable bits that KVM doesn't support yet.
1358 	 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
1359 	 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
1360 	 * But, we leave the bit as it is here, as the vCPU's PMUver might
1361 	 * be changed later (NOTE: the bit will be cleared on first vCPU run
1362 	 * if necessary).
1363 	 */
1364 	val &= ARMV8_PMU_PMCR_MASK;
1365 
1366 	/* The LC bit is RES1 when AArch32 is not supported */
1367 	if (!kvm_supports_32bit_el0())
1368 		val |= ARMV8_PMU_PMCR_LC;
1369 
1370 	__vcpu_assign_sys_reg(vcpu, r->reg, val);
1371 	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
1372 
1373 	return 0;
1374 }
1375 
1376 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
1377 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
1378 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
1379 	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
1380 	  get_dbg_wb_reg, set_dbg_wb_reg },				\
1381 	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
1382 	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
1383 	  get_dbg_wb_reg, set_dbg_wb_reg },				\
1384 	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
1385 	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
1386 	  get_dbg_wb_reg, set_dbg_wb_reg },				\
1387 	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
1388 	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
1389 	  get_dbg_wb_reg, set_dbg_wb_reg }
1390 
1391 #define PMU_SYS_REG(name)						\
1392 	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
1393 	.visibility = pmu_visibility
1394 
1395 /* Macro to expand the PMEVCNTRn_EL0 register */
1396 #define PMU_PMEVCNTR_EL0(n)						\
1397 	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
1398 	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
1399 	  .set_user = set_pmu_evcntr,					\
1400 	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
1401 
1402 /* Macro to expand the PMEVTYPERn_EL0 register */
1403 #define PMU_PMEVTYPER_EL0(n)						\
1404 	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
1405 	  .reset = reset_pmevtyper,					\
1406 	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
1407 
1408 /* Macro to expand the AMU counter and type registers*/
1409 #define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
1410 #define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
1411 #define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
1412 #define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
1413 
ptrauth_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1414 static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1415 			const struct sys_reg_desc *rd)
1416 {
1417 	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
1418 }
1419 
1420 /*
1421  * If we land here on a PtrAuth access, that is because we didn't
1422  * fixup the access on exit by allowing the PtrAuth sysregs. The only
1423  * way this happens is when the guest does not have PtrAuth support
1424  * enabled.
1425  */
1426 #define __PTRAUTH_KEY(k)						\
1427 	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
1428 	.visibility = ptrauth_visibility}
1429 
1430 #define PTRAUTH_KEY(k)							\
1431 	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
1432 	__PTRAUTH_KEY(k ## KEYHI_EL1)
1433 
access_arch_timer(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1434 static bool access_arch_timer(struct kvm_vcpu *vcpu,
1435 			      struct sys_reg_params *p,
1436 			      const struct sys_reg_desc *r)
1437 {
1438 	enum kvm_arch_timers tmr;
1439 	enum kvm_arch_timer_regs treg;
1440 	u64 reg = reg_to_encoding(r);
1441 
1442 	switch (reg) {
1443 	case SYS_CNTP_TVAL_EL0:
1444 		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1445 			tmr = TIMER_HPTIMER;
1446 		else
1447 			tmr = TIMER_PTIMER;
1448 		treg = TIMER_REG_TVAL;
1449 		break;
1450 
1451 	case SYS_CNTV_TVAL_EL0:
1452 		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1453 			tmr = TIMER_HVTIMER;
1454 		else
1455 			tmr = TIMER_VTIMER;
1456 		treg = TIMER_REG_TVAL;
1457 		break;
1458 
1459 	case SYS_AARCH32_CNTP_TVAL:
1460 	case SYS_CNTP_TVAL_EL02:
1461 		tmr = TIMER_PTIMER;
1462 		treg = TIMER_REG_TVAL;
1463 		break;
1464 
1465 	case SYS_CNTV_TVAL_EL02:
1466 		tmr = TIMER_VTIMER;
1467 		treg = TIMER_REG_TVAL;
1468 		break;
1469 
1470 	case SYS_CNTHP_TVAL_EL2:
1471 		tmr = TIMER_HPTIMER;
1472 		treg = TIMER_REG_TVAL;
1473 		break;
1474 
1475 	case SYS_CNTHV_TVAL_EL2:
1476 		tmr = TIMER_HVTIMER;
1477 		treg = TIMER_REG_TVAL;
1478 		break;
1479 
1480 	case SYS_CNTP_CTL_EL0:
1481 		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1482 			tmr = TIMER_HPTIMER;
1483 		else
1484 			tmr = TIMER_PTIMER;
1485 		treg = TIMER_REG_CTL;
1486 		break;
1487 
1488 	case SYS_CNTV_CTL_EL0:
1489 		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1490 			tmr = TIMER_HVTIMER;
1491 		else
1492 			tmr = TIMER_VTIMER;
1493 		treg = TIMER_REG_CTL;
1494 		break;
1495 
1496 	case SYS_AARCH32_CNTP_CTL:
1497 	case SYS_CNTP_CTL_EL02:
1498 		tmr = TIMER_PTIMER;
1499 		treg = TIMER_REG_CTL;
1500 		break;
1501 
1502 	case SYS_CNTV_CTL_EL02:
1503 		tmr = TIMER_VTIMER;
1504 		treg = TIMER_REG_CTL;
1505 		break;
1506 
1507 	case SYS_CNTHP_CTL_EL2:
1508 		tmr = TIMER_HPTIMER;
1509 		treg = TIMER_REG_CTL;
1510 		break;
1511 
1512 	case SYS_CNTHV_CTL_EL2:
1513 		tmr = TIMER_HVTIMER;
1514 		treg = TIMER_REG_CTL;
1515 		break;
1516 
1517 	case SYS_CNTP_CVAL_EL0:
1518 		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1519 			tmr = TIMER_HPTIMER;
1520 		else
1521 			tmr = TIMER_PTIMER;
1522 		treg = TIMER_REG_CVAL;
1523 		break;
1524 
1525 	case SYS_CNTV_CVAL_EL0:
1526 		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1527 			tmr = TIMER_HVTIMER;
1528 		else
1529 			tmr = TIMER_VTIMER;
1530 		treg = TIMER_REG_CVAL;
1531 		break;
1532 
1533 	case SYS_AARCH32_CNTP_CVAL:
1534 	case SYS_CNTP_CVAL_EL02:
1535 		tmr = TIMER_PTIMER;
1536 		treg = TIMER_REG_CVAL;
1537 		break;
1538 
1539 	case SYS_CNTV_CVAL_EL02:
1540 		tmr = TIMER_VTIMER;
1541 		treg = TIMER_REG_CVAL;
1542 		break;
1543 
1544 	case SYS_CNTHP_CVAL_EL2:
1545 		tmr = TIMER_HPTIMER;
1546 		treg = TIMER_REG_CVAL;
1547 		break;
1548 
1549 	case SYS_CNTHV_CVAL_EL2:
1550 		tmr = TIMER_HVTIMER;
1551 		treg = TIMER_REG_CVAL;
1552 		break;
1553 
1554 	case SYS_CNTPCT_EL0:
1555 	case SYS_CNTPCTSS_EL0:
1556 		if (is_hyp_ctxt(vcpu))
1557 			tmr = TIMER_HPTIMER;
1558 		else
1559 			tmr = TIMER_PTIMER;
1560 		treg = TIMER_REG_CNT;
1561 		break;
1562 
1563 	case SYS_AARCH32_CNTPCT:
1564 	case SYS_AARCH32_CNTPCTSS:
1565 		tmr = TIMER_PTIMER;
1566 		treg = TIMER_REG_CNT;
1567 		break;
1568 
1569 	case SYS_CNTVCT_EL0:
1570 	case SYS_CNTVCTSS_EL0:
1571 		if (is_hyp_ctxt(vcpu))
1572 			tmr = TIMER_HVTIMER;
1573 		else
1574 			tmr = TIMER_VTIMER;
1575 		treg = TIMER_REG_CNT;
1576 		break;
1577 
1578 	case SYS_AARCH32_CNTVCT:
1579 	case SYS_AARCH32_CNTVCTSS:
1580 		tmr = TIMER_VTIMER;
1581 		treg = TIMER_REG_CNT;
1582 		break;
1583 
1584 	default:
1585 		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
1586 		return undef_access(vcpu, p, r);
1587 	}
1588 
1589 	if (p->is_write)
1590 		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
1591 	else
1592 		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
1593 
1594 	return true;
1595 }
1596 
arch_timer_set_user(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)1597 static int arch_timer_set_user(struct kvm_vcpu *vcpu,
1598 			       const struct sys_reg_desc *rd,
1599 			       u64 val)
1600 {
1601 	switch (reg_to_encoding(rd)) {
1602 	case SYS_CNTV_CTL_EL0:
1603 	case SYS_CNTP_CTL_EL0:
1604 	case SYS_CNTHV_CTL_EL2:
1605 	case SYS_CNTHP_CTL_EL2:
1606 		val &= ~ARCH_TIMER_CTRL_IT_STAT;
1607 		break;
1608 	case SYS_CNTVCT_EL0:
1609 		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
1610 			timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read() - val);
1611 		return 0;
1612 	case SYS_CNTPCT_EL0:
1613 		if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
1614 			timer_set_offset(vcpu_ptimer(vcpu), kvm_phys_timer_read() - val);
1615 		return 0;
1616 	}
1617 
1618 	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
1619 	return 0;
1620 }
1621 
arch_timer_get_user(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 * val)1622 static int arch_timer_get_user(struct kvm_vcpu *vcpu,
1623 			       const struct sys_reg_desc *rd,
1624 			       u64 *val)
1625 {
1626 	switch (reg_to_encoding(rd)) {
1627 	case SYS_CNTVCT_EL0:
1628 		*val = kvm_phys_timer_read() - timer_get_offset(vcpu_vtimer(vcpu));
1629 		break;
1630 	case SYS_CNTPCT_EL0:
1631 		*val = kvm_phys_timer_read() - timer_get_offset(vcpu_ptimer(vcpu));
1632 		break;
1633 	default:
1634 		*val = __vcpu_sys_reg(vcpu, rd->reg);
1635 	}
1636 
1637 	return 0;
1638 }
1639 
kvm_arm64_ftr_safe_value(u32 id,const struct arm64_ftr_bits * ftrp,s64 new,s64 cur)1640 static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
1641 				    s64 new, s64 cur)
1642 {
1643 	struct arm64_ftr_bits kvm_ftr = *ftrp;
1644 
1645 	/* Some features have different safe value type in KVM than host features */
1646 	switch (id) {
1647 	case SYS_ID_AA64DFR0_EL1:
1648 		switch (kvm_ftr.shift) {
1649 		case ID_AA64DFR0_EL1_PMUVer_SHIFT:
1650 			kvm_ftr.type = FTR_LOWER_SAFE;
1651 			break;
1652 		case ID_AA64DFR0_EL1_DebugVer_SHIFT:
1653 			kvm_ftr.type = FTR_LOWER_SAFE;
1654 			break;
1655 		}
1656 		break;
1657 	case SYS_ID_DFR0_EL1:
1658 		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
1659 			kvm_ftr.type = FTR_LOWER_SAFE;
1660 		break;
1661 	}
1662 
1663 	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
1664 }
1665 
1666 /*
1667  * arm64_check_features() - Check if a feature register value constitutes
1668  * a subset of features indicated by the idreg's KVM sanitised limit.
1669  *
1670  * This function will check if each feature field of @val is the "safe" value
1671  * against idreg's KVM sanitised limit return from reset() callback.
1672  * If a field value in @val is the same as the one in limit, it is always
1673  * considered the safe value regardless For register fields that are not in
1674  * writable, only the value in limit is considered the safe value.
1675  *
1676  * Return: 0 if all the fields are safe. Otherwise, return negative errno.
1677  */
arm64_check_features(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)1678 static int arm64_check_features(struct kvm_vcpu *vcpu,
1679 				const struct sys_reg_desc *rd,
1680 				u64 val)
1681 {
1682 	const struct arm64_ftr_reg *ftr_reg;
1683 	const struct arm64_ftr_bits *ftrp = NULL;
1684 	u32 id = reg_to_encoding(rd);
1685 	u64 writable_mask = rd->val;
1686 	u64 limit = rd->reset(vcpu, rd);
1687 	u64 mask = 0;
1688 
1689 	/*
1690 	 * Hidden and unallocated ID registers may not have a corresponding
1691 	 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
1692 	 * only safe value is 0.
1693 	 */
1694 	if (sysreg_visible_as_raz(vcpu, rd))
1695 		return val ? -E2BIG : 0;
1696 
1697 	ftr_reg = get_arm64_ftr_reg(id);
1698 	if (!ftr_reg)
1699 		return -EINVAL;
1700 
1701 	ftrp = ftr_reg->ftr_bits;
1702 
1703 	for (; ftrp && ftrp->width; ftrp++) {
1704 		s64 f_val, f_lim, safe_val;
1705 		u64 ftr_mask;
1706 
1707 		ftr_mask = arm64_ftr_mask(ftrp);
1708 		if ((ftr_mask & writable_mask) != ftr_mask)
1709 			continue;
1710 
1711 		f_val = arm64_ftr_value(ftrp, val);
1712 		f_lim = arm64_ftr_value(ftrp, limit);
1713 		mask |= ftr_mask;
1714 
1715 		if (f_val == f_lim)
1716 			safe_val = f_val;
1717 		else
1718 			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);
1719 
1720 		if (safe_val != f_val)
1721 			return -E2BIG;
1722 	}
1723 
1724 	/* For fields that are not writable, values in limit are the safe values. */
1725 	if ((val & ~mask) != (limit & ~mask))
1726 		return -E2BIG;
1727 
1728 	return 0;
1729 }
1730 
pmuver_to_perfmon(u8 pmuver)1731 static u8 pmuver_to_perfmon(u8 pmuver)
1732 {
1733 	switch (pmuver) {
1734 	case ID_AA64DFR0_EL1_PMUVer_IMP:
1735 		return ID_DFR0_EL1_PerfMon_PMUv3;
1736 	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
1737 		return ID_DFR0_EL1_PerfMon_IMPDEF;
1738 	default:
1739 		/* Anything ARMv8.1+ and NI have the same value. For now. */
1740 		return pmuver;
1741 	}
1742 }
1743 
1744 static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
1745 static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val);
1746 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
1747 
1748 /* Read a sanitised cpufeature ID register by sys_reg_desc */
__kvm_read_sanitised_id_reg(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1749 static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
1750 				       const struct sys_reg_desc *r)
1751 {
1752 	u32 id = reg_to_encoding(r);
1753 	u64 val;
1754 
1755 	if (sysreg_visible_as_raz(vcpu, r))
1756 		return 0;
1757 
1758 	val = read_sanitised_ftr_reg(id);
1759 
1760 	switch (id) {
1761 	case SYS_ID_AA64DFR0_EL1:
1762 		val = sanitise_id_aa64dfr0_el1(vcpu, val);
1763 		break;
1764 	case SYS_ID_AA64PFR0_EL1:
1765 		val = sanitise_id_aa64pfr0_el1(vcpu, val);
1766 		break;
1767 	case SYS_ID_AA64PFR1_EL1:
1768 		val = sanitise_id_aa64pfr1_el1(vcpu, val);
1769 		break;
1770 	case SYS_ID_AA64PFR2_EL1:
1771 		val &= ID_AA64PFR2_EL1_FPMR |
1772 			(kvm_has_mte(vcpu->kvm) ?
1773 			 ID_AA64PFR2_EL1_MTEFAR | ID_AA64PFR2_EL1_MTESTOREONLY :
1774 			 0);
1775 		break;
1776 	case SYS_ID_AA64ISAR1_EL1:
1777 		if (!vcpu_has_ptrauth(vcpu))
1778 			val &= ~(ID_AA64ISAR1_EL1_APA |
1779 				 ID_AA64ISAR1_EL1_API |
1780 				 ID_AA64ISAR1_EL1_GPA |
1781 				 ID_AA64ISAR1_EL1_GPI);
1782 		break;
1783 	case SYS_ID_AA64ISAR2_EL1:
1784 		if (!vcpu_has_ptrauth(vcpu))
1785 			val &= ~(ID_AA64ISAR2_EL1_APA3 |
1786 				 ID_AA64ISAR2_EL1_GPA3);
1787 		if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
1788 		    has_broken_cntvoff())
1789 			val &= ~ID_AA64ISAR2_EL1_WFxT;
1790 		break;
1791 	case SYS_ID_AA64ISAR3_EL1:
1792 		val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_LSFE |
1793 			ID_AA64ISAR3_EL1_FAMINMAX;
1794 		break;
1795 	case SYS_ID_AA64MMFR2_EL1:
1796 		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
1797 		val &= ~ID_AA64MMFR2_EL1_NV;
1798 		break;
1799 	case SYS_ID_AA64MMFR3_EL1:
1800 		val &= ID_AA64MMFR3_EL1_TCRX |
1801 		       ID_AA64MMFR3_EL1_SCTLRX |
1802 		       ID_AA64MMFR3_EL1_S1POE |
1803 		       ID_AA64MMFR3_EL1_S1PIE;
1804 		break;
1805 	case SYS_ID_MMFR4_EL1:
1806 		val &= ~ID_MMFR4_EL1_CCIDX;
1807 		break;
1808 	}
1809 
1810 	if (vcpu_has_nv(vcpu))
1811 		val = limit_nv_id_reg(vcpu->kvm, id, val);
1812 
1813 	return val;
1814 }
1815 
kvm_read_sanitised_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1816 static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
1817 				     const struct sys_reg_desc *r)
1818 {
1819 	return __kvm_read_sanitised_id_reg(vcpu, r);
1820 }
1821 
read_id_reg(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1822 static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1823 {
1824 	return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r));
1825 }
1826 
is_feature_id_reg(u32 encoding)1827 static bool is_feature_id_reg(u32 encoding)
1828 {
1829 	return (sys_reg_Op0(encoding) == 3 &&
1830 		(sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
1831 		sys_reg_CRn(encoding) == 0 &&
1832 		sys_reg_CRm(encoding) <= 7);
1833 }
1834 
1835 /*
1836  * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
1837  * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
1838  * registers KVM maintains on a per-VM basis.
1839  *
1840  * Additionally, the implementation ID registers and CTR_EL0 are handled as
1841  * per-VM registers.
1842  */
is_vm_ftr_id_reg(u32 id)1843 static inline bool is_vm_ftr_id_reg(u32 id)
1844 {
1845 	switch (id) {
1846 	case SYS_CTR_EL0:
1847 	case SYS_MIDR_EL1:
1848 	case SYS_REVIDR_EL1:
1849 	case SYS_AIDR_EL1:
1850 		return true;
1851 	default:
1852 		return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1853 			sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1854 			sys_reg_CRm(id) < 8);
1855 
1856 	}
1857 }
1858 
is_vcpu_ftr_id_reg(u32 id)1859 static inline bool is_vcpu_ftr_id_reg(u32 id)
1860 {
1861 	return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
1862 }
1863 
is_aa32_id_reg(u32 id)1864 static inline bool is_aa32_id_reg(u32 id)
1865 {
1866 	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1867 		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1868 		sys_reg_CRm(id) <= 3);
1869 }
1870 
id_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1871 static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
1872 				  const struct sys_reg_desc *r)
1873 {
1874 	u32 id = reg_to_encoding(r);
1875 
1876 	switch (id) {
1877 	case SYS_ID_AA64ZFR0_EL1:
1878 		if (!vcpu_has_sve(vcpu))
1879 			return REG_RAZ;
1880 		break;
1881 	}
1882 
1883 	return 0;
1884 }
1885 
aa32_id_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1886 static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
1887 				       const struct sys_reg_desc *r)
1888 {
1889 	/*
1890 	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
1891 	 * EL. Promote to RAZ/WI in order to guarantee consistency between
1892 	 * systems.
1893 	 */
1894 	if (!kvm_supports_32bit_el0())
1895 		return REG_RAZ | REG_USER_WI;
1896 
1897 	return id_visibility(vcpu, r);
1898 }
1899 
raz_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)1900 static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
1901 				   const struct sys_reg_desc *r)
1902 {
1903 	return REG_RAZ;
1904 }
1905 
1906 /* cpufeature ID register access trap handlers */
1907 
access_id_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)1908 static bool access_id_reg(struct kvm_vcpu *vcpu,
1909 			  struct sys_reg_params *p,
1910 			  const struct sys_reg_desc *r)
1911 {
1912 	if (p->is_write)
1913 		return write_to_read_only(vcpu, p, r);
1914 
1915 	p->regval = read_id_reg(vcpu, r);
1916 
1917 	return true;
1918 }
1919 
1920 /* Visibility overrides for SVE-specific control registers */
sve_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1921 static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
1922 				   const struct sys_reg_desc *rd)
1923 {
1924 	if (vcpu_has_sve(vcpu))
1925 		return 0;
1926 
1927 	return REG_HIDDEN;
1928 }
1929 
sme_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1930 static unsigned int sme_visibility(const struct kvm_vcpu *vcpu,
1931 				   const struct sys_reg_desc *rd)
1932 {
1933 	if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP))
1934 		return 0;
1935 
1936 	return REG_HIDDEN;
1937 }
1938 
fp8_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)1939 static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
1940 				   const struct sys_reg_desc *rd)
1941 {
1942 	if (kvm_has_fpmr(vcpu->kvm))
1943 		return 0;
1944 
1945 	return REG_HIDDEN;
1946 }
1947 
sanitise_id_aa64pfr0_el1(const struct kvm_vcpu * vcpu,u64 val)1948 static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
1949 {
1950 	if (!vcpu_has_sve(vcpu))
1951 		val &= ~ID_AA64PFR0_EL1_SVE_MASK;
1952 
1953 	/*
1954 	 * The default is to expose CSV2 == 1 if the HW isn't affected.
1955 	 * Although this is a per-CPU feature, we make it global because
1956 	 * asymmetric systems are just a nuisance.
1957 	 *
1958 	 * Userspace can override this as long as it doesn't promise
1959 	 * the impossible.
1960 	 */
1961 	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
1962 		val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
1963 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
1964 	}
1965 	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
1966 		val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
1967 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
1968 	}
1969 
1970 	if (vgic_is_v3(vcpu->kvm)) {
1971 		val &= ~ID_AA64PFR0_EL1_GIC_MASK;
1972 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
1973 	}
1974 
1975 	val &= ~ID_AA64PFR0_EL1_AMU_MASK;
1976 
1977 	/*
1978 	 * MPAM is disabled by default as KVM also needs a set of PARTID to
1979 	 * program the MPAMVPMx_EL2 PARTID remapping registers with. But some
1980 	 * older kernels let the guest see the ID bit.
1981 	 */
1982 	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
1983 
1984 	return val;
1985 }
1986 
sanitise_id_aa64pfr1_el1(const struct kvm_vcpu * vcpu,u64 val)1987 static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
1988 {
1989 	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1990 
1991 	if (!kvm_has_mte(vcpu->kvm)) {
1992 		val &= ~ID_AA64PFR1_EL1_MTE;
1993 		val &= ~ID_AA64PFR1_EL1_MTE_frac;
1994 	}
1995 
1996 	if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
1997 	      SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
1998 		val &= ~ID_AA64PFR1_EL1_RAS_frac;
1999 
2000 	val &= ~ID_AA64PFR1_EL1_SME;
2001 	val &= ~ID_AA64PFR1_EL1_RNDR_trap;
2002 	val &= ~ID_AA64PFR1_EL1_NMI;
2003 	val &= ~ID_AA64PFR1_EL1_GCS;
2004 	val &= ~ID_AA64PFR1_EL1_THE;
2005 	val &= ~ID_AA64PFR1_EL1_MTEX;
2006 	val &= ~ID_AA64PFR1_EL1_PFAR;
2007 	val &= ~ID_AA64PFR1_EL1_MPAM_frac;
2008 
2009 	return val;
2010 }
2011 
sanitise_id_aa64dfr0_el1(const struct kvm_vcpu * vcpu,u64 val)2012 static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
2013 {
2014 	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
2015 
2016 	/*
2017 	 * Only initialize the PMU version if the vCPU was configured with one.
2018 	 */
2019 	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
2020 	if (kvm_vcpu_has_pmu(vcpu))
2021 		val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
2022 				      kvm_arm_pmu_get_pmuver_limit());
2023 
2024 	/* Hide SPE from guests */
2025 	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
2026 
2027 	/* Hide BRBE from guests */
2028 	val &= ~ID_AA64DFR0_EL1_BRBE_MASK;
2029 
2030 	return val;
2031 }
2032 
2033 /*
2034  * Older versions of KVM erroneously claim support for FEAT_DoubleLock with
2035  * NV-enabled VMs on unsupporting hardware. Silently ignore the incorrect
2036  * value if it is consistent with the bug.
2037  */
ignore_feat_doublelock(struct kvm_vcpu * vcpu,u64 val)2038 static bool ignore_feat_doublelock(struct kvm_vcpu *vcpu, u64 val)
2039 {
2040 	u8 host, user;
2041 
2042 	if (!vcpu_has_nv(vcpu))
2043 		return false;
2044 
2045 	host = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock,
2046 			     read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1));
2047 	user = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock, val);
2048 
2049 	return host == ID_AA64DFR0_EL1_DoubleLock_NI &&
2050 	       user == ID_AA64DFR0_EL1_DoubleLock_IMP;
2051 }
2052 
set_id_aa64dfr0_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)2053 static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
2054 			       const struct sys_reg_desc *rd,
2055 			       u64 val)
2056 {
2057 	u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
2058 	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
2059 
2060 	/*
2061 	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
2062 	 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
2063 	 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
2064 	 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
2065 	 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
2066 	 *
2067 	 * At minimum, we're on the hook to allow values that were given to
2068 	 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
2069 	 * with a more sensible NI. The value of an ID register changing under
2070 	 * the nose of the guest is unfortunate, but is certainly no more
2071 	 * surprising than an ill-guided PMU driver poking at impdef system
2072 	 * registers that end in an UNDEF...
2073 	 */
2074 	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
2075 		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
2076 
2077 	/*
2078 	 * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
2079 	 * nonzero minimum safe value.
2080 	 */
2081 	if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
2082 		return -EINVAL;
2083 
2084 	if (ignore_feat_doublelock(vcpu, val)) {
2085 		val &= ~ID_AA64DFR0_EL1_DoubleLock;
2086 		val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DoubleLock, NI);
2087 	}
2088 
2089 	return set_id_reg(vcpu, rd, val);
2090 }
2091 
read_sanitised_id_dfr0_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2092 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
2093 				      const struct sys_reg_desc *rd)
2094 {
2095 	u8 perfmon;
2096 	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
2097 
2098 	val &= ~ID_DFR0_EL1_PerfMon_MASK;
2099 	if (kvm_vcpu_has_pmu(vcpu)) {
2100 		perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
2101 		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
2102 	}
2103 
2104 	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);
2105 
2106 	return val;
2107 }
2108 
set_id_dfr0_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)2109 static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
2110 			   const struct sys_reg_desc *rd,
2111 			   u64 val)
2112 {
2113 	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
2114 	u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);
2115 
2116 	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
2117 		val &= ~ID_DFR0_EL1_PerfMon_MASK;
2118 		perfmon = 0;
2119 	}
2120 
2121 	/*
2122 	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
2123 	 * it doesn't promise more than what the HW gives us on the
2124 	 * AArch64 side (as everything is emulated with that), and
2125 	 * that this is a PMUv3.
2126 	 */
2127 	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
2128 		return -EINVAL;
2129 
2130 	if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
2131 		return -EINVAL;
2132 
2133 	return set_id_reg(vcpu, rd, val);
2134 }
2135 
set_id_aa64pfr0_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 user_val)2136 static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
2137 			       const struct sys_reg_desc *rd, u64 user_val)
2138 {
2139 	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2140 	u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
2141 
2142 	/*
2143 	 * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
2144 	 * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
2145 	 * guests, but didn't add trap handling. KVM doesn't support MPAM and
2146 	 * always returns an UNDEF for these registers. The guest must see 0
2147 	 * for this field.
2148 	 *
2149 	 * But KVM must also accept values from user-space that were provided
2150 	 * by KVM. On CPUs that support MPAM, permit user-space to write
2151 	 * the sanitizied value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
2152 	 */
2153 	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
2154 		user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
2155 
2156 	/* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
2157 	if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
2158 	    !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
2159 	    (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
2160 		return -EINVAL;
2161 
2162 	/*
2163 	 * If we are running on a GICv5 host and support FEAT_GCIE_LEGACY, then
2164 	 * we support GICv3. Fail attempts to do anything but set that to IMP.
2165 	 */
2166 	if (vgic_is_v3_compat(vcpu->kvm) &&
2167 	    FIELD_GET(ID_AA64PFR0_EL1_GIC_MASK, user_val) != ID_AA64PFR0_EL1_GIC_IMP)
2168 		return -EINVAL;
2169 
2170 	return set_id_reg(vcpu, rd, user_val);
2171 }
2172 
set_id_aa64pfr1_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 user_val)2173 static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
2174 			       const struct sys_reg_desc *rd, u64 user_val)
2175 {
2176 	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
2177 	u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
2178 	u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val);
2179 	u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val);
2180 	u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val);
2181 
2182 	/* See set_id_aa64pfr0_el1 for comment about MPAM */
2183 	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
2184 		user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
2185 
2186 	/*
2187 	 * Previously MTE_frac was hidden from guest. However, if the
2188 	 * hardware supports MTE2 but not MTE_ASYM_FAULT then a value
2189 	 * of 0 for this field indicates that the hardware supports
2190 	 * MTE_ASYNC. Whereas, 0xf indicates MTE_ASYNC is not supported.
2191 	 *
2192 	 * As KVM must accept values from KVM provided by user-space,
2193 	 * when ID_AA64PFR1_EL1.MTE is 2 allow user-space to set
2194 	 * ID_AA64PFR1_EL1.MTE_frac to 0. However, ignore it to avoid
2195 	 * incorrectly claiming hardware support for MTE_ASYNC in the
2196 	 * guest.
2197 	 */
2198 
2199 	if (mte == ID_AA64PFR1_EL1_MTE_MTE2 &&
2200 	    hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI &&
2201 	    user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) {
2202 		user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
2203 		user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK;
2204 	}
2205 
2206 	return set_id_reg(vcpu, rd, user_val);
2207 }
2208 
2209 /*
2210  * Allow userspace to de-feature a stage-2 translation granule but prevent it
2211  * from claiming the impossible.
2212  */
2213 #define tgran2_val_allowed(tg, safe, user)			\
2214 ({								\
2215 	u8 __s = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, safe);	\
2216 	u8 __u = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, user);	\
2217 								\
2218 	__s == __u || __u == ID_AA64MMFR0_EL1_##tg##_NI;	\
2219 })
2220 
set_id_aa64mmfr0_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 user_val)2221 static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
2222 				const struct sys_reg_desc *rd, u64 user_val)
2223 {
2224 	u64 sanitized_val = kvm_read_sanitised_id_reg(vcpu, rd);
2225 
2226 	if (!vcpu_has_nv(vcpu))
2227 		return set_id_reg(vcpu, rd, user_val);
2228 
2229 	if (!tgran2_val_allowed(TGRAN4_2, sanitized_val, user_val) ||
2230 	    !tgran2_val_allowed(TGRAN16_2, sanitized_val, user_val) ||
2231 	    !tgran2_val_allowed(TGRAN64_2, sanitized_val, user_val))
2232 		return -EINVAL;
2233 
2234 	return set_id_reg(vcpu, rd, user_val);
2235 }
2236 
set_id_aa64mmfr2_el1(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 user_val)2237 static int set_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu,
2238 				const struct sys_reg_desc *rd, u64 user_val)
2239 {
2240 	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
2241 	u64 nv_mask = ID_AA64MMFR2_EL1_NV_MASK;
2242 
2243 	/*
2244 	 * We made the mistake to expose the now deprecated NV field,
2245 	 * so allow userspace to write it, but silently ignore it.
2246 	 */
2247 	if ((hw_val & nv_mask) == (user_val & nv_mask))
2248 		user_val &= ~nv_mask;
2249 
2250 	return set_id_reg(vcpu, rd, user_val);
2251 }
2252 
set_ctr_el0(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 user_val)2253 static int set_ctr_el0(struct kvm_vcpu *vcpu,
2254 		       const struct sys_reg_desc *rd, u64 user_val)
2255 {
2256 	u8 user_L1Ip = SYS_FIELD_GET(CTR_EL0, L1Ip, user_val);
2257 
2258 	/*
2259 	 * Both AIVIVT (0b01) and VPIPT (0b00) are documented as reserved.
2260 	 * Hence only allow to set VIPT(0b10) or PIPT(0b11) for L1Ip based
2261 	 * on what hardware reports.
2262 	 *
2263 	 * Using a VIPT software model on PIPT will lead to over invalidation,
2264 	 * but still correct. Hence, we can allow downgrading PIPT to VIPT,
2265 	 * but not the other way around. This is handled via arm64_ftr_safe_value()
2266 	 * as CTR_EL0 ftr_bits has L1Ip field with type FTR_EXACT and safe value
2267 	 * set as VIPT.
2268 	 */
2269 	switch (user_L1Ip) {
2270 	case CTR_EL0_L1Ip_RESERVED_VPIPT:
2271 	case CTR_EL0_L1Ip_RESERVED_AIVIVT:
2272 		return -EINVAL;
2273 	case CTR_EL0_L1Ip_VIPT:
2274 	case CTR_EL0_L1Ip_PIPT:
2275 		return set_id_reg(vcpu, rd, user_val);
2276 	default:
2277 		return -ENOENT;
2278 	}
2279 }
2280 
2281 /*
2282  * cpufeature ID register user accessors
2283  *
2284  * For now, these registers are immutable for userspace, so no values
2285  * are stored, and for set_id_reg() we don't allow the effective value
2286  * to be changed.
2287  */
get_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 * val)2288 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2289 		      u64 *val)
2290 {
2291 	/*
2292 	 * Avoid locking if the VM has already started, as the ID registers are
2293 	 * guaranteed to be invariant at that point.
2294 	 */
2295 	if (kvm_vm_has_ran_once(vcpu->kvm)) {
2296 		*val = read_id_reg(vcpu, rd);
2297 		return 0;
2298 	}
2299 
2300 	mutex_lock(&vcpu->kvm->arch.config_lock);
2301 	*val = read_id_reg(vcpu, rd);
2302 	mutex_unlock(&vcpu->kvm->arch.config_lock);
2303 
2304 	return 0;
2305 }
2306 
set_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)2307 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2308 		      u64 val)
2309 {
2310 	u32 id = reg_to_encoding(rd);
2311 	int ret;
2312 
2313 	mutex_lock(&vcpu->kvm->arch.config_lock);
2314 
2315 	/*
2316 	 * Once the VM has started the ID registers are immutable. Reject any
2317 	 * write that does not match the final register value.
2318 	 */
2319 	if (kvm_vm_has_ran_once(vcpu->kvm)) {
2320 		if (val != read_id_reg(vcpu, rd))
2321 			ret = -EBUSY;
2322 		else
2323 			ret = 0;
2324 
2325 		mutex_unlock(&vcpu->kvm->arch.config_lock);
2326 		return ret;
2327 	}
2328 
2329 	ret = arm64_check_features(vcpu, rd, val);
2330 	if (!ret)
2331 		kvm_set_vm_id_reg(vcpu->kvm, id, val);
2332 
2333 	mutex_unlock(&vcpu->kvm->arch.config_lock);
2334 
2335 	/*
2336 	 * arm64_check_features() returns -E2BIG to indicate the register's
2337 	 * feature set is a superset of the maximally-allowed register value.
2338 	 * While it would be nice to precisely describe this to userspace, the
2339 	 * existing UAPI for KVM_SET_ONE_REG has it that invalid register
2340 	 * writes return -EINVAL.
2341 	 */
2342 	if (ret == -E2BIG)
2343 		ret = -EINVAL;
2344 	return ret;
2345 }
2346 
kvm_set_vm_id_reg(struct kvm * kvm,u32 reg,u64 val)2347 void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
2348 {
2349 	u64 *p = __vm_id_reg(&kvm->arch, reg);
2350 
2351 	lockdep_assert_held(&kvm->arch.config_lock);
2352 
2353 	if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm))
2354 		return;
2355 
2356 	*p = val;
2357 }
2358 
get_raz_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 * val)2359 static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2360 		       u64 *val)
2361 {
2362 	*val = 0;
2363 	return 0;
2364 }
2365 
set_wi_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)2366 static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2367 		      u64 val)
2368 {
2369 	return 0;
2370 }
2371 
access_ctr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2372 static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2373 		       const struct sys_reg_desc *r)
2374 {
2375 	if (p->is_write)
2376 		return write_to_read_only(vcpu, p, r);
2377 
2378 	p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0);
2379 	return true;
2380 }
2381 
access_clidr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2382 static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2383 			 const struct sys_reg_desc *r)
2384 {
2385 	if (p->is_write)
2386 		return write_to_read_only(vcpu, p, r);
2387 
2388 	p->regval = __vcpu_sys_reg(vcpu, r->reg);
2389 	return true;
2390 }
2391 
2392 /*
2393  * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
2394  * by the physical CPU which the vcpu currently resides in.
2395  */
reset_clidr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)2396 static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2397 {
2398 	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
2399 	u64 clidr;
2400 	u8 loc;
2401 
2402 	if ((ctr_el0 & CTR_EL0_IDC)) {
2403 		/*
2404 		 * Data cache clean to the PoU is not required so LoUU and LoUIS
2405 		 * will not be set and a unified cache, which will be marked as
2406 		 * LoC, will be added.
2407 		 *
2408 		 * If not DIC, let the unified cache L2 so that an instruction
2409 		 * cache can be added as L1 later.
2410 		 */
2411 		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
2412 		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
2413 	} else {
2414 		/*
2415 		 * Data cache clean to the PoU is required so let L1 have a data
2416 		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
2417 		 * it can be marked as LoC too.
2418 		 */
2419 		loc = 1;
2420 		clidr = 1 << CLIDR_LOUU_SHIFT;
2421 		clidr |= 1 << CLIDR_LOUIS_SHIFT;
2422 		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
2423 	}
2424 
2425 	/*
2426 	 * Instruction cache invalidation to the PoU is required so let L1 have
2427 	 * an instruction cache. If L1 already has a data cache, it will be
2428 	 * CACHE_TYPE_SEPARATE.
2429 	 */
2430 	if (!(ctr_el0 & CTR_EL0_DIC))
2431 		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);
2432 
2433 	clidr |= loc << CLIDR_LOC_SHIFT;
2434 
2435 	/*
2436 	 * Add tag cache unified to data cache. Allocation tags and data are
2437 	 * unified in a cache line so that it looks valid even if there is only
2438 	 * one cache line.
2439 	 */
2440 	if (kvm_has_mte(vcpu->kvm))
2441 		clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);
2442 
2443 	__vcpu_assign_sys_reg(vcpu, r->reg, clidr);
2444 
2445 	return __vcpu_sys_reg(vcpu, r->reg);
2446 }
2447 
set_clidr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 val)2448 static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2449 		      u64 val)
2450 {
2451 	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
2452 	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));
2453 
2454 	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
2455 		return -EINVAL;
2456 
2457 	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
2458 
2459 	return 0;
2460 }
2461 
access_csselr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2462 static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2463 			  const struct sys_reg_desc *r)
2464 {
2465 	int reg = r->reg;
2466 
2467 	if (p->is_write)
2468 		vcpu_write_sys_reg(vcpu, p->regval, reg);
2469 	else
2470 		p->regval = vcpu_read_sys_reg(vcpu, reg);
2471 	return true;
2472 }
2473 
access_ccsidr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2474 static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2475 			  const struct sys_reg_desc *r)
2476 {
2477 	u32 csselr;
2478 
2479 	if (p->is_write)
2480 		return write_to_read_only(vcpu, p, r);
2481 
2482 	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
2483 	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
2484 	if (csselr < CSSELR_MAX)
2485 		p->regval = get_ccsidr(vcpu, csselr);
2486 
2487 	return true;
2488 }
2489 
mte_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2490 static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
2491 				   const struct sys_reg_desc *rd)
2492 {
2493 	if (kvm_has_mte(vcpu->kvm))
2494 		return 0;
2495 
2496 	return REG_HIDDEN;
2497 }
2498 
2499 #define MTE_REG(name) {				\
2500 	SYS_DESC(SYS_##name),			\
2501 	.access = undef_access,			\
2502 	.reset = reset_unknown,			\
2503 	.reg = name,				\
2504 	.visibility = mte_visibility,		\
2505 }
2506 
el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2507 static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
2508 				   const struct sys_reg_desc *rd)
2509 {
2510 	if (vcpu_has_nv(vcpu))
2511 		return 0;
2512 
2513 	return REG_HIDDEN;
2514 }
2515 
bad_vncr_trap(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2516 static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
2517 			  struct sys_reg_params *p,
2518 			  const struct sys_reg_desc *r)
2519 {
2520 	/*
2521 	 * We really shouldn't be here, and this is likely the result
2522 	 * of a misconfigured trap, as this register should target the
2523 	 * VNCR page, and nothing else.
2524 	 */
2525 	return bad_trap(vcpu, p, r,
2526 			"trap of VNCR-backed register");
2527 }
2528 
bad_redir_trap(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2529 static bool bad_redir_trap(struct kvm_vcpu *vcpu,
2530 			   struct sys_reg_params *p,
2531 			   const struct sys_reg_desc *r)
2532 {
2533 	/*
2534 	 * We really shouldn't be here, and this is likely the result
2535 	 * of a misconfigured trap, as this register should target the
2536 	 * corresponding EL1, and nothing else.
2537 	 */
2538 	return bad_trap(vcpu, p, r,
2539 			"trap of EL2 register redirected to EL1");
2540 }
2541 
2542 #define SYS_REG_USER_FILTER(name, acc, rst, v, gu, su, filter) { \
2543 	SYS_DESC(SYS_##name),			\
2544 	.access = acc,				\
2545 	.reset = rst,				\
2546 	.reg = name,				\
2547 	.get_user = gu,				\
2548 	.set_user = su,				\
2549 	.visibility = filter,			\
2550 	.val = v,				\
2551 }
2552 
2553 #define EL2_REG_FILTERED(name, acc, rst, v, filter)	\
2554 	SYS_REG_USER_FILTER(name, acc, rst, v, NULL, NULL, filter)
2555 
2556 #define EL2_REG(name, acc, rst, v)			\
2557 	EL2_REG_FILTERED(name, acc, rst, v, el2_visibility)
2558 
2559 #define EL2_REG_VNCR(name, rst, v)	EL2_REG(name, bad_vncr_trap, rst, v)
2560 #define EL2_REG_VNCR_FILT(name, vis)			\
2561 	EL2_REG_FILTERED(name, bad_vncr_trap, reset_val, 0, vis)
2562 #define EL2_REG_VNCR_GICv3(name)			\
2563 	EL2_REG_VNCR_FILT(name, hidden_visibility)
2564 #define EL2_REG_REDIR(name, rst, v)	EL2_REG(name, bad_redir_trap, rst, v)
2565 
2566 #define TIMER_REG(name, vis)					   \
2567 	SYS_REG_USER_FILTER(name, access_arch_timer, reset_val, 0, \
2568 			    arch_timer_get_user, arch_timer_set_user, vis)
2569 
2570 /*
2571  * Since reset() callback and field val are not used for idregs, they will be
2572  * used for specific purposes for idregs.
2573  * The reset() would return KVM sanitised register value. The value would be the
2574  * same as the host kernel sanitised value if there is no KVM sanitisation.
2575  * The val would be used as a mask indicating writable fields for the idreg.
2576  * Only bits with 1 are writable from userspace. This mask might not be
2577  * necessary in the future whenever all ID registers are enabled as writable
2578  * from userspace.
2579  */
2580 
2581 #define ID_DESC_DEFAULT_CALLBACKS		\
2582 	.access	= access_id_reg,		\
2583 	.get_user = get_id_reg,			\
2584 	.set_user = set_id_reg,			\
2585 	.visibility = id_visibility,		\
2586 	.reset = kvm_read_sanitised_id_reg
2587 
2588 #define ID_DESC(name)				\
2589 	SYS_DESC(SYS_##name),			\
2590 	ID_DESC_DEFAULT_CALLBACKS
2591 
2592 /* sys_reg_desc initialiser for known cpufeature ID registers */
2593 #define ID_SANITISED(name) {			\
2594 	ID_DESC(name),				\
2595 	.val = 0,				\
2596 }
2597 
2598 /* sys_reg_desc initialiser for writable ID registers */
2599 #define ID_WRITABLE(name, mask) {		\
2600 	ID_DESC(name),				\
2601 	.val = mask,				\
2602 }
2603 
2604 /*
2605  * 32bit ID regs are fully writable when the guest is 32bit
2606  * capable. Nothing in the KVM code should rely on 32bit features
2607  * anyway, only 64bit, so let the VMM do its worse.
2608  */
2609 #define AA32_ID_WRITABLE(name) {		\
2610 	ID_DESC(name),				\
2611 	.visibility = aa32_id_visibility,	\
2612 	.val = GENMASK(31, 0),			\
2613 }
2614 
2615 /* sys_reg_desc initialiser for cpufeature ID registers that need filtering */
2616 #define ID_FILTERED(sysreg, name, mask) {	\
2617 	ID_DESC(sysreg),				\
2618 	.set_user = set_##name,				\
2619 	.val = (mask),					\
2620 }
2621 
2622 /*
2623  * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
2624  * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
2625  * (1 <= crm < 8, 0 <= Op2 < 8).
2626  */
2627 #define ID_UNALLOCATED(crm, op2) {			\
2628 	.name = "S3_0_0_" #crm "_" #op2,		\
2629 	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
2630 	ID_DESC_DEFAULT_CALLBACKS,			\
2631 	.visibility = raz_visibility,			\
2632 	.val = 0,					\
2633 }
2634 
2635 /*
2636  * sys_reg_desc initialiser for known ID registers that we hide from guests.
2637  * For now, these are exposed just like unallocated ID regs: they appear
2638  * RAZ for the guest.
2639  */
2640 #define ID_HIDDEN(name) {			\
2641 	ID_DESC(name),				\
2642 	.visibility = raz_visibility,		\
2643 	.val = 0,				\
2644 }
2645 
access_sp_el1(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2646 static bool access_sp_el1(struct kvm_vcpu *vcpu,
2647 			  struct sys_reg_params *p,
2648 			  const struct sys_reg_desc *r)
2649 {
2650 	if (p->is_write)
2651 		__vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval);
2652 	else
2653 		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
2654 
2655 	return true;
2656 }
2657 
access_elr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2658 static bool access_elr(struct kvm_vcpu *vcpu,
2659 		       struct sys_reg_params *p,
2660 		       const struct sys_reg_desc *r)
2661 {
2662 	if (p->is_write)
2663 		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
2664 	else
2665 		p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);
2666 
2667 	return true;
2668 }
2669 
access_spsr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2670 static bool access_spsr(struct kvm_vcpu *vcpu,
2671 			struct sys_reg_params *p,
2672 			const struct sys_reg_desc *r)
2673 {
2674 	if (p->is_write)
2675 		__vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval);
2676 	else
2677 		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
2678 
2679 	return true;
2680 }
2681 
access_cntkctl_el12(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2682 static bool access_cntkctl_el12(struct kvm_vcpu *vcpu,
2683 				struct sys_reg_params *p,
2684 				const struct sys_reg_desc *r)
2685 {
2686 	if (p->is_write)
2687 		__vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval);
2688 	else
2689 		p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);
2690 
2691 	return true;
2692 }
2693 
reset_hcr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)2694 static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2695 {
2696 	u64 val = r->val;
2697 
2698 	if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
2699 		val |= HCR_E2H;
2700 
2701 	__vcpu_assign_sys_reg(vcpu, r->reg, val);
2702 
2703 	return __vcpu_sys_reg(vcpu, r->reg);
2704 }
2705 
__el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,unsigned int (* fn)(const struct kvm_vcpu *,const struct sys_reg_desc *))2706 static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu,
2707 				     const struct sys_reg_desc *rd,
2708 				     unsigned int (*fn)(const struct kvm_vcpu *,
2709 							const struct sys_reg_desc *))
2710 {
2711 	return el2_visibility(vcpu, rd) ?: fn(vcpu, rd);
2712 }
2713 
sve_el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2714 static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
2715 				       const struct sys_reg_desc *rd)
2716 {
2717 	return __el2_visibility(vcpu, rd, sve_visibility);
2718 }
2719 
vncr_el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2720 static unsigned int vncr_el2_visibility(const struct kvm_vcpu *vcpu,
2721 					const struct sys_reg_desc *rd)
2722 {
2723 	if (el2_visibility(vcpu, rd) == 0 &&
2724 	    kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
2725 		return 0;
2726 
2727 	return REG_HIDDEN;
2728 }
2729 
sctlr2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2730 static unsigned int sctlr2_visibility(const struct kvm_vcpu *vcpu,
2731 				      const struct sys_reg_desc *rd)
2732 {
2733 	if (kvm_has_sctlr2(vcpu->kvm))
2734 		return 0;
2735 
2736 	return REG_HIDDEN;
2737 }
2738 
sctlr2_el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2739 static unsigned int sctlr2_el2_visibility(const struct kvm_vcpu *vcpu,
2740 					  const struct sys_reg_desc *rd)
2741 {
2742 	return __el2_visibility(vcpu, rd, sctlr2_visibility);
2743 }
2744 
access_zcr_el2(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2745 static bool access_zcr_el2(struct kvm_vcpu *vcpu,
2746 			   struct sys_reg_params *p,
2747 			   const struct sys_reg_desc *r)
2748 {
2749 	unsigned int vq;
2750 
2751 	if (guest_hyp_sve_traps_enabled(vcpu)) {
2752 		kvm_inject_nested_sve_trap(vcpu);
2753 		return false;
2754 	}
2755 
2756 	if (!p->is_write) {
2757 		p->regval = __vcpu_sys_reg(vcpu, ZCR_EL2);
2758 		return true;
2759 	}
2760 
2761 	vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
2762 	vq = min(vq, vcpu_sve_max_vq(vcpu));
2763 	__vcpu_assign_sys_reg(vcpu, ZCR_EL2, vq - 1);
2764 	return true;
2765 }
2766 
access_gic_vtr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2767 static bool access_gic_vtr(struct kvm_vcpu *vcpu,
2768 			   struct sys_reg_params *p,
2769 			   const struct sys_reg_desc *r)
2770 {
2771 	if (p->is_write)
2772 		return write_to_read_only(vcpu, p, r);
2773 
2774 	p->regval = kvm_get_guest_vtr_el2();
2775 
2776 	return true;
2777 }
2778 
access_gic_misr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2779 static bool access_gic_misr(struct kvm_vcpu *vcpu,
2780 			    struct sys_reg_params *p,
2781 			    const struct sys_reg_desc *r)
2782 {
2783 	if (p->is_write)
2784 		return write_to_read_only(vcpu, p, r);
2785 
2786 	p->regval = vgic_v3_get_misr(vcpu);
2787 
2788 	return true;
2789 }
2790 
access_gic_eisr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2791 static bool access_gic_eisr(struct kvm_vcpu *vcpu,
2792 			    struct sys_reg_params *p,
2793 			    const struct sys_reg_desc *r)
2794 {
2795 	if (p->is_write)
2796 		return write_to_read_only(vcpu, p, r);
2797 
2798 	p->regval = vgic_v3_get_eisr(vcpu);
2799 
2800 	return true;
2801 }
2802 
access_gic_elrsr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2803 static bool access_gic_elrsr(struct kvm_vcpu *vcpu,
2804 			     struct sys_reg_params *p,
2805 			     const struct sys_reg_desc *r)
2806 {
2807 	if (p->is_write)
2808 		return write_to_read_only(vcpu, p, r);
2809 
2810 	p->regval = vgic_v3_get_elrsr(vcpu);
2811 
2812 	return true;
2813 }
2814 
s1poe_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2815 static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu,
2816 				     const struct sys_reg_desc *rd)
2817 {
2818 	if (kvm_has_s1poe(vcpu->kvm))
2819 		return 0;
2820 
2821 	return REG_HIDDEN;
2822 }
2823 
s1poe_el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2824 static unsigned int s1poe_el2_visibility(const struct kvm_vcpu *vcpu,
2825 					 const struct sys_reg_desc *rd)
2826 {
2827 	return __el2_visibility(vcpu, rd, s1poe_visibility);
2828 }
2829 
tcr2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2830 static unsigned int tcr2_visibility(const struct kvm_vcpu *vcpu,
2831 				    const struct sys_reg_desc *rd)
2832 {
2833 	if (kvm_has_tcr2(vcpu->kvm))
2834 		return 0;
2835 
2836 	return REG_HIDDEN;
2837 }
2838 
tcr2_el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2839 static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu,
2840 				    const struct sys_reg_desc *rd)
2841 {
2842 	return __el2_visibility(vcpu, rd, tcr2_visibility);
2843 }
2844 
fgt2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2845 static unsigned int fgt2_visibility(const struct kvm_vcpu *vcpu,
2846 				    const struct sys_reg_desc *rd)
2847 {
2848 	if (el2_visibility(vcpu, rd) == 0 &&
2849 	    kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, FGT2))
2850 		return 0;
2851 
2852 	return REG_HIDDEN;
2853 }
2854 
fgt_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2855 static unsigned int fgt_visibility(const struct kvm_vcpu *vcpu,
2856 				   const struct sys_reg_desc *rd)
2857 {
2858 	if (el2_visibility(vcpu, rd) == 0 &&
2859 	    kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, IMP))
2860 		return 0;
2861 
2862 	return REG_HIDDEN;
2863 }
2864 
s1pie_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2865 static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu,
2866 				     const struct sys_reg_desc *rd)
2867 {
2868 	if (kvm_has_s1pie(vcpu->kvm))
2869 		return 0;
2870 
2871 	return REG_HIDDEN;
2872 }
2873 
s1pie_el2_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2874 static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
2875 					 const struct sys_reg_desc *rd)
2876 {
2877 	return __el2_visibility(vcpu, rd, s1pie_visibility);
2878 }
2879 
cnthv_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd)2880 static unsigned int cnthv_visibility(const struct kvm_vcpu *vcpu,
2881 				     const struct sys_reg_desc *rd)
2882 {
2883 	if (vcpu_has_nv(vcpu) &&
2884 	    !vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2_E2H0))
2885 		return 0;
2886 
2887 	return REG_HIDDEN;
2888 }
2889 
access_mdcr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2890 static bool access_mdcr(struct kvm_vcpu *vcpu,
2891 			struct sys_reg_params *p,
2892 			const struct sys_reg_desc *r)
2893 {
2894 	u64 hpmn, val, old = __vcpu_sys_reg(vcpu, MDCR_EL2);
2895 
2896 	if (!p->is_write) {
2897 		p->regval = old;
2898 		return true;
2899 	}
2900 
2901 	val = p->regval;
2902 	hpmn = FIELD_GET(MDCR_EL2_HPMN, val);
2903 
2904 	/*
2905 	 * If HPMN is out of bounds, limit it to what we actually
2906 	 * support. This matches the UNKNOWN definition of the field
2907 	 * in that case, and keeps the emulation simple. Sort of.
2908 	 */
2909 	if (hpmn > vcpu->kvm->arch.nr_pmu_counters) {
2910 		hpmn = vcpu->kvm->arch.nr_pmu_counters;
2911 		u64p_replace_bits(&val, hpmn, MDCR_EL2_HPMN);
2912 	}
2913 
2914 	__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
2915 
2916 	/*
2917 	 * Request a reload of the PMU to enable/disable the counters
2918 	 * affected by HPME.
2919 	 */
2920 	if ((old ^ val) & MDCR_EL2_HPME)
2921 		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
2922 
2923 	return true;
2924 }
2925 
access_ras(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2926 static bool access_ras(struct kvm_vcpu *vcpu,
2927 		       struct sys_reg_params *p,
2928 		       const struct sys_reg_desc *r)
2929 {
2930 	struct kvm *kvm = vcpu->kvm;
2931 
2932 	switch(reg_to_encoding(r)) {
2933 	case SYS_ERXPFGCDN_EL1:
2934 	case SYS_ERXPFGCTL_EL1:
2935 	case SYS_ERXPFGF_EL1:
2936 	case SYS_ERXMISC2_EL1:
2937 	case SYS_ERXMISC3_EL1:
2938 		if (!(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) ||
2939 		      (kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, RAS, IMP) &&
2940 		       kvm_has_feat(kvm, ID_AA64PFR1_EL1, RAS_frac, RASv1p1)))) {
2941 			kvm_inject_undefined(vcpu);
2942 			return false;
2943 		}
2944 		break;
2945 	default:
2946 		if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
2947 			kvm_inject_undefined(vcpu);
2948 			return false;
2949 		}
2950 	}
2951 
2952 	return trap_raz_wi(vcpu, p, r);
2953 }
2954 
2955 /*
2956  * For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and
2957  * AIDR_EL1 as "invariant" registers, meaning userspace cannot change them.
2958  * The values made visible to userspace were the register values of the boot
2959  * CPU.
2960  *
2961  * At the same time, reads from these registers at EL1 previously were not
2962  * trapped, allowing the guest to read the actual hardware value. On big-little
2963  * machines, this means the VM can see different values depending on where a
2964  * given vCPU got scheduled.
2965  *
2966  * These registers are now trapped as collateral damage from SME, and what
2967  * follows attempts to give a user / guest view consistent with the existing
2968  * ABI.
2969  */
access_imp_id_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)2970 static bool access_imp_id_reg(struct kvm_vcpu *vcpu,
2971 			      struct sys_reg_params *p,
2972 			      const struct sys_reg_desc *r)
2973 {
2974 	if (p->is_write)
2975 		return write_to_read_only(vcpu, p, r);
2976 
2977 	/*
2978 	 * Return the VM-scoped implementation ID register values if userspace
2979 	 * has made them writable.
2980 	 */
2981 	if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags))
2982 		return access_id_reg(vcpu, p, r);
2983 
2984 	/*
2985 	 * Otherwise, fall back to the old behavior of returning the value of
2986 	 * the current CPU.
2987 	 */
2988 	switch (reg_to_encoding(r)) {
2989 	case SYS_REVIDR_EL1:
2990 		p->regval = read_sysreg(revidr_el1);
2991 		break;
2992 	case SYS_AIDR_EL1:
2993 		p->regval = read_sysreg(aidr_el1);
2994 		break;
2995 	default:
2996 		WARN_ON_ONCE(1);
2997 	}
2998 
2999 	return true;
3000 }
3001 
3002 static u64 __ro_after_init boot_cpu_midr_val;
3003 static u64 __ro_after_init boot_cpu_revidr_val;
3004 static u64 __ro_after_init boot_cpu_aidr_val;
3005 
init_imp_id_regs(void)3006 static void init_imp_id_regs(void)
3007 {
3008 	boot_cpu_midr_val = read_sysreg(midr_el1);
3009 	boot_cpu_revidr_val = read_sysreg(revidr_el1);
3010 	boot_cpu_aidr_val = read_sysreg(aidr_el1);
3011 }
3012 
reset_imp_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)3013 static u64 reset_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
3014 {
3015 	switch (reg_to_encoding(r)) {
3016 	case SYS_MIDR_EL1:
3017 		return boot_cpu_midr_val;
3018 	case SYS_REVIDR_EL1:
3019 		return boot_cpu_revidr_val;
3020 	case SYS_AIDR_EL1:
3021 		return boot_cpu_aidr_val;
3022 	default:
3023 		KVM_BUG_ON(1, vcpu->kvm);
3024 		return 0;
3025 	}
3026 }
3027 
set_imp_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r,u64 val)3028 static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
3029 			  u64 val)
3030 {
3031 	struct kvm *kvm = vcpu->kvm;
3032 	u64 expected;
3033 
3034 	guard(mutex)(&kvm->arch.config_lock);
3035 
3036 	expected = read_id_reg(vcpu, r);
3037 	if (expected == val)
3038 		return 0;
3039 
3040 	if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags))
3041 		return -EINVAL;
3042 
3043 	/*
3044 	 * Once the VM has started the ID registers are immutable. Reject the
3045 	 * write if userspace tries to change it.
3046 	 */
3047 	if (kvm_vm_has_ran_once(kvm))
3048 		return -EBUSY;
3049 
3050 	/*
3051 	 * Any value is allowed for the implementation ID registers so long as
3052 	 * it is within the writable mask.
3053 	 */
3054 	if ((val & r->val) != val)
3055 		return -EINVAL;
3056 
3057 	kvm_set_vm_id_reg(kvm, reg_to_encoding(r), val);
3058 	return 0;
3059 }
3060 
3061 #define IMPLEMENTATION_ID(reg, mask) {			\
3062 	SYS_DESC(SYS_##reg),				\
3063 	.access = access_imp_id_reg,			\
3064 	.get_user = get_id_reg,				\
3065 	.set_user = set_imp_id_reg,			\
3066 	.reset = reset_imp_id_reg,			\
3067 	.val = mask,					\
3068 	}
3069 
reset_mdcr(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)3070 static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
3071 {
3072 	__vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters);
3073 	return vcpu->kvm->arch.nr_pmu_counters;
3074 }
3075 
3076 /*
3077  * Architected system registers.
3078  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
3079  *
3080  * Debug handling: We do trap most, if not all debug related system
3081  * registers. The implementation is good enough to ensure that a guest
3082  * can use these with minimal performance degradation. The drawback is
3083  * that we don't implement any of the external debug architecture.
3084  * This should be revisited if we ever encounter a more demanding
3085  * guest...
3086  */
3087 static const struct sys_reg_desc sys_reg_descs[] = {
3088 	DBG_BCR_BVR_WCR_WVR_EL1(0),
3089 	DBG_BCR_BVR_WCR_WVR_EL1(1),
3090 	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
3091 	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
3092 	DBG_BCR_BVR_WCR_WVR_EL1(2),
3093 	DBG_BCR_BVR_WCR_WVR_EL1(3),
3094 	DBG_BCR_BVR_WCR_WVR_EL1(4),
3095 	DBG_BCR_BVR_WCR_WVR_EL1(5),
3096 	DBG_BCR_BVR_WCR_WVR_EL1(6),
3097 	DBG_BCR_BVR_WCR_WVR_EL1(7),
3098 	DBG_BCR_BVR_WCR_WVR_EL1(8),
3099 	DBG_BCR_BVR_WCR_WVR_EL1(9),
3100 	DBG_BCR_BVR_WCR_WVR_EL1(10),
3101 	DBG_BCR_BVR_WCR_WVR_EL1(11),
3102 	DBG_BCR_BVR_WCR_WVR_EL1(12),
3103 	DBG_BCR_BVR_WCR_WVR_EL1(13),
3104 	DBG_BCR_BVR_WCR_WVR_EL1(14),
3105 	DBG_BCR_BVR_WCR_WVR_EL1(15),
3106 
3107 	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
3108 	{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
3109 	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
3110 		OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
3111 	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
3112 	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
3113 	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
3114 	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
3115 	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
3116 
3117 	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
3118 	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
3119 	// DBGDTR[TR]X_EL0 share the same encoding
3120 	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
3121 
3122 	{ SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 },
3123 
3124 	IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)),
3125 	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
3126 	IMPLEMENTATION_ID(REVIDR_EL1, GENMASK_ULL(63, 0)),
3127 
3128 	/*
3129 	 * ID regs: all ID_SANITISED() entries here must have corresponding
3130 	 * entries in arm64_ftr_regs[].
3131 	 */
3132 
3133 	/* AArch64 mappings of the AArch32 ID registers */
3134 	/* CRm=1 */
3135 	AA32_ID_WRITABLE(ID_PFR0_EL1),
3136 	AA32_ID_WRITABLE(ID_PFR1_EL1),
3137 	{ SYS_DESC(SYS_ID_DFR0_EL1),
3138 	  .access = access_id_reg,
3139 	  .get_user = get_id_reg,
3140 	  .set_user = set_id_dfr0_el1,
3141 	  .visibility = aa32_id_visibility,
3142 	  .reset = read_sanitised_id_dfr0_el1,
3143 	  .val = GENMASK(31, 0) },
3144 	ID_HIDDEN(ID_AFR0_EL1),
3145 	AA32_ID_WRITABLE(ID_MMFR0_EL1),
3146 	AA32_ID_WRITABLE(ID_MMFR1_EL1),
3147 	AA32_ID_WRITABLE(ID_MMFR2_EL1),
3148 	AA32_ID_WRITABLE(ID_MMFR3_EL1),
3149 
3150 	/* CRm=2 */
3151 	AA32_ID_WRITABLE(ID_ISAR0_EL1),
3152 	AA32_ID_WRITABLE(ID_ISAR1_EL1),
3153 	AA32_ID_WRITABLE(ID_ISAR2_EL1),
3154 	AA32_ID_WRITABLE(ID_ISAR3_EL1),
3155 	AA32_ID_WRITABLE(ID_ISAR4_EL1),
3156 	AA32_ID_WRITABLE(ID_ISAR5_EL1),
3157 	AA32_ID_WRITABLE(ID_MMFR4_EL1),
3158 	AA32_ID_WRITABLE(ID_ISAR6_EL1),
3159 
3160 	/* CRm=3 */
3161 	AA32_ID_WRITABLE(MVFR0_EL1),
3162 	AA32_ID_WRITABLE(MVFR1_EL1),
3163 	AA32_ID_WRITABLE(MVFR2_EL1),
3164 	ID_UNALLOCATED(3,3),
3165 	AA32_ID_WRITABLE(ID_PFR2_EL1),
3166 	ID_HIDDEN(ID_DFR1_EL1),
3167 	AA32_ID_WRITABLE(ID_MMFR5_EL1),
3168 	ID_UNALLOCATED(3,7),
3169 
3170 	/* AArch64 ID registers */
3171 	/* CRm=4 */
3172 	ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1,
3173 		    ~(ID_AA64PFR0_EL1_AMU |
3174 		      ID_AA64PFR0_EL1_MPAM |
3175 		      ID_AA64PFR0_EL1_SVE |
3176 		      ID_AA64PFR0_EL1_AdvSIMD |
3177 		      ID_AA64PFR0_EL1_FP)),
3178 	ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
3179 				     ~(ID_AA64PFR1_EL1_PFAR |
3180 				       ID_AA64PFR1_EL1_MTEX |
3181 				       ID_AA64PFR1_EL1_THE |
3182 				       ID_AA64PFR1_EL1_GCS |
3183 				       ID_AA64PFR1_EL1_MTE_frac |
3184 				       ID_AA64PFR1_EL1_NMI |
3185 				       ID_AA64PFR1_EL1_RNDR_trap |
3186 				       ID_AA64PFR1_EL1_SME |
3187 				       ID_AA64PFR1_EL1_RES0 |
3188 				       ID_AA64PFR1_EL1_MPAM_frac |
3189 				       ID_AA64PFR1_EL1_MTE)),
3190 	ID_WRITABLE(ID_AA64PFR2_EL1,
3191 		    ID_AA64PFR2_EL1_FPMR |
3192 		    ID_AA64PFR2_EL1_MTEFAR |
3193 		    ID_AA64PFR2_EL1_MTESTOREONLY),
3194 	ID_UNALLOCATED(4,3),
3195 	ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
3196 	ID_HIDDEN(ID_AA64SMFR0_EL1),
3197 	ID_UNALLOCATED(4,6),
3198 	ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
3199 
3200 	/* CRm=5 */
3201 	/*
3202 	 * Prior to FEAT_Debugv8.9, the architecture defines context-aware
3203 	 * breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs).
3204 	 * KVM does not trap + emulate the breakpoint registers, and as such
3205 	 * cannot support a layout that misaligns with the underlying hardware.
3206 	 * While it may be possible to describe a subset that aligns with
3207 	 * hardware, just prevent changes to BRPs and CTX_CMPs altogether for
3208 	 * simplicity.
3209 	 *
3210 	 * See DDI0487K.a, section D2.8.3 Breakpoint types and linking
3211 	 * of breakpoints for more details.
3212 	 */
3213 	ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1,
3214 		    ID_AA64DFR0_EL1_DoubleLock_MASK |
3215 		    ID_AA64DFR0_EL1_WRPs_MASK |
3216 		    ID_AA64DFR0_EL1_PMUVer_MASK |
3217 		    ID_AA64DFR0_EL1_DebugVer_MASK),
3218 	ID_SANITISED(ID_AA64DFR1_EL1),
3219 	ID_UNALLOCATED(5,2),
3220 	ID_UNALLOCATED(5,3),
3221 	ID_HIDDEN(ID_AA64AFR0_EL1),
3222 	ID_HIDDEN(ID_AA64AFR1_EL1),
3223 	ID_UNALLOCATED(5,6),
3224 	ID_UNALLOCATED(5,7),
3225 
3226 	/* CRm=6 */
3227 	ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
3228 	ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
3229 					ID_AA64ISAR1_EL1_GPA |
3230 					ID_AA64ISAR1_EL1_API |
3231 					ID_AA64ISAR1_EL1_APA)),
3232 	ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
3233 					ID_AA64ISAR2_EL1_APA3 |
3234 					ID_AA64ISAR2_EL1_GPA3)),
3235 	ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT |
3236 				       ID_AA64ISAR3_EL1_LSFE |
3237 				       ID_AA64ISAR3_EL1_FAMINMAX)),
3238 	ID_UNALLOCATED(6,4),
3239 	ID_UNALLOCATED(6,5),
3240 	ID_UNALLOCATED(6,6),
3241 	ID_UNALLOCATED(6,7),
3242 
3243 	/* CRm=7 */
3244 	ID_FILTERED(ID_AA64MMFR0_EL1, id_aa64mmfr0_el1,
3245 				      ~(ID_AA64MMFR0_EL1_RES0 |
3246 					ID_AA64MMFR0_EL1_ASIDBITS)),
3247 	ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
3248 					ID_AA64MMFR1_EL1_XNX |
3249 					ID_AA64MMFR1_EL1_VH |
3250 					ID_AA64MMFR1_EL1_VMIDBits)),
3251 	ID_FILTERED(ID_AA64MMFR2_EL1,
3252 		    id_aa64mmfr2_el1, ~(ID_AA64MMFR2_EL1_RES0 |
3253 					ID_AA64MMFR2_EL1_EVT |
3254 					ID_AA64MMFR2_EL1_FWB |
3255 					ID_AA64MMFR2_EL1_IDS |
3256 					ID_AA64MMFR2_EL1_NV |
3257 					ID_AA64MMFR2_EL1_CCIDX)),
3258 	ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX	|
3259 				       ID_AA64MMFR3_EL1_SCTLRX	|
3260 				       ID_AA64MMFR3_EL1_S1PIE   |
3261 				       ID_AA64MMFR3_EL1_S1POE)),
3262 	ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac),
3263 	ID_UNALLOCATED(7,5),
3264 	ID_UNALLOCATED(7,6),
3265 	ID_UNALLOCATED(7,7),
3266 
3267 	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
3268 	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
3269 	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
3270 	{ SYS_DESC(SYS_SCTLR2_EL1), access_vm_reg, reset_val, SCTLR2_EL1, 0,
3271 	  .visibility = sctlr2_visibility },
3272 
3273 	MTE_REG(RGSR_EL1),
3274 	MTE_REG(GCR_EL1),
3275 
3276 	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
3277 	{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
3278 	{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
3279 	{ SYS_DESC(SYS_SMCR_EL1), undef_access },
3280 	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
3281 	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
3282 	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
3283 	{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0,
3284 	  .visibility = tcr2_visibility },
3285 
3286 	PTRAUTH_KEY(APIA),
3287 	PTRAUTH_KEY(APIB),
3288 	PTRAUTH_KEY(APDA),
3289 	PTRAUTH_KEY(APDB),
3290 	PTRAUTH_KEY(APGA),
3291 
3292 	{ SYS_DESC(SYS_SPSR_EL1), access_spsr},
3293 	{ SYS_DESC(SYS_ELR_EL1), access_elr},
3294 
3295 	{ SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
3296 
3297 	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
3298 	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
3299 	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
3300 
3301 	{ SYS_DESC(SYS_ERRIDR_EL1), access_ras },
3302 	{ SYS_DESC(SYS_ERRSELR_EL1), access_ras },
3303 	{ SYS_DESC(SYS_ERXFR_EL1), access_ras },
3304 	{ SYS_DESC(SYS_ERXCTLR_EL1), access_ras },
3305 	{ SYS_DESC(SYS_ERXSTATUS_EL1), access_ras },
3306 	{ SYS_DESC(SYS_ERXADDR_EL1), access_ras },
3307 	{ SYS_DESC(SYS_ERXPFGF_EL1), access_ras },
3308 	{ SYS_DESC(SYS_ERXPFGCTL_EL1), access_ras },
3309 	{ SYS_DESC(SYS_ERXPFGCDN_EL1), access_ras },
3310 	{ SYS_DESC(SYS_ERXMISC0_EL1), access_ras },
3311 	{ SYS_DESC(SYS_ERXMISC1_EL1), access_ras },
3312 	{ SYS_DESC(SYS_ERXMISC2_EL1), access_ras },
3313 	{ SYS_DESC(SYS_ERXMISC3_EL1), access_ras },
3314 
3315 	MTE_REG(TFSR_EL1),
3316 	MTE_REG(TFSRE0_EL1),
3317 
3318 	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
3319 	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
3320 
3321 	{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
3322 	{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
3323 	{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
3324 	{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
3325 	{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
3326 	{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
3327 	{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
3328 	{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
3329 	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
3330 	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
3331 	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
3332 	{ SYS_DESC(SYS_PMSDSFR_EL1), undef_access },
3333 	/* PMBIDR_EL1 is not trapped */
3334 
3335 	{ PMU_SYS_REG(PMINTENSET_EL1),
3336 	  .access = access_pminten, .reg = PMINTENSET_EL1,
3337 	  .get_user = get_pmreg, .set_user = set_pmreg },
3338 	{ PMU_SYS_REG(PMINTENCLR_EL1),
3339 	  .access = access_pminten, .reg = PMINTENSET_EL1,
3340 	  .get_user = get_pmreg, .set_user = set_pmreg },
3341 	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
3342 
3343 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
3344 	{ SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1,
3345 	  .visibility = s1pie_visibility },
3346 	{ SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1,
3347 	  .visibility = s1pie_visibility },
3348 	{ SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1,
3349 	  .visibility = s1poe_visibility },
3350 	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
3351 
3352 	{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
3353 	{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
3354 	{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
3355 	{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
3356 	{ SYS_DESC(SYS_MPAMIDR_EL1), undef_access },
3357 	{ SYS_DESC(SYS_LORID_EL1), trap_loregion },
3358 
3359 	{ SYS_DESC(SYS_MPAM1_EL1), undef_access },
3360 	{ SYS_DESC(SYS_MPAM0_EL1), undef_access },
3361 	{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
3362 	{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
3363 
3364 	{ SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
3365 	{ SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
3366 	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
3367 	{ SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
3368 	{ SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
3369 	{ SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
3370 	{ SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
3371 	{ SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
3372 	{ SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
3373 	{ SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
3374 	{ SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
3375 	{ SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
3376 	{ SYS_DESC(SYS_ICC_DIR_EL1), undef_access },
3377 	{ SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
3378 	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
3379 	{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
3380 	{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
3381 	{ SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
3382 	{ SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
3383 	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
3384 	{ SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
3385 	{ SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
3386 	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
3387 	{ SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
3388 	{ SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
3389 
3390 	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
3391 	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
3392 
3393 	{ SYS_DESC(SYS_ACCDATA_EL1), undef_access },
3394 
3395 	{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
3396 
3397 	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
3398 
3399 	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
3400 	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
3401 	  .set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
3402 	{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
3403 	{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
3404 	IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)),
3405 	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
3406 	ID_FILTERED(CTR_EL0, ctr_el0,
3407 		    CTR_EL0_DIC_MASK |
3408 		    CTR_EL0_IDC_MASK |
3409 		    CTR_EL0_DminLine_MASK |
3410 		    CTR_EL0_L1Ip_MASK |
3411 		    CTR_EL0_IminLine_MASK),
3412 	{ SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility  },
3413 	{ SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
3414 
3415 	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
3416 	  .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
3417 	{ PMU_SYS_REG(PMCNTENSET_EL0),
3418 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0,
3419 	  .get_user = get_pmreg, .set_user = set_pmreg },
3420 	{ PMU_SYS_REG(PMCNTENCLR_EL0),
3421 	  .access = access_pmcnten, .reg = PMCNTENSET_EL0,
3422 	  .get_user = get_pmreg, .set_user = set_pmreg },
3423 	{ PMU_SYS_REG(PMOVSCLR_EL0),
3424 	  .access = access_pmovs, .reg = PMOVSSET_EL0,
3425 	  .get_user = get_pmreg, .set_user = set_pmreg },
3426 	/*
3427 	 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
3428 	 * previously (and pointlessly) advertised in the past...
3429 	 */
3430 	{ PMU_SYS_REG(PMSWINC_EL0),
3431 	  .get_user = get_raz_reg, .set_user = set_wi_reg,
3432 	  .access = access_pmswinc, .reset = NULL },
3433 	{ PMU_SYS_REG(PMSELR_EL0),
3434 	  .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
3435 	{ PMU_SYS_REG(PMCEID0_EL0),
3436 	  .access = access_pmceid, .reset = NULL },
3437 	{ PMU_SYS_REG(PMCEID1_EL0),
3438 	  .access = access_pmceid, .reset = NULL },
3439 	{ PMU_SYS_REG(PMCCNTR_EL0),
3440 	  .access = access_pmu_evcntr, .reset = reset_unknown,
3441 	  .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr,
3442 	  .set_user = set_pmu_evcntr },
3443 	{ PMU_SYS_REG(PMXEVTYPER_EL0),
3444 	  .access = access_pmu_evtyper, .reset = NULL },
3445 	{ PMU_SYS_REG(PMXEVCNTR_EL0),
3446 	  .access = access_pmu_evcntr, .reset = NULL },
3447 	/*
3448 	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
3449 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
3450 	 */
3451 	{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
3452 	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
3453 	{ PMU_SYS_REG(PMOVSSET_EL0),
3454 	  .access = access_pmovs, .reg = PMOVSSET_EL0,
3455 	  .get_user = get_pmreg, .set_user = set_pmreg },
3456 
3457 	{ SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0,
3458 	  .visibility = s1poe_visibility },
3459 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
3460 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
3461 	{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },
3462 
3463 	{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
3464 
3465 	{ SYS_DESC(SYS_AMCR_EL0), undef_access },
3466 	{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
3467 	{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
3468 	{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
3469 	{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
3470 	{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
3471 	{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
3472 	{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
3473 	AMU_AMEVCNTR0_EL0(0),
3474 	AMU_AMEVCNTR0_EL0(1),
3475 	AMU_AMEVCNTR0_EL0(2),
3476 	AMU_AMEVCNTR0_EL0(3),
3477 	AMU_AMEVCNTR0_EL0(4),
3478 	AMU_AMEVCNTR0_EL0(5),
3479 	AMU_AMEVCNTR0_EL0(6),
3480 	AMU_AMEVCNTR0_EL0(7),
3481 	AMU_AMEVCNTR0_EL0(8),
3482 	AMU_AMEVCNTR0_EL0(9),
3483 	AMU_AMEVCNTR0_EL0(10),
3484 	AMU_AMEVCNTR0_EL0(11),
3485 	AMU_AMEVCNTR0_EL0(12),
3486 	AMU_AMEVCNTR0_EL0(13),
3487 	AMU_AMEVCNTR0_EL0(14),
3488 	AMU_AMEVCNTR0_EL0(15),
3489 	AMU_AMEVTYPER0_EL0(0),
3490 	AMU_AMEVTYPER0_EL0(1),
3491 	AMU_AMEVTYPER0_EL0(2),
3492 	AMU_AMEVTYPER0_EL0(3),
3493 	AMU_AMEVTYPER0_EL0(4),
3494 	AMU_AMEVTYPER0_EL0(5),
3495 	AMU_AMEVTYPER0_EL0(6),
3496 	AMU_AMEVTYPER0_EL0(7),
3497 	AMU_AMEVTYPER0_EL0(8),
3498 	AMU_AMEVTYPER0_EL0(9),
3499 	AMU_AMEVTYPER0_EL0(10),
3500 	AMU_AMEVTYPER0_EL0(11),
3501 	AMU_AMEVTYPER0_EL0(12),
3502 	AMU_AMEVTYPER0_EL0(13),
3503 	AMU_AMEVTYPER0_EL0(14),
3504 	AMU_AMEVTYPER0_EL0(15),
3505 	AMU_AMEVCNTR1_EL0(0),
3506 	AMU_AMEVCNTR1_EL0(1),
3507 	AMU_AMEVCNTR1_EL0(2),
3508 	AMU_AMEVCNTR1_EL0(3),
3509 	AMU_AMEVCNTR1_EL0(4),
3510 	AMU_AMEVCNTR1_EL0(5),
3511 	AMU_AMEVCNTR1_EL0(6),
3512 	AMU_AMEVCNTR1_EL0(7),
3513 	AMU_AMEVCNTR1_EL0(8),
3514 	AMU_AMEVCNTR1_EL0(9),
3515 	AMU_AMEVCNTR1_EL0(10),
3516 	AMU_AMEVCNTR1_EL0(11),
3517 	AMU_AMEVCNTR1_EL0(12),
3518 	AMU_AMEVCNTR1_EL0(13),
3519 	AMU_AMEVCNTR1_EL0(14),
3520 	AMU_AMEVCNTR1_EL0(15),
3521 	AMU_AMEVTYPER1_EL0(0),
3522 	AMU_AMEVTYPER1_EL0(1),
3523 	AMU_AMEVTYPER1_EL0(2),
3524 	AMU_AMEVTYPER1_EL0(3),
3525 	AMU_AMEVTYPER1_EL0(4),
3526 	AMU_AMEVTYPER1_EL0(5),
3527 	AMU_AMEVTYPER1_EL0(6),
3528 	AMU_AMEVTYPER1_EL0(7),
3529 	AMU_AMEVTYPER1_EL0(8),
3530 	AMU_AMEVTYPER1_EL0(9),
3531 	AMU_AMEVTYPER1_EL0(10),
3532 	AMU_AMEVTYPER1_EL0(11),
3533 	AMU_AMEVTYPER1_EL0(12),
3534 	AMU_AMEVTYPER1_EL0(13),
3535 	AMU_AMEVTYPER1_EL0(14),
3536 	AMU_AMEVTYPER1_EL0(15),
3537 
3538 	{ SYS_DESC(SYS_CNTPCT_EL0), .access = access_arch_timer,
3539 	  .get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
3540 	{ SYS_DESC(SYS_CNTVCT_EL0), .access = access_arch_timer,
3541 	  .get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
3542 	{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
3543 	{ SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer },
3544 	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
3545 	TIMER_REG(CNTP_CTL_EL0, NULL),
3546 	TIMER_REG(CNTP_CVAL_EL0, NULL),
3547 
3548 	{ SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer },
3549 	TIMER_REG(CNTV_CTL_EL0, NULL),
3550 	TIMER_REG(CNTV_CVAL_EL0, NULL),
3551 
3552 	/* PMEVCNTRn_EL0 */
3553 	PMU_PMEVCNTR_EL0(0),
3554 	PMU_PMEVCNTR_EL0(1),
3555 	PMU_PMEVCNTR_EL0(2),
3556 	PMU_PMEVCNTR_EL0(3),
3557 	PMU_PMEVCNTR_EL0(4),
3558 	PMU_PMEVCNTR_EL0(5),
3559 	PMU_PMEVCNTR_EL0(6),
3560 	PMU_PMEVCNTR_EL0(7),
3561 	PMU_PMEVCNTR_EL0(8),
3562 	PMU_PMEVCNTR_EL0(9),
3563 	PMU_PMEVCNTR_EL0(10),
3564 	PMU_PMEVCNTR_EL0(11),
3565 	PMU_PMEVCNTR_EL0(12),
3566 	PMU_PMEVCNTR_EL0(13),
3567 	PMU_PMEVCNTR_EL0(14),
3568 	PMU_PMEVCNTR_EL0(15),
3569 	PMU_PMEVCNTR_EL0(16),
3570 	PMU_PMEVCNTR_EL0(17),
3571 	PMU_PMEVCNTR_EL0(18),
3572 	PMU_PMEVCNTR_EL0(19),
3573 	PMU_PMEVCNTR_EL0(20),
3574 	PMU_PMEVCNTR_EL0(21),
3575 	PMU_PMEVCNTR_EL0(22),
3576 	PMU_PMEVCNTR_EL0(23),
3577 	PMU_PMEVCNTR_EL0(24),
3578 	PMU_PMEVCNTR_EL0(25),
3579 	PMU_PMEVCNTR_EL0(26),
3580 	PMU_PMEVCNTR_EL0(27),
3581 	PMU_PMEVCNTR_EL0(28),
3582 	PMU_PMEVCNTR_EL0(29),
3583 	PMU_PMEVCNTR_EL0(30),
3584 	/* PMEVTYPERn_EL0 */
3585 	PMU_PMEVTYPER_EL0(0),
3586 	PMU_PMEVTYPER_EL0(1),
3587 	PMU_PMEVTYPER_EL0(2),
3588 	PMU_PMEVTYPER_EL0(3),
3589 	PMU_PMEVTYPER_EL0(4),
3590 	PMU_PMEVTYPER_EL0(5),
3591 	PMU_PMEVTYPER_EL0(6),
3592 	PMU_PMEVTYPER_EL0(7),
3593 	PMU_PMEVTYPER_EL0(8),
3594 	PMU_PMEVTYPER_EL0(9),
3595 	PMU_PMEVTYPER_EL0(10),
3596 	PMU_PMEVTYPER_EL0(11),
3597 	PMU_PMEVTYPER_EL0(12),
3598 	PMU_PMEVTYPER_EL0(13),
3599 	PMU_PMEVTYPER_EL0(14),
3600 	PMU_PMEVTYPER_EL0(15),
3601 	PMU_PMEVTYPER_EL0(16),
3602 	PMU_PMEVTYPER_EL0(17),
3603 	PMU_PMEVTYPER_EL0(18),
3604 	PMU_PMEVTYPER_EL0(19),
3605 	PMU_PMEVTYPER_EL0(20),
3606 	PMU_PMEVTYPER_EL0(21),
3607 	PMU_PMEVTYPER_EL0(22),
3608 	PMU_PMEVTYPER_EL0(23),
3609 	PMU_PMEVTYPER_EL0(24),
3610 	PMU_PMEVTYPER_EL0(25),
3611 	PMU_PMEVTYPER_EL0(26),
3612 	PMU_PMEVTYPER_EL0(27),
3613 	PMU_PMEVTYPER_EL0(28),
3614 	PMU_PMEVTYPER_EL0(29),
3615 	PMU_PMEVTYPER_EL0(30),
3616 	/*
3617 	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
3618 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
3619 	 */
3620 	{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
3621 	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
3622 
3623 	EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
3624 	EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
3625 	EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
3626 	EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
3627 	EL2_REG_FILTERED(SCTLR2_EL2, access_vm_reg, reset_val, 0,
3628 			 sctlr2_el2_visibility),
3629 	EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
3630 	EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0),
3631 	EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
3632 	EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
3633 	EL2_REG_VNCR_FILT(HFGRTR_EL2, fgt_visibility),
3634 	EL2_REG_VNCR_FILT(HFGWTR_EL2, fgt_visibility),
3635 	EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
3636 	EL2_REG_VNCR(HACR_EL2, reset_val, 0),
3637 
3638 	EL2_REG_FILTERED(ZCR_EL2, access_zcr_el2, reset_val, 0,
3639 			 sve_el2_visibility),
3640 
3641 	EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
3642 
3643 	EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
3644 	EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
3645 	EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
3646 	EL2_REG_FILTERED(TCR2_EL2, access_rw, reset_val, TCR2_EL2_RES1,
3647 			 tcr2_el2_visibility),
3648 	EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
3649 	EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
3650 	EL2_REG_FILTERED(VNCR_EL2, bad_vncr_trap, reset_val, 0,
3651 			 vncr_el2_visibility),
3652 
3653 	{ SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 },
3654 	EL2_REG_VNCR_FILT(HDFGRTR2_EL2, fgt2_visibility),
3655 	EL2_REG_VNCR_FILT(HDFGWTR2_EL2, fgt2_visibility),
3656 	EL2_REG_VNCR_FILT(HFGRTR2_EL2, fgt2_visibility),
3657 	EL2_REG_VNCR_FILT(HFGWTR2_EL2, fgt2_visibility),
3658 	EL2_REG_VNCR_FILT(HDFGRTR_EL2, fgt_visibility),
3659 	EL2_REG_VNCR_FILT(HDFGWTR_EL2, fgt_visibility),
3660 	EL2_REG_VNCR_FILT(HAFGRTR_EL2, fgt_visibility),
3661 	EL2_REG_VNCR_FILT(HFGITR2_EL2, fgt2_visibility),
3662 	EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
3663 	EL2_REG_REDIR(ELR_EL2, reset_val, 0),
3664 	{ SYS_DESC(SYS_SP_EL1), access_sp_el1},
3665 
3666 	/* AArch32 SPSR_* are RES0 if trapped from a NV guest */
3667 	{ SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi },
3668 	{ SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi },
3669 	{ SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi },
3670 	{ SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi },
3671 
3672 	{ SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 },
3673 	EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
3674 	EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
3675 	EL2_REG_REDIR(ESR_EL2, reset_val, 0),
3676 	EL2_REG_VNCR(VSESR_EL2, reset_unknown, 0),
3677 	{ SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 },
3678 
3679 	EL2_REG_REDIR(FAR_EL2, reset_val, 0),
3680 	EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
3681 
3682 	EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
3683 	EL2_REG_FILTERED(PIRE0_EL2, access_rw, reset_val, 0,
3684 			 s1pie_el2_visibility),
3685 	EL2_REG_FILTERED(PIR_EL2, access_rw, reset_val, 0,
3686 			 s1pie_el2_visibility),
3687 	EL2_REG_FILTERED(POR_EL2, access_rw, reset_val, 0,
3688 			 s1poe_el2_visibility),
3689 	EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
3690 	{ SYS_DESC(SYS_MPAMHCR_EL2), undef_access },
3691 	{ SYS_DESC(SYS_MPAMVPMV_EL2), undef_access },
3692 	{ SYS_DESC(SYS_MPAM2_EL2), undef_access },
3693 	{ SYS_DESC(SYS_MPAMVPM0_EL2), undef_access },
3694 	{ SYS_DESC(SYS_MPAMVPM1_EL2), undef_access },
3695 	{ SYS_DESC(SYS_MPAMVPM2_EL2), undef_access },
3696 	{ SYS_DESC(SYS_MPAMVPM3_EL2), undef_access },
3697 	{ SYS_DESC(SYS_MPAMVPM4_EL2), undef_access },
3698 	{ SYS_DESC(SYS_MPAMVPM5_EL2), undef_access },
3699 	{ SYS_DESC(SYS_MPAMVPM6_EL2), undef_access },
3700 	{ SYS_DESC(SYS_MPAMVPM7_EL2), undef_access },
3701 
3702 	EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
3703 	{ SYS_DESC(SYS_RVBAR_EL2), undef_access },
3704 	{ SYS_DESC(SYS_RMR_EL2), undef_access },
3705 	EL2_REG_VNCR(VDISR_EL2, reset_unknown, 0),
3706 
3707 	EL2_REG_VNCR_GICv3(ICH_AP0R0_EL2),
3708 	EL2_REG_VNCR_GICv3(ICH_AP0R1_EL2),
3709 	EL2_REG_VNCR_GICv3(ICH_AP0R2_EL2),
3710 	EL2_REG_VNCR_GICv3(ICH_AP0R3_EL2),
3711 	EL2_REG_VNCR_GICv3(ICH_AP1R0_EL2),
3712 	EL2_REG_VNCR_GICv3(ICH_AP1R1_EL2),
3713 	EL2_REG_VNCR_GICv3(ICH_AP1R2_EL2),
3714 	EL2_REG_VNCR_GICv3(ICH_AP1R3_EL2),
3715 
3716 	{ SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre },
3717 
3718 	EL2_REG_VNCR_GICv3(ICH_HCR_EL2),
3719 	{ SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr },
3720 	{ SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr },
3721 	{ SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr },
3722 	{ SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr },
3723 	EL2_REG_VNCR_GICv3(ICH_VMCR_EL2),
3724 
3725 	EL2_REG_VNCR_GICv3(ICH_LR0_EL2),
3726 	EL2_REG_VNCR_GICv3(ICH_LR1_EL2),
3727 	EL2_REG_VNCR_GICv3(ICH_LR2_EL2),
3728 	EL2_REG_VNCR_GICv3(ICH_LR3_EL2),
3729 	EL2_REG_VNCR_GICv3(ICH_LR4_EL2),
3730 	EL2_REG_VNCR_GICv3(ICH_LR5_EL2),
3731 	EL2_REG_VNCR_GICv3(ICH_LR6_EL2),
3732 	EL2_REG_VNCR_GICv3(ICH_LR7_EL2),
3733 	EL2_REG_VNCR_GICv3(ICH_LR8_EL2),
3734 	EL2_REG_VNCR_GICv3(ICH_LR9_EL2),
3735 	EL2_REG_VNCR_GICv3(ICH_LR10_EL2),
3736 	EL2_REG_VNCR_GICv3(ICH_LR11_EL2),
3737 	EL2_REG_VNCR_GICv3(ICH_LR12_EL2),
3738 	EL2_REG_VNCR_GICv3(ICH_LR13_EL2),
3739 	EL2_REG_VNCR_GICv3(ICH_LR14_EL2),
3740 	EL2_REG_VNCR_GICv3(ICH_LR15_EL2),
3741 
3742 	EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
3743 	EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
3744 
3745 	EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
3746 	EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
3747 	{ SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer },
3748 	TIMER_REG(CNTHP_CTL_EL2, el2_visibility),
3749 	TIMER_REG(CNTHP_CVAL_EL2, el2_visibility),
3750 
3751 	{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer, .visibility = cnthv_visibility },
3752 	TIMER_REG(CNTHV_CTL_EL2, cnthv_visibility),
3753 	TIMER_REG(CNTHV_CVAL_EL2, cnthv_visibility),
3754 
3755 	{ SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
3756 
3757 	{ SYS_DESC(SYS_CNTP_TVAL_EL02), access_arch_timer },
3758 	{ SYS_DESC(SYS_CNTP_CTL_EL02), access_arch_timer },
3759 	{ SYS_DESC(SYS_CNTP_CVAL_EL02), access_arch_timer },
3760 
3761 	{ SYS_DESC(SYS_CNTV_TVAL_EL02), access_arch_timer },
3762 	{ SYS_DESC(SYS_CNTV_CTL_EL02), access_arch_timer },
3763 	{ SYS_DESC(SYS_CNTV_CVAL_EL02), access_arch_timer },
3764 
3765 	EL2_REG(SP_EL2, NULL, reset_unknown, 0),
3766 };
3767 
handle_at_s1e01(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3768 static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3769 			    const struct sys_reg_desc *r)
3770 {
3771 	u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3772 
3773 	__kvm_at_s1e01(vcpu, op, p->regval);
3774 
3775 	return true;
3776 }
3777 
handle_at_s1e2(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3778 static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3779 			   const struct sys_reg_desc *r)
3780 {
3781 	u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3782 
3783 	/* There is no FGT associated with AT S1E2A :-( */
3784 	if (op == OP_AT_S1E2A &&
3785 	    !kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) {
3786 		kvm_inject_undefined(vcpu);
3787 		return false;
3788 	}
3789 
3790 	__kvm_at_s1e2(vcpu, op, p->regval);
3791 
3792 	return true;
3793 }
3794 
handle_at_s12(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3795 static bool handle_at_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3796 			  const struct sys_reg_desc *r)
3797 {
3798 	u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3799 
3800 	__kvm_at_s12(vcpu, op, p->regval);
3801 
3802 	return true;
3803 }
3804 
kvm_supported_tlbi_s12_op(struct kvm_vcpu * vpcu,u32 instr)3805 static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vpcu, u32 instr)
3806 {
3807 	struct kvm *kvm = vpcu->kvm;
3808 	u8 CRm = sys_reg_CRm(instr);
3809 
3810 	if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
3811 	    !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
3812 		return false;
3813 
3814 	if (CRm == TLBI_CRm_nROS &&
3815 	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3816 		return false;
3817 
3818 	return true;
3819 }
3820 
handle_alle1is(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3821 static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3822 			   const struct sys_reg_desc *r)
3823 {
3824 	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3825 
3826 	if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
3827 		return undef_access(vcpu, p, r);
3828 
3829 	write_lock(&vcpu->kvm->mmu_lock);
3830 
3831 	/*
3832 	 * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
3833 	 * corresponding VMIDs.
3834 	 */
3835 	kvm_nested_s2_unmap(vcpu->kvm, true);
3836 
3837 	write_unlock(&vcpu->kvm->mmu_lock);
3838 
3839 	return true;
3840 }
3841 
kvm_supported_tlbi_ipas2_op(struct kvm_vcpu * vpcu,u32 instr)3842 static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vpcu, u32 instr)
3843 {
3844 	struct kvm *kvm = vpcu->kvm;
3845 	u8 CRm = sys_reg_CRm(instr);
3846 	u8 Op2 = sys_reg_Op2(instr);
3847 
3848 	if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
3849 	    !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
3850 		return false;
3851 
3852 	if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) &&
3853 	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
3854 		return false;
3855 
3856 	if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) &&
3857 	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3858 		return false;
3859 
3860 	if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) &&
3861 	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
3862 		return false;
3863 
3864 	return true;
3865 }
3866 
3867 /* Only defined here as this is an internal "abstraction" */
3868 union tlbi_info {
3869 	struct {
3870 		u64	start;
3871 		u64	size;
3872 	} range;
3873 
3874 	struct {
3875 		u64	addr;
3876 	} ipa;
3877 
3878 	struct {
3879 		u64	addr;
3880 		u32	encoding;
3881 	} va;
3882 };
3883 
s2_mmu_unmap_range(struct kvm_s2_mmu * mmu,const union tlbi_info * info)3884 static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
3885 			       const union tlbi_info *info)
3886 {
3887 	/*
3888 	 * The unmap operation is allowed to drop the MMU lock and block, which
3889 	 * means that @mmu could be used for a different context than the one
3890 	 * currently being invalidated.
3891 	 *
3892 	 * This behavior is still safe, as:
3893 	 *
3894 	 *  1) The vCPU(s) that recycled the MMU are responsible for invalidating
3895 	 *     the entire MMU before reusing it, which still honors the intent
3896 	 *     of a TLBI.
3897 	 *
3898 	 *  2) Until the guest TLBI instruction is 'retired' (i.e. increment PC
3899 	 *     and ERET to the guest), other vCPUs are allowed to use stale
3900 	 *     translations.
3901 	 *
3902 	 *  3) Accidentally unmapping an unrelated MMU context is nonfatal, and
3903 	 *     at worst may cause more aborts for shadow stage-2 fills.
3904 	 *
3905 	 * Dropping the MMU lock also implies that shadow stage-2 fills could
3906 	 * happen behind the back of the TLBI. This is still safe, though, as
3907 	 * the L1 needs to put its stage-2 in a consistent state before doing
3908 	 * the TLBI.
3909 	 */
3910 	kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true);
3911 }
3912 
handle_vmalls12e1is(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3913 static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3914 				const struct sys_reg_desc *r)
3915 {
3916 	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3917 	u64 limit, vttbr;
3918 
3919 	if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
3920 		return undef_access(vcpu, p, r);
3921 
3922 	vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3923 	limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));
3924 
3925 	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3926 				   &(union tlbi_info) {
3927 					   .range = {
3928 						   .start = 0,
3929 						   .size = limit,
3930 					   },
3931 				   },
3932 				   s2_mmu_unmap_range);
3933 
3934 	return true;
3935 }
3936 
handle_ripas2e1is(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3937 static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3938 			      const struct sys_reg_desc *r)
3939 {
3940 	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3941 	u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3942 	u64 base, range;
3943 
3944 	if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
3945 		return undef_access(vcpu, p, r);
3946 
3947 	/*
3948 	 * Because the shadow S2 structure doesn't necessarily reflect that
3949 	 * of the guest's S2 (different base granule size, for example), we
3950 	 * decide to ignore TTL and only use the described range.
3951 	 */
3952 	base = decode_range_tlbi(p->regval, &range, NULL);
3953 
3954 	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3955 				   &(union tlbi_info) {
3956 					   .range = {
3957 						   .start = base,
3958 						   .size = range,
3959 					   },
3960 				   },
3961 				   s2_mmu_unmap_range);
3962 
3963 	return true;
3964 }
3965 
s2_mmu_unmap_ipa(struct kvm_s2_mmu * mmu,const union tlbi_info * info)3966 static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
3967 			     const union tlbi_info *info)
3968 {
3969 	unsigned long max_size;
3970 	u64 base_addr;
3971 
3972 	/*
3973 	 * We drop a number of things from the supplied value:
3974 	 *
3975 	 * - NS bit: we're non-secure only.
3976 	 *
3977 	 * - IPA[51:48]: We don't support 52bit IPA just yet...
3978 	 *
3979 	 * And of course, adjust the IPA to be on an actual address.
3980 	 */
3981 	base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12;
3982 	max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
3983 	base_addr &= ~(max_size - 1);
3984 
3985 	/*
3986 	 * See comment in s2_mmu_unmap_range() for why this is allowed to
3987 	 * reschedule.
3988 	 */
3989 	kvm_stage2_unmap_range(mmu, base_addr, max_size, true);
3990 }
3991 
handle_ipas2e1is(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)3992 static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3993 			     const struct sys_reg_desc *r)
3994 {
3995 	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3996 	u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3997 
3998 	if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
3999 		return undef_access(vcpu, p, r);
4000 
4001 	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
4002 				   &(union tlbi_info) {
4003 					   .ipa = {
4004 						   .addr = p->regval,
4005 					   },
4006 				   },
4007 				   s2_mmu_unmap_ipa);
4008 
4009 	return true;
4010 }
4011 
s2_mmu_tlbi_s1e1(struct kvm_s2_mmu * mmu,const union tlbi_info * info)4012 static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
4013 			     const union tlbi_info *info)
4014 {
4015 	WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
4016 }
4017 
handle_tlbi_el2(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)4018 static bool handle_tlbi_el2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
4019 			    const struct sys_reg_desc *r)
4020 {
4021 	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
4022 
4023 	if (!kvm_supported_tlbi_s1e2_op(vcpu, sys_encoding))
4024 		return undef_access(vcpu, p, r);
4025 
4026 	kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
4027 	return true;
4028 }
4029 
handle_tlbi_el1(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)4030 static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
4031 			    const struct sys_reg_desc *r)
4032 {
4033 	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
4034 
4035 	/*
4036 	 * If we're here, this is because we've trapped on a EL1 TLBI
4037 	 * instruction that affects the EL1 translation regime while
4038 	 * we're running in a context that doesn't allow us to let the
4039 	 * HW do its thing (aka vEL2):
4040 	 *
4041 	 * - HCR_EL2.E2H == 0 : a non-VHE guest
4042 	 * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
4043 	 *
4044 	 * Another possibility is that we are invalidating the EL2 context
4045 	 * using EL1 instructions, but that we landed here because we need
4046 	 * additional invalidation for structures that are not held in the
4047 	 * CPU TLBs (such as the VNCR pseudo-TLB and its EL2 mapping). In
4048 	 * that case, we are guaranteed that HCR_EL2.{E2H,TGE} == { 1, 1 }
4049 	 * as we don't allow an NV-capable L1 in a nVHE configuration.
4050 	 *
4051 	 * We don't expect these helpers to ever be called when running
4052 	 * in a vEL1 context.
4053 	 */
4054 
4055 	WARN_ON(!vcpu_is_el2(vcpu));
4056 
4057 	if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding))
4058 		return undef_access(vcpu, p, r);
4059 
4060 	if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) {
4061 		kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
4062 		return true;
4063 	}
4064 
4065 	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm,
4066 				   get_vmid(__vcpu_sys_reg(vcpu, VTTBR_EL2)),
4067 				   &(union tlbi_info) {
4068 					   .va = {
4069 						   .addr = p->regval,
4070 						   .encoding = sys_encoding,
4071 					   },
4072 				   },
4073 				   s2_mmu_tlbi_s1e1);
4074 
4075 	return true;
4076 }
4077 
4078 #define SYS_INSN(insn, access_fn)					\
4079 	{								\
4080 		SYS_DESC(OP_##insn),					\
4081 		.access = (access_fn),					\
4082 	}
4083 
4084 static struct sys_reg_desc sys_insn_descs[] = {
4085 	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
4086 	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
4087 	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
4088 
4089 	SYS_INSN(AT_S1E1R, handle_at_s1e01),
4090 	SYS_INSN(AT_S1E1W, handle_at_s1e01),
4091 	SYS_INSN(AT_S1E0R, handle_at_s1e01),
4092 	SYS_INSN(AT_S1E0W, handle_at_s1e01),
4093 	SYS_INSN(AT_S1E1RP, handle_at_s1e01),
4094 	SYS_INSN(AT_S1E1WP, handle_at_s1e01),
4095 
4096 	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
4097 	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
4098 	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
4099 	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
4100 	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
4101 	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
4102 
4103 	SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1),
4104 	SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1),
4105 	SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1),
4106 	SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1),
4107 	SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1),
4108 	SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1),
4109 
4110 	SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1),
4111 	SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1),
4112 	SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1),
4113 	SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1),
4114 
4115 	SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1),
4116 	SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1),
4117 	SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1),
4118 	SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1),
4119 	SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1),
4120 	SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1),
4121 
4122 	SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1),
4123 	SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1),
4124 	SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1),
4125 	SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1),
4126 
4127 	SYS_INSN(TLBI_RVAE1, handle_tlbi_el1),
4128 	SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1),
4129 	SYS_INSN(TLBI_RVALE1, handle_tlbi_el1),
4130 	SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1),
4131 
4132 	SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1),
4133 	SYS_INSN(TLBI_VAE1, handle_tlbi_el1),
4134 	SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1),
4135 	SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
4136 	SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
4137 	SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),
4138 
4139 	SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1),
4140 	SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1),
4141 	SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1),
4142 	SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1),
4143 	SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1),
4144 	SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1),
4145 
4146 	SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1),
4147 	SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1),
4148 	SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1),
4149 	SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1),
4150 
4151 	SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1),
4152 	SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1),
4153 	SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1),
4154 	SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1),
4155 	SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1),
4156 	SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1),
4157 
4158 	SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1),
4159 	SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1),
4160 	SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1),
4161 	SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1),
4162 
4163 	SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1),
4164 	SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1),
4165 	SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1),
4166 	SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1),
4167 
4168 	SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1),
4169 	SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1),
4170 	SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1),
4171 	SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1),
4172 	SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1),
4173 	SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1),
4174 
4175 	SYS_INSN(AT_S1E2R, handle_at_s1e2),
4176 	SYS_INSN(AT_S1E2W, handle_at_s1e2),
4177 	SYS_INSN(AT_S12E1R, handle_at_s12),
4178 	SYS_INSN(AT_S12E1W, handle_at_s12),
4179 	SYS_INSN(AT_S12E0R, handle_at_s12),
4180 	SYS_INSN(AT_S12E0W, handle_at_s12),
4181 	SYS_INSN(AT_S1E2A, handle_at_s1e2),
4182 
4183 	SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is),
4184 	SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is),
4185 	SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
4186 	SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),
4187 
4188 	SYS_INSN(TLBI_ALLE2OS, handle_tlbi_el2),
4189 	SYS_INSN(TLBI_VAE2OS, handle_tlbi_el2),
4190 	SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
4191 	SYS_INSN(TLBI_VALE2OS, handle_tlbi_el2),
4192 	SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),
4193 
4194 	SYS_INSN(TLBI_RVAE2IS, handle_tlbi_el2),
4195 	SYS_INSN(TLBI_RVALE2IS, handle_tlbi_el2),
4196 	SYS_INSN(TLBI_ALLE2IS, handle_tlbi_el2),
4197 	SYS_INSN(TLBI_VAE2IS, handle_tlbi_el2),
4198 
4199 	SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
4200 
4201 	SYS_INSN(TLBI_VALE2IS, handle_tlbi_el2),
4202 
4203 	SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
4204 	SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is),
4205 	SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is),
4206 	SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is),
4207 	SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is),
4208 	SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is),
4209 	SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
4210 	SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
4211 	SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
4212 	SYS_INSN(TLBI_RVAE2OS, handle_tlbi_el2),
4213 	SYS_INSN(TLBI_RVALE2OS, handle_tlbi_el2),
4214 	SYS_INSN(TLBI_RVAE2, handle_tlbi_el2),
4215 	SYS_INSN(TLBI_RVALE2, handle_tlbi_el2),
4216 	SYS_INSN(TLBI_ALLE2, handle_tlbi_el2),
4217 	SYS_INSN(TLBI_VAE2, handle_tlbi_el2),
4218 
4219 	SYS_INSN(TLBI_ALLE1, handle_alle1is),
4220 
4221 	SYS_INSN(TLBI_VALE2, handle_tlbi_el2),
4222 
4223 	SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
4224 
4225 	SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is),
4226 	SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is),
4227 	SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
4228 	SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),
4229 
4230 	SYS_INSN(TLBI_ALLE2OSNXS, handle_tlbi_el2),
4231 	SYS_INSN(TLBI_VAE2OSNXS, handle_tlbi_el2),
4232 	SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
4233 	SYS_INSN(TLBI_VALE2OSNXS, handle_tlbi_el2),
4234 	SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),
4235 
4236 	SYS_INSN(TLBI_RVAE2ISNXS, handle_tlbi_el2),
4237 	SYS_INSN(TLBI_RVALE2ISNXS, handle_tlbi_el2),
4238 	SYS_INSN(TLBI_ALLE2ISNXS, handle_tlbi_el2),
4239 	SYS_INSN(TLBI_VAE2ISNXS, handle_tlbi_el2),
4240 
4241 	SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
4242 	SYS_INSN(TLBI_VALE2ISNXS, handle_tlbi_el2),
4243 	SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
4244 	SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
4245 	SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
4246 	SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is),
4247 	SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is),
4248 	SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is),
4249 	SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
4250 	SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
4251 	SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
4252 	SYS_INSN(TLBI_RVAE2OSNXS, handle_tlbi_el2),
4253 	SYS_INSN(TLBI_RVALE2OSNXS, handle_tlbi_el2),
4254 	SYS_INSN(TLBI_RVAE2NXS, handle_tlbi_el2),
4255 	SYS_INSN(TLBI_RVALE2NXS, handle_tlbi_el2),
4256 	SYS_INSN(TLBI_ALLE2NXS, handle_tlbi_el2),
4257 	SYS_INSN(TLBI_VAE2NXS, handle_tlbi_el2),
4258 	SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
4259 	SYS_INSN(TLBI_VALE2NXS, handle_tlbi_el2),
4260 	SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
4261 };
4262 
trap_dbgdidr(struct kvm_vcpu * vcpu,struct sys_reg_params * p,const struct sys_reg_desc * r)4263 static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
4264 			struct sys_reg_params *p,
4265 			const struct sys_reg_desc *r)
4266 {
4267 	if (p->is_write) {
4268 		return ignore_write(vcpu, p);
4269 	} else {
4270 		u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
4271 		u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
4272 
4273 		p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
4274 			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
4275 			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
4276 			     (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
4277 			     (1 << 15) | (el3 << 14) | (el3 << 12));
4278 		return true;
4279 	}
4280 }
4281 
4282 /*
4283  * AArch32 debug register mappings
4284  *
4285  * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
4286  * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
4287  *
4288  * None of the other registers share their location, so treat them as
4289  * if they were 64bit.
4290  */
4291 #define DBG_BCR_BVR_WCR_WVR(n)							\
4292 	/* DBGBVRn */								\
4293 	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4),			\
4294 	  trap_dbg_wb_reg, NULL, n },						\
4295 	/* DBGBCRn */								\
4296 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_dbg_wb_reg, NULL, n },	\
4297 	/* DBGWVRn */								\
4298 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_dbg_wb_reg, NULL, n },	\
4299 	/* DBGWCRn */								\
4300 	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_dbg_wb_reg, NULL, n }
4301 
4302 #define DBGBXVR(n)								\
4303 	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1),			\
4304 	  trap_dbg_wb_reg, NULL, n }
4305 
4306 /*
4307  * Trapped cp14 registers. We generally ignore most of the external
4308  * debug, on the principle that they don't really make sense to a
4309  * guest. Revisit this one day, would this principle change.
4310  */
4311 static const struct sys_reg_desc cp14_regs[] = {
4312 	/* DBGDIDR */
4313 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
4314 	/* DBGDTRRXext */
4315 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
4316 
4317 	DBG_BCR_BVR_WCR_WVR(0),
4318 	/* DBGDSCRint */
4319 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
4320 	DBG_BCR_BVR_WCR_WVR(1),
4321 	/* DBGDCCINT */
4322 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
4323 	/* DBGDSCRext */
4324 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
4325 	DBG_BCR_BVR_WCR_WVR(2),
4326 	/* DBGDTR[RT]Xint */
4327 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
4328 	/* DBGDTR[RT]Xext */
4329 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
4330 	DBG_BCR_BVR_WCR_WVR(3),
4331 	DBG_BCR_BVR_WCR_WVR(4),
4332 	DBG_BCR_BVR_WCR_WVR(5),
4333 	/* DBGWFAR */
4334 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
4335 	/* DBGOSECCR */
4336 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
4337 	DBG_BCR_BVR_WCR_WVR(6),
4338 	/* DBGVCR */
4339 	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
4340 	DBG_BCR_BVR_WCR_WVR(7),
4341 	DBG_BCR_BVR_WCR_WVR(8),
4342 	DBG_BCR_BVR_WCR_WVR(9),
4343 	DBG_BCR_BVR_WCR_WVR(10),
4344 	DBG_BCR_BVR_WCR_WVR(11),
4345 	DBG_BCR_BVR_WCR_WVR(12),
4346 	DBG_BCR_BVR_WCR_WVR(13),
4347 	DBG_BCR_BVR_WCR_WVR(14),
4348 	DBG_BCR_BVR_WCR_WVR(15),
4349 
4350 	/* DBGDRAR (32bit) */
4351 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
4352 
4353 	DBGBXVR(0),
4354 	/* DBGOSLAR */
4355 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
4356 	DBGBXVR(1),
4357 	/* DBGOSLSR */
4358 	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
4359 	DBGBXVR(2),
4360 	DBGBXVR(3),
4361 	/* DBGOSDLR */
4362 	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
4363 	DBGBXVR(4),
4364 	/* DBGPRCR */
4365 	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
4366 	DBGBXVR(5),
4367 	DBGBXVR(6),
4368 	DBGBXVR(7),
4369 	DBGBXVR(8),
4370 	DBGBXVR(9),
4371 	DBGBXVR(10),
4372 	DBGBXVR(11),
4373 	DBGBXVR(12),
4374 	DBGBXVR(13),
4375 	DBGBXVR(14),
4376 	DBGBXVR(15),
4377 
4378 	/* DBGDSAR (32bit) */
4379 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
4380 
4381 	/* DBGDEVID2 */
4382 	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
4383 	/* DBGDEVID1 */
4384 	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
4385 	/* DBGDEVID */
4386 	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
4387 	/* DBGCLAIMSET */
4388 	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
4389 	/* DBGCLAIMCLR */
4390 	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
4391 	/* DBGAUTHSTATUS */
4392 	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
4393 };
4394 
4395 /* Trapped cp14 64bit registers */
4396 static const struct sys_reg_desc cp14_64_regs[] = {
4397 	/* DBGDRAR (64bit) */
4398 	{ Op1( 0), CRm( 1), .access = trap_raz_wi },
4399 
4400 	/* DBGDSAR (64bit) */
4401 	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
4402 };
4403 
4404 #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)			\
4405 	AA32(_map),							\
4406 	Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),			\
4407 	.visibility = pmu_visibility
4408 
4409 /* Macro to expand the PMEVCNTRn register */
4410 #define PMU_PMEVCNTR(n)							\
4411 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
4412 	  (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
4413 	  .access = access_pmu_evcntr }
4414 
4415 /* Macro to expand the PMEVTYPERn register */
4416 #define PMU_PMEVTYPER(n)						\
4417 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
4418 	  (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
4419 	  .access = access_pmu_evtyper }
4420 /*
4421  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
4422  * depending on the way they are accessed (as a 32bit or a 64bit
4423  * register).
4424  */
4425 static const struct sys_reg_desc cp15_regs[] = {
4426 	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
4427 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
4428 	/* ACTLR */
4429 	{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
4430 	/* ACTLR2 */
4431 	{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
4432 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
4433 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
4434 	/* TTBCR */
4435 	{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
4436 	/* TTBCR2 */
4437 	{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
4438 	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
4439 	{ CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
4440 	/* DFSR */
4441 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
4442 	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
4443 	/* ADFSR */
4444 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
4445 	/* AIFSR */
4446 	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
4447 	/* DFAR */
4448 	{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
4449 	/* IFAR */
4450 	{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
4451 
4452 	/*
4453 	 * DC{C,I,CI}SW operations:
4454 	 */
4455 	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
4456 	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
4457 	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
4458 
4459 	/* PMU */
4460 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
4461 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
4462 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
4463 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
4464 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
4465 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
4466 	{ CP15_PMU_SYS_REG(LO,     0, 9, 12, 6), .access = access_pmceid },
4467 	{ CP15_PMU_SYS_REG(LO,     0, 9, 12, 7), .access = access_pmceid },
4468 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
4469 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
4470 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
4471 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
4472 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
4473 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
4474 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
4475 	{ CP15_PMU_SYS_REG(HI,     0, 9, 14, 4), .access = access_pmceid },
4476 	{ CP15_PMU_SYS_REG(HI,     0, 9, 14, 5), .access = access_pmceid },
4477 	/* PMMIR */
4478 	{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
4479 
4480 	/* PRRR/MAIR0 */
4481 	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
4482 	/* NMRR/MAIR1 */
4483 	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
4484 	/* AMAIR0 */
4485 	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
4486 	/* AMAIR1 */
4487 	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
4488 
4489 	{ CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
4490 	{ CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
4491 	{ CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
4492 	{ CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
4493 	{ CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
4494 	{ CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
4495 	{ CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
4496 	{ CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
4497 	{ CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
4498 	{ CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
4499 	{ CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
4500 	{ CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
4501 	{ CP15_SYS_DESC(SYS_ICC_DIR_EL1), undef_access },
4502 	{ CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
4503 	{ CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
4504 	{ CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
4505 	{ CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
4506 	{ CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
4507 	{ CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
4508 	{ CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
4509 	{ CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
4510 	{ CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
4511 
4512 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
4513 
4514 	/* Arch Tmers */
4515 	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
4516 	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
4517 
4518 	/* PMEVCNTRn */
4519 	PMU_PMEVCNTR(0),
4520 	PMU_PMEVCNTR(1),
4521 	PMU_PMEVCNTR(2),
4522 	PMU_PMEVCNTR(3),
4523 	PMU_PMEVCNTR(4),
4524 	PMU_PMEVCNTR(5),
4525 	PMU_PMEVCNTR(6),
4526 	PMU_PMEVCNTR(7),
4527 	PMU_PMEVCNTR(8),
4528 	PMU_PMEVCNTR(9),
4529 	PMU_PMEVCNTR(10),
4530 	PMU_PMEVCNTR(11),
4531 	PMU_PMEVCNTR(12),
4532 	PMU_PMEVCNTR(13),
4533 	PMU_PMEVCNTR(14),
4534 	PMU_PMEVCNTR(15),
4535 	PMU_PMEVCNTR(16),
4536 	PMU_PMEVCNTR(17),
4537 	PMU_PMEVCNTR(18),
4538 	PMU_PMEVCNTR(19),
4539 	PMU_PMEVCNTR(20),
4540 	PMU_PMEVCNTR(21),
4541 	PMU_PMEVCNTR(22),
4542 	PMU_PMEVCNTR(23),
4543 	PMU_PMEVCNTR(24),
4544 	PMU_PMEVCNTR(25),
4545 	PMU_PMEVCNTR(26),
4546 	PMU_PMEVCNTR(27),
4547 	PMU_PMEVCNTR(28),
4548 	PMU_PMEVCNTR(29),
4549 	PMU_PMEVCNTR(30),
4550 	/* PMEVTYPERn */
4551 	PMU_PMEVTYPER(0),
4552 	PMU_PMEVTYPER(1),
4553 	PMU_PMEVTYPER(2),
4554 	PMU_PMEVTYPER(3),
4555 	PMU_PMEVTYPER(4),
4556 	PMU_PMEVTYPER(5),
4557 	PMU_PMEVTYPER(6),
4558 	PMU_PMEVTYPER(7),
4559 	PMU_PMEVTYPER(8),
4560 	PMU_PMEVTYPER(9),
4561 	PMU_PMEVTYPER(10),
4562 	PMU_PMEVTYPER(11),
4563 	PMU_PMEVTYPER(12),
4564 	PMU_PMEVTYPER(13),
4565 	PMU_PMEVTYPER(14),
4566 	PMU_PMEVTYPER(15),
4567 	PMU_PMEVTYPER(16),
4568 	PMU_PMEVTYPER(17),
4569 	PMU_PMEVTYPER(18),
4570 	PMU_PMEVTYPER(19),
4571 	PMU_PMEVTYPER(20),
4572 	PMU_PMEVTYPER(21),
4573 	PMU_PMEVTYPER(22),
4574 	PMU_PMEVTYPER(23),
4575 	PMU_PMEVTYPER(24),
4576 	PMU_PMEVTYPER(25),
4577 	PMU_PMEVTYPER(26),
4578 	PMU_PMEVTYPER(27),
4579 	PMU_PMEVTYPER(28),
4580 	PMU_PMEVTYPER(29),
4581 	PMU_PMEVTYPER(30),
4582 	/* PMCCFILTR */
4583 	{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
4584 
4585 	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
4586 	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
4587 
4588 	/* CCSIDR2 */
4589 	{ Op1(1), CRn( 0), CRm( 0),  Op2(2), undef_access },
4590 
4591 	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
4592 };
4593 
4594 static const struct sys_reg_desc cp15_64_regs[] = {
4595 	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
4596 	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
4597 	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
4598 	{ SYS_DESC(SYS_AARCH32_CNTPCT),	      access_arch_timer },
4599 	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
4600 	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
4601 	{ SYS_DESC(SYS_AARCH32_CNTVCT),	      access_arch_timer },
4602 	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
4603 	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL),    access_arch_timer },
4604 	{ SYS_DESC(SYS_AARCH32_CNTPCTSS),     access_arch_timer },
4605 	{ SYS_DESC(SYS_AARCH32_CNTVCTSS),     access_arch_timer },
4606 };
4607 
check_sysreg_table(const struct sys_reg_desc * table,unsigned int n,bool reset_check)4608 static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
4609 			       bool reset_check)
4610 {
4611 	unsigned int i;
4612 
4613 	for (i = 0; i < n; i++) {
4614 		if (reset_check && table[i].reg && !table[i].reset) {
4615 			kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
4616 				&table[i], i, table[i].name);
4617 			return false;
4618 		}
4619 
4620 		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
4621 			kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
4622 				&table[i], i, table[i - 1].name, table[i].name);
4623 			return false;
4624 		}
4625 	}
4626 
4627 	return true;
4628 }
4629 
kvm_handle_cp14_load_store(struct kvm_vcpu * vcpu)4630 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
4631 {
4632 	kvm_inject_undefined(vcpu);
4633 	return 1;
4634 }
4635 
perform_access(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * r)4636 static void perform_access(struct kvm_vcpu *vcpu,
4637 			   struct sys_reg_params *params,
4638 			   const struct sys_reg_desc *r)
4639 {
4640 	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
4641 
4642 	/* Check for regs disabled by runtime config */
4643 	if (sysreg_hidden(vcpu, r)) {
4644 		kvm_inject_undefined(vcpu);
4645 		return;
4646 	}
4647 
4648 	/*
4649 	 * Not having an accessor means that we have configured a trap
4650 	 * that we don't know how to handle. This certainly qualifies
4651 	 * as a gross bug that should be fixed right away.
4652 	 */
4653 	BUG_ON(!r->access);
4654 
4655 	/* Skip instruction if instructed so */
4656 	if (likely(r->access(vcpu, params, r)))
4657 		kvm_incr_pc(vcpu);
4658 }
4659 
4660 /*
4661  * emulate_cp --  tries to match a sys_reg access in a handling table, and
4662  *                call the corresponding trap handler.
4663  *
4664  * @params: pointer to the descriptor of the access
4665  * @table: array of trap descriptors
4666  * @num: size of the trap descriptor array
4667  *
4668  * Return true if the access has been handled, false if not.
4669  */
emulate_cp(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * table,size_t num)4670 static bool emulate_cp(struct kvm_vcpu *vcpu,
4671 		       struct sys_reg_params *params,
4672 		       const struct sys_reg_desc *table,
4673 		       size_t num)
4674 {
4675 	const struct sys_reg_desc *r;
4676 
4677 	if (!table)
4678 		return false;	/* Not handled */
4679 
4680 	r = find_reg(params, table, num);
4681 
4682 	if (r) {
4683 		perform_access(vcpu, params, r);
4684 		return true;
4685 	}
4686 
4687 	/* Not handled */
4688 	return false;
4689 }
4690 
unhandled_cp_access(struct kvm_vcpu * vcpu,struct sys_reg_params * params)4691 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
4692 				struct sys_reg_params *params)
4693 {
4694 	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
4695 	int cp = -1;
4696 
4697 	switch (esr_ec) {
4698 	case ESR_ELx_EC_CP15_32:
4699 	case ESR_ELx_EC_CP15_64:
4700 		cp = 15;
4701 		break;
4702 	case ESR_ELx_EC_CP14_MR:
4703 	case ESR_ELx_EC_CP14_64:
4704 		cp = 14;
4705 		break;
4706 	default:
4707 		WARN_ON(1);
4708 	}
4709 
4710 	print_sys_reg_msg(params,
4711 			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
4712 			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
4713 	kvm_inject_undefined(vcpu);
4714 }
4715 
4716 /**
4717  * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
4718  * @vcpu: The VCPU pointer
4719  * @global: &struct sys_reg_desc
4720  * @nr_global: size of the @global array
4721  */
kvm_handle_cp_64(struct kvm_vcpu * vcpu,const struct sys_reg_desc * global,size_t nr_global)4722 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
4723 			    const struct sys_reg_desc *global,
4724 			    size_t nr_global)
4725 {
4726 	struct sys_reg_params params;
4727 	u64 esr = kvm_vcpu_get_esr(vcpu);
4728 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
4729 	int Rt2 = (esr >> 10) & 0x1f;
4730 
4731 	params.CRm = (esr >> 1) & 0xf;
4732 	params.is_write = ((esr & 1) == 0);
4733 
4734 	params.Op0 = 0;
4735 	params.Op1 = (esr >> 16) & 0xf;
4736 	params.Op2 = 0;
4737 	params.CRn = 0;
4738 
4739 	/*
4740 	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
4741 	 * backends between AArch32 and AArch64, we get away with it.
4742 	 */
4743 	if (params.is_write) {
4744 		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
4745 		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
4746 	}
4747 
4748 	/*
4749 	 * If the table contains a handler, handle the
4750 	 * potential register operation in the case of a read and return
4751 	 * with success.
4752 	 */
4753 	if (emulate_cp(vcpu, &params, global, nr_global)) {
4754 		/* Split up the value between registers for the read side */
4755 		if (!params.is_write) {
4756 			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
4757 			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
4758 		}
4759 
4760 		return 1;
4761 	}
4762 
4763 	unhandled_cp_access(vcpu, &params);
4764 	return 1;
4765 }
4766 
4767 static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
4768 
4769 /*
4770  * The CP10 ID registers are architecturally mapped to AArch64 feature
4771  * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
4772  * from AArch32.
4773  */
kvm_esr_cp10_id_to_sys64(u64 esr,struct sys_reg_params * params)4774 static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
4775 {
4776 	u8 reg_id = (esr >> 10) & 0xf;
4777 	bool valid;
4778 
4779 	params->is_write = ((esr & 1) == 0);
4780 	params->Op0 = 3;
4781 	params->Op1 = 0;
4782 	params->CRn = 0;
4783 	params->CRm = 3;
4784 
4785 	/* CP10 ID registers are read-only */
4786 	valid = !params->is_write;
4787 
4788 	switch (reg_id) {
4789 	/* MVFR0 */
4790 	case 0b0111:
4791 		params->Op2 = 0;
4792 		break;
4793 	/* MVFR1 */
4794 	case 0b0110:
4795 		params->Op2 = 1;
4796 		break;
4797 	/* MVFR2 */
4798 	case 0b0101:
4799 		params->Op2 = 2;
4800 		break;
4801 	default:
4802 		valid = false;
4803 	}
4804 
4805 	if (valid)
4806 		return true;
4807 
4808 	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
4809 		      str_write_read(params->is_write), reg_id);
4810 	return false;
4811 }
4812 
4813 /**
4814  * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
4815  *			  VFP Register' from AArch32.
4816  * @vcpu: The vCPU pointer
4817  *
4818  * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
4819  * Work out the correct AArch64 system register encoding and reroute to the
4820  * AArch64 system register emulation.
4821  */
kvm_handle_cp10_id(struct kvm_vcpu * vcpu)4822 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
4823 {
4824 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
4825 	u64 esr = kvm_vcpu_get_esr(vcpu);
4826 	struct sys_reg_params params;
4827 
4828 	/* UNDEF on any unhandled register access */
4829 	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
4830 		kvm_inject_undefined(vcpu);
4831 		return 1;
4832 	}
4833 
4834 	if (emulate_sys_reg(vcpu, &params))
4835 		vcpu_set_reg(vcpu, Rt, params.regval);
4836 
4837 	return 1;
4838 }
4839 
4840 /**
4841  * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
4842  *			       CRn=0, which corresponds to the AArch32 feature
4843  *			       registers.
4844  * @vcpu: the vCPU pointer
4845  * @params: the system register access parameters.
4846  *
4847  * Our cp15 system register tables do not enumerate the AArch32 feature
4848  * registers. Conveniently, our AArch64 table does, and the AArch32 system
4849  * register encoding can be trivially remapped into the AArch64 for the feature
4850  * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
4851  *
4852  * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
4853  * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
4854  * range are either UNKNOWN or RES0. Rerouting remains architectural as we
4855  * treat undefined registers in this range as RAZ.
4856  */
kvm_emulate_cp15_id_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * params)4857 static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
4858 				   struct sys_reg_params *params)
4859 {
4860 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
4861 
4862 	/* Treat impossible writes to RO registers as UNDEFINED */
4863 	if (params->is_write) {
4864 		unhandled_cp_access(vcpu, params);
4865 		return 1;
4866 	}
4867 
4868 	params->Op0 = 3;
4869 
4870 	/*
4871 	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
4872 	 * Avoid conflicting with future expansion of AArch64 feature registers
4873 	 * and simply treat them as RAZ here.
4874 	 */
4875 	if (params->CRm > 3)
4876 		params->regval = 0;
4877 	else if (!emulate_sys_reg(vcpu, params))
4878 		return 1;
4879 
4880 	vcpu_set_reg(vcpu, Rt, params->regval);
4881 	return 1;
4882 }
4883 
4884 /**
4885  * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
4886  * @vcpu: The VCPU pointer
4887  * @params: &struct sys_reg_params
4888  * @global: &struct sys_reg_desc
4889  * @nr_global: size of the @global array
4890  */
kvm_handle_cp_32(struct kvm_vcpu * vcpu,struct sys_reg_params * params,const struct sys_reg_desc * global,size_t nr_global)4891 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
4892 			    struct sys_reg_params *params,
4893 			    const struct sys_reg_desc *global,
4894 			    size_t nr_global)
4895 {
4896 	int Rt  = kvm_vcpu_sys_get_rt(vcpu);
4897 
4898 	params->regval = vcpu_get_reg(vcpu, Rt);
4899 
4900 	if (emulate_cp(vcpu, params, global, nr_global)) {
4901 		if (!params->is_write)
4902 			vcpu_set_reg(vcpu, Rt, params->regval);
4903 		return 1;
4904 	}
4905 
4906 	unhandled_cp_access(vcpu, params);
4907 	return 1;
4908 }
4909 
kvm_handle_cp15_64(struct kvm_vcpu * vcpu)4910 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
4911 {
4912 	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
4913 }
4914 
kvm_handle_cp15_32(struct kvm_vcpu * vcpu)4915 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
4916 {
4917 	struct sys_reg_params params;
4918 
4919 	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
4920 
4921 	/*
4922 	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
4923 	 * system register table. Registers in the ID range where CRm=0 are
4924 	 * excluded from this scheme as they do not trivially map into AArch64
4925 	 * system register encodings, except for AIDR/REVIDR.
4926 	 */
4927 	if (params.Op1 == 0 && params.CRn == 0 &&
4928 	    (params.CRm || params.Op2 == 6 /* REVIDR */))
4929 		return kvm_emulate_cp15_id_reg(vcpu, &params);
4930 	if (params.Op1 == 1 && params.CRn == 0 &&
4931 	    params.CRm == 0 && params.Op2 == 7 /* AIDR */)
4932 		return kvm_emulate_cp15_id_reg(vcpu, &params);
4933 
4934 	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
4935 }
4936 
kvm_handle_cp14_64(struct kvm_vcpu * vcpu)4937 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
4938 {
4939 	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
4940 }
4941 
kvm_handle_cp14_32(struct kvm_vcpu * vcpu)4942 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
4943 {
4944 	struct sys_reg_params params;
4945 
4946 	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
4947 
4948 	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
4949 }
4950 
4951 /**
4952  * emulate_sys_reg - Emulate a guest access to an AArch64 system register
4953  * @vcpu: The VCPU pointer
4954  * @params: Decoded system register parameters
4955  *
4956  * Return: true if the system register access was successful, false otherwise.
4957  */
emulate_sys_reg(struct kvm_vcpu * vcpu,struct sys_reg_params * params)4958 static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
4959 			    struct sys_reg_params *params)
4960 {
4961 	const struct sys_reg_desc *r;
4962 
4963 	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
4964 	if (likely(r)) {
4965 		perform_access(vcpu, params, r);
4966 		return true;
4967 	}
4968 
4969 	print_sys_reg_msg(params,
4970 			  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
4971 			  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
4972 	kvm_inject_undefined(vcpu);
4973 
4974 	return false;
4975 }
4976 
idregs_debug_find(struct kvm * kvm,u8 pos)4977 static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, u8 pos)
4978 {
4979 	unsigned long i, idreg_idx = 0;
4980 
4981 	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
4982 		const struct sys_reg_desc *r = &sys_reg_descs[i];
4983 
4984 		if (!is_vm_ftr_id_reg(reg_to_encoding(r)))
4985 			continue;
4986 
4987 		if (idreg_idx == pos)
4988 			return r;
4989 
4990 		idreg_idx++;
4991 	}
4992 
4993 	return NULL;
4994 }
4995 
idregs_debug_start(struct seq_file * s,loff_t * pos)4996 static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
4997 {
4998 	struct kvm *kvm = s->private;
4999 	u8 *iter;
5000 
5001 	mutex_lock(&kvm->arch.config_lock);
5002 
5003 	iter = &kvm->arch.idreg_debugfs_iter;
5004 	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
5005 	    *iter == (u8)~0) {
5006 		*iter = *pos;
5007 		if (!idregs_debug_find(kvm, *iter))
5008 			iter = NULL;
5009 	} else {
5010 		iter = ERR_PTR(-EBUSY);
5011 	}
5012 
5013 	mutex_unlock(&kvm->arch.config_lock);
5014 
5015 	return iter;
5016 }
5017 
idregs_debug_next(struct seq_file * s,void * v,loff_t * pos)5018 static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
5019 {
5020 	struct kvm *kvm = s->private;
5021 
5022 	(*pos)++;
5023 
5024 	if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) {
5025 		kvm->arch.idreg_debugfs_iter++;
5026 
5027 		return &kvm->arch.idreg_debugfs_iter;
5028 	}
5029 
5030 	return NULL;
5031 }
5032 
idregs_debug_stop(struct seq_file * s,void * v)5033 static void idregs_debug_stop(struct seq_file *s, void *v)
5034 {
5035 	struct kvm *kvm = s->private;
5036 
5037 	if (IS_ERR(v))
5038 		return;
5039 
5040 	mutex_lock(&kvm->arch.config_lock);
5041 
5042 	kvm->arch.idreg_debugfs_iter = ~0;
5043 
5044 	mutex_unlock(&kvm->arch.config_lock);
5045 }
5046 
idregs_debug_show(struct seq_file * s,void * v)5047 static int idregs_debug_show(struct seq_file *s, void *v)
5048 {
5049 	const struct sys_reg_desc *desc;
5050 	struct kvm *kvm = s->private;
5051 
5052 	desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter);
5053 
5054 	if (!desc->name)
5055 		return 0;
5056 
5057 	seq_printf(s, "%20s:\t%016llx\n",
5058 		   desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc)));
5059 
5060 	return 0;
5061 }
5062 
5063 static const struct seq_operations idregs_debug_sops = {
5064 	.start	= idregs_debug_start,
5065 	.next	= idregs_debug_next,
5066 	.stop	= idregs_debug_stop,
5067 	.show	= idregs_debug_show,
5068 };
5069 
5070 DEFINE_SEQ_ATTRIBUTE(idregs_debug);
5071 
kvm_sys_regs_create_debugfs(struct kvm * kvm)5072 void kvm_sys_regs_create_debugfs(struct kvm *kvm)
5073 {
5074 	kvm->arch.idreg_debugfs_iter = ~0;
5075 
5076 	debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
5077 			    &idregs_debug_fops);
5078 }
5079 
reset_vm_ftr_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * reg)5080 static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
5081 {
5082 	u32 id = reg_to_encoding(reg);
5083 	struct kvm *kvm = vcpu->kvm;
5084 
5085 	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
5086 		return;
5087 
5088 	kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg));
5089 }
5090 
reset_vcpu_ftr_id_reg(struct kvm_vcpu * vcpu,const struct sys_reg_desc * reg)5091 static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
5092 				  const struct sys_reg_desc *reg)
5093 {
5094 	if (kvm_vcpu_initialized(vcpu))
5095 		return;
5096 
5097 	reg->reset(vcpu, reg);
5098 }
5099 
5100 /**
5101  * kvm_reset_sys_regs - sets system registers to reset value
5102  * @vcpu: The VCPU pointer
5103  *
5104  * This function finds the right table above and sets the registers on the
5105  * virtual CPU struct to their architecturally defined reset values.
5106  */
kvm_reset_sys_regs(struct kvm_vcpu * vcpu)5107 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
5108 {
5109 	struct kvm *kvm = vcpu->kvm;
5110 	unsigned long i;
5111 
5112 	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
5113 		const struct sys_reg_desc *r = &sys_reg_descs[i];
5114 
5115 		if (!r->reset)
5116 			continue;
5117 
5118 		if (is_vm_ftr_id_reg(reg_to_encoding(r)))
5119 			reset_vm_ftr_id_reg(vcpu, r);
5120 		else if (is_vcpu_ftr_id_reg(reg_to_encoding(r)))
5121 			reset_vcpu_ftr_id_reg(vcpu, r);
5122 		else
5123 			r->reset(vcpu, r);
5124 
5125 		if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
5126 			__vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
5127 	}
5128 
5129 	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
5130 
5131 	if (kvm_vcpu_has_pmu(vcpu))
5132 		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
5133 }
5134 
5135 /**
5136  * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
5137  *			 trap on a guest execution
5138  * @vcpu: The VCPU pointer
5139  */
kvm_handle_sys_reg(struct kvm_vcpu * vcpu)5140 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
5141 {
5142 	const struct sys_reg_desc *desc = NULL;
5143 	struct sys_reg_params params;
5144 	unsigned long esr = kvm_vcpu_get_esr(vcpu);
5145 	int Rt = kvm_vcpu_sys_get_rt(vcpu);
5146 	int sr_idx;
5147 
5148 	trace_kvm_handle_sys_reg(esr);
5149 
5150 	if (triage_sysreg_trap(vcpu, &sr_idx))
5151 		return 1;
5152 
5153 	params = esr_sys64_to_params(esr);
5154 	params.regval = vcpu_get_reg(vcpu, Rt);
5155 
5156 	/* System registers have Op0=={2,3}, as per DDI487 J.a C5.1.2 */
5157 	if (params.Op0 == 2 || params.Op0 == 3)
5158 		desc = &sys_reg_descs[sr_idx];
5159 	else
5160 		desc = &sys_insn_descs[sr_idx];
5161 
5162 	perform_access(vcpu, &params, desc);
5163 
5164 	/* Read from system register? */
5165 	if (!params.is_write &&
5166 	    (params.Op0 == 2 || params.Op0 == 3))
5167 		vcpu_set_reg(vcpu, Rt, params.regval);
5168 
5169 	return 1;
5170 }
5171 
5172 /******************************************************************************
5173  * Userspace API
5174  *****************************************************************************/
5175 
index_to_params(u64 id,struct sys_reg_params * params)5176 static bool index_to_params(u64 id, struct sys_reg_params *params)
5177 {
5178 	switch (id & KVM_REG_SIZE_MASK) {
5179 	case KVM_REG_SIZE_U64:
5180 		/* Any unused index bits means it's not valid. */
5181 		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
5182 			      | KVM_REG_ARM_COPROC_MASK
5183 			      | KVM_REG_ARM64_SYSREG_OP0_MASK
5184 			      | KVM_REG_ARM64_SYSREG_OP1_MASK
5185 			      | KVM_REG_ARM64_SYSREG_CRN_MASK
5186 			      | KVM_REG_ARM64_SYSREG_CRM_MASK
5187 			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
5188 			return false;
5189 		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
5190 			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
5191 		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
5192 			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
5193 		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
5194 			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
5195 		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
5196 			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
5197 		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
5198 			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
5199 		return true;
5200 	default:
5201 		return false;
5202 	}
5203 }
5204 
get_reg_by_id(u64 id,const struct sys_reg_desc table[],unsigned int num)5205 const struct sys_reg_desc *get_reg_by_id(u64 id,
5206 					 const struct sys_reg_desc table[],
5207 					 unsigned int num)
5208 {
5209 	struct sys_reg_params params;
5210 
5211 	if (!index_to_params(id, &params))
5212 		return NULL;
5213 
5214 	return find_reg(&params, table, num);
5215 }
5216 
5217 /* Decode an index value, and find the sys_reg_desc entry. */
5218 static const struct sys_reg_desc *
id_to_sys_reg_desc(struct kvm_vcpu * vcpu,u64 id,const struct sys_reg_desc table[],unsigned int num)5219 id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
5220 		   const struct sys_reg_desc table[], unsigned int num)
5221 
5222 {
5223 	const struct sys_reg_desc *r;
5224 
5225 	/* We only do sys_reg for now. */
5226 	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
5227 		return NULL;
5228 
5229 	r = get_reg_by_id(id, table, num);
5230 
5231 	/* Not saved in the sys_reg array and not otherwise accessible? */
5232 	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
5233 		r = NULL;
5234 
5235 	return r;
5236 }
5237 
demux_c15_get(struct kvm_vcpu * vcpu,u64 id,void __user * uaddr)5238 static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
5239 {
5240 	u32 val;
5241 	u32 __user *uval = uaddr;
5242 
5243 	/* Fail if we have unknown bits set. */
5244 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
5245 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
5246 		return -ENOENT;
5247 
5248 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
5249 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
5250 		if (KVM_REG_SIZE(id) != 4)
5251 			return -ENOENT;
5252 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
5253 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
5254 		if (val >= CSSELR_MAX)
5255 			return -ENOENT;
5256 
5257 		return put_user(get_ccsidr(vcpu, val), uval);
5258 	default:
5259 		return -ENOENT;
5260 	}
5261 }
5262 
demux_c15_set(struct kvm_vcpu * vcpu,u64 id,void __user * uaddr)5263 static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
5264 {
5265 	u32 val, newval;
5266 	u32 __user *uval = uaddr;
5267 
5268 	/* Fail if we have unknown bits set. */
5269 	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
5270 		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
5271 		return -ENOENT;
5272 
5273 	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
5274 	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
5275 		if (KVM_REG_SIZE(id) != 4)
5276 			return -ENOENT;
5277 		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
5278 			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
5279 		if (val >= CSSELR_MAX)
5280 			return -ENOENT;
5281 
5282 		if (get_user(newval, uval))
5283 			return -EFAULT;
5284 
5285 		return set_ccsidr(vcpu, val, newval);
5286 	default:
5287 		return -ENOENT;
5288 	}
5289 }
5290 
kvm_one_reg_to_id(const struct kvm_one_reg * reg)5291 static u64 kvm_one_reg_to_id(const struct kvm_one_reg *reg)
5292 {
5293 	switch(reg->id) {
5294 	case KVM_REG_ARM_TIMER_CVAL:
5295 		return TO_ARM64_SYS_REG(CNTV_CVAL_EL0);
5296 	case KVM_REG_ARM_TIMER_CNT:
5297 		return TO_ARM64_SYS_REG(CNTVCT_EL0);
5298 	default:
5299 		return reg->id;
5300 	}
5301 }
5302 
kvm_sys_reg_get_user(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg,const struct sys_reg_desc table[],unsigned int num)5303 int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
5304 			 const struct sys_reg_desc table[], unsigned int num)
5305 {
5306 	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
5307 	const struct sys_reg_desc *r;
5308 	u64 id = kvm_one_reg_to_id(reg);
5309 	u64 val;
5310 	int ret;
5311 
5312 	r = id_to_sys_reg_desc(vcpu, id, table, num);
5313 	if (!r || sysreg_hidden(vcpu, r))
5314 		return -ENOENT;
5315 
5316 	if (r->get_user) {
5317 		ret = (r->get_user)(vcpu, r, &val);
5318 	} else {
5319 		val = __vcpu_sys_reg(vcpu, r->reg);
5320 		ret = 0;
5321 	}
5322 
5323 	if (!ret)
5324 		ret = put_user(val, uaddr);
5325 
5326 	return ret;
5327 }
5328 
kvm_arm_sys_reg_get_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)5329 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
5330 {
5331 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
5332 
5333 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
5334 		return demux_c15_get(vcpu, reg->id, uaddr);
5335 
5336 	return kvm_sys_reg_get_user(vcpu, reg,
5337 				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
5338 }
5339 
kvm_sys_reg_set_user(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg,const struct sys_reg_desc table[],unsigned int num)5340 int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
5341 			 const struct sys_reg_desc table[], unsigned int num)
5342 {
5343 	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
5344 	const struct sys_reg_desc *r;
5345 	u64 id = kvm_one_reg_to_id(reg);
5346 	u64 val;
5347 	int ret;
5348 
5349 	if (get_user(val, uaddr))
5350 		return -EFAULT;
5351 
5352 	r = id_to_sys_reg_desc(vcpu, id, table, num);
5353 	if (!r || sysreg_hidden(vcpu, r))
5354 		return -ENOENT;
5355 
5356 	if (sysreg_user_write_ignore(vcpu, r))
5357 		return 0;
5358 
5359 	if (r->set_user) {
5360 		ret = (r->set_user)(vcpu, r, val);
5361 	} else {
5362 		__vcpu_assign_sys_reg(vcpu, r->reg, val);
5363 		ret = 0;
5364 	}
5365 
5366 	return ret;
5367 }
5368 
kvm_arm_sys_reg_set_reg(struct kvm_vcpu * vcpu,const struct kvm_one_reg * reg)5369 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
5370 {
5371 	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
5372 
5373 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
5374 		return demux_c15_set(vcpu, reg->id, uaddr);
5375 
5376 	return kvm_sys_reg_set_user(vcpu, reg,
5377 				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
5378 }
5379 
num_demux_regs(void)5380 static unsigned int num_demux_regs(void)
5381 {
5382 	return CSSELR_MAX;
5383 }
5384 
write_demux_regids(u64 __user * uindices)5385 static int write_demux_regids(u64 __user *uindices)
5386 {
5387 	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
5388 	unsigned int i;
5389 
5390 	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
5391 	for (i = 0; i < CSSELR_MAX; i++) {
5392 		if (put_user(val | i, uindices))
5393 			return -EFAULT;
5394 		uindices++;
5395 	}
5396 	return 0;
5397 }
5398 
sys_reg_to_index(const struct sys_reg_desc * reg)5399 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
5400 {
5401 	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
5402 		KVM_REG_ARM64_SYSREG |
5403 		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
5404 		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
5405 		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
5406 		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
5407 		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
5408 }
5409 
copy_reg_to_user(const struct sys_reg_desc * reg,u64 __user ** uind)5410 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
5411 {
5412 	u64 idx;
5413 
5414 	if (!*uind)
5415 		return true;
5416 
5417 	switch (reg_to_encoding(reg)) {
5418 	case SYS_CNTV_CVAL_EL0:
5419 		idx = KVM_REG_ARM_TIMER_CVAL;
5420 		break;
5421 	case SYS_CNTVCT_EL0:
5422 		idx = KVM_REG_ARM_TIMER_CNT;
5423 		break;
5424 	default:
5425 		idx = sys_reg_to_index(reg);
5426 	}
5427 
5428 	if (put_user(idx, *uind))
5429 		return false;
5430 
5431 	(*uind)++;
5432 	return true;
5433 }
5434 
walk_one_sys_reg(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * rd,u64 __user ** uind,unsigned int * total)5435 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
5436 			    const struct sys_reg_desc *rd,
5437 			    u64 __user **uind,
5438 			    unsigned int *total)
5439 {
5440 	/*
5441 	 * Ignore registers we trap but don't save,
5442 	 * and for which no custom user accessor is provided.
5443 	 */
5444 	if (!(rd->reg || rd->get_user))
5445 		return 0;
5446 
5447 	if (sysreg_hidden(vcpu, rd))
5448 		return 0;
5449 
5450 	if (!copy_reg_to_user(rd, uind))
5451 		return -EFAULT;
5452 
5453 	(*total)++;
5454 	return 0;
5455 }
5456 
5457 /* Assumed ordered tables, see kvm_sys_reg_table_init. */
walk_sys_regs(struct kvm_vcpu * vcpu,u64 __user * uind)5458 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
5459 {
5460 	const struct sys_reg_desc *i2, *end2;
5461 	unsigned int total = 0;
5462 	int err;
5463 
5464 	i2 = sys_reg_descs;
5465 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
5466 
5467 	while (i2 != end2) {
5468 		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
5469 		if (err)
5470 			return err;
5471 	}
5472 	return total;
5473 }
5474 
kvm_arm_num_sys_reg_descs(struct kvm_vcpu * vcpu)5475 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
5476 {
5477 	return num_demux_regs()
5478 		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
5479 }
5480 
kvm_arm_copy_sys_reg_indices(struct kvm_vcpu * vcpu,u64 __user * uindices)5481 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
5482 {
5483 	int err;
5484 
5485 	err = walk_sys_regs(vcpu, uindices);
5486 	if (err < 0)
5487 		return err;
5488 	uindices += err;
5489 
5490 	return write_demux_regids(uindices);
5491 }
5492 
5493 #define KVM_ARM_FEATURE_ID_RANGE_INDEX(r)			\
5494 	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r),		\
5495 		sys_reg_Op1(r),					\
5496 		sys_reg_CRn(r),					\
5497 		sys_reg_CRm(r),					\
5498 		sys_reg_Op2(r))
5499 
kvm_vm_ioctl_get_reg_writable_masks(struct kvm * kvm,struct reg_mask_range * range)5500 int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
5501 {
5502 	const void *zero_page = page_to_virt(ZERO_PAGE(0));
5503 	u64 __user *masks = (u64 __user *)range->addr;
5504 
5505 	/* Only feature id range is supported, reserved[13] must be zero. */
5506 	if (range->range ||
5507 	    memcmp(range->reserved, zero_page, sizeof(range->reserved)))
5508 		return -EINVAL;
5509 
5510 	/* Wipe the whole thing first */
5511 	if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
5512 		return -EFAULT;
5513 
5514 	for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
5515 		const struct sys_reg_desc *reg = &sys_reg_descs[i];
5516 		u32 encoding = reg_to_encoding(reg);
5517 		u64 val;
5518 
5519 		if (!is_feature_id_reg(encoding) || !reg->set_user)
5520 			continue;
5521 
5522 		if (!reg->val ||
5523 		    (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) {
5524 			continue;
5525 		}
5526 		val = reg->val;
5527 
5528 		if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
5529 			return -EFAULT;
5530 	}
5531 
5532 	return 0;
5533 }
5534 
vcpu_set_hcr(struct kvm_vcpu * vcpu)5535 static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
5536 {
5537 	struct kvm *kvm = vcpu->kvm;
5538 
5539 	if (has_vhe() || has_hvhe())
5540 		vcpu->arch.hcr_el2 |= HCR_E2H;
5541 	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
5542 		/* route synchronous external abort exceptions to EL2 */
5543 		vcpu->arch.hcr_el2 |= HCR_TEA;
5544 		/* trap error record accesses */
5545 		vcpu->arch.hcr_el2 |= HCR_TERR;
5546 	}
5547 
5548 	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
5549 		vcpu->arch.hcr_el2 |= HCR_FWB;
5550 
5551 	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
5552 	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
5553 	    kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0))
5554 		vcpu->arch.hcr_el2 |= HCR_TID4;
5555 	else
5556 		vcpu->arch.hcr_el2 |= HCR_TID2;
5557 
5558 	if (vcpu_el1_is_32bit(vcpu))
5559 		vcpu->arch.hcr_el2 &= ~HCR_RW;
5560 
5561 	if (kvm_has_mte(vcpu->kvm))
5562 		vcpu->arch.hcr_el2 |= HCR_ATA;
5563 
5564 	/*
5565 	 * In the absence of FGT, we cannot independently trap TLBI
5566 	 * Range instructions. This isn't great, but trapping all
5567 	 * TLBIs would be far worse. Live with it...
5568 	 */
5569 	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
5570 		vcpu->arch.hcr_el2 |= HCR_TTLBOS;
5571 }
5572 
kvm_calculate_traps(struct kvm_vcpu * vcpu)5573 void kvm_calculate_traps(struct kvm_vcpu *vcpu)
5574 {
5575 	struct kvm *kvm = vcpu->kvm;
5576 
5577 	mutex_lock(&kvm->arch.config_lock);
5578 	vcpu_set_hcr(vcpu);
5579 	vcpu_set_ich_hcr(vcpu);
5580 	vcpu_set_hcrx(vcpu);
5581 
5582 	if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
5583 		goto out;
5584 
5585 	compute_fgu(kvm, HFGRTR_GROUP);
5586 	compute_fgu(kvm, HFGITR_GROUP);
5587 	compute_fgu(kvm, HDFGRTR_GROUP);
5588 	compute_fgu(kvm, HAFGRTR_GROUP);
5589 	compute_fgu(kvm, HFGRTR2_GROUP);
5590 	compute_fgu(kvm, HFGITR2_GROUP);
5591 	compute_fgu(kvm, HDFGRTR2_GROUP);
5592 
5593 	set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
5594 out:
5595 	mutex_unlock(&kvm->arch.config_lock);
5596 }
5597 
5598 /*
5599  * Perform last adjustments to the ID registers that are implied by the
5600  * configuration outside of the ID regs themselves, as well as any
5601  * initialisation that directly depend on these ID registers (such as
5602  * RES0/RES1 behaviours). This is not the place to configure traps though.
5603  *
5604  * Because this can be called once per CPU, changes must be idempotent.
5605  */
kvm_finalize_sys_regs(struct kvm_vcpu * vcpu)5606 int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
5607 {
5608 	struct kvm *kvm = vcpu->kvm;
5609 
5610 	guard(mutex)(&kvm->arch.config_lock);
5611 
5612 	if (!irqchip_in_kernel(kvm)) {
5613 		u64 val;
5614 
5615 		val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
5616 		kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
5617 		val = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;
5618 		kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, val);
5619 	}
5620 
5621 	if (vcpu_has_nv(vcpu)) {
5622 		int ret = kvm_init_nv_sysregs(vcpu);
5623 		if (ret)
5624 			return ret;
5625 	}
5626 
5627 	return 0;
5628 }
5629 
kvm_sys_reg_table_init(void)5630 int __init kvm_sys_reg_table_init(void)
5631 {
5632 	const struct sys_reg_desc *gicv3_regs;
5633 	bool valid = true;
5634 	unsigned int i, sz;
5635 	int ret = 0;
5636 
5637 	/* Make sure tables are unique and in order. */
5638 	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), true);
5639 	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), false);
5640 	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), false);
5641 	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), false);
5642 	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), false);
5643 	valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
5644 
5645 	gicv3_regs = vgic_v3_get_sysreg_table(&sz);
5646 	valid &= check_sysreg_table(gicv3_regs, sz, false);
5647 
5648 	if (!valid)
5649 		return -EINVAL;
5650 
5651 	init_imp_id_regs();
5652 
5653 	ret = populate_nv_trap_config();
5654 
5655 	check_feature_map();
5656 
5657 	for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
5658 		ret = populate_sysreg_config(sys_reg_descs + i, i);
5659 
5660 	for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
5661 		ret = populate_sysreg_config(sys_insn_descs + i, i);
5662 
5663 	return ret;
5664 }
5665