xref: /linux/arch/arm64/kvm/guest.c (revision d6869352cb3c3cf3450637a52349e2e87c1354aa)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012,2013 - ARM Ltd
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  *
6  * Derived from arch/arm/kvm/guest.c:
7  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
8  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
9  */
10 
11 #include <linux/bits.h>
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/nospec.h>
15 #include <linux/kvm_host.h>
16 #include <linux/module.h>
17 #include <linux/stddef.h>
18 #include <linux/string.h>
19 #include <linux/vmalloc.h>
20 #include <linux/fs.h>
21 #include <kvm/arm_psci.h>
22 #include <asm/cputype.h>
23 #include <linux/uaccess.h>
24 #include <asm/fpsimd.h>
25 #include <asm/kvm.h>
26 #include <asm/kvm_emulate.h>
27 #include <asm/kvm_coproc.h>
28 #include <asm/kvm_host.h>
29 #include <asm/sigcontext.h>
30 
31 #include "trace.h"
32 
33 #define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
34 #define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
35 
36 struct kvm_stats_debugfs_item debugfs_entries[] = {
37 	VCPU_STAT(hvc_exit_stat),
38 	VCPU_STAT(wfe_exit_stat),
39 	VCPU_STAT(wfi_exit_stat),
40 	VCPU_STAT(mmio_exit_user),
41 	VCPU_STAT(mmio_exit_kernel),
42 	VCPU_STAT(exits),
43 	{ NULL }
44 };
45 
46 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
47 {
48 	return 0;
49 }
50 
51 static bool core_reg_offset_is_vreg(u64 off)
52 {
53 	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
54 		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
55 }
56 
57 static u64 core_reg_offset_from_id(u64 id)
58 {
59 	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
60 }
61 
62 static int validate_core_offset(const struct kvm_vcpu *vcpu,
63 				const struct kvm_one_reg *reg)
64 {
65 	u64 off = core_reg_offset_from_id(reg->id);
66 	int size;
67 
68 	switch (off) {
69 	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
70 	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
71 	case KVM_REG_ARM_CORE_REG(regs.sp):
72 	case KVM_REG_ARM_CORE_REG(regs.pc):
73 	case KVM_REG_ARM_CORE_REG(regs.pstate):
74 	case KVM_REG_ARM_CORE_REG(sp_el1):
75 	case KVM_REG_ARM_CORE_REG(elr_el1):
76 	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
77 	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
78 		size = sizeof(__u64);
79 		break;
80 
81 	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
82 	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
83 		size = sizeof(__uint128_t);
84 		break;
85 
86 	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
87 	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
88 		size = sizeof(__u32);
89 		break;
90 
91 	default:
92 		return -EINVAL;
93 	}
94 
95 	if (KVM_REG_SIZE(reg->id) != size ||
96 	    !IS_ALIGNED(off, size / sizeof(__u32)))
97 		return -EINVAL;
98 
99 	/*
100 	 * The KVM_REG_ARM64_SVE regs must be used instead of
101 	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
102 	 * SVE-enabled vcpus:
103 	 */
104 	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
105 		return -EINVAL;
106 
107 	return 0;
108 }
109 
110 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
111 {
112 	/*
113 	 * Because the kvm_regs structure is a mix of 32-, 64- and
114 	 * 128-bit fields, we index it as if it were a 32-bit
115 	 * array. Hence below, nr_regs is the number of entries and
116 	 * off is the index into that "array".
117 	 */
118 	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
119 	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
120 	int nr_regs = sizeof(*regs) / sizeof(__u32);
121 	u32 off;
122 
123 	/* Our ID is an index into the kvm_regs struct. */
124 	off = core_reg_offset_from_id(reg->id);
125 	if (off >= nr_regs ||
126 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
127 		return -ENOENT;
128 
129 	if (validate_core_offset(vcpu, reg))
130 		return -EINVAL;
131 
132 	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
133 		return -EFAULT;
134 
135 	return 0;
136 }
137 
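/*
 * Illustrative userspace sketch (not part of this file): reading one core
 * register through KVM_GET_ONE_REG. The vcpu_fd below is assumed to be a
 * vCPU file descriptor obtained via KVM_CREATE_VCPU; the register ID reuses
 * the KVM_REG_ARM_CORE_REG() offset encoding handled above, taken from the
 * arm64 uapi <asm/kvm.h> pulled in by <linux/kvm.h>.
 *
 *	#include <stddef.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *			KVM_REG_ARM_CORE_REG(regs.pc),
 *		.addr = (__u64)(unsigned long)&pc,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		perror("KVM_GET_ONE_REG");
 */
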
138 static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
139 {
140 	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
141 	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
142 	int nr_regs = sizeof(*regs) / sizeof(__u32);
143 	__uint128_t tmp;
144 	void *valp = &tmp;
145 	u64 off;
146 	int err = 0;
147 
148 	/* Our ID is an index into the kvm_regs struct. */
149 	off = core_reg_offset_from_id(reg->id);
150 	if (off >= nr_regs ||
151 	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
152 		return -ENOENT;
153 
154 	if (validate_core_offset(vcpu, reg))
155 		return -EINVAL;
156 
157 	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
158 		return -EINVAL;
159 
160 	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
161 		err = -EFAULT;
162 		goto out;
163 	}
164 
165 	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
166 		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
167 		switch (mode) {
168 		case PSR_AA32_MODE_USR:
169 			if (!system_supports_32bit_el0())
170 				return -EINVAL;
171 			break;
172 		case PSR_AA32_MODE_FIQ:
173 		case PSR_AA32_MODE_IRQ:
174 		case PSR_AA32_MODE_SVC:
175 		case PSR_AA32_MODE_ABT:
176 		case PSR_AA32_MODE_UND:
177 			if (!vcpu_el1_is_32bit(vcpu))
178 				return -EINVAL;
179 			break;
180 		case PSR_MODE_EL0t:
181 		case PSR_MODE_EL1t:
182 		case PSR_MODE_EL1h:
183 			if (vcpu_el1_is_32bit(vcpu))
184 				return -EINVAL;
185 			break;
186 		default:
187 			err = -EINVAL;
188 			goto out;
189 		}
190 	}
191 
192 	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
193 out:
194 	return err;
195 }
196 
197 #define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
198 #define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
199 
200 static bool vq_present(
201 	const u64 (*const vqs)[KVM_ARM64_SVE_VLS_WORDS],
202 	unsigned int vq)
203 {
204 	return (*vqs)[vq_word(vq)] & vq_mask(vq);
205 }
206 
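/*
 * Worked example of the encoding above (a reading aid, not new behaviour):
 * a vector length of 32 bytes (256 bits) is vq = 32 / 16 = 2, which lands in
 * vqs[vq_word(2)] = vqs[0] at bit (2 - SVE_VQ_MIN) = 1; vq = 65 would spill
 * over into vqs[1], bit 0.
 */
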
207 static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
208 {
209 	unsigned int max_vq, vq;
210 	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
211 
212 	if (!vcpu_has_sve(vcpu))
213 		return -ENOENT;
214 
215 	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
216 		return -EINVAL;
217 
218 	memset(vqs, 0, sizeof(vqs));
219 
220 	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
221 	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
222 		if (sve_vq_available(vq))
223 			vqs[vq_word(vq)] |= vq_mask(vq);
224 
225 	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
226 		return -EFAULT;
227 
228 	return 0;
229 }
230 
231 static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
232 {
233 	unsigned int max_vq, vq;
234 	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
235 
236 	if (!vcpu_has_sve(vcpu))
237 		return -ENOENT;
238 
239 	if (kvm_arm_vcpu_sve_finalized(vcpu))
240 		return -EPERM; /* too late! */
241 
242 	if (WARN_ON(vcpu->arch.sve_state))
243 		return -EINVAL;
244 
245 	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
246 		return -EFAULT;
247 
248 	max_vq = 0;
249 	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
250 		if (vq_present(&vqs, vq))
251 			max_vq = vq;
252 
253 	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
254 		return -EINVAL;
255 
256 	/*
257 	 * Vector lengths supported by the host can't currently be
258 	 * hidden from the guest individually: instead we can only set a
259 	 * maximum via ZCR_EL2.LEN.  So, make sure the set of available
260 	 * vector lengths exactly matches the set requested, up to the
261 	 * requested maximum:
262 	 */
263 	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
264 		if (vq_present(&vqs, vq) != sve_vq_available(vq))
265 			return -EINVAL;
266 
267 	/* Can't run with no vector lengths at all: */
268 	if (max_vq < SVE_VQ_MIN)
269 		return -EINVAL;
270 
271 	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
272 	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);
273 
274 	return 0;
275 }
276 
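/*
 * Illustrative userspace sketch (not part of this file): limiting the guest
 * to vector lengths of at most 512 bits before finalization. Assumes vcpu_fd
 * is a vCPU created with KVM_ARM_VCPU_SVE in kvm_vcpu_init.features and not
 * yet finalized (set_sve_vls() above rejects later writes with -EPERM).
 * Reading the register first and clearing the unwanted bits keeps the request
 * an exact subset of the host-supported set, as required by the checks above.
 *
 *	__u64 vls[KVM_ARM64_SVE_VLS_WORDS];
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM64_SVE_VLS,
 *		.addr = (__u64)(unsigned long)vls,
 *	};
 *	int feature = KVM_ARM_VCPU_SVE;
 *	unsigned int vq;
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// host-supported set
 *	for (vq = 5; vq <= 64 * KVM_ARM64_SVE_VLS_WORDS; vq++)
 *		vls[(vq - 1) / 64] &= ~(1ULL << ((vq - 1) % 64));
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
 */
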
277 #define SVE_REG_SLICE_SHIFT	0
278 #define SVE_REG_SLICE_BITS	5
279 #define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
280 #define SVE_REG_ID_BITS		5
281 
282 #define SVE_REG_SLICE_MASK					\
283 	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
284 		SVE_REG_SLICE_SHIFT)
285 #define SVE_REG_ID_MASK							\
286 	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)
287 
288 #define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)
289 
290 #define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
291 #define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
292 
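/*
 * Example decode (a reading aid, not new behaviour): in the ID produced by
 * KVM_REG_ARM64_SVE_ZREG(3, 0), bits [9:5] carry the register number and
 * bits [4:0] the slice, so (id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT
 * recovers 3 and (id & SVE_REG_SLICE_MASK) recovers 0, exactly as consumed
 * by sve_reg_to_region() below.
 */
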
293 /*
294  * Number of register slices required to cover each whole SVE register.
295  * NOTE: Only the first slice ever exists, for now.
296  * If you are tempted to modify this, you must also rework sve_reg_to_region()
297  * to match:
298  */
299 #define vcpu_sve_slices(vcpu) 1
300 
301 /* Bounds of a single SVE register slice within vcpu->arch.sve_state */
302 struct sve_state_reg_region {
303 	unsigned int koffset;	/* offset into sve_state in kernel memory */
304 	unsigned int klen;	/* length in kernel memory */
305 	unsigned int upad;	/* extra trailing padding in user memory */
306 };
307 
308 /*
309  * Validate SVE register ID and get sanitised bounds for user/kernel SVE
310  * register copy
311  */
312 static int sve_reg_to_region(struct sve_state_reg_region *region,
313 			     struct kvm_vcpu *vcpu,
314 			     const struct kvm_one_reg *reg)
315 {
316 	/* reg ID ranges for Z-registers */
317 	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
318 	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
319 						       SVE_NUM_SLICES - 1);
320 
321 	/* reg ID ranges for P-registers and FFR (which are contiguous) */
322 	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
323 	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);
324 
325 	unsigned int vq;
326 	unsigned int reg_num;
327 
328 	unsigned int reqoffset, reqlen; /* User-requested offset and length */
329 	unsigned int maxlen; /* Maximum permitted length */
330 
331 	size_t sve_state_size;
332 
333 	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
334 							SVE_NUM_SLICES - 1);
335 
336 	/* Verify that the P-regs and FFR really do have contiguous IDs: */
337 	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);
338 
339 	/* Verify that we match the UAPI header: */
340 	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
341 
342 	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
343 
344 	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
345 		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
346 			return -ENOENT;
347 
348 		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
349 
350 		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
351 				SVE_SIG_REGS_OFFSET;
352 		reqlen = KVM_SVE_ZREG_SIZE;
353 		maxlen = SVE_SIG_ZREG_SIZE(vq);
354 	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
355 		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
356 			return -ENOENT;
357 
358 		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
359 
360 		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
361 				SVE_SIG_REGS_OFFSET;
362 		reqlen = KVM_SVE_PREG_SIZE;
363 		maxlen = SVE_SIG_PREG_SIZE(vq);
364 	} else {
365 		return -EINVAL;
366 	}
367 
368 	sve_state_size = vcpu_sve_state_size(vcpu);
369 	if (WARN_ON(!sve_state_size))
370 		return -EINVAL;
371 
372 	region->koffset = array_index_nospec(reqoffset, sve_state_size);
373 	region->klen = min(maxlen, reqlen);
374 	region->upad = reqlen - region->klen;
375 
376 	return 0;
377 }
378 
379 static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
380 {
381 	int ret;
382 	struct sve_state_reg_region region;
383 	char __user *uptr = (char __user *)reg->addr;
384 
385 	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
386 	if (reg->id == KVM_REG_ARM64_SVE_VLS)
387 		return get_sve_vls(vcpu, reg);
388 
389 	/* Try to interpret reg ID as an architectural SVE register... */
390 	ret = sve_reg_to_region(&region, vcpu, reg);
391 	if (ret)
392 		return ret;
393 
394 	if (!kvm_arm_vcpu_sve_finalized(vcpu))
395 		return -EPERM;
396 
397 	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
398 			 region.klen) ||
399 	    clear_user(uptr + region.klen, region.upad))
400 		return -EFAULT;
401 
402 	return 0;
403 }
404 
405 static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
406 {
407 	int ret;
408 	struct sve_state_reg_region region;
409 	const char __user *uptr = (const char __user *)reg->addr;
410 
411 	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
412 	if (reg->id == KVM_REG_ARM64_SVE_VLS)
413 		return set_sve_vls(vcpu, reg);
414 
415 	/* Try to interpret reg ID as an architectural SVE register... */
416 	ret = sve_reg_to_region(&region, vcpu, reg);
417 	if (ret)
418 		return ret;
419 
420 	if (!kvm_arm_vcpu_sve_finalized(vcpu))
421 		return -EPERM;
422 
423 	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
424 			   region.klen))
425 		return -EFAULT;
426 
427 	return 0;
428 }
429 
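/*
 * Illustrative userspace sketch (not part of this file): reading slice 0 of
 * Z0 once the vCPU's SVE configuration has been finalized. The buffer is
 * sized for the fixed 2048-bit slice presented by KVM_REG_ARM64_SVE_ZREG();
 * bytes beyond the vCPU's maximum vector length read back as zero, courtesy
 * of the clear_user() in get_sve_reg() above.
 *
 *	__u8 zreg[256];			// one 2048-bit slice
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM64_SVE_ZREG(0, 0),
 *		.addr = (__u64)(unsigned long)zreg,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */
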
430 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
431 {
432 	return -EINVAL;
433 }
434 
435 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
436 {
437 	return -EINVAL;
438 }
439 
440 static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
441 				 u64 __user *uindices)
442 {
443 	unsigned int i;
444 	int n = 0;
445 	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
446 
447 	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
448 		/*
449 		 * The KVM_REG_ARM64_SVE regs must be used instead of
450 		 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
451 		 * SVE-enabled vcpus:
452 		 */
453 		if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
454 			continue;
455 
456 		if (uindices) {
457 			if (put_user(core_reg | i, uindices))
458 				return -EFAULT;
459 			uindices++;
460 		}
461 
462 		n++;
463 	}
464 
465 	return n;
466 }
467 
468 static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
469 {
470 	return copy_core_reg_indices(vcpu, NULL);
471 }
472 
473 /*
474  * ARM64 versions of the TIMER registers, always available on arm64
475  */
476 
477 #define NUM_TIMER_REGS 3
478 
479 static bool is_timer_reg(u64 index)
480 {
481 	switch (index) {
482 	case KVM_REG_ARM_TIMER_CTL:
483 	case KVM_REG_ARM_TIMER_CNT:
484 	case KVM_REG_ARM_TIMER_CVAL:
485 		return true;
486 	}
487 	return false;
488 }
489 
490 static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
491 {
492 	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
493 		return -EFAULT;
494 	uindices++;
495 	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
496 		return -EFAULT;
497 	uindices++;
498 	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
499 		return -EFAULT;
500 
501 	return 0;
502 }
503 
504 static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
505 {
506 	void __user *uaddr = (void __user *)(long)reg->addr;
507 	u64 val;
508 	int ret;
509 
510 	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
511 	if (ret != 0)
512 		return -EFAULT;
513 
514 	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
515 }
516 
517 static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
518 {
519 	void __user *uaddr = (void __user *)(long)reg->addr;
520 	u64 val;
521 
522 	val = kvm_arm_timer_get_reg(vcpu, reg->id);
523 	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
524 }
525 
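/*
 * Illustrative userspace sketch (not part of this file): the timer registers
 * above are plain 64-bit values behind the same one-reg interface, e.g.
 * reading the guest's virtual counter (vcpu_fd assumed as before):
 *
 *	__u64 cnt;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM_TIMER_CNT,
 *		.addr = (__u64)(unsigned long)&cnt,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */
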
526 static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
527 {
528 	const unsigned int slices = vcpu_sve_slices(vcpu);
529 
530 	if (!vcpu_has_sve(vcpu))
531 		return 0;
532 
533 	/* Policed by KVM_GET_REG_LIST: */
534 	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
535 
536 	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
537 		+ 1; /* KVM_REG_ARM64_SVE_VLS */
538 }
539 
540 static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
541 				u64 __user *uindices)
542 {
543 	const unsigned int slices = vcpu_sve_slices(vcpu);
544 	u64 reg;
545 	unsigned int i, n;
546 	int num_regs = 0;
547 
548 	if (!vcpu_has_sve(vcpu))
549 		return 0;
550 
551 	/* Policed by KVM_GET_REG_LIST: */
552 	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
553 
554 	/*
555 	 * Enumerate this first, so that userspace can save/restore in
556 	 * the order reported by KVM_GET_REG_LIST:
557 	 */
558 	reg = KVM_REG_ARM64_SVE_VLS;
559 	if (put_user(reg, uindices++))
560 		return -EFAULT;
561 	++num_regs;
562 
563 	for (i = 0; i < slices; i++) {
564 		for (n = 0; n < SVE_NUM_ZREGS; n++) {
565 			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
566 			if (put_user(reg, uindices++))
567 				return -EFAULT;
568 			num_regs++;
569 		}
570 
571 		for (n = 0; n < SVE_NUM_PREGS; n++) {
572 			reg = KVM_REG_ARM64_SVE_PREG(n, i);
573 			if (put_user(reg, uindices++))
574 				return -EFAULT;
575 			num_regs++;
576 		}
577 
578 		reg = KVM_REG_ARM64_SVE_FFR(i);
579 		if (put_user(reg, uindices++))
580 			return -EFAULT;
581 		num_regs++;
582 	}
583 
584 	return num_regs;
585 }
586 
587 /**
588  * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
589  *
590  * This is for all registers.
591  */
592 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
593 {
594 	unsigned long res = 0;
595 
596 	res += num_core_regs(vcpu);
597 	res += num_sve_regs(vcpu);
598 	res += kvm_arm_num_sys_reg_descs(vcpu);
599 	res += kvm_arm_get_fw_num_regs(vcpu);
600 	res += NUM_TIMER_REGS;
601 
602 	return res;
603 }
604 
605 /**
606  * kvm_arm_copy_reg_indices - get indices of all registers.
607  *
608  * We do core registers right here, then append the SVE, firmware,
 * timer and system registers.
609  */
610 int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
611 {
612 	int ret;
613 
614 	ret = copy_core_reg_indices(vcpu, uindices);
615 	if (ret < 0)
616 		return ret;
617 	uindices += ret;
618 
619 	ret = copy_sve_reg_indices(vcpu, uindices);
620 	if (ret < 0)
621 		return ret;
622 	uindices += ret;
623 
624 	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
625 	if (ret < 0)
626 		return ret;
627 	uindices += kvm_arm_get_fw_num_regs(vcpu);
628 
629 	ret = copy_timer_indices(vcpu, uindices);
630 	if (ret < 0)
631 		return ret;
632 	uindices += NUM_TIMER_REGS;
633 
634 	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
635 }
636 
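/*
 * Illustrative userspace sketch (not part of this file): the usual consumer
 * of the two routines above is a two-step KVM_GET_REG_LIST call, first to
 * learn the count and then to fetch the IDs (includes and error handling
 * omitted):
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	// fails with E2BIG but
 *							// writes back probe.n
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
 *	// list->reg[0..n-1] now holds the IDs in the order built above
 */
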
637 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
638 {
639 	/* We currently use nothing arch-specific in upper 32 bits */
640 	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
641 		return -EINVAL;
642 
643 	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
644 	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
645 	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
646 	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
647 	}
648 
649 	if (is_timer_reg(reg->id))
650 		return get_timer_reg(vcpu, reg);
651 
652 	return kvm_arm_sys_reg_get_reg(vcpu, reg);
653 }
654 
655 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
656 {
657 	/* We currently use nothing arch-specific in upper 32 bits */
658 	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
659 		return -EINVAL;
660 
661 	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
662 	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
663 	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
664 	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
665 	}
666 
667 	if (is_timer_reg(reg->id))
668 		return set_timer_reg(vcpu, reg);
669 
670 	return kvm_arm_sys_reg_set_reg(vcpu, reg);
671 }
672 
673 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
674 				  struct kvm_sregs *sregs)
675 {
676 	return -EINVAL;
677 }
678 
679 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
680 				  struct kvm_sregs *sregs)
681 {
682 	return -EINVAL;
683 }
684 
685 int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
686 			      struct kvm_vcpu_events *events)
687 {
688 	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
689 	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);
690 
691 	if (events->exception.serror_pending && events->exception.serror_has_esr)
692 		events->exception.serror_esr = vcpu_get_vsesr(vcpu);
693 
694 	return 0;
695 }
696 
697 int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
698 			      struct kvm_vcpu_events *events)
699 {
700 	bool serror_pending = events->exception.serror_pending;
701 	bool has_esr = events->exception.serror_has_esr;
702 
703 	if (serror_pending && has_esr) {
704 		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
705 			return -EINVAL;
706 
707 		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
708 			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
709 		else
710 			return -EINVAL;
711 	} else if (serror_pending) {
712 		kvm_inject_vabt(vcpu);
713 	}
714 
715 	return 0;
716 }
717 
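/*
 * Illustrative userspace sketch (not part of this file): marking an SError
 * as pending for the guest through the events path above. Without the RAS
 * extension only this plain "pending" form is accepted, mirroring the
 * cpus_have_const_cap(ARM64_HAS_RAS_EXTN) checks in this file.
 *
 *	struct kvm_vcpu_events events = {
 *		.exception.serror_pending = 1,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
 */
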
718 int __attribute_const__ kvm_target_cpu(void)
719 {
720 	unsigned long implementor = read_cpuid_implementor();
721 	unsigned long part_number = read_cpuid_part_number();
722 
723 	switch (implementor) {
724 	case ARM_CPU_IMP_ARM:
725 		switch (part_number) {
726 		case ARM_CPU_PART_AEM_V8:
727 			return KVM_ARM_TARGET_AEM_V8;
728 		case ARM_CPU_PART_FOUNDATION:
729 			return KVM_ARM_TARGET_FOUNDATION_V8;
730 		case ARM_CPU_PART_CORTEX_A53:
731 			return KVM_ARM_TARGET_CORTEX_A53;
732 		case ARM_CPU_PART_CORTEX_A57:
733 			return KVM_ARM_TARGET_CORTEX_A57;
734 		}
735 		break;
736 	case ARM_CPU_IMP_APM:
737 		switch (part_number) {
738 		case APM_CPU_PART_POTENZA:
739 			return KVM_ARM_TARGET_XGENE_POTENZA;
740 		}
741 		break;
742 	}
743 
744 	/* Return a default generic target */
745 	return KVM_ARM_TARGET_GENERIC_V8;
746 }
747 
748 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
749 {
750 	int target = kvm_target_cpu();
751 
752 	if (target < 0)
753 		return -ENODEV;
754 
755 	memset(init, 0, sizeof(*init));
756 
757 	/*
758 	 * For now, we don't return any features.
759 	 * In the future, the features field might be used to advertise
760 	 * target-specific features available for the preferred
761 	 * target type.
762 	 */
763 	init->target = (__u32)target;
764 
765 	return 0;
766 }
767 
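/*
 * Illustrative userspace sketch (not part of this file): the usual consumer
 * of the target selection above is the KVM_ARM_PREFERRED_TARGET vm ioctl
 * followed by KVM_ARM_VCPU_INIT, with vm_fd and vcpu_fd assumed to come from
 * KVM_CREATE_VM and KVM_CREATE_VCPU respectively:
 *
 *	struct kvm_vcpu_init init;
 *
 *	ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
 *	// optionally set bits in init.features[] here, e.g. KVM_ARM_VCPU_SVE
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 */
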
768 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
769 {
770 	return -EINVAL;
771 }
772 
773 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
774 {
775 	return -EINVAL;
776 }
777 
778 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
779 				  struct kvm_translation *tr)
780 {
781 	return -EINVAL;
782 }
783 
784 #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
785 			    KVM_GUESTDBG_USE_SW_BP | \
786 			    KVM_GUESTDBG_USE_HW | \
787 			    KVM_GUESTDBG_SINGLESTEP)
788 
789 /**
790  * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
791  * @vcpu:	the vCPU pointer
792  * @dbg:	the ioctl data buffer
793  *
794  * This sets up and enables the VM for guest debugging. Userspace
795  * passes in a control flag to enable different debug types and
796  * potentially other architecture-specific information in the rest of
797  * the structure.
798  */
799 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
800 					struct kvm_guest_debug *dbg)
801 {
802 	int ret = 0;
803 
804 	trace_kvm_set_guest_debug(vcpu, dbg->control);
805 
806 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
807 		ret = -EINVAL;
808 		goto out;
809 	}
810 
811 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
812 		vcpu->guest_debug = dbg->control;
813 
814 		/* Hardware assisted Break and Watch points */
815 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
816 			vcpu->arch.external_debug_state = dbg->arch;
817 		}
818 
819 	} else {
820 		/* If not enabled clear all flags */
821 		vcpu->guest_debug = 0;
822 	}
823 
824 out:
825 	return ret;
826 }
827 
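/*
 * Illustrative userspace sketch (not part of this file): single-stepping the
 * guest via the ioctl handled above. Writing a control value without
 * KVM_GUESTDBG_ENABLE later on clears vcpu->guest_debug again, per the else
 * branch above.
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */
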
828 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
829 			       struct kvm_device_attr *attr)
830 {
831 	int ret;
832 
833 	switch (attr->group) {
834 	case KVM_ARM_VCPU_PMU_V3_CTRL:
835 		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
836 		break;
837 	case KVM_ARM_VCPU_TIMER_CTRL:
838 		ret = kvm_arm_timer_set_attr(vcpu, attr);
839 		break;
840 	default:
841 		ret = -ENXIO;
842 		break;
843 	}
844 
845 	return ret;
846 }
847 
848 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
849 			       struct kvm_device_attr *attr)
850 {
851 	int ret;
852 
853 	switch (attr->group) {
854 	case KVM_ARM_VCPU_PMU_V3_CTRL:
855 		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
856 		break;
857 	case KVM_ARM_VCPU_TIMER_CTRL:
858 		ret = kvm_arm_timer_get_attr(vcpu, attr);
859 		break;
860 	default:
861 		ret = -ENXIO;
862 		break;
863 	}
864 
865 	return ret;
866 }
867 
868 int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
869 			       struct kvm_device_attr *attr)
870 {
871 	int ret;
872 
873 	switch (attr->group) {
874 	case KVM_ARM_VCPU_PMU_V3_CTRL:
875 		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
876 		break;
877 	case KVM_ARM_VCPU_TIMER_CTRL:
878 		ret = kvm_arm_timer_has_attr(vcpu, attr);
879 		break;
880 	default:
881 		ret = -ENXIO;
882 		break;
883 	}
884 
885 	return ret;
886 }
887