1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * VGIC: KVM DEVICE API
4 *
5 * Copyright (C) 2015 ARM Ltd.
6 * Author: Marc Zyngier <marc.zyngier@arm.com>
7 */
8 #include <linux/irqchip/arm-gic-v3.h>
9 #include <linux/kvm_host.h>
10 #include <kvm/arm_vgic.h>
11 #include <linux/uaccess.h>
12 #include <asm/kvm_mmu.h>
13 #include <asm/cputype.h>
14 #include "vgic.h"
15
16 /* common helpers */
17
/*
 * vgic_check_iorange - validate a guest-physical MMIO range for a VGIC region
 * @kvm:       the VM whose IPA space bounds the region
 * @ioaddr:    the currently stored base for this region (must still be undef)
 * @addr:      the proposed base address
 * @alignment: required alignment for both @addr and @size
 * @size:      length of the region
 *
 * Returns 0 on success, -EEXIST if the region was already set, -EINVAL on
 * misalignment or address-space wrap-around, -E2BIG if the range does not
 * fit in the guest's IPA space. The order of the checks fixes which error
 * code takes precedence.
 */
int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
		       phys_addr_t addr, phys_addr_t alignment,
		       phys_addr_t size)
{
	/* A region address may only be assigned once. */
	if (!IS_VGIC_ADDR_UNDEF(ioaddr))
		return -EEXIST;

	if (!IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment))
		return -EINVAL;

	/* Reject ranges that wrap past the end of the address space. */
	if (addr + size < addr)
		return -EINVAL;

	/* The entire region must lie within the guest's IPA space. */
	if (addr & ~kvm_phys_mask(&kvm->arch.mmu) ||
	    (addr + size) > kvm_phys_size(&kvm->arch.mmu))
		return -E2BIG;

	return 0;
}
37
vgic_check_type(struct kvm * kvm,int type_needed)38 static int vgic_check_type(struct kvm *kvm, int type_needed)
39 {
40 if (kvm->arch.vgic.vgic_model != type_needed)
41 return -ENODEV;
42 else
43 return 0;
44 }
45
/*
 * kvm_set_legacy_vgic_v2_addr - handle the legacy KVM_ARM_SET_DEVICE_ADDR
 * ioctl for GICv2 distributor/CPU interface bases.
 *
 * The address type is extracted from @dev_addr->id; only the two GICv2
 * region types are accepted, and only on a VM configured as a GICv2.
 * The base is validated with vgic_check_iorange() and committed under
 * the config_lock. Returns 0 or a negative error code.
 */
int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)
{
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int r;

	/* vgic_dist_base/vgic_cpu_base are protected by the config_lock. */
	mutex_lock(&kvm->arch.config_lock);
	switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		if (!r)
			r = vgic_check_iorange(kvm, vgic->vgic_dist_base, dev_addr->addr,
					       SZ_4K, KVM_VGIC_V2_DIST_SIZE);
		if (!r)
			vgic->vgic_dist_base = dev_addr->addr;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		if (!r)
			r = vgic_check_iorange(kvm, vgic->vgic_cpu_base, dev_addr->addr,
					       SZ_4K, KVM_VGIC_V2_CPU_SIZE);
		if (!r)
			vgic->vgic_cpu_base = dev_addr->addr;
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->arch.config_lock);

	return r;
}
77
78 /**
79 * kvm_vgic_addr - set or get vgic VM base addresses
80 * @kvm: pointer to the vm struct
81 * @attr: pointer to the attribute being retrieved/updated
82 * @write: if true set the address in the VM address space, if false read the
83 * address
84 *
85 * Set or get the vgic base addresses for the distributor and the virtual CPU
86 * interface in the VM physical address space. These addresses are properties
87 * of the emulated core/SoC and therefore user space initially knows this
88 * information.
89 * Check them for sanity (alignment, double assignment). We can't check for
90 * overlapping regions in case of a virtual GICv3 here, since we don't know
91 * the number of VCPUs yet, so we defer this check to map_resources().
92 */
static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
{
	u64 __user *uaddr = (u64 __user *)attr->addr;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t *addr_ptr, alignment, size;
	/* Reported back for a read when no redistributor region exists yet. */
	u64 undef_value = VGIC_ADDR_UNDEF;
	u64 addr;
	int r;

	/* Reading a redistributor region addr implies getting the index */
	if (write || attr->attr == KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION)
		if (get_user(addr, uaddr))
			return -EFAULT;

	/*
	 * Since we can't hold config_lock while registering the redistributor
	 * iodevs, take the slots_lock immediately.
	 */
	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_4K;
		size = KVM_VGIC_V2_DIST_SIZE;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_cpu_base;
		alignment = SZ_4K;
		size = KVM_VGIC_V2_CPU_SIZE;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_64K;
		size = KVM_VGIC_V3_DIST_SIZE;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
		struct vgic_redist_region *rdreg;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;
		if (write) {
			/* Writes register a single region at index 0. */
			r = vgic_v3_set_redist_base(kvm, 0, addr, 0);
			goto out;
		}
		/* Reads report the base of the first region, if any. */
		rdreg = list_first_entry_or_null(&vgic->rd_regions,
						 struct vgic_redist_region, list);
		if (!rdreg)
			addr_ptr = &undef_value;
		else
			addr_ptr = &rdreg->base;
		break;
	}
	case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
	{
		struct vgic_redist_region *rdreg;
		u8 index;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;

		/* The userspace value packs index, flags, count and base. */
		index = addr & KVM_VGIC_V3_RDIST_INDEX_MASK;

		if (write) {
			gpa_t base = addr & KVM_VGIC_V3_RDIST_BASE_MASK;
			u32 count = FIELD_GET(KVM_VGIC_V3_RDIST_COUNT_MASK, addr);
			u8 flags = FIELD_GET(KVM_VGIC_V3_RDIST_FLAGS_MASK, addr);

			/* A zero vcpu count or any flag bit is invalid. */
			if (!count || flags)
				r = -EINVAL;
			else
				r = vgic_v3_set_redist_base(kvm, index,
							    base, count);
			goto out;
		}

		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		if (!rdreg) {
			r = -ENOENT;
			goto out;
		}

		/* Re-encode index, base and count into the userspace format. */
		addr = index;
		addr |= rdreg->base;
		addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
		goto out;
	}
	default:
		r = -ENODEV;
	}

	if (r)
		goto out;

	/* Common path for the fixed regions: validate/commit under config_lock. */
	mutex_lock(&kvm->arch.config_lock);
	if (write) {
		r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
		if (!r)
			*addr_ptr = addr;
	} else {
		addr = *addr_ptr;
	}
	mutex_unlock(&kvm->arch.config_lock);

out:
	mutex_unlock(&kvm->slots_lock);

	/* Copy the result out only after all locks have been dropped. */
	if (!r && !write)
		r = put_user(addr, uaddr);

	return r;
}
209
/*
 * vgic_set_common_attr - handle SET_DEVICE_ATTR groups shared by v2 and v3
 * @dev:  the kvm device
 * @attr: the attribute being set
 *
 * Handles the ADDR, NR_IRQS and CTRL groups; anything else is -ENXIO.
 */
static int vgic_set_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		r = kvm_vgic_addr(dev->kvm, attr, true);
		/* Userspace expects -ENXIO for an unknown attribute. */
		return (r == -ENODEV) ? -ENXIO : r;
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_RESERVED ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->arch.config_lock);

		/*
		 * Either userspace has already configured NR_IRQS or
		 * the vgic has already been initialized and vgic_init()
		 * supplied a default amount of SPIs.
		 */
		if (dev->kvm->arch.vgic.nr_spis)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_spis =
				val - VGIC_NR_PRIVATE_IRQS;

		mutex_unlock(&dev->kvm->arch.config_lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			mutex_lock(&dev->kvm->arch.config_lock);
			r = vgic_init(dev->kvm);
			mutex_unlock(&dev->kvm->arch.config_lock);
			return r;
		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
			/*
			 * OK, this one isn't common at all, but we
			 * want to handle all control group attributes
			 * in a single place.
			 */
			if (vgic_check_type(dev->kvm, KVM_DEV_TYPE_ARM_VGIC_V3))
				return -ENXIO;
			/*
			 * Lock ordering: kvm->lock, then all vcpu mutexes,
			 * then config_lock; unlocked in reverse order below.
			 */
			mutex_lock(&dev->kvm->lock);

			if (kvm_trylock_all_vcpus(dev->kvm)) {
				mutex_unlock(&dev->kvm->lock);
				return -EBUSY;
			}

			mutex_lock(&dev->kvm->arch.config_lock);
			r = vgic_v3_save_pending_tables(dev->kvm);
			mutex_unlock(&dev->kvm->arch.config_lock);
			kvm_unlock_all_vcpus(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
290
vgic_get_common_attr(struct kvm_device * dev,struct kvm_device_attr * attr)291 static int vgic_get_common_attr(struct kvm_device *dev,
292 struct kvm_device_attr *attr)
293 {
294 int r = -ENXIO;
295
296 switch (attr->group) {
297 case KVM_DEV_ARM_VGIC_GRP_ADDR:
298 r = kvm_vgic_addr(dev->kvm, attr, false);
299 return (r == -ENODEV) ? -ENXIO : r;
300 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
301 u32 __user *uaddr = (u32 __user *)(long)attr->addr;
302
303 r = put_user(dev->kvm->arch.vgic.nr_spis +
304 VGIC_NR_PRIVATE_IRQS, uaddr);
305 break;
306 }
307 }
308
309 return r;
310 }
311
/* kvm_device_ops .create callback: instantiate a VGIC of the given @type. */
static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}
316
/*
 * kvm_device_ops .destroy callback: only the device wrapper is freed here;
 * the vgic state itself is torn down with the VM.
 */
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}
321
kvm_register_vgic_device(unsigned long type)322 int kvm_register_vgic_device(unsigned long type)
323 {
324 int ret = -ENODEV;
325
326 switch (type) {
327 case KVM_DEV_TYPE_ARM_VGIC_V2:
328 ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
329 KVM_DEV_TYPE_ARM_VGIC_V2);
330 break;
331 case KVM_DEV_TYPE_ARM_VGIC_V3:
332 ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
333 KVM_DEV_TYPE_ARM_VGIC_V3);
334
335 if (ret)
336 break;
337 ret = kvm_vgic_register_its_device();
338 break;
339 }
340
341 return ret;
342 }
343
vgic_v2_parse_attr(struct kvm_device * dev,struct kvm_device_attr * attr,struct vgic_reg_attr * reg_attr)344 int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
345 struct vgic_reg_attr *reg_attr)
346 {
347 int cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
348
349 reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
350 reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
351 if (!reg_attr->vcpu)
352 return -EINVAL;
353
354 return 0;
355 }
356
357 /**
358 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
359 *
360 * @dev: kvm device handle
361 * @attr: kvm device attribute
362 * @is_write: true if userspace is writing a register
363 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    bool is_write)
{
	u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	int ret;
	u32 val;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	/* Fetch the value to write before taking any locks. */
	if (is_write)
		if (get_user(val, uaddr))
			return -EFAULT;

	/*
	 * Lock ordering: kvm->lock, then all vcpu mutexes, then config_lock;
	 * released in reverse order below.
	 */
	mutex_lock(&dev->kvm->lock);

	if (kvm_trylock_all_vcpus(dev->kvm)) {
		mutex_unlock(&dev->kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&dev->kvm->arch.config_lock);

	/* v2 state can only be accessed on an initialized vgic. */
	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, &val);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&dev->kvm->arch.config_lock);
	kvm_unlock_all_vcpus(dev->kvm);
	mutex_unlock(&dev->kvm->lock);

	/* Copy the read value out only after dropping all locks. */
	if (!ret && !is_write)
		ret = put_user(val, uaddr);

	return ret;
}
421
vgic_v2_set_attr(struct kvm_device * dev,struct kvm_device_attr * attr)422 static int vgic_v2_set_attr(struct kvm_device *dev,
423 struct kvm_device_attr *attr)
424 {
425 switch (attr->group) {
426 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
427 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
428 return vgic_v2_attr_regs_access(dev, attr, true);
429 default:
430 return vgic_set_common_attr(dev, attr);
431 }
432 }
433
vgic_v2_get_attr(struct kvm_device * dev,struct kvm_device_attr * attr)434 static int vgic_v2_get_attr(struct kvm_device *dev,
435 struct kvm_device_attr *attr)
436 {
437 switch (attr->group) {
438 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
439 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
440 return vgic_v2_attr_regs_access(dev, attr, false);
441 default:
442 return vgic_get_common_attr(dev, attr);
443 }
444 }
445
vgic_v2_has_attr(struct kvm_device * dev,struct kvm_device_attr * attr)446 static int vgic_v2_has_attr(struct kvm_device *dev,
447 struct kvm_device_attr *attr)
448 {
449 switch (attr->group) {
450 case KVM_DEV_ARM_VGIC_GRP_ADDR:
451 switch (attr->attr) {
452 case KVM_VGIC_V2_ADDR_TYPE_DIST:
453 case KVM_VGIC_V2_ADDR_TYPE_CPU:
454 return 0;
455 }
456 break;
457 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
458 case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
459 return vgic_v2_has_attr_regs(dev, attr);
460 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
461 return 0;
462 case KVM_DEV_ARM_VGIC_GRP_CTRL:
463 switch (attr->attr) {
464 case KVM_DEV_ARM_VGIC_CTRL_INIT:
465 return 0;
466 }
467 }
468 return -ENXIO;
469 }
470
/* Device ops backing the userspace-created "kvm-arm-vgic-v2" device. */
struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};
479
vgic_v3_parse_attr(struct kvm_device * dev,struct kvm_device_attr * attr,struct vgic_reg_attr * reg_attr)480 int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
481 struct vgic_reg_attr *reg_attr)
482 {
483 unsigned long vgic_mpidr, mpidr_reg;
484
485 /*
486 * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
487 * attr might not hold MPIDR. Hence assume vcpu0.
488 */
489 if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
490 vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
491 KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;
492
493 mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
494 reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
495 } else {
496 reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
497 }
498
499 if (!reg_attr->vcpu)
500 return -EINVAL;
501
502 reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
503
504 return 0;
505 }
506
507 /*
508 * Allow access to certain ID-like registers prior to VGIC initialization,
509 * thereby allowing the VMM to provision the features / sizing of the VGIC.
510 */
reg_allowed_pre_init(struct kvm_device_attr * attr)511 static bool reg_allowed_pre_init(struct kvm_device_attr *attr)
512 {
513 if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS)
514 return false;
515
516 switch (attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK) {
517 case GICD_IIDR:
518 case GICD_TYPER2:
519 return true;
520 default:
521 return false;
522 }
523 }
524
525 /*
526 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
527 *
528 * @dev: kvm device handle
529 * @attr: kvm device attribute
530 * @is_write: true if userspace is writing a register
531 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    bool is_write)
{
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	/* Whether this function itself copies the value to/from userspace. */
	bool uaccess;
	u32 val;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		/* Sysregs uaccess is performed by the sysreg handling code */
		uaccess = false;
		break;
	default:
		uaccess = true;
	}

	/* Fetch the value to write before taking any locks. */
	if (uaccess && is_write) {
		u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
		if (get_user(val, uaddr))
			return -EFAULT;
	}

	/*
	 * Lock ordering: kvm->lock, then all vcpu mutexes, then config_lock;
	 * released in reverse order below.
	 */
	mutex_lock(&dev->kvm->lock);

	if (kvm_trylock_all_vcpus(dev->kvm)) {
		mutex_unlock(&dev->kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&dev->kvm->arch.config_lock);

	/* Before init, only the ID-like registers may be touched. */
	if (!(vgic_initialized(dev->kvm) || reg_allowed_pre_init(attr))) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
		ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		ret = vgic_v3_cpu_sysregs_uaccess(vcpu, attr, is_write);
		break;
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		unsigned int info, intid;

		info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
			KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
		if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
			intid = attr->attr &
				KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
			ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
							      intid, &val);
		} else {
			ret = -EINVAL;
		}
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&dev->kvm->arch.config_lock);
	kvm_unlock_all_vcpus(dev->kvm);
	mutex_unlock(&dev->kvm->lock);

	/* Copy the read value out only after dropping all locks. */
	if (!ret && uaccess && !is_write) {
		u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
		ret = put_user(val, uaddr);
	}

	return ret;
}
621
/* SET_DEVICE_ATTR entry point for the GICv3 device. */
static int vgic_v3_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
		return vgic_v3_attr_regs_access(dev, attr, true);
	case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: {
		u32 __user *uaddr = (u32 __user *)attr->addr;
		u32 val;

		if (get_user(val, uaddr))
			return -EFAULT;

		/* config_lock is dropped automatically at scope exit. */
		guard(mutex)(&dev->kvm->arch.config_lock);
		/* The maintenance IRQ can only be set before vgic init. */
		if (vgic_initialized(dev->kvm))
			return -EBUSY;

		/* Only a PPI is a valid maintenance interrupt. */
		if (!irq_is_ppi(val))
			return -EINVAL;

		dev->kvm->arch.vgic.mi_intid = val;
		return 0;
	}
	default:
		return vgic_set_common_attr(dev, attr);
	}
}
652
/* GET_DEVICE_ATTR entry point for the GICv3 device. */
static int vgic_v3_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
		return vgic_v3_attr_regs_access(dev, attr, false);
	case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		/* Read mi_intid under config_lock; dropped at scope exit. */
		guard(mutex)(&dev->kvm->arch.config_lock);
		return put_user(dev->kvm->arch.vgic.mi_intid, uaddr);
	}
	default:
		return vgic_get_common_attr(dev, attr);
	}
}
672
vgic_v3_has_attr(struct kvm_device * dev,struct kvm_device_attr * attr)673 static int vgic_v3_has_attr(struct kvm_device *dev,
674 struct kvm_device_attr *attr)
675 {
676 switch (attr->group) {
677 case KVM_DEV_ARM_VGIC_GRP_ADDR:
678 switch (attr->attr) {
679 case KVM_VGIC_V3_ADDR_TYPE_DIST:
680 case KVM_VGIC_V3_ADDR_TYPE_REDIST:
681 case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
682 return 0;
683 }
684 break;
685 case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
686 case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
687 case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
688 return vgic_v3_has_attr_regs(dev, attr);
689 case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
690 case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
691 return 0;
692 case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
693 if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
694 KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
695 VGIC_LEVEL_INFO_LINE_LEVEL)
696 return 0;
697 break;
698 }
699 case KVM_DEV_ARM_VGIC_GRP_CTRL:
700 switch (attr->attr) {
701 case KVM_DEV_ARM_VGIC_CTRL_INIT:
702 return 0;
703 case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
704 return 0;
705 }
706 }
707 return -ENXIO;
708 }
709
/* Device ops backing the userspace-created "kvm-arm-vgic-v3" device. */
struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};
718