// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"

/*
 * Initialization rules: there are multiple stages to the vgic
 * initialization, both for the distributor and the CPU interfaces. The basic
 * idea is that even though the VGIC is not functional or not requested from
 * user space, the critical path of the run loop can still call VGIC functions
 * that just won't do anything, without them having to check additional
 * initialization flags to ensure they don't look at uninitialized data
 * structures.
 *
 * Distributor:
 *
 * - kvm_vgic_early_init(): initialization of static data that doesn't
 *   depend on any sizing information or emulation type. No allocation
 *   is allowed there.
 *
 * - vgic_init(): allocation and initialization of the generic data
 *   structures that depend on sizing information (number of CPUs,
 *   number of interrupts). Also initializes the vcpu specific data
 *   structures. Can be executed lazily for GICv2.
 *
 * CPU Interface:
 *
 * - kvm_vgic_vcpu_init(): initialization of static data that doesn't depend
 *   on any sizing information. Private interrupts are allocated if not
 *   already allocated at vgic-creation time.
 */
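
/*
 * For orientation, the userspace sequence that drives these stages is
 * roughly the following (a minimal, hedged sketch; vm_fd and the ordering
 * details are illustrative, and error handling is elided). GICv3 requires
 * the explicit KVM_DEV_ARM_VGIC_CTRL_INIT step, while GICv2 may instead
 * be initialized lazily on the first vCPU run:
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_V3,
 *	};
 *	struct kvm_device_attr init = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
 *		.attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
 *	};
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);		// kvm_vgic_create()
 *	// ... create vCPUs, set GRP_ADDR and GRP_NR_IRQS attributes ...
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &init);	// ends up in vgic_init()
 */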

/* EARLY INIT */

/**
 * kvm_vgic_early_init() - Initialize static VGIC data structures
 * @kvm: The VM whose VGIC distributor should be initialized
 *
 * Only do initialization of static structures that don't require any
 * allocation or sizing information from userspace. vgic_init() calls
 * kvm_vgic_dist_init() which takes care of the rest.
 */
void kvm_vgic_early_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
}

/* CREATION */

static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type);

/**
 * kvm_vgic_create: triggered by the instantiation of the VGIC device by
 * user space, either through the legacy KVM_CREATE_IRQCHIP ioctl (v2 only)
 * or through the generic KVM_CREATE_DEVICE API ioctl.
 * irqchip_in_kernel() tells you if this function succeeded or not.
 * @kvm: kvm struct pointer
 * @type: KVM_DEV_TYPE_ARM_VGIC_V[23]
 */
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	struct kvm_vcpu *vcpu;
	u64 aa64pfr0, pfr1;
	unsigned long i;
	int ret;

	/*
	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
	 * which had no chance yet to check the availability of the GICv2
	 * emulation. So check this here again. KVM_CREATE_DEVICE does
	 * the proper checks already.
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
	    !kvm_vgic_global_state.can_emulate_gicv2)
		return -ENODEV;

	/*
	 * Ensure mutual exclusion with vCPU creation and any vCPU ioctls by:
	 *
	 * - Holding kvm->lock to prevent KVM_CREATE_VCPU from reaching
	 *   kvm_arch_vcpu_precreate() and ensuring created_vcpus is stable.
	 *   This alone is insufficient, as kvm_vm_ioctl_create_vcpu() drops
	 *   the kvm->lock before completing the vCPU creation.
	 */
	lockdep_assert_held(&kvm->lock);

	/*
	 * - Acquiring the vCPU mutex for every *online* vCPU to prevent
	 *   concurrent vCPU ioctls for vCPUs already visible to userspace.
	 */
	ret = -EBUSY;
	if (kvm_trylock_all_vcpus(kvm))
		return ret;

	/*
	 * - Taking the config_lock which protects VGIC data structures such
	 *   as the per-vCPU arrays of private IRQs (SGIs, PPIs).
	 */
	mutex_lock(&kvm->arch.config_lock);

	/*
	 * - Bailing on the entire thing if a vCPU is in the middle of creation,
	 *   dropped the kvm->lock, but hasn't reached kvm_arch_vcpu_create().
	 *
	 * The whole combination of this guarantees that no vCPU can get into
	 * KVM with a VGIC configuration inconsistent with the VM's VGIC.
	 */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		goto out_unlock;

	if (irqchip_in_kernel(kvm)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu_has_run_once(vcpu))
			goto out_unlock;
	}
	ret = 0;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->max_vcpus = VGIC_V2_MAX_CPUS;
	else
		kvm->max_vcpus = VGIC_V3_MAX_CPUS;

	if (atomic_read(&kvm->online_vcpus) > kvm->max_vcpus) {
		ret = -E2BIG;
		goto out_unlock;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		ret = vgic_allocate_private_irqs_locked(vcpu, type);
		if (ret)
			break;
	}

	if (ret) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

			kfree(vgic_cpu->private_irqs);
			vgic_cpu->private_irqs = NULL;
		}

		goto out_unlock;
	}

	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;
	kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST;

	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;

	aa64pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
	pfr1 = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2) {
		kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	} else {
		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
		aa64pfr0 |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
		pfr1 |= SYS_FIELD_PREP_ENUM(ID_PFR1_EL1, GIC, GICv3);
	}

	kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, aa64pfr0);
	kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, pfr1);

	if (type == KVM_DEV_TYPE_ARM_VGIC_V3)
		kvm->arch.vgic.nassgicap = system_supports_direct_sgis();

out_unlock:
	mutex_unlock(&kvm->arch.config_lock);
	kvm_unlock_all_vcpus(kvm);
	return ret;
}
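
/*
 * A hedged sketch of the two creation paths named in the comment above
 * (illustrative vm_fd, no error handling):
 *
 *	// Legacy path, GICv2 only:
 *	ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0);
 *
 *	// Generic device path, v2 or v3:
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_V2,	// or ..._V3
 *	};
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	// on success, cd.fd is the VGIC device fd
 */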

/* INIT/DESTROY */

/**
 * kvm_vgic_dist_init: initialize the dist data structures
 * @kvm: kvm struct pointer
 * @nr_spis: number of spis, frozen by caller
 */
static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
	int i;

	dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
	if (!dist->spis)
		return -ENOMEM;

	/*
	 * In the following code we do not take the irq struct lock since
	 * no other action on irq structs can happen while the VGIC is
	 * not initialized yet:
	 * If someone wants to inject an interrupt or does an MMIO access, we
	 * require prior initialization in case of a virtual GICv3 or trigger
	 * initialization when using a virtual GICv2.
	 */
	for (i = 0; i < nr_spis; i++) {
		struct vgic_irq *irq = &dist->spis[i];

		irq->intid = i + VGIC_NR_PRIVATE_IRQS;
		INIT_LIST_HEAD(&irq->ap_list);
		raw_spin_lock_init(&irq->irq_lock);
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu0;
		refcount_set(&irq->refcount, 0);
		switch (dist->vgic_model) {
		case KVM_DEV_TYPE_ARM_VGIC_V2:
			irq->targets = 0;
			irq->group = 0;
			break;
		case KVM_DEV_TYPE_ARM_VGIC_V3:
			irq->mpidr = 0;
			irq->group = 1;
			break;
		default:
			kfree(dist->spis);
			dist->spis = NULL;
			return -EINVAL;
		}
	}
	return 0;
}

/* Default GICv3 Maintenance Interrupt INTID, as per SBSA */
#define DEFAULT_MI_INTID	25

int kvm_vgic_vcpu_nv_init(struct kvm_vcpu *vcpu)
{
	int ret;

	guard(mutex)(&vcpu->kvm->arch.config_lock);

	/*
	 * Matching the tradition established with the timers, provide
	 * a default PPI for the maintenance interrupt. It makes
	 * things easier to reason about.
	 */
	if (vcpu->kvm->arch.vgic.mi_intid == 0)
		vcpu->kvm->arch.vgic.mi_intid = DEFAULT_MI_INTID;
	ret = kvm_vgic_set_owner(vcpu, vcpu->kvm->arch.vgic.mi_intid, vcpu);

	return ret;
}
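
/*
 * Userspace can override DEFAULT_MI_INTID before the first vCPU run. A
 * hedged sketch, assuming the KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ attribute
 * group from the GICv3 device documentation and that the INTID is passed
 * as a __u32 through kvm_device_attr.addr (check the uAPI documentation
 * for the exact encoding); gic_fd and the chosen PPI are illustrative:
 *
 *	__u32 intid = 27;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ,
 *		.addr  = (__u64)(unsigned long)&intid,
 *	};
 *
 *	ioctl(gic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */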

static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int i;

	lockdep_assert_held(&vcpu->kvm->arch.config_lock);

	if (vgic_cpu->private_irqs)
		return 0;

	vgic_cpu->private_irqs = kcalloc(VGIC_NR_PRIVATE_IRQS,
					 sizeof(struct vgic_irq),
					 GFP_KERNEL_ACCOUNT);

	if (!vgic_cpu->private_irqs)
		return -ENOMEM;

	/*
	 * Enable and configure all SGIs to be edge-triggered and
	 * configure all PPIs as level-triggered.
	 */
	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

		INIT_LIST_HEAD(&irq->ap_list);
		raw_spin_lock_init(&irq->irq_lock);
		irq->intid = i;
		irq->vcpu = NULL;
		irq->target_vcpu = vcpu;
		refcount_set(&irq->refcount, 0);
		if (vgic_irq_is_sgi(i)) {
			/* SGIs */
			irq->enabled = 1;
			irq->config = VGIC_CONFIG_EDGE;
		} else {
			/* PPIs */
			irq->config = VGIC_CONFIG_LEVEL;
		}

		switch (type) {
		case KVM_DEV_TYPE_ARM_VGIC_V3:
			irq->group = 1;
			irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
			break;
		case KVM_DEV_TYPE_ARM_VGIC_V2:
			irq->group = 0;
			irq->targets = BIT(vcpu->vcpu_id);
			break;
		}
	}

	return 0;
}

static int vgic_allocate_private_irqs(struct kvm_vcpu *vcpu, u32 type)
{
	int ret;

	mutex_lock(&vcpu->kvm->arch.config_lock);
	ret = vgic_allocate_private_irqs_locked(vcpu, type);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return ret;
}

/**
 * kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
 * structures and register VCPU-specific KVM iodevs
 *
 * @vcpu: pointer to the VCPU being created and initialized
 *
 * Only do initialization, but do not actually enable the
 * VGIC CPU interface
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int ret = 0;

	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
	raw_spin_lock_init(&vgic_cpu->ap_list_lock);
	atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	ret = vgic_allocate_private_irqs(vcpu, dist->vgic_model);
	if (ret)
		return ret;

	/*
	 * If we are creating a VCPU with a GICv3 we must also register the
	 * KVM io device for the redistributor that belongs to this VCPU.
	 */
	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		mutex_lock(&vcpu->kvm->slots_lock);
		ret = vgic_register_redist_iodev(vcpu);
		mutex_unlock(&vcpu->kvm->slots_lock);
	}
	return ret;
}

static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_enable(vcpu);
	else
		vgic_v3_enable(vcpu);
}

/*
 * vgic_init: allocates and initializes dist and vcpu data structures
 * depending on two dimensioning parameters:
 * - the number of spis
 * - the number of vcpus
 * The function is generally called when nr_spis has been explicitly set
 * by userspace through the KVM device API. If not, nr_spis defaults to
 * VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS.
 * vgic_initialized() returns true when this function has succeeded.
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0;
	unsigned long idx;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (vgic_initialized(kvm))
		return 0;

	/* Are we also in the middle of creating a VCPU? */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* freeze the number of spis */
	if (!dist->nr_spis)
		dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;

	ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
	if (ret)
		goto out;

	/*
	 * Ensure vPEs are allocated if direct IRQ injection (e.g. vSGIs,
	 * vLPIs) is supported.
	 */
	if (vgic_supports_direct_irqs(kvm)) {
		ret = vgic_v4_init(kvm);
		if (ret)
			goto out;
	}

	kvm_for_each_vcpu(idx, vcpu, kvm)
		kvm_vgic_vcpu_enable(vcpu);

	ret = kvm_vgic_setup_default_irq_routing(kvm);
	if (ret)
		goto out;

	vgic_debug_init(kvm);
	dist->initialized = true;
out:
	return ret;
}
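
/*
 * The "explicitly set" case mentioned in the comment above corresponds to
 * userspace writing the KVM_DEV_ARM_VGIC_GRP_NR_IRQS attribute before
 * initialization. A hedged sketch, with gic_fd assumed to be the VGIC
 * device fd; note the value is the total number of IRQs, i.e. nr_spis
 * plus the 32 private interrupts:
 *
 *	__u32 nr_irqs = 128;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
 *		.addr  = (__u64)(unsigned long)&nr_irqs,
 *	};
 *
 *	ioctl(gic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */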

static void kvm_vgic_dist_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg, *next;

	dist->ready = false;
	dist->initialized = false;

	kfree(dist->spis);
	dist->spis = NULL;
	dist->nr_spis = 0;
	dist->vgic_dist_base = VGIC_ADDR_UNDEF;

	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
			vgic_v3_free_redist_region(kvm, rdreg);
		INIT_LIST_HEAD(&dist->rd_regions);
	} else {
		dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
	}

	if (vgic_supports_direct_irqs(kvm))
		vgic_v4_teardown(kvm);

	xa_destroy(&dist->lpi_xa);
}

static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	/*
	 * Retire all pending LPIs on this vcpu anyway as we're
	 * going to destroy it.
	 */
	vgic_flush_pending_lpis(vcpu);

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
	kfree(vgic_cpu->private_irqs);
	vgic_cpu->private_irqs = NULL;

	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		/*
		 * If this vCPU is being destroyed because of a failed creation
		 * then unregister the redistributor to avoid leaving behind a
		 * dangling pointer to the vCPU struct.
		 *
		 * vCPUs that have been successfully created (i.e. added to
		 * kvm->vcpu_array) get unregistered in kvm_vgic_destroy(), as
		 * this function gets called while holding kvm->arch.config_lock
		 * in the VM teardown path and would otherwise introduce a lock
		 * inversion w.r.t. kvm->srcu.
		 *
		 * vCPUs that failed creation are torn down outside of the
		 * kvm->arch.config_lock and do not get unregistered in
		 * kvm_vgic_destroy(), meaning it is both safe and necessary to
		 * do so here.
		 */
		if (kvm_get_vcpu_by_id(vcpu->kvm, vcpu->vcpu_id) != vcpu)
			vgic_unregister_redist_iodev(vcpu);

		vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
	}
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->slots_lock);
	__kvm_vgic_vcpu_destroy(vcpu);
	mutex_unlock(&kvm->slots_lock);
}

void kvm_vgic_destroy(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	mutex_lock(&kvm->slots_lock);
	mutex_lock(&kvm->arch.config_lock);

	vgic_debug_destroy(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm)
		__kvm_vgic_vcpu_destroy(vcpu);

	kvm_vgic_dist_destroy(kvm);

	mutex_unlock(&kvm->arch.config_lock);

	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
		kvm_for_each_vcpu(i, vcpu, kvm)
			vgic_unregister_redist_iodev(vcpu);

	mutex_unlock(&kvm->slots_lock);
}

/**
 * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
 * is a GICv2. A GICv3 must be explicitly initialized by userspace using the
 * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
 * @kvm: kvm struct pointer
 */
int vgic_lazy_init(struct kvm *kvm)
{
	int ret = 0;

	if (unlikely(!vgic_initialized(kvm))) {
		/*
		 * We only provide the automatic initialization of the VGIC
		 * for the legacy case of a GICv2. Any other type must
		 * be explicitly initialized once setup with the respective
		 * KVM device call.
		 */
		if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
			return -EBUSY;

		mutex_lock(&kvm->arch.config_lock);
		ret = vgic_init(kvm);
		mutex_unlock(&kvm->arch.config_lock);
	}

	return ret;
}
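
/*
 * The explicit initialization required for a GICv3 (in place of the lazy
 * path above) is a single control attribute. A hedged userspace sketch,
 * with gic_fd assumed to be the fd returned by KVM_CREATE_DEVICE:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
 *		.attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
 *	};
 *
 *	ioctl(gic_fd, KVM_SET_DEVICE_ATTR, &attr);	// ends up in vgic_init()
 */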

/* RESOURCE MAPPING */

/**
 * kvm_vgic_map_resources - map the MMIO regions
 * @kvm: kvm struct pointer
 *
 * Map the MMIO regions depending on the VGIC model exposed to the guest;
 * called on the first VCPU run.
 * Also map the virtual CPU interface into the VM.
 * v2 calls vgic_init() if not already done.
 * v3 and derivatives return an error if the VGIC is not initialized.
 */
int kvm_vgic_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	enum vgic_type type;
	gpa_t dist_base;
	int ret = 0;

	if (likely(smp_load_acquire(&dist->ready)))
		return 0;

	mutex_lock(&kvm->slots_lock);
	mutex_lock(&kvm->arch.config_lock);
	if (dist->ready)
		goto out;

	if (!irqchip_in_kernel(kvm))
		goto out;

	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		ret = vgic_v2_map_resources(kvm);
		type = VGIC_V2;
	} else {
		ret = vgic_v3_map_resources(kvm);
		type = VGIC_V3;
	}

	if (ret)
		goto out;

	dist_base = dist->vgic_dist_base;
	mutex_unlock(&kvm->arch.config_lock);

	ret = vgic_register_dist_iodev(kvm, dist_base, type);
	if (ret) {
		kvm_err("Unable to register VGIC dist MMIO regions\n");
		goto out_slots;
	}

	smp_store_release(&dist->ready, true);
	goto out_slots;
out:
	mutex_unlock(&kvm->arch.config_lock);
out_slots:
	if (ret)
		kvm_vm_dead(kvm);

	mutex_unlock(&kvm->slots_lock);

	return ret;
}
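
/*
 * For the mapping above to succeed, userspace must have provided the base
 * addresses beforehand via the KVM_DEV_ARM_VGIC_GRP_ADDR group. A hedged
 * GICv3 sketch (the addresses and gic_fd are illustrative):
 *
 *	__u64 dist_base = 0x08000000, redist_base = 0x080a0000;
 *	struct kvm_device_attr dist = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_V3_ADDR_TYPE_DIST,
 *		.addr  = (__u64)(unsigned long)&dist_base,
 *	};
 *	struct kvm_device_attr redist = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_V3_ADDR_TYPE_REDIST,
 *		.addr  = (__u64)(unsigned long)&redist_base,
 *	};
 *
 *	ioctl(gic_fd, KVM_SET_DEVICE_ATTR, &dist);
 *	ioctl(gic_fd, KVM_SET_DEVICE_ATTR, &redist);
 */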

/* GENERIC PROBE */

void kvm_vgic_cpu_up(void)
{
	enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
}

void kvm_vgic_cpu_down(void)
{
	disable_percpu_irq(kvm_vgic_global_state.maint_irq);
}

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)data;

	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_fold_lr_state).
	 *
	 * Of course, NV throws a wrench in this plan, and needs
	 * something special.
	 */
	if (vcpu && vgic_state_is_nested(vcpu))
		vgic_v3_handle_nested_maint_irq(vcpu);

	return IRQ_HANDLED;
}

static struct gic_kvm_info *gic_kvm_info;

void __init vgic_set_kvm_info(const struct gic_kvm_info *info)
{
	BUG_ON(gic_kvm_info != NULL);
	gic_kvm_info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (gic_kvm_info)
		*gic_kvm_info = *info;
}

/**
 * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
 *
 * For a specific CPU, initialize the GIC VE hardware.
 */
void kvm_vgic_init_cpu_hardware(void)
{
	BUG_ON(preemptible());

	/*
	 * We want to make sure the list registers start out clear so that we
	 * only have to program the used registers.
	 */
	if (kvm_vgic_global_state.type == VGIC_V2) {
		vgic_v2_init_lrs();
	} else if (kvm_vgic_global_state.type == VGIC_V3 ||
		   kvm_vgic_global_state.has_gcie_v3_compat) {
		kvm_call_hyp(__vgic_v3_init_lrs);
	}
}

/**
 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
 * according to the host GIC model. Accordingly calls either
 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
 * instantiated by userspace later on.
 */
int kvm_vgic_hyp_init(void)
{
	bool has_mask;
	int ret;

	if (!gic_kvm_info)
		return -ENODEV;

	has_mask = !gic_kvm_info->no_maint_irq_mask;

	if (has_mask && !gic_kvm_info->maint_irq) {
		kvm_err("No vgic maintenance irq\n");
		return -ENXIO;
	}

	/*
	 * If we get one of these oddball non-GICs, taint the kernel,
	 * as we have no idea of how they *really* behave.
	 */
	if (gic_kvm_info->no_hw_deactivation) {
		kvm_info("Non-architectural vgic, tainting kernel\n");
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
		kvm_vgic_global_state.no_hw_deactivation = true;
	}

	switch (gic_kvm_info->type) {
	case GIC_V2:
		ret = vgic_v2_probe(gic_kvm_info);
		break;
	case GIC_V3:
		ret = vgic_v3_probe(gic_kvm_info);
		if (!ret) {
			static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);
			kvm_info("GIC system register CPU interface enabled\n");
		}
		break;
	case GIC_V5:
		ret = vgic_v5_probe(gic_kvm_info);
		break;
	default:
		ret = -ENODEV;
	}

	kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;

	kfree(gic_kvm_info);
	gic_kvm_info = NULL;

	if (ret)
		return ret;

	if (!has_mask && !kvm_vgic_global_state.maint_irq)
		return 0;

	ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
				 vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n",
			kvm_vgic_global_state.maint_irq);
		return ret;
	}

	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
	return 0;
}