1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2015, 2016 ARM Ltd.
4 */
5
6 #include <linux/uaccess.h>
7 #include <linux/interrupt.h>
8 #include <linux/cpu.h>
9 #include <linux/kvm_host.h>
10 #include <kvm/arm_vgic.h>
11 #include <asm/kvm_emulate.h>
12 #include <asm/kvm_mmu.h>
13 #include "vgic.h"
14
15 /*
16 * Initialization rules: there are multiple stages to the vgic
17 * initialization, both for the distributor and the CPU interfaces. The basic
18 * idea is that even though the VGIC is not functional or not requested from
19 * user space, the critical path of the run loop can still call VGIC functions
20 * that just won't do anything, without them having to check additional
21 * initialization flags to ensure they don't look at uninitialized data
22 * structures.
23 *
24 * Distributor:
25 *
26 * - kvm_vgic_early_init(): initialization of static data that doesn't
27 * depend on any sizing information or emulation type. No allocation
28 * is allowed there.
29 *
30 * - vgic_init(): allocation and initialization of the generic data
31 * structures that depend on sizing information (number of CPUs,
32 * number of interrupts). Also initializes the vcpu specific data
33 * structures. Can be executed lazily for GICv2.
34 *
35 * CPU Interface:
36 *
37 * - kvm_vgic_vcpu_init(): initialization of static data that doesn't depend
38 * on any sizing information. Private interrupts are allocated if not
39 * already allocated at vgic-creation time.
40 */
41
42 /* EARLY INIT */
43
/**
 * kvm_vgic_early_init() - Initialize static VGIC VCPU data structures
 * @kvm: The VM whose VGIC distributor should be initialized
 *
 * Only do initialization of static structures that don't require any
 * allocation or sizing information from userspace. vgic_init() calls
 * kvm_vgic_dist_init() which takes care of the rest.
 */
kvm_vgic_early_init(struct kvm * kvm)52 void kvm_vgic_early_init(struct kvm *kvm)
53 {
54 struct vgic_dist *dist = &kvm->arch.vgic;
55
56 xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
57 }
58
59 /* CREATION */
60
61 static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type);
62
63 /**
64 * kvm_vgic_create: triggered by the instantiation of the VGIC device by
65 * user space, either through the legacy KVM_CREATE_IRQCHIP ioctl (v2 only)
66 * or through the generic KVM_CREATE_DEVICE API ioctl.
67 * irqchip_in_kernel() tells you if this function succeeded or not.
68 * @kvm: kvm struct pointer
69 * @type: KVM_DEV_TYPE_ARM_VGIC_V[23]
70 */
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;
	int ret;

	/*
	 * This function is also called by the KVM_CREATE_IRQCHIP handler,
	 * which had no chance yet to check the availability of the GICv2
	 * emulation. So check this here again. KVM_CREATE_DEVICE does
	 * the proper checks already.
	 */
	if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
	    !kvm_vgic_global_state.can_emulate_gicv2)
		return -ENODEV;

	/*
	 * Ensure mutual exclusion with vCPU creation and any vCPU ioctls by:
	 *
	 * - Holding kvm->lock to prevent KVM_CREATE_VCPU from reaching
	 *   kvm_arch_vcpu_precreate() and ensuring created_vcpus is stable.
	 *   This alone is insufficient, as kvm_vm_ioctl_create_vcpu() drops
	 *   the kvm->lock before completing the vCPU creation.
	 */
	lockdep_assert_held(&kvm->lock);

	/*
	 * - Acquiring the vCPU mutex for every *online* vCPU to prevent
	 *   concurrent vCPU ioctls for vCPUs already visible to userspace.
	 */
	ret = -EBUSY;
	if (kvm_trylock_all_vcpus(kvm))
		return ret;

	/*
	 * - Taking the config_lock which protects VGIC data structures such
	 *   as the per-vCPU arrays of private IRQs (SGIs, PPIs).
	 */
	mutex_lock(&kvm->arch.config_lock);

	/*
	 * - Bailing on the entire thing if a vCPU is in the middle of creation,
	 *   dropped the kvm->lock, but hasn't reached kvm_arch_vcpu_create().
	 *
	 * The whole combination of this guarantees that no vCPU can get into
	 * KVM with a VGIC configuration inconsistent with the VM's VGIC.
	 */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		goto out_unlock;

	/* Only one in-kernel irqchip per VM. */
	if (irqchip_in_kernel(kvm)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	/* A vCPU that has already entered the guest cannot adopt a new irqchip. */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu_has_run_once(vcpu))
			goto out_unlock;
	}
	ret = 0;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->max_vcpus = VGIC_V2_MAX_CPUS;
	else
		kvm->max_vcpus = VGIC_V3_MAX_CPUS;

	/* The chosen model may cap vCPUs below what already exists. */
	if (atomic_read(&kvm->online_vcpus) > kvm->max_vcpus) {
		ret = -E2BIG;
		goto out_unlock;
	}

	/* Allocate the per-vCPU private IRQ (SGI/PPI) arrays up front. */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		ret = vgic_allocate_private_irqs_locked(vcpu, type);
		if (ret)
			break;
	}

	if (ret) {
		/* Roll back any per-vCPU allocations made before the failure. */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
			kfree(vgic_cpu->private_irqs);
			vgic_cpu->private_irqs = NULL;
		}

		goto out_unlock;
	}

	kvm->arch.vgic.in_kernel = true;
	kvm->arch.vgic.vgic_model = type;
	kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST;

	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;

	if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
		kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
	else
		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);

	if (type == KVM_DEV_TYPE_ARM_VGIC_V3)
		kvm->arch.vgic.nassgicap = system_supports_direct_sgis();

out_unlock:
	mutex_unlock(&kvm->arch.config_lock);
	kvm_unlock_all_vcpus(kvm);
	return ret;
}
177
178 /* INIT/DESTROY */
179
180 /**
181 * kvm_vgic_dist_init: initialize the dist data structures
182 * @kvm: kvm struct pointer
183 * @nr_spis: number of spis, frozen by caller
184 */
static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
	int i;

	dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
	if (!dist->spis)
		return -ENOMEM;

	/*
	 * In the following code we do not take the irq struct lock since
	 * no other action on irq structs can happen while the VGIC is
	 * not initialized yet:
	 * If someone wants to inject an interrupt or does a MMIO access, we
	 * require prior initialization in case of a virtual GICv3 or trigger
	 * initialization when using a virtual GICv2.
	 */
	for (i = 0; i < nr_spis; i++) {
		struct vgic_irq *irq = &dist->spis[i];

		/* SPI INTIDs start right after the private (SGI/PPI) range. */
		irq->intid = i + VGIC_NR_PRIVATE_IRQS;
		INIT_LIST_HEAD(&irq->ap_list);
		raw_spin_lock_init(&irq->irq_lock);
		irq->vcpu = NULL;
		/* SPIs initially target vCPU0; the guest may retarget them. */
		irq->target_vcpu = vcpu0;
		kref_init(&irq->refcount);
		switch (dist->vgic_model) {
		case KVM_DEV_TYPE_ARM_VGIC_V2:
			irq->targets = 0;
			irq->group = 0;
			break;
		case KVM_DEV_TYPE_ARM_VGIC_V3:
			irq->mpidr = 0;
			irq->group = 1;
			break;
		default:
			/* Unknown emulation model: undo the allocation. */
			kfree(dist->spis);
			dist->spis = NULL;
			return -EINVAL;
		}
	}
	return 0;
}
229
230 /* Default GICv3 Maintenance Interrupt INTID, as per SBSA */
231 #define DEFAULT_MI_INTID 25
232
kvm_vgic_vcpu_nv_init(struct kvm_vcpu * vcpu)233 int kvm_vgic_vcpu_nv_init(struct kvm_vcpu *vcpu)
234 {
235 int ret;
236
237 guard(mutex)(&vcpu->kvm->arch.config_lock);
238
239 /*
240 * Matching the tradition established with the timers, provide
241 * a default PPI for the maintenance interrupt. It makes
242 * things easier to reason about.
243 */
244 if (vcpu->kvm->arch.vgic.mi_intid == 0)
245 vcpu->kvm->arch.vgic.mi_intid = DEFAULT_MI_INTID;
246 ret = kvm_vgic_set_owner(vcpu, vcpu->kvm->arch.vgic.mi_intid, vcpu);
247
248 return ret;
249 }
250
/*
 * Allocate and initialize this vCPU's array of private IRQs (SGIs + PPIs).
 * Idempotent: returns 0 immediately if the array already exists.
 * Caller must hold kvm->arch.config_lock.
 */
static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int i;

	lockdep_assert_held(&vcpu->kvm->arch.config_lock);

	if (vgic_cpu->private_irqs)
		return 0;

	vgic_cpu->private_irqs = kcalloc(VGIC_NR_PRIVATE_IRQS,
					 sizeof(struct vgic_irq),
					 GFP_KERNEL_ACCOUNT);

	if (!vgic_cpu->private_irqs)
		return -ENOMEM;

	/*
	 * Enable and configure all SGIs to be edge-triggered and
	 * configure all PPIs as level-triggered.
	 */
	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

		INIT_LIST_HEAD(&irq->ap_list);
		raw_spin_lock_init(&irq->irq_lock);
		irq->intid = i;
		irq->vcpu = NULL;
		/* Private interrupts always target their owning vCPU. */
		irq->target_vcpu = vcpu;
		kref_init(&irq->refcount);
		if (vgic_irq_is_sgi(i)) {
			/* SGIs */
			irq->enabled = 1;
			irq->config = VGIC_CONFIG_EDGE;
		} else {
			/* PPIs */
			irq->config = VGIC_CONFIG_LEVEL;
		}

		/* Model-specific routing/group attributes. */
		switch (type) {
		case KVM_DEV_TYPE_ARM_VGIC_V3:
			irq->group = 1;
			irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
			break;
		case KVM_DEV_TYPE_ARM_VGIC_V2:
			irq->group = 0;
			irq->targets = BIT(vcpu->vcpu_id);
			break;
		}
	}

	return 0;
}
304
/* Wrapper taking the config_lock around the locked variant. */
static int vgic_allocate_private_irqs(struct kvm_vcpu *vcpu, u32 type)
{
	guard(mutex)(&vcpu->kvm->arch.config_lock);

	return vgic_allocate_private_irqs_locked(vcpu, type);
}
315
316 /**
317 * kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
318 * structures and register VCPU-specific KVM iodevs
319 *
320 * @vcpu: pointer to the VCPU being created and initialized
321 *
322 * Only do initialization, but do not actually enable the
323 * VGIC CPU interface
324 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int ret = 0;

	vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
	raw_spin_lock_init(&vgic_cpu->ap_list_lock);
	atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);

	/* Without an in-kernel irqchip there is nothing more to set up. */
	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	ret = vgic_allocate_private_irqs(vcpu, dist->vgic_model);
	if (ret)
		return ret;

	/*
	 * If we are creating a VCPU with a GICv3 we must also register the
	 * KVM io device for the redistributor that belongs to this VCPU.
	 */
	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		mutex_lock(&vcpu->kvm->slots_lock);
		ret = vgic_register_redist_iodev(vcpu);
		mutex_unlock(&vcpu->kvm->slots_lock);
	}
	return ret;
}
355
kvm_vgic_vcpu_enable(struct kvm_vcpu * vcpu)356 static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
357 {
358 if (kvm_vgic_global_state.type == VGIC_V2)
359 vgic_v2_enable(vcpu);
360 else
361 vgic_v3_enable(vcpu);
362 }
363
/*
 * vgic_init: allocates and initializes dist and vcpu data structures
 * depending on two dimensioning parameters:
 * - the number of spis
 * - the number of vcpus
 * The function is generally called when nr_spis has been explicitly set
 * by the guest through the KVM DEVICE API. If not, nr_spis defaults to
 * the legacy value (VGIC_NR_IRQS_LEGACY minus the private IRQs).
 * vgic_initialized() returns true when this function has succeeded.
 */
int vgic_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0;
	unsigned long idx;

	lockdep_assert_held(&kvm->arch.config_lock);

	/* Already done (possibly lazily for GICv2) - nothing to do. */
	if (vgic_initialized(kvm))
		return 0;

	/* Are we also in the middle of creating a VCPU? */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* freeze the number of spis */
	if (!dist->nr_spis)
		dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;

	ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
	if (ret)
		goto out;

	/*
	 * Ensure vPEs are allocated if direct IRQ injection (e.g. vSGIs,
	 * vLPIs) is supported.
	 */
	if (vgic_supports_direct_irqs(kvm)) {
		ret = vgic_v4_init(kvm);
		if (ret)
			goto out;
	}

	kvm_for_each_vcpu(idx, vcpu, kvm)
		kvm_vgic_vcpu_enable(vcpu);

	ret = kvm_vgic_setup_default_irq_routing(kvm);
	if (ret)
		goto out;

	vgic_debug_init(kvm);
	/* Mark initialized only once every step above has succeeded. */
	dist->initialized = true;
out:
	return ret;
}
419
/* Free distributor-wide state; the reverse of kvm_vgic_dist_init()/create. */
static void kvm_vgic_dist_destroy(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg, *next;

	dist->ready = false;
	dist->initialized = false;

	kfree(dist->spis);
	dist->spis = NULL;
	dist->nr_spis = 0;
	dist->vgic_dist_base = VGIC_ADDR_UNDEF;

	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		/* Free every redistributor region registered by userspace. */
		list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
			vgic_v3_free_redist_region(kvm, rdreg);
		INIT_LIST_HEAD(&dist->rd_regions);
	} else {
		dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
	}

	if (vgic_supports_direct_irqs(kvm))
		vgic_v4_teardown(kvm);

	xa_destroy(&dist->lpi_xa);
}
446
/* Tear down per-vCPU VGIC state; callers hold kvm->slots_lock. */
static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	/*
	 * Retire all pending LPIs on this vcpu anyway as we're
	 * going to destroy it.
	 */
	vgic_flush_pending_lpis(vcpu);

	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
	kfree(vgic_cpu->private_irqs);
	vgic_cpu->private_irqs = NULL;

	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		/*
		 * If this vCPU is being destroyed because of a failed creation
		 * then unregister the redistributor to avoid leaving behind a
		 * dangling pointer to the vCPU struct.
		 *
		 * vCPUs that have been successfully created (i.e. added to
		 * kvm->vcpu_array) get unregistered in kvm_vgic_destroy(), as
		 * this function gets called while holding kvm->arch.config_lock
		 * in the VM teardown path and would otherwise introduce a lock
		 * inversion w.r.t. kvm->srcu.
		 *
		 * vCPUs that failed creation are torn down outside of the
		 * kvm->arch.config_lock and do not get unregistered in
		 * kvm_vgic_destroy(), meaning it is both safe and necessary to
		 * do so here.
		 */
		if (kvm_get_vcpu_by_id(vcpu->kvm, vcpu->vcpu_id) != vcpu)
			vgic_unregister_redist_iodev(vcpu);

		vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
	}
}
484
kvm_vgic_vcpu_destroy(struct kvm_vcpu * vcpu)485 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
486 {
487 struct kvm *kvm = vcpu->kvm;
488
489 mutex_lock(&kvm->slots_lock);
490 __kvm_vgic_vcpu_destroy(vcpu);
491 mutex_unlock(&kvm->slots_lock);
492 }
493
void kvm_vgic_destroy(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	/* Lock order: slots_lock outside config_lock. */
	mutex_lock(&kvm->slots_lock);
	mutex_lock(&kvm->arch.config_lock);

	vgic_debug_destroy(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm)
		__kvm_vgic_vcpu_destroy(vcpu);

	kvm_vgic_dist_destroy(kvm);

	mutex_unlock(&kvm->arch.config_lock);

	/*
	 * Redistributor iodevs are unregistered here, outside config_lock,
	 * to avoid the lock inversion w.r.t. kvm->srcu described in
	 * __kvm_vgic_vcpu_destroy().
	 */
	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
		kvm_for_each_vcpu(i, vcpu, kvm)
			vgic_unregister_redist_iodev(vcpu);

	mutex_unlock(&kvm->slots_lock);
}
517
518 /**
519 * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
520 * is a GICv2. A GICv3 must be explicitly initialized by userspace using the
521 * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
522 * @kvm: kvm struct pointer
523 */
vgic_lazy_init(struct kvm * kvm)524 int vgic_lazy_init(struct kvm *kvm)
525 {
526 int ret = 0;
527
528 if (unlikely(!vgic_initialized(kvm))) {
529 /*
530 * We only provide the automatic initialization of the VGIC
531 * for the legacy case of a GICv2. Any other type must
532 * be explicitly initialized once setup with the respective
533 * KVM device call.
534 */
535 if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
536 return -EBUSY;
537
538 mutex_lock(&kvm->arch.config_lock);
539 ret = vgic_init(kvm);
540 mutex_unlock(&kvm->arch.config_lock);
541 }
542
543 return ret;
544 }
545
546 /* RESOURCE MAPPING */
547
548 /**
549 * kvm_vgic_map_resources - map the MMIO regions
550 * @kvm: kvm struct pointer
551 *
552 * Map the MMIO regions depending on the VGIC model exposed to the guest
553 * called on the first VCPU run.
554 * Also map the virtual CPU interface into the VM.
555 * v2 calls vgic_init() if not already done.
556 * v3 and derivatives return an error if the VGIC is not initialized.
557 * vgic_ready() returns true if this function has succeeded.
558 */
int kvm_vgic_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	enum vgic_type type;
	gpa_t dist_base;
	int ret = 0;

	/* Fast path: already mapped, nothing to do. */
	if (likely(vgic_ready(kvm)))
		return 0;

	mutex_lock(&kvm->slots_lock);
	mutex_lock(&kvm->arch.config_lock);
	/* Re-check under the locks: another caller may have won the race. */
	if (vgic_ready(kvm))
		goto out;

	if (!irqchip_in_kernel(kvm))
		goto out;

	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		ret = vgic_v2_map_resources(kvm);
		type = VGIC_V2;
	} else {
		ret = vgic_v3_map_resources(kvm);
		type = VGIC_V3;
	}

	if (ret)
		goto out;

	/*
	 * Snapshot the base before dropping config_lock;
	 * vgic_register_dist_iodev() must run without it held.
	 */
	dist_base = dist->vgic_dist_base;
	mutex_unlock(&kvm->arch.config_lock);

	ret = vgic_register_dist_iodev(kvm, dist_base, type);
	if (ret) {
		kvm_err("Unable to register VGIC dist MMIO regions\n");
		goto out_slots;
	}

	/*
	 * kvm_io_bus_register_dev() guarantees all readers see the new MMIO
	 * registration before returning through synchronize_srcu(), which also
	 * implies a full memory barrier. As such, marking the distributor as
	 * 'ready' here is guaranteed to be ordered after all vCPUs having seen
	 * a completely configured distributor.
	 */
	dist->ready = true;
	goto out_slots;
out:
	mutex_unlock(&kvm->arch.config_lock);
out_slots:
	/* Any failure at this point is fatal for the VM. */
	if (ret)
		kvm_vm_dead(kvm);

	mutex_unlock(&kvm->slots_lock);

	return ret;
}
616
617 /* GENERIC PROBE */
618
/* Enable the VGIC maintenance interrupt on the calling CPU. */
void kvm_vgic_cpu_up(void)
{
	enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
}
623
624
/* Disable the VGIC maintenance interrupt on the calling CPU. */
void kvm_vgic_cpu_down(void)
{
	disable_percpu_irq(kvm_vgic_global_state.maint_irq);
}
629
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/* data is the per-CPU running-vCPU pointer (see request_percpu_irq). */
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)data;

	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_fold_lr_state).
	 *
	 * Of course, NV throws a wrench in this plan, and needs
	 * something special.
	 */
	if (vcpu && vgic_state_is_nested(vcpu))
		vgic_v3_handle_nested_maint_irq(vcpu);

	return IRQ_HANDLED;
}
648
649 static struct gic_kvm_info *gic_kvm_info;
650
vgic_set_kvm_info(const struct gic_kvm_info * info)651 void __init vgic_set_kvm_info(const struct gic_kvm_info *info)
652 {
653 BUG_ON(gic_kvm_info != NULL);
654 gic_kvm_info = kmalloc(sizeof(*info), GFP_KERNEL);
655 if (gic_kvm_info)
656 *gic_kvm_info = *info;
657 }
658
659 /**
660 * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
661 *
662 * For a specific CPU, initialize the GIC VE hardware.
663 */
void kvm_vgic_init_cpu_hardware(void)
{
	BUG_ON(preemptible());

	/*
	 * We want to make sure the list registers start out clear so that we
	 * only have to program the used registers.
	 */
	if (kvm_vgic_global_state.type == VGIC_V2) {
		vgic_v2_init_lrs();
	} else if (kvm_vgic_global_state.type == VGIC_V3 ||
		   kvm_vgic_global_state.has_gcie_v3_compat) {
		kvm_call_hyp(__vgic_v3_init_lrs);
	}
}
679
/**
 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
 * according to the host GIC model. Accordingly calls either
 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
 * instantiated by a guest later on.
 */
int kvm_vgic_hyp_init(void)
{
	bool has_mask;
	int ret;

	/* No info registered by an irqchip driver: nothing to virtualize. */
	if (!gic_kvm_info)
		return -ENODEV;

	has_mask = !gic_kvm_info->no_maint_irq_mask;

	if (has_mask && !gic_kvm_info->maint_irq) {
		kvm_err("No vgic maintenance irq\n");
		return -ENXIO;
	}

	/*
	 * If we get one of these oddball non-GICs, taint the kernel,
	 * as we have no idea of how they *really* behave.
	 */
	if (gic_kvm_info->no_hw_deactivation) {
		kvm_info("Non-architectural vgic, tainting kernel\n");
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
		kvm_vgic_global_state.no_hw_deactivation = true;
	}

	switch (gic_kvm_info->type) {
	case GIC_V2:
		ret = vgic_v2_probe(gic_kvm_info);
		break;
	case GIC_V3:
		ret = vgic_v3_probe(gic_kvm_info);
		if (!ret) {
			static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);
			kvm_info("GIC system register CPU interface enabled\n");
		}
		break;
	case GIC_V5:
		ret = vgic_v5_probe(gic_kvm_info);
		break;
	default:
		ret = -ENODEV;
	}

	kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;

	/* The probe consumed the info; free it regardless of the outcome. */
	kfree(gic_kvm_info);
	gic_kvm_info = NULL;

	if (ret)
		return ret;

	/* Maskless GICs without a maintenance IRQ need no handler. */
	if (!has_mask && !kvm_vgic_global_state.maint_irq)
		return 0;

	ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
				 vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n",
			kvm_vgic_global_state.maint_irq);
		return ret;
	}

	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
	return 0;
}
752