1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015, 2016 ARM Ltd.
4  */
5 
6 #include <linux/interrupt.h>
7 #include <linux/irq.h>
8 #include <linux/kvm.h>
9 #include <linux/kvm_host.h>
10 #include <linux/list_sort.h>
11 #include <linux/nospec.h>
12 
13 #include <asm/kvm_hyp.h>
14 
15 #include "vgic.h"
16 
17 #define CREATE_TRACE_POINTS
18 #include "trace.h"
19 
20 struct vgic_global kvm_vgic_global_state __ro_after_init = {
21 	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
22 };
23 
24 /*
25  * Locking order is always:
26  * kvm->lock (mutex)
27  *   vcpu->mutex (mutex)
28  *     kvm->arch.config_lock (mutex)
29  *       its->cmd_lock (mutex)
30  *         its->its_lock (mutex)
31  *           vgic_dist->lpi_xa.xa_lock		must be taken with IRQs disabled
32  *             vgic_cpu->ap_list_lock		must be taken with IRQs disabled
33  *               vgic_irq->irq_lock		must be taken with IRQs disabled
34  *
35  * As the ap_list_lock might be taken from the timer interrupt handler,
36  * we have to disable IRQs before taking this lock and everything lower
37  * than it.
38  *
39  * The config_lock has additional ordering requirements:
40  * kvm->slots_lock
41  *   kvm->srcu
42  *     kvm->arch.config_lock
43  *
44  * If you need to take multiple locks, always take the upper lock first,
45  * then the lower ones, e.g. first take the its_lock, then the irq_lock.
46  * If you are already holding a lock and need to take a higher one, you
47  * have to drop the lower ranking lock first and re-acquire it after having
48  * taken the upper one.
49  *
50  * When taking more than one ap_list_lock at the same time, always take the
51  * lowest numbered VCPU's ap_list_lock first, so:
52  *   vcpuX->vcpu_id < vcpuY->vcpu_id:
53  *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
54  *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
55  *
56  * Since the VGIC must support injecting virtual interrupts from ISRs, we have
57  * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
58  * spinlocks for any lock that may be taken while injecting an interrupt.
59  */
60 
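/*
 * Illustrative sketch only (not used by this file): the "drop the lower
 * lock, take the upper one, then re-take the lower one" rule described
 * above, for a hypothetical caller that holds an irq_lock but then also
 * needs the its_lock:
 *
 *	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);	// drop the lower lock first
 *	mutex_lock(&its->its_lock);				// then take the upper one
 *	raw_spin_lock_irqsave(&irq->irq_lock, flags);		// finally re-acquire the lower lock
 */
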
61 /*
62  * Index the VM's xarray of mapped LPIs and return a reference to the IRQ
63  * structure. The caller is expected to call vgic_put_irq() later once it's
64  * finished with the IRQ.
65  */
66 static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
67 {
68 	struct vgic_dist *dist = &kvm->arch.vgic;
69 	struct vgic_irq *irq = NULL;
70 
71 	rcu_read_lock();
72 
73 	irq = xa_load(&dist->lpi_xa, intid);
74 	if (!vgic_try_get_irq_ref(irq))
75 		irq = NULL;
76 
77 	rcu_read_unlock();
78 
79 	return irq;
80 }
81 
82 /*
83  * This looks up the virtual interrupt ID to get the corresponding
84  * struct vgic_irq. It also increases the refcount, so any caller is expected
85  * to call vgic_put_irq() once it's finished with this IRQ.
86  */
87 struct vgic_irq *vgic_get_irq(struct kvm *kvm, u32 intid)
88 {
89 	/* SPIs */
90 	if (intid >= VGIC_NR_PRIVATE_IRQS &&
91 	    intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
92 		intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
93 		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
94 	}
95 
96 	/* LPIs */
97 	if (intid >= VGIC_MIN_LPI)
98 		return vgic_get_lpi(kvm, intid);
99 
100 	return NULL;
101 }
102 
103 struct vgic_irq *vgic_get_vcpu_irq(struct kvm_vcpu *vcpu, u32 intid)
104 {
105 	if (WARN_ON(!vcpu))
106 		return NULL;
107 
108 	/* SGIs and PPIs */
109 	if (intid < VGIC_NR_PRIVATE_IRQS) {
110 		intid = array_index_nospec(intid, VGIC_NR_PRIVATE_IRQS);
111 		return &vcpu->arch.vgic_cpu.private_irqs[intid];
112 	}
113 
114 	return vgic_get_irq(vcpu->kvm, intid);
115 }
116 
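/*
 * Usage sketch (illustrative only, not used by this file): every lookup
 * through vgic_get_irq()/vgic_get_vcpu_irq() must be paired with a
 * vgic_put_irq() once the caller is done with the interrupt, typically
 * around a locked critical section:
 *
 *	struct vgic_irq *irq = vgic_get_irq(kvm, intid);
 *
 *	if (!irq)
 *		return -EINVAL;
 *	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 *	...inspect or modify the interrupt state...
 *	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 *	vgic_put_irq(kvm, irq);
 */
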
117 static void vgic_release_lpi_locked(struct vgic_dist *dist, struct vgic_irq *irq)
118 {
119 	lockdep_assert_held(&dist->lpi_xa.xa_lock);
120 	__xa_erase(&dist->lpi_xa, irq->intid);
121 	kfree_rcu(irq, rcu);
122 }
123 
124 static __must_check bool __vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
125 {
126 	if (irq->intid < VGIC_MIN_LPI)
127 		return false;
128 
129 	return refcount_dec_and_test(&irq->refcount);
130 }
131 
132 static __must_check bool vgic_put_irq_norelease(struct kvm *kvm, struct vgic_irq *irq)
133 {
134 	if (!__vgic_put_irq(kvm, irq))
135 		return false;
136 
137 	irq->pending_release = true;
138 	return true;
139 }
140 
141 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
142 {
143 	struct vgic_dist *dist = &kvm->arch.vgic;
144 	unsigned long flags;
145 
146 	/*
147 	 * Normally the lock is only taken when the refcount drops to 0.
148 	 * Acquire/release it early on lockdep kernels to make locking issues
149 	 * in rare release paths a bit more obvious.
150 	 */
151 	if (IS_ENABLED(CONFIG_LOCKDEP) && irq->intid >= VGIC_MIN_LPI) {
152 		guard(spinlock_irqsave)(&dist->lpi_xa.xa_lock);
153 	}
154 
155 	if (!__vgic_put_irq(kvm, irq))
156 		return;
157 
158 	xa_lock_irqsave(&dist->lpi_xa, flags);
159 	vgic_release_lpi_locked(dist, irq);
160 	xa_unlock_irqrestore(&dist->lpi_xa, flags);
161 }
162 
163 static void vgic_release_deleted_lpis(struct kvm *kvm)
164 {
165 	struct vgic_dist *dist = &kvm->arch.vgic;
166 	unsigned long flags, intid;
167 	struct vgic_irq *irq;
168 
169 	xa_lock_irqsave(&dist->lpi_xa, flags);
170 
171 	xa_for_each(&dist->lpi_xa, intid, irq) {
172 		if (irq->pending_release)
173 			vgic_release_lpi_locked(dist, irq);
174 	}
175 
176 	xa_unlock_irqrestore(&dist->lpi_xa, flags);
177 }
178 
179 void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
180 {
181 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
182 	struct vgic_irq *irq, *tmp;
183 	bool deleted = false;
184 	unsigned long flags;
185 
186 	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
187 
188 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
189 		if (irq->intid >= VGIC_MIN_LPI) {
190 			raw_spin_lock(&irq->irq_lock);
191 			list_del(&irq->ap_list);
192 			irq->vcpu = NULL;
193 			raw_spin_unlock(&irq->irq_lock);
194 			deleted |= vgic_put_irq_norelease(vcpu->kvm, irq);
195 		}
196 	}
197 
198 	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
199 
200 	if (deleted)
201 		vgic_release_deleted_lpis(vcpu->kvm);
202 }
203 
204 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
205 {
206 	WARN_ON(irq_set_irqchip_state(irq->host_irq,
207 				      IRQCHIP_STATE_PENDING,
208 				      pending));
209 }
210 
211 bool vgic_get_phys_line_level(struct vgic_irq *irq)
212 {
213 	bool line_level;
214 
215 	BUG_ON(!irq->hw);
216 
217 	if (irq->ops && irq->ops->get_input_level)
218 		return irq->ops->get_input_level(irq->intid);
219 
220 	WARN_ON(irq_get_irqchip_state(irq->host_irq,
221 				      IRQCHIP_STATE_PENDING,
222 				      &line_level));
223 	return line_level;
224 }
225 
226 /* Set/Clear the physical active state */
227 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
228 {
229 
230 	BUG_ON(!irq->hw);
231 	WARN_ON(irq_set_irqchip_state(irq->host_irq,
232 				      IRQCHIP_STATE_ACTIVE,
233 				      active));
234 }
235 
236 /**
237  * vgic_target_oracle - compute the target vcpu for an irq
238  *
239  * @irq:	The irq to route. Must be already locked.
240  *
241  * Based on the current state of the interrupt (enabled, pending,
242  * active, vcpu and target_vcpu), compute the next vcpu this should be
243  * given to. Return NULL if this shouldn't be injected at all.
244  *
245  * Requires the IRQ lock to be held.
246  */
247 struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
248 {
249 	lockdep_assert_held(&irq->irq_lock);
250 
251 	/* If the interrupt is active, it must stay on the current vcpu */
252 	if (irq->active)
253 		return irq->vcpu ? : irq->target_vcpu;
254 
255 	/*
256 	 * If the IRQ is not active but enabled and pending, we should direct
257 	 * it to its configured target VCPU.
258 	 * If the distributor is disabled, pending interrupts shouldn't be
259 	 * forwarded.
260 	 */
261 	if (irq->enabled && irq_is_pending(irq)) {
262 		if (unlikely(irq->target_vcpu &&
263 			     !irq->target_vcpu->kvm->arch.vgic.enabled))
264 			return NULL;
265 
266 		return irq->target_vcpu;
267 	}
268 
269 	/* If the IRQ is neither active nor both pending and enabled, it should
270 	 * not be queued to any VCPU.
271 	 */
272 	return NULL;
273 }
274 
275 struct vgic_sort_info {
276 	struct kvm_vcpu *vcpu;
277 	struct vgic_vmcr vmcr;
278 };
279 
280 /*
281  * The order of items in the ap_lists also defines how we pack things into
282  * the LRs: the first items in the list are the first ones populated in the
283  * LRs.
284  *
285  * Pending, non-active interrupts must be placed at the head of the list.
286  * Otherwise things should be sorted by the priority field and the GIC
287  * hardware support will take care of preemption of priority groups etc.
288  * Interrupts that are not deliverable should be at the end of the list.
289  *
290  * Return negative if "a" sorts before "b", 0 to preserve order, and positive
291  * to sort "b" before "a".
292  */
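/*
 * Worked example (illustrative only), assuming both groups are enabled in
 * the VMCR: a pending priority-0xa0 interrupt sorts before a pending
 * priority-0xc0 one, which sorts before any active-only interrupt, which in
 * turn sorts before anything the oracle considers undeliverable on this vcpu.
 */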
293 static int vgic_irq_cmp(void *priv, const struct list_head *a,
294 			const struct list_head *b)
295 {
296 	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
297 	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
298 	struct vgic_sort_info *info = priv;
299 	struct kvm_vcpu *vcpu = info->vcpu;
300 	bool penda, pendb;
301 	int ret;
302 
303 	/*
304 	 * list_sort may call this function with the same element when
305 	 * the list is fairly long.
306 	 */
307 	if (unlikely(irqa == irqb))
308 		return 0;
309 
310 	raw_spin_lock(&irqa->irq_lock);
311 	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
312 
313 	/* Undeliverable interrupts should be last */
314 	ret = (int)(vgic_target_oracle(irqb) == vcpu) - (int)(vgic_target_oracle(irqa) == vcpu);
315 	if (ret)
316 		goto out;
317 
318 	/* Same thing for interrupts targeting a disabled group */
319 	ret =  (int)(irqb->group ? info->vmcr.grpen1 : info->vmcr.grpen0);
320 	ret -= (int)(irqa->group ? info->vmcr.grpen1 : info->vmcr.grpen0);
321 	if (ret)
322 		goto out;
323 
324 	penda = irqa->enabled && irq_is_pending(irqa) && !irqa->active;
325 	pendb = irqb->enabled && irq_is_pending(irqb) && !irqb->active;
326 
327 	ret = (int)pendb - (int)penda;
328 	if (ret)
329 		goto out;
330 
331 	/* Both pending and enabled, sort by priority (lower number first) */
332 	ret = (int)irqa->priority - (int)irqb->priority;
333 	if (ret)
334 		goto out;
335 
336 	/* Finally, HW bit active interrupts have priority over non-HW ones */
337 	ret = (int)irqb->hw - (int)irqa->hw;
338 
339 out:
340 	raw_spin_unlock(&irqb->irq_lock);
341 	raw_spin_unlock(&irqa->irq_lock);
342 	return ret;
343 }
344 
345 /* Must be called with the ap_list_lock held */
346 static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
347 {
348 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
349 	struct vgic_sort_info info = { .vcpu = vcpu, };
350 
351 	lockdep_assert_held(&vgic_cpu->ap_list_lock);
352 
353 	vgic_get_vmcr(vcpu, &info.vmcr);
354 	list_sort(&info, &vgic_cpu->ap_list_head, vgic_irq_cmp);
355 }
356 
357 /*
358  * An injection is only valid if it changes the level of a level-triggered
359  * IRQ or signals a rising edge; additionally, in-kernel connected IRQ lines
360  * can only be controlled by their owner.
361  */
362 static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
363 {
364 	if (irq->owner != owner)
365 		return false;
366 
367 	switch (irq->config) {
368 	case VGIC_CONFIG_LEVEL:
369 		return irq->line_level != level;
370 	case VGIC_CONFIG_EDGE:
371 		return level;
372 	}
373 
374 	return false;
375 }
376 
377 static bool vgic_model_needs_bcst_kick(struct kvm *kvm)
378 {
379 	/*
380 	 * A GICv3 (or GICv3-like) system exposing a GICv3 to the guest
381 	 * needs a broadcast kick to set TDIR globally.
382 	 *
383 	 * For systems that do not have TDIR (ARM's own v8.0 CPUs), the
384 	 * shadow TDIR bit is always set, and so is the register's TC bit,
385 	 * so no need to kick the CPUs.
386 	 */
387 	return (cpus_have_final_cap(ARM64_HAS_ICH_HCR_EL2_TDIR) &&
388 		kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3);
389 }
390 
391 /*
392  * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
393  * Do the queuing if necessary, taking the right locks in the right order.
394  * Returns true when the IRQ was queued, false otherwise.
395  *
396  * Needs to be entered with the IRQ lock already held, but will return
397  * with all locks dropped.
398  */
399 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
400 			   unsigned long flags) __releases(&irq->irq_lock)
401 {
402 	struct kvm_vcpu *vcpu;
403 	bool bcast;
404 
405 	lockdep_assert_held(&irq->irq_lock);
406 
407 retry:
408 	vcpu = vgic_target_oracle(irq);
409 	if (irq->vcpu || !vcpu) {
410 		/*
411 		 * If this IRQ is already on a VCPU's ap_list, then it
412 		 * cannot be moved or modified and there is no more work for
413 		 * us to do.
414 		 *
415 		 * Otherwise, if the irq is not pending and enabled, it does
416 		 * not need to be inserted into an ap_list and there is also
417 		 * no more work for us to do.
418 		 */
419 		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
420 
421 		/*
422 		 * We have to kick the VCPU here, because we could be
423 		 * queueing an edge-triggered interrupt for which we
424 		 * get no EOI maintenance interrupt. In that case,
425 		 * while the IRQ is already on the VCPU's AP list, the
426 		 * VCPU could have EOI'ed the original interrupt and
427 		 * won't see this one until it exits for some other
428 		 * reason.
429 		 */
430 		if (vcpu) {
431 			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
432 			kvm_vcpu_kick(vcpu);
433 		}
434 		return false;
435 	}
436 
437 	/*
438 	 * We must unlock the irq lock to take the ap_list_lock where
439 	 * we are going to insert this new pending interrupt.
440 	 */
441 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
442 
443 	/* Someone else may change the IRQ state here; we re-check it below */
444 
445 	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
446 	raw_spin_lock(&irq->irq_lock);
447 
448 	/*
449 	 * Did something change behind our backs?
450 	 *
451 	 * There are two cases:
452 	 * 1) The irq lost its pending state or was disabled behind our
453 	 *    backs and/or it was queued to another VCPU's ap_list.
454 	 * 2) Someone changed the affinity on this irq behind our
455 	 *    backs and we are now holding the wrong ap_list_lock.
456 	 *
457 	 * In both cases, drop the locks and retry.
458 	 */
459 
460 	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
461 		raw_spin_unlock(&irq->irq_lock);
462 		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
463 					   flags);
464 
465 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
466 		goto retry;
467 	}
468 
469 	/*
470 	 * Grab a reference to the irq to reflect the fact that it is
471 	 * now in the ap_list. This is safe as the caller must already hold a
472 	 * reference on the irq.
473 	 */
474 	vgic_get_irq_ref(irq);
475 	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
476 	irq->vcpu = vcpu;
477 
478 	/* A new SPI may result in deactivation trapping on all vcpus */
479 	bcast = (vgic_model_needs_bcst_kick(vcpu->kvm) &&
480 		 vgic_valid_spi(vcpu->kvm, irq->intid) &&
481 		 atomic_fetch_inc(&vcpu->kvm->arch.vgic.active_spis) == 0);
482 
483 	raw_spin_unlock(&irq->irq_lock);
484 	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
485 
486 	if (!bcast) {
487 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
488 		kvm_vcpu_kick(vcpu);
489 	} else {
490 		kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_IRQ_PENDING);
491 	}
492 
493 	return true;
494 }
495 
496 /**
497  * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
498  * @kvm:     The VM structure pointer
499  * @vcpu:    The CPU for PPIs or NULL for global interrupts
500  * @intid:   The INTID to inject a new state to.
501  * @level:   Edge-triggered:  true:  to trigger the interrupt
502  *			      false: to ignore the call
503  *	     Level-sensitive: true:  raise the input signal
504  *			      false: lower the input signal
505  * @owner:   The opaque pointer to the owner of the IRQ being raised to verify
506  *           that the caller is allowed to inject this IRQ.  Userspace
507  *           injections will have owner == NULL.
508  *
509  * The VGIC is not concerned with devices being active-LOW or active-HIGH for
510  * level-sensitive interrupts.  You can think of the level parameter as 1
511  * being HIGH and 0 being LOW and all devices being active-HIGH.
512  */
513 int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
514 			unsigned int intid, bool level, void *owner)
515 {
516 	struct vgic_irq *irq;
517 	unsigned long flags;
518 	int ret;
519 
520 	ret = vgic_lazy_init(kvm);
521 	if (ret)
522 		return ret;
523 
524 	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
525 		return -EINVAL;
526 
527 	trace_vgic_update_irq_pending(vcpu ? vcpu->vcpu_idx : 0, intid, level);
528 
529 	if (intid < VGIC_NR_PRIVATE_IRQS)
530 		irq = vgic_get_vcpu_irq(vcpu, intid);
531 	else
532 		irq = vgic_get_irq(kvm, intid);
533 	if (!irq)
534 		return -EINVAL;
535 
536 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
537 
538 	if (!vgic_validate_injection(irq, level, owner)) {
539 		/* Nothing to see here, move along... */
540 		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
541 		vgic_put_irq(kvm, irq);
542 		return 0;
543 	}
544 
545 	if (irq->config == VGIC_CONFIG_LEVEL)
546 		irq->line_level = level;
547 	else
548 		irq->pending_latch = true;
549 
550 	vgic_queue_irq_unlock(kvm, irq, flags);
551 	vgic_put_irq(kvm, irq);
552 
553 	return 0;
554 }
555 
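/*
 * Usage sketch (illustrative only): an in-kernel device model asserting and
 * later deasserting a level-sensitive SPI. "MY_SPI_INTID" and "my_dev" are
 * hypothetical; in-kernel users pass the cookie they registered with
 * kvm_vgic_set_owner(), while userspace-driven injections pass NULL.
 *
 *	kvm_vgic_inject_irq(kvm, NULL, MY_SPI_INTID, true, my_dev);
 *	...
 *	kvm_vgic_inject_irq(kvm, NULL, MY_SPI_INTID, false, my_dev);
 */
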
556 /* @irq->irq_lock must be held */
557 static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
558 			    unsigned int host_irq,
559 			    struct irq_ops *ops)
560 {
561 	struct irq_desc *desc;
562 	struct irq_data *data;
563 
564 	/*
565 	 * Find the physical IRQ number corresponding to @host_irq
566 	 */
567 	desc = irq_to_desc(host_irq);
568 	if (!desc) {
569 		kvm_err("%s: no interrupt descriptor\n", __func__);
570 		return -EINVAL;
571 	}
572 	data = irq_desc_get_irq_data(desc);
573 	while (data->parent_data)
574 		data = data->parent_data;
575 
576 	irq->hw = true;
577 	irq->host_irq = host_irq;
578 	irq->hwintid = data->hwirq;
579 	irq->ops = ops;
580 	return 0;
581 }
582 
583 /* @irq->irq_lock must be held */
584 static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
585 {
586 	irq->hw = false;
587 	irq->hwintid = 0;
588 	irq->ops = NULL;
589 }
590 
591 int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
592 			  u32 vintid, struct irq_ops *ops)
593 {
594 	struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
595 	unsigned long flags;
596 	int ret;
597 
598 	BUG_ON(!irq);
599 
600 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
601 	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
602 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
603 	vgic_put_irq(vcpu->kvm, irq);
604 
605 	return ret;
606 }
607 
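/*
 * Usage sketch (illustrative only): forwarding a host interrupt line to a
 * guest PPI, with an optional get_input_level() callback so the VGIC can
 * resample the line level. "host_irq", "GUEST_PPI", "my_irq_ops" and
 * "my_get_line_level" are hypothetical placeholders.
 *
 *	static struct irq_ops my_irq_ops = {
 *		.get_input_level = my_get_line_level,
 *	};
 *
 *	ret = kvm_vgic_map_phys_irq(vcpu, host_irq, GUEST_PPI, &my_irq_ops);
 *	...
 *	ret = kvm_vgic_unmap_phys_irq(vcpu, GUEST_PPI);
 */
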
608 /**
609  * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
610  * @vcpu: The VCPU pointer
611  * @vintid: The INTID of the interrupt
612  *
613  * Reset the active and pending states of a mapped interrupt.  Kernel
614  * subsystems injecting mapped interrupts should reset their interrupt lines
615  * when we are doing a reset of the VM.
616  */
617 void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
618 {
619 	struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
620 	unsigned long flags;
621 
622 	if (!irq->hw)
623 		goto out;
624 
625 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
626 	irq->active = false;
627 	irq->pending_latch = false;
628 	irq->line_level = false;
629 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
630 out:
631 	vgic_put_irq(vcpu->kvm, irq);
632 }
633 
634 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
635 {
636 	struct vgic_irq *irq;
637 	unsigned long flags;
638 
639 	if (!vgic_initialized(vcpu->kvm))
640 		return -EAGAIN;
641 
642 	irq = vgic_get_vcpu_irq(vcpu, vintid);
643 	BUG_ON(!irq);
644 
645 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
646 	kvm_vgic_unmap_irq(irq);
647 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
648 	vgic_put_irq(vcpu->kvm, irq);
649 
650 	return 0;
651 }
652 
653 int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid)
654 {
655 	struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
656 	unsigned long flags;
657 	int ret = -1;
658 
659 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
660 	if (irq->hw)
661 		ret = irq->hwintid;
662 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
663 
664 	vgic_put_irq(vcpu->kvm, irq);
665 	return ret;
666 }
667 
668 /**
669  * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
670  *
671  * @vcpu:   Pointer to the VCPU (used for PPIs)
672  * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
673  * @owner:  Opaque pointer to the owner
674  *
675  * Returns 0 if intid is not already used by another in-kernel device and the
676  * owner is set, otherwise returns an error code.
677  */
678 int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
679 {
680 	struct vgic_irq *irq;
681 	unsigned long flags;
682 	int ret = 0;
683 
684 	if (!vgic_initialized(vcpu->kvm))
685 		return -EAGAIN;
686 
687 	/* SGIs and LPIs cannot be wired up to any device */
688 	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
689 		return -EINVAL;
690 
691 	irq = vgic_get_vcpu_irq(vcpu, intid);
692 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
693 	if (irq->owner && irq->owner != owner)
694 		ret = -EEXIST;
695 	else
696 		irq->owner = owner;
697 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
698 
699 	return ret;
700 }
701 
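/*
 * Usage sketch (illustrative only): an in-kernel emulated device claims its
 * line once and then uses the same cookie for every injection, so that
 * injections from any other source are rejected by vgic_validate_injection().
 * "my_ppi" and "ctx" are hypothetical.
 *
 *	ret = kvm_vgic_set_owner(vcpu, my_ppi, ctx);
 *	...
 *	ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu, my_ppi, true, ctx);
 */
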
702 /**
703  * vgic_prune_ap_list - Remove non-relevant interrupts from the list
704  *
705  * @vcpu: The VCPU pointer
706  *
707  * Go over the list of "interesting" interrupts, and prune those that we
708  * won't have to consider in the near future.
709  */
710 static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
711 {
712 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
713 	struct vgic_irq *irq, *tmp;
714 	bool deleted_lpis = false;
715 
716 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
717 
718 retry:
719 	raw_spin_lock(&vgic_cpu->ap_list_lock);
720 
721 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
722 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
723 		bool target_vcpu_needs_kick = false;
724 
725 		raw_spin_lock(&irq->irq_lock);
726 
727 		BUG_ON(vcpu != irq->vcpu);
728 
729 		target_vcpu = vgic_target_oracle(irq);
730 
731 		if (!target_vcpu) {
732 			/*
733 			 * We don't need to process this interrupt any
734 			 * further, move it off the list.
735 			 */
736 			list_del(&irq->ap_list);
737 			irq->vcpu = NULL;
738 			raw_spin_unlock(&irq->irq_lock);
739 
740 			/*
741 			 * This vgic_put_irq_norelease call matches the
742 			 * vgic_get_irq_ref in vgic_queue_irq_unlock,
743 			 * where we added the LPI to the ap_list. As we
744 			 * remove the irq from the list, we also drop
745 			 * the refcount.
746 			 */
747 			deleted_lpis |= vgic_put_irq_norelease(vcpu->kvm, irq);
748 			continue;
749 		}
750 
751 		if (target_vcpu == vcpu) {
752 			/* We're on the right CPU */
753 			raw_spin_unlock(&irq->irq_lock);
754 			continue;
755 		}
756 
757 		/* This interrupt looks like it has to be migrated. */
758 
759 		raw_spin_unlock(&irq->irq_lock);
760 		raw_spin_unlock(&vgic_cpu->ap_list_lock);
761 
762 		/*
763 		 * Ensure locking order by always locking the smallest
764 		 * ID first.
765 		 */
766 		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
767 			vcpuA = vcpu;
768 			vcpuB = target_vcpu;
769 		} else {
770 			vcpuA = target_vcpu;
771 			vcpuB = vcpu;
772 		}
773 
774 		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
775 		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
776 				      SINGLE_DEPTH_NESTING);
777 		raw_spin_lock(&irq->irq_lock);
778 
779 		/*
780 		 * If the affinity has been preserved, move the
781 		 * interrupt around. Otherwise, it means things have
782 		 * changed while the interrupt was unlocked, and we
783 		 * need to replay this.
784 		 *
785 		 * In all cases, we cannot trust the list not to have
786 		 * changed, so we restart from the beginning.
787 		 */
788 		if (target_vcpu == vgic_target_oracle(irq)) {
789 			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;
790 
791 			list_del(&irq->ap_list);
792 			irq->vcpu = target_vcpu;
793 			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
794 			target_vcpu_needs_kick = true;
795 		}
796 
797 		raw_spin_unlock(&irq->irq_lock);
798 		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
799 		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
800 
801 		if (target_vcpu_needs_kick) {
802 			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
803 			kvm_vcpu_kick(target_vcpu);
804 		}
805 
806 		goto retry;
807 	}
808 
809 	raw_spin_unlock(&vgic_cpu->ap_list_lock);
810 
811 	if (unlikely(deleted_lpis))
812 		vgic_release_deleted_lpis(vcpu->kvm);
813 }
814 
815 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
816 {
817 	if (kvm_vgic_global_state.type == VGIC_V2)
818 		vgic_v2_fold_lr_state(vcpu);
819 	else
820 		vgic_v3_fold_lr_state(vcpu);
821 }
822 
823 /* Requires the irq_lock to be held. */
824 static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
825 				    struct vgic_irq *irq, int lr)
826 {
827 	lockdep_assert_held(&irq->irq_lock);
828 
829 	if (kvm_vgic_global_state.type == VGIC_V2)
830 		vgic_v2_populate_lr(vcpu, irq, lr);
831 	else
832 		vgic_v3_populate_lr(vcpu, irq, lr);
833 }
834 
835 static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
836 {
837 	if (kvm_vgic_global_state.type == VGIC_V2)
838 		vgic_v2_clear_lr(vcpu, lr);
839 	else
840 		vgic_v3_clear_lr(vcpu, lr);
841 }
842 
843 static void summarize_ap_list(struct kvm_vcpu *vcpu,
844 			      struct ap_list_summary *als)
845 {
846 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
847 	struct vgic_irq *irq;
848 
849 	lockdep_assert_held(&vgic_cpu->ap_list_lock);
850 
851 	*als = (typeof(*als)){};
852 
853 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
854 		guard(raw_spinlock)(&irq->irq_lock);
855 
856 		if (unlikely(vgic_target_oracle(irq) != vcpu))
857 			continue;
858 
859 		if (!irq->active)
860 			als->nr_pend++;
861 		else
862 			als->nr_act++;
863 
864 		if (irq->intid < VGIC_NR_SGIS)
865 			als->nr_sgi++;
866 	}
867 }
868 
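/*
 * For reference, a minimal sketch of the summary consumed below. This is an
 * assumption for illustration only; the real struct ap_list_summary and the
 * irqs_outside_lrs() helper are defined in vgic.h and may differ:
 *
 *	struct ap_list_summary {
 *		unsigned int nr_pend;	// pending (and not active) interrupts
 *		unsigned int nr_act;	// active interrupts
 *		unsigned int nr_sgi;	// SGIs present in the ap_list
 *	};
 *
 *	// assumed: true when the ap_list cannot fit in the available LRs
 *	#define irqs_outside_lrs(als)						\
 *		(((als)->nr_pend + (als)->nr_act) > kvm_vgic_global_state.nr_lr)
 */
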
869 /*
870  * Dealing with LR overflow is close to black magic -- dress accordingly.
871  *
872  * We have to present an almost infinite number of interrupts through a very
873  * limited number of registers. Therefore crucial decisions must be made to
874  * ensure we feed the most relevant interrupts into the LRs, and yet have
875  * some facilities to let the guest interact with those that are not there.
876  *
877  * All considerations below are in the context of interrupts targeting a
878  * single vcpu with non-idle state (either pending, active, or both),
879  * colloquially called the ap_list:
880  *
881  * - Pending interrupts must have priority over active interrupts. This also
882  *   excludes pending+active interrupts. This ensures that a guest can
883  *   perform priority drops on any number of interrupts, and yet be
884  *   presented the next pending one.
885  *
886  * - Deactivation of interrupts outside of the LRs must be tracked, using
887  *   the EOIcount-driven maintenance interrupt and, in some cases, by
888  *   trapping the DIR register.
889  *
890  * - For EOImode=0, a non-zero EOIcount means walking the ap_list past the
891  *   point that made it into the LRs, and deactivating interrupts that would
892  *   have made it onto the LRs if we had the space.
893  *
894  * - The MI-generation bits must be used to try and force an exit when the
895  *   guest has made enough changes to the LRs that we want to reevaluate the
896  *   situation:
897  *
898  *	- if the total number of pending interrupts exceeds the number of
899  *	  LRs, NPIE must be set in order to exit once no pending interrupts
900  *	  are present in the LRs, allowing us to populate the next batch.
901  *
902  *	- if there are active interrupts outside of the LRs, then LRENPIE
903  *	  must be set so that we exit on deactivation of one of these, and
904  *	  work out which one is to be deactivated.  Note that this is not
905  *	  enough to deal with EOImode=1, see below.
906  *
907  *	- if the overall number of interrupts exceeds the number of LRs,
908  *	  then UIE must be set to allow refilling of the LRs once the
909  *	  majority of them have been processed.
910  *
911  *	- as usual, MI triggers are only an optimisation, since we cannot
912  *	  rely on the MI being delivered in a timely manner...
913  *
914  * - EOImode=1 creates some additional problems:
915  *
916  *      - deactivation can happen in any order, and we cannot rely on
917  *	  EOImode=0's coupling of priority-drop and deactivation which
918  *	  imposes strict reverse Ack order. This means that DIR must
919  *	  trap if we have active interrupts outside of the LRs.
920  *
921  *      - deactivation of SPIs can occur on any CPU, while the SPI is only
922  *	  present in the ap_list of the CPU that actually ack-ed it. In that
923  *	  case, EOIcount doesn't provide enough information, and we must
924  *	  resort to trapping DIR even if we don't overflow the LRs. Bonus
925  *	  point for not trapping DIR when no SPIs are pending or active in
926  *	  the whole VM.
927  *
928  *	- LPIs do not suffer the same problem as SPIs on deactivation, as we
929  *	  have to essentially discard the active state, see below.
930  *
931  * - Virtual LPIs have an active state (surprise!), which gets removed on
932  *   priority drop (EOI). However, EOIcount doesn't get bumped when the LPI
933  *   is not present in the LR (surprise again!). Special care must therefore
934  *   be taken to remove the active state from any activated LPI when exiting
935  *   from the guest. This is in a way no different from what happens on the
936  *   physical side. We still rely on the running priority to have been
937  *   removed from the APRs, irrespective of the LPI being present in the LRs
938  *   or not.
939  *
940  * - Virtual SGIs directly injected via GICv4.1 must not affect EOIcount, as
941  *   they are not managed in SW and don't have a true active state. So only
942  *   set vSGIEOICount when no SGIs are in the ap_list.
943  *
944  * - GICv2 SGIs with multiple sources are injected one source at a time, as
945  *   if they were made pending sequentially. This may mean that we don't
946  *   always present the HPPI if other interrupts with lower priority are
947  *   pending in the LRs. Big deal.
948  */
949 static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
950 {
951 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
952 	struct ap_list_summary als;
953 	struct vgic_irq *irq;
954 	int count = 0;
955 
956 	lockdep_assert_held(&vgic_cpu->ap_list_lock);
957 
958 	summarize_ap_list(vcpu, &als);
959 
960 	if (irqs_outside_lrs(&als))
961 		vgic_sort_ap_list(vcpu);
962 
963 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
964 		scoped_guard(raw_spinlock, &irq->irq_lock) {
965 			if (likely(vgic_target_oracle(irq) == vcpu)) {
966 				vgic_populate_lr(vcpu, irq, count++);
967 			}
968 		}
969 
970 		if (count == kvm_vgic_global_state.nr_lr)
971 			break;
972 	}
973 
974 	/* Nuke remaining LRs */
975 	for (int i = count; i < kvm_vgic_global_state.nr_lr; i++)
976 		vgic_clear_lr(vcpu, i);
977 
978 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
979 		vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
980 		vgic_v2_configure_hcr(vcpu, &als);
981 	} else {
982 		vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
983 		vgic_v3_configure_hcr(vcpu, &als);
984 	}
985 }
986 
987 static inline bool can_access_vgic_from_kernel(void)
988 {
989 	/*
990 	 * GICv2 can always be accessed from the kernel because it is
991 	 * memory-mapped, and VHE systems can access GICv3 EL2 system
992 	 * registers.
993 	 */
994 	return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
995 }
996 
997 static inline void vgic_save_state(struct kvm_vcpu *vcpu)
998 {
999 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
1000 		vgic_v2_save_state(vcpu);
1001 	else
1002 		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
1003 }
1004 
1005 /* Sync back the hardware VGIC state into our emulation after a guest's run. */
1006 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1007 {
1008 	/* If nesting, emulate the HW effect from L0 to L1 */
1009 	if (vgic_state_is_nested(vcpu)) {
1010 		vgic_v3_sync_nested(vcpu);
1011 		return;
1012 	}
1013 
1014 	if (vcpu_has_nv(vcpu))
1015 		vgic_v3_nested_update_mi(vcpu);
1016 
1017 	if (can_access_vgic_from_kernel())
1018 		vgic_save_state(vcpu);
1019 
1020 	vgic_fold_lr_state(vcpu);
1021 	vgic_prune_ap_list(vcpu);
1022 }
1023 
1024 /* Sync interrupts that were deactivated through a DIR trap */
1025 void kvm_vgic_process_async_update(struct kvm_vcpu *vcpu)
1026 {
1027 	unsigned long flags;
1028 
1029 	/* Make sure we're in the same context as LR handling */
1030 	local_irq_save(flags);
1031 	vgic_prune_ap_list(vcpu);
1032 	local_irq_restore(flags);
1033 }
1034 
1035 static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
1036 {
1037 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
1038 		vgic_v2_restore_state(vcpu);
1039 	else
1040 		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
1041 }
1042 
1043 /* Flush our emulation state into the GIC hardware before entering the guest. */
1044 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1045 {
1046 	/*
1047 	 * If in a nested state, we must return early. Two possibilities:
1048 	 *
1049 	 * - If we have any pending IRQ for the guest and the guest
1050 	 *   expects IRQs to be handled in its virtual EL2 mode (the
1051 	 *   virtual IMO bit is set) and it is not already running in
1052 	 *   virtual EL2 mode, then we have to emulate an IRQ
1053 	 *   exception to virtual EL2.
1054 	 *
1055 	 *   We do that by placing a request to ourselves which will
1056 	 *   abort the entry procedure and inject the exception at the
1057 	 *   beginning of the run loop.
1058 	 *
1059 	 * - Otherwise, do exactly *NOTHING* apart from enabling the virtual
1060 	 *   CPU interface. The guest state is already loaded, and we can
1061 	 *   carry on with running it.
1062 	 *
1063 	 * If we have NV, but are not in a nested state, compute the
1064 	 * maintenance interrupt state, as it may fire.
1065 	 */
1066 	if (vgic_state_is_nested(vcpu)) {
1067 		if (kvm_vgic_vcpu_pending_irq(vcpu))
1068 			kvm_make_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu);
1069 
1070 		vgic_v3_flush_nested(vcpu);
1071 		return;
1072 	}
1073 
1074 	if (vcpu_has_nv(vcpu))
1075 		vgic_v3_nested_update_mi(vcpu);
1076 
1077 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
1078 
1079 	scoped_guard(raw_spinlock, &vcpu->arch.vgic_cpu.ap_list_lock)
1080 		vgic_flush_lr_state(vcpu);
1081 
1082 	if (can_access_vgic_from_kernel())
1083 		vgic_restore_state(vcpu);
1084 
1085 	if (vgic_supports_direct_irqs(vcpu->kvm))
1086 		vgic_v4_commit(vcpu);
1087 }
1088 
1089 void kvm_vgic_load(struct kvm_vcpu *vcpu)
1090 {
1091 	if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
1092 		if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
1093 			__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
1094 		return;
1095 	}
1096 
1097 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
1098 		vgic_v2_load(vcpu);
1099 	else
1100 		vgic_v3_load(vcpu);
1101 }
1102 
1103 void kvm_vgic_put(struct kvm_vcpu *vcpu)
1104 {
1105 	if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
1106 		if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
1107 			__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
1108 		return;
1109 	}
1110 
1111 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
1112 		vgic_v2_put(vcpu);
1113 	else
1114 		vgic_v3_put(vcpu);
1115 }
1116 
1117 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
1118 {
1119 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1120 	struct vgic_irq *irq;
1121 	bool pending = false;
1122 	unsigned long flags;
1123 	struct vgic_vmcr vmcr;
1124 
1125 	if (!vcpu->kvm->arch.vgic.enabled)
1126 		return false;
1127 
1128 	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
1129 		return true;
1130 
1131 	vgic_get_vmcr(vcpu, &vmcr);
1132 
1133 	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
1134 
1135 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
1136 		raw_spin_lock(&irq->irq_lock);
1137 		pending = irq_is_pending(irq) && irq->enabled &&
1138 			  !irq->active &&
1139 			  irq->priority < vmcr.pmr;
1140 		raw_spin_unlock(&irq->irq_lock);
1141 
1142 		if (pending)
1143 			break;
1144 	}
1145 
1146 	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
1147 
1148 	return pending;
1149 }
1150 
1151 void vgic_kick_vcpus(struct kvm *kvm)
1152 {
1153 	struct kvm_vcpu *vcpu;
1154 	unsigned long c;
1155 
1156 	/*
1157 	 * We've injected an interrupt, time to find out who deserves
1158 	 * a good kick...
1159 	 */
1160 	kvm_for_each_vcpu(c, vcpu, kvm) {
1161 		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
1162 			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
1163 			kvm_vcpu_kick(vcpu);
1164 		}
1165 	}
1166 }
1167 
1168 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
1169 {
1170 	struct vgic_irq *irq;
1171 	bool map_is_active;
1172 	unsigned long flags;
1173 
1174 	if (!vgic_initialized(vcpu->kvm))
1175 		return false;
1176 
1177 	irq = vgic_get_vcpu_irq(vcpu, vintid);
1178 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
1179 	map_is_active = irq->hw && irq->active;
1180 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
1181 	vgic_put_irq(vcpu->kvm, irq);
1182 
1183 	return map_is_active;
1184 }
1185 
1186 /*
1187  * Level-triggered mapped IRQs are special because we only observe rising
1188  * edges as input to the VGIC.
1189  *
1190  * If the guest never acked the interrupt we have to sample the physical
1191  * line and set the line level, because the device state could have changed
1192  * or we simply need to process the still pending interrupt later.
1193  *
1194  * We could also have entered the guest with the interrupt active+pending.
1195  * On the next exit, we need to re-evaluate the pending state, as it could
1196  * otherwise result in a spurious interrupt by injecting a now potentially
1197  * stale pending state.
1198  *
1199  * If this causes us to lower the level, we have to also clear the physical
1200  * active state, since we will otherwise never be told when the interrupt
1201  * becomes asserted again.
1202  *
1203  * Another case is when the interrupt requires a helping hand on
1204  * deactivation (no HW deactivation, for example).
1205  */
1206 void vgic_irq_handle_resampling(struct vgic_irq *irq,
1207 				bool lr_deactivated, bool lr_pending)
1208 {
1209 	if (vgic_irq_is_mapped_level(irq)) {
1210 		bool resample = false;
1211 
1212 		if (unlikely(vgic_irq_needs_resampling(irq))) {
1213 			resample = !(irq->active || irq->pending_latch);
1214 		} else if (lr_pending || (lr_deactivated && irq->line_level)) {
1215 			irq->line_level = vgic_get_phys_line_level(irq);
1216 			resample = !irq->line_level;
1217 		}
1218 
1219 		if (resample)
1220 			vgic_irq_set_phys_active(irq, false);
1221 	}
1222 }
1223