xref: /linux/arch/arm64/kvm/vgic/vgic.c (revision 4ea7c1717f3f2344f7a1cdab4f5875cfa89c87a9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015, 2016 ARM Ltd.
4  */
5 
6 #include <linux/interrupt.h>
7 #include <linux/irq.h>
8 #include <linux/kvm.h>
9 #include <linux/kvm_host.h>
10 #include <linux/list_sort.h>
11 #include <linux/nospec.h>
12 
13 #include <asm/kvm_hyp.h>
14 
15 #include "vgic.h"
16 
17 #define CREATE_TRACE_POINTS
18 #include "trace.h"
19 
20 struct vgic_global kvm_vgic_global_state __ro_after_init = {
21 	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
22 };
23 
24 /*
25  * Locking order is always:
26  * kvm->lock (mutex)
27  *   vcpu->mutex (mutex)
28  *     kvm->arch.config_lock (mutex)
29  *       its->cmd_lock (mutex)
30  *         its->its_lock (mutex)
31  *           vgic_dist->lpi_xa.xa_lock		must be taken with IRQs disabled
32  *             vgic_cpu->ap_list_lock		must be taken with IRQs disabled
33  *               vgic_irq->irq_lock		must be taken with IRQs disabled
34  *
35  * As the ap_list_lock might be taken from the timer interrupt handler,
36  * we have to disable IRQs before taking this lock and everything lower
37  * than it.
38  *
39  * The config_lock has additional ordering requirements:
40  * kvm->slots_lock
41  *   kvm->srcu
42  *     kvm->arch.config_lock
43  *
44  * If you need to take multiple locks, always take the upper lock first,
45  * then the lower ones, e.g. first take the its_lock, then the irq_lock.
46  * If you are already holding a lock and need to take a higher one, you
47  * have to drop the lower ranking lock first and re-acquire it after having
48  * taken the upper one.
49  *
50  * When taking more than one ap_list_lock at the same time, always take the
51  * lowest numbered VCPU's ap_list_lock first, so:
52  *   vcpuX->vcpu_id < vcpuY->vcpu_id:
53  *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
54  *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
55  *
56  * Since the VGIC must support injecting virtual interrupts from ISRs, we have
57  * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
58  * spinlocks for any lock that may be taken while injecting an interrupt.
59  */
60 
61 /*
62  * Index the VM's xarray of mapped LPIs and return a reference to the IRQ
63  * structure. The caller is expected to call vgic_put_irq() later once it's
64  * finished with the IRQ.
65  */
vgic_get_lpi(struct kvm * kvm,u32 intid)66 static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
67 {
68 	struct vgic_dist *dist = &kvm->arch.vgic;
69 	struct vgic_irq *irq = NULL;
70 
71 	rcu_read_lock();
72 
73 	irq = xa_load(&dist->lpi_xa, intid);
74 	if (!vgic_try_get_irq_ref(irq))
75 		irq = NULL;
76 
77 	rcu_read_unlock();
78 
79 	return irq;
80 }
81 
82 /*
83  * This looks up the virtual interrupt ID to get the corresponding
84  * struct vgic_irq. It also increases the refcount, so any caller is expected
85  * to call vgic_put_irq() once it's finished with this IRQ.
86  */
vgic_get_irq(struct kvm * kvm,u32 intid)87 struct vgic_irq *vgic_get_irq(struct kvm *kvm, u32 intid)
88 {
89 	/* SPIs */
90 	if (intid >= VGIC_NR_PRIVATE_IRQS &&
91 	    intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
92 		intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
93 		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
94 	}
95 
96 	/* LPIs */
97 	if (intid >= VGIC_MIN_LPI)
98 		return vgic_get_lpi(kvm, intid);
99 
100 	return NULL;
101 }
102 
vgic_get_vcpu_irq(struct kvm_vcpu * vcpu,u32 intid)103 struct vgic_irq *vgic_get_vcpu_irq(struct kvm_vcpu *vcpu, u32 intid)
104 {
105 	if (WARN_ON(!vcpu))
106 		return NULL;
107 
108 	/* SGIs and PPIs */
109 	if (intid < VGIC_NR_PRIVATE_IRQS) {
110 		intid = array_index_nospec(intid, VGIC_NR_PRIVATE_IRQS);
111 		return &vcpu->arch.vgic_cpu.private_irqs[intid];
112 	}
113 
114 	return vgic_get_irq(vcpu->kvm, intid);
115 }
116 
vgic_release_lpi_locked(struct vgic_dist * dist,struct vgic_irq * irq)117 static void vgic_release_lpi_locked(struct vgic_dist *dist, struct vgic_irq *irq)
118 {
119 	lockdep_assert_held(&dist->lpi_xa.xa_lock);
120 	__xa_erase(&dist->lpi_xa, irq->intid);
121 	kfree_rcu(irq, rcu);
122 }
123 
__vgic_put_irq(struct kvm * kvm,struct vgic_irq * irq)124 static __must_check bool __vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
125 {
126 	if (irq->intid < VGIC_MIN_LPI)
127 		return false;
128 
129 	return refcount_dec_and_test(&irq->refcount);
130 }
131 
vgic_put_irq_norelease(struct kvm * kvm,struct vgic_irq * irq)132 static __must_check bool vgic_put_irq_norelease(struct kvm *kvm, struct vgic_irq *irq)
133 {
134 	if (!__vgic_put_irq(kvm, irq))
135 		return false;
136 
137 	irq->pending_release = true;
138 	return true;
139 }
140 
vgic_put_irq(struct kvm * kvm,struct vgic_irq * irq)141 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
142 {
143 	struct vgic_dist *dist = &kvm->arch.vgic;
144 	unsigned long flags;
145 
146 	/*
147 	 * Normally the lock is only taken when the refcount drops to 0.
148 	 * Acquire/release it early on lockdep kernels to make locking issues
149 	 * in rare release paths a bit more obvious.
150 	 */
151 	if (IS_ENABLED(CONFIG_LOCKDEP) && irq->intid >= VGIC_MIN_LPI) {
152 		guard(spinlock_irqsave)(&dist->lpi_xa.xa_lock);
153 	}
154 
155 	if (!__vgic_put_irq(kvm, irq))
156 		return;
157 
158 	xa_lock_irqsave(&dist->lpi_xa, flags);
159 	vgic_release_lpi_locked(dist, irq);
160 	xa_unlock_irqrestore(&dist->lpi_xa, flags);
161 }
162 
vgic_release_deleted_lpis(struct kvm * kvm)163 static void vgic_release_deleted_lpis(struct kvm *kvm)
164 {
165 	struct vgic_dist *dist = &kvm->arch.vgic;
166 	unsigned long flags, intid;
167 	struct vgic_irq *irq;
168 
169 	xa_lock_irqsave(&dist->lpi_xa, flags);
170 
171 	xa_for_each(&dist->lpi_xa, intid, irq) {
172 		if (irq->pending_release)
173 			vgic_release_lpi_locked(dist, irq);
174 	}
175 
176 	xa_unlock_irqrestore(&dist->lpi_xa, flags);
177 }
178 
vgic_flush_pending_lpis(struct kvm_vcpu * vcpu)179 void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
180 {
181 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
182 	struct vgic_irq *irq, *tmp;
183 	bool deleted = false;
184 	unsigned long flags;
185 
186 	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
187 
188 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
189 		if (irq->intid >= VGIC_MIN_LPI) {
190 			raw_spin_lock(&irq->irq_lock);
191 			list_del(&irq->ap_list);
192 			irq->vcpu = NULL;
193 			raw_spin_unlock(&irq->irq_lock);
194 			deleted |= vgic_put_irq_norelease(vcpu->kvm, irq);
195 		}
196 	}
197 
198 	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
199 
200 	if (deleted)
201 		vgic_release_deleted_lpis(vcpu->kvm);
202 }
203 
vgic_irq_set_phys_pending(struct vgic_irq * irq,bool pending)204 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
205 {
206 	WARN_ON(irq_set_irqchip_state(irq->host_irq,
207 				      IRQCHIP_STATE_PENDING,
208 				      pending));
209 }
210 
vgic_get_phys_line_level(struct vgic_irq * irq)211 bool vgic_get_phys_line_level(struct vgic_irq *irq)
212 {
213 	bool line_level;
214 
215 	BUG_ON(!irq->hw);
216 
217 	if (irq->ops && irq->ops->get_input_level)
218 		return irq->ops->get_input_level(irq->intid);
219 
220 	WARN_ON(irq_get_irqchip_state(irq->host_irq,
221 				      IRQCHIP_STATE_PENDING,
222 				      &line_level));
223 	return line_level;
224 }
225 
226 /* Set/Clear the physical active state */
vgic_irq_set_phys_active(struct vgic_irq * irq,bool active)227 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
228 {
229 
230 	BUG_ON(!irq->hw);
231 	WARN_ON(irq_set_irqchip_state(irq->host_irq,
232 				      IRQCHIP_STATE_ACTIVE,
233 				      active));
234 }
235 
236 /**
237  * vgic_target_oracle - compute the target vcpu for an irq
238  *
239  * @irq:	The irq to route. Must be already locked.
240  *
241  * Based on the current state of the interrupt (enabled, pending,
242  * active, vcpu and target_vcpu), compute the next vcpu this should be
243  * given to. Return NULL if this shouldn't be injected at all.
244  *
245  * Requires the IRQ lock to be held.
246  */
vgic_target_oracle(struct vgic_irq * irq)247 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
248 {
249 	lockdep_assert_held(&irq->irq_lock);
250 
251 	/* If the interrupt is active, it must stay on the current vcpu */
252 	if (irq->active)
253 		return irq->vcpu ? : irq->target_vcpu;
254 
255 	/*
256 	 * If the IRQ is not active but enabled and pending, we should direct
257 	 * it to its configured target VCPU.
258 	 * If the distributor is disabled, pending interrupts shouldn't be
259 	 * forwarded.
260 	 */
261 	if (irq->enabled && irq_is_pending(irq)) {
262 		if (unlikely(irq->target_vcpu &&
263 			     !irq->target_vcpu->kvm->arch.vgic.enabled))
264 			return NULL;
265 
266 		return irq->target_vcpu;
267 	}
268 
269 	/* If neither active nor pending and enabled, then this IRQ should not
270 	 * be queued to any VCPU.
271 	 */
272 	return NULL;
273 }
274 
275 /*
276  * The order of items in the ap_lists defines how we'll pack things in LRs as
277  * well, the first items in the list being the first things populated in the
278  * LRs.
279  *
280  * A hard rule is that active interrupts can never be pushed out of the LRs
281  * (and therefore take priority) since we cannot reliably trap on deactivation
282  * of IRQs and therefore they have to be present in the LRs.
283  *
284  * Otherwise things should be sorted by the priority field and the GIC
285  * hardware support will take care of preemption of priority groups etc.
286  *
287  * Return negative if "a" sorts before "b", 0 to preserve order, and positive
288  * to sort "b" before "a".
289  */
vgic_irq_cmp(void * priv,const struct list_head * a,const struct list_head * b)290 static int vgic_irq_cmp(void *priv, const struct list_head *a,
291 			const struct list_head *b)
292 {
293 	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
294 	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
295 	bool penda, pendb;
296 	int ret;
297 
298 	/*
299 	 * list_sort may call this function with the same element when
300 	 * the list is fairly long.
301 	 */
302 	if (unlikely(irqa == irqb))
303 		return 0;
304 
305 	raw_spin_lock(&irqa->irq_lock);
306 	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
307 
308 	if (irqa->active || irqb->active) {
309 		ret = (int)irqb->active - (int)irqa->active;
310 		goto out;
311 	}
312 
313 	penda = irqa->enabled && irq_is_pending(irqa);
314 	pendb = irqb->enabled && irq_is_pending(irqb);
315 
316 	if (!penda || !pendb) {
317 		ret = (int)pendb - (int)penda;
318 		goto out;
319 	}
320 
321 	/* Both pending and enabled, sort by priority */
322 	ret = irqa->priority - irqb->priority;
323 out:
324 	raw_spin_unlock(&irqb->irq_lock);
325 	raw_spin_unlock(&irqa->irq_lock);
326 	return ret;
327 }
328 
329 /* Must be called with the ap_list_lock held */
vgic_sort_ap_list(struct kvm_vcpu * vcpu)330 static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
331 {
332 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
333 
334 	lockdep_assert_held(&vgic_cpu->ap_list_lock);
335 
336 	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
337 }
338 
339 /*
340  * Only valid injection if changing level for level-triggered IRQs or for a
341  * rising edge, and in-kernel connected IRQ lines can only be controlled by
342  * their owner.
343  */
vgic_validate_injection(struct vgic_irq * irq,bool level,void * owner)344 static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
345 {
346 	if (irq->owner != owner)
347 		return false;
348 
349 	switch (irq->config) {
350 	case VGIC_CONFIG_LEVEL:
351 		return irq->line_level != level;
352 	case VGIC_CONFIG_EDGE:
353 		return level;
354 	}
355 
356 	return false;
357 }
358 
359 /*
360  * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
361  * Do the queuing if necessary, taking the right locks in the right order.
362  * Returns true when the IRQ was queued, false otherwise.
363  *
364  * Needs to be entered with the IRQ lock already held, but will return
365  * with all locks dropped.
366  */
vgic_queue_irq_unlock(struct kvm * kvm,struct vgic_irq * irq,unsigned long flags)367 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
368 			   unsigned long flags) __releases(&irq->irq_lock)
369 {
370 	struct kvm_vcpu *vcpu;
371 
372 	lockdep_assert_held(&irq->irq_lock);
373 
374 retry:
375 	vcpu = vgic_target_oracle(irq);
376 	if (irq->vcpu || !vcpu) {
377 		/*
378 		 * If this IRQ is already on a VCPU's ap_list, then it
379 		 * cannot be moved or modified and there is no more work for
380 		 * us to do.
381 		 *
382 		 * Otherwise, if the irq is not pending and enabled, it does
383 		 * not need to be inserted into an ap_list and there is also
384 		 * no more work for us to do.
385 		 */
386 		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
387 
388 		/*
389 		 * We have to kick the VCPU here, because we could be
390 		 * queueing an edge-triggered interrupt for which we
391 		 * get no EOI maintenance interrupt. In that case,
392 		 * while the IRQ is already on the VCPU's AP list, the
393 		 * VCPU could have EOI'ed the original interrupt and
394 		 * won't see this one until it exits for some other
395 		 * reason.
396 		 */
397 		if (vcpu) {
398 			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
399 			kvm_vcpu_kick(vcpu);
400 		}
401 		return false;
402 	}
403 
404 	/*
405 	 * We must unlock the irq lock to take the ap_list_lock where
406 	 * we are going to insert this new pending interrupt.
407 	 */
408 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
409 
410 	/* someone can do stuff here, which we re-check below */
411 
412 	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
413 	raw_spin_lock(&irq->irq_lock);
414 
415 	/*
416 	 * Did something change behind our backs?
417 	 *
418 	 * There are two cases:
419 	 * 1) The irq lost its pending state or was disabled behind our
420 	 *    backs and/or it was queued to another VCPU's ap_list.
421 	 * 2) Someone changed the affinity on this irq behind our
422 	 *    backs and we are now holding the wrong ap_list_lock.
423 	 *
424 	 * In both cases, drop the locks and retry.
425 	 */
426 
427 	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
428 		raw_spin_unlock(&irq->irq_lock);
429 		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
430 					   flags);
431 
432 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
433 		goto retry;
434 	}
435 
436 	/*
437 	 * Grab a reference to the irq to reflect the fact that it is
438 	 * now in the ap_list. This is safe as the caller must already hold a
439 	 * reference on the irq.
440 	 */
441 	vgic_get_irq_ref(irq);
442 	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
443 	irq->vcpu = vcpu;
444 
445 	raw_spin_unlock(&irq->irq_lock);
446 	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
447 
448 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
449 	kvm_vcpu_kick(vcpu);
450 
451 	return true;
452 }
453 
454 /**
455  * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
456  * @kvm:     The VM structure pointer
457  * @vcpu:    The CPU for PPIs or NULL for global interrupts
458  * @intid:   The INTID to inject a new state to.
459  * @level:   Edge-triggered:  true:  to trigger the interrupt
460  *			      false: to ignore the call
461  *	     Level-sensitive  true:  raise the input signal
462  *			      false: lower the input signal
463  * @owner:   The opaque pointer to the owner of the IRQ being raised to verify
464  *           that the caller is allowed to inject this IRQ.  Userspace
465  *           injections will have owner == NULL.
466  *
467  * The VGIC is not concerned with devices being active-LOW or active-HIGH for
468  * level-sensitive interrupts.  You can think of the level parameter as 1
469  * being HIGH and 0 being LOW and all devices being active-HIGH.
470  */
kvm_vgic_inject_irq(struct kvm * kvm,struct kvm_vcpu * vcpu,unsigned int intid,bool level,void * owner)471 int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
472 			unsigned int intid, bool level, void *owner)
473 {
474 	struct vgic_irq *irq;
475 	unsigned long flags;
476 	int ret;
477 
478 	ret = vgic_lazy_init(kvm);
479 	if (ret)
480 		return ret;
481 
482 	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
483 		return -EINVAL;
484 
485 	trace_vgic_update_irq_pending(vcpu ? vcpu->vcpu_idx : 0, intid, level);
486 
487 	if (intid < VGIC_NR_PRIVATE_IRQS)
488 		irq = vgic_get_vcpu_irq(vcpu, intid);
489 	else
490 		irq = vgic_get_irq(kvm, intid);
491 	if (!irq)
492 		return -EINVAL;
493 
494 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
495 
496 	if (!vgic_validate_injection(irq, level, owner)) {
497 		/* Nothing to see here, move along... */
498 		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
499 		vgic_put_irq(kvm, irq);
500 		return 0;
501 	}
502 
503 	if (irq->config == VGIC_CONFIG_LEVEL)
504 		irq->line_level = level;
505 	else
506 		irq->pending_latch = true;
507 
508 	vgic_queue_irq_unlock(kvm, irq, flags);
509 	vgic_put_irq(kvm, irq);
510 
511 	return 0;
512 }
513 
514 /* @irq->irq_lock must be held */
kvm_vgic_map_irq(struct kvm_vcpu * vcpu,struct vgic_irq * irq,unsigned int host_irq,struct irq_ops * ops)515 static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
516 			    unsigned int host_irq,
517 			    struct irq_ops *ops)
518 {
519 	struct irq_desc *desc;
520 	struct irq_data *data;
521 
522 	/*
523 	 * Find the physical IRQ number corresponding to @host_irq
524 	 */
525 	desc = irq_to_desc(host_irq);
526 	if (!desc) {
527 		kvm_err("%s: no interrupt descriptor\n", __func__);
528 		return -EINVAL;
529 	}
530 	data = irq_desc_get_irq_data(desc);
531 	while (data->parent_data)
532 		data = data->parent_data;
533 
534 	irq->hw = true;
535 	irq->host_irq = host_irq;
536 	irq->hwintid = data->hwirq;
537 	irq->ops = ops;
538 	return 0;
539 }
540 
541 /* @irq->irq_lock must be held */
kvm_vgic_unmap_irq(struct vgic_irq * irq)542 static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
543 {
544 	irq->hw = false;
545 	irq->hwintid = 0;
546 	irq->ops = NULL;
547 }
548 
kvm_vgic_map_phys_irq(struct kvm_vcpu * vcpu,unsigned int host_irq,u32 vintid,struct irq_ops * ops)549 int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
550 			  u32 vintid, struct irq_ops *ops)
551 {
552 	struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
553 	unsigned long flags;
554 	int ret;
555 
556 	BUG_ON(!irq);
557 
558 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
559 	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
560 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
561 	vgic_put_irq(vcpu->kvm, irq);
562 
563 	return ret;
564 }
565 
566 /**
567  * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
568  * @vcpu: The VCPU pointer
569  * @vintid: The INTID of the interrupt
570  *
571  * Reset the active and pending states of a mapped interrupt.  Kernel
572  * subsystems injecting mapped interrupts should reset their interrupt lines
573  * when we are doing a reset of the VM.
574  */
kvm_vgic_reset_mapped_irq(struct kvm_vcpu * vcpu,u32 vintid)575 void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
576 {
577 	struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
578 	unsigned long flags;
579 
580 	if (!irq->hw)
581 		goto out;
582 
583 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
584 	irq->active = false;
585 	irq->pending_latch = false;
586 	irq->line_level = false;
587 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
588 out:
589 	vgic_put_irq(vcpu->kvm, irq);
590 }
591 
kvm_vgic_unmap_phys_irq(struct kvm_vcpu * vcpu,unsigned int vintid)592 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
593 {
594 	struct vgic_irq *irq;
595 	unsigned long flags;
596 
597 	if (!vgic_initialized(vcpu->kvm))
598 		return -EAGAIN;
599 
600 	irq = vgic_get_vcpu_irq(vcpu, vintid);
601 	BUG_ON(!irq);
602 
603 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
604 	kvm_vgic_unmap_irq(irq);
605 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
606 	vgic_put_irq(vcpu->kvm, irq);
607 
608 	return 0;
609 }
610 
kvm_vgic_get_map(struct kvm_vcpu * vcpu,unsigned int vintid)611 int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid)
612 {
613 	struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
614 	unsigned long flags;
615 	int ret = -1;
616 
617 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
618 	if (irq->hw)
619 		ret = irq->hwintid;
620 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
621 
622 	vgic_put_irq(vcpu->kvm, irq);
623 	return ret;
624 }
625 
626 /**
627  * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
628  *
629  * @vcpu:   Pointer to the VCPU (used for PPIs)
630  * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
631  * @owner:  Opaque pointer to the owner
632  *
633  * Returns 0 if intid is not already used by another in-kernel device and the
634  * owner is set, otherwise returns an error code.
635  */
kvm_vgic_set_owner(struct kvm_vcpu * vcpu,unsigned int intid,void * owner)636 int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
637 {
638 	struct vgic_irq *irq;
639 	unsigned long flags;
640 	int ret = 0;
641 
642 	if (!vgic_initialized(vcpu->kvm))
643 		return -EAGAIN;
644 
645 	/* SGIs and LPIs cannot be wired up to any device */
646 	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
647 		return -EINVAL;
648 
649 	irq = vgic_get_vcpu_irq(vcpu, intid);
650 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
651 	if (irq->owner && irq->owner != owner)
652 		ret = -EEXIST;
653 	else
654 		irq->owner = owner;
655 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
656 
657 	return ret;
658 }
659 
660 /**
661  * vgic_prune_ap_list - Remove non-relevant interrupts from the list
662  *
663  * @vcpu: The VCPU pointer
664  *
665  * Go over the list of "interesting" interrupts, and prune those that we
666  * won't have to consider in the near future.
667  */
vgic_prune_ap_list(struct kvm_vcpu * vcpu)668 static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
669 {
670 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
671 	struct vgic_irq *irq, *tmp;
672 	bool deleted_lpis = false;
673 
674 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
675 
676 retry:
677 	raw_spin_lock(&vgic_cpu->ap_list_lock);
678 
679 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
680 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
681 		bool target_vcpu_needs_kick = false;
682 
683 		raw_spin_lock(&irq->irq_lock);
684 
685 		BUG_ON(vcpu != irq->vcpu);
686 
687 		target_vcpu = vgic_target_oracle(irq);
688 
689 		if (!target_vcpu) {
690 			/*
691 			 * We don't need to process this interrupt any
692 			 * further, move it off the list.
693 			 */
694 			list_del(&irq->ap_list);
695 			irq->vcpu = NULL;
696 			raw_spin_unlock(&irq->irq_lock);
697 
698 			/*
699 			 * This vgic_put_irq call matches the
700 			 * vgic_get_irq_ref in vgic_queue_irq_unlock,
701 			 * where we added the LPI to the ap_list. As
702 			 * we remove the irq from the list, we drop
703 			 * also drop the refcount.
704 			 */
705 			deleted_lpis |= vgic_put_irq_norelease(vcpu->kvm, irq);
706 			continue;
707 		}
708 
709 		if (target_vcpu == vcpu) {
710 			/* We're on the right CPU */
711 			raw_spin_unlock(&irq->irq_lock);
712 			continue;
713 		}
714 
715 		/* This interrupt looks like it has to be migrated. */
716 
717 		raw_spin_unlock(&irq->irq_lock);
718 		raw_spin_unlock(&vgic_cpu->ap_list_lock);
719 
720 		/*
721 		 * Ensure locking order by always locking the smallest
722 		 * ID first.
723 		 */
724 		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
725 			vcpuA = vcpu;
726 			vcpuB = target_vcpu;
727 		} else {
728 			vcpuA = target_vcpu;
729 			vcpuB = vcpu;
730 		}
731 
732 		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
733 		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
734 				      SINGLE_DEPTH_NESTING);
735 		raw_spin_lock(&irq->irq_lock);
736 
737 		/*
738 		 * If the affinity has been preserved, move the
739 		 * interrupt around. Otherwise, it means things have
740 		 * changed while the interrupt was unlocked, and we
741 		 * need to replay this.
742 		 *
743 		 * In all cases, we cannot trust the list not to have
744 		 * changed, so we restart from the beginning.
745 		 */
746 		if (target_vcpu == vgic_target_oracle(irq)) {
747 			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;
748 
749 			list_del(&irq->ap_list);
750 			irq->vcpu = target_vcpu;
751 			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
752 			target_vcpu_needs_kick = true;
753 		}
754 
755 		raw_spin_unlock(&irq->irq_lock);
756 		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
757 		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
758 
759 		if (target_vcpu_needs_kick) {
760 			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
761 			kvm_vcpu_kick(target_vcpu);
762 		}
763 
764 		goto retry;
765 	}
766 
767 	raw_spin_unlock(&vgic_cpu->ap_list_lock);
768 
769 	if (unlikely(deleted_lpis))
770 		vgic_release_deleted_lpis(vcpu->kvm);
771 }
772 
vgic_fold_lr_state(struct kvm_vcpu * vcpu)773 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
774 {
775 	if (kvm_vgic_global_state.type == VGIC_V2)
776 		vgic_v2_fold_lr_state(vcpu);
777 	else
778 		vgic_v3_fold_lr_state(vcpu);
779 }
780 
781 /* Requires the irq_lock to be held. */
vgic_populate_lr(struct kvm_vcpu * vcpu,struct vgic_irq * irq,int lr)782 static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
783 				    struct vgic_irq *irq, int lr)
784 {
785 	lockdep_assert_held(&irq->irq_lock);
786 
787 	if (kvm_vgic_global_state.type == VGIC_V2)
788 		vgic_v2_populate_lr(vcpu, irq, lr);
789 	else
790 		vgic_v3_populate_lr(vcpu, irq, lr);
791 }
792 
vgic_clear_lr(struct kvm_vcpu * vcpu,int lr)793 static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
794 {
795 	if (kvm_vgic_global_state.type == VGIC_V2)
796 		vgic_v2_clear_lr(vcpu, lr);
797 	else
798 		vgic_v3_clear_lr(vcpu, lr);
799 }
800 
vgic_set_underflow(struct kvm_vcpu * vcpu)801 static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
802 {
803 	if (kvm_vgic_global_state.type == VGIC_V2)
804 		vgic_v2_set_underflow(vcpu);
805 	else
806 		vgic_v3_set_underflow(vcpu);
807 }
808 
809 /* Requires the ap_list_lock to be held. */
compute_ap_list_depth(struct kvm_vcpu * vcpu,bool * multi_sgi)810 static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
811 				 bool *multi_sgi)
812 {
813 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
814 	struct vgic_irq *irq;
815 	int count = 0;
816 
817 	*multi_sgi = false;
818 
819 	lockdep_assert_held(&vgic_cpu->ap_list_lock);
820 
821 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
822 		int w;
823 
824 		raw_spin_lock(&irq->irq_lock);
825 		/* GICv2 SGIs can count for more than one... */
826 		w = vgic_irq_get_lr_count(irq);
827 		raw_spin_unlock(&irq->irq_lock);
828 
829 		count += w;
830 		*multi_sgi |= (w > 1);
831 	}
832 	return count;
833 }
834 
835 /* Requires the VCPU's ap_list_lock to be held. */
vgic_flush_lr_state(struct kvm_vcpu * vcpu)836 static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
837 {
838 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
839 	struct vgic_irq *irq;
840 	int count;
841 	bool multi_sgi;
842 	u8 prio = 0xff;
843 	int i = 0;
844 
845 	lockdep_assert_held(&vgic_cpu->ap_list_lock);
846 
847 	count = compute_ap_list_depth(vcpu, &multi_sgi);
848 	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
849 		vgic_sort_ap_list(vcpu);
850 
851 	count = 0;
852 
853 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
854 		raw_spin_lock(&irq->irq_lock);
855 
856 		/*
857 		 * If we have multi-SGIs in the pipeline, we need to
858 		 * guarantee that they are all seen before any IRQ of
859 		 * lower priority. In that case, we need to filter out
860 		 * these interrupts by exiting early. This is easy as
861 		 * the AP list has been sorted already.
862 		 */
863 		if (multi_sgi && irq->priority > prio) {
864 			raw_spin_unlock(&irq->irq_lock);
865 			break;
866 		}
867 
868 		if (likely(vgic_target_oracle(irq) == vcpu)) {
869 			vgic_populate_lr(vcpu, irq, count++);
870 
871 			if (irq->source)
872 				prio = irq->priority;
873 		}
874 
875 		raw_spin_unlock(&irq->irq_lock);
876 
877 		if (count == kvm_vgic_global_state.nr_lr) {
878 			if (!list_is_last(&irq->ap_list,
879 					  &vgic_cpu->ap_list_head))
880 				vgic_set_underflow(vcpu);
881 			break;
882 		}
883 	}
884 
885 	/* Nuke remaining LRs */
886 	for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
887 		vgic_clear_lr(vcpu, i);
888 
889 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
890 		vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
891 	else
892 		vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
893 }
894 
can_access_vgic_from_kernel(void)895 static inline bool can_access_vgic_from_kernel(void)
896 {
897 	/*
898 	 * GICv2 can always be accessed from the kernel because it is
899 	 * memory-mapped, and VHE systems can access GICv3 EL2 system
900 	 * registers.
901 	 */
902 	return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
903 }
904 
vgic_save_state(struct kvm_vcpu * vcpu)905 static inline void vgic_save_state(struct kvm_vcpu *vcpu)
906 {
907 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
908 		vgic_v2_save_state(vcpu);
909 	else
910 		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
911 }
912 
913 /* Sync back the hardware VGIC state into our emulation after a guest's run. */
kvm_vgic_sync_hwstate(struct kvm_vcpu * vcpu)914 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
915 {
916 	int used_lrs;
917 
918 	/* If nesting, emulate the HW effect from L0 to L1 */
919 	if (vgic_state_is_nested(vcpu)) {
920 		vgic_v3_sync_nested(vcpu);
921 		return;
922 	}
923 
924 	if (vcpu_has_nv(vcpu))
925 		vgic_v3_nested_update_mi(vcpu);
926 
927 	/* An empty ap_list_head implies used_lrs == 0 */
928 	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
929 		return;
930 
931 	if (can_access_vgic_from_kernel())
932 		vgic_save_state(vcpu);
933 
934 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
935 		used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
936 	else
937 		used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
938 
939 	if (used_lrs)
940 		vgic_fold_lr_state(vcpu);
941 	vgic_prune_ap_list(vcpu);
942 }
943 
vgic_restore_state(struct kvm_vcpu * vcpu)944 static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
945 {
946 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
947 		vgic_v2_restore_state(vcpu);
948 	else
949 		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
950 }
951 
952 /* Flush our emulation state into the GIC hardware before entering the guest. */
kvm_vgic_flush_hwstate(struct kvm_vcpu * vcpu)953 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
954 {
955 	/*
956 	 * If in a nested state, we must return early. Two possibilities:
957 	 *
958 	 * - If we have any pending IRQ for the guest and the guest
959 	 *   expects IRQs to be handled in its virtual EL2 mode (the
960 	 *   virtual IMO bit is set) and it is not already running in
961 	 *   virtual EL2 mode, then we have to emulate an IRQ
962 	 *   exception to virtual EL2.
963 	 *
964 	 *   We do that by placing a request to ourselves which will
965 	 *   abort the entry procedure and inject the exception at the
966 	 *   beginning of the run loop.
967 	 *
968 	 * - Otherwise, do exactly *NOTHING*. The guest state is
969 	 *   already loaded, and we can carry on with running it.
970 	 *
971 	 * If we have NV, but are not in a nested state, compute the
972 	 * maintenance interrupt state, as it may fire.
973 	 */
974 	if (vgic_state_is_nested(vcpu)) {
975 		if (kvm_vgic_vcpu_pending_irq(vcpu))
976 			kvm_make_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu);
977 
978 		return;
979 	}
980 
981 	if (vcpu_has_nv(vcpu))
982 		vgic_v3_nested_update_mi(vcpu);
983 
984 	/*
985 	 * If there are no virtual interrupts active or pending for this
986 	 * VCPU, then there is no work to do and we can bail out without
987 	 * taking any lock.  There is a potential race with someone injecting
988 	 * interrupts to the VCPU, but it is a benign race as the VCPU will
989 	 * either observe the new interrupt before or after doing this check,
990 	 * and introducing additional synchronization mechanism doesn't change
991 	 * this.
992 	 *
993 	 * Note that we still need to go through the whole thing if anything
994 	 * can be directly injected (GICv4).
995 	 */
996 	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
997 	    !vgic_supports_direct_irqs(vcpu->kvm))
998 		return;
999 
1000 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
1001 
1002 	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
1003 		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
1004 		vgic_flush_lr_state(vcpu);
1005 		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
1006 	}
1007 
1008 	if (can_access_vgic_from_kernel())
1009 		vgic_restore_state(vcpu);
1010 
1011 	if (vgic_supports_direct_irqs(vcpu->kvm))
1012 		vgic_v4_commit(vcpu);
1013 }
1014 
kvm_vgic_load(struct kvm_vcpu * vcpu)1015 void kvm_vgic_load(struct kvm_vcpu *vcpu)
1016 {
1017 	if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
1018 		if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
1019 			__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
1020 		return;
1021 	}
1022 
1023 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
1024 		vgic_v2_load(vcpu);
1025 	else
1026 		vgic_v3_load(vcpu);
1027 }
1028 
kvm_vgic_put(struct kvm_vcpu * vcpu)1029 void kvm_vgic_put(struct kvm_vcpu *vcpu)
1030 {
1031 	if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
1032 		if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
1033 			__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
1034 		return;
1035 	}
1036 
1037 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
1038 		vgic_v2_put(vcpu);
1039 	else
1040 		vgic_v3_put(vcpu);
1041 }
1042 
kvm_vgic_vcpu_pending_irq(struct kvm_vcpu * vcpu)1043 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
1044 {
1045 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1046 	struct vgic_irq *irq;
1047 	bool pending = false;
1048 	unsigned long flags;
1049 	struct vgic_vmcr vmcr;
1050 
1051 	if (!vcpu->kvm->arch.vgic.enabled)
1052 		return false;
1053 
1054 	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
1055 		return true;
1056 
1057 	vgic_get_vmcr(vcpu, &vmcr);
1058 
1059 	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
1060 
1061 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
1062 		raw_spin_lock(&irq->irq_lock);
1063 		pending = irq_is_pending(irq) && irq->enabled &&
1064 			  !irq->active &&
1065 			  irq->priority < vmcr.pmr;
1066 		raw_spin_unlock(&irq->irq_lock);
1067 
1068 		if (pending)
1069 			break;
1070 	}
1071 
1072 	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
1073 
1074 	return pending;
1075 }
1076 
vgic_kick_vcpus(struct kvm * kvm)1077 void vgic_kick_vcpus(struct kvm *kvm)
1078 {
1079 	struct kvm_vcpu *vcpu;
1080 	unsigned long c;
1081 
1082 	/*
1083 	 * We've injected an interrupt, time to find out who deserves
1084 	 * a good kick...
1085 	 */
1086 	kvm_for_each_vcpu(c, vcpu, kvm) {
1087 		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
1088 			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
1089 			kvm_vcpu_kick(vcpu);
1090 		}
1091 	}
1092 }
1093 
kvm_vgic_map_is_active(struct kvm_vcpu * vcpu,unsigned int vintid)1094 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
1095 {
1096 	struct vgic_irq *irq;
1097 	bool map_is_active;
1098 	unsigned long flags;
1099 
1100 	if (!vgic_initialized(vcpu->kvm))
1101 		return false;
1102 
1103 	irq = vgic_get_vcpu_irq(vcpu, vintid);
1104 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
1105 	map_is_active = irq->hw && irq->active;
1106 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
1107 	vgic_put_irq(vcpu->kvm, irq);
1108 
1109 	return map_is_active;
1110 }
1111 
1112 /*
1113  * Level-triggered mapped IRQs are special because we only observe rising
1114  * edges as input to the VGIC.
1115  *
1116  * If the guest never acked the interrupt we have to sample the physical
1117  * line and set the line level, because the device state could have changed
1118  * or we simply need to process the still pending interrupt later.
1119  *
1120  * We could also have entered the guest with the interrupt active+pending.
1121  * On the next exit, we need to re-evaluate the pending state, as it could
1122  * otherwise result in a spurious interrupt by injecting a now potentially
1123  * stale pending state.
1124  *
1125  * If this causes us to lower the level, we have to also clear the physical
1126  * active state, since we will otherwise never be told when the interrupt
1127  * becomes asserted again.
1128  *
1129  * Another case is when the interrupt requires a helping hand on
1130  * deactivation (no HW deactivation, for example).
1131  */
vgic_irq_handle_resampling(struct vgic_irq * irq,bool lr_deactivated,bool lr_pending)1132 void vgic_irq_handle_resampling(struct vgic_irq *irq,
1133 				bool lr_deactivated, bool lr_pending)
1134 {
1135 	if (vgic_irq_is_mapped_level(irq)) {
1136 		bool resample = false;
1137 
1138 		if (unlikely(vgic_irq_needs_resampling(irq))) {
1139 			resample = !(irq->active || irq->pending_latch);
1140 		} else if (lr_pending || (lr_deactivated && irq->line_level)) {
1141 			irq->line_level = vgic_get_phys_line_level(irq);
1142 			resample = !irq->line_level;
1143 		}
1144 
1145 		if (resample)
1146 			vgic_irq_set_phys_active(irq, false);
1147 	}
1148 }
1149