xref: /linux/arch/x86/kvm/vmx/posted_intr.c (revision dee264c16a6334dcdbea5c186f5ff35f98b1df42)
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include <asm/irq_remapping.h>
#include <asm/cpu.h>

#include "lapic.h"
#include "irq.h"
#include "posted_intr.h"
#include "trace.h"
#include "vmx.h"
#include "tdx.h"

/*
 * Maintain a per-CPU list of vCPUs that need to be awakened by
 * pi_wakeup_handler() when a WAKEUP_VECTOR interrupt is posted.  vCPUs are
 * added to the list when the vCPU is scheduled out and is blocking (e.g. in
 * HLT) with IRQs enabled.  The vCPU's posted interrupt descriptor is updated
 * at the same time to set its notification vector to WAKEUP_VECTOR, so that
 * posted interrupts from devices wake the target vCPUs.  vCPUs are removed
 * from the list and the notification vector is reset when the vCPU is
 * scheduled in.
 */
static DEFINE_PER_CPU(struct list_head, wakeup_vcpus_on_cpu);
/*
 * Protect the per-CPU list with a per-CPU spinlock to handle task migration.
 * When a blocking vCPU is awakened _and_ migrated to a different pCPU, the
 * ->sched_in() path will need to take the vCPU off the list of the _previous_
 * CPU.  IRQs must be disabled when taking this lock, otherwise deadlock will
 * occur if a wakeup IRQ arrives and attempts to acquire the lock.
 */
static DEFINE_PER_CPU(raw_spinlock_t, wakeup_vcpus_on_cpu_lock);

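/*
 * Lockdep subclass used when a wakeup lock is acquired from the sched-out
 * path; lets lockdep distinguish that acquire from the regular/IRQ-context
 * acquire in pi_wakeup_handler() (see the comments in vmx_vcpu_pi_load() and
 * pi_enable_wakeup_handler()).
 */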
#define PI_LOCK_SCHED_OUT SINGLE_DEPTH_NESTING

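/* Both VMX and TDX vCPUs embed their PI descriptor in the common vcpu_vt. */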
struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
	return &(to_vt(vcpu)->pi_desc);
}

static int pi_try_set_control(struct pi_desc *pi_desc, u64 *pold, u64 new)
{
	/*
	 * PID.ON can be set at any time by a different vCPU or by hardware,
	 * e.g. a device.  PID.control must be written atomically, and the
	 * update must be retried with a fresh snapshot if an ON change causes
	 * the cmpxchg to fail, in which case try_cmpxchg64() refreshes *pold
	 * with the current value.
	 */
	if (!try_cmpxchg64(&pi_desc->control, pold, new))
		return -EBUSY;

	return 0;
}

void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
	struct vcpu_vt *vt = to_vt(vcpu);
	struct pi_desc old, new;
	unsigned long flags;
	unsigned int dest;

	/*
	 * To simplify hot-plug and dynamic toggling of APICv, keep PI.NDST and
	 * PI.SN up-to-date even if there is no assigned device or if APICv is
	 * deactivated due to a dynamic inhibit bit, e.g. for Hyper-V's SynIC.
	 */
	if (!enable_apicv || !lapic_in_kernel(vcpu))
		return;

	/*
	 * If the vCPU wasn't on the wakeup list and wasn't migrated, then the
	 * full update can be skipped as neither the vector nor the destination
	 * needs to be changed.
	 */
	if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR && vcpu->cpu == cpu) {
		/*
		 * Clear SN if it was set due to being preempted.  Again, do
		 * this even if there is no assigned device for simplicity.
		 */
		if (pi_test_and_clear_sn(pi_desc))
			goto after_clear_sn;
		return;
	}

	local_irq_save(flags);

	/*
	 * If the vCPU was waiting for wakeup, remove the vCPU from the wakeup
	 * list of the _previous_ pCPU, which will not be the same as the
	 * current pCPU if the task was migrated.
	 */
	if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR) {
		raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu);

		/*
		 * In addition to taking the wakeup lock for the regular/IRQ
		 * context, tell lockdep it is being taken for the "sched out"
		 * context as well.  vCPU loads happen in task context, and
		 * this is taking the lock of the _previous_ CPU, i.e. can race
		 * with both the scheduler and the wakeup handler.
		 */
		raw_spin_lock(spinlock);
		spin_acquire(&spinlock->dep_map, PI_LOCK_SCHED_OUT, 0, _RET_IP_);
		list_del(&vt->pi_wakeup_list);
		spin_release(&spinlock->dep_map, _RET_IP_);
		raw_spin_unlock(spinlock);
	}

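	/*
	 * In xAPIC mode, the destination APIC ID occupies bits 15:8 of
	 * PI.NDST; in x2APIC mode, NDST holds the full 32-bit x2APIC ID.
	 */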
	dest = cpu_physical_id(cpu);
	if (!x2apic_mode)
		dest = (dest << 8) & 0xFF00;

	old.control = READ_ONCE(pi_desc->control);
	do {
		new.control = old.control;

		/*
		 * Clear SN (as above) and refresh the destination APIC ID to
		 * handle task migration (@cpu != vcpu->cpu).
		 */
		new.ndst = dest;
		__pi_clear_sn(&new);

		/*
		 * Restore the notification vector; in the blocking case, the
		 * descriptor was modified on "put" to use the wakeup vector.
		 */
		new.nv = POSTED_INTR_VECTOR;
	} while (pi_try_set_control(pi_desc, &old.control, new.control));

	local_irq_restore(flags);

after_clear_sn:

	/*
	 * Clear SN before reading the bitmap.  The VT-d firmware
	 * writes the bitmap and reads SN atomically (5.2.3 in the
	 * spec), so it doesn't really have a memory barrier that
	 * pairs with this, but we cannot do that and we need one.
	 */
	smp_mb__after_atomic();

	if (!pi_is_pir_empty(pi_desc))
		pi_set_on(pi_desc);
}

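/*
 * Device (VT-d) posted interrupts require an in-kernel irqchip, APICv, at
 * least one assigned device, and an IOMMU that supports interrupt posting.
 */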
static bool vmx_can_use_vtd_pi(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm) && enable_apicv &&
		kvm_arch_has_assigned_device(kvm) &&
		irq_remapping_cap(IRQ_POSTING_CAP);
}

/*
 * Put the vCPU on this pCPU's list of vCPUs that need to be awakened and set
 * WAKEUP as the notification vector in the PI descriptor.
 */
static void pi_enable_wakeup_handler(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
	struct vcpu_vt *vt = to_vt(vcpu);
	struct pi_desc old, new;

	lockdep_assert_irqs_disabled();

	/*
	 * Acquire the wakeup lock using the "sched out" context to work around
	 * a lockdep false positive.  When this is called, schedule() holds
	 * various per-CPU scheduler locks.  When the wakeup handler runs, it
	 * holds this CPU's wakeup lock while calling try_to_wake_up(), which
	 * can eventually take the aforementioned scheduler locks, which causes
	 * lockdep to assume a deadlock is possible.
	 *
	 * Deadlock can't actually occur because IRQs are disabled for the
	 * entirety of the sched_out critical section, i.e. the wakeup handler
	 * can't run while the scheduler locks are held.
	 */
	raw_spin_lock_nested(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu),
			     PI_LOCK_SCHED_OUT);
	list_add_tail(&vt->pi_wakeup_list,
		      &per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu));
	raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));

	WARN(pi_test_sn(pi_desc), "PI descriptor SN field set before blocking");

	old.control = READ_ONCE(pi_desc->control);
	do {
		/* Set the notification vector to the wakeup vector. */
		new.control = old.control;
		new.nv = POSTED_INTR_WAKEUP_VECTOR;
	} while (pi_try_set_control(pi_desc, &old.control, new.control));

	/*
	 * Send a wakeup IPI to this CPU if an interrupt may have been posted
	 * before the notification vector was updated, in which case the IRQ
	 * will arrive on the non-wakeup vector.  An IPI is needed as calling
	 * try_to_wake_up() from ->sched_out() isn't allowed (IRQs are not
	 * enabled until it is safe to call try_to_wake_up() on the task being
	 * scheduled out).
	 */
	if (pi_test_on(&new))
		__apic_send_IPI_self(POSTED_INTR_WAKEUP_VECTOR);
}

static bool vmx_needs_pi_wakeup(struct kvm_vcpu *vcpu)
{
	/*
	 * The default posted interrupt vector does nothing when invoked
	 * outside guest mode.  Return whether a blocked vCPU can be the
	 * target of posted interrupts, as is the case when using either IPI
	 * virtualization or VT-d PI, so that the notification vector is
	 * switched to the one that calls back to pi_wakeup_handler().
	 */
	return (vmx_can_use_ipiv(vcpu) && !is_td_vcpu(vcpu)) ||
		vmx_can_use_vtd_pi(vcpu->kvm);
}

void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	if (!vmx_needs_pi_wakeup(vcpu))
		return;

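	/*
	 * Switch to the wakeup vector only if the vCPU is actually blocking
	 * and can be woken by an interrupt: TDX has its own check for whether
	 * interrupts are allowed, VMX requires that IRQs aren't blocked.
	 */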
	if (kvm_vcpu_is_blocking(vcpu) &&
	    ((is_td_vcpu(vcpu) && tdx_interrupt_allowed(vcpu)) ||
	     (!is_td_vcpu(vcpu) && !vmx_interrupt_blocked(vcpu))))
		pi_enable_wakeup_handler(vcpu);

	/*
	 * Set SN when the vCPU is preempted.  Note, the vCPU can both be seen
	 * as blocking and preempted, e.g. if it's preempted between setting
	 * its wait state and manually scheduling out.
	 */
	if (vcpu->preempted)
		pi_set_sn(pi_desc);
}

/*
 * Handler for POSTED_INTR_WAKEUP_VECTOR.
 */
void pi_wakeup_handler(void)
{
	int cpu = smp_processor_id();
	struct list_head *wakeup_list = &per_cpu(wakeup_vcpus_on_cpu, cpu);
	raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, cpu);
	struct vcpu_vt *vt;

	raw_spin_lock(spinlock);
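	/*
	 * Wake every vCPU on this pCPU's wakeup list whose PI descriptor has
	 * an outstanding notification (ON set).
	 */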
	list_for_each_entry(vt, wakeup_list, pi_wakeup_list) {
		if (pi_test_on(&vt->pi_desc))
			kvm_vcpu_wake_up(vt_to_vcpu(vt));
	}
	raw_spin_unlock(spinlock);
}

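/* Initialize this CPU's wakeup list and the lock that protects it. */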
void __init pi_init_cpu(int cpu)
{
	INIT_LIST_HEAD(&per_cpu(wakeup_vcpus_on_cpu, cpu));
	raw_spin_lock_init(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu));
}

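/*
 * A posted interrupt is pending if ON is set, or if SN suppressed the
 * notification while the vCPU was preempted and interrupts were posted to
 * the PIR in the meantime.
 */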
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	return pi_test_on(pi_desc) ||
		(pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc));
}

/*
 * Bail out of the block loop if the VM has an assigned device, but the
 * blocking vCPU didn't reconfigure PI.NV to the wakeup vector, i.e. the
 * assigned device came along after the initial check in vmx_vcpu_pi_put().
 */
void vmx_pi_start_assignment(struct kvm *kvm)
{
	if (!irq_remapping_cap(IRQ_POSTING_CAP))
		return;

	kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
}

/*
 * vmx_pi_update_irte - set IRTE for posted interrupts
 *
 * @kvm: the VM
 * @host_irq: host IRQ of the interrupt
 * @guest_irq: GSI of the interrupt
 * @set: set or unset PI
 *
 * Return: 0 on success, < 0 on failure
 */
int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_routing_table *irq_rt;
	bool enable_remapped_mode = true;
	struct kvm_lapic_irq irq;
	struct kvm_vcpu *vcpu;
	struct vcpu_data vcpu_info;
	int idx, ret = 0;

	if (!vmx_can_use_vtd_pi(kvm))
		return 0;

	idx = srcu_read_lock(&kvm->irq_srcu);
	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	if (guest_irq >= irq_rt->nr_rt_entries ||
	    hlist_empty(&irq_rt->map[guest_irq])) {
		pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
			     guest_irq, irq_rt->nr_rt_entries);
		goto out;
	}

	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
		if (e->type != KVM_IRQ_ROUTING_MSI)
			continue;
		/*
		 * VT-d PI cannot post multicast/broadcast interrupts to a
		 * vCPU, so interrupt remapping is still used for those kinds
		 * of interrupts.
		 *
		 * For lowest-priority interrupts, only those with a single
		 * CPU as the destination are supported, e.g. the user
		 * configures the interrupt via /proc/irq or uses irqbalance
		 * to make the interrupt single-CPU.
		 *
		 * Full lowest-priority interrupt support will be added later.
		 *
		 * In addition, only generic interrupts can be injected via
		 * the PI mechanism; refuse to route others through it.
		 */

		kvm_set_msi_irq(kvm, e, &irq);
		if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
		    !kvm_irq_is_postable(&irq))
			continue;


		vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
		vcpu_info.vector = irq.vector;

		trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
					 vcpu_info.vector, vcpu_info.pi_desc_addr, set);

		if (!set)
			continue;

		enable_remapped_mode = false;

		ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
		if (ret < 0) {
			printk(KERN_INFO "%s: failed to update PI IRTE\n",
			       __func__);
			goto out;
		}
	}

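	/*
	 * If no routing entry was configured for posting, fall back to
	 * remapped mode by clearing the host IRQ's vCPU affinity.
	 */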
	if (enable_remapped_mode)
		ret = irq_set_vcpu_affinity(host_irq, NULL);
	else
		ret = 0;
out:
	srcu_read_unlock(&kvm->irq_srcu, idx);
	return ret;
}