// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * MandrakeSoft S.A.
 * 43, rue d'Aboukir
 * 75002 Paris - France
 * http://www.linux-mandrake.com/
 * http://www.mandrakesoft.com/
 *
 * Yunhong Jiang <yunhong.jiang@intel.com>
 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
 * Based on Xen 3.1 code.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"
#include "trace.h"

static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
			  bool line_status);

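/*
 * Register access is indirect, as on the physical 82093AA IOAPIC: the
 * guest writes a register index to IOREGSEL (IOAPIC_REG_SELECT, offset
 * 0x00) and then reads or writes the selected register through the
 * 32-bit IOWIN (IOAPIC_REG_WINDOW, offset 0x10).  The 64-bit
 * redirection table entry for pin N is exposed as two 32-bit halves at
 * indices 0x10 + 2*N (low) and 0x11 + 2*N (high).  A guest would read
 * the low half of entry 0 roughly as follows (illustrative sketch, not
 * code from this file):
 *
 *	writel(0x10, ioapic_base + 0x00);	// select low dword of pin 0
 *	lo = readl(ioapic_base + 0x10);		// read it through the window
 */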
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
		{
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content = ~0ULL;

			if (redir_index < IOAPIC_NUM_PINS) {
				u32 index = array_index_nospec(
					redir_index, IOAPIC_NUM_PINS);

				redir_content = ioapic->redirtbl[index].bits;
			}

			result = (ioapic->ioregsel & 0x1) ?
			    (redir_content >> 32) & 0xffffffff :
			    redir_content & 0xffffffff;
			break;
		}
	}

	return result;
}

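/*
 * RTC EOI tracking: pending_eoi counts the vCPUs that were sent the RTC
 * vector and have not yet EOI'd it, and ->map records which vCPUs those
 * are.  Userspace uses this to detect coalesced RTC interrupts (see the
 * comment in ioapic_set_irq() below).
 */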
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
	ioapic->rtc_status.pending_eoi = 0;
	bitmap_zero(ioapic->rtc_status.map, KVM_MAX_VCPU_IDS);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
		kvm_rtc_eoi_tracking_restore_all(ioapic);
}

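/*
 * Recompute one vCPU's RTC EOI-tracking state from its LAPIC, e.g. after
 * the LAPIC state has been restored: set or clear the vCPU's bit in the
 * tracking map depending on whether the RTC vector is still pending an
 * EOI there, and adjust pending_eoi to match.
 */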
static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	bool new_val, old_val;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct rtc_status *status = &ioapic->rtc_status;
	union kvm_ioapic_redirect_entry *e;

	e = &ioapic->redirtbl[RTC_GSI];
	if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
				 e->fields.dest_id,
				 kvm_lapic_irq_dest_mode(!!e->fields.dest_mode)))
		return;

	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
	old_val = test_bit(vcpu->vcpu_id, status->map);

	if (new_val == old_val)
		return;

	if (new_val) {
		__set_bit(vcpu->vcpu_id, status->map);
		status->vectors[vcpu->vcpu_id] = e->fields.vector;
		ioapic->rtc_status.pending_eoi++;
	} else {
		__clear_bit(vcpu->vcpu_id, status->map);
		ioapic->rtc_status.pending_eoi--;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__rtc_irq_eoi_tracking_restore_one(vcpu);
	spin_unlock(&ioapic->lock);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	if (RTC_GSI >= IOAPIC_NUM_PINS)
		return;

	rtc_irq_eoi_tracking_reset(ioapic);
	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
		__rtc_irq_eoi_tracking_restore_one(vcpu);
}

static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu,
			int vector)
{
	struct rtc_status *status = &ioapic->rtc_status;

	/* RTC special handling */
	if (test_bit(vcpu->vcpu_id, status->map) &&
	    (vector == status->vectors[vcpu->vcpu_id]) &&
	    (test_and_clear_bit(vcpu->vcpu_id, status->map))) {
		--ioapic->rtc_status.pending_eoi;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
	if (ioapic->rtc_status.pending_eoi > 0)
		return true; /* coalesced */

	return false;
}

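/*
 * With AVIC/APICv, an accelerated EOI for an edge-triggered interrupt is
 * not forwarded to the in-kernel IOAPIC, so the RTC EOI-tracking state
 * can go stale.  Poll the destination LAPICs here instead: if no LAPIC
 * still has the vector pending, record the EOI on its behalf.
 */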
static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];

	kvm_for_each_vcpu(i, vcpu, ioapic->kvm) {
		if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
					 entry->fields.dest_id,
					 entry->fields.dest_mode) ||
		    kvm_apic_pending_eoi(vcpu, entry->fields.vector))
			continue;

		/*
		 * If the vector no longer has a pending EOI in any LAPIC,
		 * update the EOI tracking for this vector.
		 */
		rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
		break;
	}
}

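/*
 * Latch an interrupt on pin @irq.  Returns 0 if the interrupt was
 * coalesced (i.e. dropped because an earlier instance has not been
 * delivered yet), which is also what the tracepoint below reports;
 * otherwise returns the result of ioapic_service().
 */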
static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
			  int irq_level, bool line_status)
{
	union kvm_ioapic_redirect_entry entry;
	u32 mask = 1 << irq;
	u32 old_irr;
	int edge, ret;

	entry = ioapic->redirtbl[irq];
	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

	if (!irq_level) {
		ioapic->irr &= ~mask;
		ret = 1;
		goto out;
	}

	/*
	 * AMD SVM AVIC accelerates the EOI write iff the interrupt is
	 * edge-triggered, in which case the in-kernel IOAPIC will not be
	 * able to receive the EOI.  In this case, we do a lazy update of
	 * the pending EOI when trying to set the IOAPIC irq.
	 */
	if (edge && kvm_apicv_activated(ioapic->kvm))
		ioapic_lazy_update_eoi(ioapic, irq);

	/*
	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
	 * this only happens if a previous edge has not been delivered due
	 * to masking.  For level interrupts, the remote_irr field tells
	 * us if the interrupt is waiting for an EOI.
	 *
	 * RTC is special: it is edge-triggered, but userspace likes to know
	 * if it has already been ack-ed via EOI because coalesced RTC
	 * interrupts lead to time drift in Windows guests.  So we track
	 * EOI manually for the RTC interrupt.
	 */
	if (irq == RTC_GSI && line_status &&
		rtc_irq_check_coalesced(ioapic)) {
		ret = 0;
		goto out;
	}

	old_irr = ioapic->irr;
	ioapic->irr |= mask;
	if (edge) {
		ioapic->irr_delivered &= ~mask;
		if (old_irr == ioapic->irr) {
			ret = 0;
			goto out;
		}
	}

	ret = ioapic_service(ioapic, irq, line_status);

out:
	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	return ret;
}

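/*
 * Replay every interrupt latched in a saved IRR, re-deriving the RTC
 * EOI-tracking state afterwards.  Used by kvm_set_ioapic() when
 * userspace restores IOAPIC state.
 */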
static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
	u32 idx;

	rtc_irq_eoi_tracking_reset(ioapic);
	for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
		ioapic_set_irq(ioapic, idx, 1, true);

	kvm_rtc_eoi_tracking_restore_all(ioapic);
}

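/*
 * Compute the set of vectors for which this vCPU must intercept EOIs and
 * hand them to the IOAPIC: vectors of level-triggered pins, pins with an
 * ack notifier attached, and the specially tracked RTC pin.
 */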
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct rtc_status *status = &ioapic->rtc_status;
	union kvm_ioapic_redirect_entry *e;
	int index;

	spin_lock(&ioapic->lock);

	/* Make sure we see any missing RTC EOI */
	if (test_bit(vcpu->vcpu_id, status->map))
		__set_bit(status->vectors[vcpu->vcpu_id],
			  ioapic_handled_vectors);

	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
		e = &ioapic->redirtbl[index];
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
		    index == RTC_GSI) {
			u16 dm = kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);

			kvm_scan_ioapic_irq(vcpu, e->fields.dest_id, dm,
					    e->fields.vector, ioapic_handled_vectors);
		}
	}
	spin_unlock(&ioapic->lock);
}

void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
	if (!ioapic_in_kernel(kvm))
		return;
	kvm_make_scan_ioapic_request(kvm);
}

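/*
 * Mask notifiers let interested code (e.g. device assignment) learn when
 * the guest masks or unmasks an IOAPIC pin.  Registration is serialized
 * by kvm->irq_lock; kvm_fire_mask_notifiers() walks the list under
 * kvm->irq_srcu, hence the synchronize_srcu() on unregistration.
 */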
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	mutex_lock(&kvm->irq_lock);
	kimn->irq = irq;
	hlist_add_head_rcu(&kimn->link, &ioapic->mask_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}

void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_rcu(&kimn->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
}

void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	struct kvm_irq_mask_notifier *kimn;
	int idx, gsi;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_srcu(kimn, &ioapic->mask_notifier_list, link,
					  srcu_read_lock_held(&kvm->irq_srcu))
			if (kimn->irq == gsi)
				kimn->func(kimn, mask);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

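/*
 * Handle a guest write through the window register.  Writes to a
 * redirection table entry preserve the read-only Remote IRR and Delivery
 * Status fields, fire mask notifiers on mask transitions, and kick the
 * affected vCPUs to rescan their EOI-intercept bitmaps.
 */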
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;
	int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		if (index >= IOAPIC_NUM_PINS)
			return;
		index = array_index_nospec(index, IOAPIC_NUM_PINS);
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		/* Preserve read-only fields */
		old_remote_irr = e->fields.remote_irr;
		old_delivery_status = e->fields.delivery_status;
		old_dest_id = e->fields.dest_id;
		old_dest_mode = e->fields.dest_mode;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
		}
		e->fields.remote_irr = old_remote_irr;
		e->fields.delivery_status = old_delivery_status;

		/*
		 * Some OSes (Linux, Xen) assume that the Remote IRR bit will
		 * be cleared by IOAPIC hardware when the entry is configured
		 * as edge-triggered.  This behavior is used to simulate an
		 * explicit EOI on IOAPICs that don't have the EOI register.
		 */
		if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
			e->fields.remote_irr = 0;

		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
		    ioapic->irr & (1 << index) && !e->fields.mask && !e->fields.remote_irr) {
			/*
			 * The pending status in irr may be outdated: the IRQ
			 * line may have already been deasserted by a device
			 * while the IRQ was masked.  This occurs, for
			 * instance, if the interrupt is handled in a Linux
			 * guest as a oneshot interrupt (IRQF_ONESHOT).  In
			 * this case the guest acknowledges the interrupt to
			 * the device in its threaded irq handler, i.e. after
			 * the EOI but before unmasking, so at the time of
			 * unmasking the IRQ line is already down but our
			 * pending irr bit is still set.  In such cases,
			 * injecting this pending interrupt to the guest is
			 * buggy: the guest will receive an extra unwanted
			 * interrupt.
			 *
			 * So we need to check here if the IRQ is actually
			 * still pending.  As we are generally not able to
			 * probe the IRQ line status directly, we do it
			 * through the irqfd resampler.  Namely, we clear the
			 * pending status and notify the resampler that this
			 * interrupt is done, without actually injecting it
			 * into the guest.  If the IRQ line is actually
			 * already deasserted, we are done.  If it is still
			 * asserted, a new interrupt will shortly be
			 * triggered through irqfd and injected into the
			 * guest.
			 *
			 * If, however, it's not possible to resample (no
			 * irqfd resampler registered for this irq), then
			 * unconditionally inject this pending interrupt into
			 * the guest, so the guest will not miss an
			 * interrupt, although it may get an extra unwanted
			 * interrupt.
			 */
			if (kvm_notify_irqfd_resampler(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index))
				ioapic->irr &= ~(1 << index);
			else
				ioapic_service(ioapic, index, false);
		}
		if (e->fields.delivery_mode == APIC_DM_FIXED) {
			struct kvm_lapic_irq irq;

			irq.vector = e->fields.vector;
			irq.delivery_mode = e->fields.delivery_mode << 8;
			irq.dest_mode =
			    kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
			irq.level = false;
			irq.trig_mode = e->fields.trig_mode;
			irq.shorthand = APIC_DEST_NOSHORT;
			irq.dest_id = e->fields.dest_id;
			irq.msi_redir_hint = false;
			bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
			kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
						 vcpu_bitmap);
			if (old_dest_mode != e->fields.dest_mode ||
			    old_dest_id != e->fields.dest_id) {
				/*
				 * Update vcpu_bitmap with vcpus specified in
				 * the previous request as well.  This is done
				 * to keep ioapic_handled_vectors synchronized.
				 */
				irq.dest_id = old_dest_id;
				irq.dest_mode =
				    kvm_lapic_irq_dest_mode(
					!!e->fields.dest_mode);
				kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
							 vcpu_bitmap);
			}
			kvm_make_scan_ioapic_request_mask(ioapic->kvm,
							  vcpu_bitmap);
		} else {
			kvm_make_scan_ioapic_request(ioapic->kvm);
		}
		break;
	}
}

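/*
 * Try to deliver pin @irq to the local APIC(s).  Returns -1 if the entry
 * is masked, or is level-triggered and still awaiting an EOI (Remote IRR
 * set); otherwise returns the delivery result and, for a successful
 * level-triggered delivery, sets Remote IRR until the guest EOIs.
 */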
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	if (entry->fields.mask ||
	    (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
	     entry->fields.remote_irr))
		return -1;

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = kvm_lapic_irq_dest_mode(!!entry->fields.dest_mode);
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = APIC_DEST_NOSHORT;
	irqe.msi_redir_hint = false;

	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr_delivered |= 1 << irq;

	if (irq == RTC_GSI && line_status) {
		/*
		 * pending_eoi can never become negative (see
		 * rtc_status_pending_eoi_check_valid) and the caller
		 * ensures that this path is only taken if pending_eoi is
		 * zero (namely, if rtc_irq_check_coalesced returns false).
		 */
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = __kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
						 &ioapic->rtc_status);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);

	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}

int kvm_ioapic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		       int irq_source_id, int level, bool line_status)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	int irq = e->irqchip.pin;
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

	spin_lock(&ioapic->lock);
	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
					 irq_source_id, level);
	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);

	spin_unlock(&ioapic->lock);

	return ret;
}

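/*
 * Delayed work that retries pending level-triggered interrupts.  It is
 * scheduled by kvm_ioapic_update_eoi_one() when an EOI storm is
 * detected, giving a buggy guest some time to make progress before the
 * interrupt is injected again.
 */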
static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
	int i;
	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
						 eoi_inject.work);
	spin_lock(&ioapic->lock);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
			continue;

		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
			ioapic_service(ioapic, i, false);
	}
	spin_unlock(&ioapic->lock);
}

#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
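/*
 * Handle an EOI broadcast for one pin: run the ack notifiers (with the
 * lock dropped, see below), clear Remote IRR and, if the line is still
 * asserted, re-inject the interrupt.  After
 * IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT back-to-back re-injections, the retry
 * is deferred to the eoi_inject delayed work instead.
 */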
static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
				      struct kvm_ioapic *ioapic,
				      int trigger_mode,
				      int pin)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[pin];

	/*
	 * We drop the lock while calling the ack notifiers because ack
	 * notifier callbacks for assigned devices call into the IOAPIC
	 * recursively.  Since remote_irr is cleared only after the
	 * notifiers have run, if the same vector is delivered while the
	 * lock is dropped it will be put into irr and delivered after the
	 * ack notifier returns.
	 */
	spin_unlock(&ioapic->lock);
	kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
	spin_lock(&ioapic->lock);

	if (trigger_mode != IOAPIC_LEVEL_TRIG ||
	    kvm_lapic_suppress_eoi_broadcast(apic))
		return;

	ent->fields.remote_irr = 0;
	if (!ent->fields.mask && (ioapic->irr & (1 << pin))) {
		++ioapic->irq_eoi[pin];
		if (ioapic->irq_eoi[pin] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
			/*
			 * Real hardware does not deliver the interrupt
			 * immediately during eoi broadcast, and this
			 * lets a buggy guest make slow progress
			 * even if it does not correctly handle a
			 * level-triggered interrupt.  Emulate this
			 * behavior if we detect an interrupt storm.
			 */
			schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
			ioapic->irq_eoi[pin] = 0;
			trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
		} else {
			ioapic_service(ioapic, pin, false);
		}
	} else {
		ioapic->irq_eoi[pin] = 0;
	}
}

void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
	int i;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	rtc_irq_eoi(ioapic, vcpu, vector);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;
		kvm_ioapic_update_eoi_one(vcpu, ioapic, trigger_mode, i);
	}
	spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	return ((addr >= ioapic->base_address &&
		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}

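/*
 * MMIO handlers for the IOAPIC window.  Only IOREGSEL and IOWIN are
 * implemented; the register offset is reduced modulo the 0x100-byte
 * window, and accesses outside [base, base + IOAPIC_MEM_LENGTH) are
 * rejected with -EOPNOTSUPP so another device may claim them.
 */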
static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t addr, int len, void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;

	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic);
		break;

	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);

	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}

static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			     gpa_t addr, int len, const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;

	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	switch (len) {
	case 8:
	case 4:
		data = *(u32 *) val;
		break;
	case 2:
		data = *(u16 *) val;
		break;
	case 1:
		data = *(u8 *) val;
		break;
	default:
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data & 0xFF; /* 8-bit register */
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;

	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}

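/*
 * Return the IOAPIC to its power-on state: every redirection entry
 * masked, registers and EOI-tracking state cleared, and the MMIO window
 * back at IOAPIC_DEFAULT_BASE_ADDRESS.
 */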
static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	ioapic->id = 0;
	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
	rtc_irq_eoi_tracking_reset(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read	= ioapic_mmio_read,
	.write	= ioapic_mmio_write,
};

int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc_obj(struct kvm_ioapic, GFP_KERNEL_ACCOUNT);
	if (!ioapic)
		return -ENOMEM;
	spin_lock_init(&ioapic->lock);
	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
	INIT_HLIST_HEAD(&ioapic->mask_notifier_list);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
				      IOAPIC_MEM_LENGTH, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}

	return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (!ioapic)
		return;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	kvm->arch.vioapic = NULL;
	kfree(ioapic);
}

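/*
 * Save/restore for KVM_GET_IRQCHIP / KVM_SET_IRQCHIP.  On save,
 * edge-triggered interrupts that were already delivered (irr_delivered)
 * are cleared from the reported IRR so that a later restore does not
 * inject them a second time; kvm_set_ioapic() replays whatever remains.
 */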
void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	state->irr &= ~ioapic->irr_delivered;
	spin_unlock(&ioapic->lock);
}

void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	kvm_make_scan_ioapic_request(kvm);
	kvm_ioapic_inject_all(ioapic, state->irr);
	spin_unlock(&ioapic->lock);
}