// SPDX-License-Identifier: LGPL-2.1-or-later
/*
 *  Copyright (C) 2001  MandrakeSoft S.A.
 *  Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *    MandrakeSoft S.A.
 *    43, rue d'Aboukir
 *    75002 Paris - France
 *    http://www.linux-mandrake.com/
 *    http://www.mandrakesoft.com/
 *
 *  Yunhong Jiang <yunhong.jiang@intel.com>
 *  Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *  Based on Xen 3.1 code.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"
#include "trace.h"

static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
		bool line_status);

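/*
 * Indirect register read through the IOWIN window: return the version,
 * APIC ID/arbitration ID, or one half of a redirection table entry,
 * depending on the register previously selected via IOREGSEL.
 */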
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
		{
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content = ~0ULL;

			if (redir_index < IOAPIC_NUM_PINS) {
				u32 index = array_index_nospec(
					redir_index, IOAPIC_NUM_PINS);

				redir_content = ioapic->redirtbl[index].bits;
			}

			result = (ioapic->ioregsel & 0x1) ?
			    (redir_content >> 32) & 0xffffffff :
			    redir_content & 0xffffffff;
			break;
		}
	}

	return result;
}

static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
	ioapic->rtc_status.pending_eoi = 0;
	bitmap_zero(ioapic->rtc_status.map, KVM_MAX_VCPU_IDS);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
		kvm_rtc_eoi_tracking_restore_all(ioapic);
}

static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	bool new_val, old_val;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct rtc_status *status = &ioapic->rtc_status;
	union kvm_ioapic_redirect_entry *e;

	e = &ioapic->redirtbl[RTC_GSI];
	if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
				 e->fields.dest_id,
				 kvm_lapic_irq_dest_mode(!!e->fields.dest_mode)))
		return;

	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
	old_val = test_bit(vcpu->vcpu_id, status->map);

	if (new_val == old_val)
		return;

	if (new_val) {
		__set_bit(vcpu->vcpu_id, status->map);
		status->vectors[vcpu->vcpu_id] = e->fields.vector;
		ioapic->rtc_status.pending_eoi++;
	} else {
		__clear_bit(vcpu->vcpu_id, status->map);
		ioapic->rtc_status.pending_eoi--;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__rtc_irq_eoi_tracking_restore_one(vcpu);
	spin_unlock(&ioapic->lock);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	if (RTC_GSI >= IOAPIC_NUM_PINS)
		return;

	rtc_irq_eoi_tracking_reset(ioapic);
	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
	    __rtc_irq_eoi_tracking_restore_one(vcpu);
}

static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu,
			int vector)
{
	struct rtc_status *status = &ioapic->rtc_status;

	/* RTC special handling */
	if (test_bit(vcpu->vcpu_id, status->map) &&
	    (vector == status->vectors[vcpu->vcpu_id]) &&
	    (test_and_clear_bit(vcpu->vcpu_id, status->map))) {
		--ioapic->rtc_status.pending_eoi;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
	if (ioapic->rtc_status.pending_eoi > 0)
		return true; /* coalesced */

	return false;
}

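/*
 * When the EOI for an edge-triggered interrupt cannot be trapped (see the
 * APICv comment in ioapic_set_irq() below), lazily refresh the RTC EOI
 * tracking instead: if a destination vCPU no longer has the vector pending
 * in its LAPIC, record the EOI now.
 */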
static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];

	kvm_for_each_vcpu(i, vcpu, ioapic->kvm) {
		if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
					 entry->fields.dest_id,
					 entry->fields.dest_mode) ||
		    kvm_apic_pending_eoi(vcpu, entry->fields.vector))
			continue;

		/*
		 * This vCPU no longer has the vector pending in its LAPIC:
		 * record the EOI for the RTC tracking state.
		 */
		rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
		break;
	}
}

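/*
 * Assert or deassert an IOAPIC pin.  Deassertion only clears the IRR bit;
 * on assertion the interrupt is delivered via ioapic_service() unless it
 * is detected as coalesced, in which case 0 is returned.
 */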
static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
		int irq_level, bool line_status)
{
	union kvm_ioapic_redirect_entry entry;
	u32 mask = 1 << irq;
	u32 old_irr;
	int edge, ret;

	entry = ioapic->redirtbl[irq];
	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

	if (!irq_level) {
		ioapic->irr &= ~mask;
		ret = 1;
		goto out;
	}

	/*
	 * AMD SVM AVIC accelerates EOI writes iff the interrupt is edge
	 * triggered, in which case the in-kernel IOAPIC will never see the
	 * EOI.  In that case, do a lazy update of the pending EOI when
	 * trying to set the IOAPIC irq.
	 */
	if (edge && kvm_apicv_activated(ioapic->kvm))
		ioapic_lazy_update_eoi(ioapic, irq);

	/*
	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
	 * this only happens if a previous edge has not been delivered due
	 * to masking.  For level interrupts, the remote_irr field tells
	 * us if the interrupt is waiting for an EOI.
	 *
	 * RTC is special: it is edge-triggered, but userspace likes to know
	 * whether it has already been acked via EOI, because coalesced RTC
	 * interrupts lead to time drift in Windows guests.  So we track
	 * EOI manually for the RTC interrupt.
	 */
	if (irq == RTC_GSI && line_status &&
		rtc_irq_check_coalesced(ioapic)) {
		ret = 0;
		goto out;
	}

	old_irr = ioapic->irr;
	ioapic->irr |= mask;
	if (edge) {
		ioapic->irr_delivered &= ~mask;
		if (old_irr == ioapic->irr) {
			ret = 0;
			goto out;
		}
	}

	ret = ioapic_service(ioapic, irq, line_status);

out:
	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	return ret;
}

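/*
 * Re-inject every interrupt that was pending in a saved IRR (see
 * kvm_set_ioapic()); RTC EOI tracking is reset before the injection and
 * rebuilt from the vCPUs' LAPIC state afterwards.
 */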
static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
	u32 idx;

	rtc_irq_eoi_tracking_reset(ioapic);
	for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
		ioapic_set_irq(ioapic, idx, 1, true);

	kvm_rtc_eoi_tracking_restore_all(ioapic);
}


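/*
 * Recompute, for this vCPU, the set of vectors whose EOI must be forwarded
 * to the IOAPIC: level-triggered entries, entries with an ack notifier,
 * and the RTC GSI.
 */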
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct rtc_status *status = &ioapic->rtc_status;
	union kvm_ioapic_redirect_entry *e;
	int index;

	spin_lock(&ioapic->lock);

	/* Make sure we see any missing RTC EOI */
	if (test_bit(vcpu->vcpu_id, status->map))
		__set_bit(status->vectors[vcpu->vcpu_id],
			  ioapic_handled_vectors);

	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
		e = &ioapic->redirtbl[index];
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
		    index == RTC_GSI) {
			u16 dm = kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);

			kvm_scan_ioapic_irq(vcpu, e->fields.dest_id, dm,
					    e->fields.vector, ioapic_handled_vectors);
		}
	}
	spin_unlock(&ioapic->lock);
}

void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
	if (!ioapic_in_kernel(kvm))
		return;
	kvm_make_scan_ioapic_request(kvm);
}

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	mutex_lock(&kvm->irq_lock);
	kimn->irq = irq;
	hlist_add_head_rcu(&kimn->link, &ioapic->mask_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}

void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_rcu(&kimn->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
}

void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	struct kvm_irq_mask_notifier *kimn;
	int idx, gsi;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kimn, &ioapic->mask_notifier_list, link)
			if (kimn->irq == gsi)
				kimn->func(kimn, mask);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

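/*
 * Indirect register write through the IOWIN window.  For redirection table
 * entries this updates the selected 32-bit half while preserving the
 * read-only Remote IRR and Delivery Status bits, fires mask notifiers when
 * the mask bit changes, handles a still-pending level-triggered interrupt
 * that may have been unmasked, and finally requests a rescan of the
 * vectors that need EOI forwarding.
 */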
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;
	int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		if (index >= IOAPIC_NUM_PINS)
			return;
		index = array_index_nospec(index, IOAPIC_NUM_PINS);
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		/* Preserve read-only fields */
		old_remote_irr = e->fields.remote_irr;
		old_delivery_status = e->fields.delivery_status;
		old_dest_id = e->fields.dest_id;
		old_dest_mode = e->fields.dest_mode;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
		}
		e->fields.remote_irr = old_remote_irr;
		e->fields.delivery_status = old_delivery_status;

		/*
		 * Some OSes (Linux, Xen) assume that the Remote IRR bit will
		 * be cleared by IOAPIC hardware when the entry is configured
		 * as edge-triggered. This behavior is used to simulate an
		 * explicit EOI on IOAPICs that don't have the EOI register.
		 */
		if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
			e->fields.remote_irr = 0;

		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
		    ioapic->irr & (1 << index) && !e->fields.mask && !e->fields.remote_irr) {
			/*
			 * Pending status in irr may be outdated: the IRQ line may have
			 * already been deasserted by a device while the IRQ was masked.
			 * This occurs, for instance, if the interrupt is handled in a
			 * Linux guest as a oneshot interrupt (IRQF_ONESHOT). In this
			 * case the guest acknowledges the interrupt to the device in
			 * its threaded irq handler, i.e. after the EOI but before
			 * unmasking, so at the time of unmasking the IRQ line is
			 * already down but our pending irr bit is still set. In such
			 * cases, injecting this pending interrupt to the guest is
			 * buggy: the guest will receive an extra unwanted interrupt.
			 *
			 * So we need to check here whether the IRQ is actually still pending.
			 * As we are generally not able to probe the IRQ line status
			 * directly, we do it through the irqfd resampler. Namely, we clear
			 * the pending status and notify the resampler that this interrupt
			 * is done, without actually injecting it into the guest. If the
			 * IRQ line is actually already deasserted, we are done. If it is
			 * still asserted, a new interrupt will shortly be triggered
			 * through irqfd and injected into the guest.
			 *
			 * If, however, it's not possible to resample (no irqfd resampler
			 * is registered for this irq), then unconditionally inject this
			 * pending interrupt into the guest, so the guest will not miss
			 * an interrupt, although it may get an extra unwanted interrupt.
			 */
			if (kvm_notify_irqfd_resampler(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index))
				ioapic->irr &= ~(1 << index);
			else
				ioapic_service(ioapic, index, false);
		}
		if (e->fields.delivery_mode == APIC_DM_FIXED) {
			struct kvm_lapic_irq irq;

			irq.vector = e->fields.vector;
			irq.delivery_mode = e->fields.delivery_mode << 8;
			irq.dest_mode =
			    kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
			irq.level = false;
			irq.trig_mode = e->fields.trig_mode;
			irq.shorthand = APIC_DEST_NOSHORT;
			irq.dest_id = e->fields.dest_id;
			irq.msi_redir_hint = false;
			bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
			kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
						 vcpu_bitmap);
			if (old_dest_mode != e->fields.dest_mode ||
			    old_dest_id != e->fields.dest_id) {
				/*
				 * Update vcpu_bitmap with vcpus specified in
				 * the previous request as well. This is done to
				 * keep ioapic_handled_vectors synchronized.
				 */
				irq.dest_id = old_dest_id;
				irq.dest_mode =
				    kvm_lapic_irq_dest_mode(
					!!e->fields.dest_mode);
				kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
							 vcpu_bitmap);
			}
			kvm_make_scan_ioapic_request_mask(ioapic->kvm,
							  vcpu_bitmap);
		} else {
			kvm_make_scan_ioapic_request(ioapic->kvm);
		}
		break;
	}
}

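/*
 * Deliver the interrupt configured in a redirection entry to the local
 * APIC(s).  Returns -1 if the entry is masked or a level-triggered
 * interrupt is still awaiting its EOI (Remote IRR set); otherwise returns
 * the result of the APIC delivery, setting Remote IRR for level-triggered
 * interrupts when that result is non-zero.
 */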
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	if (entry->fields.mask ||
	    (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
	    entry->fields.remote_irr))
		return -1;

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = kvm_lapic_irq_dest_mode(!!entry->fields.dest_mode);
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = APIC_DEST_NOSHORT;
	irqe.msi_redir_hint = false;

	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr_delivered |= 1 << irq;

	if (irq == RTC_GSI && line_status) {
		/*
		 * pending_eoi can never become negative (see
		 * rtc_status_pending_eoi_check_valid) and the caller
		 * ensures that this path is only taken when it is zero,
		 * i.e. when rtc_irq_check_coalesced returns false.
		 */
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = __kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
						 &ioapic->rtc_status);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);

	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}

int kvm_ioapic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		       int irq_source_id, int level, bool line_status)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	int irq = e->irqchip.pin;
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

	spin_lock(&ioapic->lock);
	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
					 irq_source_id, level);
	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);

	spin_unlock(&ioapic->lock);

	return ret;
}

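/*
 * Delayed work used to retry injection of level-triggered interrupts that
 * are still asserted but not yet delivered; scheduled by
 * kvm_ioapic_update_eoi_one() when an interrupt storm is detected.
 */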
static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
	int i;
	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
						 eoi_inject.work);
	spin_lock(&ioapic->lock);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
			continue;

		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
			ioapic_service(ioapic, i, false);
	}
	spin_unlock(&ioapic->lock);
}

#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
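/*
 * Handle an EOI for a single pin: run the ack notifiers and, for
 * level-triggered pins, clear Remote IRR and re-deliver the interrupt if
 * the line is still asserted, falling back to delayed re-injection when an
 * interrupt storm (IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT successive EOIs with
 * the line still asserted) is detected.
 */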
static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
				      struct kvm_ioapic *ioapic,
				      int trigger_mode,
				      int pin)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[pin];

	/*
	 * We drop the lock while calling the ack notifiers because the ack
	 * notifier callbacks for assigned devices call back into the IOAPIC
	 * recursively.  Since remote_irr is cleared only after the
	 * notifiers have run, if the same vector is delivered while the
	 * lock is dropped it will be latched in irr and delivered once the
	 * ack notifier returns.
	 */
	spin_unlock(&ioapic->lock);
	kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
	spin_lock(&ioapic->lock);

	if (trigger_mode != IOAPIC_LEVEL_TRIG ||
	    kvm_lapic_suppress_eoi_broadcast(apic))
		return;

	ent->fields.remote_irr = 0;
	if (!ent->fields.mask && (ioapic->irr & (1 << pin))) {
		++ioapic->irq_eoi[pin];
		if (ioapic->irq_eoi[pin] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
			/*
			 * Real hardware does not deliver the interrupt
			 * immediately during eoi broadcast, and this
			 * lets a buggy guest make slow progress
			 * even if it does not correctly handle a
			 * level-triggered interrupt.  Emulate this
			 * behavior if we detect an interrupt storm.
			 */
			schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
			ioapic->irq_eoi[pin] = 0;
			trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
		} else {
			ioapic_service(ioapic, pin, false);
		}
	} else {
		ioapic->irq_eoi[pin] = 0;
	}
}

void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
	int i;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	rtc_irq_eoi(ioapic, vcpu, vector);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;
		kvm_ioapic_update_eoi_one(vcpu, ioapic, trigger_mode, i);
	}
	spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	return ((addr >= ioapic->base_address &&
		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}

static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
				gpa_t addr, int len, void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic);
		break;

	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);

	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}

static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
				 gpa_t addr, int len, const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	switch (len) {
	case 8:
	case 4:
		data = *(u32 *) val;
		break;
	case 2:
		data = *(u16 *) val;
		break;
	case 1:
		data = *(u8  *) val;
		break;
	default:
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data & 0xFF; /* 8-bit register */
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;

	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}

static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	ioapic->id = 0;
	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
	rtc_irq_eoi_tracking_reset(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read     = ioapic_mmio_read,
	.write    = ioapic_mmio_write,
};

int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL_ACCOUNT);
	if (!ioapic)
		return -ENOMEM;
	spin_lock_init(&ioapic->lock);
	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
	INIT_HLIST_HEAD(&ioapic->mask_notifier_list);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
				      IOAPIC_MEM_LENGTH, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}

	return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (!ioapic)
		return;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	kvm->arch.vioapic = NULL;
	kfree(ioapic);
}

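/*
 * Save IOAPIC state for userspace.  Edge-triggered interrupts that have
 * already been delivered (tracked in irr_delivered) are masked out of the
 * saved IRR so that they are not injected a second time on restore.
 */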
void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	state->irr &= ~ioapic->irr_delivered;
	spin_unlock(&ioapic->lock);
}

void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	kvm_make_scan_ioapic_request(kvm);
	kvm_ioapic_inject_all(ioapic, state->irr);
	spin_unlock(&ioapic->lock);
}
779