// SPDX-License-Identifier: GPL-2.0-only
/*
 * irq.c: API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/kvm_irqfd.h>

#include "hyperv.h"
#include "ioapic.h"
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "xen.h"

/*
 * Check if there are pending timer events to be processed.
 */
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        int r = 0;

        if (lapic_in_kernel(vcpu))
                r = apic_has_pending_timer(vcpu);
        if (kvm_xen_timer_enabled(vcpu))
                r += kvm_xen_has_pending_timer(vcpu);

        return r;
}

/*
 * Check if there is a pending userspace external interrupt.
 */
static int pending_userspace_extint(struct kvm_vcpu *v)
{
        return v->arch.pending_external_vector != -1;
}

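/*
 * Consume and clear the ExtINT vector that userspace queued via KVM_INTERRUPT
 * (split irqchip only).
 */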
static int get_userspace_extint(struct kvm_vcpu *vcpu)
{
        int vector = vcpu->arch.pending_external_vector;

        vcpu->arch.pending_external_vector = -1;
        return vector;
}

/*
 * Check if there is a pending interrupt from a non-APIC source, without
 * acknowledging it (no intack).
 */
int kvm_cpu_has_extint(struct kvm_vcpu *v)
{
        /*
         * FIXME: interrupt.injected represents an interrupt whose
         * side-effects have already been applied (e.g. bit from IRR
         * already moved to ISR). Therefore, it is incorrect to rely
         * on interrupt.injected to know if there is a pending
         * interrupt in the user-mode LAPIC.
         * This leads to nVMX/nSVM not being able to distinguish
         * whether it should exit from L2 to L1 on EXTERNAL_INTERRUPT
         * because of a pending interrupt, or re-inject an injected
         * interrupt.
         */
        if (!lapic_in_kernel(v))
                return v->arch.interrupt.injected;

        if (kvm_xen_has_interrupt(v))
                return 1;

        if (!kvm_apic_accept_pic_intr(v))
                return 0;

#ifdef CONFIG_KVM_IOAPIC
        if (pic_in_kernel(v->kvm))
                return v->kvm->arch.vpic->output;
#endif

        WARN_ON_ONCE(!irqchip_split(v->kvm));
        return pending_userspace_extint(v);
}

/*
 * Check if there is an injectable interrupt.  When virtual interrupt
 * delivery is enabled, interrupts from the APIC are handled by hardware
 * and don't need to be checked here.
 */
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
{
        if (kvm_cpu_has_extint(v))
                return 1;

        if (!is_guest_mode(v) && kvm_vcpu_apicv_active(v))
                return 0;

        return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_has_injectable_intr);

/*
 * Check if there is a pending interrupt, without acknowledging it (no intack).
 */
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
        if (kvm_cpu_has_extint(v))
                return 1;

        if (lapic_in_kernel(v) && v->arch.apic->guest_apic_protected)
                return kvm_x86_call(protected_apic_has_interrupt)(v);

        return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_has_interrupt);

/*
 * Read the pending interrupt vector from a non-APIC source and acknowledge
 * it (intack).
 */
int kvm_cpu_get_extint(struct kvm_vcpu *v)
{
        if (!kvm_cpu_has_extint(v)) {
                WARN_ON(!lapic_in_kernel(v));
                return -1;
        }

        if (!lapic_in_kernel(v))
                return v->arch.interrupt.nr;

#ifdef CONFIG_KVM_XEN
        if (kvm_xen_has_interrupt(v))
                return v->kvm->arch.xen.upcall_vector;
#endif

#ifdef CONFIG_KVM_IOAPIC
        if (pic_in_kernel(v->kvm))
                return kvm_pic_read_irq(v->kvm); /* PIC */
#endif

        WARN_ON_ONCE(!irqchip_split(v->kvm));
        return get_userspace_extint(v);
}
EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_get_extint);

/*
 * Read the pending interrupt vector and acknowledge it (intack).
 */
int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
{
        int vector = kvm_cpu_get_extint(v);

        if (vector != -1)
                return vector; /* PIC */

        vector = kvm_apic_has_interrupt(v); /* APIC */
        if (vector != -1)
                kvm_apic_ack_interrupt(v, vector);

        return vector;
}

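/*
 * Inject any expired APIC and/or Xen timer interrupts into the guest.
 */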
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
{
        if (lapic_in_kernel(vcpu))
                kvm_inject_apic_timer_irqs(vcpu);
        if (kvm_xen_timer_enabled(vcpu))
                kvm_xen_inject_timer_irqs(vcpu);
}

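/*
 * Migrate the in-kernel timers (APIC timer, PIT, vendor-specific timers) when
 * the vCPU is moved to a different physical CPU.
 */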
void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        __kvm_migrate_apic_timer(vcpu);
#ifdef CONFIG_KVM_IOAPIC
        __kvm_migrate_pit_timer(vcpu);
#endif
        kvm_x86_call(migrate_timers)(vcpu);
}

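/*
 * Resampling irqfds require the full in-kernel irqchip (PIC + I/O APIC);
 * regular irqfds only need some form of in-kernel interrupt routing.
 */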
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
        bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE;

        return resample ? irqchip_full(kvm) : irqchip_in_kernel(kvm);
}

bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
        return irqchip_in_kernel(kvm);
}

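/*
 * Decode an MSI routing entry (address and data) into a struct kvm_lapic_irq
 * for delivery to the local APIC(s).
 */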
static void kvm_msi_to_lapic_irq(struct kvm *kvm,
                                 struct kvm_kernel_irq_routing_entry *e,
                                 struct kvm_lapic_irq *irq)
{
        struct msi_msg msg = { .address_lo = e->msi.address_lo,
                               .address_hi = e->msi.address_hi,
                               .data = e->msi.data };

        trace_kvm_msi_set_irq(msg.address_lo | (kvm->arch.x2apic_format ?
                              (u64)msg.address_hi << 32 : 0), msg.data);

        irq->dest_id = x86_msi_msg_get_destid(&msg, kvm->arch.x2apic_format);
        irq->vector = msg.arch_data.vector;
        irq->dest_mode = kvm_lapic_irq_dest_mode(msg.arch_addr_lo.dest_mode_logical);
        irq->trig_mode = msg.arch_data.is_level;
        irq->delivery_mode = msg.arch_data.delivery_mode << 8;
        irq->msi_redir_hint = msg.arch_addr_lo.redirect_hint;
        irq->level = 1;
        irq->shorthand = APIC_DEST_NOSHORT;
}

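/*
 * With the extended (x2APIC) MSI format, address_hi carries the upper bits of
 * the destination ID; its low 8 bits are reserved and must be zero.
 */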
static inline bool kvm_msi_route_invalid(struct kvm *kvm,
                                         struct kvm_kernel_irq_routing_entry *e)
{
        return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff);
}

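/*
 * Deliver the MSI described by the routing entry.  MSIs are edge-triggered,
 * so a de-assert (level == 0) cannot be delivered.
 */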
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
                struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
        struct kvm_lapic_irq irq;

        if (kvm_msi_route_invalid(kvm, e))
                return -EINVAL;

        if (!level)
                return -1;

        kvm_msi_to_lapic_irq(kvm, e, &irq);

        return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
}

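/*
 * Try to deliver the interrupt from atomic context, without taking sleeping
 * locks; return -EWOULDBLOCK if delivery has to fall back to the normal,
 * schedulable path.
 */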
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
                              struct kvm *kvm, int irq_source_id, int level,
                              bool line_status)
{
        struct kvm_lapic_irq irq;
        int r;

        switch (e->type) {
#ifdef CONFIG_KVM_HYPERV
        case KVM_IRQ_ROUTING_HV_SINT:
                return kvm_hv_synic_set_irq(e, kvm, irq_source_id, level,
                                            line_status);
#endif

        case KVM_IRQ_ROUTING_MSI:
                if (kvm_msi_route_invalid(kvm, e))
                        return -EINVAL;

                kvm_msi_to_lapic_irq(kvm, e, &irq);

                if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
                        return r;
                break;

#ifdef CONFIG_KVM_XEN
        case KVM_IRQ_ROUTING_XEN_EVTCHN:
                if (!level)
                        return -1;

                return kvm_xen_set_evtchn_fast(&e->xen_evtchn, kvm);
#endif
        default:
                break;
        }

        return -EWOULDBLOCK;
}

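/*
 * Handle the KVM_IRQ_LINE ioctl: assert or de-assert a GSI on behalf of
 * userspace, routed through the in-kernel irqchip.
 */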
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
                          bool line_status)
{
        if (!irqchip_in_kernel(kvm))
                return -ENXIO;

        irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                        irq_event->irq, irq_event->level,
                                        line_status);
        return 0;
}

bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
        return irqchip_in_kernel(kvm);
}

int kvm_set_routing_entry(struct kvm *kvm,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue)
{
        /* We can't check irqchip_in_kernel() here as some callers are
         * currently initializing the irqchip. Other callers should therefore
         * check kvm_arch_can_set_irq_routing() before calling this function.
         */
        switch (ue->type) {
#ifdef CONFIG_KVM_IOAPIC
        case KVM_IRQ_ROUTING_IRQCHIP:
                if (irqchip_split(kvm))
                        return -EINVAL;
                e->irqchip.pin = ue->u.irqchip.pin;
                switch (ue->u.irqchip.irqchip) {
                case KVM_IRQCHIP_PIC_SLAVE:
                        e->irqchip.pin += PIC_NUM_PINS / 2;
                        fallthrough;
                case KVM_IRQCHIP_PIC_MASTER:
                        if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2)
                                return -EINVAL;
                        e->set = kvm_pic_set_irq;
                        break;
                case KVM_IRQCHIP_IOAPIC:
                        if (ue->u.irqchip.pin >= KVM_IOAPIC_NUM_PINS)
                                return -EINVAL;
                        e->set = kvm_ioapic_set_irq;
                        break;
                default:
                        return -EINVAL;
                }
                e->irqchip.irqchip = ue->u.irqchip.irqchip;
                break;
#endif
        case KVM_IRQ_ROUTING_MSI:
                e->set = kvm_set_msi;
                e->msi.address_lo = ue->u.msi.address_lo;
                e->msi.address_hi = ue->u.msi.address_hi;
                e->msi.data = ue->u.msi.data;

                if (kvm_msi_route_invalid(kvm, e))
                        return -EINVAL;
                break;
#ifdef CONFIG_KVM_HYPERV
        case KVM_IRQ_ROUTING_HV_SINT:
                e->set = kvm_hv_synic_set_irq;
                e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
                e->hv_sint.sint = ue->u.hv_sint.sint;
                break;
#endif
#ifdef CONFIG_KVM_XEN
        case KVM_IRQ_ROUTING_XEN_EVTCHN:
                return kvm_xen_setup_evtchn(kvm, e, ue);
#endif
        default:
                return -EINVAL;
        }

        return 0;
}

void kvm_scan_ioapic_irq(struct kvm_vcpu *vcpu, u32 dest_id, u16 dest_mode,
                         u8 vector, unsigned long *ioapic_handled_vectors)
{
        /*
         * Intercept EOI if the vCPU is the target of the new IRQ routing, or
         * the vCPU has a pending IRQ from the old routing, i.e. if the vCPU
         * may receive a level-triggered IRQ in the future, or has already
         * received a level-triggered IRQ.  The EOI needs to be intercepted
         * and forwarded to I/O APIC emulation so that the IRQ can be
         * de-asserted.
         */
        if (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT, dest_id, dest_mode)) {
                __set_bit(vector, ioapic_handled_vectors);
        } else if (kvm_apic_pending_eoi(vcpu, vector)) {
                __set_bit(vector, ioapic_handled_vectors);

                /*
                 * Track the highest pending EOI for which the vCPU is NOT the
                 * target in the new routing.  Only the EOI for the IRQ that is
                 * in-flight (for the old routing) needs to be intercepted, any
                 * future IRQs that arrive on this vCPU will be coincidental to
                 * the level-triggered routing and don't need to be intercepted.
                 */
                if ((int)vector > vcpu->arch.highest_stale_pending_ioapic_eoi)
                        vcpu->arch.highest_stale_pending_ioapic_eoi = vector;
        }
}

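/*
 * Scan the MSI routes that userspace reserved for its I/O APIC emulation
 * (split irqchip) and record which level-triggered vectors need EOI
 * interception on this vCPU.
 */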
void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
                            ulong *ioapic_handled_vectors)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_kernel_irq_routing_entry *entry;
        struct kvm_irq_routing_table *table;
        u32 i, nr_ioapic_pins;
        int idx;

        idx = srcu_read_lock(&kvm->irq_srcu);
        table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
        nr_ioapic_pins = min_t(u32, table->nr_rt_entries,
                               kvm->arch.nr_reserved_ioapic_pins);
        for (i = 0; i < nr_ioapic_pins; ++i) {
                hlist_for_each_entry(entry, &table->map[i], link) {
                        struct kvm_lapic_irq irq;

                        if (entry->type != KVM_IRQ_ROUTING_MSI)
                                continue;

                        kvm_msi_to_lapic_irq(vcpu->kvm, entry, &irq);

                        if (!irq.trig_mode)
                                continue;

                        kvm_scan_ioapic_irq(vcpu, irq.dest_id, irq.dest_mode,
                                            irq.vector, ioapic_handled_vectors);
                }
        }
        srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_arch_irq_routing_update(struct kvm *kvm)
{
#ifdef CONFIG_KVM_HYPERV
        kvm_hv_irq_routing_update(kvm);
#endif

        if (irqchip_split(kvm))
                kvm_make_scan_ioapic_request(kvm);
}

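/*
 * Update posted-interrupt bypass for an irqfd: point the IRTE at the target
 * vCPU if the route is a postable MSI with a single-vCPU destination,
 * otherwise fall back to remapped mode so the host handles the MSI and KVM
 * synthesizes the guest interrupt.
 */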
static int kvm_pi_update_irte(struct kvm_kernel_irqfd *irqfd,
                              struct kvm_kernel_irq_routing_entry *entry)
{
        unsigned int host_irq = irqfd->producer->irq;
        struct kvm *kvm = irqfd->kvm;
        struct kvm_vcpu *vcpu = NULL;
        struct kvm_lapic_irq irq;
        int r;

        if (WARN_ON_ONCE(!irqchip_in_kernel(kvm) || !kvm_arch_has_irq_bypass()))
                return -EINVAL;

        if (entry && entry->type == KVM_IRQ_ROUTING_MSI) {
                kvm_msi_to_lapic_irq(kvm, entry, &irq);

                /*
                 * Force remapped mode if hardware doesn't support posting the
                 * virtual interrupt to a vCPU.  Only IRQs are postable (NMIs,
                 * SMIs, etc. are not), and neither AMD nor Intel IOMMUs support
                 * posting multicast/broadcast IRQs.  If the interrupt can't be
                 * posted, the device MSI needs to be routed to the host so that
                 * the guest's desired interrupt can be synthesized by KVM.
                 *
                 * This means that KVM can only post lowest-priority interrupts
                 * if they have a single CPU as the destination, e.g. only if
                 * the guest has affined the interrupt to a single vCPU.
                 */
                if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
                    !kvm_irq_is_postable(&irq))
                        vcpu = NULL;
        }

        if (!irqfd->irq_bypass_vcpu && !vcpu)
                return 0;

        r = kvm_x86_call(pi_update_irte)(irqfd, irqfd->kvm, host_irq, irqfd->gsi,
                                         vcpu, irq.vector);
        if (r) {
                WARN_ON_ONCE(irqfd->irq_bypass_vcpu && !vcpu);
                irqfd->irq_bypass_vcpu = NULL;
                return r;
        }

        irqfd->irq_bypass_vcpu = vcpu;

        trace_kvm_pi_irte_update(host_irq, vcpu, irqfd->gsi, irq.vector, !!vcpu);
        return 0;
}

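/*
 * An IRQ bypass producer (e.g. a VFIO-assigned device) was attached to the
 * irqfd; start bypass for the VM on the first producer and try to post the
 * MSI directly to the target vCPU.
 */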
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
                                     struct irq_bypass_producer *prod)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);
        struct kvm *kvm = irqfd->kvm;
        int ret = 0;

        spin_lock_irq(&kvm->irqfds.lock);
        irqfd->producer = prod;

        if (!kvm->arch.nr_possible_bypass_irqs++)
                kvm_x86_call(pi_start_bypass)(kvm);

        if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) {
                ret = kvm_pi_update_irte(irqfd, &irqfd->irq_entry);
                if (ret)
                        kvm->arch.nr_possible_bypass_irqs--;
        }
        spin_unlock_irq(&kvm->irqfds.lock);

        return ret;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
                                      struct irq_bypass_producer *prod)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);
        struct kvm *kvm = irqfd->kvm;
        int ret;

        WARN_ON(irqfd->producer != prod);

        /*
         * If the producer of an IRQ that is currently being posted to a vCPU
         * is unregistered, change the associated IRTE back to remapped mode as
         * the IRQ has been released (or repurposed) by the device driver, i.e.
         * KVM must relinquish control of the IRTE.
         */
        spin_lock_irq(&kvm->irqfds.lock);

        if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) {
                ret = kvm_pi_update_irte(irqfd, NULL);
                if (ret)
                        pr_info("irq bypass consumer (eventfd %p) unregistration fails: %d\n",
                                irqfd->consumer.eventfd, ret);
        }
        irqfd->producer = NULL;

        kvm->arch.nr_possible_bypass_irqs--;

        spin_unlock_irq(&kvm->irqfds.lock);
}

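/*
 * The routing for an irqfd changed; reprogram the posted-interrupt state only
 * if the GSI gained or lost an MSI route, or if the MSI payload changed.
 */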
void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
                                   struct kvm_kernel_irq_routing_entry *old,
                                   struct kvm_kernel_irq_routing_entry *new)
{
        if (new->type != KVM_IRQ_ROUTING_MSI &&
            old->type != KVM_IRQ_ROUTING_MSI)
                return;

        if (old->type == KVM_IRQ_ROUTING_MSI &&
            new->type == KVM_IRQ_ROUTING_MSI &&
            !memcmp(&old->msi, &new->msi, sizeof(new->msi)))
                return;

        kvm_pi_update_irte(irqfd, new);
}

#ifdef CONFIG_KVM_IOAPIC
#define IOAPIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
          .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#define PIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
          .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
#define ROUTING_ENTRY2(irq) \
        IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)

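/*
 * Default GSI routing installed when the full in-kernel irqchip is created:
 * GSIs 0-15 are routed to both the PIC and the I/O APIC, GSIs 16-23 to the
 * I/O APIC only.
 */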
static const struct kvm_irq_routing_entry default_routing[] = {
        ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
        ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
        ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
        ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
        ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
        ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
        ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
        ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
        ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
        ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
        ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
        ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
};

int kvm_setup_default_ioapic_and_pic_routing(struct kvm *kvm)
{
        return kvm_set_irq_routing(kvm, default_routing,
                                   ARRAY_SIZE(default_routing), 0);
}

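/*
 * Handle KVM_GET_IRQCHIP: copy the state of the requested in-kernel irqchip
 * (PIC master, PIC slave or I/O APIC) out to userspace.
 */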
int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
        struct kvm_pic *pic = kvm->arch.vpic;
        int r;

        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
                memcpy(&chip->chip.pic, &pic->pics[0],
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
                memcpy(&chip->chip.pic, &pic->pics[1],
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_IOAPIC:
                kvm_get_ioapic(kvm, &chip->chip.ioapic);
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

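/*
 * Handle KVM_SET_IRQCHIP: load irqchip state from userspace and re-evaluate
 * the PIC's pending output.
 */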
int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
        struct kvm_pic *pic = kvm->arch.vpic;
        int r;

        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
                spin_lock(&pic->lock);
                memcpy(&pic->pics[0], &chip->chip.pic,
                       sizeof(struct kvm_pic_state));
                spin_unlock(&pic->lock);
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
                spin_lock(&pic->lock);
                memcpy(&pic->pics[1], &chip->chip.pic,
                       sizeof(struct kvm_pic_state));
                spin_unlock(&pic->lock);
                break;
        case KVM_IRQCHIP_IOAPIC:
                kvm_set_ioapic(kvm, &chip->chip.ioapic);
                break;
        default:
                r = -EINVAL;
                break;
        }
        kvm_pic_update_irq(pic);
        return r;
}
#endif