// SPDX-License-Identifier: GPL-2.0-only
/*
 * irqchip.c: Common API for in-kernel interrupt controllers
 * Copyright (c) 2007, Intel Corporation.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (c) 2013, Alexander Graf <agraf@suse.de>
 *
 * This file is derived from virt/kvm/irq_comm.c.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/export.h>
#include <trace/events/kvm.h>

/*
 * Copy the routing entries for @gsi into @entries and return how many were
 * found.  The caller must hold the kvm->irq_srcu read lock or kvm->irq_lock.
 */
int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	int n = 0;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));
	if (irq_rt && gsi < irq_rt->nr_rt_entries) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			entries[n] = *e;
			++n;
		}
	}

	return n;
}

/*
 * Return the GSI routed to the given irqchip pin, or -1 if the pin is not
 * mapped.  The caller must hold the kvm->irq_srcu read lock.
 */
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_routing_table *irq_rt;

	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	return irq_rt->chip[irqchip][pin];
}

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	struct kvm_kernel_irq_routing_entry route;

	if (!kvm_arch_irqchip_in_kernel(kvm) || (msi->flags & ~KVM_MSI_VALID_DEVID))
		return -EINVAL;

	route.msi.address_lo = msi->address_lo;
	route.msi.address_hi = msi->address_hi;
	route.msi.data = msi->data;
	route.msi.flags = msi->flags;
	route.msi.devid = msi->devid;

	return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
}
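
/*
 * Illustrative sketch, not part of this file: how userspace might inject an
 * MSI via the KVM_SIGNAL_MSI ioctl, which ends up calling
 * kvm_send_userspace_msi() above.  "vm_fd" is assumed to be a file descriptor
 * returned by KVM_CREATE_VM, and the address/data values are placeholders
 * rather than a real device's MSI programming.
 *
 *	struct kvm_msi msi = {
 *		.address_lo = 0xfee00000,	// example doorbell address
 *		.address_hi = 0,
 *		.data       = 0x4021,		// example vector encoding
 *		.flags      = 0,		// or KVM_MSI_VALID_DEVID + .devid
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SIGNAL_MSI, &msi) < 0)
 *		perror("KVM_SIGNAL_MSI");
 */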

/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	struct kvm_kernel_irq_routing_entry irq_set[KVM_NR_IRQCHIPS];
	int ret = -1, i, idx;

	trace_kvm_set_irq(irq, level, irq_source_id);

	/*
	 * Not possible to detect if the guest uses the PIC or the
	 * IOAPIC. So set the bit in both. The guest will ignore
	 * writes to the unused one.
	 */
	idx = srcu_read_lock(&kvm->irq_srcu);
	i = kvm_irq_map_gsi(kvm, irq_set, irq);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	while (i--) {
		int r;

		r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
				   line_status);
		if (r < 0)
			continue;

		ret = r + ((ret < 0) ? 0 : ret);
	}

	return ret;
}
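
/*
 * Worked example of the accumulation above, with hypothetical numbers: if a
 * GSI is routed to both the PIC and the IOAPIC, the PIC entry is masked
 * (its set() callback returns -1 and is skipped) and the IOAPIC entry
 * delivers the interrupt to one vCPU (set() returns 1), kvm_set_irq()
 * returns 1.  The initial ret of -1 only survives if every entry reports
 * < 0, and a result of 0 means some entry coalesced the interrupt while
 * none delivered it.
 */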

static void free_irq_routing_table(struct kvm_irq_routing_table *rt)
{
	int i;

	if (!rt)
		return;

	for (i = 0; i < rt->nr_rt_entries; ++i) {
		struct kvm_kernel_irq_routing_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &rt->map[i], link) {
			hlist_del(&e->link);
			kfree(e);
		}
	}

	kfree(rt);
}

void kvm_free_irq_routing(struct kvm *kvm)
{
	/*
	 * Called only during vm destruction.  Nobody can use the pointer
	 * at this stage.
	 */
	struct kvm_irq_routing_table *rt = rcu_access_pointer(kvm->irq_routing);

	free_irq_routing_table(rt);
}

static int setup_routing_entry(struct kvm *kvm,
			       struct kvm_irq_routing_table *rt,
			       struct kvm_kernel_irq_routing_entry *e,
			       const struct kvm_irq_routing_entry *ue)
{
	struct kvm_kernel_irq_routing_entry *ei;
	int r;
	u32 gsi = array_index_nospec(ue->gsi, KVM_MAX_IRQ_ROUTES);

	/*
	 * Do not allow GSI to be mapped to the same irqchip more than once.
	 * Allow only one to one mapping between GSI and non-irqchip routing.
	 */
	hlist_for_each_entry(ei, &rt->map[gsi], link)
		if (ei->type != KVM_IRQ_ROUTING_IRQCHIP ||
		    ue->type != KVM_IRQ_ROUTING_IRQCHIP ||
		    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
			return -EINVAL;

	e->gsi = gsi;
	e->type = ue->type;
	r = kvm_set_routing_entry(kvm, e, ue);
	if (r)
		return r;
	if (e->type == KVM_IRQ_ROUTING_IRQCHIP)
		rt->chip[e->irqchip.irqchip][e->irqchip.pin] = e->gsi;

	hlist_add_head(&e->link, &rt->map[e->gsi]);

	return 0;
}
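
/*
 * Example of the constraint enforced above, using x86 irqchip names for
 * illustration: a single GSI may carry one entry per irqchip, e.g. both
 *
 *	{ .gsi = 4, .type = KVM_IRQ_ROUTING_IRQCHIP,
 *	  .u.irqchip = { .irqchip = KVM_IRQCHIP_PIC_MASTER, .pin = 4 } }
 *	{ .gsi = 4, .type = KVM_IRQ_ROUTING_IRQCHIP,
 *	  .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC,     .pin = 4 } }
 *
 * but a second entry for the same irqchip, or mixing an irqchip entry with
 * a non-irqchip entry (e.g. MSI) on the same GSI, is rejected with -EINVAL.
 */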

void __weak kvm_arch_irq_routing_update(struct kvm *kvm)
{
}

bool __weak kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
	return true;
}

int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *ue,
			unsigned nr,
			unsigned flags)
{
	struct kvm_irq_routing_table *new, *old;
	struct kvm_kernel_irq_routing_entry *e;
	u32 i, j, nr_rt_entries = 0;
	int r;

	for (i = 0; i < nr; ++i) {
		if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES)
			return -EINVAL;
		nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
	}

	nr_rt_entries += 1;

	new = kzalloc(struct_size(new, map, nr_rt_entries), GFP_KERNEL_ACCOUNT);
	if (!new)
		return -ENOMEM;

	new->nr_rt_entries = nr_rt_entries;
	for (i = 0; i < KVM_NR_IRQCHIPS; i++)
		for (j = 0; j < KVM_IRQCHIP_NUM_PINS; j++)
			new->chip[i][j] = -1;

	for (i = 0; i < nr; ++i) {
		r = -ENOMEM;
		e = kzalloc(sizeof(*e), GFP_KERNEL_ACCOUNT);
		if (!e)
			goto out;

		r = -EINVAL;
		switch (ue->type) {
		case KVM_IRQ_ROUTING_MSI:
			if (ue->flags & ~KVM_MSI_VALID_DEVID)
				goto free_entry;
			break;
		default:
			if (ue->flags)
				goto free_entry;
			break;
		}
		r = setup_routing_entry(kvm, new, e, ue);
		if (r)
			goto free_entry;
		++ue;
	}

	mutex_lock(&kvm->irq_lock);
	old = rcu_dereference_protected(kvm->irq_routing, 1);
	rcu_assign_pointer(kvm->irq_routing, new);
	kvm_irq_routing_update(kvm);
	kvm_arch_irq_routing_update(kvm);
	mutex_unlock(&kvm->irq_lock);

	kvm_arch_post_irq_routing_update(kvm);

	synchronize_srcu_expedited(&kvm->irq_srcu);

	/* Point 'new' at the old table so the out: path frees it. */
	new = old;
	r = 0;
	goto out;

free_entry:
	kfree(e);
out:
	free_irq_routing_table(new);

	return r;
}
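
/*
 * Illustrative sketch, not part of this file: how userspace might install a
 * routing table through the KVM_SET_GSI_ROUTING ioctl, which lands in
 * kvm_set_irq_routing() above.  "vm_fd" is assumed to come from
 * KVM_CREATE_VM and the single MSI entry uses placeholder address/data.
 *
 *	struct kvm_irq_routing *r;
 *
 *	r = calloc(1, sizeof(*r) + sizeof(r->entries[0]));
 *	r->nr = 1;
 *	r->entries[0] = (struct kvm_irq_routing_entry) {
 *		.gsi   = 24,
 *		.type  = KVM_IRQ_ROUTING_MSI,
 *		.u.msi = { .address_lo = 0xfee00000, .data = 0x4021 },
 *	};
 *	if (ioctl(vm_fd, KVM_SET_GSI_ROUTING, r) < 0)
 *		perror("KVM_SET_GSI_ROUTING");
 *	free(r);
 */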

/*
 * Allocate empty IRQ routing by default so that additional setup isn't needed
 * when userspace-driven IRQ routing is activated, and so that kvm->irq_routing
 * is guaranteed to be non-NULL.
 */
int kvm_init_irq_routing(struct kvm *kvm)
{
	struct kvm_irq_routing_table *new;
	int chip_size;

	new = kzalloc(struct_size(new, map, 1), GFP_KERNEL_ACCOUNT);
	if (!new)
		return -ENOMEM;

	new->nr_rt_entries = 1;

	chip_size = sizeof(int) * KVM_NR_IRQCHIPS * KVM_IRQCHIP_NUM_PINS;
	memset(new->chip, -1, chip_size);

	RCU_INIT_POINTER(kvm->irq_routing, new);

	return 0;
}