/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2018 Alexandru Elisei <alexandru.elisei@gmail.com>
 * Copyright (C) 2020-2022 Andrew Turner
 * Copyright (C) 2023 Arm Ltd
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/ofw/openfirm.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/cpu.h>
#include <machine/machdep.h>
#include <machine/param.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/intr.h>
#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include <arm/arm/gic_common.h>
#include <arm64/arm64/gic_v3_reg.h>
#include <arm64/arm64/gic_v3_var.h>

#include <arm64/vmm/hyp.h>
#include <arm64/vmm/mmu.h>
#include <arm64/vmm/arm64.h>
#include <arm64/vmm/vmm_handlers.h>

#include "vgic.h"
#include "vgic_v3.h"
#include "vgic_v3_reg.h"

#include "vgic_if.h"

#define	VGIC_SGI_NUM	(GIC_LAST_SGI - GIC_FIRST_SGI + 1)
#define	VGIC_PPI_NUM	(GIC_LAST_PPI - GIC_FIRST_PPI + 1)
#define	VGIC_SPI_NUM	(GIC_LAST_SPI - GIC_FIRST_SPI + 1)
#define	VGIC_PRV_I_NUM	(VGIC_SGI_NUM + VGIC_PPI_NUM)
#define	VGIC_SHR_I_NUM	(VGIC_SPI_NUM)

MALLOC_DEFINE(M_VGIC_V3, "ARM VMM VGIC V3", "ARM VMM VGIC V3");

/* TODO: Move to softc */
struct vgic_v3_virt_features {
	uint8_t min_prio;
	size_t ich_lr_num;
	size_t ich_apr_num;
};

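/*
 * Software state for a single virtual interrupt. The fields are protected
 * by irq_spinmtx; an IRQ is looked up with vgic_v3_get_irq, which acquires
 * the spin mutex, and is returned with vgic_v3_release_irq.
 */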
struct vgic_v3_irq {
	/* List of IRQs that are active or pending */
	TAILQ_ENTRY(vgic_v3_irq) act_pend_list;
	struct mtx irq_spinmtx;
	uint64_t mpidr;
	int target_vcpu;
	uint32_t irq;
	bool active;
	bool pending;
	bool enabled;
	bool level;
	bool on_aplist;
	uint8_t priority;
	uint8_t config;
#define	VGIC_CONFIG_MASK	0x2
#define	VGIC_CONFIG_LEVEL	0x0
#define	VGIC_CONFIG_EDGE	0x2
};

/* Global data not needed by EL2 */
struct vgic_v3 {
	struct mtx dist_mtx;
	uint64_t dist_start;
	size_t dist_end;

	uint64_t redist_start;
	size_t redist_end;

	uint32_t gicd_ctlr;	/* Distributor Control Register */

	struct vgic_v3_irq *irqs;
};

/* Per-CPU data not needed by EL2 */
struct vgic_v3_cpu {
	/*
	 * We need a mutex for accessing the list registers because they are
	 * modified asynchronously by the virtual timer.
	 *
	 * Note that the mutex *MUST* be a spin mutex because an interrupt can
	 * be injected by a callout callback function, thereby modifying the
	 * list registers from a context where sleeping is forbidden.
	 */
	struct mtx lr_mtx;

	struct vgic_v3_irq private_irqs[VGIC_PRV_I_NUM];
	TAILQ_HEAD(, vgic_v3_irq) irq_act_pend;
	u_int ich_lr_used;
};

/* How many IRQs we support (SGIs + PPIs + SPIs). Not including LPIs */
#define	VGIC_NIRQS	1023
/* Pretend to be an Arm design */
#define	VGIC_IIDR	0x43b

static vgic_inject_irq_t vgic_v3_inject_irq;
static vgic_inject_msi_t vgic_v3_inject_msi;

static int vgic_v3_max_cpu_count(device_t dev, struct hyp *hyp);

#define	INJECT_IRQ(hyp, vcpuid, irqid, level)			\
	vgic_v3_inject_irq(NULL, (hyp), (vcpuid), (irqid), (level))

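/*
 * Emulated register access handlers. A read handler returns the whole
 * register in *rval and the caller extracts the accessed bytes. A write
 * handler is given the byte offset and size of the access within the
 * register, along with the value being written.
 */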
typedef void (register_read)(struct hypctx *, u_int, uint64_t *, void *);
typedef void (register_write)(struct hypctx *, u_int, u_int, u_int,
    uint64_t, void *);

#define	VGIC_8_BIT	(1 << 0)
/* (1 << 1) is reserved for 16 bit accesses */
#define	VGIC_32_BIT	(1 << 2)
#define	VGIC_64_BIT	(1 << 3)

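/*
 * Describes a single emulated register, or a range of identical registers,
 * and the access widths it accepts.
 */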
struct vgic_register {
	u_int start;	/* Start within a memory region */
	u_int end;
	u_int size;
	u_int flags;
	register_read *read;
	register_write *write;
};

#define	VGIC_REGISTER_RANGE(reg_start, reg_end, reg_size, reg_flags, readf, \
    writef)								\
{									\
	.start = (reg_start),						\
	.end = (reg_end),						\
	.size = (reg_size),						\
	.flags = (reg_flags),						\
	.read = (readf),						\
	.write = (writef),						\
}

#define	VGIC_REGISTER_RANGE_RAZ_WI(reg_start, reg_end, reg_size, reg_flags) \
	VGIC_REGISTER_RANGE(reg_start, reg_end, reg_size, reg_flags,	\
	    gic_zero_read, gic_ignore_write)

#define	VGIC_REGISTER(start_addr, reg_size, reg_flags, readf, writef)	\
	VGIC_REGISTER_RANGE(start_addr, (start_addr) + (reg_size),	\
	    reg_size, reg_flags, readf, writef)

#define	VGIC_REGISTER_RAZ_WI(start_addr, reg_size, reg_flags)		\
	VGIC_REGISTER_RANGE_RAZ_WI(start_addr,				\
	    (start_addr) + (reg_size), reg_size, reg_flags)

static register_read gic_pidr2_read;
static register_read gic_zero_read;
static register_write gic_ignore_write;

/* GICD_CTLR */
static register_read dist_ctlr_read;
static register_write dist_ctlr_write;
/* GICD_TYPER */
static register_read dist_typer_read;
/* GICD_IIDR */
static register_read dist_iidr_read;
/* GICD_STATUSR - RAZ/WI as we don't report errors (yet) */
/* GICD_SETSPI_NSR & GICD_CLRSPI_NSR */
static register_write dist_setclrspi_nsr_write;
/* GICD_SETSPI_SR - RAZ/WI */
/* GICD_CLRSPI_SR - RAZ/WI */
/* GICD_IGROUPR - RAZ/WI as GICD_CTLR.ARE == 1 */
/* GICD_ISENABLER */
static register_read dist_isenabler_read;
static register_write dist_isenabler_write;
/* GICD_ICENABLER */
static register_read dist_icenabler_read;
static register_write dist_icenabler_write;
/* GICD_ISPENDR */
static register_read dist_ispendr_read;
static register_write dist_ispendr_write;
/* GICD_ICPENDR */
static register_read dist_icpendr_read;
static register_write dist_icpendr_write;
/* GICD_ISACTIVER */
static register_read dist_isactiver_read;
static register_write dist_isactiver_write;
/* GICD_ICACTIVER */
static register_read dist_icactiver_read;
static register_write dist_icactiver_write;
/* GICD_IPRIORITYR */
static register_read dist_ipriorityr_read;
static register_write dist_ipriorityr_write;
/* GICD_ITARGETSR - RAZ/WI as GICD_CTLR.ARE == 1 */
/* GICD_ICFGR */
static register_read dist_icfgr_read;
static register_write dist_icfgr_write;
/* GICD_IGRPMODR - RAZ/WI from non-secure mode */
/* GICD_NSACR - RAZ/WI from non-secure mode */
/* GICD_SGIR - RAZ/WI as GICD_CTLR.ARE == 1 */
/* GICD_CPENDSGIR - RAZ/WI as GICD_CTLR.ARE == 1 */
/* GICD_SPENDSGIR - RAZ/WI as GICD_CTLR.ARE == 1 */
/* GICD_IROUTER */
static register_read dist_irouter_read;
static register_write dist_irouter_write;

static struct vgic_register dist_registers[] = {
	VGIC_REGISTER(GICD_CTLR, 4, VGIC_32_BIT, dist_ctlr_read,
	    dist_ctlr_write),
	VGIC_REGISTER(GICD_TYPER, 4, VGIC_32_BIT, dist_typer_read,
	    gic_ignore_write),
	VGIC_REGISTER(GICD_IIDR, 4, VGIC_32_BIT, dist_iidr_read,
	    gic_ignore_write),
	VGIC_REGISTER_RAZ_WI(GICD_STATUSR, 4, VGIC_32_BIT),
	VGIC_REGISTER(GICD_SETSPI_NSR, 4, VGIC_32_BIT, gic_zero_read,
	    dist_setclrspi_nsr_write),
	VGIC_REGISTER(GICD_CLRSPI_NSR, 4, VGIC_32_BIT, gic_zero_read,
	    dist_setclrspi_nsr_write),
	VGIC_REGISTER_RAZ_WI(GICD_SETSPI_SR, 4, VGIC_32_BIT),
	VGIC_REGISTER_RAZ_WI(GICD_CLRSPI_SR, 4, VGIC_32_BIT),
	VGIC_REGISTER_RANGE_RAZ_WI(GICD_IGROUPR(0), GICD_IGROUPR(1024), 4,
	    VGIC_32_BIT),

	VGIC_REGISTER_RAZ_WI(GICD_ISENABLER(0), 4, VGIC_32_BIT),
	VGIC_REGISTER_RANGE(GICD_ISENABLER(32), GICD_ISENABLER(1024), 4,
	    VGIC_32_BIT, dist_isenabler_read, dist_isenabler_write),

	VGIC_REGISTER_RAZ_WI(GICD_ICENABLER(0), 4, VGIC_32_BIT),
	VGIC_REGISTER_RANGE(GICD_ICENABLER(32), GICD_ICENABLER(1024), 4,
	    VGIC_32_BIT, dist_icenabler_read, dist_icenabler_write),

	VGIC_REGISTER_RAZ_WI(GICD_ISPENDR(0), 4, VGIC_32_BIT),
	VGIC_REGISTER_RANGE(GICD_ISPENDR(32), GICD_ISPENDR(1024), 4,
	    VGIC_32_BIT, dist_ispendr_read, dist_ispendr_write),

	VGIC_REGISTER_RAZ_WI(GICD_ICPENDR(0), 4, VGIC_32_BIT),
	VGIC_REGISTER_RANGE(GICD_ICPENDR(32), GICD_ICPENDR(1024), 4,
	    VGIC_32_BIT, dist_icpendr_read, dist_icpendr_write),

	VGIC_REGISTER_RAZ_WI(GICD_ISACTIVER(0), 4, VGIC_32_BIT),
	VGIC_REGISTER_RANGE(GICD_ISACTIVER(32), GICD_ISACTIVER(1024), 4,
	    VGIC_32_BIT, dist_isactiver_read, dist_isactiver_write),

	VGIC_REGISTER_RAZ_WI(GICD_ICACTIVER(0), 4, VGIC_32_BIT),
	VGIC_REGISTER_RANGE(GICD_ICACTIVER(32), GICD_ICACTIVER(1024), 4,
	    VGIC_32_BIT, dist_icactiver_read, dist_icactiver_write),

	VGIC_REGISTER_RANGE_RAZ_WI(GICD_IPRIORITYR(0), GICD_IPRIORITYR(32), 4,
	    VGIC_32_BIT | VGIC_8_BIT),
	VGIC_REGISTER_RANGE(GICD_IPRIORITYR(32), GICD_IPRIORITYR(1024), 4,
	    VGIC_32_BIT | VGIC_8_BIT, dist_ipriorityr_read,
	    dist_ipriorityr_write),

	VGIC_REGISTER_RANGE_RAZ_WI(GICD_ITARGETSR(0), GICD_ITARGETSR(1024), 4,
	    VGIC_32_BIT | VGIC_8_BIT),

	VGIC_REGISTER_RANGE_RAZ_WI(GICD_ICFGR(0), GICD_ICFGR(32), 4,
	    VGIC_32_BIT),
	VGIC_REGISTER_RANGE(GICD_ICFGR(32), GICD_ICFGR(1024), 4,
	    VGIC_32_BIT, dist_icfgr_read, dist_icfgr_write),
/*
	VGIC_REGISTER_RANGE(GICD_IGRPMODR(0), GICD_IGRPMODR(1024), 4,
	    VGIC_32_BIT, dist_igrpmodr_read, dist_igrpmodr_write),
	VGIC_REGISTER_RANGE(GICD_NSACR(0), GICD_NSACR(1024), 4,
	    VGIC_32_BIT, dist_nsacr_read, dist_nsacr_write),
*/
	VGIC_REGISTER_RAZ_WI(GICD_SGIR, 4, VGIC_32_BIT),
/*
	VGIC_REGISTER_RANGE(GICD_CPENDSGIR(0), GICD_CPENDSGIR(1024), 4,
	    VGIC_32_BIT | VGIC_8_BIT, dist_cpendsgir_read,
	    dist_cpendsgir_write),
	VGIC_REGISTER_RANGE(GICD_SPENDSGIR(0), GICD_SPENDSGIR(1024), 4,
	    VGIC_32_BIT | VGIC_8_BIT, dist_spendsgir_read,
	    dist_spendsgir_write),
*/
	VGIC_REGISTER_RANGE(GICD_IROUTER(32), GICD_IROUTER(1024), 8,
	    VGIC_64_BIT | VGIC_32_BIT, dist_irouter_read, dist_irouter_write),

	VGIC_REGISTER_RANGE_RAZ_WI(GICD_PIDR4, GICD_PIDR2, 4, VGIC_32_BIT),
	VGIC_REGISTER(GICD_PIDR2, 4, VGIC_32_BIT, gic_pidr2_read,
	    gic_ignore_write),
	VGIC_REGISTER_RANGE_RAZ_WI(GICD_PIDR2 + 4, GICD_SIZE, 4, VGIC_32_BIT),
};

/* GICR_CTLR - Ignore writes as no bits can be set */
static register_read redist_ctlr_read;
/* GICR_IIDR */
static register_read redist_iidr_read;
/* GICR_TYPER */
static register_read redist_typer_read;
/* GICR_STATUSR - RAZ/WI as we don't report errors (yet) */
/* GICR_WAKER - RAZ/WI from non-secure mode */
/* GICR_SETLPIR - RAZ/WI as no LPIs are supported */
/* GICR_CLRLPIR - RAZ/WI as no LPIs are supported */
/* GICR_PROPBASER - RAZ/WI as no LPIs are supported */
/* GICR_PENDBASER - RAZ/WI as no LPIs are supported */
/* GICR_INVLPIR - RAZ/WI as no LPIs are supported */
/* GICR_INVALLR - RAZ/WI as no LPIs are supported */
/* GICR_SYNCR - RAZ/WI as no LPIs are supported */

static struct vgic_register redist_rd_registers[] = {
	VGIC_REGISTER(GICR_CTLR, 4, VGIC_32_BIT, redist_ctlr_read,
	    gic_ignore_write),
	VGIC_REGISTER(GICR_IIDR, 4, VGIC_32_BIT, redist_iidr_read,
	    gic_ignore_write),
	VGIC_REGISTER(GICR_TYPER, 8, VGIC_64_BIT | VGIC_32_BIT,
	    redist_typer_read, gic_ignore_write),
	VGIC_REGISTER_RAZ_WI(GICR_STATUSR, 4, VGIC_32_BIT),
	VGIC_REGISTER_RAZ_WI(GICR_WAKER, 4, VGIC_32_BIT),
	VGIC_REGISTER_RAZ_WI(GICR_SETLPIR, 8, VGIC_64_BIT | VGIC_32_BIT),
	VGIC_REGISTER_RAZ_WI(GICR_CLRLPIR, 8, VGIC_64_BIT | VGIC_32_BIT),
	VGIC_REGISTER_RAZ_WI(GICR_PROPBASER, 8, VGIC_64_BIT | VGIC_32_BIT),
	VGIC_REGISTER_RAZ_WI(GICR_PENDBASER, 8, VGIC_64_BIT | VGIC_32_BIT),
	VGIC_REGISTER_RAZ_WI(GICR_INVLPIR, 8, VGIC_64_BIT | VGIC_32_BIT),
	VGIC_REGISTER_RAZ_WI(GICR_INVALLR, 8, VGIC_64_BIT | VGIC_32_BIT),
	VGIC_REGISTER_RAZ_WI(GICR_SYNCR, 4, VGIC_32_BIT),

	/* These are identical to the dist registers */
	VGIC_REGISTER_RANGE_RAZ_WI(GICD_PIDR4, GICD_PIDR2, 4, VGIC_32_BIT),
	VGIC_REGISTER(GICD_PIDR2, 4, VGIC_32_BIT, gic_pidr2_read,
	    gic_ignore_write),
	VGIC_REGISTER_RANGE_RAZ_WI(GICD_PIDR2 + 4, GICD_SIZE, 4,
	    VGIC_32_BIT),
};

/* GICR_IGROUPR0 - RAZ/WI from non-secure mode */
/* GICR_ISENABLER0 */
static register_read redist_ienabler0_read;
static register_write redist_isenabler0_write;
/* GICR_ICENABLER0 */
static register_write redist_icenabler0_write;
/* GICR_ISPENDR0 */
static register_read redist_ipendr0_read;
static register_write redist_ispendr0_write;
/* GICR_ICPENDR0 */
static register_write redist_icpendr0_write;
/* GICR_ISACTIVER0 */
static register_read redist_iactiver0_read;
static register_write redist_isactiver0_write;
/* GICR_ICACTIVER0 */
static register_write redist_icactiver0_write;
/* GICR_IPRIORITYR */
static register_read redist_ipriorityr_read;
static register_write redist_ipriorityr_write;
/* GICR_ICFGR0 - RAZ/WI from non-secure mode */
/* GICR_ICFGR1 */
static register_read redist_icfgr1_read;
static register_write redist_icfgr1_write;
/* GICR_IGRPMODR0 - RAZ/WI from non-secure mode */
/* GICR_NSACR - RAZ/WI from non-secure mode */

static struct vgic_register redist_sgi_registers[] = {
	VGIC_REGISTER_RAZ_WI(GICR_IGROUPR0, 4, VGIC_32_BIT),
	VGIC_REGISTER(GICR_ISENABLER0, 4, VGIC_32_BIT, redist_ienabler0_read,
	    redist_isenabler0_write),
	VGIC_REGISTER(GICR_ICENABLER0, 4, VGIC_32_BIT, redist_ienabler0_read,
	    redist_icenabler0_write),
	VGIC_REGISTER(GICR_ISPENDR0, 4, VGIC_32_BIT, redist_ipendr0_read,
	    redist_ispendr0_write),
	VGIC_REGISTER(GICR_ICPENDR0, 4, VGIC_32_BIT, redist_ipendr0_read,
	    redist_icpendr0_write),
	VGIC_REGISTER(GICR_ISACTIVER0, 4, VGIC_32_BIT, redist_iactiver0_read,
	    redist_isactiver0_write),
	VGIC_REGISTER(GICR_ICACTIVER0, 4, VGIC_32_BIT, redist_iactiver0_read,
	    redist_icactiver0_write),
	VGIC_REGISTER_RANGE(GICR_IPRIORITYR(0), GICR_IPRIORITYR(32), 4,
	    VGIC_32_BIT | VGIC_8_BIT, redist_ipriorityr_read,
	    redist_ipriorityr_write),
	VGIC_REGISTER_RAZ_WI(GICR_ICFGR0, 4, VGIC_32_BIT),
	VGIC_REGISTER(GICR_ICFGR1, 4, VGIC_32_BIT, redist_icfgr1_read,
	    redist_icfgr1_write),
	VGIC_REGISTER_RAZ_WI(GICR_IGRPMODR0, 4, VGIC_32_BIT),
	VGIC_REGISTER_RAZ_WI(GICR_NSACR, 4, VGIC_32_BIT),
};

static struct vgic_v3_virt_features virt_features;

static struct vgic_v3_irq *vgic_v3_get_irq(struct hyp *, int, uint32_t);
static void vgic_v3_release_irq(struct vgic_v3_irq *);

/* TODO: Move to a common file */
static int
mpidr_to_vcpu(struct hyp *hyp, uint64_t mpidr)
{
	struct vm *vm;
	struct hypctx *hypctx;

	vm = hyp->vm;
	for (int i = 0; i < vm_get_maxcpus(vm); i++) {
		hypctx = hyp->ctx[i];
		if (hypctx != NULL && (hypctx->vmpidr_el2 & GICD_AFF) == mpidr)
			return (i);
	}
	return (-1);
}

static void
vgic_v3_vminit(device_t dev, struct hyp *hyp)
{
	struct vgic_v3 *vgic;

	hyp->vgic = malloc(sizeof(*hyp->vgic), M_VGIC_V3,
	    M_WAITOK | M_ZERO);
	vgic = hyp->vgic;

	/*
	 * Configure the Distributor control register. The register resets
	 * to an architecturally UNKNOWN value, so we reset to 0 to disable
	 * all functionality controlled by the register.
	 *
	 * The exception is GICD_CTLR.DS, which is RAO/WI when the
	 * Distributor supports one security state (ARM GIC Architecture
	 * Specification for GICv3 and GICv4, p. 4-464).
	 */
	vgic->gicd_ctlr = 0;

	mtx_init(&vgic->dist_mtx, "VGICv3 Distributor lock", NULL,
	    MTX_SPIN);
}

static void
vgic_v3_cpuinit(device_t dev, struct hypctx *hypctx)
{
	struct vgic_v3_cpu *vgic_cpu;
	struct vgic_v3_irq *irq;
	int i, irqid;

	hypctx->vgic_cpu = malloc(sizeof(*hypctx->vgic_cpu),
	    M_VGIC_V3, M_WAITOK | M_ZERO);
	vgic_cpu = hypctx->vgic_cpu;

	mtx_init(&vgic_cpu->lr_mtx, "VGICv3 ICH_LR_EL2 lock", NULL, MTX_SPIN);

	/* Set the SGI and PPI state */
	for (irqid = 0; irqid < VGIC_PRV_I_NUM; irqid++) {
		irq = &vgic_cpu->private_irqs[irqid];

		mtx_init(&irq->irq_spinmtx, "VGIC IRQ spinlock", NULL,
		    MTX_SPIN);
		irq->irq = irqid;
		irq->mpidr = hypctx->vmpidr_el2 & GICD_AFF;
		irq->target_vcpu = vcpu_vcpuid(hypctx->vcpu);
		MPASS(irq->target_vcpu >= 0);

		if (irqid < VGIC_SGI_NUM) {
			/* SGIs */
			irq->enabled = true;
			irq->config = VGIC_CONFIG_EDGE;
		} else {
			/* PPIs */
			irq->config = VGIC_CONFIG_LEVEL;
		}
		irq->priority = 0;
	}

	/*
	 * Configure the Interrupt Controller Hyp Control Register.
	 *
	 * ICH_HCR_EL2_En: enable virtual CPU interface.
	 *
	 * Maintenance interrupts are disabled.
	 */
	hypctx->vgic_v3_regs.ich_hcr_el2 = ICH_HCR_EL2_En;

	/*
	 * Configure the Interrupt Controller Virtual Machine Control Register.
	 *
	 * ICH_VMCR_EL2_VPMR: lowest priority mask for the VCPU interface
	 * ICH_VMCR_EL2_VBPR1_NO_PREEMPTION: disable interrupt preemption for
	 * Group 1 interrupts
	 * ICH_VMCR_EL2_VBPR0_NO_PREEMPTION: disable interrupt preemption for
	 * Group 0 interrupts
	 * ~ICH_VMCR_EL2_VEOIM: writes to EOI registers perform priority drop
	 * and interrupt deactivation.
	 * ICH_VMCR_EL2_VENG0: virtual Group 0 interrupts enabled.
	 * ICH_VMCR_EL2_VENG1: virtual Group 1 interrupts enabled.
	 */
	hypctx->vgic_v3_regs.ich_vmcr_el2 =
	    (virt_features.min_prio << ICH_VMCR_EL2_VPMR_SHIFT) |
	    ICH_VMCR_EL2_VBPR1_NO_PREEMPTION | ICH_VMCR_EL2_VBPR0_NO_PREEMPTION;
	hypctx->vgic_v3_regs.ich_vmcr_el2 &= ~ICH_VMCR_EL2_VEOIM;
	hypctx->vgic_v3_regs.ich_vmcr_el2 |= ICH_VMCR_EL2_VENG0 |
	    ICH_VMCR_EL2_VENG1;

	hypctx->vgic_v3_regs.ich_lr_num = virt_features.ich_lr_num;
	for (i = 0; i < hypctx->vgic_v3_regs.ich_lr_num; i++)
		hypctx->vgic_v3_regs.ich_lr_el2[i] = 0UL;
	vgic_cpu->ich_lr_used = 0;
	TAILQ_INIT(&vgic_cpu->irq_act_pend);

	hypctx->vgic_v3_regs.ich_apr_num = virt_features.ich_apr_num;
}

static void
vgic_v3_cpucleanup(device_t dev, struct hypctx *hypctx)
{
	struct vgic_v3_cpu *vgic_cpu;
	struct vgic_v3_irq *irq;
	int irqid;

	vgic_cpu = hypctx->vgic_cpu;
	for (irqid = 0; irqid < VGIC_PRV_I_NUM; irqid++) {
		irq = &vgic_cpu->private_irqs[irqid];
		mtx_destroy(&irq->irq_spinmtx);
	}

	mtx_destroy(&vgic_cpu->lr_mtx);
	free(hypctx->vgic_cpu, M_VGIC_V3);
}

static void
vgic_v3_vmcleanup(device_t dev, struct hyp *hyp)
{
	mtx_destroy(&hyp->vgic->dist_mtx);
	free(hyp->vgic, M_VGIC_V3);
}

static int
vgic_v3_max_cpu_count(device_t dev, struct hyp *hyp)
{
	struct vgic_v3 *vgic;
	size_t count;
	int16_t max_count;

	vgic = hyp->vgic;
	max_count = vm_get_maxcpus(hyp->vm);

	/* No redistributor registers are configured, assume the maximum CPUs */
	if (vgic->redist_start == 0 && vgic->redist_end == 0)
		return (max_count);

	count = (vgic->redist_end - vgic->redist_start) /
	    (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);

	/*
	 * max_count is smaller than INT_MAX so will also limit count
	 * to a positive integer value.
	 */
	if (count > max_count)
		return (max_count);

	return (count);
}

static bool
vgic_v3_irq_pending(struct vgic_v3_irq *irq)
{
	if ((irq->config & VGIC_CONFIG_MASK) == VGIC_CONFIG_LEVEL) {
		return (irq->pending || irq->level);
	} else {
		return (irq->pending);
	}
}

static bool
vgic_v3_queue_irq(struct hyp *hyp, struct vgic_v3_cpu *vgic_cpu,
    int vcpuid, struct vgic_v3_irq *irq)
{
	MPASS(vcpuid >= 0);
	MPASS(vcpuid < vm_get_maxcpus(hyp->vm));

	mtx_assert(&vgic_cpu->lr_mtx, MA_OWNED);
	mtx_assert(&irq->irq_spinmtx, MA_OWNED);

	/* No need to queue the IRQ */
	if (!irq->level && !irq->pending)
		return (false);

	if (!irq->on_aplist) {
		irq->on_aplist = true;
		TAILQ_INSERT_TAIL(&vgic_cpu->irq_act_pend, irq, act_pend_list);
	}
	return (true);
}

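/*
 * Merge a write of "size" bytes at byte "offset" into the 64-bit value
 * "field", e.g. a 4 byte write to the top half of GICD_IROUTER only
 * replaces bits 63:32 of the stored value.
 */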
static uint64_t
gic_reg_value_64(uint64_t field, uint64_t val, u_int offset, u_int size)
{
	uint64_t mask;

	if (offset != 0 || size != 8) {
		mask = ((1ul << (size * 8)) - 1) << (offset * 8);
		/* Shift the new bits to the correct place */
		val <<= (offset * 8);
		/* Keep only the interesting bits */
		val &= mask;
		/* Add the bits we are keeping from the old value */
		val |= field & ~mask;
	}

	return (val);
}

static void
gic_pidr2_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
	*rval = GICR_PIDR2_ARCH_GICv3 << GICR_PIDR2_ARCH_SHIFT;
}

/* Common read-only/write-ignored helpers */
static void
gic_zero_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
	*rval = 0;
}

static void
gic_ignore_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
	/* Nothing to do */
}

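/*
 * Compute the value of an I[SC]ENABLER register: bit i is set when
 * interrupt (32 * n) + i is enabled.
 */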
static uint64_t
read_enabler(struct hypctx *hypctx, int n)
{
	struct vgic_v3_irq *irq;
	uint64_t ret;
	uint32_t irq_base;
	int i;

	ret = 0;
	irq_base = n * 32;
	for (i = 0; i < 32; i++) {
		irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    irq_base + i);
		if (irq == NULL)
			continue;

		if (irq->enabled)
			ret |= 1u << i;
		vgic_v3_release_irq(irq);
	}

	return (ret);
}

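/*
 * Handle a write to an ISENABLER ("set" is true) or ICENABLER ("set" is
 * false) register: each set bit in the written value enables, or disables,
 * the corresponding interrupt; zero bits are ignored.
 */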
static void
write_enabler(struct hypctx *hypctx, int n, bool set, uint64_t val)
{
	struct vgic_v3_irq *irq;
	uint32_t irq_base;
	int i;

	irq_base = n * 32;
	for (i = 0; i < 32; i++) {
		/* We only change interrupts when the appropriate bit is set */
		if ((val & (1u << i)) == 0)
			continue;

		/* Find the interrupt this bit represents */
		irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    irq_base + i);
		if (irq == NULL)
			continue;

		irq->enabled = set;
		vgic_v3_release_irq(irq);
	}
}

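/*
 * Compute the value of an I[SC]PENDR register: bit i is set when
 * interrupt (32 * n) + i is pending, either from a software write or
 * because a level-triggered input is asserted.
 */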
static uint64_t
read_pendr(struct hypctx *hypctx, int n)
{
	struct vgic_v3_irq *irq;
	uint64_t ret;
	uint32_t irq_base;
	int i;

	ret = 0;
	irq_base = n * 32;
	for (i = 0; i < 32; i++) {
		irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    irq_base + i);
		if (irq == NULL)
			continue;

		if (vgic_v3_irq_pending(irq))
			ret |= 1u << i;
		vgic_v3_release_irq(irq);
	}

	return (ret);
}

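/*
 * Handle a write to an ISPENDR ("set" is true) or ICPENDR ("set" is false)
 * register. Marking an interrupt pending also queues it on the target
 * vcpu's active/pending list and notifies that vcpu.
 */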
static uint64_t
write_pendr(struct hypctx *hypctx, int n, bool set, uint64_t val)
{
	struct vgic_v3_cpu *vgic_cpu;
	struct vgic_v3_irq *irq;
	struct hyp *hyp;
	struct hypctx *target_hypctx;
	uint64_t ret;
	uint32_t irq_base;
	int target_vcpu, i;
	bool notify;

	hyp = hypctx->hyp;
	ret = 0;
	irq_base = n * 32;
	for (i = 0; i < 32; i++) {
		/* We only change interrupts when the appropriate bit is set */
		if ((val & (1u << i)) == 0)
			continue;

		irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    irq_base + i);
		if (irq == NULL)
			continue;

		notify = false;
		target_vcpu = irq->target_vcpu;
		if (target_vcpu < 0)
			goto next_irq;
		target_hypctx = hyp->ctx[target_vcpu];
		if (target_hypctx == NULL)
			goto next_irq;
		vgic_cpu = target_hypctx->vgic_cpu;

		if (!set) {
			/* pending -> not pending */
			irq->pending = false;
		} else {
			irq->pending = true;
			mtx_lock_spin(&vgic_cpu->lr_mtx);
			notify = vgic_v3_queue_irq(hyp, vgic_cpu, target_vcpu,
			    irq);
			mtx_unlock_spin(&vgic_cpu->lr_mtx);
		}
next_irq:
		vgic_v3_release_irq(irq);

		if (notify)
			vcpu_notify_event(vm_vcpu(hyp->vm, target_vcpu));
	}

	return (ret);
}

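/*
 * Read and write the ISACTIVER/ICACTIVER registers, which track the
 * active state of each interrupt using the same one-bit-per-interrupt
 * layout as the pending registers above.
 */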
static uint64_t
read_activer(struct hypctx *hypctx, int n)
{
	struct vgic_v3_irq *irq;
	uint64_t ret;
	uint32_t irq_base;
	int i;

	ret = 0;
	irq_base = n * 32;
	for (i = 0; i < 32; i++) {
		irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    irq_base + i);
		if (irq == NULL)
			continue;

		if (irq->active)
			ret |= 1u << i;
		vgic_v3_release_irq(irq);
	}

	return (ret);
}

static void
write_activer(struct hypctx *hypctx, u_int n, bool set, uint64_t val)
{
	struct vgic_v3_cpu *vgic_cpu;
	struct vgic_v3_irq *irq;
	struct hyp *hyp;
	struct hypctx *target_hypctx;
	uint32_t irq_base;
	int target_vcpu, i;
	bool notify;

	hyp = hypctx->hyp;
	irq_base = n * 32;
	for (i = 0; i < 32; i++) {
		/* We only change interrupts when the appropriate bit is set */
		if ((val & (1u << i)) == 0)
			continue;

		irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    irq_base + i);
		if (irq == NULL)
			continue;

		notify = false;
		target_vcpu = irq->target_vcpu;
		if (target_vcpu < 0)
			goto next_irq;
		target_hypctx = hyp->ctx[target_vcpu];
		if (target_hypctx == NULL)
			goto next_irq;
		vgic_cpu = target_hypctx->vgic_cpu;

		if (!set) {
			/* active -> not active */
			irq->active = false;
		} else {
			/* not active -> active */
			irq->active = true;
			mtx_lock_spin(&vgic_cpu->lr_mtx);
			notify = vgic_v3_queue_irq(hyp, vgic_cpu, target_vcpu,
			    irq);
			mtx_unlock_spin(&vgic_cpu->lr_mtx);
		}
next_irq:
		vgic_v3_release_irq(irq);

		if (notify)
			vcpu_notify_event(vm_vcpu(hyp->vm, target_vcpu));
	}
}

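/*
 * Each IPRIORITYR register holds the 8-bit priority fields of four
 * consecutive interrupts, starting at interrupt 4 * n.
 */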
static uint64_t
read_priorityr(struct hypctx *hypctx, int n)
{
	struct vgic_v3_irq *irq;
	uint64_t ret;
	uint32_t irq_base;
	int i;

	ret = 0;
	irq_base = n * 4;
	for (i = 0; i < 4; i++) {
		irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    irq_base + i);
		if (irq == NULL)
			continue;

		ret |= ((uint64_t)irq->priority) << (i * 8);
		vgic_v3_release_irq(irq);
	}

	return (ret);
}

static void
write_priorityr(struct hypctx *hypctx, u_int irq_base, u_int size, uint64_t val)
{
	struct vgic_v3_irq *irq;
	int i;

	for (i = 0; i < size; i++) {
		irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    irq_base + i);
		if (irq == NULL)
			continue;

		/* Set the priority. We support 32 priority steps (5 bits) */
		irq->priority = (val >> (i * 8)) & 0xf8;
		vgic_v3_release_irq(irq);
	}
}

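/*
 * Each ICFGR register holds the 2-bit trigger configuration of sixteen
 * consecutive interrupts, starting at interrupt 16 * n.
 */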
static uint64_t
read_config(struct hypctx *hypctx, int n)
{
	struct vgic_v3_irq *irq;
	uint64_t ret;
	uint32_t irq_base;
	int i;

	ret = 0;
	irq_base = n * 16;
	for (i = 0; i < 16; i++) {
		irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    irq_base + i);
		if (irq == NULL)
			continue;

		ret |= ((uint64_t)irq->config) << (i * 2);
		vgic_v3_release_irq(irq);
	}

	return (ret);
}

static void
write_config(struct hypctx *hypctx, int n, uint64_t val)
{
	struct vgic_v3_irq *irq;
	uint32_t irq_base;
	int i;

	irq_base = n * 16;
	for (i = 0; i < 16; i++) {
		/*
		 * The config can't be changed for SGIs and PPIs. SGIs have
		 * an edge-triggered behaviour, and the register is
		 * implementation defined to be read-only for PPIs.
		 */
		if (irq_base + i < VGIC_PRV_I_NUM)
			continue;

		irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
		    irq_base + i);
		if (irq == NULL)
			continue;

		/* Bit 0 is RES0 */
		irq->config = (val >> (i * 2)) & VGIC_CONFIG_MASK;
		vgic_v3_release_irq(irq);
	}
}

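/*
 * GICD_IROUTER<n> holds the affinity (MPIDR) of the vcpu that interrupt n
 * is routed to.
 */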
static uint64_t
read_route(struct hypctx *hypctx, int n)
{
	struct vgic_v3_irq *irq;
	uint64_t mpidr;

	irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), n);
	if (irq == NULL)
		return (0);

	mpidr = irq->mpidr;
	vgic_v3_release_irq(irq);

	return (mpidr);
}

static void
write_route(struct hypctx *hypctx, int n, uint64_t val, u_int offset,
    u_int size)
{
	struct vgic_v3_irq *irq;

	irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), n);
	if (irq == NULL)
		return;

	irq->mpidr = gic_reg_value_64(irq->mpidr, val, offset, size) & GICD_AFF;
	irq->target_vcpu = mpidr_to_vcpu(hypctx->hyp, irq->mpidr);
	/*
	 * If the interrupt is pending we can either use the old mpidr, or
	 * the new mpidr. To simplify this code we use the old value so we
	 * don't need to move the interrupt until the next time it is
	 * moved to the pending state.
	 */
	vgic_v3_release_irq(irq);
}

/*
 * Distributor register handlers.
 */
/* GICD_CTLR */
static void
dist_ctlr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
	struct hyp *hyp;
	struct vgic_v3 *vgic;

	hyp = hypctx->hyp;
	vgic = hyp->vgic;

	mtx_lock_spin(&vgic->dist_mtx);
	*rval = vgic->gicd_ctlr;
	mtx_unlock_spin(&vgic->dist_mtx);

	/* Writes are never pending */
	*rval &= ~GICD_CTLR_RWP;
}

static void
dist_ctlr_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
	struct vgic_v3 *vgic;

	MPASS(offset == 0);
	MPASS(size == 4);
	vgic = hypctx->hyp->vgic;

	/*
	 * GICv2 backwards compatibility is not implemented so
	 * ARE_NS is RAO/WI. This means EnableGrp1 is RES0.
	 *
	 * EnableGrp1A is supported, and RWP is read-only.
	 *
	 * All other bits are RES0 from non-secure mode as we
	 * implement as if we are in a system with two security
	 * states.
	 */
	wval &= GICD_CTLR_G1A;
	wval |= GICD_CTLR_ARE_NS;
	mtx_lock_spin(&vgic->dist_mtx);
	vgic->gicd_ctlr = wval;
	/* TODO: Wake any vcpus that have interrupts pending */
	mtx_unlock_spin(&vgic->dist_mtx);
}

/* GICD_TYPER */
static void
dist_typer_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
	uint32_t typer;

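	/* 10 bits of interrupt ID space (IDs 0-1023); IDbits is one less */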
	typer = (10 - 1) << GICD_TYPER_IDBITS_SHIFT;
	typer |= GICD_TYPER_MBIS;
	/* ITLinesNumber: */
	typer |= howmany(VGIC_NIRQS + 1, 32) - 1;

	*rval = typer;
}

/* GICD_IIDR */
static void
dist_iidr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
	*rval = VGIC_IIDR;
}

/* GICD_SETSPI_NSR & GICD_CLRSPI_NSR */
static void
dist_setclrspi_nsr_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
	uint32_t irqid;

	MPASS(offset == 0);
	MPASS(size == 4);
	irqid = wval & GICD_SPI_INTID_MASK;
	INJECT_IRQ(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu), irqid,
	    reg == GICD_SETSPI_NSR);
}

/* GICD_ISENABLER */
static void
dist_isenabler_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
	int n;

	n = (reg - GICD_ISENABLER(0)) / 4;
	/* GICD_ISENABLER0 is RAZ/WI so handled separately */
	MPASS(n > 0);
	*rval = read_enabler(hypctx, n);
}

static void
dist_isenabler_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
	int n;

	MPASS(offset == 0);
	MPASS(size == 4);
	n = (reg - GICD_ISENABLER(0)) / 4;
	/* GICD_ISENABLER0 is RAZ/WI so handled separately */
	MPASS(n > 0);
	write_enabler(hypctx, n, true, wval);
}

/* GICD_ICENABLER */
static void
dist_icenabler_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
	int n;

	n = (reg - GICD_ICENABLER(0)) / 4;
	/* GICD_ICENABLER0 is RAZ/WI so handled separately */
	MPASS(n > 0);
	*rval = read_enabler(hypctx, n);
}

static void
dist_icenabler_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
	int n;

	MPASS(offset == 0);
	MPASS(size == 4);
	n = (reg - GICD_ICENABLER(0)) / 4;
	/* GICD_ICENABLER0 is RAZ/WI so handled separately */
	MPASS(n > 0);
	write_enabler(hypctx, n, false, wval);
}

/* GICD_ISPENDR */
static void
dist_ispendr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
	int n;

	n = (reg - GICD_ISPENDR(0)) / 4;
	/* GICD_ISPENDR0 is RAZ/WI so handled separately */
	MPASS(n > 0);
	*rval = read_pendr(hypctx, n);
}

static void
dist_ispendr_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
	int n;

	MPASS(offset == 0);
	MPASS(size == 4);
	n = (reg - GICD_ISPENDR(0)) / 4;
	/* GICD_ISPENDR0 is RAZ/WI so handled separately */
	MPASS(n > 0);
	write_pendr(hypctx, n, true, wval);
}

/* GICD_ICPENDR */
static void
dist_icpendr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
	int n;

	n = (reg - GICD_ICPENDR(0)) / 4;
	/* GICD_ICPENDR0 is RAZ/WI so handled separately */
	MPASS(n > 0);
	*rval = read_pendr(hypctx, n);
}

static void
dist_icpendr_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
	int n;

	MPASS(offset == 0);
	MPASS(size == 4);
	n = (reg - GICD_ICPENDR(0)) / 4;
	/* GICD_ICPENDR0 is RAZ/WI so handled separately */
	MPASS(n > 0);
	write_pendr(hypctx, n, false, wval);
}

/* GICD_ISACTIVER */
/* Affinity routing is enabled so GICD_ISACTIVER0 is RAZ/WI */
static void
dist_isactiver_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
	int n;

	n = (reg - GICD_ISACTIVER(0)) / 4;
	/* GICD_ISACTIVER0 is RAZ/WI so handled separately */
	MPASS(n > 0);
	*rval = read_activer(hypctx, n);
}

static void
dist_isactiver_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
	int n;

	MPASS(offset == 0);
	MPASS(size == 4);
	n = (reg - GICD_ISACTIVER(0)) / 4;
	/* GICD_ISACTIVER0 is RAZ/WI so handled separately */
	MPASS(n > 0);
	write_activer(hypctx, n, true, wval);
}

/* GICD_ICACTIVER */
static void
dist_icactiver_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
	int n;

	n = (reg - GICD_ICACTIVER(0)) / 4;
	/* GICD_ICACTIVER0 is RAZ/WI so handled separately */
	MPASS(n > 0);
	*rval = read_activer(hypctx, n);
}

static void
dist_icactiver_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
	int n;

	MPASS(offset == 0);
	MPASS(size == 4);
	n = (reg - GICD_ICACTIVER(0)) / 4;
	/* GICD_ICACTIVER0 is RAZ/WI so handled separately */
	MPASS(n > 0);
	write_activer(hypctx, n, false, wval);
}

/* GICD_IPRIORITYR */
/* Affinity routing is enabled so GICD_IPRIORITYR0-7 are RAZ/WI */
static void
dist_ipriorityr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
	int n;

	n = (reg - GICD_IPRIORITYR(0)) / 4;
	/* GICD_IPRIORITYR0-7 are RAZ/WI so handled separately */
	MPASS(n > 7);
	*rval = read_priorityr(hypctx, n);
}

static void
dist_ipriorityr_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
	u_int irq_base;

	irq_base = (reg - GICD_IPRIORITYR(0)) + offset;
	/* GICD_IPRIORITYR0-7 are RAZ/WI so handled separately */
	MPASS(irq_base > 31);
	write_priorityr(hypctx, irq_base, size, wval);
}

/* GICD_ICFGR */
static void
dist_icfgr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
	int n;

	n = (reg - GICD_ICFGR(0)) / 4;
	/* GICD_ICFGR0-1 are RAZ/WI so handled separately */
	MPASS(n > 1);
	*rval = read_config(hypctx, n);
}

static void
dist_icfgr_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
	int n;

	MPASS(offset == 0);
	MPASS(size == 4);
	n = (reg - GICD_ICFGR(0)) / 4;
	/* GICD_ICFGR0-1 are RAZ/WI so handled separately */
	MPASS(n > 1);
	write_config(hypctx, n, wval);
}

/* GICD_IROUTER */
static void
dist_irouter_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
	int n;

	n = (reg - GICD_IROUTER(0)) / 8;
	/* GICD_IROUTER0-31 don't exist */
	MPASS(n > 31);
	*rval = read_route(hypctx, n);
}

static void
dist_irouter_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
	int n;

	n = (reg - GICD_IROUTER(0)) / 8;
	/* GICD_IROUTER0-31 don't exist */
	MPASS(n > 31);
	write_route(hypctx, n, wval, offset, size);
}

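/*
 * Find the entry in the given register table that covers the guest access
 * and call its handler. The VGIC_*_BIT flags are chosen so the access size
 * in bytes can be tested directly against the flags. For an access width a
 * register does not support, reads return 0 (UNKNOWN data) and writes are
 * ignored, as permitted by the GIC spec.
 */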
static bool
vgic_register_read(struct hypctx *hypctx, struct vgic_register *reg_list,
    u_int reg_list_size, u_int reg, u_int size, uint64_t *rval, void *arg)
{
	u_int i, offset;

	for (i = 0; i < reg_list_size; i++) {
		if (reg_list[i].start <= reg && reg_list[i].end >= reg + size) {
			offset = reg & (reg_list[i].size - 1);
			reg -= offset;
			if ((reg_list[i].flags & size) != 0) {
				reg_list[i].read(hypctx, reg, rval, NULL);

				/* Move the bits into the correct place */
				*rval >>= (offset * 8);
				if (size < 8) {
					*rval &= (1ul << (size * 8)) - 1;
				}
			} else {
				/*
				 * The access is an invalid size. Section
				 * 12.1.3 "GIC memory-mapped register access"
				 * of the GICv3 and GICv4 spec issue H
				 * (IHI0069) lists the options. For a read
				 * the controller returns unknown data, in
				 * this case it is zero.
				 */
				*rval = 0;
			}
			return (true);
		}
	}
	return (false);
}

static bool
vgic_register_write(struct hypctx *hypctx, struct vgic_register *reg_list,
    u_int reg_list_size, u_int reg, u_int size, uint64_t wval, void *arg)
{
	u_int i, offset;

	for (i = 0; i < reg_list_size; i++) {
		if (reg_list[i].start <= reg && reg_list[i].end >= reg + size) {
			offset = reg & (reg_list[i].size - 1);
			reg -= offset;
			if ((reg_list[i].flags & size) != 0) {
				reg_list[i].write(hypctx, reg, offset,
				    size, wval, NULL);
			} else {
				/*
				 * See the comment in vgic_register_read.
				 * For writes the controller ignores the
				 * operation.
				 */
			}
			return (true);
		}
	}
	return (false);
}

static int
dist_read(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t *rval,
    int size, void *arg)
{
	struct hyp *hyp;
	struct hypctx *hypctx;
	struct vgic_v3 *vgic;
	uint64_t reg;

	hypctx = vcpu_get_cookie(vcpu);
	hyp = hypctx->hyp;
	vgic = hyp->vgic;

	/* Check the register is one of ours and is the correct size */
	if (fault_ipa < vgic->dist_start || fault_ipa + size > vgic->dist_end) {
		return (EINVAL);
	}

	reg = fault_ipa - vgic->dist_start;
	/*
	 * As described in vgic_register_read an access with an invalid
	 * alignment is read with an unknown value
	 */
	if ((reg & (size - 1)) != 0) {
		*rval = 0;
		return (0);
	}

	if (vgic_register_read(hypctx, dist_registers, nitems(dist_registers),
	    reg, size, rval, NULL))
		return (0);

	/* Reserved register addresses are RES0 so we can hardwire it to 0 */
	*rval = 0;

	return (0);
}

static int
dist_write(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t wval,
    int size, void *arg)
{
	struct hyp *hyp;
	struct hypctx *hypctx;
	struct vgic_v3 *vgic;
	uint64_t reg;

	hypctx = vcpu_get_cookie(vcpu);
	hyp = hypctx->hyp;
	vgic = hyp->vgic;

	/* Check the register is one of ours and is the correct size */
	if (fault_ipa < vgic->dist_start || fault_ipa + size > vgic->dist_end) {
		return (EINVAL);
	}

	reg = fault_ipa - vgic->dist_start;
	/*
	 * As described in vgic_register_read an access with an invalid
	 * alignment is write ignored.
	 */
	if ((reg & (size - 1)) != 0)
		return (0);

	if (vgic_register_write(hypctx, dist_registers, nitems(dist_registers),
	    reg, size, wval, NULL))
		return (0);

	/* Reserved register addresses are RES0 so we can ignore the write */
	return (0);
}

/*
 * Redistributor register handlers.
 *
 * RD_base:
 */
/* GICR_CTLR */
static void
redist_ctlr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
	/* LPIs not supported */
	*rval = 0;
}

/* GICR_IIDR */
static void
redist_iidr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
	*rval = VGIC_IIDR;
}

/* GICR_TYPER */
static void
redist_typer_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
	uint64_t aff, gicr_typer, vmpidr_el2;
	bool last_vcpu;

	last_vcpu = false;
	if (vcpu_vcpuid(hypctx->vcpu) == (vgic_max_cpu_count(hypctx->hyp) - 1))
		last_vcpu = true;

	vmpidr_el2 = hypctx->vmpidr_el2;
	MPASS(vmpidr_el2 != 0);
	/*
	 * Get affinity for the current CPU. The guest CPU affinity is taken
	 * from VMPIDR_EL2. The Redistributor corresponding to this CPU is
	 * the Redistributor with the same affinity from GICR_TYPER.
	 */
	aff = (CPU_AFF3(vmpidr_el2) << 24) | (CPU_AFF2(vmpidr_el2) << 16) |
	    (CPU_AFF1(vmpidr_el2) << 8) | CPU_AFF0(vmpidr_el2);

	/* Set up GICR_TYPER. */
	gicr_typer = aff << GICR_TYPER_AFF_SHIFT;
	/* Set the vcpu as the processor ID */
	gicr_typer |=
	    (uint64_t)vcpu_vcpuid(hypctx->vcpu) << GICR_TYPER_CPUNUM_SHIFT;

	if (last_vcpu)
		/* Mark the last Redistributor */
		gicr_typer |= GICR_TYPER_LAST;

	*rval = gicr_typer;
}

/*
 * SGI_base:
 */
/* GICR_ISENABLER0 */
static void
redist_ienabler0_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
	*rval = read_enabler(hypctx, 0);
}

static void
redist_isenabler0_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
	MPASS(offset == 0);
	MPASS(size == 4);
	write_enabler(hypctx, 0, true, wval);
}

/* GICR_ICENABLER0 */
static void
redist_icenabler0_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
	MPASS(offset == 0);
	MPASS(size == 4);
	write_enabler(hypctx, 0, false, wval);
}

/* GICR_ISPENDR0 */
static void
redist_ipendr0_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
	*rval = read_pendr(hypctx, 0);
}

static void
redist_ispendr0_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
	MPASS(offset == 0);
	MPASS(size == 4);
	write_pendr(hypctx, 0, true, wval);
}

/* GICR_ICPENDR0 */
static void
redist_icpendr0_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
	MPASS(offset == 0);
	MPASS(size == 4);
	write_pendr(hypctx, 0, false, wval);
}

/* GICR_ISACTIVER0 */
static void
redist_iactiver0_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
	*rval = read_activer(hypctx, 0);
}

static void
redist_isactiver0_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
	write_activer(hypctx, 0, true, wval);
}

/* GICR_ICACTIVER0 */
static void
redist_icactiver0_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
	write_activer(hypctx, 0, false, wval);
}

/* GICR_IPRIORITYR */
static void
redist_ipriorityr_read(struct hypctx *hypctx, u_int reg, uint64_t *rval,
    void *arg)
{
	int n;

	n = (reg - GICR_IPRIORITYR(0)) / 4;
	*rval = read_priorityr(hypctx, n);
}

static void
redist_ipriorityr_write(struct hypctx *hypctx, u_int reg, u_int offset,
    u_int size, uint64_t wval, void *arg)
{
	u_int irq_base;

	irq_base = (reg - GICR_IPRIORITYR(0)) + offset;
	write_priorityr(hypctx, irq_base, size, wval);
}

/* GICR_ICFGR1 */
static void
redist_icfgr1_read(struct hypctx *hypctx, u_int reg, uint64_t *rval, void *arg)
{
	*rval = read_config(hypctx, 1);
}

static void
redist_icfgr1_write(struct hypctx *hypctx, u_int reg, u_int offset, u_int size,
    uint64_t wval, void *arg)
{
	MPASS(offset == 0);
	MPASS(size == 4);
	write_config(hypctx, 1, wval);
}

static int
redist_read(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t *rval,
    int size, void *arg)
{
	struct hyp *hyp;
	struct hypctx *hypctx, *target_hypctx;
	struct vgic_v3 *vgic;
	uint64_t reg;
	int vcpuid;

	/* Find the current vcpu ctx to get the vgic struct */
	hypctx = vcpu_get_cookie(vcpu);
	hyp = hypctx->hyp;
	vgic = hyp->vgic;

	/* Check the register is one of ours and is the correct size */
	if (fault_ipa < vgic->redist_start ||
	    fault_ipa + size > vgic->redist_end) {
		return (EINVAL);
	}

	vcpuid = (fault_ipa - vgic->redist_start) /
	    (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
	if (vcpuid >= vm_get_maxcpus(hyp->vm)) {
		/*
		 * This should never happen, but let's be defensive so if it
		 * does we don't panic a non-INVARIANTS kernel.
		 */
#ifdef INVARIANTS
		panic("%s: Invalid vcpuid %d", __func__, vcpuid);
#else
		*rval = 0;
		return (0);
#endif
	}

	/* Find the target vcpu ctx for the access */
	target_hypctx = hyp->ctx[vcpuid];
	if (target_hypctx == NULL) {
		/*
		 * The CPU has not yet started. The redistributor and CPU are
		 * in the same power domain. As such the redistributor will
		 * also be powered down so any access will raise an external
		 * abort.
		 */
		raise_data_insn_abort(hypctx, fault_ipa, true,
		    ISS_DATA_DFSC_EXT);
		return (0);
	}

	reg = (fault_ipa - vgic->redist_start) %
	    (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);

	/*
	 * As described in vgic_register_read an access with an invalid
	 * alignment is read with an unknown value
	 */
	if ((reg & (size - 1)) != 0) {
		*rval = 0;
		return (0);
	}

	if (reg < GICR_RD_BASE_SIZE) {
		if (vgic_register_read(target_hypctx, redist_rd_registers,
		    nitems(redist_rd_registers), reg, size, rval, NULL))
			return (0);
	} else if (reg < (GICR_SGI_BASE + GICR_SGI_BASE_SIZE)) {
		if (vgic_register_read(target_hypctx, redist_sgi_registers,
		    nitems(redist_sgi_registers), reg - GICR_SGI_BASE, size,
		    rval, NULL))
			return (0);
	}

	/* Reserved register addresses are RES0 so we can hardwire it to 0 */
	*rval = 0;
	return (0);
}

static int
redist_write(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t wval,
    int size, void *arg)
{
	struct hyp *hyp;
	struct hypctx *hypctx, *target_hypctx;
	struct vgic_v3 *vgic;
	uint64_t reg;
	int vcpuid;

	/* Find the current vcpu ctx to get the vgic struct */
	hypctx = vcpu_get_cookie(vcpu);
	hyp = hypctx->hyp;
	vgic = hyp->vgic;

	/* Check the register is one of ours and is the correct size */
	if (fault_ipa < vgic->redist_start ||
	    fault_ipa + size > vgic->redist_end) {
		return (EINVAL);
	}

	vcpuid = (fault_ipa - vgic->redist_start) /
	    (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
	if (vcpuid >= vm_get_maxcpus(hyp->vm)) {
		/*
		 * This should never happen, but let's be defensive so if it
		 * does we don't panic a non-INVARIANTS kernel.
		 */
#ifdef INVARIANTS
		panic("%s: Invalid vcpuid %d", __func__, vcpuid);
#else
		return (0);
#endif
	}

	/* Find the target vcpu ctx for the access */
	target_hypctx = hyp->ctx[vcpuid];
	if (target_hypctx == NULL) {
		/*
		 * The CPU has not yet started. The redistributor and CPU are
		 * in the same power domain. As such the redistributor will
		 * also be powered down so any access will raise an external
		 * abort.
		 */
		raise_data_insn_abort(hypctx, fault_ipa, true,
		    ISS_DATA_DFSC_EXT);
		return (0);
	}

	reg = (fault_ipa - vgic->redist_start) %
	    (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);

	/*
	 * As described in vgic_register_read an access with an invalid
	 * alignment is write ignored.
	 */
	if ((reg & (size - 1)) != 0)
		return (0);

	if (reg < GICR_RD_BASE_SIZE) {
		if (vgic_register_write(target_hypctx, redist_rd_registers,
		    nitems(redist_rd_registers), reg, size, wval, NULL))
			return (0);
	} else if (reg < (GICR_SGI_BASE + GICR_SGI_BASE_SIZE)) {
		if (vgic_register_write(target_hypctx, redist_sgi_registers,
		    nitems(redist_sgi_registers), reg - GICR_SGI_BASE, size,
		    wval, NULL))
			return (0);
	}

	/* Reserved register addresses are RES0 so we can ignore the write */
	return (0);
}

1763 static int
1764 vgic_v3_icc_sgi1r_read(struct vcpu *vcpu, uint64_t *rval, void *arg)
1765 {
1766 /*
1767 * TODO: Inject an unknown exception.
1768 */
1769 *rval = 0;
1770 return (0);
1771 }
1772
1773 static int
1774 vgic_v3_icc_sgi1r_write(struct vcpu *vcpu, uint64_t rval, void *arg)
1775 {
1776 struct vm *vm;
1777 struct hyp *hyp;
1778 cpuset_t active_cpus;
1779 uint64_t mpidr, aff1, aff2, aff3;
1780 uint32_t irqid;
1781 int cpus, cpu_off, target_vcpuid, vcpuid;
1782
1783 vm = vcpu_vm(vcpu);
1784 hyp = vm_get_cookie(vm);
1785 active_cpus = vm_active_cpus(vm);
1786 vcpuid = vcpu_vcpuid(vcpu);
1787
1788 irqid = ICC_SGI1R_EL1_SGIID_VAL(rval) >> ICC_SGI1R_EL1_SGIID_SHIFT;
1789 if ((rval & ICC_SGI1R_EL1_IRM) == 0) {
1790 		/* A non-zero Range Selector targets Aff0 >= 16: no vcpus there */
1791 if (ICC_SGI1R_EL1_RS_VAL(rval) != 0)
1792 return (0);
1793
1794 aff1 = ICC_SGI1R_EL1_AFF1_VAL(rval) >> ICC_SGI1R_EL1_AFF1_SHIFT;
1795 aff2 = ICC_SGI1R_EL1_AFF2_VAL(rval) >> ICC_SGI1R_EL1_AFF2_SHIFT;
1796 aff3 = ICC_SGI1R_EL1_AFF3_VAL(rval) >> ICC_SGI1R_EL1_AFF3_SHIFT;
1797 mpidr = aff3 << MPIDR_AFF3_SHIFT |
1798 aff2 << MPIDR_AFF2_SHIFT | aff1 << MPIDR_AFF1_SHIFT;
1799
1800 cpus = ICC_SGI1R_EL1_TL_VAL(rval) >> ICC_SGI1R_EL1_TL_SHIFT;
1801 cpu_off = 0;
1802 while (cpus > 0) {
1803 if (cpus & 1) {
1804 target_vcpuid = mpidr_to_vcpu(hyp,
1805 mpidr | (cpu_off << MPIDR_AFF0_SHIFT));
1806 if (target_vcpuid >= 0 &&
1807 CPU_ISSET(target_vcpuid, &active_cpus)) {
1808 INJECT_IRQ(hyp, target_vcpuid, irqid,
1809 true);
1810 }
1811 }
1812 cpu_off++;
1813 cpus >>= 1;
1814 }
1815 } else {
1816 /* Send an IPI to all CPUs other than the current CPU */
1817 for (target_vcpuid = 0; target_vcpuid < vm_get_maxcpus(vm);
1818 target_vcpuid++) {
1819 if (CPU_ISSET(target_vcpuid, &active_cpus) &&
1820 target_vcpuid != vcpuid) {
1821 INJECT_IRQ(hyp, target_vcpuid, irqid, true);
1822 }
1823 }
1824 }
1825
1826 return (0);
1827 }
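/*
 * A sketch of the decode above with illustrative register contents: a
 * guest write with SGIID = 6, IRM = 0, RS = 0, Aff3.Aff2.Aff1 = 0.0.0 and
 * TargetList = 0b0101 walks the two set bits and calls INJECT_IRQ() for
 * SGI 6 on the vcpus whose MPIDRs decode to affinity 0.0.0.0 and 0.0.0.2,
 * skipping any that are not in the VM's active cpuset.
 */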
1828
1829 static void
1830 vgic_v3_mmio_init(struct hyp *hyp)
1831 {
1832 struct vgic_v3 *vgic;
1833 struct vgic_v3_irq *irq;
1834 int i;
1835
1836 /* Allocate memory for the SPIs */
1837 vgic = hyp->vgic;
1838 vgic->irqs = malloc((VGIC_NIRQS - VGIC_PRV_I_NUM) *
1839 sizeof(*vgic->irqs), M_VGIC_V3, M_WAITOK | M_ZERO);
1840
1841 for (i = 0; i < VGIC_NIRQS - VGIC_PRV_I_NUM; i++) {
1842 irq = &vgic->irqs[i];
1843
1844 mtx_init(&irq->irq_spinmtx, "VGIC IRQ spinlock", NULL,
1845 MTX_SPIN);
1846
1847 irq->irq = i + VGIC_PRV_I_NUM;
1848 }
1849 }
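/*
 * Sizing note: with the architected 16 SGIs (INTIDs 0-15) and 16 PPIs
 * (INTIDs 16-31), VGIC_PRV_I_NUM is 32, so the array above holds only the
 * shared SPIs: irqs[i] models INTID 32 + i.
 */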
1850
1851 static void
1852 vgic_v3_mmio_destroy(struct hyp *hyp)
1853 {
1854 struct vgic_v3 *vgic;
1855 struct vgic_v3_irq *irq;
1856 int i;
1857
1858 vgic = hyp->vgic;
1859 for (i = 0; i < VGIC_NIRQS - VGIC_PRV_I_NUM; i++) {
1860 irq = &vgic->irqs[i];
1861
1862 mtx_destroy(&irq->irq_spinmtx);
1863 }
1864
1865 free(vgic->irqs, M_VGIC_V3);
1866 }
1867
1868 static int
1869 vgic_v3_attach_to_vm(device_t dev, struct hyp *hyp, struct vm_vgic_descr *descr)
1870 {
1871 struct vm *vm;
1872 struct vgic_v3 *vgic;
1873 size_t cpu_count;
1874
1875 if (descr->ver.version != 3)
1876 return (EINVAL);
1877
1878 /*
1879 	 * The register bases need to be 64k aligned and the redist
1880 	 * register space must be a multiple of the RD + SGI frame size.
1881 */
1882 if (!__is_aligned(descr->v3_regs.dist_start, PAGE_SIZE_64K) ||
1883 !__is_aligned(descr->v3_regs.redist_start, PAGE_SIZE_64K) ||
1884 !__is_aligned(descr->v3_regs.redist_size,
1885 GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE))
1886 return (EINVAL);
1887
1888 	/* The dist register space is exactly one 64k block */
1889 if (descr->v3_regs.dist_size != PAGE_SIZE_64K)
1890 return (EINVAL);
1891
1892 vm = hyp->vm;
1893
1894 /*
1895 * Return an error if the redist space is too large for the maximum
1896 * number of CPUs we support.
1897 */
1898 cpu_count = descr->v3_regs.redist_size /
1899 (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE);
1900 if (cpu_count > vm_get_maxcpus(vm))
1901 return (EINVAL);
1902
1903 vgic = hyp->vgic;
1904
1905 /* Set the distributor address and size for trapping guest access. */
1906 vgic->dist_start = descr->v3_regs.dist_start;
1907 vgic->dist_end = descr->v3_regs.dist_start + descr->v3_regs.dist_size;
1908
1909 vgic->redist_start = descr->v3_regs.redist_start;
1910 vgic->redist_end = descr->v3_regs.redist_start +
1911 descr->v3_regs.redist_size;
1912
1913 vm_register_inst_handler(vm, descr->v3_regs.dist_start,
1914 descr->v3_regs.dist_size, dist_read, dist_write);
1915 vm_register_inst_handler(vm, descr->v3_regs.redist_start,
1916 descr->v3_regs.redist_size, redist_read, redist_write);
1917
1918 vm_register_reg_handler(vm, ISS_MSR_REG(ICC_SGI1R_EL1),
1919 ISS_MSR_REG_MASK, vgic_v3_icc_sgi1r_read, vgic_v3_icc_sgi1r_write,
1920 NULL);
1921
1922 vgic_v3_mmio_init(hyp);
1923
1924 hyp->vgic_attached = true;
1925
1926 return (0);
1927 }
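/*
 * A hedged example of a descriptor the checks above accept, with
 * hypothetical guest physical addresses and room for two vcpus (64 KiB RD
 * and SGI frames assumed):
 *
 *	struct vm_vgic_descr descr = {
 *		.ver.version = 3,
 *		.v3_regs.dist_start = 0x2f000000,	64k aligned
 *		.v3_regs.dist_size = 0x10000,		one 64k block
 *		.v3_regs.redist_start = 0x2f100000,	64k aligned
 *		.v3_regs.redist_size = 2 * 0x20000,	two RD + SGI frames
 *	};
 */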
1928
1929 static void
1930 vgic_v3_detach_from_vm(device_t dev, struct hyp *hyp)
1931 {
1932 if (hyp->vgic_attached) {
1933 hyp->vgic_attached = false;
1934 vgic_v3_mmio_destroy(hyp);
1935 }
1936 }
1937
1938 static struct vgic_v3_irq *
1939 vgic_v3_get_irq(struct hyp *hyp, int vcpuid, uint32_t irqid)
1940 {
1941 struct vgic_v3_cpu *vgic_cpu;
1942 struct vgic_v3_irq *irq;
1943 struct hypctx *hypctx;
1944
1945 if (irqid < VGIC_PRV_I_NUM) {
1946 if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(hyp->vm))
1947 return (NULL);
1948 hypctx = hyp->ctx[vcpuid];
1949 if (hypctx == NULL)
1950 return (NULL);
1951 vgic_cpu = hypctx->vgic_cpu;
1952 irq = &vgic_cpu->private_irqs[irqid];
1953 } else if (irqid <= GIC_LAST_SPI) {
1954 irqid -= VGIC_PRV_I_NUM;
1955 if (irqid >= VGIC_NIRQS)
1956 return (NULL);
1957 irq = &hyp->vgic->irqs[irqid];
1958 } else if (irqid < GIC_FIRST_LPI) {
1959 return (NULL);
1960 } else {
1961 /* No support for LPIs */
1962 return (NULL);
1963 }
1964
1965 mtx_lock_spin(&irq->irq_spinmtx);
1966 return (irq);
1967 }
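/*
 * INTID routing used above, following the GICv3 numbering: INTIDs 0-31
 * (SGIs and PPIs) resolve to the target vcpu's private_irqs array, 32
 * through GIC_LAST_SPI resolve to the shared irqs array allocated in
 * vgic_v3_mmio_init(), the special INTIDs between the SPIs and
 * GIC_FIRST_LPI have no backing state, and LPIs are not modelled, so both
 * of the last two ranges return NULL.
 */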
1968
1969 static void
1970 vgic_v3_release_irq(struct vgic_v3_irq *irq)
1971 {
1972
1973 mtx_unlock_spin(&irq->irq_spinmtx);
1974 }
1975
1976 static bool
1977 vgic_v3_has_pending_irq(device_t dev, struct hypctx *hypctx)
1978 {
1979 struct vgic_v3_cpu *vgic_cpu;
1980 bool empty;
1981
1982 vgic_cpu = hypctx->vgic_cpu;
1983 mtx_lock_spin(&vgic_cpu->lr_mtx);
1984 empty = TAILQ_EMPTY(&vgic_cpu->irq_act_pend);
1985 mtx_unlock_spin(&vgic_cpu->lr_mtx);
1986
1987 return (!empty);
1988 }
1989
1990 static bool
1991 vgic_v3_check_irq(struct vgic_v3_irq *irq, bool level)
1992 {
1993 /*
1994 * Only inject if:
1995 * - Level-triggered IRQ: level changes low -> high
1996 * - Edge-triggered IRQ: level is high
1997 */
1998 switch (irq->config & VGIC_CONFIG_MASK) {
1999 case VGIC_CONFIG_LEVEL:
2000 return (level != irq->level);
2001 case VGIC_CONFIG_EDGE:
2002 return (level);
2003 default:
2004 break;
2005 }
2006
2007 return (false);
2008 }
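/*
 * The return value above as a truth table; note that for a
 * level-configured IRQ any change is reported, including high -> low, so
 * the caller can record the dropped level:
 *
 *	config	irq->level	level	returns
 *	LEVEL	low		high	true
 *	LEVEL	high		low	true
 *	LEVEL	(unchanged)		false
 *	EDGE	-		high	true
 *	EDGE	-		low	false
 */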
2009
2010 static int
2011 vgic_v3_inject_irq(device_t dev, struct hyp *hyp, int vcpuid, uint32_t irqid,
2012 bool level)
2013 {
2014 struct vgic_v3_cpu *vgic_cpu;
2015 struct vgic_v3_irq *irq;
2016 struct hypctx *hypctx;
2017 int target_vcpu;
2018 bool notify;
2019
2020 if (!hyp->vgic_attached)
2021 return (ENODEV);
2022
2023 KASSERT(vcpuid == -1 || irqid < VGIC_PRV_I_NUM,
2024 ("%s: SPI/LPI with vcpuid set: irq %u vcpuid %u", __func__, irqid,
2025 vcpuid));
2026
2027 irq = vgic_v3_get_irq(hyp, vcpuid, irqid);
2028 if (irq == NULL) {
2029 eprintf("Malformed IRQ %u.\n", irqid);
2030 return (EINVAL);
2031 }
2032
2033 target_vcpu = irq->target_vcpu;
2034 KASSERT(vcpuid == -1 || vcpuid == target_vcpu,
2035 ("%s: Interrupt %u has bad cpu affinity: vcpu %d target vcpu %d",
2036 __func__, irqid, vcpuid, target_vcpu));
2037 KASSERT(target_vcpu >= 0 && target_vcpu < vm_get_maxcpus(hyp->vm),
2038 ("%s: Interrupt %u sent to invalid vcpu %d", __func__, irqid,
2039 target_vcpu));
2040
2041 if (vcpuid == -1)
2042 vcpuid = target_vcpu;
2043 /* TODO: Check from 0 to vm->maxcpus */
2044 if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(hyp->vm)) {
2045 vgic_v3_release_irq(irq);
2046 return (EINVAL);
2047 }
2048
2049 hypctx = hyp->ctx[vcpuid];
2050 if (hypctx == NULL) {
2051 vgic_v3_release_irq(irq);
2052 return (EINVAL);
2053 }
2054
2055 notify = false;
2056 vgic_cpu = hypctx->vgic_cpu;
2057
2058 mtx_lock_spin(&vgic_cpu->lr_mtx);
2059
2060 if (!vgic_v3_check_irq(irq, level)) {
2061 goto out;
2062 }
2063
2064 if ((irq->config & VGIC_CONFIG_MASK) == VGIC_CONFIG_LEVEL)
2065 irq->level = level;
2066 else /* VGIC_CONFIG_EDGE */
2067 irq->pending = true;
2068
2069 notify = vgic_v3_queue_irq(hyp, vgic_cpu, vcpuid, irq);
2070
2071 out:
2072 mtx_unlock_spin(&vgic_cpu->lr_mtx);
2073 vgic_v3_release_irq(irq);
2074
2075 if (notify)
2076 vcpu_notify_event(vm_vcpu(hyp->vm, vcpuid));
2077
2078 return (0);
2079 }
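/*
 * Callers reach this through the INJECT_IRQ() wrapper; e.g. the SGI path
 * above passes an explicit target vcpuid for a private interrupt, while
 * the MSI path below passes -1 so the SPI's own target_vcpu affinity is
 * used instead.
 */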
2080
2081 static int
2082 vgic_v3_inject_msi(device_t dev, struct hyp *hyp, uint64_t msg, uint64_t addr)
2083 {
2084 struct vgic_v3 *vgic;
2085 uint64_t reg;
2086
2087 vgic = hyp->vgic;
2088
2089 	/* This is a 4-byte register */
2090 if (addr < vgic->dist_start || addr + 4 > vgic->dist_end) {
2091 return (EINVAL);
2092 }
2093
2094 reg = addr - vgic->dist_start;
2095 if (reg != GICD_SETSPI_NSR)
2096 return (EINVAL);
2097
2098 return (INJECT_IRQ(hyp, -1, msg, true));
2099 }
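/*
 * MSI example, with an illustrative base address: if dist_start is
 * 0x2f000000, a 4-byte MSI write of the value 40 to guest physical
 * address dist_start + GICD_SETSPI_NSR raises SPI 40, delivered via
 * vgic_v3_inject_irq() with a vcpuid of -1, i.e. to the SPI's configured
 * target vcpu.
 */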
2100
2101 static void
2102 vgic_v3_flush_hwstate(device_t dev, struct hypctx *hypctx)
2103 {
2104 struct vgic_v3_cpu *vgic_cpu;
2105 struct vgic_v3_irq *irq;
2106 int i;
2107
2108 vgic_cpu = hypctx->vgic_cpu;
2109
2110 /*
2111 	 * All Distributor writes have been executed at this point, so we
2112 	 * do not need to protect Distributor reads with a mutex.
2113 	 *
2114 	 * This is called with all interrupts disabled, so there is no need
2115 	 * for a List Register spinlock either.
2116 */
2117 mtx_lock_spin(&vgic_cpu->lr_mtx);
2118
2119 hypctx->vgic_v3_regs.ich_hcr_el2 &= ~ICH_HCR_EL2_UIE;
2120
2121 /* Exit early if there are no buffered interrupts */
2122 if (TAILQ_EMPTY(&vgic_cpu->irq_act_pend))
2123 goto out;
2124
2125 KASSERT(vgic_cpu->ich_lr_used == 0, ("%s: Used LR count not zero %u",
2126 __func__, vgic_cpu->ich_lr_used));
2127
2128 i = 0;
2129 hypctx->vgic_v3_regs.ich_elrsr_el2 =
2130 (1u << hypctx->vgic_v3_regs.ich_lr_num) - 1;
2131 TAILQ_FOREACH(irq, &vgic_cpu->irq_act_pend, act_pend_list) {
2132 /* No free list register, stop searching for IRQs */
2133 if (i == hypctx->vgic_v3_regs.ich_lr_num)
2134 break;
2135
2136 if (!irq->enabled)
2137 continue;
2138
2139 hypctx->vgic_v3_regs.ich_lr_el2[i] = ICH_LR_EL2_GROUP1 |
2140 ((uint64_t)irq->priority << ICH_LR_EL2_PRIO_SHIFT) |
2141 irq->irq;
2142
2143 if (irq->active) {
2144 hypctx->vgic_v3_regs.ich_lr_el2[i] |=
2145 ICH_LR_EL2_STATE_ACTIVE;
2146 }
2147
2148 #ifdef notyet
2149 /* TODO: Check why this is needed */
2150 		if ((irq->config & VGIC_CONFIG_MASK) == VGIC_CONFIG_LEVEL)
2151 hypctx->vgic_v3_regs.ich_lr_el2[i] |= ICH_LR_EL2_EOI;
2152 #endif
2153
2154 if (!irq->active && vgic_v3_irq_pending(irq)) {
2155 hypctx->vgic_v3_regs.ich_lr_el2[i] |=
2156 ICH_LR_EL2_STATE_PENDING;
2157
2158 /*
2159 * This IRQ is now pending on the guest. Allow for
2160 * another edge that could cause the interrupt to
2161 * be raised again.
2162 */
2163 if ((irq->config & VGIC_CONFIG_MASK) ==
2164 VGIC_CONFIG_EDGE) {
2165 irq->pending = false;
2166 }
2167 }
2168
2169 i++;
2170 }
2171 vgic_cpu->ich_lr_used = i;
2172
2173 out:
2174 mtx_unlock_spin(&vgic_cpu->lr_mtx);
2175 }
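/*
 * Sketch of a list register as composed above, using the same field
 * macros: an enabled, pending, edge-configured SPI 40 at priority 0x80
 * would be written as
 *
 *	ich_lr_el2[i] = ICH_LR_EL2_GROUP1 |
 *	    (0x80UL << ICH_LR_EL2_PRIO_SHIFT) |
 *	    ICH_LR_EL2_STATE_PENDING | 40;
 *
 * after which irq->pending is cleared so a later edge can re-assert it.
 */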
2176
2177 static void
2178 vgic_v3_sync_hwstate(device_t dev, struct hypctx *hypctx)
2179 {
2180 struct vgic_v3_cpu *vgic_cpu;
2181 struct vgic_v3_irq *irq;
2182 uint64_t lr;
2183 int i;
2184
2185 vgic_cpu = hypctx->vgic_cpu;
2186
2187 /* Exit early if there are no buffered interrupts */
2188 if (vgic_cpu->ich_lr_used == 0)
2189 return;
2190
2191 /*
2192 * Check on the IRQ state after running the guest. ich_lr_used and
2193 	 * ich_lr_el2 are only ever used within this thread, so it is safe
2194 	 * to access them unlocked.
2195 */
2196 for (i = 0; i < vgic_cpu->ich_lr_used; i++) {
2197 lr = hypctx->vgic_v3_regs.ich_lr_el2[i];
2198 hypctx->vgic_v3_regs.ich_lr_el2[i] = 0;
2199
2200 irq = vgic_v3_get_irq(hypctx->hyp, vcpu_vcpuid(hypctx->vcpu),
2201 ICH_LR_EL2_VINTID(lr));
2202 if (irq == NULL)
2203 continue;
2204
2205 irq->active = (lr & ICH_LR_EL2_STATE_ACTIVE) != 0;
2206
2207 if ((irq->config & VGIC_CONFIG_MASK) == VGIC_CONFIG_EDGE) {
2208 /*
2209 			 * If we have an edge-triggered IRQ, preserve the
2210 			 * pending bit until the IRQ has been handled.
2211 */
2212 if ((lr & ICH_LR_EL2_STATE_PENDING) != 0) {
2213 irq->pending = true;
2214 }
2215 } else {
2216 /*
2217 			 * If we have a level-triggered IRQ, remove the
2218 			 * pending bit if the IRQ has been handled.
2219 			 * The level is separate, so it may still be high,
2220 			 * triggering another IRQ.
2221 */
2222 if ((lr & ICH_LR_EL2_STATE_PENDING) == 0) {
2223 irq->pending = false;
2224 }
2225 }
2226
2227 /* Lock to update irq_act_pend */
2228 mtx_lock_spin(&vgic_cpu->lr_mtx);
2229 if (irq->active) {
2230 /* Ensure the active IRQ is at the head of the list */
2231 TAILQ_REMOVE(&vgic_cpu->irq_act_pend, irq,
2232 act_pend_list);
2233 TAILQ_INSERT_HEAD(&vgic_cpu->irq_act_pend, irq,
2234 act_pend_list);
2235 } else if (!vgic_v3_irq_pending(irq)) {
2236 			/* If neither active nor pending, remove from the list */
2237 TAILQ_REMOVE(&vgic_cpu->irq_act_pend, irq,
2238 act_pend_list);
2239 irq->on_aplist = false;
2240 }
2241 mtx_unlock_spin(&vgic_cpu->lr_mtx);
2242 vgic_v3_release_irq(irq);
2243 }
2244
2245 hypctx->vgic_v3_regs.ich_hcr_el2 &= ~ICH_HCR_EL2_EOICOUNT_MASK;
2246 vgic_cpu->ich_lr_used = 0;
2247 }
2248
2249 static void
2250 vgic_v3_init(device_t dev)
2251 {
2252 uint64_t ich_vtr_el2;
2253 uint32_t pribits, prebits;
2254
2255 ich_vtr_el2 = vmm_read_reg(HYP_REG_ICH_VTR);
2256
2257 /* TODO: These fields are common with the vgicv2 driver */
2258 pribits = ICH_VTR_EL2_PRIBITS(ich_vtr_el2);
2259 switch (pribits) {
2260 default:
2261 case 5:
2262 virt_features.min_prio = 0xf8;
2263 break;
2264 case 6:
2265 virt_features.min_prio = 0xfc;
2266 break;
2267 case 7:
2268 virt_features.min_prio = 0xfe;
2269 break;
2270 case 8:
2271 virt_features.min_prio = 0xff;
2272 break;
2273 }
2274
2275 prebits = ICH_VTR_EL2_PREBITS(ich_vtr_el2);
2276 switch (prebits) {
2277 default:
2278 case 5:
2279 virt_features.ich_apr_num = 1;
2280 break;
2281 case 6:
2282 virt_features.ich_apr_num = 2;
2283 break;
2284 case 7:
2285 virt_features.ich_apr_num = 4;
2286 break;
2287 }
2288
2289 virt_features.ich_lr_num = ICH_VTR_EL2_LISTREGS(ich_vtr_el2);
2290 }
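/*
 * Reading the tables above: with pribits == 5 the hardware implements
 * priority steps of 8, so the numerically largest (i.e. lowest) priority
 * a guest can use is 0xf8, while 8 bits make every value up to 0xff
 * usable. prebits sizes the active-priority registers: 2^5 preemption
 * levels fit in one 32-bit ICH_AP1Rn_EL2 register, 2^6 need two and 2^7
 * need four.
 */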
2291
2292 static int
2293 vgic_v3_probe(device_t dev)
2294 {
2295 if (!gic_get_vgic(dev))
2296 return (EINVAL);
2297
2298 /* We currently only support the GICv3 */
2299 if (gic_get_hw_rev(dev) < 3)
2300 return (EINVAL);
2301
2302 device_set_desc(dev, "Virtual GIC v3");
2303 return (BUS_PROBE_DEFAULT);
2304 }
2305
2306 static int
2307 vgic_v3_attach(device_t dev)
2308 {
2309 vgic_dev = dev;
2310 return (0);
2311 }
2312
2313 static int
2314 vgic_v3_detach(device_t dev)
2315 {
2316 vgic_dev = NULL;
2317 return (0);
2318 }
2319
2320 static device_method_t vgic_v3_methods[] = {
2321 /* Device interface */
2322 DEVMETHOD(device_probe, vgic_v3_probe),
2323 DEVMETHOD(device_attach, vgic_v3_attach),
2324 DEVMETHOD(device_detach, vgic_v3_detach),
2325
2326 /* VGIC interface */
2327 DEVMETHOD(vgic_init, vgic_v3_init),
2328 DEVMETHOD(vgic_attach_to_vm, vgic_v3_attach_to_vm),
2329 DEVMETHOD(vgic_detach_from_vm, vgic_v3_detach_from_vm),
2330 DEVMETHOD(vgic_vminit, vgic_v3_vminit),
2331 DEVMETHOD(vgic_cpuinit, vgic_v3_cpuinit),
2332 DEVMETHOD(vgic_cpucleanup, vgic_v3_cpucleanup),
2333 DEVMETHOD(vgic_vmcleanup, vgic_v3_vmcleanup),
2334 DEVMETHOD(vgic_max_cpu_count, vgic_v3_max_cpu_count),
2335 DEVMETHOD(vgic_has_pending_irq, vgic_v3_has_pending_irq),
2336 DEVMETHOD(vgic_inject_irq, vgic_v3_inject_irq),
2337 DEVMETHOD(vgic_inject_msi, vgic_v3_inject_msi),
2338 DEVMETHOD(vgic_flush_hwstate, vgic_v3_flush_hwstate),
2339 DEVMETHOD(vgic_sync_hwstate, vgic_v3_sync_hwstate),
2340
2341 /* End */
2342 DEVMETHOD_END
2343 };
2344
2345 /* TODO: Create a vgic base class? */
2346 DEFINE_CLASS_0(vgic, vgic_v3_driver, vgic_v3_methods, 0);
2347
2348 DRIVER_MODULE(vgic_v3, gic, vgic_v3_driver, 0, 0);
2349