xref: /freebsd/sys/riscv/vmm/vmm_aplic.c (revision ae65d59d4b8c227ac27b58497b9295e09fa9a179)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2024 Ruslan Bukin <br@bsdpad.com>
5  *
6  * This software was developed by the University of Cambridge Computer
7  * Laboratory (Department of Computer Science and Technology) under Innovate
8  * UK project 105694, "Digital Security by Design (DSbD) Technology Platform
9  * Prototype".
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/errno.h>
35 #include <sys/systm.h>
36 #include <sys/bus.h>
37 #include <sys/kernel.h>
38 #include <sys/lock.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/mutex.h>
42 #include <sys/rman.h>
43 #include <sys/smp.h>
44 
45 #include <riscv/vmm/riscv.h>
46 #include <riscv/vmm/vmm_aplic.h>
47 
48 #include <machine/vmm_instruction_emul.h>
49 #include <machine/vmm_dev.h>
50 
MALLOC_DEFINE(M_APLIC, "RISC-V VMM APLIC", "RISC-V AIA APLIC");

/*
 * Register map of the emulated APLIC (RISC-V Advanced Interrupt
 * Architecture).  Offsets are relative to the start of the MMIO window.
 */
#define	APLIC_DOMAINCFG		0x0000
#define	 DOMAINCFG_IE		(1 << 8) /* Interrupt Enable. */
#define	 DOMAINCFG_DM		(1 << 2) /* Direct Mode. */
#define	 DOMAINCFG_BE		(1 << 0) /* Big-Endian. */
/* Per-source configuration; sources are numbered starting at 1. */
#define	APLIC_SOURCECFG(x)	(0x0004 + ((x) - 1) * 4)
#define	 SOURCECFG_D		(1 << 10) /* D - Delegate. */
/* If D == 0. */
#define	 SOURCECFG_SM_S		(0)
#define	 SOURCECFG_SM_M		(0x7 << SOURCECFG_SM_S)
#define	 SOURCECFG_SM_INACTIVE	(0) /* Not delegated. */
#define	 SOURCECFG_SM_DETACHED	(1)
#define	 SOURCECFG_SM_RESERVED	(2)
#define	 SOURCECFG_SM_RESERVED1	(3)
#define	 SOURCECFG_SM_EDGE1	(4) /* Rising edge. */
#define	 SOURCECFG_SM_EDGE0	(5) /* Falling edge. */
#define	 SOURCECFG_SM_LEVEL1	(6) /* High. */
#define	 SOURCECFG_SM_LEVEL0	(7) /* Low. */
/* If D == 1. */
#define	 SOURCECFG_CHILD_INDEX_S	(0)
#define	 SOURCECFG_CHILD_INDEX_M	(0x3ff << SOURCECFG_CHILD_INDEX_S)
/* Pending/enable bit arrays and their by-number set/clear registers. */
#define	APLIC_SETIP		0x1c00
#define	APLIC_SETIPNUM		0x1cdc
#define	APLIC_CLRIP		0x1d00
#define	APLIC_CLRIPNUM		0x1ddc
#define	APLIC_SETIE		0x1e00
#define	APLIC_SETIENUM		0x1edc
#define	APLIC_CLRIE		0x1f00
#define	APLIC_CLRIENUM		0x1fdc
#define	APLIC_GENMSI		0x3000
#define	APLIC_TARGET(x)		(0x3004 + ((x) - 1) * 4)
#define	 TARGET_HART_S		18
#define	 TARGET_HART_M		0x3fff
/* Per-hart interrupt delivery control (IDC) structures, 32 bytes each. */
#define	APLIC_IDC(x)		(0x4000 + (x) * 32)
#define	 IDC_IDELIVERY(x)	(APLIC_IDC(x) + 0x0)
#define	 IDC_IFORCE(x)		(APLIC_IDC(x) + 0x4)
#define	 IDC_ITHRESHOLD(x)	(APLIC_IDC(x) + 0x8)
#define	 IDC_TOPI(x)		(APLIC_IDC(x) + 0x18)
#define	 IDC_CLAIMI(x)		(APLIC_IDC(x) + 0x1C)
#define	   CLAIMI_IRQ_S		(16)
#define	   CLAIMI_IRQ_M		(0x3ff << CLAIMI_IRQ_S)
#define	   CLAIMI_PRIO_S	(0)
#define	   CLAIMI_PRIO_M	(0xff << CLAIMI_PRIO_S)

/* Number of emulated interrupt sources (sources are numbered 1..NIRQS). */
#define	APLIC_NIRQS	63

/* Software state of one emulated interrupt source. */
struct aplic_irq {
	uint32_t sourcecfg;	/* Guest-written sourcecfg register value. */
	uint32_t state;		/* Internal state flags, see below. */
#define	APLIC_IRQ_STATE_PENDING	(1 << 0)
#define	APLIC_IRQ_STATE_ENABLED	(1 << 1)
#define	APLIC_IRQ_STATE_INPUT	(1 << 2)	/* Last injected input level. */
	uint32_t target;	/* Guest-written target register value. */
	uint32_t target_hart;	/* Destination hart, cached from target. */
};

/* Per-VM emulated APLIC instance. */
struct aplic {
	uint32_t mem_start;	/* Guest-physical base of the MMIO window. */
	uint32_t mem_end;	/* Guest-physical end of the MMIO window. */
	struct mtx mtx;		/* Spin lock protecting irqs[] and domaincfg. */
	struct aplic_irq *irqs;	/* Source state, indexed by source number. */
	int nirqs;		/* Number of interrupt sources. */
	uint32_t domaincfg;	/* Guest-visible domaincfg (IE bit only). */
};
116 
117 static int
aplic_handle_sourcecfg(struct aplic * aplic,int i,bool write,uint64_t * val)118 aplic_handle_sourcecfg(struct aplic *aplic, int i, bool write, uint64_t *val)
119 {
120 	struct aplic_irq *irq;
121 
122 	if (i <= 0 || i > aplic->nirqs)
123 		return (ENOENT);
124 
125 	mtx_lock_spin(&aplic->mtx);
126 	irq = &aplic->irqs[i];
127 	if (write)
128 		irq->sourcecfg = *val;
129 	else
130 		*val = irq->sourcecfg;
131 	mtx_unlock_spin(&aplic->mtx);
132 
133 	return (0);
134 }
135 
136 static int
aplic_set_enabled(struct aplic * aplic,bool write,uint64_t * val,bool enabled)137 aplic_set_enabled(struct aplic *aplic, bool write, uint64_t *val, bool enabled)
138 {
139 	struct aplic_irq *irq;
140 	int i;
141 
142 	if (!write) {
143 		*val = 0;
144 		return (0);
145 	}
146 
147 	i = *val;
148 	if (i <= 0 || i > aplic->nirqs)
149 		return (-1);
150 
151 	irq = &aplic->irqs[i];
152 
153 	mtx_lock_spin(&aplic->mtx);
154 	if ((irq->sourcecfg & SOURCECFG_SM_M) != SOURCECFG_SM_INACTIVE) {
155 		if (enabled)
156 			irq->state |= APLIC_IRQ_STATE_ENABLED;
157 		else
158 			irq->state &= ~APLIC_IRQ_STATE_ENABLED;
159 	}
160 	mtx_unlock_spin(&aplic->mtx);
161 
162 	return (0);
163 }
164 
165 static void
aplic_set_enabled_word(struct aplic * aplic,bool write,uint32_t word,uint64_t * val,bool enabled)166 aplic_set_enabled_word(struct aplic *aplic, bool write, uint32_t word,
167     uint64_t *val, bool enabled)
168 {
169 	uint64_t v;
170 	int i;
171 
172 	if (!write) {
173 		*val = 0;
174 		return;
175 	}
176 
177 	/*
178 	 * The write is ignored if value written is not an active interrupt
179 	 * source number in the domain.
180 	 */
181 	for (i = 0; i < 32; i++)
182 		if (*val & (1u << i)) {
183 			v = word * 32 + i;
184 			(void)aplic_set_enabled(aplic, write, &v, enabled);
185 		}
186 }
187 
188 static int
aplic_handle_target(struct aplic * aplic,int i,bool write,uint64_t * val)189 aplic_handle_target(struct aplic *aplic, int i, bool write, uint64_t *val)
190 {
191 	struct aplic_irq *irq;
192 
193 	mtx_lock_spin(&aplic->mtx);
194 	irq = &aplic->irqs[i];
195 	if (write) {
196 		irq->target = *val;
197 		irq->target_hart = (irq->target >> TARGET_HART_S);
198 	} else
199 		*val = irq->target;
200 	mtx_unlock_spin(&aplic->mtx);
201 
202 	return (0);
203 }
204 
205 static int
aplic_handle_idc_claimi(struct hyp * hyp,struct aplic * aplic,int cpu_id,bool write,uint64_t * val)206 aplic_handle_idc_claimi(struct hyp *hyp, struct aplic *aplic, int cpu_id,
207     bool write, uint64_t *val)
208 {
209 	struct aplic_irq *irq;
210 	bool found;
211 	int i;
212 
213 	/* Writes to claimi are ignored. */
214 	if (write)
215 		return (-1);
216 
217 	found = false;
218 
219 	mtx_lock_spin(&aplic->mtx);
220 	for (i = 0; i < aplic->nirqs; i++) {
221 		irq = &aplic->irqs[i];
222 		if (irq->target_hart != cpu_id)
223 			continue;
224 		if (irq->state & APLIC_IRQ_STATE_PENDING) {
225 			*val = (i << CLAIMI_IRQ_S) | (0 << CLAIMI_PRIO_S);
226 			irq->state &= ~APLIC_IRQ_STATE_PENDING;
227 			found = true;
228 			break;
229 		}
230 	}
231 	mtx_unlock_spin(&aplic->mtx);
232 
233 	if (found == false)
234 		*val = 0;
235 
236 	return (0);
237 }
238 
239 static int
aplic_handle_idc(struct hyp * hyp,struct aplic * aplic,int cpu,int reg,bool write,uint64_t * val)240 aplic_handle_idc(struct hyp *hyp, struct aplic *aplic, int cpu, int reg,
241     bool write, uint64_t *val)
242 {
243 	int error;
244 
245 	switch (reg + APLIC_IDC(0)) {
246 	case IDC_IDELIVERY(0):
247 	case IDC_IFORCE(0):
248 	case IDC_ITHRESHOLD(0):
249 	case IDC_TOPI(0):
250 		error = 0;
251 		break;
252 	case IDC_CLAIMI(0):
253 		error = aplic_handle_idc_claimi(hyp, aplic, cpu, write, val);
254 		break;
255 	default:
256 		error = ENOENT;
257 	}
258 
259 	return (error);
260 }
261 
262 static int
aplic_mmio_access(struct hyp * hyp,struct aplic * aplic,uint64_t reg,bool write,uint64_t * val)263 aplic_mmio_access(struct hyp *hyp, struct aplic *aplic, uint64_t reg,
264     bool write, uint64_t *val)
265 {
266 	int error;
267 	int cpu;
268 	int r;
269 	int i;
270 
271 	dprintf("%s: reg %lx\n", __func__, reg);
272 
273 	if ((reg >= APLIC_SOURCECFG(1)) &&
274 	    (reg <= APLIC_SOURCECFG(aplic->nirqs))) {
275 		i = ((reg - APLIC_SOURCECFG(1)) >> 2) + 1;
276 		error = aplic_handle_sourcecfg(aplic, i, write, val);
277 		return (error);
278 	}
279 
280 	if ((reg >= APLIC_TARGET(1)) && (reg <= APLIC_TARGET(aplic->nirqs))) {
281 		i = ((reg - APLIC_TARGET(1)) >> 2) + 1;
282 		error = aplic_handle_target(aplic, i, write, val);
283 		return (error);
284 	}
285 
286 	if ((reg >= APLIC_IDC(0)) && (reg < APLIC_IDC(mp_ncpus))) {
287 		cpu = (reg - APLIC_IDC(0)) >> 5;
288 		r = (reg - APLIC_IDC(0)) % 32;
289 		error = aplic_handle_idc(hyp, aplic, cpu, r, write, val);
290 		return (error);
291 	}
292 
293 	if ((reg >= APLIC_CLRIE) && (reg < (APLIC_CLRIE + aplic->nirqs * 4))) {
294 		i = (reg - APLIC_CLRIE) >> 2;
295 		aplic_set_enabled_word(aplic, write, i, val, false);
296 		return (0);
297 	}
298 
299 	switch (reg) {
300 	case APLIC_DOMAINCFG:
301 		mtx_lock_spin(&aplic->mtx);
302 		if (write)
303 			aplic->domaincfg = *val & DOMAINCFG_IE;
304 		else
305 			*val = aplic->domaincfg;
306 		mtx_unlock_spin(&aplic->mtx);
307 		error = 0;
308 		break;
309 	case APLIC_SETIENUM:
310 		error = aplic_set_enabled(aplic, write, val, true);
311 		break;
312 	case APLIC_CLRIENUM:
313 		error = aplic_set_enabled(aplic, write, val, false);
314 		break;
315 	default:
316 		dprintf("%s: unknown reg %lx", __func__, reg);
317 		error = ENOENT;
318 		break;
319 	};
320 
321 	return (error);
322 }
323 
324 static int
mem_read(struct vcpu * vcpu,uint64_t fault_ipa,uint64_t * rval,int size,void * arg)325 mem_read(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t *rval, int size,
326     void *arg)
327 {
328 	struct hypctx *hypctx;
329 	struct hyp *hyp;
330 	struct aplic *aplic;
331 	uint64_t reg;
332 	uint64_t val;
333 	int error;
334 
335 	hypctx = vcpu_get_cookie(vcpu);
336 	hyp = hypctx->hyp;
337 	aplic = hyp->aplic;
338 
339 	dprintf("%s: fault_ipa %lx size %d\n", __func__, fault_ipa, size);
340 
341 	if (fault_ipa < aplic->mem_start || fault_ipa + size > aplic->mem_end)
342 		return (EINVAL);
343 
344 	reg = fault_ipa - aplic->mem_start;
345 
346 	error = aplic_mmio_access(hyp, aplic, reg, false, &val);
347 	if (error == 0)
348 		*rval = val;
349 
350 	return (error);
351 }
352 
353 static int
mem_write(struct vcpu * vcpu,uint64_t fault_ipa,uint64_t wval,int size,void * arg)354 mem_write(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t wval, int size,
355     void *arg)
356 {
357 	struct hypctx *hypctx;
358 	struct hyp *hyp;
359 	struct aplic *aplic;
360 	uint64_t reg;
361 	uint64_t val;
362 	int error;
363 
364 	hypctx = vcpu_get_cookie(vcpu);
365 	hyp = hypctx->hyp;
366 	aplic = hyp->aplic;
367 
368 	dprintf("%s: fault_ipa %lx wval %lx size %d\n", __func__, fault_ipa,
369 	    wval, size);
370 
371 	if (fault_ipa < aplic->mem_start || fault_ipa + size > aplic->mem_end)
372 		return (EINVAL);
373 
374 	reg = fault_ipa - aplic->mem_start;
375 
376 	val = wval;
377 
378 	error = aplic_mmio_access(hyp, aplic, reg, true, &val);
379 
380 	return (error);
381 }
382 
383 void
aplic_vminit(struct hyp * hyp)384 aplic_vminit(struct hyp *hyp)
385 {
386 	struct aplic *aplic;
387 
388 	hyp->aplic = malloc(sizeof(*hyp->aplic), M_APLIC,
389 	    M_WAITOK | M_ZERO);
390 	aplic = hyp->aplic;
391 
392 	mtx_init(&aplic->mtx, "APLIC lock", NULL, MTX_SPIN);
393 }
394 
395 void
aplic_vmcleanup(struct hyp * hyp)396 aplic_vmcleanup(struct hyp *hyp)
397 {
398 	struct aplic *aplic;
399 
400 	aplic = hyp->aplic;
401 
402 	mtx_destroy(&aplic->mtx);
403 
404 	free(hyp->aplic, M_APLIC);
405 }
406 
407 int
aplic_attach_to_vm(struct hyp * hyp,struct vm_aplic_descr * descr)408 aplic_attach_to_vm(struct hyp *hyp, struct vm_aplic_descr *descr)
409 {
410 	struct aplic *aplic;
411 	struct vm *vm;
412 
413 	vm = hyp->vm;
414 
415 	dprintf("%s\n", __func__);
416 
417 	vm_register_inst_handler(vm, descr->mem_start, descr->mem_size,
418 	    mem_read, mem_write);
419 
420 	aplic = hyp->aplic;
421 	aplic->nirqs = APLIC_NIRQS;
422 	aplic->mem_start = descr->mem_start;
423 	aplic->mem_end = descr->mem_start + descr->mem_size;
424 	aplic->irqs = malloc(sizeof(struct aplic_irq) * aplic->nirqs, M_APLIC,
425 	    M_WAITOK | M_ZERO);
426 
427 	hyp->aplic_attached = true;
428 
429 	return (0);
430 }
431 
432 void
aplic_detach_from_vm(struct hyp * hyp)433 aplic_detach_from_vm(struct hyp *hyp)
434 {
435 	struct aplic *aplic;
436 
437 	aplic = hyp->aplic;
438 
439 	dprintf("%s\n", __func__);
440 
441 	if (hyp->aplic_attached) {
442 		hyp->aplic_attached = false;
443 		free(aplic->irqs, M_APLIC);
444 	}
445 }
446 
447 int
aplic_check_pending(struct hypctx * hypctx)448 aplic_check_pending(struct hypctx *hypctx)
449 {
450 	struct aplic_irq *irq;
451 	struct aplic *aplic;
452 	struct hyp *hyp;
453 	int i;
454 
455 	hyp = hypctx->hyp;
456 	aplic = hyp->aplic;
457 
458 	mtx_lock_spin(&aplic->mtx);
459 	if ((aplic->domaincfg & DOMAINCFG_IE) == 0) {
460 		mtx_unlock_spin(&aplic->mtx);
461 		return (0);
462 	}
463 
464 	for (i = 0; i < aplic->nirqs; i++) {
465 		irq = &aplic->irqs[i];
466 		if (irq->target_hart != hypctx->cpu_id)
467 			continue;
468 		if ((irq->state & APLIC_IRQ_STATE_ENABLED) &&
469 		    (irq->state & APLIC_IRQ_STATE_PENDING)) {
470 			mtx_unlock_spin(&aplic->mtx);
471 			/* Found. */
472 			return (1);
473 		}
474 	}
475 	mtx_unlock_spin(&aplic->mtx);
476 
477 	return (0);
478 }
479 
480 int
aplic_inject_irq(struct hyp * hyp,int vcpuid,uint32_t irqid,bool level)481 aplic_inject_irq(struct hyp *hyp, int vcpuid, uint32_t irqid, bool level)
482 {
483 	struct aplic_irq *irq;
484 	struct aplic *aplic;
485 	bool notify;
486 	int error;
487 	int mask;
488 
489 	aplic = hyp->aplic;
490 
491 	error = 0;
492 
493 	mtx_lock_spin(&aplic->mtx);
494 	if ((aplic->domaincfg & DOMAINCFG_IE) == 0) {
495 		mtx_unlock_spin(&aplic->mtx);
496 		return (error);
497 	}
498 
499 	irq = &aplic->irqs[irqid];
500 	if (irq->sourcecfg & SOURCECFG_D) {
501 		mtx_unlock_spin(&aplic->mtx);
502 		return (error);
503 	}
504 
505 	notify = false;
506 	switch (irq->sourcecfg & SOURCECFG_SM_M) {
507 	case SOURCECFG_SM_LEVEL0:
508 		if (!level)
509 			irq->state |= APLIC_IRQ_STATE_PENDING;
510 		break;
511 	case SOURCECFG_SM_LEVEL1:
512 		if (level)
513 			irq->state |= APLIC_IRQ_STATE_PENDING;
514 		break;
515 	case SOURCECFG_SM_EDGE0:
516 		if (!level && (irq->state & APLIC_IRQ_STATE_INPUT))
517 			irq->state |= APLIC_IRQ_STATE_PENDING;
518 		break;
519 	case SOURCECFG_SM_EDGE1:
520 		if (level && !(irq->state & APLIC_IRQ_STATE_INPUT))
521 			irq->state |= APLIC_IRQ_STATE_PENDING;
522 		break;
523 	case SOURCECFG_SM_DETACHED:
524 	case SOURCECFG_SM_INACTIVE:
525 		break;
526 	default:
527 		error = ENXIO;
528 		break;
529 	}
530 
531 	if (level)
532 		irq->state |= APLIC_IRQ_STATE_INPUT;
533 	else
534 		irq->state &= ~APLIC_IRQ_STATE_INPUT;
535 
536 	mask = APLIC_IRQ_STATE_ENABLED | APLIC_IRQ_STATE_PENDING;
537 	if ((irq->state & mask) == mask)
538 		notify = true;
539 
540 	mtx_unlock_spin(&aplic->mtx);
541 
542 	if (notify)
543 		vcpu_notify_event(vm_vcpu(hyp->vm, irq->target_hart));
544 
545 	return (error);
546 }
547 
/*
 * Inject a message-signaled interrupt into the guest.
 * MSI delivery is not implemented yet.
 */
int
aplic_inject_msi(struct hyp *hyp, uint64_t msg, uint64_t addr)
{

	/* TODO. */

	return (ENXIO);
}
556 
void
aplic_cpuinit(struct hypctx *hypctx)
{

	/* No per-vCPU APLIC state to initialize. */
}
562 
void
aplic_cpucleanup(struct hypctx *hypctx)
{

	/* No per-vCPU APLIC state to release. */
}
568 
void
aplic_flush_hwstate(struct hypctx *hypctx)
{

	/* Fully software-emulated: no hardware state to flush. */
}
574 
void
aplic_sync_hwstate(struct hypctx *hypctx)
{

	/* Fully software-emulated: no hardware state to sync. */
}
580 
581 int
aplic_max_cpu_count(struct hyp * hyp)582 aplic_max_cpu_count(struct hyp *hyp)
583 {
584 	int16_t max_count;
585 
586 	max_count = vm_get_maxcpus(hyp->vm);
587 
588 	return (max_count);
589 }
590