xref: /freebsd/sys/riscv/vmm/vmm_aplic.c (revision d3916eace506b8ab23537223f5c92924636a1c41)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2024 Ruslan Bukin <br@bsdpad.com>
5  *
6  * This software was developed by the University of Cambridge Computer
7  * Laboratory (Department of Computer Science and Technology) under Innovate
8  * UK project 105694, "Digital Security by Design (DSbD) Technology Platform
9  * Prototype".
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/errno.h>
35 #include <sys/systm.h>
36 #include <sys/bus.h>
37 #include <sys/kernel.h>
38 #include <sys/lock.h>
39 #include <sys/malloc.h>
40 #include <sys/module.h>
41 #include <sys/mutex.h>
42 #include <sys/rman.h>
43 #include <sys/smp.h>
44 
45 #include <riscv/vmm/riscv.h>
46 #include <riscv/vmm/vmm_aplic.h>
47 
48 #include <machine/vmm_instruction_emul.h>
49 #include <machine/vmm_dev.h>
50 
MALLOC_DEFINE(M_APLIC, "RISC-V VMM APLIC", "RISC-V AIA APLIC");

/*
 * Emulated APLIC register map.  Offsets are relative to the base of the
 * guest's APLIC memory window; field layouts follow the RISC-V AIA
 * specification.
 */
#define	APLIC_DOMAINCFG		0x0000
#define	 DOMAINCFG_IE		(1 << 8) /* Interrupt Enable. */
#define	 DOMAINCFG_DM		(1 << 2) /* Direct Mode. */
#define	 DOMAINCFG_BE		(1 << 0) /* Big-Endian. */
#define	APLIC_SOURCECFG(x)	(0x0004 + ((x) - 1) * 4) /* Source x >= 1. */
#define	 SOURCECFG_D		(1 << 10) /* D - Delegate. */
/* If D == 0. */
#define	 SOURCECFG_SM_S		(0)
#define	 SOURCECFG_SM_M		(0x7 << SOURCECFG_SM_S)
#define	 SOURCECFG_SM_INACTIVE	(0) /* Not delegated. */
#define	 SOURCECFG_SM_DETACHED	(1)
#define	 SOURCECFG_SM_RESERVED	(2)
#define	 SOURCECFG_SM_RESERVED1	(3)
#define	 SOURCECFG_SM_EDGE1	(4) /* Rising edge. */
#define	 SOURCECFG_SM_EDGE0	(5) /* Falling edge. */
#define	 SOURCECFG_SM_LEVEL1	(6) /* High. */
#define	 SOURCECFG_SM_LEVEL0	(7) /* Low. */
/* If D == 1. */
#define	 SOURCECFG_CHILD_INDEX_S	(0)
#define	 SOURCECFG_CHILD_INDEX_M	(0x3ff << SOURCECFG_CHILD_INDEX_S)
#define	APLIC_SETIPNUM		0x1cdc
#define	APLIC_CLRIPNUM		0x1ddc
#define	APLIC_SETIENUM		0x1edc
#define	APLIC_CLRIENUM		0x1fdc
#define	APLIC_GENMSI		0x3000
#define	APLIC_TARGET(x)		(0x3004 + ((x) - 1) * 4) /* Source x >= 1. */
#define	 TARGET_HART_S		18
#define	 TARGET_HART_M		0x3fff
/* Per-hart Interrupt Delivery Control (IDC) blocks, 32 bytes each. */
#define	APLIC_IDC(x)		(0x4000 + (x) * 32)
#define	 IDC_IDELIVERY(x)	(APLIC_IDC(x) + 0x0)
#define	 IDC_IFORCE(x)		(APLIC_IDC(x) + 0x4)
#define	 IDC_ITHRESHOLD(x)	(APLIC_IDC(x) + 0x8)
#define	 IDC_TOPI(x)		(APLIC_IDC(x) + 0x18)
#define	 IDC_CLAIMI(x)		(APLIC_IDC(x) + 0x1C)
#define	   CLAIMI_IRQ_S		(16)
#define	   CLAIMI_IRQ_M		(0x3ff << CLAIMI_IRQ_S)
#define	   CLAIMI_PRIO_S	(0)
#define	   CLAIMI_PRIO_M	(0xff << CLAIMI_PRIO_S)

/* Number of interrupt sources emulated per APLIC domain. */
#define	APLIC_NIRQS	63
93 
/*
 * Software state of a single emulated interrupt source.
 * Protected by the owning struct aplic's spin mutex.
 */
struct aplic_irq {
	uint32_t sourcecfg;	/* Guest-written APLIC_SOURCECFG(x) value. */
	uint32_t state;		/* Pending/enabled flags, below. */
#define	APLIC_IRQ_STATE_PENDING	(1 << 0)
#define	APLIC_IRQ_STATE_ENABLED	(1 << 1)
	uint32_t target;	/* Guest-written APLIC_TARGET(x) value. */
	uint32_t target_hart;	/* Cached target >> TARGET_HART_S. */
};
102 
/*
 * Per-VM emulated APLIC instance, hung off struct hyp.
 * The spin mutex protects irqs[] and domaincfg.
 */
struct aplic {
	uint32_t mem_start;	/* Guest-physical base of register window. */
	uint32_t mem_end;	/* Guest-physical end of register window. */
	struct mtx mtx;
	struct aplic_irq *irqs;	/* Allocated in aplic_attach_to_vm(). */
	int nirqs;
	uint32_t domaincfg;	/* Only DOMAINCFG_IE is modeled. */
};
111 
112 static int
aplic_handle_sourcecfg(struct aplic * aplic,int i,bool write,uint64_t * val)113 aplic_handle_sourcecfg(struct aplic *aplic, int i, bool write, uint64_t *val)
114 {
115 	struct aplic_irq *irq;
116 
117 	if (i <= 0 || i > aplic->nirqs)
118 		return (ENOENT);
119 
120 	mtx_lock_spin(&aplic->mtx);
121 	irq = &aplic->irqs[i];
122 	if (write)
123 		irq->sourcecfg = *val;
124 	else
125 		*val = irq->sourcecfg;
126 	mtx_unlock_spin(&aplic->mtx);
127 
128 	return (0);
129 }
130 
131 static int
aplic_set_enabled(struct aplic * aplic,bool write,uint64_t * val,bool enabled)132 aplic_set_enabled(struct aplic *aplic, bool write, uint64_t *val, bool enabled)
133 {
134 	struct aplic_irq *irq;
135 	int i;
136 
137 	if (!write) {
138 		*val = 0;
139 		return (0);
140 	}
141 
142 	i = *val;
143 	if (i <= 0 || i > aplic->nirqs)
144 		return (-1);
145 
146 	irq = &aplic->irqs[i];
147 
148 	mtx_lock_spin(&aplic->mtx);
149 	if (enabled)
150 		irq->state |= APLIC_IRQ_STATE_ENABLED;
151 	else
152 		irq->state &= ~APLIC_IRQ_STATE_ENABLED;
153 	mtx_unlock_spin(&aplic->mtx);
154 
155 	return (0);
156 }
157 
158 static int
aplic_handle_target(struct aplic * aplic,int i,bool write,uint64_t * val)159 aplic_handle_target(struct aplic *aplic, int i, bool write, uint64_t *val)
160 {
161 	struct aplic_irq *irq;
162 
163 	mtx_lock_spin(&aplic->mtx);
164 	irq = &aplic->irqs[i];
165 	if (write) {
166 		irq->target = *val;
167 		irq->target_hart = (irq->target >> TARGET_HART_S);
168 	} else
169 		*val = irq->target;
170 	mtx_unlock_spin(&aplic->mtx);
171 
172 	return (0);
173 }
174 
175 static int
aplic_handle_idc_claimi(struct hyp * hyp,struct aplic * aplic,int cpu_id,bool write,uint64_t * val)176 aplic_handle_idc_claimi(struct hyp *hyp, struct aplic *aplic, int cpu_id,
177     bool write, uint64_t *val)
178 {
179 	struct aplic_irq *irq;
180 	bool found;
181 	int i;
182 
183 	/* Writes to claimi are ignored. */
184 	if (write)
185 		return (-1);
186 
187 	found = false;
188 
189 	mtx_lock_spin(&aplic->mtx);
190 	for (i = 0; i < aplic->nirqs; i++) {
191 		irq = &aplic->irqs[i];
192 		if (irq->target_hart != cpu_id)
193 			continue;
194 		if (irq->state & APLIC_IRQ_STATE_PENDING) {
195 			*val = (i << CLAIMI_IRQ_S) | (0 << CLAIMI_PRIO_S);
196 			irq->state &= ~APLIC_IRQ_STATE_PENDING;
197 			found = true;
198 			break;
199 		}
200 	}
201 	mtx_unlock_spin(&aplic->mtx);
202 
203 	if (found == false)
204 		*val = 0;
205 
206 	return (0);
207 }
208 
209 static int
aplic_handle_idc(struct hyp * hyp,struct aplic * aplic,int cpu,int reg,bool write,uint64_t * val)210 aplic_handle_idc(struct hyp *hyp, struct aplic *aplic, int cpu, int reg,
211     bool write, uint64_t *val)
212 {
213 	int error;
214 
215 	switch (reg + APLIC_IDC(0)) {
216 	case IDC_IDELIVERY(0):
217 	case IDC_IFORCE(0):
218 	case IDC_ITHRESHOLD(0):
219 	case IDC_TOPI(0):
220 		error = 0;
221 		break;
222 	case IDC_CLAIMI(0):
223 		error = aplic_handle_idc_claimi(hyp, aplic, cpu, write, val);
224 		break;
225 	default:
226 		error = ENOENT;
227 	}
228 
229 	return (error);
230 }
231 
232 static int
aplic_mmio_access(struct hyp * hyp,struct aplic * aplic,uint64_t reg,bool write,uint64_t * val)233 aplic_mmio_access(struct hyp *hyp, struct aplic *aplic, uint64_t reg,
234     bool write, uint64_t *val)
235 {
236 	int error;
237 	int cpu;
238 	int r;
239 	int i;
240 
241 	if ((reg >= APLIC_SOURCECFG(1)) &&
242 	    (reg <= APLIC_SOURCECFG(aplic->nirqs))) {
243 		i = ((reg - APLIC_SOURCECFG(1)) >> 2) + 1;
244 		error = aplic_handle_sourcecfg(aplic, i, write, val);
245 		return (error);
246 	}
247 
248 	if ((reg >= APLIC_TARGET(1)) && (reg <= APLIC_TARGET(aplic->nirqs))) {
249 		i = ((reg - APLIC_TARGET(1)) >> 2) + 1;
250 		error = aplic_handle_target(aplic, i, write, val);
251 		return (error);
252 	}
253 
254 	if ((reg >= APLIC_IDC(0)) && (reg < APLIC_IDC(mp_ncpus))) {
255 		cpu = (reg - APLIC_IDC(0)) >> 5;
256 		r = (reg - APLIC_IDC(0)) % 32;
257 		error = aplic_handle_idc(hyp, aplic, cpu, r, write, val);
258 		return (error);
259 	}
260 
261 	switch (reg) {
262 	case APLIC_DOMAINCFG:
263 		aplic->domaincfg = *val & DOMAINCFG_IE;
264 		error = 0;
265 		break;
266 	case APLIC_SETIENUM:
267 		error = aplic_set_enabled(aplic, write, val, true);
268 		break;
269 	case APLIC_CLRIENUM:
270 		error = aplic_set_enabled(aplic, write, val, false);
271 		break;
272 	default:
273 		dprintf("%s: unknown reg %lx", __func__, reg);
274 		error = ENOENT;
275 		break;
276 	};
277 
278 	return (error);
279 }
280 
281 static int
mem_read(struct vcpu * vcpu,uint64_t fault_ipa,uint64_t * rval,int size,void * arg)282 mem_read(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t *rval, int size,
283     void *arg)
284 {
285 	struct hypctx *hypctx;
286 	struct hyp *hyp;
287 	struct aplic *aplic;
288 	uint64_t reg;
289 	uint64_t val;
290 	int error;
291 
292 	hypctx = vcpu_get_cookie(vcpu);
293 	hyp = hypctx->hyp;
294 	aplic = hyp->aplic;
295 
296 	dprintf("%s: fault_ipa %lx size %d\n", __func__, fault_ipa, size);
297 
298 	if (fault_ipa < aplic->mem_start || fault_ipa + size > aplic->mem_end)
299 		return (EINVAL);
300 
301 	reg = fault_ipa - aplic->mem_start;
302 
303 	error = aplic_mmio_access(hyp, aplic, reg, false, &val);
304 	if (error == 0)
305 		*rval = val;
306 
307 	return (error);
308 }
309 
310 static int
mem_write(struct vcpu * vcpu,uint64_t fault_ipa,uint64_t wval,int size,void * arg)311 mem_write(struct vcpu *vcpu, uint64_t fault_ipa, uint64_t wval, int size,
312     void *arg)
313 {
314 	struct hypctx *hypctx;
315 	struct hyp *hyp;
316 	struct aplic *aplic;
317 	uint64_t reg;
318 	uint64_t val;
319 	int error;
320 
321 	hypctx = vcpu_get_cookie(vcpu);
322 	hyp = hypctx->hyp;
323 	aplic = hyp->aplic;
324 
325 	dprintf("%s: fault_ipa %lx wval %lx size %d\n", __func__, fault_ipa,
326 	    wval, size);
327 
328 	if (fault_ipa < aplic->mem_start || fault_ipa + size > aplic->mem_end)
329 		return (EINVAL);
330 
331 	reg = fault_ipa - aplic->mem_start;
332 
333 	val = wval;
334 
335 	error = aplic_mmio_access(hyp, aplic, reg, true, &val);
336 
337 	return (error);
338 }
339 
340 void
aplic_vminit(struct hyp * hyp)341 aplic_vminit(struct hyp *hyp)
342 {
343 	struct aplic *aplic;
344 
345 	hyp->aplic = malloc(sizeof(*hyp->aplic), M_APLIC,
346 	    M_WAITOK | M_ZERO);
347 	aplic = hyp->aplic;
348 
349 	mtx_init(&aplic->mtx, "APLIC lock", NULL, MTX_SPIN);
350 }
351 
352 void
aplic_vmcleanup(struct hyp * hyp)353 aplic_vmcleanup(struct hyp *hyp)
354 {
355 	struct aplic *aplic;
356 
357 	aplic = hyp->aplic;
358 
359 	mtx_destroy(&aplic->mtx);
360 
361 	free(hyp->aplic, M_APLIC);
362 }
363 
364 int
aplic_attach_to_vm(struct hyp * hyp,struct vm_aplic_descr * descr)365 aplic_attach_to_vm(struct hyp *hyp, struct vm_aplic_descr *descr)
366 {
367 	struct aplic *aplic;
368 	struct vm *vm;
369 
370 	vm = hyp->vm;
371 
372 	dprintf("%s\n", __func__);
373 
374 	vm_register_inst_handler(vm, descr->mem_start, descr->mem_size,
375 	    mem_read, mem_write);
376 
377 	aplic = hyp->aplic;
378 	aplic->nirqs = APLIC_NIRQS;
379 	aplic->mem_start = descr->mem_start;
380 	aplic->mem_end = descr->mem_start + descr->mem_size;
381 	aplic->irqs = malloc(sizeof(struct aplic_irq) * aplic->nirqs, M_APLIC,
382 	    M_WAITOK | M_ZERO);
383 
384 	hyp->aplic_attached = true;
385 
386 	return (0);
387 }
388 
389 void
aplic_detach_from_vm(struct hyp * hyp)390 aplic_detach_from_vm(struct hyp *hyp)
391 {
392 	struct aplic *aplic;
393 
394 	aplic = hyp->aplic;
395 
396 	dprintf("%s\n", __func__);
397 
398 	if (hyp->aplic_attached) {
399 		hyp->aplic_attached = false;
400 		free(aplic->irqs, M_APLIC);
401 	}
402 }
403 
404 int
aplic_check_pending(struct hypctx * hypctx)405 aplic_check_pending(struct hypctx *hypctx)
406 {
407 	struct aplic_irq *irq;
408 	struct aplic *aplic;
409 	struct hyp *hyp;
410 	int i;
411 
412 	hyp = hypctx->hyp;
413 	aplic = hyp->aplic;
414 
415 	mtx_lock_spin(&aplic->mtx);
416 	if ((aplic->domaincfg & DOMAINCFG_IE) == 0) {
417 		mtx_unlock_spin(&aplic->mtx);
418 		return (0);
419 	}
420 
421 	for (i = 0; i < aplic->nirqs; i++) {
422 		irq = &aplic->irqs[i];
423 		if (irq->target_hart != hypctx->cpu_id)
424 			continue;
425 		if ((irq->state & APLIC_IRQ_STATE_ENABLED) &&
426 		    (irq->state & APLIC_IRQ_STATE_PENDING)) {
427 			mtx_unlock_spin(&aplic->mtx);
428 			/* Found. */
429 			return (1);
430 		}
431 	}
432 	mtx_unlock_spin(&aplic->mtx);
433 
434 	return (0);
435 }
436 
437 int
aplic_inject_irq(struct hyp * hyp,int vcpuid,uint32_t irqid,bool level)438 aplic_inject_irq(struct hyp *hyp, int vcpuid, uint32_t irqid, bool level)
439 {
440 	struct aplic_irq *irq;
441 	struct aplic *aplic;
442 	bool notify;
443 	int error;
444 
445 	aplic = hyp->aplic;
446 
447 	error = 0;
448 
449 	mtx_lock_spin(&aplic->mtx);
450 	if ((aplic->domaincfg & DOMAINCFG_IE) == 0) {
451 		mtx_unlock_spin(&aplic->mtx);
452 		return (error);
453 	}
454 
455 	irq = &aplic->irqs[irqid];
456 	if (irq->sourcecfg & SOURCECFG_D) {
457 		mtx_unlock_spin(&aplic->mtx);
458 		return (error);
459 	}
460 
461 	notify = false;
462 	switch (irq->sourcecfg & SOURCECFG_SM_M) {
463 	case SOURCECFG_SM_EDGE1:
464 		if (level) {
465 			irq->state |= APLIC_IRQ_STATE_PENDING;
466 			if (irq->state & APLIC_IRQ_STATE_ENABLED)
467 				notify = true;
468 		} else
469 			irq->state &= ~APLIC_IRQ_STATE_PENDING;
470 		break;
471 	case SOURCECFG_SM_DETACHED:
472 		break;
473 	default:
474 		/* TODO. */
475 		dprintf("sourcecfg %d\n", irq->sourcecfg & SOURCECFG_SM_M);
476 		error = ENXIO;
477 		break;
478 	}
479 	mtx_unlock_spin(&aplic->mtx);
480 
481 	if (notify)
482 		vcpu_notify_event(vm_vcpu(hyp->vm, irq->target_hart));
483 
484 	return (error);
485 }
486 
/*
 * Inject a message-signaled interrupt into the guest.
 * TODO: MSI delivery is not implemented; always fails with ENXIO.
 */
int
aplic_inject_msi(struct hyp *hyp, uint64_t msg, uint64_t addr)
{

	return (ENXIO);
}
495 
/* Per-vcpu APLIC initialization; nothing to do in the current model. */
void
aplic_cpuinit(struct hypctx *hypctx)
{

}
501 
/* Per-vcpu APLIC teardown; nothing to do in the current model. */
void
aplic_cpucleanup(struct hypctx *hypctx)
{

}
507 
/* Flush emulated APLIC state to hardware; no-op, state is software-only. */
void
aplic_flush_hwstate(struct hypctx *hypctx)
{

}
513 
/* Sync emulated APLIC state from hardware; no-op, state is software-only. */
void
aplic_sync_hwstate(struct hypctx *hypctx)
{

}
519 
520 int
aplic_max_cpu_count(struct hyp * hyp)521 aplic_max_cpu_count(struct hyp *hyp)
522 {
523 	int16_t max_count;
524 
525 	max_count = vm_get_maxcpus(hyp->vm);
526 
527 	return (max_count);
528 }
529