/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 * Copyright (c) 2019 Mitchell Horne <mhorne@FreeBSD.org>
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory (Department of Computer Science
 * and Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of
 * the DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dt-bindings/interrupt-controller/irq.h>

#include "pic_if.h"

#define	PLIC_MAX_IRQS		1024

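/*
 * Register map offsets, relative to the PLIC's base address, as laid out by
 * the RISC-V PLIC specification.
 */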
#define	PLIC_PRIORITY_BASE	0x000000U

#define	PLIC_ENABLE_BASE	0x002000U
#define	PLIC_ENABLE_STRIDE	0x80U

#define	PLIC_CONTEXT_BASE	0x200000U
#define	PLIC_CONTEXT_STRIDE	0x1000U
#define	PLIC_CONTEXT_THRESHOLD	0x0U
#define	PLIC_CONTEXT_CLAIM	0x4U

#define	PLIC_PRIORITY(_irq)	(PLIC_PRIORITY_BASE + (_irq) * sizeof(uint32_t))
#define	PLIC_ENABLE(_sc, _irq, _cpu)					\
	(_sc->contexts[_cpu].enable_offset + ((_irq) / 32) * sizeof(uint32_t))
#define	PLIC_THRESHOLD(_sc, _cpu)					\
	(_sc->contexts[_cpu].context_offset + PLIC_CONTEXT_THRESHOLD)
#define	PLIC_CLAIM(_sc, _cpu)						\
	(_sc->contexts[_cpu].context_offset + PLIC_CONTEXT_CLAIM)

static pic_disable_intr_t	plic_disable_intr;
static pic_enable_intr_t	plic_enable_intr;
static pic_map_intr_t		plic_map_intr;
static pic_setup_intr_t		plic_setup_intr;
static pic_post_ithread_t	plic_post_ithread;
static pic_pre_ithread_t	plic_pre_ithread;
static pic_bind_intr_t		plic_bind_intr;

struct plic_irqsrc {
	struct intr_irqsrc	isrc;
	u_int			irq;
	u_int			trigtype;
};

struct plic_context {
	bus_size_t enable_offset;
	bus_size_t context_offset;
};

struct plic_softc {
	device_t		dev;
	struct resource		*mem_res;
	struct resource		*irq_res;
	void			*ih;
	struct plic_irqsrc	isrcs[PLIC_MAX_IRQS];
	struct plic_context	contexts[MAXCPU];
	int			ndev;
};

static struct ofw_compat_data compat_data[] = {
	{ "riscv,plic0",		1 },
	{ "sifive,plic-1.0.0",		1 },
	{ "thead,c900-plic",		1 },
	{ NULL,				0 }
};

#define	RD4(sc, reg)				\
    bus_read_4(sc->mem_res, (reg))
#define	WR4(sc, reg, val)			\
    bus_write_4(sc->mem_res, (reg), (val))

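/* Round-robin cursor for distributing new interrupt bindings across CPUs. */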
static u_int plic_irq_cpu;

static int
riscv_hartid_to_cpu(int hartid)
{
	int i;

	CPU_FOREACH(i) {
		if (pcpu_find(i)->pc_hart == hartid)
			return (i);
	}

	return (-1);
}

static int
plic_get_hartid(device_t dev, phandle_t intc)
{
	int hart;

	/* Check the interrupt controller layout. */
	if (OF_searchencprop(intc, "#interrupt-cells", &hart,
	    sizeof(hart)) == -1) {
		device_printf(dev,
		    "Could not find #interrupt-cells for phandle %u\n", intc);
		return (-1);
	}

	/*
	 * The parent of the interrupt-controller is the CPU we are
	 * interested in, so search for its hart ID.
	 */
	if (OF_searchencprop(OF_parent(intc), "reg", (pcell_t *)&hart,
	    sizeof(hart)) == -1) {
		device_printf(dev, "Could not find hartid\n");
		return (-1);
	}

	return (hart);
}

static inline void
plic_irq_dispatch(struct plic_softc *sc, u_int irq,
    struct trapframe *tf)
{
	struct plic_irqsrc *src;

	src = &sc->isrcs[irq];

	if (intr_isrc_dispatch(&src->isrc, tf) != 0)
		device_printf(sc->dev, "Stray irq %u detected\n", irq);
}

static int
plic_intr(void *arg)
{
	struct plic_softc *sc;
	struct trapframe *tf;
	uint32_t pending;
	uint32_t cpu;

	sc = arg;
	cpu = PCPU_GET(cpuid);

	/*
	 * Claim and dispatch all pending interrupts. Reading the claim
	 * register returns the ID of the highest-priority pending source and
	 * atomically claims it, or returns zero when nothing is pending.
	 */
	while ((pending = RD4(sc, PLIC_CLAIM(sc, cpu))) != 0) {
		tf = curthread->td_intr_frame;
		plic_irq_dispatch(sc, pending, tf);
	}

	return (FILTER_HANDLED);
}

static void
plic_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct plic_softc *sc;
	struct plic_irqsrc *src;

	sc = device_get_softc(dev);
	src = (struct plic_irqsrc *)isrc;

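	/* A priority of zero masks the interrupt source. */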
	WR4(sc, PLIC_PRIORITY(src->irq), 0);
}

static void
plic_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct plic_softc *sc;
	struct plic_irqsrc *src;

	sc = device_get_softc(dev);
	src = (struct plic_irqsrc *)isrc;

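	/*
	 * Any non-zero priority unmasks the source; this driver uses a single
	 * priority level (1) for all interrupt sources.
	 */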
	WR4(sc, PLIC_PRIORITY(src->irq), 1);
}

static int
plic_map_intr(device_t dev, struct intr_map_data *data,
    struct intr_irqsrc **isrcp)
{
	struct intr_map_data_fdt *daf;
	struct plic_softc *sc;
	u_int irq, type;

	sc = device_get_softc(dev);

	if (data->type != INTR_MAP_DATA_FDT)
		return (ENOTSUP);

	daf = (struct intr_map_data_fdt *)data;
	if (daf->ncells != 1 && daf->ncells != 2) {
		device_printf(dev, "invalid ncells value: %u\n", daf->ncells);
		return (EINVAL);
	}

	irq = daf->cells[0];
	type = daf->ncells == 2 ? daf->cells[1] : IRQ_TYPE_LEVEL_HIGH;

	if (irq > sc->ndev) {
		device_printf(dev, "irq (%u) > sc->ndev (%u)\n",
		    daf->cells[0], sc->ndev);
		return (EINVAL);
	}

	/*
	 * TODO: handling of edge-triggered interrupts.
	 *
	 * From sifive,plic-1.0.0.yaml:
	 *
	 * "The PLIC supports both edge-triggered and level-triggered
	 * interrupts. For edge-triggered interrupts, the RISC-V PLIC spec
	 * allows two responses to edges seen while an interrupt handler is
	 * active; the PLIC may either queue them or ignore them. In the first
	 * case, handlers are oblivious to the trigger type, so it is not
	 * included in the interrupt specifier. In the second case, software
	 * needs to know the trigger type, so it can reorder the interrupt flow
	 * to avoid missing interrupts. This special handling is needed by at
	 * least the Renesas RZ/Five SoC (AX45MP AndesCore with a NCEPLIC100)
	 * and the T-HEAD C900 PLIC."
	 *
	 * For now, prevent interrupts with type IRQ_TYPE_EDGE_RISING from
	 * allocation. Emit a message so that when the relevant driver fails to
	 * attach, it will at least be clear why.
	 */
	if (type != IRQ_TYPE_LEVEL_HIGH) {
		device_printf(dev, "edge-triggered interrupts not supported\n");
		return (EINVAL);
	}

	sc->isrcs[irq].trigtype = type;
	*isrcp = &sc->isrcs[irq].isrc;

	return (0);
}

static int
plic_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "RISC-V PLIC");

	return (BUS_PROBE_DEFAULT);
}

static int
plic_attach(device_t dev)
{
	struct plic_irqsrc *isrcs;
	struct plic_softc *sc;
	struct intr_pic *pic;
	pcell_t *cells;
	uint32_t irq;
	const char *name;
	phandle_t node;
	phandle_t xref;
	uint32_t cpu;
	int error;
	int rid;
	int nintr;
	int context;
	int i;
	int hart;

	sc = device_get_softc(dev);

	sc->dev = dev;

	node = ofw_bus_get_node(dev);
	if ((OF_getencprop(node, "riscv,ndev", &sc->ndev,
	    sizeof(sc->ndev))) < 0) {
		device_printf(dev,
		    "Error: could not get number of devices\n");
		return (ENXIO);
	}

	if (sc->ndev >= PLIC_MAX_IRQS) {
		device_printf(dev,
		    "Error: invalid ndev (%d)\n", sc->ndev);
		return (ENXIO);
	}

	/* Request memory resources */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev,
		    "Error: could not allocate memory resources\n");
		return (ENXIO);
	}

	/* Register the interrupt sources */
	isrcs = sc->isrcs;
	name = device_get_nameunit(sc->dev);
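	/*
	 * Interrupt source 0 is reserved by the PLIC to mean "no interrupt",
	 * so valid source IDs start at 1.
	 */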
	for (irq = 1; irq <= sc->ndev; irq++) {
		isrcs[irq].irq = irq;
		error = intr_isrc_register(&isrcs[irq].isrc, sc->dev,
		    0, "%s,%u", name, irq);
		if (error != 0)
			return (error);

		WR4(sc, PLIC_PRIORITY(irq), 0);
	}

	/*
	 * Calculate the per-cpu enable and context register offsets.
	 *
	 * This is tricky for a few reasons. The PLIC divides the interrupt
	 * enable, threshold, and claim bits by "context", where each context
	 * routes to a core's local interrupt controller.
	 *
	 * The tricky part is that the PLIC spec imposes no restrictions on how
	 * these contexts are laid out. So for example, there is no guarantee
	 * that each CPU will have both a machine mode and supervisor context,
	 * or that different PLIC implementations will organize the context
	 * registers in the same way. On top of this, we must handle the fact
	 * that cpuid != hartid, as they may have been renumbered during boot.
	 * We perform the following steps:
	 *
	 * 1. Examine the PLIC's "interrupts-extended" property and skip any
	 *    entries that are not for supervisor external interrupts.
	 *
	 * 2. Walk up the device tree to find the corresponding CPU, and grab
	 *    its hart ID.
	 *
	 * 3. Convert the hart to a cpuid, and calculate the register offsets
	 *    based on the context number.
	 *
	 * 4. Save the index for the boot hart's S-mode external interrupt in
	 *    order to allocate and setup the corresponding resource, since the
	 *    local interrupt controller newbus device is associated with that
	 *    specific node.
	 */
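	/*
	 * For illustration only (a hypothetical layout; actual device trees
	 * vary), "interrupts-extended" typically looks like:
	 *
	 *   interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
	 *                         <&cpu1_intc 11>, <&cpu1_intc 9>;
	 *
	 * i.e. one (phandle, interrupt number) pair per context, where 9 is
	 * the supervisor external interrupt and 11 the machine external
	 * interrupt.
	 */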
	nintr = OF_getencprop_alloc_multi(node, "interrupts-extended",
	    sizeof(uint32_t), (void **)&cells);
	if (nintr <= 0) {
		device_printf(dev, "Could not read interrupts-extended\n");
		return (ENXIO);
	}

	/* interrupts-extended is a list of phandles and interrupt types. */
	rid = -1;
	for (i = 0, context = 0; i < nintr; i += 2, context++) {
		/* Skip M-mode external interrupts */
		if (cells[i + 1] != IRQ_EXTERNAL_SUPERVISOR)
			continue;

		/*
		 * Get the hart ID from the core's interrupt controller
		 * phandle.
		 */
		hart = plic_get_hartid(dev, OF_node_from_xref(cells[i]));
		if (hart < 0) {
			OF_prop_free(cells);
			return (ENXIO);
		}

		/* Get the corresponding cpuid. */
		cpu = riscv_hartid_to_cpu(hart);
		if ((int)cpu < 0) {
			device_printf(dev, "Invalid hart!\n");
			OF_prop_free(cells);
			return (ENXIO);
		}

		if (cpu == 0)
			rid = i / 2;

		/* Set the enable and context register offsets for the CPU. */
		sc->contexts[cpu].enable_offset = PLIC_ENABLE_BASE +
		    context * PLIC_ENABLE_STRIDE;
		sc->contexts[cpu].context_offset = PLIC_CONTEXT_BASE +
		    context * PLIC_CONTEXT_STRIDE;
	}
	OF_prop_free(cells);

	if (rid == -1) {
		device_printf(dev,
		    "Could not find local interrupt controller\n");
		return (ENXIO);
	}

	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev,
		    "Error: could not allocate IRQ resources\n");
		return (ENXIO);
	}

	/* Set the threshold for each CPU to accept all priorities. */
	CPU_FOREACH(cpu)
		WR4(sc, PLIC_THRESHOLD(sc, cpu), 0);

	xref = OF_xref_from_node(node);
	pic = intr_pic_register(sc->dev, xref);
	if (pic == NULL)
		return (ENXIO);

	return (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_CLK | INTR_MPSAFE,
	    plic_intr, NULL, sc, &sc->ih));
}

static void
plic_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{

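	/*
	 * Mask the source while its ithread runs; plic_post_ithread() will
	 * complete the claim and unmask it again.
	 */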
	plic_disable_intr(dev, isrc);
}

static void
plic_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{
	struct plic_softc *sc;
	struct plic_irqsrc *src;
	uint32_t cpu;

	sc = device_get_softc(dev);
	src = (struct plic_irqsrc *)isrc;

	cpu = CPU_FFS(&isrc->isrc_cpu) - 1;

	/* Complete the interrupt. */
	WR4(sc, PLIC_CLAIM(sc, cpu), src->irq);
	plic_enable_intr(dev, isrc);
}

static int
plic_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{
	CPU_ZERO(&isrc->isrc_cpu);
	plic_bind_intr(dev, isrc);

	return (0);
}

static int
plic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
{
	struct plic_softc *sc;
	struct plic_irqsrc *src;
	uint32_t reg;
	u_int cpu;

	sc = device_get_softc(dev);
	src = (struct plic_irqsrc *)isrc;

	/* Disable the interrupt source on all CPUs. */
	CPU_FOREACH(cpu) {
		reg = RD4(sc, PLIC_ENABLE(sc, src->irq, cpu));
		reg &= ~(1 << (src->irq % 32));
		WR4(sc, PLIC_ENABLE(sc, src->irq, cpu), reg);
	}

	if (CPU_EMPTY(&isrc->isrc_cpu)) {
		cpu = plic_irq_cpu = intr_irq_next_cpu(plic_irq_cpu, &all_cpus);
		CPU_SETOF(cpu, &isrc->isrc_cpu);
	} else {
		/*
		 * We will only bind to a single CPU so select the first
		 * CPU found.
		 */
		cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
	}

	/* Enable the interrupt on the selected CPU only. */
	reg = RD4(sc, PLIC_ENABLE(sc, src->irq, cpu));
	reg |= (1 << (src->irq % 32));
	WR4(sc, PLIC_ENABLE(sc, src->irq, cpu), reg);

	return (0);
}

static device_method_t plic_methods[] = {
	DEVMETHOD(device_probe,		plic_probe),
	DEVMETHOD(device_attach,	plic_attach),

	DEVMETHOD(pic_disable_intr,	plic_disable_intr),
	DEVMETHOD(pic_enable_intr,	plic_enable_intr),
	DEVMETHOD(pic_map_intr,		plic_map_intr),
	DEVMETHOD(pic_pre_ithread,	plic_pre_ithread),
	DEVMETHOD(pic_post_ithread,	plic_post_ithread),
	DEVMETHOD(pic_post_filter,	plic_post_ithread),
	DEVMETHOD(pic_setup_intr,	plic_setup_intr),
	DEVMETHOD(pic_bind_intr,	plic_bind_intr),

	DEVMETHOD_END
};

static driver_t plic_driver = {
	"plic",
	plic_methods,
	sizeof(struct plic_softc),
};

EARLY_DRIVER_MODULE(plic, simplebus, plic_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);