/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Xilinx AXI DMA controller driver. */

#include <sys/cdefs.h>
#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>
#include <dev/xilinx/axidma.h>

#include "xdma_if.h"

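/* Accessors for the DMA engine's memory-mapped control/status registers. */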
#define	READ4(_sc, _reg)	\
	bus_space_read_4(_sc->bst, _sc->bsh, _reg)
#define	WRITE4(_sc, _reg, _val)	\
	bus_space_write_4(_sc->bst, _sc->bsh, _reg, _val)
#define	READ8(_sc, _reg)	\
	bus_space_read_8(_sc->bst, _sc->bsh, _reg)
#define	WRITE8(_sc, _reg, _val)	\
	bus_space_write_8(_sc->bst, _sc->bsh, _reg, _val)

#define	AXIDMA_DEBUG
#undef	AXIDMA_DEBUG

#ifdef AXIDMA_DEBUG
#define	dprintf(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
#define	dprintf(fmt, ...)
#endif

extern struct bus_space memmap_bus;

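/*
 * Per-channel software state: the descriptor ring, its backing memory,
 * and the head/tail indices used to track in-flight descriptors.
 */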
struct axidma_channel {
	struct axidma_softc	*sc;
	xdma_channel_t		*xchan;
	bool			used;
	int			idx_head;
	int			idx_tail;

	struct axidma_desc	**descs;
	vm_paddr_t		*descs_phys;
	uint32_t		descs_num;

	vm_size_t		mem_size;
	vm_offset_t		mem_paddr;
	vm_offset_t		mem_vaddr;

	uint32_t		descs_used_count;
};

struct axidma_softc {
	device_t		dev;
	struct resource		*res[3];
	bus_space_tag_t		bst;
	bus_space_handle_t	bsh;
	void			*ih[2];
	struct axidma_desc	desc;
	struct axidma_channel	channels[AXIDMA_NCHANNELS];
};

static struct resource_spec axidma_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};

#define	HWTYPE_NONE	0
#define	HWTYPE_STD	1

static struct ofw_compat_data compat_data[] = {
	{ "xlnx,eth-dma",	HWTYPE_STD },
	{ NULL,			HWTYPE_NONE },
};

static int axidma_probe(device_t dev);
static int axidma_attach(device_t dev);
static int axidma_detach(device_t dev);

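/* Advance a descriptor ring index by one, wrapping at the end of the ring. */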
static inline uint32_t
axidma_next_desc(struct axidma_channel *chan, uint32_t curidx)
{

	return ((curidx + 1) % chan->descs_num);
}

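/*
 * Common interrupt handler: acknowledge the pending status bits, reap
 * completed descriptors from tail to head, and report the result to the
 * xdma framework.
 */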
static void
axidma_intr(struct axidma_softc *sc,
    struct axidma_channel *chan)
{
	xdma_transfer_status_t status;
	xdma_transfer_status_t st;
	struct axidma_fdt_data *data;
	xdma_controller_t *xdma;
	struct axidma_desc *desc;
	struct xdma_channel *xchan;
	uint32_t tot_copied;
	int pending;
	int errors;

	xchan = chan->xchan;
	xdma = xchan->xdma;
	data = xdma->data;

	pending = READ4(sc, AXI_DMASR(data->id));
	WRITE4(sc, AXI_DMASR(data->id), pending);

	errors = (pending & (DMASR_DMAINTERR | DMASR_DMASLVERR
	    | DMASR_DMADECOREERR | DMASR_SGINTERR
	    | DMASR_SGSLVERR | DMASR_SGDECERR));

	dprintf("%s: AXI_DMASR %x\n", __func__,
	    READ4(sc, AXI_DMASR(data->id)));
	dprintf("%s: AXI_CURDESC %x\n", __func__,
	    READ4(sc, AXI_CURDESC(data->id)));
	dprintf("%s: AXI_TAILDESC %x\n", __func__,
	    READ4(sc, AXI_TAILDESC(data->id)));

	tot_copied = 0;

	while (chan->idx_tail != chan->idx_head) {
		desc = chan->descs[chan->idx_tail];
		cpu_dcache_wbinv_range((vm_offset_t)desc,
		    sizeof(struct axidma_desc));

		if ((desc->status & BD_STATUS_CMPLT) == 0)
			break;

		st.error = errors;
		st.transferred = desc->status & BD_CONTROL_LEN_M;
		tot_copied += st.transferred;
		xchan_seg_done(xchan, &st);

		chan->idx_tail = axidma_next_desc(chan, chan->idx_tail);
		atomic_subtract_int(&chan->descs_used_count, 1);
	}

	/* Finish operation */
	status.error = errors;
	status.transferred = tot_copied;
	xdma_callback(chan->xchan, &status);
}

192
193 static void
axidma_intr_rx(void * arg)194 axidma_intr_rx(void *arg)
195 {
196 struct axidma_softc *sc;
197 struct axidma_channel *chan;
198
199 dprintf("%s\n", __func__);
200
201 sc = arg;
202 chan = &sc->channels[AXIDMA_RX_CHAN];
203
204 axidma_intr(sc, chan);
205 }
206
207 static void
axidma_intr_tx(void * arg)208 axidma_intr_tx(void *arg)
209 {
210 struct axidma_softc *sc;
211 struct axidma_channel *chan;
212
213 dprintf("%s\n", __func__);
214
215 sc = arg;
216 chan = &sc->channels[AXIDMA_TX_CHAN];
217
218 axidma_intr(sc, chan);
219 }
220
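/*
 * Soft-reset one channel: set DMACR.RESET and poll until the hardware
 * clears the bit again.
 */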
static int
axidma_reset(struct axidma_softc *sc, int chan_id)
{
	int timeout;

	WRITE4(sc, AXI_DMACR(chan_id), DMACR_RESET);

	timeout = 100;
	do {
		if ((READ4(sc, AXI_DMACR(chan_id)) & DMACR_RESET) == 0)
			break;
	} while (--timeout > 0);

	dprintf("timeout %d\n", timeout);

	/* timeout reaches 0 only if the RESET bit never deasserted. */
	if (timeout == 0)
		return (-1);

	dprintf("%s: read control after reset: %x\n",
	    __func__, READ4(sc, AXI_DMACR(chan_id)));

	return (0);
}

static int
axidma_probe(device_t dev)
{
	int hwtype;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (hwtype == HWTYPE_NONE)
		return (ENXIO);

	device_set_desc(dev, "Xilinx AXI DMA");

	return (BUS_PROBE_DEFAULT);
}

static int
axidma_attach(device_t dev)
{
	struct axidma_softc *sc;
	phandle_t xref, node;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (bus_alloc_resources(dev, axidma_spec, sc->res)) {
		device_printf(dev, "could not allocate resources.\n");
		return (ENXIO);
	}

	/* CSR memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	/* Setup the TX interrupt handler. */
	err = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, axidma_intr_tx, sc, &sc->ih[0]);
	if (err) {
		device_printf(dev, "Unable to setup interrupt handler.\n");
		bus_release_resources(dev, axidma_spec, sc->res);
		return (ENXIO);
	}

	/* Setup the RX interrupt handler. */
	err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, axidma_intr_rx, sc, &sc->ih[1]);
	if (err) {
		device_printf(dev, "Unable to setup interrupt handler.\n");
		bus_teardown_intr(dev, sc->res[1], sc->ih[0]);
		bus_release_resources(dev, axidma_spec, sc->res);
		return (ENXIO);
	}

	node = ofw_bus_get_node(dev);
	xref = OF_xref_from_node(node);
	OF_device_register_xref(xref, dev);

	return (0);
}

static int
axidma_detach(device_t dev)
{
	struct axidma_softc *sc;

	sc = device_get_softc(dev);

	bus_teardown_intr(dev, sc->res[1], sc->ih[0]);
	bus_teardown_intr(dev, sc->res[2], sc->ih[1]);
	bus_release_resources(dev, axidma_spec, sc->res);

	return (0);
}

static int
axidma_desc_free(struct axidma_softc *sc, struct axidma_channel *chan)
{
	struct xdma_channel *xchan;

	xchan = chan->xchan;

	free(chan->descs, M_DEVBUF);
	free(chan->descs_phys, M_DEVBUF);

	pmap_kremove_device(chan->mem_vaddr, chan->mem_size);
	kva_free(chan->mem_vaddr, chan->mem_size);
	vmem_free(xchan->vmem, chan->mem_paddr, chan->mem_size);

	return (0);
}

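/*
 * Allocate the descriptor ring: bookkeeping arrays, physically
 * contiguous memory from the channel's vmem arena, and a KVA mapping
 * of that memory, which is then carved into individual descriptors.
 */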
static int
axidma_desc_alloc(struct axidma_softc *sc, struct xdma_channel *xchan,
    uint32_t desc_size)
{
	struct axidma_channel *chan;
	int nsegments;
	int i;

	chan = (struct axidma_channel *)xchan->chan;
	nsegments = chan->descs_num;

	chan->descs = malloc(nsegments * sizeof(struct axidma_desc *),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (chan->descs == NULL) {
		device_printf(sc->dev,
		    "%s: Can't allocate memory.\n", __func__);
		return (-1);
	}

	chan->descs_phys = malloc(nsegments * sizeof(vm_paddr_t),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (chan->descs_phys == NULL) {
		device_printf(sc->dev,
		    "%s: Can't allocate memory.\n", __func__);
		free(chan->descs, M_DEVBUF);
		return (-1);
	}
	chan->mem_size = desc_size * nsegments;
	if (vmem_alloc(xchan->vmem, chan->mem_size, M_FIRSTFIT | M_NOWAIT,
	    &chan->mem_paddr)) {
		device_printf(sc->dev, "Failed to allocate memory.\n");
		free(chan->descs_phys, M_DEVBUF);
		free(chan->descs, M_DEVBUF);
		return (-1);
	}
	chan->mem_vaddr = kva_alloc(chan->mem_size);
	pmap_kenter(chan->mem_vaddr, chan->mem_size, chan->mem_paddr,
	    VM_MEMATTR_DEFAULT);

	device_printf(sc->dev, "Allocated chunk %lx %lu\n",
	    chan->mem_paddr, chan->mem_size);

	for (i = 0; i < nsegments; i++) {
		chan->descs[i] = (struct axidma_desc *)
		    ((uint64_t)chan->mem_vaddr + desc_size * i);
		chan->descs_phys[i] = chan->mem_paddr + desc_size * i;
	}

	return (0);
}

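/* Claim the hardware channel selected by the FDT data and reset it. */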
static int
axidma_channel_alloc(device_t dev, struct xdma_channel *xchan)
{
	xdma_controller_t *xdma;
	struct axidma_fdt_data *data;
	struct axidma_channel *chan;
	struct axidma_softc *sc;

	sc = device_get_softc(dev);

	if (xchan->caps & XCHAN_CAP_BUSDMA) {
		device_printf(sc->dev,
		    "Error: busdma operation is not implemented.\n");
		return (-1);
	}

	xdma = xchan->xdma;
	data = xdma->data;

	chan = &sc->channels[data->id];
	if (chan->used == false) {
		if (axidma_reset(sc, data->id) != 0)
			return (-1);
		chan->xchan = xchan;
		xchan->caps |= XCHAN_CAP_BOUNCE;
		xchan->chan = (void *)chan;
		chan->sc = sc;
		chan->used = true;
		chan->idx_head = 0;
		chan->idx_tail = 0;
		chan->descs_used_count = 0;
		chan->descs_num = AXIDMA_DESCS_NUM;

		return (0);
	}

	return (-1);
}

static int
axidma_channel_free(device_t dev, struct xdma_channel *xchan)
{
	struct axidma_channel *chan;
	struct axidma_softc *sc;

	sc = device_get_softc(dev);

	chan = (struct axidma_channel *)xchan->chan;

	axidma_desc_free(sc, chan);

	chan->used = false;

	return (0);
}

static int
axidma_channel_capacity(device_t dev, xdma_channel_t *xchan,
    uint32_t *capacity)
{
	struct axidma_channel *chan;
	uint32_t c;

	chan = (struct axidma_channel *)xchan->chan;

	/* At least one descriptor must be left empty. */
	c = (chan->descs_num - chan->descs_used_count - 1);

	*capacity = c;

	return (0);
}

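/*
 * Queue a scatter-gather list: fill one descriptor per segment, then
 * write the physical address of the last filled descriptor to
 * TAILDESC, which tells the engine how far it may fetch.
 */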
static int
axidma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
    struct xdma_sglist *sg, uint32_t sg_n)
{
	xdma_controller_t *xdma;
	struct axidma_fdt_data *data;
	struct axidma_channel *chan;
	struct axidma_desc *desc;
	struct axidma_softc *sc;
	uint32_t src_addr;
	uint32_t dst_addr;
	uint32_t addr;
	uint32_t len;
	uint32_t tmp;
	int i;

	dprintf("%s: sg_n %d\n", __func__, sg_n);

	sc = device_get_softc(dev);

	chan = (struct axidma_channel *)xchan->chan;
	xdma = xchan->xdma;
	data = xdma->data;

	if (sg_n == 0)
		return (0);

	tmp = 0;

	for (i = 0; i < sg_n; i++) {
		src_addr = (uint32_t)sg[i].src_addr;
		dst_addr = (uint32_t)sg[i].dst_addr;
		len = (uint32_t)sg[i].len;

		dprintf("%s(%d): src %x dst %x len %d\n", __func__,
		    data->id, src_addr, dst_addr, len);

		desc = chan->descs[chan->idx_head];
		if (sg[i].direction == XDMA_MEM_TO_DEV)
			desc->phys = src_addr;
		else
			desc->phys = dst_addr;
		desc->status = 0;
		desc->control = len;
		if (sg[i].first == 1)
			desc->control |= BD_CONTROL_TXSOF;
		if (sg[i].last == 1)
			desc->control |= BD_CONTROL_TXEOF;

		cpu_dcache_wbinv_range((vm_offset_t)desc,
		    sizeof(struct axidma_desc));

		tmp = chan->idx_head;

		atomic_add_int(&chan->descs_used_count, 1);
		chan->idx_head = axidma_next_desc(chan, chan->idx_head);
	}

	dprintf("%s(%d): curdesc %jx\n", __func__, data->id,
	    (uintmax_t)READ8(sc, AXI_CURDESC(data->id)));
	dprintf("%s(%d): curdesc %jx\n", __func__, data->id,
	    (uintmax_t)READ8(sc, AXI_CURDESC(data->id)));
	dprintf("%s(%d): status %x\n", __func__, data->id,
	    READ4(sc, AXI_DMASR(data->id)));

	addr = chan->descs_phys[tmp];
	WRITE8(sc, AXI_TAILDESC(data->id), addr);

	return (0);
}

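/*
 * Prepare a channel for scatter-gather operation: allocate the ring,
 * chain each descriptor to the next (wrapping the last back to the
 * first), program CURDESC, and start the channel with completion,
 * delay, and error interrupts enabled.
 */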
static int
axidma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
{
	xdma_controller_t *xdma;
	struct axidma_fdt_data *data;
	struct axidma_channel *chan;
	struct axidma_desc *desc;
	struct axidma_softc *sc;
	uint32_t addr;
	uint32_t reg;
	int ret;
	int i;

	sc = device_get_softc(dev);

	chan = (struct axidma_channel *)xchan->chan;
	xdma = xchan->xdma;
	data = xdma->data;

	dprintf("%s(%d)\n", __func__, data->id);

	ret = axidma_desc_alloc(sc, xchan, sizeof(struct axidma_desc));
	if (ret != 0) {
		device_printf(sc->dev,
		    "%s: Can't allocate descriptors.\n", __func__);
		return (-1);
	}

	for (i = 0; i < chan->descs_num; i++) {
		desc = chan->descs[i];
		bzero(desc, sizeof(struct axidma_desc));

		if (i == (chan->descs_num - 1))
			desc->next = chan->descs_phys[0];
		else
			desc->next = chan->descs_phys[i + 1];
		desc->status = 0;
		desc->control = 0;

		dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
		    data->id, i, (uint64_t)desc, le32toh(desc->next));
	}

	addr = chan->descs_phys[0];
	WRITE8(sc, AXI_CURDESC(data->id), addr);

	reg = READ4(sc, AXI_DMACR(data->id));
	reg |= DMACR_IOC_IRQEN | DMACR_DLY_IRQEN | DMACR_ERR_IRQEN;
	WRITE4(sc, AXI_DMACR(data->id), reg);
	reg |= DMACR_RS;
	WRITE4(sc, AXI_DMACR(data->id), reg);

	return (0);
}

static int
axidma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
{

	switch (cmd) {
	case XDMA_CMD_BEGIN:
	case XDMA_CMD_TERMINATE:
	case XDMA_CMD_PAUSE:
		/* TODO: implement me */
		return (-1);
	}

	return (0);
}

#ifdef FDT
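/*
 * Parse the single-cell xdma specifier from a consumer's FDT node;
 * the cell carries the channel index (data->id).
 */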
static int
axidma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
{
	struct axidma_fdt_data *data;

	if (ncells != 1)
		return (-1);

	data = malloc(sizeof(struct axidma_fdt_data),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	data->id = cells[0];

	*ptr = data;

	return (0);
}
#endif

static device_method_t axidma_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		axidma_probe),
	DEVMETHOD(device_attach,	axidma_attach),
	DEVMETHOD(device_detach,	axidma_detach),

	/* xDMA Interface */
	DEVMETHOD(xdma_channel_alloc,	axidma_channel_alloc),
	DEVMETHOD(xdma_channel_free,	axidma_channel_free),
	DEVMETHOD(xdma_channel_control,	axidma_channel_control),

	/* xDMA SG Interface */
	DEVMETHOD(xdma_channel_capacity,	axidma_channel_capacity),
	DEVMETHOD(xdma_channel_prep_sg,		axidma_channel_prep_sg),
	DEVMETHOD(xdma_channel_submit_sg,	axidma_channel_submit_sg),

#ifdef FDT
	DEVMETHOD(xdma_ofw_md_data,	axidma_ofw_md_data),
#endif

	DEVMETHOD_END
};

static driver_t axidma_driver = {
	"axidma",
	axidma_methods,
	sizeof(struct axidma_softc),
};

EARLY_DRIVER_MODULE(axidma, simplebus, axidma_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);