/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Xilinx AXI DMA controller driver. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>
#include <dev/xilinx/axidma.h>

#include "xdma_if.h"

#define	AXIDMA_DEBUG
#undef	AXIDMA_DEBUG

#ifdef AXIDMA_DEBUG
#define	dprintf(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
#define	dprintf(fmt, ...)
#endif
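
/*
 * The AXI DMA engine provides one memory-to-device (MM2S, "TX") channel
 * and one device-to-memory (S2MM, "RX") channel.  Each channel owns a
 * circular ring of AXIDMA_DESCS_NUM scatter-gather descriptors.
 */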
#define	AXIDMA_NCHANNELS	2
#define	AXIDMA_DESCS_NUM	512
#define	AXIDMA_TX_CHAN		0
#define	AXIDMA_RX_CHAN		1

extern struct bus_space memmap_bus;

struct axidma_fdt_data {
	int	id;
};

struct axidma_channel {
	struct axidma_softc	*sc;
	xdma_channel_t		*xchan;
	bool			used;
	int			idx_head;
	int			idx_tail;

	struct axidma_desc	**descs;
	vm_paddr_t		*descs_phys;
	uint32_t		descs_num;

	vm_size_t		mem_size;
	vm_offset_t		mem_paddr;
	vm_offset_t		mem_vaddr;

	uint32_t		descs_used_count;
};

struct axidma_softc {
	device_t		dev;
	struct resource		*res[3];
	bus_space_tag_t		bst;
	bus_space_handle_t	bsh;
	void			*ih[2];
	struct axidma_desc	desc;
	struct axidma_channel	channels[AXIDMA_NCHANNELS];
};

static struct resource_spec axidma_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};

#define	HWTYPE_NONE	0
#define	HWTYPE_STD	1

static struct ofw_compat_data compat_data[] = {
	{ "xlnx,eth-dma",	HWTYPE_STD },
	{ NULL,			HWTYPE_NONE },
};

static int axidma_probe(device_t dev);
static int axidma_attach(device_t dev);
static int axidma_detach(device_t dev);

static inline uint32_t
axidma_next_desc(struct axidma_channel *chan, uint32_t curidx)
{

	return ((curidx + 1) % chan->descs_num);
}

/*
 * Common interrupt handler: acknowledge the channel status, walk the
 * completed descriptors from idx_tail towards idx_head, and report each
 * finished segment and the overall transfer status to the xDMA framework.
 */
static void
axidma_intr(struct axidma_softc *sc,
    struct axidma_channel *chan)
{
	xdma_transfer_status_t status;
	xdma_transfer_status_t st;
	struct axidma_fdt_data *data;
	xdma_controller_t *xdma;
	struct axidma_desc *desc;
	struct xdma_channel *xchan;
	uint32_t tot_copied;
	int pending;
	int errors;

	xchan = chan->xchan;
	xdma = xchan->xdma;
	data = xdma->data;

	/* Read and acknowledge the pending interrupt bits. */
	pending = READ4(sc, AXI_DMASR(data->id));
	WRITE4(sc, AXI_DMASR(data->id), pending);

	errors = (pending & (DMASR_DMAINTERR | DMASR_DMASLVERR
	    | DMASR_DMADECOREERR | DMASR_SGINTERR
	    | DMASR_SGSLVERR | DMASR_SGDECERR));

	dprintf("%s: AXI_DMASR %x\n", __func__,
	    READ4(sc, AXI_DMASR(data->id)));
	dprintf("%s: AXI_CURDESC %x\n", __func__,
	    READ4(sc, AXI_CURDESC(data->id)));
	dprintf("%s: AXI_TAILDESC %x\n", __func__,
	    READ4(sc, AXI_TAILDESC(data->id)));

	tot_copied = 0;

	while (chan->idx_tail != chan->idx_head) {
		desc = chan->descs[chan->idx_tail];
		if ((desc->status & BD_STATUS_CMPLT) == 0)
			break;

		st.error = errors;
		st.transferred = desc->status & BD_CONTROL_LEN_M;
		tot_copied += st.transferred;
		xchan_seg_done(xchan, &st);

		chan->idx_tail = axidma_next_desc(chan, chan->idx_tail);
		atomic_subtract_int(&chan->descs_used_count, 1);
	}

	/* Finish operation */
	status.error = errors;
	status.transferred = tot_copied;
	xdma_callback(chan->xchan, &status);
}

static void
axidma_intr_rx(void *arg)
{
	struct axidma_softc *sc;
	struct axidma_channel *chan;

	dprintf("%s\n", __func__);

	sc = arg;
	chan = &sc->channels[AXIDMA_RX_CHAN];

	axidma_intr(sc, chan);
}

static void
axidma_intr_tx(void *arg)
{
	struct axidma_softc *sc;
	struct axidma_channel *chan;

	dprintf("%s\n", __func__);

	sc = arg;
	chan = &sc->channels[AXIDMA_TX_CHAN];

	axidma_intr(sc, chan);
}
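
/*
 * Issue a soft reset of the given channel and poll DMACR until the
 * RESET bit self-clears.
 */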
static int
axidma_reset(struct axidma_softc *sc, int chan_id)
{
	int timeout;

	WRITE4(sc, AXI_DMACR(chan_id), DMACR_RESET);

	timeout = 100;
	do {
		if ((READ4(sc, AXI_DMACR(chan_id)) & DMACR_RESET) == 0)
			break;
	} while (timeout--);

	dprintf("timeout %d\n", timeout);

	/* The post-decrement leaves timeout at -1 if the bit never cleared. */
	if (timeout < 0)
		return (-1);

	dprintf("%s: read control after reset: %x\n",
	    __func__, READ4(sc, AXI_DMACR(chan_id)));

	return (0);
}

static int
axidma_probe(device_t dev)
{
	int hwtype;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (hwtype == HWTYPE_NONE)
		return (ENXIO);

	device_set_desc(dev, "Xilinx AXI DMA");

	return (BUS_PROBE_DEFAULT);
}

static int
axidma_attach(device_t dev)
{
	struct axidma_softc *sc;
	phandle_t xref, node;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (bus_alloc_resources(dev, axidma_spec, sc->res)) {
		device_printf(dev, "could not allocate resources.\n");
		return (ENXIO);
	}

	/* CSR memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	/* Setup TX interrupt handler. */
	err = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, axidma_intr_tx, sc, &sc->ih[0]);
	if (err) {
		device_printf(dev, "Unable to alloc interrupt resource.\n");
		return (ENXIO);
	}

	/* Setup RX interrupt handler. */
	err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, axidma_intr_rx, sc, &sc->ih[1]);
	if (err) {
		device_printf(dev, "Unable to alloc interrupt resource.\n");
		return (ENXIO);
	}

	node = ofw_bus_get_node(dev);
	xref = OF_xref_from_node(node);
	OF_device_register_xref(xref, dev);

	return (0);
}

static int
axidma_detach(device_t dev)
{
	struct axidma_softc *sc;

	sc = device_get_softc(dev);

	bus_teardown_intr(dev, sc->res[1], sc->ih[0]);
	bus_teardown_intr(dev, sc->res[2], sc->ih[1]);
	bus_release_resources(dev, axidma_spec, sc->res);

	return (0);
}

static int
axidma_desc_free(struct axidma_softc *sc, struct axidma_channel *chan)
{
	struct xdma_channel *xchan;
	int nsegments;

	nsegments = chan->descs_num;
	xchan = chan->xchan;

	free(chan->descs, M_DEVBUF);
	free(chan->descs_phys, M_DEVBUF);

	pmap_kremove_device(chan->mem_vaddr, chan->mem_size);
	kva_free(chan->mem_vaddr, chan->mem_size);
	vmem_free(xchan->vmem, chan->mem_paddr, chan->mem_size);

	return (0);
}
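
/*
 * Allocate the descriptor ring: physical space is taken from the channel's
 * vmem arena and mapped into KVA as device (uncached) memory, so the DMA
 * engine and the CPU see a consistent view of the descriptors.
 */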
static int
axidma_desc_alloc(struct axidma_softc *sc, struct xdma_channel *xchan,
    uint32_t desc_size)
{
	struct axidma_channel *chan;
	int nsegments;
	int i;

	chan = (struct axidma_channel *)xchan->chan;
	nsegments = chan->descs_num;

	chan->descs = malloc(nsegments * sizeof(struct axidma_desc *),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (chan->descs == NULL) {
		device_printf(sc->dev,
		    "%s: Can't allocate memory.\n", __func__);
		return (-1);
	}

	chan->descs_phys = malloc(nsegments * sizeof(bus_dma_segment_t),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	chan->mem_size = desc_size * nsegments;
	if (vmem_alloc(xchan->vmem, chan->mem_size, M_FIRSTFIT | M_NOWAIT,
	    &chan->mem_paddr)) {
		device_printf(sc->dev, "Failed to allocate memory.\n");
		return (-1);
	}
	chan->mem_vaddr = kva_alloc(chan->mem_size);
	pmap_kenter_device(chan->mem_vaddr, chan->mem_size, chan->mem_paddr);

	device_printf(sc->dev, "Allocated chunk %jx %ju\n",
	    (uintmax_t)chan->mem_paddr, (uintmax_t)chan->mem_size);

	for (i = 0; i < nsegments; i++) {
		chan->descs[i] = (struct axidma_desc *)
		    ((uint64_t)chan->mem_vaddr + desc_size * i);
		chan->descs_phys[i] = chan->mem_paddr + desc_size * i;
	}

	return (0);
}

static int
axidma_channel_alloc(device_t dev, struct xdma_channel *xchan)
{
	xdma_controller_t *xdma;
	struct axidma_fdt_data *data;
	struct axidma_channel *chan;
	struct axidma_softc *sc;

	sc = device_get_softc(dev);

	if (xchan->caps & XCHAN_CAP_BUSDMA) {
		device_printf(sc->dev,
		    "Error: busdma operation is not implemented.\n");
		return (-1);
	}

	xdma = xchan->xdma;
	data = xdma->data;

	chan = &sc->channels[data->id];
	if (chan->used == false) {
		if (axidma_reset(sc, data->id) != 0)
			return (-1);
		chan->xchan = xchan;
		xchan->chan = (void *)chan;
		chan->sc = sc;
		chan->used = true;
		chan->idx_head = 0;
		chan->idx_tail = 0;
		chan->descs_used_count = 0;
		chan->descs_num = AXIDMA_DESCS_NUM;

		return (0);
	}

	return (-1);
}

static int
axidma_channel_free(device_t dev, struct xdma_channel *xchan)
{
	struct axidma_channel *chan;
	struct axidma_softc *sc;

	sc = device_get_softc(dev);

	chan = (struct axidma_channel *)xchan->chan;

	axidma_desc_free(sc, chan);

	chan->used = false;

	return (0);
}

static int
axidma_channel_capacity(device_t dev, xdma_channel_t *xchan,
    uint32_t *capacity)
{
	struct axidma_channel *chan;
	uint32_t c;

	chan = (struct axidma_channel *)xchan->chan;

	/* At least one descriptor must be left empty. */
	c = (chan->descs_num - chan->descs_used_count - 1);

	*capacity = c;

	return (0);
}
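
/*
 * Queue a scatter-gather list: each segment is translated into one
 * descriptor at idx_head, and the hardware is kicked by writing the
 * physical address of the last filled descriptor to TAILDESC.
 */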
static int
axidma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
    struct xdma_sglist *sg, uint32_t sg_n)
{
	xdma_controller_t *xdma;
	struct axidma_fdt_data *data;
	struct axidma_channel *chan;
	struct axidma_desc *desc;
	struct axidma_softc *sc;
	uint32_t src_addr;
	uint32_t dst_addr;
	uint32_t addr;
	uint32_t len;
	uint32_t tmp;
	int i;
	int tail;

	dprintf("%s: sg_n %d\n", __func__, sg_n);

	sc = device_get_softc(dev);

	chan = (struct axidma_channel *)xchan->chan;
	xdma = xchan->xdma;
	data = xdma->data;

	if (sg_n == 0)
		return (0);

	tail = chan->idx_head;

	tmp = 0;

	for (i = 0; i < sg_n; i++) {
		src_addr = (uint32_t)sg[i].src_addr;
		dst_addr = (uint32_t)sg[i].dst_addr;
		len = (uint32_t)sg[i].len;

		dprintf("%s(%d): src %x dst %x len %d\n", __func__,
		    data->id, src_addr, dst_addr, len);

		desc = chan->descs[chan->idx_head];
		if (sg[i].direction == XDMA_MEM_TO_DEV)
			desc->phys = src_addr;
		else
			desc->phys = dst_addr;
		desc->status = 0;
		desc->control = len;
		if (sg[i].first == 1)
			desc->control |= BD_CONTROL_TXSOF;
		if (sg[i].last == 1)
			desc->control |= BD_CONTROL_TXEOF;

		tmp = chan->idx_head;

		atomic_add_int(&chan->descs_used_count, 1);
		chan->idx_head = axidma_next_desc(chan, chan->idx_head);
	}

	dprintf("%s(%d): _curdesc %x\n", __func__, data->id,
	    READ8(sc, AXI_CURDESC(data->id)));
	dprintf("%s(%d): _curdesc %x\n", __func__, data->id,
	    READ8(sc, AXI_CURDESC(data->id)));
	dprintf("%s(%d): status %x\n", __func__, data->id,
	    READ4(sc, AXI_DMASR(data->id)));

	addr = chan->descs_phys[tmp];
	WRITE8(sc, AXI_TAILDESC(data->id), addr);

	return (0);
}

static int
axidma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
{
	xdma_controller_t *xdma;
	struct axidma_fdt_data *data;
	struct axidma_channel *chan;
	struct axidma_desc *desc;
	struct axidma_softc *sc;
	uint32_t addr;
	uint32_t reg;
	int ret;
	int i;

	sc = device_get_softc(dev);

	chan = (struct axidma_channel *)xchan->chan;
	xdma = xchan->xdma;
	data = xdma->data;

	dprintf("%s(%d)\n", __func__, data->id);

	ret = axidma_desc_alloc(sc, xchan, sizeof(struct axidma_desc));
	if (ret != 0) {
		device_printf(sc->dev,
		    "%s: Can't allocate descriptors.\n", __func__);
		return (-1);
	}

	for (i = 0; i < chan->descs_num; i++) {
		desc = chan->descs[i];
		bzero(desc, sizeof(struct axidma_desc));

		if (i == (chan->descs_num - 1))
			desc->next = chan->descs_phys[0];
		else
			desc->next = chan->descs_phys[i + 1];
		desc->status = 0;
		desc->control = 0;

		dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
		    data->id, i, (uint64_t)desc, le32toh(desc->next));
	}

	addr = chan->descs_phys[0];
	WRITE8(sc, AXI_CURDESC(data->id), addr);

	reg = READ4(sc, AXI_DMACR(data->id));
	reg |= DMACR_IOC_IRQEN | DMACR_DLY_IRQEN | DMACR_ERR_IRQEN;
	WRITE4(sc, AXI_DMACR(data->id), reg);
	reg |= DMACR_RS;
	WRITE4(sc, AXI_DMACR(data->id), reg);

	return (0);
}

static int
axidma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
{
	struct axidma_channel *chan;
	struct axidma_softc *sc;

	sc = device_get_softc(dev);

	chan = (struct axidma_channel *)xchan->chan;

	switch (cmd) {
	case XDMA_CMD_BEGIN:
	case XDMA_CMD_TERMINATE:
	case XDMA_CMD_PAUSE:
		/* TODO: implement me */
		return (-1);
	}

	return (0);
}
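
/*
 * The FDT DMA specifier carries a single cell that selects the channel:
 * 0 for TX (MM2S), 1 for RX (S2MM).  A consumer node would reference the
 * controller with something like (illustrative only; label and property
 * names depend on the consumer's binding):
 *
 *	dmas = <&eth_dma 0>, <&eth_dma 1>;
 *	dma-names = "tx", "rx";
 */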
#ifdef FDT
static int
axidma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
{
	struct axidma_fdt_data *data;

	if (ncells != 1)
		return (-1);

	data = malloc(sizeof(struct axidma_fdt_data),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	data->id = cells[0];

	*ptr = data;

	return (0);
}
#endif

static device_method_t axidma_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		axidma_probe),
	DEVMETHOD(device_attach,	axidma_attach),
	DEVMETHOD(device_detach,	axidma_detach),

	/* xDMA Interface */
	DEVMETHOD(xdma_channel_alloc,	axidma_channel_alloc),
	DEVMETHOD(xdma_channel_free,	axidma_channel_free),
	DEVMETHOD(xdma_channel_control,	axidma_channel_control),

	/* xDMA SG Interface */
	DEVMETHOD(xdma_channel_capacity,	axidma_channel_capacity),
	DEVMETHOD(xdma_channel_prep_sg,		axidma_channel_prep_sg),
	DEVMETHOD(xdma_channel_submit_sg,	axidma_channel_submit_sg),

#ifdef FDT
	DEVMETHOD(xdma_ofw_md_data,	axidma_ofw_md_data),
#endif

	DEVMETHOD_END
};

static driver_t axidma_driver = {
	"axidma",
	axidma_methods,
	sizeof(struct axidma_softc),
};

static devclass_t axidma_devclass;

EARLY_DRIVER_MODULE(axidma, simplebus, axidma_driver, axidma_devclass, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);