/*-
 * Copyright (c) 2016 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Allwinner DMA controller
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/endian.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <arm/allwinner/a10_dmac.h>
#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>

#include "sunxi_dma_if.h"

#define	DMA_IRQ_EN_REG0		0x00
#define	DMA_IRQ_EN_REG1		0x04
#define	DMA_IRQ_EN_REG(ch)	(DMA_IRQ_EN_REG0 + ((ch) / 8) * 4)
#define	 DMA_PKG_IRQ_EN(ch)	(1 << (((ch) % 8) * 4 + 1))
#define	 DMA_PKG_IRQ_MASK	0x2222222222222222ULL
#define	DMA_IRQ_PEND_REG0	0x10
#define	DMA_IRQ_PEND_REG1	0x14
#define	DMA_IRQ_PEND_REG(ch)	(DMA_IRQ_PEND_REG0 + ((ch) / 8) * 4)
#define	DMA_STA_REG		0x30
#define	DMA_EN_REG(n)		(0x100 + (n) * 0x40 + 0x00)
#define	 DMA_EN			(1 << 0)
#define	DMA_PAU_REG(n)		(0x100 + (n) * 0x40 + 0x04)
#define	DMA_STAR_ADDR_REG(n)	(0x100 + (n) * 0x40 + 0x08)
#define	DMA_CFG_REG(n)		(0x100 + (n) * 0x40 + 0x0c)
#define	 DMA_DEST_DATA_WIDTH		(0x3 << 25)
#define	 DMA_DEST_DATA_WIDTH_SHIFT	25
#define	 DMA_DEST_BST_LEN		(0x3 << 22)
#define	 DMA_DEST_BST_LEN_SHIFT		22
#define	 DMA_DEST_ADDR_MODE		(0x1 << 21)
#define	 DMA_DEST_ADDR_MODE_SHIFT	21
#define	 DMA_DEST_DRQ_TYPE		(0x1f << 16)
#define	 DMA_DEST_DRQ_TYPE_SHIFT	16
#define	 DMA_SRC_DATA_WIDTH		(0x3 << 9)
#define	 DMA_SRC_DATA_WIDTH_SHIFT	9
#define	 DMA_SRC_BST_LEN		(0x3 << 6)
#define	 DMA_SRC_BST_LEN_SHIFT		6
#define	 DMA_SRC_ADDR_MODE		(0x1 << 5)
#define	 DMA_SRC_ADDR_MODE_SHIFT	5
#define	 DMA_SRC_DRQ_TYPE		(0x1f << 0)
#define	 DMA_SRC_DRQ_TYPE_SHIFT		0
#define	 DMA_DATA_WIDTH_8BIT		0
#define	 DMA_DATA_WIDTH_16BIT		1
#define	 DMA_DATA_WIDTH_32BIT		2
#define	 DMA_DATA_WIDTH_64BIT		3
#define	 DMA_ADDR_MODE_LINEAR		0
#define	 DMA_ADDR_MODE_IO		1
#define	 DMA_BST_LEN_1			0
#define	 DMA_BST_LEN_4			1
#define	 DMA_BST_LEN_8			2
#define	 DMA_BST_LEN_16			3
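/*
 * Editorial note (assumption drawn from the macros above): each channel owns
 * a 0x40-byte register window starting at offset 0x100, i.e. channel n's
 * registers sit at 0x100 + n * 0x40, so for example channel 2's DMA_EN_REG
 * is at 0x180.  The remaining per-channel registers below follow the same
 * layout.
 */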
#define	DMA_CUR_SRC_REG(n)	(0x100 + (n) * 0x40 + 0x10)
#define	DMA_CUR_DEST_REG(n)	(0x100 + (n) * 0x40 + 0x14)
#define	DMA_BCNT_LEFT_REG(n)	(0x100 + (n) * 0x40 + 0x18)
#define	DMA_PARA_REG(n)		(0x100 + (n) * 0x40 + 0x1c)
#define	 WAIT_CYC		(0xff << 0)
#define	 WAIT_CYC_SHIFT		0

struct a31dmac_desc {
	uint32_t	config;
	uint32_t	srcaddr;
	uint32_t	dstaddr;
	uint32_t	bcnt;
	uint32_t	para;
	uint32_t	next;
#define	DMA_NULL	0xfffff800
};
#define	DESC_ALIGN	4
#define	DESC_SIZE	sizeof(struct a31dmac_desc)

struct a31dmac_config {
	u_int	nchans;
};

static const struct a31dmac_config a31_config = { .nchans = 16 };
static const struct a31dmac_config h3_config = { .nchans = 12 };
static const struct a31dmac_config a83t_config = { .nchans = 8 };
static const struct a31dmac_config a64_config = { .nchans = 8 };

static struct ofw_compat_data compat_data[] = {
	{ "allwinner,sun6i-a31-dma",	(uintptr_t)&a31_config },
	{ "allwinner,sun8i-a83t-dma",	(uintptr_t)&a83t_config },
	{ "allwinner,sun8i-h3-dma",	(uintptr_t)&h3_config },
	{ "allwinner,sun50i-a64-dma",	(uintptr_t)&a64_config },
	{ NULL,				(uintptr_t)NULL }
};

struct a31dmac_softc;

struct a31dmac_channel {
	struct a31dmac_softc	*sc;
	uint8_t			index;
	void			(*callback)(void *);
	void			*callbackarg;

	bus_dmamap_t		dmamap;
	struct a31dmac_desc	*desc;
	bus_addr_t		physaddr;
};

struct a31dmac_softc {
	struct resource		*res[2];
	struct mtx		mtx;
	void			*ih;

	bus_dma_tag_t		dmat;

	u_int			nchans;
	struct a31dmac_channel	*chans;
};

static struct resource_spec a31dmac_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

#define	DMA_READ(sc, reg)	bus_read_4((sc)->res[0], (reg))
#define	DMA_WRITE(sc, reg, val)	bus_write_4((sc)->res[0], (reg), (val))

static void a31dmac_intr(void *);
static void a31dmac_dmamap_cb(void *, bus_dma_segment_t *, int, int);

static int
a31dmac_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "Allwinner DMA controller");
	return (BUS_PROBE_DEFAULT);
}

static int
a31dmac_attach(device_t dev)
{
	struct a31dmac_softc *sc;
	struct a31dmac_config *conf;
	u_int index;
	hwreset_t rst;
	clk_t clk;
	int error;

	sc = device_get_softc(dev);
	conf = (void *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	clk = NULL;
	rst = NULL;

	if (bus_alloc_resources(dev, a31dmac_spec, sc->res)) {
		device_printf(dev, "cannot allocate resources for device\n");
		return (ENXIO);
	}

	mtx_init(&sc->mtx, "a31 dmac", NULL, MTX_SPIN);

	/* Clock and reset setup */
	if (clk_get_by_ofw_index(dev, 0, 0, &clk) != 0) {
		device_printf(dev, "cannot get clock\n");
		goto fail;
	}
	if (clk_enable(clk) != 0) {
		device_printf(dev, "cannot enable clock\n");
		goto fail;
	}
	if (hwreset_get_by_ofw_idx(dev, 0, 0, &rst) != 0) {
		device_printf(dev, "cannot get hwreset\n");
		goto fail;
	}
	if (hwreset_deassert(rst) != 0) {
		device_printf(dev, "cannot de-assert reset\n");
		goto fail;
	}
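
	/*
	 * Editorial note: each channel is driven by a single in-memory
	 * descriptor (struct a31dmac_desc) that the controller fetches by
	 * physical address, so the tag created below restricts descriptors
	 * to the lower 4GB (BUS_SPACE_MAXADDR_32BIT) and the per-channel
	 * allocations further down are made BUS_DMA_COHERENT.
	 */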

	/* Descriptor DMA */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    DESC_SIZE, 1,		/* maxsize, nsegs */
	    DESC_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->dmat);
	if (error != 0) {
		device_printf(dev, "cannot create dma tag\n");
		goto fail;
	}

	/* Disable all interrupts and clear pending status */
	DMA_WRITE(sc, DMA_IRQ_EN_REG0, 0);
	DMA_WRITE(sc, DMA_IRQ_EN_REG1, 0);
	DMA_WRITE(sc, DMA_IRQ_PEND_REG0, ~0);
	DMA_WRITE(sc, DMA_IRQ_PEND_REG1, ~0);

	/* Initialize channels */
	sc->nchans = conf->nchans;
	sc->chans = malloc(sizeof(*sc->chans) * sc->nchans, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	for (index = 0; index < sc->nchans; index++) {
		sc->chans[index].sc = sc;
		sc->chans[index].index = index;
		sc->chans[index].callback = NULL;
		sc->chans[index].callbackarg = NULL;

		error = bus_dmamem_alloc(sc->dmat,
		    (void **)&sc->chans[index].desc,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		    &sc->chans[index].dmamap);
		if (error != 0) {
			device_printf(dev, "cannot allocate dma mem\n");
			goto fail;
		}
		error = bus_dmamap_load(sc->dmat, sc->chans[index].dmamap,
		    sc->chans[index].desc, sizeof(*sc->chans[index].desc),
		    a31dmac_dmamap_cb, &sc->chans[index], BUS_DMA_WAITOK);
		if (error != 0) {
			device_printf(dev, "cannot load dma map\n");
			goto fail;
		}

		DMA_WRITE(sc, DMA_EN_REG(index), 0);
	}

	error = bus_setup_intr(dev, sc->res[1], INTR_MPSAFE | INTR_TYPE_MISC,
	    NULL, a31dmac_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler\n");
		bus_release_resources(dev, a31dmac_spec, sc->res);
		mtx_destroy(&sc->mtx);
		return (ENXIO);
	}

	OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
	return (0);

fail:
	for (index = 0; index < sc->nchans; index++)
		if (sc->chans[index].desc != NULL) {
			bus_dmamap_unload(sc->dmat, sc->chans[index].dmamap);
			bus_dmamem_free(sc->dmat, sc->chans[index].desc,
			    sc->chans[index].dmamap);
		}
	if (sc->chans != NULL)
		free(sc->chans, M_DEVBUF);
	if (sc->ih != NULL)
		bus_teardown_intr(dev, sc->res[1], sc->ih);
	if (rst != NULL)
		hwreset_release(rst);
	if (clk != NULL)
		clk_release(clk);
	bus_release_resources(dev, a31dmac_spec, sc->res);

	return (ENXIO);
}

static void
a31dmac_dmamap_cb(void *priv, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct a31dmac_channel *ch;

	if (error != 0)
		return;

	ch = priv;
	ch->physaddr = segs[0].ds_addr;
}
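
/*
 * Editorial note on the interrupt demux below: the controller exposes two
 * pending registers covering eight channels each, with four status bits per
 * channel.  The handler folds them into one 64-bit word and walks only the
 * "packet done" bits (DMA_PKG_IRQ_MASK).  For example, channel 9's packet
 * IRQ is bit (9 % 8) * 4 + 1 = 5 of DMA_IRQ_PEND_REG1, i.e. bit 37 of the
 * folded word, and ffsll() returning 38 maps back to channel (38 - 1) / 4 = 9.
 */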

static void
a31dmac_intr(void *priv)
{
	struct a31dmac_softc *sc;
	uint32_t pend0, pend1, bit;
	uint64_t pend, mask;
	u_int index;

	sc = priv;
	pend0 = DMA_READ(sc, DMA_IRQ_PEND_REG0);
	pend1 = sc->nchans > 8 ? DMA_READ(sc, DMA_IRQ_PEND_REG1) : 0;
	if (pend0 == 0 && pend1 == 0)
		return;

	if (pend0 != 0)
		DMA_WRITE(sc, DMA_IRQ_PEND_REG0, pend0);
	if (pend1 != 0)
		DMA_WRITE(sc, DMA_IRQ_PEND_REG1, pend1);

	pend = pend0 | ((uint64_t)pend1 << 32);

	while ((bit = ffsll(pend & DMA_PKG_IRQ_MASK)) != 0) {
		/* Use a 64-bit shift; bits above 31 belong to REG1. */
		mask = (1ULL << (bit - 1));
		pend &= ~mask;
		index = (bit - 1) / 4;

		if (index >= sc->nchans)
			continue;
		if (sc->chans[index].callback == NULL)
			continue;
		sc->chans[index].callback(sc->chans[index].callbackarg);
	}
}

static int
a31dmac_set_config(device_t dev, void *priv,
    const struct sunxi_dma_config *cfg)
{
	struct a31dmac_channel *ch;
	uint32_t config, para;
	unsigned int dst_dw, dst_bl, dst_wc, dst_am;
	unsigned int src_dw, src_bl, src_wc, src_am;

	ch = priv;

	switch (cfg->dst_width) {
	case 8:
		dst_dw = DMA_DATA_WIDTH_8BIT;
		break;
	case 16:
		dst_dw = DMA_DATA_WIDTH_16BIT;
		break;
	case 32:
		dst_dw = DMA_DATA_WIDTH_32BIT;
		break;
	case 64:
		dst_dw = DMA_DATA_WIDTH_64BIT;
		break;
	default:
		return (EINVAL);
	}
	switch (cfg->dst_burst_len) {
	case 1:
		dst_bl = DMA_BST_LEN_1;
		break;
	case 4:
		dst_bl = DMA_BST_LEN_4;
		break;
	case 8:
		dst_bl = DMA_BST_LEN_8;
		break;
	case 16:
		dst_bl = DMA_BST_LEN_16;
		break;
	default:
		return (EINVAL);
	}
	switch (cfg->src_width) {
	case 8:
		src_dw = DMA_DATA_WIDTH_8BIT;
		break;
	case 16:
		src_dw = DMA_DATA_WIDTH_16BIT;
		break;
	case 32:
		src_dw = DMA_DATA_WIDTH_32BIT;
		break;
	case 64:
		src_dw = DMA_DATA_WIDTH_64BIT;
		break;
	default:
		return (EINVAL);
	}
	switch (cfg->src_burst_len) {
	case 1:
		src_bl = DMA_BST_LEN_1;
		break;
	case 4:
		src_bl = DMA_BST_LEN_4;
		break;
	case 8:
		src_bl = DMA_BST_LEN_8;
		break;
	case 16:
		src_bl = DMA_BST_LEN_16;
		break;
	default:
		return (EINVAL);
	}
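
	/*
	 * Editorial note: the width and burst-length codes chosen above,
	 * together with the address modes and DRQ numbers filled in below,
	 * are packed into one DMA_CFG_REG word.  For example, a 32-bit,
	 * 8-beat burst towards a non-incrementing device address encodes as
	 * dst_dw = 2, dst_bl = 2, dst_am = DMA_ADDR_MODE_IO, with the
	 * device's DRQ number in bits 20:16.
	 */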
	dst_am = cfg->dst_noincr ? DMA_ADDR_MODE_IO : DMA_ADDR_MODE_LINEAR;
	src_am = cfg->src_noincr ? DMA_ADDR_MODE_IO : DMA_ADDR_MODE_LINEAR;
	dst_wc = cfg->dst_wait_cyc;
	src_wc = cfg->src_wait_cyc;
	if (dst_wc != src_wc)
		return (EINVAL);

	config = (dst_dw << DMA_DEST_DATA_WIDTH_SHIFT) |
	    (dst_bl << DMA_DEST_BST_LEN_SHIFT) |
	    (dst_am << DMA_DEST_ADDR_MODE_SHIFT) |
	    (cfg->dst_drqtype << DMA_DEST_DRQ_TYPE_SHIFT) |
	    (src_dw << DMA_SRC_DATA_WIDTH_SHIFT) |
	    (src_bl << DMA_SRC_BST_LEN_SHIFT) |
	    (src_am << DMA_SRC_ADDR_MODE_SHIFT) |
	    (cfg->src_drqtype << DMA_SRC_DRQ_TYPE_SHIFT);
	para = (dst_wc << WAIT_CYC_SHIFT);

	ch->desc->config = htole32(config);
	ch->desc->para = htole32(para);

	return (0);
}

static void *
a31dmac_alloc(device_t dev, bool dedicated, void (*cb)(void *), void *cbarg)
{
	struct a31dmac_softc *sc;
	struct a31dmac_channel *ch;
	uint32_t irqen;
	u_int index;

	sc = device_get_softc(dev);
	ch = NULL;

	mtx_lock_spin(&sc->mtx);
	for (index = 0; index < sc->nchans; index++) {
		if (sc->chans[index].callback == NULL) {
			ch = &sc->chans[index];
			ch->callback = cb;
			ch->callbackarg = cbarg;

			irqen = DMA_READ(sc, DMA_IRQ_EN_REG(index));
			irqen |= DMA_PKG_IRQ_EN(index);
			DMA_WRITE(sc, DMA_IRQ_EN_REG(index), irqen);
			break;
		}
	}
	mtx_unlock_spin(&sc->mtx);

	return (ch);
}

static void
a31dmac_free(device_t dev, void *priv)
{
	struct a31dmac_channel *ch;
	struct a31dmac_softc *sc;
	uint32_t irqen;
	u_int index;

	ch = priv;
	sc = ch->sc;
	index = ch->index;

	mtx_lock_spin(&sc->mtx);

	irqen = DMA_READ(sc, DMA_IRQ_EN_REG(index));
	irqen &= ~DMA_PKG_IRQ_EN(index);
	DMA_WRITE(sc, DMA_IRQ_EN_REG(index), irqen);
	DMA_WRITE(sc, DMA_IRQ_PEND_REG(index), DMA_PKG_IRQ_EN(index));

	ch->callback = NULL;
	ch->callbackarg = NULL;

	mtx_unlock_spin(&sc->mtx);
}

static int
a31dmac_transfer(device_t dev, void *priv, bus_addr_t src, bus_addr_t dst,
    size_t nbytes)
{
	struct a31dmac_channel *ch;
	struct a31dmac_softc *sc;

	ch = priv;
	sc = ch->sc;

	ch->desc->srcaddr = htole32((uint32_t)src);
	ch->desc->dstaddr = htole32((uint32_t)dst);
	ch->desc->bcnt = htole32(nbytes);
	ch->desc->next = htole32(DMA_NULL);

	DMA_WRITE(sc, DMA_STAR_ADDR_REG(ch->index), (uint32_t)ch->physaddr);
	DMA_WRITE(sc, DMA_EN_REG(ch->index), DMA_EN);

	return (0);
}

static void
a31dmac_halt(device_t dev, void *priv)
{
	struct a31dmac_channel *ch;
	struct a31dmac_softc *sc;

	ch = priv;
	sc = ch->sc;

	DMA_WRITE(sc, DMA_EN_REG(ch->index), 0);
}

static device_method_t a31dmac_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		a31dmac_probe),
	DEVMETHOD(device_attach,	a31dmac_attach),

	/* sunxi DMA interface */
	DEVMETHOD(sunxi_dma_alloc,	a31dmac_alloc),
	DEVMETHOD(sunxi_dma_free,	a31dmac_free),
	DEVMETHOD(sunxi_dma_set_config,	a31dmac_set_config),
	DEVMETHOD(sunxi_dma_transfer,	a31dmac_transfer),
	DEVMETHOD(sunxi_dma_halt,	a31dmac_halt),

	DEVMETHOD_END
};

static driver_t a31dmac_driver = {
	"a31dmac",
	a31dmac_methods,
	sizeof(struct a31dmac_softc)
};

static devclass_t a31dmac_devclass;

DRIVER_MODULE(a31dmac, simplebus, a31dmac_driver, a31dmac_devclass, 0, 0);
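
/*
 * Editorial note: peripheral drivers reach this controller through the
 * sunxi_dma_if methods rather than by calling into this file directly.
 * A minimal sketch, assuming a "dmac" device_t looked up via the DT xref,
 * hypothetical DRQ numbers (drq_mem, drq_dev) and a hypothetical xfer_done
 * callback, with error checking omitted:
 *
 *	void *chan = SUNXI_DMA_ALLOC(dmac, false, xfer_done, softc);
 *	struct sunxi_dma_config cfg = {
 *		.src_width = 32, .src_burst_len = 8, .src_drqtype = drq_mem,
 *		.dst_width = 32, .dst_burst_len = 8, .dst_drqtype = drq_dev,
 *		.dst_noincr = true,
 *	};
 *	SUNXI_DMA_SET_CONFIG(dmac, chan, &cfg);
 *	SUNXI_DMA_TRANSFER(dmac, chan, src_paddr, dst_paddr, nbytes);
 *	// xfer_done fires from a31dmac_intr when the packet completes
 *	SUNXI_DMA_FREE(dmac, chan);
 */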