/*-
 * Copyright (c) 2016 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Allwinner DMA controller
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/endian.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <arm/allwinner/a10_dmac.h>
#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>

#include "sunxi_dma_if.h"

#define	DMA_IRQ_EN_REG0		0x00
#define	DMA_IRQ_EN_REG1		0x04
#define	DMA_IRQ_EN_REG(ch)	(DMA_IRQ_EN_REG0 + ((ch) / 8) * 4)
#define	 DMA_PKG_IRQ_EN(ch)	(1 << (((ch) % 8) * 4 + 1))
#define	 DMA_PKG_IRQ_MASK	0x2222222222222222ULL
#define	DMA_IRQ_PEND_REG0	0x10
#define	DMA_IRQ_PEND_REG1	0x14
#define	DMA_IRQ_PEND_REG(ch)	(DMA_IRQ_PEND_REG0 + ((ch) / 8) * 4)
#define	DMA_STA_REG		0x30
#define	DMA_EN_REG(n)		(0x100 + (n) * 0x40 + 0x00)
#define	 DMA_EN			(1 << 0)
#define	DMA_PAU_REG(n)		(0x100 + (n) * 0x40 + 0x04)
#define	DMA_STAR_ADDR_REG(n)	(0x100 + (n) * 0x40 + 0x08)
#define	DMA_CFG_REG(n)		(0x100 + (n) * 0x40 + 0x0c)
#define	 DMA_DEST_DATA_WIDTH		(0x3 << 25)
#define	 DMA_DEST_DATA_WIDTH_SHIFT	25
#define	 DMA_DEST_BST_LEN		(0x3 << 22)
#define	 DMA_DEST_BST_LEN_SHIFT		22
#define	 DMA_DEST_ADDR_MODE		(0x1 << 21)
#define	 DMA_DEST_ADDR_MODE_SHIFT	21
#define	 DMA_DEST_DRQ_TYPE		(0x1f << 16)
#define	 DMA_DEST_DRQ_TYPE_SHIFT	16
#define	 DMA_SRC_DATA_WIDTH		(0x3 << 9)
#define	 DMA_SRC_DATA_WIDTH_SHIFT	9
#define	 DMA_SRC_BST_LEN		(0x3 << 6)
#define	 DMA_SRC_BST_LEN_SHIFT		6
#define	 DMA_SRC_ADDR_MODE		(0x1 << 5)
#define	 DMA_SRC_ADDR_MODE_SHIFT	5
#define	 DMA_SRC_DRQ_TYPE		(0x1f << 0)
#define	 DMA_SRC_DRQ_TYPE_SHIFT		0
#define	 DMA_DATA_WIDTH_8BIT		0
#define	 DMA_DATA_WIDTH_16BIT		1
#define	 DMA_DATA_WIDTH_32BIT		2
#define	 DMA_DATA_WIDTH_64BIT		3
#define	 DMA_ADDR_MODE_LINEAR		0
#define	 DMA_ADDR_MODE_IO		1
#define	 DMA_BST_LEN_1			0
#define	 DMA_BST_LEN_4			1
#define	 DMA_BST_LEN_8			2
#define	 DMA_BST_LEN_16			3
#define	DMA_CUR_SRC_REG(n)	(0x100 + (n) * 0x40 + 0x10)
#define	DMA_CUR_DEST_REG(n)	(0x100 + (n) * 0x40 + 0x14)
#define	DMA_BCNT_LEFT_REG(n)	(0x100 + (n) * 0x40 + 0x18)
#define	DMA_PARA_REG(n)		(0x100 + (n) * 0x40 + 0x1c)
#define	 WAIT_CYC		(0xff << 0)
#define	 WAIT_CYC_SHIFT		0

struct a31dmac_desc {
	uint32_t	config;
	uint32_t	srcaddr;
	uint32_t	dstaddr;
	uint32_t	bcnt;
	uint32_t	para;
	uint32_t	next;
#define	DMA_NULL	0xfffff800
};

#define	DESC_ALIGN	4
#define	DESC_SIZE	sizeof(struct a31dmac_desc)

struct a31dmac_config {
	u_int	nchans;
};

static const struct a31dmac_config a31_config = { .nchans = 16 };
static const struct a31dmac_config h3_config = { .nchans = 12 };
static const struct a31dmac_config a83t_config = { .nchans = 8 };
static const struct a31dmac_config a64_config = { .nchans = 8 };

static struct ofw_compat_data compat_data[] = {
	{ "allwinner,sun6i-a31-dma",	(uintptr_t)&a31_config },
	{ "allwinner,sun8i-a83t-dma",	(uintptr_t)&a83t_config },
	{ "allwinner,sun8i-h3-dma",	(uintptr_t)&h3_config },
	{ "allwinner,sun50i-a64-dma",	(uintptr_t)&a64_config },
	{ NULL,				(uintptr_t)NULL }
};

struct a31dmac_softc;

struct a31dmac_channel {
	struct a31dmac_softc	*sc;
	uint8_t			index;
	void			(*callback)(void *);
	void			*callbackarg;

	bus_dmamap_t		dmamap;
	struct a31dmac_desc	*desc;
	bus_addr_t		physaddr;
};

struct a31dmac_softc {
	struct resource		*res[2];
	struct mtx		mtx;
	void			*ih;

	bus_dma_tag_t		dmat;

	u_int			nchans;
	struct a31dmac_channel	*chans;
};

static struct resource_spec a31dmac_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

#define	DMA_READ(sc, reg)	bus_read_4((sc)->res[0], (reg))
#define	DMA_WRITE(sc, reg, val)	bus_write_4((sc)->res[0], (reg), (val))

static void a31dmac_intr(void *);
static void a31dmac_dmamap_cb(void *, bus_dma_segment_t *, int, int);

static int
a31dmac_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "Allwinner DMA controller");
	return (BUS_PROBE_DEFAULT);
}

static int
a31dmac_attach(device_t dev)
{
	struct a31dmac_softc *sc;
	struct a31dmac_config *conf;
	u_int index;
	hwreset_t rst;
	clk_t clk;
	int error;

	sc = device_get_softc(dev);
	conf = (void *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	clk = NULL;
	rst = NULL;

	if (bus_alloc_resources(dev, a31dmac_spec, sc->res)) {
		device_printf(dev, "cannot allocate resources for device\n");
		return (ENXIO);
	}

	mtx_init(&sc->mtx, "a31 dmac", NULL, MTX_SPIN);

	/* Clock and reset setup */
	if (clk_get_by_ofw_index(dev, 0, 0, &clk) != 0) {
		device_printf(dev, "cannot get clock\n");
		goto fail;
	}
	if (clk_enable(clk) != 0) {
		device_printf(dev, "cannot enable clock\n");
		goto fail;
	}
	if (hwreset_get_by_ofw_idx(dev, 0, 0, &rst) != 0) {
		device_printf(dev, "cannot get hwreset\n");
		goto fail;
	}
	if (hwreset_deassert(rst) != 0) {
		device_printf(dev, "cannot de-assert reset\n");
		goto fail;
	}

	/* Descriptor DMA */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    DESC_SIZE, 1,		/* maxsize, nsegs */
	    DESC_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->dmat);
	if (error != 0) {
		device_printf(dev, "cannot create dma tag\n");
		goto fail;
	}

	/* Disable all interrupts and clear pending status */
	DMA_WRITE(sc, DMA_IRQ_EN_REG0, 0);
	DMA_WRITE(sc, DMA_IRQ_EN_REG1, 0);
	DMA_WRITE(sc, DMA_IRQ_PEND_REG0, ~0);
	DMA_WRITE(sc, DMA_IRQ_PEND_REG1, ~0);

	/* Initialize channels */
	sc->nchans = conf->nchans;
	sc->chans = malloc(sizeof(*sc->chans) * sc->nchans, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	for (index = 0; index < sc->nchans; index++) {
		sc->chans[index].sc = sc;
		sc->chans[index].index = index;
		sc->chans[index].callback = NULL;
		sc->chans[index].callbackarg = NULL;

		error = bus_dmamem_alloc(sc->dmat,
		    (void **)&sc->chans[index].desc,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		    &sc->chans[index].dmamap);
		if (error != 0) {
			device_printf(dev, "cannot allocate dma mem\n");
			goto fail;
		}
		error = bus_dmamap_load(sc->dmat, sc->chans[index].dmamap,
		    sc->chans[index].desc, sizeof(*sc->chans[index].desc),
		    a31dmac_dmamap_cb, &sc->chans[index], BUS_DMA_WAITOK);
		if (error != 0) {
			device_printf(dev, "cannot load dma map\n");
			goto fail;
		}

		DMA_WRITE(sc, DMA_EN_REG(index), 0);
	}

	error = bus_setup_intr(dev, sc->res[1], INTR_MPSAFE | INTR_TYPE_MISC,
	    NULL, a31dmac_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler\n");
		bus_release_resources(dev, a31dmac_spec, sc->res);
		mtx_destroy(&sc->mtx);
		return (ENXIO);
	}

	OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
	return (0);

fail:
	for (index = 0; index < sc->nchans; index++)
		if (sc->chans[index].desc != NULL) {
			bus_dmamap_unload(sc->dmat, sc->chans[index].dmamap);
			bus_dmamem_free(sc->dmat, sc->chans[index].desc,
			    sc->chans[index].dmamap);
		}
	if (sc->chans != NULL)
		free(sc->chans, M_DEVBUF);
	if (sc->ih != NULL)
		bus_teardown_intr(dev, sc->res[1], sc->ih);
	if (rst != NULL)
		hwreset_release(rst);
	if (clk != NULL)
		clk_release(clk);
	bus_release_resources(dev, a31dmac_spec, sc->res);

	return (ENXIO);
}

static void
a31dmac_dmamap_cb(void *priv, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct a31dmac_channel *ch;

	if (error != 0)
		return;

	ch = priv;
	ch->physaddr = segs[0].ds_addr;
}

static void
a31dmac_intr(void *priv)
{
	struct a31dmac_softc *sc;
	uint32_t pend0, pend1, bit;
	uint64_t pend, mask;
	u_int index;

	sc = priv;
	pend0 = DMA_READ(sc, DMA_IRQ_PEND_REG0);
	pend1 = sc->nchans > 8 ? DMA_READ(sc, DMA_IRQ_PEND_REG1) : 0;
	if (pend0 == 0 && pend1 == 0)
		return;

	if (pend0 != 0)
		DMA_WRITE(sc, DMA_IRQ_PEND_REG0, pend0);
	if (pend1 != 0)
		DMA_WRITE(sc, DMA_IRQ_PEND_REG1, pend1);

	pend = pend0 | ((uint64_t)pend1 << 32);

	/*
	 * Each channel occupies four bits of the IRQ registers, with the
	 * "package done" bit at position (ch % 8) * 4 + 1, so the bit index
	 * in the combined 64-bit pending word maps to a channel by dividing
	 * by four.
	 */
	while ((bit = ffsll(pend & DMA_PKG_IRQ_MASK)) != 0) {
		mask = (1ULL << (bit - 1));
		pend &= ~mask;
		index = (bit - 1) / 4;

		if (index >= sc->nchans)
			continue;
		if (sc->chans[index].callback == NULL)
			continue;
		sc->chans[index].callback(sc->chans[index].callbackarg);
	}
}

static int
a31dmac_set_config(device_t dev, void *priv, const struct sunxi_dma_config *cfg)
{
	struct a31dmac_channel *ch;
	uint32_t config, para;
	unsigned int dst_dw, dst_bl, dst_wc, dst_am;
	unsigned int src_dw, src_bl, src_wc, src_am;

	ch = priv;

	switch (cfg->dst_width) {
	case 8:
		dst_dw = DMA_DATA_WIDTH_8BIT;
		break;
	case 16:
		dst_dw = DMA_DATA_WIDTH_16BIT;
		break;
	case 32:
		dst_dw = DMA_DATA_WIDTH_32BIT;
		break;
	case 64:
		dst_dw = DMA_DATA_WIDTH_64BIT;
		break;
	default:
		return (EINVAL);
	}
	switch (cfg->dst_burst_len) {
	case 1:
		dst_bl = DMA_BST_LEN_1;
		break;
	case 4:
		dst_bl = DMA_BST_LEN_4;
		break;
	case 8:
		dst_bl = DMA_BST_LEN_8;
		break;
	case 16:
		dst_bl = DMA_BST_LEN_16;
		break;
	default:
		return (EINVAL);
	}
	switch (cfg->src_width) {
	case 8:
		src_dw = DMA_DATA_WIDTH_8BIT;
		break;
	case 16:
		src_dw = DMA_DATA_WIDTH_16BIT;
		break;
	case 32:
		src_dw = DMA_DATA_WIDTH_32BIT;
		break;
	case 64:
		src_dw = DMA_DATA_WIDTH_64BIT;
		break;
	default:
		return (EINVAL);
	}
	switch (cfg->src_burst_len) {
	case 1:
		src_bl = DMA_BST_LEN_1;
		break;
	case 4:
		src_bl = DMA_BST_LEN_4;
		break;
	case 8:
		src_bl = DMA_BST_LEN_8;
		break;
	case 16:
		src_bl = DMA_BST_LEN_16;
		break;
	default:
		return (EINVAL);
	}
	dst_am = cfg->dst_noincr ? DMA_ADDR_MODE_IO : DMA_ADDR_MODE_LINEAR;
	src_am = cfg->src_noincr ? DMA_ADDR_MODE_IO : DMA_ADDR_MODE_LINEAR;
	dst_wc = cfg->dst_wait_cyc;
	src_wc = cfg->src_wait_cyc;
	if (dst_wc != src_wc)
		return (EINVAL);

	config = (dst_dw << DMA_DEST_DATA_WIDTH_SHIFT) |
	    (dst_bl << DMA_DEST_BST_LEN_SHIFT) |
	    (dst_am << DMA_DEST_ADDR_MODE_SHIFT) |
	    (cfg->dst_drqtype << DMA_DEST_DRQ_TYPE_SHIFT) |
	    (src_dw << DMA_SRC_DATA_WIDTH_SHIFT) |
	    (src_bl << DMA_SRC_BST_LEN_SHIFT) |
	    (src_am << DMA_SRC_ADDR_MODE_SHIFT) |
	    (cfg->src_drqtype << DMA_SRC_DRQ_TYPE_SHIFT);
	para = (dst_wc << WAIT_CYC_SHIFT);

	ch->desc->config = htole32(config);
	ch->desc->para = htole32(para);

	return (0);
}

static void *
a31dmac_alloc(device_t dev, bool dedicated, void (*cb)(void *), void *cbarg)
{
	struct a31dmac_softc *sc;
	struct a31dmac_channel *ch;
	uint32_t irqen;
	u_int index;

	sc = device_get_softc(dev);
	ch = NULL;

	mtx_lock_spin(&sc->mtx);
	for (index = 0; index < sc->nchans; index++) {
		if (sc->chans[index].callback == NULL) {
			ch = &sc->chans[index];
			ch->callback = cb;
			ch->callbackarg = cbarg;

			irqen = DMA_READ(sc, DMA_IRQ_EN_REG(index));
			irqen |= DMA_PKG_IRQ_EN(index);
			DMA_WRITE(sc, DMA_IRQ_EN_REG(index), irqen);
			break;
		}
	}
	mtx_unlock_spin(&sc->mtx);

	return (ch);
}

static void
a31dmac_free(device_t dev, void *priv)
{
	struct a31dmac_channel *ch;
	struct a31dmac_softc *sc;
	uint32_t irqen;
	u_int index;

	ch = priv;
	sc = ch->sc;
	index = ch->index;

	mtx_lock_spin(&sc->mtx);

	irqen = DMA_READ(sc, DMA_IRQ_EN_REG(index));
	irqen &= ~DMA_PKG_IRQ_EN(index);
	DMA_WRITE(sc, DMA_IRQ_EN_REG(index), irqen);
	DMA_WRITE(sc, DMA_IRQ_PEND_REG(index), DMA_PKG_IRQ_EN(index));

	ch->callback = NULL;
	ch->callbackarg = NULL;

	mtx_unlock_spin(&sc->mtx);
}

static int
a31dmac_transfer(device_t dev, void *priv, bus_addr_t src, bus_addr_t dst,
    size_t nbytes)
{
	struct a31dmac_channel *ch;
	struct a31dmac_softc *sc;

	ch = priv;
	sc = ch->sc;

	ch->desc->srcaddr = htole32((uint32_t)src);
	ch->desc->dstaddr = htole32((uint32_t)dst);
	ch->desc->bcnt = htole32(nbytes);
	ch->desc->next = htole32(DMA_NULL);

	DMA_WRITE(sc, DMA_STAR_ADDR_REG(ch->index), (uint32_t)ch->physaddr);
	DMA_WRITE(sc, DMA_EN_REG(ch->index), DMA_EN);

	return (0);
}

static void
a31dmac_halt(device_t dev, void *priv)
{
	struct a31dmac_channel *ch;
	struct a31dmac_softc *sc;

	ch = priv;
	sc = ch->sc;

	DMA_WRITE(sc, DMA_EN_REG(ch->index), 0);
}

static device_method_t a31dmac_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		a31dmac_probe),
	DEVMETHOD(device_attach,	a31dmac_attach),

	/* sunxi DMA interface */
	DEVMETHOD(sunxi_dma_alloc,	a31dmac_alloc),
	DEVMETHOD(sunxi_dma_free,	a31dmac_free),
	DEVMETHOD(sunxi_dma_set_config,	a31dmac_set_config),
	DEVMETHOD(sunxi_dma_transfer,	a31dmac_transfer),
	DEVMETHOD(sunxi_dma_halt,	a31dmac_halt),

	DEVMETHOD_END
};

static driver_t a31dmac_driver = {
	"a31dmac",
	a31dmac_methods,
	sizeof(struct a31dmac_softc)
};

DRIVER_MODULE(a31dmac, simplebus, a31dmac_driver, 0, 0);
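
/*
 * Illustrative usage sketch (not compiled): a peripheral driver whose FDT
 * node references this controller would drive a channel through the
 * sunxi_dma_if kobj methods implemented above.  The SUNXI_DMA_* wrappers
 * are assumed to be the ones generated from sunxi_dma_if.m; the names
 * mydev_dma_done, sc->dma_ch, the DRQ ids and MYDEV_FIFO_PADDR below are
 * hypothetical placeholders, not part of this driver.
 *
 *	struct sunxi_dma_config cfg;
 *
 *	sc->dma_ch = SUNXI_DMA_ALLOC(dmac_dev, false, mydev_dma_done, sc);
 *	if (sc->dma_ch == NULL)
 *		return (ENXIO);
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.src_width = cfg.dst_width = 32;
 *	cfg.src_burst_len = cfg.dst_burst_len = 8;
 *	cfg.src_drqtype = MYDEV_DRQ_SDRAM;
 *	cfg.dst_drqtype = MYDEV_DRQ_FIFO;
 *	cfg.dst_noincr = true;
 *	error = SUNXI_DMA_SET_CONFIG(dmac_dev, sc->dma_ch, &cfg);
 *
 *	error = SUNXI_DMA_TRANSFER(dmac_dev, sc->dma_ch, buf_paddr,
 *	    MYDEV_FIFO_PADDR, xferlen);
 *
 * Completion is reported by this controller's interrupt handler invoking
 * mydev_dma_done(sc); the channel is returned with SUNXI_DMA_FREE() and a
 * transfer in flight can be stopped with SUNXI_DMA_HALT().
 */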