1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2013 Daisuke Aoyama <aoyama@peach.ne.jp> 5 * Copyright (c) 2013 Oleksandr Tymoshenko <gonzo@bluezbox.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/rman.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>

#include "bcm2835_dma.h"
#include "bcm2835_vcbus.h"

/* Number of 32-bit registers dumped per channel by bcm_dma_reg_dump() */
#define MAX_REG			9

/* private flags (kept in struct bcm_dma_ch.flags) */
#define BCM_DMA_CH_USED		0x00000001	/* allocated to a client */
#define BCM_DMA_CH_FREE		0x40000000	/* available for allocation */
#define BCM_DMA_CH_UNMAP	0x80000000	/* unusable: no control block mapped */

/* Register Map (4.2.1.2) */
#define BCM_DMA_CS(n)		(0x100*(n) + 0x00)	/* control/status */
#define		CS_ACTIVE	(1 << 0)
#define		CS_END		(1 << 1)
#define		CS_INT		(1 << 2)
#define		CS_DREQ		(1 << 3)
#define		CS_ISPAUSED	(1 << 4)
#define		CS_ISHELD	(1 << 5)
#define		CS_ISWAIT	(1 << 6)
#define		CS_ERR		(1 << 8)
#define		CS_WAITWRT	(1 << 28)
#define		CS_DISDBG	(1 << 29)
#define		CS_ABORT	(1 << 30)
#define		CS_RESET	(1U << 31)
#define BCM_DMA_CBADDR(n)	(0x100*(n) + 0x04)	/* control block address */
#define BCM_DMA_INFO(n)		(0x100*(n) + 0x08)	/* transfer information */
#define		INFO_INT_EN	(1 << 0)
#define		INFO_TDMODE	(1 << 1)
#define		INFO_WAIT_RESP	(1 << 3)
#define		INFO_D_INC	(1 << 4)
#define		INFO_D_WIDTH	(1 << 5)
#define		INFO_D_DREQ	(1 << 6)
#define		INFO_S_INC	(1 << 8)
#define		INFO_S_WIDTH	(1 << 9)
#define		INFO_S_DREQ	(1 << 10)
#define		INFO_WAITS_SHIFT (21)
#define		INFO_PERMAP_SHIFT (16)
#define		INFO_PERMAP_MASK (0x1f << INFO_PERMAP_SHIFT)

#define BCM_DMA_SRC(n)		(0x100*(n) + 0x0C)	/* source address */
#define BCM_DMA_DST(n)		(0x100*(n) + 0x10)	/* destination address */
#define BCM_DMA_LEN(n)		(0x100*(n) + 0x14)	/* transfer length */
#define BCM_DMA_STRIDE(n)	(0x100*(n) + 0x18)	/* 2D mode stride */
#define BCM_DMA_CBNEXT(n)	(0x100*(n) + 0x1C)	/* next control block */
#define BCM_DMA_DEBUG(n)	(0x100*(n) + 0x20)	/* debug/error status */
#define		DEBUG_ERROR_MASK (7)

/* Global registers shared by all channels */
#define BCM_DMA_INT_STATUS	0xfe0
#define BCM_DMA_ENABLE		0xff0

/* relative offset from BCM_VC_DMA0_BASE (p.39) */
#define BCM_DMA_CH(n)		(0x100*(n))

/* channels used by GPU */
#define BCM_DMA_CH_BULK		0
#define BCM_DMA_CH_FAST1	2
#define BCM_DMA_CH_FAST2	3

#define BCM_DMA_CH_GPU_MASK	((1 << BCM_DMA_CH_BULK) |	\
				 (1 << BCM_DMA_CH_FAST1) |	\
				 (1 << BCM_DMA_CH_FAST2))

/* DMA Control Block - 256bit aligned (p.40) */
struct bcm_dma_cb {
	uint32_t info;		/* Transfer Information */
	uint32_t src;		/* Source Address (VC bus address) */
	uint32_t dst;		/* Destination Address (VC bus address) */
	uint32_t len;		/* Transfer Length */
	uint32_t stride;	/* 2D Mode Stride */
	uint32_t next;		/* Next Control Block Address */
	uint32_t rsvd1;		/* Reserved */
	uint32_t rsvd2;		/* Reserved */
};

#ifdef DEBUG
static void bcm_dma_cb_dump(struct bcm_dma_cb *cb);
static void bcm_dma_reg_dump(int ch);
#endif

/* DMA channel private info */
struct bcm_dma_ch {
	int ch;				/* channel number */
	uint32_t flags;			/* BCM_DMA_CH_* state flags */
	struct bcm_dma_cb *cb;		/* KVA of the channel's control block */
	uint32_t vc_cb;			/* VC bus address of the control block */
	bus_dmamap_t dma_map;		/* busdma map backing the control block */
	void (*intr_func)(int, void *);	/* client completion callback */
	void *intr_arg;			/* argument passed to intr_func */
};

struct bcm_dma_softc {
	device_t sc_dev;
	struct mtx sc_mtx;			/* protects channel allocation state */
	struct resource *sc_mem;		/* register window */
	struct resource *sc_irq[BCM_DMA_CH_MAX];	/* per-channel IRQs */
	void *sc_intrhand[BCM_DMA_CH_MAX];
	struct bcm_dma_ch sc_dma_ch[BCM_DMA_CH_MAX];
	bus_dma_tag_t sc_dma_tag;		/* tag for control-block memory */
};

/* Singleton softc; published at the end of attach. */
static struct bcm_dma_softc *bcm_dma_sc = NULL;
/* Bitmask of channels we own (GPU channels masked out in attach). */
static uint32_t bcm_dma_channel_mask;

static struct ofw_compat_data compat_data[] = {
	{"broadcom,bcm2835-dma",	1},
	{"brcm,bcm2835-dma",		1},
	{NULL,				0}
};

/*
 * busdma load callback: convert the single segment's ARM physical
 * address to a VideoCore bus address and hand it back through arg.
 */
static void
bcm_dmamap_cb(void *arg, bus_dma_segment_t *segs,
	int nseg, int err)
{
	bus_addr_t *addr;

	if (err)
		return;

	addr = (bus_addr_t*)arg;
	*addr = ARMC_TO_VCBUS(segs[0].ds_addr);
}

/*
 * Quiesce a channel: pause and abort any active transfer, clear the
 * control-block registers and re-initialize the in-memory control block.
 */
static void
bcm_dma_reset(device_t dev, int ch)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	struct bcm_dma_cb *cb;
	uint32_t cs;
	int count;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return;

	cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch));

	if (cs & CS_ACTIVE) {
		/* pause current task (clearing CS_ACTIVE pauses the engine) */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), 0);

		/* bounded busy-wait for the engine to report paused */
		count = 1000;
		do {
			cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch));
		} while (!(cs & CS_ISPAUSED) && (count-- > 0));

		if (!(cs & CS_ISPAUSED)) {
			device_printf(dev,
			    "Can't abort DMA transfer at channel %d\n", ch);
		}

		bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0);

		/* Complete everything, clear interrupt */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch),
		    CS_ABORT | CS_INT | CS_END| CS_ACTIVE);
	}

	/* clear control blocks */
	bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch), 0);
	bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0);

	/* Reset control block to a benign state (wait for write response) */
	cb = sc->sc_dma_ch[ch].cb;
	bzero(cb, sizeof(*cb));
	cb->info = INFO_WAIT_RESP;
}

/*
 * One-time controller initialization: create the busdma tag, allocate and
 * map one control block per usable channel, and reset those channels.
 * Returns 0 on success or a busdma error.
 */
static int
bcm_dma_init(device_t dev)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	uint32_t reg;
	struct bcm_dma_ch *ch;
	void *cb_virt;
	vm_paddr_t cb_phys;
	int err;
	int i;

	/*
	 * Only channels set in bcm_dma_channel_mask can be controlled by us.
	 * The others are out of our control as well as the corresponding bits
	 * in both BCM_DMA_ENABLE and BCM_DMA_INT_STATUS global registers. As
	 * these registers are RW ones, there is no safe way how to write only
	 * the bits which can be controlled by us.
	 *
	 * Fortunately, after reset, all channels are enabled in BCM_DMA_ENABLE
	 * register and all statuses are cleared in BCM_DMA_INT_STATUS one.
	 * Not touching these registers is a trade off between correct
	 * initialization which does not count on anything and not messing up
	 * something we have no control over.
	 */
	reg = bus_read_4(sc->sc_mem, BCM_DMA_ENABLE);
	if ((reg & bcm_dma_channel_mask) != bcm_dma_channel_mask)
		device_printf(dev, "channels are not enabled\n");
	reg = bus_read_4(sc->sc_mem, BCM_DMA_INT_STATUS);
	if ((reg & bcm_dma_channel_mask) != 0)
		device_printf(dev, "statuses are not cleared\n");

	/*
	 * Allocate DMA chunks control blocks based on p.40 of the peripheral
	 * spec - control block should be 32-bit aligned. The DMA controller
	 * has a full 32-bit register dedicated to this address, so we do not
	 * need to bother with the per-SoC peripheral restrictions.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct bcm_dma_cb), 1,
	    sizeof(struct bcm_dma_cb),
	    BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->sc_dma_tag);

	if (err) {
		device_printf(dev, "failed allocate DMA tag\n");
		return (err);
	}

	/* setup initial settings */
	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		ch = &sc->sc_dma_ch[i];

		bzero(ch, sizeof(struct bcm_dma_ch));
		ch->ch = i;
		/* mark unusable until a control block is mapped below */
		ch->flags = BCM_DMA_CH_UNMAP;

		if ((bcm_dma_channel_mask & (1 << i)) == 0)
			continue;

		err = bus_dmamem_alloc(sc->sc_dma_tag, &cb_virt,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
		    &ch->dma_map);
		if (err) {
			device_printf(dev, "cannot allocate DMA memory\n");
			break;
		}

		/*
		 * Least alignment for busdma-allocated stuff is cache
		 * line size, so just make sure nothing stupid happened
		 * and we got properly aligned address
		 */
		if ((uintptr_t)cb_virt & 0x1f) {
			device_printf(dev,
			    "DMA address is not 32-bytes aligned: %p\n",
			    (void*)cb_virt);
			break;
		}

		/* resolve the control block's VC bus address via the callback */
		err = bus_dmamap_load(sc->sc_dma_tag, ch->dma_map, cb_virt,
		    sizeof(struct bcm_dma_cb), bcm_dmamap_cb, &cb_phys,
		    BUS_DMA_WAITOK);
		if (err) {
			device_printf(dev, "cannot load DMA memory\n");
			break;
		}

		ch->cb = cb_virt;
		ch->vc_cb = cb_phys;
		/* channel is now usable and available for allocation */
		ch->flags = BCM_DMA_CH_FREE;
		ch->cb->info = INFO_WAIT_RESP;

		/* reset DMA engine */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(i), CS_RESET);
	}

	return (0);
}

/*
 * Allocate DMA channel for further use, returns channel # or
 * BCM_DMA_CH_INVALID
 */
int
bcm_dma_allocate(int req_ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	int ch = BCM_DMA_CH_INVALID;
	int i;

	if (sc == NULL)
		return (BCM_DMA_CH_INVALID);

	if (req_ch >= BCM_DMA_CH_MAX)
		return (BCM_DMA_CH_INVALID);

	/* Auto(req_ch < 0) or CH specified */
	mtx_lock(&sc->sc_mtx);

	if (req_ch < 0) {
		/* pick the first free channel */
		for (i = 0; i < BCM_DMA_CH_MAX; i++) {
			if (sc->sc_dma_ch[i].flags & BCM_DMA_CH_FREE) {
				ch = i;
				sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE;
				sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED;
				break;
			}
		}
	} else if (sc->sc_dma_ch[req_ch].flags & BCM_DMA_CH_FREE) {
		ch = req_ch;
		sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE;
		sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED;
	}

	mtx_unlock(&sc->sc_mtx);
	return (ch);
}

/*
 * Frees allocated channel.
Returns 0 on success, -1 otherwise 361 */ 362 int 363 bcm_dma_free(int ch) 364 { 365 struct bcm_dma_softc *sc = bcm_dma_sc; 366 367 if (sc == NULL) 368 return (-1); 369 370 if (ch < 0 || ch >= BCM_DMA_CH_MAX) 371 return (-1); 372 373 mtx_lock(&sc->sc_mtx); 374 if (sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED) { 375 sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_FREE; 376 sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_USED; 377 sc->sc_dma_ch[ch].intr_func = NULL; 378 sc->sc_dma_ch[ch].intr_arg = NULL; 379 380 /* reset DMA engine */ 381 bcm_dma_reset(sc->sc_dev, ch); 382 } 383 384 mtx_unlock(&sc->sc_mtx); 385 return (0); 386 } 387 388 /* 389 * Assign handler function for channel interrupt 390 * Returns 0 on success, -1 otherwise 391 */ 392 int 393 bcm_dma_setup_intr(int ch, void (*func)(int, void *), void *arg) 394 { 395 struct bcm_dma_softc *sc = bcm_dma_sc; 396 struct bcm_dma_cb *cb; 397 398 if (sc == NULL) 399 return (-1); 400 401 if (ch < 0 || ch >= BCM_DMA_CH_MAX) 402 return (-1); 403 404 if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) 405 return (-1); 406 407 sc->sc_dma_ch[ch].intr_func = func; 408 sc->sc_dma_ch[ch].intr_arg = arg; 409 cb = sc->sc_dma_ch[ch].cb; 410 cb->info |= INFO_INT_EN; 411 412 return (0); 413 } 414 415 /* 416 * Setup DMA source parameters 417 * ch - channel number 418 * dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if 419 * source is physical memory 420 * inc_addr - BCM_DMA_INC_ADDR if source address 421 * should be increased after each access or 422 * BCM_DMA_SAME_ADDR if address should remain 423 * the same 424 * width - size of read operation, BCM_DMA_32BIT 425 * for 32bit bursts, BCM_DMA_128BIT for 128 bits 426 * 427 * Returns 0 on success, -1 otherwise 428 */ 429 int 430 bcm_dma_setup_src(int ch, int dreq, int inc_addr, int width) 431 { 432 struct bcm_dma_softc *sc = bcm_dma_sc; 433 uint32_t info; 434 435 if (ch < 0 || ch >= BCM_DMA_CH_MAX) 436 return (-1); 437 438 if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) 439 return (-1); 440 441 info = 
sc->sc_dma_ch[ch].cb->info; 442 info &= ~INFO_PERMAP_MASK; 443 info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK; 444 445 if (dreq) 446 info |= INFO_S_DREQ; 447 else 448 info &= ~INFO_S_DREQ; 449 450 if (width == BCM_DMA_128BIT) 451 info |= INFO_S_WIDTH; 452 else 453 info &= ~INFO_S_WIDTH; 454 455 if (inc_addr == BCM_DMA_INC_ADDR) 456 info |= INFO_S_INC; 457 else 458 info &= ~INFO_S_INC; 459 460 sc->sc_dma_ch[ch].cb->info = info; 461 462 return (0); 463 } 464 465 /* 466 * Setup DMA destination parameters 467 * ch - channel number 468 * dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if 469 * destination is physical memory 470 * inc_addr - BCM_DMA_INC_ADDR if source address 471 * should be increased after each access or 472 * BCM_DMA_SAME_ADDR if address should remain 473 * the same 474 * width - size of write operation, BCM_DMA_32BIT 475 * for 32bit bursts, BCM_DMA_128BIT for 128 bits 476 * 477 * Returns 0 on success, -1 otherwise 478 */ 479 int 480 bcm_dma_setup_dst(int ch, int dreq, int inc_addr, int width) 481 { 482 struct bcm_dma_softc *sc = bcm_dma_sc; 483 uint32_t info; 484 485 if (ch < 0 || ch >= BCM_DMA_CH_MAX) 486 return (-1); 487 488 if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) 489 return (-1); 490 491 info = sc->sc_dma_ch[ch].cb->info; 492 info &= ~INFO_PERMAP_MASK; 493 info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK; 494 495 if (dreq) 496 info |= INFO_D_DREQ; 497 else 498 info &= ~INFO_D_DREQ; 499 500 if (width == BCM_DMA_128BIT) 501 info |= INFO_D_WIDTH; 502 else 503 info &= ~INFO_D_WIDTH; 504 505 if (inc_addr == BCM_DMA_INC_ADDR) 506 info |= INFO_D_INC; 507 else 508 info &= ~INFO_D_INC; 509 510 sc->sc_dma_ch[ch].cb->info = info; 511 512 return (0); 513 } 514 515 #ifdef DEBUG 516 void 517 bcm_dma_cb_dump(struct bcm_dma_cb *cb) 518 { 519 520 printf("DMA CB "); 521 printf("INFO: %8.8x ", cb->info); 522 printf("SRC: %8.8x ", cb->src); 523 printf("DST: %8.8x ", cb->dst); 524 printf("LEN: %8.8x ", cb->len); 525 printf("\n"); 526 printf("STRIDE: 
%8.8x ", cb->stride); 527 printf("NEXT: %8.8x ", cb->next); 528 printf("RSVD1: %8.8x ", cb->rsvd1); 529 printf("RSVD2: %8.8x ", cb->rsvd2); 530 printf("\n"); 531 } 532 533 void 534 bcm_dma_reg_dump(int ch) 535 { 536 struct bcm_dma_softc *sc = bcm_dma_sc; 537 int i; 538 uint32_t reg; 539 540 if (sc == NULL) 541 return; 542 543 if (ch < 0 || ch >= BCM_DMA_CH_MAX) 544 return; 545 546 printf("DMA%d: ", ch); 547 for (i = 0; i < MAX_REG; i++) { 548 reg = bus_read_4(sc->sc_mem, BCM_DMA_CH(ch) + i*4); 549 printf("%8.8x ", reg); 550 } 551 printf("\n"); 552 } 553 #endif 554 555 /* 556 * Start DMA transaction 557 * ch - channel number 558 * src, dst - source and destination address in 559 * ARM physical memory address space. 560 * len - amount of bytes to be transferred 561 * 562 * Returns 0 on success, -1 otherwise 563 */ 564 int 565 bcm_dma_start(int ch, vm_paddr_t src, vm_paddr_t dst, int len) 566 { 567 struct bcm_dma_softc *sc = bcm_dma_sc; 568 struct bcm_dma_cb *cb; 569 570 if (sc == NULL) 571 return (-1); 572 573 if (ch < 0 || ch >= BCM_DMA_CH_MAX) 574 return (-1); 575 576 if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) 577 return (-1); 578 579 cb = sc->sc_dma_ch[ch].cb; 580 cb->src = ARMC_TO_VCBUS(src); 581 cb->dst = ARMC_TO_VCBUS(dst); 582 583 cb->len = len; 584 585 bus_dmamap_sync(sc->sc_dma_tag, 586 sc->sc_dma_ch[ch].dma_map, BUS_DMASYNC_PREWRITE); 587 588 bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch), 589 sc->sc_dma_ch[ch].vc_cb); 590 bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), CS_ACTIVE); 591 592 #ifdef DEBUG 593 bcm_dma_cb_dump(sc->sc_dma_ch[ch].cb); 594 bcm_dma_reg_dump(ch); 595 #endif 596 597 return (0); 598 } 599 600 /* 601 * Get length requested for DMA transaction 602 * ch - channel number 603 * 604 * Returns size of transaction, 0 if channel is invalid 605 */ 606 uint32_t 607 bcm_dma_length(int ch) 608 { 609 struct bcm_dma_softc *sc = bcm_dma_sc; 610 struct bcm_dma_cb *cb; 611 612 if (sc == NULL) 613 return (0); 614 615 if (ch < 0 || ch >= BCM_DMA_CH_MAX) 616 
return (0); 617 618 if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED)) 619 return (0); 620 621 cb = sc->sc_dma_ch[ch].cb; 622 623 return (cb->len); 624 } 625 626 static void 627 bcm_dma_intr(void *arg) 628 { 629 struct bcm_dma_softc *sc = bcm_dma_sc; 630 struct bcm_dma_ch *ch = (struct bcm_dma_ch *)arg; 631 uint32_t cs, debug; 632 633 /* my interrupt? */ 634 cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch->ch)); 635 636 /* 637 * Is it an active channel? Our diagnostics could be better here, but 638 * it's not necessarily an easy task to resolve a rid/resource to an 639 * actual irq number. We'd want to do this to set a flag indicating 640 * whether the irq is shared or not, so we know to complain. 641 */ 642 if (!(ch->flags & BCM_DMA_CH_USED)) 643 return; 644 645 /* Again, we can't complain here. The same logic applies. */ 646 if (!(cs & (CS_INT | CS_ERR))) 647 return; 648 649 if (cs & CS_ERR) { 650 debug = bus_read_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch)); 651 device_printf(sc->sc_dev, "DMA error %d on CH%d\n", 652 debug & DEBUG_ERROR_MASK, ch->ch); 653 bus_write_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch), 654 debug & DEBUG_ERROR_MASK); 655 bcm_dma_reset(sc->sc_dev, ch->ch); 656 } 657 658 if (cs & CS_INT) { 659 /* acknowledge interrupt */ 660 bus_write_4(sc->sc_mem, BCM_DMA_CS(ch->ch), 661 CS_INT | CS_END); 662 663 /* Prepare for possible access to len field */ 664 bus_dmamap_sync(sc->sc_dma_tag, ch->dma_map, 665 BUS_DMASYNC_POSTWRITE); 666 667 /* save callback function and argument */ 668 if (ch->intr_func) 669 ch->intr_func(ch->ch, ch->intr_arg); 670 } 671 } 672 673 static int 674 bcm_dma_probe(device_t dev) 675 { 676 677 if (!ofw_bus_status_okay(dev)) 678 return (ENXIO); 679 680 if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) 681 return (ENXIO); 682 683 device_set_desc(dev, "BCM2835 DMA Controller"); 684 return (BUS_PROBE_DEFAULT); 685 } 686 687 static int 688 bcm_dma_attach(device_t dev) 689 { 690 struct bcm_dma_softc *sc = device_get_softc(dev); 691 phandle_t 
	    node;
	int rid, err = 0;
	int i;

	sc->sc_dev = dev;

	/* only one controller instance may exist */
	if (bcm_dma_sc)
		return (ENXIO);

	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		sc->sc_irq[i] = NULL;
		sc->sc_intrhand[i] = NULL;
	}

	/* Get DMA channel mask. */
	node = ofw_bus_get_node(sc->sc_dev);
	/* try the new binding name first, then the legacy one */
	if (OF_getencprop(node, "brcm,dma-channel-mask", &bcm_dma_channel_mask,
	    sizeof(bcm_dma_channel_mask)) == -1 &&
	    OF_getencprop(node, "broadcom,channels", &bcm_dma_channel_mask,
	    sizeof(bcm_dma_channel_mask)) == -1) {
		device_printf(dev, "could not get channel mask property\n");
		return (ENXIO);
	}

	/* Mask out channels used by GPU. */
	bcm_dma_channel_mask &= ~BCM_DMA_CH_GPU_MASK;

	/* DMA0 - DMA14 */
	rid = 0;
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}

	/* IRQ DMA0 - DMA11 XXX NOT USE DMA12(spurious?)
*/ 727 for (rid = 0; rid < BCM_DMA_CH_MAX; rid++) { 728 if ((bcm_dma_channel_mask & (1 << rid)) == 0) 729 continue; 730 731 sc->sc_irq[rid] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 732 RF_ACTIVE | RF_SHAREABLE); 733 if (sc->sc_irq[rid] == NULL) { 734 device_printf(dev, "cannot allocate interrupt\n"); 735 err = ENXIO; 736 goto fail; 737 } 738 if (bus_setup_intr(dev, sc->sc_irq[rid], INTR_TYPE_MISC | INTR_MPSAFE, 739 NULL, bcm_dma_intr, &sc->sc_dma_ch[rid], 740 &sc->sc_intrhand[rid])) { 741 device_printf(dev, "cannot setup interrupt handler\n"); 742 err = ENXIO; 743 goto fail; 744 } 745 } 746 747 mtx_init(&sc->sc_mtx, "bcmdma", "bcmdma", MTX_DEF); 748 bcm_dma_sc = sc; 749 750 err = bcm_dma_init(dev); 751 if (err) 752 goto fail; 753 754 return (err); 755 756 fail: 757 if (sc->sc_mem) 758 bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem); 759 760 for (i = 0; i < BCM_DMA_CH_MAX; i++) { 761 if (sc->sc_intrhand[i]) 762 bus_teardown_intr(dev, sc->sc_irq[i], sc->sc_intrhand[i]); 763 if (sc->sc_irq[i]) 764 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq[i]); 765 } 766 767 return (err); 768 } 769 770 static device_method_t bcm_dma_methods[] = { 771 DEVMETHOD(device_probe, bcm_dma_probe), 772 DEVMETHOD(device_attach, bcm_dma_attach), 773 { 0, 0 } 774 }; 775 776 static driver_t bcm_dma_driver = { 777 "bcm_dma", 778 bcm_dma_methods, 779 sizeof(struct bcm_dma_softc), 780 }; 781 782 EARLY_DRIVER_MODULE(bcm_dma, simplebus, bcm_dma_driver, 0, 0, 783 BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE); 784 MODULE_VERSION(bcm_dma, 1); 785