/*
 * Copyright (c) 2013 Daisuke Aoyama <aoyama@peach.ne.jp>
 * Copyright (c) 2013 Oleksandr Tymoshenko <gonzo@bluezbox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/rman.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/pmap.h>

#include "bcm2835_dma.h"
#include "bcm2835_vcbus.h"

#define	MAX_REG			9

/* private flags */
#define	BCM_DMA_CH_USED		0x00000001
#define	BCM_DMA_CH_FREE		0x40000000
#define	BCM_DMA_CH_UNMAP	0x80000000

/* Register Map (4.2.1.2) */
#define	BCM_DMA_CS(n)		(0x100*(n) + 0x00)
#define		CS_ACTIVE	(1 <<  0)
#define		CS_END		(1 <<  1)
#define		CS_INT		(1 <<  2)
#define		CS_DREQ		(1 <<  3)
#define		CS_ISPAUSED	(1 <<  4)
#define		CS_ISHELD	(1 <<  5)
#define		CS_ISWAIT	(1 <<  6)
#define		CS_ERR		(1 <<  8)
#define		CS_WAITWRT	(1 << 28)
#define		CS_DISDBG	(1 << 29)
#define		CS_ABORT	(1 << 30)
#define		CS_RESET	(1 << 31)
#define	BCM_DMA_CBADDR(n)	(0x100*(n) + 0x04)
#define	BCM_DMA_INFO(n)		(0x100*(n) + 0x08)
#define		INFO_INT_EN	(1 << 0)
#define		INFO_TDMODE	(1 << 1)
#define		INFO_WAIT_RESP	(1 << 3)
#define		INFO_D_INC	(1 << 4)
#define		INFO_D_WIDTH	(1 << 5)
#define		INFO_D_DREQ	(1 << 6)
#define		INFO_S_INC	(1 << 8)
#define		INFO_S_WIDTH	(1 << 9)
#define		INFO_S_DREQ	(1 << 10)
#define		INFO_WAITS_SHIFT	(21)
#define		INFO_PERMAP_SHIFT	(16)
#define		INFO_PERMAP_MASK	(0x1f << INFO_PERMAP_SHIFT)

#define	BCM_DMA_SRC(n)		(0x100*(n) + 0x0C)
#define	BCM_DMA_DST(n)		(0x100*(n) + 0x10)
#define	BCM_DMA_LEN(n)		(0x100*(n) + 0x14)
#define	BCM_DMA_STRIDE(n)	(0x100*(n) + 0x18)
#define	BCM_DMA_CBNEXT(n)	(0x100*(n) + 0x1C)
#define	BCM_DMA_DEBUG(n)	(0x100*(n) + 0x20)
#define		DEBUG_ERROR_MASK	(7)

#define	BCM_DMA_INT_STATUS	0xfe0
#define	BCM_DMA_ENABLE		0xff0

/* relative offset from BCM_VC_DMA0_BASE (p.39) */
#define	BCM_DMA_CH(n)		(0x100*(n))

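/*
 * Worked example (derived purely from the macros above, for illustration):
 * each channel occupies a 0x100-byte register window, so for channel 3
 * BCM_DMA_CS(3) == 0x300, BCM_DMA_CBADDR(3) == 0x304 and
 * BCM_DMA_DEBUG(3) == 0x320, while BCM_DMA_INT_STATUS and BCM_DMA_ENABLE
 * are single global registers shared by all channels.
 */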

/* DMA Control Block - 256-bit aligned (p.40) */
struct bcm_dma_cb {
	uint32_t info;		/* Transfer Information */
	uint32_t src;		/* Source Address */
	uint32_t dst;		/* Destination Address */
	uint32_t len;		/* Transfer Length */
	uint32_t stride;	/* 2D Mode Stride */
	uint32_t next;		/* Next Control Block Address */
	uint32_t rsvd1;		/* Reserved */
	uint32_t rsvd2;		/* Reserved */
};

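/*
 * Size check: the eight 32-bit fields above make a control block exactly
 * 32 bytes (256 bits), matching the alignment noted in the comment above,
 * so the per-channel busdma allocation of sizeof(struct bcm_dma_cb) done
 * in bcm_dma_init() covers exactly one control block.
 */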

#ifdef DEBUG
static void bcm_dma_cb_dump(struct bcm_dma_cb *cb);
static void bcm_dma_reg_dump(int ch);
#endif

/* DMA channel private info */
struct bcm_dma_ch {
	int			ch;
	uint32_t		flags;
	struct bcm_dma_cb *	cb;
	uint32_t		vc_cb;
	bus_dmamap_t		dma_map;
	void 			(*intr_func)(int, void *);
	void *			intr_arg;
};

struct bcm_dma_softc {
	device_t		sc_dev;
	struct mtx		sc_mtx;
	struct resource *	sc_mem;
	struct resource *	sc_irq[BCM_DMA_CH_MAX];
	void *			sc_intrhand[BCM_DMA_CH_MAX];
	struct bcm_dma_ch	sc_dma_ch[BCM_DMA_CH_MAX];
	bus_dma_tag_t		sc_dma_tag;
};

static struct bcm_dma_softc *bcm_dma_sc = NULL;

static void
bcm_dmamap_cb(void *arg, bus_dma_segment_t *segs,
	int nseg, int err)
{
	bus_addr_t *addr;

	if (err)
		return;

	addr = (bus_addr_t*)arg;
	*addr = PHYS_TO_VCBUS(segs[0].ds_addr);
}

static void
bcm_dma_reset(device_t dev, int ch)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	struct bcm_dma_cb *cb;
	uint32_t cs;
	int count;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return;

	cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch));

	if (cs & CS_ACTIVE) {
		/* pause current task */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), 0);

		count = 1000;
		do {
			cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch));
		} while (!(cs & CS_ISPAUSED) && (count-- > 0));

		if (!(cs & CS_ISPAUSED)) {
			device_printf(dev,
			    "Can't abort DMA transfer at channel %d\n", ch);
		}

		bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0);

		/* Complete everything, clear interrupt */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch),
		    CS_ABORT | CS_INT | CS_END | CS_ACTIVE);
	}

	/* clear control blocks */
	bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch), 0);
	bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0);

	/* Reset control block */
	cb = sc->sc_dma_ch[ch].cb;
	bzero(cb, sizeof(*cb));
	cb->info = INFO_WAIT_RESP;
}

static int
bcm_dma_init(device_t dev)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	uint32_t mask;
	struct bcm_dma_ch *ch;
	void *cb_virt;
	vm_paddr_t cb_phys;
	int err;
	int i;

	/* disable and clear interrupt status */
	bus_write_4(sc->sc_mem, BCM_DMA_ENABLE, 0);
	bus_write_4(sc->sc_mem, BCM_DMA_INT_STATUS, 0);

	/* Allocate control blocks for the DMA channels */
	/* p.40 of spec - control blocks must be 256-bit (32-byte) aligned */
	err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct bcm_dma_cb), 1,
	    sizeof(struct bcm_dma_cb),
	    BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->sc_dma_tag);

	if (err) {
		device_printf(dev, "failed to allocate DMA tag\n");
		return (err);
	}

	/* setup initial settings */
	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		ch = &sc->sc_dma_ch[i];

		err = bus_dmamem_alloc(sc->sc_dma_tag, &cb_virt,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
		    &ch->dma_map);
		if (err) {
			device_printf(dev, "cannot allocate DMA memory\n");
			break;
		}

		/*
		 * The minimum alignment for busdma allocations is the cache
		 * line size, so just make sure nothing unexpected happened
		 * and we got a properly aligned address.
		 */
		if ((uintptr_t)cb_virt & 0x1f) {
			device_printf(dev,
			    "DMA address is not 32-bytes aligned: %p\n",
			    (void*)cb_virt);
			break;
		}

		err = bus_dmamap_load(sc->sc_dma_tag, ch->dma_map, cb_virt,
		    sizeof(struct bcm_dma_cb), bcm_dmamap_cb, &cb_phys,
		    BUS_DMA_WAITOK);
		if (err) {
			device_printf(dev, "cannot load DMA memory\n");
			break;
		}

		bzero(ch, sizeof(struct bcm_dma_ch));
		ch->ch = i;
		ch->cb = cb_virt;
		ch->vc_cb = cb_phys;
		ch->intr_func = NULL;
		ch->intr_arg = NULL;
		ch->flags = BCM_DMA_CH_UNMAP;

		ch->cb->info = INFO_WAIT_RESP;

		/* reset DMA engine */
		bcm_dma_reset(dev, i);
	}

	/* now use DMA2/DMA3 only */
	sc->sc_dma_ch[2].flags = BCM_DMA_CH_FREE;
	sc->sc_dma_ch[3].flags = BCM_DMA_CH_FREE;

	/* enable DMAs */
	mask = 0;

	for (i = 0; i < BCM_DMA_CH_MAX; i++)
		if (sc->sc_dma_ch[i].flags & BCM_DMA_CH_FREE)
			mask |= (1 << i);

	bus_write_4(sc->sc_mem, BCM_DMA_ENABLE, mask);

	return (0);
}

/*
 * Allocate DMA channel for further use, returns channel # or
 * BCM_DMA_CH_INVALID
 */
int
bcm_dma_allocate(int req_ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	int ch = BCM_DMA_CH_INVALID;
	int i;

	if (req_ch >= BCM_DMA_CH_MAX)
		return (BCM_DMA_CH_INVALID);

	/* Auto (req_ch < 0) or specific channel requested */
	mtx_lock(&sc->sc_mtx);

	if (req_ch < 0) {
		for (i = 0; i < BCM_DMA_CH_MAX; i++) {
			if (sc->sc_dma_ch[i].flags & BCM_DMA_CH_FREE) {
				ch = i;
				sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE;
				sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED;
				break;
			}
		}
	}
	else {
		if (sc->sc_dma_ch[req_ch].flags & BCM_DMA_CH_FREE) {
			ch = req_ch;
			sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE;
			sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED;
		}
	}

	mtx_unlock(&sc->sc_mtx);
	return (ch);
}

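/*
 * Typical call sequence for a client driver (illustrative sketch only; the
 * callback, softc pointer, addresses and length below are hypothetical and
 * not part of this file):
 *
 *	int ch;
 *
 *	ch = bcm_dma_allocate(-1);	(any negative value = auto-select)
 *	if (ch == BCM_DMA_CH_INVALID)
 *		return (ENXIO);
 *	bcm_dma_setup_intr(ch, my_done_cb, sc);
 *	bcm_dma_setup_src(ch, BCM_DMA_DREQ_NONE, BCM_DMA_INC_ADDR,
 *	    BCM_DMA_32BIT);
 *	bcm_dma_setup_dst(ch, BCM_DMA_DREQ_NONE, BCM_DMA_INC_ADDR,
 *	    BCM_DMA_32BIT);
 *	bcm_dma_start(ch, src_phys, dst_phys, len);
 *	...
 *	bcm_dma_free(ch);		(when done with the channel)
 */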

/*
 * Frees allocated channel. Returns 0 on success, -1 otherwise
 */
int
bcm_dma_free(int ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	mtx_lock(&sc->sc_mtx);
	if (sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED) {
		sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_FREE;
		sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_USED;
		sc->sc_dma_ch[ch].intr_func = NULL;
		sc->sc_dma_ch[ch].intr_arg = NULL;

		/* reset DMA engine */
		bcm_dma_reset(sc->sc_dev, ch);
	}

	mtx_unlock(&sc->sc_mtx);
	return (0);
}

/*
 * Assign handler function for channel interrupt
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_setup_intr(int ch, void (*func)(int, void *), void *arg)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_cb *cb;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	sc->sc_dma_ch[ch].intr_func = func;
	sc->sc_dma_ch[ch].intr_arg = arg;
	cb = sc->sc_dma_ch[ch].cb;
	cb->info |= INFO_INT_EN;

	return (0);
}

/*
 * Setup DMA source parameters
 *     ch - channel number
 *     dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if
 *         source is physical memory
 *     inc_addr - BCM_DMA_INC_ADDR if source address
 *         should be increased after each access or
 *         BCM_DMA_SAME_ADDR if address should remain
 *         the same
 *     width - size of read operation, BCM_DMA_32BIT
 *         for 32-bit bursts, BCM_DMA_128BIT for 128-bit bursts
 *
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_setup_src(int ch, int dreq, int inc_addr, int width)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	uint32_t info;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	info = sc->sc_dma_ch[ch].cb->info;
	info &= ~INFO_PERMAP_MASK;
	info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK;

	if (dreq)
		info |= INFO_S_DREQ;
	else
		info &= ~INFO_S_DREQ;

	if (width == BCM_DMA_128BIT)
		info |= INFO_S_WIDTH;
	else
		info &= ~INFO_S_WIDTH;

	if (inc_addr == BCM_DMA_INC_ADDR)
		info |= INFO_S_INC;
	else
		info &= ~INFO_S_INC;

	sc->sc_dma_ch[ch].cb->info = info;

	return (0);
}

/*
 * Setup DMA destination parameters
 *     ch - channel number
 *     dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if
 *         destination is physical memory
 *     inc_addr - BCM_DMA_INC_ADDR if destination address
 *         should be increased after each access or
 *         BCM_DMA_SAME_ADDR if address should remain
 *         the same
 *     width - size of write operation, BCM_DMA_32BIT
 *         for 32-bit bursts, BCM_DMA_128BIT for 128-bit bursts
 *
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_setup_dst(int ch, int dreq, int inc_addr, int width)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	uint32_t info;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	info = sc->sc_dma_ch[ch].cb->info;
	info &= ~INFO_PERMAP_MASK;
	info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK;

	if (dreq)
		info |= INFO_D_DREQ;
	else
		info &= ~INFO_D_DREQ;

	if (width == BCM_DMA_128BIT)
		info |= INFO_D_WIDTH;
	else
		info &= ~INFO_D_WIDTH;

	if (inc_addr == BCM_DMA_INC_ADDR)
		info |= INFO_D_INC;
	else
		info &= ~INFO_D_INC;

	sc->sc_dma_ch[ch].cb->info = info;

	return (0);
}

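/*
 * Note on combining the two setup calls above: both bcm_dma_setup_src() and
 * bcm_dma_setup_dst() clear and rewrite the shared PERMAP (peripheral map)
 * field, so when only one side of the transfer is paced by a DREQ line the
 * call carrying the real DREQ number should be issued last (or the same
 * dreq value passed to both).  Illustrative sketch for a peripheral-to-memory
 * transfer; MY_PERIPH_DREQ is a hypothetical placeholder for the
 * peripheral's DREQ number from the datasheet:
 *
 *	bcm_dma_setup_dst(ch, BCM_DMA_DREQ_NONE, BCM_DMA_INC_ADDR,
 *	    BCM_DMA_32BIT);
 *	bcm_dma_setup_src(ch, MY_PERIPH_DREQ, BCM_DMA_SAME_ADDR,
 *	    BCM_DMA_32BIT);
 */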

#ifdef DEBUG
void
bcm_dma_cb_dump(struct bcm_dma_cb *cb)
{

	printf("DMA CB ");
	printf("INFO: %8.8x ", cb->info);
	printf("SRC: %8.8x ", cb->src);
	printf("DST: %8.8x ", cb->dst);
	printf("LEN: %8.8x ", cb->len);
	printf("\n");
	printf("STRIDE: %8.8x ", cb->stride);
	printf("NEXT: %8.8x ", cb->next);
	printf("RSVD1: %8.8x ", cb->rsvd1);
	printf("RSVD2: %8.8x ", cb->rsvd2);
	printf("\n");
}

void
bcm_dma_reg_dump(int ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	int i;
	uint32_t reg;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return;

	printf("DMA%d: ", ch);
	for (i = 0; i < MAX_REG; i++) {
		reg = bus_read_4(sc->sc_mem, BCM_DMA_CH(ch) + i*4);
		printf("%8.8x ", reg);
	}
	printf("\n");
}
#endif

/*
 * Start DMA transaction
 *     ch - channel number
 *     src, dst - source and destination address in
 *         ARM physical memory address space
 *     len - number of bytes to be transferred
 *
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_start(int ch, vm_paddr_t src, vm_paddr_t dst, int len)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_cb *cb;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	cb = sc->sc_dma_ch[ch].cb;
	if (BCM2835_ARM_IS_IO(src))
		cb->src = IO_TO_VCBUS(src);
	else
		cb->src = PHYS_TO_VCBUS(src);
	if (BCM2835_ARM_IS_IO(dst))
		cb->dst = IO_TO_VCBUS(dst);
	else
		cb->dst = PHYS_TO_VCBUS(dst);
	cb->len = len;

	bus_dmamap_sync(sc->sc_dma_tag,
	    sc->sc_dma_ch[ch].dma_map, BUS_DMASYNC_PREWRITE);

	bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch),
	    sc->sc_dma_ch[ch].vc_cb);
	bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), CS_ACTIVE);

#ifdef DEBUG
	bcm_dma_cb_dump(sc->sc_dma_ch[ch].cb);
	bcm_dma_reg_dump(ch);
#endif

	return (0);
}

/*
 * Get length requested for DMA transaction
 *     ch - channel number
 *
 * Returns size of transaction, 0 if channel is invalid
 */
uint32_t
bcm_dma_length(int ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_cb *cb;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (0);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (0);

	cb = sc->sc_dma_ch[ch].cb;

	return (cb->len);
}

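/*
 * Interrupt handler shared by all channels.  It acknowledges the interrupt,
 * resets the channel on error, and invokes the client callback installed
 * with bcm_dma_setup_intr().  The callback therefore runs in this handler's
 * interrupt thread context, after the control block has been synced, so
 * bcm_dma_length() may safely be called from it.
 */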
static void
bcm_dma_intr(void *arg)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_ch *ch = (struct bcm_dma_ch *)arg;
	uint32_t cs, debug;

	/* my interrupt? */
	cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch->ch));

	if (!(cs & (CS_INT | CS_ERR)))
		return;

	/* channel in use? */
	if (!(ch->flags & BCM_DMA_CH_USED)) {
		device_printf(sc->sc_dev,
		    "unused DMA intr CH=%d, CS=%x\n", ch->ch, cs);
		return;
	}

	if (cs & CS_ERR) {
		debug = bus_read_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch));
		device_printf(sc->sc_dev, "DMA error %d on CH%d\n",
		    debug & DEBUG_ERROR_MASK, ch->ch);
		bus_write_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch),
		    debug & DEBUG_ERROR_MASK);
		bcm_dma_reset(sc->sc_dev, ch->ch);
	}

	if (cs & CS_INT) {
		/* acknowledge interrupt */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch->ch),
		    CS_INT | CS_END);

		/* Prepare for possible access to len field */
		bus_dmamap_sync(sc->sc_dma_tag, ch->dma_map,
		    BUS_DMASYNC_POSTWRITE);

		/* invoke callback function, if any */
		if (ch->intr_func)
			ch->intr_func(ch->ch, ch->intr_arg);
	}
}

static int
bcm_dma_probe(device_t dev)
{

	if (!ofw_bus_is_compatible(dev, "broadcom,bcm2835-dma"))
		return (ENXIO);

	device_set_desc(dev, "BCM2835 DMA Controller");
	return (BUS_PROBE_DEFAULT);
}

static int
bcm_dma_attach(device_t dev)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	int rid, err = 0;
	int i;

	sc->sc_dev = dev;

	if (bcm_dma_sc)
		return (ENXIO);

	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		sc->sc_irq[i] = NULL;
		sc->sc_intrhand[i] = NULL;
	}

	/* DMA0 - DMA14 */
	rid = 0;
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}

	/* IRQ DMA0 - DMA11; XXX do not use DMA12 (spurious?) */
	for (rid = 0; rid < BCM_DMA_CH_MAX; rid++) {
		sc->sc_irq[rid] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE);
		if (sc->sc_irq[rid] == NULL) {
			device_printf(dev, "cannot allocate interrupt\n");
			err = ENXIO;
			goto fail;
		}
		if (bus_setup_intr(dev, sc->sc_irq[rid],
		    INTR_TYPE_MISC | INTR_MPSAFE,
		    NULL, bcm_dma_intr, &sc->sc_dma_ch[rid],
		    &sc->sc_intrhand[rid])) {
			device_printf(dev, "cannot setup interrupt handler\n");
			err = ENXIO;
			goto fail;
		}
	}

	mtx_init(&sc->sc_mtx, "bcmdma", "bcmdma", MTX_DEF);
	bcm_dma_sc = sc;

	err = bcm_dma_init(dev);
	if (err)
		goto fail;

	return (err);

fail:
	if (sc->sc_mem)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem);

	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		if (sc->sc_intrhand[i])
			bus_teardown_intr(dev, sc->sc_irq[i],
			    sc->sc_intrhand[i]);
		if (sc->sc_irq[i])
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->sc_irq[i]);
	}

	return (err);
}

static device_method_t bcm_dma_methods[] = {
	DEVMETHOD(device_probe,		bcm_dma_probe),
	DEVMETHOD(device_attach,	bcm_dma_attach),
	{ 0, 0 }
};

static driver_t bcm_dma_driver = {
	"bcm_dma",
	bcm_dma_methods,
	sizeof(struct bcm_dma_softc),
};

static devclass_t bcm_dma_devclass;

DRIVER_MODULE(bcm_dma, simplebus, bcm_dma_driver, bcm_dma_devclass, 0, 0);
MODULE_VERSION(bcm_dma, 1);