/*
 * Copyright (c) 2013 Daisuke Aoyama <aoyama@peach.ne.jp>
 * Copyright (c) 2013 Oleksandr Tymoshenko <gonzo@bluezbox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/rman.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>

#include "bcm2835_dma.h"
#include "bcm2835_vcbus.h"

#define	MAX_REG			9

/* private flags */
#define	BCM_DMA_CH_USED		0x00000001
#define	BCM_DMA_CH_FREE		0x40000000
#define	BCM_DMA_CH_UNMAP	0x80000000

/* Register Map (4.2.1.2) */
#define	BCM_DMA_CS(n)		(0x100*(n) + 0x00)
#define		CS_ACTIVE	(1 << 0)
#define		CS_END		(1 << 1)
#define		CS_INT		(1 << 2)
#define		CS_DREQ		(1 << 3)
#define		CS_ISPAUSED	(1 << 4)
#define		CS_ISHELD	(1 << 5)
#define		CS_ISWAIT	(1 << 6)
#define		CS_ERR		(1 << 8)
#define		CS_WAITWRT	(1 << 28)
#define		CS_DISDBG	(1 << 29)
#define		CS_ABORT	(1 << 30)
#define		CS_RESET	(1U << 31)
#define	BCM_DMA_CBADDR(n)	(0x100*(n) + 0x04)
#define	BCM_DMA_INFO(n)		(0x100*(n) + 0x08)
#define		INFO_INT_EN	(1 << 0)
#define		INFO_TDMODE	(1 << 1)
#define		INFO_WAIT_RESP	(1 << 3)
#define		INFO_D_INC	(1 << 4)
#define		INFO_D_WIDTH	(1 << 5)
#define		INFO_D_DREQ	(1 << 6)
#define		INFO_S_INC	(1 << 8)
#define		INFO_S_WIDTH	(1 << 9)
#define		INFO_S_DREQ	(1 << 10)
#define		INFO_WAITS_SHIFT	(21)
#define		INFO_PERMAP_SHIFT	(16)
#define		INFO_PERMAP_MASK	(0x1f << INFO_PERMAP_SHIFT)

#define	BCM_DMA_SRC(n)		(0x100*(n) + 0x0C)
#define	BCM_DMA_DST(n)		(0x100*(n) + 0x10)
#define	BCM_DMA_LEN(n)		(0x100*(n) + 0x14)
#define	BCM_DMA_STRIDE(n)	(0x100*(n) + 0x18)
#define	BCM_DMA_CBNEXT(n)	(0x100*(n) + 0x1C)
#define	BCM_DMA_DEBUG(n)	(0x100*(n) + 0x20)
#define		DEBUG_ERROR_MASK	(7)

#define	BCM_DMA_INT_STATUS	0xfe0
#define	BCM_DMA_ENABLE		0xff0

/* relative offset from BCM_VC_DMA0_BASE (p.39) */
#define	BCM_DMA_CH(n)		(0x100*(n))

/* DMA Control Block - 256-bit aligned (p.40) */
struct bcm_dma_cb {
	uint32_t info;		/* Transfer Information */
	uint32_t src;		/* Source Address */
	uint32_t dst;		/* Destination Address */
	uint32_t len;		/* Transfer Length */
	uint32_t stride;	/* 2D Mode Stride */
	uint32_t next;		/* Next Control Block Address */
	uint32_t rsvd1;		/* Reserved */
	uint32_t rsvd2;		/* Reserved */
};
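/*
 * Note on chaining (a sketch only, not used by this driver): the controller
 * follows a linked list of control blocks by loading the bus address stored
 * in 'next' when the current block completes; a 'next' of 0 terminates the
 * chain.  bcm_dma_start() below always programs a single block, so 'next'
 * stays 0.  A two-block chain would look roughly like this (the bus address
 * name is hypothetical):
 *
 *	cb[0].next = vc_bus_addr_of_cb1;
 *	cb[1].next = 0;
 */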
#ifdef DEBUG
static void bcm_dma_cb_dump(struct bcm_dma_cb *cb);
static void bcm_dma_reg_dump(int ch);
#endif

/* DMA channel private info */
struct bcm_dma_ch {
	int			ch;
	uint32_t		flags;
	struct bcm_dma_cb *	cb;
	uint32_t		vc_cb;
	bus_dmamap_t		dma_map;
	void			(*intr_func)(int, void *);
	void *			intr_arg;
};

struct bcm_dma_softc {
	device_t		sc_dev;
	struct mtx		sc_mtx;
	struct resource *	sc_mem;
	struct resource *	sc_irq[BCM_DMA_CH_MAX];
	void *			sc_intrhand[BCM_DMA_CH_MAX];
	struct bcm_dma_ch	sc_dma_ch[BCM_DMA_CH_MAX];
	bus_dma_tag_t		sc_dma_tag;
};

static struct bcm_dma_softc *bcm_dma_sc = NULL;

static void
bcm_dmamap_cb(void *arg, bus_dma_segment_t *segs,
	int nseg, int err)
{
	bus_addr_t *addr;

	if (err)
		return;

	addr = (bus_addr_t *)arg;
	*addr = PHYS_TO_VCBUS(segs[0].ds_addr);
}

static void
bcm_dma_reset(device_t dev, int ch)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	struct bcm_dma_cb *cb;
	uint32_t cs;
	int count;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return;

	cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch));

	if (cs & CS_ACTIVE) {
		/* pause current task */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), 0);

		count = 1000;
		do {
			cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch));
		} while (!(cs & CS_ISPAUSED) && (count-- > 0));

		if (!(cs & CS_ISPAUSED)) {
			device_printf(dev,
			    "Can't abort DMA transfer at channel %d\n", ch);
		}

		bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0);

		/* Complete everything, clear interrupt */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch),
		    CS_ABORT | CS_INT | CS_END | CS_ACTIVE);
	}

	/* clear control blocks */
	bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch), 0);
	bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0);

	/* Reset control block */
	cb = sc->sc_dma_ch[ch].cb;
	bzero(cb, sizeof(*cb));
	cb->info = INFO_WAIT_RESP;
}

static int
bcm_dma_init(device_t dev)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	uint32_t mask;
	struct bcm_dma_ch *ch;
	void *cb_virt;
	vm_paddr_t cb_phys;
	int err;
	int i;

	/* disable and clear interrupt status */
	bus_write_4(sc->sc_mem, BCM_DMA_ENABLE, 0);
	bus_write_4(sc->sc_mem, BCM_DMA_INT_STATUS, 0);

	/* Allocate control blocks for the DMA channels */
	/* p.40 of spec - control blocks must be 256-bit (32-byte) aligned */
	err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct bcm_dma_cb), 1,
	    sizeof(struct bcm_dma_cb),
	    BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->sc_dma_tag);

	if (err) {
		device_printf(dev, "failed to allocate DMA tag\n");
		return (err);
	}

	/* setup initial settings */
	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		ch = &sc->sc_dma_ch[i];
		err = bus_dmamem_alloc(sc->sc_dma_tag, &cb_virt,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
		    &ch->dma_map);
		if (err) {
			device_printf(dev, "cannot allocate DMA memory\n");
			break;
		}

		/*
		 * Least alignment for busdma-allocated memory is the cache
		 * line size, so just make sure nothing stupid happened and
		 * we got a properly aligned address.
		 */
		if ((uintptr_t)cb_virt & 0x1f) {
			device_printf(dev,
			    "DMA address is not 32-byte aligned: %p\n",
			    (void *)cb_virt);
			break;
		}

		err = bus_dmamap_load(sc->sc_dma_tag, ch->dma_map, cb_virt,
		    sizeof(struct bcm_dma_cb), bcm_dmamap_cb, &cb_phys,
		    BUS_DMA_WAITOK);
		if (err) {
			device_printf(dev, "cannot load DMA memory\n");
			break;
		}

		bzero(ch, sizeof(struct bcm_dma_ch));
		ch->ch = i;
		ch->cb = cb_virt;
		ch->vc_cb = cb_phys;
		ch->intr_func = NULL;
		ch->intr_arg = NULL;
		ch->flags = BCM_DMA_CH_UNMAP;

		ch->cb->info = INFO_WAIT_RESP;

		/* reset DMA engine */
		bcm_dma_reset(dev, i);
	}

	/* now use DMA2/DMA3 only */
	sc->sc_dma_ch[2].flags = BCM_DMA_CH_FREE;
	sc->sc_dma_ch[3].flags = BCM_DMA_CH_FREE;

	/* enable DMAs */
	mask = 0;

	for (i = 0; i < BCM_DMA_CH_MAX; i++)
		if (sc->sc_dma_ch[i].flags & BCM_DMA_CH_FREE)
			mask |= (1 << i);

	bus_write_4(sc->sc_mem, BCM_DMA_ENABLE, mask);

	return (0);
}

/*
 * Allocate a DMA channel for further use; returns the channel number or
 * BCM_DMA_CH_INVALID
 */
int
bcm_dma_allocate(int req_ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	int ch = BCM_DMA_CH_INVALID;
	int i;

	if (req_ch >= BCM_DMA_CH_MAX)
		return (BCM_DMA_CH_INVALID);

	/* Auto (req_ch < 0) or specific channel requested */
	mtx_lock(&sc->sc_mtx);

	if (req_ch < 0) {
		for (i = 0; i < BCM_DMA_CH_MAX; i++) {
			if (sc->sc_dma_ch[i].flags & BCM_DMA_CH_FREE) {
				ch = i;
				sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE;
				sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED;
				break;
			}
		}
	}
	else {
		if (sc->sc_dma_ch[req_ch].flags & BCM_DMA_CH_FREE) {
			ch = req_ch;
			sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE;
			sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED;
		}
	}

	mtx_unlock(&sc->sc_mtx);
	return (ch);
}
/*
 * Frees an allocated channel. Returns 0 on success, -1 otherwise
 */
int
bcm_dma_free(int ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	mtx_lock(&sc->sc_mtx);
	if (sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED) {
		sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_FREE;
		sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_USED;
		sc->sc_dma_ch[ch].intr_func = NULL;
		sc->sc_dma_ch[ch].intr_arg = NULL;

		/* reset DMA engine */
		bcm_dma_reset(sc->sc_dev, ch);
	}

	mtx_unlock(&sc->sc_mtx);
	return (0);
}

/*
 * Assign a handler function for channel interrupts.
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_setup_intr(int ch, void (*func)(int, void *), void *arg)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_cb *cb;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	sc->sc_dma_ch[ch].intr_func = func;
	sc->sc_dma_ch[ch].intr_arg = arg;
	cb = sc->sc_dma_ch[ch].cb;
	cb->info |= INFO_INT_EN;

	return (0);
}

/*
 * Setup DMA source parameters
 *     ch - channel number
 *     dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if
 *         source is physical memory
 *     inc_addr - BCM_DMA_INC_ADDR if source address
 *         should be increased after each access or
 *         BCM_DMA_SAME_ADDR if address should remain
 *         the same
 *     width - size of read operation, BCM_DMA_32BIT
 *         for 32-bit bursts, BCM_DMA_128BIT for 128-bit bursts
 *
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_setup_src(int ch, int dreq, int inc_addr, int width)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	uint32_t info;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	info = sc->sc_dma_ch[ch].cb->info;
	info &= ~INFO_PERMAP_MASK;
	info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK;

	if (dreq)
		info |= INFO_S_DREQ;
	else
		info &= ~INFO_S_DREQ;

	if (width == BCM_DMA_128BIT)
		info |= INFO_S_WIDTH;
	else
		info &= ~INFO_S_WIDTH;

	if (inc_addr == BCM_DMA_INC_ADDR)
		info |= INFO_S_INC;
	else
		info &= ~INFO_S_INC;

	sc->sc_dma_ch[ch].cb->info = info;

	return (0);
}

/*
 * Setup DMA destination parameters
 *     ch - channel number
 *     dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if
 *         destination is physical memory
 *     inc_addr - BCM_DMA_INC_ADDR if destination address
 *         should be increased after each access or
 *         BCM_DMA_SAME_ADDR if address should remain
 *         the same
 *     width - size of write operation, BCM_DMA_32BIT
 *         for 32-bit bursts, BCM_DMA_128BIT for 128-bit bursts
 *
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_setup_dst(int ch, int dreq, int inc_addr, int width)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	uint32_t info;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	info = sc->sc_dma_ch[ch].cb->info;
	info &= ~INFO_PERMAP_MASK;
	info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK;

	if (dreq)
		info |= INFO_D_DREQ;
	else
		info &= ~INFO_D_DREQ;

	if (width == BCM_DMA_128BIT)
		info |= INFO_D_WIDTH;
	else
		info &= ~INFO_D_WIDTH;

	if (inc_addr == BCM_DMA_INC_ADDR)
		info |= INFO_D_INC;
	else
		info &= ~INFO_D_INC;

	sc->sc_dma_ch[ch].cb->info = info;

	return (0);
}
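/*
 * Example (a sketch only, not part of the driver): configuring a channel to
 * copy from memory to a DREQ-paced peripheral FIFO.  'periph_dreq' is a
 * hypothetical peripheral DREQ number, not defined here:
 *
 *	bcm_dma_setup_src(ch, BCM_DMA_DREQ_NONE, BCM_DMA_INC_ADDR,
 *	    BCM_DMA_32BIT);
 *	bcm_dma_setup_dst(ch, periph_dreq, BCM_DMA_SAME_ADDR,
 *	    BCM_DMA_32BIT);
 */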
#ifdef DEBUG
void
bcm_dma_cb_dump(struct bcm_dma_cb *cb)
{

	printf("DMA CB ");
	printf("INFO: %8.8x ", cb->info);
	printf("SRC: %8.8x ", cb->src);
	printf("DST: %8.8x ", cb->dst);
	printf("LEN: %8.8x ", cb->len);
	printf("\n");
	printf("STRIDE: %8.8x ", cb->stride);
	printf("NEXT: %8.8x ", cb->next);
	printf("RSVD1: %8.8x ", cb->rsvd1);
	printf("RSVD2: %8.8x ", cb->rsvd2);
	printf("\n");
}

void
bcm_dma_reg_dump(int ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	int i;
	uint32_t reg;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return;

	printf("DMA%d: ", ch);
	for (i = 0; i < MAX_REG; i++) {
		reg = bus_read_4(sc->sc_mem, BCM_DMA_CH(ch) + i*4);
		printf("%8.8x ", reg);
	}
	printf("\n");
}
#endif

/*
 * Start a DMA transaction
 *     ch - channel number
 *     src, dst - source and destination addresses in
 *         the ARM physical memory address space
 *     len - number of bytes to be transferred
 *
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_start(int ch, vm_paddr_t src, vm_paddr_t dst, int len)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_cb *cb;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	cb = sc->sc_dma_ch[ch].cb;
	if (BCM2835_ARM_IS_IO(src))
		cb->src = IO_TO_VCBUS(src);
	else
		cb->src = PHYS_TO_VCBUS(src);
	if (BCM2835_ARM_IS_IO(dst))
		cb->dst = IO_TO_VCBUS(dst);
	else
		cb->dst = PHYS_TO_VCBUS(dst);
	cb->len = len;

	bus_dmamap_sync(sc->sc_dma_tag,
	    sc->sc_dma_ch[ch].dma_map, BUS_DMASYNC_PREWRITE);

	bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch),
	    sc->sc_dma_ch[ch].vc_cb);
	bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), CS_ACTIVE);

#ifdef DEBUG
	bcm_dma_cb_dump(sc->sc_dma_ch[ch].cb);
	bcm_dma_reg_dump(ch);
#endif

	return (0);
}

/*
 * Get the length requested for a DMA transaction
 *     ch - channel number
 *
 * Returns the size of the transaction, or 0 if the channel is invalid
 */
uint32_t
bcm_dma_length(int ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_cb *cb;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (0);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (0);

	cb = sc->sc_dma_ch[ch].cb;

	return (cb->len);
}
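/*
 * Typical client flow (a sketch only; names other than the bcm_dma_* API and
 * BCM_DMA_CH_INVALID are hypothetical):
 *
 *	ch = bcm_dma_allocate(-1);		(any free channel)
 *	if (ch == BCM_DMA_CH_INVALID)
 *		fail;
 *	bcm_dma_setup_intr(ch, my_done_cb, my_softc);
 *	bcm_dma_start(ch, src_phys, dst_phys, len);
 *	... my_done_cb(ch, my_softc) runs from the DMA interrupt ...
 *	bcm_dma_free(ch);
 */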
static void
bcm_dma_intr(void *arg)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_ch *ch = (struct bcm_dma_ch *)arg;
	uint32_t cs, debug;

	/* my interrupt? */
	cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch->ch));

	if (!(cs & (CS_INT | CS_ERR)))
		return;

	/* running? */
	if (!(ch->flags & BCM_DMA_CH_USED)) {
		device_printf(sc->sc_dev,
		    "unused DMA intr CH=%d, CS=%x\n", ch->ch, cs);
		return;
	}

	if (cs & CS_ERR) {
		debug = bus_read_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch));
		device_printf(sc->sc_dev, "DMA error %d on CH%d\n",
		    debug & DEBUG_ERROR_MASK, ch->ch);
		bus_write_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch),
		    debug & DEBUG_ERROR_MASK);
		bcm_dma_reset(sc->sc_dev, ch->ch);
	}

	if (cs & CS_INT) {
		/* acknowledge interrupt */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch->ch),
		    CS_INT | CS_END);

		/* Prepare for possible access to len field */
		bus_dmamap_sync(sc->sc_dma_tag, ch->dma_map,
		    BUS_DMASYNC_POSTWRITE);

		/* call the registered callback, if any */
		if (ch->intr_func)
			ch->intr_func(ch->ch, ch->intr_arg);
	}
}

static int
bcm_dma_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "broadcom,bcm2835-dma"))
		return (ENXIO);

	device_set_desc(dev, "BCM2835 DMA Controller");
	return (BUS_PROBE_DEFAULT);
}

static int
bcm_dma_attach(device_t dev)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	int rid, err = 0;
	int i;

	sc->sc_dev = dev;

	if (bcm_dma_sc)
		return (ENXIO);

	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		sc->sc_irq[i] = NULL;
		sc->sc_intrhand[i] = NULL;
	}

	/* DMA0 - DMA14 */
	rid = 0;
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}

	/* IRQs for DMA0 - DMA11; XXX do not use DMA12 (spurious?) */
	for (rid = 0; rid < BCM_DMA_CH_MAX; rid++) {
		sc->sc_irq[rid] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE);
		if (sc->sc_irq[rid] == NULL) {
			device_printf(dev, "cannot allocate interrupt\n");
			err = ENXIO;
			goto fail;
		}
		if (bus_setup_intr(dev, sc->sc_irq[rid],
		    INTR_TYPE_MISC | INTR_MPSAFE,
		    NULL, bcm_dma_intr, &sc->sc_dma_ch[rid],
		    &sc->sc_intrhand[rid])) {
			device_printf(dev, "cannot setup interrupt handler\n");
			err = ENXIO;
			goto fail;
		}
	}

	mtx_init(&sc->sc_mtx, "bcmdma", "bcmdma", MTX_DEF);
	bcm_dma_sc = sc;

	err = bcm_dma_init(dev);
	if (err)
		goto fail;

	return (err);

fail:
	if (sc->sc_mem)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem);

	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		if (sc->sc_intrhand[i])
			bus_teardown_intr(dev, sc->sc_irq[i],
			    sc->sc_intrhand[i]);
		if (sc->sc_irq[i])
			bus_release_resource(dev, SYS_RES_IRQ, i,
			    sc->sc_irq[i]);
	}

	return (err);
}

static device_method_t bcm_dma_methods[] = {
	DEVMETHOD(device_probe,		bcm_dma_probe),
	DEVMETHOD(device_attach,	bcm_dma_attach),
	{ 0, 0 }
};

static driver_t bcm_dma_driver = {
	"bcm_dma",
	bcm_dma_methods,
	sizeof(struct bcm_dma_softc),
};

static devclass_t bcm_dma_devclass;

DRIVER_MODULE(bcm_dma, simplebus, bcm_dma_driver, bcm_dma_devclass, 0, 0);
MODULE_VERSION(bcm_dma, 1);