1 /*- 2 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com> 3 * All rights reserved. 4 * 5 * This software was developed by SRI International and the University of 6 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) 7 * ("CTSRD"), as part of the DARPA CRASH research programme. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
 */

/*
 * Synopsys DesignWare Mobile Storage Host Controller
 * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcbrvar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#ifdef EXT_RESOURCES
#include <dev/extres/clk/clk.h>
#endif

#include <dev/mmc/host/dwmmc_reg.h>
#include <dev/mmc/host/dwmmc_var.h>

#include "opt_mmccam.h"

#include "mmcbr_if.h"

/* Debug output; expands to nothing by default. */
#define dprintf(x, arg...)

/* Accessors for the controller register window (memory resource 0). */
#define	READ4(_sc, _reg) \
	bus_read_4((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val) \
	bus_write_4((_sc)->res[0], _reg, _val)

#define	DIV_ROUND_UP(n, d)		howmany(n, d)

#define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
#define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	DWMMC_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
	    "dwmmc", MTX_DEF)
#define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
#define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

/* sc->flags bits driving the request state machine. */
#define	PENDING_CMD	0x01	/* req->cmd not yet issued */
#define	PENDING_STOP	0x02	/* req->stop not yet issued */
#define	CARD_INIT_DONE	0x04	/* SEND_INIT sequence already performed */

/* Interrupt status bits treated as data-phase errors. */
#define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
				|SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
				|SDMMC_INTMASK_EBE)
/* Interrupt status bits treated as command-phase errors. */
#define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
				|SDMMC_INTMASK_RE)
#define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
				|SDMMC_INTMASK_HLE)

/* Internal DMA controller (IDMAC) descriptor des0 control bits. */
#define	DES0_DIC	(1 << 1)	/* Disable Interrupt on Completion */
#define	DES0_LD		(1 << 2)	/* Last Descriptor */
#define	DES0_FS		(1 << 3)	/* First Descriptor */
#define	DES0_CH		(1 << 4)	/* Second address Chained */
#define	DES0_ER		(1 << 5)	/* End of Ring */
#define	DES0_CES	(1 << 30)	/* Card Error Summary */
#define	DES0_OWN	(1 << 31)	/* Descriptor owned by the IDMAC */

#define	DES1_BS1_MASK	0xfff
#define	DES1_BS1_SHIFT	0

/* One IDMAC chained-mode descriptor (32-bit layout). */
struct idmac_desc {
	uint32_t	des0;	/* control */
	uint32_t	des1;	/* bufsize */
	uint32_t	des2;	/* buf1 phys addr */
	uint32_t	des3;	/* buf2 phys addr or next descr */
};

#define	DESC_MAX	256
#define	DESC_SIZE	(sizeof(struct idmac_desc) * DESC_MAX)
#define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */

static void dwmmc_next_operation(struct dwmmc_softc *);
static int dwmmc_setup_bus(struct dwmmc_softc *, int);
static int dma_done(struct dwmmc_softc *, struct mmc_command *);
static int dma_stop(struct dwmmc_softc *);
static void pio_read(struct dwmmc_softc *, struct mmc_command *);
static void pio_write(struct dwmmc_softc *, struct mmc_command *);

/* One memory window and one interrupt line per controller. */
static struct resource_spec dwmmc_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

#define	HWTYPE_MASK		(0x0000ffff)
#define	HWFLAG_MASK		(0xffff << 16)

/*
 * bus_dmamap_load() callback: record the physical address of the
 * single segment backing the descriptor ring.
 */
static void
dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/*
 * bus_dmamap_load() callback for the data buffer: fill one IDMAC
 * descriptor per DMA segment.  The first descriptor is marked FS,
 * the last LD with its completion interrupt enabled (DIC cleared);
 * all are handed to the controller by setting OWN.
 */
static void
dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct dwmmc_softc *sc;
	int idx;

	if (error != 0)
		return;

	sc = arg;

	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);

	for (idx = 0; idx < nsegs; idx++) {
		sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
		sc->desc_ring[idx].des1 = segs[idx].ds_len;
		sc->desc_ring[idx].des2 = segs[idx].ds_addr;

		if (idx == 0)
			sc->desc_ring[idx].des0 |= DES0_FS;

		if (idx == (nsegs - 1)) {
			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
			sc->desc_ring[idx].des0 |= DES0_LD;
		}
	}
}

/*
 * Set the given reset bits in SDMMC_CTRL and poll until the
 * controller clears them.  Returns 0 on success, 1 after ~1ms
 * (100 polls x 10us) without completion.
 */
static int
dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
{
	int reg;
	int i;

	reg = READ4(sc, SDMMC_CTRL);
	reg |= (reset_bits);
	WRITE4(sc, SDMMC_CTRL, reg);

	/* Wait reset done */
	for (i = 0; i < 100; i++) {
		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
			return (0);
		DELAY(10);
	}

	device_printf(sc->dev, "Reset failed\n");

	return (1);
}

/*
 * Allocate the IDMAC descriptor ring (one 4KB-aligned, 32-bit
 * addressable DMA segment), link the descriptors into a circular
 * chain via des3, and create the tag/map used later to load data
 * buffers.  Returns 0 on success, 1 on any failure.
 */
static int
dma_setup(struct dwmmc_softc *sc)
{
	int error;
	int nidx;
	int idx;

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    4096, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    DESC_SIZE, 1,		/* maxsize, nsegments */
	    DESC_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->desc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create ring DMA tag.\n");
		return (1);
	}

	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->desc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate descriptor ring.\n");
		return (1);
	}

	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
	    sc->desc_ring, DESC_SIZE, dwmmc_get1paddr,
	    &sc->desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load descriptor ring map.\n");
		return (1);
	}

	/* Chain each descriptor to the next; the last wraps to the first. */
	for (idx = 0; idx < sc->desc_count; idx++) {
		sc->desc_ring[idx].des0 = DES0_CH;
		sc->desc_ring[idx].des1 = 0;
		nidx = (idx + 1) % sc->desc_count;
		sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
		    (nidx * sizeof(struct idmac_desc));
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    4096, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sc->desc_count * MMC_SECTOR_SIZE, /* maxsize */
	    sc->desc_count,		/* nsegments */
	    MMC_SECTOR_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->buf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create ring DMA tag.\n");
		return (1);
	}

	error = bus_dmamap_create(sc->buf_tag, 0,
	    &sc->buf_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX buffer DMA map.\n");
		return (1);
	}

	return (0);
}

/*
 * Latch the command response from the controller.  RESP0 holds the
 * least significant word, so a 136-bit (long) response is stored
 * into resp[] in reverse register order.
 */
static void
dwmmc_cmd_done(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;

	cmd = sc->curcmd;
	if (cmd == NULL)
		return;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
		} else {
			cmd->resp[3] = 0;
			cmd->resp[2] = 0;
			cmd->resp[1] = 0;
			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
		}
	}
}

/*
 * Decide whether the current command has fully completed and, if so,
 * advance the request state machine.  A data command also waits for
 * DTO (data transfer over) and, for multi-block transfers issued
 * with the controller's auto-stop, for ACD (auto command done).
 */
static void
dwmmc_tasklet(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;

	cmd = sc->curcmd;
	if (cmd == NULL)
		return;

	if (!sc->cmd_done)
		return;

	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
		dwmmc_next_operation(sc);
	} else if (cmd->data && sc->dto_rcvd) {
		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
		     sc->use_auto_stop) {
			if (sc->acd_rcvd)
				dwmmc_next_operation(sc);
		} else {
			dwmmc_next_operation(sc);
		}
	}
}

/*
 * Interrupt handler.  Runs under the softc mutex; services the
 * controller status (MINTSTS) and, in DMA mode, the IDMAC status
 * (IDSTS), then advances the request via dwmmc_tasklet().
 */
static void
dwmmc_intr(void *arg)
333 { 334 struct mmc_command *cmd; 335 struct dwmmc_softc *sc; 336 uint32_t reg; 337 338 sc = arg; 339 340 DWMMC_LOCK(sc); 341 342 cmd = sc->curcmd; 343 344 /* First handle SDMMC controller interrupts */ 345 reg = READ4(sc, SDMMC_MINTSTS); 346 if (reg) { 347 dprintf("%s 0x%08x\n", __func__, reg); 348 349 if (reg & DWMMC_CMD_ERR_FLAGS) { 350 dprintf("cmd err 0x%08x cmd 0x%08x\n", 351 reg, cmd->opcode); 352 cmd->error = MMC_ERR_TIMEOUT; 353 } 354 355 if (reg & DWMMC_DATA_ERR_FLAGS) { 356 dprintf("data err 0x%08x cmd 0x%08x\n", 357 reg, cmd->opcode); 358 cmd->error = MMC_ERR_FAILED; 359 if (!sc->use_pio) { 360 dma_done(sc, cmd); 361 dma_stop(sc); 362 } 363 } 364 365 if (reg & SDMMC_INTMASK_CMD_DONE) { 366 dwmmc_cmd_done(sc); 367 sc->cmd_done = 1; 368 } 369 370 if (reg & SDMMC_INTMASK_ACD) 371 sc->acd_rcvd = 1; 372 373 if (reg & SDMMC_INTMASK_DTO) 374 sc->dto_rcvd = 1; 375 376 if (reg & SDMMC_INTMASK_CD) { 377 /* XXX: Handle card detect */ 378 } 379 } 380 381 /* Ack interrupts */ 382 WRITE4(sc, SDMMC_RINTSTS, reg); 383 384 if (sc->use_pio) { 385 if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) { 386 pio_read(sc, cmd); 387 } 388 if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) { 389 pio_write(sc, cmd); 390 } 391 } else { 392 /* Now handle DMA interrupts */ 393 reg = READ4(sc, SDMMC_IDSTS); 394 if (reg) { 395 dprintf("dma intr 0x%08x\n", reg); 396 if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) { 397 WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI | 398 SDMMC_IDINTEN_RI)); 399 WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI); 400 dma_done(sc, cmd); 401 } 402 } 403 } 404 405 dwmmc_tasklet(sc); 406 407 DWMMC_UNLOCK(sc); 408 } 409 410 static int 411 parse_fdt(struct dwmmc_softc *sc) 412 { 413 pcell_t dts_value[3]; 414 phandle_t node; 415 uint32_t bus_hz = 0, bus_width; 416 int len; 417 #ifdef EXT_RESOURCES 418 int error; 419 #endif 420 421 if ((node = ofw_bus_get_node(sc->dev)) == -1) 422 return (ENXIO); 423 424 /* bus-width */ 425 if (OF_getencprop(node, "bus-width", &bus_width, 
sizeof(uint32_t)) <= 0) 426 bus_width = 4; 427 if (bus_width >= 4) 428 sc->host.caps |= MMC_CAP_4_BIT_DATA; 429 if (bus_width >= 8) 430 sc->host.caps |= MMC_CAP_8_BIT_DATA; 431 432 /* max-frequency */ 433 if (OF_getencprop(node, "max-frequency", &sc->max_hz, sizeof(uint32_t)) <= 0) 434 sc->max_hz = 200000000; 435 436 /* fifo-depth */ 437 if ((len = OF_getproplen(node, "fifo-depth")) > 0) { 438 OF_getencprop(node, "fifo-depth", dts_value, len); 439 sc->fifo_depth = dts_value[0]; 440 } 441 442 /* num-slots (Deprecated) */ 443 sc->num_slots = 1; 444 if ((len = OF_getproplen(node, "num-slots")) > 0) { 445 device_printf(sc->dev, "num-slots property is deprecated\n"); 446 OF_getencprop(node, "num-slots", dts_value, len); 447 sc->num_slots = dts_value[0]; 448 } 449 450 /* clock-frequency */ 451 if ((len = OF_getproplen(node, "clock-frequency")) > 0) { 452 OF_getencprop(node, "clock-frequency", dts_value, len); 453 bus_hz = dts_value[0]; 454 } 455 456 #ifdef EXT_RESOURCES 457 458 /* IP block reset is optional */ 459 error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset); 460 if (error != 0 && error != ENOENT) { 461 device_printf(sc->dev, "Cannot get reset\n"); 462 goto fail; 463 } 464 465 /* vmmc regulator is optional */ 466 error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply", 467 &sc->vmmc); 468 if (error != 0 && error != ENOENT) { 469 device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n"); 470 goto fail; 471 } 472 473 /* vqmmc regulator is optional */ 474 error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply", 475 &sc->vqmmc); 476 if (error != 0 && error != ENOENT) { 477 device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n"); 478 goto fail; 479 } 480 481 /* Assert reset first */ 482 if (sc->hwreset != NULL) { 483 error = hwreset_assert(sc->hwreset); 484 if (error != 0) { 485 device_printf(sc->dev, "Cannot assert reset\n"); 486 goto fail; 487 } 488 } 489 490 /* BIU (Bus Interface Unit clock) is optional */ 491 error = 
clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu); 492 if (error != 0 && error != ENOENT) { 493 device_printf(sc->dev, "Cannot get 'biu' clock\n"); 494 goto fail; 495 } 496 if (sc->biu) { 497 error = clk_enable(sc->biu); 498 if (error != 0) { 499 device_printf(sc->dev, "cannot enable biu clock\n"); 500 goto fail; 501 } 502 } 503 504 /* 505 * CIU (Controller Interface Unit clock) is mandatory 506 * if no clock-frequency property is given 507 */ 508 error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu); 509 if (error != 0 && error != ENOENT) { 510 device_printf(sc->dev, "Cannot get 'ciu'clock\n"); 511 goto fail; 512 } 513 if (sc->ciu) { 514 if (bus_hz != 0) { 515 error = clk_set_freq(sc->ciu, bus_hz, 0); 516 if (error != 0) 517 device_printf(sc->dev, 518 "cannot set ciu clock to %u\n", bus_hz); 519 } 520 error = clk_enable(sc->ciu); 521 if (error != 0) { 522 device_printf(sc->dev, "cannot enable ciu clock\n"); 523 goto fail; 524 } 525 clk_get_freq(sc->ciu, &sc->bus_hz); 526 } 527 528 /* Take dwmmc out of reset */ 529 if (sc->hwreset != NULL) { 530 error = hwreset_deassert(sc->hwreset); 531 if (error != 0) { 532 device_printf(sc->dev, "Cannot deassert reset\n"); 533 goto fail; 534 } 535 } 536 #endif /* EXT_RESOURCES */ 537 538 if (sc->bus_hz == 0) { 539 device_printf(sc->dev, "No bus speed provided\n"); 540 goto fail; 541 } 542 543 return (0); 544 545 fail: 546 return (ENXIO); 547 } 548 549 int 550 dwmmc_attach(device_t dev) 551 { 552 struct dwmmc_softc *sc; 553 int error; 554 int slot; 555 556 sc = device_get_softc(dev); 557 558 sc->dev = dev; 559 560 /* Why not to use Auto Stop? 
It save a hundred of irq per second */ 561 sc->use_auto_stop = 1; 562 563 error = parse_fdt(sc); 564 if (error != 0) { 565 device_printf(dev, "Can't get FDT property.\n"); 566 return (ENXIO); 567 } 568 569 DWMMC_LOCK_INIT(sc); 570 571 if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) { 572 device_printf(dev, "could not allocate resources\n"); 573 return (ENXIO); 574 } 575 576 /* Setup interrupt handler. */ 577 error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE, 578 NULL, dwmmc_intr, sc, &sc->intr_cookie); 579 if (error != 0) { 580 device_printf(dev, "could not setup interrupt handler.\n"); 581 return (ENXIO); 582 } 583 584 device_printf(dev, "Hardware version ID is %04x\n", 585 READ4(sc, SDMMC_VERID) & 0xffff); 586 587 if (sc->desc_count == 0) 588 sc->desc_count = DESC_MAX; 589 590 /* XXX: we support operation for slot index 0 only */ 591 slot = 0; 592 if (sc->pwren_inverted) { 593 WRITE4(sc, SDMMC_PWREN, (0 << slot)); 594 } else { 595 WRITE4(sc, SDMMC_PWREN, (1 << slot)); 596 } 597 598 /* Reset all */ 599 if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET | 600 SDMMC_CTRL_FIFO_RESET | 601 SDMMC_CTRL_DMA_RESET))) 602 return (ENXIO); 603 604 dwmmc_setup_bus(sc, sc->host.f_min); 605 606 if (sc->fifo_depth == 0) { 607 sc->fifo_depth = 1 + 608 ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff); 609 device_printf(dev, "No fifo-depth, using FIFOTH %x\n", 610 sc->fifo_depth); 611 } 612 613 if (!sc->use_pio) { 614 if (dma_setup(sc)) 615 return (ENXIO); 616 617 /* Install desc base */ 618 WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr); 619 620 /* Enable DMA interrupts */ 621 WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK); 622 WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI | 623 SDMMC_IDINTEN_RI | 624 SDMMC_IDINTEN_TI)); 625 } 626 627 /* Clear and disable interrups for a while */ 628 WRITE4(sc, SDMMC_RINTSTS, 0xffffffff); 629 WRITE4(sc, SDMMC_INTMASK, 0); 630 631 /* Maximum timeout */ 632 WRITE4(sc, SDMMC_TMOUT, 0xffffffff); 633 634 /* Enable interrupts */ 635 
WRITE4(sc, SDMMC_RINTSTS, 0xffffffff); 636 WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE | 637 SDMMC_INTMASK_DTO | 638 SDMMC_INTMASK_ACD | 639 SDMMC_INTMASK_TXDR | 640 SDMMC_INTMASK_RXDR | 641 DWMMC_ERR_FLAGS | 642 SDMMC_INTMASK_CD)); 643 WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE); 644 645 sc->host.f_min = 400000; 646 sc->host.f_max = sc->max_hz; 647 sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; 648 sc->host.caps |= MMC_CAP_HSPEED; 649 sc->host.caps |= MMC_CAP_SIGNALING_330; 650 651 device_add_child(dev, "mmc", -1); 652 return (bus_generic_attach(dev)); 653 } 654 655 static int 656 dwmmc_setup_bus(struct dwmmc_softc *sc, int freq) 657 { 658 int tout; 659 int div; 660 661 if (freq == 0) { 662 WRITE4(sc, SDMMC_CLKENA, 0); 663 WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA | 664 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START)); 665 666 tout = 1000; 667 do { 668 if (tout-- < 0) { 669 device_printf(sc->dev, "Failed update clk\n"); 670 return (1); 671 } 672 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START); 673 674 return (0); 675 } 676 677 WRITE4(sc, SDMMC_CLKENA, 0); 678 WRITE4(sc, SDMMC_CLKSRC, 0); 679 680 div = (sc->bus_hz != freq) ? 
	    DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;

	WRITE4(sc, SDMMC_CLKDIV, div);
	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
		SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

	/* Latch the new divider into the card clock domain. */
	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to update clk");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	/* Enable the card clock with low-power (auto-gating) mode. */
	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
	    SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);

	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to enable clk\n");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	return (0);
}

/*
 * mmcbr update_ios method: apply the bus width, DDR timing and clock
 * requested by the MMC stack, invoking the SoC-specific update_ios
 * hook (if set) before reprogramming the bus clock.
 */
static int
dwmmc_update_ios(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;
	struct mmc_ios *ios;
	uint32_t reg;
	int ret = 0;

	sc = device_get_softc(brdev);
	ios = &sc->host.ios;

	dprintf("Setting up clk %u bus_width %d\n",
		ios->clock, ios->bus_width);

	if (ios->bus_width == bus_width_8)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
	else if (ios->bus_width == bus_width_4)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
	else
		WRITE4(sc, SDMMC_CTYPE, 0);

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
		/* XXX: take care about DDR or SDR use here */
		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
	}

	/* Set DDR mode */
	reg = READ4(sc, SDMMC_UHS_REG);
	if (ios->timing == bus_timing_uhs_ddr50 ||
	    ios->timing == bus_timing_mmc_ddr52 ||
	    ios->timing == bus_timing_mmc_hs400)
		reg |= (SDMMC_UHS_REG_DDR);
	else
		reg &= ~(SDMMC_UHS_REG_DDR);
	WRITE4(sc, SDMMC_UHS_REG, reg);

	if (sc->update_ios)
		ret = sc->update_ios(sc, ios);

	dwmmc_setup_bus(sc, ios->clock);

	return (ret);
}

/*
 * Finish a DMA transfer for the given command: sync the data buffer
 * and descriptor ring, then unload the buffer map.
 */
static int
dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;

	data = cmd->data;

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_POSTWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_POSTREAD);

	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
	    BUS_DMASYNC_POSTWRITE);

	bus_dmamap_unload(sc->buf_tag, sc->buf_map);

	return (0);
}

/*
 * Abort IDMAC operation: stop using the internal DMA controller and
 * request the DMA and BMOD software resets.  Used on data errors.
 */
static int
dma_stop(struct dwmmc_softc *sc)
{
	int reg;

	reg = READ4(sc, SDMMC_CTRL);
	reg &= ~(SDMMC_CTRL_USE_IDMAC);
	reg |= (SDMMC_CTRL_DMA_RESET);
	WRITE4(sc, SDMMC_CTRL, reg);

	reg = READ4(sc, SDMMC_BMOD);
	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	reg |= (SDMMC_BMOD_SWR);
	WRITE4(sc, SDMMC_BMOD, reg);

	return (0);
}

/*
 * Prepare a DMA transfer: load the data buffer (dwmmc_ring_setup()
 * fills the descriptor ring), sync the maps, program the FIFO
 * watermarks, enable the IDMAC, and poke the poll-demand register
 * to start descriptor fetch.  The wmb() calls order the register
 * writes against each other.
 */
static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int err;
	int reg;

	data = cmd->data;

	/* FIFO watermark interrupts are not used in DMA mode. */
	reg = READ4(sc, SDMMC_INTMASK);
	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
	WRITE4(sc, SDMMC_INTMASK, reg);

	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
		data->data, data->len, dwmmc_ring_setup,
		sc, BUS_DMA_NOWAIT);
	if (err != 0)
		panic("dmamap_load failed\n");

	/* Ensure the device can see the desc */
	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
	    BUS_DMASYNC_PREWRITE);

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
			BUS_DMASYNC_PREREAD);

	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	reg = READ4(sc, SDMMC_CTRL);
	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
	WRITE4(sc, SDMMC_CTRL, reg);
	wmb();

	reg = READ4(sc, SDMMC_BMOD);
	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	WRITE4(sc, SDMMC_BMOD, reg);

	/* Start */
	WRITE4(sc, SDMMC_PLDMND, 1);

	return (0);
}

/*
 * Prepare a PIO transfer: reset the progress counter and program the
 * FIFO watermarks that drive the TXDR/RXDR interrupts.
 */
static int
pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int reg;

	data = cmd->data;
	data->xfer_len = 0;

	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	return (0);
}

/*
 * Drain the receive FIFO into the data buffer, 32 bits at a time,
 * until the FIFO is empty or the transfer is complete.  Called from
 * the interrupt handler on RXDR/DTO.
 */
static void
pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_READ) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_EMPTY)
			break;
		*p++ = READ4(sc, SDMMC_DATA);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
}

/*
 * Fill the transmit FIFO from the data buffer, 32 bits at a time,
 * until the FIFO is full or the transfer is complete.  Called from
 * the interrupt handler on TXDR/DTO.
 */
static void
pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_WRITE) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_FULL)
			break;
		WRITE4(sc, SDMMC_DATA, *p++);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
}

/*
 * Build the SDMMC_CMD word for the given command, set up the data
 * phase (PIO or DMA) if any, and issue the command.  Completion is
 * reported through the interrupt handler and dwmmc_tasklet().
 */
static void
dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
struct mmc_data *data; 922 uint32_t blksz; 923 uint32_t cmdr; 924 925 sc->curcmd = cmd; 926 data = cmd->data; 927 928 if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP) 929 dwmmc_setup_bus(sc, sc->host.ios.clock); 930 931 /* XXX Upper layers don't always set this */ 932 cmd->mrq = sc->req; 933 934 /* Begin setting up command register. */ 935 936 cmdr = cmd->opcode; 937 938 dprintf("cmd->opcode 0x%08x\n", cmd->opcode); 939 940 if (cmd->opcode == MMC_STOP_TRANSMISSION || 941 cmd->opcode == MMC_GO_IDLE_STATE || 942 cmd->opcode == MMC_GO_INACTIVE_STATE) 943 cmdr |= SDMMC_CMD_STOP_ABORT; 944 else if (cmd->opcode != MMC_SEND_STATUS && data) 945 cmdr |= SDMMC_CMD_WAIT_PRVDATA; 946 947 /* Set up response handling. */ 948 if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) { 949 cmdr |= SDMMC_CMD_RESP_EXP; 950 if (cmd->flags & MMC_RSP_136) 951 cmdr |= SDMMC_CMD_RESP_LONG; 952 } 953 954 if (cmd->flags & MMC_RSP_CRC) 955 cmdr |= SDMMC_CMD_RESP_CRC; 956 957 /* 958 * XXX: Not all platforms want this. 959 */ 960 cmdr |= SDMMC_CMD_USE_HOLD_REG; 961 962 if ((sc->flags & CARD_INIT_DONE) == 0) { 963 sc->flags |= (CARD_INIT_DONE); 964 cmdr |= SDMMC_CMD_SEND_INIT; 965 } 966 967 if (data) { 968 if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || 969 cmd->opcode == MMC_READ_MULTIPLE_BLOCK) && 970 sc->use_auto_stop) 971 cmdr |= SDMMC_CMD_SEND_ASTOP; 972 973 cmdr |= SDMMC_CMD_DATA_EXP; 974 if (data->flags & MMC_DATA_STREAM) 975 cmdr |= SDMMC_CMD_MODE_STREAM; 976 if (data->flags & MMC_DATA_WRITE) 977 cmdr |= SDMMC_CMD_DATA_WRITE; 978 979 WRITE4(sc, SDMMC_TMOUT, 0xffffffff); 980 WRITE4(sc, SDMMC_BYTCNT, data->len); 981 blksz = (data->len < MMC_SECTOR_SIZE) ? 
\ 982 data->len : MMC_SECTOR_SIZE; 983 WRITE4(sc, SDMMC_BLKSIZ, blksz); 984 985 if (sc->use_pio) { 986 pio_prepare(sc, cmd); 987 } else { 988 dma_prepare(sc, cmd); 989 } 990 wmb(); 991 } 992 993 dprintf("cmdr 0x%08x\n", cmdr); 994 995 WRITE4(sc, SDMMC_CMDARG, cmd->arg); 996 wmb(); 997 WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START); 998 }; 999 1000 static void 1001 dwmmc_next_operation(struct dwmmc_softc *sc) 1002 { 1003 struct mmc_request *req; 1004 1005 req = sc->req; 1006 if (req == NULL) 1007 return; 1008 1009 sc->acd_rcvd = 0; 1010 sc->dto_rcvd = 0; 1011 sc->cmd_done = 0; 1012 1013 /* 1014 * XXX: Wait until card is still busy. 1015 * We do need this to prevent data timeouts, 1016 * mostly caused by multi-block write command 1017 * followed by single-read. 1018 */ 1019 while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY)) 1020 continue; 1021 1022 if (sc->flags & PENDING_CMD) { 1023 sc->flags &= ~PENDING_CMD; 1024 dwmmc_start_cmd(sc, req->cmd); 1025 return; 1026 } else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) { 1027 sc->flags &= ~PENDING_STOP; 1028 dwmmc_start_cmd(sc, req->stop); 1029 return; 1030 } 1031 1032 sc->req = NULL; 1033 sc->curcmd = NULL; 1034 req->done(req); 1035 } 1036 1037 static int 1038 dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req) 1039 { 1040 struct dwmmc_softc *sc; 1041 1042 sc = device_get_softc(brdev); 1043 1044 dprintf("%s\n", __func__); 1045 1046 DWMMC_LOCK(sc); 1047 1048 if (sc->req != NULL) { 1049 DWMMC_UNLOCK(sc); 1050 return (EBUSY); 1051 } 1052 1053 sc->req = req; 1054 sc->flags |= PENDING_CMD; 1055 if (sc->req->stop) 1056 sc->flags |= PENDING_STOP; 1057 dwmmc_next_operation(sc); 1058 1059 DWMMC_UNLOCK(sc); 1060 return (0); 1061 } 1062 1063 static int 1064 dwmmc_get_ro(device_t brdev, device_t reqdev) 1065 { 1066 1067 dprintf("%s\n", __func__); 1068 1069 return (0); 1070 } 1071 1072 static int 1073 dwmmc_acquire_host(device_t brdev, device_t reqdev) 1074 { 1075 struct dwmmc_softc *sc; 1076 1077 sc 
= device_get_softc(brdev); 1078 1079 DWMMC_LOCK(sc); 1080 while (sc->bus_busy) 1081 msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5); 1082 sc->bus_busy++; 1083 DWMMC_UNLOCK(sc); 1084 return (0); 1085 } 1086 1087 static int 1088 dwmmc_release_host(device_t brdev, device_t reqdev) 1089 { 1090 struct dwmmc_softc *sc; 1091 1092 sc = device_get_softc(brdev); 1093 1094 DWMMC_LOCK(sc); 1095 sc->bus_busy--; 1096 wakeup(sc); 1097 DWMMC_UNLOCK(sc); 1098 return (0); 1099 } 1100 1101 static int 1102 dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) 1103 { 1104 struct dwmmc_softc *sc; 1105 1106 sc = device_get_softc(bus); 1107 1108 switch (which) { 1109 default: 1110 return (EINVAL); 1111 case MMCBR_IVAR_BUS_MODE: 1112 *(int *)result = sc->host.ios.bus_mode; 1113 break; 1114 case MMCBR_IVAR_BUS_WIDTH: 1115 *(int *)result = sc->host.ios.bus_width; 1116 break; 1117 case MMCBR_IVAR_CHIP_SELECT: 1118 *(int *)result = sc->host.ios.chip_select; 1119 break; 1120 case MMCBR_IVAR_CLOCK: 1121 *(int *)result = sc->host.ios.clock; 1122 break; 1123 case MMCBR_IVAR_F_MIN: 1124 *(int *)result = sc->host.f_min; 1125 break; 1126 case MMCBR_IVAR_F_MAX: 1127 *(int *)result = sc->host.f_max; 1128 break; 1129 case MMCBR_IVAR_HOST_OCR: 1130 *(int *)result = sc->host.host_ocr; 1131 break; 1132 case MMCBR_IVAR_MODE: 1133 *(int *)result = sc->host.mode; 1134 break; 1135 case MMCBR_IVAR_OCR: 1136 *(int *)result = sc->host.ocr; 1137 break; 1138 case MMCBR_IVAR_POWER_MODE: 1139 *(int *)result = sc->host.ios.power_mode; 1140 break; 1141 case MMCBR_IVAR_VDD: 1142 *(int *)result = sc->host.ios.vdd; 1143 break; 1144 case MMCBR_IVAR_VCCQ: 1145 *(int *)result = sc->host.ios.vccq; 1146 break; 1147 case MMCBR_IVAR_CAPS: 1148 *(int *)result = sc->host.caps; 1149 break; 1150 case MMCBR_IVAR_MAX_DATA: 1151 *(int *)result = sc->desc_count; 1152 break; 1153 case MMCBR_IVAR_TIMING: 1154 *(int *)result = sc->host.ios.timing; 1155 break; 1156 } 1157 return (0); 1158 } 1159 1160 static int 1161 
dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) 1162 { 1163 struct dwmmc_softc *sc; 1164 1165 sc = device_get_softc(bus); 1166 1167 switch (which) { 1168 default: 1169 return (EINVAL); 1170 case MMCBR_IVAR_BUS_MODE: 1171 sc->host.ios.bus_mode = value; 1172 break; 1173 case MMCBR_IVAR_BUS_WIDTH: 1174 sc->host.ios.bus_width = value; 1175 break; 1176 case MMCBR_IVAR_CHIP_SELECT: 1177 sc->host.ios.chip_select = value; 1178 break; 1179 case MMCBR_IVAR_CLOCK: 1180 sc->host.ios.clock = value; 1181 break; 1182 case MMCBR_IVAR_MODE: 1183 sc->host.mode = value; 1184 break; 1185 case MMCBR_IVAR_OCR: 1186 sc->host.ocr = value; 1187 break; 1188 case MMCBR_IVAR_POWER_MODE: 1189 sc->host.ios.power_mode = value; 1190 break; 1191 case MMCBR_IVAR_VDD: 1192 sc->host.ios.vdd = value; 1193 break; 1194 case MMCBR_IVAR_TIMING: 1195 sc->host.ios.timing = value; 1196 break; 1197 case MMCBR_IVAR_VCCQ: 1198 sc->host.ios.vccq = value; 1199 break; 1200 /* These are read-only */ 1201 case MMCBR_IVAR_CAPS: 1202 case MMCBR_IVAR_HOST_OCR: 1203 case MMCBR_IVAR_F_MIN: 1204 case MMCBR_IVAR_F_MAX: 1205 case MMCBR_IVAR_MAX_DATA: 1206 return (EINVAL); 1207 } 1208 return (0); 1209 } 1210 1211 static device_method_t dwmmc_methods[] = { 1212 /* Bus interface */ 1213 DEVMETHOD(bus_read_ivar, dwmmc_read_ivar), 1214 DEVMETHOD(bus_write_ivar, dwmmc_write_ivar), 1215 1216 /* mmcbr_if */ 1217 DEVMETHOD(mmcbr_update_ios, dwmmc_update_ios), 1218 DEVMETHOD(mmcbr_request, dwmmc_request), 1219 DEVMETHOD(mmcbr_get_ro, dwmmc_get_ro), 1220 DEVMETHOD(mmcbr_acquire_host, dwmmc_acquire_host), 1221 DEVMETHOD(mmcbr_release_host, dwmmc_release_host), 1222 1223 DEVMETHOD_END 1224 }; 1225 1226 DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods, 1227 sizeof(struct dwmmc_softc)); 1228