/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Synopsys DesignWare Mobile Storage Host Controller
 * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcreg.h>
#include <dev/mmc/mmcbrvar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <dev/mmc/host/dwmmc.h>

#include "mmcbr_if.h"

#define	dprintf(x, arg...)
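
/*
 * Register accessors: all controller registers are 32 bits wide and are
 * accessed through the first (memory) resource.
 */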

#define	READ4(_sc, _reg)	\
	bus_read_4((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val)	\
	bus_write_4((_sc)->res[0], _reg, _val)

#define	DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
#define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	DWMMC_LOCK_INIT(_sc)		\
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),	\
	    "dwmmc", MTX_DEF)
#define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
#define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

#define	PENDING_CMD	0x01
#define	PENDING_STOP	0x02
#define	CARD_INIT_DONE	0x04

#define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
				|SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
				|SDMMC_INTMASK_EBE)
#define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
				|SDMMC_INTMASK_RE)
#define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
				|SDMMC_INTMASK_HLE)

#define	DES0_DIC	(1 << 1)
#define	DES0_LD		(1 << 2)
#define	DES0_FS		(1 << 3)
#define	DES0_CH		(1 << 4)
#define	DES0_ER		(1 << 5)
#define	DES0_CES	(1 << 30)
#define	DES0_OWN	(1 << 31)

#define	DES1_BS1_MASK	0xfff
#define	DES1_BS1_SHIFT	0

struct idmac_desc {
	uint32_t	des0;	/* control */
	uint32_t	des1;	/* bufsize */
	uint32_t	des2;	/* buf1 phys addr */
	uint32_t	des3;	/* buf2 phys addr or next descr */
};

#define	DESC_COUNT	256
#define	DESC_SIZE	(sizeof(struct idmac_desc) * DESC_COUNT)
#define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */

struct dwmmc_softc {
	struct resource		*res[2];
	bus_space_tag_t		bst;
	bus_space_handle_t	bsh;
	device_t		dev;
	void			*intr_cookie;
	struct mmc_host		host;
	struct mtx		sc_mtx;
	struct mmc_request	*req;
	struct mmc_command	*curcmd;
	uint32_t		flags;
	uint32_t		hwtype;
	uint32_t		use_auto_stop;
	uint32_t		use_pio;
	uint32_t		pwren_inverted;

	bus_dma_tag_t		desc_tag;
	bus_dmamap_t		desc_map;
	struct idmac_desc	*desc_ring;
	bus_addr_t		desc_ring_paddr;
	bus_dma_tag_t		buf_tag;
	bus_dmamap_t		buf_map;

	uint32_t		bus_busy;
	uint32_t		dto_rcvd;
	uint32_t		acd_rcvd;
	uint32_t		cmd_done;
	uint32_t		bus_hz;
	uint32_t		fifo_depth;
	uint32_t		num_slots;
	uint32_t		sdr_timing;
	uint32_t		ddr_timing;
};

static void dwmmc_next_operation(struct dwmmc_softc *);
static int dwmmc_setup_bus(struct dwmmc_softc *, int);
static int dma_done(struct dwmmc_softc *, struct mmc_command *);
static int dma_stop(struct dwmmc_softc *);
static void pio_read(struct dwmmc_softc *, struct mmc_command *);
static void pio_write(struct dwmmc_softc *, struct mmc_command *);

static struct resource_spec dwmmc_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

enum {
	HWTYPE_NONE,
	HWTYPE_ALTERA,
	HWTYPE_EXYNOS,
	HWTYPE_ROCKCHIP,
};

#define	HWTYPE_MASK	(0x0000ffff)
#define	HWFLAG_MASK	(0xffff << 16)

static struct ofw_compat_data compat_data[] = {
	{"altr,socfpga-dw-mshc",	HWTYPE_ALTERA},
	{"samsung,exynos5420-dw-mshc",	HWTYPE_EXYNOS},
	{"rockchip,rk2928-dw-mshc",	HWTYPE_ROCKCHIP},
	{NULL,				HWTYPE_NONE},
};

static void
dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
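	/* A single segment is expected; record its bus address. */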
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

static void
dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct dwmmc_softc *sc;
	int idx;

	if (error != 0)
		return;

	sc = arg;

	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);

	for (idx = 0; idx < nsegs; idx++) {
		sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
		sc->desc_ring[idx].des1 = segs[idx].ds_len;
		sc->desc_ring[idx].des2 = segs[idx].ds_addr;

		if (idx == 0)
			sc->desc_ring[idx].des0 |= DES0_FS;

		if (idx == (nsegs - 1)) {
			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
			sc->desc_ring[idx].des0 |= DES0_LD;
		}
	}
}

static int
dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
{
	int reg;
	int i;

	reg = READ4(sc, SDMMC_CTRL);
	reg |= (reset_bits);
	WRITE4(sc, SDMMC_CTRL, reg);

	/* Wait for the reset to complete. */
	for (i = 0; i < 100; i++) {
		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
			return (0);
		DELAY(10);
	}

	device_printf(sc->dev, "Reset failed\n");

	return (1);
}

static int
dma_setup(struct dwmmc_softc *sc)
{
	int error;
	int nidx;
	int idx;

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    4096, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    DESC_SIZE, 1,		/* maxsize, nsegments */
	    DESC_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->desc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create ring DMA tag.\n");
		return (1);
	}

	error = bus_dmamem_alloc(sc->desc_tag, (void **)&sc->desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->desc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate descriptor ring.\n");
		return (1);
	}

	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
	    sc->desc_ring, DESC_SIZE, dwmmc_get1paddr,
	    &sc->desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load descriptor ring map.\n");
		return (1);
	}

	for (idx = 0; idx < DESC_COUNT; idx++) {
		sc->desc_ring[idx].des0 = DES0_CH;
		sc->desc_ring[idx].des1 = 0;
		nidx = (idx + 1) % DESC_COUNT;
		sc->desc_ring[idx].des3 = sc->desc_ring_paddr +
		    (nidx * sizeof(struct idmac_desc));
	}
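
	/*
	 * Create the tag and map used to load data buffers for the IDMAC.
	 */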
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    4096, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    DESC_COUNT * MMC_SECTOR_SIZE, /* maxsize */
	    DESC_COUNT,			/* nsegments */
	    MMC_SECTOR_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->buf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create buf DMA tag.\n");
		return (1);
	}

	error = bus_dmamap_create(sc->buf_tag, 0,
	    &sc->buf_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX buffer DMA map.\n");
		return (1);
	}

	return (0);
}

static void
dwmmc_cmd_done(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;

	cmd = sc->curcmd;
	if (cmd == NULL)
		return;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
		} else {
			cmd->resp[3] = 0;
			cmd->resp[2] = 0;
			cmd->resp[1] = 0;
			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
		}
	}
}

static void
dwmmc_tasklet(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;

	cmd = sc->curcmd;
	if (cmd == NULL)
		return;

	if (!sc->cmd_done)
		return;

	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
		dwmmc_next_operation(sc);
	} else if (cmd->data && sc->dto_rcvd) {
		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		    cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
		    sc->use_auto_stop) {
			if (sc->acd_rcvd)
				dwmmc_next_operation(sc);
		} else {
			dwmmc_next_operation(sc);
		}
	}
}

static void
dwmmc_intr(void *arg)
{
	struct mmc_command *cmd;
	struct dwmmc_softc *sc;
	uint32_t reg;

	sc = arg;

	DWMMC_LOCK(sc);

	cmd = sc->curcmd;

	/* First handle SDMMC controller interrupts */
	reg = READ4(sc, SDMMC_MINTSTS);
	if (reg) {
		dprintf("%s 0x%08x\n", __func__, reg);

		if (reg & DWMMC_CMD_ERR_FLAGS) {
			WRITE4(sc, SDMMC_RINTSTS, DWMMC_CMD_ERR_FLAGS);
			dprintf("cmd err 0x%08x cmd 0x%08x\n",
			    reg, cmd->opcode);
			cmd->error = MMC_ERR_TIMEOUT;
		}

		if (reg & DWMMC_DATA_ERR_FLAGS) {
			WRITE4(sc, SDMMC_RINTSTS, DWMMC_DATA_ERR_FLAGS);
			dprintf("data err 0x%08x cmd 0x%08x\n",
			    reg, cmd->opcode);
			cmd->error = MMC_ERR_FAILED;
			if (!sc->use_pio) {
				dma_done(sc, cmd);
				dma_stop(sc);
			}
		}

		if (reg & SDMMC_INTMASK_CMD_DONE) {
			dwmmc_cmd_done(sc);
			sc->cmd_done = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CMD_DONE);
		}

		if (reg & SDMMC_INTMASK_ACD) {
			sc->acd_rcvd = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_ACD);
		}

		if (reg & SDMMC_INTMASK_DTO) {
			sc->dto_rcvd = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_DTO);
		}

		if (reg & SDMMC_INTMASK_CD) {
			/* XXX: Handle card detect */
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CD);
		}
	}

	if (sc->use_pio) {
		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
			pio_read(sc, cmd);
		}
		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
			pio_write(sc, cmd);
		}
	} else {
		/* Now handle DMA interrupts */
		reg = READ4(sc, SDMMC_IDSTS);
		if (reg) {
			dprintf("dma intr 0x%08x\n", reg);
			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
				    SDMMC_IDINTEN_RI));
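				/* Also clear the normal-interrupt summary bit. */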
				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
				dma_done(sc, cmd);
			}
		}
	}

	dwmmc_tasklet(sc);

	DWMMC_UNLOCK(sc);
}

static int
parse_fdt(struct dwmmc_softc *sc)
{
	pcell_t dts_value[3];
	phandle_t node;
	int len;

	if ((node = ofw_bus_get_node(sc->dev)) == -1)
		return (ENXIO);

	/* fifo-depth */
	if ((len = OF_getproplen(node, "fifo-depth")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "fifo-depth", dts_value, len);
	sc->fifo_depth = dts_value[0];

	/* num-slots */
	if ((len = OF_getproplen(node, "num-slots")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "num-slots", dts_value, len);
	sc->num_slots = dts_value[0];

	/*
	 * We need some platform-specific code to know what clock
	 * is supplied to our device. For now, rely on the value
	 * specified in the FDT.
	 */
	if ((len = OF_getproplen(node, "bus-frequency")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "bus-frequency", dts_value, len);
	sc->bus_hz = dts_value[0];

	/*
	 * Platform-specific stuff
	 * XXX: Move to separate file
	 */

	if ((sc->hwtype & HWTYPE_MASK) != HWTYPE_EXYNOS)
		return (0);

	if ((len = OF_getproplen(node, "samsung,dw-mshc-ciu-div")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "samsung,dw-mshc-ciu-div", dts_value, len);
	sc->sdr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);
	sc->ddr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);

	if ((len = OF_getproplen(node, "samsung,dw-mshc-sdr-timing")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "samsung,dw-mshc-sdr-timing", dts_value, len);
	sc->sdr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
	    (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));

	if ((len = OF_getproplen(node, "samsung,dw-mshc-ddr-timing")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "samsung,dw-mshc-ddr-timing", dts_value, len);
	sc->ddr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
	    (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));

	return (0);
}

static int
dwmmc_probe(device_t dev)
{
	uintptr_t hwtype;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (hwtype == HWTYPE_NONE)
		return (ENXIO);

	device_set_desc(dev, "Synopsys DesignWare Mobile "
	    "Storage Host Controller");
	return (BUS_PROBE_DEFAULT);
}

static int
dwmmc_attach(device_t dev)
{
	struct dwmmc_softc *sc;
	device_t child;
	int error;
	int slot;

	sc = device_get_softc(dev);

	sc->dev = dev;
	sc->hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	/* Use Auto Stop: it saves about a hundred IRQs per second. */
	sc->use_auto_stop = 1;

	error = parse_fdt(sc);
	if (error != 0) {
		device_printf(dev, "Can't get FDT property.\n");
		return (ENXIO);
	}

	DWMMC_LOCK_INIT(sc);

	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	/* Setup interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	device_printf(dev, "Hardware version ID is %04x\n",
	    READ4(sc, SDMMC_VERID) & 0xffff);

	sc->use_pio = 0;
	sc->pwren_inverted = 0;

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP) {
		sc->use_pio = 1;
		sc->pwren_inverted = 1;
	} else {
		WRITE4(sc, EMMCP_MPSBEGIN0, 0);
		WRITE4(sc, EMMCP_SEND0, 0);
		WRITE4(sc, EMMCP_CTRL0, (MPSCTRL_SECURE_READ_BIT |
		    MPSCTRL_SECURE_WRITE_BIT |
		    MPSCTRL_NON_SECURE_READ_BIT |
		    MPSCTRL_NON_SECURE_WRITE_BIT |
		    MPSCTRL_VALID));
	}

	/* XXX: we support operation for slot index 0 only */
	slot = 0;
	if (sc->pwren_inverted) {
		WRITE4(sc, SDMMC_PWREN, (0 << slot));
	} else {
		WRITE4(sc, SDMMC_PWREN, (1 << slot));
	}

	/* Reset all */
	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
	    SDMMC_CTRL_FIFO_RESET |
	    SDMMC_CTRL_DMA_RESET)))
		return (ENXIO);

	dwmmc_setup_bus(sc, sc->host.f_min);

	if (!sc->use_pio) {
		if (dma_setup(sc))
			return (ENXIO);

		/* Install desc base */
		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);

		/* Enable DMA interrupts */
		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
		    SDMMC_IDINTEN_RI |
		    SDMMC_IDINTEN_TI));
	}

	/* Clear and disable interrupts for a while */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, 0);

	/* Maximum timeout */
	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);

	/* Enable interrupts */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
	    SDMMC_INTMASK_DTO |
	    SDMMC_INTMASK_ACD |
	    SDMMC_INTMASK_TXDR |
	    SDMMC_INTMASK_RXDR |
	    DWMMC_ERR_FLAGS |
	    SDMMC_INTMASK_CD));
	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);

	sc->host.f_min = 400000;
	sc->host.f_max = 200000000;
	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->host.caps = MMC_CAP_4_BIT_DATA;

	child = device_add_child(dev, "mmc", 0);
	return (bus_generic_attach(dev));
}

static int
dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
{
	int tout;
	int div;

	if (freq == 0) {
		WRITE4(sc, SDMMC_CLKENA, 0);
		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
		    SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

		tout = 1000;
		do {
			if (tout-- < 0) {
				device_printf(sc->dev, "Failed to update clk\n");
				return (1);
			}
		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

		return (0);
	}

	WRITE4(sc, SDMMC_CLKENA, 0);
	WRITE4(sc, SDMMC_CLKSRC, 0);

	div = (sc->bus_hz != freq) ?
	    DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
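
	/*
	 * The controller's CLKDIV register holds half of the divider:
	 * the card clock comes out as bus_hz / (2 * CLKDIV), and a
	 * value of zero bypasses the divider entirely.
	 */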
	WRITE4(sc, SDMMC_CLKDIV, div);
	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
	    SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to update clk\n");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
	    SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);

	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to enable clk\n");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	return (0);
}

static int
dwmmc_update_ios(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;
	struct mmc_ios *ios;

	sc = device_get_softc(brdev);
	ios = &sc->host.ios;

	dprintf("Setting up clk %u bus_width %d\n",
	    ios->clock, ios->bus_width);

	dwmmc_setup_bus(sc, ios->clock);

	if (ios->bus_width == bus_width_8)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
	else if (ios->bus_width == bus_width_4)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
	else
		WRITE4(sc, SDMMC_CTYPE, 0);

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
		/* XXX: take care about DDR or SDR use here */
		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
	}

	/*
	 * XXX: take care about DDR bit
	 *
	 * reg = READ4(sc, SDMMC_UHS_REG);
	 * reg |= (SDMMC_UHS_REG_DDR);
	 * WRITE4(sc, SDMMC_UHS_REG, reg);
	 */

	return (0);
}

static int
dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;

	data = cmd->data;

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
		    BUS_DMASYNC_POSTWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
		    BUS_DMASYNC_POSTREAD);

	bus_dmamap_unload(sc->buf_tag, sc->buf_map);

	return (0);
}

static int
dma_stop(struct dwmmc_softc *sc)
{
	int reg;

	reg = READ4(sc, SDMMC_CTRL);
	reg &= ~(SDMMC_CTRL_USE_IDMAC);
	reg |= (SDMMC_CTRL_DMA_RESET);
	WRITE4(sc, SDMMC_CTRL, reg);

	reg = READ4(sc, SDMMC_BMOD);
	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	reg |= (SDMMC_BMOD_SWR);
	WRITE4(sc, SDMMC_BMOD, reg);

	return (0);
}

static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int len;
	int err;
	int reg;

	data = cmd->data;
	len = data->len;

	reg = READ4(sc, SDMMC_INTMASK);
	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
	WRITE4(sc, SDMMC_INTMASK, reg);

	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
	    data->data, data->len, dwmmc_ring_setup,
	    sc, BUS_DMA_NOWAIT);
	if (err != 0)
		panic("dmamap_load failed\n");

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
		    BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
		    BUS_DMASYNC_PREREAD);

	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	reg = READ4(sc, SDMMC_CTRL);
	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
	WRITE4(sc, SDMMC_CTRL, reg);
	wmb();

	reg = READ4(sc, SDMMC_BMOD);
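	/* Enable the internal DMA controller (IDMAC) with fixed burst transfers. */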
	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	WRITE4(sc, SDMMC_BMOD, reg);

	/* Start */
	WRITE4(sc, SDMMC_PLDMND, 1);

	return (0);
}

static int
pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int reg;

	data = cmd->data;
	data->xfer_len = 0;

	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	return (0);
}

static void
pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_READ) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_EMPTY)
			break;
		*p++ = READ4(sc, SDMMC_DATA);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
}

static void
pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_WRITE) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_FULL)
			break;
		WRITE4(sc, SDMMC_DATA, *p++);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
}

static void
dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t blksz;
	uint32_t cmdr;

	sc->curcmd = cmd;
	data = cmd->data;

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
		dwmmc_setup_bus(sc, sc->host.ios.clock);

	/* XXX Upper layers don't always set this */
	cmd->mrq = sc->req;

	/* Begin setting up command register. */

	cmdr = cmd->opcode;

	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE)
		cmdr |= SDMMC_CMD_STOP_ABORT;
	else if (cmd->opcode != MMC_SEND_STATUS && data)
		cmdr |= SDMMC_CMD_WAIT_PRVDATA;

	/* Set up response handling. */
	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	/*
	 * XXX: Not all platforms want this.
	 */
	cmdr |= SDMMC_CMD_USE_HOLD_REG;

	if ((sc->flags & CARD_INIT_DONE) == 0) {
		sc->flags |= (CARD_INIT_DONE);
		cmdr |= SDMMC_CMD_SEND_INIT;
	}

	if (data) {
		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		    cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
		    sc->use_auto_stop)
			cmdr |= SDMMC_CMD_SEND_ASTOP;

		cmdr |= SDMMC_CMD_DATA_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_MODE_STREAM;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DATA_WRITE;

		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
		WRITE4(sc, SDMMC_BYTCNT, data->len);
		blksz = (data->len < MMC_SECTOR_SIZE) ?
		    data->len : MMC_SECTOR_SIZE;
		WRITE4(sc, SDMMC_BLKSIZ, blksz);

		if (sc->use_pio) {
			pio_prepare(sc, cmd);
		} else {
			dma_prepare(sc, cmd);
		}
		wmb();
	}

	dprintf("cmdr 0x%08x\n", cmdr);

	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
	wmb();
	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
}

static void
dwmmc_next_operation(struct dwmmc_softc *sc)
{
	struct mmc_request *req;

	req = sc->req;
	if (req == NULL)
		return;

	sc->acd_rcvd = 0;
	sc->dto_rcvd = 0;
	sc->cmd_done = 0;

	/*
	 * XXX: Wait while the card is still busy.
	 * We need this to prevent data timeouts, mostly caused by a
	 * multi-block write command followed by a single-block read.
	 */
	while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
		continue;

	if (sc->flags & PENDING_CMD) {
		sc->flags &= ~PENDING_CMD;
		dwmmc_start_cmd(sc, req->cmd);
		return;
	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
		sc->flags &= ~PENDING_STOP;
		dwmmc_start_cmd(sc, req->stop);
		return;
	}

	sc->req = NULL;
	sc->curcmd = NULL;
	req->done(req);
}

static int
dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	dprintf("%s\n", __func__);

	DWMMC_LOCK(sc);

	if (sc->req != NULL) {
		DWMMC_UNLOCK(sc);
		return (EBUSY);
	}

	sc->req = req;
	sc->flags |= PENDING_CMD;
	if (sc->req->stop)
		sc->flags |= PENDING_STOP;
	dwmmc_next_operation(sc);

	DWMMC_UNLOCK(sc);
	return (0);
}

static int
dwmmc_get_ro(device_t brdev, device_t reqdev)
{

	dprintf("%s\n", __func__);

	return (0);
}

static int
dwmmc_acquire_host(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	DWMMC_LOCK(sc);
	while (sc->bus_busy)
		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
	sc->bus_busy++;
	DWMMC_UNLOCK(sc);
	return (0);
}

static int
dwmmc_release_host(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	DWMMC_LOCK(sc);
	sc->bus_busy--;
	wakeup(sc);
	DWMMC_UNLOCK(sc);
	return (0);
}

static int
dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(bus);

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		*(int *)result = sc->host.ios.bus_mode;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		*(int *)result = sc->host.ios.bus_width;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		*(int *)result = sc->host.ios.chip_select;
		break;
	case MMCBR_IVAR_CLOCK:
		*(int *)result = sc->host.ios.clock;
		break;
	case MMCBR_IVAR_F_MIN:
		*(int *)result = sc->host.f_min;
		break;
	case MMCBR_IVAR_F_MAX:
		*(int *)result = sc->host.f_max;
		break;
	case MMCBR_IVAR_HOST_OCR:
		*(int *)result = sc->host.host_ocr;
		break;
	case MMCBR_IVAR_MODE:
		*(int *)result = sc->host.mode;
		break;
	case MMCBR_IVAR_OCR:
		*(int *)result = sc->host.ocr;
		break;
	case MMCBR_IVAR_POWER_MODE:
		*(int *)result = sc->host.ios.power_mode;
		break;
	case MMCBR_IVAR_VDD:
		*(int *)result = sc->host.ios.vdd;
		break;
	case MMCBR_IVAR_CAPS:
		sc->host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
		*(int *)result = sc->host.caps;
		break;
	case MMCBR_IVAR_MAX_DATA:
		*(int *)result = DESC_COUNT;
	}
	return (0);
}

static int
dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(bus);

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		sc->host.ios.bus_mode = value;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		sc->host.ios.bus_width = value;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		sc->host.ios.chip_select = value;
		break;
	case MMCBR_IVAR_CLOCK:
		sc->host.ios.clock = value;
		break;
	case MMCBR_IVAR_MODE:
		sc->host.mode = value;
		break;
	case MMCBR_IVAR_OCR:
		sc->host.ocr = value;
		break;
	case MMCBR_IVAR_POWER_MODE:
		sc->host.ios.power_mode = value;
		break;
	case MMCBR_IVAR_VDD:
		sc->host.ios.vdd = value;
		break;
	/* These are read-only */
	case MMCBR_IVAR_CAPS:
	case MMCBR_IVAR_HOST_OCR:
	case MMCBR_IVAR_F_MIN:
	case MMCBR_IVAR_F_MAX:
	case MMCBR_IVAR_MAX_DATA:
		return (EINVAL);
	}
	return (0);
}

static device_method_t dwmmc_methods[] = {
	DEVMETHOD(device_probe,		dwmmc_probe),
	DEVMETHOD(device_attach,	dwmmc_attach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),

	/* mmcbr_if */
	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
	DEVMETHOD(mmcbr_request,	dwmmc_request),
	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),

	DEVMETHOD_END
};

static driver_t dwmmc_driver = {
	"dwmmc",
	dwmmc_methods,
	sizeof(struct dwmmc_softc),
};

static devclass_t dwmmc_devclass;

DRIVER_MODULE(dwmmc, simplebus, dwmmc_driver, dwmmc_devclass, 0, 0);
DRIVER_MODULE(dwmmc, ofwbus, dwmmc_driver, dwmmc_devclass, 0, 0);