/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Synopsys DesignWare Mobile Storage Host Controller
 * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcreg.h>
#include <dev/mmc/mmcbrvar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <dev/mmc/host/dwmmc.h>

#include "mmcbr_if.h"

#define	dprintf(x, arg...)
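
/*
 * dprintf() is compiled out by default.  For debugging, one could (for
 * example, as a local convenience and not part of the original driver)
 * redefine it as:
 *
 *	#define	dprintf(x, arg...)	printf(x, ##arg)
 */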

#define	READ4(_sc, _reg)	\
	bus_read_4((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val)	\
	bus_write_4((_sc)->res[0], _reg, _val)

#define	DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
#define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	DWMMC_LOCK_INIT(_sc)		\
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),	\
	    "dwmmc", MTX_DEF)
#define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
#define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

#define	PENDING_CMD	0x01
#define	PENDING_STOP	0x02
#define	CARD_INIT_DONE	0x04

#define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
				|SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
				|SDMMC_INTMASK_EBE)
#define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
				|SDMMC_INTMASK_RE)
#define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
				|SDMMC_INTMASK_HLE)

#define	DES0_DIC	(1 << 1)
#define	DES0_LD		(1 << 2)
#define	DES0_FS		(1 << 3)
#define	DES0_CH		(1 << 4)
#define	DES0_ER		(1 << 5)
#define	DES0_CES	(1 << 30)
#define	DES0_OWN	(1 << 31)

#define	DES1_BS1_MASK	0xfff
#define	DES1_BS1_SHIFT	0

struct idmac_desc {
	uint32_t	des0;	/* control */
	uint32_t	des1;	/* bufsize */
	uint32_t	des2;	/* buf1 phys addr */
	uint32_t	des3;	/* buf2 phys addr or next descr */
};

#define	DESC_COUNT	256
#define	DESC_SIZE	(sizeof(struct idmac_desc) * DESC_COUNT)
#define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */

struct dwmmc_softc {
	struct resource		*res[2];
	bus_space_tag_t		bst;
	bus_space_handle_t	bsh;
	device_t		dev;
	void			*intr_cookie;
	struct mmc_host		host;
	struct mtx		sc_mtx;
	struct mmc_request	*req;
	struct mmc_command	*curcmd;
	uint32_t		flags;
	uint32_t		hwtype;
	uint32_t		use_auto_stop;
	uint32_t		use_pio;
	uint32_t		pwren_inverted;

	bus_dma_tag_t		desc_tag;
	bus_dmamap_t		desc_map;
	struct idmac_desc	*desc_ring;
	bus_addr_t		desc_ring_paddr;
	bus_dma_tag_t		buf_tag;
	bus_dmamap_t		buf_map;

	uint32_t		bus_busy;
	uint32_t		dto_rcvd;
	uint32_t		acd_rcvd;
	uint32_t		cmd_done;
	uint32_t		bus_hz;
	uint32_t		fifo_depth;
	uint32_t		num_slots;
	uint32_t		sdr_timing;
	uint32_t		ddr_timing;
};

static void dwmmc_next_operation(struct dwmmc_softc *);
static int dwmmc_setup_bus(struct dwmmc_softc *, int);
static int dma_done(struct dwmmc_softc *, struct mmc_command *);
static int dma_stop(struct dwmmc_softc *);
static void pio_read(struct dwmmc_softc *, struct mmc_command *);
static void pio_write(struct dwmmc_softc *, struct mmc_command *);

static struct resource_spec dwmmc_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

enum {
	HWTYPE_NONE,
	HWTYPE_ALTERA,
	HWTYPE_EXYNOS,
	HWTYPE_ROCKCHIP,
};

#define	HWTYPE_MASK		(0x0000ffff)
#define	HWFLAG_MASK		(0xffff << 16)

static struct ofw_compat_data compat_data[] = {
	{"altr,socfpga-dw-mshc",	HWTYPE_ALTERA},
	{"samsung,exynos5420-dw-mshc",	HWTYPE_EXYNOS},
	{"rockchip,rk2928-dw-mshc",	HWTYPE_ROCKCHIP},
	{NULL,				HWTYPE_NONE},
};

static void
dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

static void
dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct dwmmc_softc *sc;
	int idx;

	if (error != 0)
		return;

	sc = arg;

	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);

	for (idx = 0; idx < nsegs; idx++) {
		sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
		sc->desc_ring[idx].des1 = segs[idx].ds_len;
		sc->desc_ring[idx].des2 = segs[idx].ds_addr;

		if (idx == 0)
			sc->desc_ring[idx].des0 |= DES0_FS;

		if (idx == (nsegs - 1)) {
			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
			sc->desc_ring[idx].des0 |= DES0_LD;
		}
	}
}

static int
dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
{
	int reg;
	int i;

	reg = READ4(sc, SDMMC_CTRL);
	reg |= (reset_bits);
	WRITE4(sc, SDMMC_CTRL, reg);

	/* Wait for the reset bits to self-clear. */
	for (i = 0; i < 100; i++) {
		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
			return (0);
		DELAY(10);
	}

	device_printf(sc->dev, "Reset failed\n");

	return (1);
}

static int
dma_setup(struct dwmmc_softc *sc)
{
	int error;
	int nidx;
	int idx;

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    4096, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    DESC_SIZE, 1,		/* maxsize, nsegments */
	    DESC_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->desc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create ring DMA tag.\n");
		return (1);
	}

	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->desc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate descriptor ring.\n");
		return (1);
	}

	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
	    sc->desc_ring, DESC_SIZE, dwmmc_get1paddr,
	    &sc->desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load descriptor ring map.\n");
		return (1);
	}

	for (idx = 0; idx < DESC_COUNT; idx++) {
		sc->desc_ring[idx].des0 = DES0_CH;
		sc->desc_ring[idx].des1 = 0;
		nidx = (idx + 1) % DESC_COUNT;
		sc->desc_ring[idx].des3 = sc->desc_ring_paddr +
		    (nidx * sizeof(struct idmac_desc));
	}
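
	/*
	 * At this point des3 of every descriptor holds the physical address
	 * of the next descriptor, and the last entry wraps back to the
	 * first, so the IDMAC walks a circular chained list.
	 * dwmmc_ring_setup() fills in des0..des2 per DMA segment at
	 * transfer time and marks the first/last segments with
	 * DES0_FS/DES0_LD.
	 */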

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    4096, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    DESC_COUNT*MMC_SECTOR_SIZE,	/* maxsize */
	    DESC_COUNT,			/* nsegments */
	    MMC_SECTOR_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->buf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create data buffer DMA tag.\n");
		return (1);
	}

	error = bus_dmamap_create(sc->buf_tag, 0,
	    &sc->buf_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create data buffer DMA map.\n");
		return (1);
	}

	return (0);
}

static void
dwmmc_cmd_done(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;

	cmd = sc->curcmd;
	if (cmd == NULL)
		return;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
		} else {
			cmd->resp[3] = 0;
			cmd->resp[2] = 0;
			cmd->resp[1] = 0;
			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
		}
	}
}

static void
dwmmc_tasklet(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;

	cmd = sc->curcmd;
	if (cmd == NULL)
		return;

	if (!sc->cmd_done)
		return;

	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
		dwmmc_next_operation(sc);
	} else if (cmd->data && sc->dto_rcvd) {
		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		    cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
		    sc->use_auto_stop) {
			if (sc->acd_rcvd)
				dwmmc_next_operation(sc);
		} else {
			dwmmc_next_operation(sc);
		}
	}
}
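
/*
 * Completion logic: a request advances only after the command itself has
 * completed (cmd_done).  For data commands we additionally wait for the
 * data-transfer-over (DTO) interrupt, and, for multi-block transfers issued
 * with the controller's auto-stop feature, for the auto-command-done (ACD)
 * interrupt as well.
 */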

static void
dwmmc_intr(void *arg)
{
	struct mmc_command *cmd;
	struct dwmmc_softc *sc;
	uint32_t reg;

	sc = arg;

	DWMMC_LOCK(sc);

	cmd = sc->curcmd;

	/* First handle SDMMC controller interrupts */
	reg = READ4(sc, SDMMC_MINTSTS);
	if (reg) {
		dprintf("%s 0x%08x\n", __func__, reg);

		if (reg & DWMMC_CMD_ERR_FLAGS) {
			WRITE4(sc, SDMMC_RINTSTS, DWMMC_CMD_ERR_FLAGS);
			dprintf("cmd err 0x%08x cmd 0x%08x\n",
			    reg, cmd->opcode);
			cmd->error = MMC_ERR_TIMEOUT;
		}

		if (reg & DWMMC_DATA_ERR_FLAGS) {
			WRITE4(sc, SDMMC_RINTSTS, DWMMC_DATA_ERR_FLAGS);
			dprintf("data err 0x%08x cmd 0x%08x\n",
			    reg, cmd->opcode);
			cmd->error = MMC_ERR_FAILED;
			if (!sc->use_pio) {
				dma_done(sc, cmd);
				dma_stop(sc);
			}
		}

		if (reg & SDMMC_INTMASK_CMD_DONE) {
			dwmmc_cmd_done(sc);
			sc->cmd_done = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CMD_DONE);
		}

		if (reg & SDMMC_INTMASK_ACD) {
			sc->acd_rcvd = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_ACD);
		}

		if (reg & SDMMC_INTMASK_DTO) {
			sc->dto_rcvd = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_DTO);
		}

		if (reg & SDMMC_INTMASK_CD) {
			/* XXX: Handle card detect */
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CD);
		}
	}

	if (sc->use_pio) {
		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
			pio_read(sc, cmd);
		}
		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
			pio_write(sc, cmd);
		}
	} else {
		/* Now handle DMA interrupts */
		reg = READ4(sc, SDMMC_IDSTS);
		if (reg) {
			dprintf("dma intr 0x%08x\n", reg);
			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
				    SDMMC_IDINTEN_RI));
				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
				dma_done(sc, cmd);
			}
		}
	}

	dwmmc_tasklet(sc);

	DWMMC_UNLOCK(sc);
}

static int
parse_fdt(struct dwmmc_softc *sc)
{
	pcell_t dts_value[3];
	phandle_t node;
	int len;

	if ((node = ofw_bus_get_node(sc->dev)) == -1)
		return (ENXIO);

	/* fifo-depth */
	if ((len = OF_getproplen(node, "fifo-depth")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "fifo-depth", dts_value, len);
	sc->fifo_depth = dts_value[0];

	/* num-slots */
	if ((len = OF_getproplen(node, "num-slots")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "num-slots", dts_value, len);
	sc->num_slots = dts_value[0];

	/*
	 * We need platform-specific code to know which clock supplies
	 * our device.  For now, rely on the value specified in the FDT.
	 */
	if ((len = OF_getproplen(node, "bus-frequency")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "bus-frequency", dts_value, len);
	sc->bus_hz = dts_value[0];

	/*
	 * Platform-specific stuff
	 * XXX: Move to separate file
	 */

	if ((sc->hwtype & HWTYPE_MASK) != HWTYPE_EXYNOS)
		return (0);

	if ((len = OF_getproplen(node, "samsung,dw-mshc-ciu-div")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "samsung,dw-mshc-ciu-div", dts_value, len);
	sc->sdr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);
	sc->ddr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);

	if ((len = OF_getproplen(node, "samsung,dw-mshc-sdr-timing")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "samsung,dw-mshc-sdr-timing", dts_value, len);
	sc->sdr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
	    (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));

	if ((len = OF_getproplen(node, "samsung,dw-mshc-ddr-timing")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "samsung,dw-mshc-ddr-timing", dts_value, len);
	sc->ddr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
	    (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));

	return (0);
}
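
/*
 * For reference, a device tree node for this controller might look roughly
 * like the following.  The values are illustrative only; consult the
 * platform's DTS for the real ones.
 *
 *	mmc@ff704000 {
 *		compatible = "altr,socfpga-dw-mshc";
 *		reg = <0xff704000 0x1000>;
 *		interrupts = <0 139 4>;
 *		fifo-depth = <0x400>;
 *		num-slots = <1>;
 *		bus-frequency = <100000000>;
 *	};
 */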

static int
dwmmc_probe(device_t dev)
{
	uintptr_t hwtype;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (hwtype == HWTYPE_NONE)
		return (ENXIO);

	device_set_desc(dev, "Synopsys DesignWare Mobile "
	    "Storage Host Controller");
	return (BUS_PROBE_DEFAULT);
}

static int
dwmmc_attach(device_t dev)
{
	struct dwmmc_softc *sc;
	device_t child;
	int error;
	int slot;

	sc = device_get_softc(dev);

	sc->dev = dev;
	sc->hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	/* Use Auto Stop: it saves hundreds of interrupts per second. */
	sc->use_auto_stop = 1;

	error = parse_fdt(sc);
	if (error != 0) {
		device_printf(dev, "Can't get FDT property.\n");
		return (ENXIO);
	}

	DWMMC_LOCK_INIT(sc);

	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	/* Set up the interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	device_printf(dev, "Hardware version ID is %04x\n",
	    READ4(sc, SDMMC_VERID) & 0xffff);

	sc->use_pio = 0;
	sc->pwren_inverted = 0;

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP) {
		sc->use_pio = 1;
		sc->pwren_inverted = 1;
	} else {
		WRITE4(sc, EMMCP_MPSBEGIN0, 0);
		WRITE4(sc, EMMCP_SEND0, 0);
		WRITE4(sc, EMMCP_CTRL0, (MPSCTRL_SECURE_READ_BIT |
		    MPSCTRL_SECURE_WRITE_BIT |
		    MPSCTRL_NON_SECURE_READ_BIT |
		    MPSCTRL_NON_SECURE_WRITE_BIT |
		    MPSCTRL_VALID));
	}

	/* XXX: we support operation for slot index 0 only */
	slot = 0;
	if (sc->pwren_inverted) {
		WRITE4(sc, SDMMC_PWREN, (0 << slot));
	} else {
		WRITE4(sc, SDMMC_PWREN, (1 << slot));
	}

	/* Reset all */
	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
	    SDMMC_CTRL_FIFO_RESET |
	    SDMMC_CTRL_DMA_RESET)))
		return (ENXIO);

	dwmmc_setup_bus(sc, sc->host.f_min);

	if (!sc->use_pio) {
		if (dma_setup(sc))
			return (ENXIO);

		/* Install desc base */
		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);

		/* Enable DMA interrupts */
		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
		    SDMMC_IDINTEN_RI |
		    SDMMC_IDINTEN_TI));
	}

	/* Clear and disable interrupts for a while */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, 0);

	/* Maximum timeout */
	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);

	/* Enable interrupts */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
	    SDMMC_INTMASK_DTO |
	    SDMMC_INTMASK_ACD |
	    SDMMC_INTMASK_TXDR |
	    SDMMC_INTMASK_RXDR |
	    DWMMC_ERR_FLAGS |
	    SDMMC_INTMASK_CD));
	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);

	sc->host.f_min = 400000;
	sc->host.f_max = 200000000;
	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->host.caps = MMC_CAP_4_BIT_DATA;

	child = device_add_child(dev, "mmc", 0);
	return (bus_generic_attach(dev));
}
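
/*
 * Clock divider example (illustrative, assuming bus_hz = 100 MHz): the card
 * clock is bus_hz / (2 * CLKDIV) when CLKDIV is non-zero, so for a requested
 * 400 kHz dwmmc_setup_bus() programs CLKDIV = DIV_ROUND_UP(100000000,
 * 2 * 400000) = 125, giving 100 MHz / 250 = 400 kHz; for a requested 25 MHz
 * it programs CLKDIV = 2, giving 25 MHz.  A requested frequency equal to
 * bus_hz uses CLKDIV = 0, which bypasses the divider.
 */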

static int
dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
{
	int tout;
	int div;

	if (freq == 0) {
		WRITE4(sc, SDMMC_CLKENA, 0);
		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
		    SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

		tout = 1000;
		do {
			if (tout-- < 0) {
				device_printf(sc->dev,
				    "Failed to update clk\n");
				return (1);
			}
		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

		return (0);
	}

	WRITE4(sc, SDMMC_CLKENA, 0);
	WRITE4(sc, SDMMC_CLKSRC, 0);

	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;

	WRITE4(sc, SDMMC_CLKDIV, div);
	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
	    SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to update clk\n");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
	    SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);

	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to enable clk\n");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	return (0);
}

static int
dwmmc_update_ios(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;
	struct mmc_ios *ios;

	sc = device_get_softc(brdev);
	ios = &sc->host.ios;

	dprintf("Setting up clk %u bus_width %d\n",
	    ios->clock, ios->bus_width);

	dwmmc_setup_bus(sc, ios->clock);

	if (ios->bus_width == bus_width_8)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
	else if (ios->bus_width == bus_width_4)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
	else
		WRITE4(sc, SDMMC_CTYPE, 0);

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
		/* XXX: select the DDR or SDR timing here as appropriate */
		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
	}

	/*
	 * XXX: set the DDR bit when DDR mode is in use:
	 *
	 * reg = READ4(sc, SDMMC_UHS_REG);
	 * reg |= (SDMMC_UHS_REG_DDR);
	 * WRITE4(sc, SDMMC_UHS_REG, reg);
	 */

	return (0);
}

static int
dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;

	data = cmd->data;

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
		    BUS_DMASYNC_POSTWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
		    BUS_DMASYNC_POSTREAD);

	bus_dmamap_unload(sc->buf_tag, sc->buf_map);

	return (0);
}

static int
dma_stop(struct dwmmc_softc *sc)
{
	int reg;

	reg = READ4(sc, SDMMC_CTRL);
	reg &= ~(SDMMC_CTRL_USE_IDMAC);
	reg |= (SDMMC_CTRL_DMA_RESET);
	WRITE4(sc, SDMMC_CTRL, reg);

	reg = READ4(sc, SDMMC_BMOD);
	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	reg |= (SDMMC_BMOD_SWR);
	WRITE4(sc, SDMMC_BMOD, reg);

	return (0);
}
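
/*
 * Both the DMA and PIO paths program the FIFO threshold register the same
 * way: burst size DEF_MSIZE, RX watermark at fifo_depth / 2 - 1 and TX
 * watermark at fifo_depth / 2, i.e. DMA requests or watermark interrupts
 * fire when the FIFO is roughly half full (RX) or half empty (TX).
 */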

static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int len;
	int err;
	int reg;

	data = cmd->data;
	len = data->len;

	reg = READ4(sc, SDMMC_INTMASK);
	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
	WRITE4(sc, SDMMC_INTMASK, reg);

	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
	    data->data, data->len, dwmmc_ring_setup,
	    sc, BUS_DMA_NOWAIT);
	if (err != 0)
		panic("dmamap_load failed\n");

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
		    BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
		    BUS_DMASYNC_PREREAD);

	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	reg = READ4(sc, SDMMC_CTRL);
	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
	WRITE4(sc, SDMMC_CTRL, reg);
	wmb();

	reg = READ4(sc, SDMMC_BMOD);
	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	WRITE4(sc, SDMMC_BMOD, reg);

	/* Start */
	WRITE4(sc, SDMMC_PLDMND, 1);

	return (0);
}

static int
pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int reg;

	data = cmd->data;
	data->xfer_len = 0;

	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	return (0);
}

static void
pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_READ) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_EMPTY)
			break;
		*p++ = READ4(sc, SDMMC_DATA);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
}

static void
pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_WRITE) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_FULL)
			break;
		WRITE4(sc, SDMMC_DATA, *p++);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
}
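
/*
 * In PIO mode the FIFO is serviced from the interrupt handler: pio_read()
 * drains it on RXDR/DTO, pio_write() refills it on TXDR/DTO, always in
 * 32-bit words, with data->xfer_len tracking progress through the buffer.
 */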

static void
dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t blksz;
	uint32_t cmdr;

	sc->curcmd = cmd;
	data = cmd->data;

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
		dwmmc_setup_bus(sc, sc->host.ios.clock);

	/* XXX Upper layers don't always set this */
	cmd->mrq = sc->req;

	/* Begin setting up command register. */

	cmdr = cmd->opcode;

	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE)
		cmdr |= SDMMC_CMD_STOP_ABORT;
	else if (cmd->opcode != MMC_SEND_STATUS && data)
		cmdr |= SDMMC_CMD_WAIT_PRVDATA;

	/* Set up response handling. */
	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	/*
	 * XXX: Not all platforms want this.
	 */
	cmdr |= SDMMC_CMD_USE_HOLD_REG;

	if ((sc->flags & CARD_INIT_DONE) == 0) {
		sc->flags |= (CARD_INIT_DONE);
		cmdr |= SDMMC_CMD_SEND_INIT;
	}

	if (data) {
		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		    cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
		    sc->use_auto_stop)
			cmdr |= SDMMC_CMD_SEND_ASTOP;

		cmdr |= SDMMC_CMD_DATA_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_MODE_STREAM;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DATA_WRITE;

		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
		WRITE4(sc, SDMMC_BYTCNT, data->len);
		blksz = (data->len < MMC_SECTOR_SIZE) ?
		    data->len : MMC_SECTOR_SIZE;
		WRITE4(sc, SDMMC_BLKSIZ, blksz);

		if (sc->use_pio) {
			pio_prepare(sc, cmd);
		} else {
			dma_prepare(sc, cmd);
		}
		wmb();
	}

	dprintf("cmdr 0x%08x\n", cmdr);

	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
	wmb();
	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
}

static void
dwmmc_next_operation(struct dwmmc_softc *sc)
{
	struct mmc_request *req;

	req = sc->req;
	if (req == NULL)
		return;

	sc->acd_rcvd = 0;
	sc->dto_rcvd = 0;
	sc->cmd_done = 0;

	/*
	 * XXX: Wait while the card is still busy.
	 * We need this to prevent data timeouts,
	 * mostly caused by a multi-block write command
	 * followed by a single-block read.
	 */
	while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
		continue;

	if (sc->flags & PENDING_CMD) {
		sc->flags &= ~PENDING_CMD;
		dwmmc_start_cmd(sc, req->cmd);
		return;
	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
		sc->flags &= ~PENDING_STOP;
		dwmmc_start_cmd(sc, req->stop);
		return;
	}

	sc->req = NULL;
	sc->curcmd = NULL;
	req->done(req);
}

static int
dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	dprintf("%s\n", __func__);

	DWMMC_LOCK(sc);

	if (sc->req != NULL) {
		DWMMC_UNLOCK(sc);
		return (EBUSY);
	}

	sc->req = req;
	sc->flags |= PENDING_CMD;
	if (sc->req->stop)
		sc->flags |= PENDING_STOP;
	dwmmc_next_operation(sc);

	DWMMC_UNLOCK(sc);
	return (0);
}

static int
dwmmc_get_ro(device_t brdev, device_t reqdev)
{

	dprintf("%s\n", __func__);

	return (0);
}

static int
dwmmc_acquire_host(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	DWMMC_LOCK(sc);
	while (sc->bus_busy)
		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
	sc->bus_busy++;
	DWMMC_UNLOCK(sc);
	return (0);
}

static int
dwmmc_release_host(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	DWMMC_LOCK(sc);
	sc->bus_busy--;
	wakeup(sc);
	DWMMC_UNLOCK(sc);
	return (0);
}
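
/*
 * MMCBR_IVAR_MAX_DATA is reported below as DESC_COUNT blocks, matching the
 * DMA limits set up in dma_setup(): at most DESC_COUNT segments of
 * MMC_SECTOR_SIZE bytes each per transfer.
 */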

static int
dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(bus);

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		*(int *)result = sc->host.ios.bus_mode;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		*(int *)result = sc->host.ios.bus_width;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		*(int *)result = sc->host.ios.chip_select;
		break;
	case MMCBR_IVAR_CLOCK:
		*(int *)result = sc->host.ios.clock;
		break;
	case MMCBR_IVAR_F_MIN:
		*(int *)result = sc->host.f_min;
		break;
	case MMCBR_IVAR_F_MAX:
		*(int *)result = sc->host.f_max;
		break;
	case MMCBR_IVAR_HOST_OCR:
		*(int *)result = sc->host.host_ocr;
		break;
	case MMCBR_IVAR_MODE:
		*(int *)result = sc->host.mode;
		break;
	case MMCBR_IVAR_OCR:
		*(int *)result = sc->host.ocr;
		break;
	case MMCBR_IVAR_POWER_MODE:
		*(int *)result = sc->host.ios.power_mode;
		break;
	case MMCBR_IVAR_VDD:
		*(int *)result = sc->host.ios.vdd;
		break;
	case MMCBR_IVAR_CAPS:
		sc->host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
		*(int *)result = sc->host.caps;
		break;
	case MMCBR_IVAR_MAX_DATA:
		*(int *)result = DESC_COUNT;
	}
	return (0);
}

static int
dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(bus);

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		sc->host.ios.bus_mode = value;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		sc->host.ios.bus_width = value;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		sc->host.ios.chip_select = value;
		break;
	case MMCBR_IVAR_CLOCK:
		sc->host.ios.clock = value;
		break;
	case MMCBR_IVAR_MODE:
		sc->host.mode = value;
		break;
	case MMCBR_IVAR_OCR:
		sc->host.ocr = value;
		break;
	case MMCBR_IVAR_POWER_MODE:
		sc->host.ios.power_mode = value;
		break;
	case MMCBR_IVAR_VDD:
		sc->host.ios.vdd = value;
		break;
	/* These are read-only */
	case MMCBR_IVAR_CAPS:
	case MMCBR_IVAR_HOST_OCR:
	case MMCBR_IVAR_F_MIN:
	case MMCBR_IVAR_F_MAX:
	case MMCBR_IVAR_MAX_DATA:
		return (EINVAL);
	}
	return (0);
}

static device_method_t dwmmc_methods[] = {
	DEVMETHOD(device_probe,		dwmmc_probe),
	DEVMETHOD(device_attach,	dwmmc_attach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),

	/* mmcbr_if */
	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
	DEVMETHOD(mmcbr_request,	dwmmc_request),
	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),

	DEVMETHOD_END
};

static driver_t dwmmc_driver = {
	"dwmmc",
	dwmmc_methods,
	sizeof(struct dwmmc_softc),
};

static devclass_t dwmmc_devclass;

DRIVER_MODULE(dwmmc, simplebus, dwmmc_driver, dwmmc_devclass, 0, 0);