/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Synopsys DesignWare Mobile Storage Host Controller
 * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcbrvar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <dev/mmc/host/dwmmc_reg.h>
#include <dev/mmc/host/dwmmc_var.h>

#include "mmcbr_if.h"

#define dprintf(x, arg...)
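
/*
 * Register accessors: all controller registers are accessed through
 * memory resource 0 (sc->res[0]), allocated from dwmmc_spec below.
 */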
#define READ4(_sc, _reg)        \
        bus_read_4((_sc)->res[0], _reg)
#define WRITE4(_sc, _reg, _val) \
        bus_write_4((_sc)->res[0], _reg, _val)

#define DIV_ROUND_UP(n, d)      howmany(n, d)

#define DWMMC_LOCK(_sc)         mtx_lock(&(_sc)->sc_mtx)
#define DWMMC_UNLOCK(_sc)       mtx_unlock(&(_sc)->sc_mtx)
#define DWMMC_LOCK_INIT(_sc)    \
        mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
            "dwmmc", MTX_DEF)
#define DWMMC_LOCK_DESTROY(_sc)         mtx_destroy(&_sc->sc_mtx);
#define DWMMC_ASSERT_LOCKED(_sc)        mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define DWMMC_ASSERT_UNLOCKED(_sc)      mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

#define PENDING_CMD     0x01
#define PENDING_STOP    0x02
#define CARD_INIT_DONE  0x04

#define DWMMC_DATA_ERR_FLAGS    (SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
                                |SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
                                |SDMMC_INTMASK_EBE)
#define DWMMC_CMD_ERR_FLAGS     (SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
                                |SDMMC_INTMASK_RE)
#define DWMMC_ERR_FLAGS         (DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
                                |SDMMC_INTMASK_HLE)

#define DES0_DIC        (1 << 1)
#define DES0_LD         (1 << 2)
#define DES0_FS         (1 << 3)
#define DES0_CH         (1 << 4)
#define DES0_ER         (1 << 5)
#define DES0_CES        (1 << 30)
#define DES0_OWN        (1 << 31)

#define DES1_BS1_MASK   0xfff
#define DES1_BS1_SHIFT  0

struct idmac_desc {
        uint32_t        des0;   /* control */
        uint32_t        des1;   /* bufsize */
        uint32_t        des2;   /* buf1 phys addr */
        uint32_t        des3;   /* buf2 phys addr or next descr */
};

#define DESC_MAX        256
#define DESC_SIZE       (sizeof(struct idmac_desc) * DESC_MAX)
#define DEF_MSIZE       0x2     /* Burst size of multiple transaction */

static void dwmmc_next_operation(struct dwmmc_softc *);
static int dwmmc_setup_bus(struct dwmmc_softc *, int);
static int dma_done(struct dwmmc_softc *, struct mmc_command *);
static int dma_stop(struct dwmmc_softc *);
static void pio_read(struct dwmmc_softc *, struct mmc_command *);
static void pio_write(struct dwmmc_softc *, struct mmc_command *);

static struct resource_spec dwmmc_spec[] = {
        { SYS_RES_MEMORY,       0,      RF_ACTIVE },
        { SYS_RES_IRQ,          0,      RF_ACTIVE },
        { -1, 0 }
};

#define HWTYPE_MASK     (0x0000ffff)
#define HWFLAG_MASK     (0xffff << 16)

static struct ofw_compat_data compat_data[] = {
        {"altr,socfpga-dw-mshc",        HWTYPE_ALTERA},
        {"samsung,exynos5420-dw-mshc",  HWTYPE_EXYNOS},
        {"rockchip,rk2928-dw-mshc",     HWTYPE_ROCKCHIP},
        {NULL,                          HWTYPE_NONE},
};

static void
dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

        if (error != 0)
                return;
        *(bus_addr_t *)arg = segs[0].ds_addr;
}

static void
dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct dwmmc_softc *sc;
        int idx;

        if (error != 0)
                return;

        sc = arg;

        dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);

        for (idx = 0; idx < nsegs; idx++) {
                sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
                sc->desc_ring[idx].des1 = segs[idx].ds_len;
                sc->desc_ring[idx].des2 = segs[idx].ds_addr;

                if (idx == 0)
                        sc->desc_ring[idx].des0 |= DES0_FS;

                if (idx == (nsegs - 1)) {
                        sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
                        sc->desc_ring[idx].des0 |= DES0_LD;
                }
        }
}

static int
dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
{
        int reg;
        int i;

        reg = READ4(sc, SDMMC_CTRL);
        reg |= (reset_bits);
        WRITE4(sc, SDMMC_CTRL, reg);

        /* Wait for the reset to complete. */
        for (i = 0; i < 100; i++) {
                if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
                        return (0);
                DELAY(10);
        }

        device_printf(sc->dev, "Reset failed\n");

        return (1);
}

static int
dma_setup(struct dwmmc_softc *sc)
{
        int error;
        int nidx;
        int idx;

        /*
         * Set up TX descriptor ring, descriptors, and dma maps.
         */
        error = bus_dma_tag_create(
            bus_get_dma_tag(sc->dev),   /* Parent tag. */
            4096, 0,                    /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            DESC_SIZE, 1,               /* maxsize, nsegments */
            DESC_SIZE,                  /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &sc->desc_tag);
        if (error != 0) {
                device_printf(sc->dev,
                    "could not create ring DMA tag.\n");
                return (1);
        }

        error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
            BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
            &sc->desc_map);
        if (error != 0) {
                device_printf(sc->dev,
                    "could not allocate descriptor ring.\n");
                return (1);
        }

        error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
            sc->desc_ring, DESC_SIZE, dwmmc_get1paddr,
            &sc->desc_ring_paddr, 0);
        if (error != 0) {
                device_printf(sc->dev,
                    "could not load descriptor ring map.\n");
                return (1);
        }

        for (idx = 0; idx < sc->desc_count; idx++) {
                sc->desc_ring[idx].des0 = DES0_CH;
                sc->desc_ring[idx].des1 = 0;
                nidx = (idx + 1) % sc->desc_count;
                sc->desc_ring[idx].des3 = sc->desc_ring_paddr +
                    (nidx * sizeof(struct idmac_desc));
        }

        error = bus_dma_tag_create(
            bus_get_dma_tag(sc->dev),   /* Parent tag. */
            4096, 0,                    /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            sc->desc_count * MMC_SECTOR_SIZE, /* maxsize */
            sc->desc_count,             /* nsegments */
            MMC_SECTOR_SIZE,            /* maxsegsize */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockarg */
            &sc->buf_tag);
        if (error != 0) {
                device_printf(sc->dev,
                    "could not create buffer DMA tag.\n");
                return (1);
        }

        error = bus_dmamap_create(sc->buf_tag, 0,
            &sc->buf_map);
        if (error != 0) {
                device_printf(sc->dev,
                    "could not create TX buffer DMA map.\n");
                return (1);
        }

        return (0);
}

static void
dwmmc_cmd_done(struct dwmmc_softc *sc)
{
        struct mmc_command *cmd;

        cmd = sc->curcmd;
        if (cmd == NULL)
                return;

        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        cmd->resp[3] = READ4(sc, SDMMC_RESP0);
                        cmd->resp[2] = READ4(sc, SDMMC_RESP1);
                        cmd->resp[1] = READ4(sc, SDMMC_RESP2);
                        cmd->resp[0] = READ4(sc, SDMMC_RESP3);
                } else {
                        cmd->resp[3] = 0;
                        cmd->resp[2] = 0;
                        cmd->resp[1] = 0;
                        cmd->resp[0] = READ4(sc, SDMMC_RESP0);
                }
        }
}

static void
dwmmc_tasklet(struct dwmmc_softc *sc)
{
        struct mmc_command *cmd;

        cmd = sc->curcmd;
        if (cmd == NULL)
                return;

        if (!sc->cmd_done)
                return;

        if (cmd->error != MMC_ERR_NONE || !cmd->data) {
                dwmmc_next_operation(sc);
        } else if (cmd->data && sc->dto_rcvd) {
                if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
                    cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
                    sc->use_auto_stop) {
                        if (sc->acd_rcvd)
                                dwmmc_next_operation(sc);
                } else {
                        dwmmc_next_operation(sc);
                }
        }
}

static void
dwmmc_intr(void *arg)
{
        struct mmc_command *cmd;
        struct dwmmc_softc *sc;
        uint32_t reg;

        sc = arg;

        DWMMC_LOCK(sc);

        cmd = sc->curcmd;

        /* First handle SDMMC controller interrupts */
        reg = READ4(sc, SDMMC_MINTSTS);
        if (reg) {
                dprintf("%s 0x%08x\n", __func__, reg);

                if (reg & DWMMC_CMD_ERR_FLAGS) {
                        WRITE4(sc, SDMMC_RINTSTS, DWMMC_CMD_ERR_FLAGS);
                        dprintf("cmd err 0x%08x cmd 0x%08x\n",
                            reg, cmd->opcode);
                        cmd->error = MMC_ERR_TIMEOUT;
                }

                if (reg & DWMMC_DATA_ERR_FLAGS) {
                        WRITE4(sc, SDMMC_RINTSTS, DWMMC_DATA_ERR_FLAGS);
                        dprintf("data err 0x%08x cmd 0x%08x\n",
                            reg, cmd->opcode);
                        cmd->error = MMC_ERR_FAILED;
                        if (!sc->use_pio) {
                                dma_done(sc, cmd);
                                dma_stop(sc);
                        }
                }

                if (reg & SDMMC_INTMASK_CMD_DONE) {
                        dwmmc_cmd_done(sc);
                        sc->cmd_done = 1;
                        WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CMD_DONE);
                }

                if (reg & SDMMC_INTMASK_ACD) {
                        sc->acd_rcvd = 1;
                        WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_ACD);
                }

                if (reg & SDMMC_INTMASK_DTO) {
                        sc->dto_rcvd = 1;
                        WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_DTO);
                }

                if (reg & SDMMC_INTMASK_CD) {
                        /* XXX: Handle card detect */
                        WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CD);
                }
        }

        if (sc->use_pio) {
                if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
                        pio_read(sc, cmd);
                }
                if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
                        pio_write(sc, cmd);
                }
        } else {
                /* Now handle DMA interrupts */
                reg = READ4(sc, SDMMC_IDSTS);
                if (reg) {
                        dprintf("dma intr 0x%08x\n", reg);
                        if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
                                WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
                                    SDMMC_IDINTEN_RI));
                                WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
                                dma_done(sc, cmd);
                        }
                }
        }

        dwmmc_tasklet(sc);

        DWMMC_UNLOCK(sc);
}

static int
parse_fdt(struct dwmmc_softc *sc)
{
        pcell_t dts_value[3];
        phandle_t node;
        int len;

        if ((node = ofw_bus_get_node(sc->dev)) == -1)
                return (ENXIO);

        /* fifo-depth */
        if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
                OF_getencprop(node, "fifo-depth", dts_value, len);
                sc->fifo_depth = dts_value[0];
        }

        /* num-slots */
        sc->num_slots = 1;
        if ((len = OF_getproplen(node, "num-slots")) > 0) {
                OF_getencprop(node, "num-slots", dts_value, len);
                sc->num_slots = dts_value[0];
        }

        /*
         * We need some platform-specific code to know
         * what clock is supplied to our device.
         * For now rely on the value specified in FDT.
         */
        if (sc->bus_hz == 0) {
                if ((len = OF_getproplen(node, "bus-frequency")) <= 0)
                        return (ENXIO);
                OF_getencprop(node, "bus-frequency", dts_value, len);
                sc->bus_hz = dts_value[0];
        }

        /*
         * Platform-specific stuff
         * XXX: Move to separate file
         */

        if ((sc->hwtype & HWTYPE_MASK) != HWTYPE_EXYNOS)
                return (0);

        if ((len = OF_getproplen(node, "samsung,dw-mshc-ciu-div")) <= 0)
                return (ENXIO);
        OF_getencprop(node, "samsung,dw-mshc-ciu-div", dts_value, len);
        sc->sdr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);
        sc->ddr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);

        if ((len = OF_getproplen(node, "samsung,dw-mshc-sdr-timing")) <= 0)
                return (ENXIO);
        OF_getencprop(node, "samsung,dw-mshc-sdr-timing", dts_value, len);
        sc->sdr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
            (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));

        if ((len = OF_getproplen(node, "samsung,dw-mshc-ddr-timing")) <= 0)
                return (ENXIO);
        OF_getencprop(node, "samsung,dw-mshc-ddr-timing", dts_value, len);
        sc->ddr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
            (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));

        return (0);
}

static int
dwmmc_probe(device_t dev)
{
        uintptr_t hwtype;

        if (!ofw_bus_status_okay(dev))
                return (ENXIO);

        hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
        if (hwtype == HWTYPE_NONE)
                return (ENXIO);

        device_set_desc(dev, "Synopsys DesignWare Mobile "
            "Storage Host Controller");
        return (BUS_PROBE_DEFAULT);
}

int
dwmmc_attach(device_t dev)
{
        struct dwmmc_softc *sc;
        int error;
        int slot;

        sc = device_get_softc(dev);

        sc->dev = dev;
        if (sc->hwtype == HWTYPE_NONE) {
                sc->hwtype =
                    ofw_bus_search_compatible(dev, compat_data)->ocd_data;
        }

        /* Why not use Auto Stop? It saves hundreds of IRQs per second. */
        sc->use_auto_stop = 1;

        error = parse_fdt(sc);
        if (error != 0) {
                device_printf(dev, "Can't get FDT property.\n");
                return (ENXIO);
        }

        DWMMC_LOCK_INIT(sc);

        if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
                device_printf(dev, "could not allocate resources\n");
                return (ENXIO);
        }

        /* Setup interrupt handler. */
        error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
            NULL, dwmmc_intr, sc, &sc->intr_cookie);
        if (error != 0) {
                device_printf(dev, "could not setup interrupt handler.\n");
                return (ENXIO);
        }

        device_printf(dev, "Hardware version ID is %04x\n",
            READ4(sc, SDMMC_VERID) & 0xffff);

        if (sc->desc_count == 0)
                sc->desc_count = DESC_MAX;

        if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP) {
                sc->use_pio = 1;
                sc->pwren_inverted = 1;
        } else if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
                WRITE4(sc, EMMCP_MPSBEGIN0, 0);
                WRITE4(sc, EMMCP_SEND0, 0);
                WRITE4(sc, EMMCP_CTRL0, (MPSCTRL_SECURE_READ_BIT |
                    MPSCTRL_SECURE_WRITE_BIT |
                    MPSCTRL_NON_SECURE_READ_BIT |
                    MPSCTRL_NON_SECURE_WRITE_BIT |
                    MPSCTRL_VALID));
        }

        /* XXX: we support operation for slot index 0 only */
        slot = 0;
        if (sc->pwren_inverted) {
                WRITE4(sc, SDMMC_PWREN, (0 << slot));
        } else {
                WRITE4(sc, SDMMC_PWREN, (1 << slot));
        }

        /* Reset all */
        if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
            SDMMC_CTRL_FIFO_RESET |
            SDMMC_CTRL_DMA_RESET)))
                return (ENXIO);

        dwmmc_setup_bus(sc, sc->host.f_min);

        if (sc->fifo_depth == 0) {
                sc->fifo_depth = 1 +
                    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
                device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
                    sc->fifo_depth);
        }

        if (!sc->use_pio) {
                if (dma_setup(sc))
                        return (ENXIO);

                /* Install desc base */
                WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);

                /* Enable DMA interrupts */
                WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
                WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
                    SDMMC_IDINTEN_RI |
                    SDMMC_IDINTEN_TI));
        }

        /* Clear and disable interrupts for a while */
        WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
        WRITE4(sc, SDMMC_INTMASK, 0);

        /* Maximum timeout */
        WRITE4(sc, SDMMC_TMOUT, 0xffffffff);

        /* Enable interrupts */
        WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
        WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
            SDMMC_INTMASK_DTO |
            SDMMC_INTMASK_ACD |
            SDMMC_INTMASK_TXDR |
            SDMMC_INTMASK_RXDR |
            DWMMC_ERR_FLAGS |
            SDMMC_INTMASK_CD));
        WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);

        sc->host.f_min = 400000;
        sc->host.f_max = min(200000000, sc->bus_hz);
        sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
        sc->host.caps = MMC_CAP_4_BIT_DATA;

        device_add_child(dev, "mmc", -1);
        return (bus_generic_attach(dev));
}

static int
dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
{
        int tout;
        int div;

        if (freq == 0) {
                WRITE4(sc, SDMMC_CLKENA, 0);
                WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
                    SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

                tout = 1000;
                do {
                        if (tout-- < 0) {
                                device_printf(sc->dev, "Failed to update clk\n");
                                return (1);
                        }
                } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

                return (0);
        }

        WRITE4(sc, SDMMC_CLKENA, 0);
        WRITE4(sc, SDMMC_CLKSRC, 0);

        div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;

        WRITE4(sc, SDMMC_CLKDIV, div);
        WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
            SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

        tout = 1000;
        do {
                if (tout-- < 0) {
                        device_printf(sc->dev, "Failed to update clk\n");
                        return (1);
                }
        } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

        WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
        WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
            SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);

        tout = 1000;
        do {
                if (tout-- < 0) {
                        device_printf(sc->dev, "Failed to enable clk\n");
                        return (1);
                }
        } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

        return (0);
}

static int
dwmmc_update_ios(device_t brdev, device_t reqdev)
{
        struct dwmmc_softc *sc;
        struct mmc_ios *ios;

        sc = device_get_softc(brdev);
        ios = &sc->host.ios;

        dprintf("Setting up clk %u bus_width %d\n",
            ios->clock, ios->bus_width);

        dwmmc_setup_bus(sc, ios->clock);

        if (ios->bus_width == bus_width_8)
                WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
        else if (ios->bus_width == bus_width_4)
                WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
        else
                WRITE4(sc, SDMMC_CTYPE, 0);

        if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
                /* XXX: take care about DDR or SDR use here */
                WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
        }

        /*
         * XXX: take care about DDR bit
         *
         * reg = READ4(sc, SDMMC_UHS_REG);
         * reg |= (SDMMC_UHS_REG_DDR);
         * WRITE4(sc, SDMMC_UHS_REG, reg);
         */

        return (0);
}

static int
dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
        struct mmc_data *data;

        data = cmd->data;

        if (data->flags & MMC_DATA_WRITE)
                bus_dmamap_sync(sc->buf_tag, sc->buf_map,
                    BUS_DMASYNC_POSTWRITE);
        else
                bus_dmamap_sync(sc->buf_tag, sc->buf_map,
                    BUS_DMASYNC_POSTREAD);

        bus_dmamap_sync(sc->desc_tag, sc->desc_map,
            BUS_DMASYNC_POSTWRITE);

        bus_dmamap_unload(sc->buf_tag, sc->buf_map);

        return (0);
}

static int
dma_stop(struct dwmmc_softc *sc)
{
        int reg;

        reg = READ4(sc, SDMMC_CTRL);
        reg &= ~(SDMMC_CTRL_USE_IDMAC);
        reg |= (SDMMC_CTRL_DMA_RESET);
        WRITE4(sc, SDMMC_CTRL, reg);

        reg = READ4(sc, SDMMC_BMOD);
        reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
        reg |= (SDMMC_BMOD_SWR);
        WRITE4(sc, SDMMC_BMOD, reg);

        return (0);
}

static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
        struct mmc_data *data;
        int len;
        int err;
        int reg;

        data = cmd->data;
        len = data->len;

        reg = READ4(sc, SDMMC_INTMASK);
        reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
        WRITE4(sc, SDMMC_INTMASK, reg);

        err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
            data->data, data->len, dwmmc_ring_setup,
            sc, BUS_DMA_NOWAIT);
        if (err != 0)
                panic("dmamap_load failed\n");

        /* Ensure the device can see the desc */
        bus_dmamap_sync(sc->desc_tag, sc->desc_map,
            BUS_DMASYNC_PREWRITE);

        if (data->flags & MMC_DATA_WRITE)
                bus_dmamap_sync(sc->buf_tag, sc->buf_map,
                    BUS_DMASYNC_PREWRITE);
        else
                bus_dmamap_sync(sc->buf_tag, sc->buf_map,
                    BUS_DMASYNC_PREREAD);

        reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
        reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
        reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

        WRITE4(sc, SDMMC_FIFOTH, reg);
        wmb();

        reg = READ4(sc, SDMMC_CTRL);
        reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
        WRITE4(sc, SDMMC_CTRL, reg);
        wmb();

        reg = READ4(sc, SDMMC_BMOD);
        reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
        WRITE4(sc, SDMMC_BMOD, reg);

        /* Start */
        WRITE4(sc, SDMMC_PLDMND, 1);

        return (0);
}

static int
pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
        struct mmc_data *data;
        int reg;

        data = cmd->data;
        data->xfer_len = 0;

        reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
        reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
        reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

        WRITE4(sc, SDMMC_FIFOTH, reg);
        wmb();

        return (0);
}

static void
pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
        struct mmc_data *data;
        uint32_t *p, status;

        if (cmd == NULL || cmd->data == NULL)
                return;

        data = cmd->data;
        if ((data->flags & MMC_DATA_READ) == 0)
                return;

        KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
        p = (uint32_t *)data->data + (data->xfer_len >> 2);

        while (data->xfer_len < data->len) {
                status = READ4(sc, SDMMC_STATUS);
                if (status & SDMMC_STATUS_FIFO_EMPTY)
                        break;
                *p++ = READ4(sc, SDMMC_DATA);
                data->xfer_len += 4;
        }

        WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
}

static void
pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
        struct mmc_data *data;
        uint32_t *p, status;

        if (cmd == NULL || cmd->data == NULL)
                return;

        data = cmd->data;
        if ((data->flags & MMC_DATA_WRITE) == 0)
                return;

        KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
        p = (uint32_t *)data->data + (data->xfer_len >> 2);

        while (data->xfer_len < data->len) {
                status = READ4(sc, SDMMC_STATUS);
                if (status & SDMMC_STATUS_FIFO_FULL)
                        break;
                WRITE4(sc, SDMMC_DATA, *p++);
                data->xfer_len += 4;
        }

        WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
}

static void
dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
        struct mmc_data *data;
        uint32_t blksz;
        uint32_t cmdr;

        sc->curcmd = cmd;
        data = cmd->data;

        if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
                dwmmc_setup_bus(sc, sc->host.ios.clock);

        /* XXX Upper layers don't always set this */
        cmd->mrq = sc->req;

        /* Begin setting up command register. */

        cmdr = cmd->opcode;

        dprintf("cmd->opcode 0x%08x\n", cmd->opcode);

        if (cmd->opcode == MMC_STOP_TRANSMISSION ||
            cmd->opcode == MMC_GO_IDLE_STATE ||
            cmd->opcode == MMC_GO_INACTIVE_STATE)
                cmdr |= SDMMC_CMD_STOP_ABORT;
        else if (cmd->opcode != MMC_SEND_STATUS && data)
                cmdr |= SDMMC_CMD_WAIT_PRVDATA;

        /* Set up response handling. */
        if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
                cmdr |= SDMMC_CMD_RESP_EXP;
                if (cmd->flags & MMC_RSP_136)
                        cmdr |= SDMMC_CMD_RESP_LONG;
        }

        if (cmd->flags & MMC_RSP_CRC)
                cmdr |= SDMMC_CMD_RESP_CRC;

        /*
         * XXX: Not all platforms want this.
         */
        cmdr |= SDMMC_CMD_USE_HOLD_REG;

        if ((sc->flags & CARD_INIT_DONE) == 0) {
                sc->flags |= (CARD_INIT_DONE);
                cmdr |= SDMMC_CMD_SEND_INIT;
        }

        if (data) {
                if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
                    cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
                    sc->use_auto_stop)
                        cmdr |= SDMMC_CMD_SEND_ASTOP;

                cmdr |= SDMMC_CMD_DATA_EXP;
                if (data->flags & MMC_DATA_STREAM)
                        cmdr |= SDMMC_CMD_MODE_STREAM;
                if (data->flags & MMC_DATA_WRITE)
                        cmdr |= SDMMC_CMD_DATA_WRITE;

                WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
                WRITE4(sc, SDMMC_BYTCNT, data->len);
                blksz = (data->len < MMC_SECTOR_SIZE) ?
                    data->len : MMC_SECTOR_SIZE;
                WRITE4(sc, SDMMC_BLKSIZ, blksz);

                if (sc->use_pio) {
                        pio_prepare(sc, cmd);
                } else {
                        dma_prepare(sc, cmd);
                }
                wmb();
        }

        dprintf("cmdr 0x%08x\n", cmdr);

        WRITE4(sc, SDMMC_CMDARG, cmd->arg);
        wmb();
        WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
}

static void
dwmmc_next_operation(struct dwmmc_softc *sc)
{
        struct mmc_request *req;

        req = sc->req;
        if (req == NULL)
                return;

        sc->acd_rcvd = 0;
        sc->dto_rcvd = 0;
        sc->cmd_done = 0;

        /*
         * XXX: Wait while the card is still busy.
         * We do need this to prevent data timeouts,
         * mostly caused by multi-block write command
         * followed by single-read.
         */
        while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
                continue;

        if (sc->flags & PENDING_CMD) {
                sc->flags &= ~PENDING_CMD;
                dwmmc_start_cmd(sc, req->cmd);
                return;
        } else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
                sc->flags &= ~PENDING_STOP;
                dwmmc_start_cmd(sc, req->stop);
                return;
        }

        sc->req = NULL;
        sc->curcmd = NULL;
        req->done(req);
}

static int
dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
{
        struct dwmmc_softc *sc;

        sc = device_get_softc(brdev);

        dprintf("%s\n", __func__);

        DWMMC_LOCK(sc);

        if (sc->req != NULL) {
                DWMMC_UNLOCK(sc);
                return (EBUSY);
        }

        sc->req = req;
        sc->flags |= PENDING_CMD;
        if (sc->req->stop)
                sc->flags |= PENDING_STOP;
        dwmmc_next_operation(sc);

        DWMMC_UNLOCK(sc);
        return (0);
}

static int
dwmmc_get_ro(device_t brdev, device_t reqdev)
{

        dprintf("%s\n", __func__);

        return (0);
}

static int
dwmmc_acquire_host(device_t brdev, device_t reqdev)
{
        struct dwmmc_softc *sc;

        sc = device_get_softc(brdev);

        DWMMC_LOCK(sc);
        while (sc->bus_busy)
                msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
        sc->bus_busy++;
        DWMMC_UNLOCK(sc);
        return (0);
}

static int
dwmmc_release_host(device_t brdev, device_t reqdev)
{
        struct dwmmc_softc *sc;

        sc = device_get_softc(brdev);

        DWMMC_LOCK(sc);
        sc->bus_busy--;
        wakeup(sc);
        DWMMC_UNLOCK(sc);
        return (0);
}

static int
dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
{
        struct dwmmc_softc *sc;

        sc = device_get_softc(bus);

        switch (which) {
        default:
                return (EINVAL);
        case MMCBR_IVAR_BUS_MODE:
                *(int *)result = sc->host.ios.bus_mode;
                break;
        case MMCBR_IVAR_BUS_WIDTH:
                *(int *)result = sc->host.ios.bus_width;
                break;
        case MMCBR_IVAR_CHIP_SELECT:
                *(int *)result = sc->host.ios.chip_select;
                break;
        case MMCBR_IVAR_CLOCK:
                *(int *)result = sc->host.ios.clock;
                break;
        case MMCBR_IVAR_F_MIN:
                *(int *)result = sc->host.f_min;
                break;
        case MMCBR_IVAR_F_MAX:
                *(int *)result = sc->host.f_max;
                break;
        case MMCBR_IVAR_HOST_OCR:
                *(int *)result = sc->host.host_ocr;
                break;
        case MMCBR_IVAR_MODE:
                *(int *)result = sc->host.mode;
                break;
        case MMCBR_IVAR_OCR:
                *(int *)result = sc->host.ocr;
                break;
        case MMCBR_IVAR_POWER_MODE:
                *(int *)result = sc->host.ios.power_mode;
                break;
        case MMCBR_IVAR_VDD:
                *(int *)result = sc->host.ios.vdd;
                break;
        case MMCBR_IVAR_CAPS:
                sc->host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
                *(int *)result = sc->host.caps;
                break;
        case MMCBR_IVAR_MAX_DATA:
                *(int *)result = sc->desc_count;
        }
        return (0);
}

static int
dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
{
        struct dwmmc_softc *sc;

        sc = device_get_softc(bus);

        switch (which) {
        default:
                return (EINVAL);
        case MMCBR_IVAR_BUS_MODE:
                sc->host.ios.bus_mode = value;
                break;
        case MMCBR_IVAR_BUS_WIDTH:
                sc->host.ios.bus_width = value;
                break;
        case MMCBR_IVAR_CHIP_SELECT:
                sc->host.ios.chip_select = value;
                break;
        case MMCBR_IVAR_CLOCK:
                sc->host.ios.clock = value;
                break;
        case MMCBR_IVAR_MODE:
                sc->host.mode = value;
                break;
        case MMCBR_IVAR_OCR:
                sc->host.ocr = value;
                break;
        case MMCBR_IVAR_POWER_MODE:
                sc->host.ios.power_mode = value;
                break;
        case MMCBR_IVAR_VDD:
                sc->host.ios.vdd = value;
                break;
        /* These are read-only */
        case MMCBR_IVAR_CAPS:
        case MMCBR_IVAR_HOST_OCR:
        case MMCBR_IVAR_F_MIN:
        case MMCBR_IVAR_F_MAX:
        case MMCBR_IVAR_MAX_DATA:
                return (EINVAL);
        }
        return (0);
}

static device_method_t dwmmc_methods[] = {
        DEVMETHOD(device_probe,         dwmmc_probe),
        DEVMETHOD(device_attach,        dwmmc_attach),

        /* Bus interface */
        DEVMETHOD(bus_read_ivar,        dwmmc_read_ivar),
        DEVMETHOD(bus_write_ivar,       dwmmc_write_ivar),

        /* mmcbr_if */
        DEVMETHOD(mmcbr_update_ios,     dwmmc_update_ios),
        DEVMETHOD(mmcbr_request,        dwmmc_request),
        DEVMETHOD(mmcbr_get_ro,         dwmmc_get_ro),
        DEVMETHOD(mmcbr_acquire_host,   dwmmc_acquire_host),
        DEVMETHOD(mmcbr_release_host,   dwmmc_release_host),

        DEVMETHOD_END
};

driver_t dwmmc_driver = {
        "dwmmc",
        dwmmc_methods,
        sizeof(struct dwmmc_softc),
};

static devclass_t dwmmc_devclass;

DRIVER_MODULE(dwmmc, simplebus, dwmmc_driver, dwmmc_devclass, NULL, NULL);
DRIVER_MODULE(dwmmc, ofwbus, dwmmc_driver, dwmmc_devclass, NULL, NULL);
MMC_DECLARE_BRIDGE(dwmmc);