/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Synopsys DesignWare Mobile Storage Host Controller
 * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcreg.h>
#include <dev/mmc/mmcbrvar.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <dev/mmc/host/dwmmc_reg.h>
#include <dev/mmc/host/dwmmc_var.h>

#include "mmcbr_if.h"

#define	dprintf(x, arg...)

#define	READ4(_sc, _reg)	\
	bus_read_4((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val)	\
	bus_write_4((_sc)->res[0], _reg, _val)

#define	DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

#define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
#define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	DWMMC_LOCK_INIT(_sc)		\
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),	\
	    "dwmmc", MTX_DEF)
#define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
#define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

#define	PENDING_CMD	0x01
#define	PENDING_STOP	0x02
#define	CARD_INIT_DONE	0x04

#define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
				|SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
				|SDMMC_INTMASK_EBE)
#define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
				|SDMMC_INTMASK_RE)
#define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
				|SDMMC_INTMASK_HLE)

#define	DES0_DIC	(1 << 1)
#define	DES0_LD		(1 << 2)
#define	DES0_FS		(1 << 3)
#define	DES0_CH		(1 << 4)
#define	DES0_ER		(1 << 5)
#define	DES0_CES	(1 << 30)
#define	DES0_OWN	(1 << 31)

#define	DES1_BS1_MASK	0xfff
#define	DES1_BS1_SHIFT	0

struct idmac_desc {
	uint32_t	des0;	/* control */
	uint32_t	des1;	/* bufsize */
	uint32_t	des2;	/* buf1 phys addr */
	uint32_t	des3;	/* buf2 phys addr or next descr */
};

#define	DESC_MAX	256
#define	DESC_SIZE	(sizeof(struct idmac_desc) * DESC_MAX)
#define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */

static void dwmmc_next_operation(struct dwmmc_softc *);
static int dwmmc_setup_bus(struct dwmmc_softc *, int);
static int dma_done(struct dwmmc_softc *, struct mmc_command *);
static int dma_stop(struct dwmmc_softc *);
static void pio_read(struct dwmmc_softc *, struct mmc_command *);
static void pio_write(struct dwmmc_softc *, struct mmc_command *);

static struct resource_spec dwmmc_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

#define	HWTYPE_MASK		(0x0000ffff)
#define	HWFLAG_MASK		(0xffff << 16)

static struct ofw_compat_data compat_data[] = {
	{"altr,socfpga-dw-mshc",	HWTYPE_ALTERA},
	{"samsung,exynos5420-dw-mshc",	HWTYPE_EXYNOS},
	{"rockchip,rk2928-dw-mshc",	HWTYPE_ROCKCHIP},
	{NULL,				HWTYPE_NONE},
};

static void
dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

static void
dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct dwmmc_softc *sc;
	int idx;

	if (error != 0)
		return;

	sc = arg;

	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);

	for (idx = 0; idx < nsegs; idx++) {
		sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
		sc->desc_ring[idx].des1 = segs[idx].ds_len;
		sc->desc_ring[idx].des2 = segs[idx].ds_addr;

		if (idx == 0)
			sc->desc_ring[idx].des0 |= DES0_FS;

		if (idx == (nsegs - 1)) {
			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
			sc->desc_ring[idx].des0 |= DES0_LD;
		}
	}
}
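
/*
 * Note on the descriptor bits used by dwmmc_ring_setup() above: this
 * follows the Synopsys internal DMAC (IDMAC) chained-descriptor layout.
 * DES0_OWN hands the descriptor to the IDMAC, DES0_FS/DES0_LD mark the
 * first and last segments of a transfer, DES0_CH selects chained mode so
 * that des3 holds a next-descriptor pointer rather than a second buffer
 * address, and DES0_DIC suppresses the per-descriptor completion
 * interrupt for every segment except the last.
 */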

static int
dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
{
	int reg;
	int i;

	reg = READ4(sc, SDMMC_CTRL);
	reg |= (reset_bits);
	WRITE4(sc, SDMMC_CTRL, reg);

	/* Wait for the reset to complete. */
	for (i = 0; i < 100; i++) {
		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
			return (0);
		DELAY(10);
	}

	device_printf(sc->dev, "Reset failed\n");

	return (1);
}

static int
dma_setup(struct dwmmc_softc *sc)
{
	int error;
	int nidx;
	int idx;

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    4096, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    DESC_SIZE, 1,		/* maxsize, nsegments */
	    DESC_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->desc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create ring DMA tag.\n");
		return (1);
	}

	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->desc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate descriptor ring.\n");
		return (1);
	}

	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
	    sc->desc_ring, DESC_SIZE, dwmmc_get1paddr,
	    &sc->desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load descriptor ring map.\n");
		return (1);
	}

	for (idx = 0; idx < sc->desc_count; idx++) {
		sc->desc_ring[idx].des0 = DES0_CH;
		sc->desc_ring[idx].des1 = 0;
		nidx = (idx + 1) % sc->desc_count;
		sc->desc_ring[idx].des3 = sc->desc_ring_paddr +
		    (nidx * sizeof(struct idmac_desc));
	}
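
	/*
	 * The loop above links the descriptors into a circular chain:
	 * each des3 holds the bus address of the next descriptor and the
	 * last entry wraps back to the first.  With the default DESC_MAX
	 * (256) descriptors and at most one MMC_SECTOR_SIZE (512 byte)
	 * segment per descriptor, a single request is bounded to
	 * 256 * 512 = 128 KiB; this same desc_count is what
	 * MMCBR_IVAR_MAX_DATA reports to the mmc layer below.
	 */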

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    4096, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sc->desc_count * MMC_SECTOR_SIZE, /* maxsize */
	    sc->desc_count,		/* nsegments */
	    MMC_SECTOR_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->buf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create data buffer DMA tag.\n");
		return (1);
	}

	error = bus_dmamap_create(sc->buf_tag, 0,
	    &sc->buf_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX buffer DMA map.\n");
		return (1);
	}

	return (0);
}

static void
dwmmc_cmd_done(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;

	cmd = sc->curcmd;
	if (cmd == NULL)
		return;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
		} else {
			cmd->resp[3] = 0;
			cmd->resp[2] = 0;
			cmd->resp[1] = 0;
			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
		}
	}
}

static void
dwmmc_tasklet(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;

	cmd = sc->curcmd;
	if (cmd == NULL)
		return;

	if (!sc->cmd_done)
		return;

	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
		dwmmc_next_operation(sc);
	} else if (cmd->data && sc->dto_rcvd) {
		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
		     sc->use_auto_stop) {
			if (sc->acd_rcvd)
				dwmmc_next_operation(sc);
		} else {
			dwmmc_next_operation(sc);
		}
	}
}
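
/*
 * Completion model (as implemented above): dwmmc_intr() latches
 * cmd_done, dto_rcvd and acd_rcvd from the interrupt status bits and
 * then calls dwmmc_tasklet().  A plain command finishes once CMD_DONE
 * is seen; a data command additionally waits for the data transfer
 * over (DTO) interrupt, and a multi-block transfer using the
 * controller's auto-stop feature also waits for the auto command done
 * (ACD) interrupt before dwmmc_next_operation() moves on.
 */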

static void
dwmmc_intr(void *arg)
{
	struct mmc_command *cmd;
	struct dwmmc_softc *sc;
	uint32_t reg;

	sc = arg;

	DWMMC_LOCK(sc);

	cmd = sc->curcmd;

	/* First handle SDMMC controller interrupts */
	reg = READ4(sc, SDMMC_MINTSTS);
	if (reg) {
		dprintf("%s 0x%08x\n", __func__, reg);

		if (reg & DWMMC_CMD_ERR_FLAGS) {
			WRITE4(sc, SDMMC_RINTSTS, DWMMC_CMD_ERR_FLAGS);
			dprintf("cmd err 0x%08x cmd 0x%08x\n",
			    reg, cmd->opcode);
			cmd->error = MMC_ERR_TIMEOUT;
		}

		if (reg & DWMMC_DATA_ERR_FLAGS) {
			WRITE4(sc, SDMMC_RINTSTS, DWMMC_DATA_ERR_FLAGS);
			dprintf("data err 0x%08x cmd 0x%08x\n",
			    reg, cmd->opcode);
			cmd->error = MMC_ERR_FAILED;
			if (!sc->use_pio) {
				dma_done(sc, cmd);
				dma_stop(sc);
			}
		}

		if (reg & SDMMC_INTMASK_CMD_DONE) {
			dwmmc_cmd_done(sc);
			sc->cmd_done = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CMD_DONE);
		}

		if (reg & SDMMC_INTMASK_ACD) {
			sc->acd_rcvd = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_ACD);
		}

		if (reg & SDMMC_INTMASK_DTO) {
			sc->dto_rcvd = 1;
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_DTO);
		}

		if (reg & SDMMC_INTMASK_CD) {
			/* XXX: Handle card detect */
			WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_CD);
		}
	}

	if (sc->use_pio) {
		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
			pio_read(sc, cmd);
		}
		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
			pio_write(sc, cmd);
		}
	} else {
		/* Now handle DMA interrupts */
		reg = READ4(sc, SDMMC_IDSTS);
		if (reg) {
			dprintf("dma intr 0x%08x\n", reg);
			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
				    SDMMC_IDINTEN_RI));
				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
				dma_done(sc, cmd);
			}
		}
	}

	dwmmc_tasklet(sc);

	DWMMC_UNLOCK(sc);
}

static int
parse_fdt(struct dwmmc_softc *sc)
{
	pcell_t dts_value[3];
	phandle_t node;
	int len;

	if ((node = ofw_bus_get_node(sc->dev)) == -1)
		return (ENXIO);

	/* fifo-depth */
	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
		OF_getencprop(node, "fifo-depth", dts_value, len);
		sc->fifo_depth = dts_value[0];
	}

	/* num-slots */
	sc->num_slots = 1;
	if ((len = OF_getproplen(node, "num-slots")) > 0) {
		OF_getencprop(node, "num-slots", dts_value, len);
		sc->num_slots = dts_value[0];
	}

	/*
	 * We need some platform-specific code to know
	 * what clock is supplied to our device.
	 * For now rely on the value specified in the FDT.
	 */
	if (sc->bus_hz == 0) {
		if ((len = OF_getproplen(node, "bus-frequency")) <= 0)
			return (ENXIO);
		OF_getencprop(node, "bus-frequency", dts_value, len);
		sc->bus_hz = dts_value[0];
	}

	/*
	 * Platform-specific stuff
	 * XXX: Move to separate file
	 */

	if ((sc->hwtype & HWTYPE_MASK) != HWTYPE_EXYNOS)
		return (0);

	if ((len = OF_getproplen(node, "samsung,dw-mshc-ciu-div")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "samsung,dw-mshc-ciu-div", dts_value, len);
	sc->sdr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);
	sc->ddr_timing = (dts_value[0] << SDMMC_CLKSEL_DIVIDER_SHIFT);

	if ((len = OF_getproplen(node, "samsung,dw-mshc-sdr-timing")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "samsung,dw-mshc-sdr-timing", dts_value, len);
	sc->sdr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
	    (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));

	if ((len = OF_getproplen(node, "samsung,dw-mshc-ddr-timing")) <= 0)
		return (ENXIO);
	OF_getencprop(node, "samsung,dw-mshc-ddr-timing", dts_value, len);
	sc->ddr_timing |= ((dts_value[0] << SDMMC_CLKSEL_SAMPLE_SHIFT) |
	    (dts_value[1] << SDMMC_CLKSEL_DRIVE_SHIFT));

	return (0);
}
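
/*
 * For illustration only: a hypothetical FDT node carrying the properties
 * parse_fdt() consumes (the node name and values below are made up, not
 * taken from a real board DTS):
 *
 *	mmc@ff704000 {
 *		compatible = "altr,socfpga-dw-mshc";
 *		fifo-depth = <0x400>;
 *		num-slots = <1>;
 *		bus-frequency = <50000000>;
 *	};
 *
 * On Exynos, samsung,dw-mshc-ciu-div supplies the CLKSEL divider, while
 * samsung,dw-mshc-sdr-timing and samsung,dw-mshc-ddr-timing are pairs of
 * cells packed into the CLKSEL sample/drive fields.
 */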

static int
dwmmc_probe(device_t dev)
{
	uintptr_t hwtype;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (hwtype == HWTYPE_NONE)
		return (ENXIO);

	device_set_desc(dev, "Synopsys DesignWare Mobile "
	    "Storage Host Controller");
	return (BUS_PROBE_DEFAULT);
}

int
dwmmc_attach(device_t dev)
{
	struct dwmmc_softc *sc;
	int error;
	int slot;

	sc = device_get_softc(dev);

	sc->dev = dev;
	if (sc->hwtype == HWTYPE_NONE) {
		sc->hwtype =
		    ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	}

	/* Why not use Auto Stop? It saves hundreds of irqs per second. */
	sc->use_auto_stop = 1;

	error = parse_fdt(sc);
	if (error != 0) {
		device_printf(dev, "Can't get FDT property.\n");
		return (ENXIO);
	}

	DWMMC_LOCK_INIT(sc);

	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Setup interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	device_printf(dev, "Hardware version ID is %04x\n",
	    READ4(sc, SDMMC_VERID) & 0xffff);

	if (sc->desc_count == 0)
		sc->desc_count = DESC_MAX;

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP) {
		sc->use_pio = 1;
		sc->pwren_inverted = 1;
	} else if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
		WRITE4(sc, EMMCP_MPSBEGIN0, 0);
		WRITE4(sc, EMMCP_SEND0, 0);
		WRITE4(sc, EMMCP_CTRL0, (MPSCTRL_SECURE_READ_BIT |
		    MPSCTRL_SECURE_WRITE_BIT |
		    MPSCTRL_NON_SECURE_READ_BIT |
		    MPSCTRL_NON_SECURE_WRITE_BIT |
		    MPSCTRL_VALID));
	}

	/* XXX: we support operation for slot index 0 only */
	slot = 0;
	if (sc->pwren_inverted) {
		WRITE4(sc, SDMMC_PWREN, (0 << slot));
	} else {
		WRITE4(sc, SDMMC_PWREN, (1 << slot));
	}

	/* Reset all */
	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
	    SDMMC_CTRL_FIFO_RESET |
	    SDMMC_CTRL_DMA_RESET)))
		return (ENXIO);

	dwmmc_setup_bus(sc, sc->host.f_min);

	if (sc->fifo_depth == 0) {
		sc->fifo_depth = 1 +
		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
		    sc->fifo_depth);
	}

	if (!sc->use_pio) {
		if (dma_setup(sc))
			return (ENXIO);

		/* Install desc base */
		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);

		/* Enable DMA interrupts */
		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
		    SDMMC_IDINTEN_RI |
		    SDMMC_IDINTEN_TI));
	}

	/* Clear and disable interrupts for a while */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, 0);

	/* Maximum timeout */
	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);

	/* Enable interrupts */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
	    SDMMC_INTMASK_DTO |
	    SDMMC_INTMASK_ACD |
	    SDMMC_INTMASK_TXDR |
	    SDMMC_INTMASK_RXDR |
	    DWMMC_ERR_FLAGS |
	    SDMMC_INTMASK_CD));
	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);

	sc->host.f_min = 400000;
	sc->host.f_max = min(200000000, sc->bus_hz);
	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->host.caps = MMC_CAP_4_BIT_DATA;

	device_add_child(dev, "mmc", -1);
	return (bus_generic_attach(dev));
}

static int
dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
{
	int tout;
	int div;

	if (freq == 0) {
		WRITE4(sc, SDMMC_CLKENA, 0);
		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
		    SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

		tout = 1000;
		do {
			if (tout-- < 0) {
				device_printf(sc->dev, "Failed to update clk\n");
				return (1);
			}
		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

		return (0);
	}

	WRITE4(sc, SDMMC_CLKENA, 0);
	WRITE4(sc, SDMMC_CLKSRC, 0);
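
	/*
	 * CLKDIV divides the controller input clock by 2*n (a value of 0
	 * bypasses the divider).  Rounding the divider up keeps the card
	 * clock at or below the requested rate; for example (numbers for
	 * illustration only), with bus_hz = 50 MHz and freq = 400 kHz
	 * this gives DIV_ROUND_UP(50000000, 800000) = 63, i.e. a card
	 * clock of 50 MHz / (2 * 63) ~= 397 kHz.
	 */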
	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;

	WRITE4(sc, SDMMC_CLKDIV, div);
	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
	    SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));

	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to update clk\n");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
	    SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);

	tout = 1000;
	do {
		if (tout-- < 0) {
			device_printf(sc->dev, "Failed to enable clk\n");
			return (1);
		}
	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);

	return (0);
}

static int
dwmmc_update_ios(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;
	struct mmc_ios *ios;

	sc = device_get_softc(brdev);
	ios = &sc->host.ios;

	dprintf("Setting up clk %u bus_width %d\n",
	    ios->clock, ios->bus_width);

	dwmmc_setup_bus(sc, ios->clock);

	if (ios->bus_width == bus_width_8)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
	else if (ios->bus_width == bus_width_4)
		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
	else
		WRITE4(sc, SDMMC_CTYPE, 0);

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
		/* XXX: take care of DDR or SDR use here */
		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
	}

	/*
	 * XXX: take care of the DDR bit
	 *
	 * reg = READ4(sc, SDMMC_UHS_REG);
	 * reg |= (SDMMC_UHS_REG_DDR);
	 * WRITE4(sc, SDMMC_UHS_REG, reg);
	 */

	return (0);
}

static int
dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;

	data = cmd->data;

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
		    BUS_DMASYNC_POSTWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
		    BUS_DMASYNC_POSTREAD);

	bus_dmamap_unload(sc->buf_tag, sc->buf_map);

	return (0);
}

static int
dma_stop(struct dwmmc_softc *sc)
{
	int reg;

	reg = READ4(sc, SDMMC_CTRL);
	reg &= ~(SDMMC_CTRL_USE_IDMAC);
	reg |= (SDMMC_CTRL_DMA_RESET);
	WRITE4(sc, SDMMC_CTRL, reg);

	reg = READ4(sc, SDMMC_BMOD);
	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	reg |= (SDMMC_BMOD_SWR);
	WRITE4(sc, SDMMC_BMOD, reg);

	return (0);
}

static int
dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int len;
	int err;
	int reg;

	data = cmd->data;
	len = data->len;

	reg = READ4(sc, SDMMC_INTMASK);
	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
	WRITE4(sc, SDMMC_INTMASK, reg);

	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
	    data->data, data->len, dwmmc_ring_setup,
	    sc, BUS_DMA_NOWAIT);
	if (err != 0)
		panic("dmamap_load failed\n");

	if (data->flags & MMC_DATA_WRITE)
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
		    BUS_DMASYNC_PREWRITE);
	else
		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
		    BUS_DMASYNC_PREREAD);
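
	/*
	 * FIFO watermarks: the RX and TX thresholds are set to roughly
	 * half the FIFO depth so DMA requests fire when the FIFO is about
	 * half full (receive) or half empty (transmit), with DEF_MSIZE
	 * selecting the multiple-transaction burst size.  As an
	 * illustration (not a measured value), a FIFO depth of 64 words
	 * yields an RX watermark of 31 and a TX watermark of 32.
	 */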
	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	reg = READ4(sc, SDMMC_CTRL);
	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
	WRITE4(sc, SDMMC_CTRL, reg);
	wmb();

	reg = READ4(sc, SDMMC_BMOD);
	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
	WRITE4(sc, SDMMC_BMOD, reg);

	/* Start */
	WRITE4(sc, SDMMC_PLDMND, 1);

	return (0);
}

static int
pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	int reg;

	data = cmd->data;
	data->xfer_len = 0;

	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;

	WRITE4(sc, SDMMC_FIFOTH, reg);
	wmb();

	return (0);
}

static void
pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_READ) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_EMPTY)
			break;
		*p++ = READ4(sc, SDMMC_DATA);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
}

static void
pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t *p, status;

	if (cmd == NULL || cmd->data == NULL)
		return;

	data = cmd->data;
	if ((data->flags & MMC_DATA_WRITE) == 0)
		return;

	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
	p = (uint32_t *)data->data + (data->xfer_len >> 2);

	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_FULL)
			break;
		WRITE4(sc, SDMMC_DATA, *p++);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
}
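
/*
 * In PIO mode the two routines above are driven from dwmmc_intr() on the
 * RXDR/TXDR watermark and DTO interrupts: each call moves 32-bit words
 * between data->data and the data FIFO until the FIFO runs empty (read)
 * or full (write), tracking progress in data->xfer_len.
 */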

static void
dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t blksz;
	uint32_t cmdr;

	sc->curcmd = cmd;
	data = cmd->data;

	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
		dwmmc_setup_bus(sc, sc->host.ios.clock);

	/* XXX Upper layers don't always set this */
	cmd->mrq = sc->req;

	/* Begin setting up command register. */

	cmdr = cmd->opcode;

	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE)
		cmdr |= SDMMC_CMD_STOP_ABORT;
	else if (cmd->opcode != MMC_SEND_STATUS && data)
		cmdr |= SDMMC_CMD_WAIT_PRVDATA;

	/* Set up response handling. */
	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	/*
	 * XXX: Not all platforms want this.
	 */
	cmdr |= SDMMC_CMD_USE_HOLD_REG;

	if ((sc->flags & CARD_INIT_DONE) == 0) {
		sc->flags |= (CARD_INIT_DONE);
		cmdr |= SDMMC_CMD_SEND_INIT;
	}

	if (data) {
		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
		     sc->use_auto_stop)
			cmdr |= SDMMC_CMD_SEND_ASTOP;

		cmdr |= SDMMC_CMD_DATA_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_MODE_STREAM;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DATA_WRITE;

		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
		WRITE4(sc, SDMMC_BYTCNT, data->len);
		blksz = (data->len < MMC_SECTOR_SIZE) ?
		    data->len : MMC_SECTOR_SIZE;
		WRITE4(sc, SDMMC_BLKSIZ, blksz);

		if (sc->use_pio) {
			pio_prepare(sc, cmd);
		} else {
			dma_prepare(sc, cmd);
		}
		wmb();
	}

	dprintf("cmdr 0x%08x\n", cmdr);

	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
	wmb();
	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
}

static void
dwmmc_next_operation(struct dwmmc_softc *sc)
{
	struct mmc_request *req;

	req = sc->req;
	if (req == NULL)
		return;

	sc->acd_rcvd = 0;
	sc->dto_rcvd = 0;
	sc->cmd_done = 0;

	/*
	 * XXX: Wait while the card is busy.
	 * We need this to prevent data timeouts,
	 * mostly caused by a multi-block write command
	 * followed by a single-block read.
	 */
	while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
		continue;

	if (sc->flags & PENDING_CMD) {
		sc->flags &= ~PENDING_CMD;
		dwmmc_start_cmd(sc, req->cmd);
		return;
	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
		sc->flags &= ~PENDING_STOP;
		dwmmc_start_cmd(sc, req->stop);
		return;
	}

	sc->req = NULL;
	sc->curcmd = NULL;
	req->done(req);
}

static int
dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	dprintf("%s\n", __func__);

	DWMMC_LOCK(sc);

	if (sc->req != NULL) {
		DWMMC_UNLOCK(sc);
		return (EBUSY);
	}

	sc->req = req;
	sc->flags |= PENDING_CMD;
	if (sc->req->stop)
		sc->flags |= PENDING_STOP;
	dwmmc_next_operation(sc);

	DWMMC_UNLOCK(sc);
	return (0);
}

static int
dwmmc_get_ro(device_t brdev, device_t reqdev)
{

	dprintf("%s\n", __func__);

	return (0);
}

static int
dwmmc_acquire_host(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	DWMMC_LOCK(sc);
	while (sc->bus_busy)
		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
	sc->bus_busy++;
	DWMMC_UNLOCK(sc);
	return (0);
}

static int
dwmmc_release_host(device_t brdev, device_t reqdev)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(brdev);

	DWMMC_LOCK(sc);
	sc->bus_busy--;
	wakeup(sc);
	DWMMC_UNLOCK(sc);
	return (0);
}
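
/*
 * dwmmc_acquire_host()/dwmmc_release_host() above implement a simple
 * sleep-based arbitration: bus_busy counts the current owner, waiters
 * msleep() on the softc and are woken on release, so only one child
 * device talks to the controller at a time.
 */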

static int
dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(bus);

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		*(int *)result = sc->host.ios.bus_mode;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		*(int *)result = sc->host.ios.bus_width;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		*(int *)result = sc->host.ios.chip_select;
		break;
	case MMCBR_IVAR_CLOCK:
		*(int *)result = sc->host.ios.clock;
		break;
	case MMCBR_IVAR_F_MIN:
		*(int *)result = sc->host.f_min;
		break;
	case MMCBR_IVAR_F_MAX:
		*(int *)result = sc->host.f_max;
		break;
	case MMCBR_IVAR_HOST_OCR:
		*(int *)result = sc->host.host_ocr;
		break;
	case MMCBR_IVAR_MODE:
		*(int *)result = sc->host.mode;
		break;
	case MMCBR_IVAR_OCR:
		*(int *)result = sc->host.ocr;
		break;
	case MMCBR_IVAR_POWER_MODE:
		*(int *)result = sc->host.ios.power_mode;
		break;
	case MMCBR_IVAR_VDD:
		*(int *)result = sc->host.ios.vdd;
		break;
	case MMCBR_IVAR_CAPS:
		sc->host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
		*(int *)result = sc->host.caps;
		break;
	case MMCBR_IVAR_MAX_DATA:
		*(int *)result = sc->desc_count;
		break;
	}
	return (0);
}

static int
dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(bus);

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		sc->host.ios.bus_mode = value;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		sc->host.ios.bus_width = value;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		sc->host.ios.chip_select = value;
		break;
	case MMCBR_IVAR_CLOCK:
		sc->host.ios.clock = value;
		break;
	case MMCBR_IVAR_MODE:
		sc->host.mode = value;
		break;
	case MMCBR_IVAR_OCR:
		sc->host.ocr = value;
		break;
	case MMCBR_IVAR_POWER_MODE:
		sc->host.ios.power_mode = value;
		break;
	case MMCBR_IVAR_VDD:
		sc->host.ios.vdd = value;
		break;
	/* These are read-only */
	case MMCBR_IVAR_CAPS:
	case MMCBR_IVAR_HOST_OCR:
	case MMCBR_IVAR_F_MIN:
	case MMCBR_IVAR_F_MAX:
	case MMCBR_IVAR_MAX_DATA:
		return (EINVAL);
	}
	return (0);
}

static device_method_t dwmmc_methods[] = {
	DEVMETHOD(device_probe,		dwmmc_probe),
	DEVMETHOD(device_attach,	dwmmc_attach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),

	/* mmcbr_if */
	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
	DEVMETHOD(mmcbr_request,	dwmmc_request),
	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),

	DEVMETHOD_END
};

driver_t dwmmc_driver = {
	"dwmmc",
	dwmmc_methods,
	sizeof(struct dwmmc_softc),
};

static devclass_t dwmmc_devclass;

DRIVER_MODULE(dwmmc, simplebus, dwmmc_driver, dwmmc_devclass, 0, 0);
DRIVER_MODULE(dwmmc, ofwbus, dwmmc_driver, dwmmc_devclass, 0, 0);
DRIVER_MODULE(mmc, dwmmc, mmc_driver, mmc_devclass, NULL, NULL);
MODULE_DEPEND(dwmmc, mmc, 1, 1, 1);