1 /*- 2 * Copyright (c) 2014-2019 Ruslan Bukin <br@bsdpad.com> 3 * All rights reserved. 4 * 5 * This software was developed by SRI International and the University of 6 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) 7 * ("CTSRD"), as part of the DARPA CRASH research programme. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* 32 * Synopsys DesignWare Mobile Storage Host Controller 33 * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22) 34 */ 35 36 #include <sys/cdefs.h> 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/conf.h> 40 #include <sys/bus.h> 41 #include <sys/kernel.h> 42 #include <sys/lock.h> 43 #include <sys/module.h> 44 #include <sys/malloc.h> 45 #include <sys/mutex.h> 46 #include <sys/proc.h> 47 #include <sys/rman.h> 48 #include <sys/queue.h> 49 #include <sys/taskqueue.h> 50 51 #include <dev/mmc/bridge.h> 52 #include <dev/mmc/mmcbrvar.h> 53 #include <dev/mmc/mmc_fdt_helpers.h> 54 55 #include <dev/fdt/fdt_common.h> 56 #include <dev/ofw/openfirm.h> 57 #include <dev/ofw/ofw_bus.h> 58 #include <dev/ofw/ofw_bus_subr.h> 59 60 #include <machine/bus.h> 61 #include <machine/cpu.h> 62 #include <machine/intr.h> 63 64 #include <dev/extres/clk/clk.h> 65 66 #include <dev/mmc/host/dwmmc_reg.h> 67 #include <dev/mmc/host/dwmmc_var.h> 68 69 #include "opt_mmccam.h" 70 71 #ifdef MMCCAM 72 #include <cam/cam.h> 73 #include <cam/cam_ccb.h> 74 #include <cam/cam_debug.h> 75 #include <cam/cam_sim.h> 76 #include <cam/cam_xpt_sim.h> 77 78 #include "mmc_sim_if.h" 79 #endif 80 81 #include "mmcbr_if.h" 82 83 #ifdef DEBUG 84 #define dprintf(fmt, args...) printf(fmt, ##args) 85 #else 86 #define dprintf(x, arg...) 
#endif

#define	READ4(_sc, _reg)		\
	bus_read_4((_sc)->res[0], _reg)
#define	WRITE4(_sc, _reg, _val)		\
	bus_write_4((_sc)->res[0], _reg, _val)

#define	DIV_ROUND_UP(n, d)		howmany(n, d)

#define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
#define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	DWMMC_LOCK_INIT(_sc)		\
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev),	\
	    "dwmmc", MTX_DEF)
#define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
#define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

#define	PENDING_CMD	0x01
#define	PENDING_STOP	0x02
#define	CARD_INIT_DONE	0x04

#define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
				|SDMMC_INTMASK_SBE | SDMMC_INTMASK_EBE)
#define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
				|SDMMC_INTMASK_RE)
#define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
				|SDMMC_INTMASK_HLE)

#define	DES0_DIC	(1 << 1)	/* Disable Interrupt on Completion */
#define	DES0_LD		(1 << 2)	/* Last Descriptor */
#define	DES0_FS		(1 << 3)	/* First Descriptor */
#define	DES0_CH		(1 << 4)	/* second address CHained */
#define	DES0_ER		(1 << 5)	/* End of Ring */
#define	DES0_CES	(1 << 30)	/* Card Error Summary */
#define	DES0_OWN	(1 << 31)	/* OWN */

#define	DES1_BS1_MASK	0x1fff

struct idmac_desc {
	uint32_t	des0;	/* control */
	uint32_t	des1;	/* bufsize */
	uint32_t	des2;	/* buf1 phys addr */
	uint32_t	des3;	/* buf2 phys addr or next descr */
};

#define	IDMAC_DESC_SEGS	(PAGE_SIZE / (sizeof(struct idmac_desc)))
#define	IDMAC_DESC_SIZE	(sizeof(struct idmac_desc) * IDMAC_DESC_SEGS)
#define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */
/*
 * The size field in a DMA descriptor is 13 bits wide (up to 8191 bytes),
 * but the value must be a multiple of the data bus size.  Additionally, we
 * must ensure that bus_dmamap_load() doesn't further fragment the buffer
 * (because it is processed with page size granularity).  Thus limit the
 * fragment size to half of a page.
 * XXX switch descriptor format to array and use second buffer pointer for
 * second half of page
 */
#define	IDMAC_MAX_SIZE	2048
/*
 * Busdma may bounce buffers, so we must reserve 2 descriptors
 * (on start and on end) for bounced fragments.
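 * For example, assuming 4 KiB pages (so IDMAC_DESC_SEGS = 256), this caps a
 * single request at DWMMC_MAX_DATA = 2048 * 254 / 512 = 1016 sectors (508 KiB).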
149 */ 150 #define DWMMC_MAX_DATA (IDMAC_MAX_SIZE * (IDMAC_DESC_SEGS - 2)) / MMC_SECTOR_SIZE 151 152 static void dwmmc_next_operation(struct dwmmc_softc *); 153 static int dwmmc_setup_bus(struct dwmmc_softc *, int); 154 static int dma_done(struct dwmmc_softc *, struct mmc_command *); 155 static int dma_stop(struct dwmmc_softc *); 156 static void pio_read(struct dwmmc_softc *, struct mmc_command *); 157 static void pio_write(struct dwmmc_softc *, struct mmc_command *); 158 static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present); 159 160 static struct resource_spec dwmmc_spec[] = { 161 { SYS_RES_MEMORY, 0, RF_ACTIVE }, 162 { SYS_RES_IRQ, 0, RF_ACTIVE }, 163 { -1, 0 } 164 }; 165 166 #define HWTYPE_MASK (0x0000ffff) 167 #define HWFLAG_MASK (0xffff << 16) 168 169 static void 170 dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 171 { 172 173 if (nsegs != 1) 174 panic("%s: nsegs != 1 (%d)\n", __func__, nsegs); 175 if (error != 0) 176 panic("%s: error != 0 (%d)\n", __func__, error); 177 178 *(bus_addr_t *)arg = segs[0].ds_addr; 179 } 180 181 static void 182 dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 183 { 184 struct dwmmc_softc *sc; 185 int idx; 186 187 sc = arg; 188 dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len); 189 if (error != 0) 190 panic("%s: error != 0 (%d)\n", __func__, error); 191 192 for (idx = 0; idx < nsegs; idx++) { 193 sc->desc_ring[idx].des0 = DES0_DIC | DES0_CH; 194 sc->desc_ring[idx].des1 = segs[idx].ds_len & DES1_BS1_MASK; 195 sc->desc_ring[idx].des2 = segs[idx].ds_addr; 196 197 if (idx == 0) 198 sc->desc_ring[idx].des0 |= DES0_FS; 199 200 if (idx == (nsegs - 1)) { 201 sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH); 202 sc->desc_ring[idx].des0 |= DES0_LD; 203 } 204 wmb(); 205 sc->desc_ring[idx].des0 |= DES0_OWN; 206 } 207 } 208 209 static int 210 dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits) 211 { 212 int reg; 213 int i; 214 215 reg = READ4(sc, SDMMC_CTRL); 216 reg |= (reset_bits); 217 WRITE4(sc, SDMMC_CTRL, reg); 218 219 /* Wait reset done */ 220 for (i = 0; i < 100; i++) { 221 if (!(READ4(sc, SDMMC_CTRL) & reset_bits)) 222 return (0); 223 DELAY(10); 224 } 225 226 device_printf(sc->dev, "Reset failed\n"); 227 228 return (1); 229 } 230 231 static int 232 dma_setup(struct dwmmc_softc *sc) 233 { 234 int error; 235 int nidx; 236 int idx; 237 238 /* 239 * Set up TX descriptor ring, descriptors, and dma maps. 240 */ 241 error = bus_dma_tag_create( 242 bus_get_dma_tag(sc->dev), /* Parent tag. 
*/ 243 4096, 0, /* alignment, boundary */ 244 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 245 BUS_SPACE_MAXADDR, /* highaddr */ 246 NULL, NULL, /* filter, filterarg */ 247 IDMAC_DESC_SIZE, 1, /* maxsize, nsegments */ 248 IDMAC_DESC_SIZE, /* maxsegsize */ 249 0, /* flags */ 250 NULL, NULL, /* lockfunc, lockarg */ 251 &sc->desc_tag); 252 if (error != 0) { 253 device_printf(sc->dev, 254 "could not create ring DMA tag.\n"); 255 return (1); 256 } 257 258 error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring, 259 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, 260 &sc->desc_map); 261 if (error != 0) { 262 device_printf(sc->dev, 263 "could not allocate descriptor ring.\n"); 264 return (1); 265 } 266 267 error = bus_dmamap_load(sc->desc_tag, sc->desc_map, 268 sc->desc_ring, IDMAC_DESC_SIZE, dwmmc_get1paddr, 269 &sc->desc_ring_paddr, 0); 270 if (error != 0) { 271 device_printf(sc->dev, 272 "could not load descriptor ring map.\n"); 273 return (1); 274 } 275 276 for (idx = 0; idx < IDMAC_DESC_SEGS; idx++) { 277 sc->desc_ring[idx].des0 = DES0_CH; 278 sc->desc_ring[idx].des1 = 0; 279 nidx = (idx + 1) % IDMAC_DESC_SEGS; 280 sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \ 281 (nidx * sizeof(struct idmac_desc)); 282 } 283 sc->desc_ring[idx - 1].des3 = sc->desc_ring_paddr; 284 sc->desc_ring[idx - 1].des0 |= DES0_ER; 285 286 error = bus_dma_tag_create( 287 bus_get_dma_tag(sc->dev), /* Parent tag. */ 288 8, 0, /* alignment, boundary */ 289 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 290 BUS_SPACE_MAXADDR, /* highaddr */ 291 NULL, NULL, /* filter, filterarg */ 292 IDMAC_MAX_SIZE * IDMAC_DESC_SEGS, /* maxsize */ 293 IDMAC_DESC_SEGS, /* nsegments */ 294 IDMAC_MAX_SIZE, /* maxsegsize */ 295 0, /* flags */ 296 NULL, NULL, /* lockfunc, lockarg */ 297 &sc->buf_tag); 298 if (error != 0) { 299 device_printf(sc->dev, 300 "could not create ring DMA tag.\n"); 301 return (1); 302 } 303 304 error = bus_dmamap_create(sc->buf_tag, 0, 305 &sc->buf_map); 306 if (error != 0) { 307 device_printf(sc->dev, 308 "could not create TX buffer DMA map.\n"); 309 return (1); 310 } 311 312 return (0); 313 } 314 315 static void 316 dwmmc_cmd_done(struct dwmmc_softc *sc) 317 { 318 struct mmc_command *cmd; 319 #ifdef MMCCAM 320 union ccb *ccb; 321 #endif 322 323 #ifdef MMCCAM 324 ccb = sc->ccb; 325 if (ccb == NULL) 326 return; 327 cmd = &ccb->mmcio.cmd; 328 #else 329 cmd = sc->curcmd; 330 #endif 331 if (cmd == NULL) 332 return; 333 334 if (cmd->flags & MMC_RSP_PRESENT) { 335 if (cmd->flags & MMC_RSP_136) { 336 cmd->resp[3] = READ4(sc, SDMMC_RESP0); 337 cmd->resp[2] = READ4(sc, SDMMC_RESP1); 338 cmd->resp[1] = READ4(sc, SDMMC_RESP2); 339 cmd->resp[0] = READ4(sc, SDMMC_RESP3); 340 } else { 341 cmd->resp[3] = 0; 342 cmd->resp[2] = 0; 343 cmd->resp[1] = 0; 344 cmd->resp[0] = READ4(sc, SDMMC_RESP0); 345 } 346 } 347 } 348 349 static void 350 dwmmc_tasklet(struct dwmmc_softc *sc) 351 { 352 struct mmc_command *cmd; 353 354 cmd = sc->curcmd; 355 if (cmd == NULL) 356 return; 357 358 if (!sc->cmd_done) 359 return; 360 361 if (cmd->error != MMC_ERR_NONE || !cmd->data) { 362 dwmmc_next_operation(sc); 363 } else if (cmd->data && sc->dto_rcvd) { 364 if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || 365 cmd->opcode == MMC_READ_MULTIPLE_BLOCK) && 366 sc->use_auto_stop) { 367 if (sc->acd_rcvd) 368 dwmmc_next_operation(sc); 369 } else { 370 dwmmc_next_operation(sc); 371 } 372 } 373 } 374 375 static void 376 dwmmc_intr(void *arg) 377 { 378 struct mmc_command *cmd; 379 struct dwmmc_softc *sc; 380 uint32_t reg; 381 382 sc = arg; 383 384 DWMMC_LOCK(sc); 385 386 
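	/*
	 * Service the masked SDMMC interrupt status first (command/data
	 * errors, command done, auto-stop, data transfer over and card
	 * detect), ack it, and then handle either PIO FIFO events or the
	 * internal DMA (IDMAC) status, depending on the transfer mode.
	 */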
cmd = sc->curcmd; 387 388 /* First handle SDMMC controller interrupts */ 389 reg = READ4(sc, SDMMC_MINTSTS); 390 if (reg) { 391 dprintf("%s 0x%08x\n", __func__, reg); 392 393 if (reg & DWMMC_CMD_ERR_FLAGS) { 394 dprintf("cmd err 0x%08x cmd 0x%08x\n", 395 reg, cmd->opcode); 396 cmd->error = MMC_ERR_TIMEOUT; 397 } 398 399 if (reg & DWMMC_DATA_ERR_FLAGS) { 400 dprintf("data err 0x%08x cmd 0x%08x\n", 401 reg, cmd->opcode); 402 cmd->error = MMC_ERR_FAILED; 403 if (!sc->use_pio) { 404 dma_done(sc, cmd); 405 dma_stop(sc); 406 } 407 } 408 409 if (reg & SDMMC_INTMASK_CMD_DONE) { 410 dwmmc_cmd_done(sc); 411 sc->cmd_done = 1; 412 } 413 414 if (reg & SDMMC_INTMASK_ACD) 415 sc->acd_rcvd = 1; 416 417 if (reg & SDMMC_INTMASK_DTO) 418 sc->dto_rcvd = 1; 419 420 if (reg & SDMMC_INTMASK_CD) { 421 dwmmc_handle_card_present(sc, 422 READ4(sc, SDMMC_CDETECT) == 0 ? true : false); 423 } 424 } 425 426 /* Ack interrupts */ 427 WRITE4(sc, SDMMC_RINTSTS, reg); 428 429 if (sc->use_pio) { 430 if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) { 431 pio_read(sc, cmd); 432 } 433 if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) { 434 pio_write(sc, cmd); 435 } 436 } else { 437 /* Now handle DMA interrupts */ 438 reg = READ4(sc, SDMMC_IDSTS); 439 if (reg) { 440 dprintf("dma intr 0x%08x\n", reg); 441 if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) { 442 WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI | 443 SDMMC_IDINTEN_RI)); 444 WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI); 445 dma_done(sc, cmd); 446 } 447 } 448 } 449 450 dwmmc_tasklet(sc); 451 452 DWMMC_UNLOCK(sc); 453 } 454 455 static void 456 dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present) 457 { 458 bool was_present; 459 460 if (dumping || SCHEDULER_STOPPED()) 461 return; 462 463 was_present = sc->child != NULL; 464 465 if (!was_present && is_present) { 466 taskqueue_enqueue_timeout(taskqueue_swi_giant, 467 &sc->card_delayed_task, -(hz / 2)); 468 } else if (was_present && !is_present) { 469 taskqueue_enqueue(taskqueue_swi_giant, &sc->card_task); 470 } 471 } 472 473 static void 474 dwmmc_card_task(void *arg, int pending __unused) 475 { 476 struct dwmmc_softc *sc = arg; 477 478 #ifdef MMCCAM 479 mmc_cam_sim_discover(&sc->mmc_sim); 480 #else 481 DWMMC_LOCK(sc); 482 483 if (READ4(sc, SDMMC_CDETECT) == 0 || 484 (sc->mmc_helper.props & MMC_PROP_BROKEN_CD)) { 485 if (sc->child == NULL) { 486 if (bootverbose) 487 device_printf(sc->dev, "Card inserted\n"); 488 489 sc->child = device_add_child(sc->dev, "mmc", -1); 490 DWMMC_UNLOCK(sc); 491 if (sc->child) { 492 device_set_ivars(sc->child, sc); 493 (void)device_probe_and_attach(sc->child); 494 } 495 } else 496 DWMMC_UNLOCK(sc); 497 } else { 498 /* Card isn't present, detach if necessary */ 499 if (sc->child != NULL) { 500 if (bootverbose) 501 device_printf(sc->dev, "Card removed\n"); 502 503 DWMMC_UNLOCK(sc); 504 device_delete_child(sc->dev, sc->child); 505 sc->child = NULL; 506 } else 507 DWMMC_UNLOCK(sc); 508 } 509 #endif /* MMCCAM */ 510 } 511 512 static int 513 parse_fdt(struct dwmmc_softc *sc) 514 { 515 pcell_t dts_value[3]; 516 phandle_t node; 517 uint32_t bus_hz = 0; 518 int len; 519 int error; 520 521 if ((node = ofw_bus_get_node(sc->dev)) == -1) 522 return (ENXIO); 523 524 /* Set some defaults for freq and supported mode */ 525 sc->host.f_min = 400000; 526 sc->host.f_max = 200000000; 527 sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; 528 sc->host.caps = MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330; 529 mmc_fdt_parse(sc->dev, node, &sc->mmc_helper, &sc->host); 530 531 /* fifo-depth */ 532 if ((len = 
OF_getproplen(node, "fifo-depth")) > 0) { 533 OF_getencprop(node, "fifo-depth", dts_value, len); 534 sc->fifo_depth = dts_value[0]; 535 } 536 537 /* num-slots (Deprecated) */ 538 sc->num_slots = 1; 539 if ((len = OF_getproplen(node, "num-slots")) > 0) { 540 device_printf(sc->dev, "num-slots property is deprecated\n"); 541 OF_getencprop(node, "num-slots", dts_value, len); 542 sc->num_slots = dts_value[0]; 543 } 544 545 /* clock-frequency */ 546 if ((len = OF_getproplen(node, "clock-frequency")) > 0) { 547 OF_getencprop(node, "clock-frequency", dts_value, len); 548 bus_hz = dts_value[0]; 549 } 550 551 /* IP block reset is optional */ 552 error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset); 553 if (error != 0 && 554 error != ENOENT && 555 error != ENODEV) { 556 device_printf(sc->dev, "Cannot get reset\n"); 557 goto fail; 558 } 559 560 /* vmmc regulator is optional */ 561 error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply", 562 &sc->vmmc); 563 if (error != 0 && 564 error != ENOENT && 565 error != ENODEV) { 566 device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n"); 567 goto fail; 568 } 569 570 /* vqmmc regulator is optional */ 571 error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply", 572 &sc->vqmmc); 573 if (error != 0 && 574 error != ENOENT && 575 error != ENODEV) { 576 device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n"); 577 goto fail; 578 } 579 580 /* Assert reset first */ 581 if (sc->hwreset != NULL) { 582 error = hwreset_assert(sc->hwreset); 583 if (error != 0) { 584 device_printf(sc->dev, "Cannot assert reset\n"); 585 goto fail; 586 } 587 } 588 589 /* BIU (Bus Interface Unit clock) is optional */ 590 error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu); 591 if (error != 0 && 592 error != ENOENT && 593 error != ENODEV) { 594 device_printf(sc->dev, "Cannot get 'biu' clock\n"); 595 goto fail; 596 } 597 598 if (sc->biu) { 599 error = clk_enable(sc->biu); 600 if (error != 0) { 601 device_printf(sc->dev, "cannot enable biu clock\n"); 602 goto fail; 603 } 604 } 605 606 /* 607 * CIU (Controller Interface Unit clock) is mandatory 608 * if no clock-frequency property is given 609 */ 610 error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu); 611 if (error != 0 && 612 error != ENOENT && 613 error != ENODEV) { 614 device_printf(sc->dev, "Cannot get 'ciu' clock\n"); 615 goto fail; 616 } 617 618 if (sc->ciu) { 619 if (bus_hz != 0) { 620 error = clk_set_freq(sc->ciu, bus_hz, 0); 621 if (error != 0) 622 device_printf(sc->dev, 623 "cannot set ciu clock to %u\n", bus_hz); 624 } 625 error = clk_enable(sc->ciu); 626 if (error != 0) { 627 device_printf(sc->dev, "cannot enable ciu clock\n"); 628 goto fail; 629 } 630 clk_get_freq(sc->ciu, &sc->bus_hz); 631 } 632 633 /* Enable regulators */ 634 if (sc->vmmc != NULL) { 635 error = regulator_enable(sc->vmmc); 636 if (error != 0) { 637 device_printf(sc->dev, "Cannot enable vmmc regulator\n"); 638 goto fail; 639 } 640 } 641 if (sc->vqmmc != NULL) { 642 error = regulator_enable(sc->vqmmc); 643 if (error != 0) { 644 device_printf(sc->dev, "Cannot enable vqmmc regulator\n"); 645 goto fail; 646 } 647 } 648 649 /* Take dwmmc out of reset */ 650 if (sc->hwreset != NULL) { 651 error = hwreset_deassert(sc->hwreset); 652 if (error != 0) { 653 device_printf(sc->dev, "Cannot deassert reset\n"); 654 goto fail; 655 } 656 } 657 658 if (sc->bus_hz == 0) { 659 device_printf(sc->dev, "No bus speed provided\n"); 660 goto fail; 661 } 662 663 return (0); 664 665 fail: 666 return (ENXIO); 667 } 668 669 int 670 
dwmmc_attach(device_t dev)
{
	struct dwmmc_softc *sc;
	int error;

	sc = device_get_softc(dev);

	sc->dev = dev;

	/* Why not use Auto Stop?  It saves hundreds of IRQs per second. */
	sc->use_auto_stop = 1;

	error = parse_fdt(sc);
	if (error != 0) {
		device_printf(dev, "Can't get FDT property.\n");
		return (ENXIO);
	}

	DWMMC_LOCK_INIT(sc);

	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Set up the interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	device_printf(dev, "Hardware version ID is %04x\n",
	    READ4(sc, SDMMC_VERID) & 0xffff);

	/* Reset all */
	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
	    SDMMC_CTRL_FIFO_RESET |
	    SDMMC_CTRL_DMA_RESET)))
		return (ENXIO);

	dwmmc_setup_bus(sc, sc->host.f_min);

	if (sc->fifo_depth == 0) {
		sc->fifo_depth = 1 +
		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
		    sc->fifo_depth);
	}

	if (!sc->use_pio) {
		dma_stop(sc);
		if (dma_setup(sc))
			return (ENXIO);

		/* Install desc base */
		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);

		/* Enable DMA interrupts */
		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
		    SDMMC_IDINTEN_RI |
		    SDMMC_IDINTEN_TI));
	}

	/* Clear and disable interrupts for now */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, 0);

	/* Maximum timeout */
	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);

	/* Enable interrupts */
	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
	    SDMMC_INTMASK_DTO |
	    SDMMC_INTMASK_ACD |
	    SDMMC_INTMASK_TXDR |
	    SDMMC_INTMASK_RXDR |
	    DWMMC_ERR_FLAGS |
	    SDMMC_INTMASK_CD));
	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);

	TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc);
	TIMEOUT_TASK_INIT(taskqueue_swi_giant, &sc->card_delayed_task, 0,
	    dwmmc_card_task, sc);

#ifdef MMCCAM
	sc->ccb = NULL;
	if (mmc_cam_sim_alloc(dev, "dw_mmc", &sc->mmc_sim) != 0) {
		device_printf(dev, "cannot alloc cam sim\n");
		dwmmc_detach(dev);
		return (ENXIO);
	}
#endif
	/*
	 * Schedule a card detection, since we won't get an interrupt
	 * if the card was already inserted when we attached.
	 */
	dwmmc_card_task(sc, 0);
	return (0);
}

int
dwmmc_detach(device_t dev)
{
	struct dwmmc_softc *sc;
	int ret;

	sc = device_get_softc(dev);

	ret = device_delete_children(dev);
	if (ret != 0)
		return (ret);

	taskqueue_drain(taskqueue_swi_giant, &sc->card_task);
	taskqueue_drain_timeout(taskqueue_swi_giant, &sc->card_delayed_task);

	if (sc->intr_cookie != NULL) {
		ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
		if (ret != 0)
			return (ret);
	}
	bus_release_resources(dev, dwmmc_spec, sc->res);

	DWMMC_LOCK_DESTROY(sc);

	if (sc->hwreset != NULL && hwreset_deassert(sc->hwreset) != 0)
		device_printf(sc->dev, "cannot deassert reset\n");
	if (sc->biu != NULL && clk_disable(sc->biu) != 0)
		device_printf(sc->dev, "cannot disable biu clock\n");
802 if (sc->ciu != NULL && clk_disable(sc->ciu) != 0) 803 device_printf(sc->dev, "cannot disable ciu clock\n"); 804 805 if (sc->vmmc && regulator_disable(sc->vmmc) != 0) 806 device_printf(sc->dev, "Cannot disable vmmc regulator\n"); 807 if (sc->vqmmc && regulator_disable(sc->vqmmc) != 0) 808 device_printf(sc->dev, "Cannot disable vqmmc regulator\n"); 809 810 #ifdef MMCCAM 811 mmc_cam_sim_free(&sc->mmc_sim); 812 #endif 813 814 return (0); 815 } 816 817 static int 818 dwmmc_setup_bus(struct dwmmc_softc *sc, int freq) 819 { 820 int tout; 821 int div; 822 823 if (freq == 0) { 824 WRITE4(sc, SDMMC_CLKENA, 0); 825 WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA | 826 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START)); 827 828 tout = 1000; 829 do { 830 if (tout-- < 0) { 831 device_printf(sc->dev, "Failed update clk\n"); 832 return (1); 833 } 834 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START); 835 836 return (0); 837 } 838 839 WRITE4(sc, SDMMC_CLKENA, 0); 840 WRITE4(sc, SDMMC_CLKSRC, 0); 841 842 div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0; 843 844 WRITE4(sc, SDMMC_CLKDIV, div); 845 WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA | 846 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START)); 847 848 tout = 1000; 849 do { 850 if (tout-- < 0) { 851 device_printf(sc->dev, "Failed to update clk\n"); 852 return (1); 853 } 854 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START); 855 856 WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP)); 857 WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA | 858 SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START); 859 860 tout = 1000; 861 do { 862 if (tout-- < 0) { 863 device_printf(sc->dev, "Failed to enable clk\n"); 864 return (1); 865 } 866 } while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START); 867 868 return (0); 869 } 870 871 static int 872 dwmmc_update_ios(device_t brdev, device_t reqdev) 873 { 874 struct dwmmc_softc *sc; 875 struct mmc_ios *ios; 876 uint32_t reg; 877 int ret = 0; 878 879 sc = device_get_softc(brdev); 880 ios = &sc->host.ios; 881 882 dprintf("Setting up clk %u bus_width %d, timming: %d\n", 883 ios->clock, ios->bus_width, ios->timing); 884 885 switch (ios->power_mode) { 886 case power_on: 887 break; 888 case power_off: 889 WRITE4(sc, SDMMC_PWREN, 0); 890 break; 891 case power_up: 892 WRITE4(sc, SDMMC_PWREN, 1); 893 break; 894 } 895 896 mmc_fdt_set_power(&sc->mmc_helper, ios->power_mode); 897 898 if (ios->bus_width == bus_width_8) 899 WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT); 900 else if (ios->bus_width == bus_width_4) 901 WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT); 902 else 903 WRITE4(sc, SDMMC_CTYPE, 0); 904 905 if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) { 906 /* XXX: take care about DDR or SDR use here */ 907 WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing); 908 } 909 910 /* Set DDR mode */ 911 reg = READ4(sc, SDMMC_UHS_REG); 912 if (ios->timing == bus_timing_uhs_ddr50 || 913 ios->timing == bus_timing_mmc_ddr52 || 914 ios->timing == bus_timing_mmc_hs400) 915 reg |= (SDMMC_UHS_REG_DDR); 916 else 917 reg &= ~(SDMMC_UHS_REG_DDR); 918 WRITE4(sc, SDMMC_UHS_REG, reg); 919 920 if (sc->update_ios) 921 ret = sc->update_ios(sc, ios); 922 923 dwmmc_setup_bus(sc, ios->clock); 924 925 return (ret); 926 } 927 928 static int 929 dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd) 930 { 931 struct mmc_data *data; 932 933 data = cmd->data; 934 935 if (data->flags & MMC_DATA_WRITE) 936 bus_dmamap_sync(sc->buf_tag, sc->buf_map, 937 BUS_DMASYNC_POSTWRITE); 938 else 939 bus_dmamap_sync(sc->buf_tag, sc->buf_map, 940 BUS_DMASYNC_POSTREAD); 941 942 
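	/* Finish syncing the descriptor ring, then release the data buffer mapping. */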
bus_dmamap_sync(sc->desc_tag, sc->desc_map, 943 BUS_DMASYNC_POSTWRITE); 944 945 bus_dmamap_unload(sc->buf_tag, sc->buf_map); 946 947 return (0); 948 } 949 950 static int 951 dma_stop(struct dwmmc_softc *sc) 952 { 953 int reg; 954 955 reg = READ4(sc, SDMMC_CTRL); 956 reg &= ~(SDMMC_CTRL_USE_IDMAC); 957 reg |= (SDMMC_CTRL_DMA_RESET); 958 WRITE4(sc, SDMMC_CTRL, reg); 959 960 reg = READ4(sc, SDMMC_BMOD); 961 reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB); 962 reg |= (SDMMC_BMOD_SWR); 963 WRITE4(sc, SDMMC_BMOD, reg); 964 965 return (0); 966 } 967 968 static int 969 dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd) 970 { 971 struct mmc_data *data; 972 int err; 973 int reg; 974 975 data = cmd->data; 976 977 reg = READ4(sc, SDMMC_INTMASK); 978 reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR); 979 WRITE4(sc, SDMMC_INTMASK, reg); 980 dprintf("%s: bus_dmamap_load size: %zu\n", __func__, data->len); 981 err = bus_dmamap_load(sc->buf_tag, sc->buf_map, 982 data->data, data->len, dwmmc_ring_setup, 983 sc, BUS_DMA_NOWAIT); 984 if (err != 0) 985 panic("dmamap_load failed\n"); 986 987 /* Ensure the device can see the desc */ 988 bus_dmamap_sync(sc->desc_tag, sc->desc_map, 989 BUS_DMASYNC_PREWRITE); 990 991 if (data->flags & MMC_DATA_WRITE) 992 bus_dmamap_sync(sc->buf_tag, sc->buf_map, 993 BUS_DMASYNC_PREWRITE); 994 else 995 bus_dmamap_sync(sc->buf_tag, sc->buf_map, 996 BUS_DMASYNC_PREREAD); 997 998 reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S); 999 reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S; 1000 reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S; 1001 1002 WRITE4(sc, SDMMC_FIFOTH, reg); 1003 wmb(); 1004 1005 reg = READ4(sc, SDMMC_CTRL); 1006 reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE); 1007 WRITE4(sc, SDMMC_CTRL, reg); 1008 wmb(); 1009 1010 reg = READ4(sc, SDMMC_BMOD); 1011 reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB); 1012 WRITE4(sc, SDMMC_BMOD, reg); 1013 1014 /* Start */ 1015 WRITE4(sc, SDMMC_PLDMND, 1); 1016 1017 return (0); 1018 } 1019 1020 static int 1021 pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd) 1022 { 1023 struct mmc_data *data; 1024 int reg; 1025 1026 data = cmd->data; 1027 data->xfer_len = 0; 1028 1029 reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S); 1030 reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S; 1031 reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S; 1032 1033 WRITE4(sc, SDMMC_FIFOTH, reg); 1034 wmb(); 1035 1036 return (0); 1037 } 1038 1039 static void 1040 pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd) 1041 { 1042 struct mmc_data *data; 1043 uint32_t *p, status; 1044 1045 if (cmd == NULL || cmd->data == NULL) 1046 return; 1047 1048 data = cmd->data; 1049 if ((data->flags & MMC_DATA_READ) == 0) 1050 return; 1051 1052 KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned")); 1053 p = (uint32_t *)data->data + (data->xfer_len >> 2); 1054 1055 while (data->xfer_len < data->len) { 1056 status = READ4(sc, SDMMC_STATUS); 1057 if (status & SDMMC_STATUS_FIFO_EMPTY) 1058 break; 1059 *p++ = READ4(sc, SDMMC_DATA); 1060 data->xfer_len += 4; 1061 } 1062 1063 WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR); 1064 } 1065 1066 static void 1067 pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd) 1068 { 1069 struct mmc_data *data; 1070 uint32_t *p, status; 1071 1072 if (cmd == NULL || cmd->data == NULL) 1073 return; 1074 1075 data = cmd->data; 1076 if ((data->flags & MMC_DATA_WRITE) == 0) 1077 return; 1078 1079 KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned")); 1080 p = (uint32_t *)data->data + (data->xfer_len >> 2); 1081 
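	/* Feed the FIFO one 32-bit word at a time until it fills or the transfer completes. */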
	while (data->xfer_len < data->len) {
		status = READ4(sc, SDMMC_STATUS);
		if (status & SDMMC_STATUS_FIFO_FULL)
			break;
		WRITE4(sc, SDMMC_DATA, *p++);
		data->xfer_len += 4;
	}

	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
}

static void
dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	uint32_t blksz;
	uint32_t cmdr;

	dprintf("%s\n", __func__);
	sc->curcmd = cmd;
	data = cmd->data;

#ifndef MMCCAM
	/* XXX Upper layers don't always set this */
	cmd->mrq = sc->req;
#endif
	/* Begin setting up command register. */

	cmdr = cmd->opcode;

	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE)
		cmdr |= SDMMC_CMD_STOP_ABORT;
	else if (cmd->opcode != MMC_SEND_STATUS && data)
		cmdr |= SDMMC_CMD_WAIT_PRVDATA;

	/* Set up response handling. */
	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	/*
	 * XXX: Not all platforms want this.
	 */
	cmdr |= SDMMC_CMD_USE_HOLD_REG;

	if ((sc->flags & CARD_INIT_DONE) == 0) {
		sc->flags |= (CARD_INIT_DONE);
		cmdr |= SDMMC_CMD_SEND_INIT;
	}

	if (data) {
		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		    cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
		    sc->use_auto_stop)
			cmdr |= SDMMC_CMD_SEND_ASTOP;

		cmdr |= SDMMC_CMD_DATA_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_MODE_STREAM;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DATA_WRITE;

		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
#ifdef MMCCAM
		if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
			WRITE4(sc, SDMMC_BLKSIZ, cmd->data->block_size);
			WRITE4(sc, SDMMC_BYTCNT, cmd->data->len);
		} else
#endif
		{
			WRITE4(sc, SDMMC_BYTCNT, data->len);
			blksz = (data->len < MMC_SECTOR_SIZE) ?
			    data->len : MMC_SECTOR_SIZE;
			WRITE4(sc, SDMMC_BLKSIZ, blksz);
		}

		if (sc->use_pio) {
			pio_prepare(sc, cmd);
		} else {
			dma_prepare(sc, cmd);
		}
		wmb();
	}

	dprintf("cmdr 0x%08x\n", cmdr);

	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
	wmb();
	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
}

static void
dwmmc_next_operation(struct dwmmc_softc *sc)
{
	struct mmc_command *cmd;

	dprintf("%s\n", __func__);
#ifdef MMCCAM
	union ccb *ccb;

	ccb = sc->ccb;
	if (ccb == NULL)
		return;
	cmd = &ccb->mmcio.cmd;
#else
	struct mmc_request *req;

	req = sc->req;
	if (req == NULL)
		return;
	cmd = req->cmd;
#endif

	sc->acd_rcvd = 0;
	sc->dto_rcvd = 0;
	sc->cmd_done = 0;

	/*
	 * XXX: Wait while the card is still busy.  We need this to prevent
	 * data timeouts, mostly caused by a multi-block write command
	 * followed by a single-block read.
1212 */ 1213 while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY)) 1214 continue; 1215 1216 if (sc->flags & PENDING_CMD) { 1217 sc->flags &= ~PENDING_CMD; 1218 dwmmc_start_cmd(sc, cmd); 1219 return; 1220 } else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) { 1221 sc->flags &= ~PENDING_STOP; 1222 /// XXX: What to do with this? 1223 //dwmmc_start_cmd(sc, req->stop); 1224 return; 1225 } 1226 1227 #ifdef MMCCAM 1228 sc->ccb = NULL; 1229 sc->curcmd = NULL; 1230 ccb->ccb_h.status = 1231 (ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR); 1232 xpt_done(ccb); 1233 #else 1234 sc->req = NULL; 1235 sc->curcmd = NULL; 1236 req->done(req); 1237 #endif 1238 } 1239 1240 static int 1241 dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req) 1242 { 1243 struct dwmmc_softc *sc; 1244 1245 sc = device_get_softc(brdev); 1246 1247 dprintf("%s\n", __func__); 1248 1249 DWMMC_LOCK(sc); 1250 1251 #ifdef MMCCAM 1252 sc->flags |= PENDING_CMD; 1253 #else 1254 if (sc->req != NULL) { 1255 DWMMC_UNLOCK(sc); 1256 return (EBUSY); 1257 } 1258 1259 sc->req = req; 1260 sc->flags |= PENDING_CMD; 1261 if (sc->req->stop) 1262 sc->flags |= PENDING_STOP; 1263 #endif 1264 dwmmc_next_operation(sc); 1265 1266 DWMMC_UNLOCK(sc); 1267 return (0); 1268 } 1269 1270 #ifndef MMCCAM 1271 static int 1272 dwmmc_get_ro(device_t brdev, device_t reqdev) 1273 { 1274 1275 dprintf("%s\n", __func__); 1276 1277 return (0); 1278 } 1279 1280 static int 1281 dwmmc_acquire_host(device_t brdev, device_t reqdev) 1282 { 1283 struct dwmmc_softc *sc; 1284 1285 sc = device_get_softc(brdev); 1286 1287 DWMMC_LOCK(sc); 1288 while (sc->bus_busy) 1289 msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5); 1290 sc->bus_busy++; 1291 DWMMC_UNLOCK(sc); 1292 return (0); 1293 } 1294 1295 static int 1296 dwmmc_release_host(device_t brdev, device_t reqdev) 1297 { 1298 struct dwmmc_softc *sc; 1299 1300 sc = device_get_softc(brdev); 1301 1302 DWMMC_LOCK(sc); 1303 sc->bus_busy--; 1304 wakeup(sc); 1305 DWMMC_UNLOCK(sc); 1306 return (0); 1307 } 1308 #endif /* !MMCCAM */ 1309 1310 static int 1311 dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) 1312 { 1313 struct dwmmc_softc *sc; 1314 1315 sc = device_get_softc(bus); 1316 1317 switch (which) { 1318 default: 1319 return (EINVAL); 1320 case MMCBR_IVAR_BUS_MODE: 1321 *(int *)result = sc->host.ios.bus_mode; 1322 break; 1323 case MMCBR_IVAR_BUS_WIDTH: 1324 *(int *)result = sc->host.ios.bus_width; 1325 break; 1326 case MMCBR_IVAR_CHIP_SELECT: 1327 *(int *)result = sc->host.ios.chip_select; 1328 break; 1329 case MMCBR_IVAR_CLOCK: 1330 *(int *)result = sc->host.ios.clock; 1331 break; 1332 case MMCBR_IVAR_F_MIN: 1333 *(int *)result = sc->host.f_min; 1334 break; 1335 case MMCBR_IVAR_F_MAX: 1336 *(int *)result = sc->host.f_max; 1337 break; 1338 case MMCBR_IVAR_HOST_OCR: 1339 *(int *)result = sc->host.host_ocr; 1340 break; 1341 case MMCBR_IVAR_MODE: 1342 *(int *)result = sc->host.mode; 1343 break; 1344 case MMCBR_IVAR_OCR: 1345 *(int *)result = sc->host.ocr; 1346 break; 1347 case MMCBR_IVAR_POWER_MODE: 1348 *(int *)result = sc->host.ios.power_mode; 1349 break; 1350 case MMCBR_IVAR_VDD: 1351 *(int *)result = sc->host.ios.vdd; 1352 break; 1353 case MMCBR_IVAR_VCCQ: 1354 *(int *)result = sc->host.ios.vccq; 1355 break; 1356 case MMCBR_IVAR_CAPS: 1357 *(int *)result = sc->host.caps; 1358 break; 1359 case MMCBR_IVAR_MAX_DATA: 1360 *(int *)result = DWMMC_MAX_DATA; 1361 break; 1362 case MMCBR_IVAR_TIMING: 1363 *(int *)result = sc->host.ios.timing; 1364 break; 1365 } 1366 return (0); 1367 } 
1368 1369 static int 1370 dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) 1371 { 1372 struct dwmmc_softc *sc; 1373 1374 sc = device_get_softc(bus); 1375 1376 switch (which) { 1377 default: 1378 return (EINVAL); 1379 case MMCBR_IVAR_BUS_MODE: 1380 sc->host.ios.bus_mode = value; 1381 break; 1382 case MMCBR_IVAR_BUS_WIDTH: 1383 sc->host.ios.bus_width = value; 1384 break; 1385 case MMCBR_IVAR_CHIP_SELECT: 1386 sc->host.ios.chip_select = value; 1387 break; 1388 case MMCBR_IVAR_CLOCK: 1389 sc->host.ios.clock = value; 1390 break; 1391 case MMCBR_IVAR_MODE: 1392 sc->host.mode = value; 1393 break; 1394 case MMCBR_IVAR_OCR: 1395 sc->host.ocr = value; 1396 break; 1397 case MMCBR_IVAR_POWER_MODE: 1398 sc->host.ios.power_mode = value; 1399 break; 1400 case MMCBR_IVAR_VDD: 1401 sc->host.ios.vdd = value; 1402 break; 1403 case MMCBR_IVAR_TIMING: 1404 sc->host.ios.timing = value; 1405 break; 1406 case MMCBR_IVAR_VCCQ: 1407 sc->host.ios.vccq = value; 1408 break; 1409 /* These are read-only */ 1410 case MMCBR_IVAR_CAPS: 1411 case MMCBR_IVAR_HOST_OCR: 1412 case MMCBR_IVAR_F_MIN: 1413 case MMCBR_IVAR_F_MAX: 1414 case MMCBR_IVAR_MAX_DATA: 1415 return (EINVAL); 1416 } 1417 return (0); 1418 } 1419 1420 #ifdef MMCCAM 1421 /* Note: this function likely belongs to the specific driver impl */ 1422 static int 1423 dwmmc_switch_vccq(device_t dev, device_t child) 1424 { 1425 device_printf(dev, "This is a default impl of switch_vccq() that always fails\n"); 1426 return EINVAL; 1427 } 1428 1429 static int 1430 dwmmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts) 1431 { 1432 struct dwmmc_softc *sc; 1433 1434 sc = device_get_softc(dev); 1435 1436 cts->host_ocr = sc->host.host_ocr; 1437 cts->host_f_min = sc->host.f_min; 1438 cts->host_f_max = sc->host.f_max; 1439 cts->host_caps = sc->host.caps; 1440 cts->host_max_data = DWMMC_MAX_DATA; 1441 memcpy(&cts->ios, &sc->host.ios, sizeof(struct mmc_ios)); 1442 1443 return (0); 1444 } 1445 1446 static int 1447 dwmmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts) 1448 { 1449 struct dwmmc_softc *sc; 1450 struct mmc_ios *ios; 1451 struct mmc_ios *new_ios; 1452 int res; 1453 1454 sc = device_get_softc(dev); 1455 ios = &sc->host.ios; 1456 1457 new_ios = &cts->ios; 1458 1459 /* Update only requested fields */ 1460 if (cts->ios_valid & MMC_CLK) { 1461 ios->clock = new_ios->clock; 1462 if (bootverbose) 1463 device_printf(sc->dev, "Clock => %d\n", ios->clock); 1464 } 1465 if (cts->ios_valid & MMC_VDD) { 1466 ios->vdd = new_ios->vdd; 1467 if (bootverbose) 1468 device_printf(sc->dev, "VDD => %d\n", ios->vdd); 1469 } 1470 if (cts->ios_valid & MMC_CS) { 1471 ios->chip_select = new_ios->chip_select; 1472 if (bootverbose) 1473 device_printf(sc->dev, "CS => %d\n", ios->chip_select); 1474 } 1475 if (cts->ios_valid & MMC_BW) { 1476 ios->bus_width = new_ios->bus_width; 1477 if (bootverbose) 1478 device_printf(sc->dev, "Bus width => %d\n", ios->bus_width); 1479 } 1480 if (cts->ios_valid & MMC_PM) { 1481 ios->power_mode = new_ios->power_mode; 1482 if (bootverbose) 1483 device_printf(sc->dev, "Power mode => %d\n", ios->power_mode); 1484 } 1485 if (cts->ios_valid & MMC_BT) { 1486 ios->timing = new_ios->timing; 1487 if (bootverbose) 1488 device_printf(sc->dev, "Timing => %d\n", ios->timing); 1489 } 1490 if (cts->ios_valid & MMC_BM) { 1491 ios->bus_mode = new_ios->bus_mode; 1492 if (bootverbose) 1493 device_printf(sc->dev, "Bus mode => %d\n", ios->bus_mode); 1494 } 1495 if (cts->ios_valid & MMC_VCCQ) { 1496 ios->vccq = new_ios->vccq; 1497 
		if (bootverbose)
			device_printf(sc->dev, "VCCQ => %d\n", ios->vccq);
		res = dwmmc_switch_vccq(sc->dev, NULL);
		device_printf(sc->dev, "VCCQ switch result: %d\n", res);
	}

	return (dwmmc_update_ios(sc->dev, NULL));
}

static int
dwmmc_cam_request(device_t dev, union ccb *ccb)
{
	struct dwmmc_softc *sc;
	struct ccb_mmcio *mmcio;

	sc = device_get_softc(dev);
	mmcio = &ccb->mmcio;

	DWMMC_LOCK(sc);

#ifdef DEBUG
	if (__predict_false(bootverbose)) {
		device_printf(sc->dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
		    mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
		    mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
		    mmcio->cmd.data != NULL ? mmcio->cmd.data->flags : 0);
	}
#endif
	if (mmcio->cmd.data != NULL) {
		if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
			panic("data->len = %d, data->flags = %d -- something is b0rked",
			    (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
	}
	if (sc->ccb != NULL) {
		device_printf(sc->dev, "Controller still has an active command\n");
		DWMMC_UNLOCK(sc);
		return (EBUSY);
	}
	sc->ccb = ccb;
	DWMMC_UNLOCK(sc);
	dwmmc_request(sc->dev, NULL, NULL);

	return (0);
}

static void
dwmmc_cam_poll(device_t dev)
{
	struct dwmmc_softc *sc;

	sc = device_get_softc(dev);
	dwmmc_intr(sc);
}
#endif /* MMCCAM */

static device_method_t dwmmc_methods[] = {
	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),

#ifndef MMCCAM
	/* mmcbr_if */
	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
	DEVMETHOD(mmcbr_request,	dwmmc_request),
	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),
#endif

#ifdef MMCCAM
	/* MMCCAM interface */
	DEVMETHOD(mmc_sim_get_tran_settings,	dwmmc_get_tran_settings),
	DEVMETHOD(mmc_sim_set_tran_settings,	dwmmc_set_tran_settings),
	DEVMETHOD(mmc_sim_cam_request,		dwmmc_cam_request),
	DEVMETHOD(mmc_sim_cam_poll,		dwmmc_cam_poll),

	DEVMETHOD(bus_add_child,		bus_generic_add_child),
#endif

	DEVMETHOD_END
};

DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
    sizeof(struct dwmmc_softc));
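
/*
 * This file implements only the common controller core.  SoC-specific glue
 * drivers (e.g. the Altera and Rockchip front ends in this directory) are
 * expected to subclass dwmmc_driver via DEFINE_CLASS_1(), apply their
 * platform quirks in their own probe/attach, and then call
 * dwmmc_attach()/dwmmc_detach().
 */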