/*-
 * Copyright (c) 2013 Alexander Fedorov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcbrvar.h>

#include <arm/allwinner/aw_mmc.h>
#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>
#include <dev/extres/regulator/regulator.h>

#define	AW_MMC_MEMRES		0
#define	AW_MMC_IRQRES		1
#define	AW_MMC_RESSZ		2
#define	AW_MMC_DMA_SEGS		((MAXPHYS / PAGE_SIZE) + 1)
#define	AW_MMC_DMA_FTRGLEVEL	0x20070008
#define	AW_MMC_RESET_RETRY	1000

#define	CARD_ID_FREQUENCY	400000

struct aw_mmc_conf {
	uint32_t	dma_xferlen;
	bool		mask_data0;
	bool		can_calibrate;
	bool		new_timing;
};

static const struct aw_mmc_conf a10_mmc_conf = {
	.dma_xferlen = 0x2000,
};

static const struct aw_mmc_conf a13_mmc_conf = {
	.dma_xferlen = 0x10000,
};

static const struct aw_mmc_conf a64_mmc_conf = {
	.dma_xferlen = 0x10000,
	.mask_data0 = true,
	.can_calibrate = true,
	.new_timing = true,
};

static const struct aw_mmc_conf a64_emmc_conf = {
	.dma_xferlen = 0x2000,
	.can_calibrate = true,
};

static struct ofw_compat_data compat_data[] = {
	{"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf},
	{"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf},
	{"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf},
	{"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf},
	{"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf},
	{NULL, 0}
};

struct aw_mmc_softc {
	device_t		aw_dev;
	clk_t			aw_clk_ahb;
	clk_t			aw_clk_mmc;
	hwreset_t		aw_rst_ahb;
	int			aw_bus_busy;
	int			aw_resid;
	int			aw_timeout;
	struct callout		aw_timeoutc;
	struct mmc_host		aw_host;
	struct mmc_request *	aw_req;
	struct mtx		aw_mtx;
	struct resource *	aw_res[AW_MMC_RESSZ];
	struct aw_mmc_conf *	aw_mmc_conf;
	uint32_t		aw_intr;
	uint32_t		aw_intr_wait;
	void *			aw_intrhand;
	int32_t			aw_vdd;
	int32_t			aw_vccq;
	regulator_t		aw_reg_vmmc;
	regulator_t		aw_reg_vqmmc;
	unsigned int		aw_clock;

	/* Fields required for DMA access. */
	bus_addr_t		aw_dma_desc_phys;
	bus_dmamap_t		aw_dma_map;
	bus_dma_tag_t		aw_dma_tag;
	void *			aw_dma_desc;
	bus_dmamap_t		aw_dma_buf_map;
	bus_dma_tag_t		aw_dma_buf_tag;
	int			aw_dma_map_err;
};

static struct resource_spec aw_mmc_res_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,	0 }
};

static int aw_mmc_probe(device_t);
static int aw_mmc_attach(device_t);
static int aw_mmc_detach(device_t);
static int aw_mmc_setup_dma(struct aw_mmc_softc *);
static int aw_mmc_reset(struct aw_mmc_softc *);
static int aw_mmc_init(struct aw_mmc_softc *);
static void aw_mmc_intr(void *);
static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);

static int aw_mmc_update_ios(device_t, device_t);
static int aw_mmc_request(device_t, device_t, struct mmc_request *);
static int aw_mmc_get_ro(device_t, device_t);
static int aw_mmc_acquire_host(device_t, device_t);
static int aw_mmc_release_host(device_t, device_t);

#define	AW_MMC_LOCK(_sc)	mtx_lock(&(_sc)->aw_mtx)
#define	AW_MMC_UNLOCK(_sc)	mtx_unlock(&(_sc)->aw_mtx)
#define	AW_MMC_READ_4(_sc, _reg)					\
	bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
#define	AW_MMC_WRITE_4(_sc, _reg, _value)				\
	bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)

static int
aw_mmc_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Allwinner Integrated MMC/SD controller");

	return (BUS_PROBE_DEFAULT);
}

static int
aw_mmc_attach(device_t dev)
{
	device_t child;
	struct aw_mmc_softc *sc;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *tree;
	uint32_t bus_width;
	phandle_t node;
	int error;

	node = ofw_bus_get_node(dev);
	sc = device_get_softc(dev);
	sc->aw_dev = dev;

	sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev,
	    compat_data)->ocd_data;

	sc->aw_req = NULL;
	if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
		device_printf(dev, "cannot allocate device resources\n");
		return (ENXIO);
	}
	if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
	    &sc->aw_intrhand)) {
		bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
		device_printf(dev, "cannot setup interrupt handler\n");
		return (ENXIO);
	}
	mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
	    MTX_DEF);
	callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);

	/* De-assert reset */
	if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
		error = hwreset_deassert(sc->aw_rst_ahb);
		if (error != 0) {
			device_printf(dev, "cannot de-assert reset\n");
			goto fail;
		}
	}

	/* Activate the module clock. */
	error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb clock\n");
		goto fail;
	}
	error = clk_enable(sc->aw_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot enable ahb clock\n");
		goto fail;
	}
	error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot get mmc clock\n");
		goto fail;
	}
	error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
	    CLK_SET_ROUND_DOWN);
	if (error != 0) {
		device_printf(dev, "cannot init mmc clock\n");
		goto fail;
	}
	error = clk_enable(sc->aw_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot enable mmc clock\n");
		goto fail;
	}

	sc->aw_timeout = 10;
	ctx = device_get_sysctl_ctx(dev);
	tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
	    &sc->aw_timeout, 0, "Request timeout in seconds");

	/* Soft Reset controller. */
	if (aw_mmc_reset(sc) != 0) {
		device_printf(dev, "cannot reset the controller\n");
		goto fail;
	}

	if (aw_mmc_setup_dma(sc) != 0) {
		device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
		goto fail;
	}

	if (OF_getencprop(node, "bus-width", &bus_width, sizeof(uint32_t)) <= 0)
		bus_width = 4;

	if (regulator_get_by_ofw_property(dev, 0, "vmmc-supply",
	    &sc->aw_reg_vmmc) == 0) {
		if (bootverbose)
			device_printf(dev, "vmmc-supply regulator found\n");
	}
	if (regulator_get_by_ofw_property(dev, 0, "vqmmc-supply",
	    &sc->aw_reg_vqmmc) == 0) {
		if (bootverbose)
			device_printf(dev, "vqmmc-supply regulator found\n");
	}

	sc->aw_host.f_min = 400000;
	sc->aw_host.f_max = 52000000;
	sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->aw_host.caps = MMC_CAP_HSPEED | MMC_CAP_UHS_SDR12 |
	    MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 |
	    MMC_CAP_UHS_DDR50 | MMC_CAP_MMC_DDR52;

	sc->aw_host.caps |= MMC_CAP_SIGNALING_330 | MMC_CAP_SIGNALING_180;

	if (bus_width >= 4)
		sc->aw_host.caps |= MMC_CAP_4_BIT_DATA;
	if (bus_width >= 8)
		sc->aw_host.caps |= MMC_CAP_8_BIT_DATA;

	child = device_add_child(dev, "mmc", -1);
	if (child == NULL) {
		device_printf(dev, "attaching MMC bus failed!\n");
		goto fail;
	}
	if (device_probe_and_attach(child) != 0) {
		device_printf(dev, "attaching MMC child failed!\n");
		device_delete_child(dev, child);
		goto fail;
	}

	return (0);

fail:
	callout_drain(&sc->aw_timeoutc);
	mtx_destroy(&sc->aw_mtx);
	bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
	bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);

	return (ENXIO);
}

static int
aw_mmc_detach(device_t dev)
{

	return (EBUSY);
}

static void
aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	if (err) {
		sc->aw_dma_map_err = err;
		return;
	}
	sc->aw_dma_desc_phys = segs[0].ds_addr;
}
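
/*
 * DMA setup: a single contiguous array of AW_MMC_DMA_SEGS descriptors is
 * allocated and loaded once at attach time; aw_dma_cb() later links the
 * entries into a chain, one descriptor per bus_dma segment of the
 * current transfer.
 */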
static int
aw_mmc_setup_dma(struct aw_mmc_softc *sc)
{
	int dma_desc_size, error;

	/* Allocate the DMA descriptor memory. */
	dma_desc_size = sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS;
	error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
	    AW_MMC_DMA_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    dma_desc_size, 1, dma_desc_size, 0, NULL, NULL, &sc->aw_dma_tag);
	if (error)
		return (error);
	error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->aw_dma_map);
	if (error)
		return (error);

	error = bus_dmamap_load(sc->aw_dma_tag, sc->aw_dma_map,
	    sc->aw_dma_desc, dma_desc_size, aw_dma_desc_cb, sc, 0);
	if (error)
		return (error);
	if (sc->aw_dma_map_err)
		return (sc->aw_dma_map_err);

	/* Create the DMA map for data transfers. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->aw_dev),
	    AW_MMC_DMA_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS,
	    sc->aw_mmc_conf->dma_xferlen, BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->aw_dma_buf_tag);
	if (error)
		return (error);
	error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
	    &sc->aw_dma_buf_map);
	if (error)
		return (error);

	return (0);
}

static void
aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
	int i;
	struct aw_mmc_dma_desc *dma_desc;
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	sc->aw_dma_map_err = err;

	if (err)
		return;

	dma_desc = sc->aw_dma_desc;
	for (i = 0; i < nsegs; i++) {
		dma_desc[i].buf_size = segs[i].ds_len;
		dma_desc[i].buf_addr = segs[i].ds_addr;
		dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
		    AW_MMC_DMA_CONFIG_OWN;
		if (i == 0)
			dma_desc[i].config |= AW_MMC_DMA_CONFIG_FD;
		if (i < (nsegs - 1)) {
			dma_desc[i].config |= AW_MMC_DMA_CONFIG_DIC;
			dma_desc[i].next = sc->aw_dma_desc_phys +
			    ((i + 1) * sizeof(struct aw_mmc_dma_desc));
		} else {
			dma_desc[i].config |= AW_MMC_DMA_CONFIG_LD |
			    AW_MMC_DMA_CONFIG_ER;
			dma_desc[i].next = 0;
		}
	}
}

static int
aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
{
	bus_dmasync_op_t sync_op;
	int error;
	struct mmc_command *cmd;
	uint32_t val;

	cmd = sc->aw_req->cmd;
	if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS))
		return (EFBIG);
	error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
	    cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
	if (error)
		return (error);
	if (sc->aw_dma_map_err)
		return (sc->aw_dma_map_err);

	if (cmd->data->flags & MMC_DATA_WRITE)
		sync_op = BUS_DMASYNC_PREWRITE;
	else
		sync_op = BUS_DMASYNC_PREREAD;
	bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
	bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);

	/* Enable DMA */
	val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	val &= ~AW_MMC_CTRL_FIFO_AC_MOD;
	val |= AW_MMC_CTRL_DMA_ENB;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);

	/* Reset DMA */
	val |= AW_MMC_CTRL_DMA_RST;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);

	AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
	AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
	    AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);

	/* Enable RX or TX DMA interrupt */
	val = AW_MMC_READ_4(sc, AW_MMC_IDIE);
	if (cmd->data->flags & MMC_DATA_WRITE)
		val |= AW_MMC_IDST_TX_INT;
	else
		val |= AW_MMC_IDST_RX_INT;
	AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);

	/* Set DMA descriptor list address */
	AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);

	/* FIFO trigger level */
	AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);

	return (0);
}

static int
aw_mmc_reset(struct aw_mmc_softc *sc)
{
	int timeout;

	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, AW_MMC_RESET);
	timeout = 1000;
	while (--timeout > 0) {
		if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_RESET) == 0)
			break;
		DELAY(100);
	}
	if (timeout == 0)
		return (ETIMEDOUT);

	return (0);
}

static int
aw_mmc_init(struct aw_mmc_softc *sc)
{
	int ret;

	ret = aw_mmc_reset(sc);
	if (ret != 0)
		return (ret);

	/* Set the timeout. */
	AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
	    AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
	    AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));

	/* Unmask interrupts. */
	AW_MMC_WRITE_4(sc, AW_MMC_IMKR, 0);

	/* Clear pending interrupts. */
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);

	/* Debug register, undocumented */
	AW_MMC_WRITE_4(sc, AW_MMC_DBGC, 0xdeb);

	/* Function select register */
	AW_MMC_WRITE_4(sc, AW_MMC_FUNS, 0xceaa0000);

	AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);

	/* Enable interrupts and AHB access. */
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL,
	    AW_MMC_READ_4(sc, AW_MMC_GCTL) | AW_MMC_CTRL_INT_ENB);

	return (0);
}

static void
aw_mmc_req_done(struct aw_mmc_softc *sc)
{
	struct mmc_command *cmd;
	struct mmc_request *req;
	uint32_t val, mask;
	int retry;

	cmd = sc->aw_req->cmd;
	if (cmd->error != MMC_ERR_NONE) {
		/* Reset the FIFO and DMA engines. */
		mask = AW_MMC_CTRL_FIFO_RST | AW_MMC_CTRL_DMA_RST;
		val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
		AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);

		retry = AW_MMC_RESET_RETRY;
		while (--retry > 0) {
			val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
			if ((val & mask) == 0)
				break;
			DELAY(10);
		}
		if (retry == 0)
			device_printf(sc->aw_dev,
			    "timeout resetting DMA/FIFO\n");
		aw_mmc_update_clock(sc, 1);
	}

	req = sc->aw_req;
	callout_stop(&sc->aw_timeoutc);
	sc->aw_req = NULL;
	sc->aw_intr = 0;
	sc->aw_resid = 0;
	sc->aw_dma_map_err = 0;
	sc->aw_intr_wait = 0;
	req->done(req);
}

static void
aw_mmc_req_ok(struct aw_mmc_softc *sc)
{
	int timeout;
	struct mmc_command *cmd;
	uint32_t status;

	timeout = 1000;
	while (--timeout > 0) {
		status = AW_MMC_READ_4(sc, AW_MMC_STAR);
		if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
			break;
		DELAY(1000);
	}
	cmd = sc->aw_req->cmd;
	if (timeout == 0) {
		cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		return;
	}
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
			cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
			cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
			cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
		} else
			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
	}
	/* Has all of the data been transferred? */
	if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
		cmd->error = MMC_ERR_FAILED;
	aw_mmc_req_done(sc);
}
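
/*
 * Request watchdog: armed in aw_mmc_request() for sc->aw_timeout seconds
 * (the "req_timeout" sysctl) and stopped again in aw_mmc_req_done().
 */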
static void
aw_mmc_timeout(void *arg)
{
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	if (sc->aw_req != NULL) {
		device_printf(sc->aw_dev, "controller timeout\n");
		sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
		aw_mmc_req_done(sc);
	} else
		device_printf(sc->aw_dev,
		    "Spurious timeout - no active request\n");
}

static void
aw_mmc_intr(void *arg)
{
	bus_dmasync_op_t sync_op;
	struct aw_mmc_softc *sc;
	struct mmc_data *data;
	uint32_t idst, imask, rint;

	sc = (struct aw_mmc_softc *)arg;
	AW_MMC_LOCK(sc);
	rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
	idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
	imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
	if (idst == 0 && imask == 0 && rint == 0) {
		AW_MMC_UNLOCK(sc);
		return;
	}
#ifdef DEBUG
	device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
	    idst, imask, rint);
#endif
	if (sc->aw_req == NULL) {
		device_printf(sc->aw_dev,
		    "Spurious interrupt - no active request, rint: 0x%08X\n",
		    rint);
		goto end;
	}
	if (rint & AW_MMC_INT_ERR_BIT) {
		if (bootverbose)
			device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
		if (rint & AW_MMC_INT_RESP_TIMEOUT)
			sc->aw_req->cmd->error = MMC_ERR_TIMEOUT;
		else
			sc->aw_req->cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		goto end;
	}
	if (idst & AW_MMC_IDST_ERROR) {
		device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
		sc->aw_req->cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		goto end;
	}

	sc->aw_intr |= rint;
	data = sc->aw_req->cmd->data;
	if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
		if (data->flags & MMC_DATA_WRITE)
			sync_op = BUS_DMASYNC_POSTWRITE;
		else
			sync_op = BUS_DMASYNC_POSTREAD;
		bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
		    sync_op);
		bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
		sc->aw_resid = data->len >> 2;
	}
	if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
		aw_mmc_req_ok(sc);

end:
	AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
	AW_MMC_UNLOCK(sc);
}

static int
aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
{
	int blksz;
	struct aw_mmc_softc *sc;
	struct mmc_command *cmd;
	uint32_t cmdreg, imask;
	int err;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	if (sc->aw_req) {
		AW_MMC_UNLOCK(sc);
		return (EBUSY);
	}

	sc->aw_req = req;
	cmd = req->cmd;
	cmdreg = AW_MMC_CMDR_LOAD;
	imask = AW_MMC_INT_ERR_BIT;
	sc->aw_intr_wait = 0;
	sc->aw_intr = 0;
	sc->aw_resid = 0;
	cmd->error = MMC_ERR_NONE;

	if (cmd->opcode == MMC_GO_IDLE_STATE)
		cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;

	if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= AW_MMC_CMDR_RESP_RCV;
	if (cmd->flags & MMC_RSP_136)
		cmdreg |= AW_MMC_CMDR_LONG_RESP;
	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;

	if (cmd->data) {
		cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;

		if (cmd->data->flags & MMC_DATA_MULTI) {
			cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
			imask |= AW_MMC_INT_AUTO_STOP_DONE;
			sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
		} else {
			sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
			imask |= AW_MMC_INT_DATA_OVER;
		}
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdreg |= AW_MMC_CMDR_DIR_WRITE;

		blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
		AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
		AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
	} else {
		imask |= AW_MMC_INT_CMD_DONE;
	}

	/* Enable the interrupts we are interested in */
	AW_MMC_WRITE_4(sc, AW_MMC_IMKR, imask);
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);

	/* Enable auto stop if needed */
	AW_MMC_WRITE_4(sc, AW_MMC_A12A,
	    cmdreg & AW_MMC_CMDR_STOP_CMD_FLAG ? 0 : 0xffff);

	/* Write the command argument */
	AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);

	/*
	 * If there is no data, simply start the request;
	 * otherwise prepare the DMA transfer first, then start the request.
	 */
	if (cmd->data == NULL) {
		AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
	} else {
		err = aw_mmc_prepare_dma(sc);
		if (err != 0)
			device_printf(sc->aw_dev,
			    "prepare_dma failed: %d\n", err);

		AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
	}

	callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
	    aw_mmc_timeout, sc);
	AW_MMC_UNLOCK(sc);

	return (0);
}

static int
aw_mmc_read_ivar(device_t bus, device_t child, int which,
    uintptr_t *result)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		*(int *)result = sc->aw_host.ios.bus_mode;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		*(int *)result = sc->aw_host.ios.bus_width;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		*(int *)result = sc->aw_host.ios.chip_select;
		break;
	case MMCBR_IVAR_CLOCK:
		*(int *)result = sc->aw_host.ios.clock;
		break;
	case MMCBR_IVAR_F_MIN:
		*(int *)result = sc->aw_host.f_min;
		break;
	case MMCBR_IVAR_F_MAX:
		*(int *)result = sc->aw_host.f_max;
		break;
	case MMCBR_IVAR_HOST_OCR:
		*(int *)result = sc->aw_host.host_ocr;
		break;
	case MMCBR_IVAR_MODE:
		*(int *)result = sc->aw_host.mode;
		break;
	case MMCBR_IVAR_OCR:
		*(int *)result = sc->aw_host.ocr;
		break;
	case MMCBR_IVAR_POWER_MODE:
		*(int *)result = sc->aw_host.ios.power_mode;
		break;
	case MMCBR_IVAR_VDD:
		*(int *)result = sc->aw_host.ios.vdd;
		break;
	case MMCBR_IVAR_VCCQ:
		*(int *)result = sc->aw_host.ios.vccq;
		break;
	case MMCBR_IVAR_CAPS:
		*(int *)result = sc->aw_host.caps;
		break;
	case MMCBR_IVAR_TIMING:
		*(int *)result = sc->aw_host.ios.timing;
		break;
	case MMCBR_IVAR_MAX_DATA:
		*(int *)result = 65535;
		break;
	}

	return (0);
}

static int
aw_mmc_write_ivar(device_t bus, device_t child, int which,
    uintptr_t value)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		sc->aw_host.ios.bus_mode = value;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		sc->aw_host.ios.bus_width = value;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		sc->aw_host.ios.chip_select = value;
		break;
	case MMCBR_IVAR_CLOCK:
		sc->aw_host.ios.clock = value;
		break;
	case MMCBR_IVAR_MODE:
		sc->aw_host.mode = value;
		break;
	case MMCBR_IVAR_OCR:
		sc->aw_host.ocr = value;
		break;
	case MMCBR_IVAR_POWER_MODE:
		sc->aw_host.ios.power_mode = value;
		break;
	case MMCBR_IVAR_VDD:
		sc->aw_host.ios.vdd = value;
		break;
	case MMCBR_IVAR_VCCQ:
		sc->aw_host.ios.vccq = value;
		break;
	case MMCBR_IVAR_TIMING:
		sc->aw_host.ios.timing = value;
		break;
	/* These are read-only */
	case MMCBR_IVAR_CAPS:
	case MMCBR_IVAR_HOST_OCR:
	case MMCBR_IVAR_F_MIN:
	case MMCBR_IVAR_F_MAX:
	case MMCBR_IVAR_MAX_DATA:
		return (EINVAL);
	}

	return (0);
}

static int
aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
{
	uint32_t reg;
	int retry;

	reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
	reg &= ~(AW_MMC_CKCR_CCLK_ENB | AW_MMC_CKCR_CCLK_CTRL |
	    AW_MMC_CKCR_CCLK_MASK_DATA0);

	if (clkon)
		reg |= AW_MMC_CKCR_CCLK_ENB;
	if (sc->aw_mmc_conf->mask_data0)
		reg |= AW_MMC_CKCR_CCLK_MASK_DATA0;

	AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);

	reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
	    AW_MMC_CMDR_WAIT_PRE_OVER;
	AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg);
	retry = 0xfffff;

	while (reg & AW_MMC_CMDR_LOAD && --retry > 0) {
		reg = AW_MMC_READ_4(sc, AW_MMC_CMDR);
		DELAY(10);
	}
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);

	if (reg & AW_MMC_CMDR_LOAD) {
		device_printf(sc->aw_dev, "timeout updating clock\n");
		return (ETIMEDOUT);
	}

	if (sc->aw_mmc_conf->mask_data0) {
		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
		reg &= ~AW_MMC_CKCR_CCLK_MASK_DATA0;
		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
	}

	return (0);
}

static void
aw_mmc_set_vccq(struct aw_mmc_softc *sc, int32_t vccq)
{
	int uvolt;

	if (sc->aw_reg_vqmmc == NULL)
		return;

	switch (vccq) {
	case vccq_180:
		uvolt = 1800000;
		break;
	case vccq_330:
		uvolt = 3300000;
		break;
	default:
		return;
	}

	if (regulator_set_voltage(sc->aw_reg_vqmmc,
	    uvolt, uvolt) != 0)
		device_printf(sc->aw_dev,
		    "Cannot set vqmmc to %d<->%d\n",
		    uvolt,
		    uvolt);
}
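
/*
 * Apply the bus settings requested by the MMC stack: bus width, power
 * state, signalling voltage, (DDR) timing and card clock.  The card
 * clock is gated via aw_mmc_update_clock() while the module clock
 * frequency is being changed.
 */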
static int
aw_mmc_update_ios(device_t bus, device_t child)
{
	int error;
	struct aw_mmc_softc *sc;
	struct mmc_ios *ios;
	unsigned int clock;
	uint32_t reg, div = 1;

	sc = device_get_softc(bus);

	ios = &sc->aw_host.ios;

	/* Set the bus width. */
	switch (ios->bus_width) {
	case bus_width_1:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
		break;
	case bus_width_4:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
		break;
	case bus_width_8:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
		break;
	}

	switch (ios->power_mode) {
	case power_on:
		break;
	case power_off:
		if (bootverbose)
			device_printf(sc->aw_dev, "Powering down sd/mmc\n");

		if (sc->aw_reg_vmmc)
			regulator_disable(sc->aw_reg_vmmc);
		if (sc->aw_reg_vqmmc)
			regulator_disable(sc->aw_reg_vqmmc);

		aw_mmc_reset(sc);
		break;
	case power_up:
		if (bootverbose)
			device_printf(sc->aw_dev, "Powering up sd/mmc\n");

		if (sc->aw_reg_vmmc)
			regulator_enable(sc->aw_reg_vmmc);
		if (sc->aw_reg_vqmmc)
			regulator_enable(sc->aw_reg_vqmmc);
		aw_mmc_init(sc);
		break;
	}

	if (ios->vccq != sc->aw_vccq) {
		aw_mmc_set_vccq(sc, ios->vccq);
		sc->aw_vccq = ios->vccq;
	}

	/* Enable DDR mode if needed */
	reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	if (ios->timing == bus_timing_uhs_ddr50 ||
	    ios->timing == bus_timing_mmc_ddr52)
		reg |= AW_MMC_CTRL_DDR_MOD_SEL;
	else
		reg &= ~AW_MMC_CTRL_DDR_MOD_SEL;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);

	if (ios->clock && ios->clock != sc->aw_clock) {
		sc->aw_clock = clock = ios->clock;

		/* Disable clock */
		error = aw_mmc_update_clock(sc, 0);
		if (error != 0)
			return (error);

		if (ios->timing == bus_timing_mmc_ddr52 &&
		    (sc->aw_mmc_conf->new_timing ||
		    ios->bus_width == bus_width_8)) {
			div = 2;
			clock <<= 1;
		}

		/* Reset the divider. */
		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
		reg &= ~AW_MMC_CKCR_CCLK_DIV;
		reg |= div - 1;
		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);

		/* New timing mode if needed */
		if (sc->aw_mmc_conf->new_timing) {
			reg = AW_MMC_READ_4(sc, AW_MMC_NTSR);
			reg |= AW_MMC_NTSR_MODE_SELECT;
			AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg);
		}

		/* Set the MMC clock. */
		error = clk_set_freq(sc->aw_clk_mmc, clock,
		    CLK_SET_ROUND_DOWN);
		if (error != 0) {
			device_printf(sc->aw_dev,
			    "failed to set frequency to %u Hz: %d\n",
			    clock, error);
			return (error);
		}

		if (sc->aw_mmc_conf->can_calibrate)
			AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL,
			    AW_MMC_SAMP_DL_SW_EN);

		/* Enable clock. */
		error = aw_mmc_update_clock(sc, 1);
		if (error != 0)
			return (error);
	}

	return (0);
}
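
/* No write-protect detection is implemented; report the card as writable. */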
static int
aw_mmc_get_ro(device_t bus, device_t child)
{

	return (0);
}

static int
aw_mmc_acquire_host(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;
	int error;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	while (sc->aw_bus_busy) {
		error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
		if (error != 0) {
			AW_MMC_UNLOCK(sc);
			return (error);
		}
	}
	sc->aw_bus_busy++;
	AW_MMC_UNLOCK(sc);

	return (0);
}

static int
aw_mmc_release_host(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	sc->aw_bus_busy--;
	wakeup(sc);
	AW_MMC_UNLOCK(sc);

	return (0);
}

static device_method_t aw_mmc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, aw_mmc_probe),
	DEVMETHOD(device_attach, aw_mmc_attach),
	DEVMETHOD(device_detach, aw_mmc_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar, aw_mmc_read_ivar),
	DEVMETHOD(bus_write_ivar, aw_mmc_write_ivar),

	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios, aw_mmc_update_ios),
	DEVMETHOD(mmcbr_request, aw_mmc_request),
	DEVMETHOD(mmcbr_get_ro, aw_mmc_get_ro),
	DEVMETHOD(mmcbr_acquire_host, aw_mmc_acquire_host),
	DEVMETHOD(mmcbr_release_host, aw_mmc_release_host),

	DEVMETHOD_END
};

static devclass_t aw_mmc_devclass;

static driver_t aw_mmc_driver = {
	"aw_mmc",
	aw_mmc_methods,
	sizeof(struct aw_mmc_softc),
};

DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, aw_mmc_devclass, NULL,
    NULL);
MMC_DECLARE_BRIDGE(aw_mmc);