/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2018 Emmanuel Vadot <manu@FreeBSD.org>
 * Copyright (c) 2013 Alexander Fedorov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcbrvar.h>
#include <dev/mmc/mmc_fdt_helpers.h>

#include <arm/allwinner/aw_mmc.h>
#include <dev/clk/clk.h>
#include <dev/hwreset/hwreset.h>
#include <dev/regulator/regulator.h>

#include "opt_mmccam.h"

#ifdef MMCCAM
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/mmc/mmc_sim.h>

#include "mmc_sim_if.h"
#endif

#include "mmc_pwrseq_if.h"

#define	AW_MMC_MEMRES		0
#define	AW_MMC_IRQRES		1
#define	AW_MMC_RESSZ		2
#define	AW_MMC_DMA_SEGS		(PAGE_SIZE / sizeof(struct aw_mmc_dma_desc))
#define	AW_MMC_DMA_DESC_SIZE	(sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS)
#define	AW_MMC_DMA_FTRGLEVEL	0x20070008

#define	AW_MMC_RESET_RETRY	1000

#define	CARD_ID_FREQUENCY	400000

struct aw_mmc_conf {
	uint32_t	dma_xferlen;
	bool		mask_data0;
	bool		can_calibrate;
	bool		new_timing;
};

static const struct aw_mmc_conf a10_mmc_conf = {
	.dma_xferlen = 0x2000,
};

static const struct aw_mmc_conf a13_mmc_conf = {
	.dma_xferlen = 0x10000,
};

static const struct aw_mmc_conf a64_mmc_conf = {
	.dma_xferlen = 0x10000,
	.mask_data0 = true,
	.can_calibrate = true,
	.new_timing = true,
};

static const struct aw_mmc_conf a64_emmc_conf = {
	.dma_xferlen = 0x2000,
	.can_calibrate = true,
};

static struct ofw_compat_data compat_data[] = {
	{"allwinner,sun4i-a10-mmc",	(uintptr_t)&a10_mmc_conf},
	{"allwinner,sun5i-a13-mmc",	(uintptr_t)&a13_mmc_conf},
	{"allwinner,sun7i-a20-mmc",	(uintptr_t)&a13_mmc_conf},
	{"allwinner,sun50i-a64-mmc",	(uintptr_t)&a64_mmc_conf},
	{"allwinner,sun50i-a64-emmc",	(uintptr_t)&a64_emmc_conf},
	{NULL,				0}
};

struct aw_mmc_softc {
	device_t		aw_dev;
	clk_t			aw_clk_ahb;
	clk_t			aw_clk_mmc;
	hwreset_t		aw_rst_ahb;
	int			aw_bus_busy;
	int			aw_resid;
	int			aw_timeout;
	struct callout		aw_timeoutc;
	struct mmc_host		aw_host;
	struct mmc_helper	mmc_helper;
#ifdef MMCCAM
	union ccb *		ccb;
	struct mmc_sim		mmc_sim;
#else
	struct mmc_request *	aw_req;
#endif
	struct mtx		aw_mtx;
	struct resource *	aw_res[AW_MMC_RESSZ];
	struct aw_mmc_conf *	aw_mmc_conf;
	uint32_t		aw_intr;
	uint32_t		aw_intr_wait;
	void *			aw_intrhand;
	unsigned int		aw_clock;
	device_t		child;

	/* Fields required for DMA access. */
	bus_addr_t		aw_dma_desc_phys;
	bus_dmamap_t		aw_dma_map;
	bus_dma_tag_t		aw_dma_tag;
	void *			aw_dma_desc;
	bus_dmamap_t		aw_dma_buf_map;
	bus_dma_tag_t		aw_dma_buf_tag;
	int			aw_dma_map_err;
};

static struct resource_spec aw_mmc_res_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,	0 }
};

static int aw_mmc_probe(device_t);
static int aw_mmc_attach(device_t);
static int aw_mmc_detach(device_t);
static int aw_mmc_setup_dma(struct aw_mmc_softc *);
static void aw_mmc_teardown_dma(struct aw_mmc_softc *sc);
static int aw_mmc_reset(struct aw_mmc_softc *);
static int aw_mmc_init(struct aw_mmc_softc *);
static void aw_mmc_intr(void *);
static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t);
static void aw_mmc_helper_cd_handler(device_t, bool);

static void aw_mmc_print_error(uint32_t);
static int aw_mmc_update_ios(device_t, device_t);
static int aw_mmc_request(device_t, device_t, struct mmc_request *);

#ifndef MMCCAM
static int aw_mmc_get_ro(device_t, device_t);
static int aw_mmc_acquire_host(device_t, device_t);
static int aw_mmc_release_host(device_t, device_t);
#endif

#define	AW_MMC_LOCK(_sc)	mtx_lock(&(_sc)->aw_mtx)
#define	AW_MMC_UNLOCK(_sc)	mtx_unlock(&(_sc)->aw_mtx)
#define	AW_MMC_READ_4(_sc, _reg)					\
	bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg)
#define	AW_MMC_WRITE_4(_sc, _reg, _value)				\
	bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value)

SYSCTL_NODE(_hw, OID_AUTO, aw_mmc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "aw_mmc driver");

static int aw_mmc_debug = 0;
SYSCTL_INT(_hw_aw_mmc, OID_AUTO, debug, CTLFLAG_RWTUN, &aw_mmc_debug, 0,
    "Debug level bit0=card changes, bit1=ios changes, bit2=interrupts, bit3=commands");
#define	AW_MMC_DEBUG_CARD	0x1
#define	AW_MMC_DEBUG_IOS	0x2
#define	AW_MMC_DEBUG_INT	0x4
#define	AW_MMC_DEBUG_CMD	0x8

#ifdef MMCCAM
static int
aw_mmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(dev);

	cts->host_ocr = sc->aw_host.host_ocr;
	cts->host_f_min = sc->aw_host.f_min;
	cts->host_f_max = sc->aw_host.f_max;
	cts->host_caps = sc->aw_host.caps;
	cts->host_max_data = (sc->aw_mmc_conf->dma_xferlen *
	    AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE;
	memcpy(&cts->ios, &sc->aw_host.ios, sizeof(struct mmc_ios));

	return (0);
}

static int
aw_mmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
{
	struct aw_mmc_softc *sc;
	struct mmc_ios *ios;
	struct mmc_ios *new_ios;

	sc = device_get_softc(dev);
	ios = &sc->aw_host.ios;
	new_ios = &cts->ios;

	/* Update only requested fields */
	if (cts->ios_valid & MMC_CLK) {
		ios->clock = new_ios->clock;
		if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
			device_printf(sc->aw_dev, "Clock => %d\n", ios->clock);
	}
	if (cts->ios_valid & MMC_VDD) {
		ios->vdd = new_ios->vdd;
		if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
			device_printf(sc->aw_dev, "VDD => %d\n", ios->vdd);
	}
	if (cts->ios_valid & MMC_CS) {
		ios->chip_select = new_ios->chip_select;
		if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
			device_printf(sc->aw_dev, "CS => %d\n", ios->chip_select);
	}
	if (cts->ios_valid & MMC_BW) {
		ios->bus_width = new_ios->bus_width;
		if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
			device_printf(sc->aw_dev, "Bus width => %d\n", ios->bus_width);
	}
	if (cts->ios_valid & MMC_PM) {
		ios->power_mode = new_ios->power_mode;
		if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
			device_printf(sc->aw_dev, "Power mode => %d\n", ios->power_mode);
	}
	if (cts->ios_valid & MMC_BT) {
		ios->timing = new_ios->timing;
		if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
			device_printf(sc->aw_dev, "Timing => %d\n", ios->timing);
	}
	if (cts->ios_valid & MMC_BM) {
		ios->bus_mode = new_ios->bus_mode;
		if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS))
			device_printf(sc->aw_dev, "Bus mode => %d\n", ios->bus_mode);
	}

	return (aw_mmc_update_ios(sc->aw_dev, NULL));
}

static int
aw_mmc_cam_request(device_t dev, union ccb *ccb)
{
	struct aw_mmc_softc *sc;
	struct ccb_mmcio *mmcio;

	sc = device_get_softc(dev);
	mmcio = &ccb->mmcio;

	AW_MMC_LOCK(sc);

	if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
		device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
		    mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
		    mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
		    mmcio->cmd.data != NULL ? mmcio->cmd.data->flags : 0);
	}
	if (mmcio->cmd.data != NULL) {
		if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
			panic("data->len = %d, data->flags = %d -- something is b0rked",
			    (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
	}
	if (sc->ccb != NULL) {
		device_printf(sc->aw_dev, "Controller still has an active command\n");
		/* Drop the lock before bailing out so we don't leak it. */
		AW_MMC_UNLOCK(sc);
		return (EBUSY);
	}
	sc->ccb = ccb;
	/* aw_mmc_request locks again */
	AW_MMC_UNLOCK(sc);
	aw_mmc_request(sc->aw_dev, NULL, NULL);

	return (0);
}

static void
aw_mmc_cam_poll(device_t dev)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(dev);
	aw_mmc_intr(sc);
}
#endif /* MMCCAM */

static void
aw_mmc_helper_cd_handler(device_t dev, bool present)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(dev);
#ifdef MMCCAM
	mmc_cam_sim_discover(&sc->mmc_sim);
#else
	AW_MMC_LOCK(sc);
	if (present) {
		if (sc->child == NULL) {
			if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
				device_printf(sc->aw_dev, "Card inserted\n");

			sc->child = device_add_child(sc->aw_dev, "mmc", DEVICE_UNIT_ANY);
			AW_MMC_UNLOCK(sc);
			if (sc->child) {
				device_set_ivars(sc->child, sc);
				(void)device_probe_and_attach(sc->child);
			}
		} else
			AW_MMC_UNLOCK(sc);
	} else {
		/* Card isn't present, detach if necessary */
		if (sc->child != NULL) {
			if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
				device_printf(sc->aw_dev, "Card removed\n");

			AW_MMC_UNLOCK(sc);
			device_delete_child(sc->aw_dev, sc->child);
			sc->child = NULL;
		} else
			AW_MMC_UNLOCK(sc);
	}
#endif /* MMCCAM */
}

static int
aw_mmc_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Allwinner Integrated MMC/SD controller");

	return (BUS_PROBE_DEFAULT);
}

static int
aw_mmc_attach(device_t dev)
{
	struct aw_mmc_softc *sc;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *tree;
	int error;

	sc = device_get_softc(dev);
	sc->aw_dev = dev;

	sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;

#ifndef MMCCAM
	sc->aw_req = NULL;
#endif
	if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) {
		device_printf(dev, "cannot allocate device resources\n");
		return (ENXIO);
	}
	if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, aw_mmc_intr, sc,
	    &sc->aw_intrhand)) {
		bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);
		device_printf(dev, "cannot setup interrupt handler\n");
		return (ENXIO);
	}
	mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc",
	    MTX_DEF);
	callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0);

	/* De-assert reset */
	if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) {
		error = hwreset_deassert(sc->aw_rst_ahb);
		if (error != 0) {
			device_printf(dev, "cannot de-assert reset\n");
			goto fail;
		}
	}

	/* Activate the module clock. */
	error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb clock\n");
		goto fail;
	}
	error = clk_enable(sc->aw_clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot enable ahb clock\n");
		goto fail;
	}
	error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot get mmc clock\n");
		goto fail;
	}
	error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY,
	    CLK_SET_ROUND_DOWN);
	if (error != 0) {
		device_printf(dev, "cannot init mmc clock\n");
		goto fail;
	}
	error = clk_enable(sc->aw_clk_mmc);
	if (error != 0) {
		device_printf(dev, "cannot enable mmc clock\n");
		goto fail;
	}

	sc->aw_timeout = 10;
	ctx = device_get_sysctl_ctx(dev);
	tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
	    &sc->aw_timeout, 0, "Request timeout in seconds");

	/* Soft Reset controller. */
	if (aw_mmc_reset(sc) != 0) {
		device_printf(dev, "cannot reset the controller\n");
		goto fail;
	}

	if (aw_mmc_setup_dma(sc) != 0) {
		device_printf(sc->aw_dev, "Couldn't setup DMA!\n");
		goto fail;
	}

	/* Set some defaults for freq and supported mode */
	sc->aw_host.f_min = 400000;
	sc->aw_host.f_max = 52000000;
	sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->aw_host.caps |= MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
	mmc_fdt_parse(dev, 0, &sc->mmc_helper, &sc->aw_host);
	mmc_fdt_gpio_setup(dev, 0, &sc->mmc_helper, aw_mmc_helper_cd_handler);

#ifdef MMCCAM
	sc->ccb = NULL;

	if (mmc_cam_sim_alloc(dev, "aw_mmc", &sc->mmc_sim) != 0) {
		device_printf(dev, "cannot alloc cam sim\n");
		goto fail;
	}
#endif /* MMCCAM */

	return (0);

fail:
	callout_drain(&sc->aw_timeoutc);
	mtx_destroy(&sc->aw_mtx);
	bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
	bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);

	return (ENXIO);
}

static int
aw_mmc_detach(device_t dev)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(dev);

	clk_disable(sc->aw_clk_mmc);
	clk_disable(sc->aw_clk_ahb);
	hwreset_assert(sc->aw_rst_ahb);

	mmc_fdt_gpio_teardown(&sc->mmc_helper);

	callout_drain(&sc->aw_timeoutc);

	device_delete_children(sc->aw_dev);

	aw_mmc_teardown_dma(sc);

	mtx_destroy(&sc->aw_mtx);

	bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand);
	bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res);

#ifdef MMCCAM
	mmc_cam_sim_free(&sc->mmc_sim);
#endif

	return (0);
}

static void
aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	if (err) {
		sc->aw_dma_map_err = err;
		return;
	}
	sc->aw_dma_desc_phys = segs[0].ds_addr;
}

static int
aw_mmc_setup_dma(struct aw_mmc_softc *sc)
{
	int error;

	/* Allocate the DMA descriptor memory. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->aw_dev),	/* parent */
	    AW_MMC_DMA_ALIGN, 0,		/* align, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    AW_MMC_DMA_DESC_SIZE, 1,		/* maxsize, nsegment */
	    AW_MMC_DMA_DESC_SIZE,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lock, lockarg */
	    &sc->aw_dma_tag);
	if (error)
		return (error);

	error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->aw_dma_map);
	if (error)
		return (error);

	error = bus_dmamap_load(sc->aw_dma_tag,
	    sc->aw_dma_map,
	    sc->aw_dma_desc, AW_MMC_DMA_DESC_SIZE,
	    aw_dma_desc_cb, sc, 0);
	if (error)
		return (error);
	if (sc->aw_dma_map_err)
		return (sc->aw_dma_map_err);

	/* Create the DMA map for data transfers. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->aw_dev),	/* parent */
	    AW_MMC_DMA_ALIGN, 0,		/* align, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    sc->aw_mmc_conf->dma_xferlen *
	    AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS,	/* maxsize, nsegments */
	    sc->aw_mmc_conf->dma_xferlen,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,			/* flags */
	    NULL, NULL,				/* lock, lockarg */
	    &sc->aw_dma_buf_tag);
	if (error)
		return (error);
	error = bus_dmamap_create(sc->aw_dma_buf_tag, 0,
	    &sc->aw_dma_buf_map);
	if (error)
		return (error);

	return (0);
}

static void
aw_mmc_teardown_dma(struct aw_mmc_softc *sc)
{

	bus_dmamap_unload(sc->aw_dma_tag, sc->aw_dma_map);
	bus_dmamem_free(sc->aw_dma_tag, sc->aw_dma_desc, sc->aw_dma_map);
	if (bus_dma_tag_destroy(sc->aw_dma_tag) != 0)
		device_printf(sc->aw_dev, "Cannot destroy the dma tag\n");

	bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
	bus_dmamap_destroy(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
	if (bus_dma_tag_destroy(sc->aw_dma_buf_tag) != 0)
		device_printf(sc->aw_dev, "Cannot destroy the dma buf tag\n");
}

static void
aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
	int i;
	struct aw_mmc_dma_desc *dma_desc;
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
	sc->aw_dma_map_err = err;

	if (err)
		return;

	dma_desc = sc->aw_dma_desc;
	for (i = 0; i < nsegs; i++) {
		if (segs[i].ds_len == sc->aw_mmc_conf->dma_xferlen)
			dma_desc[i].buf_size = 0;	/* Size of 0 indicates max len */
		else
			dma_desc[i].buf_size = segs[i].ds_len;
		dma_desc[i].buf_addr = segs[i].ds_addr;
		dma_desc[i].config = AW_MMC_DMA_CONFIG_CH |
		    AW_MMC_DMA_CONFIG_OWN | AW_MMC_DMA_CONFIG_DIC;

		dma_desc[i].next = sc->aw_dma_desc_phys +
		    ((i + 1) * sizeof(struct aw_mmc_dma_desc));
	}

	dma_desc[0].config |= AW_MMC_DMA_CONFIG_FD;
	dma_desc[nsegs - 1].config |= AW_MMC_DMA_CONFIG_LD |
	    AW_MMC_DMA_CONFIG_ER;
	dma_desc[nsegs - 1].config &= ~AW_MMC_DMA_CONFIG_DIC;
	dma_desc[nsegs - 1].next = 0;
}

static int
aw_mmc_prepare_dma(struct aw_mmc_softc *sc)
{
	bus_dmasync_op_t sync_op;
	int error;
	struct mmc_command *cmd;
	uint32_t val;

#ifdef MMCCAM
	cmd = &sc->ccb->mmcio.cmd;
#else
	cmd = sc->aw_req->cmd;
#endif
	if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS))
		return (EFBIG);
	error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
	    cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0);
	if (error)
		return (error);
	if (sc->aw_dma_map_err)
		return (sc->aw_dma_map_err);

	if (cmd->data->flags & MMC_DATA_WRITE)
		sync_op = BUS_DMASYNC_PREWRITE;
	else
		sync_op = BUS_DMASYNC_PREREAD;
	bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op);
	bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE);

	/* Enable DMA */
	val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	val &= ~AW_MMC_GCTL_FIFO_AC_MOD;
	val |= AW_MMC_GCTL_DMA_ENB;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);

	/* Reset DMA */
	val |= AW_MMC_GCTL_DMA_RST;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val);

	AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST);
	AW_MMC_WRITE_4(sc, AW_MMC_DMAC,
	    AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST);

	/* Enable RX or TX DMA interrupt */
	val = AW_MMC_READ_4(sc, AW_MMC_IDIE);
	if (cmd->data->flags & MMC_DATA_WRITE)
		val |= AW_MMC_IDST_TX_INT;
	else
		val |= AW_MMC_IDST_RX_INT;
	AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val);

	/* Set DMA descriptor list address */
	AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys);

	/* FIFO trigger level */
	AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL);

	return (0);
}

static int
aw_mmc_reset(struct aw_mmc_softc *sc)
{
	uint32_t reg;
	int timeout;

	reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	reg |= AW_MMC_GCTL_RESET;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);
	timeout = AW_MMC_RESET_RETRY;
	while (--timeout > 0) {
		if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0)
			break;
		DELAY(100);
	}
	if (timeout == 0)
		return (ETIMEDOUT);

	return (0);
}

static int
aw_mmc_init(struct aw_mmc_softc *sc)
{
	uint32_t reg;
	int ret;

	ret = aw_mmc_reset(sc);
	if (ret != 0)
		return (ret);

	/* Set the timeout. */
	AW_MMC_WRITE_4(sc, AW_MMC_TMOR,
	    AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) |
	    AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK));

	/* Unmask interrupts. */
	AW_MMC_WRITE_4(sc, AW_MMC_IMKR, 0);

	/* Clear pending interrupts. */
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);

	/* Debug register, undocumented */
	AW_MMC_WRITE_4(sc, AW_MMC_DBGC, 0xdeb);

	/* Function select register */
	AW_MMC_WRITE_4(sc, AW_MMC_FUNS, 0xceaa0000);

	AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff);

	/* Enable interrupts and disable AHB access. */
	reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	reg |= AW_MMC_GCTL_INT_ENB;
	reg &= ~AW_MMC_GCTL_FIFO_AC_MOD;
	reg &= ~AW_MMC_GCTL_WAIT_MEM_ACCESS;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);

	return (0);
}

static void
aw_mmc_req_done(struct aw_mmc_softc *sc)
{
	struct mmc_command *cmd;
#ifdef MMCCAM
	union ccb *ccb;
#else
	struct mmc_request *req;
#endif
	uint32_t val, mask;
	int retry;

#ifdef MMCCAM
	ccb = sc->ccb;
	cmd = &ccb->mmcio.cmd;
#else
	cmd = sc->aw_req->cmd;
#endif
	if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
		device_printf(sc->aw_dev, "%s: cmd %d err %d\n", __func__,
		    cmd->opcode, cmd->error);
	}
	if (cmd->error != MMC_ERR_NONE) {
		/* Reset the FIFO and DMA engines. */
		mask = AW_MMC_GCTL_FIFO_RST | AW_MMC_GCTL_DMA_RST;
		val = AW_MMC_READ_4(sc, AW_MMC_GCTL);
		AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask);

		retry = AW_MMC_RESET_RETRY;
		while (--retry > 0) {
			if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) &
			    AW_MMC_GCTL_RESET) == 0)
				break;
			DELAY(100);
		}
		if (retry == 0)
			device_printf(sc->aw_dev,
			    "timeout resetting DMA/FIFO\n");
		aw_mmc_update_clock(sc, 1);
	}

	if (!dumping)
		callout_stop(&sc->aw_timeoutc);
	sc->aw_intr = 0;
	sc->aw_resid = 0;
	sc->aw_dma_map_err = 0;
	sc->aw_intr_wait = 0;
#ifdef MMCCAM
	sc->ccb = NULL;
	ccb->ccb_h.status =
	    (ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
	xpt_done(ccb);
#else
	req = sc->aw_req;
	sc->aw_req = NULL;
	req->done(req);
#endif
}

static void
aw_mmc_req_ok(struct aw_mmc_softc *sc)
{
	int timeout;
	struct mmc_command *cmd;
	uint32_t status;

	timeout = 1000;
	while (--timeout > 0) {
		status = AW_MMC_READ_4(sc, AW_MMC_STAR);
		if ((status & AW_MMC_STAR_CARD_BUSY) == 0)
			break;
		DELAY(1000);
	}
#ifdef MMCCAM
	cmd = &sc->ccb->mmcio.cmd;
#else
	cmd = sc->aw_req->cmd;
#endif
	if (timeout == 0) {
		cmd->error = MMC_ERR_FAILED;
		aw_mmc_req_done(sc);
		return;
	}
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3);
			cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2);
			cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1);
			cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
		} else
			cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0);
	}
	/* All data has been transferred? */
	if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len)
		cmd->error = MMC_ERR_FAILED;
	aw_mmc_req_done(sc);
}

static inline void
set_mmc_error(struct aw_mmc_softc *sc, int error_code)
{
#ifdef MMCCAM
	sc->ccb->mmcio.cmd.error = error_code;
#else
	sc->aw_req->cmd->error = error_code;
#endif
}

static void
aw_mmc_timeout(void *arg)
{
	struct aw_mmc_softc *sc;

	sc = (struct aw_mmc_softc *)arg;
#ifdef MMCCAM
	if (sc->ccb != NULL) {
#else
	if (sc->aw_req != NULL) {
#endif
		device_printf(sc->aw_dev, "controller timeout\n");
		set_mmc_error(sc, MMC_ERR_TIMEOUT);
		aw_mmc_req_done(sc);
	} else
		device_printf(sc->aw_dev,
		    "Spurious timeout - no active request\n");
}

static void
aw_mmc_print_error(uint32_t err)
{
	if (err & AW_MMC_INT_RESP_ERR)
		printf("AW_MMC_INT_RESP_ERR ");
	if (err & AW_MMC_INT_RESP_CRC_ERR)
		printf("AW_MMC_INT_RESP_CRC_ERR ");
	if (err & AW_MMC_INT_DATA_CRC_ERR)
		printf("AW_MMC_INT_DATA_CRC_ERR ");
	if (err & AW_MMC_INT_RESP_TIMEOUT)
		printf("AW_MMC_INT_RESP_TIMEOUT ");
	if (err & AW_MMC_INT_FIFO_RUN_ERR)
		printf("AW_MMC_INT_FIFO_RUN_ERR ");
	if (err & AW_MMC_INT_CMD_BUSY)
		printf("AW_MMC_INT_CMD_BUSY ");
	if (err & AW_MMC_INT_DATA_START_ERR)
		printf("AW_MMC_INT_DATA_START_ERR ");
	if (err & AW_MMC_INT_DATA_END_BIT_ERR)
		printf("AW_MMC_INT_DATA_END_BIT_ERR");
	printf("\n");
}

static void
aw_mmc_intr(void *arg)
{
	bus_dmasync_op_t sync_op;
	struct aw_mmc_softc *sc;
	struct mmc_data *data;
	uint32_t idst, imask, rint;

	sc = (struct aw_mmc_softc *)arg;
	AW_MMC_LOCK(sc);
	rint = AW_MMC_READ_4(sc, AW_MMC_RISR);
	idst = AW_MMC_READ_4(sc, AW_MMC_IDST);
	imask = AW_MMC_READ_4(sc, AW_MMC_IMKR);
	if (idst == 0 && imask == 0 && rint == 0) {
		AW_MMC_UNLOCK(sc);
		return;
	}
	if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) {
		device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n",
		    idst, imask, rint);
	}
#ifdef MMCCAM
	if (sc->ccb == NULL) {
#else
	if (sc->aw_req == NULL) {
#endif
		device_printf(sc->aw_dev,
		    "Spurious interrupt - no active request, rint: 0x%08X\n",
		    rint);
		aw_mmc_print_error(rint);
		goto end;
	}
	if (rint & AW_MMC_INT_ERR_BIT) {
		if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) {
			device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint);
			aw_mmc_print_error(rint);
		}
		if (rint & AW_MMC_INT_RESP_TIMEOUT)
			set_mmc_error(sc, MMC_ERR_TIMEOUT);
		else
			set_mmc_error(sc, MMC_ERR_FAILED);
		aw_mmc_req_done(sc);
		goto end;
	}
	if (idst & AW_MMC_IDST_ERROR) {
		if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT))
			device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst);
		set_mmc_error(sc, MMC_ERR_FAILED);
		aw_mmc_req_done(sc);
		goto end;
	}

	sc->aw_intr |= rint;
#ifdef MMCCAM
	data = sc->ccb->mmcio.cmd.data;
#else
	data = sc->aw_req->cmd->data;
#endif
	if (data != NULL && (idst & AW_MMC_IDST_COMPLETE) != 0) {
		if (data->flags & MMC_DATA_WRITE)
			sync_op = BUS_DMASYNC_POSTWRITE;
		else
			sync_op = BUS_DMASYNC_POSTREAD;
		bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map,
		    sync_op);
		bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map);
		sc->aw_resid = data->len >> 2;
	}
	if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait)
		aw_mmc_req_ok(sc);

end:
	AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst);
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint);
	AW_MMC_UNLOCK(sc);
}

static int
aw_mmc_request(device_t bus, device_t child, struct mmc_request *req)
{
	int blksz;
	struct aw_mmc_softc *sc;
	struct mmc_command *cmd;
	uint32_t cmdreg, imask;
	int err;

	sc = device_get_softc(bus);

	AW_MMC_LOCK(sc);
#ifdef MMCCAM
	KASSERT(req == NULL, ("req should be NULL in MMCCAM case!"));
	/*
	 * For MMCCAM, sc->ccb has been NULL-checked and populated
	 * by aw_mmc_cam_request() already.
	 */
	cmd = &sc->ccb->mmcio.cmd;
#else
	if (sc->aw_req) {
		AW_MMC_UNLOCK(sc);
		return (EBUSY);
	}
	sc->aw_req = req;
	cmd = req->cmd;

	if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) {
		device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
		    cmd->opcode, cmd->arg, cmd->flags,
		    cmd->data != NULL ? (unsigned int)cmd->data->len : 0,
		    cmd->data != NULL ? cmd->data->flags : 0);
	}
#endif
	cmdreg = AW_MMC_CMDR_LOAD;
	imask = AW_MMC_INT_ERR_BIT;
	sc->aw_intr_wait = 0;
	sc->aw_intr = 0;
	sc->aw_resid = 0;
	cmd->error = MMC_ERR_NONE;

	if (cmd->opcode == MMC_GO_IDLE_STATE)
		cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ;

	if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= AW_MMC_CMDR_RESP_RCV;
	if (cmd->flags & MMC_RSP_136)
		cmdreg |= AW_MMC_CMDR_LONG_RESP;
	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC;

	if (cmd->data) {
		cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER;

		if (cmd->data->flags & MMC_DATA_MULTI) {
			cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG;
			imask |= AW_MMC_INT_AUTO_STOP_DONE;
			sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE;
		} else {
			sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER;
			imask |= AW_MMC_INT_DATA_OVER;
		}
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdreg |= AW_MMC_CMDR_DIR_WRITE;
#ifdef MMCCAM
		if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
			AW_MMC_WRITE_4(sc, AW_MMC_BKSR, cmd->data->block_size);
			AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
		} else
#endif
		{
			blksz = min(cmd->data->len, MMC_SECTOR_SIZE);
			AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz);
			AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len);
		}
	} else {
		imask |= AW_MMC_INT_CMD_DONE;
	}

	/* Enable the interrupts we are interested in */
	AW_MMC_WRITE_4(sc, AW_MMC_IMKR, imask);
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);

	/* Enable auto stop if needed */
	AW_MMC_WRITE_4(sc, AW_MMC_A12A,
	    cmdreg & AW_MMC_CMDR_STOP_CMD_FLAG ? 0 : 0xffff);

	/* Write the command argument */
	AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg);

	/*
	 * If the command carries no data, start the request now;
	 * otherwise prepare the DMA transfer first and then start it.
	 */
	if (cmd->data == NULL) {
		AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
	} else {
		err = aw_mmc_prepare_dma(sc);
		if (err != 0)
			device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err);

		AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode);
	}

	if (!dumping) {
		callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz,
		    aw_mmc_timeout, sc);
	}
	AW_MMC_UNLOCK(sc);

	return (0);
}

static int
aw_mmc_read_ivar(device_t bus, device_t child, int which,
    uintptr_t *result)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		*(int *)result = sc->aw_host.ios.bus_mode;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		*(int *)result = sc->aw_host.ios.bus_width;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		*(int *)result = sc->aw_host.ios.chip_select;
		break;
	case MMCBR_IVAR_CLOCK:
		*(int *)result = sc->aw_host.ios.clock;
		break;
	case MMCBR_IVAR_F_MIN:
		*(int *)result = sc->aw_host.f_min;
		break;
	case MMCBR_IVAR_F_MAX:
		*(int *)result = sc->aw_host.f_max;
		break;
	case MMCBR_IVAR_HOST_OCR:
		*(int *)result = sc->aw_host.host_ocr;
		break;
	case MMCBR_IVAR_MODE:
		*(int *)result = sc->aw_host.mode;
		break;
	case MMCBR_IVAR_OCR:
		*(int *)result = sc->aw_host.ocr;
		break;
	case MMCBR_IVAR_POWER_MODE:
		*(int *)result = sc->aw_host.ios.power_mode;
		break;
	case MMCBR_IVAR_VDD:
		*(int *)result = sc->aw_host.ios.vdd;
		break;
	case MMCBR_IVAR_VCCQ:
		*(int *)result = sc->aw_host.ios.vccq;
		break;
	case MMCBR_IVAR_CAPS:
		*(int *)result = sc->aw_host.caps;
		break;
	case MMCBR_IVAR_TIMING:
		*(int *)result = sc->aw_host.ios.timing;
		break;
	case MMCBR_IVAR_MAX_DATA:
		*(int *)result = (sc->aw_mmc_conf->dma_xferlen *
		    AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE;
		break;
	case MMCBR_IVAR_RETUNE_REQ:
		*(int *)result = retune_req_none;
		break;
	}

	return (0);
}

static int
aw_mmc_write_ivar(device_t bus, device_t child, int which,
    uintptr_t value)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		sc->aw_host.ios.bus_mode = value;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		sc->aw_host.ios.bus_width = value;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		sc->aw_host.ios.chip_select = value;
		break;
	case MMCBR_IVAR_CLOCK:
		sc->aw_host.ios.clock = value;
		break;
	case MMCBR_IVAR_MODE:
		sc->aw_host.mode = value;
		break;
	case MMCBR_IVAR_OCR:
		sc->aw_host.ocr = value;
		break;
	case MMCBR_IVAR_POWER_MODE:
		sc->aw_host.ios.power_mode = value;
		break;
	case MMCBR_IVAR_VDD:
		sc->aw_host.ios.vdd = value;
		break;
	case MMCBR_IVAR_VCCQ:
		sc->aw_host.ios.vccq = value;
		break;
	case MMCBR_IVAR_TIMING:
		sc->aw_host.ios.timing = value;
		break;
	/* These are read-only */
	case MMCBR_IVAR_CAPS:
	case MMCBR_IVAR_HOST_OCR:
	case MMCBR_IVAR_F_MIN:
	case MMCBR_IVAR_F_MAX:
	case MMCBR_IVAR_MAX_DATA:
		return (EINVAL);
	}

	return (0);
}

static int
aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon)
{
	uint32_t reg;
	int retry;

	reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
	reg &= ~(AW_MMC_CKCR_ENB | AW_MMC_CKCR_LOW_POWER |
	    AW_MMC_CKCR_MASK_DATA0);

	if (clkon)
		reg |= AW_MMC_CKCR_ENB;
	if (sc->aw_mmc_conf->mask_data0)
		reg |= AW_MMC_CKCR_MASK_DATA0;

	AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);

	reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK |
	    AW_MMC_CMDR_WAIT_PRE_OVER;
	AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg);
	retry = 0xfffff;

	while (reg & AW_MMC_CMDR_LOAD && --retry > 0) {
		reg = AW_MMC_READ_4(sc, AW_MMC_CMDR);
		DELAY(10);
	}
	AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff);

	if (reg & AW_MMC_CMDR_LOAD) {
		device_printf(sc->aw_dev, "timeout updating clock\n");
		return (ETIMEDOUT);
	}

	if (sc->aw_mmc_conf->mask_data0) {
		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
		reg &= ~AW_MMC_CKCR_MASK_DATA0;
		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);
	}

	return (0);
}

#ifndef MMCCAM
static int
aw_mmc_switch_vccq(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;
	int uvolt, err;

	sc = device_get_softc(bus);

	if (sc->mmc_helper.vqmmc_supply == NULL)
		return EOPNOTSUPP;

	switch (sc->aw_host.ios.vccq) {
	case vccq_180:
		uvolt = 1800000;
		break;
	case vccq_330:
		uvolt = 3300000;
		break;
	default:
		return EINVAL;
	}

	err = regulator_set_voltage(sc->mmc_helper.vqmmc_supply, uvolt, uvolt);
	if (err != 0) {
		device_printf(sc->aw_dev,
		    "Cannot set vqmmc to %d<->%d\n",
		    uvolt,
		    uvolt);
		return (err);
	}

	return (0);
}
#endif

static int
aw_mmc_update_ios(device_t bus, device_t child)
{
	int error;
	struct aw_mmc_softc *sc;
	struct mmc_ios *ios;
	unsigned int clock;
	uint32_t reg, div = 1;
	int reg_status;
	int rv;

	sc = device_get_softc(bus);

	ios = &sc->aw_host.ios;

	/* Set the bus width. */
	switch (ios->bus_width) {
	case bus_width_1:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1);
		break;
	case bus_width_4:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4);
		break;
	case bus_width_8:
		AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8);
		break;
	}

	switch (ios->power_mode) {
	case power_on:
		break;
	case power_off:
		if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
			device_printf(sc->aw_dev, "Powering down sd/mmc\n");

		if (sc->mmc_helper.vmmc_supply) {
			rv = regulator_status(sc->mmc_helper.vmmc_supply, &reg_status);
			if (rv == 0 && reg_status == REGULATOR_STATUS_ENABLED)
				regulator_disable(sc->mmc_helper.vmmc_supply);
		}
		if (sc->mmc_helper.vqmmc_supply) {
			rv = regulator_status(sc->mmc_helper.vqmmc_supply, &reg_status);
			if (rv == 0 && reg_status == REGULATOR_STATUS_ENABLED)
				regulator_disable(sc->mmc_helper.vqmmc_supply);
		}

		if (sc->mmc_helper.mmc_pwrseq)
			MMC_PWRSEQ_SET_POWER(sc->mmc_helper.mmc_pwrseq, false);

		aw_mmc_reset(sc);
		break;
	case power_up:
		if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD))
			device_printf(sc->aw_dev, "Powering up sd/mmc\n");

		if (sc->mmc_helper.vmmc_supply) {
			rv = regulator_status(sc->mmc_helper.vmmc_supply, &reg_status);
			if (rv == 0 && reg_status != REGULATOR_STATUS_ENABLED)
				regulator_enable(sc->mmc_helper.vmmc_supply);
		}
		if (sc->mmc_helper.vqmmc_supply) {
			rv = regulator_status(sc->mmc_helper.vqmmc_supply, &reg_status);
			if (rv == 0 && reg_status != REGULATOR_STATUS_ENABLED)
				regulator_enable(sc->mmc_helper.vqmmc_supply);
		}

		if (sc->mmc_helper.mmc_pwrseq)
			MMC_PWRSEQ_SET_POWER(sc->mmc_helper.mmc_pwrseq, true);
		aw_mmc_init(sc);
		break;
	}

	/* Enable ddr mode if needed */
	reg = AW_MMC_READ_4(sc, AW_MMC_GCTL);
	if (ios->timing == bus_timing_uhs_ddr50 ||
	    ios->timing == bus_timing_mmc_ddr52)
		reg |= AW_MMC_GCTL_DDR_MOD_SEL;
	else
		reg &= ~AW_MMC_GCTL_DDR_MOD_SEL;
	AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg);

	if (ios->clock && ios->clock != sc->aw_clock) {
		sc->aw_clock = clock = ios->clock;

		/* Disable clock */
		error = aw_mmc_update_clock(sc, 0);
		if (error != 0)
			return (error);

		if (ios->timing == bus_timing_mmc_ddr52 &&
		    (sc->aw_mmc_conf->new_timing ||
		    ios->bus_width == bus_width_8)) {
			div = 2;
			clock <<= 1;
		}

		/* Reset the divider. */
		reg = AW_MMC_READ_4(sc, AW_MMC_CKCR);
		reg &= ~AW_MMC_CKCR_DIV;
		reg |= div - 1;
		AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg);

		/* New timing mode if needed */
		if (sc->aw_mmc_conf->new_timing) {
			reg = AW_MMC_READ_4(sc, AW_MMC_NTSR);
			reg |= AW_MMC_NTSR_MODE_SELECT;
			AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg);
		}

		/* Set the MMC clock. */
		error = clk_disable(sc->aw_clk_mmc);
		if (error != 0 && bootverbose)
			device_printf(sc->aw_dev,
			    "failed to disable mmc clock: %d\n", error);
		error = clk_set_freq(sc->aw_clk_mmc, clock,
		    CLK_SET_ROUND_DOWN);
		if (error != 0) {
			device_printf(sc->aw_dev,
			    "failed to set frequency to %u Hz: %d\n",
			    clock, error);
			return (error);
		}
		error = clk_enable(sc->aw_clk_mmc);
		if (error != 0 && bootverbose)
			device_printf(sc->aw_dev,
			    "failed to re-enable mmc clock: %d\n", error);

		if (sc->aw_mmc_conf->can_calibrate)
			AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN);

		/* Enable clock. */
		error = aw_mmc_update_clock(sc, 1);
		if (error != 0)
			return (error);
	}

	return (0);
}

#ifndef MMCCAM
static int
aw_mmc_get_ro(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);

	return (mmc_fdt_gpio_get_readonly(&sc->mmc_helper));
}

static int
aw_mmc_acquire_host(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;
	int error;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	while (sc->aw_bus_busy) {
		error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0);
		if (error != 0) {
			AW_MMC_UNLOCK(sc);
			return (error);
		}
	}
	sc->aw_bus_busy++;
	AW_MMC_UNLOCK(sc);

	return (0);
}

static int
aw_mmc_release_host(device_t bus, device_t child)
{
	struct aw_mmc_softc *sc;

	sc = device_get_softc(bus);
	AW_MMC_LOCK(sc);
	sc->aw_bus_busy--;
	wakeup(sc);
	AW_MMC_UNLOCK(sc);

	return (0);
}
#endif

static device_method_t aw_mmc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		aw_mmc_probe),
	DEVMETHOD(device_attach,	aw_mmc_attach),
	DEVMETHOD(device_detach,	aw_mmc_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	aw_mmc_read_ivar),
	DEVMETHOD(bus_write_ivar,	aw_mmc_write_ivar),
	DEVMETHOD(bus_add_child,	bus_generic_add_child),

#ifndef MMCCAM
	/* MMC bridge interface */
	DEVMETHOD(mmcbr_update_ios,	aw_mmc_update_ios),
	DEVMETHOD(mmcbr_request,	aw_mmc_request),
	DEVMETHOD(mmcbr_get_ro,		aw_mmc_get_ro),
	DEVMETHOD(mmcbr_switch_vccq,	aw_mmc_switch_vccq),
	DEVMETHOD(mmcbr_acquire_host,	aw_mmc_acquire_host),
	DEVMETHOD(mmcbr_release_host,	aw_mmc_release_host),
#endif

#ifdef MMCCAM
	/* MMCCAM interface */
	DEVMETHOD(mmc_sim_get_tran_settings,	aw_mmc_get_tran_settings),
	DEVMETHOD(mmc_sim_set_tran_settings,	aw_mmc_set_tran_settings),
	DEVMETHOD(mmc_sim_cam_request,		aw_mmc_cam_request),
	DEVMETHOD(mmc_sim_cam_poll,		aw_mmc_cam_poll),
#endif

	DEVMETHOD_END
};

static driver_t aw_mmc_driver = {
	"aw_mmc",
	aw_mmc_methods,
	sizeof(struct aw_mmc_softc),
};

DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, NULL, NULL);
#ifndef MMCCAM
MMC_DECLARE_BRIDGE(aw_mmc);
#endif
SIMPLEBUS_PNP_INFO(compat_data);