/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2017 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/sbuf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcreg.h>
#include <dev/mmc/mmcbrvar.h>

#include <dev/sdhci/sdhci.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>

#include "mmcbr_if.h"
#include "sdhci_if.h"

#include "opt_mmccam.h"

SYSCTL_NODE(_hw, OID_AUTO, sdhci, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "sdhci driver");

static int sdhci_debug = 0;
SYSCTL_INT(_hw_sdhci, OID_AUTO, debug, CTLFLAG_RWTUN, &sdhci_debug, 0,
    "Debug level");
u_int sdhci_quirk_clear = 0;
SYSCTL_INT(_hw_sdhci, OID_AUTO, quirk_clear, CTLFLAG_RWTUN, &sdhci_quirk_clear,
    0, "Mask of quirks to clear");
u_int sdhci_quirk_set = 0;
SYSCTL_INT(_hw_sdhci, OID_AUTO, quirk_set, CTLFLAG_RWTUN, &sdhci_quirk_set, 0,
    "Mask of quirks to set");

#define	RD1(slot, off)	SDHCI_READ_1((slot)->bus, (slot), (off))
#define	RD2(slot, off)	SDHCI_READ_2((slot)->bus, (slot), (off))
#define	RD4(slot, off)	SDHCI_READ_4((slot)->bus, (slot), (off))
#define	RD_MULTI_4(slot, off, ptr, count)	\
    SDHCI_READ_MULTI_4((slot)->bus, (slot), (off), (ptr), (count))

#define	WR1(slot, off, val)	SDHCI_WRITE_1((slot)->bus, (slot), (off), (val))
#define	WR2(slot, off, val)	SDHCI_WRITE_2((slot)->bus, (slot), (off), (val))
#define	WR4(slot, off, val)	SDHCI_WRITE_4((slot)->bus, (slot), (off), (val))
#define	WR_MULTI_4(slot, off, ptr, count)	\
    SDHCI_WRITE_MULTI_4((slot)->bus, (slot), (off), (ptr), (count))

static void sdhci_acmd_irq(struct sdhci_slot *slot, uint16_t acmd_err);
static void sdhci_card_poll(void *arg);
static void sdhci_card_task(void *arg, int pending);
static void sdhci_cmd_irq(struct sdhci_slot *slot, uint32_t intmask);
static void sdhci_data_irq(struct sdhci_slot *slot, uint32_t intmask);
static int sdhci_exec_tuning(struct sdhci_slot *slot, bool reset);
static void sdhci_handle_card_present_locked(struct sdhci_slot *slot,
    bool is_present);
static void sdhci_finish_command(struct sdhci_slot *slot);
static void sdhci_init(struct sdhci_slot *slot);
static void sdhci_read_block_pio(struct sdhci_slot *slot);
static void sdhci_req_done(struct sdhci_slot *slot);
static void sdhci_req_wakeup(struct mmc_request *req);
static void sdhci_reset(struct sdhci_slot *slot, uint8_t mask);
static void sdhci_retune(void *arg);
static void sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock);
static void sdhci_set_power(struct sdhci_slot *slot, u_char power);
static void sdhci_set_transfer_mode(struct sdhci_slot *slot,
    const struct mmc_data *data);
static void sdhci_start(struct sdhci_slot *slot);
static void sdhci_timeout(void *arg);
static void sdhci_start_command(struct sdhci_slot *slot,
    struct mmc_command *cmd);
static void sdhci_start_data(struct sdhci_slot *slot,
    const struct mmc_data *data);
static void sdhci_write_block_pio(struct sdhci_slot *slot);
static void sdhci_transfer_pio(struct sdhci_slot *slot);

#ifdef MMCCAM
/* CAM-related */
static void sdhci_cam_action(struct cam_sim *sim, union ccb *ccb);
static int sdhci_cam_get_possible_host_clock(const struct sdhci_slot *slot,
    int proposed_clock);
static void sdhci_cam_poll(struct cam_sim *sim);
static int sdhci_cam_request(struct sdhci_slot *slot, union ccb *ccb);
static int sdhci_cam_settran_settings(struct sdhci_slot *slot, union ccb *ccb);
static int sdhci_cam_update_ios(struct sdhci_slot *slot);
#endif

/* helper routines */
static int sdhci_dma_alloc(struct sdhci_slot *slot, uint32_t caps);
static void sdhci_dma_free(struct sdhci_slot *slot);
static void sdhci_dumpcaps(struct sdhci_slot *slot);
static void sdhci_dumpcaps_buf(struct sdhci_slot *slot, struct sbuf *s);
static void sdhci_dumpregs(struct sdhci_slot *slot);
static void sdhci_dumpregs_buf(struct sdhci_slot *slot, struct sbuf *s);
static int sdhci_syctl_dumpcaps(SYSCTL_HANDLER_ARGS);
static int sdhci_syctl_dumpregs(SYSCTL_HANDLER_ARGS);
static void sdhci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error);
static int slot_printf(const struct sdhci_slot *slot, const char * fmt, ...)
    __printflike(2, 3);
static int slot_sprintf(const struct sdhci_slot *slot, struct sbuf *s,
    const char * fmt, ...) __printflike(3, 4);
static uint32_t sdhci_tuning_intmask(const struct sdhci_slot *slot);

#define	SDHCI_LOCK(_slot)		mtx_lock(&(_slot)->mtx)
#define	SDHCI_UNLOCK(_slot)		mtx_unlock(&(_slot)->mtx)
#define	SDHCI_LOCK_INIT(_slot) \
	mtx_init(&_slot->mtx, "SD slot mtx", "sdhci", MTX_DEF)
#define	SDHCI_LOCK_DESTROY(_slot)	mtx_destroy(&_slot->mtx);
#define	SDHCI_ASSERT_LOCKED(_slot)	mtx_assert(&_slot->mtx, MA_OWNED);
#define	SDHCI_ASSERT_UNLOCKED(_slot)	mtx_assert(&_slot->mtx, MA_NOTOWNED);

#define	SDHCI_DEFAULT_MAX_FREQ	50

#define	SDHCI_200_MAX_DIVIDER	256
#define	SDHCI_300_MAX_DIVIDER	2046

#define	SDHCI_CARD_PRESENT_TICKS	(hz / 5)
#define	SDHCI_INSERT_DELAY_TICKS	(hz / 2)

/*
 * Broadcom BCM577xx Controller Constants
 */
/* Maximum divider supported by the default clock source. */
#define	BCM577XX_DEFAULT_MAX_DIVIDER	256
/* Alternative clock's base frequency. */
#define	BCM577XX_ALT_CLOCK_BASE		63000000

#define	BCM577XX_HOST_CONTROL		0x198
#define	BCM577XX_CTRL_CLKSEL_MASK	0xFFFFCFFF
#define	BCM577XX_CTRL_CLKSEL_SHIFT	12
#define	BCM577XX_CTRL_CLKSEL_DEFAULT	0x0
#define	BCM577XX_CTRL_CLKSEL_64MHZ	0x3

static void
sdhci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0) {
		printf("getaddr: error %d\n", error);
		return;
	}
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

static int
slot_printf(const struct sdhci_slot *slot, const char * fmt, ...)
{
	char buf[128];
	va_list ap;
	int retval;

	/*
	 * Make sure we print a single line all together rather than in two
	 * halves to avoid console gibberish bingo.
	 */
	va_start(ap, fmt);
	retval = vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	retval += printf("%s-slot%d: %s",
	    device_get_nameunit(slot->bus), slot->num, buf);
	return (retval);
}

static int
slot_sprintf(const struct sdhci_slot *slot, struct sbuf *s,
    const char * fmt, ...)
{
	va_list ap;
	int retval;

	retval = sbuf_printf(s, "%s-slot%d: ", device_get_nameunit(slot->bus), slot->num);

	va_start(ap, fmt);
	retval += sbuf_vprintf(s, fmt, ap);
	va_end(ap);

	return (retval);
}

static void
sdhci_dumpregs_buf(struct sdhci_slot *slot, struct sbuf *s)
{
	slot_sprintf(slot, s, "============== REGISTER DUMP ==============\n");

	slot_sprintf(slot, s, "Sys addr: 0x%08x | Version: 0x%08x\n",
	    RD4(slot, SDHCI_DMA_ADDRESS), RD2(slot, SDHCI_HOST_VERSION));
	slot_sprintf(slot, s, "Blk size: 0x%08x | Blk cnt: 0x%08x\n",
	    RD2(slot, SDHCI_BLOCK_SIZE), RD2(slot, SDHCI_BLOCK_COUNT));
	slot_sprintf(slot, s, "Argument: 0x%08x | Trn mode: 0x%08x\n",
	    RD4(slot, SDHCI_ARGUMENT), RD2(slot, SDHCI_TRANSFER_MODE));
	slot_sprintf(slot, s, "Present: 0x%08x | Host ctl: 0x%08x\n",
	    RD4(slot, SDHCI_PRESENT_STATE), RD1(slot, SDHCI_HOST_CONTROL));
	slot_sprintf(slot, s, "Power: 0x%08x | Blk gap: 0x%08x\n",
	    RD1(slot, SDHCI_POWER_CONTROL), RD1(slot, SDHCI_BLOCK_GAP_CONTROL));
	slot_sprintf(slot, s, "Wake-up: 0x%08x | Clock: 0x%08x\n",
	    RD1(slot, SDHCI_WAKE_UP_CONTROL), RD2(slot, SDHCI_CLOCK_CONTROL));
	slot_sprintf(slot, s, "Timeout: 0x%08x | Int stat: 0x%08x\n",
	    RD1(slot, SDHCI_TIMEOUT_CONTROL), RD4(slot, SDHCI_INT_STATUS));
	slot_sprintf(slot, s, "Int enab: 0x%08x | Sig enab: 0x%08x\n",
	    RD4(slot, SDHCI_INT_ENABLE), RD4(slot, SDHCI_SIGNAL_ENABLE));
	slot_sprintf(slot, s, "AC12 err: 0x%08x | Host ctl2:0x%08x\n",
	    RD2(slot, SDHCI_ACMD12_ERR), RD2(slot, SDHCI_HOST_CONTROL2));
	slot_sprintf(slot, s, "Caps: 0x%08x | Caps2: 0x%08x\n",
	    RD4(slot, SDHCI_CAPABILITIES), RD4(slot, SDHCI_CAPABILITIES2));
	slot_sprintf(slot, s, "Max curr: 0x%08x | ADMA err: 0x%08x\n",
	    RD4(slot, SDHCI_MAX_CURRENT), RD1(slot, SDHCI_ADMA_ERR));
	slot_sprintf(slot, s, "ADMA addr:0x%08x | Slot int: 0x%08x\n",
	    RD4(slot, SDHCI_ADMA_ADDRESS_LO), RD2(slot, SDHCI_SLOT_INT_STATUS));

	slot_sprintf(slot, s, "===========================================\n");
}

static void
sdhci_dumpregs(struct sdhci_slot *slot)
{
	struct sbuf s;

	sbuf_new(&s, NULL, 1024, SBUF_AUTOEXTEND);
	sbuf_set_drain(&s, &sbuf_printf_drain, NULL);
	sdhci_dumpregs_buf(slot, &s);
	sbuf_finish(&s);
	sbuf_delete(&s);
}

static int
sdhci_syctl_dumpregs(SYSCTL_HANDLER_ARGS)
{
	struct sdhci_slot *slot = arg1;
	struct sbuf s;

	sbuf_new_for_sysctl(&s, NULL, 1024, req);
	sbuf_putc(&s, '\n');
	sdhci_dumpregs_buf(slot, &s);
	sbuf_finish(&s);
	sbuf_delete(&s);

	return (0);
}

static void
sdhci_dumpcaps_buf(struct sdhci_slot *slot, struct sbuf *s)
{
	int host_caps = slot->host.caps;
	int caps = slot->caps;

	slot_sprintf(slot, s,
	    "%uMHz%s %s VDD:%s%s%s VCCQ: 3.3V%s%s DRV: B%s%s%s %s %s\n",
	    slot->max_clk / 1000000,
	    (caps & SDHCI_CAN_DO_HISPD) ? " HS" : "",
	    (host_caps & MMC_CAP_8_BIT_DATA) ? "8bits" :
	    ((host_caps & MMC_CAP_4_BIT_DATA) ? "4bits" : "1bit"),
	    (caps & SDHCI_CAN_VDD_330) ? " 3.3V" : "",
	    (caps & SDHCI_CAN_VDD_300) ? " 3.0V" : "",
	    ((caps & SDHCI_CAN_VDD_180) &&
	    (slot->opt & SDHCI_SLOT_EMBEDDED)) ? " 1.8V" : "",
	    (host_caps & MMC_CAP_SIGNALING_180) ? " 1.8V" : "",
	    (host_caps & MMC_CAP_SIGNALING_120) ? " 1.2V" : "",
	    (host_caps & MMC_CAP_DRIVER_TYPE_A) ? "A" : "",
	    (host_caps & MMC_CAP_DRIVER_TYPE_C) ? "C" : "",
	    (host_caps & MMC_CAP_DRIVER_TYPE_D) ? "D" : "",
"D" : "", 310 (slot->opt & SDHCI_HAVE_DMA) ? "DMA" : "PIO", 311 (slot->opt & SDHCI_SLOT_EMBEDDED) ? "embedded" : 312 (slot->opt & SDHCI_NON_REMOVABLE) ? "non-removable" : 313 "removable"); 314 if (host_caps & (MMC_CAP_MMC_DDR52 | MMC_CAP_MMC_HS200 | 315 MMC_CAP_MMC_HS400 | MMC_CAP_MMC_ENH_STROBE)) 316 slot_sprintf(slot, s, "eMMC:%s%s%s%s\n", 317 (host_caps & MMC_CAP_MMC_DDR52) ? " DDR52" : "", 318 (host_caps & MMC_CAP_MMC_HS200) ? " HS200" : "", 319 (host_caps & MMC_CAP_MMC_HS400) ? " HS400" : "", 320 ((host_caps & 321 (MMC_CAP_MMC_HS400 | MMC_CAP_MMC_ENH_STROBE)) == 322 (MMC_CAP_MMC_HS400 | MMC_CAP_MMC_ENH_STROBE)) ? 323 " HS400ES" : ""); 324 if (host_caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 325 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104)) 326 slot_sprintf(slot, s, "UHS-I:%s%s%s%s%s\n", 327 (host_caps & MMC_CAP_UHS_SDR12) ? " SDR12" : "", 328 (host_caps & MMC_CAP_UHS_SDR25) ? " SDR25" : "", 329 (host_caps & MMC_CAP_UHS_SDR50) ? " SDR50" : "", 330 (host_caps & MMC_CAP_UHS_SDR104) ? " SDR104" : "", 331 (host_caps & MMC_CAP_UHS_DDR50) ? " DDR50" : ""); 332 if (slot->opt & SDHCI_TUNING_SUPPORTED) 333 slot_sprintf(slot, s, 334 "Re-tuning count %d secs, mode %d\n", 335 slot->retune_count, slot->retune_mode + 1); 336 } 337 338 static void 339 sdhci_dumpcaps(struct sdhci_slot *slot) 340 { 341 struct sbuf s; 342 343 sbuf_new(&s, NULL, 1024, SBUF_AUTOEXTEND); 344 sbuf_set_drain(&s, &sbuf_printf_drain, NULL); 345 sdhci_dumpcaps_buf(slot, &s); 346 sbuf_finish(&s); 347 sbuf_delete(&s); 348 } 349 350 static int 351 sdhci_syctl_dumpcaps(SYSCTL_HANDLER_ARGS) 352 { 353 struct sdhci_slot *slot = arg1; 354 struct sbuf s; 355 356 sbuf_new_for_sysctl(&s, NULL, 1024, req); 357 sbuf_putc(&s, '\n'); 358 sdhci_dumpcaps_buf(slot, &s); 359 sbuf_finish(&s); 360 sbuf_delete(&s); 361 362 return (0); 363 } 364 365 static void 366 sdhci_reset(struct sdhci_slot *slot, uint8_t mask) 367 { 368 int timeout; 369 uint32_t clock; 370 371 if (slot->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 372 if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot)) 373 return; 374 } 375 376 /* Some controllers need this kick or reset won't work. */ 377 if ((mask & SDHCI_RESET_ALL) == 0 && 378 (slot->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)) { 379 /* This is to force an update */ 380 clock = slot->clock; 381 slot->clock = 0; 382 sdhci_set_clock(slot, clock); 383 } 384 385 if (mask & SDHCI_RESET_ALL) { 386 slot->clock = 0; 387 slot->power = 0; 388 } 389 390 WR1(slot, SDHCI_SOFTWARE_RESET, mask); 391 392 if (slot->quirks & SDHCI_QUIRK_WAITFOR_RESET_ASSERTED) { 393 /* 394 * Resets on TI OMAPs and AM335x are incompatible with SDHCI 395 * specification. The reset bit has internal propagation delay, 396 * so a fast read after write returns 0 even if reset process is 397 * in progress. The workaround is to poll for 1 before polling 398 * for 0. In the worst case, if we miss seeing it asserted the 399 * time we spent waiting is enough to ensure the reset finishes. 
static void
sdhci_reset(struct sdhci_slot *slot, uint8_t mask)
{
	int timeout;
	uint32_t clock;

	if (slot->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot))
			return;
	}

	/* Some controllers need this kick or reset won't work. */
	if ((mask & SDHCI_RESET_ALL) == 0 &&
	    (slot->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)) {
		/* This is to force an update */
		clock = slot->clock;
		slot->clock = 0;
		sdhci_set_clock(slot, clock);
	}

	if (mask & SDHCI_RESET_ALL) {
		slot->clock = 0;
		slot->power = 0;
	}

	WR1(slot, SDHCI_SOFTWARE_RESET, mask);

	if (slot->quirks & SDHCI_QUIRK_WAITFOR_RESET_ASSERTED) {
		/*
		 * Resets on TI OMAPs and AM335x are incompatible with the
		 * SDHCI specification. The reset bit has an internal
		 * propagation delay, so a fast read after write returns 0
		 * even if the reset process is in progress. The workaround
		 * is to poll for 1 before polling for 0. In the worst case,
		 * if we miss seeing it asserted the time we spent waiting is
		 * enough to ensure the reset finishes.
		 */
		timeout = 10000;
		while ((RD1(slot, SDHCI_SOFTWARE_RESET) & mask) != mask) {
			if (timeout <= 0)
				break;
			timeout--;
			DELAY(1);
		}
	}

	/* Wait max 100 ms */
	timeout = 10000;
	/* Controller clears the bits when it's done */
	while (RD1(slot, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout <= 0) {
			slot_printf(slot, "Reset 0x%x never completed.\n",
			    mask);
			sdhci_dumpregs(slot);
			return;
		}
		timeout--;
		DELAY(10);
	}
}

static uint32_t
sdhci_tuning_intmask(const struct sdhci_slot *slot)
{
	uint32_t intmask;

	intmask = 0;
	if (slot->opt & SDHCI_TUNING_ENABLED) {
		intmask |= SDHCI_INT_TUNEERR;
		if (slot->retune_mode == SDHCI_RETUNE_MODE_2 ||
		    slot->retune_mode == SDHCI_RETUNE_MODE_3)
			intmask |= SDHCI_INT_RETUNE;
	}
	return (intmask);
}

static void
sdhci_init(struct sdhci_slot *slot)
{

	sdhci_reset(slot, SDHCI_RESET_ALL);

	/* Enable interrupts. */
	slot->intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
	    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
	    SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
	    SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
	    SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
	    SDHCI_INT_ACMD12ERR;

	if (!(slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) &&
	    !(slot->opt & SDHCI_NON_REMOVABLE)) {
		slot->intmask |= SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
	}

	WR4(slot, SDHCI_INT_ENABLE, slot->intmask);
	WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
}

static void
sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock)
{
	uint32_t clk_base;
	uint32_t clk_sel;
	uint32_t res;
	uint16_t clk;
	uint16_t div;
	int timeout;

	if (clock == slot->clock)
		return;
	slot->clock = clock;

	/* Turn off the clock. */
	clk = RD2(slot, SDHCI_CLOCK_CONTROL);
	WR2(slot, SDHCI_CLOCK_CONTROL, clk & ~SDHCI_CLOCK_CARD_EN);
	/* If no clock requested - leave it so. */
	if (clock == 0)
		return;

	/* Determine the clock base frequency */
	clk_base = slot->max_clk;
	if (slot->quirks & SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC) {
		clk_sel = RD2(slot, BCM577XX_HOST_CONTROL) &
		    BCM577XX_CTRL_CLKSEL_MASK;

		/*
		 * Select clock source appropriate for the requested frequency.
		 */
		if ((clk_base / BCM577XX_DEFAULT_MAX_DIVIDER) > clock) {
			clk_base = BCM577XX_ALT_CLOCK_BASE;
			clk_sel |= (BCM577XX_CTRL_CLKSEL_64MHZ <<
			    BCM577XX_CTRL_CLKSEL_SHIFT);
		} else {
			clk_sel |= (BCM577XX_CTRL_CLKSEL_DEFAULT <<
			    BCM577XX_CTRL_CLKSEL_SHIFT);
		}

		WR2(slot, BCM577XX_HOST_CONTROL, clk_sel);
	}

	/* Recalculate timeout clock frequency based on the new sd clock. */
	if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
		slot->timeout_clk = slot->clock / 1000;

	if (slot->version < SDHCI_SPEC_300) {
		/* Looking for highest freq <= clock. */
		res = clk_base;
		for (div = 1; div < SDHCI_200_MAX_DIVIDER; div <<= 1) {
			if (res <= clock)
				break;
			res >>= 1;
		}
		/* Divider 1:1 is 0x00, 2:1 is 0x01, 256:1 is 0x80 ... */
		div >>= 1;
	} else {
		/* Version 3.0 divisors are multiples of two up to 1023 * 2 */
		if (clock >= clk_base)
			div = 0;
		else {
			for (div = 2; div < SDHCI_300_MAX_DIVIDER; div += 2) {
				if ((clk_base / div) <= clock)
					break;
			}
		}
		div >>= 1;
	}

	if (bootverbose || sdhci_debug)
		slot_printf(slot, "Divider %d for freq %d (base %d)\n",
		    div, clock, clk_base);

	/* Now that we have the divider, set it. */
	clk = (div & SDHCI_DIVIDER_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div >> SDHCI_DIVIDER_MASK_LEN) & SDHCI_DIVIDER_HI_MASK)
	    << SDHCI_DIVIDER_HI_SHIFT;

	WR2(slot, SDHCI_CLOCK_CONTROL, clk);
	/* Enable clock. */
	clk |= SDHCI_CLOCK_INT_EN;
	WR2(slot, SDHCI_CLOCK_CONTROL, clk);
	/* Wait up to 10 ms until it stabilizes. */
	timeout = 10;
	while (!((clk = RD2(slot, SDHCI_CLOCK_CONTROL))
	    & SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			slot_printf(slot,
			    "Internal clock never stabilised.\n");
			sdhci_dumpregs(slot);
			return;
		}
		timeout--;
		DELAY(1000);
	}
	/* Pass clock signal to the bus. */
	clk |= SDHCI_CLOCK_CARD_EN;
	WR2(slot, SDHCI_CLOCK_CONTROL, clk);
}

static void
sdhci_set_power(struct sdhci_slot *slot, u_char power)
{
	int i;
	uint8_t pwr;

	if (slot->power == power)
		return;

	slot->power = power;

	/* Turn off the power. */
	pwr = 0;
	WR1(slot, SDHCI_POWER_CONTROL, pwr);
	/* If power down requested - leave it so. */
	if (power == 0)
		return;
	/* Set voltage. */
	switch (1 << power) {
	case MMC_OCR_LOW_VOLTAGE:
		pwr |= SDHCI_POWER_180;
		break;
	case MMC_OCR_290_300:
	case MMC_OCR_300_310:
		pwr |= SDHCI_POWER_300;
		break;
	case MMC_OCR_320_330:
	case MMC_OCR_330_340:
		pwr |= SDHCI_POWER_330;
		break;
	}
	WR1(slot, SDHCI_POWER_CONTROL, pwr);
	/*
	 * Turn on VDD1 power. Note that at least some Intel controllers can
	 * fail to enable bus power on the first try after transiting from D3
	 * to D0, so we give them up to 2 ms.
	 */
	pwr |= SDHCI_POWER_ON;
	for (i = 0; i < 20; i++) {
		WR1(slot, SDHCI_POWER_CONTROL, pwr);
		if (RD1(slot, SDHCI_POWER_CONTROL) & SDHCI_POWER_ON)
			break;
		DELAY(100);
	}
	if (!(RD1(slot, SDHCI_POWER_CONTROL) & SDHCI_POWER_ON))
		slot_printf(slot, "Bus power failed to enable\n");

	if (slot->quirks & SDHCI_QUIRK_INTEL_POWER_UP_RESET) {
		WR1(slot, SDHCI_POWER_CONTROL, pwr | 0x10);
		DELAY(10);
		WR1(slot, SDHCI_POWER_CONTROL, pwr);
		DELAY(300);
	}
}

static void
sdhci_read_block_pio(struct sdhci_slot *slot)
{
	uint32_t data;
	char *buffer;
	size_t left;

	buffer = slot->curcmd->data->data;
	buffer += slot->offset;
	/* Transfer one block at a time. */
#ifdef MMCCAM
	if (slot->curcmd->data->flags & MMC_DATA_BLOCK_SIZE)
		left = min(slot->curcmd->data->block_size,
		    slot->curcmd->data->len - slot->offset);
	else
#endif
		left = min(512, slot->curcmd->data->len - slot->offset);
	slot->offset += left;

	/* If we are too fast, broken controllers return zeroes. */
	if (slot->quirks & SDHCI_QUIRK_BROKEN_TIMINGS)
		DELAY(10);
	/* Handle unaligned and aligned buffer cases. */
	if ((intptr_t)buffer & 3) {
		while (left > 3) {
			data = RD4(slot, SDHCI_BUFFER);
			buffer[0] = data;
			buffer[1] = (data >> 8);
			buffer[2] = (data >> 16);
			buffer[3] = (data >> 24);
			buffer += 4;
			left -= 4;
		}
	} else {
		RD_MULTI_4(slot, SDHCI_BUFFER,
		    (uint32_t *)buffer, left >> 2);
		left &= 3;
	}
	/* Handle uneven size case. */
	if (left > 0) {
		data = RD4(slot, SDHCI_BUFFER);
		while (left > 0) {
			*(buffer++) = data;
			data >>= 8;
			left--;
		}
	}
}

static void
sdhci_write_block_pio(struct sdhci_slot *slot)
{
	uint32_t data = 0;
	char *buffer;
	size_t left;

	buffer = slot->curcmd->data->data;
	buffer += slot->offset;
	/* Transfer one block at a time. */
#ifdef MMCCAM
	if (slot->curcmd->data->flags & MMC_DATA_BLOCK_SIZE) {
		left = min(slot->curcmd->data->block_size,
		    slot->curcmd->data->len - slot->offset);
	} else
#endif
		left = min(512, slot->curcmd->data->len - slot->offset);
	slot->offset += left;

	/* Handle unaligned and aligned buffer cases. */
	if ((intptr_t)buffer & 3) {
		while (left > 3) {
			data = buffer[0] +
			    (buffer[1] << 8) +
			    (buffer[2] << 16) +
			    (buffer[3] << 24);
			left -= 4;
			buffer += 4;
			WR4(slot, SDHCI_BUFFER, data);
		}
	} else {
		WR_MULTI_4(slot, SDHCI_BUFFER,
		    (uint32_t *)buffer, left >> 2);
		left &= 3;
	}
	/* Handle uneven size case. */
	if (left > 0) {
		while (left > 0) {
			data <<= 8;
			data += *(buffer++);
			left--;
		}
		WR4(slot, SDHCI_BUFFER, data);
	}
}

static void
sdhci_transfer_pio(struct sdhci_slot *slot)
{

	/* Read as many blocks as possible. */
	if (slot->curcmd->data->flags & MMC_DATA_READ) {
		while (RD4(slot, SDHCI_PRESENT_STATE) &
		    SDHCI_DATA_AVAILABLE) {
			sdhci_read_block_pio(slot);
			if (slot->offset >= slot->curcmd->data->len)
				break;
		}
	} else {
		while (RD4(slot, SDHCI_PRESENT_STATE) &
		    SDHCI_SPACE_AVAILABLE) {
			sdhci_write_block_pio(slot);
			if (slot->offset >= slot->curcmd->data->len)
				break;
		}
	}
}

static void
sdhci_card_task(void *arg, int pending __unused)
{
	struct sdhci_slot *slot = arg;
	device_t d;

	SDHCI_LOCK(slot);
	if (SDHCI_GET_CARD_PRESENT(slot->bus, slot)) {
#ifdef MMCCAM
		if (slot->card_present == 0) {
#else
		if (slot->dev == NULL) {
#endif
			/* If card is present - attach mmc bus. */
			if (bootverbose || sdhci_debug)
				slot_printf(slot, "Card inserted\n");
#ifdef MMCCAM
			slot->card_present = 1;
			mmccam_start_discovery(slot->sim);
			SDHCI_UNLOCK(slot);
#else
			d = slot->dev = device_add_child(slot->bus, "mmc", -1);
			SDHCI_UNLOCK(slot);
			if (d) {
				device_set_ivars(d, slot);
				(void)device_probe_and_attach(d);
			}
#endif
		} else
			SDHCI_UNLOCK(slot);
	} else {
#ifdef MMCCAM
		if (slot->card_present == 1) {
#else
		if (slot->dev != NULL) {
#endif
			/* If no card present - detach mmc bus. */
			if (bootverbose || sdhci_debug)
				slot_printf(slot, "Card removed\n");
			d = slot->dev;
			slot->dev = NULL;
#ifdef MMCCAM
			slot->card_present = 0;
			mmccam_start_discovery(slot->sim);
			SDHCI_UNLOCK(slot);
#else
			slot->intmask &= ~sdhci_tuning_intmask(slot);
			WR4(slot, SDHCI_INT_ENABLE, slot->intmask);
			WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
			slot->opt &= ~SDHCI_TUNING_ENABLED;
			SDHCI_UNLOCK(slot);
			callout_drain(&slot->retune_callout);
			device_delete_child(slot->bus, d);
#endif
		} else
			SDHCI_UNLOCK(slot);
	}
}

static void
sdhci_handle_card_present_locked(struct sdhci_slot *slot, bool is_present)
{
	bool was_present;

	/*
	 * If there was no card and now there is one, schedule the task to
	 * create the child device after a short delay. The delay is to
	 * debounce the card insert (sometimes the card detect pin stabilizes
	 * before the other pins have made good contact).
	 *
	 * If there was a card present and now it's gone, immediately schedule
	 * the task to delete the child device. No debouncing -- gone is gone,
	 * because once power is removed, a full card re-init is needed, and
	 * that happens by deleting and recreating the child device.
	 */
#ifdef MMCCAM
	was_present = slot->card_present;
#else
	was_present = slot->dev != NULL;
#endif
	if (!was_present && is_present) {
		taskqueue_enqueue_timeout(taskqueue_swi_giant,
		    &slot->card_delayed_task, -SDHCI_INSERT_DELAY_TICKS);
	} else if (was_present && !is_present) {
		taskqueue_enqueue(taskqueue_swi_giant, &slot->card_task);
	}
}

void
sdhci_handle_card_present(struct sdhci_slot *slot, bool is_present)
{

	SDHCI_LOCK(slot);
	sdhci_handle_card_present_locked(slot, is_present);
	SDHCI_UNLOCK(slot);
}

static void
sdhci_card_poll(void *arg)
{
	struct sdhci_slot *slot = arg;

	sdhci_handle_card_present(slot,
	    SDHCI_GET_CARD_PRESENT(slot->bus, slot));
	callout_reset(&slot->card_poll_callout, SDHCI_CARD_PRESENT_TICKS,
	    sdhci_card_poll, slot);
}

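/*
 * Descriptive comment added for clarity: set up the SDMA bounce buffer for
 * this slot -- pick a buffer boundary based on maxphys (unless the quirk
 * forbids it), create the DMA tag, then allocate and map the buffer,
 * recording its bus address in slot->paddr.
 */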
static int
sdhci_dma_alloc(struct sdhci_slot *slot, uint32_t caps)
{
	int err;

	if (!(slot->quirks & SDHCI_QUIRK_BROKEN_SDMA_BOUNDARY)) {
		if (maxphys <= 1024 * 4)
			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_4K;
		else if (maxphys <= 1024 * 8)
			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_8K;
		else if (maxphys <= 1024 * 16)
			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_16K;
		else if (maxphys <= 1024 * 32)
			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_32K;
		else if (maxphys <= 1024 * 64)
			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_64K;
		else if (maxphys <= 1024 * 128)
			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_128K;
		else if (maxphys <= 1024 * 256)
			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_256K;
		else
			slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_512K;
	}
	slot->sdma_bbufsz = SDHCI_SDMA_BNDRY_TO_BBUFSZ(slot->sdma_boundary);

	/*
	 * Allocate the DMA tag for an SDMA bounce buffer.
	 * Note that the SDHCI specification doesn't state any alignment
	 * constraint for the SDMA system address. However, controllers
	 * typically ignore the SDMA boundary bits in SDHCI_DMA_ADDRESS when
	 * forming the actual address of data, requiring the SDMA buffer to
	 * be aligned to the SDMA boundary.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(slot->bus), slot->sdma_bbufsz,
	    0, (caps & SDHCI_CAN_DO_64BIT) ? BUS_SPACE_MAXADDR :
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    slot->sdma_bbufsz, 1, slot->sdma_bbufsz, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &slot->dmatag);
	if (err != 0) {
		slot_printf(slot, "Can't create DMA tag for SDMA\n");
		return (err);
	}
	/* Allocate DMA memory for the SDMA bounce buffer. */
	err = bus_dmamem_alloc(slot->dmatag, (void **)&slot->dmamem,
	    BUS_DMA_NOWAIT, &slot->dmamap);
	if (err != 0) {
		slot_printf(slot, "Can't alloc DMA memory for SDMA\n");
		bus_dma_tag_destroy(slot->dmatag);
		return (err);
	}
	/* Map the memory of the SDMA bounce buffer. */
	err = bus_dmamap_load(slot->dmatag, slot->dmamap,
	    (void *)slot->dmamem, slot->sdma_bbufsz, sdhci_getaddr,
	    &slot->paddr, 0);
	if (err != 0 || slot->paddr == 0) {
		slot_printf(slot, "Can't load DMA memory for SDMA\n");
		bus_dmamem_free(slot->dmatag, slot->dmamem, slot->dmamap);
		bus_dma_tag_destroy(slot->dmatag);
		if (err)
			return (err);
		else
			return (EFAULT);
	}

	return (0);
}

static void
sdhci_dma_free(struct sdhci_slot *slot)
{

	bus_dmamap_unload(slot->dmatag, slot->dmamap);
	bus_dmamem_free(slot->dmatag, slot->dmamem, slot->dmamap);
	bus_dma_tag_destroy(slot->dmatag);
}

int
sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num)
{
	kobjop_desc_t kobj_desc;
	kobj_method_t *kobj_method;
	uint32_t caps, caps2, freq, host_caps;
	int err;
	char node_name[8];
	struct sysctl_oid *node_oid;

	SDHCI_LOCK_INIT(slot);

	slot->num = num;
	slot->bus = dev;

	slot->version = (RD2(slot, SDHCI_HOST_VERSION)
	    >> SDHCI_SPEC_VER_SHIFT) & SDHCI_SPEC_VER_MASK;
	if (slot->quirks & SDHCI_QUIRK_MISSING_CAPS) {
		caps = slot->caps;
		caps2 = slot->caps2;
	} else {
		caps = RD4(slot, SDHCI_CAPABILITIES);
		if (slot->version >= SDHCI_SPEC_300)
			caps2 = RD4(slot, SDHCI_CAPABILITIES2);
		else
			caps2 = 0;
	}
	if (slot->version >= SDHCI_SPEC_300) {
		if ((caps & SDHCI_SLOTTYPE_MASK) != SDHCI_SLOTTYPE_REMOVABLE &&
		    (caps & SDHCI_SLOTTYPE_MASK) != SDHCI_SLOTTYPE_EMBEDDED) {
			slot_printf(slot,
			    "Driver doesn't support shared bus slots\n");
			SDHCI_LOCK_DESTROY(slot);
			return (ENXIO);
		} else if ((caps & SDHCI_SLOTTYPE_MASK) ==
		    SDHCI_SLOTTYPE_EMBEDDED) {
			slot->opt |= SDHCI_SLOT_EMBEDDED | SDHCI_NON_REMOVABLE;
		}
	}
	/* Calculate base clock frequency. */
	if (slot->version >= SDHCI_SPEC_300)
		freq = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
		    SDHCI_CLOCK_BASE_SHIFT;
	else
		freq = (caps & SDHCI_CLOCK_BASE_MASK) >>
		    SDHCI_CLOCK_BASE_SHIFT;
	if (freq != 0)
		slot->max_clk = freq * 1000000;
	/*
	 * If the frequency wasn't in the capabilities and the hardware driver
	 * hasn't already set max_clk we're probably not going to work right
	 * with an assumption, so complain about it.
	 */
	if (slot->max_clk == 0) {
		slot->max_clk = SDHCI_DEFAULT_MAX_FREQ * 1000000;
		slot_printf(slot, "Hardware doesn't specify base clock "
		    "frequency, using %dMHz as default.\n",
		    SDHCI_DEFAULT_MAX_FREQ);
	}
	/* Calculate/set timeout clock frequency. */
	if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) {
		slot->timeout_clk = slot->max_clk / 1000;
	} else if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_1MHZ) {
		slot->timeout_clk = 1000;
	} else {
		slot->timeout_clk = (caps & SDHCI_TIMEOUT_CLK_MASK) >>
		    SDHCI_TIMEOUT_CLK_SHIFT;
		if (caps & SDHCI_TIMEOUT_CLK_UNIT)
			slot->timeout_clk *= 1000;
	}
	/*
	 * If the frequency wasn't in the capabilities and the hardware driver
	 * hasn't already set timeout_clk we'll probably work okay using the
	 * max timeout, but still mention it.
	 */
	if (slot->timeout_clk == 0) {
		slot_printf(slot, "Hardware doesn't specify timeout clock "
		    "frequency, setting BROKEN_TIMEOUT quirk.\n");
		slot->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
	}

	slot->host.f_min = SDHCI_MIN_FREQ(slot->bus, slot);
	slot->host.f_max = slot->max_clk;
	slot->host.host_ocr = 0;
	if (caps & SDHCI_CAN_VDD_330)
		slot->host.host_ocr |= MMC_OCR_320_330 | MMC_OCR_330_340;
	if (caps & SDHCI_CAN_VDD_300)
		slot->host.host_ocr |= MMC_OCR_290_300 | MMC_OCR_300_310;
	/*
	 * 1.8V VDD is not supposed to be used for removable cards. Hardware
	 * prior to v3.0 had no way to indicate embedded slots, but did
	 * sometimes support 1.8v for non-removable devices.
	 */
	if ((caps & SDHCI_CAN_VDD_180) && (slot->version < SDHCI_SPEC_300 ||
	    (slot->opt & SDHCI_SLOT_EMBEDDED)))
		slot->host.host_ocr |= MMC_OCR_LOW_VOLTAGE;
	if (slot->host.host_ocr == 0) {
		slot_printf(slot, "Hardware doesn't report any "
		    "supported voltages.\n");
	}

	host_caps = slot->host.caps;
	host_caps |= MMC_CAP_4_BIT_DATA;
	if (caps & SDHCI_CAN_DO_8BITBUS)
		host_caps |= MMC_CAP_8_BIT_DATA;
	if (caps & SDHCI_CAN_DO_HISPD)
		host_caps |= MMC_CAP_HSPEED;
	if (slot->quirks & SDHCI_QUIRK_BOOT_NOACC)
		host_caps |= MMC_CAP_BOOT_NOACC;
	if (slot->quirks & SDHCI_QUIRK_WAIT_WHILE_BUSY)
		host_caps |= MMC_CAP_WAIT_WHILE_BUSY;

	/* Determine supported UHS-I and eMMC modes. */
	if (caps2 & (SDHCI_CAN_SDR50 | SDHCI_CAN_SDR104 | SDHCI_CAN_DDR50))
		host_caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
	if (caps2 & SDHCI_CAN_SDR104) {
		host_caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		if (!(slot->quirks & SDHCI_QUIRK_BROKEN_MMC_HS200))
			host_caps |= MMC_CAP_MMC_HS200;
	} else if (caps2 & SDHCI_CAN_SDR50)
		host_caps |= MMC_CAP_UHS_SDR50;
	if (caps2 & SDHCI_CAN_DDR50 &&
	    !(slot->quirks & SDHCI_QUIRK_BROKEN_UHS_DDR50))
		host_caps |= MMC_CAP_UHS_DDR50;
	if (slot->quirks & SDHCI_QUIRK_MMC_DDR52)
		host_caps |= MMC_CAP_MMC_DDR52;
	if (slot->quirks & SDHCI_QUIRK_CAPS_BIT63_FOR_MMC_HS400 &&
	    caps2 & SDHCI_CAN_MMC_HS400)
		host_caps |= MMC_CAP_MMC_HS400;
	if (slot->quirks & SDHCI_QUIRK_MMC_HS400_IF_CAN_SDR104 &&
	    caps2 & SDHCI_CAN_SDR104)
		host_caps |= MMC_CAP_MMC_HS400;

	/*
	 * Disable UHS-I and eMMC modes if the set_uhs_timing method is the
	 * default NULL implementation.
	 */
	kobj_desc = &sdhci_set_uhs_timing_desc;
	kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL,
	    kobj_desc);
	if (kobj_method == &kobj_desc->deflt)
		host_caps &= ~(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 |
		    MMC_CAP_MMC_DDR52 | MMC_CAP_MMC_HS200 | MMC_CAP_MMC_HS400);

#define	SDHCI_CAP_MODES_TUNING(caps2)	\
    (((caps2) & SDHCI_TUNE_SDR50 ? MMC_CAP_UHS_SDR50 : 0) |	\
    MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_MMC_HS200 |	\
    MMC_CAP_MMC_HS400)

	/*
	 * Disable UHS-I and eMMC modes that require (re-)tuning if either
	 * the tune or re-tune method is the default NULL implementation.
	 */
	kobj_desc = &mmcbr_tune_desc;
	kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL,
	    kobj_desc);
	if (kobj_method == &kobj_desc->deflt)
		goto no_tuning;
	kobj_desc = &mmcbr_retune_desc;
	kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL,
	    kobj_desc);
	if (kobj_method == &kobj_desc->deflt) {
no_tuning:
		host_caps &= ~(SDHCI_CAP_MODES_TUNING(caps2));
	}

	/* Allocate tuning structures and determine tuning parameters. */
	if (host_caps & SDHCI_CAP_MODES_TUNING(caps2)) {
		slot->opt |= SDHCI_TUNING_SUPPORTED;
		slot->tune_req = malloc(sizeof(*slot->tune_req), M_DEVBUF,
		    M_WAITOK);
		slot->tune_cmd = malloc(sizeof(*slot->tune_cmd), M_DEVBUF,
		    M_WAITOK);
		slot->tune_data = malloc(sizeof(*slot->tune_data), M_DEVBUF,
		    M_WAITOK);
		if (caps2 & SDHCI_TUNE_SDR50)
			slot->opt |= SDHCI_SDR50_NEEDS_TUNING;
		slot->retune_mode = (caps2 & SDHCI_RETUNE_MODES_MASK) >>
		    SDHCI_RETUNE_MODES_SHIFT;
		if (slot->retune_mode == SDHCI_RETUNE_MODE_1) {
			slot->retune_count = (caps2 & SDHCI_RETUNE_CNT_MASK) >>
			    SDHCI_RETUNE_CNT_SHIFT;
			if (slot->retune_count > 0xb) {
				slot_printf(slot, "Unknown re-tuning count "
				    "%x, using 1 sec\n", slot->retune_count);
				slot->retune_count = 1;
			} else if (slot->retune_count != 0)
				slot->retune_count =
				    1 << (slot->retune_count - 1);
		}
	}

#undef SDHCI_CAP_MODES_TUNING

	/* Determine supported VCCQ signaling levels. */
	host_caps |= MMC_CAP_SIGNALING_330;
	if (host_caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
	    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 |
	    MMC_CAP_MMC_DDR52_180 | MMC_CAP_MMC_HS200_180 |
	    MMC_CAP_MMC_HS400_180))
		host_caps |= MMC_CAP_SIGNALING_120 | MMC_CAP_SIGNALING_180;

	/*
	 * Disable 1.2 V and 1.8 V signaling if the switch_vccq method is the
	 * default NULL implementation. Disable 1.2 V support if it's the
	 * generic SDHCI implementation.
	 */
	kobj_desc = &mmcbr_switch_vccq_desc;
	kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL,
	    kobj_desc);
	if (kobj_method == &kobj_desc->deflt)
		host_caps &= ~(MMC_CAP_SIGNALING_120 | MMC_CAP_SIGNALING_180);
	else if (kobj_method->func == (kobjop_t)sdhci_generic_switch_vccq)
		host_caps &= ~MMC_CAP_SIGNALING_120;

	/* Determine supported driver types (type B is always mandatory). */
	if (caps2 & SDHCI_CAN_DRIVE_TYPE_A)
		host_caps |= MMC_CAP_DRIVER_TYPE_A;
	if (caps2 & SDHCI_CAN_DRIVE_TYPE_C)
		host_caps |= MMC_CAP_DRIVER_TYPE_C;
	if (caps2 & SDHCI_CAN_DRIVE_TYPE_D)
		host_caps |= MMC_CAP_DRIVER_TYPE_D;
	slot->host.caps = host_caps;

	/* Decide if we have usable DMA. */
	if (caps & SDHCI_CAN_DO_DMA)
		slot->opt |= SDHCI_HAVE_DMA;

	if (slot->quirks & SDHCI_QUIRK_BROKEN_DMA)
		slot->opt &= ~SDHCI_HAVE_DMA;
	if (slot->quirks & SDHCI_QUIRK_FORCE_DMA)
		slot->opt |= SDHCI_HAVE_DMA;
	if (slot->quirks & SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE)
		slot->opt |= SDHCI_NON_REMOVABLE;

	/*
	 * Use platform-provided transfer backend
	 * with PIO as a fallback mechanism
	 */
	if (slot->opt & SDHCI_PLATFORM_TRANSFER)
		slot->opt &= ~SDHCI_HAVE_DMA;

	if (slot->opt & SDHCI_HAVE_DMA) {
		err = sdhci_dma_alloc(slot, caps);
		if (err != 0) {
			if (slot->opt & SDHCI_TUNING_SUPPORTED) {
				free(slot->tune_req, M_DEVBUF);
				free(slot->tune_cmd, M_DEVBUF);
				free(slot->tune_data, M_DEVBUF);
			}
			SDHCI_LOCK_DESTROY(slot);
			return (err);
		}
	}

	if (bootverbose || sdhci_debug) {
		sdhci_dumpcaps(slot);
		sdhci_dumpregs(slot);
	}

	slot->timeout = 10;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(slot->bus),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(slot->bus)), OID_AUTO,
	    "timeout", CTLFLAG_RWTUN, &slot->timeout, 0,
	    "Maximum timeout for SDHCI transfers (in secs)");
	TASK_INIT(&slot->card_task, 0, sdhci_card_task, slot);
	TIMEOUT_TASK_INIT(taskqueue_swi_giant, &slot->card_delayed_task, 0,
	    sdhci_card_task, slot);
	callout_init(&slot->card_poll_callout, 1);
	callout_init_mtx(&slot->timeout_callout, &slot->mtx, 0);
	callout_init_mtx(&slot->retune_callout, &slot->mtx, 0);

	if ((slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) &&
	    !(slot->opt & SDHCI_NON_REMOVABLE)) {
		callout_reset(&slot->card_poll_callout,
		    SDHCI_CARD_PRESENT_TICKS, sdhci_card_poll, slot);
	}

	sdhci_init(slot);

	snprintf(node_name, sizeof(node_name), "slot%d", slot->num);

	node_oid = SYSCTL_ADD_NODE(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, node_name, CTLFLAG_RW, 0, "slot specific node");

	node_oid = SYSCTL_ADD_NODE(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(node_oid), OID_AUTO, "debug", CTLFLAG_RW, 0,
	    "Debugging node");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(node_oid),
	    OID_AUTO, "dumpregs", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    slot, 0, &sdhci_syctl_dumpregs,
	    "A", "Dump SDHCI registers");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(node_oid),
	    OID_AUTO, "dumpcaps", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    slot, 0, &sdhci_syctl_dumpcaps,
	    "A", "Dump SDHCI capabilities");

	return (0);
}

#ifndef MMCCAM
void
sdhci_start_slot(struct sdhci_slot *slot)
{

	sdhci_card_task(slot, 0);
}
#endif

int
sdhci_cleanup_slot(struct sdhci_slot *slot)
{
	device_t d;

	callout_drain(&slot->timeout_callout);
	callout_drain(&slot->card_poll_callout);
	callout_drain(&slot->retune_callout);
	taskqueue_drain(taskqueue_swi_giant, &slot->card_task);
	taskqueue_drain_timeout(taskqueue_swi_giant, &slot->card_delayed_task);

	SDHCI_LOCK(slot);
	d = slot->dev;
	slot->dev = NULL;
	SDHCI_UNLOCK(slot);
	if (d != NULL)
		device_delete_child(slot->bus, d);

	SDHCI_LOCK(slot);
	sdhci_reset(slot, SDHCI_RESET_ALL);
	SDHCI_UNLOCK(slot);
	if (slot->opt & SDHCI_HAVE_DMA)
		sdhci_dma_free(slot);
	if (slot->opt & SDHCI_TUNING_SUPPORTED) {
		free(slot->tune_req, M_DEVBUF);
		free(slot->tune_cmd, M_DEVBUF);
		free(slot->tune_data, M_DEVBUF);
	}

	SDHCI_LOCK_DESTROY(slot);

	return (0);
}

int
sdhci_generic_suspend(struct sdhci_slot *slot)
{

	/*
	 * We expect the MMC layer to issue initial tuning after resume.
	 * Otherwise, we'd need to indicate re-tuning including circuit reset
	 * being required at least for re-tuning modes 1 and 2 ourselves.
	 */
	callout_drain(&slot->retune_callout);
	SDHCI_LOCK(slot);
	slot->opt &= ~SDHCI_TUNING_ENABLED;
	sdhci_reset(slot, SDHCI_RESET_ALL);
	SDHCI_UNLOCK(slot);

	return (0);
}

int
sdhci_generic_resume(struct sdhci_slot *slot)
{

	SDHCI_LOCK(slot);
	sdhci_init(slot);
	SDHCI_UNLOCK(slot);

	return (0);
}

uint32_t
sdhci_generic_min_freq(device_t brdev __unused, struct sdhci_slot *slot)
{

	if (slot->version >= SDHCI_SPEC_300)
		return (slot->max_clk / SDHCI_300_MAX_DIVIDER);
	else
		return (slot->max_clk / SDHCI_200_MAX_DIVIDER);
}

bool
sdhci_generic_get_card_present(device_t brdev __unused, struct sdhci_slot *slot)
{

	if (slot->opt & SDHCI_NON_REMOVABLE)
		return true;

	return (RD4(slot, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

void
sdhci_generic_set_uhs_timing(device_t brdev __unused, struct sdhci_slot *slot)
{
	const struct mmc_ios *ios;
	uint16_t hostctrl2;

	if (slot->version < SDHCI_SPEC_300)
		return;

	SDHCI_ASSERT_LOCKED(slot);
	ios = &slot->host.ios;
	sdhci_set_clock(slot, 0);
	hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2);
	hostctrl2 &= ~SDHCI_CTRL2_UHS_MASK;
	if (ios->clock > SD_SDR50_MAX) {
		if (ios->timing == bus_timing_mmc_hs400 ||
		    ios->timing == bus_timing_mmc_hs400es)
			hostctrl2 |= SDHCI_CTRL2_MMC_HS400;
		else
			hostctrl2 |= SDHCI_CTRL2_UHS_SDR104;
	}
	else if (ios->clock > SD_SDR25_MAX)
		hostctrl2 |= SDHCI_CTRL2_UHS_SDR50;
	else if (ios->clock > SD_SDR12_MAX) {
		if (ios->timing == bus_timing_uhs_ddr50 ||
		    ios->timing == bus_timing_mmc_ddr52)
			hostctrl2 |= SDHCI_CTRL2_UHS_DDR50;
		else
			hostctrl2 |= SDHCI_CTRL2_UHS_SDR25;
	} else if (ios->clock > SD_MMC_CARD_ID_FREQUENCY)
		hostctrl2 |= SDHCI_CTRL2_UHS_SDR12;
	WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2);
	sdhci_set_clock(slot, ios->clock);
}

int
sdhci_generic_update_ios(device_t brdev, device_t reqdev)
{
	struct sdhci_slot *slot = device_get_ivars(reqdev);
	struct mmc_ios *ios = &slot->host.ios;

	SDHCI_LOCK(slot);
	/* Do full reset on bus power down to clear from any state. */
	if (ios->power_mode == power_off) {
		WR4(slot, SDHCI_SIGNAL_ENABLE, 0);
		sdhci_init(slot);
	}
	/* Configure the bus. */
	sdhci_set_clock(slot, ios->clock);
	sdhci_set_power(slot, (ios->power_mode == power_off) ? 0 : ios->vdd);
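	/*
	 * Descriptive comment added for clarity: select the bus width bits;
	 * slot->hostctrl is written back below, once the high-speed bit has
	 * also been decided.
	 */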
	if (ios->bus_width == bus_width_8) {
		slot->hostctrl |= SDHCI_CTRL_8BITBUS;
		slot->hostctrl &= ~SDHCI_CTRL_4BITBUS;
	} else if (ios->bus_width == bus_width_4) {
		slot->hostctrl &= ~SDHCI_CTRL_8BITBUS;
		slot->hostctrl |= SDHCI_CTRL_4BITBUS;
	} else if (ios->bus_width == bus_width_1) {
		slot->hostctrl &= ~SDHCI_CTRL_8BITBUS;
		slot->hostctrl &= ~SDHCI_CTRL_4BITBUS;
	} else {
		panic("Invalid bus width: %d", ios->bus_width);
	}
	if (ios->clock > SD_SDR12_MAX &&
	    !(slot->quirks & SDHCI_QUIRK_DONT_SET_HISPD_BIT))
		slot->hostctrl |= SDHCI_CTRL_HISPD;
	else
		slot->hostctrl &= ~SDHCI_CTRL_HISPD;
	WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl);
	SDHCI_SET_UHS_TIMING(brdev, slot);
	/* Some controllers like reset after bus changes. */
	if (slot->quirks & SDHCI_QUIRK_RESET_ON_IOS)
		sdhci_reset(slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	SDHCI_UNLOCK(slot);
	return (0);
}

int
sdhci_generic_switch_vccq(device_t brdev __unused, device_t reqdev)
{
	struct sdhci_slot *slot = device_get_ivars(reqdev);
	enum mmc_vccq vccq;
	int err;
	uint16_t hostctrl2;

	if (slot->version < SDHCI_SPEC_300)
		return (0);

	err = 0;
	vccq = slot->host.ios.vccq;
	SDHCI_LOCK(slot);
	sdhci_set_clock(slot, 0);
	hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2);
	switch (vccq) {
	case vccq_330:
		if (!(hostctrl2 & SDHCI_CTRL2_S18_ENABLE))
			goto done;
		hostctrl2 &= ~SDHCI_CTRL2_S18_ENABLE;
		WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2);
		DELAY(5000);
		hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2);
		if (!(hostctrl2 & SDHCI_CTRL2_S18_ENABLE))
			goto done;
		err = EAGAIN;
		break;
	case vccq_180:
		if (!(slot->host.caps & MMC_CAP_SIGNALING_180)) {
			err = EINVAL;
			goto done;
		}
		if (hostctrl2 & SDHCI_CTRL2_S18_ENABLE)
			goto done;
		hostctrl2 |= SDHCI_CTRL2_S18_ENABLE;
		WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2);
		DELAY(5000);
		hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2);
		if (hostctrl2 & SDHCI_CTRL2_S18_ENABLE)
			goto done;
		err = EAGAIN;
		break;
	default:
		slot_printf(slot,
		    "Attempt to set unsupported signaling voltage\n");
		err = EINVAL;
		break;
	}
done:
	sdhci_set_clock(slot, slot->host.ios.clock);
	SDHCI_UNLOCK(slot);
	return (err);
}

int
sdhci_generic_tune(device_t brdev __unused, device_t reqdev, bool hs400)
{
	struct sdhci_slot *slot = device_get_ivars(reqdev);
	const struct mmc_ios *ios = &slot->host.ios;
	struct mmc_command *tune_cmd;
	struct mmc_data *tune_data;
	uint32_t opcode;
	int err;

	if (!(slot->opt & SDHCI_TUNING_SUPPORTED))
		return (0);

	slot->retune_ticks = slot->retune_count * hz;
	opcode = MMC_SEND_TUNING_BLOCK;
	SDHCI_LOCK(slot);
	switch (ios->timing) {
	case bus_timing_mmc_hs400:
		slot_printf(slot, "HS400 must be tuned in HS200 mode\n");
		SDHCI_UNLOCK(slot);
		return (EINVAL);
	case bus_timing_mmc_hs200:
		/*
		 * In HS400 mode, controllers use the data strobe line to
		 * latch data from the devices so periodic re-tuning isn't
		 * expected to be required.
		 */
		if (hs400)
			slot->retune_ticks = 0;
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
		break;
	case bus_timing_uhs_ddr50:
	case bus_timing_uhs_sdr104:
		break;
	case bus_timing_uhs_sdr50:
		if (slot->opt & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */
	default:
		SDHCI_UNLOCK(slot);
		return (0);
	}

	tune_cmd = slot->tune_cmd;
	memset(tune_cmd, 0, sizeof(*tune_cmd));
	tune_cmd->opcode = opcode;
	tune_cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	tune_data = tune_cmd->data = slot->tune_data;
	memset(tune_data, 0, sizeof(*tune_data));
	tune_data->len = (opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
	    ios->bus_width == bus_width_8) ? MMC_TUNING_LEN_HS200 :
	    MMC_TUNING_LEN;
	tune_data->flags = MMC_DATA_READ;
	tune_data->mrq = tune_cmd->mrq = slot->tune_req;

	slot->opt &= ~SDHCI_TUNING_ENABLED;
	err = sdhci_exec_tuning(slot, true);
	if (err == 0) {
		slot->opt |= SDHCI_TUNING_ENABLED;
		slot->intmask |= sdhci_tuning_intmask(slot);
		WR4(slot, SDHCI_INT_ENABLE, slot->intmask);
		WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
		if (slot->retune_ticks) {
			callout_reset(&slot->retune_callout, slot->retune_ticks,
			    sdhci_retune, slot);
		}
	}
	SDHCI_UNLOCK(slot);
	return (err);
}

int
sdhci_generic_retune(device_t brdev __unused, device_t reqdev, bool reset)
{
	struct sdhci_slot *slot = device_get_ivars(reqdev);
	int err;

	if (!(slot->opt & SDHCI_TUNING_ENABLED))
		return (0);

	/* HS400 must be tuned in HS200 mode. */
	if (slot->host.ios.timing == bus_timing_mmc_hs400)
		return (EINVAL);

	SDHCI_LOCK(slot);
	err = sdhci_exec_tuning(slot, reset);
	/*
	 * There are two ways sdhci_exec_tuning() can fail:
	 * EBUSY should not actually happen when requests are only issued
	 * with the host properly acquired, and
	 * EIO re-tuning failed (but it did work initially).
	 *
	 * In both cases, we should retry at a later point if periodic
	 * re-tuning is enabled. Note that due to slot->retune_req not being
	 * cleared in these failure cases, the MMC layer should trigger
	 * another attempt at re-tuning with the next request anyway, though.
	 */
	if (slot->retune_ticks) {
		callout_reset(&slot->retune_callout, slot->retune_ticks,
		    sdhci_retune, slot);
	}
	SDHCI_UNLOCK(slot);
	return (err);
}

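/*
 * Descriptive comment added for clarity: run the tuning procedure -- issue
 * up to MMC_TUNING_MAX tuning commands with DMA disabled and only the
 * buffer-read-ready interrupt enabled, then check whether the controller
 * settled on a sampling clock. Called with the slot lock held.
 */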
static int
sdhci_exec_tuning(struct sdhci_slot *slot, bool reset)
{
	struct mmc_request *tune_req;
	struct mmc_command *tune_cmd;
	int i;
	uint32_t intmask;
	uint16_t hostctrl2;
	u_char opt;

	SDHCI_ASSERT_LOCKED(slot);
	if (slot->req != NULL)
		return (EBUSY);

	/* Tuning doesn't work with DMA enabled. */
	opt = slot->opt;
	slot->opt = opt & ~SDHCI_HAVE_DMA;

	/*
	 * Ensure that as documented, SDHCI_INT_DATA_AVAIL is the only
	 * kind of interrupt we receive in response to a tuning request.
	 */
	intmask = slot->intmask;
	slot->intmask = SDHCI_INT_DATA_AVAIL;
	WR4(slot, SDHCI_INT_ENABLE, SDHCI_INT_DATA_AVAIL);
	WR4(slot, SDHCI_SIGNAL_ENABLE, SDHCI_INT_DATA_AVAIL);

	hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2);
	if (reset)
		hostctrl2 &= ~SDHCI_CTRL2_SAMPLING_CLOCK;
	else
		hostctrl2 |= SDHCI_CTRL2_SAMPLING_CLOCK;
	WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2 | SDHCI_CTRL2_EXEC_TUNING);

	tune_req = slot->tune_req;
	tune_cmd = slot->tune_cmd;
	for (i = 0; i < MMC_TUNING_MAX; i++) {
		memset(tune_req, 0, sizeof(*tune_req));
		tune_req->cmd = tune_cmd;
		tune_req->done = sdhci_req_wakeup;
		tune_req->done_data = slot;
		slot->req = tune_req;
		slot->flags = 0;
		sdhci_start(slot);
		while (!(tune_req->flags & MMC_REQ_DONE))
			msleep(tune_req, &slot->mtx, 0, "sdhciet", 0);
		if (!(tune_req->flags & MMC_TUNE_DONE))
			break;
		hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2);
		if (!(hostctrl2 & SDHCI_CTRL2_EXEC_TUNING))
			break;
		if (tune_cmd->opcode == MMC_SEND_TUNING_BLOCK)
			DELAY(1000);
	}

	/*
	 * Restore DMA usage and interrupts.
	 * Note that the interrupt aggregation code might have cleared
	 * SDHCI_INT_DMA_END and/or SDHCI_INT_RESPONSE in slot->intmask
	 * and SDHCI_SIGNAL_ENABLE respectively so ensure SDHCI_INT_ENABLE
	 * doesn't lose these.
	 */
	slot->opt = opt;
	slot->intmask = intmask;
	WR4(slot, SDHCI_INT_ENABLE, intmask | SDHCI_INT_DMA_END |
	    SDHCI_INT_RESPONSE);
	WR4(slot, SDHCI_SIGNAL_ENABLE, intmask);

	if ((hostctrl2 & (SDHCI_CTRL2_EXEC_TUNING |
	    SDHCI_CTRL2_SAMPLING_CLOCK)) == SDHCI_CTRL2_SAMPLING_CLOCK) {
		slot->retune_req = 0;
		return (0);
	}

	slot_printf(slot, "Tuning failed, using fixed sampling clock\n");
	WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2 & ~(SDHCI_CTRL2_EXEC_TUNING |
	    SDHCI_CTRL2_SAMPLING_CLOCK));
	sdhci_reset(slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	return (EIO);
}

static void
sdhci_retune(void *arg)
{
	struct sdhci_slot *slot = arg;

	slot->retune_req |= SDHCI_RETUNE_REQ_NEEDED;
}

#ifdef MMCCAM
static void
sdhci_req_done(struct sdhci_slot *slot)
{
	union ccb *ccb;

	if (__predict_false(sdhci_debug > 1))
		slot_printf(slot, "%s\n", __func__);
	if (slot->ccb != NULL && slot->curcmd != NULL) {
		callout_stop(&slot->timeout_callout);
		ccb = slot->ccb;
		slot->ccb = NULL;
		slot->curcmd = NULL;

		/* Tell CAM the request is finished */
		struct ccb_mmcio *mmcio;
		mmcio = &ccb->mmcio;

		ccb->ccb_h.status =
		    (mmcio->cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
		xpt_done(ccb);
	}
}
#else
static void
sdhci_req_done(struct sdhci_slot *slot)
{
	struct mmc_request *req;

	if (slot->req != NULL && slot->curcmd != NULL) {
		callout_stop(&slot->timeout_callout);
		req = slot->req;
		slot->req = NULL;
		slot->curcmd = NULL;
		req->done(req);
	}
}
#endif

static void
sdhci_req_wakeup(struct mmc_request *req)
{
	struct sdhci_slot *slot;

	slot = req->done_data;
	req->flags |= MMC_REQ_DONE;
	wakeup(req);
}

static void
sdhci_timeout(void *arg)
{
	struct sdhci_slot *slot = arg;

	if (slot->curcmd != NULL) {
		slot_printf(slot, "Controller timeout\n");
		sdhci_dumpregs(slot);
		sdhci_reset(slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		slot->curcmd->error = MMC_ERR_TIMEOUT;
		sdhci_req_done(slot);
	} else {
		slot_printf(slot, "Spurious timeout - no active command\n");
	}
}

static void
sdhci_set_transfer_mode(struct sdhci_slot *slot, const struct mmc_data *data)
{
	uint16_t mode;

	if (data == NULL)
		return;

	mode = SDHCI_TRNS_BLK_CNT_EN;
	if (data->len > 512 || data->block_count > 1) {
		mode |= SDHCI_TRNS_MULTI;
		if (data->block_count == 0 && __predict_true(
#ifdef MMCCAM
		    slot->ccb->mmcio.stop.opcode == MMC_STOP_TRANSMISSION &&
#else
		    slot->req->stop != NULL &&
#endif
		    !(slot->quirks & SDHCI_QUIRK_BROKEN_AUTO_STOP)))
			mode |= SDHCI_TRNS_ACMD12;
	}
	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (slot->flags & SDHCI_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	WR2(slot, SDHCI_TRANSFER_MODE, mode);
}

static void
sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd)
{
	int flags, timeout;
	uint32_t mask;

	slot->curcmd = cmd;
	slot->cmd_done = 0;

	cmd->error = MMC_ERR_NONE;

	/* This flag combination is not supported by the controller. */
	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		slot_printf(slot, "Unsupported response type!\n");
		cmd->error = MMC_ERR_FAILED;
		sdhci_req_done(slot);
		return;
	}

	/*
	 * Do not issue command if there is no card, clock or power.
	 * Controller will not detect timeout without clock active.
	 */
	if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot) ||
	    slot->power == 0 ||
	    slot->clock == 0) {
		slot_printf(slot,
		    "Cannot issue a command (power=%d clock=%d)\n",
		    slot->power, slot->clock);
		cmd->error = MMC_ERR_FAILED;
		sdhci_req_done(slot);
		return;
	}
	/* Always wait for free CMD bus. */
	mask = SDHCI_CMD_INHIBIT;
	/* Wait for free DAT if we have data or busy signal. */
	if (cmd->data != NULL || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DAT_INHIBIT;
	/*
	 * We shouldn't wait for DAT for stop commands or CMD19/CMD21. Note
	 * that these latter are also special in that SDHCI_CMD_DATA should
	 * be set below but no actual data is ever read from the controller.
	 */
#ifdef MMCCAM
	if (cmd == &slot->ccb->mmcio.stop ||
#else
	if (cmd == slot->req->stop ||
#endif
	    __predict_false(cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))
		mask &= ~SDHCI_DAT_INHIBIT;
	/*
	 * Wait for bus no more than 250 ms. Typically there will be no wait
Typically there will be no wait 1787 * here at all, but when writing a crash dump we may be bypassing the 1788 * host platform's interrupt handler, and in some cases that handler 1789 * may be working around hardware quirks such as not respecting r1b 1790 * busy indications. In those cases, this wait-loop serves the purpose 1791 * of waiting for the prior command and data transfers to be done, and 1792 * SD cards are allowed to take up to 250 ms for write and erase ops. 1793 * (It's usually more like 20-30 ms in the real world.) 1794 */ 1795 timeout = 250; 1796 while (mask & RD4(slot, SDHCI_PRESENT_STATE)) { 1797 if (timeout == 0) { 1798 slot_printf(slot, "Controller never released " 1799 "inhibit bit(s).\n"); 1800 sdhci_dumpregs(slot); 1801 cmd->error = MMC_ERR_FAILED; 1802 sdhci_req_done(slot); 1803 return; 1804 } 1805 timeout--; 1806 DELAY(1000); 1807 } 1808 1809 /* Prepare command flags. */ 1810 if (!(cmd->flags & MMC_RSP_PRESENT)) 1811 flags = SDHCI_CMD_RESP_NONE; 1812 else if (cmd->flags & MMC_RSP_136) 1813 flags = SDHCI_CMD_RESP_LONG; 1814 else if (cmd->flags & MMC_RSP_BUSY) 1815 flags = SDHCI_CMD_RESP_SHORT_BUSY; 1816 else 1817 flags = SDHCI_CMD_RESP_SHORT; 1818 if (cmd->flags & MMC_RSP_CRC) 1819 flags |= SDHCI_CMD_CRC; 1820 if (cmd->flags & MMC_RSP_OPCODE) 1821 flags |= SDHCI_CMD_INDEX; 1822 if (cmd->data != NULL) 1823 flags |= SDHCI_CMD_DATA; 1824 if (cmd->opcode == MMC_STOP_TRANSMISSION) 1825 flags |= SDHCI_CMD_TYPE_ABORT; 1826 /* Prepare data. */ 1827 sdhci_start_data(slot, cmd->data); 1828 /* 1829 * Interrupt aggregation: To reduce the total number of interrupts, 1830 * group the response interrupt with the data interrupt when possible. 1831 * If there is going to be a data interrupt, mask the response one. 1832 */ 1833 if (slot->data_done == 0) { 1834 WR4(slot, SDHCI_SIGNAL_ENABLE, 1835 slot->intmask &= ~SDHCI_INT_RESPONSE); 1836 } 1837 /* Set command argument. */ 1838 WR4(slot, SDHCI_ARGUMENT, cmd->arg); 1839 /* Set data transfer mode. */ 1840 sdhci_set_transfer_mode(slot, cmd->data); 1841 if (__predict_false(sdhci_debug > 1)) 1842 slot_printf(slot, "Starting command opcode %#04x flags %#04x\n", 1843 cmd->opcode, flags); 1844 1845 /* Start command. */ 1846 WR2(slot, SDHCI_COMMAND_FLAGS, (cmd->opcode << 8) | (flags & 0xff)); 1847 /* Start timeout callout. */ 1848 callout_reset(&slot->timeout_callout, slot->timeout * hz, 1849 sdhci_timeout, slot); 1850 } 1851 1852 static void 1853 sdhci_finish_command(struct sdhci_slot *slot) 1854 { 1855 int i; 1856 uint32_t val; 1857 uint8_t extra; 1858 1859 if (__predict_false(sdhci_debug > 1)) 1860 slot_printf(slot, "%s: called, err %d flags %#04x\n", 1861 __func__, slot->curcmd->error, slot->curcmd->flags); 1862 slot->cmd_done = 1; 1863 /* 1864 * Interrupt aggregation: Restore the command interrupt. 1865 * This is the main restore point for the case where the command 1866 * interrupt happened first. 1867 */ 1868 if (__predict_true(slot->curcmd->opcode != MMC_SEND_TUNING_BLOCK && 1869 slot->curcmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)) 1870 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask |= 1871 SDHCI_INT_RESPONSE); 1872 /* In case of error, reset the host and return. */ 1873 if (slot->curcmd->error) { 1874 if (slot->curcmd->error == MMC_ERR_BADCRC) 1875 slot->retune_req |= SDHCI_RETUNE_REQ_RESET; 1876 sdhci_reset(slot, SDHCI_RESET_CMD); 1877 sdhci_reset(slot, SDHCI_RESET_DATA); 1878 sdhci_start(slot); 1879 return; 1880 } 1881 /* If the command has a response, fetch it.
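For 136-bit (R2) responses the controller strips the CRC byte, so unless SDHCI_QUIRK_DONT_SHIFT_RESPONSE is set each 32-bit word is shifted left by one byte, with the top byte of the previously read word carried into the low byte.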
*/ 1882 if (slot->curcmd->flags & MMC_RSP_PRESENT) { 1883 if (slot->curcmd->flags & MMC_RSP_136) { 1884 /* The CRC is stripped, so we need a one-byte shift. */ 1885 extra = 0; 1886 for (i = 0; i < 4; i++) { 1887 val = RD4(slot, SDHCI_RESPONSE + i * 4); 1888 if (slot->quirks & 1889 SDHCI_QUIRK_DONT_SHIFT_RESPONSE) 1890 slot->curcmd->resp[3 - i] = val; 1891 else { 1892 slot->curcmd->resp[3 - i] = 1893 (val << 8) | extra; 1894 extra = val >> 24; 1895 } 1896 } 1897 } else 1898 slot->curcmd->resp[0] = RD4(slot, SDHCI_RESPONSE); 1899 } 1900 if (__predict_false(sdhci_debug > 1)) 1901 slot_printf(slot, "Resp: %#04x %#04x %#04x %#04x\n", 1902 slot->curcmd->resp[0], slot->curcmd->resp[1], 1903 slot->curcmd->resp[2], slot->curcmd->resp[3]); 1904 1905 /* If the data phase is already done, finish. */ 1906 if (slot->data_done) 1907 sdhci_start(slot); 1908 } 1909 1910 static void 1911 sdhci_start_data(struct sdhci_slot *slot, const struct mmc_data *data) 1912 { 1913 uint32_t blkcnt, blksz, current_timeout, sdma_bbufsz, target_timeout; 1914 uint8_t div; 1915 1916 if (data == NULL && (slot->curcmd->flags & MMC_RSP_BUSY) == 0) { 1917 slot->data_done = 1; 1918 return; 1919 } 1920 1921 slot->data_done = 0; 1922 1923 /* Calculate and set data timeout. */ 1924 /* XXX: We should get this from the mmc layer; for now assume 1 s. */ 1925 if (slot->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) { 1926 div = 0xE; 1927 } else { 1928 target_timeout = 1000000; 1929 div = 0; 1930 current_timeout = (1 << 13) * 1000 / slot->timeout_clk; 1931 while (current_timeout < target_timeout && div < 0xE) { 1932 ++div; 1933 current_timeout <<= 1; 1934 } 1935 /* Compensate for an off-by-one error in the CaFe chip. */ 1936 if (div < 0xE && 1937 (slot->quirks & SDHCI_QUIRK_INCR_TIMEOUT_CONTROL)) { 1938 ++div; 1939 } 1940 } 1941 WR1(slot, SDHCI_TIMEOUT_CONTROL, div); 1942 1943 if (data == NULL) 1944 return; 1945 1946 /* Use DMA if possible. */ 1947 if ((slot->opt & SDHCI_HAVE_DMA)) 1948 slot->flags |= SDHCI_USE_DMA; 1949 /* If data is small, broken DMA may return zeroes instead of data. */ 1950 if ((slot->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) && 1951 (data->len <= 512)) 1952 slot->flags &= ~SDHCI_USE_DMA; 1953 /* Some controllers require transfer lengths that are a multiple of 4. */ 1954 if ((slot->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) && 1955 ((data->len) & 0x3)) 1956 slot->flags &= ~SDHCI_USE_DMA; 1957 /* Load DMA buffer. */ 1958 if (slot->flags & SDHCI_USE_DMA) { 1959 sdma_bbufsz = slot->sdma_bbufsz; 1960 if (data->flags & MMC_DATA_READ) 1961 bus_dmamap_sync(slot->dmatag, slot->dmamap, 1962 BUS_DMASYNC_PREREAD); 1963 else { 1964 memcpy(slot->dmamem, data->data, ulmin(data->len, 1965 sdma_bbufsz)); 1966 bus_dmamap_sync(slot->dmatag, slot->dmamap, 1967 BUS_DMASYNC_PREWRITE); 1968 } 1969 WR4(slot, SDHCI_DMA_ADDRESS, slot->paddr); 1970 /* 1971 * Interrupt aggregation: Mask border interrupt for the last 1972 * bounce buffer and unmask otherwise. 1973 */ 1974 if (data->len == sdma_bbufsz) 1975 slot->intmask &= ~SDHCI_INT_DMA_END; 1976 else 1977 slot->intmask |= SDHCI_INT_DMA_END; 1978 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); 1979 } 1980 /* Current data offset for both PIO and DMA. */ 1981 slot->offset = 0; 1982 #ifdef MMCCAM 1983 if (data->flags & MMC_DATA_BLOCK_SIZE) { 1984 /* Set block size and request border interrupts on the SDMA boundary.
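SDHCI_MAKE_BLKSZ() packs the SDMA buffer boundary code into bits 14:12 of the block size register alongside the 12-bit block size, as defined by the SDHCI specification.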
*/ 1985 blksz = SDHCI_MAKE_BLKSZ(slot->sdma_boundary, data->block_size); 1986 blkcnt = data->block_count; 1987 if (__predict_false(sdhci_debug > 0)) 1988 slot_printf(slot, "SDIO Custom block params: blksz: " 1989 "%#10x, blk cnt: %#10x\n", blksz, blkcnt); 1990 } else 1991 #endif 1992 { 1993 /* Set block size and request border interrupts on the SDMA boundary. */ 1994 blksz = SDHCI_MAKE_BLKSZ(slot->sdma_boundary, ulmin(data->len, 512)); 1995 blkcnt = howmany(data->len, 512); 1996 } 1997 1998 WR2(slot, SDHCI_BLOCK_SIZE, blksz); 1999 WR2(slot, SDHCI_BLOCK_COUNT, blkcnt); 2000 if (__predict_false(sdhci_debug > 1)) 2001 slot_printf(slot, "Blk size: 0x%08x | Blk cnt: 0x%08x\n", 2002 blksz, blkcnt); 2003 } 2004 2005 void 2006 sdhci_finish_data(struct sdhci_slot *slot) 2007 { 2008 struct mmc_data *data = slot->curcmd->data; 2009 size_t left; 2010 2011 /* Interrupt aggregation: Restore command interrupt. 2012 * Auxiliary restore point for the case when data interrupt 2013 * happened first. */ 2014 if (!slot->cmd_done) { 2015 WR4(slot, SDHCI_SIGNAL_ENABLE, 2016 slot->intmask |= SDHCI_INT_RESPONSE); 2017 } 2018 /* Unload rest of data from DMA buffer. */ 2019 if (!slot->data_done && (slot->flags & SDHCI_USE_DMA) && 2020 slot->curcmd->data != NULL) { 2021 if (data->flags & MMC_DATA_READ) { 2022 left = data->len - slot->offset; 2023 bus_dmamap_sync(slot->dmatag, slot->dmamap, 2024 BUS_DMASYNC_POSTREAD); 2025 memcpy((u_char*)data->data + slot->offset, slot->dmamem, 2026 ulmin(left, slot->sdma_bbufsz)); 2027 } else 2028 bus_dmamap_sync(slot->dmatag, slot->dmamap, 2029 BUS_DMASYNC_POSTWRITE); 2030 } 2031 slot->data_done = 1; 2032 /* If there was error - reset the host. */ 2033 if (slot->curcmd->error) { 2034 if (slot->curcmd->error == MMC_ERR_BADCRC) 2035 slot->retune_req |= SDHCI_RETUNE_REQ_RESET; 2036 sdhci_reset(slot, SDHCI_RESET_CMD); 2037 sdhci_reset(slot, SDHCI_RESET_DATA); 2038 sdhci_start(slot); 2039 return; 2040 } 2041 /* If we already have command response - finish. */ 2042 if (slot->cmd_done) 2043 sdhci_start(slot); 2044 } 2045 2046 #ifdef MMCCAM 2047 static void 2048 sdhci_start(struct sdhci_slot *slot) 2049 { 2050 union ccb *ccb; 2051 struct ccb_mmcio *mmcio; 2052 2053 ccb = slot->ccb; 2054 if (ccb == NULL) 2055 return; 2056 2057 mmcio = &ccb->mmcio; 2058 if (!(slot->flags & CMD_STARTED)) { 2059 slot->flags |= CMD_STARTED; 2060 sdhci_start_command(slot, &mmcio->cmd); 2061 return; 2062 } 2063 2064 /* 2065 * Old stack doesn't use this! 2066 * Enabling this code causes significant performance degradation 2067 * and IRQ storms on BBB, Wandboard behaves fine. 2068 * Not using this code does no harm... 
2069 if (!(slot->flags & STOP_STARTED) && mmcio->stop.opcode != 0) { 2070 slot->flags |= STOP_STARTED; 2071 sdhci_start_command(slot, &mmcio->stop); 2072 return; 2073 } 2074 */ 2075 if (__predict_false(sdhci_debug > 1)) 2076 slot_printf(slot, "result: %d\n", mmcio->cmd.error); 2077 if (mmcio->cmd.error == 0 && 2078 (slot->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) { 2079 sdhci_reset(slot, SDHCI_RESET_CMD); 2080 sdhci_reset(slot, SDHCI_RESET_DATA); 2081 } 2082 2083 sdhci_req_done(slot); 2084 } 2085 #else 2086 static void 2087 sdhci_start(struct sdhci_slot *slot) 2088 { 2089 const struct mmc_request *req; 2090 2091 req = slot->req; 2092 if (req == NULL) 2093 return; 2094 2095 if (!(slot->flags & CMD_STARTED)) { 2096 slot->flags |= CMD_STARTED; 2097 sdhci_start_command(slot, req->cmd); 2098 return; 2099 } 2100 if ((slot->quirks & SDHCI_QUIRK_BROKEN_AUTO_STOP) && 2101 !(slot->flags & STOP_STARTED) && req->stop) { 2102 slot->flags |= STOP_STARTED; 2103 sdhci_start_command(slot, req->stop); 2104 return; 2105 } 2106 if (__predict_false(sdhci_debug > 1)) 2107 slot_printf(slot, "result: %d\n", req->cmd->error); 2108 if (!req->cmd->error && 2109 ((slot->curcmd == req->stop && 2110 (slot->quirks & SDHCI_QUIRK_BROKEN_AUTO_STOP)) || 2111 (slot->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { 2112 sdhci_reset(slot, SDHCI_RESET_CMD); 2113 sdhci_reset(slot, SDHCI_RESET_DATA); 2114 } 2115 2116 sdhci_req_done(slot); 2117 } 2118 #endif 2119 2120 int 2121 sdhci_generic_request(device_t brdev __unused, device_t reqdev, 2122 struct mmc_request *req) 2123 { 2124 struct sdhci_slot *slot = device_get_ivars(reqdev); 2125 2126 SDHCI_LOCK(slot); 2127 if (slot->req != NULL) { 2128 SDHCI_UNLOCK(slot); 2129 return (EBUSY); 2130 } 2131 if (__predict_false(sdhci_debug > 1)) { 2132 slot_printf(slot, 2133 "CMD%u arg %#x flags %#x dlen %u dflags %#x\n", 2134 req->cmd->opcode, req->cmd->arg, req->cmd->flags, 2135 (req->cmd->data)?(u_int)req->cmd->data->len:0, 2136 (req->cmd->data)?req->cmd->data->flags:0); 2137 } 2138 slot->req = req; 2139 slot->flags = 0; 2140 sdhci_start(slot); 2141 SDHCI_UNLOCK(slot); 2142 if (dumping) { 2143 while (slot->req != NULL) { 2144 sdhci_generic_intr(slot); 2145 DELAY(10); 2146 } 2147 } 2148 return (0); 2149 } 2150 2151 int 2152 sdhci_generic_get_ro(device_t brdev __unused, device_t reqdev) 2153 { 2154 struct sdhci_slot *slot = device_get_ivars(reqdev); 2155 uint32_t val; 2156 2157 SDHCI_LOCK(slot); 2158 val = RD4(slot, SDHCI_PRESENT_STATE); 2159 SDHCI_UNLOCK(slot); 2160 return (!(val & SDHCI_WRITE_PROTECT)); 2161 } 2162 2163 int 2164 sdhci_generic_acquire_host(device_t brdev __unused, device_t reqdev) 2165 { 2166 struct sdhci_slot *slot = device_get_ivars(reqdev); 2167 int err = 0; 2168 2169 SDHCI_LOCK(slot); 2170 while (slot->bus_busy) 2171 msleep(slot, &slot->mtx, 0, "sdhciah", 0); 2172 slot->bus_busy++; 2173 /* Activate led. */ 2174 WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl |= SDHCI_CTRL_LED); 2175 SDHCI_UNLOCK(slot); 2176 return (err); 2177 } 2178 2179 int 2180 sdhci_generic_release_host(device_t brdev __unused, device_t reqdev) 2181 { 2182 struct sdhci_slot *slot = device_get_ivars(reqdev); 2183 2184 SDHCI_LOCK(slot); 2185 /* Deactivate led. 
*/ 2186 WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl &= ~SDHCI_CTRL_LED); 2187 slot->bus_busy--; 2188 wakeup(slot); 2189 SDHCI_UNLOCK(slot); 2190 return (0); 2191 } 2192 2193 static void 2194 sdhci_cmd_irq(struct sdhci_slot *slot, uint32_t intmask) 2195 { 2196 2197 if (!slot->curcmd) { 2198 slot_printf(slot, "Got command interrupt 0x%08x, but " 2199 "there is no active command.\n", intmask); 2200 sdhci_dumpregs(slot); 2201 return; 2202 } 2203 if (intmask & SDHCI_INT_TIMEOUT) 2204 slot->curcmd->error = MMC_ERR_TIMEOUT; 2205 else if (intmask & SDHCI_INT_CRC) 2206 slot->curcmd->error = MMC_ERR_BADCRC; 2207 else if (intmask & (SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) 2208 slot->curcmd->error = MMC_ERR_FIFO; 2209 2210 sdhci_finish_command(slot); 2211 } 2212 2213 static void 2214 sdhci_data_irq(struct sdhci_slot *slot, uint32_t intmask) 2215 { 2216 struct mmc_data *data; 2217 size_t left; 2218 uint32_t sdma_bbufsz; 2219 2220 if (!slot->curcmd) { 2221 slot_printf(slot, "Got data interrupt 0x%08x, but " 2222 "there is no active command.\n", intmask); 2223 sdhci_dumpregs(slot); 2224 return; 2225 } 2226 if (slot->curcmd->data == NULL && 2227 (slot->curcmd->flags & MMC_RSP_BUSY) == 0) { 2228 slot_printf(slot, "Got data interrupt 0x%08x, but " 2229 "there is no active data operation.\n", 2230 intmask); 2231 sdhci_dumpregs(slot); 2232 return; 2233 } 2234 if (intmask & SDHCI_INT_DATA_TIMEOUT) 2235 slot->curcmd->error = MMC_ERR_TIMEOUT; 2236 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) 2237 slot->curcmd->error = MMC_ERR_BADCRC; 2238 if (slot->curcmd->data == NULL && 2239 (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | 2240 SDHCI_INT_DMA_END))) { 2241 slot_printf(slot, "Got data interrupt 0x%08x, but " 2242 "there is busy-only command.\n", intmask); 2243 sdhci_dumpregs(slot); 2244 slot->curcmd->error = MMC_ERR_INVALID; 2245 } 2246 if (slot->curcmd->error) { 2247 /* No need to continue after any error. */ 2248 goto done; 2249 } 2250 2251 /* Handle tuning completion interrupt. */ 2252 if (__predict_false((intmask & SDHCI_INT_DATA_AVAIL) && 2253 (slot->curcmd->opcode == MMC_SEND_TUNING_BLOCK || 2254 slot->curcmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) { 2255 slot->req->flags |= MMC_TUNE_DONE; 2256 sdhci_finish_command(slot); 2257 sdhci_finish_data(slot); 2258 return; 2259 } 2260 /* Handle PIO interrupt. */ 2261 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) { 2262 if ((slot->opt & SDHCI_PLATFORM_TRANSFER) && 2263 SDHCI_PLATFORM_WILL_HANDLE(slot->bus, slot)) { 2264 SDHCI_PLATFORM_START_TRANSFER(slot->bus, slot, 2265 &intmask); 2266 slot->flags |= PLATFORM_DATA_STARTED; 2267 } else 2268 sdhci_transfer_pio(slot); 2269 } 2270 /* Handle DMA border. */ 2271 if (intmask & SDHCI_INT_DMA_END) { 2272 data = slot->curcmd->data; 2273 sdma_bbufsz = slot->sdma_bbufsz; 2274 2275 /* Unload DMA buffer ... */ 2276 left = data->len - slot->offset; 2277 if (data->flags & MMC_DATA_READ) { 2278 bus_dmamap_sync(slot->dmatag, slot->dmamap, 2279 BUS_DMASYNC_POSTREAD); 2280 memcpy((u_char*)data->data + slot->offset, slot->dmamem, 2281 ulmin(left, sdma_bbufsz)); 2282 } else { 2283 bus_dmamap_sync(slot->dmatag, slot->dmamap, 2284 BUS_DMASYNC_POSTWRITE); 2285 } 2286 /* ... and reload it again. 
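The controller pauses and raises SDHCI_INT_DMA_END each time it crosses the programmed SDMA buffer boundary; rewriting SDHCI_DMA_ADDRESS below resumes the transfer using the same bounce buffer.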
*/ 2287 slot->offset += sdma_bbufsz; 2288 left = data->len - slot->offset; 2289 if (data->flags & MMC_DATA_READ) { 2290 bus_dmamap_sync(slot->dmatag, slot->dmamap, 2291 BUS_DMASYNC_PREREAD); 2292 } else { 2293 memcpy(slot->dmamem, (u_char*)data->data + slot->offset, 2294 ulmin(left, sdma_bbufsz)); 2295 bus_dmamap_sync(slot->dmatag, slot->dmamap, 2296 BUS_DMASYNC_PREWRITE); 2297 } 2298 /* 2299 * Interrupt aggregation: Mask border interrupt for the last 2300 * bounce buffer. 2301 */ 2302 if (left == sdma_bbufsz) { 2303 slot->intmask &= ~SDHCI_INT_DMA_END; 2304 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); 2305 } 2306 /* Restart DMA. */ 2307 WR4(slot, SDHCI_DMA_ADDRESS, slot->paddr); 2308 } 2309 /* We have got all data. */ 2310 if (intmask & SDHCI_INT_DATA_END) { 2311 if (slot->flags & PLATFORM_DATA_STARTED) { 2312 slot->flags &= ~PLATFORM_DATA_STARTED; 2313 SDHCI_PLATFORM_FINISH_TRANSFER(slot->bus, slot); 2314 } else 2315 sdhci_finish_data(slot); 2316 } 2317 done: 2318 if (slot->curcmd != NULL && slot->curcmd->error != 0) { 2319 if (slot->flags & PLATFORM_DATA_STARTED) { 2320 slot->flags &= ~PLATFORM_DATA_STARTED; 2321 SDHCI_PLATFORM_FINISH_TRANSFER(slot->bus, slot); 2322 } else 2323 sdhci_finish_data(slot); 2324 } 2325 } 2326 2327 static void 2328 sdhci_acmd_irq(struct sdhci_slot *slot, uint16_t acmd_err) 2329 { 2330 2331 if (!slot->curcmd) { 2332 slot_printf(slot, "Got AutoCMD12 error 0x%04x, but " 2333 "there is no active command.\n", acmd_err); 2334 sdhci_dumpregs(slot); 2335 return; 2336 } 2337 slot_printf(slot, "Got AutoCMD12 error 0x%04x\n", acmd_err); 2338 sdhci_reset(slot, SDHCI_RESET_CMD); 2339 } 2340 2341 void 2342 sdhci_generic_intr(struct sdhci_slot *slot) 2343 { 2344 uint32_t intmask, present; 2345 uint16_t val16; 2346 2347 SDHCI_LOCK(slot); 2348 /* Read slot interrupt status. */ 2349 intmask = RD4(slot, SDHCI_INT_STATUS); 2350 if (intmask == 0 || intmask == 0xffffffff) { 2351 SDHCI_UNLOCK(slot); 2352 return; 2353 } 2354 if (__predict_false(sdhci_debug > 2)) 2355 slot_printf(slot, "Interrupt %#x\n", intmask); 2356 2357 /* Handle tuning error interrupt. */ 2358 if (__predict_false(intmask & SDHCI_INT_TUNEERR)) { 2359 WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_TUNEERR); 2360 slot_printf(slot, "Tuning error indicated\n"); 2361 slot->retune_req |= SDHCI_RETUNE_REQ_RESET; 2362 if (slot->curcmd) { 2363 slot->curcmd->error = MMC_ERR_BADCRC; 2364 sdhci_finish_command(slot); 2365 } 2366 } 2367 /* Handle re-tuning interrupt. */ 2368 if (__predict_false(intmask & SDHCI_INT_RETUNE)) 2369 slot->retune_req |= SDHCI_RETUNE_REQ_NEEDED; 2370 /* Handle card presence interrupts. */ 2371 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2372 present = (intmask & SDHCI_INT_CARD_INSERT) != 0; 2373 slot->intmask &= 2374 ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); 2375 slot->intmask |= present ? SDHCI_INT_CARD_REMOVE : 2376 SDHCI_INT_CARD_INSERT; 2377 WR4(slot, SDHCI_INT_ENABLE, slot->intmask); 2378 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); 2379 WR4(slot, SDHCI_INT_STATUS, intmask & 2380 (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)); 2381 sdhci_handle_card_present_locked(slot, present); 2382 } 2383 /* Handle command interrupts. */ 2384 if (intmask & SDHCI_INT_CMD_MASK) { 2385 WR4(slot, SDHCI_INT_STATUS, intmask & SDHCI_INT_CMD_MASK); 2386 sdhci_cmd_irq(slot, intmask & SDHCI_INT_CMD_MASK); 2387 } 2388 /* Handle data interrupts. 
*/ 2389 if (intmask & SDHCI_INT_DATA_MASK) { 2390 WR4(slot, SDHCI_INT_STATUS, intmask & SDHCI_INT_DATA_MASK); 2391 /* Don't call data_irq in case of errored command. */ 2392 if ((intmask & SDHCI_INT_CMD_ERROR_MASK) == 0) 2393 sdhci_data_irq(slot, intmask & SDHCI_INT_DATA_MASK); 2394 } 2395 /* Handle AutoCMD12 error interrupt. */ 2396 if (intmask & SDHCI_INT_ACMD12ERR) { 2397 /* Clearing SDHCI_INT_ACMD12ERR may clear SDHCI_ACMD12_ERR. */ 2398 val16 = RD2(slot, SDHCI_ACMD12_ERR); 2399 WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_ACMD12ERR); 2400 sdhci_acmd_irq(slot, val16); 2401 } 2402 /* Handle bus power interrupt. */ 2403 if (intmask & SDHCI_INT_BUS_POWER) { 2404 WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_BUS_POWER); 2405 slot_printf(slot, "Card is consuming too much power!\n"); 2406 } 2407 intmask &= ~(SDHCI_INT_ERROR | SDHCI_INT_TUNEERR | SDHCI_INT_RETUNE | 2408 SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | SDHCI_INT_CMD_MASK | 2409 SDHCI_INT_DATA_MASK | SDHCI_INT_ACMD12ERR | SDHCI_INT_BUS_POWER); 2410 /* The rest is unknown. */ 2411 if (intmask) { 2412 WR4(slot, SDHCI_INT_STATUS, intmask); 2413 slot_printf(slot, "Unexpected interrupt 0x%08x.\n", 2414 intmask); 2415 sdhci_dumpregs(slot); 2416 } 2417 2418 SDHCI_UNLOCK(slot); 2419 } 2420 2421 int 2422 sdhci_generic_read_ivar(device_t bus, device_t child, int which, 2423 uintptr_t *result) 2424 { 2425 const struct sdhci_slot *slot = device_get_ivars(child); 2426 2427 switch (which) { 2428 default: 2429 return (EINVAL); 2430 case MMCBR_IVAR_BUS_MODE: 2431 *result = slot->host.ios.bus_mode; 2432 break; 2433 case MMCBR_IVAR_BUS_WIDTH: 2434 *result = slot->host.ios.bus_width; 2435 break; 2436 case MMCBR_IVAR_CHIP_SELECT: 2437 *result = slot->host.ios.chip_select; 2438 break; 2439 case MMCBR_IVAR_CLOCK: 2440 *result = slot->host.ios.clock; 2441 break; 2442 case MMCBR_IVAR_F_MIN: 2443 *result = slot->host.f_min; 2444 break; 2445 case MMCBR_IVAR_F_MAX: 2446 *result = slot->host.f_max; 2447 break; 2448 case MMCBR_IVAR_HOST_OCR: 2449 *result = slot->host.host_ocr; 2450 break; 2451 case MMCBR_IVAR_MODE: 2452 *result = slot->host.mode; 2453 break; 2454 case MMCBR_IVAR_OCR: 2455 *result = slot->host.ocr; 2456 break; 2457 case MMCBR_IVAR_POWER_MODE: 2458 *result = slot->host.ios.power_mode; 2459 break; 2460 case MMCBR_IVAR_VDD: 2461 *result = slot->host.ios.vdd; 2462 break; 2463 case MMCBR_IVAR_RETUNE_REQ: 2464 if (slot->opt & SDHCI_TUNING_ENABLED) { 2465 if (slot->retune_req & SDHCI_RETUNE_REQ_RESET) { 2466 *result = retune_req_reset; 2467 break; 2468 } 2469 if (slot->retune_req & SDHCI_RETUNE_REQ_NEEDED) { 2470 *result = retune_req_normal; 2471 break; 2472 } 2473 } 2474 *result = retune_req_none; 2475 break; 2476 case MMCBR_IVAR_VCCQ: 2477 *result = slot->host.ios.vccq; 2478 break; 2479 case MMCBR_IVAR_CAPS: 2480 *result = slot->host.caps; 2481 break; 2482 case MMCBR_IVAR_TIMING: 2483 *result = slot->host.ios.timing; 2484 break; 2485 case MMCBR_IVAR_MAX_DATA: 2486 /* 2487 * Re-tuning modes 1 and 2 restrict the maximum data length 2488 * per read/write command to 4 MiB. 2489 */ 2490 if (slot->opt & SDHCI_TUNING_ENABLED && 2491 (slot->retune_mode == SDHCI_RETUNE_MODE_1 || 2492 slot->retune_mode == SDHCI_RETUNE_MODE_2)) { 2493 *result = 4 * 1024 * 1024 / MMC_SECTOR_SIZE; 2494 break; 2495 } 2496 *result = 65535; 2497 break; 2498 case MMCBR_IVAR_MAX_BUSY_TIMEOUT: 2499 /* 2500 * Currently, sdhci_start_data() hardcodes 1 s for all CMDs. 
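* This matches the 1000000 us target_timeout used there, so the value reported below is in microseconds.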
2501 */ 2502 *result = 1000000; 2503 break; 2504 } 2505 return (0); 2506 } 2507 2508 int 2509 sdhci_generic_write_ivar(device_t bus, device_t child, int which, 2510 uintptr_t value) 2511 { 2512 struct sdhci_slot *slot = device_get_ivars(child); 2513 uint32_t clock, max_clock; 2514 int i; 2515 2516 if (sdhci_debug > 1) 2517 slot_printf(slot, "%s: var=%d\n", __func__, which); 2518 switch (which) { 2519 default: 2520 return (EINVAL); 2521 case MMCBR_IVAR_BUS_MODE: 2522 slot->host.ios.bus_mode = value; 2523 break; 2524 case MMCBR_IVAR_BUS_WIDTH: 2525 slot->host.ios.bus_width = value; 2526 break; 2527 case MMCBR_IVAR_CHIP_SELECT: 2528 slot->host.ios.chip_select = value; 2529 break; 2530 case MMCBR_IVAR_CLOCK: 2531 if (value > 0) { 2532 max_clock = slot->max_clk; 2533 clock = max_clock; 2534 2535 if (slot->version < SDHCI_SPEC_300) { 2536 for (i = 0; i < SDHCI_200_MAX_DIVIDER; 2537 i <<= 1) { 2538 if (clock <= value) 2539 break; 2540 clock >>= 1; 2541 } 2542 } else { 2543 for (i = 0; i < SDHCI_300_MAX_DIVIDER; 2544 i += 2) { 2545 if (clock <= value) 2546 break; 2547 clock = max_clock / (i + 2); 2548 } 2549 } 2550 2551 slot->host.ios.clock = clock; 2552 } else 2553 slot->host.ios.clock = 0; 2554 break; 2555 case MMCBR_IVAR_MODE: 2556 slot->host.mode = value; 2557 break; 2558 case MMCBR_IVAR_OCR: 2559 slot->host.ocr = value; 2560 break; 2561 case MMCBR_IVAR_POWER_MODE: 2562 slot->host.ios.power_mode = value; 2563 break; 2564 case MMCBR_IVAR_VDD: 2565 slot->host.ios.vdd = value; 2566 break; 2567 case MMCBR_IVAR_VCCQ: 2568 slot->host.ios.vccq = value; 2569 break; 2570 case MMCBR_IVAR_TIMING: 2571 slot->host.ios.timing = value; 2572 break; 2573 case MMCBR_IVAR_CAPS: 2574 case MMCBR_IVAR_HOST_OCR: 2575 case MMCBR_IVAR_F_MIN: 2576 case MMCBR_IVAR_F_MAX: 2577 case MMCBR_IVAR_MAX_DATA: 2578 case MMCBR_IVAR_RETUNE_REQ: 2579 return (EINVAL); 2580 } 2581 return (0); 2582 } 2583 2584 #ifdef MMCCAM 2585 void 2586 sdhci_start_slot(struct sdhci_slot *slot) 2587 { 2588 2589 if ((slot->devq = cam_simq_alloc(1)) == NULL) 2590 goto fail; 2591 2592 mtx_init(&slot->sim_mtx, "sdhcisim", NULL, MTX_DEF); 2593 slot->sim = cam_sim_alloc(sdhci_cam_action, sdhci_cam_poll, 2594 "sdhci_slot", slot, device_get_unit(slot->bus), 2595 &slot->sim_mtx, 1, 1, slot->devq); 2596 2597 if (slot->sim == NULL) { 2598 cam_simq_free(slot->devq); 2599 slot_printf(slot, "cannot allocate CAM SIM\n"); 2600 goto fail; 2601 } 2602 2603 mtx_lock(&slot->sim_mtx); 2604 if (xpt_bus_register(slot->sim, slot->bus, 0) != 0) { 2605 slot_printf(slot, "cannot register SCSI pass-through bus\n"); 2606 cam_sim_free(slot->sim, FALSE); 2607 cam_simq_free(slot->devq); 2608 mtx_unlock(&slot->sim_mtx); 2609 goto fail; 2610 } 2611 mtx_unlock(&slot->sim_mtx); 2612 2613 /* End CAM-specific init */ 2614 slot->card_present = 0; 2615 sdhci_card_task(slot, 0); 2616 return; 2617 2618 fail: 2619 if (slot->sim != NULL) { 2620 mtx_lock(&slot->sim_mtx); 2621 xpt_bus_deregister(cam_sim_path(slot->sim)); 2622 cam_sim_free(slot->sim, FALSE); 2623 mtx_unlock(&slot->sim_mtx); 2624 } 2625 2626 if (slot->devq != NULL) 2627 cam_simq_free(slot->devq); 2628 } 2629 2630 void 2631 sdhci_cam_action(struct cam_sim *sim, union ccb *ccb) 2632 { 2633 struct sdhci_slot *slot; 2634 2635 slot = cam_sim_softc(sim); 2636 if (slot == NULL) { 2637 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 2638 xpt_done(ccb); 2639 return; 2640 } 2641 2642 mtx_assert(&slot->sim_mtx, MA_OWNED); 2643 2644 switch (ccb->ccb_h.func_code) { 2645 case XPT_PATH_INQ: 2646 mmc_path_inq(&ccb->cpi, "Deglitch Networks", sim, maxphys); 
2647 break; 2648 2649 case XPT_MMC_GET_TRAN_SETTINGS: 2650 case XPT_GET_TRAN_SETTINGS: 2651 { 2652 struct ccb_trans_settings *cts = &ccb->cts; 2653 uint32_t max_data; 2654 2655 if (sdhci_debug > 1) 2656 slot_printf(slot, "Got XPT_GET_TRAN_SETTINGS\n"); 2657 2658 cts->protocol = PROTO_MMCSD; 2659 cts->protocol_version = 1; 2660 cts->transport = XPORT_MMCSD; 2661 cts->transport_version = 1; 2662 cts->xport_specific.valid = 0; 2663 cts->proto_specific.mmc.host_ocr = slot->host.host_ocr; 2664 cts->proto_specific.mmc.host_f_min = slot->host.f_min; 2665 cts->proto_specific.mmc.host_f_max = slot->host.f_max; 2666 cts->proto_specific.mmc.host_caps = slot->host.caps; 2667 /* 2668 * Re-tuning modes 1 and 2 restrict the maximum data length 2669 * per read/write command to 4 MiB. 2670 */ 2671 if (slot->opt & SDHCI_TUNING_ENABLED && 2672 (slot->retune_mode == SDHCI_RETUNE_MODE_1 || 2673 slot->retune_mode == SDHCI_RETUNE_MODE_2)) { 2674 max_data = 4 * 1024 * 1024 / MMC_SECTOR_SIZE; 2675 } else { 2676 max_data = 65535; 2677 } 2678 cts->proto_specific.mmc.host_max_data = max_data; 2679 2680 memcpy(&cts->proto_specific.mmc.ios, &slot->host.ios, sizeof(struct mmc_ios)); 2681 ccb->ccb_h.status = CAM_REQ_CMP; 2682 break; 2683 } 2684 case XPT_MMC_SET_TRAN_SETTINGS: 2685 case XPT_SET_TRAN_SETTINGS: 2686 if (sdhci_debug > 1) 2687 slot_printf(slot, "Got XPT_SET_TRAN_SETTINGS\n"); 2688 sdhci_cam_settran_settings(slot, ccb); 2689 ccb->ccb_h.status = CAM_REQ_CMP; 2690 break; 2691 case XPT_RESET_BUS: 2692 if (sdhci_debug > 1) 2693 slot_printf(slot, "Got XPT_RESET_BUS, ACK it...\n"); 2694 ccb->ccb_h.status = CAM_REQ_CMP; 2695 break; 2696 case XPT_MMC_IO: 2697 /* 2698 * Here is the HW-dependent part of 2699 * sending the command to the underlying h/w 2700 * At some point in the future an interrupt comes. 2701 * Then the request will be marked as completed. 
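* Specifically, sdhci_req_done() clears slot->ccb, sets the CCB status from mmcio->cmd.error and completes the request via xpt_done().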
2702 */ 2703 if (__predict_false(sdhci_debug > 1)) 2704 slot_printf(slot, "Got XPT_MMC_IO\n"); 2705 ccb->ccb_h.status = CAM_REQ_INPROG; 2706 2707 sdhci_cam_request(cam_sim_softc(sim), ccb); 2708 return; 2709 default: 2710 ccb->ccb_h.status = CAM_REQ_INVALID; 2711 break; 2712 } 2713 xpt_done(ccb); 2714 return; 2715 } 2716 2717 void 2718 sdhci_cam_poll(struct cam_sim *sim) 2719 { 2720 return; 2721 } 2722 2723 static int 2724 sdhci_cam_get_possible_host_clock(const struct sdhci_slot *slot, 2725 int proposed_clock) 2726 { 2727 int max_clock, clock, i; 2728 2729 if (proposed_clock == 0) 2730 return 0; 2731 max_clock = slot->max_clk; 2732 clock = max_clock; 2733 2734 if (slot->version < SDHCI_SPEC_300) { 2735 for (i = 0; i < SDHCI_200_MAX_DIVIDER; i <<= 1) { 2736 if (clock <= proposed_clock) 2737 break; 2738 clock >>= 1; 2739 } 2740 } else { 2741 for (i = 0; i < SDHCI_300_MAX_DIVIDER; i += 2) { 2742 if (clock <= proposed_clock) 2743 break; 2744 clock = max_clock / (i + 2); 2745 } 2746 } 2747 return clock; 2748 } 2749 2750 static int 2751 sdhci_cam_settran_settings(struct sdhci_slot *slot, union ccb *ccb) 2752 { 2753 struct mmc_ios *ios; 2754 const struct mmc_ios *new_ios; 2755 const struct ccb_trans_settings_mmc *cts; 2756 2757 ios = &slot->host.ios; 2758 cts = &ccb->cts.proto_specific.mmc; 2759 new_ios = &cts->ios; 2760 2761 /* Update only requested fields */ 2762 if (cts->ios_valid & MMC_CLK) { 2763 ios->clock = sdhci_cam_get_possible_host_clock(slot, new_ios->clock); 2764 if (sdhci_debug > 1) 2765 slot_printf(slot, "Clock => %d\n", ios->clock); 2766 } 2767 if (cts->ios_valid & MMC_VDD) { 2768 ios->vdd = new_ios->vdd; 2769 if (sdhci_debug > 1) 2770 slot_printf(slot, "VDD => %d\n", ios->vdd); 2771 } 2772 if (cts->ios_valid & MMC_CS) { 2773 ios->chip_select = new_ios->chip_select; 2774 if (sdhci_debug > 1) 2775 slot_printf(slot, "CS => %d\n", ios->chip_select); 2776 } 2777 if (cts->ios_valid & MMC_BW) { 2778 ios->bus_width = new_ios->bus_width; 2779 if (sdhci_debug > 1) 2780 slot_printf(slot, "Bus width => %d\n", ios->bus_width); 2781 } 2782 if (cts->ios_valid & MMC_PM) { 2783 ios->power_mode = new_ios->power_mode; 2784 if (sdhci_debug > 1) 2785 slot_printf(slot, "Power mode => %d\n", ios->power_mode); 2786 } 2787 if (cts->ios_valid & MMC_BT) { 2788 ios->timing = new_ios->timing; 2789 if (sdhci_debug > 1) 2790 slot_printf(slot, "Timing => %d\n", ios->timing); 2791 } 2792 if (cts->ios_valid & MMC_BM) { 2793 ios->bus_mode = new_ios->bus_mode; 2794 if (sdhci_debug > 1) 2795 slot_printf(slot, "Bus mode => %d\n", ios->bus_mode); 2796 } 2797 if (cts->ios_valid & MMC_VCCQ) { 2798 ios->vccq = new_ios->vccq; 2799 if (sdhci_debug > 1) 2800 slot_printf(slot, "VCCQ => %d\n", ios->vccq); 2801 } 2802 2803 /* XXX Provide a way to call a chip-specific IOS update, required for TI */ 2804 return (sdhci_cam_update_ios(slot)); 2805 } 2806 2807 static int 2808 sdhci_cam_update_ios(struct sdhci_slot *slot) 2809 { 2810 struct mmc_ios *ios = &slot->host.ios; 2811 2812 if (sdhci_debug > 1) 2813 slot_printf(slot, "%s: power_mode=%d, clk=%d, bus_width=%d, timing=%d\n", 2814 __func__, ios->power_mode, ios->clock, ios->bus_width, ios->timing); 2815 SDHCI_LOCK(slot); 2816 /* Do full reset on bus power down to clear from any state. */ 2817 if (ios->power_mode == power_off) { 2818 WR4(slot, SDHCI_SIGNAL_ENABLE, 0); 2819 sdhci_init(slot); 2820 } 2821 /* Configure the bus. */ 2822 sdhci_set_clock(slot, ios->clock); 2823 sdhci_set_power(slot, (ios->power_mode == power_off) ? 
0 : ios->vdd); 2824 if (ios->bus_width == bus_width_8) { 2825 slot->hostctrl |= SDHCI_CTRL_8BITBUS; 2826 slot->hostctrl &= ~SDHCI_CTRL_4BITBUS; 2827 } else if (ios->bus_width == bus_width_4) { 2828 slot->hostctrl &= ~SDHCI_CTRL_8BITBUS; 2829 slot->hostctrl |= SDHCI_CTRL_4BITBUS; 2830 } else if (ios->bus_width == bus_width_1) { 2831 slot->hostctrl &= ~SDHCI_CTRL_8BITBUS; 2832 slot->hostctrl &= ~SDHCI_CTRL_4BITBUS; 2833 } else { 2834 panic("Invalid bus width: %d", ios->bus_width); 2835 } 2836 if (ios->timing == bus_timing_hs && 2837 !(slot->quirks & SDHCI_QUIRK_DONT_SET_HISPD_BIT)) 2838 slot->hostctrl |= SDHCI_CTRL_HISPD; 2839 else 2840 slot->hostctrl &= ~SDHCI_CTRL_HISPD; 2841 WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl); 2842 /* Some controllers like reset after bus changes. */ 2843 if(slot->quirks & SDHCI_QUIRK_RESET_ON_IOS) 2844 sdhci_reset(slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 2845 2846 SDHCI_UNLOCK(slot); 2847 return (0); 2848 } 2849 2850 static int 2851 sdhci_cam_request(struct sdhci_slot *slot, union ccb *ccb) 2852 { 2853 const struct ccb_mmcio *mmcio; 2854 2855 mmcio = &ccb->mmcio; 2856 2857 SDHCI_LOCK(slot); 2858 /* if (slot->req != NULL) { 2859 SDHCI_UNLOCK(slot); 2860 return (EBUSY); 2861 } 2862 */ 2863 if (__predict_false(sdhci_debug > 1)) { 2864 slot_printf(slot, "CMD%u arg %#x flags %#x dlen %u dflags %#x " 2865 "blksz=%zu blkcnt=%zu\n", 2866 mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags, 2867 mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0, 2868 mmcio->cmd.data != NULL ? mmcio->cmd.data->flags : 0, 2869 mmcio->cmd.data != NULL ? mmcio->cmd.data->block_size : 0, 2870 mmcio->cmd.data != NULL ? mmcio->cmd.data->block_count : 0); 2871 } 2872 if (mmcio->cmd.data != NULL) { 2873 if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0) 2874 panic("data->len = %d, data->flags = %d -- something is b0rked", 2875 (int)mmcio->cmd.data->len, mmcio->cmd.data->flags); 2876 } 2877 slot->ccb = ccb; 2878 slot->flags = 0; 2879 sdhci_start(slot); 2880 SDHCI_UNLOCK(slot); 2881 if (dumping) { 2882 while (slot->ccb != NULL) { 2883 sdhci_generic_intr(slot); 2884 DELAY(10); 2885 } 2886 } 2887 return (0); 2888 } 2889 #endif /* MMCCAM */ 2890 2891 MODULE_VERSION(sdhci, SDHCI_VERSION); 2892