1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2008 Alexander Motin <mav@FreeBSD.org> 5 * Copyright (c) 2017 Marius Strobl <marius@FreeBSD.org> 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/bus.h> 35 #include <sys/callout.h> 36 #include <sys/conf.h> 37 #include <sys/kernel.h> 38 #include <sys/kobj.h> 39 #include <sys/libkern.h> 40 #include <sys/lock.h> 41 #include <sys/malloc.h> 42 #include <sys/module.h> 43 #include <sys/mutex.h> 44 #include <sys/resource.h> 45 #include <sys/rman.h> 46 #include <sys/sysctl.h> 47 #include <sys/taskqueue.h> 48 49 #include <machine/bus.h> 50 #include <machine/resource.h> 51 #include <machine/stdarg.h> 52 53 #include <dev/mmc/bridge.h> 54 #include <dev/mmc/mmcreg.h> 55 #include <dev/mmc/mmcbrvar.h> 56 57 #include <dev/sdhci/sdhci.h> 58 59 #include <cam/cam.h> 60 #include <cam/cam_ccb.h> 61 #include <cam/cam_debug.h> 62 #include <cam/cam_sim.h> 63 #include <cam/cam_xpt_sim.h> 64 65 #include "mmcbr_if.h" 66 #include "sdhci_if.h" 67 68 #include "opt_mmccam.h" 69 70 SYSCTL_NODE(_hw, OID_AUTO, sdhci, CTLFLAG_RD, 0, "sdhci driver"); 71 72 static int sdhci_debug = 0; 73 SYSCTL_INT(_hw_sdhci, OID_AUTO, debug, CTLFLAG_RWTUN, &sdhci_debug, 0, 74 "Debug level"); 75 u_int sdhci_quirk_clear = 0; 76 SYSCTL_INT(_hw_sdhci, OID_AUTO, quirk_clear, CTLFLAG_RWTUN, &sdhci_quirk_clear, 77 0, "Mask of quirks to clear"); 78 u_int sdhci_quirk_set = 0; 79 SYSCTL_INT(_hw_sdhci, OID_AUTO, quirk_set, CTLFLAG_RWTUN, &sdhci_quirk_set, 0, 80 "Mask of quirks to set"); 81 82 #define RD1(slot, off) SDHCI_READ_1((slot)->bus, (slot), (off)) 83 #define RD2(slot, off) SDHCI_READ_2((slot)->bus, (slot), (off)) 84 #define RD4(slot, off) SDHCI_READ_4((slot)->bus, (slot), (off)) 85 #define RD_MULTI_4(slot, off, ptr, count) \ 86 SDHCI_READ_MULTI_4((slot)->bus, (slot), (off), (ptr), (count)) 87 88 #define WR1(slot, off, val) SDHCI_WRITE_1((slot)->bus, (slot), (off), (val)) 89 #define WR2(slot, off, val) SDHCI_WRITE_2((slot)->bus, (slot), (off), (val)) 90 #define WR4(slot, off, val) SDHCI_WRITE_4((slot)->bus, (slot), (off), (val)) 91 #define WR_MULTI_4(slot, off, ptr, count) \ 92 SDHCI_WRITE_MULTI_4((slot)->bus, (slot), (off), (ptr), (count)) 93 94 static void sdhci_acmd_irq(struct sdhci_slot *slot, uint16_t acmd_err); 95 static void sdhci_card_poll(void *arg); 96 static void sdhci_card_task(void *arg, int pending); 97 static void sdhci_cmd_irq(struct sdhci_slot *slot, uint32_t intmask); 98 static void sdhci_data_irq(struct sdhci_slot *slot, uint32_t intmask); 99 static int sdhci_exec_tuning(struct sdhci_slot *slot, bool reset); 100 static void sdhci_handle_card_present_locked(struct sdhci_slot *slot, 101 bool is_present); 102 static void sdhci_finish_command(struct sdhci_slot *slot); 103 static void sdhci_init(struct sdhci_slot *slot); 104 static void sdhci_read_block_pio(struct sdhci_slot *slot); 105 static void sdhci_req_done(struct sdhci_slot *slot); 106 static void sdhci_req_wakeup(struct mmc_request *req); 107 static void sdhci_reset(struct sdhci_slot *slot, uint8_t mask); 108 static void sdhci_retune(void *arg); 109 static void sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock); 110 static void sdhci_set_power(struct sdhci_slot *slot, u_char power); 111 static void sdhci_set_transfer_mode(struct sdhci_slot *slot, 112 const struct mmc_data *data); 113 static void sdhci_start(struct sdhci_slot *slot); 114 static void sdhci_timeout(void *arg); 115 static void sdhci_start_command(struct sdhci_slot *slot, 116 struct mmc_command *cmd); 117 static void sdhci_start_data(struct sdhci_slot *slot, 118 const struct mmc_data *data); 119 static void sdhci_write_block_pio(struct sdhci_slot *slot); 120 static void sdhci_transfer_pio(struct sdhci_slot *slot); 121 122 #ifdef MMCCAM 123 /* CAM-related */ 124 static void sdhci_cam_action(struct cam_sim *sim, union ccb *ccb); 125 static int sdhci_cam_get_possible_host_clock(const struct sdhci_slot *slot, 126 int proposed_clock); 127 static void sdhci_cam_poll(struct cam_sim *sim); 128 static int sdhci_cam_request(struct sdhci_slot *slot, union ccb *ccb); 129 static int sdhci_cam_settran_settings(struct sdhci_slot *slot, union ccb *ccb); 130 static int sdhci_cam_update_ios(struct sdhci_slot *slot); 131 #endif 132 133 /* helper routines */ 134 static int sdhci_dma_alloc(struct sdhci_slot *slot); 135 static void sdhci_dma_free(struct sdhci_slot *slot); 136 static void sdhci_dumpregs(struct sdhci_slot *slot); 137 static void sdhci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, 138 int error); 139 static int slot_printf(const struct sdhci_slot *slot, const char * fmt, ...) 140 __printflike(2, 3); 141 static uint32_t sdhci_tuning_intmask(const struct sdhci_slot *slot); 142 143 #define SDHCI_LOCK(_slot) mtx_lock(&(_slot)->mtx) 144 #define SDHCI_UNLOCK(_slot) mtx_unlock(&(_slot)->mtx) 145 #define SDHCI_LOCK_INIT(_slot) \ 146 mtx_init(&_slot->mtx, "SD slot mtx", "sdhci", MTX_DEF) 147 #define SDHCI_LOCK_DESTROY(_slot) mtx_destroy(&_slot->mtx); 148 #define SDHCI_ASSERT_LOCKED(_slot) mtx_assert(&_slot->mtx, MA_OWNED); 149 #define SDHCI_ASSERT_UNLOCKED(_slot) mtx_assert(&_slot->mtx, MA_NOTOWNED); 150 151 #define SDHCI_DEFAULT_MAX_FREQ 50 152 153 #define SDHCI_200_MAX_DIVIDER 256 154 #define SDHCI_300_MAX_DIVIDER 2046 155 156 #define SDHCI_CARD_PRESENT_TICKS (hz / 5) 157 #define SDHCI_INSERT_DELAY_TICKS (hz / 2) 158 159 /* 160 * Broadcom BCM577xx Controller Constants 161 */ 162 /* Maximum divider supported by the default clock source. */ 163 #define BCM577XX_DEFAULT_MAX_DIVIDER 256 164 /* Alternative clock's base frequency. */ 165 #define BCM577XX_ALT_CLOCK_BASE 63000000 166 167 #define BCM577XX_HOST_CONTROL 0x198 168 #define BCM577XX_CTRL_CLKSEL_MASK 0xFFFFCFFF 169 #define BCM577XX_CTRL_CLKSEL_SHIFT 12 170 #define BCM577XX_CTRL_CLKSEL_DEFAULT 0x0 171 #define BCM577XX_CTRL_CLKSEL_64MHZ 0x3 172 173 static void 174 sdhci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 175 { 176 177 if (error != 0) { 178 printf("getaddr: error %d\n", error); 179 return; 180 } 181 *(bus_addr_t *)arg = segs[0].ds_addr; 182 } 183 184 static int 185 slot_printf(const struct sdhci_slot *slot, const char * fmt, ...) 186 { 187 char buf[128]; 188 va_list ap; 189 int retval; 190 191 /* 192 * Make sure we print a single line all together rather than in two 193 * halves to avoid console gibberish bingo. 194 */ 195 va_start(ap, fmt); 196 retval = vsnprintf(buf, sizeof(buf), fmt, ap); 197 va_end(ap); 198 199 retval += printf("%s-slot%d: %s", 200 device_get_nameunit(slot->bus), slot->num, buf); 201 return (retval); 202 } 203 204 static void 205 sdhci_dumpregs(struct sdhci_slot *slot) 206 { 207 208 slot_printf(slot, 209 "============== REGISTER DUMP ==============\n"); 210 211 slot_printf(slot, "Sys addr: 0x%08x | Version: 0x%08x\n", 212 RD4(slot, SDHCI_DMA_ADDRESS), RD2(slot, SDHCI_HOST_VERSION)); 213 slot_printf(slot, "Blk size: 0x%08x | Blk cnt: 0x%08x\n", 214 RD2(slot, SDHCI_BLOCK_SIZE), RD2(slot, SDHCI_BLOCK_COUNT)); 215 slot_printf(slot, "Argument: 0x%08x | Trn mode: 0x%08x\n", 216 RD4(slot, SDHCI_ARGUMENT), RD2(slot, SDHCI_TRANSFER_MODE)); 217 slot_printf(slot, "Present: 0x%08x | Host ctl: 0x%08x\n", 218 RD4(slot, SDHCI_PRESENT_STATE), RD1(slot, SDHCI_HOST_CONTROL)); 219 slot_printf(slot, "Power: 0x%08x | Blk gap: 0x%08x\n", 220 RD1(slot, SDHCI_POWER_CONTROL), RD1(slot, SDHCI_BLOCK_GAP_CONTROL)); 221 slot_printf(slot, "Wake-up: 0x%08x | Clock: 0x%08x\n", 222 RD1(slot, SDHCI_WAKE_UP_CONTROL), RD2(slot, SDHCI_CLOCK_CONTROL)); 223 slot_printf(slot, "Timeout: 0x%08x | Int stat: 0x%08x\n", 224 RD1(slot, SDHCI_TIMEOUT_CONTROL), RD4(slot, SDHCI_INT_STATUS)); 225 slot_printf(slot, "Int enab: 0x%08x | Sig enab: 0x%08x\n", 226 RD4(slot, SDHCI_INT_ENABLE), RD4(slot, SDHCI_SIGNAL_ENABLE)); 227 slot_printf(slot, "AC12 err: 0x%08x | Host ctl2:0x%08x\n", 228 RD2(slot, SDHCI_ACMD12_ERR), RD2(slot, SDHCI_HOST_CONTROL2)); 229 slot_printf(slot, "Caps: 0x%08x | Caps2: 0x%08x\n", 230 RD4(slot, SDHCI_CAPABILITIES), RD4(slot, SDHCI_CAPABILITIES2)); 231 slot_printf(slot, "Max curr: 0x%08x | ADMA err: 0x%08x\n", 232 RD4(slot, SDHCI_MAX_CURRENT), RD1(slot, SDHCI_ADMA_ERR)); 233 slot_printf(slot, "ADMA addr:0x%08x | Slot int: 0x%08x\n", 234 RD4(slot, SDHCI_ADMA_ADDRESS_LO), RD2(slot, SDHCI_SLOT_INT_STATUS)); 235 236 slot_printf(slot, 237 "===========================================\n"); 238 } 239 240 static void 241 sdhci_reset(struct sdhci_slot *slot, uint8_t mask) 242 { 243 int timeout; 244 uint32_t clock; 245 246 if (slot->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 247 if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot)) 248 return; 249 } 250 251 /* Some controllers need this kick or reset won't work. */ 252 if ((mask & SDHCI_RESET_ALL) == 0 && 253 (slot->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)) { 254 /* This is to force an update */ 255 clock = slot->clock; 256 slot->clock = 0; 257 sdhci_set_clock(slot, clock); 258 } 259 260 if (mask & SDHCI_RESET_ALL) { 261 slot->clock = 0; 262 slot->power = 0; 263 } 264 265 WR1(slot, SDHCI_SOFTWARE_RESET, mask); 266 267 if (slot->quirks & SDHCI_QUIRK_WAITFOR_RESET_ASSERTED) { 268 /* 269 * Resets on TI OMAPs and AM335x are incompatible with SDHCI 270 * specification. The reset bit has internal propagation delay, 271 * so a fast read after write returns 0 even if reset process is 272 * in progress. The workaround is to poll for 1 before polling 273 * for 0. In the worst case, if we miss seeing it asserted the 274 * time we spent waiting is enough to ensure the reset finishes. 275 */ 276 timeout = 10000; 277 while ((RD1(slot, SDHCI_SOFTWARE_RESET) & mask) != mask) { 278 if (timeout <= 0) 279 break; 280 timeout--; 281 DELAY(1); 282 } 283 } 284 285 /* Wait max 100 ms */ 286 timeout = 10000; 287 /* Controller clears the bits when it's done */ 288 while (RD1(slot, SDHCI_SOFTWARE_RESET) & mask) { 289 if (timeout <= 0) { 290 slot_printf(slot, "Reset 0x%x never completed.\n", 291 mask); 292 sdhci_dumpregs(slot); 293 return; 294 } 295 timeout--; 296 DELAY(10); 297 } 298 } 299 300 static uint32_t 301 sdhci_tuning_intmask(const struct sdhci_slot *slot) 302 { 303 uint32_t intmask; 304 305 intmask = 0; 306 if (slot->opt & SDHCI_TUNING_ENABLED) { 307 intmask |= SDHCI_INT_TUNEERR; 308 if (slot->retune_mode == SDHCI_RETUNE_MODE_2 || 309 slot->retune_mode == SDHCI_RETUNE_MODE_3) 310 intmask |= SDHCI_INT_RETUNE; 311 } 312 return (intmask); 313 } 314 315 static void 316 sdhci_init(struct sdhci_slot *slot) 317 { 318 319 sdhci_reset(slot, SDHCI_RESET_ALL); 320 321 /* Enable interrupts. */ 322 slot->intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 323 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | 324 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | 325 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | 326 SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE | 327 SDHCI_INT_ACMD12ERR; 328 329 if (!(slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) && 330 !(slot->opt & SDHCI_NON_REMOVABLE)) { 331 slot->intmask |= SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT; 332 } 333 334 WR4(slot, SDHCI_INT_ENABLE, slot->intmask); 335 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); 336 } 337 338 static void 339 sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock) 340 { 341 uint32_t clk_base; 342 uint32_t clk_sel; 343 uint32_t res; 344 uint16_t clk; 345 uint16_t div; 346 int timeout; 347 348 if (clock == slot->clock) 349 return; 350 slot->clock = clock; 351 352 /* Turn off the clock. */ 353 clk = RD2(slot, SDHCI_CLOCK_CONTROL); 354 WR2(slot, SDHCI_CLOCK_CONTROL, clk & ~SDHCI_CLOCK_CARD_EN); 355 /* If no clock requested - leave it so. */ 356 if (clock == 0) 357 return; 358 359 /* Determine the clock base frequency */ 360 clk_base = slot->max_clk; 361 if (slot->quirks & SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC) { 362 clk_sel = RD2(slot, BCM577XX_HOST_CONTROL) & 363 BCM577XX_CTRL_CLKSEL_MASK; 364 365 /* 366 * Select clock source appropriate for the requested frequency. 367 */ 368 if ((clk_base / BCM577XX_DEFAULT_MAX_DIVIDER) > clock) { 369 clk_base = BCM577XX_ALT_CLOCK_BASE; 370 clk_sel |= (BCM577XX_CTRL_CLKSEL_64MHZ << 371 BCM577XX_CTRL_CLKSEL_SHIFT); 372 } else { 373 clk_sel |= (BCM577XX_CTRL_CLKSEL_DEFAULT << 374 BCM577XX_CTRL_CLKSEL_SHIFT); 375 } 376 377 WR2(slot, BCM577XX_HOST_CONTROL, clk_sel); 378 } 379 380 /* Recalculate timeout clock frequency based on the new sd clock. */ 381 if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 382 slot->timeout_clk = slot->clock / 1000; 383 384 if (slot->version < SDHCI_SPEC_300) { 385 /* Looking for highest freq <= clock. */ 386 res = clk_base; 387 for (div = 1; div < SDHCI_200_MAX_DIVIDER; div <<= 1) { 388 if (res <= clock) 389 break; 390 res >>= 1; 391 } 392 /* Divider 1:1 is 0x00, 2:1 is 0x01, 256:1 is 0x80 ... */ 393 div >>= 1; 394 } else { 395 /* Version 3.0 divisors are multiples of two up to 1023 * 2 */ 396 if (clock >= clk_base) 397 div = 0; 398 else { 399 for (div = 2; div < SDHCI_300_MAX_DIVIDER; div += 2) { 400 if ((clk_base / div) <= clock) 401 break; 402 } 403 } 404 div >>= 1; 405 } 406 407 if (bootverbose || sdhci_debug) 408 slot_printf(slot, "Divider %d for freq %d (base %d)\n", 409 div, clock, clk_base); 410 411 /* Now we have got divider, set it. */ 412 clk = (div & SDHCI_DIVIDER_MASK) << SDHCI_DIVIDER_SHIFT; 413 clk |= ((div >> SDHCI_DIVIDER_MASK_LEN) & SDHCI_DIVIDER_HI_MASK) 414 << SDHCI_DIVIDER_HI_SHIFT; 415 416 WR2(slot, SDHCI_CLOCK_CONTROL, clk); 417 /* Enable clock. */ 418 clk |= SDHCI_CLOCK_INT_EN; 419 WR2(slot, SDHCI_CLOCK_CONTROL, clk); 420 /* Wait up to 10 ms until it stabilize. */ 421 timeout = 10; 422 while (!((clk = RD2(slot, SDHCI_CLOCK_CONTROL)) 423 & SDHCI_CLOCK_INT_STABLE)) { 424 if (timeout == 0) { 425 slot_printf(slot, 426 "Internal clock never stabilised.\n"); 427 sdhci_dumpregs(slot); 428 return; 429 } 430 timeout--; 431 DELAY(1000); 432 } 433 /* Pass clock signal to the bus. */ 434 clk |= SDHCI_CLOCK_CARD_EN; 435 WR2(slot, SDHCI_CLOCK_CONTROL, clk); 436 } 437 438 static void 439 sdhci_set_power(struct sdhci_slot *slot, u_char power) 440 { 441 int i; 442 uint8_t pwr; 443 444 if (slot->power == power) 445 return; 446 447 slot->power = power; 448 449 /* Turn off the power. */ 450 pwr = 0; 451 WR1(slot, SDHCI_POWER_CONTROL, pwr); 452 /* If power down requested - leave it so. */ 453 if (power == 0) 454 return; 455 /* Set voltage. */ 456 switch (1 << power) { 457 case MMC_OCR_LOW_VOLTAGE: 458 pwr |= SDHCI_POWER_180; 459 break; 460 case MMC_OCR_290_300: 461 case MMC_OCR_300_310: 462 pwr |= SDHCI_POWER_300; 463 break; 464 case MMC_OCR_320_330: 465 case MMC_OCR_330_340: 466 pwr |= SDHCI_POWER_330; 467 break; 468 } 469 WR1(slot, SDHCI_POWER_CONTROL, pwr); 470 /* 471 * Turn on VDD1 power. Note that at least some Intel controllers can 472 * fail to enable bus power on the first try after transiting from D3 473 * to D0, so we give them up to 2 ms. 474 */ 475 pwr |= SDHCI_POWER_ON; 476 for (i = 0; i < 20; i++) { 477 WR1(slot, SDHCI_POWER_CONTROL, pwr); 478 if (RD1(slot, SDHCI_POWER_CONTROL) & SDHCI_POWER_ON) 479 break; 480 DELAY(100); 481 } 482 if (!(RD1(slot, SDHCI_POWER_CONTROL) & SDHCI_POWER_ON)) 483 slot_printf(slot, "Bus power failed to enable\n"); 484 485 if (slot->quirks & SDHCI_QUIRK_INTEL_POWER_UP_RESET) { 486 WR1(slot, SDHCI_POWER_CONTROL, pwr | 0x10); 487 DELAY(10); 488 WR1(slot, SDHCI_POWER_CONTROL, pwr); 489 DELAY(300); 490 } 491 } 492 493 static void 494 sdhci_read_block_pio(struct sdhci_slot *slot) 495 { 496 uint32_t data; 497 char *buffer; 498 size_t left; 499 500 buffer = slot->curcmd->data->data; 501 buffer += slot->offset; 502 /* Transfer one block at a time. */ 503 #ifdef MMCCAM 504 if (slot->curcmd->data->flags & MMC_DATA_BLOCK_SIZE) 505 left = min(slot->curcmd->data->block_size, 506 slot->curcmd->data->len - slot->offset); 507 else 508 #endif 509 left = min(512, slot->curcmd->data->len - slot->offset); 510 slot->offset += left; 511 512 /* If we are too fast, broken controllers return zeroes. */ 513 if (slot->quirks & SDHCI_QUIRK_BROKEN_TIMINGS) 514 DELAY(10); 515 /* Handle unaligned and aligned buffer cases. */ 516 if ((intptr_t)buffer & 3) { 517 while (left > 3) { 518 data = RD4(slot, SDHCI_BUFFER); 519 buffer[0] = data; 520 buffer[1] = (data >> 8); 521 buffer[2] = (data >> 16); 522 buffer[3] = (data >> 24); 523 buffer += 4; 524 left -= 4; 525 } 526 } else { 527 RD_MULTI_4(slot, SDHCI_BUFFER, 528 (uint32_t *)buffer, left >> 2); 529 left &= 3; 530 } 531 /* Handle uneven size case. */ 532 if (left > 0) { 533 data = RD4(slot, SDHCI_BUFFER); 534 while (left > 0) { 535 *(buffer++) = data; 536 data >>= 8; 537 left--; 538 } 539 } 540 } 541 542 static void 543 sdhci_write_block_pio(struct sdhci_slot *slot) 544 { 545 uint32_t data = 0; 546 char *buffer; 547 size_t left; 548 549 buffer = slot->curcmd->data->data; 550 buffer += slot->offset; 551 /* Transfer one block at a time. */ 552 #ifdef MMCCAM 553 if (slot->curcmd->data->flags & MMC_DATA_BLOCK_SIZE) { 554 left = min(slot->curcmd->data->block_size, 555 slot->curcmd->data->len - slot->offset); 556 } else 557 #endif 558 left = min(512, slot->curcmd->data->len - slot->offset); 559 slot->offset += left; 560 561 /* Handle unaligned and aligned buffer cases. */ 562 if ((intptr_t)buffer & 3) { 563 while (left > 3) { 564 data = buffer[0] + 565 (buffer[1] << 8) + 566 (buffer[2] << 16) + 567 (buffer[3] << 24); 568 left -= 4; 569 buffer += 4; 570 WR4(slot, SDHCI_BUFFER, data); 571 } 572 } else { 573 WR_MULTI_4(slot, SDHCI_BUFFER, 574 (uint32_t *)buffer, left >> 2); 575 left &= 3; 576 } 577 /* Handle uneven size case. */ 578 if (left > 0) { 579 while (left > 0) { 580 data <<= 8; 581 data += *(buffer++); 582 left--; 583 } 584 WR4(slot, SDHCI_BUFFER, data); 585 } 586 } 587 588 static void 589 sdhci_transfer_pio(struct sdhci_slot *slot) 590 { 591 592 /* Read as many blocks as possible. */ 593 if (slot->curcmd->data->flags & MMC_DATA_READ) { 594 while (RD4(slot, SDHCI_PRESENT_STATE) & 595 SDHCI_DATA_AVAILABLE) { 596 sdhci_read_block_pio(slot); 597 if (slot->offset >= slot->curcmd->data->len) 598 break; 599 } 600 } else { 601 while (RD4(slot, SDHCI_PRESENT_STATE) & 602 SDHCI_SPACE_AVAILABLE) { 603 sdhci_write_block_pio(slot); 604 if (slot->offset >= slot->curcmd->data->len) 605 break; 606 } 607 } 608 } 609 610 static void 611 sdhci_card_task(void *arg, int pending __unused) 612 { 613 struct sdhci_slot *slot = arg; 614 device_t d; 615 616 SDHCI_LOCK(slot); 617 if (SDHCI_GET_CARD_PRESENT(slot->bus, slot)) { 618 #ifdef MMCCAM 619 if (slot->card_present == 0) { 620 #else 621 if (slot->dev == NULL) { 622 #endif 623 /* If card is present - attach mmc bus. */ 624 if (bootverbose || sdhci_debug) 625 slot_printf(slot, "Card inserted\n"); 626 #ifdef MMCCAM 627 slot->card_present = 1; 628 union ccb *ccb; 629 uint32_t pathid; 630 pathid = cam_sim_path(slot->sim); 631 ccb = xpt_alloc_ccb_nowait(); 632 if (ccb == NULL) { 633 slot_printf(slot, "Unable to alloc CCB for rescan\n"); 634 SDHCI_UNLOCK(slot); 635 return; 636 } 637 638 /* 639 * We create a rescan request for BUS:0:0, since the card 640 * will be at lun 0. 641 */ 642 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, 643 /* target */ 0, /* lun */ 0) != CAM_REQ_CMP) { 644 slot_printf(slot, "Unable to create path for rescan\n"); 645 SDHCI_UNLOCK(slot); 646 xpt_free_ccb(ccb); 647 return; 648 } 649 SDHCI_UNLOCK(slot); 650 xpt_rescan(ccb); 651 #else 652 d = slot->dev = device_add_child(slot->bus, "mmc", -1); 653 SDHCI_UNLOCK(slot); 654 if (d) { 655 device_set_ivars(d, slot); 656 (void)device_probe_and_attach(d); 657 } 658 #endif 659 } else 660 SDHCI_UNLOCK(slot); 661 } else { 662 #ifdef MMCCAM 663 if (slot->card_present == 1) { 664 #else 665 if (slot->dev != NULL) { 666 #endif 667 /* If no card present - detach mmc bus. */ 668 if (bootverbose || sdhci_debug) 669 slot_printf(slot, "Card removed\n"); 670 d = slot->dev; 671 slot->dev = NULL; 672 #ifdef MMCCAM 673 slot->card_present = 0; 674 union ccb *ccb; 675 uint32_t pathid; 676 pathid = cam_sim_path(slot->sim); 677 ccb = xpt_alloc_ccb_nowait(); 678 if (ccb == NULL) { 679 slot_printf(slot, "Unable to alloc CCB for rescan\n"); 680 SDHCI_UNLOCK(slot); 681 return; 682 } 683 684 /* 685 * We create a rescan request for BUS:0:0, since the card 686 * will be at lun 0. 687 */ 688 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, 689 /* target */ 0, /* lun */ 0) != CAM_REQ_CMP) { 690 slot_printf(slot, "Unable to create path for rescan\n"); 691 SDHCI_UNLOCK(slot); 692 xpt_free_ccb(ccb); 693 return; 694 } 695 SDHCI_UNLOCK(slot); 696 xpt_rescan(ccb); 697 #else 698 slot->intmask &= ~sdhci_tuning_intmask(slot); 699 WR4(slot, SDHCI_INT_ENABLE, slot->intmask); 700 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); 701 slot->opt &= ~SDHCI_TUNING_ENABLED; 702 SDHCI_UNLOCK(slot); 703 callout_drain(&slot->retune_callout); 704 device_delete_child(slot->bus, d); 705 #endif 706 } else 707 SDHCI_UNLOCK(slot); 708 } 709 } 710 711 static void 712 sdhci_handle_card_present_locked(struct sdhci_slot *slot, bool is_present) 713 { 714 bool was_present; 715 716 /* 717 * If there was no card and now there is one, schedule the task to 718 * create the child device after a short delay. The delay is to 719 * debounce the card insert (sometimes the card detect pin stabilizes 720 * before the other pins have made good contact). 721 * 722 * If there was a card present and now it's gone, immediately schedule 723 * the task to delete the child device. No debouncing -- gone is gone, 724 * because once power is removed, a full card re-init is needed, and 725 * that happens by deleting and recreating the child device. 726 */ 727 #ifdef MMCCAM 728 was_present = slot->card_present; 729 #else 730 was_present = slot->dev != NULL; 731 #endif 732 if (!was_present && is_present) { 733 taskqueue_enqueue_timeout(taskqueue_swi_giant, 734 &slot->card_delayed_task, -SDHCI_INSERT_DELAY_TICKS); 735 } else if (was_present && !is_present) { 736 taskqueue_enqueue(taskqueue_swi_giant, &slot->card_task); 737 } 738 } 739 740 void 741 sdhci_handle_card_present(struct sdhci_slot *slot, bool is_present) 742 { 743 744 SDHCI_LOCK(slot); 745 sdhci_handle_card_present_locked(slot, is_present); 746 SDHCI_UNLOCK(slot); 747 } 748 749 static void 750 sdhci_card_poll(void *arg) 751 { 752 struct sdhci_slot *slot = arg; 753 754 sdhci_handle_card_present(slot, 755 SDHCI_GET_CARD_PRESENT(slot->bus, slot)); 756 callout_reset(&slot->card_poll_callout, SDHCI_CARD_PRESENT_TICKS, 757 sdhci_card_poll, slot); 758 } 759 760 static int 761 sdhci_dma_alloc(struct sdhci_slot *slot) 762 { 763 int err; 764 765 if (!(slot->quirks & SDHCI_QUIRK_BROKEN_SDMA_BOUNDARY)) { 766 if (MAXPHYS <= 1024 * 4) 767 slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_4K; 768 else if (MAXPHYS <= 1024 * 8) 769 slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_8K; 770 else if (MAXPHYS <= 1024 * 16) 771 slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_16K; 772 else if (MAXPHYS <= 1024 * 32) 773 slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_32K; 774 else if (MAXPHYS <= 1024 * 64) 775 slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_64K; 776 else if (MAXPHYS <= 1024 * 128) 777 slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_128K; 778 else if (MAXPHYS <= 1024 * 256) 779 slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_256K; 780 else 781 slot->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_512K; 782 } 783 slot->sdma_bbufsz = SDHCI_SDMA_BNDRY_TO_BBUFSZ(slot->sdma_boundary); 784 785 /* 786 * Allocate the DMA tag for an SDMA bounce buffer. 787 * Note that the SDHCI specification doesn't state any alignment 788 * constraint for the SDMA system address. However, controllers 789 * typically ignore the SDMA boundary bits in SDHCI_DMA_ADDRESS when 790 * forming the actual address of data, requiring the SDMA buffer to 791 * be aligned to the SDMA boundary. 792 */ 793 err = bus_dma_tag_create(bus_get_dma_tag(slot->bus), slot->sdma_bbufsz, 794 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 795 slot->sdma_bbufsz, 1, slot->sdma_bbufsz, BUS_DMA_ALLOCNOW, 796 NULL, NULL, &slot->dmatag); 797 if (err != 0) { 798 slot_printf(slot, "Can't create DMA tag for SDMA\n"); 799 return (err); 800 } 801 /* Allocate DMA memory for the SDMA bounce buffer. */ 802 err = bus_dmamem_alloc(slot->dmatag, (void **)&slot->dmamem, 803 BUS_DMA_NOWAIT, &slot->dmamap); 804 if (err != 0) { 805 slot_printf(slot, "Can't alloc DMA memory for SDMA\n"); 806 bus_dma_tag_destroy(slot->dmatag); 807 return (err); 808 } 809 /* Map the memory of the SDMA bounce buffer. */ 810 err = bus_dmamap_load(slot->dmatag, slot->dmamap, 811 (void *)slot->dmamem, slot->sdma_bbufsz, sdhci_getaddr, 812 &slot->paddr, 0); 813 if (err != 0 || slot->paddr == 0) { 814 slot_printf(slot, "Can't load DMA memory for SDMA\n"); 815 bus_dmamem_free(slot->dmatag, slot->dmamem, slot->dmamap); 816 bus_dma_tag_destroy(slot->dmatag); 817 if (err) 818 return (err); 819 else 820 return (EFAULT); 821 } 822 823 return (0); 824 } 825 826 static void 827 sdhci_dma_free(struct sdhci_slot *slot) 828 { 829 830 bus_dmamap_unload(slot->dmatag, slot->dmamap); 831 bus_dmamem_free(slot->dmatag, slot->dmamem, slot->dmamap); 832 bus_dma_tag_destroy(slot->dmatag); 833 } 834 835 int 836 sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num) 837 { 838 kobjop_desc_t kobj_desc; 839 kobj_method_t *kobj_method; 840 uint32_t caps, caps2, freq, host_caps; 841 int err; 842 843 SDHCI_LOCK_INIT(slot); 844 845 slot->num = num; 846 slot->bus = dev; 847 848 slot->version = (RD2(slot, SDHCI_HOST_VERSION) 849 >> SDHCI_SPEC_VER_SHIFT) & SDHCI_SPEC_VER_MASK; 850 if (slot->quirks & SDHCI_QUIRK_MISSING_CAPS) { 851 caps = slot->caps; 852 caps2 = slot->caps2; 853 } else { 854 caps = RD4(slot, SDHCI_CAPABILITIES); 855 if (slot->version >= SDHCI_SPEC_300) 856 caps2 = RD4(slot, SDHCI_CAPABILITIES2); 857 else 858 caps2 = 0; 859 } 860 if (slot->version >= SDHCI_SPEC_300) { 861 if ((caps & SDHCI_SLOTTYPE_MASK) != SDHCI_SLOTTYPE_REMOVABLE && 862 (caps & SDHCI_SLOTTYPE_MASK) != SDHCI_SLOTTYPE_EMBEDDED) { 863 slot_printf(slot, 864 "Driver doesn't support shared bus slots\n"); 865 SDHCI_LOCK_DESTROY(slot); 866 return (ENXIO); 867 } else if ((caps & SDHCI_SLOTTYPE_MASK) == 868 SDHCI_SLOTTYPE_EMBEDDED) { 869 slot->opt |= SDHCI_SLOT_EMBEDDED | SDHCI_NON_REMOVABLE; 870 } 871 } 872 /* Calculate base clock frequency. */ 873 if (slot->version >= SDHCI_SPEC_300) 874 freq = (caps & SDHCI_CLOCK_V3_BASE_MASK) >> 875 SDHCI_CLOCK_BASE_SHIFT; 876 else 877 freq = (caps & SDHCI_CLOCK_BASE_MASK) >> 878 SDHCI_CLOCK_BASE_SHIFT; 879 if (freq != 0) 880 slot->max_clk = freq * 1000000; 881 /* 882 * If the frequency wasn't in the capabilities and the hardware driver 883 * hasn't already set max_clk we're probably not going to work right 884 * with an assumption, so complain about it. 885 */ 886 if (slot->max_clk == 0) { 887 slot->max_clk = SDHCI_DEFAULT_MAX_FREQ * 1000000; 888 slot_printf(slot, "Hardware doesn't specify base clock " 889 "frequency, using %dMHz as default.\n", 890 SDHCI_DEFAULT_MAX_FREQ); 891 } 892 /* Calculate/set timeout clock frequency. */ 893 if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) { 894 slot->timeout_clk = slot->max_clk / 1000; 895 } else if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_1MHZ) { 896 slot->timeout_clk = 1000; 897 } else { 898 slot->timeout_clk = (caps & SDHCI_TIMEOUT_CLK_MASK) >> 899 SDHCI_TIMEOUT_CLK_SHIFT; 900 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 901 slot->timeout_clk *= 1000; 902 } 903 /* 904 * If the frequency wasn't in the capabilities and the hardware driver 905 * hasn't already set timeout_clk we'll probably work okay using the 906 * max timeout, but still mention it. 907 */ 908 if (slot->timeout_clk == 0) { 909 slot_printf(slot, "Hardware doesn't specify timeout clock " 910 "frequency, setting BROKEN_TIMEOUT quirk.\n"); 911 slot->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 912 } 913 914 slot->host.f_min = SDHCI_MIN_FREQ(slot->bus, slot); 915 slot->host.f_max = slot->max_clk; 916 slot->host.host_ocr = 0; 917 if (caps & SDHCI_CAN_VDD_330) 918 slot->host.host_ocr |= MMC_OCR_320_330 | MMC_OCR_330_340; 919 if (caps & SDHCI_CAN_VDD_300) 920 slot->host.host_ocr |= MMC_OCR_290_300 | MMC_OCR_300_310; 921 /* 922 * 1.8V VDD is not supposed to be used for removable cards. Hardware 923 * prior to v3.0 had no way to indicate embedded slots, but did 924 * sometimes support 1.8v for non-removable devices. 925 */ 926 if ((caps & SDHCI_CAN_VDD_180) && (slot->version < SDHCI_SPEC_300 || 927 (slot->opt & SDHCI_SLOT_EMBEDDED))) 928 slot->host.host_ocr |= MMC_OCR_LOW_VOLTAGE; 929 if (slot->host.host_ocr == 0) { 930 slot_printf(slot, "Hardware doesn't report any " 931 "support voltages.\n"); 932 } 933 934 host_caps = MMC_CAP_4_BIT_DATA; 935 if (caps & SDHCI_CAN_DO_8BITBUS) 936 host_caps |= MMC_CAP_8_BIT_DATA; 937 if (caps & SDHCI_CAN_DO_HISPD) 938 host_caps |= MMC_CAP_HSPEED; 939 if (slot->quirks & SDHCI_QUIRK_BOOT_NOACC) 940 host_caps |= MMC_CAP_BOOT_NOACC; 941 if (slot->quirks & SDHCI_QUIRK_WAIT_WHILE_BUSY) 942 host_caps |= MMC_CAP_WAIT_WHILE_BUSY; 943 944 /* Determine supported UHS-I and eMMC modes. */ 945 if (caps2 & (SDHCI_CAN_SDR50 | SDHCI_CAN_SDR104 | SDHCI_CAN_DDR50)) 946 host_caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; 947 if (caps2 & SDHCI_CAN_SDR104) { 948 host_caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; 949 if (!(slot->quirks & SDHCI_QUIRK_BROKEN_MMC_HS200)) 950 host_caps |= MMC_CAP_MMC_HS200; 951 } else if (caps2 & SDHCI_CAN_SDR50) 952 host_caps |= MMC_CAP_UHS_SDR50; 953 if (caps2 & SDHCI_CAN_DDR50 && 954 !(slot->quirks & SDHCI_QUIRK_BROKEN_UHS_DDR50)) 955 host_caps |= MMC_CAP_UHS_DDR50; 956 if (slot->quirks & SDHCI_QUIRK_MMC_DDR52) 957 host_caps |= MMC_CAP_MMC_DDR52; 958 if (slot->quirks & SDHCI_QUIRK_CAPS_BIT63_FOR_MMC_HS400 && 959 caps2 & SDHCI_CAN_MMC_HS400) 960 host_caps |= MMC_CAP_MMC_HS400; 961 if (slot->quirks & SDHCI_QUIRK_MMC_HS400_IF_CAN_SDR104 && 962 caps2 & SDHCI_CAN_SDR104) 963 host_caps |= MMC_CAP_MMC_HS400; 964 965 /* 966 * Disable UHS-I and eMMC modes if the set_uhs_timing method is the 967 * default NULL implementation. 968 */ 969 kobj_desc = &sdhci_set_uhs_timing_desc; 970 kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL, 971 kobj_desc); 972 if (kobj_method == &kobj_desc->deflt) 973 host_caps &= ~(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 974 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 | 975 MMC_CAP_MMC_DDR52 | MMC_CAP_MMC_HS200 | MMC_CAP_MMC_HS400); 976 977 #define SDHCI_CAP_MODES_TUNING(caps2) \ 978 (((caps2) & SDHCI_TUNE_SDR50 ? MMC_CAP_UHS_SDR50 : 0) | \ 979 MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_MMC_HS200 | \ 980 MMC_CAP_MMC_HS400) 981 982 /* 983 * Disable UHS-I and eMMC modes that require (re-)tuning if either 984 * the tune or re-tune method is the default NULL implementation. 985 */ 986 kobj_desc = &mmcbr_tune_desc; 987 kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL, 988 kobj_desc); 989 if (kobj_method == &kobj_desc->deflt) 990 goto no_tuning; 991 kobj_desc = &mmcbr_retune_desc; 992 kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL, 993 kobj_desc); 994 if (kobj_method == &kobj_desc->deflt) { 995 no_tuning: 996 host_caps &= ~(SDHCI_CAP_MODES_TUNING(caps2)); 997 } 998 999 /* Allocate tuning structures and determine tuning parameters. */ 1000 if (host_caps & SDHCI_CAP_MODES_TUNING(caps2)) { 1001 slot->opt |= SDHCI_TUNING_SUPPORTED; 1002 slot->tune_req = malloc(sizeof(*slot->tune_req), M_DEVBUF, 1003 M_WAITOK); 1004 slot->tune_cmd = malloc(sizeof(*slot->tune_cmd), M_DEVBUF, 1005 M_WAITOK); 1006 slot->tune_data = malloc(sizeof(*slot->tune_data), M_DEVBUF, 1007 M_WAITOK); 1008 if (caps2 & SDHCI_TUNE_SDR50) 1009 slot->opt |= SDHCI_SDR50_NEEDS_TUNING; 1010 slot->retune_mode = (caps2 & SDHCI_RETUNE_MODES_MASK) >> 1011 SDHCI_RETUNE_MODES_SHIFT; 1012 if (slot->retune_mode == SDHCI_RETUNE_MODE_1) { 1013 slot->retune_count = (caps2 & SDHCI_RETUNE_CNT_MASK) >> 1014 SDHCI_RETUNE_CNT_SHIFT; 1015 if (slot->retune_count > 0xb) { 1016 slot_printf(slot, "Unknown re-tuning count " 1017 "%x, using 1 sec\n", slot->retune_count); 1018 slot->retune_count = 1; 1019 } else if (slot->retune_count != 0) 1020 slot->retune_count = 1021 1 << (slot->retune_count - 1); 1022 } 1023 } 1024 1025 #undef SDHCI_CAP_MODES_TUNING 1026 1027 /* Determine supported VCCQ signaling levels. */ 1028 host_caps |= MMC_CAP_SIGNALING_330; 1029 if (host_caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 1030 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 | 1031 MMC_CAP_MMC_DDR52_180 | MMC_CAP_MMC_HS200_180 | 1032 MMC_CAP_MMC_HS400_180)) 1033 host_caps |= MMC_CAP_SIGNALING_120 | MMC_CAP_SIGNALING_180; 1034 1035 /* 1036 * Disable 1.2 V and 1.8 V signaling if the switch_vccq method is the 1037 * default NULL implementation. Disable 1.2 V support if it's the 1038 * generic SDHCI implementation. 1039 */ 1040 kobj_desc = &mmcbr_switch_vccq_desc; 1041 kobj_method = kobj_lookup_method(((kobj_t)dev)->ops->cls, NULL, 1042 kobj_desc); 1043 if (kobj_method == &kobj_desc->deflt) 1044 host_caps &= ~(MMC_CAP_SIGNALING_120 | MMC_CAP_SIGNALING_180); 1045 else if (kobj_method->func == (kobjop_t)sdhci_generic_switch_vccq) 1046 host_caps &= ~MMC_CAP_SIGNALING_120; 1047 1048 /* Determine supported driver types (type B is always mandatory). */ 1049 if (caps2 & SDHCI_CAN_DRIVE_TYPE_A) 1050 host_caps |= MMC_CAP_DRIVER_TYPE_A; 1051 if (caps2 & SDHCI_CAN_DRIVE_TYPE_C) 1052 host_caps |= MMC_CAP_DRIVER_TYPE_C; 1053 if (caps2 & SDHCI_CAN_DRIVE_TYPE_D) 1054 host_caps |= MMC_CAP_DRIVER_TYPE_D; 1055 slot->host.caps = host_caps; 1056 1057 /* Decide if we have usable DMA. */ 1058 if (caps & SDHCI_CAN_DO_DMA) 1059 slot->opt |= SDHCI_HAVE_DMA; 1060 1061 if (slot->quirks & SDHCI_QUIRK_BROKEN_DMA) 1062 slot->opt &= ~SDHCI_HAVE_DMA; 1063 if (slot->quirks & SDHCI_QUIRK_FORCE_DMA) 1064 slot->opt |= SDHCI_HAVE_DMA; 1065 if (slot->quirks & SDHCI_QUIRK_ALL_SLOTS_NON_REMOVABLE) 1066 slot->opt |= SDHCI_NON_REMOVABLE; 1067 1068 /* 1069 * Use platform-provided transfer backend 1070 * with PIO as a fallback mechanism 1071 */ 1072 if (slot->opt & SDHCI_PLATFORM_TRANSFER) 1073 slot->opt &= ~SDHCI_HAVE_DMA; 1074 1075 if (slot->opt & SDHCI_HAVE_DMA) { 1076 err = sdhci_dma_alloc(slot); 1077 if (err != 0) { 1078 if (slot->opt & SDHCI_TUNING_SUPPORTED) { 1079 free(slot->tune_req, M_DEVBUF); 1080 free(slot->tune_cmd, M_DEVBUF); 1081 free(slot->tune_data, M_DEVBUF); 1082 } 1083 SDHCI_LOCK_DESTROY(slot); 1084 return (err); 1085 } 1086 } 1087 1088 if (bootverbose || sdhci_debug) { 1089 slot_printf(slot, 1090 "%uMHz%s %s VDD:%s%s%s VCCQ: 3.3V%s%s DRV: B%s%s%s %s %s\n", 1091 slot->max_clk / 1000000, 1092 (caps & SDHCI_CAN_DO_HISPD) ? " HS" : "", 1093 (host_caps & MMC_CAP_8_BIT_DATA) ? "8bits" : 1094 ((host_caps & MMC_CAP_4_BIT_DATA) ? "4bits" : "1bit"), 1095 (caps & SDHCI_CAN_VDD_330) ? " 3.3V" : "", 1096 (caps & SDHCI_CAN_VDD_300) ? " 3.0V" : "", 1097 ((caps & SDHCI_CAN_VDD_180) && 1098 (slot->opt & SDHCI_SLOT_EMBEDDED)) ? " 1.8V" : "", 1099 (host_caps & MMC_CAP_SIGNALING_180) ? " 1.8V" : "", 1100 (host_caps & MMC_CAP_SIGNALING_120) ? " 1.2V" : "", 1101 (host_caps & MMC_CAP_DRIVER_TYPE_A) ? "A" : "", 1102 (host_caps & MMC_CAP_DRIVER_TYPE_C) ? "C" : "", 1103 (host_caps & MMC_CAP_DRIVER_TYPE_D) ? "D" : "", 1104 (slot->opt & SDHCI_HAVE_DMA) ? "DMA" : "PIO", 1105 (slot->opt & SDHCI_SLOT_EMBEDDED) ? "embedded" : 1106 (slot->opt & SDHCI_NON_REMOVABLE) ? "non-removable" : 1107 "removable"); 1108 if (host_caps & (MMC_CAP_MMC_DDR52 | MMC_CAP_MMC_HS200 | 1109 MMC_CAP_MMC_HS400 | MMC_CAP_MMC_ENH_STROBE)) 1110 slot_printf(slot, "eMMC:%s%s%s%s\n", 1111 (host_caps & MMC_CAP_MMC_DDR52) ? " DDR52" : "", 1112 (host_caps & MMC_CAP_MMC_HS200) ? " HS200" : "", 1113 (host_caps & MMC_CAP_MMC_HS400) ? " HS400" : "", 1114 ((host_caps & 1115 (MMC_CAP_MMC_HS400 | MMC_CAP_MMC_ENH_STROBE)) == 1116 (MMC_CAP_MMC_HS400 | MMC_CAP_MMC_ENH_STROBE)) ? 1117 " HS400ES" : ""); 1118 if (host_caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 1119 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104)) 1120 slot_printf(slot, "UHS-I:%s%s%s%s%s\n", 1121 (host_caps & MMC_CAP_UHS_SDR12) ? " SDR12" : "", 1122 (host_caps & MMC_CAP_UHS_SDR25) ? " SDR25" : "", 1123 (host_caps & MMC_CAP_UHS_SDR50) ? " SDR50" : "", 1124 (host_caps & MMC_CAP_UHS_SDR104) ? " SDR104" : "", 1125 (host_caps & MMC_CAP_UHS_DDR50) ? " DDR50" : ""); 1126 if (slot->opt & SDHCI_TUNING_SUPPORTED) 1127 slot_printf(slot, "Re-tuning count %d secs, mode %d\n", 1128 slot->retune_count, slot->retune_mode + 1); 1129 sdhci_dumpregs(slot); 1130 } 1131 1132 slot->timeout = 10; 1133 SYSCTL_ADD_INT(device_get_sysctl_ctx(slot->bus), 1134 SYSCTL_CHILDREN(device_get_sysctl_tree(slot->bus)), OID_AUTO, 1135 "timeout", CTLFLAG_RWTUN, &slot->timeout, 0, 1136 "Maximum timeout for SDHCI transfers (in secs)"); 1137 TASK_INIT(&slot->card_task, 0, sdhci_card_task, slot); 1138 TIMEOUT_TASK_INIT(taskqueue_swi_giant, &slot->card_delayed_task, 0, 1139 sdhci_card_task, slot); 1140 callout_init(&slot->card_poll_callout, 1); 1141 callout_init_mtx(&slot->timeout_callout, &slot->mtx, 0); 1142 callout_init_mtx(&slot->retune_callout, &slot->mtx, 0); 1143 1144 if ((slot->quirks & SDHCI_QUIRK_POLL_CARD_PRESENT) && 1145 !(slot->opt & SDHCI_NON_REMOVABLE)) { 1146 callout_reset(&slot->card_poll_callout, 1147 SDHCI_CARD_PRESENT_TICKS, sdhci_card_poll, slot); 1148 } 1149 1150 sdhci_init(slot); 1151 1152 return (0); 1153 } 1154 1155 #ifndef MMCCAM 1156 void 1157 sdhci_start_slot(struct sdhci_slot *slot) 1158 { 1159 1160 sdhci_card_task(slot, 0); 1161 } 1162 #endif 1163 1164 int 1165 sdhci_cleanup_slot(struct sdhci_slot *slot) 1166 { 1167 device_t d; 1168 1169 callout_drain(&slot->timeout_callout); 1170 callout_drain(&slot->card_poll_callout); 1171 callout_drain(&slot->retune_callout); 1172 taskqueue_drain(taskqueue_swi_giant, &slot->card_task); 1173 taskqueue_drain_timeout(taskqueue_swi_giant, &slot->card_delayed_task); 1174 1175 SDHCI_LOCK(slot); 1176 d = slot->dev; 1177 slot->dev = NULL; 1178 SDHCI_UNLOCK(slot); 1179 if (d != NULL) 1180 device_delete_child(slot->bus, d); 1181 1182 SDHCI_LOCK(slot); 1183 sdhci_reset(slot, SDHCI_RESET_ALL); 1184 SDHCI_UNLOCK(slot); 1185 if (slot->opt & SDHCI_HAVE_DMA) 1186 sdhci_dma_free(slot); 1187 if (slot->opt & SDHCI_TUNING_SUPPORTED) { 1188 free(slot->tune_req, M_DEVBUF); 1189 free(slot->tune_cmd, M_DEVBUF); 1190 free(slot->tune_data, M_DEVBUF); 1191 } 1192 1193 SDHCI_LOCK_DESTROY(slot); 1194 1195 return (0); 1196 } 1197 1198 int 1199 sdhci_generic_suspend(struct sdhci_slot *slot) 1200 { 1201 1202 /* 1203 * We expect the MMC layer to issue initial tuning after resume. 1204 * Otherwise, we'd need to indicate re-tuning including circuit reset 1205 * being required at least for re-tuning modes 1 and 2 ourselves. 1206 */ 1207 callout_drain(&slot->retune_callout); 1208 SDHCI_LOCK(slot); 1209 slot->opt &= ~SDHCI_TUNING_ENABLED; 1210 sdhci_reset(slot, SDHCI_RESET_ALL); 1211 SDHCI_UNLOCK(slot); 1212 1213 return (0); 1214 } 1215 1216 int 1217 sdhci_generic_resume(struct sdhci_slot *slot) 1218 { 1219 1220 SDHCI_LOCK(slot); 1221 sdhci_init(slot); 1222 SDHCI_UNLOCK(slot); 1223 1224 return (0); 1225 } 1226 1227 uint32_t 1228 sdhci_generic_min_freq(device_t brdev __unused, struct sdhci_slot *slot) 1229 { 1230 1231 if (slot->version >= SDHCI_SPEC_300) 1232 return (slot->max_clk / SDHCI_300_MAX_DIVIDER); 1233 else 1234 return (slot->max_clk / SDHCI_200_MAX_DIVIDER); 1235 } 1236 1237 bool 1238 sdhci_generic_get_card_present(device_t brdev __unused, struct sdhci_slot *slot) 1239 { 1240 1241 if (slot->opt & SDHCI_NON_REMOVABLE) 1242 return true; 1243 1244 return (RD4(slot, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 1245 } 1246 1247 void 1248 sdhci_generic_set_uhs_timing(device_t brdev __unused, struct sdhci_slot *slot) 1249 { 1250 const struct mmc_ios *ios; 1251 uint16_t hostctrl2; 1252 1253 if (slot->version < SDHCI_SPEC_300) 1254 return; 1255 1256 SDHCI_ASSERT_LOCKED(slot); 1257 ios = &slot->host.ios; 1258 sdhci_set_clock(slot, 0); 1259 hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2); 1260 hostctrl2 &= ~SDHCI_CTRL2_UHS_MASK; 1261 if (ios->clock > SD_SDR50_MAX) { 1262 if (ios->timing == bus_timing_mmc_hs400 || 1263 ios->timing == bus_timing_mmc_hs400es) 1264 hostctrl2 |= SDHCI_CTRL2_MMC_HS400; 1265 else 1266 hostctrl2 |= SDHCI_CTRL2_UHS_SDR104; 1267 } 1268 else if (ios->clock > SD_SDR25_MAX) 1269 hostctrl2 |= SDHCI_CTRL2_UHS_SDR50; 1270 else if (ios->clock > SD_SDR12_MAX) { 1271 if (ios->timing == bus_timing_uhs_ddr50 || 1272 ios->timing == bus_timing_mmc_ddr52) 1273 hostctrl2 |= SDHCI_CTRL2_UHS_DDR50; 1274 else 1275 hostctrl2 |= SDHCI_CTRL2_UHS_SDR25; 1276 } else if (ios->clock > SD_MMC_CARD_ID_FREQUENCY) 1277 hostctrl2 |= SDHCI_CTRL2_UHS_SDR12; 1278 WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2); 1279 sdhci_set_clock(slot, ios->clock); 1280 } 1281 1282 int 1283 sdhci_generic_update_ios(device_t brdev, device_t reqdev) 1284 { 1285 struct sdhci_slot *slot = device_get_ivars(reqdev); 1286 struct mmc_ios *ios = &slot->host.ios; 1287 1288 SDHCI_LOCK(slot); 1289 /* Do full reset on bus power down to clear from any state. */ 1290 if (ios->power_mode == power_off) { 1291 WR4(slot, SDHCI_SIGNAL_ENABLE, 0); 1292 sdhci_init(slot); 1293 } 1294 /* Configure the bus. */ 1295 sdhci_set_clock(slot, ios->clock); 1296 sdhci_set_power(slot, (ios->power_mode == power_off) ? 0 : ios->vdd); 1297 if (ios->bus_width == bus_width_8) { 1298 slot->hostctrl |= SDHCI_CTRL_8BITBUS; 1299 slot->hostctrl &= ~SDHCI_CTRL_4BITBUS; 1300 } else if (ios->bus_width == bus_width_4) { 1301 slot->hostctrl &= ~SDHCI_CTRL_8BITBUS; 1302 slot->hostctrl |= SDHCI_CTRL_4BITBUS; 1303 } else if (ios->bus_width == bus_width_1) { 1304 slot->hostctrl &= ~SDHCI_CTRL_8BITBUS; 1305 slot->hostctrl &= ~SDHCI_CTRL_4BITBUS; 1306 } else { 1307 panic("Invalid bus width: %d", ios->bus_width); 1308 } 1309 if (ios->clock > SD_SDR12_MAX && 1310 !(slot->quirks & SDHCI_QUIRK_DONT_SET_HISPD_BIT)) 1311 slot->hostctrl |= SDHCI_CTRL_HISPD; 1312 else 1313 slot->hostctrl &= ~SDHCI_CTRL_HISPD; 1314 WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl); 1315 SDHCI_SET_UHS_TIMING(brdev, slot); 1316 /* Some controllers like reset after bus changes. */ 1317 if (slot->quirks & SDHCI_QUIRK_RESET_ON_IOS) 1318 sdhci_reset(slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 1319 1320 SDHCI_UNLOCK(slot); 1321 return (0); 1322 } 1323 1324 int 1325 sdhci_generic_switch_vccq(device_t brdev __unused, device_t reqdev) 1326 { 1327 struct sdhci_slot *slot = device_get_ivars(reqdev); 1328 enum mmc_vccq vccq; 1329 int err; 1330 uint16_t hostctrl2; 1331 1332 if (slot->version < SDHCI_SPEC_300) 1333 return (0); 1334 1335 err = 0; 1336 vccq = slot->host.ios.vccq; 1337 SDHCI_LOCK(slot); 1338 sdhci_set_clock(slot, 0); 1339 hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2); 1340 switch (vccq) { 1341 case vccq_330: 1342 if (!(hostctrl2 & SDHCI_CTRL2_S18_ENABLE)) 1343 goto done; 1344 hostctrl2 &= ~SDHCI_CTRL2_S18_ENABLE; 1345 WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2); 1346 DELAY(5000); 1347 hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2); 1348 if (!(hostctrl2 & SDHCI_CTRL2_S18_ENABLE)) 1349 goto done; 1350 err = EAGAIN; 1351 break; 1352 case vccq_180: 1353 if (!(slot->host.caps & MMC_CAP_SIGNALING_180)) { 1354 err = EINVAL; 1355 goto done; 1356 } 1357 if (hostctrl2 & SDHCI_CTRL2_S18_ENABLE) 1358 goto done; 1359 hostctrl2 |= SDHCI_CTRL2_S18_ENABLE; 1360 WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2); 1361 DELAY(5000); 1362 hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2); 1363 if (hostctrl2 & SDHCI_CTRL2_S18_ENABLE) 1364 goto done; 1365 err = EAGAIN; 1366 break; 1367 default: 1368 slot_printf(slot, 1369 "Attempt to set unsupported signaling voltage\n"); 1370 err = EINVAL; 1371 break; 1372 } 1373 done: 1374 sdhci_set_clock(slot, slot->host.ios.clock); 1375 SDHCI_UNLOCK(slot); 1376 return (err); 1377 } 1378 1379 int 1380 sdhci_generic_tune(device_t brdev __unused, device_t reqdev, bool hs400) 1381 { 1382 struct sdhci_slot *slot = device_get_ivars(reqdev); 1383 const struct mmc_ios *ios = &slot->host.ios; 1384 struct mmc_command *tune_cmd; 1385 struct mmc_data *tune_data; 1386 uint32_t opcode; 1387 int err; 1388 1389 if (!(slot->opt & SDHCI_TUNING_SUPPORTED)) 1390 return (0); 1391 1392 slot->retune_ticks = slot->retune_count * hz; 1393 opcode = MMC_SEND_TUNING_BLOCK; 1394 SDHCI_LOCK(slot); 1395 switch (ios->timing) { 1396 case bus_timing_mmc_hs400: 1397 slot_printf(slot, "HS400 must be tuned in HS200 mode\n"); 1398 SDHCI_UNLOCK(slot); 1399 return (EINVAL); 1400 case bus_timing_mmc_hs200: 1401 /* 1402 * In HS400 mode, controllers use the data strobe line to 1403 * latch data from the devices so periodic re-tuning isn't 1404 * expected to be required. 1405 */ 1406 if (hs400) 1407 slot->retune_ticks = 0; 1408 opcode = MMC_SEND_TUNING_BLOCK_HS200; 1409 break; 1410 case bus_timing_uhs_ddr50: 1411 case bus_timing_uhs_sdr104: 1412 break; 1413 case bus_timing_uhs_sdr50: 1414 if (slot->opt & SDHCI_SDR50_NEEDS_TUNING) 1415 break; 1416 /* FALLTHROUGH */ 1417 default: 1418 SDHCI_UNLOCK(slot); 1419 return (0); 1420 } 1421 1422 tune_cmd = slot->tune_cmd; 1423 memset(tune_cmd, 0, sizeof(*tune_cmd)); 1424 tune_cmd->opcode = opcode; 1425 tune_cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; 1426 tune_data = tune_cmd->data = slot->tune_data; 1427 memset(tune_data, 0, sizeof(*tune_data)); 1428 tune_data->len = (opcode == MMC_SEND_TUNING_BLOCK_HS200 && 1429 ios->bus_width == bus_width_8) ? MMC_TUNING_LEN_HS200 : 1430 MMC_TUNING_LEN; 1431 tune_data->flags = MMC_DATA_READ; 1432 tune_data->mrq = tune_cmd->mrq = slot->tune_req; 1433 1434 slot->opt &= ~SDHCI_TUNING_ENABLED; 1435 err = sdhci_exec_tuning(slot, true); 1436 if (err == 0) { 1437 slot->opt |= SDHCI_TUNING_ENABLED; 1438 slot->intmask |= sdhci_tuning_intmask(slot); 1439 WR4(slot, SDHCI_INT_ENABLE, slot->intmask); 1440 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); 1441 if (slot->retune_ticks) { 1442 callout_reset(&slot->retune_callout, slot->retune_ticks, 1443 sdhci_retune, slot); 1444 } 1445 } 1446 SDHCI_UNLOCK(slot); 1447 return (err); 1448 } 1449 1450 int 1451 sdhci_generic_retune(device_t brdev __unused, device_t reqdev, bool reset) 1452 { 1453 struct sdhci_slot *slot = device_get_ivars(reqdev); 1454 int err; 1455 1456 if (!(slot->opt & SDHCI_TUNING_ENABLED)) 1457 return (0); 1458 1459 /* HS400 must be tuned in HS200 mode. */ 1460 if (slot->host.ios.timing == bus_timing_mmc_hs400) 1461 return (EINVAL); 1462 1463 SDHCI_LOCK(slot); 1464 err = sdhci_exec_tuning(slot, reset); 1465 /* 1466 * There are two ways sdhci_exec_tuning() can fail: 1467 * EBUSY should not actually happen when requests are only issued 1468 * with the host properly acquired, and 1469 * EIO re-tuning failed (but it did work initially). 1470 * 1471 * In both cases, we should retry at later point if periodic re-tuning 1472 * is enabled. Note that due to slot->retune_req not being cleared in 1473 * these failure cases, the MMC layer should trigger another attempt at 1474 * re-tuning with the next request anyway, though. 1475 */ 1476 if (slot->retune_ticks) { 1477 callout_reset(&slot->retune_callout, slot->retune_ticks, 1478 sdhci_retune, slot); 1479 } 1480 SDHCI_UNLOCK(slot); 1481 return (err); 1482 } 1483 1484 static int 1485 sdhci_exec_tuning(struct sdhci_slot *slot, bool reset) 1486 { 1487 struct mmc_request *tune_req; 1488 struct mmc_command *tune_cmd; 1489 int i; 1490 uint32_t intmask; 1491 uint16_t hostctrl2; 1492 u_char opt; 1493 1494 SDHCI_ASSERT_LOCKED(slot); 1495 if (slot->req != NULL) 1496 return (EBUSY); 1497 1498 /* Tuning doesn't work with DMA enabled. */ 1499 opt = slot->opt; 1500 slot->opt = opt & ~SDHCI_HAVE_DMA; 1501 1502 /* 1503 * Ensure that as documented, SDHCI_INT_DATA_AVAIL is the only 1504 * kind of interrupt we receive in response to a tuning request. 1505 */ 1506 intmask = slot->intmask; 1507 slot->intmask = SDHCI_INT_DATA_AVAIL; 1508 WR4(slot, SDHCI_INT_ENABLE, SDHCI_INT_DATA_AVAIL); 1509 WR4(slot, SDHCI_SIGNAL_ENABLE, SDHCI_INT_DATA_AVAIL); 1510 1511 hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2); 1512 if (reset) 1513 hostctrl2 &= ~SDHCI_CTRL2_SAMPLING_CLOCK; 1514 else 1515 hostctrl2 |= SDHCI_CTRL2_SAMPLING_CLOCK; 1516 WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2 | SDHCI_CTRL2_EXEC_TUNING); 1517 1518 tune_req = slot->tune_req; 1519 tune_cmd = slot->tune_cmd; 1520 for (i = 0; i < MMC_TUNING_MAX; i++) { 1521 memset(tune_req, 0, sizeof(*tune_req)); 1522 tune_req->cmd = tune_cmd; 1523 tune_req->done = sdhci_req_wakeup; 1524 tune_req->done_data = slot; 1525 slot->req = tune_req; 1526 slot->flags = 0; 1527 sdhci_start(slot); 1528 while (!(tune_req->flags & MMC_REQ_DONE)) 1529 msleep(tune_req, &slot->mtx, 0, "sdhciet", 0); 1530 if (!(tune_req->flags & MMC_TUNE_DONE)) 1531 break; 1532 hostctrl2 = RD2(slot, SDHCI_HOST_CONTROL2); 1533 if (!(hostctrl2 & SDHCI_CTRL2_EXEC_TUNING)) 1534 break; 1535 if (tune_cmd->opcode == MMC_SEND_TUNING_BLOCK) 1536 DELAY(1000); 1537 } 1538 1539 /* 1540 * Restore DMA usage and interrupts. 1541 * Note that the interrupt aggregation code might have cleared 1542 * SDHCI_INT_DMA_END and/or SDHCI_INT_RESPONSE in slot->intmask 1543 * and SDHCI_SIGNAL_ENABLE respectively so ensure SDHCI_INT_ENABLE 1544 * doesn't lose these. 1545 */ 1546 slot->opt = opt; 1547 slot->intmask = intmask; 1548 WR4(slot, SDHCI_INT_ENABLE, intmask | SDHCI_INT_DMA_END | 1549 SDHCI_INT_RESPONSE); 1550 WR4(slot, SDHCI_SIGNAL_ENABLE, intmask); 1551 1552 if ((hostctrl2 & (SDHCI_CTRL2_EXEC_TUNING | 1553 SDHCI_CTRL2_SAMPLING_CLOCK)) == SDHCI_CTRL2_SAMPLING_CLOCK) { 1554 slot->retune_req = 0; 1555 return (0); 1556 } 1557 1558 slot_printf(slot, "Tuning failed, using fixed sampling clock\n"); 1559 WR2(slot, SDHCI_HOST_CONTROL2, hostctrl2 & ~(SDHCI_CTRL2_EXEC_TUNING | 1560 SDHCI_CTRL2_SAMPLING_CLOCK)); 1561 sdhci_reset(slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 1562 return (EIO); 1563 } 1564 1565 static void 1566 sdhci_retune(void *arg) 1567 { 1568 struct sdhci_slot *slot = arg; 1569 1570 slot->retune_req |= SDHCI_RETUNE_REQ_NEEDED; 1571 } 1572 1573 #ifdef MMCCAM 1574 static void 1575 sdhci_req_done(struct sdhci_slot *slot) 1576 { 1577 union ccb *ccb; 1578 1579 if (__predict_false(sdhci_debug > 1)) 1580 slot_printf(slot, "%s\n", __func__); 1581 if (slot->ccb != NULL && slot->curcmd != NULL) { 1582 callout_stop(&slot->timeout_callout); 1583 ccb = slot->ccb; 1584 slot->ccb = NULL; 1585 slot->curcmd = NULL; 1586 1587 /* Tell CAM the request is finished */ 1588 struct ccb_mmcio *mmcio; 1589 mmcio = &ccb->mmcio; 1590 1591 ccb->ccb_h.status = 1592 (mmcio->cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR); 1593 xpt_done(ccb); 1594 } 1595 } 1596 #else 1597 static void 1598 sdhci_req_done(struct sdhci_slot *slot) 1599 { 1600 struct mmc_request *req; 1601 1602 if (slot->req != NULL && slot->curcmd != NULL) { 1603 callout_stop(&slot->timeout_callout); 1604 req = slot->req; 1605 slot->req = NULL; 1606 slot->curcmd = NULL; 1607 req->done(req); 1608 } 1609 } 1610 #endif 1611 1612 static void 1613 sdhci_req_wakeup(struct mmc_request *req) 1614 { 1615 struct sdhci_slot *slot; 1616 1617 slot = req->done_data; 1618 req->flags |= MMC_REQ_DONE; 1619 wakeup(req); 1620 } 1621 1622 static void 1623 sdhci_timeout(void *arg) 1624 { 1625 struct sdhci_slot *slot = arg; 1626 1627 if (slot->curcmd != NULL) { 1628 slot_printf(slot, "Controller timeout\n"); 1629 sdhci_dumpregs(slot); 1630 sdhci_reset(slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 1631 slot->curcmd->error = MMC_ERR_TIMEOUT; 1632 sdhci_req_done(slot); 1633 } else { 1634 slot_printf(slot, "Spurious timeout - no active command\n"); 1635 } 1636 } 1637 1638 static void 1639 sdhci_set_transfer_mode(struct sdhci_slot *slot, const struct mmc_data *data) 1640 { 1641 uint16_t mode; 1642 1643 if (data == NULL) 1644 return; 1645 1646 mode = SDHCI_TRNS_BLK_CNT_EN; 1647 if (data->len > 512 || data->block_count > 1) { 1648 mode |= SDHCI_TRNS_MULTI; 1649 if (data->block_count == 0 && __predict_true( 1650 #ifdef MMCCAM 1651 slot->ccb->mmcio.stop.opcode == MMC_STOP_TRANSMISSION && 1652 #else 1653 slot->req->stop != NULL && 1654 #endif 1655 !(slot->quirks & SDHCI_QUIRK_BROKEN_AUTO_STOP))) 1656 mode |= SDHCI_TRNS_ACMD12; 1657 } 1658 if (data->flags & MMC_DATA_READ) 1659 mode |= SDHCI_TRNS_READ; 1660 if (slot->flags & SDHCI_USE_DMA) 1661 mode |= SDHCI_TRNS_DMA; 1662 1663 WR2(slot, SDHCI_TRANSFER_MODE, mode); 1664 } 1665 1666 static void 1667 sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd) 1668 { 1669 int flags, timeout; 1670 uint32_t mask; 1671 1672 slot->curcmd = cmd; 1673 slot->cmd_done = 0; 1674 1675 cmd->error = MMC_ERR_NONE; 1676 1677 /* This flags combination is not supported by controller. */ 1678 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 1679 slot_printf(slot, "Unsupported response type!\n"); 1680 cmd->error = MMC_ERR_FAILED; 1681 sdhci_req_done(slot); 1682 return; 1683 } 1684 1685 /* 1686 * Do not issue command if there is no card, clock or power. 1687 * Controller will not detect timeout without clock active. 1688 */ 1689 if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot) || 1690 slot->power == 0 || 1691 slot->clock == 0) { 1692 slot_printf(slot, 1693 "Cannot issue a command (power=%d clock=%d)", 1694 slot->power, slot->clock); 1695 cmd->error = MMC_ERR_FAILED; 1696 sdhci_req_done(slot); 1697 return; 1698 } 1699 /* Always wait for free CMD bus. */ 1700 mask = SDHCI_CMD_INHIBIT; 1701 /* Wait for free DAT if we have data or busy signal. */ 1702 if (cmd->data != NULL || (cmd->flags & MMC_RSP_BUSY)) 1703 mask |= SDHCI_DAT_INHIBIT; 1704 /* 1705 * We shouldn't wait for DAT for stop commands or CMD19/CMD21. Note 1706 * that these latter are also special in that SDHCI_CMD_DATA should 1707 * be set below but no actual data is ever read from the controller. 1708 */ 1709 #ifdef MMCCAM 1710 if (cmd == &slot->ccb->mmcio.stop || 1711 #else 1712 if (cmd == slot->req->stop || 1713 #endif 1714 __predict_false(cmd->opcode == MMC_SEND_TUNING_BLOCK || 1715 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)) 1716 mask &= ~SDHCI_DAT_INHIBIT; 1717 /* 1718 * Wait for bus no more then 250 ms. Typically there will be no wait 1719 * here at all, but when writing a crash dump we may be bypassing the 1720 * host platform's interrupt handler, and in some cases that handler 1721 * may be working around hardware quirks such as not respecting r1b 1722 * busy indications. In those cases, this wait-loop serves the purpose 1723 * of waiting for the prior command and data transfers to be done, and 1724 * SD cards are allowed to take up to 250ms for write and erase ops. 1725 * (It's usually more like 20-30ms in the real world.) 1726 */ 1727 timeout = 250; 1728 while (mask & RD4(slot, SDHCI_PRESENT_STATE)) { 1729 if (timeout == 0) { 1730 slot_printf(slot, "Controller never released " 1731 "inhibit bit(s).\n"); 1732 sdhci_dumpregs(slot); 1733 cmd->error = MMC_ERR_FAILED; 1734 sdhci_req_done(slot); 1735 return; 1736 } 1737 timeout--; 1738 DELAY(1000); 1739 } 1740 1741 /* Prepare command flags. */ 1742 if (!(cmd->flags & MMC_RSP_PRESENT)) 1743 flags = SDHCI_CMD_RESP_NONE; 1744 else if (cmd->flags & MMC_RSP_136) 1745 flags = SDHCI_CMD_RESP_LONG; 1746 else if (cmd->flags & MMC_RSP_BUSY) 1747 flags = SDHCI_CMD_RESP_SHORT_BUSY; 1748 else 1749 flags = SDHCI_CMD_RESP_SHORT; 1750 if (cmd->flags & MMC_RSP_CRC) 1751 flags |= SDHCI_CMD_CRC; 1752 if (cmd->flags & MMC_RSP_OPCODE) 1753 flags |= SDHCI_CMD_INDEX; 1754 if (cmd->data != NULL) 1755 flags |= SDHCI_CMD_DATA; 1756 if (cmd->opcode == MMC_STOP_TRANSMISSION) 1757 flags |= SDHCI_CMD_TYPE_ABORT; 1758 /* Prepare data. */ 1759 sdhci_start_data(slot, cmd->data); 1760 /* 1761 * Interrupt aggregation: To reduce total number of interrupts 1762 * group response interrupt with data interrupt when possible. 1763 * If there going to be data interrupt, mask response one. 1764 */ 1765 if (slot->data_done == 0) { 1766 WR4(slot, SDHCI_SIGNAL_ENABLE, 1767 slot->intmask &= ~SDHCI_INT_RESPONSE); 1768 } 1769 /* Set command argument. */ 1770 WR4(slot, SDHCI_ARGUMENT, cmd->arg); 1771 /* Set data transfer mode. */ 1772 sdhci_set_transfer_mode(slot, cmd->data); 1773 if (__predict_false(sdhci_debug > 1)) 1774 slot_printf(slot, "Starting command opcode %#04x flags %#04x\n", 1775 cmd->opcode, flags); 1776 1777 /* Start command. */ 1778 WR2(slot, SDHCI_COMMAND_FLAGS, (cmd->opcode << 8) | (flags & 0xff)); 1779 /* Start timeout callout. */ 1780 callout_reset(&slot->timeout_callout, slot->timeout * hz, 1781 sdhci_timeout, slot); 1782 } 1783 1784 static void 1785 sdhci_finish_command(struct sdhci_slot *slot) 1786 { 1787 int i; 1788 uint32_t val; 1789 uint8_t extra; 1790 1791 if (__predict_false(sdhci_debug > 1)) 1792 slot_printf(slot, "%s: called, err %d flags %#04x\n", 1793 __func__, slot->curcmd->error, slot->curcmd->flags); 1794 slot->cmd_done = 1; 1795 /* 1796 * Interrupt aggregation: Restore command interrupt. 1797 * Main restore point for the case when command interrupt 1798 * happened first. 1799 */ 1800 if (__predict_true(slot->curcmd->opcode != MMC_SEND_TUNING_BLOCK && 1801 slot->curcmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)) 1802 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask |= 1803 SDHCI_INT_RESPONSE); 1804 /* In case of error - reset host and return. */ 1805 if (slot->curcmd->error) { 1806 if (slot->curcmd->error == MMC_ERR_BADCRC) 1807 slot->retune_req |= SDHCI_RETUNE_REQ_RESET; 1808 sdhci_reset(slot, SDHCI_RESET_CMD); 1809 sdhci_reset(slot, SDHCI_RESET_DATA); 1810 sdhci_start(slot); 1811 return; 1812 } 1813 /* If command has response - fetch it. */ 1814 if (slot->curcmd->flags & MMC_RSP_PRESENT) { 1815 if (slot->curcmd->flags & MMC_RSP_136) { 1816 /* CRC is stripped so we need one byte shift. */ 1817 extra = 0; 1818 for (i = 0; i < 4; i++) { 1819 val = RD4(slot, SDHCI_RESPONSE + i * 4); 1820 if (slot->quirks & 1821 SDHCI_QUIRK_DONT_SHIFT_RESPONSE) 1822 slot->curcmd->resp[3 - i] = val; 1823 else { 1824 slot->curcmd->resp[3 - i] = 1825 (val << 8) | extra; 1826 extra = val >> 24; 1827 } 1828 } 1829 } else 1830 slot->curcmd->resp[0] = RD4(slot, SDHCI_RESPONSE); 1831 } 1832 if (__predict_false(sdhci_debug > 1)) 1833 slot_printf(slot, "Resp: %#04x %#04x %#04x %#04x\n", 1834 slot->curcmd->resp[0], slot->curcmd->resp[1], 1835 slot->curcmd->resp[2], slot->curcmd->resp[3]); 1836 1837 /* If data ready - finish. */ 1838 if (slot->data_done) 1839 sdhci_start(slot); 1840 } 1841 1842 static void 1843 sdhci_start_data(struct sdhci_slot *slot, const struct mmc_data *data) 1844 { 1845 uint32_t blkcnt, blksz, current_timeout, sdma_bbufsz, target_timeout; 1846 uint8_t div; 1847 1848 if (data == NULL && (slot->curcmd->flags & MMC_RSP_BUSY) == 0) { 1849 slot->data_done = 1; 1850 return; 1851 } 1852 1853 slot->data_done = 0; 1854 1855 /* Calculate and set data timeout.*/ 1856 /* XXX: We should have this from mmc layer, now assume 1 sec. */ 1857 if (slot->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) { 1858 div = 0xE; 1859 } else { 1860 target_timeout = 1000000; 1861 div = 0; 1862 current_timeout = (1 << 13) * 1000 / slot->timeout_clk; 1863 while (current_timeout < target_timeout && div < 0xE) { 1864 ++div; 1865 current_timeout <<= 1; 1866 } 1867 /* Compensate for an off-by-one error in the CaFe chip.*/ 1868 if (div < 0xE && 1869 (slot->quirks & SDHCI_QUIRK_INCR_TIMEOUT_CONTROL)) { 1870 ++div; 1871 } 1872 } 1873 WR1(slot, SDHCI_TIMEOUT_CONTROL, div); 1874 1875 if (data == NULL) 1876 return; 1877 1878 /* Use DMA if possible. */ 1879 if ((slot->opt & SDHCI_HAVE_DMA)) 1880 slot->flags |= SDHCI_USE_DMA; 1881 /* If data is small, broken DMA may return zeroes instead of data. */ 1882 if ((slot->quirks & SDHCI_QUIRK_BROKEN_TIMINGS) && 1883 (data->len <= 512)) 1884 slot->flags &= ~SDHCI_USE_DMA; 1885 /* Some controllers require even block sizes. */ 1886 if ((slot->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) && 1887 ((data->len) & 0x3)) 1888 slot->flags &= ~SDHCI_USE_DMA; 1889 /* Load DMA buffer. */ 1890 if (slot->flags & SDHCI_USE_DMA) { 1891 sdma_bbufsz = slot->sdma_bbufsz; 1892 if (data->flags & MMC_DATA_READ) 1893 bus_dmamap_sync(slot->dmatag, slot->dmamap, 1894 BUS_DMASYNC_PREREAD); 1895 else { 1896 memcpy(slot->dmamem, data->data, ulmin(data->len, 1897 sdma_bbufsz)); 1898 bus_dmamap_sync(slot->dmatag, slot->dmamap, 1899 BUS_DMASYNC_PREWRITE); 1900 } 1901 WR4(slot, SDHCI_DMA_ADDRESS, slot->paddr); 1902 /* 1903 * Interrupt aggregation: Mask border interrupt for the last 1904 * bounce buffer and unmask otherwise. 1905 */ 1906 if (data->len == sdma_bbufsz) 1907 slot->intmask &= ~SDHCI_INT_DMA_END; 1908 else 1909 slot->intmask |= SDHCI_INT_DMA_END; 1910 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); 1911 } 1912 /* Current data offset for both PIO and DMA. */ 1913 slot->offset = 0; 1914 #ifdef MMCCAM 1915 if (data->flags & MMC_DATA_BLOCK_SIZE) { 1916 /* Set block size and request border interrupts on the SDMA boundary. */ 1917 blksz = SDHCI_MAKE_BLKSZ(slot->sdma_boundary, data->block_size); 1918 blkcnt = data->block_count; 1919 if (__predict_false(sdhci_debug > 0)) 1920 slot_printf(slot, "SDIO Custom block params: blksz: " 1921 "%#10x, blk cnt: %#10x\n", blksz, blkcnt); 1922 } else 1923 #endif 1924 { 1925 /* Set block size and request border interrupts on the SDMA boundary. */ 1926 blksz = SDHCI_MAKE_BLKSZ(slot->sdma_boundary, ulmin(data->len, 512)); 1927 blkcnt = howmany(data->len, 512); 1928 } 1929 1930 WR2(slot, SDHCI_BLOCK_SIZE, blksz); 1931 WR2(slot, SDHCI_BLOCK_COUNT, blkcnt); 1932 if (__predict_false(sdhci_debug > 1)) 1933 slot_printf(slot, "Blk size: 0x%08x | Blk cnt: 0x%08x\n", 1934 blksz, blkcnt); 1935 } 1936 1937 void 1938 sdhci_finish_data(struct sdhci_slot *slot) 1939 { 1940 struct mmc_data *data = slot->curcmd->data; 1941 size_t left; 1942 1943 /* Interrupt aggregation: Restore command interrupt. 1944 * Auxiliary restore point for the case when data interrupt 1945 * happened first. */ 1946 if (!slot->cmd_done) { 1947 WR4(slot, SDHCI_SIGNAL_ENABLE, 1948 slot->intmask |= SDHCI_INT_RESPONSE); 1949 } 1950 /* Unload rest of data from DMA buffer. */ 1951 if (!slot->data_done && (slot->flags & SDHCI_USE_DMA) && 1952 slot->curcmd->data != NULL) { 1953 if (data->flags & MMC_DATA_READ) { 1954 left = data->len - slot->offset; 1955 bus_dmamap_sync(slot->dmatag, slot->dmamap, 1956 BUS_DMASYNC_POSTREAD); 1957 memcpy((u_char*)data->data + slot->offset, slot->dmamem, 1958 ulmin(left, slot->sdma_bbufsz)); 1959 } else 1960 bus_dmamap_sync(slot->dmatag, slot->dmamap, 1961 BUS_DMASYNC_POSTWRITE); 1962 } 1963 slot->data_done = 1; 1964 /* If there was error - reset the host. */ 1965 if (slot->curcmd->error) { 1966 if (slot->curcmd->error == MMC_ERR_BADCRC) 1967 slot->retune_req |= SDHCI_RETUNE_REQ_RESET; 1968 sdhci_reset(slot, SDHCI_RESET_CMD); 1969 sdhci_reset(slot, SDHCI_RESET_DATA); 1970 sdhci_start(slot); 1971 return; 1972 } 1973 /* If we already have command response - finish. */ 1974 if (slot->cmd_done) 1975 sdhci_start(slot); 1976 } 1977 1978 #ifdef MMCCAM 1979 static void 1980 sdhci_start(struct sdhci_slot *slot) 1981 { 1982 union ccb *ccb; 1983 struct ccb_mmcio *mmcio; 1984 1985 ccb = slot->ccb; 1986 if (ccb == NULL) 1987 return; 1988 1989 mmcio = &ccb->mmcio; 1990 if (!(slot->flags & CMD_STARTED)) { 1991 slot->flags |= CMD_STARTED; 1992 sdhci_start_command(slot, &mmcio->cmd); 1993 return; 1994 } 1995 1996 /* 1997 * Old stack doesn't use this! 1998 * Enabling this code causes significant performance degradation 1999 * and IRQ storms on BBB, Wandboard behaves fine. 2000 * Not using this code does no harm... 2001 if (!(slot->flags & STOP_STARTED) && mmcio->stop.opcode != 0) { 2002 slot->flags |= STOP_STARTED; 2003 sdhci_start_command(slot, &mmcio->stop); 2004 return; 2005 } 2006 */ 2007 if (__predict_false(sdhci_debug > 1)) 2008 slot_printf(slot, "result: %d\n", mmcio->cmd.error); 2009 if (mmcio->cmd.error == 0 && 2010 (slot->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) { 2011 sdhci_reset(slot, SDHCI_RESET_CMD); 2012 sdhci_reset(slot, SDHCI_RESET_DATA); 2013 } 2014 2015 sdhci_req_done(slot); 2016 } 2017 #else 2018 static void 2019 sdhci_start(struct sdhci_slot *slot) 2020 { 2021 const struct mmc_request *req; 2022 2023 req = slot->req; 2024 if (req == NULL) 2025 return; 2026 2027 if (!(slot->flags & CMD_STARTED)) { 2028 slot->flags |= CMD_STARTED; 2029 sdhci_start_command(slot, req->cmd); 2030 return; 2031 } 2032 if ((slot->quirks & SDHCI_QUIRK_BROKEN_AUTO_STOP) && 2033 !(slot->flags & STOP_STARTED) && req->stop) { 2034 slot->flags |= STOP_STARTED; 2035 sdhci_start_command(slot, req->stop); 2036 return; 2037 } 2038 if (__predict_false(sdhci_debug > 1)) 2039 slot_printf(slot, "result: %d\n", req->cmd->error); 2040 if (!req->cmd->error && 2041 ((slot->curcmd == req->stop && 2042 (slot->quirks & SDHCI_QUIRK_BROKEN_AUTO_STOP)) || 2043 (slot->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { 2044 sdhci_reset(slot, SDHCI_RESET_CMD); 2045 sdhci_reset(slot, SDHCI_RESET_DATA); 2046 } 2047 2048 sdhci_req_done(slot); 2049 } 2050 #endif 2051 2052 int 2053 sdhci_generic_request(device_t brdev __unused, device_t reqdev, 2054 struct mmc_request *req) 2055 { 2056 struct sdhci_slot *slot = device_get_ivars(reqdev); 2057 2058 SDHCI_LOCK(slot); 2059 if (slot->req != NULL) { 2060 SDHCI_UNLOCK(slot); 2061 return (EBUSY); 2062 } 2063 if (__predict_false(sdhci_debug > 1)) { 2064 slot_printf(slot, 2065 "CMD%u arg %#x flags %#x dlen %u dflags %#x\n", 2066 req->cmd->opcode, req->cmd->arg, req->cmd->flags, 2067 (req->cmd->data)?(u_int)req->cmd->data->len:0, 2068 (req->cmd->data)?req->cmd->data->flags:0); 2069 } 2070 slot->req = req; 2071 slot->flags = 0; 2072 sdhci_start(slot); 2073 SDHCI_UNLOCK(slot); 2074 if (dumping) { 2075 while (slot->req != NULL) { 2076 sdhci_generic_intr(slot); 2077 DELAY(10); 2078 } 2079 } 2080 return (0); 2081 } 2082 2083 int 2084 sdhci_generic_get_ro(device_t brdev __unused, device_t reqdev) 2085 { 2086 struct sdhci_slot *slot = device_get_ivars(reqdev); 2087 uint32_t val; 2088 2089 SDHCI_LOCK(slot); 2090 val = RD4(slot, SDHCI_PRESENT_STATE); 2091 SDHCI_UNLOCK(slot); 2092 return (!(val & SDHCI_WRITE_PROTECT)); 2093 } 2094 2095 int 2096 sdhci_generic_acquire_host(device_t brdev __unused, device_t reqdev) 2097 { 2098 struct sdhci_slot *slot = device_get_ivars(reqdev); 2099 int err = 0; 2100 2101 SDHCI_LOCK(slot); 2102 while (slot->bus_busy) 2103 msleep(slot, &slot->mtx, 0, "sdhciah", 0); 2104 slot->bus_busy++; 2105 /* Activate led. */ 2106 WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl |= SDHCI_CTRL_LED); 2107 SDHCI_UNLOCK(slot); 2108 return (err); 2109 } 2110 2111 int 2112 sdhci_generic_release_host(device_t brdev __unused, device_t reqdev) 2113 { 2114 struct sdhci_slot *slot = device_get_ivars(reqdev); 2115 2116 SDHCI_LOCK(slot); 2117 /* Deactivate led. */ 2118 WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl &= ~SDHCI_CTRL_LED); 2119 slot->bus_busy--; 2120 SDHCI_UNLOCK(slot); 2121 wakeup(slot); 2122 return (0); 2123 } 2124 2125 static void 2126 sdhci_cmd_irq(struct sdhci_slot *slot, uint32_t intmask) 2127 { 2128 2129 if (!slot->curcmd) { 2130 slot_printf(slot, "Got command interrupt 0x%08x, but " 2131 "there is no active command.\n", intmask); 2132 sdhci_dumpregs(slot); 2133 return; 2134 } 2135 if (intmask & SDHCI_INT_TIMEOUT) 2136 slot->curcmd->error = MMC_ERR_TIMEOUT; 2137 else if (intmask & SDHCI_INT_CRC) 2138 slot->curcmd->error = MMC_ERR_BADCRC; 2139 else if (intmask & (SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) 2140 slot->curcmd->error = MMC_ERR_FIFO; 2141 2142 sdhci_finish_command(slot); 2143 } 2144 2145 static void 2146 sdhci_data_irq(struct sdhci_slot *slot, uint32_t intmask) 2147 { 2148 struct mmc_data *data; 2149 size_t left; 2150 uint32_t sdma_bbufsz; 2151 2152 if (!slot->curcmd) { 2153 slot_printf(slot, "Got data interrupt 0x%08x, but " 2154 "there is no active command.\n", intmask); 2155 sdhci_dumpregs(slot); 2156 return; 2157 } 2158 if (slot->curcmd->data == NULL && 2159 (slot->curcmd->flags & MMC_RSP_BUSY) == 0) { 2160 slot_printf(slot, "Got data interrupt 0x%08x, but " 2161 "there is no active data operation.\n", 2162 intmask); 2163 sdhci_dumpregs(slot); 2164 return; 2165 } 2166 if (intmask & SDHCI_INT_DATA_TIMEOUT) 2167 slot->curcmd->error = MMC_ERR_TIMEOUT; 2168 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) 2169 slot->curcmd->error = MMC_ERR_BADCRC; 2170 if (slot->curcmd->data == NULL && 2171 (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | 2172 SDHCI_INT_DMA_END))) { 2173 slot_printf(slot, "Got data interrupt 0x%08x, but " 2174 "there is busy-only command.\n", intmask); 2175 sdhci_dumpregs(slot); 2176 slot->curcmd->error = MMC_ERR_INVALID; 2177 } 2178 if (slot->curcmd->error) { 2179 /* No need to continue after any error. */ 2180 goto done; 2181 } 2182 2183 /* Handle tuning completion interrupt. */ 2184 if (__predict_false((intmask & SDHCI_INT_DATA_AVAIL) && 2185 (slot->curcmd->opcode == MMC_SEND_TUNING_BLOCK || 2186 slot->curcmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) { 2187 slot->req->flags |= MMC_TUNE_DONE; 2188 sdhci_finish_command(slot); 2189 sdhci_finish_data(slot); 2190 return; 2191 } 2192 /* Handle PIO interrupt. */ 2193 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) { 2194 if ((slot->opt & SDHCI_PLATFORM_TRANSFER) && 2195 SDHCI_PLATFORM_WILL_HANDLE(slot->bus, slot)) { 2196 SDHCI_PLATFORM_START_TRANSFER(slot->bus, slot, 2197 &intmask); 2198 slot->flags |= PLATFORM_DATA_STARTED; 2199 } else 2200 sdhci_transfer_pio(slot); 2201 } 2202 /* Handle DMA border. */ 2203 if (intmask & SDHCI_INT_DMA_END) { 2204 data = slot->curcmd->data; 2205 sdma_bbufsz = slot->sdma_bbufsz; 2206 2207 /* Unload DMA buffer ... */ 2208 left = data->len - slot->offset; 2209 if (data->flags & MMC_DATA_READ) { 2210 bus_dmamap_sync(slot->dmatag, slot->dmamap, 2211 BUS_DMASYNC_POSTREAD); 2212 memcpy((u_char*)data->data + slot->offset, slot->dmamem, 2213 ulmin(left, sdma_bbufsz)); 2214 } else { 2215 bus_dmamap_sync(slot->dmatag, slot->dmamap, 2216 BUS_DMASYNC_POSTWRITE); 2217 } 2218 /* ... and reload it again. */ 2219 slot->offset += sdma_bbufsz; 2220 left = data->len - slot->offset; 2221 if (data->flags & MMC_DATA_READ) { 2222 bus_dmamap_sync(slot->dmatag, slot->dmamap, 2223 BUS_DMASYNC_PREREAD); 2224 } else { 2225 memcpy(slot->dmamem, (u_char*)data->data + slot->offset, 2226 ulmin(left, sdma_bbufsz)); 2227 bus_dmamap_sync(slot->dmatag, slot->dmamap, 2228 BUS_DMASYNC_PREWRITE); 2229 } 2230 /* 2231 * Interrupt aggregation: Mask border interrupt for the last 2232 * bounce buffer. 2233 */ 2234 if (left == sdma_bbufsz) { 2235 slot->intmask &= ~SDHCI_INT_DMA_END; 2236 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); 2237 } 2238 /* Restart DMA. */ 2239 WR4(slot, SDHCI_DMA_ADDRESS, slot->paddr); 2240 } 2241 /* We have got all data. */ 2242 if (intmask & SDHCI_INT_DATA_END) { 2243 if (slot->flags & PLATFORM_DATA_STARTED) { 2244 slot->flags &= ~PLATFORM_DATA_STARTED; 2245 SDHCI_PLATFORM_FINISH_TRANSFER(slot->bus, slot); 2246 } else 2247 sdhci_finish_data(slot); 2248 } 2249 done: 2250 if (slot->curcmd != NULL && slot->curcmd->error != 0) { 2251 if (slot->flags & PLATFORM_DATA_STARTED) { 2252 slot->flags &= ~PLATFORM_DATA_STARTED; 2253 SDHCI_PLATFORM_FINISH_TRANSFER(slot->bus, slot); 2254 } else 2255 sdhci_finish_data(slot); 2256 } 2257 } 2258 2259 static void 2260 sdhci_acmd_irq(struct sdhci_slot *slot, uint16_t acmd_err) 2261 { 2262 2263 if (!slot->curcmd) { 2264 slot_printf(slot, "Got AutoCMD12 error 0x%04x, but " 2265 "there is no active command.\n", acmd_err); 2266 sdhci_dumpregs(slot); 2267 return; 2268 } 2269 slot_printf(slot, "Got AutoCMD12 error 0x%04x\n", acmd_err); 2270 sdhci_reset(slot, SDHCI_RESET_CMD); 2271 } 2272 2273 void 2274 sdhci_generic_intr(struct sdhci_slot *slot) 2275 { 2276 uint32_t intmask, present; 2277 uint16_t val16; 2278 2279 SDHCI_LOCK(slot); 2280 /* Read slot interrupt status. */ 2281 intmask = RD4(slot, SDHCI_INT_STATUS); 2282 if (intmask == 0 || intmask == 0xffffffff) { 2283 SDHCI_UNLOCK(slot); 2284 return; 2285 } 2286 if (__predict_false(sdhci_debug > 2)) 2287 slot_printf(slot, "Interrupt %#x\n", intmask); 2288 2289 /* Handle tuning error interrupt. */ 2290 if (__predict_false(intmask & SDHCI_INT_TUNEERR)) { 2291 WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_TUNEERR); 2292 slot_printf(slot, "Tuning error indicated\n"); 2293 slot->retune_req |= SDHCI_RETUNE_REQ_RESET; 2294 if (slot->curcmd) { 2295 slot->curcmd->error = MMC_ERR_BADCRC; 2296 sdhci_finish_command(slot); 2297 } 2298 } 2299 /* Handle re-tuning interrupt. */ 2300 if (__predict_false(intmask & SDHCI_INT_RETUNE)) 2301 slot->retune_req |= SDHCI_RETUNE_REQ_NEEDED; 2302 /* Handle card presence interrupts. */ 2303 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 2304 present = (intmask & SDHCI_INT_CARD_INSERT) != 0; 2305 slot->intmask &= 2306 ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); 2307 slot->intmask |= present ? SDHCI_INT_CARD_REMOVE : 2308 SDHCI_INT_CARD_INSERT; 2309 WR4(slot, SDHCI_INT_ENABLE, slot->intmask); 2310 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); 2311 WR4(slot, SDHCI_INT_STATUS, intmask & 2312 (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)); 2313 sdhci_handle_card_present_locked(slot, present); 2314 } 2315 /* Handle command interrupts. */ 2316 if (intmask & SDHCI_INT_CMD_MASK) { 2317 WR4(slot, SDHCI_INT_STATUS, intmask & SDHCI_INT_CMD_MASK); 2318 sdhci_cmd_irq(slot, intmask & SDHCI_INT_CMD_MASK); 2319 } 2320 /* Handle data interrupts. */ 2321 if (intmask & SDHCI_INT_DATA_MASK) { 2322 WR4(slot, SDHCI_INT_STATUS, intmask & SDHCI_INT_DATA_MASK); 2323 /* Don't call data_irq in case of errored command. */ 2324 if ((intmask & SDHCI_INT_CMD_ERROR_MASK) == 0) 2325 sdhci_data_irq(slot, intmask & SDHCI_INT_DATA_MASK); 2326 } 2327 /* Handle AutoCMD12 error interrupt. */ 2328 if (intmask & SDHCI_INT_ACMD12ERR) { 2329 /* Clearing SDHCI_INT_ACMD12ERR may clear SDHCI_ACMD12_ERR. */ 2330 val16 = RD2(slot, SDHCI_ACMD12_ERR); 2331 WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_ACMD12ERR); 2332 sdhci_acmd_irq(slot, val16); 2333 } 2334 /* Handle bus power interrupt. */ 2335 if (intmask & SDHCI_INT_BUS_POWER) { 2336 WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_BUS_POWER); 2337 slot_printf(slot, "Card is consuming too much power!\n"); 2338 } 2339 intmask &= ~(SDHCI_INT_ERROR | SDHCI_INT_TUNEERR | SDHCI_INT_RETUNE | 2340 SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | SDHCI_INT_CMD_MASK | 2341 SDHCI_INT_DATA_MASK | SDHCI_INT_ACMD12ERR | SDHCI_INT_BUS_POWER); 2342 /* The rest is unknown. */ 2343 if (intmask) { 2344 WR4(slot, SDHCI_INT_STATUS, intmask); 2345 slot_printf(slot, "Unexpected interrupt 0x%08x.\n", 2346 intmask); 2347 sdhci_dumpregs(slot); 2348 } 2349 2350 SDHCI_UNLOCK(slot); 2351 } 2352 2353 int 2354 sdhci_generic_read_ivar(device_t bus, device_t child, int which, 2355 uintptr_t *result) 2356 { 2357 const struct sdhci_slot *slot = device_get_ivars(child); 2358 2359 switch (which) { 2360 default: 2361 return (EINVAL); 2362 case MMCBR_IVAR_BUS_MODE: 2363 *result = slot->host.ios.bus_mode; 2364 break; 2365 case MMCBR_IVAR_BUS_WIDTH: 2366 *result = slot->host.ios.bus_width; 2367 break; 2368 case MMCBR_IVAR_CHIP_SELECT: 2369 *result = slot->host.ios.chip_select; 2370 break; 2371 case MMCBR_IVAR_CLOCK: 2372 *result = slot->host.ios.clock; 2373 break; 2374 case MMCBR_IVAR_F_MIN: 2375 *result = slot->host.f_min; 2376 break; 2377 case MMCBR_IVAR_F_MAX: 2378 *result = slot->host.f_max; 2379 break; 2380 case MMCBR_IVAR_HOST_OCR: 2381 *result = slot->host.host_ocr; 2382 break; 2383 case MMCBR_IVAR_MODE: 2384 *result = slot->host.mode; 2385 break; 2386 case MMCBR_IVAR_OCR: 2387 *result = slot->host.ocr; 2388 break; 2389 case MMCBR_IVAR_POWER_MODE: 2390 *result = slot->host.ios.power_mode; 2391 break; 2392 case MMCBR_IVAR_VDD: 2393 *result = slot->host.ios.vdd; 2394 break; 2395 case MMCBR_IVAR_RETUNE_REQ: 2396 if (slot->opt & SDHCI_TUNING_ENABLED) { 2397 if (slot->retune_req & SDHCI_RETUNE_REQ_RESET) { 2398 *result = retune_req_reset; 2399 break; 2400 } 2401 if (slot->retune_req & SDHCI_RETUNE_REQ_NEEDED) { 2402 *result = retune_req_normal; 2403 break; 2404 } 2405 } 2406 *result = retune_req_none; 2407 break; 2408 case MMCBR_IVAR_VCCQ: 2409 *result = slot->host.ios.vccq; 2410 break; 2411 case MMCBR_IVAR_CAPS: 2412 *result = slot->host.caps; 2413 break; 2414 case MMCBR_IVAR_TIMING: 2415 *result = slot->host.ios.timing; 2416 break; 2417 case MMCBR_IVAR_MAX_DATA: 2418 /* 2419 * Re-tuning modes 1 and 2 restrict the maximum data length 2420 * per read/write command to 4 MiB. 2421 */ 2422 if (slot->opt & SDHCI_TUNING_ENABLED && 2423 (slot->retune_mode == SDHCI_RETUNE_MODE_1 || 2424 slot->retune_mode == SDHCI_RETUNE_MODE_2)) { 2425 *result = 4 * 1024 * 1024 / MMC_SECTOR_SIZE; 2426 break; 2427 } 2428 *result = 65535; 2429 break; 2430 case MMCBR_IVAR_MAX_BUSY_TIMEOUT: 2431 /* 2432 * Currently, sdhci_start_data() hardcodes 1 s for all CMDs. 2433 */ 2434 *result = 1000000; 2435 break; 2436 } 2437 return (0); 2438 } 2439 2440 int 2441 sdhci_generic_write_ivar(device_t bus, device_t child, int which, 2442 uintptr_t value) 2443 { 2444 struct sdhci_slot *slot = device_get_ivars(child); 2445 uint32_t clock, max_clock; 2446 int i; 2447 2448 if (sdhci_debug > 1) 2449 slot_printf(slot, "%s: var=%d\n", __func__, which); 2450 switch (which) { 2451 default: 2452 return (EINVAL); 2453 case MMCBR_IVAR_BUS_MODE: 2454 slot->host.ios.bus_mode = value; 2455 break; 2456 case MMCBR_IVAR_BUS_WIDTH: 2457 slot->host.ios.bus_width = value; 2458 break; 2459 case MMCBR_IVAR_CHIP_SELECT: 2460 slot->host.ios.chip_select = value; 2461 break; 2462 case MMCBR_IVAR_CLOCK: 2463 if (value > 0) { 2464 max_clock = slot->max_clk; 2465 clock = max_clock; 2466 2467 if (slot->version < SDHCI_SPEC_300) { 2468 for (i = 0; i < SDHCI_200_MAX_DIVIDER; 2469 i <<= 1) { 2470 if (clock <= value) 2471 break; 2472 clock >>= 1; 2473 } 2474 } else { 2475 for (i = 0; i < SDHCI_300_MAX_DIVIDER; 2476 i += 2) { 2477 if (clock <= value) 2478 break; 2479 clock = max_clock / (i + 2); 2480 } 2481 } 2482 2483 slot->host.ios.clock = clock; 2484 } else 2485 slot->host.ios.clock = 0; 2486 break; 2487 case MMCBR_IVAR_MODE: 2488 slot->host.mode = value; 2489 break; 2490 case MMCBR_IVAR_OCR: 2491 slot->host.ocr = value; 2492 break; 2493 case MMCBR_IVAR_POWER_MODE: 2494 slot->host.ios.power_mode = value; 2495 break; 2496 case MMCBR_IVAR_VDD: 2497 slot->host.ios.vdd = value; 2498 break; 2499 case MMCBR_IVAR_VCCQ: 2500 slot->host.ios.vccq = value; 2501 break; 2502 case MMCBR_IVAR_TIMING: 2503 slot->host.ios.timing = value; 2504 break; 2505 case MMCBR_IVAR_CAPS: 2506 case MMCBR_IVAR_HOST_OCR: 2507 case MMCBR_IVAR_F_MIN: 2508 case MMCBR_IVAR_F_MAX: 2509 case MMCBR_IVAR_MAX_DATA: 2510 case MMCBR_IVAR_RETUNE_REQ: 2511 return (EINVAL); 2512 } 2513 return (0); 2514 } 2515 2516 #ifdef MMCCAM 2517 void 2518 sdhci_start_slot(struct sdhci_slot *slot) 2519 { 2520 2521 if ((slot->devq = cam_simq_alloc(1)) == NULL) 2522 goto fail; 2523 2524 mtx_init(&slot->sim_mtx, "sdhcisim", NULL, MTX_DEF); 2525 slot->sim = cam_sim_alloc_dev(sdhci_cam_action, sdhci_cam_poll, 2526 "sdhci_slot", slot, slot->bus, 2527 &slot->sim_mtx, 1, 1, slot->devq); 2528 2529 if (slot->sim == NULL) { 2530 cam_simq_free(slot->devq); 2531 slot_printf(slot, "cannot allocate CAM SIM\n"); 2532 goto fail; 2533 } 2534 2535 mtx_lock(&slot->sim_mtx); 2536 if (xpt_bus_register(slot->sim, slot->bus, 0) != 0) { 2537 slot_printf(slot, "cannot register SCSI pass-through bus\n"); 2538 cam_sim_free(slot->sim, FALSE); 2539 cam_simq_free(slot->devq); 2540 mtx_unlock(&slot->sim_mtx); 2541 goto fail; 2542 } 2543 mtx_unlock(&slot->sim_mtx); 2544 2545 /* End CAM-specific init */ 2546 slot->card_present = 0; 2547 sdhci_card_task(slot, 0); 2548 return; 2549 2550 fail: 2551 if (slot->sim != NULL) { 2552 mtx_lock(&slot->sim_mtx); 2553 xpt_bus_deregister(cam_sim_path(slot->sim)); 2554 cam_sim_free(slot->sim, FALSE); 2555 mtx_unlock(&slot->sim_mtx); 2556 } 2557 2558 if (slot->devq != NULL) 2559 cam_simq_free(slot->devq); 2560 } 2561 2562 void 2563 sdhci_cam_action(struct cam_sim *sim, union ccb *ccb) 2564 { 2565 struct sdhci_slot *slot; 2566 2567 slot = cam_sim_softc(sim); 2568 if (slot == NULL) { 2569 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 2570 xpt_done(ccb); 2571 return; 2572 } 2573 2574 mtx_assert(&slot->sim_mtx, MA_OWNED); 2575 2576 switch (ccb->ccb_h.func_code) { 2577 case XPT_PATH_INQ: 2578 mmc_path_inq(&ccb->cpi, "Deglitch Networks", sim, MAXPHYS); 2579 break; 2580 2581 case XPT_GET_TRAN_SETTINGS: 2582 { 2583 struct ccb_trans_settings *cts = &ccb->cts; 2584 uint32_t max_data; 2585 2586 if (sdhci_debug > 1) 2587 slot_printf(slot, "Got XPT_GET_TRAN_SETTINGS\n"); 2588 2589 cts->protocol = PROTO_MMCSD; 2590 cts->protocol_version = 1; 2591 cts->transport = XPORT_MMCSD; 2592 cts->transport_version = 1; 2593 cts->xport_specific.valid = 0; 2594 cts->proto_specific.mmc.host_ocr = slot->host.host_ocr; 2595 cts->proto_specific.mmc.host_f_min = slot->host.f_min; 2596 cts->proto_specific.mmc.host_f_max = slot->host.f_max; 2597 cts->proto_specific.mmc.host_caps = slot->host.caps; 2598 /* 2599 * Re-tuning modes 1 and 2 restrict the maximum data length 2600 * per read/write command to 4 MiB. 2601 */ 2602 if (slot->opt & SDHCI_TUNING_ENABLED && 2603 (slot->retune_mode == SDHCI_RETUNE_MODE_1 || 2604 slot->retune_mode == SDHCI_RETUNE_MODE_2)) { 2605 max_data = 4 * 1024 * 1024 / MMC_SECTOR_SIZE; 2606 } else { 2607 max_data = 65535; 2608 } 2609 cts->proto_specific.mmc.host_max_data = max_data; 2610 2611 memcpy(&cts->proto_specific.mmc.ios, &slot->host.ios, sizeof(struct mmc_ios)); 2612 ccb->ccb_h.status = CAM_REQ_CMP; 2613 break; 2614 } 2615 case XPT_SET_TRAN_SETTINGS: 2616 if (sdhci_debug > 1) 2617 slot_printf(slot, "Got XPT_SET_TRAN_SETTINGS\n"); 2618 sdhci_cam_settran_settings(slot, ccb); 2619 ccb->ccb_h.status = CAM_REQ_CMP; 2620 break; 2621 case XPT_RESET_BUS: 2622 if (sdhci_debug > 1) 2623 slot_printf(slot, "Got XPT_RESET_BUS, ACK it...\n"); 2624 ccb->ccb_h.status = CAM_REQ_CMP; 2625 break; 2626 case XPT_MMC_IO: 2627 /* 2628 * Here is the HW-dependent part of 2629 * sending the command to the underlying h/w 2630 * At some point in the future an interrupt comes. 2631 * Then the request will be marked as completed. 2632 */ 2633 if (__predict_false(sdhci_debug > 1)) 2634 slot_printf(slot, "Got XPT_MMC_IO\n"); 2635 ccb->ccb_h.status = CAM_REQ_INPROG; 2636 2637 sdhci_cam_request(cam_sim_softc(sim), ccb); 2638 return; 2639 default: 2640 ccb->ccb_h.status = CAM_REQ_INVALID; 2641 break; 2642 } 2643 xpt_done(ccb); 2644 return; 2645 } 2646 2647 void 2648 sdhci_cam_poll(struct cam_sim *sim) 2649 { 2650 return; 2651 } 2652 2653 static int 2654 sdhci_cam_get_possible_host_clock(const struct sdhci_slot *slot, 2655 int proposed_clock) 2656 { 2657 int max_clock, clock, i; 2658 2659 if (proposed_clock == 0) 2660 return 0; 2661 max_clock = slot->max_clk; 2662 clock = max_clock; 2663 2664 if (slot->version < SDHCI_SPEC_300) { 2665 for (i = 0; i < SDHCI_200_MAX_DIVIDER; i <<= 1) { 2666 if (clock <= proposed_clock) 2667 break; 2668 clock >>= 1; 2669 } 2670 } else { 2671 for (i = 0; i < SDHCI_300_MAX_DIVIDER; i += 2) { 2672 if (clock <= proposed_clock) 2673 break; 2674 clock = max_clock / (i + 2); 2675 } 2676 } 2677 return clock; 2678 } 2679 2680 static int 2681 sdhci_cam_settran_settings(struct sdhci_slot *slot, union ccb *ccb) 2682 { 2683 struct mmc_ios *ios; 2684 const struct mmc_ios *new_ios; 2685 const struct ccb_trans_settings_mmc *cts; 2686 2687 ios = &slot->host.ios; 2688 cts = &ccb->cts.proto_specific.mmc; 2689 new_ios = &cts->ios; 2690 2691 /* Update only requested fields */ 2692 if (cts->ios_valid & MMC_CLK) { 2693 ios->clock = sdhci_cam_get_possible_host_clock(slot, new_ios->clock); 2694 slot_printf(slot, "Clock => %d\n", ios->clock); 2695 } 2696 if (cts->ios_valid & MMC_VDD) { 2697 ios->vdd = new_ios->vdd; 2698 slot_printf(slot, "VDD => %d\n", ios->vdd); 2699 } 2700 if (cts->ios_valid & MMC_CS) { 2701 ios->chip_select = new_ios->chip_select; 2702 slot_printf(slot, "CS => %d\n", ios->chip_select); 2703 } 2704 if (cts->ios_valid & MMC_BW) { 2705 ios->bus_width = new_ios->bus_width; 2706 slot_printf(slot, "Bus width => %d\n", ios->bus_width); 2707 } 2708 if (cts->ios_valid & MMC_PM) { 2709 ios->power_mode = new_ios->power_mode; 2710 slot_printf(slot, "Power mode => %d\n", ios->power_mode); 2711 } 2712 if (cts->ios_valid & MMC_BT) { 2713 ios->timing = new_ios->timing; 2714 slot_printf(slot, "Timing => %d\n", ios->timing); 2715 } 2716 if (cts->ios_valid & MMC_BM) { 2717 ios->bus_mode = new_ios->bus_mode; 2718 slot_printf(slot, "Bus mode => %d\n", ios->bus_mode); 2719 } 2720 2721 /* XXX Provide a way to call a chip-specific IOS update, required for TI */ 2722 return (sdhci_cam_update_ios(slot)); 2723 } 2724 2725 static int 2726 sdhci_cam_update_ios(struct sdhci_slot *slot) 2727 { 2728 struct mmc_ios *ios = &slot->host.ios; 2729 2730 slot_printf(slot, "%s: power_mode=%d, clk=%d, bus_width=%d, timing=%d\n", 2731 __func__, ios->power_mode, ios->clock, ios->bus_width, ios->timing); 2732 SDHCI_LOCK(slot); 2733 /* Do full reset on bus power down to clear from any state. */ 2734 if (ios->power_mode == power_off) { 2735 WR4(slot, SDHCI_SIGNAL_ENABLE, 0); 2736 sdhci_init(slot); 2737 } 2738 /* Configure the bus. */ 2739 sdhci_set_clock(slot, ios->clock); 2740 sdhci_set_power(slot, (ios->power_mode == power_off) ? 0 : ios->vdd); 2741 if (ios->bus_width == bus_width_8) { 2742 slot->hostctrl |= SDHCI_CTRL_8BITBUS; 2743 slot->hostctrl &= ~SDHCI_CTRL_4BITBUS; 2744 } else if (ios->bus_width == bus_width_4) { 2745 slot->hostctrl &= ~SDHCI_CTRL_8BITBUS; 2746 slot->hostctrl |= SDHCI_CTRL_4BITBUS; 2747 } else if (ios->bus_width == bus_width_1) { 2748 slot->hostctrl &= ~SDHCI_CTRL_8BITBUS; 2749 slot->hostctrl &= ~SDHCI_CTRL_4BITBUS; 2750 } else { 2751 panic("Invalid bus width: %d", ios->bus_width); 2752 } 2753 if (ios->timing == bus_timing_hs && 2754 !(slot->quirks & SDHCI_QUIRK_DONT_SET_HISPD_BIT)) 2755 slot->hostctrl |= SDHCI_CTRL_HISPD; 2756 else 2757 slot->hostctrl &= ~SDHCI_CTRL_HISPD; 2758 WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl); 2759 /* Some controllers like reset after bus changes. */ 2760 if(slot->quirks & SDHCI_QUIRK_RESET_ON_IOS) 2761 sdhci_reset(slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 2762 2763 SDHCI_UNLOCK(slot); 2764 return (0); 2765 } 2766 2767 static int 2768 sdhci_cam_request(struct sdhci_slot *slot, union ccb *ccb) 2769 { 2770 const struct ccb_mmcio *mmcio; 2771 2772 mmcio = &ccb->mmcio; 2773 2774 SDHCI_LOCK(slot); 2775 /* if (slot->req != NULL) { 2776 SDHCI_UNLOCK(slot); 2777 return (EBUSY); 2778 } 2779 */ 2780 if (__predict_false(sdhci_debug > 1)) { 2781 slot_printf(slot, "CMD%u arg %#x flags %#x dlen %u dflags %#x " 2782 "blksz=%zu blkcnt=%zu\n", 2783 mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags, 2784 mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0, 2785 mmcio->cmd.data != NULL ? mmcio->cmd.data->flags : 0, 2786 mmcio->cmd.data != NULL ? mmcio->cmd.data->block_size : 0, 2787 mmcio->cmd.data != NULL ? mmcio->cmd.data->block_count : 0); 2788 } 2789 if (mmcio->cmd.data != NULL) { 2790 if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0) 2791 panic("data->len = %d, data->flags = %d -- something is b0rked", 2792 (int)mmcio->cmd.data->len, mmcio->cmd.data->flags); 2793 } 2794 slot->ccb = ccb; 2795 slot->flags = 0; 2796 sdhci_start(slot); 2797 SDHCI_UNLOCK(slot); 2798 if (dumping) { 2799 while (slot->ccb != NULL) { 2800 sdhci_generic_intr(slot); 2801 DELAY(10); 2802 } 2803 } 2804 return (0); 2805 } 2806 #endif /* MMCCAM */ 2807 2808 MODULE_VERSION(sdhci, SDHCI_VERSION); 2809