// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * Thanks to the following companies for their support:
 *
 * - JMicron (hardware and technical support)
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if
(host->flags & SDHCI_USE_64_BIT_DMA) { 102 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", 103 sdhci_readl(host, SDHCI_ADMA_ERROR), 104 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI), 105 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 106 } else { 107 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 108 sdhci_readl(host, SDHCI_ADMA_ERROR), 109 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 110 } 111 } 112 113 if (host->ops->dump_vendor_regs) 114 host->ops->dump_vendor_regs(host); 115 116 SDHCI_DUMP("============================================\n"); 117 } 118 EXPORT_SYMBOL_GPL(sdhci_dumpregs); 119 120 /*****************************************************************************\ 121 * * 122 * Low level functions * 123 * * 124 \*****************************************************************************/ 125 126 static void sdhci_do_enable_v4_mode(struct sdhci_host *host) 127 { 128 u16 ctrl2; 129 130 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 131 if (ctrl2 & SDHCI_CTRL_V4_MODE) 132 return; 133 134 ctrl2 |= SDHCI_CTRL_V4_MODE; 135 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 136 } 137 138 /* 139 * This can be called before sdhci_add_host() by Vendor's host controller 140 * driver to enable v4 mode if supported. 141 */ 142 void sdhci_enable_v4_mode(struct sdhci_host *host) 143 { 144 host->v4_mode = true; 145 sdhci_do_enable_v4_mode(host); 146 } 147 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode); 148 149 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd) 150 { 151 return cmd->data || cmd->flags & MMC_RSP_BUSY; 152 } 153 154 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) 155 { 156 u32 present; 157 158 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 159 !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc)) 160 return; 161 162 if (enable) { 163 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 164 SDHCI_CARD_PRESENT; 165 166 host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : 167 SDHCI_INT_CARD_INSERT; 168 } else { 169 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 170 } 171 172 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 173 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 174 } 175 176 static void sdhci_enable_card_detection(struct sdhci_host *host) 177 { 178 sdhci_set_card_detection(host, true); 179 } 180 181 static void sdhci_disable_card_detection(struct sdhci_host *host) 182 { 183 sdhci_set_card_detection(host, false); 184 } 185 186 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) 187 { 188 if (host->bus_on) 189 return; 190 host->bus_on = true; 191 pm_runtime_get_noresume(mmc_dev(host->mmc)); 192 } 193 194 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) 195 { 196 if (!host->bus_on) 197 return; 198 host->bus_on = false; 199 pm_runtime_put_noidle(mmc_dev(host->mmc)); 200 } 201 202 void sdhci_reset(struct sdhci_host *host, u8 mask) 203 { 204 ktime_t timeout; 205 206 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 207 208 if (mask & SDHCI_RESET_ALL) { 209 host->clock = 0; 210 /* Reset-all turns off SD Bus Power */ 211 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 212 sdhci_runtime_pm_bus_off(host); 213 } 214 215 /* Wait max 100 ms */ 216 timeout = ktime_add_ms(ktime_get(), 100); 217 218 /* hw clears the bit when it's done */ 219 while (1) { 220 bool timedout = ktime_after(ktime_get(), timeout); 221 222 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) 223 break; 224 if (timedout) { 225 pr_err("%s: Reset 0x%x never completed.\n", 226 mmc_hostname(host->mmc), (int)mask); 227 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 228 sdhci_dumpregs(host); 229 return; 230 } 231 udelay(10); 232 } 233 } 234 EXPORT_SYMBOL_GPL(sdhci_reset); 235 236 static bool sdhci_do_reset(struct sdhci_host *host, u8 mask) 237 { 238 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 239 struct mmc_host *mmc = host->mmc; 240 241 if (!mmc->ops->get_cd(mmc)) 242 return false; 243 } 244 245 host->ops->reset(host, mask); 246 247 return true; 248 } 249 250 static void sdhci_reset_for_all(struct sdhci_host *host) 251 { 252 if (sdhci_do_reset(host, SDHCI_RESET_ALL)) { 253 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 254 if (host->ops->enable_dma) 255 host->ops->enable_dma(host); 256 } 257 /* Resetting the controller clears many */ 258 host->preset_enabled = false; 259 } 260 } 261 262 enum sdhci_reset_reason { 263 SDHCI_RESET_FOR_INIT, 264 SDHCI_RESET_FOR_REQUEST_ERROR, 265 SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY, 266 SDHCI_RESET_FOR_TUNING_ABORT, 267 SDHCI_RESET_FOR_CARD_REMOVED, 268 SDHCI_RESET_FOR_CQE_RECOVERY, 269 }; 270 271 static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason) 272 { 273 if (host->quirks2 & SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER) { 274 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 275 return; 276 } 277 278 switch (reason) { 279 case SDHCI_RESET_FOR_INIT: 280 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 281 break; 282 case SDHCI_RESET_FOR_REQUEST_ERROR: 283 case SDHCI_RESET_FOR_TUNING_ABORT: 284 case SDHCI_RESET_FOR_CARD_REMOVED: 285 case SDHCI_RESET_FOR_CQE_RECOVERY: 286 sdhci_do_reset(host, SDHCI_RESET_CMD); 287 sdhci_do_reset(host, SDHCI_RESET_DATA); 288 break; 289 case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY: 290 sdhci_do_reset(host, SDHCI_RESET_DATA); 291 break; 292 } 293 } 294 295 #define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r) 296 297 static void sdhci_set_default_irqs(struct sdhci_host 
*host) 298 { 299 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 300 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | 301 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | 302 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | 303 SDHCI_INT_RESPONSE; 304 305 if (host->tuning_mode == SDHCI_TUNING_MODE_2 || 306 host->tuning_mode == SDHCI_TUNING_MODE_3) 307 host->ier |= SDHCI_INT_RETUNE; 308 309 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 310 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 311 } 312 313 static void sdhci_config_dma(struct sdhci_host *host) 314 { 315 u8 ctrl; 316 u16 ctrl2; 317 318 if (host->version < SDHCI_SPEC_200) 319 return; 320 321 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 322 323 /* 324 * Always adjust the DMA selection as some controllers 325 * (e.g. JMicron) can't do PIO properly when the selection 326 * is ADMA. 327 */ 328 ctrl &= ~SDHCI_CTRL_DMA_MASK; 329 if (!(host->flags & SDHCI_REQ_USE_DMA)) 330 goto out; 331 332 /* Note if DMA Select is zero then SDMA is selected */ 333 if (host->flags & SDHCI_USE_ADMA) 334 ctrl |= SDHCI_CTRL_ADMA32; 335 336 if (host->flags & SDHCI_USE_64_BIT_DMA) { 337 /* 338 * If v4 mode, all supported DMA can be 64-bit addressing if 339 * controller supports 64-bit system address, otherwise only 340 * ADMA can support 64-bit addressing. 341 */ 342 if (host->v4_mode) { 343 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 344 ctrl2 |= SDHCI_CTRL_64BIT_ADDR; 345 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 346 } else if (host->flags & SDHCI_USE_ADMA) { 347 /* 348 * Don't need to undo SDHCI_CTRL_ADMA32 in order to 349 * set SDHCI_CTRL_ADMA64. 350 */ 351 ctrl |= SDHCI_CTRL_ADMA64; 352 } 353 } 354 355 out: 356 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 357 } 358 359 static void sdhci_init(struct sdhci_host *host, int soft) 360 { 361 struct mmc_host *mmc = host->mmc; 362 unsigned long flags; 363 364 if (soft) 365 sdhci_reset_for(host, INIT); 366 else 367 sdhci_reset_for_all(host); 368 369 if (host->v4_mode) 370 sdhci_do_enable_v4_mode(host); 371 372 spin_lock_irqsave(&host->lock, flags); 373 sdhci_set_default_irqs(host); 374 spin_unlock_irqrestore(&host->lock, flags); 375 376 host->cqe_on = false; 377 378 if (soft) { 379 /* force clock reconfiguration */ 380 host->clock = 0; 381 host->reinit_uhs = true; 382 mmc->ops->set_ios(mmc, &mmc->ios); 383 } 384 } 385 386 static void sdhci_reinit(struct sdhci_host *host) 387 { 388 u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 389 390 sdhci_init(host, 0); 391 sdhci_enable_card_detection(host); 392 393 /* 394 * A change to the card detect bits indicates a change in present state, 395 * refer sdhci_set_card_detection(). A card detect interrupt might have 396 * been missed while the host controller was being reset, so trigger a 397 * rescan to check. 
398 */ 399 if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT))) 400 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 401 } 402 403 static void __sdhci_led_activate(struct sdhci_host *host) 404 { 405 u8 ctrl; 406 407 if (host->quirks & SDHCI_QUIRK_NO_LED) 408 return; 409 410 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 411 ctrl |= SDHCI_CTRL_LED; 412 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 413 } 414 415 static void __sdhci_led_deactivate(struct sdhci_host *host) 416 { 417 u8 ctrl; 418 419 if (host->quirks & SDHCI_QUIRK_NO_LED) 420 return; 421 422 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 423 ctrl &= ~SDHCI_CTRL_LED; 424 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 425 } 426 427 #if IS_REACHABLE(CONFIG_LEDS_CLASS) 428 static void sdhci_led_control(struct led_classdev *led, 429 enum led_brightness brightness) 430 { 431 struct sdhci_host *host = container_of(led, struct sdhci_host, led); 432 unsigned long flags; 433 434 spin_lock_irqsave(&host->lock, flags); 435 436 if (host->runtime_suspended) 437 goto out; 438 439 if (brightness == LED_OFF) 440 __sdhci_led_deactivate(host); 441 else 442 __sdhci_led_activate(host); 443 out: 444 spin_unlock_irqrestore(&host->lock, flags); 445 } 446 447 static int sdhci_led_register(struct sdhci_host *host) 448 { 449 struct mmc_host *mmc = host->mmc; 450 451 if (host->quirks & SDHCI_QUIRK_NO_LED) 452 return 0; 453 454 snprintf(host->led_name, sizeof(host->led_name), 455 "%s::", mmc_hostname(mmc)); 456 457 host->led.name = host->led_name; 458 host->led.brightness = LED_OFF; 459 host->led.default_trigger = mmc_hostname(mmc); 460 host->led.brightness_set = sdhci_led_control; 461 462 return led_classdev_register(mmc_dev(mmc), &host->led); 463 } 464 465 static void sdhci_led_unregister(struct sdhci_host *host) 466 { 467 if (host->quirks & SDHCI_QUIRK_NO_LED) 468 return; 469 470 led_classdev_unregister(&host->led); 471 } 472 473 static inline void sdhci_led_activate(struct sdhci_host *host) 474 { 475 } 476 477 static inline void sdhci_led_deactivate(struct sdhci_host *host) 478 { 479 } 480 481 #else 482 483 static inline int sdhci_led_register(struct sdhci_host *host) 484 { 485 return 0; 486 } 487 488 static inline void sdhci_led_unregister(struct sdhci_host *host) 489 { 490 } 491 492 static inline void sdhci_led_activate(struct sdhci_host *host) 493 { 494 __sdhci_led_activate(host); 495 } 496 497 static inline void sdhci_led_deactivate(struct sdhci_host *host) 498 { 499 __sdhci_led_deactivate(host); 500 } 501 502 #endif 503 504 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq, 505 unsigned long timeout) 506 { 507 if (sdhci_data_line_cmd(mrq->cmd)) 508 mod_timer(&host->data_timer, timeout); 509 else 510 mod_timer(&host->timer, timeout); 511 } 512 513 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq) 514 { 515 if (sdhci_data_line_cmd(mrq->cmd)) 516 del_timer(&host->data_timer); 517 else 518 del_timer(&host->timer); 519 } 520 521 static inline bool sdhci_has_requests(struct sdhci_host *host) 522 { 523 return host->cmd || host->data_cmd; 524 } 525 526 /*****************************************************************************\ 527 * * 528 * Core functions * 529 * * 530 \*****************************************************************************/ 531 532 static void sdhci_read_block_pio(struct sdhci_host *host) 533 { 534 size_t blksize, len, chunk; 535 u32 scratch; 536 u8 *buf; 537 538 DBG("PIO reading\n"); 539 540 blksize = host->data->blksz; 541 chunk = 0; 542 543 while 
(blksize) { 544 BUG_ON(!sg_miter_next(&host->sg_miter)); 545 546 len = min(host->sg_miter.length, blksize); 547 548 blksize -= len; 549 host->sg_miter.consumed = len; 550 551 buf = host->sg_miter.addr; 552 553 while (len) { 554 if (chunk == 0) { 555 scratch = sdhci_readl(host, SDHCI_BUFFER); 556 chunk = 4; 557 } 558 559 *buf = scratch & 0xFF; 560 561 buf++; 562 scratch >>= 8; 563 chunk--; 564 len--; 565 } 566 } 567 568 sg_miter_stop(&host->sg_miter); 569 } 570 571 static void sdhci_write_block_pio(struct sdhci_host *host) 572 { 573 size_t blksize, len, chunk; 574 u32 scratch; 575 u8 *buf; 576 577 DBG("PIO writing\n"); 578 579 blksize = host->data->blksz; 580 chunk = 0; 581 scratch = 0; 582 583 while (blksize) { 584 BUG_ON(!sg_miter_next(&host->sg_miter)); 585 586 len = min(host->sg_miter.length, blksize); 587 588 blksize -= len; 589 host->sg_miter.consumed = len; 590 591 buf = host->sg_miter.addr; 592 593 while (len) { 594 scratch |= (u32)*buf << (chunk * 8); 595 596 buf++; 597 chunk++; 598 len--; 599 600 if ((chunk == 4) || ((len == 0) && (blksize == 0))) { 601 sdhci_writel(host, scratch, SDHCI_BUFFER); 602 chunk = 0; 603 scratch = 0; 604 } 605 } 606 } 607 608 sg_miter_stop(&host->sg_miter); 609 } 610 611 static void sdhci_transfer_pio(struct sdhci_host *host) 612 { 613 u32 mask; 614 615 if (host->blocks == 0) 616 return; 617 618 if (host->data->flags & MMC_DATA_READ) 619 mask = SDHCI_DATA_AVAILABLE; 620 else 621 mask = SDHCI_SPACE_AVAILABLE; 622 623 /* 624 * Some controllers (JMicron JMB38x) mess up the buffer bits 625 * for transfers < 4 bytes. As long as it is just one block, 626 * we can ignore the bits. 627 */ 628 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && 629 (host->data->blocks == 1)) 630 mask = ~0; 631 632 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 633 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) 634 udelay(100); 635 636 if (host->data->flags & MMC_DATA_READ) 637 sdhci_read_block_pio(host); 638 else 639 sdhci_write_block_pio(host); 640 641 host->blocks--; 642 if (host->blocks == 0) 643 break; 644 } 645 646 DBG("PIO transfer complete.\n"); 647 } 648 649 static int sdhci_pre_dma_transfer(struct sdhci_host *host, 650 struct mmc_data *data, int cookie) 651 { 652 int sg_count; 653 654 /* 655 * If the data buffers are already mapped, return the previous 656 * dma_map_sg() result. 
657 */ 658 if (data->host_cookie == COOKIE_PRE_MAPPED) 659 return data->sg_count; 660 661 /* Bounce write requests to the bounce buffer */ 662 if (host->bounce_buffer) { 663 unsigned int length = data->blksz * data->blocks; 664 665 if (length > host->bounce_buffer_size) { 666 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", 667 mmc_hostname(host->mmc), length, 668 host->bounce_buffer_size); 669 return -EIO; 670 } 671 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { 672 /* Copy the data to the bounce buffer */ 673 if (host->ops->copy_to_bounce_buffer) { 674 host->ops->copy_to_bounce_buffer(host, 675 data, length); 676 } else { 677 sg_copy_to_buffer(data->sg, data->sg_len, 678 host->bounce_buffer, length); 679 } 680 } 681 /* Switch ownership to the DMA */ 682 dma_sync_single_for_device(mmc_dev(host->mmc), 683 host->bounce_addr, 684 host->bounce_buffer_size, 685 mmc_get_dma_dir(data)); 686 /* Just a dummy value */ 687 sg_count = 1; 688 } else { 689 /* Just access the data directly from memory */ 690 sg_count = dma_map_sg(mmc_dev(host->mmc), 691 data->sg, data->sg_len, 692 mmc_get_dma_dir(data)); 693 } 694 695 if (sg_count == 0) 696 return -ENOSPC; 697 698 data->sg_count = sg_count; 699 data->host_cookie = cookie; 700 701 return sg_count; 702 } 703 704 static char *sdhci_kmap_atomic(struct scatterlist *sg) 705 { 706 return kmap_local_page(sg_page(sg)) + sg->offset; 707 } 708 709 static void sdhci_kunmap_atomic(void *buffer) 710 { 711 kunmap_local(buffer); 712 } 713 714 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, 715 dma_addr_t addr, int len, unsigned int cmd) 716 { 717 struct sdhci_adma2_64_desc *dma_desc = *desc; 718 719 /* 32-bit and 64-bit descriptors have these members in same position */ 720 dma_desc->cmd = cpu_to_le16(cmd); 721 dma_desc->len = cpu_to_le16(len); 722 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr)); 723 724 if (host->flags & SDHCI_USE_64_BIT_DMA) 725 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr)); 726 727 *desc += host->desc_sz; 728 } 729 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc); 730 731 static inline void __sdhci_adma_write_desc(struct sdhci_host *host, 732 void **desc, dma_addr_t addr, 733 int len, unsigned int cmd) 734 { 735 if (host->ops->adma_write_desc) 736 host->ops->adma_write_desc(host, desc, addr, len, cmd); 737 else 738 sdhci_adma_write_desc(host, desc, addr, len, cmd); 739 } 740 741 static void sdhci_adma_mark_end(void *desc) 742 { 743 struct sdhci_adma2_64_desc *dma_desc = desc; 744 745 /* 32-bit and 64-bit descriptors have 'cmd' in same position */ 746 dma_desc->cmd |= cpu_to_le16(ADMA2_END); 747 } 748 749 static void sdhci_adma_table_pre(struct sdhci_host *host, 750 struct mmc_data *data, int sg_count) 751 { 752 struct scatterlist *sg; 753 dma_addr_t addr, align_addr; 754 void *desc, *align; 755 char *buffer; 756 int len, offset, i; 757 758 /* 759 * The spec does not specify endianness of descriptor table. 760 * We currently guess that it is LE. 761 */ 762 763 host->sg_count = sg_count; 764 765 desc = host->adma_table; 766 align = host->align_buffer; 767 768 align_addr = host->align_addr; 769 770 for_each_sg(data->sg, sg, host->sg_count, i) { 771 addr = sg_dma_address(sg); 772 len = sg_dma_len(sg); 773 774 /* 775 * The SDHCI specification states that ADMA addresses must 776 * be 32-bit aligned. If they aren't, then we use a bounce 777 * buffer for the (up to three) bytes that screw up the 778 * alignment. 
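		 * For example (illustrative numbers only, not from the spec):
		 * an sg entry whose low two DMA address bits are 01b gives
		 * offset = 3 below, so its first three bytes go through the
		 * align (bounce) buffer and the rest is described directly.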
779 */ 780 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) & 781 SDHCI_ADMA2_MASK; 782 if (offset) { 783 if (data->flags & MMC_DATA_WRITE) { 784 buffer = sdhci_kmap_atomic(sg); 785 memcpy(align, buffer, offset); 786 sdhci_kunmap_atomic(buffer); 787 } 788 789 /* tran, valid */ 790 __sdhci_adma_write_desc(host, &desc, align_addr, 791 offset, ADMA2_TRAN_VALID); 792 793 BUG_ON(offset > 65536); 794 795 align += SDHCI_ADMA2_ALIGN; 796 align_addr += SDHCI_ADMA2_ALIGN; 797 798 addr += offset; 799 len -= offset; 800 } 801 802 /* 803 * The block layer forces a minimum segment size of PAGE_SIZE, 804 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write 805 * multiple descriptors, noting that the ADMA table is sized 806 * for 4KiB chunks anyway, so it will be big enough. 807 */ 808 while (len > host->max_adma) { 809 int n = 32 * 1024; /* 32KiB*/ 810 811 __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID); 812 addr += n; 813 len -= n; 814 } 815 816 /* tran, valid */ 817 if (len) 818 __sdhci_adma_write_desc(host, &desc, addr, len, 819 ADMA2_TRAN_VALID); 820 821 /* 822 * If this triggers then we have a calculation bug 823 * somewhere. :/ 824 */ 825 WARN_ON((desc - host->adma_table) >= host->adma_table_sz); 826 } 827 828 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 829 /* Mark the last descriptor as the terminating descriptor */ 830 if (desc != host->adma_table) { 831 desc -= host->desc_sz; 832 sdhci_adma_mark_end(desc); 833 } 834 } else { 835 /* Add a terminating entry - nop, end, valid */ 836 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID); 837 } 838 } 839 840 static void sdhci_adma_table_post(struct sdhci_host *host, 841 struct mmc_data *data) 842 { 843 struct scatterlist *sg; 844 int i, size; 845 void *align; 846 char *buffer; 847 848 if (data->flags & MMC_DATA_READ) { 849 bool has_unaligned = false; 850 851 /* Do a quick scan of the SG list for any unaligned mappings */ 852 for_each_sg(data->sg, sg, host->sg_count, i) 853 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 854 has_unaligned = true; 855 break; 856 } 857 858 if (has_unaligned) { 859 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, 860 data->sg_len, DMA_FROM_DEVICE); 861 862 align = host->align_buffer; 863 864 for_each_sg(data->sg, sg, host->sg_count, i) { 865 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 866 size = SDHCI_ADMA2_ALIGN - 867 (sg_dma_address(sg) & SDHCI_ADMA2_MASK); 868 869 buffer = sdhci_kmap_atomic(sg); 870 memcpy(buffer, align, size); 871 sdhci_kunmap_atomic(buffer); 872 873 align += SDHCI_ADMA2_ALIGN; 874 } 875 } 876 } 877 } 878 } 879 880 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr) 881 { 882 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS); 883 if (host->flags & SDHCI_USE_64_BIT_DMA) 884 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI); 885 } 886 887 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host) 888 { 889 if (host->bounce_buffer) 890 return host->bounce_addr; 891 else 892 return sg_dma_address(host->data->sg); 893 } 894 895 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr) 896 { 897 if (host->v4_mode) 898 sdhci_set_adma_addr(host, addr); 899 else 900 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS); 901 } 902 903 static unsigned int sdhci_target_timeout(struct sdhci_host *host, 904 struct mmc_command *cmd, 905 struct mmc_data *data) 906 { 907 unsigned int target_timeout; 908 909 /* timeout in us */ 910 if (!data) { 911 target_timeout = cmd->busy_timeout * 1000; 912 } else { 913 
target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000); 914 if (host->clock && data->timeout_clks) { 915 unsigned long long val; 916 917 /* 918 * data->timeout_clks is in units of clock cycles. 919 * host->clock is in Hz. target_timeout is in us. 920 * Hence, us = 1000000 * cycles / Hz. Round up. 921 */ 922 val = 1000000ULL * data->timeout_clks; 923 if (do_div(val, host->clock)) 924 target_timeout++; 925 target_timeout += val; 926 } 927 } 928 929 return target_timeout; 930 } 931 932 static void sdhci_calc_sw_timeout(struct sdhci_host *host, 933 struct mmc_command *cmd) 934 { 935 struct mmc_data *data = cmd->data; 936 struct mmc_host *mmc = host->mmc; 937 struct mmc_ios *ios = &mmc->ios; 938 unsigned char bus_width = 1 << ios->bus_width; 939 unsigned int blksz; 940 unsigned int freq; 941 u64 target_timeout; 942 u64 transfer_time; 943 944 target_timeout = sdhci_target_timeout(host, cmd, data); 945 target_timeout *= NSEC_PER_USEC; 946 947 if (data) { 948 blksz = data->blksz; 949 freq = mmc->actual_clock ? : host->clock; 950 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width); 951 do_div(transfer_time, freq); 952 /* multiply by '2' to account for any unknowns */ 953 transfer_time = transfer_time * 2; 954 /* calculate timeout for the entire data */ 955 host->data_timeout = data->blocks * target_timeout + 956 transfer_time; 957 } else { 958 host->data_timeout = target_timeout; 959 } 960 961 if (host->data_timeout) 962 host->data_timeout += MMC_CMD_TRANSFER_TIME; 963 } 964 965 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, 966 bool *too_big) 967 { 968 u8 count; 969 struct mmc_data *data; 970 unsigned target_timeout, current_timeout; 971 972 *too_big = false; 973 974 /* 975 * If the host controller provides us with an incorrect timeout 976 * value, just skip the check and use the maximum. The hardware may take 977 * longer to time out, but that's much better than having a too-short 978 * timeout value. 979 */ 980 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) 981 return host->max_timeout_count; 982 983 /* Unspecified command, assume max */ 984 if (cmd == NULL) 985 return host->max_timeout_count; 986 987 data = cmd->data; 988 /* Unspecified timeout, assume max */ 989 if (!data && !cmd->busy_timeout) 990 return host->max_timeout_count; 991 992 /* timeout in us */ 993 target_timeout = sdhci_target_timeout(host, cmd, data); 994 995 /* 996 * Figure out needed cycles. 997 * We do this in steps in order to fit inside a 32 bit int. 
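	 * (Illustrative example, assuming host->timeout_clk is in kHz as it
	 * is elsewhere in this driver: with timeout_clk = 50000, the minimum
	 * timeout computed below is (1 << 13) * 1000 / 50000 = 163 us, and
	 * each increment of 'count' doubles it.)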
998 * The first step is the minimum timeout, which will have a 999 * minimum resolution of 6 bits: 1000 * (1) 2^13*1000 > 2^22, 1001 * (2) host->timeout_clk < 2^16 1002 * => 1003 * (1) / (2) > 2^6 1004 */ 1005 count = 0; 1006 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 1007 while (current_timeout < target_timeout) { 1008 count++; 1009 current_timeout <<= 1; 1010 if (count > host->max_timeout_count) { 1011 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT)) 1012 DBG("Too large timeout 0x%x requested for CMD%d!\n", 1013 count, cmd->opcode); 1014 count = host->max_timeout_count; 1015 *too_big = true; 1016 break; 1017 } 1018 } 1019 1020 return count; 1021 } 1022 1023 static void sdhci_set_transfer_irqs(struct sdhci_host *host) 1024 { 1025 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; 1026 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; 1027 1028 if (host->flags & SDHCI_REQ_USE_DMA) 1029 host->ier = (host->ier & ~pio_irqs) | dma_irqs; 1030 else 1031 host->ier = (host->ier & ~dma_irqs) | pio_irqs; 1032 1033 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12)) 1034 host->ier |= SDHCI_INT_AUTO_CMD_ERR; 1035 else 1036 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR; 1037 1038 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1039 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1040 } 1041 1042 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) 1043 { 1044 if (enable) 1045 host->ier |= SDHCI_INT_DATA_TIMEOUT; 1046 else 1047 host->ier &= ~SDHCI_INT_DATA_TIMEOUT; 1048 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1049 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1050 } 1051 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); 1052 1053 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1054 { 1055 bool too_big = false; 1056 u8 count = sdhci_calc_timeout(host, cmd, &too_big); 1057 1058 if (too_big && 1059 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { 1060 sdhci_calc_sw_timeout(host, cmd); 1061 sdhci_set_data_timeout_irq(host, false); 1062 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { 1063 sdhci_set_data_timeout_irq(host, true); 1064 } 1065 1066 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); 1067 } 1068 EXPORT_SYMBOL_GPL(__sdhci_set_timeout); 1069 1070 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1071 { 1072 if (host->ops->set_timeout) 1073 host->ops->set_timeout(host, cmd); 1074 else 1075 __sdhci_set_timeout(host, cmd); 1076 } 1077 1078 static void sdhci_initialize_data(struct sdhci_host *host, 1079 struct mmc_data *data) 1080 { 1081 WARN_ON(host->data); 1082 1083 /* Sanity checks */ 1084 BUG_ON(data->blksz * data->blocks > 524288); 1085 BUG_ON(data->blksz > host->mmc->max_blk_size); 1086 BUG_ON(data->blocks > 65535); 1087 1088 host->data = data; 1089 host->data_early = 0; 1090 host->data->bytes_xfered = 0; 1091 } 1092 1093 static inline void sdhci_set_block_info(struct sdhci_host *host, 1094 struct mmc_data *data) 1095 { 1096 /* Set the DMA boundary value and block size */ 1097 sdhci_writew(host, 1098 SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), 1099 SDHCI_BLOCK_SIZE); 1100 /* 1101 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count 1102 * can be supported, in that case 16-bit block count register must be 0. 
1103 */ 1104 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1105 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { 1106 if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) 1107 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); 1108 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); 1109 } else { 1110 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 1111 } 1112 } 1113 1114 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) 1115 { 1116 struct mmc_data *data = cmd->data; 1117 1118 sdhci_initialize_data(host, data); 1119 1120 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 1121 struct scatterlist *sg; 1122 unsigned int length_mask, offset_mask; 1123 int i; 1124 1125 host->flags |= SDHCI_REQ_USE_DMA; 1126 1127 /* 1128 * FIXME: This doesn't account for merging when mapping the 1129 * scatterlist. 1130 * 1131 * The assumption here being that alignment and lengths are 1132 * the same after DMA mapping to device address space. 1133 */ 1134 length_mask = 0; 1135 offset_mask = 0; 1136 if (host->flags & SDHCI_USE_ADMA) { 1137 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) { 1138 length_mask = 3; 1139 /* 1140 * As we use up to 3 byte chunks to work 1141 * around alignment problems, we need to 1142 * check the offset as well. 1143 */ 1144 offset_mask = 3; 1145 } 1146 } else { 1147 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) 1148 length_mask = 3; 1149 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) 1150 offset_mask = 3; 1151 } 1152 1153 if (unlikely(length_mask | offset_mask)) { 1154 for_each_sg(data->sg, sg, data->sg_len, i) { 1155 if (sg->length & length_mask) { 1156 DBG("Reverting to PIO because of transfer size (%d)\n", 1157 sg->length); 1158 host->flags &= ~SDHCI_REQ_USE_DMA; 1159 break; 1160 } 1161 if (sg->offset & offset_mask) { 1162 DBG("Reverting to PIO because of bad alignment\n"); 1163 host->flags &= ~SDHCI_REQ_USE_DMA; 1164 break; 1165 } 1166 } 1167 } 1168 } 1169 1170 sdhci_config_dma(host); 1171 1172 if (host->flags & SDHCI_REQ_USE_DMA) { 1173 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1174 1175 if (sg_cnt <= 0) { 1176 /* 1177 * This only happens when someone fed 1178 * us an invalid request. 
1179 */ 1180 WARN_ON(1); 1181 host->flags &= ~SDHCI_REQ_USE_DMA; 1182 } else if (host->flags & SDHCI_USE_ADMA) { 1183 sdhci_adma_table_pre(host, data, sg_cnt); 1184 sdhci_set_adma_addr(host, host->adma_addr); 1185 } else { 1186 WARN_ON(sg_cnt != 1); 1187 sdhci_set_sdma_addr(host, sdhci_sdma_address(host)); 1188 } 1189 } 1190 1191 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 1192 int flags; 1193 1194 flags = SG_MITER_ATOMIC; 1195 if (host->data->flags & MMC_DATA_READ) 1196 flags |= SG_MITER_TO_SG; 1197 else 1198 flags |= SG_MITER_FROM_SG; 1199 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1200 host->blocks = data->blocks; 1201 } 1202 1203 sdhci_set_transfer_irqs(host); 1204 1205 sdhci_set_block_info(host, data); 1206 } 1207 1208 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) 1209 1210 static int sdhci_external_dma_init(struct sdhci_host *host) 1211 { 1212 int ret = 0; 1213 struct mmc_host *mmc = host->mmc; 1214 1215 host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx"); 1216 if (IS_ERR(host->tx_chan)) { 1217 ret = PTR_ERR(host->tx_chan); 1218 if (ret != -EPROBE_DEFER) 1219 pr_warn("Failed to request TX DMA channel.\n"); 1220 host->tx_chan = NULL; 1221 return ret; 1222 } 1223 1224 host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx"); 1225 if (IS_ERR(host->rx_chan)) { 1226 if (host->tx_chan) { 1227 dma_release_channel(host->tx_chan); 1228 host->tx_chan = NULL; 1229 } 1230 1231 ret = PTR_ERR(host->rx_chan); 1232 if (ret != -EPROBE_DEFER) 1233 pr_warn("Failed to request RX DMA channel.\n"); 1234 host->rx_chan = NULL; 1235 } 1236 1237 return ret; 1238 } 1239 1240 static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1241 struct mmc_data *data) 1242 { 1243 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; 1244 } 1245 1246 static int sdhci_external_dma_setup(struct sdhci_host *host, 1247 struct mmc_command *cmd) 1248 { 1249 int ret, i; 1250 enum dma_transfer_direction dir; 1251 struct dma_async_tx_descriptor *desc; 1252 struct mmc_data *data = cmd->data; 1253 struct dma_chan *chan; 1254 struct dma_slave_config cfg; 1255 dma_cookie_t cookie; 1256 int sg_cnt; 1257 1258 if (!host->mapbase) 1259 return -EINVAL; 1260 1261 memset(&cfg, 0, sizeof(cfg)); 1262 cfg.src_addr = host->mapbase + SDHCI_BUFFER; 1263 cfg.dst_addr = host->mapbase + SDHCI_BUFFER; 1264 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1265 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1266 cfg.src_maxburst = data->blksz / 4; 1267 cfg.dst_maxburst = data->blksz / 4; 1268 1269 /* Sanity check: all the SG entries must be aligned by block size. */ 1270 for (i = 0; i < data->sg_len; i++) { 1271 if ((data->sg + i)->length % data->blksz) 1272 return -EINVAL; 1273 } 1274 1275 chan = sdhci_external_dma_channel(host, data); 1276 1277 ret = dmaengine_slave_config(chan, &cfg); 1278 if (ret) 1279 return ret; 1280 1281 sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1282 if (sg_cnt <= 0) 1283 return -EINVAL; 1284 1285 dir = data->flags & MMC_DATA_WRITE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; 1286 desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir, 1287 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1288 if (!desc) 1289 return -EINVAL; 1290 1291 desc->callback = NULL; 1292 desc->callback_param = NULL; 1293 1294 cookie = dmaengine_submit(desc); 1295 if (dma_submit_error(cookie)) 1296 ret = cookie; 1297 1298 return ret; 1299 } 1300 1301 static void sdhci_external_dma_release(struct sdhci_host *host) 1302 { 1303 if (host->tx_chan) { 1304 dma_release_channel(host->tx_chan); 1305 host->tx_chan = NULL; 1306 } 1307 1308 if (host->rx_chan) { 1309 dma_release_channel(host->rx_chan); 1310 host->rx_chan = NULL; 1311 } 1312 1313 sdhci_switch_external_dma(host, false); 1314 } 1315 1316 static void __sdhci_external_dma_prepare_data(struct sdhci_host *host, 1317 struct mmc_command *cmd) 1318 { 1319 struct mmc_data *data = cmd->data; 1320 1321 sdhci_initialize_data(host, data); 1322 1323 host->flags |= SDHCI_REQ_USE_DMA; 1324 sdhci_set_transfer_irqs(host); 1325 1326 sdhci_set_block_info(host, data); 1327 } 1328 1329 static void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1330 struct mmc_command *cmd) 1331 { 1332 if (!sdhci_external_dma_setup(host, cmd)) { 1333 __sdhci_external_dma_prepare_data(host, cmd); 1334 } else { 1335 sdhci_external_dma_release(host); 1336 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n", 1337 mmc_hostname(host->mmc)); 1338 sdhci_prepare_data(host, cmd); 1339 } 1340 } 1341 1342 static void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1343 struct mmc_command *cmd) 1344 { 1345 struct dma_chan *chan; 1346 1347 if (!cmd->data) 1348 return; 1349 1350 chan = sdhci_external_dma_channel(host, cmd->data); 1351 if (chan) 1352 dma_async_issue_pending(chan); 1353 } 1354 1355 #else 1356 1357 static inline int sdhci_external_dma_init(struct sdhci_host *host) 1358 { 1359 return -EOPNOTSUPP; 1360 } 1361 1362 static inline void sdhci_external_dma_release(struct sdhci_host *host) 1363 { 1364 } 1365 1366 static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1367 struct mmc_command *cmd) 1368 { 1369 /* This should never happen */ 1370 WARN_ON_ONCE(1); 1371 } 1372 1373 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1374 struct mmc_command *cmd) 1375 { 1376 } 1377 1378 static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1379 struct mmc_data *data) 1380 { 1381 return NULL; 1382 } 1383 1384 #endif 1385 1386 void sdhci_switch_external_dma(struct sdhci_host *host, bool en) 1387 { 1388 host->use_external_dma = en; 1389 } 1390 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma); 1391 1392 static inline bool sdhci_auto_cmd12(struct sdhci_host *host, 1393 struct mmc_request *mrq) 1394 { 1395 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) && 1396 !mrq->cap_cmd_during_tfr; 1397 } 1398 1399 static inline bool sdhci_auto_cmd23(struct sdhci_host *host, 1400 struct mmc_request *mrq) 1401 { 1402 return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23); 1403 } 1404 1405 static inline bool sdhci_manual_cmd23(struct sdhci_host *host, 1406 struct mmc_request *mrq) 1407 { 1408 return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23); 1409 } 1410 1411 static inline void sdhci_auto_cmd_select(struct sdhci_host *host, 1412 struct mmc_command *cmd, 1413 u16 *mode) 1414 { 1415 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) && 1416 (cmd->opcode != SD_IO_RW_EXTENDED); 1417 bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq); 1418 u16 ctrl2; 
1419 1420 /* 1421 * In case of Version 4.10 or later, use of 'Auto CMD Auto 1422 * Select' is recommended rather than use of 'Auto CMD12 1423 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode 1424 * here because some controllers (e.g sdhci-of-dwmshc) expect it. 1425 */ 1426 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1427 (use_cmd12 || use_cmd23)) { 1428 *mode |= SDHCI_TRNS_AUTO_SEL; 1429 1430 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1431 if (use_cmd23) 1432 ctrl2 |= SDHCI_CMD23_ENABLE; 1433 else 1434 ctrl2 &= ~SDHCI_CMD23_ENABLE; 1435 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 1436 1437 return; 1438 } 1439 1440 /* 1441 * If we are sending CMD23, CMD12 never gets sent 1442 * on successful completion (so no Auto-CMD12). 1443 */ 1444 if (use_cmd12) 1445 *mode |= SDHCI_TRNS_AUTO_CMD12; 1446 else if (use_cmd23) 1447 *mode |= SDHCI_TRNS_AUTO_CMD23; 1448 } 1449 1450 static void sdhci_set_transfer_mode(struct sdhci_host *host, 1451 struct mmc_command *cmd) 1452 { 1453 u16 mode = 0; 1454 struct mmc_data *data = cmd->data; 1455 1456 if (data == NULL) { 1457 if (host->quirks2 & 1458 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) { 1459 /* must not clear SDHCI_TRANSFER_MODE when tuning */ 1460 if (!mmc_op_tuning(cmd->opcode)) 1461 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE); 1462 } else { 1463 /* clear Auto CMD settings for no data CMDs */ 1464 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); 1465 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 | 1466 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE); 1467 } 1468 return; 1469 } 1470 1471 WARN_ON(!host->data); 1472 1473 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE)) 1474 mode = SDHCI_TRNS_BLK_CNT_EN; 1475 1476 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { 1477 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI; 1478 sdhci_auto_cmd_select(host, cmd, &mode); 1479 if (sdhci_auto_cmd23(host, cmd->mrq)) 1480 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2); 1481 } 1482 1483 if (data->flags & MMC_DATA_READ) 1484 mode |= SDHCI_TRNS_READ; 1485 if (host->flags & SDHCI_REQ_USE_DMA) 1486 mode |= SDHCI_TRNS_DMA; 1487 1488 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); 1489 } 1490 1491 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) 1492 { 1493 return (!(host->flags & SDHCI_DEVICE_DEAD) && 1494 ((mrq->cmd && mrq->cmd->error) || 1495 (mrq->sbc && mrq->sbc->error) || 1496 (mrq->data && mrq->data->stop && mrq->data->stop->error) || 1497 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); 1498 } 1499 1500 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq) 1501 { 1502 int i; 1503 1504 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1505 if (host->mrqs_done[i] == mrq) { 1506 WARN_ON(1); 1507 return; 1508 } 1509 } 1510 1511 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1512 if (!host->mrqs_done[i]) { 1513 host->mrqs_done[i] = mrq; 1514 break; 1515 } 1516 } 1517 1518 WARN_ON(i >= SDHCI_MAX_MRQS); 1519 } 1520 1521 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1522 { 1523 if (host->cmd && host->cmd->mrq == mrq) 1524 host->cmd = NULL; 1525 1526 if (host->data_cmd && host->data_cmd->mrq == mrq) 1527 host->data_cmd = NULL; 1528 1529 if (host->deferred_cmd && host->deferred_cmd->mrq == mrq) 1530 host->deferred_cmd = NULL; 1531 1532 if (host->data && host->data->mrq == mrq) 1533 host->data = NULL; 1534 1535 if (sdhci_needs_reset(host, mrq)) 1536 host->pending_reset = true; 1537 1538 sdhci_set_mrq_done(host, mrq); 1539 1540 sdhci_del_timer(host, mrq); 
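	/*
	 * If no other command or data command is still in flight, the
	 * activity LED (if any) is turned off below.
	 */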
1541 1542 if (!sdhci_has_requests(host)) 1543 sdhci_led_deactivate(host); 1544 } 1545 1546 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1547 { 1548 __sdhci_finish_mrq(host, mrq); 1549 1550 queue_work(host->complete_wq, &host->complete_work); 1551 } 1552 1553 static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout) 1554 { 1555 struct mmc_command *data_cmd = host->data_cmd; 1556 struct mmc_data *data = host->data; 1557 1558 host->data = NULL; 1559 host->data_cmd = NULL; 1560 1561 /* 1562 * The controller needs a reset of internal state machines upon error 1563 * conditions. 1564 */ 1565 if (data->error) { 1566 if (!host->cmd || host->cmd == data_cmd) 1567 sdhci_reset_for(host, REQUEST_ERROR); 1568 else 1569 sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY); 1570 } 1571 1572 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) == 1573 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) 1574 sdhci_adma_table_post(host, data); 1575 1576 /* 1577 * The specification states that the block count register must 1578 * be updated, but it does not specify at what point in the 1579 * data flow. That makes the register entirely useless to read 1580 * back so we have to assume that nothing made it to the card 1581 * in the event of an error. 1582 */ 1583 if (data->error) 1584 data->bytes_xfered = 0; 1585 else 1586 data->bytes_xfered = data->blksz * data->blocks; 1587 1588 /* 1589 * Need to send CMD12 if - 1590 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) 1591 * b) error in multiblock transfer 1592 */ 1593 if (data->stop && 1594 ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || 1595 data->error)) { 1596 /* 1597 * 'cap_cmd_during_tfr' request must not use the command line 1598 * after mmc_command_done() has been called. It is upper layer's 1599 * responsibility to send the stop command if required. 1600 */ 1601 if (data->mrq->cap_cmd_during_tfr) { 1602 __sdhci_finish_mrq(host, data->mrq); 1603 } else { 1604 /* Avoid triggering warning in sdhci_send_command() */ 1605 host->cmd = NULL; 1606 if (!sdhci_send_command(host, data->stop)) { 1607 if (sw_data_timeout) { 1608 /* 1609 * This is anyway a sw data timeout, so 1610 * give up now. 
					 */
					data->stop->error = -EIO;
					__sdhci_finish_mrq(host, data->mrq);
				} else {
					WARN_ON(host->deferred_cmd);
					host->deferred_cmd = data->stop;
				}
			}
		}
	} else {
		__sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	__sdhci_finish_data(host, false);
}

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		sdhci_set_timeout(host, cmd);
	}

	if (cmd->data) {
		if (host->use_external_dma)
			sdhci_external_dma_prepare_data(host, cmd);
		else
			sdhci_prepare_data(host, cmd);
	}

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		WARN_ONCE(1, "Unsupported response type!\n");
		/*
		 * This does not happen in practice because 136-bit response
		 * commands never have busy waiting, so rather than complicate
		 * the error path, just remove busy waiting and continue.
		 */
		cmd->flags &= ~MMC_RSP_BUSY;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || mmc_op_tuning(cmd->opcode))
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	if (host->use_external_dma)
		sdhci_external_dma_pre_transfer(host, cmd);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);

	return true;
}

static bool sdhci_present_error(struct sdhci_host *host,
				struct mmc_command *cmd, bool present)
{
	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		cmd->error = -ENOMEDIUM;
		return true;
	}

	return false;
}

static bool sdhci_send_command_retry(struct sdhci_host *host,
				     struct mmc_command *cmd,
				     unsigned long flags)
	__releases(host->lock)
	__acquires(host->lock)
{
	struct mmc_command *deferred_cmd = host->deferred_cmd;
	int timeout = 10; /* Approx. 10 ms */
	bool present;

	while (!sdhci_send_command(host, cmd)) {
		if (!timeout--) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			return false;
		}

		spin_unlock_irqrestore(&host->lock, flags);

		usleep_range(1000, 1250);

		present = host->mmc->ops->get_cd(host->mmc);

		spin_lock_irqsave(&host->lock, flags);

		/* A deferred command might disappear, handle that */
		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
			return true;

		if (sdhci_present_error(host, cmd, present))
			return false;
	}

	if (cmd == host->deferred_cmd)
		host->deferred_cmd = NULL;

	return true;
}

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the
busy state has 1813 * ended, allowing us to wait without wasting CPU cycles. 1814 * The busy signal uses DAT0 so this is similar to waiting 1815 * for data to complete. 1816 * 1817 * Note: The 1.0 specification is a bit ambiguous about this 1818 * feature so there might be some problems with older 1819 * controllers. 1820 */ 1821 if (cmd->flags & MMC_RSP_BUSY) { 1822 if (cmd->data) { 1823 DBG("Cannot wait for busy signal when also doing a data transfer"); 1824 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) && 1825 cmd == host->data_cmd) { 1826 /* Command complete before busy is ended */ 1827 return; 1828 } 1829 } 1830 1831 /* Finished CMD23, now send actual command. */ 1832 if (cmd == cmd->mrq->sbc) { 1833 if (!sdhci_send_command(host, cmd->mrq->cmd)) { 1834 WARN_ON(host->deferred_cmd); 1835 host->deferred_cmd = cmd->mrq->cmd; 1836 } 1837 } else { 1838 1839 /* Processed actual command. */ 1840 if (host->data && host->data_early) 1841 sdhci_finish_data(host); 1842 1843 if (!cmd->data) 1844 __sdhci_finish_mrq(host, cmd->mrq); 1845 } 1846 } 1847 1848 static u16 sdhci_get_preset_value(struct sdhci_host *host) 1849 { 1850 u16 preset = 0; 1851 1852 switch (host->timing) { 1853 case MMC_TIMING_MMC_HS: 1854 case MMC_TIMING_SD_HS: 1855 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED); 1856 break; 1857 case MMC_TIMING_UHS_SDR12: 1858 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1859 break; 1860 case MMC_TIMING_UHS_SDR25: 1861 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); 1862 break; 1863 case MMC_TIMING_UHS_SDR50: 1864 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); 1865 break; 1866 case MMC_TIMING_UHS_SDR104: 1867 case MMC_TIMING_MMC_HS200: 1868 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); 1869 break; 1870 case MMC_TIMING_UHS_DDR50: 1871 case MMC_TIMING_MMC_DDR52: 1872 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); 1873 break; 1874 case MMC_TIMING_MMC_HS400: 1875 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400); 1876 break; 1877 default: 1878 pr_warn("%s: Invalid UHS-I mode selected\n", 1879 mmc_hostname(host->mmc)); 1880 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1881 break; 1882 } 1883 return preset; 1884 } 1885 1886 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock, 1887 unsigned int *actual_clock) 1888 { 1889 int div = 0; /* Initialized for compiler warning */ 1890 int real_div = div, clk_mul = 1; 1891 u16 clk = 0; 1892 bool switch_base_clk = false; 1893 1894 if (host->version >= SDHCI_SPEC_300) { 1895 if (host->preset_enabled) { 1896 u16 pre_val; 1897 1898 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1899 pre_val = sdhci_get_preset_value(host); 1900 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val); 1901 if (host->clk_mul && 1902 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) { 1903 clk = SDHCI_PROG_CLOCK_MODE; 1904 real_div = div + 1; 1905 clk_mul = host->clk_mul; 1906 } else { 1907 real_div = max_t(int, 1, div << 1); 1908 } 1909 goto clock_set; 1910 } 1911 1912 /* 1913 * Check if the Host Controller supports Programmable Clock 1914 * Mode. 1915 */ 1916 if (host->clk_mul) { 1917 for (div = 1; div <= 1024; div++) { 1918 if ((host->max_clk * host->clk_mul / div) 1919 <= clock) 1920 break; 1921 } 1922 if ((host->max_clk * host->clk_mul / div) <= clock) { 1923 /* 1924 * Set Programmable Clock Mode in the Clock 1925 * Control register. 1926 */ 1927 clk = SDHCI_PROG_CLOCK_MODE; 1928 real_div = div; 1929 clk_mul = host->clk_mul; 1930 div--; 1931 } else { 1932 /* 1933 * Divisor can be too small to reach clock 1934 * speed requirement. 
Then use the base clock. 1935 */ 1936 switch_base_clk = true; 1937 } 1938 } 1939 1940 if (!host->clk_mul || switch_base_clk) { 1941 /* Version 3.00 divisors must be a multiple of 2. */ 1942 if (host->max_clk <= clock) 1943 div = 1; 1944 else { 1945 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; 1946 div += 2) { 1947 if ((host->max_clk / div) <= clock) 1948 break; 1949 } 1950 } 1951 real_div = div; 1952 div >>= 1; 1953 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN) 1954 && !div && host->max_clk <= 25000000) 1955 div = 1; 1956 } 1957 } else { 1958 /* Version 2.00 divisors must be a power of 2. */ 1959 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { 1960 if ((host->max_clk / div) <= clock) 1961 break; 1962 } 1963 real_div = div; 1964 div >>= 1; 1965 } 1966 1967 clock_set: 1968 if (real_div) 1969 *actual_clock = (host->max_clk * clk_mul) / real_div; 1970 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 1971 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) 1972 << SDHCI_DIVIDER_HI_SHIFT; 1973 1974 return clk; 1975 } 1976 EXPORT_SYMBOL_GPL(sdhci_calc_clk); 1977 1978 void sdhci_enable_clk(struct sdhci_host *host, u16 clk) 1979 { 1980 ktime_t timeout; 1981 1982 clk |= SDHCI_CLOCK_INT_EN; 1983 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1984 1985 /* Wait max 150 ms */ 1986 timeout = ktime_add_ms(ktime_get(), 150); 1987 while (1) { 1988 bool timedout = ktime_after(ktime_get(), timeout); 1989 1990 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1991 if (clk & SDHCI_CLOCK_INT_STABLE) 1992 break; 1993 if (timedout) { 1994 pr_err("%s: Internal clock never stabilised.\n", 1995 mmc_hostname(host->mmc)); 1996 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 1997 sdhci_dumpregs(host); 1998 return; 1999 } 2000 udelay(10); 2001 } 2002 2003 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) { 2004 clk |= SDHCI_CLOCK_PLL_EN; 2005 clk &= ~SDHCI_CLOCK_INT_STABLE; 2006 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2007 2008 /* Wait max 150 ms */ 2009 timeout = ktime_add_ms(ktime_get(), 150); 2010 while (1) { 2011 bool timedout = ktime_after(ktime_get(), timeout); 2012 2013 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2014 if (clk & SDHCI_CLOCK_INT_STABLE) 2015 break; 2016 if (timedout) { 2017 pr_err("%s: PLL clock never stabilised.\n", 2018 mmc_hostname(host->mmc)); 2019 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 2020 sdhci_dumpregs(host); 2021 return; 2022 } 2023 udelay(10); 2024 } 2025 } 2026 2027 clk |= SDHCI_CLOCK_CARD_EN; 2028 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2029 } 2030 EXPORT_SYMBOL_GPL(sdhci_enable_clk); 2031 2032 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 2033 { 2034 u16 clk; 2035 2036 host->mmc->actual_clock = 0; 2037 2038 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 2039 2040 if (clock == 0) 2041 return; 2042 2043 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); 2044 sdhci_enable_clk(host, clk); 2045 } 2046 EXPORT_SYMBOL_GPL(sdhci_set_clock); 2047 2048 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, 2049 unsigned short vdd) 2050 { 2051 struct mmc_host *mmc = host->mmc; 2052 2053 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2054 2055 if (mode != MMC_POWER_OFF) 2056 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); 2057 else 2058 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2059 } 2060 2061 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, 2062 unsigned short vdd) 2063 { 2064 u8 pwr = 0; 2065 2066 if (mode != MMC_POWER_OFF) { 2067 switch (1 << vdd) { 2068 case 
MMC_VDD_165_195: 2069 /* 2070 * Without a regulator, SDHCI does not support 2.0v 2071 * so we only get here if the driver deliberately 2072 * added the 2.0v range to ocr_avail. Map it to 1.8v 2073 * for the purpose of turning on the power. 2074 */ 2075 case MMC_VDD_20_21: 2076 pwr = SDHCI_POWER_180; 2077 break; 2078 case MMC_VDD_29_30: 2079 case MMC_VDD_30_31: 2080 pwr = SDHCI_POWER_300; 2081 break; 2082 case MMC_VDD_32_33: 2083 case MMC_VDD_33_34: 2084 /* 2085 * 3.4 ~ 3.6V are valid only for those platforms where it's 2086 * known that the voltage range is supported by hardware. 2087 */ 2088 case MMC_VDD_34_35: 2089 case MMC_VDD_35_36: 2090 pwr = SDHCI_POWER_330; 2091 break; 2092 default: 2093 WARN(1, "%s: Invalid vdd %#x\n", 2094 mmc_hostname(host->mmc), vdd); 2095 break; 2096 } 2097 } 2098 2099 if (host->pwr == pwr) 2100 return; 2101 2102 host->pwr = pwr; 2103 2104 if (pwr == 0) { 2105 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2106 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2107 sdhci_runtime_pm_bus_off(host); 2108 } else { 2109 /* 2110 * Spec says that we should clear the power reg before setting 2111 * a new value. Some controllers don't seem to like this though. 2112 */ 2113 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 2114 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2115 2116 /* 2117 * At least the Marvell CaFe chip gets confused if we set the 2118 * voltage and turn on the power at the same time, so set the 2119 * voltage first. 2120 */ 2121 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) 2122 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2123 2124 pwr |= SDHCI_POWER_ON; 2125 2126 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2127 2128 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2129 sdhci_runtime_pm_bus_on(host); 2130 2131 /* 2132 * Some controllers need an extra 10ms delay before 2133 * they can apply clock after applying power 2134 */ 2135 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 2136 mdelay(10); 2137 } 2138 } 2139 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg); 2140 2141 void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 2142 unsigned short vdd) 2143 { 2144 if (IS_ERR(host->mmc->supply.vmmc)) 2145 sdhci_set_power_noreg(host, mode, vdd); 2146 else 2147 sdhci_set_power_reg(host, mode, vdd); 2148 } 2149 EXPORT_SYMBOL_GPL(sdhci_set_power); 2150 2151 /* 2152 * Some controllers need to configure a valid bus voltage on their power 2153 * register regardless of whether an external regulator is taking care of power 2154 * supply. This helper function takes care of it if set as the controller's 2155 * sdhci_ops.set_power callback.
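* As a rough illustration (the driver and ops-structure names below are hypothetical), a platform driver that wants this behaviour simply points its ops at the helper:
*
*	static const struct sdhci_ops foo_sdhci_ops = {
*		.set_clock = sdhci_set_clock,
*		.set_bus_width = sdhci_set_bus_width,
*		.set_power = sdhci_set_power_and_bus_voltage,
*	};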
2156 */ 2157 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, 2158 unsigned char mode, 2159 unsigned short vdd) 2160 { 2161 if (!IS_ERR(host->mmc->supply.vmmc)) { 2162 struct mmc_host *mmc = host->mmc; 2163 2164 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2165 } 2166 sdhci_set_power_noreg(host, mode, vdd); 2167 } 2168 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage); 2169 2170 /*****************************************************************************\ 2171 * * 2172 * MMC callbacks * 2173 * * 2174 \*****************************************************************************/ 2175 2176 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 2177 { 2178 struct sdhci_host *host = mmc_priv(mmc); 2179 struct mmc_command *cmd; 2180 unsigned long flags; 2181 bool present; 2182 2183 /* Firstly check card presence */ 2184 present = mmc->ops->get_cd(mmc); 2185 2186 spin_lock_irqsave(&host->lock, flags); 2187 2188 sdhci_led_activate(host); 2189 2190 if (sdhci_present_error(host, mrq->cmd, present)) 2191 goto out_finish; 2192 2193 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2194 2195 if (!sdhci_send_command_retry(host, cmd, flags)) 2196 goto out_finish; 2197 2198 spin_unlock_irqrestore(&host->lock, flags); 2199 2200 return; 2201 2202 out_finish: 2203 sdhci_finish_mrq(host, mrq); 2204 spin_unlock_irqrestore(&host->lock, flags); 2205 } 2206 EXPORT_SYMBOL_GPL(sdhci_request); 2207 2208 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq) 2209 { 2210 struct sdhci_host *host = mmc_priv(mmc); 2211 struct mmc_command *cmd; 2212 unsigned long flags; 2213 int ret = 0; 2214 2215 spin_lock_irqsave(&host->lock, flags); 2216 2217 if (sdhci_present_error(host, mrq->cmd, true)) { 2218 sdhci_finish_mrq(host, mrq); 2219 goto out_finish; 2220 } 2221 2222 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2223 2224 /* 2225 * The HSQ may send a command in interrupt context without polling 2226 * the busy signaling, which means we should return BUSY if controller 2227 * has not released inhibit bits to allow HSQ trying to send request 2228 * again in non-atomic context. So we should not finish this request 2229 * here. 
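Returning -EBUSY hands the request back untouched so that the submitter can retry it from non-atomic context later.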
2230 */ 2231 if (!sdhci_send_command(host, cmd)) 2232 ret = -EBUSY; 2233 else 2234 sdhci_led_activate(host); 2235 2236 out_finish: 2237 spin_unlock_irqrestore(&host->lock, flags); 2238 return ret; 2239 } 2240 EXPORT_SYMBOL_GPL(sdhci_request_atomic); 2241 2242 void sdhci_set_bus_width(struct sdhci_host *host, int width) 2243 { 2244 u8 ctrl; 2245 2246 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2247 if (width == MMC_BUS_WIDTH_8) { 2248 ctrl &= ~SDHCI_CTRL_4BITBUS; 2249 ctrl |= SDHCI_CTRL_8BITBUS; 2250 } else { 2251 if (host->mmc->caps & MMC_CAP_8_BIT_DATA) 2252 ctrl &= ~SDHCI_CTRL_8BITBUS; 2253 if (width == MMC_BUS_WIDTH_4) 2254 ctrl |= SDHCI_CTRL_4BITBUS; 2255 else 2256 ctrl &= ~SDHCI_CTRL_4BITBUS; 2257 } 2258 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2259 } 2260 EXPORT_SYMBOL_GPL(sdhci_set_bus_width); 2261 2262 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 2263 { 2264 u16 ctrl_2; 2265 2266 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2267 /* Select Bus Speed Mode for host */ 2268 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 2269 if ((timing == MMC_TIMING_MMC_HS200) || 2270 (timing == MMC_TIMING_UHS_SDR104)) 2271 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 2272 else if (timing == MMC_TIMING_UHS_SDR12) 2273 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 2274 else if (timing == MMC_TIMING_UHS_SDR25) 2275 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 2276 else if (timing == MMC_TIMING_UHS_SDR50) 2277 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 2278 else if ((timing == MMC_TIMING_UHS_DDR50) || 2279 (timing == MMC_TIMING_MMC_DDR52)) 2280 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 2281 else if (timing == MMC_TIMING_MMC_HS400) 2282 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ 2283 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2284 } 2285 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 2286 2287 static bool sdhci_timing_has_preset(unsigned char timing) 2288 { 2289 switch (timing) { 2290 case MMC_TIMING_UHS_SDR12: 2291 case MMC_TIMING_UHS_SDR25: 2292 case MMC_TIMING_UHS_SDR50: 2293 case MMC_TIMING_UHS_SDR104: 2294 case MMC_TIMING_UHS_DDR50: 2295 case MMC_TIMING_MMC_DDR52: 2296 return true; 2297 } 2298 return false; 2299 } 2300 2301 static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing) 2302 { 2303 return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && 2304 sdhci_timing_has_preset(timing); 2305 } 2306 2307 static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios) 2308 { 2309 /* 2310 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK 2311 * Frequency. Check if preset values need to be enabled, or the Driver 2312 * Strength needs updating. Note, clock changes are handled separately. 2313 */ 2314 return !host->preset_enabled && 2315 (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type); 2316 } 2317 2318 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 2319 { 2320 struct sdhci_host *host = mmc_priv(mmc); 2321 bool reinit_uhs = host->reinit_uhs; 2322 bool turning_on_clk = false; 2323 u8 ctrl; 2324 2325 host->reinit_uhs = false; 2326 2327 if (ios->power_mode == MMC_POWER_UNDEFINED) 2328 return; 2329 2330 if (host->flags & SDHCI_DEVICE_DEAD) { 2331 if (!IS_ERR(mmc->supply.vmmc) && 2332 ios->power_mode == MMC_POWER_OFF) 2333 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 2334 return; 2335 } 2336 2337 /* 2338 * Reset the chip on each power off. 2339 * Should clear out any weird states. 
2340 */ 2341 if (ios->power_mode == MMC_POWER_OFF) { 2342 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2343 sdhci_reinit(host); 2344 } 2345 2346 if (host->version >= SDHCI_SPEC_300 && 2347 (ios->power_mode == MMC_POWER_UP) && 2348 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) 2349 sdhci_enable_preset_value(host, false); 2350 2351 if (!ios->clock || ios->clock != host->clock) { 2352 turning_on_clk = ios->clock && !host->clock; 2353 2354 host->ops->set_clock(host, ios->clock); 2355 host->clock = ios->clock; 2356 2357 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && 2358 host->clock) { 2359 host->timeout_clk = mmc->actual_clock ? 2360 mmc->actual_clock / 1000 : 2361 host->clock / 1000; 2362 mmc->max_busy_timeout = 2363 host->ops->get_max_timeout_count ? 2364 host->ops->get_max_timeout_count(host) : 2365 1 << 27; 2366 mmc->max_busy_timeout /= host->timeout_clk; 2367 } 2368 } 2369 2370 if (host->ops->set_power) 2371 host->ops->set_power(host, ios->power_mode, ios->vdd); 2372 else 2373 sdhci_set_power(host, ios->power_mode, ios->vdd); 2374 2375 if (host->ops->platform_send_init_74_clocks) 2376 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 2377 2378 host->ops->set_bus_width(host, ios->bus_width); 2379 2380 /* 2381 * Special case to avoid multiple clock changes during voltage 2382 * switching. 2383 */ 2384 if (!reinit_uhs && 2385 turning_on_clk && 2386 host->timing == ios->timing && 2387 host->version >= SDHCI_SPEC_300 && 2388 !sdhci_presetable_values_change(host, ios)) 2389 return; 2390 2391 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2392 2393 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { 2394 if (ios->timing == MMC_TIMING_SD_HS || 2395 ios->timing == MMC_TIMING_MMC_HS || 2396 ios->timing == MMC_TIMING_MMC_HS400 || 2397 ios->timing == MMC_TIMING_MMC_HS200 || 2398 ios->timing == MMC_TIMING_MMC_DDR52 || 2399 ios->timing == MMC_TIMING_UHS_SDR50 || 2400 ios->timing == MMC_TIMING_UHS_SDR104 || 2401 ios->timing == MMC_TIMING_UHS_DDR50 || 2402 ios->timing == MMC_TIMING_UHS_SDR25) 2403 ctrl |= SDHCI_CTRL_HISPD; 2404 else 2405 ctrl &= ~SDHCI_CTRL_HISPD; 2406 } 2407 2408 if (host->version >= SDHCI_SPEC_300) { 2409 u16 clk, ctrl_2; 2410 2411 /* 2412 * According to SDHCI Spec v3.00, if the Preset Value 2413 * Enable in the Host Control 2 register is set, we 2414 * need to reset SD Clock Enable before changing High 2415 * Speed Enable to avoid generating clock glitches. 2416 */ 2417 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2418 if (clk & SDHCI_CLOCK_CARD_EN) { 2419 clk &= ~SDHCI_CLOCK_CARD_EN; 2420 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2421 } 2422 2423 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2424 2425 if (!host->preset_enabled) { 2426 /* 2427 * We only need to set Driver Strength if the 2428 * preset value enable is not set. 
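When the preset value enable bit is set, the controller takes the driver strength from the preset value register instead (see the SDHCI_PRESET_DRV_MASK handling below).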
*/ 2430 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2431 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; 2432 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) 2433 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; 2434 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B) 2435 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2436 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) 2437 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; 2438 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D) 2439 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D; 2440 else { 2441 pr_warn("%s: invalid driver type, default to driver type B\n", 2442 mmc_hostname(mmc)); 2443 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2444 } 2445 2446 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2447 host->drv_type = ios->drv_type; 2448 } 2449 2450 host->ops->set_uhs_signaling(host, ios->timing); 2451 host->timing = ios->timing; 2452 2453 if (sdhci_preset_needed(host, ios->timing)) { 2454 u16 preset; 2455 2456 sdhci_enable_preset_value(host, true); 2457 preset = sdhci_get_preset_value(host); 2458 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK, 2459 preset); 2460 host->drv_type = ios->drv_type; 2461 } 2462 2463 /* Re-enable SD Clock */ 2464 host->ops->set_clock(host, host->clock); 2465 } else 2466 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2467 } 2468 EXPORT_SYMBOL_GPL(sdhci_set_ios); 2469 2470 static int sdhci_get_cd(struct mmc_host *mmc) 2471 { 2472 struct sdhci_host *host = mmc_priv(mmc); 2473 int gpio_cd = mmc_gpio_get_cd(mmc); 2474 2475 if (host->flags & SDHCI_DEVICE_DEAD) 2476 return 0; 2477 2478 /* If nonremovable, assume that the card is always present. */ 2479 if (!mmc_card_is_removable(mmc)) 2480 return 1; 2481 2482 /* 2483 * Try slot gpio detect; if defined it takes precedence 2484 * over the built-in controller functionality 2485 */ 2486 if (gpio_cd >= 0) 2487 return !!gpio_cd; 2488 2489 /* If polling, assume that the card is always present.
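With SDHCI_QUIRK_BROKEN_CARD_DETECTION the present-state register cannot be trusted, so the core is expected to poll for card changes instead.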
*/ 2490 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2491 return 1; 2492 2493 /* Host native card detect */ 2494 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2495 } 2496 2497 int sdhci_get_cd_nogpio(struct mmc_host *mmc) 2498 { 2499 struct sdhci_host *host = mmc_priv(mmc); 2500 unsigned long flags; 2501 int ret = 0; 2502 2503 spin_lock_irqsave(&host->lock, flags); 2504 2505 if (host->flags & SDHCI_DEVICE_DEAD) 2506 goto out; 2507 2508 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2509 out: 2510 spin_unlock_irqrestore(&host->lock, flags); 2511 2512 return ret; 2513 } 2514 EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio); 2515 2516 int sdhci_get_ro(struct mmc_host *mmc) 2517 { 2518 struct sdhci_host *host = mmc_priv(mmc); 2519 bool allow_invert = false; 2520 int is_readonly; 2521 2522 if (host->flags & SDHCI_DEVICE_DEAD) { 2523 is_readonly = 0; 2524 } else if (host->ops->get_ro) { 2525 is_readonly = host->ops->get_ro(host); 2526 } else if (mmc_can_gpio_ro(mmc)) { 2527 is_readonly = mmc_gpio_get_ro(mmc); 2528 /* Do not invert twice */ 2529 allow_invert = !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); 2530 } else { 2531 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) 2532 & SDHCI_WRITE_PROTECT); 2533 allow_invert = true; 2534 } 2535 2536 if (is_readonly >= 0 && 2537 allow_invert && 2538 (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)) 2539 is_readonly = !is_readonly; 2540 2541 return is_readonly; 2542 } 2543 EXPORT_SYMBOL_GPL(sdhci_get_ro); 2544 2545 static void sdhci_hw_reset(struct mmc_host *mmc) 2546 { 2547 struct sdhci_host *host = mmc_priv(mmc); 2548 2549 if (host->ops && host->ops->hw_reset) 2550 host->ops->hw_reset(host); 2551 } 2552 2553 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 2554 { 2555 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 2556 if (enable) 2557 host->ier |= SDHCI_INT_CARD_INT; 2558 else 2559 host->ier &= ~SDHCI_INT_CARD_INT; 2560 2561 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2562 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2563 } 2564 } 2565 2566 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2567 { 2568 struct sdhci_host *host = mmc_priv(mmc); 2569 unsigned long flags; 2570 2571 if (enable) 2572 pm_runtime_get_noresume(mmc_dev(mmc)); 2573 2574 spin_lock_irqsave(&host->lock, flags); 2575 sdhci_enable_sdio_irq_nolock(host, enable); 2576 spin_unlock_irqrestore(&host->lock, flags); 2577 2578 if (!enable) 2579 pm_runtime_put_noidle(mmc_dev(mmc)); 2580 } 2581 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2582 2583 static void sdhci_ack_sdio_irq(struct mmc_host *mmc) 2584 { 2585 struct sdhci_host *host = mmc_priv(mmc); 2586 unsigned long flags; 2587 2588 spin_lock_irqsave(&host->lock, flags); 2589 sdhci_enable_sdio_irq_nolock(host, true); 2590 spin_unlock_irqrestore(&host->lock, flags); 2591 } 2592 2593 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2594 struct mmc_ios *ios) 2595 { 2596 struct sdhci_host *host = mmc_priv(mmc); 2597 u16 ctrl; 2598 int ret; 2599 2600 /* 2601 * Signal Voltage Switching is only applicable for Host Controllers 2602 * v3.00 and above. 
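Controllers below v3.00 have no Host Control 2 register and use fixed 3.3V signalling, so there is nothing to switch.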
2603 */ 2604 if (host->version < SDHCI_SPEC_300) 2605 return 0; 2606 2607 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2608 2609 switch (ios->signal_voltage) { 2610 case MMC_SIGNAL_VOLTAGE_330: 2611 if (!(host->flags & SDHCI_SIGNALING_330)) 2612 return -EINVAL; 2613 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 2614 ctrl &= ~SDHCI_CTRL_VDD_180; 2615 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2616 2617 if (!IS_ERR(mmc->supply.vqmmc)) { 2618 ret = mmc_regulator_set_vqmmc(mmc, ios); 2619 if (ret < 0) { 2620 pr_warn("%s: Switching to 3.3V signalling voltage failed\n", 2621 mmc_hostname(mmc)); 2622 return -EIO; 2623 } 2624 } 2625 /* Wait for 5ms */ 2626 usleep_range(5000, 5500); 2627 2628 /* 3.3V regulator output should be stable within 5 ms */ 2629 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2630 if (!(ctrl & SDHCI_CTRL_VDD_180)) 2631 return 0; 2632 2633 pr_warn("%s: 3.3V regulator output did not become stable\n", 2634 mmc_hostname(mmc)); 2635 2636 return -EAGAIN; 2637 case MMC_SIGNAL_VOLTAGE_180: 2638 if (!(host->flags & SDHCI_SIGNALING_180)) 2639 return -EINVAL; 2640 if (!IS_ERR(mmc->supply.vqmmc)) { 2641 ret = mmc_regulator_set_vqmmc(mmc, ios); 2642 if (ret < 0) { 2643 pr_warn("%s: Switching to 1.8V signalling voltage failed\n", 2644 mmc_hostname(mmc)); 2645 return -EIO; 2646 } 2647 } 2648 2649 /* 2650 * Enable 1.8V Signal Enable in the Host Control2 2651 * register 2652 */ 2653 ctrl |= SDHCI_CTRL_VDD_180; 2654 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2655 2656 /* Some controller need to do more when switching */ 2657 if (host->ops->voltage_switch) 2658 host->ops->voltage_switch(host); 2659 2660 /* 1.8V regulator output should be stable within 5 ms */ 2661 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2662 if (ctrl & SDHCI_CTRL_VDD_180) 2663 return 0; 2664 2665 pr_warn("%s: 1.8V regulator output did not become stable\n", 2666 mmc_hostname(mmc)); 2667 2668 return -EAGAIN; 2669 case MMC_SIGNAL_VOLTAGE_120: 2670 if (!(host->flags & SDHCI_SIGNALING_120)) 2671 return -EINVAL; 2672 if (!IS_ERR(mmc->supply.vqmmc)) { 2673 ret = mmc_regulator_set_vqmmc(mmc, ios); 2674 if (ret < 0) { 2675 pr_warn("%s: Switching to 1.2V signalling voltage failed\n", 2676 mmc_hostname(mmc)); 2677 return -EIO; 2678 } 2679 } 2680 return 0; 2681 default: 2682 /* No signal voltage switch required */ 2683 return 0; 2684 } 2685 } 2686 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch); 2687 2688 static int sdhci_card_busy(struct mmc_host *mmc) 2689 { 2690 struct sdhci_host *host = mmc_priv(mmc); 2691 u32 present_state; 2692 2693 /* Check whether DAT[0] is 0 */ 2694 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 2695 2696 return !(present_state & SDHCI_DATA_0_LVL_MASK); 2697 } 2698 2699 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 2700 { 2701 struct sdhci_host *host = mmc_priv(mmc); 2702 unsigned long flags; 2703 2704 spin_lock_irqsave(&host->lock, flags); 2705 host->flags |= SDHCI_HS400_TUNING; 2706 spin_unlock_irqrestore(&host->lock, flags); 2707 2708 return 0; 2709 } 2710 2711 void sdhci_start_tuning(struct sdhci_host *host) 2712 { 2713 u16 ctrl; 2714 2715 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2716 ctrl |= SDHCI_CTRL_EXEC_TUNING; 2717 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND) 2718 ctrl |= SDHCI_CTRL_TUNED_CLK; 2719 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2720 2721 /* 2722 * As per the Host Controller spec v3.00, tuning command 2723 * generates Buffer Read Ready interrupt, so enable that. 
2724 * 2725 * Note: The spec clearly says that when tuning sequence 2726 * is being performed, the controller does not generate 2727 * interrupts other than Buffer Read Ready interrupt. But 2728 * to make sure we don't hit a controller bug, we _only_ 2729 * enable Buffer Read Ready interrupt here. 2730 */ 2731 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2732 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2733 } 2734 EXPORT_SYMBOL_GPL(sdhci_start_tuning); 2735 2736 void sdhci_end_tuning(struct sdhci_host *host) 2737 { 2738 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2739 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2740 } 2741 EXPORT_SYMBOL_GPL(sdhci_end_tuning); 2742 2743 void sdhci_reset_tuning(struct sdhci_host *host) 2744 { 2745 u16 ctrl; 2746 2747 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2748 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2749 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2750 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2751 } 2752 EXPORT_SYMBOL_GPL(sdhci_reset_tuning); 2753 2754 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2755 { 2756 sdhci_reset_tuning(host); 2757 2758 sdhci_reset_for(host, TUNING_ABORT); 2759 2760 sdhci_end_tuning(host); 2761 2762 mmc_send_abort_tuning(host->mmc, opcode); 2763 } 2764 EXPORT_SYMBOL_GPL(sdhci_abort_tuning); 2765 2766 /* 2767 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2768 * tuning command does not have a data payload (or rather the hardware does it 2769 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2770 * interrupt setup is different to other commands and there is no timeout 2771 * interrupt so special handling is needed. 2772 */ 2773 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2774 { 2775 struct mmc_host *mmc = host->mmc; 2776 struct mmc_command cmd = {}; 2777 struct mmc_request mrq = {}; 2778 unsigned long flags; 2779 u32 b = host->sdma_boundary; 2780 2781 spin_lock_irqsave(&host->lock, flags); 2782 2783 cmd.opcode = opcode; 2784 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2785 cmd.mrq = &mrq; 2786 2787 mrq.cmd = &cmd; 2788 /* 2789 * In response to CMD19, the card sends 64 bytes of tuning 2790 * block to the Host Controller. So we set the block size 2791 * to 64 here. 2792 */ 2793 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2794 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2795 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2796 else 2797 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2798 2799 /* 2800 * The tuning block is sent by the card to the host controller. 2801 * So we set the TRNS_READ bit in the Transfer Mode register. 2802 * This also takes care of setting DMA Enable and Multi Block 2803 * Select in the same register to 0. 
2804 */ 2805 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2806 2807 if (!sdhci_send_command_retry(host, &cmd, flags)) { 2808 spin_unlock_irqrestore(&host->lock, flags); 2809 host->tuning_done = 0; 2810 return; 2811 } 2812 2813 host->cmd = NULL; 2814 2815 sdhci_del_timer(host, &mrq); 2816 2817 host->tuning_done = 0; 2818 2819 spin_unlock_irqrestore(&host->lock, flags); 2820 2821 /* Wait for Buffer Read Ready interrupt */ 2822 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2823 msecs_to_jiffies(50)); 2824 2825 } 2826 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2827 2828 int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2829 { 2830 int i; 2831 2832 /* 2833 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2834 * of loops reaches tuning loop count. 2835 */ 2836 for (i = 0; i < host->tuning_loop_count; i++) { 2837 u16 ctrl; 2838 2839 sdhci_send_tuning(host, opcode); 2840 2841 if (!host->tuning_done) { 2842 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", 2843 mmc_hostname(host->mmc)); 2844 sdhci_abort_tuning(host, opcode); 2845 return -ETIMEDOUT; 2846 } 2847 2848 /* Spec does not require a delay between tuning cycles */ 2849 if (host->tuning_delay > 0) 2850 mdelay(host->tuning_delay); 2851 2852 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2853 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2854 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2855 return 0; /* Success! */ 2856 break; 2857 } 2858 2859 } 2860 2861 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2862 mmc_hostname(host->mmc)); 2863 sdhci_reset_tuning(host); 2864 return -EAGAIN; 2865 } 2866 EXPORT_SYMBOL_GPL(__sdhci_execute_tuning); 2867 2868 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2869 { 2870 struct sdhci_host *host = mmc_priv(mmc); 2871 int err = 0; 2872 unsigned int tuning_count = 0; 2873 bool hs400_tuning; 2874 2875 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2876 2877 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2878 tuning_count = host->tuning_count; 2879 2880 /* 2881 * The Host Controller needs tuning in case of SDR104 and DDR50 2882 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2883 * the Capabilities register. 2884 * If the Host Controller supports the HS200 mode then the 2885 * tuning function has to be executed. 2886 */ 2887 switch (host->timing) { 2888 /* HS400 tuning is done in HS200 mode */ 2889 case MMC_TIMING_MMC_HS400: 2890 err = -EINVAL; 2891 goto out; 2892 2893 case MMC_TIMING_MMC_HS200: 2894 /* 2895 * Periodic re-tuning for HS400 is not expected to be needed, so 2896 * disable it here. 
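A tuning_count of 0 leaves mmc->retune_period at 0 below, so the periodic re-tune timer is never armed.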
2897 */ 2898 if (hs400_tuning) 2899 tuning_count = 0; 2900 break; 2901 2902 case MMC_TIMING_UHS_SDR104: 2903 case MMC_TIMING_UHS_DDR50: 2904 break; 2905 2906 case MMC_TIMING_UHS_SDR50: 2907 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2908 break; 2909 fallthrough; 2910 2911 default: 2912 goto out; 2913 } 2914 2915 if (host->ops->platform_execute_tuning) { 2916 err = host->ops->platform_execute_tuning(host, opcode); 2917 goto out; 2918 } 2919 2920 mmc->retune_period = tuning_count; 2921 2922 if (host->tuning_delay < 0) 2923 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2924 2925 sdhci_start_tuning(host); 2926 2927 host->tuning_err = __sdhci_execute_tuning(host, opcode); 2928 2929 sdhci_end_tuning(host); 2930 out: 2931 host->flags &= ~SDHCI_HS400_TUNING; 2932 2933 return err; 2934 } 2935 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2936 2937 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2938 { 2939 /* Host Controller v3.00 defines preset value registers */ 2940 if (host->version < SDHCI_SPEC_300) 2941 return; 2942 2943 /* 2944 * We only enable or disable Preset Value if they are not already 2945 * enabled or disabled respectively. Otherwise, we bail out. 2946 */ 2947 if (host->preset_enabled != enable) { 2948 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2949 2950 if (enable) 2951 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2952 else 2953 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2954 2955 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2956 2957 if (enable) 2958 host->flags |= SDHCI_PV_ENABLED; 2959 else 2960 host->flags &= ~SDHCI_PV_ENABLED; 2961 2962 host->preset_enabled = enable; 2963 } 2964 } 2965 2966 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 2967 int err) 2968 { 2969 struct mmc_data *data = mrq->data; 2970 2971 if (data->host_cookie != COOKIE_UNMAPPED) 2972 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, 2973 mmc_get_dma_dir(data)); 2974 2975 data->host_cookie = COOKIE_UNMAPPED; 2976 } 2977 2978 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 2979 { 2980 struct sdhci_host *host = mmc_priv(mmc); 2981 2982 mrq->data->host_cookie = COOKIE_UNMAPPED; 2983 2984 /* 2985 * No pre-mapping in the pre hook if we're using the bounce buffer, 2986 * for that we would need two bounce buffers since one buffer is 2987 * in flight when this is getting called. 
2988 */ 2989 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 2990 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 2991 } 2992 2993 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 2994 { 2995 if (host->data_cmd) { 2996 host->data_cmd->error = err; 2997 sdhci_finish_mrq(host, host->data_cmd->mrq); 2998 } 2999 3000 if (host->cmd) { 3001 host->cmd->error = err; 3002 sdhci_finish_mrq(host, host->cmd->mrq); 3003 } 3004 } 3005 3006 static void sdhci_card_event(struct mmc_host *mmc) 3007 { 3008 struct sdhci_host *host = mmc_priv(mmc); 3009 unsigned long flags; 3010 int present; 3011 3012 /* First check if client has provided their own card event */ 3013 if (host->ops->card_event) 3014 host->ops->card_event(host); 3015 3016 present = mmc->ops->get_cd(mmc); 3017 3018 spin_lock_irqsave(&host->lock, flags); 3019 3020 /* Check sdhci_has_requests() first in case we are runtime suspended */ 3021 if (sdhci_has_requests(host) && !present) { 3022 pr_err("%s: Card removed during transfer!\n", 3023 mmc_hostname(mmc)); 3024 pr_err("%s: Resetting controller.\n", 3025 mmc_hostname(mmc)); 3026 3027 sdhci_reset_for(host, CARD_REMOVED); 3028 3029 sdhci_error_out_mrqs(host, -ENOMEDIUM); 3030 } 3031 3032 spin_unlock_irqrestore(&host->lock, flags); 3033 } 3034 3035 static const struct mmc_host_ops sdhci_ops = { 3036 .request = sdhci_request, 3037 .post_req = sdhci_post_req, 3038 .pre_req = sdhci_pre_req, 3039 .set_ios = sdhci_set_ios, 3040 .get_cd = sdhci_get_cd, 3041 .get_ro = sdhci_get_ro, 3042 .card_hw_reset = sdhci_hw_reset, 3043 .enable_sdio_irq = sdhci_enable_sdio_irq, 3044 .ack_sdio_irq = sdhci_ack_sdio_irq, 3045 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 3046 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 3047 .execute_tuning = sdhci_execute_tuning, 3048 .card_event = sdhci_card_event, 3049 .card_busy = sdhci_card_busy, 3050 }; 3051 3052 /*****************************************************************************\ 3053 * * 3054 * Request done * 3055 * * 3056 \*****************************************************************************/ 3057 3058 static bool sdhci_request_done(struct sdhci_host *host) 3059 { 3060 unsigned long flags; 3061 struct mmc_request *mrq; 3062 int i; 3063 3064 spin_lock_irqsave(&host->lock, flags); 3065 3066 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3067 mrq = host->mrqs_done[i]; 3068 if (mrq) 3069 break; 3070 } 3071 3072 if (!mrq) { 3073 spin_unlock_irqrestore(&host->lock, flags); 3074 return true; 3075 } 3076 3077 /* 3078 * The controller needs a reset of internal state machines 3079 * upon error conditions. 3080 */ 3081 if (sdhci_needs_reset(host, mrq)) { 3082 /* 3083 * Do not finish until command and data lines are available for 3084 * reset. Note there can only be one other mrq, so it cannot 3085 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 3086 * would both be null. 3087 */ 3088 if (host->cmd || host->data_cmd) { 3089 spin_unlock_irqrestore(&host->lock, flags); 3090 return true; 3091 } 3092 3093 /* Some controllers need this kick or reset won't work here */ 3094 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 3095 /* This is to force an update */ 3096 host->ops->set_clock(host, host->clock); 3097 3098 sdhci_reset_for(host, REQUEST_ERROR); 3099 3100 host->pending_reset = false; 3101 } 3102 3103 /* 3104 * Always unmap the data buffers if they were mapped by 3105 * sdhci_prepare_data() whenever we finish with a request. 3106 * This avoids leaking DMA mappings on error. 
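Buffers that were pre-mapped in sdhci_pre_req() (COOKIE_PRE_MAPPED) are left alone here; they are unmapped later in sdhci_post_req().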
3107 */ 3108 if (host->flags & SDHCI_REQ_USE_DMA) { 3109 struct mmc_data *data = mrq->data; 3110 3111 if (host->use_external_dma && data && 3112 (mrq->cmd->error || data->error)) { 3113 struct dma_chan *chan = sdhci_external_dma_channel(host, data); 3114 3115 host->mrqs_done[i] = NULL; 3116 spin_unlock_irqrestore(&host->lock, flags); 3117 dmaengine_terminate_sync(chan); 3118 spin_lock_irqsave(&host->lock, flags); 3119 sdhci_set_mrq_done(host, mrq); 3120 } 3121 3122 if (data && data->host_cookie == COOKIE_MAPPED) { 3123 if (host->bounce_buffer) { 3124 /* 3125 * On reads, copy the bounced data into the 3126 * sglist 3127 */ 3128 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 3129 unsigned int length = data->bytes_xfered; 3130 3131 if (length > host->bounce_buffer_size) { 3132 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 3133 mmc_hostname(host->mmc), 3134 host->bounce_buffer_size, 3135 data->bytes_xfered); 3136 /* Cap it down and continue */ 3137 length = host->bounce_buffer_size; 3138 } 3139 dma_sync_single_for_cpu( 3140 mmc_dev(host->mmc), 3141 host->bounce_addr, 3142 host->bounce_buffer_size, 3143 DMA_FROM_DEVICE); 3144 sg_copy_from_buffer(data->sg, 3145 data->sg_len, 3146 host->bounce_buffer, 3147 length); 3148 } else { 3149 /* No copying, just switch ownership */ 3150 dma_sync_single_for_cpu( 3151 mmc_dev(host->mmc), 3152 host->bounce_addr, 3153 host->bounce_buffer_size, 3154 mmc_get_dma_dir(data)); 3155 } 3156 } else { 3157 /* Unmap the raw data */ 3158 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 3159 data->sg_len, 3160 mmc_get_dma_dir(data)); 3161 } 3162 data->host_cookie = COOKIE_UNMAPPED; 3163 } 3164 } 3165 3166 host->mrqs_done[i] = NULL; 3167 3168 spin_unlock_irqrestore(&host->lock, flags); 3169 3170 if (host->ops->request_done) 3171 host->ops->request_done(host, mrq); 3172 else 3173 mmc_request_done(host->mmc, mrq); 3174 3175 return false; 3176 } 3177 3178 static void sdhci_complete_work(struct work_struct *work) 3179 { 3180 struct sdhci_host *host = container_of(work, struct sdhci_host, 3181 complete_work); 3182 3183 while (!sdhci_request_done(host)) 3184 ; 3185 } 3186 3187 static void sdhci_timeout_timer(struct timer_list *t) 3188 { 3189 struct sdhci_host *host; 3190 unsigned long flags; 3191 3192 host = from_timer(host, t, timer); 3193 3194 spin_lock_irqsave(&host->lock, flags); 3195 3196 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 3197 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 3198 mmc_hostname(host->mmc)); 3199 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3200 sdhci_dumpregs(host); 3201 3202 host->cmd->error = -ETIMEDOUT; 3203 sdhci_finish_mrq(host, host->cmd->mrq); 3204 } 3205 3206 spin_unlock_irqrestore(&host->lock, flags); 3207 } 3208 3209 static void sdhci_timeout_data_timer(struct timer_list *t) 3210 { 3211 struct sdhci_host *host; 3212 unsigned long flags; 3213 3214 host = from_timer(host, t, data_timer); 3215 3216 spin_lock_irqsave(&host->lock, flags); 3217 3218 if (host->data || host->data_cmd || 3219 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 3220 pr_err("%s: Timeout waiting for hardware interrupt.\n", 3221 mmc_hostname(host->mmc)); 3222 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3223 sdhci_dumpregs(host); 3224 3225 if (host->data) { 3226 host->data->error = -ETIMEDOUT; 3227 __sdhci_finish_data(host, true); 3228 queue_work(host->complete_wq, &host->complete_work); 3229 } else if (host->data_cmd) { 3230 host->data_cmd->error = -ETIMEDOUT; 3231 sdhci_finish_mrq(host, host->data_cmd->mrq); 3232 } else { 
3233 host->cmd->error = -ETIMEDOUT; 3234 sdhci_finish_mrq(host, host->cmd->mrq); 3235 } 3236 } 3237 3238 spin_unlock_irqrestore(&host->lock, flags); 3239 } 3240 3241 /*****************************************************************************\ 3242 * * 3243 * Interrupt handling * 3244 * * 3245 \*****************************************************************************/ 3246 3247 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) 3248 { 3249 /* Handle auto-CMD12 error */ 3250 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { 3251 struct mmc_request *mrq = host->data_cmd->mrq; 3252 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3253 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 3254 SDHCI_INT_DATA_TIMEOUT : 3255 SDHCI_INT_DATA_CRC; 3256 3257 /* Treat auto-CMD12 error the same as data error */ 3258 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { 3259 *intmask_p |= data_err_bit; 3260 return; 3261 } 3262 } 3263 3264 if (!host->cmd) { 3265 /* 3266 * SDHCI recovers from errors by resetting the cmd and data 3267 * circuits. Until that is done, there very well might be more 3268 * interrupts, so ignore them in that case. 3269 */ 3270 if (host->pending_reset) 3271 return; 3272 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 3273 mmc_hostname(host->mmc), (unsigned)intmask); 3274 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3275 sdhci_dumpregs(host); 3276 return; 3277 } 3278 3279 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 3280 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 3281 if (intmask & SDHCI_INT_TIMEOUT) { 3282 host->cmd->error = -ETIMEDOUT; 3283 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3284 } else { 3285 host->cmd->error = -EILSEQ; 3286 if (!mmc_op_tuning(host->cmd->opcode)) 3287 sdhci_err_stats_inc(host, CMD_CRC); 3288 } 3289 /* Treat data command CRC error the same as data CRC error */ 3290 if (host->cmd->data && 3291 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 3292 SDHCI_INT_CRC) { 3293 host->cmd = NULL; 3294 *intmask_p |= SDHCI_INT_DATA_CRC; 3295 return; 3296 } 3297 3298 __sdhci_finish_mrq(host, host->cmd->mrq); 3299 return; 3300 } 3301 3302 /* Handle auto-CMD23 error */ 3303 if (intmask & SDHCI_INT_AUTO_CMD_ERR) { 3304 struct mmc_request *mrq = host->cmd->mrq; 3305 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3306 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 
3307 -ETIMEDOUT : 3308 -EILSEQ; 3309 3310 sdhci_err_stats_inc(host, AUTO_CMD); 3311 3312 if (sdhci_auto_cmd23(host, mrq)) { 3313 mrq->sbc->error = err; 3314 __sdhci_finish_mrq(host, mrq); 3315 return; 3316 } 3317 } 3318 3319 if (intmask & SDHCI_INT_RESPONSE) 3320 sdhci_finish_command(host); 3321 } 3322 3323 static void sdhci_adma_show_error(struct sdhci_host *host) 3324 { 3325 void *desc = host->adma_table; 3326 dma_addr_t dma = host->adma_addr; 3327 3328 sdhci_dumpregs(host); 3329 3330 while (true) { 3331 struct sdhci_adma2_64_desc *dma_desc = desc; 3332 3333 if (host->flags & SDHCI_USE_64_BIT_DMA) 3334 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 3335 (unsigned long long)dma, 3336 le32_to_cpu(dma_desc->addr_hi), 3337 le32_to_cpu(dma_desc->addr_lo), 3338 le16_to_cpu(dma_desc->len), 3339 le16_to_cpu(dma_desc->cmd)); 3340 else 3341 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 3342 (unsigned long long)dma, 3343 le32_to_cpu(dma_desc->addr_lo), 3344 le16_to_cpu(dma_desc->len), 3345 le16_to_cpu(dma_desc->cmd)); 3346 3347 desc += host->desc_sz; 3348 dma += host->desc_sz; 3349 3350 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 3351 break; 3352 } 3353 } 3354 3355 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 3356 { 3357 /* 3358 * CMD19 generates _only_ Buffer Read Ready interrupt if 3359 * use sdhci_send_tuning. 3360 * Need to exclude this case: PIO mode and use mmc_send_tuning, 3361 * If not, sdhci_transfer_pio will never be called, make the 3362 * SDHCI_INT_DATA_AVAIL always there, stuck in irq storm. 3363 */ 3364 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) { 3365 if (mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) { 3366 host->tuning_done = 1; 3367 wake_up(&host->buf_ready_int); 3368 return; 3369 } 3370 } 3371 3372 if (!host->data) { 3373 struct mmc_command *data_cmd = host->data_cmd; 3374 3375 /* 3376 * The "data complete" interrupt is also used to 3377 * indicate that a busy state has ended. See comment 3378 * above in sdhci_cmd_irq(). 3379 */ 3380 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 3381 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3382 host->data_cmd = NULL; 3383 data_cmd->error = -ETIMEDOUT; 3384 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3385 __sdhci_finish_mrq(host, data_cmd->mrq); 3386 return; 3387 } 3388 if (intmask & SDHCI_INT_DATA_END) { 3389 host->data_cmd = NULL; 3390 /* 3391 * Some cards handle busy-end interrupt 3392 * before the command completed, so make 3393 * sure we do things in the proper order. 3394 */ 3395 if (host->cmd == data_cmd) 3396 return; 3397 3398 __sdhci_finish_mrq(host, data_cmd->mrq); 3399 return; 3400 } 3401 } 3402 3403 /* 3404 * SDHCI recovers from errors by resetting the cmd and data 3405 * circuits. Until that is done, there very well might be more 3406 * interrupts, so ignore them in that case. 
3407 */ 3408 if (host->pending_reset) 3409 return; 3410 3411 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 3412 mmc_hostname(host->mmc), (unsigned)intmask); 3413 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3414 sdhci_dumpregs(host); 3415 3416 return; 3417 } 3418 3419 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3420 host->data->error = -ETIMEDOUT; 3421 sdhci_err_stats_inc(host, DAT_TIMEOUT); 3422 } else if (intmask & SDHCI_INT_DATA_END_BIT) { 3423 host->data->error = -EILSEQ; 3424 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3425 sdhci_err_stats_inc(host, DAT_CRC); 3426 } else if ((intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) && 3427 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 3428 != MMC_BUS_TEST_R) { 3429 host->data->error = -EILSEQ; 3430 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3431 sdhci_err_stats_inc(host, DAT_CRC); 3432 if (intmask & SDHCI_INT_TUNING_ERROR) { 3433 u16 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 3434 3435 ctrl2 &= ~SDHCI_CTRL_TUNED_CLK; 3436 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 3437 } 3438 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 3439 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), 3440 intmask); 3441 sdhci_adma_show_error(host); 3442 sdhci_err_stats_inc(host, ADMA); 3443 host->data->error = -EIO; 3444 if (host->ops->adma_workaround) 3445 host->ops->adma_workaround(host, intmask); 3446 } 3447 3448 if (host->data->error) 3449 sdhci_finish_data(host); 3450 else { 3451 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 3452 sdhci_transfer_pio(host); 3453 3454 /* 3455 * We currently don't do anything fancy with DMA 3456 * boundaries, but as we can't disable the feature 3457 * we need to at least restart the transfer. 3458 * 3459 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 3460 * should return a valid address to continue from, but as 3461 * some controllers are faulty, don't trust them. 3462 */ 3463 if (intmask & SDHCI_INT_DMA_END) { 3464 dma_addr_t dmastart, dmanow; 3465 3466 dmastart = sdhci_sdma_address(host); 3467 dmanow = dmastart + host->data->bytes_xfered; 3468 /* 3469 * Force update to the next DMA block boundary. 3470 */ 3471 dmanow = (dmanow & 3472 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 3473 SDHCI_DEFAULT_BOUNDARY_SIZE; 3474 host->data->bytes_xfered = dmanow - dmastart; 3475 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", 3476 &dmastart, host->data->bytes_xfered, &dmanow); 3477 sdhci_set_sdma_addr(host, dmanow); 3478 } 3479 3480 if (intmask & SDHCI_INT_DATA_END) { 3481 if (host->cmd == host->data_cmd) { 3482 /* 3483 * Data managed to finish before the 3484 * command completed. Make sure we do 3485 * things in the proper order. 
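data_early is picked up again in sdhci_finish_command(), which calls sdhci_finish_data() once the command response has been handled.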
3486 */ 3487 host->data_early = 1; 3488 } else { 3489 sdhci_finish_data(host); 3490 } 3491 } 3492 } 3493 } 3494 3495 static inline bool sdhci_defer_done(struct sdhci_host *host, 3496 struct mmc_request *mrq) 3497 { 3498 struct mmc_data *data = mrq->data; 3499 3500 return host->pending_reset || host->always_defer_done || 3501 ((host->flags & SDHCI_REQ_USE_DMA) && data && 3502 data->host_cookie == COOKIE_MAPPED); 3503 } 3504 3505 static irqreturn_t sdhci_irq(int irq, void *dev_id) 3506 { 3507 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0}; 3508 irqreturn_t result = IRQ_NONE; 3509 struct sdhci_host *host = dev_id; 3510 u32 intmask, mask, unexpected = 0; 3511 int max_loops = 16; 3512 int i; 3513 3514 spin_lock(&host->lock); 3515 3516 if (host->runtime_suspended) { 3517 spin_unlock(&host->lock); 3518 return IRQ_NONE; 3519 } 3520 3521 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3522 if (!intmask || intmask == 0xffffffff) { 3523 result = IRQ_NONE; 3524 goto out; 3525 } 3526 3527 do { 3528 DBG("IRQ status 0x%08x\n", intmask); 3529 3530 if (host->ops->irq) { 3531 intmask = host->ops->irq(host, intmask); 3532 if (!intmask) 3533 goto cont; 3534 } 3535 3536 /* Clear selected interrupts. */ 3537 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3538 SDHCI_INT_BUS_POWER); 3539 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3540 3541 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3542 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 3543 SDHCI_CARD_PRESENT; 3544 3545 /* 3546 * There is a observation on i.mx esdhc. INSERT 3547 * bit will be immediately set again when it gets 3548 * cleared, if a card is inserted. We have to mask 3549 * the irq to prevent interrupt storm which will 3550 * freeze the system. And the REMOVE gets the 3551 * same situation. 3552 * 3553 * More testing are needed here to ensure it works 3554 * for other platforms though. 3555 */ 3556 host->ier &= ~(SDHCI_INT_CARD_INSERT | 3557 SDHCI_INT_CARD_REMOVE); 3558 host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : 3559 SDHCI_INT_CARD_INSERT; 3560 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3561 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3562 3563 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 3564 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 3565 3566 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 3567 SDHCI_INT_CARD_REMOVE); 3568 result = IRQ_WAKE_THREAD; 3569 } 3570 3571 if (intmask & SDHCI_INT_CMD_MASK) 3572 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); 3573 3574 if (intmask & SDHCI_INT_DATA_MASK) 3575 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 3576 3577 if (intmask & SDHCI_INT_BUS_POWER) 3578 pr_err("%s: Card is consuming too much power!\n", 3579 mmc_hostname(host->mmc)); 3580 3581 if (intmask & SDHCI_INT_RETUNE) 3582 mmc_retune_needed(host->mmc); 3583 3584 if ((intmask & SDHCI_INT_CARD_INT) && 3585 (host->ier & SDHCI_INT_CARD_INT)) { 3586 sdhci_enable_sdio_irq_nolock(host, false); 3587 sdio_signal_irq(host->mmc); 3588 } 3589 3590 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3591 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3592 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 3593 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 3594 3595 if (intmask) { 3596 unexpected |= intmask; 3597 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3598 } 3599 cont: 3600 if (result == IRQ_NONE) 3601 result = IRQ_HANDLED; 3602 3603 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3604 } while (intmask && --max_loops); 3605 3606 /* Determine if mrqs can be completed immediately */ 3607 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3608 struct mmc_request *mrq = host->mrqs_done[i]; 3609 3610 if (!mrq) 3611 continue; 3612 3613 if (sdhci_defer_done(host, mrq)) { 3614 result = IRQ_WAKE_THREAD; 3615 } else { 3616 mrqs_done[i] = mrq; 3617 host->mrqs_done[i] = NULL; 3618 } 3619 } 3620 out: 3621 if (host->deferred_cmd) 3622 result = IRQ_WAKE_THREAD; 3623 3624 spin_unlock(&host->lock); 3625 3626 /* Process mrqs ready for immediate completion */ 3627 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3628 if (!mrqs_done[i]) 3629 continue; 3630 3631 if (host->ops->request_done) 3632 host->ops->request_done(host, mrqs_done[i]); 3633 else 3634 mmc_request_done(host->mmc, mrqs_done[i]); 3635 } 3636 3637 if (unexpected) { 3638 pr_err("%s: Unexpected interrupt 0x%08x.\n", 3639 mmc_hostname(host->mmc), unexpected); 3640 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3641 sdhci_dumpregs(host); 3642 } 3643 3644 return result; 3645 } 3646 3647 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3648 { 3649 struct sdhci_host *host = dev_id; 3650 struct mmc_command *cmd; 3651 unsigned long flags; 3652 u32 isr; 3653 3654 while (!sdhci_request_done(host)) 3655 ; 3656 3657 spin_lock_irqsave(&host->lock, flags); 3658 3659 isr = host->thread_isr; 3660 host->thread_isr = 0; 3661 3662 cmd = host->deferred_cmd; 3663 if (cmd && !sdhci_send_command_retry(host, cmd, flags)) 3664 sdhci_finish_mrq(host, cmd->mrq); 3665 3666 spin_unlock_irqrestore(&host->lock, flags); 3667 3668 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3669 struct mmc_host *mmc = host->mmc; 3670 3671 mmc->ops->card_event(mmc); 3672 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3673 } 3674 3675 return IRQ_HANDLED; 3676 } 3677 3678 /*****************************************************************************\ 3679 * * 3680 * Suspend/resume * 3681 * * 3682 \*****************************************************************************/ 3683 3684 #ifdef CONFIG_PM 3685 3686 static bool 
sdhci_cd_irq_can_wakeup(struct sdhci_host *host) 3687 { 3688 return mmc_card_is_removable(host->mmc) && 3689 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3690 !mmc_can_gpio_cd(host->mmc); 3691 } 3692 3693 /* 3694 * To enable wakeup events, the corresponding events have to be enabled in 3695 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal 3696 * Table' in the SD Host Controller Standard Specification. 3697 * It is useless to restore SDHCI_INT_ENABLE state in 3698 * sdhci_disable_irq_wakeups() since it will be set by 3699 * sdhci_enable_card_detection() or sdhci_init(). 3700 */ 3701 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 3702 { 3703 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 3704 SDHCI_WAKE_ON_INT; 3705 u32 irq_val = 0; 3706 u8 wake_val = 0; 3707 u8 val; 3708 3709 if (sdhci_cd_irq_can_wakeup(host)) { 3710 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 3711 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 3712 } 3713 3714 if (mmc_card_wake_sdio_irq(host->mmc)) { 3715 wake_val |= SDHCI_WAKE_ON_INT; 3716 irq_val |= SDHCI_INT_CARD_INT; 3717 } 3718 3719 if (!irq_val) 3720 return false; 3721 3722 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3723 val &= ~mask; 3724 val |= wake_val; 3725 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3726 3727 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 3728 3729 host->irq_wake_enabled = !enable_irq_wake(host->irq); 3730 3731 return host->irq_wake_enabled; 3732 } 3733 3734 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 3735 { 3736 u8 val; 3737 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 3738 | SDHCI_WAKE_ON_INT; 3739 3740 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3741 val &= ~mask; 3742 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3743 3744 disable_irq_wake(host->irq); 3745 3746 host->irq_wake_enabled = false; 3747 } 3748 3749 int sdhci_suspend_host(struct sdhci_host *host) 3750 { 3751 sdhci_disable_card_detection(host); 3752 3753 mmc_retune_timer_stop(host->mmc); 3754 3755 if (!device_may_wakeup(mmc_dev(host->mmc)) || 3756 !sdhci_enable_irq_wakeups(host)) { 3757 host->ier = 0; 3758 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3759 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3760 free_irq(host->irq, host); 3761 } 3762 3763 return 0; 3764 } 3765 3766 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 3767 3768 int sdhci_resume_host(struct sdhci_host *host) 3769 { 3770 struct mmc_host *mmc = host->mmc; 3771 int ret = 0; 3772 3773 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3774 if (host->ops->enable_dma) 3775 host->ops->enable_dma(host); 3776 } 3777 3778 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) && 3779 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 3780 /* Card keeps power but host controller does not */ 3781 sdhci_init(host, 0); 3782 host->pwr = 0; 3783 host->clock = 0; 3784 host->reinit_uhs = true; 3785 mmc->ops->set_ios(mmc, &mmc->ios); 3786 } else { 3787 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER)); 3788 } 3789 3790 if (host->irq_wake_enabled) { 3791 sdhci_disable_irq_wakeups(host); 3792 } else { 3793 ret = request_threaded_irq(host->irq, sdhci_irq, 3794 sdhci_thread_irq, IRQF_SHARED, 3795 mmc_hostname(mmc), host); 3796 if (ret) 3797 return ret; 3798 } 3799 3800 sdhci_enable_card_detection(host); 3801 3802 return ret; 3803 } 3804 3805 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3806 3807 int sdhci_runtime_suspend_host(struct sdhci_host *host) 3808 { 3809 unsigned long flags; 3810 3811 mmc_retune_timer_stop(host->mmc); 3812 3813 
spin_lock_irqsave(&host->lock, flags); 3814 host->ier &= SDHCI_INT_CARD_INT; 3815 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3816 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3817 spin_unlock_irqrestore(&host->lock, flags); 3818 3819 synchronize_hardirq(host->irq); 3820 3821 spin_lock_irqsave(&host->lock, flags); 3822 host->runtime_suspended = true; 3823 spin_unlock_irqrestore(&host->lock, flags); 3824 3825 return 0; 3826 } 3827 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 3828 3829 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) 3830 { 3831 struct mmc_host *mmc = host->mmc; 3832 unsigned long flags; 3833 int host_flags = host->flags; 3834 3835 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3836 if (host->ops->enable_dma) 3837 host->ops->enable_dma(host); 3838 } 3839 3840 sdhci_init(host, soft_reset); 3841 3842 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && 3843 mmc->ios.power_mode != MMC_POWER_OFF) { 3844 /* Force clock and power re-program */ 3845 host->pwr = 0; 3846 host->clock = 0; 3847 host->reinit_uhs = true; 3848 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); 3849 mmc->ops->set_ios(mmc, &mmc->ios); 3850 3851 if ((host_flags & SDHCI_PV_ENABLED) && 3852 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 3853 spin_lock_irqsave(&host->lock, flags); 3854 sdhci_enable_preset_value(host, true); 3855 spin_unlock_irqrestore(&host->lock, flags); 3856 } 3857 3858 if ((mmc->caps2 & MMC_CAP2_HS400_ES) && 3859 mmc->ops->hs400_enhanced_strobe) 3860 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); 3861 } 3862 3863 spin_lock_irqsave(&host->lock, flags); 3864 3865 host->runtime_suspended = false; 3866 3867 /* Enable SDIO IRQ */ 3868 if (sdio_irq_claimed(mmc)) 3869 sdhci_enable_sdio_irq_nolock(host, true); 3870 3871 /* Enable Card Detection */ 3872 sdhci_enable_card_detection(host); 3873 3874 spin_unlock_irqrestore(&host->lock, flags); 3875 3876 return 0; 3877 } 3878 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 3879 3880 #endif /* CONFIG_PM */ 3881 3882 /*****************************************************************************\ 3883 * * 3884 * Command Queue Engine (CQE) helpers * 3885 * * 3886 \*****************************************************************************/ 3887 3888 void sdhci_cqe_enable(struct mmc_host *mmc) 3889 { 3890 struct sdhci_host *host = mmc_priv(mmc); 3891 unsigned long flags; 3892 u8 ctrl; 3893 3894 spin_lock_irqsave(&host->lock, flags); 3895 3896 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 3897 ctrl &= ~SDHCI_CTRL_DMA_MASK; 3898 /* 3899 * Host from V4.10 supports ADMA3 DMA type. 3900 * ADMA3 performs integrated descriptor which is more suitable 3901 * for cmd queuing to fetch both command and transfer descriptors. 
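(An ADMA3 integrated descriptor pairs a command descriptor with an ADMA2 transfer descriptor, so the controller can fetch both for each queued task in one pass.)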
3902 */ 3903 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) 3904 ctrl |= SDHCI_CTRL_ADMA3; 3905 else if (host->flags & SDHCI_USE_64_BIT_DMA) 3906 ctrl |= SDHCI_CTRL_ADMA64; 3907 else 3908 ctrl |= SDHCI_CTRL_ADMA32; 3909 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3910 3911 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3912 SDHCI_BLOCK_SIZE); 3913 3914 /* Set maximum timeout */ 3915 sdhci_set_timeout(host, NULL); 3916 3917 host->ier = host->cqe_ier; 3918 3919 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3920 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3921 3922 host->cqe_on = true; 3923 3924 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3925 mmc_hostname(mmc), host->ier, 3926 sdhci_readl(host, SDHCI_INT_STATUS)); 3927 3928 spin_unlock_irqrestore(&host->lock, flags); 3929 } 3930 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3931 3932 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3933 { 3934 struct sdhci_host *host = mmc_priv(mmc); 3935 unsigned long flags; 3936 3937 spin_lock_irqsave(&host->lock, flags); 3938 3939 sdhci_set_default_irqs(host); 3940 3941 host->cqe_on = false; 3942 3943 if (recovery) 3944 sdhci_reset_for(host, CQE_RECOVERY); 3945 3946 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 3947 mmc_hostname(mmc), host->ier, 3948 sdhci_readl(host, SDHCI_INT_STATUS)); 3949 3950 spin_unlock_irqrestore(&host->lock, flags); 3951 } 3952 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 3953 3954 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, 3955 int *data_error) 3956 { 3957 u32 mask; 3958 3959 if (!host->cqe_on) 3960 return false; 3961 3962 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) { 3963 *cmd_error = -EILSEQ; 3964 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3965 sdhci_err_stats_inc(host, CMD_CRC); 3966 } else if (intmask & SDHCI_INT_TIMEOUT) { 3967 *cmd_error = -ETIMEDOUT; 3968 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3969 } else 3970 *cmd_error = 0; 3971 3972 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) { 3973 *data_error = -EILSEQ; 3974 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3975 sdhci_err_stats_inc(host, DAT_CRC); 3976 } else if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3977 *data_error = -ETIMEDOUT; 3978 sdhci_err_stats_inc(host, DAT_TIMEOUT); 3979 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 3980 *data_error = -EIO; 3981 sdhci_err_stats_inc(host, ADMA); 3982 } else 3983 *data_error = 0; 3984 3985 /* Clear selected interrupts. 
*/ 3986 mask = intmask & host->cqe_ier; 3987 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3988 3989 if (intmask & SDHCI_INT_BUS_POWER) 3990 pr_err("%s: Card is consuming too much power!\n", 3991 mmc_hostname(host->mmc)); 3992 3993 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); 3994 if (intmask) { 3995 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3996 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", 3997 mmc_hostname(host->mmc), intmask); 3998 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3999 sdhci_dumpregs(host); 4000 } 4001 4002 return true; 4003 } 4004 EXPORT_SYMBOL_GPL(sdhci_cqe_irq); 4005 4006 /*****************************************************************************\ 4007 * * 4008 * Device allocation/registration * 4009 * * 4010 \*****************************************************************************/ 4011 4012 struct sdhci_host *sdhci_alloc_host(struct device *dev, 4013 size_t priv_size) 4014 { 4015 struct mmc_host *mmc; 4016 struct sdhci_host *host; 4017 4018 WARN_ON(dev == NULL); 4019 4020 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 4021 if (!mmc) 4022 return ERR_PTR(-ENOMEM); 4023 4024 host = mmc_priv(mmc); 4025 host->mmc = mmc; 4026 host->mmc_host_ops = sdhci_ops; 4027 mmc->ops = &host->mmc_host_ops; 4028 4029 host->flags = SDHCI_SIGNALING_330; 4030 4031 host->cqe_ier = SDHCI_CQE_INT_MASK; 4032 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; 4033 4034 host->tuning_delay = -1; 4035 host->tuning_loop_count = MAX_TUNING_LOOP; 4036 4037 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; 4038 4039 /* 4040 * The DMA table descriptor count is calculated as the maximum 4041 * number of segments times 2, to allow for an alignment 4042 * descriptor for each segment, plus 1 for a nop end descriptor. 4043 */ 4044 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; 4045 host->max_adma = 65536; 4046 4047 host->max_timeout_count = 0xE; 4048 4049 return host; 4050 } 4051 4052 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 4053 4054 static int sdhci_set_dma_mask(struct sdhci_host *host) 4055 { 4056 struct mmc_host *mmc = host->mmc; 4057 struct device *dev = mmc_dev(mmc); 4058 int ret = -EINVAL; 4059 4060 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 4061 host->flags &= ~SDHCI_USE_64_BIT_DMA; 4062 4063 /* Try 64-bit mask if hardware is capable of it */ 4064 if (host->flags & SDHCI_USE_64_BIT_DMA) { 4065 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 4066 if (ret) { 4067 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 4068 mmc_hostname(mmc)); 4069 host->flags &= ~SDHCI_USE_64_BIT_DMA; 4070 } 4071 } 4072 4073 /* 32-bit mask as default & fallback */ 4074 if (ret) { 4075 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 4076 if (ret) 4077 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 4078 mmc_hostname(mmc)); 4079 } 4080 4081 return ret; 4082 } 4083 4084 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, 4085 const u32 *caps, const u32 *caps1) 4086 { 4087 u16 v; 4088 u64 dt_caps_mask = 0; 4089 u64 dt_caps = 0; 4090 4091 if (host->read_caps) 4092 return; 4093 4094 host->read_caps = true; 4095 4096 if (debug_quirks) 4097 host->quirks = debug_quirks; 4098 4099 if (debug_quirks2) 4100 host->quirks2 = debug_quirks2; 4101 4102 sdhci_reset_for_all(host); 4103 4104 if (host->v4_mode) 4105 sdhci_do_enable_v4_mode(host); 4106 4107 device_property_read_u64(mmc_dev(host->mmc), 4108 "sdhci-caps-mask", &dt_caps_mask); 4109 device_property_read_u64(mmc_dev(host->mmc), 4110 "sdhci-caps", &dt_caps); 4111 4112 v = ver ? 
*ver : sdhci_readw(host, SDHCI_HOST_VERSION); 4113 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; 4114 4115 if (caps) { 4116 host->caps = *caps; 4117 } else { 4118 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); 4119 host->caps &= ~lower_32_bits(dt_caps_mask); 4120 host->caps |= lower_32_bits(dt_caps); 4121 } 4122 4123 if (host->version < SDHCI_SPEC_300) 4124 return; 4125 4126 if (caps1) { 4127 host->caps1 = *caps1; 4128 } else { 4129 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); 4130 host->caps1 &= ~upper_32_bits(dt_caps_mask); 4131 host->caps1 |= upper_32_bits(dt_caps); 4132 } 4133 } 4134 EXPORT_SYMBOL_GPL(__sdhci_read_caps); 4135 4136 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host) 4137 { 4138 struct mmc_host *mmc = host->mmc; 4139 unsigned int max_blocks; 4140 unsigned int bounce_size; 4141 int ret; 4142 4143 /* 4144 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer 4145 * has diminishing returns, this is probably because SD/MMC 4146 * cards are usually optimized to handle this size of requests. 4147 */ 4148 bounce_size = SZ_64K; 4149 /* 4150 * Adjust downwards to maximum request size if this is less 4151 * than our segment size, else hammer down the maximum 4152 * request size to the maximum buffer size. 4153 */ 4154 if (mmc->max_req_size < bounce_size) 4155 bounce_size = mmc->max_req_size; 4156 max_blocks = bounce_size / 512; 4157 4158 /* 4159 * When we just support one segment, we can get significant 4160 * speedups by the help of a bounce buffer to group scattered 4161 * reads/writes together. 4162 */ 4163 host->bounce_buffer = devm_kmalloc(mmc_dev(mmc), 4164 bounce_size, 4165 GFP_KERNEL); 4166 if (!host->bounce_buffer) { 4167 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", 4168 mmc_hostname(mmc), 4169 bounce_size); 4170 /* 4171 * Exiting with zero here makes sure we proceed with 4172 * mmc->max_segs == 1. 4173 */ 4174 return; 4175 } 4176 4177 host->bounce_addr = dma_map_single(mmc_dev(mmc), 4178 host->bounce_buffer, 4179 bounce_size, 4180 DMA_BIDIRECTIONAL); 4181 ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr); 4182 if (ret) { 4183 devm_kfree(mmc_dev(mmc), host->bounce_buffer); 4184 host->bounce_buffer = NULL; 4185 /* Again fall back to max_segs == 1 */ 4186 return; 4187 } 4188 4189 host->bounce_buffer_size = bounce_size; 4190 4191 /* Lie about this since we're bouncing */ 4192 mmc->max_segs = max_blocks; 4193 mmc->max_seg_size = bounce_size; 4194 mmc->max_req_size = bounce_size; 4195 4196 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", 4197 mmc_hostname(mmc), max_blocks, bounce_size); 4198 } 4199 4200 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host) 4201 { 4202 /* 4203 * According to SD Host Controller spec v4.10, bit[27] added from 4204 * version 4.10 in Capabilities Register is used as 64-bit System 4205 * Address support for V4 mode. 4206 */ 4207 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) 4208 return host->caps & SDHCI_CAN_64BIT_V4; 4209 4210 return host->caps & SDHCI_CAN_64BIT; 4211 } 4212 4213 int sdhci_setup_host(struct sdhci_host *host) 4214 { 4215 struct mmc_host *mmc; 4216 u32 max_current_caps; 4217 unsigned int ocr_avail; 4218 unsigned int override_timeout_clk; 4219 u32 max_clk; 4220 int ret = 0; 4221 bool enable_vqmmc = false; 4222 4223 WARN_ON(host == NULL); 4224 if (host == NULL) 4225 return -EINVAL; 4226 4227 mmc = host->mmc; 4228 4229 /* 4230 * If there are external regulators, get them. 
Note this must be done 4231 * early before resetting the host and reading the capabilities so that 4232 * the host can take the appropriate action if regulators are not 4233 * available. 4234 */ 4235 if (!mmc->supply.vqmmc) { 4236 ret = mmc_regulator_get_supply(mmc); 4237 if (ret) 4238 return ret; 4239 enable_vqmmc = true; 4240 } 4241 4242 DBG("Version: 0x%08x | Present: 0x%08x\n", 4243 sdhci_readw(host, SDHCI_HOST_VERSION), 4244 sdhci_readl(host, SDHCI_PRESENT_STATE)); 4245 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", 4246 sdhci_readl(host, SDHCI_CAPABILITIES), 4247 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 4248 4249 sdhci_read_caps(host); 4250 4251 override_timeout_clk = host->timeout_clk; 4252 4253 if (host->version > SDHCI_SPEC_420) { 4254 pr_err("%s: Unknown controller version (%d). You may experience problems.\n", 4255 mmc_hostname(mmc), host->version); 4256 } 4257 4258 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 4259 host->flags |= SDHCI_USE_SDMA; 4260 else if (!(host->caps & SDHCI_CAN_DO_SDMA)) 4261 DBG("Controller doesn't have SDMA capability\n"); 4262 else 4263 host->flags |= SDHCI_USE_SDMA; 4264 4265 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 4266 (host->flags & SDHCI_USE_SDMA)) { 4267 DBG("Disabling DMA as it is marked broken\n"); 4268 host->flags &= ~SDHCI_USE_SDMA; 4269 } 4270 4271 if ((host->version >= SDHCI_SPEC_200) && 4272 (host->caps & SDHCI_CAN_DO_ADMA2)) 4273 host->flags |= SDHCI_USE_ADMA; 4274 4275 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 4276 (host->flags & SDHCI_USE_ADMA)) { 4277 DBG("Disabling ADMA as it is marked broken\n"); 4278 host->flags &= ~SDHCI_USE_ADMA; 4279 } 4280 4281 if (sdhci_can_64bit_dma(host)) 4282 host->flags |= SDHCI_USE_64_BIT_DMA; 4283 4284 if (host->use_external_dma) { 4285 ret = sdhci_external_dma_init(host); 4286 if (ret == -EPROBE_DEFER) 4287 goto unreg; 4288 /* 4289 * Fall back to use the DMA/PIO integrated in standard SDHCI 4290 * instead of external DMA devices. 4291 */ 4292 else if (ret) 4293 sdhci_switch_external_dma(host, false); 4294 /* Disable internal DMA sources */ 4295 else 4296 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4297 } 4298 4299 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 4300 if (host->ops->set_dma_mask) 4301 ret = host->ops->set_dma_mask(host); 4302 else 4303 ret = sdhci_set_dma_mask(host); 4304 4305 if (!ret && host->ops->enable_dma) 4306 ret = host->ops->enable_dma(host); 4307 4308 if (ret) { 4309 pr_warn("%s: No suitable DMA available - falling back to PIO\n", 4310 mmc_hostname(mmc)); 4311 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4312 4313 ret = 0; 4314 } 4315 } 4316 4317 /* SDMA does not support 64-bit DMA if v4 mode not set */ 4318 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode) 4319 host->flags &= ~SDHCI_USE_SDMA; 4320 4321 if (host->flags & SDHCI_USE_ADMA) { 4322 dma_addr_t dma; 4323 void *buf; 4324 4325 if (!(host->flags & SDHCI_USE_64_BIT_DMA)) 4326 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ; 4327 else if (!host->alloc_desc_sz) 4328 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); 4329 4330 host->desc_sz = host->alloc_desc_sz; 4331 host->adma_table_sz = host->adma_table_cnt * host->desc_sz; 4332 4333 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; 4334 /* 4335 * Use zalloc to zero the reserved high 32-bits of 128-bit 4336 * descriptors so that they never need to be written. 
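 * (dma_alloc_coherent() returns zeroed memory, so no explicit zeroing
 * pass is needed here.)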
4337 */ 4338 buf = dma_alloc_coherent(mmc_dev(mmc), 4339 host->align_buffer_sz + host->adma_table_sz, 4340 &dma, GFP_KERNEL); 4341 if (!buf) { 4342 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 4343 mmc_hostname(mmc)); 4344 host->flags &= ~SDHCI_USE_ADMA; 4345 } else if ((dma + host->align_buffer_sz) & 4346 (SDHCI_ADMA2_DESC_ALIGN - 1)) { 4347 pr_warn("%s: unable to allocate aligned ADMA descriptor\n", 4348 mmc_hostname(mmc)); 4349 host->flags &= ~SDHCI_USE_ADMA; 4350 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4351 host->adma_table_sz, buf, dma); 4352 } else { 4353 host->align_buffer = buf; 4354 host->align_addr = dma; 4355 4356 host->adma_table = buf + host->align_buffer_sz; 4357 host->adma_addr = dma + host->align_buffer_sz; 4358 } 4359 } 4360 4361 /* 4362 * If we use DMA, then it's up to the caller to set the DMA 4363 * mask, but PIO does not need the hw shim so we set a new 4364 * mask here in that case. 4365 */ 4366 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 4367 host->dma_mask = DMA_BIT_MASK(64); 4368 mmc_dev(mmc)->dma_mask = &host->dma_mask; 4369 } 4370 4371 if (host->version >= SDHCI_SPEC_300) 4372 host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps); 4373 else 4374 host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps); 4375 4376 host->max_clk *= 1000000; 4377 if (host->max_clk == 0 || host->quirks & 4378 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 4379 if (!host->ops->get_max_clock) { 4380 pr_err("%s: Hardware doesn't specify base clock frequency.\n", 4381 mmc_hostname(mmc)); 4382 ret = -ENODEV; 4383 goto undma; 4384 } 4385 host->max_clk = host->ops->get_max_clock(host); 4386 } 4387 4388 /* 4389 * In case of Host Controller v3.00, find out whether clock 4390 * multiplier is supported. 4391 */ 4392 host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1); 4393 4394 /* 4395 * In case the value in Clock Multiplier is 0, then programmable 4396 * clock mode is not supported, otherwise the actual clock 4397 * multiplier is one more than the value of Clock Multiplier 4398 * in the Capabilities Register. 4399 */ 4400 if (host->clk_mul) 4401 host->clk_mul += 1; 4402 4403 /* 4404 * Set host parameters. 4405 */ 4406 max_clk = host->max_clk; 4407 4408 if (host->ops->get_min_clock) 4409 mmc->f_min = host->ops->get_min_clock(host); 4410 else if (host->version >= SDHCI_SPEC_300) { 4411 if (host->clk_mul) 4412 max_clk = host->max_clk * host->clk_mul; 4413 /* 4414 * Divided Clock Mode minimum clock rate is always less than 4415 * Programmable Clock Mode minimum clock rate. 4416 */ 4417 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 4418 } else 4419 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 4420 4421 if (!mmc->f_max || mmc->f_max > max_clk) 4422 mmc->f_max = max_clk; 4423 4424 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 4425 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps); 4426 4427 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) 4428 host->timeout_clk *= 1000; 4429 4430 if (host->timeout_clk == 0) { 4431 if (!host->ops->get_timeout_clock) { 4432 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", 4433 mmc_hostname(mmc)); 4434 ret = -ENODEV; 4435 goto undma; 4436 } 4437 4438 host->timeout_clk = 4439 DIV_ROUND_UP(host->ops->get_timeout_clock(host), 4440 1000); 4441 } 4442 4443 if (override_timeout_clk) 4444 host->timeout_clk = override_timeout_clk; 4445 4446 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 
4447 host->ops->get_max_timeout_count(host) : 1 << 27; 4448 mmc->max_busy_timeout /= host->timeout_clk; 4449 } 4450 4451 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT && 4452 !host->ops->get_max_timeout_count) 4453 mmc->max_busy_timeout = 0; 4454 4455 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23; 4456 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 4457 4458 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 4459 host->flags |= SDHCI_AUTO_CMD12; 4460 4461 /* 4462 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO. 4463 * For v4 mode, SDMA may use Auto-CMD23 as well. 4464 */ 4465 if ((host->version >= SDHCI_SPEC_300) && 4466 ((host->flags & SDHCI_USE_ADMA) || 4467 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) && 4468 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { 4469 host->flags |= SDHCI_AUTO_CMD23; 4470 DBG("Auto-CMD23 available\n"); 4471 } else { 4472 DBG("Auto-CMD23 unavailable\n"); 4473 } 4474 4475 /* 4476 * A controller may support 8-bit width, but the board itself 4477 * might not have the pins brought out. Boards that support 4478 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 4479 * their platform code before calling sdhci_add_host(), and we 4480 * won't assume 8-bit width for hosts without that CAP. 4481 */ 4482 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 4483 mmc->caps |= MMC_CAP_4_BIT_DATA; 4484 4485 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 4486 mmc->caps &= ~MMC_CAP_CMD23; 4487 4488 if (host->caps & SDHCI_CAN_DO_HISPD) 4489 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 4490 4491 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 4492 mmc_card_is_removable(mmc) && 4493 mmc_gpio_get_cd(mmc) < 0) 4494 mmc->caps |= MMC_CAP_NEEDS_POLL; 4495 4496 if (!IS_ERR(mmc->supply.vqmmc)) { 4497 if (enable_vqmmc) { 4498 ret = regulator_enable(mmc->supply.vqmmc); 4499 host->sdhci_core_to_disable_vqmmc = !ret; 4500 } 4501 4502 /* If vqmmc provides no 1.8V signalling, then there's no UHS */ 4503 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 4504 1950000)) 4505 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | 4506 SDHCI_SUPPORT_SDR50 | 4507 SDHCI_SUPPORT_DDR50); 4508 4509 /* In eMMC case vqmmc might be a fixed 1.8V regulator */ 4510 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000, 4511 3600000)) 4512 host->flags &= ~SDHCI_SIGNALING_330; 4513 4514 if (ret) { 4515 pr_warn("%s: Failed to enable vqmmc regulator: %d\n", 4516 mmc_hostname(mmc), ret); 4517 mmc->supply.vqmmc = ERR_PTR(-EINVAL); 4518 } 4519 4520 } 4521 4522 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) { 4523 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 4524 SDHCI_SUPPORT_DDR50); 4525 /* 4526 * The SDHCI controller in a SoC might support HS200/HS400 4527 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property), 4528 * but if the board is modeled such that the IO lines are not 4529 * connected to 1.8v then HS200/HS400 cannot be supported. 4530 * Disable HS200/HS400 if the board does not have 1.8v connected 4531 * to the IO lines. (Applicable for other modes in 1.8v) 4532 */ 4533 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES); 4534 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS); 4535 } 4536 4537 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. 
*/ 4538 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 4539 SDHCI_SUPPORT_DDR50)) 4540 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; 4541 4542 /* SDR104 support also implies SDR50 support */ 4543 if (host->caps1 & SDHCI_SUPPORT_SDR104) { 4544 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; 4545 /* SD3.0: SDR104 is supported so (for eMMC) the caps2 4546 * field can be promoted to support HS200. 4547 */ 4548 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) 4549 mmc->caps2 |= MMC_CAP2_HS200; 4550 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) { 4551 mmc->caps |= MMC_CAP_UHS_SDR50; 4552 } 4553 4554 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && 4555 (host->caps1 & SDHCI_SUPPORT_HS400)) 4556 mmc->caps2 |= MMC_CAP2_HS400; 4557 4558 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && 4559 (IS_ERR(mmc->supply.vqmmc) || 4560 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000, 4561 1300000))) 4562 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V; 4563 4564 if ((host->caps1 & SDHCI_SUPPORT_DDR50) && 4565 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) 4566 mmc->caps |= MMC_CAP_UHS_DDR50; 4567 4568 /* Does the host need tuning for SDR50? */ 4569 if (host->caps1 & SDHCI_USE_SDR50_TUNING) 4570 host->flags |= SDHCI_SDR50_NEEDS_TUNING; 4571 4572 /* Driver Type(s) (A, C, D) supported by the host */ 4573 if (host->caps1 & SDHCI_DRIVER_TYPE_A) 4574 mmc->caps |= MMC_CAP_DRIVER_TYPE_A; 4575 if (host->caps1 & SDHCI_DRIVER_TYPE_C) 4576 mmc->caps |= MMC_CAP_DRIVER_TYPE_C; 4577 if (host->caps1 & SDHCI_DRIVER_TYPE_D) 4578 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 4579 4580 /* Initial value for re-tuning timer count */ 4581 host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK, 4582 host->caps1); 4583 4584 /* 4585 * In case Re-tuning Timer is not disabled, the actual value of 4586 * re-tuning timer will be 2 ^ (n - 1). 4587 */ 4588 if (host->tuning_count) 4589 host->tuning_count = 1 << (host->tuning_count - 1); 4590 4591 /* Re-tuning mode supported by the Host Controller */ 4592 host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1); 4593 4594 ocr_avail = 0; 4595 4596 /* 4597 * According to SD Host Controller spec v3.00, if the Host System 4598 * can supply more than 150mA, Host Driver should set XPC to 1. Also 4599 * the value is meaningful only if Voltage Support in the Capabilities 4600 * register is set. The actual current value is 4 times the register 4601 * value.
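 * Illustrative arithmetic only: a field value of 25 in, say,
 * SDHCI_MAX_CURRENT_330_MASK corresponds to 25 * SDHCI_MAX_CURRENT_MULTIPLIER
 * (4) = 100 mA, and the regulator fallback below turns e.g. a 200000 uA limit
 * from regulator_get_current_limit() into 200 mA and then a register value of
 * 200 / 4 = 50.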
4602 */ 4603 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); 4604 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) { 4605 int curr = regulator_get_current_limit(mmc->supply.vmmc); 4606 if (curr > 0) { 4607 4608 /* convert to SDHCI_MAX_CURRENT format */ 4609 curr = curr/1000; /* convert to mA */ 4610 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; 4611 4612 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); 4613 max_current_caps = 4614 FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) | 4615 FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) | 4616 FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr); 4617 } 4618 } 4619 4620 if (host->caps & SDHCI_CAN_VDD_330) { 4621 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 4622 4623 mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK, 4624 max_current_caps) * 4625 SDHCI_MAX_CURRENT_MULTIPLIER; 4626 } 4627 if (host->caps & SDHCI_CAN_VDD_300) { 4628 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 4629 4630 mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK, 4631 max_current_caps) * 4632 SDHCI_MAX_CURRENT_MULTIPLIER; 4633 } 4634 if (host->caps & SDHCI_CAN_VDD_180) { 4635 ocr_avail |= MMC_VDD_165_195; 4636 4637 mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK, 4638 max_current_caps) * 4639 SDHCI_MAX_CURRENT_MULTIPLIER; 4640 } 4641 4642 /* If OCR set by host, use it instead. */ 4643 if (host->ocr_mask) 4644 ocr_avail = host->ocr_mask; 4645 4646 /* If OCR set by external regulators, give it the highest priority. */ 4647 if (mmc->ocr_avail) 4648 ocr_avail = mmc->ocr_avail; 4649 4650 mmc->ocr_avail = ocr_avail; 4651 mmc->ocr_avail_sdio = ocr_avail; 4652 if (host->ocr_avail_sdio) 4653 mmc->ocr_avail_sdio &= host->ocr_avail_sdio; 4654 mmc->ocr_avail_sd = ocr_avail; 4655 if (host->ocr_avail_sd) 4656 mmc->ocr_avail_sd &= host->ocr_avail_sd; 4657 else /* normal SD controllers don't support 1.8V */ 4658 mmc->ocr_avail_sd &= ~MMC_VDD_165_195; 4659 mmc->ocr_avail_mmc = ocr_avail; 4660 if (host->ocr_avail_mmc) 4661 mmc->ocr_avail_mmc &= host->ocr_avail_mmc; 4662 4663 if (mmc->ocr_avail == 0) { 4664 pr_err("%s: Hardware doesn't report any supported voltages.\n", 4665 mmc_hostname(mmc)); 4666 ret = -ENODEV; 4667 goto unreg; 4668 } 4669 4670 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 4671 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | 4672 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) || 4673 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V))) 4674 host->flags |= SDHCI_SIGNALING_180; 4675 4676 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V) 4677 host->flags |= SDHCI_SIGNALING_120; 4678 4679 spin_lock_init(&host->lock); 4680 4681 /* 4682 * Maximum number of sectors in one transfer. Limited by SDMA boundary 4683 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this 4684 * is less anyway. 4685 */ 4686 mmc->max_req_size = 524288; 4687 4688 /* 4689 * Maximum number of segments. Depends on whether the hardware 4690 * can do scatter/gather or not. 4691 */ 4692 if (host->flags & SDHCI_USE_ADMA) { 4693 mmc->max_segs = SDHCI_MAX_SEGS; 4694 } else if (host->flags & SDHCI_USE_SDMA) { 4695 mmc->max_segs = 1; 4696 mmc->max_req_size = min_t(size_t, mmc->max_req_size, 4697 dma_max_mapping_size(mmc_dev(mmc))); 4698 } else { /* PIO */ 4699 mmc->max_segs = SDHCI_MAX_SEGS; 4700 } 4701 4702 /* 4703 * Maximum segment size. Could be one segment with the maximum number 4704 * of bytes. When doing hardware scatter/gather, each entry cannot 4705 * be larger than 64 KiB though.
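 * (An ADMA2 descriptor encodes the segment length in a 16-bit field, which
 * is where the 64 KiB per-descriptor limit comes from.)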
4706 */ 4707 if (host->flags & SDHCI_USE_ADMA) { 4708 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) { 4709 host->max_adma = 65532; /* 32-bit alignment */ 4710 mmc->max_seg_size = 65535; 4711 /* 4712 * sdhci_adma_table_pre() expects to define 1 DMA 4713 * descriptor per segment, so the maximum segment size 4714 * is set accordingly. SDHCI allows up to 64KiB per DMA 4715 * descriptor (16-bit field), but some controllers do 4716 * not support "zero means 65536" reducing the maximum 4717 * for them to 65535. That is a problem if PAGE_SIZE is 4718 * 64KiB because the block layer does not support 4719 * max_seg_size < PAGE_SIZE, however 4720 * sdhci_adma_table_pre() has a workaround to handle 4721 * that case, and split the descriptor. Refer also 4722 * comment in sdhci_adma_table_pre(). 4723 */ 4724 if (mmc->max_seg_size < PAGE_SIZE) 4725 mmc->max_seg_size = PAGE_SIZE; 4726 } else { 4727 mmc->max_seg_size = 65536; 4728 } 4729 } else { 4730 mmc->max_seg_size = mmc->max_req_size; 4731 } 4732 4733 /* 4734 * Maximum block size. This varies from controller to controller and 4735 * is specified in the capabilities register. 4736 */ 4737 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { 4738 mmc->max_blk_size = 2; 4739 } else { 4740 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >> 4741 SDHCI_MAX_BLOCK_SHIFT; 4742 if (mmc->max_blk_size >= 3) { 4743 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n", 4744 mmc_hostname(mmc)); 4745 mmc->max_blk_size = 0; 4746 } 4747 } 4748 4749 mmc->max_blk_size = 512 << mmc->max_blk_size; 4750 4751 /* 4752 * Maximum block count. 4753 */ 4754 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; 4755 4756 if (mmc->max_segs == 1) 4757 /* This may alter mmc->*_blk_* parameters */ 4758 sdhci_allocate_bounce_buffer(host); 4759 4760 return 0; 4761 4762 unreg: 4763 if (host->sdhci_core_to_disable_vqmmc) 4764 regulator_disable(mmc->supply.vqmmc); 4765 undma: 4766 if (host->align_buffer) 4767 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4768 host->adma_table_sz, host->align_buffer, 4769 host->align_addr); 4770 host->adma_table = NULL; 4771 host->align_buffer = NULL; 4772 4773 return ret; 4774 } 4775 EXPORT_SYMBOL_GPL(sdhci_setup_host); 4776 4777 void sdhci_cleanup_host(struct sdhci_host *host) 4778 { 4779 struct mmc_host *mmc = host->mmc; 4780 4781 if (host->sdhci_core_to_disable_vqmmc) 4782 regulator_disable(mmc->supply.vqmmc); 4783 4784 if (host->align_buffer) 4785 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4786 host->adma_table_sz, host->align_buffer, 4787 host->align_addr); 4788 4789 if (host->use_external_dma) 4790 sdhci_external_dma_release(host); 4791 4792 host->adma_table = NULL; 4793 host->align_buffer = NULL; 4794 } 4795 EXPORT_SYMBOL_GPL(sdhci_cleanup_host); 4796 4797 int __sdhci_add_host(struct sdhci_host *host) 4798 { 4799 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI; 4800 struct mmc_host *mmc = host->mmc; 4801 int ret; 4802 4803 if ((mmc->caps2 & MMC_CAP2_CQE) && 4804 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) { 4805 mmc->caps2 &= ~MMC_CAP2_CQE; 4806 mmc->cqe_ops = NULL; 4807 } 4808 4809 host->complete_wq = alloc_workqueue("sdhci", flags, 0); 4810 if (!host->complete_wq) 4811 return -ENOMEM; 4812 4813 INIT_WORK(&host->complete_work, sdhci_complete_work); 4814 4815 timer_setup(&host->timer, sdhci_timeout_timer, 0); 4816 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0); 4817 4818 init_waitqueue_head(&host->buf_ready_int); 4819 4820 sdhci_init(host, 0); 4821 4822 
ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, 4823 IRQF_SHARED, mmc_hostname(mmc), host); 4824 if (ret) { 4825 pr_err("%s: Failed to request IRQ %d: %d\n", 4826 mmc_hostname(mmc), host->irq, ret); 4827 goto unwq; 4828 } 4829 4830 ret = sdhci_led_register(host); 4831 if (ret) { 4832 pr_err("%s: Failed to register LED device: %d\n", 4833 mmc_hostname(mmc), ret); 4834 goto unirq; 4835 } 4836 4837 ret = mmc_add_host(mmc); 4838 if (ret) 4839 goto unled; 4840 4841 pr_info("%s: SDHCI controller on %s [%s] using %s\n", 4842 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 4843 host->use_external_dma ? "External DMA" : 4844 (host->flags & SDHCI_USE_ADMA) ? 4845 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : 4846 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); 4847 4848 sdhci_enable_card_detection(host); 4849 4850 return 0; 4851 4852 unled: 4853 sdhci_led_unregister(host); 4854 unirq: 4855 sdhci_reset_for_all(host); 4856 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4857 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4858 free_irq(host->irq, host); 4859 unwq: 4860 destroy_workqueue(host->complete_wq); 4861 4862 return ret; 4863 } 4864 EXPORT_SYMBOL_GPL(__sdhci_add_host); 4865 4866 int sdhci_add_host(struct sdhci_host *host) 4867 { 4868 int ret; 4869 4870 ret = sdhci_setup_host(host); 4871 if (ret) 4872 return ret; 4873 4874 ret = __sdhci_add_host(host); 4875 if (ret) 4876 goto cleanup; 4877 4878 return 0; 4879 4880 cleanup: 4881 sdhci_cleanup_host(host); 4882 4883 return ret; 4884 } 4885 EXPORT_SYMBOL_GPL(sdhci_add_host); 4886 4887 void sdhci_remove_host(struct sdhci_host *host, int dead) 4888 { 4889 struct mmc_host *mmc = host->mmc; 4890 unsigned long flags; 4891 4892 if (dead) { 4893 spin_lock_irqsave(&host->lock, flags); 4894 4895 host->flags |= SDHCI_DEVICE_DEAD; 4896 4897 if (sdhci_has_requests(host)) { 4898 pr_err("%s: Controller removed during " 4899 " transfer!\n", mmc_hostname(mmc)); 4900 sdhci_error_out_mrqs(host, -ENOMEDIUM); 4901 } 4902 4903 spin_unlock_irqrestore(&host->lock, flags); 4904 } 4905 4906 sdhci_disable_card_detection(host); 4907 4908 mmc_remove_host(mmc); 4909 4910 sdhci_led_unregister(host); 4911 4912 if (!dead) 4913 sdhci_reset_for_all(host); 4914 4915 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4916 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4917 free_irq(host->irq, host); 4918 4919 del_timer_sync(&host->timer); 4920 del_timer_sync(&host->data_timer); 4921 4922 destroy_workqueue(host->complete_wq); 4923 4924 if (host->sdhci_core_to_disable_vqmmc) 4925 regulator_disable(mmc->supply.vqmmc); 4926 4927 if (host->align_buffer) 4928 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4929 host->adma_table_sz, host->align_buffer, 4930 host->align_addr); 4931 4932 if (host->use_external_dma) 4933 sdhci_external_dma_release(host); 4934 4935 host->adma_table = NULL; 4936 host->align_buffer = NULL; 4937 } 4938 4939 EXPORT_SYMBOL_GPL(sdhci_remove_host); 4940 4941 void sdhci_free_host(struct sdhci_host *host) 4942 { 4943 mmc_free_host(host->mmc); 4944 } 4945 4946 EXPORT_SYMBOL_GPL(sdhci_free_host); 4947 4948 /*****************************************************************************\ 4949 * * 4950 * Driver init/exit * 4951 * * 4952 \*****************************************************************************/ 4953 4954 static int __init sdhci_drv_init(void) 4955 { 4956 pr_info(DRIVER_NAME 4957 ": Secure Digital Host Controller Interface driver\n"); 4958 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 4959 4960 return 0; 4961 } 4962 
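/*
 * Illustrative sketch only, not part of this driver: a typical platform glue
 * driver uses the exported helpers above roughly as follows. The names
 * foo_sdhci_probe and foo_sdhci_ops are hypothetical placeholders, and error
 * checking of the irq/ioaddr lookups is trimmed for brevity.
 *
 *	static int foo_sdhci_probe(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host;
 *		int ret;
 *
 *		host = sdhci_alloc_host(&pdev->dev, 0);
 *		if (IS_ERR(host))
 *			return PTR_ERR(host);
 *
 *		host->hw_name = "foo-sdhci";
 *		host->ops = &foo_sdhci_ops;
 *		host->irq = platform_get_irq(pdev, 0);
 *		host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
 *
 *		ret = sdhci_add_host(host);
 *		if (ret)
 *			sdhci_free_host(host);
 *		return ret;
 *	}
 *
 * Teardown is the reverse: sdhci_remove_host(host, dead) followed by
 * sdhci_free_host(host). Most real drivers build on sdhci-pltfm rather than
 * open-coding this.
 */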
4963 static void __exit sdhci_drv_exit(void) 4964 { 4965 } 4966 4967 module_init(sdhci_drv_init); 4968 module_exit(sdhci_drv_exit); 4969 4970 module_param(debug_quirks, uint, 0444); 4971 module_param(debug_quirks2, uint, 0444); 4972 4973 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 4974 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 4975 MODULE_LICENSE("GPL"); 4976 4977 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 4978 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 4979
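/*
 * Illustrative note: when set, debug_quirks/debug_quirks2 replace the quirks
 * detected for every SDHCI controller in the system, e.g. booting with
 * "sdhci.debug_quirks=<mask>" where <mask> is a bitwise OR of SDHCI_QUIRK_*
 * values from sdhci.h. They are intended for debugging only.
 */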