1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver 4 * 5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 6 * 7 * Thanks to the following companies for their support: 8 * 9 * - JMicron (hardware and technical support) 10 */ 11 12 #include <linux/bitfield.h> 13 #include <linux/delay.h> 14 #include <linux/dmaengine.h> 15 #include <linux/ktime.h> 16 #include <linux/highmem.h> 17 #include <linux/io.h> 18 #include <linux/module.h> 19 #include <linux/dma-mapping.h> 20 #include <linux/slab.h> 21 #include <linux/scatterlist.h> 22 #include <linux/sizes.h> 23 #include <linux/regulator/consumer.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/of.h> 26 #include <linux/bug.h> 27 #include <linux/leds.h> 28 29 #include <linux/mmc/mmc.h> 30 #include <linux/mmc/host.h> 31 #include <linux/mmc/card.h> 32 #include <linux/mmc/sdio.h> 33 #include <linux/mmc/slot-gpio.h> 34 35 #include "sdhci.h" 36 37 #define DRIVER_NAME "sdhci" 38 39 #define DBG(f, x...) \ 40 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 41 42 #define SDHCI_DUMP(f, x...) \ 43 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 44 45 #define MAX_TUNING_LOOP 40 46 47 static unsigned int debug_quirks = 0; 48 static unsigned int debug_quirks2; 49 50 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd); 51 52 void sdhci_dumpregs(struct sdhci_host *host) 53 { 54 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n"); 55 56 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n", 57 sdhci_readl(host, SDHCI_DMA_ADDRESS), 58 sdhci_readw(host, SDHCI_HOST_VERSION)); 59 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n", 60 sdhci_readw(host, SDHCI_BLOCK_SIZE), 61 sdhci_readw(host, SDHCI_BLOCK_COUNT)); 62 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n", 63 sdhci_readl(host, SDHCI_ARGUMENT), 64 sdhci_readw(host, SDHCI_TRANSFER_MODE)); 65 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n", 66 sdhci_readl(host, SDHCI_PRESENT_STATE), 67 sdhci_readb(host, SDHCI_HOST_CONTROL)); 68 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n", 69 sdhci_readb(host, SDHCI_POWER_CONTROL), 70 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); 71 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n", 72 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), 73 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); 74 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n", 75 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), 76 sdhci_readl(host, SDHCI_INT_STATUS)); 77 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n", 78 sdhci_readl(host, SDHCI_INT_ENABLE), 79 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); 80 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n", 81 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS), 82 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 83 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n", 84 sdhci_readl(host, SDHCI_CAPABILITIES), 85 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 86 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n", 87 sdhci_readw(host, SDHCI_COMMAND), 88 sdhci_readl(host, SDHCI_MAX_CURRENT)); 89 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n", 90 sdhci_readl(host, SDHCI_RESPONSE), 91 sdhci_readl(host, SDHCI_RESPONSE + 4)); 92 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n", 93 sdhci_readl(host, SDHCI_RESPONSE + 8), 94 sdhci_readl(host, SDHCI_RESPONSE + 12)); 95 SDHCI_DUMP("Host ctl2: 0x%08x\n", 96 sdhci_readw(host, SDHCI_HOST_CONTROL2)); 97 98 if (host->flags & SDHCI_USE_ADMA) { 99 if (host->flags & SDHCI_USE_64_BIT_DMA) { 100 SDHCI_DUMP("ADMA Err: 0x%08x | 
ADMA Ptr: 0x%08x%08x\n", 101 sdhci_readl(host, SDHCI_ADMA_ERROR), 102 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI), 103 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 104 } else { 105 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 106 sdhci_readl(host, SDHCI_ADMA_ERROR), 107 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 108 } 109 } 110 111 if (host->ops->dump_uhs2_regs) 112 host->ops->dump_uhs2_regs(host); 113 114 if (host->ops->dump_vendor_regs) 115 host->ops->dump_vendor_regs(host); 116 117 SDHCI_DUMP("============================================\n"); 118 } 119 EXPORT_SYMBOL_GPL(sdhci_dumpregs); 120 121 /*****************************************************************************\ 122 * * 123 * Low level functions * 124 * * 125 \*****************************************************************************/ 126 127 static void sdhci_do_enable_v4_mode(struct sdhci_host *host) 128 { 129 u16 ctrl2; 130 131 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 132 if (ctrl2 & SDHCI_CTRL_V4_MODE) 133 return; 134 135 ctrl2 |= SDHCI_CTRL_V4_MODE; 136 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 137 } 138 139 /* 140 * This can be called before sdhci_add_host() by Vendor's host controller 141 * driver to enable v4 mode if supported. 142 */ 143 void sdhci_enable_v4_mode(struct sdhci_host *host) 144 { 145 host->v4_mode = true; 146 sdhci_do_enable_v4_mode(host); 147 } 148 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode); 149 150 bool sdhci_data_line_cmd(struct mmc_command *cmd) 151 { 152 return cmd->data || cmd->flags & MMC_RSP_BUSY; 153 } 154 EXPORT_SYMBOL_GPL(sdhci_data_line_cmd); 155 156 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) 157 { 158 u32 present; 159 160 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 161 !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc)) 162 return; 163 164 if (enable) { 165 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 166 SDHCI_CARD_PRESENT; 167 168 host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : 169 SDHCI_INT_CARD_INSERT; 170 } else { 171 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 172 } 173 174 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 175 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 176 } 177 178 static void sdhci_enable_card_detection(struct sdhci_host *host) 179 { 180 sdhci_set_card_detection(host, true); 181 } 182 183 static void sdhci_disable_card_detection(struct sdhci_host *host) 184 { 185 sdhci_set_card_detection(host, false); 186 } 187 188 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) 189 { 190 if (host->bus_on) 191 return; 192 host->bus_on = true; 193 pm_runtime_get_noresume(mmc_dev(host->mmc)); 194 } 195 196 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) 197 { 198 if (!host->bus_on) 199 return; 200 host->bus_on = false; 201 pm_runtime_put_noidle(mmc_dev(host->mmc)); 202 } 203 204 void sdhci_reset(struct sdhci_host *host, u8 mask) 205 { 206 ktime_t timeout; 207 208 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 209 210 if (mask & SDHCI_RESET_ALL) { 211 host->clock = 0; 212 /* Reset-all turns off SD Bus Power */ 213 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 214 sdhci_runtime_pm_bus_off(host); 215 } 216 217 /* Wait max 100 ms */ 218 timeout = ktime_add_ms(ktime_get(), 100); 219 220 /* hw clears the bit when it's done */ 221 while (1) { 222 bool timedout = ktime_after(ktime_get(), timeout); 223 224 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) 225 break; 226 if (timedout) { 227 pr_err("%s: Reset 0x%x never completed.\n", 228 mmc_hostname(host->mmc), (int)mask); 229 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 230 sdhci_dumpregs(host); 231 return; 232 } 233 udelay(10); 234 } 235 } 236 EXPORT_SYMBOL_GPL(sdhci_reset); 237 238 bool sdhci_do_reset(struct sdhci_host *host, u8 mask) 239 { 240 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 241 struct mmc_host *mmc = host->mmc; 242 243 if (!mmc->ops->get_cd(mmc)) 244 return false; 245 } 246 247 host->ops->reset(host, mask); 248 249 return true; 250 } 251 EXPORT_SYMBOL_GPL(sdhci_do_reset); 252 253 static void sdhci_reset_for_all(struct sdhci_host *host) 254 { 255 if (sdhci_do_reset(host, SDHCI_RESET_ALL)) { 256 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 257 if (host->ops->enable_dma) 258 host->ops->enable_dma(host); 259 } 260 /* Resetting the controller clears many */ 261 host->preset_enabled = false; 262 } 263 } 264 265 enum sdhci_reset_reason { 266 SDHCI_RESET_FOR_INIT, 267 SDHCI_RESET_FOR_REQUEST_ERROR, 268 SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY, 269 SDHCI_RESET_FOR_TUNING_ABORT, 270 SDHCI_RESET_FOR_CARD_REMOVED, 271 SDHCI_RESET_FOR_CQE_RECOVERY, 272 }; 273 274 static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason) 275 { 276 if (host->quirks2 & SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER) { 277 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 278 return; 279 } 280 281 switch (reason) { 282 case SDHCI_RESET_FOR_INIT: 283 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 284 break; 285 case SDHCI_RESET_FOR_REQUEST_ERROR: 286 case SDHCI_RESET_FOR_TUNING_ABORT: 287 case SDHCI_RESET_FOR_CARD_REMOVED: 288 case SDHCI_RESET_FOR_CQE_RECOVERY: 289 sdhci_do_reset(host, SDHCI_RESET_CMD); 290 sdhci_do_reset(host, SDHCI_RESET_DATA); 291 break; 292 case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY: 293 sdhci_do_reset(host, SDHCI_RESET_DATA); 294 break; 295 } 296 } 297 298 #define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r) 299 300 static void 
sdhci_set_default_irqs(struct sdhci_host *host) 301 { 302 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 303 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | 304 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | 305 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | 306 SDHCI_INT_RESPONSE; 307 308 if (host->tuning_mode == SDHCI_TUNING_MODE_2 || 309 host->tuning_mode == SDHCI_TUNING_MODE_3) 310 host->ier |= SDHCI_INT_RETUNE; 311 312 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 313 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 314 } 315 316 static void sdhci_config_dma(struct sdhci_host *host) 317 { 318 u8 ctrl; 319 u16 ctrl2; 320 321 if (host->version < SDHCI_SPEC_200) 322 return; 323 324 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 325 326 /* 327 * Always adjust the DMA selection as some controllers 328 * (e.g. JMicron) can't do PIO properly when the selection 329 * is ADMA. 330 */ 331 ctrl &= ~SDHCI_CTRL_DMA_MASK; 332 if (!(host->flags & SDHCI_REQ_USE_DMA)) 333 goto out; 334 335 /* Note if DMA Select is zero then SDMA is selected */ 336 if (host->flags & SDHCI_USE_ADMA) 337 ctrl |= SDHCI_CTRL_ADMA32; 338 339 if (host->flags & SDHCI_USE_64_BIT_DMA) { 340 /* 341 * If v4 mode, all supported DMA can be 64-bit addressing if 342 * controller supports 64-bit system address, otherwise only 343 * ADMA can support 64-bit addressing. 344 */ 345 if (host->v4_mode) { 346 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 347 ctrl2 |= SDHCI_CTRL_64BIT_ADDR; 348 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 349 } else if (host->flags & SDHCI_USE_ADMA) { 350 /* 351 * Don't need to undo SDHCI_CTRL_ADMA32 in order to 352 * set SDHCI_CTRL_ADMA64. 353 */ 354 ctrl |= SDHCI_CTRL_ADMA64; 355 } 356 } 357 358 out: 359 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 360 } 361 362 static void sdhci_init(struct sdhci_host *host, int soft) 363 { 364 struct mmc_host *mmc = host->mmc; 365 unsigned long flags; 366 367 if (soft) 368 sdhci_reset_for(host, INIT); 369 else 370 sdhci_reset_for_all(host); 371 372 if (host->v4_mode) 373 sdhci_do_enable_v4_mode(host); 374 375 spin_lock_irqsave(&host->lock, flags); 376 sdhci_set_default_irqs(host); 377 spin_unlock_irqrestore(&host->lock, flags); 378 379 host->cqe_on = false; 380 381 if (soft) { 382 /* force clock reconfiguration */ 383 host->clock = 0; 384 host->reinit_uhs = true; 385 mmc->ops->set_ios(mmc, &mmc->ios); 386 } 387 } 388 389 static void sdhci_reinit(struct sdhci_host *host) 390 { 391 u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 392 393 sdhci_init(host, 0); 394 sdhci_enable_card_detection(host); 395 396 /* 397 * A change to the card detect bits indicates a change in present state, 398 * refer sdhci_set_card_detection(). A card detect interrupt might have 399 * been missed while the host controller was being reset, so trigger a 400 * rescan to check. 
401 */ 402 if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT))) 403 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 404 } 405 406 static void __sdhci_led_activate(struct sdhci_host *host) 407 { 408 u8 ctrl; 409 410 if (host->quirks & SDHCI_QUIRK_NO_LED) 411 return; 412 413 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 414 ctrl |= SDHCI_CTRL_LED; 415 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 416 } 417 418 static void __sdhci_led_deactivate(struct sdhci_host *host) 419 { 420 u8 ctrl; 421 422 if (host->quirks & SDHCI_QUIRK_NO_LED) 423 return; 424 425 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 426 ctrl &= ~SDHCI_CTRL_LED; 427 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 428 } 429 430 #if IS_REACHABLE(CONFIG_LEDS_CLASS) 431 static void sdhci_led_control(struct led_classdev *led, 432 enum led_brightness brightness) 433 { 434 struct sdhci_host *host = container_of(led, struct sdhci_host, led); 435 unsigned long flags; 436 437 spin_lock_irqsave(&host->lock, flags); 438 439 if (host->runtime_suspended) 440 goto out; 441 442 if (brightness == LED_OFF) 443 __sdhci_led_deactivate(host); 444 else 445 __sdhci_led_activate(host); 446 out: 447 spin_unlock_irqrestore(&host->lock, flags); 448 } 449 450 static int sdhci_led_register(struct sdhci_host *host) 451 { 452 struct mmc_host *mmc = host->mmc; 453 454 if (host->quirks & SDHCI_QUIRK_NO_LED) 455 return 0; 456 457 snprintf(host->led_name, sizeof(host->led_name), 458 "%s::", mmc_hostname(mmc)); 459 460 host->led.name = host->led_name; 461 host->led.brightness = LED_OFF; 462 host->led.default_trigger = mmc_hostname(mmc); 463 host->led.brightness_set = sdhci_led_control; 464 465 return led_classdev_register(mmc_dev(mmc), &host->led); 466 } 467 468 static void sdhci_led_unregister(struct sdhci_host *host) 469 { 470 if (host->quirks & SDHCI_QUIRK_NO_LED) 471 return; 472 473 led_classdev_unregister(&host->led); 474 } 475 476 static inline void sdhci_led_activate(struct sdhci_host *host) 477 { 478 } 479 480 static inline void sdhci_led_deactivate(struct sdhci_host *host) 481 { 482 } 483 484 #else 485 486 static inline int sdhci_led_register(struct sdhci_host *host) 487 { 488 return 0; 489 } 490 491 static inline void sdhci_led_unregister(struct sdhci_host *host) 492 { 493 } 494 495 static inline void sdhci_led_activate(struct sdhci_host *host) 496 { 497 __sdhci_led_activate(host); 498 } 499 500 static inline void sdhci_led_deactivate(struct sdhci_host *host) 501 { 502 __sdhci_led_deactivate(host); 503 } 504 505 #endif 506 507 void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq, 508 unsigned long timeout) 509 { 510 if (sdhci_data_line_cmd(mrq->cmd)) 511 mod_timer(&host->data_timer, timeout); 512 else 513 mod_timer(&host->timer, timeout); 514 } 515 EXPORT_SYMBOL_GPL(sdhci_mod_timer); 516 517 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq) 518 { 519 if (sdhci_data_line_cmd(mrq->cmd)) 520 del_timer(&host->data_timer); 521 else 522 del_timer(&host->timer); 523 } 524 525 static inline bool sdhci_has_requests(struct sdhci_host *host) 526 { 527 return host->cmd || host->data_cmd; 528 } 529 530 /*****************************************************************************\ 531 * * 532 * Core functions * 533 * * 534 \*****************************************************************************/ 535 536 static void sdhci_read_block_pio(struct sdhci_host *host) 537 { 538 size_t blksize, len, chunk; 539 u32 scratch; 540 u8 *buf; 541 542 DBG("PIO reading\n"); 543 544 blksize = host->data->blksz; 
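	/*
	 * The buffer data port is 32 bits wide: each readl() of SDHCI_BUFFER
	 * below pulls in four bytes, and 'chunk' tracks how many of those
	 * bytes are still waiting in 'scratch' to be copied out to the
	 * scatterlist.
	 */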
545 chunk = 0; 546 547 while (blksize) { 548 BUG_ON(!sg_miter_next(&host->sg_miter)); 549 550 len = min(host->sg_miter.length, blksize); 551 552 blksize -= len; 553 host->sg_miter.consumed = len; 554 555 buf = host->sg_miter.addr; 556 557 while (len) { 558 if (chunk == 0) { 559 scratch = sdhci_readl(host, SDHCI_BUFFER); 560 chunk = 4; 561 } 562 563 *buf = scratch & 0xFF; 564 565 buf++; 566 scratch >>= 8; 567 chunk--; 568 len--; 569 } 570 } 571 572 sg_miter_stop(&host->sg_miter); 573 } 574 575 static void sdhci_write_block_pio(struct sdhci_host *host) 576 { 577 size_t blksize, len, chunk; 578 u32 scratch; 579 u8 *buf; 580 581 DBG("PIO writing\n"); 582 583 blksize = host->data->blksz; 584 chunk = 0; 585 scratch = 0; 586 587 while (blksize) { 588 BUG_ON(!sg_miter_next(&host->sg_miter)); 589 590 len = min(host->sg_miter.length, blksize); 591 592 blksize -= len; 593 host->sg_miter.consumed = len; 594 595 buf = host->sg_miter.addr; 596 597 while (len) { 598 scratch |= (u32)*buf << (chunk * 8); 599 600 buf++; 601 chunk++; 602 len--; 603 604 if ((chunk == 4) || ((len == 0) && (blksize == 0))) { 605 sdhci_writel(host, scratch, SDHCI_BUFFER); 606 chunk = 0; 607 scratch = 0; 608 } 609 } 610 } 611 612 sg_miter_stop(&host->sg_miter); 613 } 614 615 static void sdhci_transfer_pio(struct sdhci_host *host) 616 { 617 u32 mask; 618 619 if (host->blocks == 0) 620 return; 621 622 if (host->data->flags & MMC_DATA_READ) 623 mask = SDHCI_DATA_AVAILABLE; 624 else 625 mask = SDHCI_SPACE_AVAILABLE; 626 627 /* 628 * Some controllers (JMicron JMB38x) mess up the buffer bits 629 * for transfers < 4 bytes. As long as it is just one block, 630 * we can ignore the bits. 631 */ 632 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && 633 (host->data->blocks == 1)) 634 mask = ~0; 635 636 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 637 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) 638 udelay(100); 639 640 if (host->data->flags & MMC_DATA_READ) 641 sdhci_read_block_pio(host); 642 else 643 sdhci_write_block_pio(host); 644 645 host->blocks--; 646 if (host->blocks == 0) 647 break; 648 } 649 650 DBG("PIO transfer complete.\n"); 651 } 652 653 static int sdhci_pre_dma_transfer(struct sdhci_host *host, 654 struct mmc_data *data, int cookie) 655 { 656 int sg_count; 657 658 /* 659 * If the data buffers are already mapped, return the previous 660 * dma_map_sg() result. 
661 */ 662 if (data->host_cookie == COOKIE_PRE_MAPPED) 663 return data->sg_count; 664 665 /* Bounce write requests to the bounce buffer */ 666 if (host->bounce_buffer) { 667 unsigned int length = data->blksz * data->blocks; 668 669 if (length > host->bounce_buffer_size) { 670 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", 671 mmc_hostname(host->mmc), length, 672 host->bounce_buffer_size); 673 return -EIO; 674 } 675 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { 676 /* Copy the data to the bounce buffer */ 677 if (host->ops->copy_to_bounce_buffer) { 678 host->ops->copy_to_bounce_buffer(host, 679 data, length); 680 } else { 681 sg_copy_to_buffer(data->sg, data->sg_len, 682 host->bounce_buffer, length); 683 } 684 } 685 /* Switch ownership to the DMA */ 686 dma_sync_single_for_device(mmc_dev(host->mmc), 687 host->bounce_addr, 688 host->bounce_buffer_size, 689 mmc_get_dma_dir(data)); 690 /* Just a dummy value */ 691 sg_count = 1; 692 } else { 693 /* Just access the data directly from memory */ 694 sg_count = dma_map_sg(mmc_dev(host->mmc), 695 data->sg, data->sg_len, 696 mmc_get_dma_dir(data)); 697 } 698 699 if (sg_count == 0) 700 return -ENOSPC; 701 702 data->sg_count = sg_count; 703 data->host_cookie = cookie; 704 705 return sg_count; 706 } 707 708 static char *sdhci_kmap_atomic(struct scatterlist *sg) 709 { 710 return kmap_local_page(sg_page(sg)) + sg->offset; 711 } 712 713 static void sdhci_kunmap_atomic(void *buffer) 714 { 715 kunmap_local(buffer); 716 } 717 718 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, 719 dma_addr_t addr, int len, unsigned int cmd) 720 { 721 struct sdhci_adma2_64_desc *dma_desc = *desc; 722 723 /* 32-bit and 64-bit descriptors have these members in same position */ 724 dma_desc->cmd = cpu_to_le16(cmd); 725 dma_desc->len = cpu_to_le16(len); 726 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr)); 727 728 if (host->flags & SDHCI_USE_64_BIT_DMA) 729 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr)); 730 731 *desc += host->desc_sz; 732 } 733 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc); 734 735 static inline void __sdhci_adma_write_desc(struct sdhci_host *host, 736 void **desc, dma_addr_t addr, 737 int len, unsigned int cmd) 738 { 739 if (host->ops->adma_write_desc) 740 host->ops->adma_write_desc(host, desc, addr, len, cmd); 741 else 742 sdhci_adma_write_desc(host, desc, addr, len, cmd); 743 } 744 745 static void sdhci_adma_mark_end(void *desc) 746 { 747 struct sdhci_adma2_64_desc *dma_desc = desc; 748 749 /* 32-bit and 64-bit descriptors have 'cmd' in same position */ 750 dma_desc->cmd |= cpu_to_le16(ADMA2_END); 751 } 752 753 static void sdhci_adma_table_pre(struct sdhci_host *host, 754 struct mmc_data *data, int sg_count) 755 { 756 struct scatterlist *sg; 757 dma_addr_t addr, align_addr; 758 void *desc, *align; 759 char *buffer; 760 int len, offset, i; 761 762 /* 763 * The spec does not specify endianness of descriptor table. 764 * We currently guess that it is LE. 765 */ 766 767 host->sg_count = sg_count; 768 769 desc = host->adma_table; 770 align = host->align_buffer; 771 772 align_addr = host->align_addr; 773 774 for_each_sg(data->sg, sg, host->sg_count, i) { 775 addr = sg_dma_address(sg); 776 len = sg_dma_len(sg); 777 778 /* 779 * The SDHCI specification states that ADMA addresses must 780 * be 32-bit aligned. If they aren't, then we use a bounce 781 * buffer for the (up to three) bytes that screw up the 782 * alignment. 
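		 * For example, a segment whose DMA address ends in 0b01 has its
		 * first three bytes staged through the bounce buffer, so that
		 * the remainder of the segment starts on a 32-bit boundary.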
783 */ 784 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) & 785 SDHCI_ADMA2_MASK; 786 if (offset) { 787 if (data->flags & MMC_DATA_WRITE) { 788 buffer = sdhci_kmap_atomic(sg); 789 memcpy(align, buffer, offset); 790 sdhci_kunmap_atomic(buffer); 791 } 792 793 /* tran, valid */ 794 __sdhci_adma_write_desc(host, &desc, align_addr, 795 offset, ADMA2_TRAN_VALID); 796 797 BUG_ON(offset > 65536); 798 799 align += SDHCI_ADMA2_ALIGN; 800 align_addr += SDHCI_ADMA2_ALIGN; 801 802 addr += offset; 803 len -= offset; 804 } 805 806 /* 807 * The block layer forces a minimum segment size of PAGE_SIZE, 808 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write 809 * multiple descriptors, noting that the ADMA table is sized 810 * for 4KiB chunks anyway, so it will be big enough. 811 */ 812 while (len > host->max_adma) { 813 int n = 32 * 1024; /* 32KiB*/ 814 815 __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID); 816 addr += n; 817 len -= n; 818 } 819 820 /* tran, valid */ 821 if (len) 822 __sdhci_adma_write_desc(host, &desc, addr, len, 823 ADMA2_TRAN_VALID); 824 825 /* 826 * If this triggers then we have a calculation bug 827 * somewhere. :/ 828 */ 829 WARN_ON((desc - host->adma_table) >= host->adma_table_sz); 830 } 831 832 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 833 /* Mark the last descriptor as the terminating descriptor */ 834 if (desc != host->adma_table) { 835 desc -= host->desc_sz; 836 sdhci_adma_mark_end(desc); 837 } 838 } else { 839 /* Add a terminating entry - nop, end, valid */ 840 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID); 841 } 842 } 843 844 static void sdhci_adma_table_post(struct sdhci_host *host, 845 struct mmc_data *data) 846 { 847 struct scatterlist *sg; 848 int i, size; 849 void *align; 850 char *buffer; 851 852 if (data->flags & MMC_DATA_READ) { 853 bool has_unaligned = false; 854 855 /* Do a quick scan of the SG list for any unaligned mappings */ 856 for_each_sg(data->sg, sg, host->sg_count, i) 857 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 858 has_unaligned = true; 859 break; 860 } 861 862 if (has_unaligned) { 863 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, 864 data->sg_len, DMA_FROM_DEVICE); 865 866 align = host->align_buffer; 867 868 for_each_sg(data->sg, sg, host->sg_count, i) { 869 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 870 size = SDHCI_ADMA2_ALIGN - 871 (sg_dma_address(sg) & SDHCI_ADMA2_MASK); 872 873 buffer = sdhci_kmap_atomic(sg); 874 memcpy(buffer, align, size); 875 sdhci_kunmap_atomic(buffer); 876 877 align += SDHCI_ADMA2_ALIGN; 878 } 879 } 880 } 881 } 882 } 883 884 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr) 885 { 886 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS); 887 if (host->flags & SDHCI_USE_64_BIT_DMA) 888 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI); 889 } 890 891 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host) 892 { 893 if (host->bounce_buffer) 894 return host->bounce_addr; 895 else 896 return sg_dma_address(host->data->sg); 897 } 898 899 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr) 900 { 901 if (host->v4_mode) 902 sdhci_set_adma_addr(host, addr); 903 else 904 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS); 905 } 906 907 static unsigned int sdhci_target_timeout(struct sdhci_host *host, 908 struct mmc_command *cmd, 909 struct mmc_data *data) 910 { 911 unsigned int target_timeout; 912 913 /* timeout in us */ 914 if (!data) { 915 target_timeout = cmd->busy_timeout * 1000; 916 } else { 917 
target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000); 918 if (host->clock && data->timeout_clks) { 919 unsigned long long val; 920 921 /* 922 * data->timeout_clks is in units of clock cycles. 923 * host->clock is in Hz. target_timeout is in us. 924 * Hence, us = 1000000 * cycles / Hz. Round up. 925 */ 926 val = 1000000ULL * data->timeout_clks; 927 if (do_div(val, host->clock)) 928 target_timeout++; 929 target_timeout += val; 930 } 931 } 932 933 return target_timeout; 934 } 935 936 static void sdhci_calc_sw_timeout(struct sdhci_host *host, 937 struct mmc_command *cmd) 938 { 939 struct mmc_data *data = cmd->data; 940 struct mmc_host *mmc = host->mmc; 941 struct mmc_ios *ios = &mmc->ios; 942 unsigned char bus_width = 1 << ios->bus_width; 943 unsigned int blksz; 944 unsigned int freq; 945 u64 target_timeout; 946 u64 transfer_time; 947 948 target_timeout = sdhci_target_timeout(host, cmd, data); 949 target_timeout *= NSEC_PER_USEC; 950 951 if (data) { 952 blksz = data->blksz; 953 freq = mmc->actual_clock ? : host->clock; 954 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width); 955 do_div(transfer_time, freq); 956 /* multiply by '2' to account for any unknowns */ 957 transfer_time = transfer_time * 2; 958 /* calculate timeout for the entire data */ 959 host->data_timeout = data->blocks * target_timeout + 960 transfer_time; 961 } else { 962 host->data_timeout = target_timeout; 963 } 964 965 if (host->data_timeout) 966 host->data_timeout += MMC_CMD_TRANSFER_TIME; 967 } 968 969 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, 970 bool *too_big) 971 { 972 u8 count; 973 struct mmc_data *data; 974 unsigned target_timeout, current_timeout; 975 976 *too_big = false; 977 978 /* 979 * If the host controller provides us with an incorrect timeout 980 * value, just skip the check and use the maximum. The hardware may take 981 * longer to time out, but that's much better than having a too-short 982 * timeout value. 983 */ 984 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) 985 return host->max_timeout_count; 986 987 /* Unspecified command, assume max */ 988 if (cmd == NULL) 989 return host->max_timeout_count; 990 991 data = cmd->data; 992 /* Unspecified timeout, assume max */ 993 if (!data && !cmd->busy_timeout) 994 return host->max_timeout_count; 995 996 /* timeout in us */ 997 target_timeout = sdhci_target_timeout(host, cmd, data); 998 999 /* 1000 * Figure out needed cycles. 1001 * We do this in steps in order to fit inside a 32 bit int. 
1002 * The first step is the minimum timeout, which will have a 1003 * minimum resolution of 6 bits: 1004 * (1) 2^13*1000 > 2^22, 1005 * (2) host->timeout_clk < 2^16 1006 * => 1007 * (1) / (2) > 2^6 1008 */ 1009 count = 0; 1010 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 1011 while (current_timeout < target_timeout) { 1012 count++; 1013 current_timeout <<= 1; 1014 if (count > host->max_timeout_count) { 1015 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT)) 1016 DBG("Too large timeout 0x%x requested for CMD%d!\n", 1017 count, cmd->opcode); 1018 count = host->max_timeout_count; 1019 *too_big = true; 1020 break; 1021 } 1022 } 1023 1024 return count; 1025 } 1026 1027 static void sdhci_set_transfer_irqs(struct sdhci_host *host) 1028 { 1029 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; 1030 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; 1031 1032 if (host->flags & SDHCI_REQ_USE_DMA) 1033 host->ier = (host->ier & ~pio_irqs) | dma_irqs; 1034 else 1035 host->ier = (host->ier & ~dma_irqs) | pio_irqs; 1036 1037 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12)) 1038 host->ier |= SDHCI_INT_AUTO_CMD_ERR; 1039 else 1040 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR; 1041 1042 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1043 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1044 } 1045 1046 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) 1047 { 1048 if (enable) 1049 host->ier |= SDHCI_INT_DATA_TIMEOUT; 1050 else 1051 host->ier &= ~SDHCI_INT_DATA_TIMEOUT; 1052 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1053 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1054 } 1055 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); 1056 1057 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1058 { 1059 bool too_big = false; 1060 u8 count = sdhci_calc_timeout(host, cmd, &too_big); 1061 1062 if (too_big && 1063 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { 1064 sdhci_calc_sw_timeout(host, cmd); 1065 sdhci_set_data_timeout_irq(host, false); 1066 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { 1067 sdhci_set_data_timeout_irq(host, true); 1068 } 1069 1070 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); 1071 } 1072 EXPORT_SYMBOL_GPL(__sdhci_set_timeout); 1073 1074 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1075 { 1076 if (host->ops->set_timeout) 1077 host->ops->set_timeout(host, cmd); 1078 else 1079 __sdhci_set_timeout(host, cmd); 1080 } 1081 1082 void sdhci_initialize_data(struct sdhci_host *host, struct mmc_data *data) 1083 { 1084 WARN_ON(host->data); 1085 1086 /* Sanity checks */ 1087 BUG_ON(data->blksz * data->blocks > 524288); 1088 BUG_ON(data->blksz > host->mmc->max_blk_size); 1089 BUG_ON(data->blocks > 65535); 1090 1091 host->data = data; 1092 host->data_early = 0; 1093 host->data->bytes_xfered = 0; 1094 } 1095 EXPORT_SYMBOL_GPL(sdhci_initialize_data); 1096 1097 static inline void sdhci_set_block_info(struct sdhci_host *host, 1098 struct mmc_data *data) 1099 { 1100 /* Set the DMA boundary value and block size */ 1101 sdhci_writew(host, 1102 SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), 1103 SDHCI_BLOCK_SIZE); 1104 /* 1105 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count 1106 * can be supported, in that case 16-bit block count register must be 0. 
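	 * The 32-bit count is then written to SDHCI_32BIT_BLK_CNT below,
	 * after first zeroing the 16-bit register if it still holds a stale
	 * value.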
1107 */ 1108 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1109 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { 1110 if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) 1111 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); 1112 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); 1113 } else { 1114 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 1115 } 1116 } 1117 1118 void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data) 1119 { 1120 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 1121 struct scatterlist *sg; 1122 unsigned int length_mask, offset_mask; 1123 int i; 1124 1125 host->flags |= SDHCI_REQ_USE_DMA; 1126 1127 /* 1128 * FIXME: This doesn't account for merging when mapping the 1129 * scatterlist. 1130 * 1131 * The assumption here being that alignment and lengths are 1132 * the same after DMA mapping to device address space. 1133 */ 1134 length_mask = 0; 1135 offset_mask = 0; 1136 if (host->flags & SDHCI_USE_ADMA) { 1137 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) { 1138 length_mask = 3; 1139 /* 1140 * As we use up to 3 byte chunks to work 1141 * around alignment problems, we need to 1142 * check the offset as well. 1143 */ 1144 offset_mask = 3; 1145 } 1146 } else { 1147 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) 1148 length_mask = 3; 1149 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) 1150 offset_mask = 3; 1151 } 1152 1153 if (unlikely(length_mask | offset_mask)) { 1154 for_each_sg(data->sg, sg, data->sg_len, i) { 1155 if (sg->length & length_mask) { 1156 DBG("Reverting to PIO because of transfer size (%d)\n", 1157 sg->length); 1158 host->flags &= ~SDHCI_REQ_USE_DMA; 1159 break; 1160 } 1161 if (sg->offset & offset_mask) { 1162 DBG("Reverting to PIO because of bad alignment\n"); 1163 host->flags &= ~SDHCI_REQ_USE_DMA; 1164 break; 1165 } 1166 } 1167 } 1168 } 1169 1170 sdhci_config_dma(host); 1171 1172 if (host->flags & SDHCI_REQ_USE_DMA) { 1173 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1174 1175 if (sg_cnt <= 0) { 1176 /* 1177 * This only happens when someone fed 1178 * us an invalid request. 
1179 */ 1180 WARN_ON(1); 1181 host->flags &= ~SDHCI_REQ_USE_DMA; 1182 } else if (host->flags & SDHCI_USE_ADMA) { 1183 sdhci_adma_table_pre(host, data, sg_cnt); 1184 sdhci_set_adma_addr(host, host->adma_addr); 1185 } else { 1186 WARN_ON(sg_cnt != 1); 1187 sdhci_set_sdma_addr(host, sdhci_sdma_address(host)); 1188 } 1189 } 1190 1191 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 1192 int flags; 1193 1194 flags = SG_MITER_ATOMIC; 1195 if (host->data->flags & MMC_DATA_READ) 1196 flags |= SG_MITER_TO_SG; 1197 else 1198 flags |= SG_MITER_FROM_SG; 1199 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1200 host->blocks = data->blocks; 1201 } 1202 1203 sdhci_set_transfer_irqs(host); 1204 } 1205 EXPORT_SYMBOL_GPL(sdhci_prepare_dma); 1206 1207 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) 1208 { 1209 struct mmc_data *data = cmd->data; 1210 1211 sdhci_initialize_data(host, data); 1212 1213 sdhci_prepare_dma(host, data); 1214 1215 sdhci_set_block_info(host, data); 1216 } 1217 1218 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) 1219 1220 static int sdhci_external_dma_init(struct sdhci_host *host) 1221 { 1222 int ret = 0; 1223 struct mmc_host *mmc = host->mmc; 1224 1225 host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx"); 1226 if (IS_ERR(host->tx_chan)) { 1227 ret = PTR_ERR(host->tx_chan); 1228 if (ret != -EPROBE_DEFER) 1229 pr_warn("Failed to request TX DMA channel.\n"); 1230 host->tx_chan = NULL; 1231 return ret; 1232 } 1233 1234 host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx"); 1235 if (IS_ERR(host->rx_chan)) { 1236 if (host->tx_chan) { 1237 dma_release_channel(host->tx_chan); 1238 host->tx_chan = NULL; 1239 } 1240 1241 ret = PTR_ERR(host->rx_chan); 1242 if (ret != -EPROBE_DEFER) 1243 pr_warn("Failed to request RX DMA channel.\n"); 1244 host->rx_chan = NULL; 1245 } 1246 1247 return ret; 1248 } 1249 1250 static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1251 struct mmc_data *data) 1252 { 1253 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; 1254 } 1255 1256 static int sdhci_external_dma_setup(struct sdhci_host *host, 1257 struct mmc_command *cmd) 1258 { 1259 int ret, i; 1260 enum dma_transfer_direction dir; 1261 struct dma_async_tx_descriptor *desc; 1262 struct mmc_data *data = cmd->data; 1263 struct dma_chan *chan; 1264 struct dma_slave_config cfg; 1265 dma_cookie_t cookie; 1266 int sg_cnt; 1267 1268 if (!host->mapbase) 1269 return -EINVAL; 1270 1271 memset(&cfg, 0, sizeof(cfg)); 1272 cfg.src_addr = host->mapbase + SDHCI_BUFFER; 1273 cfg.dst_addr = host->mapbase + SDHCI_BUFFER; 1274 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1275 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1276 cfg.src_maxburst = data->blksz / 4; 1277 cfg.dst_maxburst = data->blksz / 4; 1278 1279 /* Sanity check: all the SG entries must be aligned by block size. */ 1280 for (i = 0; i < data->sg_len; i++) { 1281 if ((data->sg + i)->length % data->blksz) 1282 return -EINVAL; 1283 } 1284 1285 chan = sdhci_external_dma_channel(host, data); 1286 1287 ret = dmaengine_slave_config(chan, &cfg); 1288 if (ret) 1289 return ret; 1290 1291 sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1292 if (sg_cnt <= 0) 1293 return -EINVAL; 1294 1295 dir = data->flags & MMC_DATA_WRITE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; 1296 desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir, 1297 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1298 if (!desc) 1299 return -EINVAL; 1300 1301 desc->callback = NULL; 1302 desc->callback_param = NULL; 1303 1304 cookie = dmaengine_submit(desc); 1305 if (dma_submit_error(cookie)) 1306 ret = cookie; 1307 1308 return ret; 1309 } 1310 1311 static void sdhci_external_dma_release(struct sdhci_host *host) 1312 { 1313 if (host->tx_chan) { 1314 dma_release_channel(host->tx_chan); 1315 host->tx_chan = NULL; 1316 } 1317 1318 if (host->rx_chan) { 1319 dma_release_channel(host->rx_chan); 1320 host->rx_chan = NULL; 1321 } 1322 1323 sdhci_switch_external_dma(host, false); 1324 } 1325 1326 static void __sdhci_external_dma_prepare_data(struct sdhci_host *host, 1327 struct mmc_command *cmd) 1328 { 1329 struct mmc_data *data = cmd->data; 1330 1331 sdhci_initialize_data(host, data); 1332 1333 host->flags |= SDHCI_REQ_USE_DMA; 1334 sdhci_set_transfer_irqs(host); 1335 1336 sdhci_set_block_info(host, data); 1337 } 1338 1339 static void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1340 struct mmc_command *cmd) 1341 { 1342 if (!sdhci_external_dma_setup(host, cmd)) { 1343 __sdhci_external_dma_prepare_data(host, cmd); 1344 } else { 1345 sdhci_external_dma_release(host); 1346 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n", 1347 mmc_hostname(host->mmc)); 1348 sdhci_prepare_data(host, cmd); 1349 } 1350 } 1351 1352 static void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1353 struct mmc_command *cmd) 1354 { 1355 struct dma_chan *chan; 1356 1357 if (!cmd->data) 1358 return; 1359 1360 chan = sdhci_external_dma_channel(host, cmd->data); 1361 if (chan) 1362 dma_async_issue_pending(chan); 1363 } 1364 1365 #else 1366 1367 static inline int sdhci_external_dma_init(struct sdhci_host *host) 1368 { 1369 return -EOPNOTSUPP; 1370 } 1371 1372 static inline void sdhci_external_dma_release(struct sdhci_host *host) 1373 { 1374 } 1375 1376 static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1377 struct mmc_command *cmd) 1378 { 1379 /* This should never happen */ 1380 WARN_ON_ONCE(1); 1381 } 1382 1383 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1384 struct mmc_command *cmd) 1385 { 1386 } 1387 1388 static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1389 struct mmc_data *data) 1390 { 1391 return NULL; 1392 } 1393 1394 #endif 1395 1396 void sdhci_switch_external_dma(struct sdhci_host *host, bool en) 1397 { 1398 host->use_external_dma = en; 1399 } 1400 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma); 1401 1402 static inline bool sdhci_auto_cmd12(struct sdhci_host *host, 1403 struct mmc_request *mrq) 1404 { 1405 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) && 1406 !mrq->cap_cmd_during_tfr; 1407 } 1408 1409 static inline bool sdhci_auto_cmd23(struct sdhci_host *host, 1410 struct mmc_request *mrq) 1411 { 1412 return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23); 1413 } 1414 1415 static inline bool sdhci_manual_cmd23(struct sdhci_host *host, 1416 struct mmc_request *mrq) 1417 { 1418 return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23); 1419 } 1420 1421 static inline void sdhci_auto_cmd_select(struct sdhci_host *host, 1422 struct mmc_command *cmd, 1423 u16 *mode) 1424 { 1425 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) && 1426 (cmd->opcode != SD_IO_RW_EXTENDED); 1427 bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq); 1428 u16 ctrl2; 
1429 1430 /* 1431 * In case of Version 4.10 or later, use of 'Auto CMD Auto 1432 * Select' is recommended rather than use of 'Auto CMD12 1433 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode 1434 * here because some controllers (e.g sdhci-of-dwmshc) expect it. 1435 */ 1436 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1437 (use_cmd12 || use_cmd23)) { 1438 *mode |= SDHCI_TRNS_AUTO_SEL; 1439 1440 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1441 if (use_cmd23) 1442 ctrl2 |= SDHCI_CMD23_ENABLE; 1443 else 1444 ctrl2 &= ~SDHCI_CMD23_ENABLE; 1445 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 1446 1447 return; 1448 } 1449 1450 /* 1451 * If we are sending CMD23, CMD12 never gets sent 1452 * on successful completion (so no Auto-CMD12). 1453 */ 1454 if (use_cmd12) 1455 *mode |= SDHCI_TRNS_AUTO_CMD12; 1456 else if (use_cmd23) 1457 *mode |= SDHCI_TRNS_AUTO_CMD23; 1458 } 1459 1460 static void sdhci_set_transfer_mode(struct sdhci_host *host, 1461 struct mmc_command *cmd) 1462 { 1463 u16 mode = 0; 1464 struct mmc_data *data = cmd->data; 1465 1466 if (data == NULL) { 1467 if (host->quirks2 & 1468 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) { 1469 /* must not clear SDHCI_TRANSFER_MODE when tuning */ 1470 if (!mmc_op_tuning(cmd->opcode)) 1471 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE); 1472 } else { 1473 /* clear Auto CMD settings for no data CMDs */ 1474 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); 1475 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 | 1476 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE); 1477 } 1478 return; 1479 } 1480 1481 WARN_ON(!host->data); 1482 1483 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE)) 1484 mode = SDHCI_TRNS_BLK_CNT_EN; 1485 1486 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { 1487 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI; 1488 sdhci_auto_cmd_select(host, cmd, &mode); 1489 if (sdhci_auto_cmd23(host, cmd->mrq)) 1490 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2); 1491 } 1492 1493 if (data->flags & MMC_DATA_READ) 1494 mode |= SDHCI_TRNS_READ; 1495 if (host->flags & SDHCI_REQ_USE_DMA) 1496 mode |= SDHCI_TRNS_DMA; 1497 1498 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); 1499 } 1500 1501 bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) 1502 { 1503 return (!(host->flags & SDHCI_DEVICE_DEAD) && 1504 ((mrq->cmd && mrq->cmd->error) || 1505 (mrq->sbc && mrq->sbc->error) || 1506 (mrq->data && mrq->data->stop && mrq->data->stop->error) || 1507 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); 1508 } 1509 EXPORT_SYMBOL_GPL(sdhci_needs_reset); 1510 1511 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq) 1512 { 1513 int i; 1514 1515 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1516 if (host->mrqs_done[i] == mrq) { 1517 WARN_ON(1); 1518 return; 1519 } 1520 } 1521 1522 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1523 if (!host->mrqs_done[i]) { 1524 host->mrqs_done[i] = mrq; 1525 break; 1526 } 1527 } 1528 1529 WARN_ON(i >= SDHCI_MAX_MRQS); 1530 } 1531 1532 void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1533 { 1534 if (host->cmd && host->cmd->mrq == mrq) 1535 host->cmd = NULL; 1536 1537 if (host->data_cmd && host->data_cmd->mrq == mrq) 1538 host->data_cmd = NULL; 1539 1540 if (host->deferred_cmd && host->deferred_cmd->mrq == mrq) 1541 host->deferred_cmd = NULL; 1542 1543 if (host->data && host->data->mrq == mrq) 1544 host->data = NULL; 1545 1546 if (sdhci_needs_reset(host, mrq)) 1547 host->pending_reset = true; 1548 1549 sdhci_set_mrq_done(host, mrq); 1550 1551 
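	/*
	 * The request is now recorded in mrqs_done[], so stop its software
	 * timeout timer and, if nothing else is in flight, turn the activity
	 * LED back off.
	 */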
sdhci_del_timer(host, mrq); 1552 1553 if (!sdhci_has_requests(host)) 1554 sdhci_led_deactivate(host); 1555 } 1556 EXPORT_SYMBOL_GPL(__sdhci_finish_mrq); 1557 1558 void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1559 { 1560 __sdhci_finish_mrq(host, mrq); 1561 1562 queue_work(host->complete_wq, &host->complete_work); 1563 } 1564 EXPORT_SYMBOL_GPL(sdhci_finish_mrq); 1565 1566 void __sdhci_finish_data_common(struct sdhci_host *host, bool defer_reset) 1567 { 1568 struct mmc_command *data_cmd = host->data_cmd; 1569 struct mmc_data *data = host->data; 1570 1571 host->data = NULL; 1572 host->data_cmd = NULL; 1573 1574 /* 1575 * The controller needs a reset of internal state machines upon error 1576 * conditions. 1577 */ 1578 if (data->error) { 1579 if (defer_reset) 1580 host->pending_reset = true; 1581 else if (!host->cmd || host->cmd == data_cmd) 1582 sdhci_reset_for(host, REQUEST_ERROR); 1583 else 1584 sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY); 1585 } 1586 1587 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) == 1588 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) 1589 sdhci_adma_table_post(host, data); 1590 1591 /* 1592 * The specification states that the block count register must 1593 * be updated, but it does not specify at what point in the 1594 * data flow. That makes the register entirely useless to read 1595 * back so we have to assume that nothing made it to the card 1596 * in the event of an error. 1597 */ 1598 if (data->error) 1599 data->bytes_xfered = 0; 1600 else 1601 data->bytes_xfered = data->blksz * data->blocks; 1602 } 1603 EXPORT_SYMBOL_GPL(__sdhci_finish_data_common); 1604 1605 static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout) 1606 { 1607 struct mmc_data *data = host->data; 1608 1609 __sdhci_finish_data_common(host, false); 1610 1611 /* 1612 * Need to send CMD12 if - 1613 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) 1614 * b) error in multiblock transfer 1615 */ 1616 if (data->stop && 1617 ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || 1618 data->error)) { 1619 /* 1620 * 'cap_cmd_during_tfr' request must not use the command line 1621 * after mmc_command_done() has been called. It is upper layer's 1622 * responsibility to send the stop command if required. 1623 */ 1624 if (data->mrq->cap_cmd_during_tfr) { 1625 __sdhci_finish_mrq(host, data->mrq); 1626 } else { 1627 /* Avoid triggering warning in sdhci_send_command() */ 1628 host->cmd = NULL; 1629 if (!sdhci_send_command(host, data->stop)) { 1630 if (sw_data_timeout) { 1631 /* 1632 * This is anyway a sw data timeout, so 1633 * give up now. 
1634 */ 1635 data->stop->error = -EIO; 1636 __sdhci_finish_mrq(host, data->mrq); 1637 } else { 1638 WARN_ON(host->deferred_cmd); 1639 host->deferred_cmd = data->stop; 1640 } 1641 } 1642 } 1643 } else { 1644 __sdhci_finish_mrq(host, data->mrq); 1645 } 1646 } 1647 1648 static void sdhci_finish_data(struct sdhci_host *host) 1649 { 1650 __sdhci_finish_data(host, false); 1651 } 1652 1653 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) 1654 { 1655 int flags; 1656 u32 mask; 1657 unsigned long timeout; 1658 1659 WARN_ON(host->cmd); 1660 1661 /* Initially, a command has no error */ 1662 cmd->error = 0; 1663 1664 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && 1665 cmd->opcode == MMC_STOP_TRANSMISSION) 1666 cmd->flags |= MMC_RSP_BUSY; 1667 1668 mask = SDHCI_CMD_INHIBIT; 1669 if (sdhci_data_line_cmd(cmd)) 1670 mask |= SDHCI_DATA_INHIBIT; 1671 1672 /* We shouldn't wait for data inihibit for stop commands, even 1673 though they might use busy signaling */ 1674 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop)) 1675 mask &= ~SDHCI_DATA_INHIBIT; 1676 1677 if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) 1678 return false; 1679 1680 host->cmd = cmd; 1681 host->data_timeout = 0; 1682 if (sdhci_data_line_cmd(cmd)) { 1683 WARN_ON(host->data_cmd); 1684 host->data_cmd = cmd; 1685 sdhci_set_timeout(host, cmd); 1686 } 1687 1688 if (cmd->data) { 1689 if (host->use_external_dma) 1690 sdhci_external_dma_prepare_data(host, cmd); 1691 else 1692 sdhci_prepare_data(host, cmd); 1693 } 1694 1695 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); 1696 1697 sdhci_set_transfer_mode(host, cmd); 1698 1699 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 1700 WARN_ONCE(1, "Unsupported response type!\n"); 1701 /* 1702 * This does not happen in practice because 136-bit response 1703 * commands never have busy waiting, so rather than complicate 1704 * the error path, just remove busy waiting and continue. 
1705 */ 1706 cmd->flags &= ~MMC_RSP_BUSY; 1707 } 1708 1709 if (!(cmd->flags & MMC_RSP_PRESENT)) 1710 flags = SDHCI_CMD_RESP_NONE; 1711 else if (cmd->flags & MMC_RSP_136) 1712 flags = SDHCI_CMD_RESP_LONG; 1713 else if (cmd->flags & MMC_RSP_BUSY) 1714 flags = SDHCI_CMD_RESP_SHORT_BUSY; 1715 else 1716 flags = SDHCI_CMD_RESP_SHORT; 1717 1718 if (cmd->flags & MMC_RSP_CRC) 1719 flags |= SDHCI_CMD_CRC; 1720 if (cmd->flags & MMC_RSP_OPCODE) 1721 flags |= SDHCI_CMD_INDEX; 1722 1723 /* CMD19 is special in that the Data Present Select should be set */ 1724 if (cmd->data || mmc_op_tuning(cmd->opcode)) 1725 flags |= SDHCI_CMD_DATA; 1726 1727 timeout = jiffies; 1728 if (host->data_timeout) 1729 timeout += nsecs_to_jiffies(host->data_timeout); 1730 else if (!cmd->data && cmd->busy_timeout > 9000) 1731 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ; 1732 else 1733 timeout += 10 * HZ; 1734 sdhci_mod_timer(host, cmd->mrq, timeout); 1735 1736 if (host->use_external_dma) 1737 sdhci_external_dma_pre_transfer(host, cmd); 1738 1739 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); 1740 1741 return true; 1742 } 1743 1744 bool sdhci_present_error(struct sdhci_host *host, 1745 struct mmc_command *cmd, bool present) 1746 { 1747 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 1748 cmd->error = -ENOMEDIUM; 1749 return true; 1750 } 1751 1752 return false; 1753 } 1754 EXPORT_SYMBOL_GPL(sdhci_present_error); 1755 1756 static bool sdhci_send_command_retry(struct sdhci_host *host, 1757 struct mmc_command *cmd, 1758 unsigned long flags) 1759 __releases(host->lock) 1760 __acquires(host->lock) 1761 { 1762 struct mmc_command *deferred_cmd = host->deferred_cmd; 1763 int timeout = 10; /* Approx. 10 ms */ 1764 bool present; 1765 1766 while (!sdhci_send_command(host, cmd)) { 1767 if (!timeout--) { 1768 pr_err("%s: Controller never released inhibit bit(s).\n", 1769 mmc_hostname(host->mmc)); 1770 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 1771 sdhci_dumpregs(host); 1772 cmd->error = -EIO; 1773 return false; 1774 } 1775 1776 spin_unlock_irqrestore(&host->lock, flags); 1777 1778 usleep_range(1000, 1250); 1779 1780 present = host->mmc->ops->get_cd(host->mmc); 1781 1782 spin_lock_irqsave(&host->lock, flags); 1783 1784 /* A deferred command might disappear, handle that */ 1785 if (cmd == deferred_cmd && cmd != host->deferred_cmd) 1786 return true; 1787 1788 if (sdhci_present_error(host, cmd, present)) 1789 return false; 1790 } 1791 1792 if (cmd == host->deferred_cmd) 1793 host->deferred_cmd = NULL; 1794 1795 return true; 1796 } 1797 1798 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd) 1799 { 1800 int i, reg; 1801 1802 for (i = 0; i < 4; i++) { 1803 reg = SDHCI_RESPONSE + (3 - i) * 4; 1804 cmd->resp[i] = sdhci_readl(host, reg); 1805 } 1806 1807 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC) 1808 return; 1809 1810 /* CRC is stripped so we need to do some shifting */ 1811 for (i = 0; i < 4; i++) { 1812 cmd->resp[i] <<= 8; 1813 if (i != 3) 1814 cmd->resp[i] |= cmd->resp[i + 1] >> 24; 1815 } 1816 } 1817 1818 static void sdhci_finish_command(struct sdhci_host *host) 1819 { 1820 struct mmc_command *cmd = host->cmd; 1821 1822 host->cmd = NULL; 1823 1824 if (cmd->flags & MMC_RSP_PRESENT) { 1825 if (cmd->flags & MMC_RSP_136) { 1826 sdhci_read_rsp_136(host, cmd); 1827 } else { 1828 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); 1829 } 1830 } 1831 1832 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd) 1833 mmc_command_done(host->mmc, cmd->mrq); 1834 1835 /* 1836 * The 
host can send and interrupt when the busy state has 1837 * ended, allowing us to wait without wasting CPU cycles. 1838 * The busy signal uses DAT0 so this is similar to waiting 1839 * for data to complete. 1840 * 1841 * Note: The 1.0 specification is a bit ambiguous about this 1842 * feature so there might be some problems with older 1843 * controllers. 1844 */ 1845 if (cmd->flags & MMC_RSP_BUSY) { 1846 if (cmd->data) { 1847 DBG("Cannot wait for busy signal when also doing a data transfer"); 1848 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) && 1849 cmd == host->data_cmd) { 1850 /* Command complete before busy is ended */ 1851 return; 1852 } 1853 } 1854 1855 /* Finished CMD23, now send actual command. */ 1856 if (cmd == cmd->mrq->sbc) { 1857 if (!sdhci_send_command(host, cmd->mrq->cmd)) { 1858 WARN_ON(host->deferred_cmd); 1859 host->deferred_cmd = cmd->mrq->cmd; 1860 } 1861 } else { 1862 1863 /* Processed actual command. */ 1864 if (host->data && host->data_early) 1865 sdhci_finish_data(host); 1866 1867 if (!cmd->data) 1868 __sdhci_finish_mrq(host, cmd->mrq); 1869 } 1870 } 1871 1872 static u16 sdhci_get_preset_value(struct sdhci_host *host) 1873 { 1874 u16 preset = 0; 1875 1876 switch (host->timing) { 1877 case MMC_TIMING_MMC_HS: 1878 case MMC_TIMING_SD_HS: 1879 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED); 1880 break; 1881 case MMC_TIMING_UHS_SDR12: 1882 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1883 break; 1884 case MMC_TIMING_UHS_SDR25: 1885 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); 1886 break; 1887 case MMC_TIMING_UHS_SDR50: 1888 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); 1889 break; 1890 case MMC_TIMING_UHS_SDR104: 1891 case MMC_TIMING_MMC_HS200: 1892 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); 1893 break; 1894 case MMC_TIMING_UHS_DDR50: 1895 case MMC_TIMING_MMC_DDR52: 1896 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); 1897 break; 1898 case MMC_TIMING_MMC_HS400: 1899 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400); 1900 break; 1901 case MMC_TIMING_UHS2_SPEED_A: 1902 case MMC_TIMING_UHS2_SPEED_A_HD: 1903 case MMC_TIMING_UHS2_SPEED_B: 1904 case MMC_TIMING_UHS2_SPEED_B_HD: 1905 preset = sdhci_readw(host, SDHCI_PRESET_FOR_UHS2); 1906 break; 1907 default: 1908 pr_warn("%s: Invalid UHS-I mode selected\n", 1909 mmc_hostname(host->mmc)); 1910 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1911 break; 1912 } 1913 return preset; 1914 } 1915 1916 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock, 1917 unsigned int *actual_clock) 1918 { 1919 int div = 0; /* Initialized for compiler warning */ 1920 int real_div = div, clk_mul = 1; 1921 u16 clk = 0; 1922 bool switch_base_clk = false; 1923 1924 if (host->version >= SDHCI_SPEC_300) { 1925 if (host->preset_enabled) { 1926 u16 pre_val; 1927 1928 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1929 pre_val = sdhci_get_preset_value(host); 1930 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val); 1931 if (host->clk_mul && 1932 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) { 1933 clk = SDHCI_PROG_CLOCK_MODE; 1934 real_div = div + 1; 1935 clk_mul = host->clk_mul; 1936 } else { 1937 real_div = max_t(int, 1, div << 1); 1938 } 1939 goto clock_set; 1940 } 1941 1942 /* 1943 * Check if the Host Controller supports Programmable Clock 1944 * Mode. 
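		 * In that mode the SDCLK frequency is (base clock * clk_mul) /
		 * divisor. For example (purely illustrative numbers), with a
		 * 100 MHz base clock and clk_mul of 10, a request for 50 MHz
		 * selects div = 20 below (programmed as div - 1 = 19), giving
		 * an actual_clock of exactly 50 MHz.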
1945 */ 1946 if (host->clk_mul) { 1947 for (div = 1; div <= 1024; div++) { 1948 if ((host->max_clk * host->clk_mul / div) 1949 <= clock) 1950 break; 1951 } 1952 if ((host->max_clk * host->clk_mul / div) <= clock) { 1953 /* 1954 * Set Programmable Clock Mode in the Clock 1955 * Control register. 1956 */ 1957 clk = SDHCI_PROG_CLOCK_MODE; 1958 real_div = div; 1959 clk_mul = host->clk_mul; 1960 div--; 1961 } else { 1962 /* 1963 * Divisor can be too small to reach clock 1964 * speed requirement. Then use the base clock. 1965 */ 1966 switch_base_clk = true; 1967 } 1968 } 1969 1970 if (!host->clk_mul || switch_base_clk) { 1971 /* Version 3.00 divisors must be a multiple of 2. */ 1972 if (host->max_clk <= clock) 1973 div = 1; 1974 else { 1975 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; 1976 div += 2) { 1977 if ((host->max_clk / div) <= clock) 1978 break; 1979 } 1980 } 1981 real_div = div; 1982 div >>= 1; 1983 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN) 1984 && !div && host->max_clk <= 25000000) 1985 div = 1; 1986 } 1987 } else { 1988 /* Version 2.00 divisors must be a power of 2. */ 1989 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { 1990 if ((host->max_clk / div) <= clock) 1991 break; 1992 } 1993 real_div = div; 1994 div >>= 1; 1995 } 1996 1997 clock_set: 1998 if (real_div) 1999 *actual_clock = (host->max_clk * clk_mul) / real_div; 2000 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 2001 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) 2002 << SDHCI_DIVIDER_HI_SHIFT; 2003 2004 return clk; 2005 } 2006 EXPORT_SYMBOL_GPL(sdhci_calc_clk); 2007 2008 void sdhci_enable_clk(struct sdhci_host *host, u16 clk) 2009 { 2010 ktime_t timeout; 2011 2012 clk |= SDHCI_CLOCK_INT_EN; 2013 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2014 2015 /* Wait max 150 ms */ 2016 timeout = ktime_add_ms(ktime_get(), 150); 2017 while (1) { 2018 bool timedout = ktime_after(ktime_get(), timeout); 2019 2020 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2021 if (clk & SDHCI_CLOCK_INT_STABLE) 2022 break; 2023 if (timedout) { 2024 pr_err("%s: Internal clock never stabilised.\n", 2025 mmc_hostname(host->mmc)); 2026 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 2027 sdhci_dumpregs(host); 2028 return; 2029 } 2030 udelay(10); 2031 } 2032 2033 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) { 2034 clk |= SDHCI_CLOCK_PLL_EN; 2035 clk &= ~SDHCI_CLOCK_INT_STABLE; 2036 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2037 2038 /* Wait max 150 ms */ 2039 timeout = ktime_add_ms(ktime_get(), 150); 2040 while (1) { 2041 bool timedout = ktime_after(ktime_get(), timeout); 2042 2043 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2044 if (clk & SDHCI_CLOCK_INT_STABLE) 2045 break; 2046 if (timedout) { 2047 pr_err("%s: PLL clock never stabilised.\n", 2048 mmc_hostname(host->mmc)); 2049 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 2050 sdhci_dumpregs(host); 2051 return; 2052 } 2053 udelay(10); 2054 } 2055 } 2056 2057 clk |= SDHCI_CLOCK_CARD_EN; 2058 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2059 } 2060 EXPORT_SYMBOL_GPL(sdhci_enable_clk); 2061 2062 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 2063 { 2064 u16 clk; 2065 2066 host->mmc->actual_clock = 0; 2067 2068 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 2069 2070 if (clock == 0) 2071 return; 2072 2073 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); 2074 sdhci_enable_clk(host, clk); 2075 } 2076 EXPORT_SYMBOL_GPL(sdhci_set_clock); 2077 2078 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, 2079 unsigned short 
vdd) 2080 { 2081 struct mmc_host *mmc = host->mmc; 2082 2083 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2084 2085 if (mode != MMC_POWER_OFF) 2086 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); 2087 else 2088 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2089 } 2090 2091 unsigned short sdhci_get_vdd_value(unsigned short vdd) 2092 { 2093 switch (1 << vdd) { 2094 case MMC_VDD_165_195: 2095 /* 2096 * Without a regulator, SDHCI does not support 2.0v 2097 * so we only get here if the driver deliberately 2098 * added the 2.0v range to ocr_avail. Map it to 1.8v 2099 * for the purpose of turning on the power. 2100 */ 2101 case MMC_VDD_20_21: 2102 return SDHCI_POWER_180; 2103 case MMC_VDD_29_30: 2104 case MMC_VDD_30_31: 2105 return SDHCI_POWER_300; 2106 case MMC_VDD_32_33: 2107 case MMC_VDD_33_34: 2108 /* 2109 * 3.4V ~ 3.6V are valid only for those platforms where it's 2110 * known that the voltage range is supported by hardware. 2111 */ 2112 case MMC_VDD_34_35: 2113 case MMC_VDD_35_36: 2114 return SDHCI_POWER_330; 2115 default: 2116 return 0; 2117 } 2118 } 2119 EXPORT_SYMBOL_GPL(sdhci_get_vdd_value); 2120 2121 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, 2122 unsigned short vdd) 2123 { 2124 u8 pwr = 0; 2125 2126 if (mode != MMC_POWER_OFF) { 2127 pwr = sdhci_get_vdd_value(vdd); 2128 if (!pwr) { 2129 WARN(1, "%s: Invalid vdd %#x\n", 2130 mmc_hostname(host->mmc), vdd); 2131 } 2132 } 2133 2134 if (host->pwr == pwr) 2135 return; 2136 2137 host->pwr = pwr; 2138 2139 if (pwr == 0) { 2140 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2141 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2142 sdhci_runtime_pm_bus_off(host); 2143 } else { 2144 /* 2145 * Spec says that we should clear the power reg before setting 2146 * a new value. Some controllers don't seem to like this though. 2147 */ 2148 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 2149 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2150 2151 /* 2152 * At least the Marvell CaFe chip gets confused if we set the 2153 * voltage and turn on the power at the same time, so set the 2154 * voltage first. 2155 */ 2156 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) 2157 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2158 2159 pwr |= SDHCI_POWER_ON; 2160 2161 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2162 2163 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2164 sdhci_runtime_pm_bus_on(host); 2165 2166 /* 2167 * Some controllers need an extra 10ms delay after applying 2168 * power before they can apply the clock. 2169 */ 2170 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 2171 mdelay(10); 2172 } 2173 } 2174 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg); 2175 2176 void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 2177 unsigned short vdd) 2178 { 2179 if (IS_ERR(host->mmc->supply.vmmc)) 2180 sdhci_set_power_noreg(host, mode, vdd); 2181 else 2182 sdhci_set_power_reg(host, mode, vdd); 2183 } 2184 EXPORT_SYMBOL_GPL(sdhci_set_power); 2185 2186 /* 2187 * Some controllers need to configure a valid bus voltage on their power 2188 * register regardless of whether an external regulator is taking care of power 2189 * supply. This helper function takes care of it if set as the controller's 2190 * sdhci_ops.set_power callback.
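 * (For instance, a platform driver that needs this behaviour can simply set .set_power = sdhci_set_power_and_bus_voltage in its sdhci_ops; sdhci-of-arasan is one in-tree user.)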
2191 */ 2192 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, 2193 unsigned char mode, 2194 unsigned short vdd) 2195 { 2196 if (!IS_ERR(host->mmc->supply.vmmc)) { 2197 struct mmc_host *mmc = host->mmc; 2198 2199 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2200 } 2201 sdhci_set_power_noreg(host, mode, vdd); 2202 } 2203 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage); 2204 2205 /*****************************************************************************\ 2206 * * 2207 * MMC callbacks * 2208 * * 2209 \*****************************************************************************/ 2210 2211 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 2212 { 2213 struct sdhci_host *host = mmc_priv(mmc); 2214 struct mmc_command *cmd; 2215 unsigned long flags; 2216 bool present; 2217 2218 /* Firstly check card presence */ 2219 present = mmc->ops->get_cd(mmc); 2220 2221 spin_lock_irqsave(&host->lock, flags); 2222 2223 sdhci_led_activate(host); 2224 2225 if (sdhci_present_error(host, mrq->cmd, present)) 2226 goto out_finish; 2227 2228 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2229 2230 if (!sdhci_send_command_retry(host, cmd, flags)) 2231 goto out_finish; 2232 2233 spin_unlock_irqrestore(&host->lock, flags); 2234 2235 return; 2236 2237 out_finish: 2238 sdhci_finish_mrq(host, mrq); 2239 spin_unlock_irqrestore(&host->lock, flags); 2240 } 2241 EXPORT_SYMBOL_GPL(sdhci_request); 2242 2243 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq) 2244 { 2245 struct sdhci_host *host = mmc_priv(mmc); 2246 struct mmc_command *cmd; 2247 unsigned long flags; 2248 int ret = 0; 2249 2250 spin_lock_irqsave(&host->lock, flags); 2251 2252 if (sdhci_present_error(host, mrq->cmd, true)) { 2253 sdhci_finish_mrq(host, mrq); 2254 goto out_finish; 2255 } 2256 2257 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2258 2259 /* 2260 * The HSQ may send a command in interrupt context without polling 2261 * the busy signaling, so return -EBUSY if the controller has not 2262 * yet released the inhibit bits; the HSQ can then retry the request 2263 * in non-atomic context. Therefore do not finish this request 2264 * here.
2265 */ 2266 if (!sdhci_send_command(host, cmd)) 2267 ret = -EBUSY; 2268 else 2269 sdhci_led_activate(host); 2270 2271 out_finish: 2272 spin_unlock_irqrestore(&host->lock, flags); 2273 return ret; 2274 } 2275 EXPORT_SYMBOL_GPL(sdhci_request_atomic); 2276 2277 void sdhci_set_bus_width(struct sdhci_host *host, int width) 2278 { 2279 u8 ctrl; 2280 2281 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2282 if (width == MMC_BUS_WIDTH_8) { 2283 ctrl &= ~SDHCI_CTRL_4BITBUS; 2284 ctrl |= SDHCI_CTRL_8BITBUS; 2285 } else { 2286 if (host->mmc->caps & MMC_CAP_8_BIT_DATA) 2287 ctrl &= ~SDHCI_CTRL_8BITBUS; 2288 if (width == MMC_BUS_WIDTH_4) 2289 ctrl |= SDHCI_CTRL_4BITBUS; 2290 else 2291 ctrl &= ~SDHCI_CTRL_4BITBUS; 2292 } 2293 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2294 } 2295 EXPORT_SYMBOL_GPL(sdhci_set_bus_width); 2296 2297 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 2298 { 2299 u16 ctrl_2; 2300 2301 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2302 /* Select Bus Speed Mode for host */ 2303 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 2304 if ((timing == MMC_TIMING_MMC_HS200) || 2305 (timing == MMC_TIMING_UHS_SDR104)) 2306 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 2307 else if (timing == MMC_TIMING_UHS_SDR12) 2308 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 2309 else if (timing == MMC_TIMING_UHS_SDR25) 2310 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 2311 else if (timing == MMC_TIMING_UHS_SDR50) 2312 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 2313 else if ((timing == MMC_TIMING_UHS_DDR50) || 2314 (timing == MMC_TIMING_MMC_DDR52)) 2315 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 2316 else if (timing == MMC_TIMING_MMC_HS400) 2317 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ 2318 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2319 } 2320 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 2321 2322 static bool sdhci_timing_has_preset(unsigned char timing) 2323 { 2324 switch (timing) { 2325 case MMC_TIMING_UHS_SDR12: 2326 case MMC_TIMING_UHS_SDR25: 2327 case MMC_TIMING_UHS_SDR50: 2328 case MMC_TIMING_UHS_SDR104: 2329 case MMC_TIMING_UHS_DDR50: 2330 case MMC_TIMING_MMC_DDR52: 2331 return true; 2332 } 2333 return false; 2334 } 2335 2336 static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing) 2337 { 2338 return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && 2339 sdhci_timing_has_preset(timing); 2340 } 2341 2342 static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios) 2343 { 2344 /* 2345 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK 2346 * Frequency. Check if preset values need to be enabled, or the Driver 2347 * Strength needs updating. Note, clock changes are handled separately. 2348 */ 2349 return !host->preset_enabled && 2350 (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type); 2351 } 2352 2353 void sdhci_set_ios_common(struct mmc_host *mmc, struct mmc_ios *ios) 2354 { 2355 struct sdhci_host *host = mmc_priv(mmc); 2356 2357 /* 2358 * Reset the chip on each power off. 2359 * Should clear out any weird states. 
2360 */ 2361 if (ios->power_mode == MMC_POWER_OFF) { 2362 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2363 sdhci_reinit(host); 2364 } 2365 2366 if (host->version >= SDHCI_SPEC_300 && 2367 (ios->power_mode == MMC_POWER_UP) && 2368 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) 2369 sdhci_enable_preset_value(host, false); 2370 2371 if (!ios->clock || ios->clock != host->clock) { 2372 host->ops->set_clock(host, ios->clock); 2373 host->clock = ios->clock; 2374 2375 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && 2376 host->clock) { 2377 host->timeout_clk = mmc->actual_clock ? 2378 mmc->actual_clock / 1000 : 2379 host->clock / 1000; 2380 mmc->max_busy_timeout = 2381 host->ops->get_max_timeout_count ? 2382 host->ops->get_max_timeout_count(host) : 2383 1 << 27; 2384 mmc->max_busy_timeout /= host->timeout_clk; 2385 } 2386 } 2387 } 2388 EXPORT_SYMBOL_GPL(sdhci_set_ios_common); 2389 2390 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 2391 { 2392 struct sdhci_host *host = mmc_priv(mmc); 2393 bool reinit_uhs = host->reinit_uhs; 2394 bool turning_on_clk; 2395 u8 ctrl; 2396 2397 host->reinit_uhs = false; 2398 2399 if (ios->power_mode == MMC_POWER_UNDEFINED) 2400 return; 2401 2402 if (host->flags & SDHCI_DEVICE_DEAD) { 2403 if (!IS_ERR(mmc->supply.vmmc) && 2404 ios->power_mode == MMC_POWER_OFF) 2405 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 2406 return; 2407 } 2408 2409 turning_on_clk = ios->clock != host->clock && ios->clock && !host->clock; 2410 2411 sdhci_set_ios_common(mmc, ios); 2412 2413 if (host->ops->set_power) 2414 host->ops->set_power(host, ios->power_mode, ios->vdd); 2415 else 2416 sdhci_set_power(host, ios->power_mode, ios->vdd); 2417 2418 if (host->ops->platform_send_init_74_clocks) 2419 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 2420 2421 host->ops->set_bus_width(host, ios->bus_width); 2422 2423 /* 2424 * Special case to avoid multiple clock changes during voltage 2425 * switching. 2426 */ 2427 if (!reinit_uhs && 2428 turning_on_clk && 2429 host->timing == ios->timing && 2430 host->version >= SDHCI_SPEC_300 && 2431 !sdhci_presetable_values_change(host, ios)) 2432 return; 2433 2434 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2435 2436 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { 2437 if (ios->timing == MMC_TIMING_SD_HS || 2438 ios->timing == MMC_TIMING_MMC_HS || 2439 ios->timing == MMC_TIMING_MMC_HS400 || 2440 ios->timing == MMC_TIMING_MMC_HS200 || 2441 ios->timing == MMC_TIMING_MMC_DDR52 || 2442 ios->timing == MMC_TIMING_UHS_SDR50 || 2443 ios->timing == MMC_TIMING_UHS_SDR104 || 2444 ios->timing == MMC_TIMING_UHS_DDR50 || 2445 ios->timing == MMC_TIMING_UHS_SDR25) 2446 ctrl |= SDHCI_CTRL_HISPD; 2447 else 2448 ctrl &= ~SDHCI_CTRL_HISPD; 2449 } 2450 2451 if (host->version >= SDHCI_SPEC_300) { 2452 u16 clk, ctrl_2; 2453 2454 /* 2455 * According to SDHCI Spec v3.00, if the Preset Value 2456 * Enable in the Host Control 2 register is set, we 2457 * need to reset SD Clock Enable before changing High 2458 * Speed Enable to avoid generating clock glitches. 2459 */ 2460 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2461 if (clk & SDHCI_CLOCK_CARD_EN) { 2462 clk &= ~SDHCI_CLOCK_CARD_EN; 2463 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2464 } 2465 2466 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2467 2468 if (!host->preset_enabled) { 2469 /* 2470 * We only need to set Driver Strength if the 2471 * preset value enable is not set. 
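 * (With presets enabled, the controller takes the driver strength from the preset value register instead; see the SDHCI_PRESET_DRV_MASK handling just below.)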
2472 */ 2473 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2474 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; 2475 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) 2476 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; 2477 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B) 2478 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2479 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) 2480 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; 2481 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D) 2482 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D; 2483 else { 2484 pr_warn("%s: invalid driver type, default to driver type B\n", 2485 mmc_hostname(mmc)); 2486 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2487 } 2488 2489 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2490 host->drv_type = ios->drv_type; 2491 } 2492 2493 host->ops->set_uhs_signaling(host, ios->timing); 2494 host->timing = ios->timing; 2495 2496 if (sdhci_preset_needed(host, ios->timing)) { 2497 u16 preset; 2498 2499 sdhci_enable_preset_value(host, true); 2500 preset = sdhci_get_preset_value(host); 2501 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK, 2502 preset); 2503 host->drv_type = ios->drv_type; 2504 } 2505 2506 /* Re-enable SD Clock */ 2507 host->ops->set_clock(host, host->clock); 2508 } else 2509 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2510 } 2511 EXPORT_SYMBOL_GPL(sdhci_set_ios); 2512 2513 static int sdhci_get_cd(struct mmc_host *mmc) 2514 { 2515 struct sdhci_host *host = mmc_priv(mmc); 2516 int gpio_cd = mmc_gpio_get_cd(mmc); 2517 2518 if (host->flags & SDHCI_DEVICE_DEAD) 2519 return 0; 2520 2521 /* If nonremovable, assume that the card is always present. */ 2522 if (!mmc_card_is_removable(mmc)) 2523 return 1; 2524 2525 /* 2526 * Try slot GPIO detect; if defined, it takes precedence 2527 * over the built-in controller functionality. 2528 */ 2529 if (gpio_cd >= 0) 2530 return !!gpio_cd; 2531 2532 /* If polling, assume that the card is always present.
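 * (SDHCI_QUIRK_BROKEN_CARD_DETECTION means the controller's native card-detect status cannot be trusted, so the core is expected to poll for the card instead, e.g. via MMC_CAP_NEEDS_POLL.)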
*/ 2533 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2534 return 1; 2535 2536 /* Host native card detect */ 2537 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2538 } 2539 2540 int sdhci_get_cd_nogpio(struct mmc_host *mmc) 2541 { 2542 struct sdhci_host *host = mmc_priv(mmc); 2543 unsigned long flags; 2544 int ret = 0; 2545 2546 spin_lock_irqsave(&host->lock, flags); 2547 2548 if (host->flags & SDHCI_DEVICE_DEAD) 2549 goto out; 2550 2551 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2552 out: 2553 spin_unlock_irqrestore(&host->lock, flags); 2554 2555 return ret; 2556 } 2557 EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio); 2558 2559 int sdhci_get_ro(struct mmc_host *mmc) 2560 { 2561 struct sdhci_host *host = mmc_priv(mmc); 2562 bool allow_invert = false; 2563 int is_readonly; 2564 2565 if (host->flags & SDHCI_DEVICE_DEAD) { 2566 is_readonly = 0; 2567 } else if (host->ops->get_ro) { 2568 is_readonly = host->ops->get_ro(host); 2569 } else if (mmc_can_gpio_ro(mmc)) { 2570 is_readonly = mmc_gpio_get_ro(mmc); 2571 /* Do not invert twice */ 2572 allow_invert = !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); 2573 } else { 2574 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) 2575 & SDHCI_WRITE_PROTECT); 2576 allow_invert = true; 2577 } 2578 2579 if (is_readonly >= 0 && 2580 allow_invert && 2581 (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)) 2582 is_readonly = !is_readonly; 2583 2584 return is_readonly; 2585 } 2586 EXPORT_SYMBOL_GPL(sdhci_get_ro); 2587 2588 static void sdhci_hw_reset(struct mmc_host *mmc) 2589 { 2590 struct sdhci_host *host = mmc_priv(mmc); 2591 2592 if (host->ops && host->ops->hw_reset) 2593 host->ops->hw_reset(host); 2594 } 2595 2596 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 2597 { 2598 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 2599 if (enable) 2600 host->ier |= SDHCI_INT_CARD_INT; 2601 else 2602 host->ier &= ~SDHCI_INT_CARD_INT; 2603 2604 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2605 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2606 } 2607 } 2608 2609 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2610 { 2611 struct sdhci_host *host = mmc_priv(mmc); 2612 unsigned long flags; 2613 2614 if (enable) 2615 pm_runtime_get_noresume(mmc_dev(mmc)); 2616 2617 spin_lock_irqsave(&host->lock, flags); 2618 sdhci_enable_sdio_irq_nolock(host, enable); 2619 spin_unlock_irqrestore(&host->lock, flags); 2620 2621 if (!enable) 2622 pm_runtime_put_noidle(mmc_dev(mmc)); 2623 } 2624 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2625 2626 static void sdhci_ack_sdio_irq(struct mmc_host *mmc) 2627 { 2628 struct sdhci_host *host = mmc_priv(mmc); 2629 unsigned long flags; 2630 2631 spin_lock_irqsave(&host->lock, flags); 2632 sdhci_enable_sdio_irq_nolock(host, true); 2633 spin_unlock_irqrestore(&host->lock, flags); 2634 } 2635 2636 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2637 struct mmc_ios *ios) 2638 { 2639 struct sdhci_host *host = mmc_priv(mmc); 2640 u16 ctrl; 2641 int ret; 2642 2643 /* 2644 * Signal Voltage Switching is only applicable for Host Controllers 2645 * v3.00 and above. 
2646 */ 2647 if (host->version < SDHCI_SPEC_300) 2648 return 0; 2649 2650 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2651 2652 switch (ios->signal_voltage) { 2653 case MMC_SIGNAL_VOLTAGE_330: 2654 if (!(host->flags & SDHCI_SIGNALING_330)) 2655 return -EINVAL; 2656 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 2657 ctrl &= ~SDHCI_CTRL_VDD_180; 2658 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2659 2660 if (!IS_ERR(mmc->supply.vqmmc)) { 2661 ret = mmc_regulator_set_vqmmc(mmc, ios); 2662 if (ret < 0) { 2663 pr_warn("%s: Switching to 3.3V signalling voltage failed\n", 2664 mmc_hostname(mmc)); 2665 return -EIO; 2666 } 2667 } 2668 /* Wait for 5ms */ 2669 usleep_range(5000, 5500); 2670 2671 /* 3.3V regulator output should be stable within 5 ms */ 2672 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2673 if (!(ctrl & SDHCI_CTRL_VDD_180)) 2674 return 0; 2675 2676 pr_warn("%s: 3.3V regulator output did not become stable\n", 2677 mmc_hostname(mmc)); 2678 2679 return -EAGAIN; 2680 case MMC_SIGNAL_VOLTAGE_180: 2681 if (!(host->flags & SDHCI_SIGNALING_180)) 2682 return -EINVAL; 2683 if (!IS_ERR(mmc->supply.vqmmc)) { 2684 ret = mmc_regulator_set_vqmmc(mmc, ios); 2685 if (ret < 0) { 2686 pr_warn("%s: Switching to 1.8V signalling voltage failed\n", 2687 mmc_hostname(mmc)); 2688 return -EIO; 2689 } 2690 } 2691 2692 /* 2693 * Enable 1.8V Signal Enable in the Host Control2 2694 * register 2695 */ 2696 ctrl |= SDHCI_CTRL_VDD_180; 2697 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2698 2699 /* Some controller need to do more when switching */ 2700 if (host->ops->voltage_switch) 2701 host->ops->voltage_switch(host); 2702 2703 /* 1.8V regulator output should be stable within 5 ms */ 2704 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2705 if (ctrl & SDHCI_CTRL_VDD_180) 2706 return 0; 2707 2708 pr_warn("%s: 1.8V regulator output did not become stable\n", 2709 mmc_hostname(mmc)); 2710 2711 return -EAGAIN; 2712 case MMC_SIGNAL_VOLTAGE_120: 2713 if (!(host->flags & SDHCI_SIGNALING_120)) 2714 return -EINVAL; 2715 if (!IS_ERR(mmc->supply.vqmmc)) { 2716 ret = mmc_regulator_set_vqmmc(mmc, ios); 2717 if (ret < 0) { 2718 pr_warn("%s: Switching to 1.2V signalling voltage failed\n", 2719 mmc_hostname(mmc)); 2720 return -EIO; 2721 } 2722 } 2723 return 0; 2724 default: 2725 /* No signal voltage switch required */ 2726 return 0; 2727 } 2728 } 2729 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch); 2730 2731 static int sdhci_card_busy(struct mmc_host *mmc) 2732 { 2733 struct sdhci_host *host = mmc_priv(mmc); 2734 u32 present_state; 2735 2736 /* Check whether DAT[0] is 0 */ 2737 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 2738 2739 return !(present_state & SDHCI_DATA_0_LVL_MASK); 2740 } 2741 2742 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 2743 { 2744 struct sdhci_host *host = mmc_priv(mmc); 2745 unsigned long flags; 2746 2747 spin_lock_irqsave(&host->lock, flags); 2748 host->flags |= SDHCI_HS400_TUNING; 2749 spin_unlock_irqrestore(&host->lock, flags); 2750 2751 return 0; 2752 } 2753 2754 void sdhci_start_tuning(struct sdhci_host *host) 2755 { 2756 u16 ctrl; 2757 2758 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2759 ctrl |= SDHCI_CTRL_EXEC_TUNING; 2760 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND) 2761 ctrl |= SDHCI_CTRL_TUNED_CLK; 2762 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2763 2764 /* 2765 * As per the Host Controller spec v3.00, tuning command 2766 * generates Buffer Read Ready interrupt, so enable that. 
2767 * 2768 * Note: The spec clearly says that when tuning sequence 2769 * is being performed, the controller does not generate 2770 * interrupts other than Buffer Read Ready interrupt. But 2771 * to make sure we don't hit a controller bug, we _only_ 2772 * enable Buffer Read Ready interrupt here. 2773 */ 2774 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2775 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2776 } 2777 EXPORT_SYMBOL_GPL(sdhci_start_tuning); 2778 2779 void sdhci_end_tuning(struct sdhci_host *host) 2780 { 2781 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2782 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2783 } 2784 EXPORT_SYMBOL_GPL(sdhci_end_tuning); 2785 2786 void sdhci_reset_tuning(struct sdhci_host *host) 2787 { 2788 u16 ctrl; 2789 2790 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2791 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2792 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2793 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2794 } 2795 EXPORT_SYMBOL_GPL(sdhci_reset_tuning); 2796 2797 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2798 { 2799 sdhci_reset_tuning(host); 2800 2801 sdhci_reset_for(host, TUNING_ABORT); 2802 2803 sdhci_end_tuning(host); 2804 2805 mmc_send_abort_tuning(host->mmc, opcode); 2806 } 2807 EXPORT_SYMBOL_GPL(sdhci_abort_tuning); 2808 2809 /* 2810 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2811 * tuning command does not have a data payload (or rather the hardware does it 2812 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2813 * interrupt setup is different to other commands and there is no timeout 2814 * interrupt so special handling is needed. 2815 */ 2816 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2817 { 2818 struct mmc_host *mmc = host->mmc; 2819 struct mmc_command cmd = {}; 2820 struct mmc_request mrq = {}; 2821 unsigned long flags; 2822 u32 b = host->sdma_boundary; 2823 2824 spin_lock_irqsave(&host->lock, flags); 2825 2826 cmd.opcode = opcode; 2827 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2828 cmd.mrq = &mrq; 2829 2830 mrq.cmd = &cmd; 2831 /* 2832 * In response to CMD19, the card sends 64 bytes of tuning 2833 * block to the Host Controller. So we set the block size 2834 * to 64 here. 2835 */ 2836 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2837 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2838 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2839 else 2840 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2841 2842 /* 2843 * The tuning block is sent by the card to the host controller. 2844 * So we set the TRNS_READ bit in the Transfer Mode register. 2845 * This also takes care of setting DMA Enable and Multi Block 2846 * Select in the same register to 0. 
2847 */ 2848 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2849 2850 if (!sdhci_send_command_retry(host, &cmd, flags)) { 2851 spin_unlock_irqrestore(&host->lock, flags); 2852 host->tuning_done = 0; 2853 return; 2854 } 2855 2856 host->cmd = NULL; 2857 2858 sdhci_del_timer(host, &mrq); 2859 2860 host->tuning_done = 0; 2861 2862 spin_unlock_irqrestore(&host->lock, flags); 2863 2864 /* Wait for Buffer Read Ready interrupt */ 2865 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2866 msecs_to_jiffies(50)); 2867 2868 } 2869 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2870 2871 int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2872 { 2873 int i; 2874 2875 /* 2876 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2877 * of loops reaches tuning loop count. 2878 */ 2879 for (i = 0; i < host->tuning_loop_count; i++) { 2880 u16 ctrl; 2881 2882 sdhci_send_tuning(host, opcode); 2883 2884 if (!host->tuning_done) { 2885 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", 2886 mmc_hostname(host->mmc)); 2887 sdhci_abort_tuning(host, opcode); 2888 return -ETIMEDOUT; 2889 } 2890 2891 /* Spec does not require a delay between tuning cycles */ 2892 if (host->tuning_delay > 0) 2893 mdelay(host->tuning_delay); 2894 2895 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2896 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2897 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2898 return 0; /* Success! */ 2899 break; 2900 } 2901 2902 } 2903 2904 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2905 mmc_hostname(host->mmc)); 2906 sdhci_reset_tuning(host); 2907 return -EAGAIN; 2908 } 2909 EXPORT_SYMBOL_GPL(__sdhci_execute_tuning); 2910 2911 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2912 { 2913 struct sdhci_host *host = mmc_priv(mmc); 2914 int err = 0; 2915 unsigned int tuning_count = 0; 2916 bool hs400_tuning; 2917 2918 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2919 2920 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2921 tuning_count = host->tuning_count; 2922 2923 /* 2924 * The Host Controller needs tuning in case of SDR104 and DDR50 2925 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2926 * the Capabilities register. 2927 * If the Host Controller supports the HS200 mode then the 2928 * tuning function has to be executed. 2929 */ 2930 switch (host->timing) { 2931 /* HS400 tuning is done in HS200 mode */ 2932 case MMC_TIMING_MMC_HS400: 2933 err = -EINVAL; 2934 goto out; 2935 2936 case MMC_TIMING_MMC_HS200: 2937 /* 2938 * Periodic re-tuning for HS400 is not expected to be needed, so 2939 * disable it here. 
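 * (A tuning_count of 0 propagates to mmc->retune_period below, which means the periodic re-tune timer is never armed.)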
2940 */ 2941 if (hs400_tuning) 2942 tuning_count = 0; 2943 break; 2944 2945 case MMC_TIMING_UHS_SDR104: 2946 case MMC_TIMING_UHS_DDR50: 2947 break; 2948 2949 case MMC_TIMING_UHS_SDR50: 2950 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2951 break; 2952 fallthrough; 2953 2954 default: 2955 goto out; 2956 } 2957 2958 if (host->ops->platform_execute_tuning) { 2959 err = host->ops->platform_execute_tuning(host, opcode); 2960 goto out; 2961 } 2962 2963 mmc->retune_period = tuning_count; 2964 2965 if (host->tuning_delay < 0) 2966 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2967 2968 sdhci_start_tuning(host); 2969 2970 host->tuning_err = __sdhci_execute_tuning(host, opcode); 2971 2972 sdhci_end_tuning(host); 2973 out: 2974 host->flags &= ~SDHCI_HS400_TUNING; 2975 2976 return err; 2977 } 2978 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2979 2980 void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2981 { 2982 /* Host Controller v3.00 defines preset value registers */ 2983 if (host->version < SDHCI_SPEC_300) 2984 return; 2985 2986 /* 2987 * We only enable or disable Preset Value if they are not already 2988 * enabled or disabled respectively. Otherwise, we bail out. 2989 */ 2990 if (host->preset_enabled != enable) { 2991 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2992 2993 if (enable) 2994 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2995 else 2996 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2997 2998 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2999 3000 if (enable) 3001 host->flags |= SDHCI_PV_ENABLED; 3002 else 3003 host->flags &= ~SDHCI_PV_ENABLED; 3004 3005 host->preset_enabled = enable; 3006 } 3007 } 3008 EXPORT_SYMBOL_GPL(sdhci_enable_preset_value); 3009 3010 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 3011 int err) 3012 { 3013 struct mmc_data *data = mrq->data; 3014 3015 if (data->host_cookie != COOKIE_UNMAPPED) 3016 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, 3017 mmc_get_dma_dir(data)); 3018 3019 data->host_cookie = COOKIE_UNMAPPED; 3020 } 3021 3022 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 3023 { 3024 struct sdhci_host *host = mmc_priv(mmc); 3025 3026 mrq->data->host_cookie = COOKIE_UNMAPPED; 3027 3028 /* 3029 * No pre-mapping in the pre hook if we're using the bounce buffer, 3030 * for that we would need two bounce buffers since one buffer is 3031 * in flight when this is getting called. 
3032 */ 3033 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 3034 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 3035 } 3036 3037 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 3038 { 3039 if (host->data_cmd) { 3040 host->data_cmd->error = err; 3041 sdhci_finish_mrq(host, host->data_cmd->mrq); 3042 } 3043 3044 if (host->cmd) { 3045 host->cmd->error = err; 3046 sdhci_finish_mrq(host, host->cmd->mrq); 3047 } 3048 } 3049 3050 static void sdhci_card_event(struct mmc_host *mmc) 3051 { 3052 struct sdhci_host *host = mmc_priv(mmc); 3053 unsigned long flags; 3054 int present; 3055 3056 /* First check if client has provided their own card event */ 3057 if (host->ops->card_event) 3058 host->ops->card_event(host); 3059 3060 present = mmc->ops->get_cd(mmc); 3061 3062 spin_lock_irqsave(&host->lock, flags); 3063 3064 /* Check sdhci_has_requests() first in case we are runtime suspended */ 3065 if (sdhci_has_requests(host) && !present) { 3066 pr_err("%s: Card removed during transfer!\n", 3067 mmc_hostname(mmc)); 3068 pr_err("%s: Resetting controller.\n", 3069 mmc_hostname(mmc)); 3070 3071 sdhci_reset_for(host, CARD_REMOVED); 3072 3073 sdhci_error_out_mrqs(host, -ENOMEDIUM); 3074 } 3075 3076 spin_unlock_irqrestore(&host->lock, flags); 3077 } 3078 3079 static const struct mmc_host_ops sdhci_ops = { 3080 .request = sdhci_request, 3081 .post_req = sdhci_post_req, 3082 .pre_req = sdhci_pre_req, 3083 .set_ios = sdhci_set_ios, 3084 .get_cd = sdhci_get_cd, 3085 .get_ro = sdhci_get_ro, 3086 .card_hw_reset = sdhci_hw_reset, 3087 .enable_sdio_irq = sdhci_enable_sdio_irq, 3088 .ack_sdio_irq = sdhci_ack_sdio_irq, 3089 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 3090 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 3091 .execute_tuning = sdhci_execute_tuning, 3092 .card_event = sdhci_card_event, 3093 .card_busy = sdhci_card_busy, 3094 }; 3095 3096 /*****************************************************************************\ 3097 * * 3098 * Request done * 3099 * * 3100 \*****************************************************************************/ 3101 3102 void sdhci_request_done_dma(struct sdhci_host *host, struct mmc_request *mrq) 3103 { 3104 struct mmc_data *data = mrq->data; 3105 3106 if (data && data->host_cookie == COOKIE_MAPPED) { 3107 if (host->bounce_buffer) { 3108 /* 3109 * On reads, copy the bounced data into the 3110 * sglist 3111 */ 3112 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 3113 unsigned int length = data->bytes_xfered; 3114 3115 if (length > host->bounce_buffer_size) { 3116 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 3117 mmc_hostname(host->mmc), 3118 host->bounce_buffer_size, 3119 data->bytes_xfered); 3120 /* Cap it down and continue */ 3121 length = host->bounce_buffer_size; 3122 } 3123 dma_sync_single_for_cpu(mmc_dev(host->mmc), 3124 host->bounce_addr, 3125 host->bounce_buffer_size, 3126 DMA_FROM_DEVICE); 3127 sg_copy_from_buffer(data->sg, 3128 data->sg_len, 3129 host->bounce_buffer, 3130 length); 3131 } else { 3132 /* No copying, just switch ownership */ 3133 dma_sync_single_for_cpu(mmc_dev(host->mmc), 3134 host->bounce_addr, 3135 host->bounce_buffer_size, 3136 mmc_get_dma_dir(data)); 3137 } 3138 } else { 3139 /* Unmap the raw data */ 3140 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 3141 data->sg_len, 3142 mmc_get_dma_dir(data)); 3143 } 3144 data->host_cookie = COOKIE_UNMAPPED; 3145 } 3146 } 3147 EXPORT_SYMBOL_GPL(sdhci_request_done_dma); 3148 3149 static bool 
sdhci_request_done(struct sdhci_host *host) 3150 { 3151 unsigned long flags; 3152 struct mmc_request *mrq; 3153 int i; 3154 3155 spin_lock_irqsave(&host->lock, flags); 3156 3157 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3158 mrq = host->mrqs_done[i]; 3159 if (mrq) 3160 break; 3161 } 3162 3163 if (!mrq) { 3164 spin_unlock_irqrestore(&host->lock, flags); 3165 return true; 3166 } 3167 3168 /* 3169 * The controller needs a reset of internal state machines 3170 * upon error conditions. 3171 */ 3172 if (sdhci_needs_reset(host, mrq)) { 3173 /* 3174 * Do not finish until command and data lines are available for 3175 * reset. Note there can only be one other mrq, so it cannot 3176 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 3177 * would both be null. 3178 */ 3179 if (host->cmd || host->data_cmd) { 3180 spin_unlock_irqrestore(&host->lock, flags); 3181 return true; 3182 } 3183 3184 /* Some controllers need this kick or reset won't work here */ 3185 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 3186 /* This is to force an update */ 3187 host->ops->set_clock(host, host->clock); 3188 3189 sdhci_reset_for(host, REQUEST_ERROR); 3190 3191 host->pending_reset = false; 3192 } 3193 3194 /* 3195 * Always unmap the data buffers if they were mapped by 3196 * sdhci_prepare_data() whenever we finish with a request. 3197 * This avoids leaking DMA mappings on error. 3198 */ 3199 if (host->flags & SDHCI_REQ_USE_DMA) { 3200 struct mmc_data *data = mrq->data; 3201 3202 if (host->use_external_dma && data && 3203 (mrq->cmd->error || data->error)) { 3204 struct dma_chan *chan = sdhci_external_dma_channel(host, data); 3205 3206 host->mrqs_done[i] = NULL; 3207 spin_unlock_irqrestore(&host->lock, flags); 3208 dmaengine_terminate_sync(chan); 3209 spin_lock_irqsave(&host->lock, flags); 3210 sdhci_set_mrq_done(host, mrq); 3211 } 3212 3213 sdhci_request_done_dma(host, mrq); 3214 } 3215 3216 host->mrqs_done[i] = NULL; 3217 3218 spin_unlock_irqrestore(&host->lock, flags); 3219 3220 if (host->ops->request_done) 3221 host->ops->request_done(host, mrq); 3222 else 3223 mmc_request_done(host->mmc, mrq); 3224 3225 return false; 3226 } 3227 3228 void sdhci_complete_work(struct work_struct *work) 3229 { 3230 struct sdhci_host *host = container_of(work, struct sdhci_host, 3231 complete_work); 3232 3233 while (!sdhci_request_done(host)) 3234 ; 3235 } 3236 EXPORT_SYMBOL_GPL(sdhci_complete_work); 3237 3238 static void sdhci_timeout_timer(struct timer_list *t) 3239 { 3240 struct sdhci_host *host; 3241 unsigned long flags; 3242 3243 host = from_timer(host, t, timer); 3244 3245 spin_lock_irqsave(&host->lock, flags); 3246 3247 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 3248 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 3249 mmc_hostname(host->mmc)); 3250 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3251 sdhci_dumpregs(host); 3252 3253 host->cmd->error = -ETIMEDOUT; 3254 sdhci_finish_mrq(host, host->cmd->mrq); 3255 } 3256 3257 spin_unlock_irqrestore(&host->lock, flags); 3258 } 3259 3260 static void sdhci_timeout_data_timer(struct timer_list *t) 3261 { 3262 struct sdhci_host *host; 3263 unsigned long flags; 3264 3265 host = from_timer(host, t, data_timer); 3266 3267 spin_lock_irqsave(&host->lock, flags); 3268 3269 if (host->data || host->data_cmd || 3270 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 3271 pr_err("%s: Timeout waiting for hardware interrupt.\n", 3272 mmc_hostname(host->mmc)); 3273 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3274 sdhci_dumpregs(host); 3275 3276 if (host->data) { 3277 
host->data->error = -ETIMEDOUT; 3278 __sdhci_finish_data(host, true); 3279 queue_work(host->complete_wq, &host->complete_work); 3280 } else if (host->data_cmd) { 3281 host->data_cmd->error = -ETIMEDOUT; 3282 sdhci_finish_mrq(host, host->data_cmd->mrq); 3283 } else { 3284 host->cmd->error = -ETIMEDOUT; 3285 sdhci_finish_mrq(host, host->cmd->mrq); 3286 } 3287 } 3288 3289 spin_unlock_irqrestore(&host->lock, flags); 3290 } 3291 3292 /*****************************************************************************\ 3293 * * 3294 * Interrupt handling * 3295 * * 3296 \*****************************************************************************/ 3297 3298 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) 3299 { 3300 /* Handle auto-CMD12 error */ 3301 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { 3302 struct mmc_request *mrq = host->data_cmd->mrq; 3303 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3304 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 3305 SDHCI_INT_DATA_TIMEOUT : 3306 SDHCI_INT_DATA_CRC; 3307 3308 /* Treat auto-CMD12 error the same as data error */ 3309 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { 3310 *intmask_p |= data_err_bit; 3311 return; 3312 } 3313 } 3314 3315 if (!host->cmd) { 3316 /* 3317 * SDHCI recovers from errors by resetting the cmd and data 3318 * circuits. Until that is done, there very well might be more 3319 * interrupts, so ignore them in that case. 3320 */ 3321 if (host->pending_reset) 3322 return; 3323 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 3324 mmc_hostname(host->mmc), (unsigned)intmask); 3325 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3326 sdhci_dumpregs(host); 3327 return; 3328 } 3329 3330 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 3331 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 3332 if (intmask & SDHCI_INT_TIMEOUT) { 3333 host->cmd->error = -ETIMEDOUT; 3334 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3335 } else { 3336 host->cmd->error = -EILSEQ; 3337 if (!mmc_op_tuning(host->cmd->opcode)) 3338 sdhci_err_stats_inc(host, CMD_CRC); 3339 } 3340 /* Treat data command CRC error the same as data CRC error */ 3341 if (host->cmd->data && 3342 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 3343 SDHCI_INT_CRC) { 3344 host->cmd = NULL; 3345 *intmask_p |= SDHCI_INT_DATA_CRC; 3346 return; 3347 } 3348 3349 __sdhci_finish_mrq(host, host->cmd->mrq); 3350 return; 3351 } 3352 3353 /* Handle auto-CMD23 error */ 3354 if (intmask & SDHCI_INT_AUTO_CMD_ERR) { 3355 struct mmc_request *mrq = host->cmd->mrq; 3356 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3357 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 
3358 -ETIMEDOUT : 3359 -EILSEQ; 3360 3361 sdhci_err_stats_inc(host, AUTO_CMD); 3362 3363 if (sdhci_auto_cmd23(host, mrq)) { 3364 mrq->sbc->error = err; 3365 __sdhci_finish_mrq(host, mrq); 3366 return; 3367 } 3368 } 3369 3370 if (intmask & SDHCI_INT_RESPONSE) 3371 sdhci_finish_command(host); 3372 } 3373 3374 static void sdhci_adma_show_error(struct sdhci_host *host) 3375 { 3376 void *desc = host->adma_table; 3377 dma_addr_t dma = host->adma_addr; 3378 3379 sdhci_dumpregs(host); 3380 3381 while (true) { 3382 struct sdhci_adma2_64_desc *dma_desc = desc; 3383 3384 if (host->flags & SDHCI_USE_64_BIT_DMA) 3385 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 3386 (unsigned long long)dma, 3387 le32_to_cpu(dma_desc->addr_hi), 3388 le32_to_cpu(dma_desc->addr_lo), 3389 le16_to_cpu(dma_desc->len), 3390 le16_to_cpu(dma_desc->cmd)); 3391 else 3392 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 3393 (unsigned long long)dma, 3394 le32_to_cpu(dma_desc->addr_lo), 3395 le16_to_cpu(dma_desc->len), 3396 le16_to_cpu(dma_desc->cmd)); 3397 3398 desc += host->desc_sz; 3399 dma += host->desc_sz; 3400 3401 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 3402 break; 3403 } 3404 } 3405 3406 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 3407 { 3408 /* 3409 * CMD19 generates _only_ a Buffer Read Ready interrupt when 3410 * sdhci_send_tuning() is used, so it is handled here. The case of 3411 * PIO mode with mmc_send_tuning() must be excluded (the !host->data 3412 * check): otherwise sdhci_transfer_pio() would never be called, 3413 * SDHCI_INT_DATA_AVAIL would stay asserted and we would be stuck in an irq storm. 3414 */ 3415 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) { 3416 if (mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) { 3417 host->tuning_done = 1; 3418 wake_up(&host->buf_ready_int); 3419 return; 3420 } 3421 } 3422 3423 if (!host->data) { 3424 struct mmc_command *data_cmd = host->data_cmd; 3425 3426 /* 3427 * The "data complete" interrupt is also used to 3428 * indicate that a busy state has ended. See comment 3429 * above in sdhci_cmd_irq(). 3430 */ 3431 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 3432 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3433 host->data_cmd = NULL; 3434 data_cmd->error = -ETIMEDOUT; 3435 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3436 __sdhci_finish_mrq(host, data_cmd->mrq); 3437 return; 3438 } 3439 if (intmask & SDHCI_INT_DATA_END) { 3440 host->data_cmd = NULL; 3441 /* 3442 * Some cards handle busy-end interrupt 3443 * before the command completed, so make 3444 * sure we do things in the proper order. 3445 */ 3446 if (host->cmd == data_cmd) 3447 return; 3448 3449 __sdhci_finish_mrq(host, data_cmd->mrq); 3450 return; 3451 } 3452 } 3453 3454 /* 3455 * SDHCI recovers from errors by resetting the cmd and data 3456 * circuits. Until that is done, there very well might be more 3457 * interrupts, so ignore them in that case.
3458 */ 3459 if (host->pending_reset) 3460 return; 3461 3462 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 3463 mmc_hostname(host->mmc), (unsigned)intmask); 3464 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3465 sdhci_dumpregs(host); 3466 3467 return; 3468 } 3469 3470 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3471 host->data->error = -ETIMEDOUT; 3472 sdhci_err_stats_inc(host, DAT_TIMEOUT); 3473 } else if (intmask & SDHCI_INT_DATA_END_BIT) { 3474 host->data->error = -EILSEQ; 3475 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3476 sdhci_err_stats_inc(host, DAT_CRC); 3477 } else if ((intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) && 3478 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 3479 != MMC_BUS_TEST_R) { 3480 host->data->error = -EILSEQ; 3481 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3482 sdhci_err_stats_inc(host, DAT_CRC); 3483 if (intmask & SDHCI_INT_TUNING_ERROR) { 3484 u16 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 3485 3486 ctrl2 &= ~SDHCI_CTRL_TUNED_CLK; 3487 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 3488 } 3489 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 3490 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), 3491 intmask); 3492 sdhci_adma_show_error(host); 3493 sdhci_err_stats_inc(host, ADMA); 3494 host->data->error = -EIO; 3495 if (host->ops->adma_workaround) 3496 host->ops->adma_workaround(host, intmask); 3497 } 3498 3499 if (host->data->error) 3500 sdhci_finish_data(host); 3501 else { 3502 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 3503 sdhci_transfer_pio(host); 3504 3505 /* 3506 * We currently don't do anything fancy with DMA 3507 * boundaries, but as we can't disable the feature 3508 * we need to at least restart the transfer. 3509 * 3510 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 3511 * should return a valid address to continue from, but as 3512 * some controllers are faulty, don't trust them. 3513 */ 3514 if (intmask & SDHCI_INT_DMA_END) { 3515 dma_addr_t dmastart, dmanow; 3516 3517 dmastart = sdhci_sdma_address(host); 3518 dmanow = dmastart + host->data->bytes_xfered; 3519 /* 3520 * Force update to the next DMA block boundary. 3521 */ 3522 dmanow = (dmanow & 3523 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 3524 SDHCI_DEFAULT_BOUNDARY_SIZE; 3525 host->data->bytes_xfered = dmanow - dmastart; 3526 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", 3527 &dmastart, host->data->bytes_xfered, &dmanow); 3528 sdhci_set_sdma_addr(host, dmanow); 3529 } 3530 3531 if (intmask & SDHCI_INT_DATA_END) { 3532 if (host->cmd == host->data_cmd) { 3533 /* 3534 * Data managed to finish before the 3535 * command completed. Make sure we do 3536 * things in the proper order. 
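 * (host->data_early set below is consumed in sdhci_finish_command(), which finishes the data transfer once the command itself has completed.)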
3537 */ 3538 host->data_early = 1; 3539 } else { 3540 sdhci_finish_data(host); 3541 } 3542 } 3543 } 3544 } 3545 3546 static inline bool sdhci_defer_done(struct sdhci_host *host, 3547 struct mmc_request *mrq) 3548 { 3549 struct mmc_data *data = mrq->data; 3550 3551 return host->pending_reset || host->always_defer_done || 3552 ((host->flags & SDHCI_REQ_USE_DMA) && data && 3553 data->host_cookie == COOKIE_MAPPED); 3554 } 3555 3556 static irqreturn_t sdhci_irq(int irq, void *dev_id) 3557 { 3558 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0}; 3559 irqreturn_t result = IRQ_NONE; 3560 struct sdhci_host *host = dev_id; 3561 u32 intmask, mask, unexpected = 0; 3562 int max_loops = 16; 3563 int i; 3564 3565 spin_lock(&host->lock); 3566 3567 if (host->runtime_suspended) { 3568 spin_unlock(&host->lock); 3569 return IRQ_NONE; 3570 } 3571 3572 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3573 if (!intmask || intmask == 0xffffffff) { 3574 result = IRQ_NONE; 3575 goto out; 3576 } 3577 3578 do { 3579 DBG("IRQ status 0x%08x\n", intmask); 3580 3581 if (host->ops->irq) { 3582 intmask = host->ops->irq(host, intmask); 3583 if (!intmask) 3584 goto cont; 3585 } 3586 3587 /* Clear selected interrupts. */ 3588 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3589 SDHCI_INT_BUS_POWER); 3590 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3591 3592 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3593 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 3594 SDHCI_CARD_PRESENT; 3595 3596 /* 3597 * There is an observation on i.MX eSDHC: the INSERT bit 3598 * will be immediately set again when it gets cleared, 3599 * if a card is inserted. We have to mask the irq to 3600 * prevent an interrupt storm which would freeze the 3601 * system. The REMOVE bit behaves in the same 3602 * way. 3603 * 3604 * More testing is needed here to ensure it works 3605 * for other platforms though. 3606 */ 3607 host->ier &= ~(SDHCI_INT_CARD_INSERT | 3608 SDHCI_INT_CARD_REMOVE); 3609 host->ier |= present ?
SDHCI_INT_CARD_REMOVE : 3610 SDHCI_INT_CARD_INSERT; 3611 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3612 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3613 3614 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 3615 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 3616 3617 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 3618 SDHCI_INT_CARD_REMOVE); 3619 result = IRQ_WAKE_THREAD; 3620 } 3621 3622 if (intmask & SDHCI_INT_CMD_MASK) 3623 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); 3624 3625 if (intmask & SDHCI_INT_DATA_MASK) 3626 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 3627 3628 if (intmask & SDHCI_INT_BUS_POWER) 3629 pr_err("%s: Card is consuming too much power!\n", 3630 mmc_hostname(host->mmc)); 3631 3632 if (intmask & SDHCI_INT_RETUNE) 3633 mmc_retune_needed(host->mmc); 3634 3635 if ((intmask & SDHCI_INT_CARD_INT) && 3636 (host->ier & SDHCI_INT_CARD_INT)) { 3637 sdhci_enable_sdio_irq_nolock(host, false); 3638 sdio_signal_irq(host->mmc); 3639 } 3640 3641 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3642 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3643 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 3644 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 3645 3646 if (intmask) { 3647 unexpected |= intmask; 3648 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3649 } 3650 cont: 3651 if (result == IRQ_NONE) 3652 result = IRQ_HANDLED; 3653 3654 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3655 } while (intmask && --max_loops); 3656 3657 /* Determine if mrqs can be completed immediately */ 3658 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3659 struct mmc_request *mrq = host->mrqs_done[i]; 3660 3661 if (!mrq) 3662 continue; 3663 3664 if (sdhci_defer_done(host, mrq)) { 3665 result = IRQ_WAKE_THREAD; 3666 } else { 3667 mrqs_done[i] = mrq; 3668 host->mrqs_done[i] = NULL; 3669 } 3670 } 3671 out: 3672 if (host->deferred_cmd) 3673 result = IRQ_WAKE_THREAD; 3674 3675 spin_unlock(&host->lock); 3676 3677 /* Process mrqs ready for immediate completion */ 3678 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3679 if (!mrqs_done[i]) 3680 continue; 3681 3682 if (host->ops->request_done) 3683 host->ops->request_done(host, mrqs_done[i]); 3684 else 3685 mmc_request_done(host->mmc, mrqs_done[i]); 3686 } 3687 3688 if (unexpected) { 3689 pr_err("%s: Unexpected interrupt 0x%08x.\n", 3690 mmc_hostname(host->mmc), unexpected); 3691 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3692 sdhci_dumpregs(host); 3693 } 3694 3695 return result; 3696 } 3697 3698 irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3699 { 3700 struct sdhci_host *host = dev_id; 3701 struct mmc_command *cmd; 3702 unsigned long flags; 3703 u32 isr; 3704 3705 while (!sdhci_request_done(host)) 3706 ; 3707 3708 spin_lock_irqsave(&host->lock, flags); 3709 3710 isr = host->thread_isr; 3711 host->thread_isr = 0; 3712 3713 cmd = host->deferred_cmd; 3714 if (cmd && !sdhci_send_command_retry(host, cmd, flags)) 3715 sdhci_finish_mrq(host, cmd->mrq); 3716 3717 spin_unlock_irqrestore(&host->lock, flags); 3718 3719 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3720 struct mmc_host *mmc = host->mmc; 3721 3722 mmc->ops->card_event(mmc); 3723 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3724 } 3725 3726 return IRQ_HANDLED; 3727 } 3728 EXPORT_SYMBOL_GPL(sdhci_thread_irq); 3729 3730 /*****************************************************************************\ 3731 * * 3732 * Suspend/resume * 3733 * * 3734 \*****************************************************************************/ 3735 3736 #ifdef CONFIG_PM 3737 3738 
static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host) 3739 { 3740 return mmc_card_is_removable(host->mmc) && 3741 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3742 !mmc_can_gpio_cd(host->mmc); 3743 } 3744 3745 /* 3746 * To enable wakeup events, the corresponding events have to be enabled in 3747 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal 3748 * Table' in the SD Host Controller Standard Specification. 3749 * It is useless to restore SDHCI_INT_ENABLE state in 3750 * sdhci_disable_irq_wakeups() since it will be set by 3751 * sdhci_enable_card_detection() or sdhci_init(). 3752 */ 3753 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 3754 { 3755 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 3756 SDHCI_WAKE_ON_INT; 3757 u32 irq_val = 0; 3758 u8 wake_val = 0; 3759 u8 val; 3760 3761 if (sdhci_cd_irq_can_wakeup(host)) { 3762 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 3763 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 3764 } 3765 3766 if (mmc_card_wake_sdio_irq(host->mmc)) { 3767 wake_val |= SDHCI_WAKE_ON_INT; 3768 irq_val |= SDHCI_INT_CARD_INT; 3769 } 3770 3771 if (!irq_val) 3772 return false; 3773 3774 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3775 val &= ~mask; 3776 val |= wake_val; 3777 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3778 3779 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 3780 3781 host->irq_wake_enabled = !enable_irq_wake(host->irq); 3782 3783 return host->irq_wake_enabled; 3784 } 3785 3786 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 3787 { 3788 u8 val; 3789 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 3790 | SDHCI_WAKE_ON_INT; 3791 3792 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3793 val &= ~mask; 3794 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3795 3796 disable_irq_wake(host->irq); 3797 3798 host->irq_wake_enabled = false; 3799 } 3800 3801 int sdhci_suspend_host(struct sdhci_host *host) 3802 { 3803 sdhci_disable_card_detection(host); 3804 3805 mmc_retune_timer_stop(host->mmc); 3806 3807 if (!device_may_wakeup(mmc_dev(host->mmc)) || 3808 !sdhci_enable_irq_wakeups(host)) { 3809 host->ier = 0; 3810 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3811 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3812 free_irq(host->irq, host); 3813 } 3814 3815 return 0; 3816 } 3817 3818 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 3819 3820 int sdhci_resume_host(struct sdhci_host *host) 3821 { 3822 struct mmc_host *mmc = host->mmc; 3823 int ret = 0; 3824 3825 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3826 if (host->ops->enable_dma) 3827 host->ops->enable_dma(host); 3828 } 3829 3830 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) && 3831 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 3832 /* Card keeps power but host controller does not */ 3833 sdhci_init(host, 0); 3834 host->pwr = 0; 3835 host->clock = 0; 3836 host->reinit_uhs = true; 3837 mmc->ops->set_ios(mmc, &mmc->ios); 3838 } else { 3839 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER)); 3840 } 3841 3842 if (host->irq_wake_enabled) { 3843 sdhci_disable_irq_wakeups(host); 3844 } else { 3845 ret = request_threaded_irq(host->irq, sdhci_irq, 3846 sdhci_thread_irq, IRQF_SHARED, 3847 mmc_hostname(mmc), host); 3848 if (ret) 3849 return ret; 3850 } 3851 3852 sdhci_enable_card_detection(host); 3853 3854 return ret; 3855 } 3856 3857 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3858 3859 int sdhci_runtime_suspend_host(struct sdhci_host *host) 3860 { 3861 unsigned long flags; 3862 3863 mmc_retune_timer_stop(host->mmc); 
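/* Keep only the SDIO card interrupt enabled while runtime suspended; the synchronize_hardirq() call below ensures any handler that is still running has finished before the host is marked suspended. */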
3864 3865 spin_lock_irqsave(&host->lock, flags); 3866 host->ier &= SDHCI_INT_CARD_INT; 3867 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3868 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3869 spin_unlock_irqrestore(&host->lock, flags); 3870 3871 synchronize_hardirq(host->irq); 3872 3873 spin_lock_irqsave(&host->lock, flags); 3874 host->runtime_suspended = true; 3875 spin_unlock_irqrestore(&host->lock, flags); 3876 3877 return 0; 3878 } 3879 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 3880 3881 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) 3882 { 3883 struct mmc_host *mmc = host->mmc; 3884 unsigned long flags; 3885 int host_flags = host->flags; 3886 3887 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3888 if (host->ops->enable_dma) 3889 host->ops->enable_dma(host); 3890 } 3891 3892 sdhci_init(host, soft_reset); 3893 3894 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && 3895 mmc->ios.power_mode != MMC_POWER_OFF) { 3896 /* Force clock and power re-program */ 3897 host->pwr = 0; 3898 host->clock = 0; 3899 host->reinit_uhs = true; 3900 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); 3901 mmc->ops->set_ios(mmc, &mmc->ios); 3902 3903 if ((host_flags & SDHCI_PV_ENABLED) && 3904 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 3905 spin_lock_irqsave(&host->lock, flags); 3906 sdhci_enable_preset_value(host, true); 3907 spin_unlock_irqrestore(&host->lock, flags); 3908 } 3909 3910 if ((mmc->caps2 & MMC_CAP2_HS400_ES) && 3911 mmc->ops->hs400_enhanced_strobe) 3912 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); 3913 } 3914 3915 spin_lock_irqsave(&host->lock, flags); 3916 3917 host->runtime_suspended = false; 3918 3919 /* Enable SDIO IRQ */ 3920 if (sdio_irq_claimed(mmc)) 3921 sdhci_enable_sdio_irq_nolock(host, true); 3922 3923 /* Enable Card Detection */ 3924 sdhci_enable_card_detection(host); 3925 3926 spin_unlock_irqrestore(&host->lock, flags); 3927 3928 return 0; 3929 } 3930 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 3931 3932 #endif /* CONFIG_PM */ 3933 3934 /*****************************************************************************\ 3935 * * 3936 * Command Queue Engine (CQE) helpers * 3937 * * 3938 \*****************************************************************************/ 3939 3940 void sdhci_cqe_enable(struct mmc_host *mmc) 3941 { 3942 struct sdhci_host *host = mmc_priv(mmc); 3943 unsigned long flags; 3944 u8 ctrl; 3945 3946 spin_lock_irqsave(&host->lock, flags); 3947 3948 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 3949 ctrl &= ~SDHCI_CTRL_DMA_MASK; 3950 /* 3951 * Hosts from v4.10 support the ADMA3 DMA type. ADMA3 uses integrated 3952 * descriptors, which suit command queuing better because both the 3953 * command and transfer descriptors are fetched together.
3954 */ 3955 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) 3956 ctrl |= SDHCI_CTRL_ADMA3; 3957 else if (host->flags & SDHCI_USE_64_BIT_DMA) 3958 ctrl |= SDHCI_CTRL_ADMA64; 3959 else 3960 ctrl |= SDHCI_CTRL_ADMA32; 3961 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3962 3963 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3964 SDHCI_BLOCK_SIZE); 3965 3966 /* Set maximum timeout */ 3967 sdhci_set_timeout(host, NULL); 3968 3969 host->ier = host->cqe_ier; 3970 3971 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3972 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3973 3974 host->cqe_on = true; 3975 3976 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3977 mmc_hostname(mmc), host->ier, 3978 sdhci_readl(host, SDHCI_INT_STATUS)); 3979 3980 spin_unlock_irqrestore(&host->lock, flags); 3981 } 3982 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3983 3984 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3985 { 3986 struct sdhci_host *host = mmc_priv(mmc); 3987 unsigned long flags; 3988 3989 spin_lock_irqsave(&host->lock, flags); 3990 3991 sdhci_set_default_irqs(host); 3992 3993 host->cqe_on = false; 3994 3995 if (recovery) 3996 sdhci_reset_for(host, CQE_RECOVERY); 3997 3998 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 3999 mmc_hostname(mmc), host->ier, 4000 sdhci_readl(host, SDHCI_INT_STATUS)); 4001 4002 spin_unlock_irqrestore(&host->lock, flags); 4003 } 4004 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 4005 4006 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, 4007 int *data_error) 4008 { 4009 u32 mask; 4010 4011 if (!host->cqe_on) 4012 return false; 4013 4014 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) { 4015 *cmd_error = -EILSEQ; 4016 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 4017 sdhci_err_stats_inc(host, CMD_CRC); 4018 } else if (intmask & SDHCI_INT_TIMEOUT) { 4019 *cmd_error = -ETIMEDOUT; 4020 sdhci_err_stats_inc(host, CMD_TIMEOUT); 4021 } else 4022 *cmd_error = 0; 4023 4024 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) { 4025 *data_error = -EILSEQ; 4026 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 4027 sdhci_err_stats_inc(host, DAT_CRC); 4028 } else if (intmask & SDHCI_INT_DATA_TIMEOUT) { 4029 *data_error = -ETIMEDOUT; 4030 sdhci_err_stats_inc(host, DAT_TIMEOUT); 4031 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 4032 *data_error = -EIO; 4033 sdhci_err_stats_inc(host, ADMA); 4034 } else 4035 *data_error = 0; 4036 4037 /* Clear selected interrupts. 
*/ 4038 mask = intmask & host->cqe_ier; 4039 sdhci_writel(host, mask, SDHCI_INT_STATUS); 4040 4041 if (intmask & SDHCI_INT_BUS_POWER) 4042 pr_err("%s: Card is consuming too much power!\n", 4043 mmc_hostname(host->mmc)); 4044 4045 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); 4046 if (intmask) { 4047 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 4048 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", 4049 mmc_hostname(host->mmc), intmask); 4050 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 4051 sdhci_dumpregs(host); 4052 } 4053 4054 return true; 4055 } 4056 EXPORT_SYMBOL_GPL(sdhci_cqe_irq); 4057 4058 /*****************************************************************************\ 4059 * * 4060 * Device allocation/registration * 4061 * * 4062 \*****************************************************************************/ 4063 4064 struct sdhci_host *sdhci_alloc_host(struct device *dev, 4065 size_t priv_size) 4066 { 4067 struct mmc_host *mmc; 4068 struct sdhci_host *host; 4069 4070 WARN_ON(dev == NULL); 4071 4072 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 4073 if (!mmc) 4074 return ERR_PTR(-ENOMEM); 4075 4076 host = mmc_priv(mmc); 4077 host->mmc = mmc; 4078 host->mmc_host_ops = sdhci_ops; 4079 mmc->ops = &host->mmc_host_ops; 4080 4081 host->flags = SDHCI_SIGNALING_330; 4082 4083 host->cqe_ier = SDHCI_CQE_INT_MASK; 4084 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; 4085 4086 host->tuning_delay = -1; 4087 host->tuning_loop_count = MAX_TUNING_LOOP; 4088 4089 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; 4090 4091 /* 4092 * The DMA table descriptor count is calculated as the maximum 4093 * number of segments times 2, to allow for an alignment 4094 * descriptor for each segment, plus 1 for a nop end descriptor. 4095 */ 4096 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; 4097 host->max_adma = 65536; 4098 4099 host->max_timeout_count = 0xE; 4100 4101 host->complete_work_fn = sdhci_complete_work; 4102 host->thread_irq_fn = sdhci_thread_irq; 4103 4104 return host; 4105 } 4106 4107 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 4108 4109 static int sdhci_set_dma_mask(struct sdhci_host *host) 4110 { 4111 struct mmc_host *mmc = host->mmc; 4112 struct device *dev = mmc_dev(mmc); 4113 int ret = -EINVAL; 4114 4115 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 4116 host->flags &= ~SDHCI_USE_64_BIT_DMA; 4117 4118 /* Try 64-bit mask if hardware is capable of it */ 4119 if (host->flags & SDHCI_USE_64_BIT_DMA) { 4120 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 4121 if (ret) { 4122 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 4123 mmc_hostname(mmc)); 4124 host->flags &= ~SDHCI_USE_64_BIT_DMA; 4125 } 4126 } 4127 4128 /* 32-bit mask as default & fallback */ 4129 if (ret) { 4130 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 4131 if (ret) 4132 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 4133 mmc_hostname(mmc)); 4134 } 4135 4136 return ret; 4137 } 4138 4139 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, 4140 const u32 *caps, const u32 *caps1) 4141 { 4142 u16 v; 4143 u64 dt_caps_mask = 0; 4144 u64 dt_caps = 0; 4145 4146 if (host->read_caps) 4147 return; 4148 4149 host->read_caps = true; 4150 4151 if (debug_quirks) 4152 host->quirks = debug_quirks; 4153 4154 if (debug_quirks2) 4155 host->quirks2 = debug_quirks2; 4156 4157 sdhci_reset_for_all(host); 4158 4159 if (host->v4_mode) 4160 sdhci_do_enable_v4_mode(host); 4161 4162 device_property_read_u64(mmc_dev(host->mmc), 4163 "sdhci-caps-mask", &dt_caps_mask); 4164 device_property_read_u64(mmc_dev(host->mmc), 
4165 "sdhci-caps", &dt_caps); 4166 4167 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION); 4168 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; 4169 4170 if (caps) { 4171 host->caps = *caps; 4172 } else { 4173 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); 4174 host->caps &= ~lower_32_bits(dt_caps_mask); 4175 host->caps |= lower_32_bits(dt_caps); 4176 } 4177 4178 if (host->version < SDHCI_SPEC_300) 4179 return; 4180 4181 if (caps1) { 4182 host->caps1 = *caps1; 4183 } else { 4184 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); 4185 host->caps1 &= ~upper_32_bits(dt_caps_mask); 4186 host->caps1 |= upper_32_bits(dt_caps); 4187 } 4188 } 4189 EXPORT_SYMBOL_GPL(__sdhci_read_caps); 4190 4191 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host) 4192 { 4193 struct mmc_host *mmc = host->mmc; 4194 unsigned int max_blocks; 4195 unsigned int bounce_size; 4196 int ret; 4197 4198 /* 4199 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer 4200 * has diminishing returns, this is probably because SD/MMC 4201 * cards are usually optimized to handle this size of requests. 4202 */ 4203 bounce_size = SZ_64K; 4204 /* 4205 * Adjust downwards to maximum request size if this is less 4206 * than our segment size, else hammer down the maximum 4207 * request size to the maximum buffer size. 4208 */ 4209 if (mmc->max_req_size < bounce_size) 4210 bounce_size = mmc->max_req_size; 4211 max_blocks = bounce_size / 512; 4212 4213 /* 4214 * When we just support one segment, we can get significant 4215 * speedups by the help of a bounce buffer to group scattered 4216 * reads/writes together. 4217 */ 4218 host->bounce_buffer = devm_kmalloc(mmc_dev(mmc), 4219 bounce_size, 4220 GFP_KERNEL); 4221 if (!host->bounce_buffer) { 4222 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", 4223 mmc_hostname(mmc), 4224 bounce_size); 4225 /* 4226 * Exiting with zero here makes sure we proceed with 4227 * mmc->max_segs == 1. 4228 */ 4229 return; 4230 } 4231 4232 host->bounce_addr = dma_map_single(mmc_dev(mmc), 4233 host->bounce_buffer, 4234 bounce_size, 4235 DMA_BIDIRECTIONAL); 4236 ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr); 4237 if (ret) { 4238 devm_kfree(mmc_dev(mmc), host->bounce_buffer); 4239 host->bounce_buffer = NULL; 4240 /* Again fall back to max_segs == 1 */ 4241 return; 4242 } 4243 4244 host->bounce_buffer_size = bounce_size; 4245 4246 /* Lie about this since we're bouncing */ 4247 mmc->max_segs = max_blocks; 4248 mmc->max_seg_size = bounce_size; 4249 mmc->max_req_size = bounce_size; 4250 4251 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", 4252 mmc_hostname(mmc), max_blocks, bounce_size); 4253 } 4254 4255 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host) 4256 { 4257 /* 4258 * According to SD Host Controller spec v4.10, bit[27] added from 4259 * version 4.10 in Capabilities Register is used as 64-bit System 4260 * Address support for V4 mode. 
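 * Hosts older than v4.10, or hosts not operating in V4 mode, advertise
 * 64-bit support through the original 64-bit System Bus Support bit
 * instead, which is what the fallback below checks.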
4261 */ 4262 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) 4263 return host->caps & SDHCI_CAN_64BIT_V4; 4264 4265 return host->caps & SDHCI_CAN_64BIT; 4266 } 4267 4268 int sdhci_setup_host(struct sdhci_host *host) 4269 { 4270 struct mmc_host *mmc; 4271 u32 max_current_caps; 4272 unsigned int ocr_avail; 4273 unsigned int override_timeout_clk; 4274 u32 max_clk; 4275 int ret = 0; 4276 bool enable_vqmmc = false; 4277 4278 WARN_ON(host == NULL); 4279 if (host == NULL) 4280 return -EINVAL; 4281 4282 mmc = host->mmc; 4283 4284 /* 4285 * If there are external regulators, get them. Note this must be done 4286 * early before resetting the host and reading the capabilities so that 4287 * the host can take the appropriate action if regulators are not 4288 * available. 4289 */ 4290 if (!mmc->supply.vqmmc) { 4291 ret = mmc_regulator_get_supply(mmc); 4292 if (ret) 4293 return ret; 4294 enable_vqmmc = true; 4295 } 4296 4297 DBG("Version: 0x%08x | Present: 0x%08x\n", 4298 sdhci_readw(host, SDHCI_HOST_VERSION), 4299 sdhci_readl(host, SDHCI_PRESENT_STATE)); 4300 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", 4301 sdhci_readl(host, SDHCI_CAPABILITIES), 4302 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 4303 4304 sdhci_read_caps(host); 4305 4306 override_timeout_clk = host->timeout_clk; 4307 4308 if (host->version > SDHCI_SPEC_420) { 4309 pr_err("%s: Unknown controller version (%d). You may experience problems.\n", 4310 mmc_hostname(mmc), host->version); 4311 } 4312 4313 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 4314 host->flags |= SDHCI_USE_SDMA; 4315 else if (!(host->caps & SDHCI_CAN_DO_SDMA)) 4316 DBG("Controller doesn't have SDMA capability\n"); 4317 else 4318 host->flags |= SDHCI_USE_SDMA; 4319 4320 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 4321 (host->flags & SDHCI_USE_SDMA)) { 4322 DBG("Disabling DMA as it is marked broken\n"); 4323 host->flags &= ~SDHCI_USE_SDMA; 4324 } 4325 4326 if ((host->version >= SDHCI_SPEC_200) && 4327 (host->caps & SDHCI_CAN_DO_ADMA2)) 4328 host->flags |= SDHCI_USE_ADMA; 4329 4330 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 4331 (host->flags & SDHCI_USE_ADMA)) { 4332 DBG("Disabling ADMA as it is marked broken\n"); 4333 host->flags &= ~SDHCI_USE_ADMA; 4334 } 4335 4336 if (sdhci_can_64bit_dma(host)) 4337 host->flags |= SDHCI_USE_64_BIT_DMA; 4338 4339 if (host->use_external_dma) { 4340 ret = sdhci_external_dma_init(host); 4341 if (ret == -EPROBE_DEFER) 4342 goto unreg; 4343 /* 4344 * Fall back to use the DMA/PIO integrated in standard SDHCI 4345 * instead of external DMA devices. 
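 * (A probe deferral from the external DMA provider is propagated above
 * so the probe can be retried later; any other failure simply disables
 * external DMA and keeps the standard SDHCI engines.)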
4346 */ 4347 else if (ret) 4348 sdhci_switch_external_dma(host, false); 4349 /* Disable internal DMA sources */ 4350 else 4351 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4352 } 4353 4354 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 4355 if (host->ops->set_dma_mask) 4356 ret = host->ops->set_dma_mask(host); 4357 else 4358 ret = sdhci_set_dma_mask(host); 4359 4360 if (!ret && host->ops->enable_dma) 4361 ret = host->ops->enable_dma(host); 4362 4363 if (ret) { 4364 pr_warn("%s: No suitable DMA available - falling back to PIO\n", 4365 mmc_hostname(mmc)); 4366 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4367 4368 ret = 0; 4369 } 4370 } 4371 4372 /* SDMA does not support 64-bit DMA if v4 mode not set */ 4373 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode) 4374 host->flags &= ~SDHCI_USE_SDMA; 4375 4376 if (host->flags & SDHCI_USE_ADMA) { 4377 dma_addr_t dma; 4378 void *buf; 4379 4380 if (!(host->flags & SDHCI_USE_64_BIT_DMA)) 4381 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ; 4382 else if (!host->alloc_desc_sz) 4383 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); 4384 4385 host->desc_sz = host->alloc_desc_sz; 4386 host->adma_table_sz = host->adma_table_cnt * host->desc_sz; 4387 4388 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; 4389 /* 4390 * Use zalloc to zero the reserved high 32-bits of 128-bit 4391 * descriptors so that they never need to be written. 4392 */ 4393 buf = dma_alloc_coherent(mmc_dev(mmc), 4394 host->align_buffer_sz + host->adma_table_sz, 4395 &dma, GFP_KERNEL); 4396 if (!buf) { 4397 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 4398 mmc_hostname(mmc)); 4399 host->flags &= ~SDHCI_USE_ADMA; 4400 } else if ((dma + host->align_buffer_sz) & 4401 (SDHCI_ADMA2_DESC_ALIGN - 1)) { 4402 pr_warn("%s: unable to allocate aligned ADMA descriptor\n", 4403 mmc_hostname(mmc)); 4404 host->flags &= ~SDHCI_USE_ADMA; 4405 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4406 host->adma_table_sz, buf, dma); 4407 } else { 4408 host->align_buffer = buf; 4409 host->align_addr = dma; 4410 4411 host->adma_table = buf + host->align_buffer_sz; 4412 host->adma_addr = dma + host->align_buffer_sz; 4413 } 4414 } 4415 4416 /* 4417 * If we use DMA, then it's up to the caller to set the DMA 4418 * mask, but PIO does not need the hw shim so we set a new 4419 * mask here in that case. 4420 */ 4421 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 4422 host->dma_mask = DMA_BIT_MASK(64); 4423 mmc_dev(mmc)->dma_mask = &host->dma_mask; 4424 } 4425 4426 if (host->version >= SDHCI_SPEC_300) 4427 host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps); 4428 else 4429 host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps); 4430 4431 host->max_clk *= 1000000; 4432 if (host->max_clk == 0 || host->quirks & 4433 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 4434 if (!host->ops->get_max_clock) { 4435 pr_err("%s: Hardware doesn't specify base clock frequency.\n", 4436 mmc_hostname(mmc)); 4437 ret = -ENODEV; 4438 goto undma; 4439 } 4440 host->max_clk = host->ops->get_max_clock(host); 4441 } 4442 4443 /* 4444 * In case of Host Controller v3.00, find out whether clock 4445 * multiplier is supported. 4446 */ 4447 host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1); 4448 4449 /* 4450 * In case the value in Clock Multiplier is 0, then programmable 4451 * clock mode is not supported, otherwise the actual clock 4452 * multiplier is one more than the value of Clock Multiplier 4453 * in the Capabilities Register. 
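 * (The programmable clock source then runs at the base clock times this
 * multiplier, which is why clk_mul is incremented below before it is
 * used to derive the maximum operating frequency.)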
4454 */ 4455 if (host->clk_mul) 4456 host->clk_mul += 1; 4457 4458 /* 4459 * Set host parameters. 4460 */ 4461 max_clk = host->max_clk; 4462 4463 if (host->ops->get_min_clock) 4464 mmc->f_min = host->ops->get_min_clock(host); 4465 else if (host->version >= SDHCI_SPEC_300) { 4466 if (host->clk_mul) 4467 max_clk = host->max_clk * host->clk_mul; 4468 /* 4469 * Divided Clock Mode minimum clock rate is always less than 4470 * Programmable Clock Mode minimum clock rate. 4471 */ 4472 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 4473 } else 4474 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 4475 4476 if (!mmc->f_max || mmc->f_max > max_clk) 4477 mmc->f_max = max_clk; 4478 4479 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 4480 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps); 4481 4482 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) 4483 host->timeout_clk *= 1000; 4484 4485 if (host->timeout_clk == 0) { 4486 if (!host->ops->get_timeout_clock) { 4487 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", 4488 mmc_hostname(mmc)); 4489 ret = -ENODEV; 4490 goto undma; 4491 } 4492 4493 host->timeout_clk = 4494 DIV_ROUND_UP(host->ops->get_timeout_clock(host), 4495 1000); 4496 } 4497 4498 if (override_timeout_clk) 4499 host->timeout_clk = override_timeout_clk; 4500 4501 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 4502 host->ops->get_max_timeout_count(host) : 1 << 27; 4503 mmc->max_busy_timeout /= host->timeout_clk; 4504 } 4505 4506 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT && 4507 !host->ops->get_max_timeout_count) 4508 mmc->max_busy_timeout = 0; 4509 4510 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23; 4511 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 4512 4513 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 4514 host->flags |= SDHCI_AUTO_CMD12; 4515 4516 /* 4517 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO. 4518 * For v4 mode, SDMA may use Auto-CMD23 as well. 4519 */ 4520 if ((host->version >= SDHCI_SPEC_300) && 4521 ((host->flags & SDHCI_USE_ADMA) || 4522 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) && 4523 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { 4524 host->flags |= SDHCI_AUTO_CMD23; 4525 DBG("Auto-CMD23 available\n"); 4526 } else { 4527 DBG("Auto-CMD23 unavailable\n"); 4528 } 4529 4530 /* 4531 * A controller may support 8-bit width, but the board itself 4532 * might not have the pins brought out. Boards that support 4533 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 4534 * their platform code before calling sdhci_add_host(), and we 4535 * won't assume 8-bit width for hosts without that CAP. 
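 * Only 4-bit width is assumed here, and even that is dropped when the
 * force-1-bit-data quirk is set.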
4536 */
4537 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4538 mmc->caps |= MMC_CAP_4_BIT_DATA;
4539
4540 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4541 mmc->caps &= ~MMC_CAP_CMD23;
4542
4543 if (host->caps & SDHCI_CAN_DO_HISPD)
4544 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4545
4546 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4547 mmc_card_is_removable(mmc) &&
4548 mmc_gpio_get_cd(mmc) < 0)
4549 mmc->caps |= MMC_CAP_NEEDS_POLL;
4550
4551 if (!IS_ERR(mmc->supply.vqmmc)) {
4552 if (enable_vqmmc) {
4553 ret = regulator_enable(mmc->supply.vqmmc);
4554 host->sdhci_core_to_disable_vqmmc = !ret;
4555 }
4556
4557 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
4558 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
4559 1950000))
4560 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
4561 SDHCI_SUPPORT_SDR50 |
4562 SDHCI_SUPPORT_DDR50);
4563
4564 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
4565 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
4566 3600000))
4567 host->flags &= ~SDHCI_SIGNALING_330;
4568
4569 if (ret) {
4570 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4571 mmc_hostname(mmc), ret);
4572 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
4573 }
4574
4575 }
4576
4577 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
4578 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4579 SDHCI_SUPPORT_DDR50);
4580 /*
4581 * The SDHCI controller in a SoC might support HS200/HS400
4582 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
4583 * but if the board is modeled such that the IO lines are not
4584 * connected to 1.8v then HS200/HS400 cannot be supported.
4585 * Disable HS200/HS400 if the board does not have 1.8v connected
4586 * to the IO lines. (The same applies to the other 1.8v modes.)
4587 */
4588 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4589 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4590 }
4591
4592 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4593 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4594 SDHCI_SUPPORT_DDR50))
4595 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4596
4597 /* SDR104 support also implies SDR50 support */
4598 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4599 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4600 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
4601 * field can be promoted to support HS200.
4602 */
4603 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4604 mmc->caps2 |= MMC_CAP2_HS200;
4605 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4606 mmc->caps |= MMC_CAP_UHS_SDR50;
4607 }
4608
4609 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4610 (host->caps1 & SDHCI_SUPPORT_HS400))
4611 mmc->caps2 |= MMC_CAP2_HS400;
4612
4613 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4614 (IS_ERR(mmc->supply.vqmmc) ||
4615 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4616 1300000)))
4617 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4618
4619 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4620 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4621 mmc->caps |= MMC_CAP_UHS_DDR50;
4622
4623 /* Does the host need tuning for SDR50?
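Per the spec, SDR104 always requires tuning, whereas SDR50 tuning is optional and only performed when the capability bit checked below is set.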
*/ 4624 if (host->caps1 & SDHCI_USE_SDR50_TUNING) 4625 host->flags |= SDHCI_SDR50_NEEDS_TUNING; 4626 4627 /* Driver Type(s) (A, C, D) supported by the host */ 4628 if (host->caps1 & SDHCI_DRIVER_TYPE_A) 4629 mmc->caps |= MMC_CAP_DRIVER_TYPE_A; 4630 if (host->caps1 & SDHCI_DRIVER_TYPE_C) 4631 mmc->caps |= MMC_CAP_DRIVER_TYPE_C; 4632 if (host->caps1 & SDHCI_DRIVER_TYPE_D) 4633 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 4634 4635 /* Initial value for re-tuning timer count */ 4636 host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK, 4637 host->caps1); 4638 4639 /* 4640 * In case Re-tuning Timer is not disabled, the actual value of 4641 * re-tuning timer will be 2 ^ (n - 1). 4642 */ 4643 if (host->tuning_count) 4644 host->tuning_count = 1 << (host->tuning_count - 1); 4645 4646 /* Re-tuning mode supported by the Host Controller */ 4647 host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1); 4648 4649 ocr_avail = 0; 4650 4651 /* 4652 * According to SD Host Controller spec v3.00, if the Host System 4653 * can afford more than 150mA, Host Driver should set XPC to 1. Also 4654 * the value is meaningful only if Voltage Support in the Capabilities 4655 * register is set. The actual current value is 4 times the register 4656 * value. 4657 */ 4658 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); 4659 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) { 4660 int curr = regulator_get_current_limit(mmc->supply.vmmc); 4661 if (curr > 0) { 4662 4663 /* convert to SDHCI_MAX_CURRENT format */ 4664 curr = curr/1000; /* convert to mA */ 4665 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; 4666 4667 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); 4668 max_current_caps = 4669 FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) | 4670 FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) | 4671 FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr); 4672 } 4673 } 4674 4675 if (host->caps & SDHCI_CAN_VDD_330) { 4676 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 4677 4678 mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK, 4679 max_current_caps) * 4680 SDHCI_MAX_CURRENT_MULTIPLIER; 4681 } 4682 if (host->caps & SDHCI_CAN_VDD_300) { 4683 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 4684 4685 mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK, 4686 max_current_caps) * 4687 SDHCI_MAX_CURRENT_MULTIPLIER; 4688 } 4689 if (host->caps & SDHCI_CAN_VDD_180) { 4690 ocr_avail |= MMC_VDD_165_195; 4691 4692 mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK, 4693 max_current_caps) * 4694 SDHCI_MAX_CURRENT_MULTIPLIER; 4695 } 4696 4697 /* If OCR set by host, use it instead. */ 4698 if (host->ocr_mask) 4699 ocr_avail = host->ocr_mask; 4700 4701 /* If OCR set by external regulators, give it highest prio. 
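mmc->ocr_avail is typically filled in from the vmmc regulator by mmc_regulator_get_supply(), and that value takes precedence over both the capability bits and host->ocr_mask.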
*/
4702 if (mmc->ocr_avail)
4703 ocr_avail = mmc->ocr_avail;
4704
4705 mmc->ocr_avail = ocr_avail;
4706 mmc->ocr_avail_sdio = ocr_avail;
4707 if (host->ocr_avail_sdio)
4708 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4709 mmc->ocr_avail_sd = ocr_avail;
4710 if (host->ocr_avail_sd)
4711 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4712 else /* normal SD controllers don't support 1.8V */
4713 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4714 mmc->ocr_avail_mmc = ocr_avail;
4715 if (host->ocr_avail_mmc)
4716 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4717
4718 if (mmc->ocr_avail == 0) {
4719 pr_err("%s: Hardware doesn't report any supported voltages.\n",
4720 mmc_hostname(mmc));
4721 ret = -ENODEV;
4722 goto unreg;
4723 }
4724
4725 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4726 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4727 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4728 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4729 host->flags |= SDHCI_SIGNALING_180;
4730
4731 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4732 host->flags |= SDHCI_SIGNALING_120;
4733
4734 spin_lock_init(&host->lock);
4735
4736 /*
4737 * Maximum number of sectors in one transfer. Limited by SDMA boundary
4738 * size (512KiB). Note some tuning modes impose a 4MiB limit, but the
4739 * SDMA limit here is lower anyway.
4740 */
4741 mmc->max_req_size = 524288;
4742
4743 /*
4744 * Maximum number of segments. Depends on whether the hardware
4745 * can do scatter/gather.
4746 */
4747 if (host->flags & SDHCI_USE_ADMA) {
4748 mmc->max_segs = SDHCI_MAX_SEGS;
4749 } else if (host->flags & SDHCI_USE_SDMA) {
4750 mmc->max_segs = 1;
4751 mmc->max_req_size = min_t(size_t, mmc->max_req_size,
4752 dma_max_mapping_size(mmc_dev(mmc)));
4753 } else { /* PIO */
4754 mmc->max_segs = SDHCI_MAX_SEGS;
4755 }
4756
4757 /*
4758 * Maximum segment size. Could be one segment with the maximum number
4759 * of bytes. When doing hardware scatter/gather, each entry cannot
4760 * be larger than 64 KiB though.
4761 */
4762 if (host->flags & SDHCI_USE_ADMA) {
4763 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
4764 host->max_adma = 65532; /* 32-bit alignment */
4765 mmc->max_seg_size = 65535;
4766 /*
4767 * sdhci_adma_table_pre() expects to define 1 DMA
4768 * descriptor per segment, so the maximum segment size
4769 * is set accordingly. SDHCI allows up to 64KiB per DMA
4770 * descriptor (16-bit field), but some controllers do
4771 * not support "zero means 65536" reducing the maximum
4772 * for them to 65535. That is a problem if PAGE_SIZE is
4773 * 64KiB because the block layer does not support
4774 * max_seg_size < PAGE_SIZE, however
4775 * sdhci_adma_table_pre() has a workaround to handle
4776 * that case, and split the descriptor. Refer also
4777 * comment in sdhci_adma_table_pre().
4778 */
4779 if (mmc->max_seg_size < PAGE_SIZE)
4780 mmc->max_seg_size = PAGE_SIZE;
4781 } else {
4782 mmc->max_seg_size = 65536;
4783 }
4784 } else {
4785 mmc->max_seg_size = mmc->max_req_size;
4786 }
4787
4788 /*
4789 * Maximum block size. This varies from controller to controller and
4790 * is specified in the capabilities register.
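 * The encoded field selects 512, 1024 or 2048 bytes; a reserved value
 * is treated as 512 bytes below.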
4791 */ 4792 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { 4793 mmc->max_blk_size = 2; 4794 } else { 4795 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >> 4796 SDHCI_MAX_BLOCK_SHIFT; 4797 if (mmc->max_blk_size >= 3) { 4798 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n", 4799 mmc_hostname(mmc)); 4800 mmc->max_blk_size = 0; 4801 } 4802 } 4803 4804 mmc->max_blk_size = 512 << mmc->max_blk_size; 4805 4806 /* 4807 * Maximum block count. 4808 */ 4809 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; 4810 4811 if (mmc->max_segs == 1) 4812 /* This may alter mmc->*_blk_* parameters */ 4813 sdhci_allocate_bounce_buffer(host); 4814 4815 return 0; 4816 4817 unreg: 4818 if (host->sdhci_core_to_disable_vqmmc) 4819 regulator_disable(mmc->supply.vqmmc); 4820 undma: 4821 if (host->align_buffer) 4822 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4823 host->adma_table_sz, host->align_buffer, 4824 host->align_addr); 4825 host->adma_table = NULL; 4826 host->align_buffer = NULL; 4827 4828 return ret; 4829 } 4830 EXPORT_SYMBOL_GPL(sdhci_setup_host); 4831 4832 void sdhci_cleanup_host(struct sdhci_host *host) 4833 { 4834 struct mmc_host *mmc = host->mmc; 4835 4836 if (host->sdhci_core_to_disable_vqmmc) 4837 regulator_disable(mmc->supply.vqmmc); 4838 4839 if (host->align_buffer) 4840 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4841 host->adma_table_sz, host->align_buffer, 4842 host->align_addr); 4843 4844 if (host->use_external_dma) 4845 sdhci_external_dma_release(host); 4846 4847 host->adma_table = NULL; 4848 host->align_buffer = NULL; 4849 } 4850 EXPORT_SYMBOL_GPL(sdhci_cleanup_host); 4851 4852 int __sdhci_add_host(struct sdhci_host *host) 4853 { 4854 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI; 4855 struct mmc_host *mmc = host->mmc; 4856 int ret; 4857 4858 if ((mmc->caps2 & MMC_CAP2_CQE) && 4859 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) { 4860 mmc->caps2 &= ~MMC_CAP2_CQE; 4861 mmc->cqe_ops = NULL; 4862 } 4863 4864 host->complete_wq = alloc_workqueue("sdhci", flags, 0); 4865 if (!host->complete_wq) 4866 return -ENOMEM; 4867 4868 INIT_WORK(&host->complete_work, host->complete_work_fn); 4869 4870 timer_setup(&host->timer, sdhci_timeout_timer, 0); 4871 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0); 4872 4873 init_waitqueue_head(&host->buf_ready_int); 4874 4875 sdhci_init(host, 0); 4876 4877 ret = request_threaded_irq(host->irq, sdhci_irq, host->thread_irq_fn, 4878 IRQF_SHARED, mmc_hostname(mmc), host); 4879 if (ret) { 4880 pr_err("%s: Failed to request IRQ %d: %d\n", 4881 mmc_hostname(mmc), host->irq, ret); 4882 goto unwq; 4883 } 4884 4885 ret = sdhci_led_register(host); 4886 if (ret) { 4887 pr_err("%s: Failed to register LED device: %d\n", 4888 mmc_hostname(mmc), ret); 4889 goto unirq; 4890 } 4891 4892 ret = mmc_add_host(mmc); 4893 if (ret) 4894 goto unled; 4895 4896 pr_info("%s: SDHCI controller on %s [%s] using %s\n", 4897 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 4898 host->use_external_dma ? "External DMA" : 4899 (host->flags & SDHCI_USE_ADMA) ? 4900 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : 4901 (host->flags & SDHCI_USE_SDMA) ? 
"DMA" : "PIO"); 4902 4903 sdhci_enable_card_detection(host); 4904 4905 return 0; 4906 4907 unled: 4908 sdhci_led_unregister(host); 4909 unirq: 4910 sdhci_reset_for_all(host); 4911 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4912 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4913 free_irq(host->irq, host); 4914 unwq: 4915 destroy_workqueue(host->complete_wq); 4916 4917 return ret; 4918 } 4919 EXPORT_SYMBOL_GPL(__sdhci_add_host); 4920 4921 int sdhci_add_host(struct sdhci_host *host) 4922 { 4923 int ret; 4924 4925 ret = sdhci_setup_host(host); 4926 if (ret) 4927 return ret; 4928 4929 ret = __sdhci_add_host(host); 4930 if (ret) 4931 goto cleanup; 4932 4933 return 0; 4934 4935 cleanup: 4936 sdhci_cleanup_host(host); 4937 4938 return ret; 4939 } 4940 EXPORT_SYMBOL_GPL(sdhci_add_host); 4941 4942 void sdhci_remove_host(struct sdhci_host *host, int dead) 4943 { 4944 struct mmc_host *mmc = host->mmc; 4945 unsigned long flags; 4946 4947 if (dead) { 4948 spin_lock_irqsave(&host->lock, flags); 4949 4950 host->flags |= SDHCI_DEVICE_DEAD; 4951 4952 if (sdhci_has_requests(host)) { 4953 pr_err("%s: Controller removed during " 4954 " transfer!\n", mmc_hostname(mmc)); 4955 sdhci_error_out_mrqs(host, -ENOMEDIUM); 4956 } 4957 4958 spin_unlock_irqrestore(&host->lock, flags); 4959 } 4960 4961 sdhci_disable_card_detection(host); 4962 4963 mmc_remove_host(mmc); 4964 4965 sdhci_led_unregister(host); 4966 4967 if (!dead) 4968 sdhci_reset_for_all(host); 4969 4970 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4971 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4972 free_irq(host->irq, host); 4973 4974 del_timer_sync(&host->timer); 4975 del_timer_sync(&host->data_timer); 4976 4977 destroy_workqueue(host->complete_wq); 4978 4979 if (host->sdhci_core_to_disable_vqmmc) 4980 regulator_disable(mmc->supply.vqmmc); 4981 4982 if (host->align_buffer) 4983 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4984 host->adma_table_sz, host->align_buffer, 4985 host->align_addr); 4986 4987 if (host->use_external_dma) 4988 sdhci_external_dma_release(host); 4989 4990 host->adma_table = NULL; 4991 host->align_buffer = NULL; 4992 } 4993 4994 EXPORT_SYMBOL_GPL(sdhci_remove_host); 4995 4996 void sdhci_free_host(struct sdhci_host *host) 4997 { 4998 mmc_free_host(host->mmc); 4999 } 5000 5001 EXPORT_SYMBOL_GPL(sdhci_free_host); 5002 5003 /*****************************************************************************\ 5004 * * 5005 * Driver init/exit * 5006 * * 5007 \*****************************************************************************/ 5008 5009 static int __init sdhci_drv_init(void) 5010 { 5011 pr_info(DRIVER_NAME 5012 ": Secure Digital Host Controller Interface driver\n"); 5013 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 5014 5015 return 0; 5016 } 5017 5018 static void __exit sdhci_drv_exit(void) 5019 { 5020 } 5021 5022 module_init(sdhci_drv_init); 5023 module_exit(sdhci_drv_exit); 5024 5025 module_param(debug_quirks, uint, 0444); 5026 module_param(debug_quirks2, uint, 0444); 5027 5028 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 5029 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 5030 MODULE_LICENSE("GPL"); 5031 5032 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 5033 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 5034