1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver 4 * 5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 6 * 7 * Thanks to the following companies for their support: 8 * 9 * - JMicron (hardware and technical support) 10 */ 11 12 #include <linux/bitfield.h> 13 #include <linux/delay.h> 14 #include <linux/dmaengine.h> 15 #include <linux/ktime.h> 16 #include <linux/highmem.h> 17 #include <linux/io.h> 18 #include <linux/module.h> 19 #include <linux/dma-mapping.h> 20 #include <linux/slab.h> 21 #include <linux/scatterlist.h> 22 #include <linux/sizes.h> 23 #include <linux/regulator/consumer.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/of.h> 26 27 #include <linux/leds.h> 28 29 #include <linux/mmc/mmc.h> 30 #include <linux/mmc/host.h> 31 #include <linux/mmc/card.h> 32 #include <linux/mmc/sdio.h> 33 #include <linux/mmc/slot-gpio.h> 34 35 #include "sdhci.h" 36 37 #define DRIVER_NAME "sdhci" 38 39 #define DBG(f, x...) \ 40 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 41 42 #define SDHCI_DUMP(f, x...) \ 43 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 44 45 #define MAX_TUNING_LOOP 40 46 47 static unsigned int debug_quirks = 0; 48 static unsigned int debug_quirks2; 49 50 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); 51 52 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd); 53 54 void sdhci_dumpregs(struct sdhci_host *host) 55 { 56 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n"); 57 58 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n", 59 sdhci_readl(host, SDHCI_DMA_ADDRESS), 60 sdhci_readw(host, SDHCI_HOST_VERSION)); 61 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n", 62 sdhci_readw(host, SDHCI_BLOCK_SIZE), 63 sdhci_readw(host, SDHCI_BLOCK_COUNT)); 64 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n", 65 sdhci_readl(host, SDHCI_ARGUMENT), 66 sdhci_readw(host, SDHCI_TRANSFER_MODE)); 67 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n", 68 sdhci_readl(host, SDHCI_PRESENT_STATE), 69 sdhci_readb(host, SDHCI_HOST_CONTROL)); 70 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n", 71 sdhci_readb(host, SDHCI_POWER_CONTROL), 72 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); 73 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n", 74 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), 75 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); 76 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n", 77 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), 78 sdhci_readl(host, SDHCI_INT_STATUS)); 79 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n", 80 sdhci_readl(host, SDHCI_INT_ENABLE), 81 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); 82 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n", 83 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS), 84 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 85 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n", 86 sdhci_readl(host, SDHCI_CAPABILITIES), 87 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 88 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n", 89 sdhci_readw(host, SDHCI_COMMAND), 90 sdhci_readl(host, SDHCI_MAX_CURRENT)); 91 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n", 92 sdhci_readl(host, SDHCI_RESPONSE), 93 sdhci_readl(host, SDHCI_RESPONSE + 4)); 94 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n", 95 sdhci_readl(host, SDHCI_RESPONSE + 8), 96 sdhci_readl(host, SDHCI_RESPONSE + 12)); 97 SDHCI_DUMP("Host ctl2: 0x%08x\n", 98 sdhci_readw(host, SDHCI_HOST_CONTROL2)); 99 100 if (host->flags & SDHCI_USE_ADMA) { 101 if 
(host->flags & SDHCI_USE_64_BIT_DMA) { 102 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", 103 sdhci_readl(host, SDHCI_ADMA_ERROR), 104 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI), 105 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 106 } else { 107 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 108 sdhci_readl(host, SDHCI_ADMA_ERROR), 109 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 110 } 111 } 112 113 if (host->ops->dump_vendor_regs) 114 host->ops->dump_vendor_regs(host); 115 116 SDHCI_DUMP("============================================\n"); 117 } 118 EXPORT_SYMBOL_GPL(sdhci_dumpregs); 119 120 /*****************************************************************************\ 121 * * 122 * Low level functions * 123 * * 124 \*****************************************************************************/ 125 126 static void sdhci_do_enable_v4_mode(struct sdhci_host *host) 127 { 128 u16 ctrl2; 129 130 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 131 if (ctrl2 & SDHCI_CTRL_V4_MODE) 132 return; 133 134 ctrl2 |= SDHCI_CTRL_V4_MODE; 135 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 136 } 137 138 /* 139 * This can be called before sdhci_add_host() by Vendor's host controller 140 * driver to enable v4 mode if supported. 141 */ 142 void sdhci_enable_v4_mode(struct sdhci_host *host) 143 { 144 host->v4_mode = true; 145 sdhci_do_enable_v4_mode(host); 146 } 147 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode); 148 149 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd) 150 { 151 return cmd->data || cmd->flags & MMC_RSP_BUSY; 152 } 153 154 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) 155 { 156 u32 present; 157 158 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 159 !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc)) 160 return; 161 162 if (enable) { 163 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 164 SDHCI_CARD_PRESENT; 165 166 host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : 167 SDHCI_INT_CARD_INSERT; 168 } else { 169 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 170 } 171 172 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 173 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 174 } 175 176 static void sdhci_enable_card_detection(struct sdhci_host *host) 177 { 178 sdhci_set_card_detection(host, true); 179 } 180 181 static void sdhci_disable_card_detection(struct sdhci_host *host) 182 { 183 sdhci_set_card_detection(host, false); 184 } 185 186 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) 187 { 188 if (host->bus_on) 189 return; 190 host->bus_on = true; 191 pm_runtime_get_noresume(mmc_dev(host->mmc)); 192 } 193 194 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) 195 { 196 if (!host->bus_on) 197 return; 198 host->bus_on = false; 199 pm_runtime_put_noidle(mmc_dev(host->mmc)); 200 } 201 202 void sdhci_reset(struct sdhci_host *host, u8 mask) 203 { 204 ktime_t timeout; 205 206 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 207 208 if (mask & SDHCI_RESET_ALL) { 209 host->clock = 0; 210 /* Reset-all turns off SD Bus Power */ 211 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 212 sdhci_runtime_pm_bus_off(host); 213 } 214 215 /* Wait max 100 ms */ 216 timeout = ktime_add_ms(ktime_get(), 100); 217 218 /* hw clears the bit when it's done */ 219 while (1) { 220 bool timedout = ktime_after(ktime_get(), timeout); 221 222 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) 223 break; 224 if (timedout) { 225 pr_err("%s: Reset 0x%x never completed.\n", 226 mmc_hostname(host->mmc), (int)mask); 227 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 228 sdhci_dumpregs(host); 229 return; 230 } 231 udelay(10); 232 } 233 } 234 EXPORT_SYMBOL_GPL(sdhci_reset); 235 236 static bool sdhci_do_reset(struct sdhci_host *host, u8 mask) 237 { 238 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 239 struct mmc_host *mmc = host->mmc; 240 241 if (!mmc->ops->get_cd(mmc)) 242 return false; 243 } 244 245 host->ops->reset(host, mask); 246 247 return true; 248 } 249 250 static void sdhci_reset_for_all(struct sdhci_host *host) 251 { 252 if (sdhci_do_reset(host, SDHCI_RESET_ALL)) { 253 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 254 if (host->ops->enable_dma) 255 host->ops->enable_dma(host); 256 } 257 /* Resetting the controller clears many */ 258 host->preset_enabled = false; 259 } 260 } 261 262 enum sdhci_reset_reason { 263 SDHCI_RESET_FOR_INIT, 264 SDHCI_RESET_FOR_REQUEST_ERROR, 265 SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY, 266 SDHCI_RESET_FOR_TUNING_ABORT, 267 SDHCI_RESET_FOR_CARD_REMOVED, 268 SDHCI_RESET_FOR_CQE_RECOVERY, 269 }; 270 271 static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason) 272 { 273 switch (reason) { 274 case SDHCI_RESET_FOR_INIT: 275 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 276 break; 277 case SDHCI_RESET_FOR_REQUEST_ERROR: 278 case SDHCI_RESET_FOR_TUNING_ABORT: 279 case SDHCI_RESET_FOR_CARD_REMOVED: 280 case SDHCI_RESET_FOR_CQE_RECOVERY: 281 sdhci_do_reset(host, SDHCI_RESET_CMD); 282 sdhci_do_reset(host, SDHCI_RESET_DATA); 283 break; 284 case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY: 285 sdhci_do_reset(host, SDHCI_RESET_DATA); 286 break; 287 } 288 } 289 290 #define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r) 291 292 static void sdhci_set_default_irqs(struct sdhci_host *host) 293 { 294 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 295 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | 296 SDHCI_INT_INDEX | 
SDHCI_INT_END_BIT | SDHCI_INT_CRC | 297 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | 298 SDHCI_INT_RESPONSE; 299 300 if (host->tuning_mode == SDHCI_TUNING_MODE_2 || 301 host->tuning_mode == SDHCI_TUNING_MODE_3) 302 host->ier |= SDHCI_INT_RETUNE; 303 304 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 305 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 306 } 307 308 static void sdhci_config_dma(struct sdhci_host *host) 309 { 310 u8 ctrl; 311 u16 ctrl2; 312 313 if (host->version < SDHCI_SPEC_200) 314 return; 315 316 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 317 318 /* 319 * Always adjust the DMA selection as some controllers 320 * (e.g. JMicron) can't do PIO properly when the selection 321 * is ADMA. 322 */ 323 ctrl &= ~SDHCI_CTRL_DMA_MASK; 324 if (!(host->flags & SDHCI_REQ_USE_DMA)) 325 goto out; 326 327 /* Note if DMA Select is zero then SDMA is selected */ 328 if (host->flags & SDHCI_USE_ADMA) 329 ctrl |= SDHCI_CTRL_ADMA32; 330 331 if (host->flags & SDHCI_USE_64_BIT_DMA) { 332 /* 333 * If v4 mode, all supported DMA can be 64-bit addressing if 334 * controller supports 64-bit system address, otherwise only 335 * ADMA can support 64-bit addressing. 336 */ 337 if (host->v4_mode) { 338 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 339 ctrl2 |= SDHCI_CTRL_64BIT_ADDR; 340 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 341 } else if (host->flags & SDHCI_USE_ADMA) { 342 /* 343 * Don't need to undo SDHCI_CTRL_ADMA32 in order to 344 * set SDHCI_CTRL_ADMA64. 345 */ 346 ctrl |= SDHCI_CTRL_ADMA64; 347 } 348 } 349 350 out: 351 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 352 } 353 354 static void sdhci_init(struct sdhci_host *host, int soft) 355 { 356 struct mmc_host *mmc = host->mmc; 357 unsigned long flags; 358 359 if (soft) 360 sdhci_reset_for(host, INIT); 361 else 362 sdhci_reset_for_all(host); 363 364 if (host->v4_mode) 365 sdhci_do_enable_v4_mode(host); 366 367 spin_lock_irqsave(&host->lock, flags); 368 sdhci_set_default_irqs(host); 369 spin_unlock_irqrestore(&host->lock, flags); 370 371 host->cqe_on = false; 372 373 if (soft) { 374 /* force clock reconfiguration */ 375 host->clock = 0; 376 mmc->ops->set_ios(mmc, &mmc->ios); 377 } 378 } 379 380 static void sdhci_reinit(struct sdhci_host *host) 381 { 382 u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 383 384 sdhci_init(host, 0); 385 sdhci_enable_card_detection(host); 386 387 /* 388 * A change to the card detect bits indicates a change in present state, 389 * refer sdhci_set_card_detection(). A card detect interrupt might have 390 * been missed while the host controller was being reset, so trigger a 391 * rescan to check. 
392 */ 393 if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT))) 394 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 395 } 396 397 static void __sdhci_led_activate(struct sdhci_host *host) 398 { 399 u8 ctrl; 400 401 if (host->quirks & SDHCI_QUIRK_NO_LED) 402 return; 403 404 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 405 ctrl |= SDHCI_CTRL_LED; 406 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 407 } 408 409 static void __sdhci_led_deactivate(struct sdhci_host *host) 410 { 411 u8 ctrl; 412 413 if (host->quirks & SDHCI_QUIRK_NO_LED) 414 return; 415 416 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 417 ctrl &= ~SDHCI_CTRL_LED; 418 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 419 } 420 421 #if IS_REACHABLE(CONFIG_LEDS_CLASS) 422 static void sdhci_led_control(struct led_classdev *led, 423 enum led_brightness brightness) 424 { 425 struct sdhci_host *host = container_of(led, struct sdhci_host, led); 426 unsigned long flags; 427 428 spin_lock_irqsave(&host->lock, flags); 429 430 if (host->runtime_suspended) 431 goto out; 432 433 if (brightness == LED_OFF) 434 __sdhci_led_deactivate(host); 435 else 436 __sdhci_led_activate(host); 437 out: 438 spin_unlock_irqrestore(&host->lock, flags); 439 } 440 441 static int sdhci_led_register(struct sdhci_host *host) 442 { 443 struct mmc_host *mmc = host->mmc; 444 445 if (host->quirks & SDHCI_QUIRK_NO_LED) 446 return 0; 447 448 snprintf(host->led_name, sizeof(host->led_name), 449 "%s::", mmc_hostname(mmc)); 450 451 host->led.name = host->led_name; 452 host->led.brightness = LED_OFF; 453 host->led.default_trigger = mmc_hostname(mmc); 454 host->led.brightness_set = sdhci_led_control; 455 456 return led_classdev_register(mmc_dev(mmc), &host->led); 457 } 458 459 static void sdhci_led_unregister(struct sdhci_host *host) 460 { 461 if (host->quirks & SDHCI_QUIRK_NO_LED) 462 return; 463 464 led_classdev_unregister(&host->led); 465 } 466 467 static inline void sdhci_led_activate(struct sdhci_host *host) 468 { 469 } 470 471 static inline void sdhci_led_deactivate(struct sdhci_host *host) 472 { 473 } 474 475 #else 476 477 static inline int sdhci_led_register(struct sdhci_host *host) 478 { 479 return 0; 480 } 481 482 static inline void sdhci_led_unregister(struct sdhci_host *host) 483 { 484 } 485 486 static inline void sdhci_led_activate(struct sdhci_host *host) 487 { 488 __sdhci_led_activate(host); 489 } 490 491 static inline void sdhci_led_deactivate(struct sdhci_host *host) 492 { 493 __sdhci_led_deactivate(host); 494 } 495 496 #endif 497 498 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq, 499 unsigned long timeout) 500 { 501 if (sdhci_data_line_cmd(mrq->cmd)) 502 mod_timer(&host->data_timer, timeout); 503 else 504 mod_timer(&host->timer, timeout); 505 } 506 507 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq) 508 { 509 if (sdhci_data_line_cmd(mrq->cmd)) 510 del_timer(&host->data_timer); 511 else 512 del_timer(&host->timer); 513 } 514 515 static inline bool sdhci_has_requests(struct sdhci_host *host) 516 { 517 return host->cmd || host->data_cmd; 518 } 519 520 /*****************************************************************************\ 521 * * 522 * Core functions * 523 * * 524 \*****************************************************************************/ 525 526 static void sdhci_read_block_pio(struct sdhci_host *host) 527 { 528 unsigned long flags; 529 size_t blksize, len, chunk; 530 u32 scratch; 531 u8 *buf; 532 533 DBG("PIO reading\n"); 534 535 blksize = host->data->blksz; 536 
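	/*
	 * 'chunk' counts how many bytes of the last 32-bit word read from
	 * the buffer data port are still waiting to be copied out.
	 */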
chunk = 0; 537 538 local_irq_save(flags); 539 540 while (blksize) { 541 BUG_ON(!sg_miter_next(&host->sg_miter)); 542 543 len = min(host->sg_miter.length, blksize); 544 545 blksize -= len; 546 host->sg_miter.consumed = len; 547 548 buf = host->sg_miter.addr; 549 550 while (len) { 551 if (chunk == 0) { 552 scratch = sdhci_readl(host, SDHCI_BUFFER); 553 chunk = 4; 554 } 555 556 *buf = scratch & 0xFF; 557 558 buf++; 559 scratch >>= 8; 560 chunk--; 561 len--; 562 } 563 } 564 565 sg_miter_stop(&host->sg_miter); 566 567 local_irq_restore(flags); 568 } 569 570 static void sdhci_write_block_pio(struct sdhci_host *host) 571 { 572 unsigned long flags; 573 size_t blksize, len, chunk; 574 u32 scratch; 575 u8 *buf; 576 577 DBG("PIO writing\n"); 578 579 blksize = host->data->blksz; 580 chunk = 0; 581 scratch = 0; 582 583 local_irq_save(flags); 584 585 while (blksize) { 586 BUG_ON(!sg_miter_next(&host->sg_miter)); 587 588 len = min(host->sg_miter.length, blksize); 589 590 blksize -= len; 591 host->sg_miter.consumed = len; 592 593 buf = host->sg_miter.addr; 594 595 while (len) { 596 scratch |= (u32)*buf << (chunk * 8); 597 598 buf++; 599 chunk++; 600 len--; 601 602 if ((chunk == 4) || ((len == 0) && (blksize == 0))) { 603 sdhci_writel(host, scratch, SDHCI_BUFFER); 604 chunk = 0; 605 scratch = 0; 606 } 607 } 608 } 609 610 sg_miter_stop(&host->sg_miter); 611 612 local_irq_restore(flags); 613 } 614 615 static void sdhci_transfer_pio(struct sdhci_host *host) 616 { 617 u32 mask; 618 619 if (host->blocks == 0) 620 return; 621 622 if (host->data->flags & MMC_DATA_READ) 623 mask = SDHCI_DATA_AVAILABLE; 624 else 625 mask = SDHCI_SPACE_AVAILABLE; 626 627 /* 628 * Some controllers (JMicron JMB38x) mess up the buffer bits 629 * for transfers < 4 bytes. As long as it is just one block, 630 * we can ignore the bits. 631 */ 632 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && 633 (host->data->blocks == 1)) 634 mask = ~0; 635 636 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 637 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) 638 udelay(100); 639 640 if (host->data->flags & MMC_DATA_READ) 641 sdhci_read_block_pio(host); 642 else 643 sdhci_write_block_pio(host); 644 645 host->blocks--; 646 if (host->blocks == 0) 647 break; 648 } 649 650 DBG("PIO transfer complete.\n"); 651 } 652 653 static int sdhci_pre_dma_transfer(struct sdhci_host *host, 654 struct mmc_data *data, int cookie) 655 { 656 int sg_count; 657 658 /* 659 * If the data buffers are already mapped, return the previous 660 * dma_map_sg() result. 
661 */ 662 if (data->host_cookie == COOKIE_PRE_MAPPED) 663 return data->sg_count; 664 665 /* Bounce write requests to the bounce buffer */ 666 if (host->bounce_buffer) { 667 unsigned int length = data->blksz * data->blocks; 668 669 if (length > host->bounce_buffer_size) { 670 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", 671 mmc_hostname(host->mmc), length, 672 host->bounce_buffer_size); 673 return -EIO; 674 } 675 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { 676 /* Copy the data to the bounce buffer */ 677 if (host->ops->copy_to_bounce_buffer) { 678 host->ops->copy_to_bounce_buffer(host, 679 data, length); 680 } else { 681 sg_copy_to_buffer(data->sg, data->sg_len, 682 host->bounce_buffer, length); 683 } 684 } 685 /* Switch ownership to the DMA */ 686 dma_sync_single_for_device(mmc_dev(host->mmc), 687 host->bounce_addr, 688 host->bounce_buffer_size, 689 mmc_get_dma_dir(data)); 690 /* Just a dummy value */ 691 sg_count = 1; 692 } else { 693 /* Just access the data directly from memory */ 694 sg_count = dma_map_sg(mmc_dev(host->mmc), 695 data->sg, data->sg_len, 696 mmc_get_dma_dir(data)); 697 } 698 699 if (sg_count == 0) 700 return -ENOSPC; 701 702 data->sg_count = sg_count; 703 data->host_cookie = cookie; 704 705 return sg_count; 706 } 707 708 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags) 709 { 710 local_irq_save(*flags); 711 return kmap_atomic(sg_page(sg)) + sg->offset; 712 } 713 714 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) 715 { 716 kunmap_atomic(buffer); 717 local_irq_restore(*flags); 718 } 719 720 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, 721 dma_addr_t addr, int len, unsigned int cmd) 722 { 723 struct sdhci_adma2_64_desc *dma_desc = *desc; 724 725 /* 32-bit and 64-bit descriptors have these members in same position */ 726 dma_desc->cmd = cpu_to_le16(cmd); 727 dma_desc->len = cpu_to_le16(len); 728 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr)); 729 730 if (host->flags & SDHCI_USE_64_BIT_DMA) 731 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr)); 732 733 *desc += host->desc_sz; 734 } 735 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc); 736 737 static inline void __sdhci_adma_write_desc(struct sdhci_host *host, 738 void **desc, dma_addr_t addr, 739 int len, unsigned int cmd) 740 { 741 if (host->ops->adma_write_desc) 742 host->ops->adma_write_desc(host, desc, addr, len, cmd); 743 else 744 sdhci_adma_write_desc(host, desc, addr, len, cmd); 745 } 746 747 static void sdhci_adma_mark_end(void *desc) 748 { 749 struct sdhci_adma2_64_desc *dma_desc = desc; 750 751 /* 32-bit and 64-bit descriptors have 'cmd' in same position */ 752 dma_desc->cmd |= cpu_to_le16(ADMA2_END); 753 } 754 755 static void sdhci_adma_table_pre(struct sdhci_host *host, 756 struct mmc_data *data, int sg_count) 757 { 758 struct scatterlist *sg; 759 unsigned long flags; 760 dma_addr_t addr, align_addr; 761 void *desc, *align; 762 char *buffer; 763 int len, offset, i; 764 765 /* 766 * The spec does not specify endianness of descriptor table. 767 * We currently guess that it is LE. 768 */ 769 770 host->sg_count = sg_count; 771 772 desc = host->adma_table; 773 align = host->align_buffer; 774 775 align_addr = host->align_addr; 776 777 for_each_sg(data->sg, sg, host->sg_count, i) { 778 addr = sg_dma_address(sg); 779 len = sg_dma_len(sg); 780 781 /* 782 * The SDHCI specification states that ADMA addresses must 783 * be 32-bit aligned. 
If they aren't, then we use a bounce 784 * buffer for the (up to three) bytes that screw up the 785 * alignment. 786 */ 787 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) & 788 SDHCI_ADMA2_MASK; 789 if (offset) { 790 if (data->flags & MMC_DATA_WRITE) { 791 buffer = sdhci_kmap_atomic(sg, &flags); 792 memcpy(align, buffer, offset); 793 sdhci_kunmap_atomic(buffer, &flags); 794 } 795 796 /* tran, valid */ 797 __sdhci_adma_write_desc(host, &desc, align_addr, 798 offset, ADMA2_TRAN_VALID); 799 800 BUG_ON(offset > 65536); 801 802 align += SDHCI_ADMA2_ALIGN; 803 align_addr += SDHCI_ADMA2_ALIGN; 804 805 addr += offset; 806 len -= offset; 807 } 808 809 /* 810 * The block layer forces a minimum segment size of PAGE_SIZE, 811 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write 812 * multiple descriptors, noting that the ADMA table is sized 813 * for 4KiB chunks anyway, so it will be big enough. 814 */ 815 while (len > host->max_adma) { 816 int n = 32 * 1024; /* 32KiB*/ 817 818 __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID); 819 addr += n; 820 len -= n; 821 } 822 823 /* tran, valid */ 824 if (len) 825 __sdhci_adma_write_desc(host, &desc, addr, len, 826 ADMA2_TRAN_VALID); 827 828 /* 829 * If this triggers then we have a calculation bug 830 * somewhere. :/ 831 */ 832 WARN_ON((desc - host->adma_table) >= host->adma_table_sz); 833 } 834 835 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 836 /* Mark the last descriptor as the terminating descriptor */ 837 if (desc != host->adma_table) { 838 desc -= host->desc_sz; 839 sdhci_adma_mark_end(desc); 840 } 841 } else { 842 /* Add a terminating entry - nop, end, valid */ 843 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID); 844 } 845 } 846 847 static void sdhci_adma_table_post(struct sdhci_host *host, 848 struct mmc_data *data) 849 { 850 struct scatterlist *sg; 851 int i, size; 852 void *align; 853 char *buffer; 854 unsigned long flags; 855 856 if (data->flags & MMC_DATA_READ) { 857 bool has_unaligned = false; 858 859 /* Do a quick scan of the SG list for any unaligned mappings */ 860 for_each_sg(data->sg, sg, host->sg_count, i) 861 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 862 has_unaligned = true; 863 break; 864 } 865 866 if (has_unaligned) { 867 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, 868 data->sg_len, DMA_FROM_DEVICE); 869 870 align = host->align_buffer; 871 872 for_each_sg(data->sg, sg, host->sg_count, i) { 873 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 874 size = SDHCI_ADMA2_ALIGN - 875 (sg_dma_address(sg) & SDHCI_ADMA2_MASK); 876 877 buffer = sdhci_kmap_atomic(sg, &flags); 878 memcpy(buffer, align, size); 879 sdhci_kunmap_atomic(buffer, &flags); 880 881 align += SDHCI_ADMA2_ALIGN; 882 } 883 } 884 } 885 } 886 } 887 888 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr) 889 { 890 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS); 891 if (host->flags & SDHCI_USE_64_BIT_DMA) 892 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI); 893 } 894 895 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host) 896 { 897 if (host->bounce_buffer) 898 return host->bounce_addr; 899 else 900 return sg_dma_address(host->data->sg); 901 } 902 903 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr) 904 { 905 if (host->v4_mode) 906 sdhci_set_adma_addr(host, addr); 907 else 908 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS); 909 } 910 911 static unsigned int sdhci_target_timeout(struct sdhci_host *host, 912 struct mmc_command *cmd, 913 struct 
mmc_data *data) 914 { 915 unsigned int target_timeout; 916 917 /* timeout in us */ 918 if (!data) { 919 target_timeout = cmd->busy_timeout * 1000; 920 } else { 921 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000); 922 if (host->clock && data->timeout_clks) { 923 unsigned long long val; 924 925 /* 926 * data->timeout_clks is in units of clock cycles. 927 * host->clock is in Hz. target_timeout is in us. 928 * Hence, us = 1000000 * cycles / Hz. Round up. 929 */ 930 val = 1000000ULL * data->timeout_clks; 931 if (do_div(val, host->clock)) 932 target_timeout++; 933 target_timeout += val; 934 } 935 } 936 937 return target_timeout; 938 } 939 940 static void sdhci_calc_sw_timeout(struct sdhci_host *host, 941 struct mmc_command *cmd) 942 { 943 struct mmc_data *data = cmd->data; 944 struct mmc_host *mmc = host->mmc; 945 struct mmc_ios *ios = &mmc->ios; 946 unsigned char bus_width = 1 << ios->bus_width; 947 unsigned int blksz; 948 unsigned int freq; 949 u64 target_timeout; 950 u64 transfer_time; 951 952 target_timeout = sdhci_target_timeout(host, cmd, data); 953 target_timeout *= NSEC_PER_USEC; 954 955 if (data) { 956 blksz = data->blksz; 957 freq = mmc->actual_clock ? : host->clock; 958 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width); 959 do_div(transfer_time, freq); 960 /* multiply by '2' to account for any unknowns */ 961 transfer_time = transfer_time * 2; 962 /* calculate timeout for the entire data */ 963 host->data_timeout = data->blocks * target_timeout + 964 transfer_time; 965 } else { 966 host->data_timeout = target_timeout; 967 } 968 969 if (host->data_timeout) 970 host->data_timeout += MMC_CMD_TRANSFER_TIME; 971 } 972 973 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, 974 bool *too_big) 975 { 976 u8 count; 977 struct mmc_data *data; 978 unsigned target_timeout, current_timeout; 979 980 *too_big = false; 981 982 /* 983 * If the host controller provides us with an incorrect timeout 984 * value, just skip the check and use the maximum. The hardware may take 985 * longer to time out, but that's much better than having a too-short 986 * timeout value. 987 */ 988 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) 989 return host->max_timeout_count; 990 991 /* Unspecified command, assume max */ 992 if (cmd == NULL) 993 return host->max_timeout_count; 994 995 data = cmd->data; 996 /* Unspecified timeout, assume max */ 997 if (!data && !cmd->busy_timeout) 998 return host->max_timeout_count; 999 1000 /* timeout in us */ 1001 target_timeout = sdhci_target_timeout(host, cmd, data); 1002 1003 /* 1004 * Figure out needed cycles. 1005 * We do this in steps in order to fit inside a 32 bit int. 
1006 * The first step is the minimum timeout, which will have a 1007 * minimum resolution of 6 bits: 1008 * (1) 2^13*1000 > 2^22, 1009 * (2) host->timeout_clk < 2^16 1010 * => 1011 * (1) / (2) > 2^6 1012 */ 1013 count = 0; 1014 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 1015 while (current_timeout < target_timeout) { 1016 count++; 1017 current_timeout <<= 1; 1018 if (count > host->max_timeout_count) { 1019 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT)) 1020 DBG("Too large timeout 0x%x requested for CMD%d!\n", 1021 count, cmd->opcode); 1022 count = host->max_timeout_count; 1023 *too_big = true; 1024 break; 1025 } 1026 } 1027 1028 return count; 1029 } 1030 1031 static void sdhci_set_transfer_irqs(struct sdhci_host *host) 1032 { 1033 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; 1034 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; 1035 1036 if (host->flags & SDHCI_REQ_USE_DMA) 1037 host->ier = (host->ier & ~pio_irqs) | dma_irqs; 1038 else 1039 host->ier = (host->ier & ~dma_irqs) | pio_irqs; 1040 1041 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12)) 1042 host->ier |= SDHCI_INT_AUTO_CMD_ERR; 1043 else 1044 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR; 1045 1046 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1047 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1048 } 1049 1050 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) 1051 { 1052 if (enable) 1053 host->ier |= SDHCI_INT_DATA_TIMEOUT; 1054 else 1055 host->ier &= ~SDHCI_INT_DATA_TIMEOUT; 1056 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1057 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1058 } 1059 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); 1060 1061 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1062 { 1063 bool too_big = false; 1064 u8 count = sdhci_calc_timeout(host, cmd, &too_big); 1065 1066 if (too_big && 1067 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { 1068 sdhci_calc_sw_timeout(host, cmd); 1069 sdhci_set_data_timeout_irq(host, false); 1070 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { 1071 sdhci_set_data_timeout_irq(host, true); 1072 } 1073 1074 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); 1075 } 1076 EXPORT_SYMBOL_GPL(__sdhci_set_timeout); 1077 1078 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1079 { 1080 if (host->ops->set_timeout) 1081 host->ops->set_timeout(host, cmd); 1082 else 1083 __sdhci_set_timeout(host, cmd); 1084 } 1085 1086 static void sdhci_initialize_data(struct sdhci_host *host, 1087 struct mmc_data *data) 1088 { 1089 WARN_ON(host->data); 1090 1091 /* Sanity checks */ 1092 BUG_ON(data->blksz * data->blocks > 524288); 1093 BUG_ON(data->blksz > host->mmc->max_blk_size); 1094 BUG_ON(data->blocks > 65535); 1095 1096 host->data = data; 1097 host->data_early = 0; 1098 host->data->bytes_xfered = 0; 1099 } 1100 1101 static inline void sdhci_set_block_info(struct sdhci_host *host, 1102 struct mmc_data *data) 1103 { 1104 /* Set the DMA boundary value and block size */ 1105 sdhci_writew(host, 1106 SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), 1107 SDHCI_BLOCK_SIZE); 1108 /* 1109 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count 1110 * can be supported, in that case 16-bit block count register must be 0. 
1111 */ 1112 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1113 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { 1114 if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) 1115 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); 1116 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); 1117 } else { 1118 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 1119 } 1120 } 1121 1122 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) 1123 { 1124 struct mmc_data *data = cmd->data; 1125 1126 sdhci_initialize_data(host, data); 1127 1128 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 1129 struct scatterlist *sg; 1130 unsigned int length_mask, offset_mask; 1131 int i; 1132 1133 host->flags |= SDHCI_REQ_USE_DMA; 1134 1135 /* 1136 * FIXME: This doesn't account for merging when mapping the 1137 * scatterlist. 1138 * 1139 * The assumption here being that alignment and lengths are 1140 * the same after DMA mapping to device address space. 1141 */ 1142 length_mask = 0; 1143 offset_mask = 0; 1144 if (host->flags & SDHCI_USE_ADMA) { 1145 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) { 1146 length_mask = 3; 1147 /* 1148 * As we use up to 3 byte chunks to work 1149 * around alignment problems, we need to 1150 * check the offset as well. 1151 */ 1152 offset_mask = 3; 1153 } 1154 } else { 1155 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) 1156 length_mask = 3; 1157 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) 1158 offset_mask = 3; 1159 } 1160 1161 if (unlikely(length_mask | offset_mask)) { 1162 for_each_sg(data->sg, sg, data->sg_len, i) { 1163 if (sg->length & length_mask) { 1164 DBG("Reverting to PIO because of transfer size (%d)\n", 1165 sg->length); 1166 host->flags &= ~SDHCI_REQ_USE_DMA; 1167 break; 1168 } 1169 if (sg->offset & offset_mask) { 1170 DBG("Reverting to PIO because of bad alignment\n"); 1171 host->flags &= ~SDHCI_REQ_USE_DMA; 1172 break; 1173 } 1174 } 1175 } 1176 } 1177 1178 if (host->flags & SDHCI_REQ_USE_DMA) { 1179 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1180 1181 if (sg_cnt <= 0) { 1182 /* 1183 * This only happens when someone fed 1184 * us an invalid request. 
1185 */ 1186 WARN_ON(1); 1187 host->flags &= ~SDHCI_REQ_USE_DMA; 1188 } else if (host->flags & SDHCI_USE_ADMA) { 1189 sdhci_adma_table_pre(host, data, sg_cnt); 1190 sdhci_set_adma_addr(host, host->adma_addr); 1191 } else { 1192 WARN_ON(sg_cnt != 1); 1193 sdhci_set_sdma_addr(host, sdhci_sdma_address(host)); 1194 } 1195 } 1196 1197 sdhci_config_dma(host); 1198 1199 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 1200 int flags; 1201 1202 flags = SG_MITER_ATOMIC; 1203 if (host->data->flags & MMC_DATA_READ) 1204 flags |= SG_MITER_TO_SG; 1205 else 1206 flags |= SG_MITER_FROM_SG; 1207 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1208 host->blocks = data->blocks; 1209 } 1210 1211 sdhci_set_transfer_irqs(host); 1212 1213 sdhci_set_block_info(host, data); 1214 } 1215 1216 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) 1217 1218 static int sdhci_external_dma_init(struct sdhci_host *host) 1219 { 1220 int ret = 0; 1221 struct mmc_host *mmc = host->mmc; 1222 1223 host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx"); 1224 if (IS_ERR(host->tx_chan)) { 1225 ret = PTR_ERR(host->tx_chan); 1226 if (ret != -EPROBE_DEFER) 1227 pr_warn("Failed to request TX DMA channel.\n"); 1228 host->tx_chan = NULL; 1229 return ret; 1230 } 1231 1232 host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx"); 1233 if (IS_ERR(host->rx_chan)) { 1234 if (host->tx_chan) { 1235 dma_release_channel(host->tx_chan); 1236 host->tx_chan = NULL; 1237 } 1238 1239 ret = PTR_ERR(host->rx_chan); 1240 if (ret != -EPROBE_DEFER) 1241 pr_warn("Failed to request RX DMA channel.\n"); 1242 host->rx_chan = NULL; 1243 } 1244 1245 return ret; 1246 } 1247 1248 static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1249 struct mmc_data *data) 1250 { 1251 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; 1252 } 1253 1254 static int sdhci_external_dma_setup(struct sdhci_host *host, 1255 struct mmc_command *cmd) 1256 { 1257 int ret, i; 1258 enum dma_transfer_direction dir; 1259 struct dma_async_tx_descriptor *desc; 1260 struct mmc_data *data = cmd->data; 1261 struct dma_chan *chan; 1262 struct dma_slave_config cfg; 1263 dma_cookie_t cookie; 1264 int sg_cnt; 1265 1266 if (!host->mapbase) 1267 return -EINVAL; 1268 1269 memset(&cfg, 0, sizeof(cfg)); 1270 cfg.src_addr = host->mapbase + SDHCI_BUFFER; 1271 cfg.dst_addr = host->mapbase + SDHCI_BUFFER; 1272 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1273 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1274 cfg.src_maxburst = data->blksz / 4; 1275 cfg.dst_maxburst = data->blksz / 4; 1276 1277 /* Sanity check: all the SG entries must be aligned by block size. */ 1278 for (i = 0; i < data->sg_len; i++) { 1279 if ((data->sg + i)->length % data->blksz) 1280 return -EINVAL; 1281 } 1282 1283 chan = sdhci_external_dma_channel(host, data); 1284 1285 ret = dmaengine_slave_config(chan, &cfg); 1286 if (ret) 1287 return ret; 1288 1289 sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1290 if (sg_cnt <= 0) 1291 return -EINVAL; 1292 1293 dir = data->flags & MMC_DATA_WRITE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; 1294 desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir, 1295 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1296 if (!desc) 1297 return -EINVAL; 1298 1299 desc->callback = NULL; 1300 desc->callback_param = NULL; 1301 1302 cookie = dmaengine_submit(desc); 1303 if (dma_submit_error(cookie)) 1304 ret = cookie; 1305 1306 return ret; 1307 } 1308 1309 static void sdhci_external_dma_release(struct sdhci_host *host) 1310 { 1311 if (host->tx_chan) { 1312 dma_release_channel(host->tx_chan); 1313 host->tx_chan = NULL; 1314 } 1315 1316 if (host->rx_chan) { 1317 dma_release_channel(host->rx_chan); 1318 host->rx_chan = NULL; 1319 } 1320 1321 sdhci_switch_external_dma(host, false); 1322 } 1323 1324 static void __sdhci_external_dma_prepare_data(struct sdhci_host *host, 1325 struct mmc_command *cmd) 1326 { 1327 struct mmc_data *data = cmd->data; 1328 1329 sdhci_initialize_data(host, data); 1330 1331 host->flags |= SDHCI_REQ_USE_DMA; 1332 sdhci_set_transfer_irqs(host); 1333 1334 sdhci_set_block_info(host, data); 1335 } 1336 1337 static void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1338 struct mmc_command *cmd) 1339 { 1340 if (!sdhci_external_dma_setup(host, cmd)) { 1341 __sdhci_external_dma_prepare_data(host, cmd); 1342 } else { 1343 sdhci_external_dma_release(host); 1344 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n", 1345 mmc_hostname(host->mmc)); 1346 sdhci_prepare_data(host, cmd); 1347 } 1348 } 1349 1350 static void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1351 struct mmc_command *cmd) 1352 { 1353 struct dma_chan *chan; 1354 1355 if (!cmd->data) 1356 return; 1357 1358 chan = sdhci_external_dma_channel(host, cmd->data); 1359 if (chan) 1360 dma_async_issue_pending(chan); 1361 } 1362 1363 #else 1364 1365 static inline int sdhci_external_dma_init(struct sdhci_host *host) 1366 { 1367 return -EOPNOTSUPP; 1368 } 1369 1370 static inline void sdhci_external_dma_release(struct sdhci_host *host) 1371 { 1372 } 1373 1374 static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1375 struct mmc_command *cmd) 1376 { 1377 /* This should never happen */ 1378 WARN_ON_ONCE(1); 1379 } 1380 1381 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1382 struct mmc_command *cmd) 1383 { 1384 } 1385 1386 static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1387 struct mmc_data *data) 1388 { 1389 return NULL; 1390 } 1391 1392 #endif 1393 1394 void sdhci_switch_external_dma(struct sdhci_host *host, bool en) 1395 { 1396 host->use_external_dma = en; 1397 } 1398 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma); 1399 1400 static inline bool sdhci_auto_cmd12(struct sdhci_host *host, 1401 struct mmc_request *mrq) 1402 { 1403 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) && 1404 !mrq->cap_cmd_during_tfr; 1405 } 1406 1407 static inline bool sdhci_auto_cmd23(struct sdhci_host *host, 1408 struct mmc_request *mrq) 1409 { 1410 return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23); 1411 } 1412 1413 static inline bool sdhci_manual_cmd23(struct sdhci_host *host, 1414 struct mmc_request *mrq) 1415 { 1416 return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23); 1417 } 1418 1419 static inline void sdhci_auto_cmd_select(struct sdhci_host *host, 1420 struct mmc_command *cmd, 1421 u16 *mode) 1422 { 1423 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) && 1424 (cmd->opcode != SD_IO_RW_EXTENDED); 1425 bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq); 1426 u16 ctrl2; 
1427 1428 /* 1429 * In case of Version 4.10 or later, use of 'Auto CMD Auto 1430 * Select' is recommended rather than use of 'Auto CMD12 1431 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode 1432 * here because some controllers (e.g sdhci-of-dwmshc) expect it. 1433 */ 1434 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1435 (use_cmd12 || use_cmd23)) { 1436 *mode |= SDHCI_TRNS_AUTO_SEL; 1437 1438 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1439 if (use_cmd23) 1440 ctrl2 |= SDHCI_CMD23_ENABLE; 1441 else 1442 ctrl2 &= ~SDHCI_CMD23_ENABLE; 1443 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 1444 1445 return; 1446 } 1447 1448 /* 1449 * If we are sending CMD23, CMD12 never gets sent 1450 * on successful completion (so no Auto-CMD12). 1451 */ 1452 if (use_cmd12) 1453 *mode |= SDHCI_TRNS_AUTO_CMD12; 1454 else if (use_cmd23) 1455 *mode |= SDHCI_TRNS_AUTO_CMD23; 1456 } 1457 1458 static void sdhci_set_transfer_mode(struct sdhci_host *host, 1459 struct mmc_command *cmd) 1460 { 1461 u16 mode = 0; 1462 struct mmc_data *data = cmd->data; 1463 1464 if (data == NULL) { 1465 if (host->quirks2 & 1466 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) { 1467 /* must not clear SDHCI_TRANSFER_MODE when tuning */ 1468 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) 1469 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE); 1470 } else { 1471 /* clear Auto CMD settings for no data CMDs */ 1472 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); 1473 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 | 1474 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE); 1475 } 1476 return; 1477 } 1478 1479 WARN_ON(!host->data); 1480 1481 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE)) 1482 mode = SDHCI_TRNS_BLK_CNT_EN; 1483 1484 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { 1485 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI; 1486 sdhci_auto_cmd_select(host, cmd, &mode); 1487 if (sdhci_auto_cmd23(host, cmd->mrq)) 1488 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2); 1489 } 1490 1491 if (data->flags & MMC_DATA_READ) 1492 mode |= SDHCI_TRNS_READ; 1493 if (host->flags & SDHCI_REQ_USE_DMA) 1494 mode |= SDHCI_TRNS_DMA; 1495 1496 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); 1497 } 1498 1499 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) 1500 { 1501 return (!(host->flags & SDHCI_DEVICE_DEAD) && 1502 ((mrq->cmd && mrq->cmd->error) || 1503 (mrq->sbc && mrq->sbc->error) || 1504 (mrq->data && mrq->data->stop && mrq->data->stop->error) || 1505 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); 1506 } 1507 1508 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq) 1509 { 1510 int i; 1511 1512 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1513 if (host->mrqs_done[i] == mrq) { 1514 WARN_ON(1); 1515 return; 1516 } 1517 } 1518 1519 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1520 if (!host->mrqs_done[i]) { 1521 host->mrqs_done[i] = mrq; 1522 break; 1523 } 1524 } 1525 1526 WARN_ON(i >= SDHCI_MAX_MRQS); 1527 } 1528 1529 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1530 { 1531 if (host->cmd && host->cmd->mrq == mrq) 1532 host->cmd = NULL; 1533 1534 if (host->data_cmd && host->data_cmd->mrq == mrq) 1535 host->data_cmd = NULL; 1536 1537 if (host->deferred_cmd && host->deferred_cmd->mrq == mrq) 1538 host->deferred_cmd = NULL; 1539 1540 if (host->data && host->data->mrq == mrq) 1541 host->data = NULL; 1542 1543 if (sdhci_needs_reset(host, mrq)) 1544 host->pending_reset = true; 1545 1546 sdhci_set_mrq_done(host, mrq); 1547 1548 
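	/*
	 * The request has been recorded in mrqs_done[], so its software
	 * timeout timer is no longer needed.
	 */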
sdhci_del_timer(host, mrq); 1549 1550 if (!sdhci_has_requests(host)) 1551 sdhci_led_deactivate(host); 1552 } 1553 1554 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1555 { 1556 __sdhci_finish_mrq(host, mrq); 1557 1558 queue_work(host->complete_wq, &host->complete_work); 1559 } 1560 1561 static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout) 1562 { 1563 struct mmc_command *data_cmd = host->data_cmd; 1564 struct mmc_data *data = host->data; 1565 1566 host->data = NULL; 1567 host->data_cmd = NULL; 1568 1569 /* 1570 * The controller needs a reset of internal state machines upon error 1571 * conditions. 1572 */ 1573 if (data->error) { 1574 if (!host->cmd || host->cmd == data_cmd) 1575 sdhci_reset_for(host, REQUEST_ERROR); 1576 else 1577 sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY); 1578 } 1579 1580 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) == 1581 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) 1582 sdhci_adma_table_post(host, data); 1583 1584 /* 1585 * The specification states that the block count register must 1586 * be updated, but it does not specify at what point in the 1587 * data flow. That makes the register entirely useless to read 1588 * back so we have to assume that nothing made it to the card 1589 * in the event of an error. 1590 */ 1591 if (data->error) 1592 data->bytes_xfered = 0; 1593 else 1594 data->bytes_xfered = data->blksz * data->blocks; 1595 1596 /* 1597 * Need to send CMD12 if - 1598 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) 1599 * b) error in multiblock transfer 1600 */ 1601 if (data->stop && 1602 ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || 1603 data->error)) { 1604 /* 1605 * 'cap_cmd_during_tfr' request must not use the command line 1606 * after mmc_command_done() has been called. It is upper layer's 1607 * responsibility to send the stop command if required. 1608 */ 1609 if (data->mrq->cap_cmd_during_tfr) { 1610 __sdhci_finish_mrq(host, data->mrq); 1611 } else { 1612 /* Avoid triggering warning in sdhci_send_command() */ 1613 host->cmd = NULL; 1614 if (!sdhci_send_command(host, data->stop)) { 1615 if (sw_data_timeout) { 1616 /* 1617 * This is anyway a sw data timeout, so 1618 * give up now. 
					 */
					data->stop->error = -EIO;
					__sdhci_finish_mrq(host, data->mrq);
				} else {
					WARN_ON(host->deferred_cmd);
					host->deferred_cmd = data->stop;
				}
			}
		}
	} else {
		__sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	__sdhci_finish_data(host, false);
}

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		sdhci_set_timeout(host, cmd);
	}

	if (cmd->data) {
		if (host->use_external_dma)
			sdhci_external_dma_prepare_data(host, cmd);
		else
			sdhci_prepare_data(host, cmd);
	}

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		WARN_ONCE(1, "Unsupported response type!\n");
		/*
		 * This does not happen in practice because 136-bit response
		 * commands never have busy waiting, so rather than complicate
		 * the error path, just remove busy waiting and continue.
1690 */ 1691 cmd->flags &= ~MMC_RSP_BUSY; 1692 } 1693 1694 if (!(cmd->flags & MMC_RSP_PRESENT)) 1695 flags = SDHCI_CMD_RESP_NONE; 1696 else if (cmd->flags & MMC_RSP_136) 1697 flags = SDHCI_CMD_RESP_LONG; 1698 else if (cmd->flags & MMC_RSP_BUSY) 1699 flags = SDHCI_CMD_RESP_SHORT_BUSY; 1700 else 1701 flags = SDHCI_CMD_RESP_SHORT; 1702 1703 if (cmd->flags & MMC_RSP_CRC) 1704 flags |= SDHCI_CMD_CRC; 1705 if (cmd->flags & MMC_RSP_OPCODE) 1706 flags |= SDHCI_CMD_INDEX; 1707 1708 /* CMD19 is special in that the Data Present Select should be set */ 1709 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK || 1710 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200) 1711 flags |= SDHCI_CMD_DATA; 1712 1713 timeout = jiffies; 1714 if (host->data_timeout) 1715 timeout += nsecs_to_jiffies(host->data_timeout); 1716 else if (!cmd->data && cmd->busy_timeout > 9000) 1717 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ; 1718 else 1719 timeout += 10 * HZ; 1720 sdhci_mod_timer(host, cmd->mrq, timeout); 1721 1722 if (host->use_external_dma) 1723 sdhci_external_dma_pre_transfer(host, cmd); 1724 1725 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); 1726 1727 return true; 1728 } 1729 1730 static bool sdhci_present_error(struct sdhci_host *host, 1731 struct mmc_command *cmd, bool present) 1732 { 1733 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 1734 cmd->error = -ENOMEDIUM; 1735 return true; 1736 } 1737 1738 return false; 1739 } 1740 1741 static bool sdhci_send_command_retry(struct sdhci_host *host, 1742 struct mmc_command *cmd, 1743 unsigned long flags) 1744 __releases(host->lock) 1745 __acquires(host->lock) 1746 { 1747 struct mmc_command *deferred_cmd = host->deferred_cmd; 1748 int timeout = 10; /* Approx. 10 ms */ 1749 bool present; 1750 1751 while (!sdhci_send_command(host, cmd)) { 1752 if (!timeout--) { 1753 pr_err("%s: Controller never released inhibit bit(s).\n", 1754 mmc_hostname(host->mmc)); 1755 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 1756 sdhci_dumpregs(host); 1757 cmd->error = -EIO; 1758 return false; 1759 } 1760 1761 spin_unlock_irqrestore(&host->lock, flags); 1762 1763 usleep_range(1000, 1250); 1764 1765 present = host->mmc->ops->get_cd(host->mmc); 1766 1767 spin_lock_irqsave(&host->lock, flags); 1768 1769 /* A deferred command might disappear, handle that */ 1770 if (cmd == deferred_cmd && cmd != host->deferred_cmd) 1771 return true; 1772 1773 if (sdhci_present_error(host, cmd, present)) 1774 return false; 1775 } 1776 1777 if (cmd == host->deferred_cmd) 1778 host->deferred_cmd = NULL; 1779 1780 return true; 1781 } 1782 1783 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd) 1784 { 1785 int i, reg; 1786 1787 for (i = 0; i < 4; i++) { 1788 reg = SDHCI_RESPONSE + (3 - i) * 4; 1789 cmd->resp[i] = sdhci_readl(host, reg); 1790 } 1791 1792 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC) 1793 return; 1794 1795 /* CRC is stripped so we need to do some shifting */ 1796 for (i = 0; i < 4; i++) { 1797 cmd->resp[i] <<= 8; 1798 if (i != 3) 1799 cmd->resp[i] |= cmd->resp[i + 1] >> 24; 1800 } 1801 } 1802 1803 static void sdhci_finish_command(struct sdhci_host *host) 1804 { 1805 struct mmc_command *cmd = host->cmd; 1806 1807 host->cmd = NULL; 1808 1809 if (cmd->flags & MMC_RSP_PRESENT) { 1810 if (cmd->flags & MMC_RSP_136) { 1811 sdhci_read_rsp_136(host, cmd); 1812 } else { 1813 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); 1814 } 1815 } 1816 1817 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd) 1818 mmc_command_done(host->mmc, cmd->mrq); 

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 * feature so there might be some problems with older
	 * controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		if (!sdhci_send_command(host, cmd->mrq->cmd)) {
			WARN_ON(host->deferred_cmd);
			host->deferred_cmd = cmd->mrq->cmd;
		}
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			__sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_SD_HS:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
		break;
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
			if (host->clk_mul &&
			    (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
1935 */ 1936 clk = SDHCI_PROG_CLOCK_MODE; 1937 real_div = div; 1938 clk_mul = host->clk_mul; 1939 div--; 1940 } else { 1941 /* 1942 * Divisor can be too small to reach clock 1943 * speed requirement. Then use the base clock. 1944 */ 1945 switch_base_clk = true; 1946 } 1947 } 1948 1949 if (!host->clk_mul || switch_base_clk) { 1950 /* Version 3.00 divisors must be a multiple of 2. */ 1951 if (host->max_clk <= clock) 1952 div = 1; 1953 else { 1954 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; 1955 div += 2) { 1956 if ((host->max_clk / div) <= clock) 1957 break; 1958 } 1959 } 1960 real_div = div; 1961 div >>= 1; 1962 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN) 1963 && !div && host->max_clk <= 25000000) 1964 div = 1; 1965 } 1966 } else { 1967 /* Version 2.00 divisors must be a power of 2. */ 1968 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { 1969 if ((host->max_clk / div) <= clock) 1970 break; 1971 } 1972 real_div = div; 1973 div >>= 1; 1974 } 1975 1976 clock_set: 1977 if (real_div) 1978 *actual_clock = (host->max_clk * clk_mul) / real_div; 1979 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 1980 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) 1981 << SDHCI_DIVIDER_HI_SHIFT; 1982 1983 return clk; 1984 } 1985 EXPORT_SYMBOL_GPL(sdhci_calc_clk); 1986 1987 void sdhci_enable_clk(struct sdhci_host *host, u16 clk) 1988 { 1989 ktime_t timeout; 1990 1991 clk |= SDHCI_CLOCK_INT_EN; 1992 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 1993 1994 /* Wait max 150 ms */ 1995 timeout = ktime_add_ms(ktime_get(), 150); 1996 while (1) { 1997 bool timedout = ktime_after(ktime_get(), timeout); 1998 1999 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2000 if (clk & SDHCI_CLOCK_INT_STABLE) 2001 break; 2002 if (timedout) { 2003 pr_err("%s: Internal clock never stabilised.\n", 2004 mmc_hostname(host->mmc)); 2005 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 2006 sdhci_dumpregs(host); 2007 return; 2008 } 2009 udelay(10); 2010 } 2011 2012 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) { 2013 clk |= SDHCI_CLOCK_PLL_EN; 2014 clk &= ~SDHCI_CLOCK_INT_STABLE; 2015 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2016 2017 /* Wait max 150 ms */ 2018 timeout = ktime_add_ms(ktime_get(), 150); 2019 while (1) { 2020 bool timedout = ktime_after(ktime_get(), timeout); 2021 2022 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2023 if (clk & SDHCI_CLOCK_INT_STABLE) 2024 break; 2025 if (timedout) { 2026 pr_err("%s: PLL clock never stabilised.\n", 2027 mmc_hostname(host->mmc)); 2028 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 2029 sdhci_dumpregs(host); 2030 return; 2031 } 2032 udelay(10); 2033 } 2034 } 2035 2036 clk |= SDHCI_CLOCK_CARD_EN; 2037 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2038 } 2039 EXPORT_SYMBOL_GPL(sdhci_enable_clk); 2040 2041 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 2042 { 2043 u16 clk; 2044 2045 host->mmc->actual_clock = 0; 2046 2047 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 2048 2049 if (clock == 0) 2050 return; 2051 2052 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); 2053 sdhci_enable_clk(host, clk); 2054 } 2055 EXPORT_SYMBOL_GPL(sdhci_set_clock); 2056 2057 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, 2058 unsigned short vdd) 2059 { 2060 struct mmc_host *mmc = host->mmc; 2061 2062 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2063 2064 if (mode != MMC_POWER_OFF) 2065 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); 2066 else 2067 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2068 } 2069 2070 void 
sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, 2071 unsigned short vdd) 2072 { 2073 u8 pwr = 0; 2074 2075 if (mode != MMC_POWER_OFF) { 2076 switch (1 << vdd) { 2077 case MMC_VDD_165_195: 2078 /* 2079 * Without a regulator, SDHCI does not support 2.0v 2080 * so we only get here if the driver deliberately 2081 * added the 2.0v range to ocr_avail. Map it to 1.8v 2082 * for the purpose of turning on the power. 2083 */ 2084 case MMC_VDD_20_21: 2085 pwr = SDHCI_POWER_180; 2086 break; 2087 case MMC_VDD_29_30: 2088 case MMC_VDD_30_31: 2089 pwr = SDHCI_POWER_300; 2090 break; 2091 case MMC_VDD_32_33: 2092 case MMC_VDD_33_34: 2093 /* 2094 * 3.4 ~ 3.6V are valid only for those platforms where it's 2095 * known that the voltage range is supported by hardware. 2096 */ 2097 case MMC_VDD_34_35: 2098 case MMC_VDD_35_36: 2099 pwr = SDHCI_POWER_330; 2100 break; 2101 default: 2102 WARN(1, "%s: Invalid vdd %#x\n", 2103 mmc_hostname(host->mmc), vdd); 2104 break; 2105 } 2106 } 2107 2108 if (host->pwr == pwr) 2109 return; 2110 2111 host->pwr = pwr; 2112 2113 if (pwr == 0) { 2114 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2115 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2116 sdhci_runtime_pm_bus_off(host); 2117 } else { 2118 /* 2119 * Spec says that we should clear the power reg before setting 2120 * a new value. Some controllers don't seem to like this though. 2121 */ 2122 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 2123 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2124 2125 /* 2126 * At least the Marvell CaFe chip gets confused if we set the 2127 * voltage and turn on the power at the same time, so set the 2128 * voltage first. 2129 */ 2130 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) 2131 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2132 2133 pwr |= SDHCI_POWER_ON; 2134 2135 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2136 2137 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2138 sdhci_runtime_pm_bus_on(host); 2139 2140 /* 2141 * Some controllers need an extra 10ms delay before they can 2142 * apply the clock after applying power 2143 */ 2144 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 2145 mdelay(10); 2146 } 2147 } 2148 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg); 2149 2150 void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 2151 unsigned short vdd) 2152 { 2153 if (IS_ERR(host->mmc->supply.vmmc)) 2154 sdhci_set_power_noreg(host, mode, vdd); 2155 else 2156 sdhci_set_power_reg(host, mode, vdd); 2157 } 2158 EXPORT_SYMBOL_GPL(sdhci_set_power); 2159 2160 /* 2161 * Some controllers need to configure a valid bus voltage on their power 2162 * register regardless of whether an external regulator is taking care of power 2163 * supply. This helper function takes care of it if set as the controller's 2164 * sdhci_ops.set_power callback.
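 *
 * For example, a platform driver could simply plug it into its ops
 * (purely illustrative sketch; "foo" is a placeholder, not a real driver):
 *
 *	static const struct sdhci_ops foo_sdhci_ops = {
 *		.set_clock		= sdhci_set_clock,
 *		.set_power		= sdhci_set_power_and_bus_voltage,
 *		.set_bus_width		= sdhci_set_bus_width,
 *		.reset			= sdhci_reset,
 *		.set_uhs_signaling	= sdhci_set_uhs_signaling,
 *	};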
2165 */ 2166 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, 2167 unsigned char mode, 2168 unsigned short vdd) 2169 { 2170 if (!IS_ERR(host->mmc->supply.vmmc)) { 2171 struct mmc_host *mmc = host->mmc; 2172 2173 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2174 } 2175 sdhci_set_power_noreg(host, mode, vdd); 2176 } 2177 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage); 2178 2179 /*****************************************************************************\ 2180 * * 2181 * MMC callbacks * 2182 * * 2183 \*****************************************************************************/ 2184 2185 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 2186 { 2187 struct sdhci_host *host = mmc_priv(mmc); 2188 struct mmc_command *cmd; 2189 unsigned long flags; 2190 bool present; 2191 2192 /* First, check card presence */ 2193 present = mmc->ops->get_cd(mmc); 2194 2195 spin_lock_irqsave(&host->lock, flags); 2196 2197 sdhci_led_activate(host); 2198 2199 if (sdhci_present_error(host, mrq->cmd, present)) 2200 goto out_finish; 2201 2202 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2203 2204 if (!sdhci_send_command_retry(host, cmd, flags)) 2205 goto out_finish; 2206 2207 spin_unlock_irqrestore(&host->lock, flags); 2208 2209 return; 2210 2211 out_finish: 2212 sdhci_finish_mrq(host, mrq); 2213 spin_unlock_irqrestore(&host->lock, flags); 2214 } 2215 EXPORT_SYMBOL_GPL(sdhci_request); 2216 2217 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq) 2218 { 2219 struct sdhci_host *host = mmc_priv(mmc); 2220 struct mmc_command *cmd; 2221 unsigned long flags; 2222 int ret = 0; 2223 2224 spin_lock_irqsave(&host->lock, flags); 2225 2226 if (sdhci_present_error(host, mrq->cmd, true)) { 2227 sdhci_finish_mrq(host, mrq); 2228 goto out_finish; 2229 } 2230 2231 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2232 2233 /* 2234 * The HSQ may send a command in interrupt context without polling 2235 * the busy signaling, which means we should return -EBUSY if the 2236 * controller has not released the inhibit bits, so that the HSQ can 2237 * retry the request in non-atomic context. Hence we must not finish 2238 * this request here.
2239 */ 2240 if (!sdhci_send_command(host, cmd)) 2241 ret = -EBUSY; 2242 else 2243 sdhci_led_activate(host); 2244 2245 out_finish: 2246 spin_unlock_irqrestore(&host->lock, flags); 2247 return ret; 2248 } 2249 EXPORT_SYMBOL_GPL(sdhci_request_atomic); 2250 2251 void sdhci_set_bus_width(struct sdhci_host *host, int width) 2252 { 2253 u8 ctrl; 2254 2255 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2256 if (width == MMC_BUS_WIDTH_8) { 2257 ctrl &= ~SDHCI_CTRL_4BITBUS; 2258 ctrl |= SDHCI_CTRL_8BITBUS; 2259 } else { 2260 if (host->mmc->caps & MMC_CAP_8_BIT_DATA) 2261 ctrl &= ~SDHCI_CTRL_8BITBUS; 2262 if (width == MMC_BUS_WIDTH_4) 2263 ctrl |= SDHCI_CTRL_4BITBUS; 2264 else 2265 ctrl &= ~SDHCI_CTRL_4BITBUS; 2266 } 2267 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2268 } 2269 EXPORT_SYMBOL_GPL(sdhci_set_bus_width); 2270 2271 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 2272 { 2273 u16 ctrl_2; 2274 2275 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2276 /* Select Bus Speed Mode for host */ 2277 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 2278 if ((timing == MMC_TIMING_MMC_HS200) || 2279 (timing == MMC_TIMING_UHS_SDR104)) 2280 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 2281 else if (timing == MMC_TIMING_UHS_SDR12) 2282 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 2283 else if (timing == MMC_TIMING_UHS_SDR25) 2284 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 2285 else if (timing == MMC_TIMING_UHS_SDR50) 2286 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 2287 else if ((timing == MMC_TIMING_UHS_DDR50) || 2288 (timing == MMC_TIMING_MMC_DDR52)) 2289 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 2290 else if (timing == MMC_TIMING_MMC_HS400) 2291 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ 2292 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2293 } 2294 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 2295 2296 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 2297 { 2298 struct sdhci_host *host = mmc_priv(mmc); 2299 u8 ctrl; 2300 2301 if (ios->power_mode == MMC_POWER_UNDEFINED) 2302 return; 2303 2304 if (host->flags & SDHCI_DEVICE_DEAD) { 2305 if (!IS_ERR(mmc->supply.vmmc) && 2306 ios->power_mode == MMC_POWER_OFF) 2307 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 2308 return; 2309 } 2310 2311 /* 2312 * Reset the chip on each power off. 2313 * Should clear out any weird states. 2314 */ 2315 if (ios->power_mode == MMC_POWER_OFF) { 2316 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2317 sdhci_reinit(host); 2318 } 2319 2320 if (host->version >= SDHCI_SPEC_300 && 2321 (ios->power_mode == MMC_POWER_UP) && 2322 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) 2323 sdhci_enable_preset_value(host, false); 2324 2325 if (!ios->clock || ios->clock != host->clock) { 2326 host->ops->set_clock(host, ios->clock); 2327 host->clock = ios->clock; 2328 2329 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && 2330 host->clock) { 2331 host->timeout_clk = mmc->actual_clock ? 2332 mmc->actual_clock / 1000 : 2333 host->clock / 1000; 2334 mmc->max_busy_timeout = 2335 host->ops->get_max_timeout_count ? 
2336 host->ops->get_max_timeout_count(host) : 2337 1 << 27; 2338 mmc->max_busy_timeout /= host->timeout_clk; 2339 } 2340 } 2341 2342 if (host->ops->set_power) 2343 host->ops->set_power(host, ios->power_mode, ios->vdd); 2344 else 2345 sdhci_set_power(host, ios->power_mode, ios->vdd); 2346 2347 if (host->ops->platform_send_init_74_clocks) 2348 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 2349 2350 host->ops->set_bus_width(host, ios->bus_width); 2351 2352 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2353 2354 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { 2355 if (ios->timing == MMC_TIMING_SD_HS || 2356 ios->timing == MMC_TIMING_MMC_HS || 2357 ios->timing == MMC_TIMING_MMC_HS400 || 2358 ios->timing == MMC_TIMING_MMC_HS200 || 2359 ios->timing == MMC_TIMING_MMC_DDR52 || 2360 ios->timing == MMC_TIMING_UHS_SDR50 || 2361 ios->timing == MMC_TIMING_UHS_SDR104 || 2362 ios->timing == MMC_TIMING_UHS_DDR50 || 2363 ios->timing == MMC_TIMING_UHS_SDR25) 2364 ctrl |= SDHCI_CTRL_HISPD; 2365 else 2366 ctrl &= ~SDHCI_CTRL_HISPD; 2367 } 2368 2369 if (host->version >= SDHCI_SPEC_300) { 2370 u16 clk, ctrl_2; 2371 2372 if (!host->preset_enabled) { 2373 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2374 /* 2375 * We only need to set Driver Strength if the 2376 * preset value enable is not set. 2377 */ 2378 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2379 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; 2380 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) 2381 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; 2382 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B) 2383 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2384 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) 2385 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; 2386 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D) 2387 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D; 2388 else { 2389 pr_warn("%s: invalid driver type, default to driver type B\n", 2390 mmc_hostname(mmc)); 2391 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2392 } 2393 2394 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2395 } else { 2396 /* 2397 * According to SDHC Spec v3.00, if the Preset Value 2398 * Enable in the Host Control 2 register is set, we 2399 * need to reset SD Clock Enable before changing High 2400 * Speed Enable to avoid generating clock glitches.
2401 */ 2402 2403 /* Reset SD Clock Enable */ 2404 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2405 clk &= ~SDHCI_CLOCK_CARD_EN; 2406 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2407 2408 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2409 2410 /* Re-enable SD Clock */ 2411 host->ops->set_clock(host, host->clock); 2412 } 2413 2414 /* Reset SD Clock Enable */ 2415 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2416 clk &= ~SDHCI_CLOCK_CARD_EN; 2417 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2418 2419 host->ops->set_uhs_signaling(host, ios->timing); 2420 host->timing = ios->timing; 2421 2422 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && 2423 ((ios->timing == MMC_TIMING_UHS_SDR12) || 2424 (ios->timing == MMC_TIMING_UHS_SDR25) || 2425 (ios->timing == MMC_TIMING_UHS_SDR50) || 2426 (ios->timing == MMC_TIMING_UHS_SDR104) || 2427 (ios->timing == MMC_TIMING_UHS_DDR50) || 2428 (ios->timing == MMC_TIMING_MMC_DDR52))) { 2429 u16 preset; 2430 2431 sdhci_enable_preset_value(host, true); 2432 preset = sdhci_get_preset_value(host); 2433 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK, 2434 preset); 2435 } 2436 2437 /* Re-enable SD Clock */ 2438 host->ops->set_clock(host, host->clock); 2439 } else 2440 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2441 } 2442 EXPORT_SYMBOL_GPL(sdhci_set_ios); 2443 2444 static int sdhci_get_cd(struct mmc_host *mmc) 2445 { 2446 struct sdhci_host *host = mmc_priv(mmc); 2447 int gpio_cd = mmc_gpio_get_cd(mmc); 2448 2449 if (host->flags & SDHCI_DEVICE_DEAD) 2450 return 0; 2451 2452 /* If nonremovable, assume that the card is always present. */ 2453 if (!mmc_card_is_removable(mmc)) 2454 return 1; 2455 2456 /* 2457 * Try slot gpio detect, if defined it take precedence 2458 * over build in controller functionality 2459 */ 2460 if (gpio_cd >= 0) 2461 return !!gpio_cd; 2462 2463 /* If polling, assume that the card is always present. */ 2464 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2465 return 1; 2466 2467 /* Host native card detect */ 2468 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2469 } 2470 2471 int sdhci_get_cd_nogpio(struct mmc_host *mmc) 2472 { 2473 struct sdhci_host *host = mmc_priv(mmc); 2474 unsigned long flags; 2475 int ret = 0; 2476 2477 spin_lock_irqsave(&host->lock, flags); 2478 2479 if (host->flags & SDHCI_DEVICE_DEAD) 2480 goto out; 2481 2482 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2483 out: 2484 spin_unlock_irqrestore(&host->lock, flags); 2485 2486 return ret; 2487 } 2488 EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio); 2489 2490 static int sdhci_check_ro(struct sdhci_host *host) 2491 { 2492 unsigned long flags; 2493 int is_readonly; 2494 2495 spin_lock_irqsave(&host->lock, flags); 2496 2497 if (host->flags & SDHCI_DEVICE_DEAD) 2498 is_readonly = 0; 2499 else if (host->ops->get_ro) 2500 is_readonly = host->ops->get_ro(host); 2501 else if (mmc_can_gpio_ro(host->mmc)) 2502 is_readonly = mmc_gpio_get_ro(host->mmc); 2503 else 2504 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) 2505 & SDHCI_WRITE_PROTECT); 2506 2507 spin_unlock_irqrestore(&host->lock, flags); 2508 2509 /* This quirk needs to be replaced by a callback-function later */ 2510 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ? 
2511 !is_readonly : is_readonly; 2512 } 2513 2514 #define SAMPLE_COUNT 5 2515 2516 static int sdhci_get_ro(struct mmc_host *mmc) 2517 { 2518 struct sdhci_host *host = mmc_priv(mmc); 2519 int i, ro_count; 2520 2521 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) 2522 return sdhci_check_ro(host); 2523 2524 ro_count = 0; 2525 for (i = 0; i < SAMPLE_COUNT; i++) { 2526 if (sdhci_check_ro(host)) { 2527 if (++ro_count > SAMPLE_COUNT / 2) 2528 return 1; 2529 } 2530 msleep(30); 2531 } 2532 return 0; 2533 } 2534 2535 static void sdhci_hw_reset(struct mmc_host *mmc) 2536 { 2537 struct sdhci_host *host = mmc_priv(mmc); 2538 2539 if (host->ops && host->ops->hw_reset) 2540 host->ops->hw_reset(host); 2541 } 2542 2543 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 2544 { 2545 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 2546 if (enable) 2547 host->ier |= SDHCI_INT_CARD_INT; 2548 else 2549 host->ier &= ~SDHCI_INT_CARD_INT; 2550 2551 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2552 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2553 } 2554 } 2555 2556 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2557 { 2558 struct sdhci_host *host = mmc_priv(mmc); 2559 unsigned long flags; 2560 2561 if (enable) 2562 pm_runtime_get_noresume(mmc_dev(mmc)); 2563 2564 spin_lock_irqsave(&host->lock, flags); 2565 sdhci_enable_sdio_irq_nolock(host, enable); 2566 spin_unlock_irqrestore(&host->lock, flags); 2567 2568 if (!enable) 2569 pm_runtime_put_noidle(mmc_dev(mmc)); 2570 } 2571 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2572 2573 static void sdhci_ack_sdio_irq(struct mmc_host *mmc) 2574 { 2575 struct sdhci_host *host = mmc_priv(mmc); 2576 unsigned long flags; 2577 2578 spin_lock_irqsave(&host->lock, flags); 2579 sdhci_enable_sdio_irq_nolock(host, true); 2580 spin_unlock_irqrestore(&host->lock, flags); 2581 } 2582 2583 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2584 struct mmc_ios *ios) 2585 { 2586 struct sdhci_host *host = mmc_priv(mmc); 2587 u16 ctrl; 2588 int ret; 2589 2590 /* 2591 * Signal Voltage Switching is only applicable for Host Controllers 2592 * v3.00 and above. 
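 *
 * Each case below additionally checks that the matching
 * SDHCI_SIGNALING_330/180/120 capability has been set up for this host
 * and returns -EINVAL if the requested voltage was never advertised.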
2593 */ 2594 if (host->version < SDHCI_SPEC_300) 2595 return 0; 2596 2597 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2598 2599 switch (ios->signal_voltage) { 2600 case MMC_SIGNAL_VOLTAGE_330: 2601 if (!(host->flags & SDHCI_SIGNALING_330)) 2602 return -EINVAL; 2603 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 2604 ctrl &= ~SDHCI_CTRL_VDD_180; 2605 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2606 2607 if (!IS_ERR(mmc->supply.vqmmc)) { 2608 ret = mmc_regulator_set_vqmmc(mmc, ios); 2609 if (ret < 0) { 2610 pr_warn("%s: Switching to 3.3V signalling voltage failed\n", 2611 mmc_hostname(mmc)); 2612 return -EIO; 2613 } 2614 } 2615 /* Wait for 5ms */ 2616 usleep_range(5000, 5500); 2617 2618 /* 3.3V regulator output should be stable within 5 ms */ 2619 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2620 if (!(ctrl & SDHCI_CTRL_VDD_180)) 2621 return 0; 2622 2623 pr_warn("%s: 3.3V regulator output did not become stable\n", 2624 mmc_hostname(mmc)); 2625 2626 return -EAGAIN; 2627 case MMC_SIGNAL_VOLTAGE_180: 2628 if (!(host->flags & SDHCI_SIGNALING_180)) 2629 return -EINVAL; 2630 if (!IS_ERR(mmc->supply.vqmmc)) { 2631 ret = mmc_regulator_set_vqmmc(mmc, ios); 2632 if (ret < 0) { 2633 pr_warn("%s: Switching to 1.8V signalling voltage failed\n", 2634 mmc_hostname(mmc)); 2635 return -EIO; 2636 } 2637 } 2638 2639 /* 2640 * Enable 1.8V Signal Enable in the Host Control2 2641 * register 2642 */ 2643 ctrl |= SDHCI_CTRL_VDD_180; 2644 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2645 2646 /* Some controller need to do more when switching */ 2647 if (host->ops->voltage_switch) 2648 host->ops->voltage_switch(host); 2649 2650 /* 1.8V regulator output should be stable within 5 ms */ 2651 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2652 if (ctrl & SDHCI_CTRL_VDD_180) 2653 return 0; 2654 2655 pr_warn("%s: 1.8V regulator output did not become stable\n", 2656 mmc_hostname(mmc)); 2657 2658 return -EAGAIN; 2659 case MMC_SIGNAL_VOLTAGE_120: 2660 if (!(host->flags & SDHCI_SIGNALING_120)) 2661 return -EINVAL; 2662 if (!IS_ERR(mmc->supply.vqmmc)) { 2663 ret = mmc_regulator_set_vqmmc(mmc, ios); 2664 if (ret < 0) { 2665 pr_warn("%s: Switching to 1.2V signalling voltage failed\n", 2666 mmc_hostname(mmc)); 2667 return -EIO; 2668 } 2669 } 2670 return 0; 2671 default: 2672 /* No signal voltage switch required */ 2673 return 0; 2674 } 2675 } 2676 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch); 2677 2678 static int sdhci_card_busy(struct mmc_host *mmc) 2679 { 2680 struct sdhci_host *host = mmc_priv(mmc); 2681 u32 present_state; 2682 2683 /* Check whether DAT[0] is 0 */ 2684 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 2685 2686 return !(present_state & SDHCI_DATA_0_LVL_MASK); 2687 } 2688 2689 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 2690 { 2691 struct sdhci_host *host = mmc_priv(mmc); 2692 unsigned long flags; 2693 2694 spin_lock_irqsave(&host->lock, flags); 2695 host->flags |= SDHCI_HS400_TUNING; 2696 spin_unlock_irqrestore(&host->lock, flags); 2697 2698 return 0; 2699 } 2700 2701 void sdhci_start_tuning(struct sdhci_host *host) 2702 { 2703 u16 ctrl; 2704 2705 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2706 ctrl |= SDHCI_CTRL_EXEC_TUNING; 2707 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND) 2708 ctrl |= SDHCI_CTRL_TUNED_CLK; 2709 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2710 2711 /* 2712 * As per the Host Controller spec v3.00, tuning command 2713 * generates Buffer Read Ready interrupt, so enable that. 
2714 * 2715 * Note: The spec clearly says that when tuning sequence 2716 * is being performed, the controller does not generate 2717 * interrupts other than Buffer Read Ready interrupt. But 2718 * to make sure we don't hit a controller bug, we _only_ 2719 * enable Buffer Read Ready interrupt here. 2720 */ 2721 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2722 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2723 } 2724 EXPORT_SYMBOL_GPL(sdhci_start_tuning); 2725 2726 void sdhci_end_tuning(struct sdhci_host *host) 2727 { 2728 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2729 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2730 } 2731 EXPORT_SYMBOL_GPL(sdhci_end_tuning); 2732 2733 void sdhci_reset_tuning(struct sdhci_host *host) 2734 { 2735 u16 ctrl; 2736 2737 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2738 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2739 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2740 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2741 } 2742 EXPORT_SYMBOL_GPL(sdhci_reset_tuning); 2743 2744 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2745 { 2746 sdhci_reset_tuning(host); 2747 2748 sdhci_reset_for(host, TUNING_ABORT); 2749 2750 sdhci_end_tuning(host); 2751 2752 mmc_send_abort_tuning(host->mmc, opcode); 2753 } 2754 EXPORT_SYMBOL_GPL(sdhci_abort_tuning); 2755 2756 /* 2757 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2758 * tuning command does not have a data payload (or rather the hardware does it 2759 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2760 * interrupt setup is different to other commands and there is no timeout 2761 * interrupt so special handling is needed. 2762 */ 2763 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2764 { 2765 struct mmc_host *mmc = host->mmc; 2766 struct mmc_command cmd = {}; 2767 struct mmc_request mrq = {}; 2768 unsigned long flags; 2769 u32 b = host->sdma_boundary; 2770 2771 spin_lock_irqsave(&host->lock, flags); 2772 2773 cmd.opcode = opcode; 2774 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2775 cmd.mrq = &mrq; 2776 2777 mrq.cmd = &cmd; 2778 /* 2779 * In response to CMD19, the card sends 64 bytes of tuning 2780 * block to the Host Controller. So we set the block size 2781 * to 64 here. 2782 */ 2783 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2784 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2785 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2786 else 2787 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2788 2789 /* 2790 * The tuning block is sent by the card to the host controller. 2791 * So we set the TRNS_READ bit in the Transfer Mode register. 2792 * This also takes care of setting DMA Enable and Multi Block 2793 * Select in the same register to 0. 
2794 */ 2795 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2796 2797 if (!sdhci_send_command_retry(host, &cmd, flags)) { 2798 spin_unlock_irqrestore(&host->lock, flags); 2799 host->tuning_done = 0; 2800 return; 2801 } 2802 2803 host->cmd = NULL; 2804 2805 sdhci_del_timer(host, &mrq); 2806 2807 host->tuning_done = 0; 2808 2809 spin_unlock_irqrestore(&host->lock, flags); 2810 2811 /* Wait for Buffer Read Ready interrupt */ 2812 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2813 msecs_to_jiffies(50)); 2814 2815 } 2816 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2817 2818 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2819 { 2820 int i; 2821 2822 /* 2823 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2824 * of loops reaches tuning loop count. 2825 */ 2826 for (i = 0; i < host->tuning_loop_count; i++) { 2827 u16 ctrl; 2828 2829 sdhci_send_tuning(host, opcode); 2830 2831 if (!host->tuning_done) { 2832 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", 2833 mmc_hostname(host->mmc)); 2834 sdhci_abort_tuning(host, opcode); 2835 return -ETIMEDOUT; 2836 } 2837 2838 /* Spec does not require a delay between tuning cycles */ 2839 if (host->tuning_delay > 0) 2840 mdelay(host->tuning_delay); 2841 2842 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2843 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2844 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2845 return 0; /* Success! */ 2846 break; 2847 } 2848 2849 } 2850 2851 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2852 mmc_hostname(host->mmc)); 2853 sdhci_reset_tuning(host); 2854 return -EAGAIN; 2855 } 2856 2857 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2858 { 2859 struct sdhci_host *host = mmc_priv(mmc); 2860 int err = 0; 2861 unsigned int tuning_count = 0; 2862 bool hs400_tuning; 2863 2864 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2865 2866 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2867 tuning_count = host->tuning_count; 2868 2869 /* 2870 * The Host Controller needs tuning in case of SDR104 and DDR50 2871 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2872 * the Capabilities register. 2873 * If the Host Controller supports the HS200 mode then the 2874 * tuning function has to be executed. 2875 */ 2876 switch (host->timing) { 2877 /* HS400 tuning is done in HS200 mode */ 2878 case MMC_TIMING_MMC_HS400: 2879 err = -EINVAL; 2880 goto out; 2881 2882 case MMC_TIMING_MMC_HS200: 2883 /* 2884 * Periodic re-tuning for HS400 is not expected to be needed, so 2885 * disable it here. 
2886 */ 2887 if (hs400_tuning) 2888 tuning_count = 0; 2889 break; 2890 2891 case MMC_TIMING_UHS_SDR104: 2892 case MMC_TIMING_UHS_DDR50: 2893 break; 2894 2895 case MMC_TIMING_UHS_SDR50: 2896 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2897 break; 2898 fallthrough; 2899 2900 default: 2901 goto out; 2902 } 2903 2904 if (host->ops->platform_execute_tuning) { 2905 err = host->ops->platform_execute_tuning(host, opcode); 2906 goto out; 2907 } 2908 2909 mmc->retune_period = tuning_count; 2910 2911 if (host->tuning_delay < 0) 2912 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2913 2914 sdhci_start_tuning(host); 2915 2916 host->tuning_err = __sdhci_execute_tuning(host, opcode); 2917 2918 sdhci_end_tuning(host); 2919 out: 2920 host->flags &= ~SDHCI_HS400_TUNING; 2921 2922 return err; 2923 } 2924 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2925 2926 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2927 { 2928 /* Host Controller v3.00 defines preset value registers */ 2929 if (host->version < SDHCI_SPEC_300) 2930 return; 2931 2932 /* 2933 * We only enable or disable Preset Value if they are not already 2934 * enabled or disabled respectively. Otherwise, we bail out. 2935 */ 2936 if (host->preset_enabled != enable) { 2937 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2938 2939 if (enable) 2940 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 2941 else 2942 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 2943 2944 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2945 2946 if (enable) 2947 host->flags |= SDHCI_PV_ENABLED; 2948 else 2949 host->flags &= ~SDHCI_PV_ENABLED; 2950 2951 host->preset_enabled = enable; 2952 } 2953 } 2954 2955 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 2956 int err) 2957 { 2958 struct mmc_data *data = mrq->data; 2959 2960 if (data->host_cookie != COOKIE_UNMAPPED) 2961 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, 2962 mmc_get_dma_dir(data)); 2963 2964 data->host_cookie = COOKIE_UNMAPPED; 2965 } 2966 2967 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 2968 { 2969 struct sdhci_host *host = mmc_priv(mmc); 2970 2971 mrq->data->host_cookie = COOKIE_UNMAPPED; 2972 2973 /* 2974 * No pre-mapping in the pre hook if we're using the bounce buffer, 2975 * for that we would need two bounce buffers since one buffer is 2976 * in flight when this is getting called. 
2977 */ 2978 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 2979 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 2980 } 2981 2982 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 2983 { 2984 if (host->data_cmd) { 2985 host->data_cmd->error = err; 2986 sdhci_finish_mrq(host, host->data_cmd->mrq); 2987 } 2988 2989 if (host->cmd) { 2990 host->cmd->error = err; 2991 sdhci_finish_mrq(host, host->cmd->mrq); 2992 } 2993 } 2994 2995 static void sdhci_card_event(struct mmc_host *mmc) 2996 { 2997 struct sdhci_host *host = mmc_priv(mmc); 2998 unsigned long flags; 2999 int present; 3000 3001 /* First check if client has provided their own card event */ 3002 if (host->ops->card_event) 3003 host->ops->card_event(host); 3004 3005 present = mmc->ops->get_cd(mmc); 3006 3007 spin_lock_irqsave(&host->lock, flags); 3008 3009 /* Check sdhci_has_requests() first in case we are runtime suspended */ 3010 if (sdhci_has_requests(host) && !present) { 3011 pr_err("%s: Card removed during transfer!\n", 3012 mmc_hostname(mmc)); 3013 pr_err("%s: Resetting controller.\n", 3014 mmc_hostname(mmc)); 3015 3016 sdhci_reset_for(host, CARD_REMOVED); 3017 3018 sdhci_error_out_mrqs(host, -ENOMEDIUM); 3019 } 3020 3021 spin_unlock_irqrestore(&host->lock, flags); 3022 } 3023 3024 static const struct mmc_host_ops sdhci_ops = { 3025 .request = sdhci_request, 3026 .post_req = sdhci_post_req, 3027 .pre_req = sdhci_pre_req, 3028 .set_ios = sdhci_set_ios, 3029 .get_cd = sdhci_get_cd, 3030 .get_ro = sdhci_get_ro, 3031 .card_hw_reset = sdhci_hw_reset, 3032 .enable_sdio_irq = sdhci_enable_sdio_irq, 3033 .ack_sdio_irq = sdhci_ack_sdio_irq, 3034 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 3035 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 3036 .execute_tuning = sdhci_execute_tuning, 3037 .card_event = sdhci_card_event, 3038 .card_busy = sdhci_card_busy, 3039 }; 3040 3041 /*****************************************************************************\ 3042 * * 3043 * Request done * 3044 * * 3045 \*****************************************************************************/ 3046 3047 static bool sdhci_request_done(struct sdhci_host *host) 3048 { 3049 unsigned long flags; 3050 struct mmc_request *mrq; 3051 int i; 3052 3053 spin_lock_irqsave(&host->lock, flags); 3054 3055 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3056 mrq = host->mrqs_done[i]; 3057 if (mrq) 3058 break; 3059 } 3060 3061 if (!mrq) { 3062 spin_unlock_irqrestore(&host->lock, flags); 3063 return true; 3064 } 3065 3066 /* 3067 * The controller needs a reset of internal state machines 3068 * upon error conditions. 3069 */ 3070 if (sdhci_needs_reset(host, mrq)) { 3071 /* 3072 * Do not finish until command and data lines are available for 3073 * reset. Note there can only be one other mrq, so it cannot 3074 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 3075 * would both be null. 3076 */ 3077 if (host->cmd || host->data_cmd) { 3078 spin_unlock_irqrestore(&host->lock, flags); 3079 return true; 3080 } 3081 3082 /* Some controllers need this kick or reset won't work here */ 3083 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 3084 /* This is to force an update */ 3085 host->ops->set_clock(host, host->clock); 3086 3087 sdhci_reset_for(host, REQUEST_ERROR); 3088 3089 host->pending_reset = false; 3090 } 3091 3092 /* 3093 * Always unmap the data buffers if they were mapped by 3094 * sdhci_prepare_data() whenever we finish with a request. 3095 * This avoids leaking DMA mappings on error. 
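 *
 * Three situations are handled below: terminating the external DMA
 * channel when a request that used it failed, syncing (and, for reads,
 * copying back) the bounce buffer, and a plain dma_unmap_sg() for
 * regular DMA mappings.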
3096 */ 3097 if (host->flags & SDHCI_REQ_USE_DMA) { 3098 struct mmc_data *data = mrq->data; 3099 3100 if (host->use_external_dma && data && 3101 (mrq->cmd->error || data->error)) { 3102 struct dma_chan *chan = sdhci_external_dma_channel(host, data); 3103 3104 host->mrqs_done[i] = NULL; 3105 spin_unlock_irqrestore(&host->lock, flags); 3106 dmaengine_terminate_sync(chan); 3107 spin_lock_irqsave(&host->lock, flags); 3108 sdhci_set_mrq_done(host, mrq); 3109 } 3110 3111 if (data && data->host_cookie == COOKIE_MAPPED) { 3112 if (host->bounce_buffer) { 3113 /* 3114 * On reads, copy the bounced data into the 3115 * sglist 3116 */ 3117 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 3118 unsigned int length = data->bytes_xfered; 3119 3120 if (length > host->bounce_buffer_size) { 3121 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 3122 mmc_hostname(host->mmc), 3123 host->bounce_buffer_size, 3124 data->bytes_xfered); 3125 /* Cap it down and continue */ 3126 length = host->bounce_buffer_size; 3127 } 3128 dma_sync_single_for_cpu( 3129 mmc_dev(host->mmc), 3130 host->bounce_addr, 3131 host->bounce_buffer_size, 3132 DMA_FROM_DEVICE); 3133 sg_copy_from_buffer(data->sg, 3134 data->sg_len, 3135 host->bounce_buffer, 3136 length); 3137 } else { 3138 /* No copying, just switch ownership */ 3139 dma_sync_single_for_cpu( 3140 mmc_dev(host->mmc), 3141 host->bounce_addr, 3142 host->bounce_buffer_size, 3143 mmc_get_dma_dir(data)); 3144 } 3145 } else { 3146 /* Unmap the raw data */ 3147 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 3148 data->sg_len, 3149 mmc_get_dma_dir(data)); 3150 } 3151 data->host_cookie = COOKIE_UNMAPPED; 3152 } 3153 } 3154 3155 host->mrqs_done[i] = NULL; 3156 3157 spin_unlock_irqrestore(&host->lock, flags); 3158 3159 if (host->ops->request_done) 3160 host->ops->request_done(host, mrq); 3161 else 3162 mmc_request_done(host->mmc, mrq); 3163 3164 return false; 3165 } 3166 3167 static void sdhci_complete_work(struct work_struct *work) 3168 { 3169 struct sdhci_host *host = container_of(work, struct sdhci_host, 3170 complete_work); 3171 3172 while (!sdhci_request_done(host)) 3173 ; 3174 } 3175 3176 static void sdhci_timeout_timer(struct timer_list *t) 3177 { 3178 struct sdhci_host *host; 3179 unsigned long flags; 3180 3181 host = from_timer(host, t, timer); 3182 3183 spin_lock_irqsave(&host->lock, flags); 3184 3185 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 3186 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 3187 mmc_hostname(host->mmc)); 3188 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3189 sdhci_dumpregs(host); 3190 3191 host->cmd->error = -ETIMEDOUT; 3192 sdhci_finish_mrq(host, host->cmd->mrq); 3193 } 3194 3195 spin_unlock_irqrestore(&host->lock, flags); 3196 } 3197 3198 static void sdhci_timeout_data_timer(struct timer_list *t) 3199 { 3200 struct sdhci_host *host; 3201 unsigned long flags; 3202 3203 host = from_timer(host, t, data_timer); 3204 3205 spin_lock_irqsave(&host->lock, flags); 3206 3207 if (host->data || host->data_cmd || 3208 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 3209 pr_err("%s: Timeout waiting for hardware interrupt.\n", 3210 mmc_hostname(host->mmc)); 3211 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3212 sdhci_dumpregs(host); 3213 3214 if (host->data) { 3215 host->data->error = -ETIMEDOUT; 3216 __sdhci_finish_data(host, true); 3217 queue_work(host->complete_wq, &host->complete_work); 3218 } else if (host->data_cmd) { 3219 host->data_cmd->error = -ETIMEDOUT; 3220 sdhci_finish_mrq(host, host->data_cmd->mrq); 3221 } else { 
3222 host->cmd->error = -ETIMEDOUT; 3223 sdhci_finish_mrq(host, host->cmd->mrq); 3224 } 3225 } 3226 3227 spin_unlock_irqrestore(&host->lock, flags); 3228 } 3229 3230 /*****************************************************************************\ 3231 * * 3232 * Interrupt handling * 3233 * * 3234 \*****************************************************************************/ 3235 3236 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) 3237 { 3238 /* Handle auto-CMD12 error */ 3239 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { 3240 struct mmc_request *mrq = host->data_cmd->mrq; 3241 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3242 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 3243 SDHCI_INT_DATA_TIMEOUT : 3244 SDHCI_INT_DATA_CRC; 3245 3246 /* Treat auto-CMD12 error the same as data error */ 3247 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { 3248 *intmask_p |= data_err_bit; 3249 return; 3250 } 3251 } 3252 3253 if (!host->cmd) { 3254 /* 3255 * SDHCI recovers from errors by resetting the cmd and data 3256 * circuits. Until that is done, there very well might be more 3257 * interrupts, so ignore them in that case. 3258 */ 3259 if (host->pending_reset) 3260 return; 3261 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 3262 mmc_hostname(host->mmc), (unsigned)intmask); 3263 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3264 sdhci_dumpregs(host); 3265 return; 3266 } 3267 3268 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 3269 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 3270 if (intmask & SDHCI_INT_TIMEOUT) { 3271 host->cmd->error = -ETIMEDOUT; 3272 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3273 } else { 3274 host->cmd->error = -EILSEQ; 3275 if (!mmc_op_tuning(host->cmd->opcode)) 3276 sdhci_err_stats_inc(host, CMD_CRC); 3277 } 3278 /* Treat data command CRC error the same as data CRC error */ 3279 if (host->cmd->data && 3280 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 3281 SDHCI_INT_CRC) { 3282 host->cmd = NULL; 3283 *intmask_p |= SDHCI_INT_DATA_CRC; 3284 return; 3285 } 3286 3287 __sdhci_finish_mrq(host, host->cmd->mrq); 3288 return; 3289 } 3290 3291 /* Handle auto-CMD23 error */ 3292 if (intmask & SDHCI_INT_AUTO_CMD_ERR) { 3293 struct mmc_request *mrq = host->cmd->mrq; 3294 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3295 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 
3296 -ETIMEDOUT : 3297 -EILSEQ; 3298 3299 sdhci_err_stats_inc(host, AUTO_CMD); 3300 3301 if (sdhci_auto_cmd23(host, mrq)) { 3302 mrq->sbc->error = err; 3303 __sdhci_finish_mrq(host, mrq); 3304 return; 3305 } 3306 } 3307 3308 if (intmask & SDHCI_INT_RESPONSE) 3309 sdhci_finish_command(host); 3310 } 3311 3312 static void sdhci_adma_show_error(struct sdhci_host *host) 3313 { 3314 void *desc = host->adma_table; 3315 dma_addr_t dma = host->adma_addr; 3316 3317 sdhci_dumpregs(host); 3318 3319 while (true) { 3320 struct sdhci_adma2_64_desc *dma_desc = desc; 3321 3322 if (host->flags & SDHCI_USE_64_BIT_DMA) 3323 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 3324 (unsigned long long)dma, 3325 le32_to_cpu(dma_desc->addr_hi), 3326 le32_to_cpu(dma_desc->addr_lo), 3327 le16_to_cpu(dma_desc->len), 3328 le16_to_cpu(dma_desc->cmd)); 3329 else 3330 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 3331 (unsigned long long)dma, 3332 le32_to_cpu(dma_desc->addr_lo), 3333 le16_to_cpu(dma_desc->len), 3334 le16_to_cpu(dma_desc->cmd)); 3335 3336 desc += host->desc_sz; 3337 dma += host->desc_sz; 3338 3339 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 3340 break; 3341 } 3342 } 3343 3344 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 3345 { 3346 u32 command; 3347 3348 /* 3349 * CMD19 generates _only_ a Buffer Read Ready interrupt when 3350 * sdhci_send_tuning() is used. We need to exclude the case of PIO 3351 * mode with mmc_send_tuning(): there sdhci_transfer_pio() would 3352 * never be called, SDHCI_INT_DATA_AVAIL would stay asserted and we 3353 * would be stuck in an irq storm. 3354 */ 3355 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) { 3356 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); 3357 if (command == MMC_SEND_TUNING_BLOCK || 3358 command == MMC_SEND_TUNING_BLOCK_HS200) { 3359 host->tuning_done = 1; 3360 wake_up(&host->buf_ready_int); 3361 return; 3362 } 3363 } 3364 3365 if (!host->data) { 3366 struct mmc_command *data_cmd = host->data_cmd; 3367 3368 /* 3369 * The "data complete" interrupt is also used to 3370 * indicate that a busy state has ended. See comment 3371 * above in sdhci_cmd_irq(). 3372 */ 3373 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 3374 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3375 host->data_cmd = NULL; 3376 data_cmd->error = -ETIMEDOUT; 3377 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3378 __sdhci_finish_mrq(host, data_cmd->mrq); 3379 return; 3380 } 3381 if (intmask & SDHCI_INT_DATA_END) { 3382 host->data_cmd = NULL; 3383 /* 3384 * Some cards handle busy-end interrupt 3385 * before the command completed, so make 3386 * sure we do things in the proper order. 3387 */ 3388 if (host->cmd == data_cmd) 3389 return; 3390 3391 __sdhci_finish_mrq(host, data_cmd->mrq); 3392 return; 3393 } 3394 } 3395 3396 /* 3397 * SDHCI recovers from errors by resetting the cmd and data 3398 * circuits. Until that is done, there very well might be more 3399 * interrupts, so ignore them in that case.
3400 */ 3401 if (host->pending_reset) 3402 return; 3403 3404 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 3405 mmc_hostname(host->mmc), (unsigned)intmask); 3406 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3407 sdhci_dumpregs(host); 3408 3409 return; 3410 } 3411 3412 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3413 host->data->error = -ETIMEDOUT; 3414 sdhci_err_stats_inc(host, DAT_TIMEOUT); 3415 } else if (intmask & SDHCI_INT_DATA_END_BIT) { 3416 host->data->error = -EILSEQ; 3417 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3418 sdhci_err_stats_inc(host, DAT_CRC); 3419 } else if ((intmask & SDHCI_INT_DATA_CRC) && 3420 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 3421 != MMC_BUS_TEST_R) { 3422 host->data->error = -EILSEQ; 3423 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3424 sdhci_err_stats_inc(host, DAT_CRC); 3425 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 3426 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), 3427 intmask); 3428 sdhci_adma_show_error(host); 3429 sdhci_err_stats_inc(host, ADMA); 3430 host->data->error = -EIO; 3431 if (host->ops->adma_workaround) 3432 host->ops->adma_workaround(host, intmask); 3433 } 3434 3435 if (host->data->error) 3436 sdhci_finish_data(host); 3437 else { 3438 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 3439 sdhci_transfer_pio(host); 3440 3441 /* 3442 * We currently don't do anything fancy with DMA 3443 * boundaries, but as we can't disable the feature 3444 * we need to at least restart the transfer. 3445 * 3446 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 3447 * should return a valid address to continue from, but as 3448 * some controllers are faulty, don't trust them. 3449 */ 3450 if (intmask & SDHCI_INT_DMA_END) { 3451 dma_addr_t dmastart, dmanow; 3452 3453 dmastart = sdhci_sdma_address(host); 3454 dmanow = dmastart + host->data->bytes_xfered; 3455 /* 3456 * Force update to the next DMA block boundary. 3457 */ 3458 dmanow = (dmanow & 3459 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 3460 SDHCI_DEFAULT_BOUNDARY_SIZE; 3461 host->data->bytes_xfered = dmanow - dmastart; 3462 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", 3463 &dmastart, host->data->bytes_xfered, &dmanow); 3464 sdhci_set_sdma_addr(host, dmanow); 3465 } 3466 3467 if (intmask & SDHCI_INT_DATA_END) { 3468 if (host->cmd == host->data_cmd) { 3469 /* 3470 * Data managed to finish before the 3471 * command completed. Make sure we do 3472 * things in the proper order. 
3473 */ 3474 host->data_early = 1; 3475 } else { 3476 sdhci_finish_data(host); 3477 } 3478 } 3479 } 3480 } 3481 3482 static inline bool sdhci_defer_done(struct sdhci_host *host, 3483 struct mmc_request *mrq) 3484 { 3485 struct mmc_data *data = mrq->data; 3486 3487 return host->pending_reset || host->always_defer_done || 3488 ((host->flags & SDHCI_REQ_USE_DMA) && data && 3489 data->host_cookie == COOKIE_MAPPED); 3490 } 3491 3492 static irqreturn_t sdhci_irq(int irq, void *dev_id) 3493 { 3494 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0}; 3495 irqreturn_t result = IRQ_NONE; 3496 struct sdhci_host *host = dev_id; 3497 u32 intmask, mask, unexpected = 0; 3498 int max_loops = 16; 3499 int i; 3500 3501 spin_lock(&host->lock); 3502 3503 if (host->runtime_suspended) { 3504 spin_unlock(&host->lock); 3505 return IRQ_NONE; 3506 } 3507 3508 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3509 if (!intmask || intmask == 0xffffffff) { 3510 result = IRQ_NONE; 3511 goto out; 3512 } 3513 3514 do { 3515 DBG("IRQ status 0x%08x\n", intmask); 3516 3517 if (host->ops->irq) { 3518 intmask = host->ops->irq(host, intmask); 3519 if (!intmask) 3520 goto cont; 3521 } 3522 3523 /* Clear selected interrupts. */ 3524 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3525 SDHCI_INT_BUS_POWER); 3526 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3527 3528 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3529 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 3530 SDHCI_CARD_PRESENT; 3531 3532 /* 3533 * There is an observation on i.mx esdhc: the INSERT 3534 * bit will be immediately set again when it gets 3535 * cleared, if a card is inserted. We have to mask 3536 * the irq to prevent an interrupt storm which would 3537 * freeze the system. The REMOVE bit gets into the 3538 * same situation. 3539 * 3540 * More testing is needed here to ensure it works 3541 * for other platforms though. 3542 */ 3543 host->ier &= ~(SDHCI_INT_CARD_INSERT | 3544 SDHCI_INT_CARD_REMOVE); 3545 host->ier |= present ?
SDHCI_INT_CARD_REMOVE : 3546 SDHCI_INT_CARD_INSERT; 3547 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3548 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3549 3550 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 3551 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 3552 3553 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 3554 SDHCI_INT_CARD_REMOVE); 3555 result = IRQ_WAKE_THREAD; 3556 } 3557 3558 if (intmask & SDHCI_INT_CMD_MASK) 3559 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); 3560 3561 if (intmask & SDHCI_INT_DATA_MASK) 3562 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 3563 3564 if (intmask & SDHCI_INT_BUS_POWER) 3565 pr_err("%s: Card is consuming too much power!\n", 3566 mmc_hostname(host->mmc)); 3567 3568 if (intmask & SDHCI_INT_RETUNE) 3569 mmc_retune_needed(host->mmc); 3570 3571 if ((intmask & SDHCI_INT_CARD_INT) && 3572 (host->ier & SDHCI_INT_CARD_INT)) { 3573 sdhci_enable_sdio_irq_nolock(host, false); 3574 sdio_signal_irq(host->mmc); 3575 } 3576 3577 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3578 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3579 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 3580 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 3581 3582 if (intmask) { 3583 unexpected |= intmask; 3584 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3585 } 3586 cont: 3587 if (result == IRQ_NONE) 3588 result = IRQ_HANDLED; 3589 3590 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3591 } while (intmask && --max_loops); 3592 3593 /* Determine if mrqs can be completed immediately */ 3594 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3595 struct mmc_request *mrq = host->mrqs_done[i]; 3596 3597 if (!mrq) 3598 continue; 3599 3600 if (sdhci_defer_done(host, mrq)) { 3601 result = IRQ_WAKE_THREAD; 3602 } else { 3603 mrqs_done[i] = mrq; 3604 host->mrqs_done[i] = NULL; 3605 } 3606 } 3607 out: 3608 if (host->deferred_cmd) 3609 result = IRQ_WAKE_THREAD; 3610 3611 spin_unlock(&host->lock); 3612 3613 /* Process mrqs ready for immediate completion */ 3614 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3615 if (!mrqs_done[i]) 3616 continue; 3617 3618 if (host->ops->request_done) 3619 host->ops->request_done(host, mrqs_done[i]); 3620 else 3621 mmc_request_done(host->mmc, mrqs_done[i]); 3622 } 3623 3624 if (unexpected) { 3625 pr_err("%s: Unexpected interrupt 0x%08x.\n", 3626 mmc_hostname(host->mmc), unexpected); 3627 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3628 sdhci_dumpregs(host); 3629 } 3630 3631 return result; 3632 } 3633 3634 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3635 { 3636 struct sdhci_host *host = dev_id; 3637 struct mmc_command *cmd; 3638 unsigned long flags; 3639 u32 isr; 3640 3641 while (!sdhci_request_done(host)) 3642 ; 3643 3644 spin_lock_irqsave(&host->lock, flags); 3645 3646 isr = host->thread_isr; 3647 host->thread_isr = 0; 3648 3649 cmd = host->deferred_cmd; 3650 if (cmd && !sdhci_send_command_retry(host, cmd, flags)) 3651 sdhci_finish_mrq(host, cmd->mrq); 3652 3653 spin_unlock_irqrestore(&host->lock, flags); 3654 3655 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3656 struct mmc_host *mmc = host->mmc; 3657 3658 mmc->ops->card_event(mmc); 3659 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3660 } 3661 3662 return IRQ_HANDLED; 3663 } 3664 3665 /*****************************************************************************\ 3666 * * 3667 * Suspend/resume * 3668 * * 3669 \*****************************************************************************/ 3670 3671 #ifdef CONFIG_PM 3672 3673 static bool 
sdhci_cd_irq_can_wakeup(struct sdhci_host *host) 3674 { 3675 return mmc_card_is_removable(host->mmc) && 3676 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3677 !mmc_can_gpio_cd(host->mmc); 3678 } 3679 3680 /* 3681 * To enable wakeup events, the corresponding events have to be enabled in 3682 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal 3683 * Table' in the SD Host Controller Standard Specification. 3684 * It is useless to restore SDHCI_INT_ENABLE state in 3685 * sdhci_disable_irq_wakeups() since it will be set by 3686 * sdhci_enable_card_detection() or sdhci_init(). 3687 */ 3688 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 3689 { 3690 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 3691 SDHCI_WAKE_ON_INT; 3692 u32 irq_val = 0; 3693 u8 wake_val = 0; 3694 u8 val; 3695 3696 if (sdhci_cd_irq_can_wakeup(host)) { 3697 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 3698 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 3699 } 3700 3701 if (mmc_card_wake_sdio_irq(host->mmc)) { 3702 wake_val |= SDHCI_WAKE_ON_INT; 3703 irq_val |= SDHCI_INT_CARD_INT; 3704 } 3705 3706 if (!irq_val) 3707 return false; 3708 3709 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3710 val &= ~mask; 3711 val |= wake_val; 3712 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3713 3714 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 3715 3716 host->irq_wake_enabled = !enable_irq_wake(host->irq); 3717 3718 return host->irq_wake_enabled; 3719 } 3720 3721 static void sdhci_disable_irq_wakeups(struct sdhci_host *host) 3722 { 3723 u8 val; 3724 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 3725 | SDHCI_WAKE_ON_INT; 3726 3727 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3728 val &= ~mask; 3729 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3730 3731 disable_irq_wake(host->irq); 3732 3733 host->irq_wake_enabled = false; 3734 } 3735 3736 int sdhci_suspend_host(struct sdhci_host *host) 3737 { 3738 sdhci_disable_card_detection(host); 3739 3740 mmc_retune_timer_stop(host->mmc); 3741 3742 if (!device_may_wakeup(mmc_dev(host->mmc)) || 3743 !sdhci_enable_irq_wakeups(host)) { 3744 host->ier = 0; 3745 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3746 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3747 free_irq(host->irq, host); 3748 } 3749 3750 return 0; 3751 } 3752 3753 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 3754 3755 int sdhci_resume_host(struct sdhci_host *host) 3756 { 3757 struct mmc_host *mmc = host->mmc; 3758 int ret = 0; 3759 3760 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3761 if (host->ops->enable_dma) 3762 host->ops->enable_dma(host); 3763 } 3764 3765 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) && 3766 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 3767 /* Card keeps power but host controller does not */ 3768 sdhci_init(host, 0); 3769 host->pwr = 0; 3770 host->clock = 0; 3771 mmc->ops->set_ios(mmc, &mmc->ios); 3772 } else { 3773 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER)); 3774 } 3775 3776 if (host->irq_wake_enabled) { 3777 sdhci_disable_irq_wakeups(host); 3778 } else { 3779 ret = request_threaded_irq(host->irq, sdhci_irq, 3780 sdhci_thread_irq, IRQF_SHARED, 3781 mmc_hostname(mmc), host); 3782 if (ret) 3783 return ret; 3784 } 3785 3786 sdhci_enable_card_detection(host); 3787 3788 return ret; 3789 } 3790 3791 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3792 3793 int sdhci_runtime_suspend_host(struct sdhci_host *host) 3794 { 3795 unsigned long flags; 3796 3797 mmc_retune_timer_stop(host->mmc); 3798 3799 spin_lock_irqsave(&host->lock, 
flags); 3800 host->ier &= SDHCI_INT_CARD_INT; 3801 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3802 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3803 spin_unlock_irqrestore(&host->lock, flags); 3804 3805 synchronize_hardirq(host->irq); 3806 3807 spin_lock_irqsave(&host->lock, flags); 3808 host->runtime_suspended = true; 3809 spin_unlock_irqrestore(&host->lock, flags); 3810 3811 return 0; 3812 } 3813 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 3814 3815 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) 3816 { 3817 struct mmc_host *mmc = host->mmc; 3818 unsigned long flags; 3819 int host_flags = host->flags; 3820 3821 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3822 if (host->ops->enable_dma) 3823 host->ops->enable_dma(host); 3824 } 3825 3826 sdhci_init(host, soft_reset); 3827 3828 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && 3829 mmc->ios.power_mode != MMC_POWER_OFF) { 3830 /* Force clock and power re-program */ 3831 host->pwr = 0; 3832 host->clock = 0; 3833 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); 3834 mmc->ops->set_ios(mmc, &mmc->ios); 3835 3836 if ((host_flags & SDHCI_PV_ENABLED) && 3837 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 3838 spin_lock_irqsave(&host->lock, flags); 3839 sdhci_enable_preset_value(host, true); 3840 spin_unlock_irqrestore(&host->lock, flags); 3841 } 3842 3843 if ((mmc->caps2 & MMC_CAP2_HS400_ES) && 3844 mmc->ops->hs400_enhanced_strobe) 3845 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); 3846 } 3847 3848 spin_lock_irqsave(&host->lock, flags); 3849 3850 host->runtime_suspended = false; 3851 3852 /* Enable SDIO IRQ */ 3853 if (sdio_irq_claimed(mmc)) 3854 sdhci_enable_sdio_irq_nolock(host, true); 3855 3856 /* Enable Card Detection */ 3857 sdhci_enable_card_detection(host); 3858 3859 spin_unlock_irqrestore(&host->lock, flags); 3860 3861 return 0; 3862 } 3863 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 3864 3865 #endif /* CONFIG_PM */ 3866 3867 /*****************************************************************************\ 3868 * * 3869 * Command Queue Engine (CQE) helpers * 3870 * * 3871 \*****************************************************************************/ 3872 3873 void sdhci_cqe_enable(struct mmc_host *mmc) 3874 { 3875 struct sdhci_host *host = mmc_priv(mmc); 3876 unsigned long flags; 3877 u8 ctrl; 3878 3879 spin_lock_irqsave(&host->lock, flags); 3880 3881 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 3882 ctrl &= ~SDHCI_CTRL_DMA_MASK; 3883 /* 3884 * Hosts from v4.10 onwards support the ADMA3 DMA type. 3885 * ADMA3 uses integrated descriptors, which suit command queuing 3886 * better since the command and transfer descriptors are fetched together.
3887 */ 3888 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) 3889 ctrl |= SDHCI_CTRL_ADMA3; 3890 else if (host->flags & SDHCI_USE_64_BIT_DMA) 3891 ctrl |= SDHCI_CTRL_ADMA64; 3892 else 3893 ctrl |= SDHCI_CTRL_ADMA32; 3894 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3895 3896 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3897 SDHCI_BLOCK_SIZE); 3898 3899 /* Set maximum timeout */ 3900 sdhci_set_timeout(host, NULL); 3901 3902 host->ier = host->cqe_ier; 3903 3904 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3905 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3906 3907 host->cqe_on = true; 3908 3909 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3910 mmc_hostname(mmc), host->ier, 3911 sdhci_readl(host, SDHCI_INT_STATUS)); 3912 3913 spin_unlock_irqrestore(&host->lock, flags); 3914 } 3915 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3916 3917 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3918 { 3919 struct sdhci_host *host = mmc_priv(mmc); 3920 unsigned long flags; 3921 3922 spin_lock_irqsave(&host->lock, flags); 3923 3924 sdhci_set_default_irqs(host); 3925 3926 host->cqe_on = false; 3927 3928 if (recovery) 3929 sdhci_reset_for(host, CQE_RECOVERY); 3930 3931 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 3932 mmc_hostname(mmc), host->ier, 3933 sdhci_readl(host, SDHCI_INT_STATUS)); 3934 3935 spin_unlock_irqrestore(&host->lock, flags); 3936 } 3937 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 3938 3939 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, 3940 int *data_error) 3941 { 3942 u32 mask; 3943 3944 if (!host->cqe_on) 3945 return false; 3946 3947 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) { 3948 *cmd_error = -EILSEQ; 3949 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3950 sdhci_err_stats_inc(host, CMD_CRC); 3951 } else if (intmask & SDHCI_INT_TIMEOUT) { 3952 *cmd_error = -ETIMEDOUT; 3953 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3954 } else 3955 *cmd_error = 0; 3956 3957 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) { 3958 *data_error = -EILSEQ; 3959 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3960 sdhci_err_stats_inc(host, DAT_CRC); 3961 } else if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3962 *data_error = -ETIMEDOUT; 3963 sdhci_err_stats_inc(host, DAT_TIMEOUT); 3964 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 3965 *data_error = -EIO; 3966 sdhci_err_stats_inc(host, ADMA); 3967 } else 3968 *data_error = 0; 3969 3970 /* Clear selected interrupts. 
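Only bits that belong to the CQE interrupt mask are acknowledged here; anything left over is reported as an unexpected interrupt further down.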
*/ 3971 mask = intmask & host->cqe_ier; 3972 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3973 3974 if (intmask & SDHCI_INT_BUS_POWER) 3975 pr_err("%s: Card is consuming too much power!\n", 3976 mmc_hostname(host->mmc)); 3977 3978 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); 3979 if (intmask) { 3980 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3981 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", 3982 mmc_hostname(host->mmc), intmask); 3983 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3984 sdhci_dumpregs(host); 3985 } 3986 3987 return true; 3988 } 3989 EXPORT_SYMBOL_GPL(sdhci_cqe_irq); 3990 3991 /*****************************************************************************\ 3992 * * 3993 * Device allocation/registration * 3994 * * 3995 \*****************************************************************************/ 3996 3997 struct sdhci_host *sdhci_alloc_host(struct device *dev, 3998 size_t priv_size) 3999 { 4000 struct mmc_host *mmc; 4001 struct sdhci_host *host; 4002 4003 WARN_ON(dev == NULL); 4004 4005 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 4006 if (!mmc) 4007 return ERR_PTR(-ENOMEM); 4008 4009 host = mmc_priv(mmc); 4010 host->mmc = mmc; 4011 host->mmc_host_ops = sdhci_ops; 4012 mmc->ops = &host->mmc_host_ops; 4013 4014 host->flags = SDHCI_SIGNALING_330; 4015 4016 host->cqe_ier = SDHCI_CQE_INT_MASK; 4017 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; 4018 4019 host->tuning_delay = -1; 4020 host->tuning_loop_count = MAX_TUNING_LOOP; 4021 4022 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; 4023 4024 /* 4025 * The DMA table descriptor count is calculated as the maximum 4026 * number of segments times 2, to allow for an alignment 4027 * descriptor for each segment, plus 1 for a nop end descriptor. 4028 */ 4029 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; 4030 host->max_adma = 65536; 4031 4032 host->max_timeout_count = 0xE; 4033 4034 return host; 4035 } 4036 4037 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 4038 4039 static int sdhci_set_dma_mask(struct sdhci_host *host) 4040 { 4041 struct mmc_host *mmc = host->mmc; 4042 struct device *dev = mmc_dev(mmc); 4043 int ret = -EINVAL; 4044 4045 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 4046 host->flags &= ~SDHCI_USE_64_BIT_DMA; 4047 4048 /* Try 64-bit mask if hardware is capable of it */ 4049 if (host->flags & SDHCI_USE_64_BIT_DMA) { 4050 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 4051 if (ret) { 4052 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 4053 mmc_hostname(mmc)); 4054 host->flags &= ~SDHCI_USE_64_BIT_DMA; 4055 } 4056 } 4057 4058 /* 32-bit mask as default & fallback */ 4059 if (ret) { 4060 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 4061 if (ret) 4062 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 4063 mmc_hostname(mmc)); 4064 } 4065 4066 return ret; 4067 } 4068 4069 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, 4070 const u32 *caps, const u32 *caps1) 4071 { 4072 u16 v; 4073 u64 dt_caps_mask = 0; 4074 u64 dt_caps = 0; 4075 4076 if (host->read_caps) 4077 return; 4078 4079 host->read_caps = true; 4080 4081 if (debug_quirks) 4082 host->quirks = debug_quirks; 4083 4084 if (debug_quirks2) 4085 host->quirks2 = debug_quirks2; 4086 4087 sdhci_reset_for_all(host); 4088 4089 if (host->v4_mode) 4090 sdhci_do_enable_v4_mode(host); 4091 4092 device_property_read_u64(mmc_dev(host->mmc), 4093 "sdhci-caps-mask", &dt_caps_mask); 4094 device_property_read_u64(mmc_dev(host->mmc), 4095 "sdhci-caps", &dt_caps); 4096 4097 v = ver ? 
*ver : sdhci_readw(host, SDHCI_HOST_VERSION); 4098 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; 4099 4100 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS) 4101 return; 4102 4103 if (caps) { 4104 host->caps = *caps; 4105 } else { 4106 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); 4107 host->caps &= ~lower_32_bits(dt_caps_mask); 4108 host->caps |= lower_32_bits(dt_caps); 4109 } 4110 4111 if (host->version < SDHCI_SPEC_300) 4112 return; 4113 4114 if (caps1) { 4115 host->caps1 = *caps1; 4116 } else { 4117 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); 4118 host->caps1 &= ~upper_32_bits(dt_caps_mask); 4119 host->caps1 |= upper_32_bits(dt_caps); 4120 } 4121 } 4122 EXPORT_SYMBOL_GPL(__sdhci_read_caps); 4123 4124 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host) 4125 { 4126 struct mmc_host *mmc = host->mmc; 4127 unsigned int max_blocks; 4128 unsigned int bounce_size; 4129 int ret; 4130 4131 /* 4132 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer 4133 * has diminishing returns, this is probably because SD/MMC 4134 * cards are usually optimized to handle this size of requests. 4135 */ 4136 bounce_size = SZ_64K; 4137 /* 4138 * Adjust downwards to maximum request size if this is less 4139 * than our segment size, else hammer down the maximum 4140 * request size to the maximum buffer size. 4141 */ 4142 if (mmc->max_req_size < bounce_size) 4143 bounce_size = mmc->max_req_size; 4144 max_blocks = bounce_size / 512; 4145 4146 /* 4147 * When we just support one segment, we can get significant 4148 * speedups by the help of a bounce buffer to group scattered 4149 * reads/writes together. 4150 */ 4151 host->bounce_buffer = devm_kmalloc(mmc_dev(mmc), 4152 bounce_size, 4153 GFP_KERNEL); 4154 if (!host->bounce_buffer) { 4155 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", 4156 mmc_hostname(mmc), 4157 bounce_size); 4158 /* 4159 * Exiting with zero here makes sure we proceed with 4160 * mmc->max_segs == 1. 4161 */ 4162 return; 4163 } 4164 4165 host->bounce_addr = dma_map_single(mmc_dev(mmc), 4166 host->bounce_buffer, 4167 bounce_size, 4168 DMA_BIDIRECTIONAL); 4169 ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr); 4170 if (ret) { 4171 devm_kfree(mmc_dev(mmc), host->bounce_buffer); 4172 host->bounce_buffer = NULL; 4173 /* Again fall back to max_segs == 1 */ 4174 return; 4175 } 4176 4177 host->bounce_buffer_size = bounce_size; 4178 4179 /* Lie about this since we're bouncing */ 4180 mmc->max_segs = max_blocks; 4181 mmc->max_seg_size = bounce_size; 4182 mmc->max_req_size = bounce_size; 4183 4184 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", 4185 mmc_hostname(mmc), max_blocks, bounce_size); 4186 } 4187 4188 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host) 4189 { 4190 /* 4191 * According to SD Host Controller spec v4.10, bit[27] added from 4192 * version 4.10 in Capabilities Register is used as 64-bit System 4193 * Address support for V4 mode. 
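 *
 * V4 mode itself is opt-in: a glue driver that wants the V4-only
 * behaviour, including this 64-bit addressing variant, is expected to
 * call sdhci_enable_v4_mode() before host setup. Illustrative sketch
 * (the surrounding probe code is assumed, not shown here):
 *
 *	sdhci_enable_v4_mode(host);
 *	ret = sdhci_add_host(host);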
4194 */ 4195 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) 4196 return host->caps & SDHCI_CAN_64BIT_V4; 4197 4198 return host->caps & SDHCI_CAN_64BIT; 4199 } 4200 4201 int sdhci_setup_host(struct sdhci_host *host) 4202 { 4203 struct mmc_host *mmc; 4204 u32 max_current_caps; 4205 unsigned int ocr_avail; 4206 unsigned int override_timeout_clk; 4207 u32 max_clk; 4208 int ret = 0; 4209 bool enable_vqmmc = false; 4210 4211 WARN_ON(host == NULL); 4212 if (host == NULL) 4213 return -EINVAL; 4214 4215 mmc = host->mmc; 4216 4217 /* 4218 * If there are external regulators, get them. Note this must be done 4219 * early before resetting the host and reading the capabilities so that 4220 * the host can take the appropriate action if regulators are not 4221 * available. 4222 */ 4223 if (!mmc->supply.vqmmc) { 4224 ret = mmc_regulator_get_supply(mmc); 4225 if (ret) 4226 return ret; 4227 enable_vqmmc = true; 4228 } 4229 4230 DBG("Version: 0x%08x | Present: 0x%08x\n", 4231 sdhci_readw(host, SDHCI_HOST_VERSION), 4232 sdhci_readl(host, SDHCI_PRESENT_STATE)); 4233 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", 4234 sdhci_readl(host, SDHCI_CAPABILITIES), 4235 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 4236 4237 sdhci_read_caps(host); 4238 4239 override_timeout_clk = host->timeout_clk; 4240 4241 if (host->version > SDHCI_SPEC_420) { 4242 pr_err("%s: Unknown controller version (%d). You may experience problems.\n", 4243 mmc_hostname(mmc), host->version); 4244 } 4245 4246 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 4247 host->flags |= SDHCI_USE_SDMA; 4248 else if (!(host->caps & SDHCI_CAN_DO_SDMA)) 4249 DBG("Controller doesn't have SDMA capability\n"); 4250 else 4251 host->flags |= SDHCI_USE_SDMA; 4252 4253 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 4254 (host->flags & SDHCI_USE_SDMA)) { 4255 DBG("Disabling DMA as it is marked broken\n"); 4256 host->flags &= ~SDHCI_USE_SDMA; 4257 } 4258 4259 if ((host->version >= SDHCI_SPEC_200) && 4260 (host->caps & SDHCI_CAN_DO_ADMA2)) 4261 host->flags |= SDHCI_USE_ADMA; 4262 4263 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 4264 (host->flags & SDHCI_USE_ADMA)) { 4265 DBG("Disabling ADMA as it is marked broken\n"); 4266 host->flags &= ~SDHCI_USE_ADMA; 4267 } 4268 4269 if (sdhci_can_64bit_dma(host)) 4270 host->flags |= SDHCI_USE_64_BIT_DMA; 4271 4272 if (host->use_external_dma) { 4273 ret = sdhci_external_dma_init(host); 4274 if (ret == -EPROBE_DEFER) 4275 goto unreg; 4276 /* 4277 * Fall back to use the DMA/PIO integrated in standard SDHCI 4278 * instead of external DMA devices. 
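 *
 * host->use_external_dma is itself opt-in: a glue driver whose SoC
 * routes this controller through a generic dmaengine channel requests
 * it before setup, roughly (illustrative sketch):
 *
 *	sdhci_switch_external_dma(host, true);
 *	ret = sdhci_setup_host(host);
 *
 * Only -EPROBE_DEFER from the channel lookup is propagated; any other
 * failure falls back to the integrated DMA/PIO path handled below.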
4279 */ 4280 else if (ret) 4281 sdhci_switch_external_dma(host, false); 4282 /* Disable internal DMA sources */ 4283 else 4284 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4285 } 4286 4287 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 4288 if (host->ops->set_dma_mask) 4289 ret = host->ops->set_dma_mask(host); 4290 else 4291 ret = sdhci_set_dma_mask(host); 4292 4293 if (!ret && host->ops->enable_dma) 4294 ret = host->ops->enable_dma(host); 4295 4296 if (ret) { 4297 pr_warn("%s: No suitable DMA available - falling back to PIO\n", 4298 mmc_hostname(mmc)); 4299 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4300 4301 ret = 0; 4302 } 4303 } 4304 4305 /* SDMA does not support 64-bit DMA if v4 mode not set */ 4306 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode) 4307 host->flags &= ~SDHCI_USE_SDMA; 4308 4309 if (host->flags & SDHCI_USE_ADMA) { 4310 dma_addr_t dma; 4311 void *buf; 4312 4313 if (!(host->flags & SDHCI_USE_64_BIT_DMA)) 4314 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ; 4315 else if (!host->alloc_desc_sz) 4316 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); 4317 4318 host->desc_sz = host->alloc_desc_sz; 4319 host->adma_table_sz = host->adma_table_cnt * host->desc_sz; 4320 4321 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; 4322 /* 4323 * Use zalloc to zero the reserved high 32-bits of 128-bit 4324 * descriptors so that they never need to be written. 4325 */ 4326 buf = dma_alloc_coherent(mmc_dev(mmc), 4327 host->align_buffer_sz + host->adma_table_sz, 4328 &dma, GFP_KERNEL); 4329 if (!buf) { 4330 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 4331 mmc_hostname(mmc)); 4332 host->flags &= ~SDHCI_USE_ADMA; 4333 } else if ((dma + host->align_buffer_sz) & 4334 (SDHCI_ADMA2_DESC_ALIGN - 1)) { 4335 pr_warn("%s: unable to allocate aligned ADMA descriptor\n", 4336 mmc_hostname(mmc)); 4337 host->flags &= ~SDHCI_USE_ADMA; 4338 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4339 host->adma_table_sz, buf, dma); 4340 } else { 4341 host->align_buffer = buf; 4342 host->align_addr = dma; 4343 4344 host->adma_table = buf + host->align_buffer_sz; 4345 host->adma_addr = dma + host->align_buffer_sz; 4346 } 4347 } 4348 4349 /* 4350 * If we use DMA, then it's up to the caller to set the DMA 4351 * mask, but PIO does not need the hw shim so we set a new 4352 * mask here in that case. 4353 */ 4354 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 4355 host->dma_mask = DMA_BIT_MASK(64); 4356 mmc_dev(mmc)->dma_mask = &host->dma_mask; 4357 } 4358 4359 if (host->version >= SDHCI_SPEC_300) 4360 host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps); 4361 else 4362 host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps); 4363 4364 host->max_clk *= 1000000; 4365 if (host->max_clk == 0 || host->quirks & 4366 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 4367 if (!host->ops->get_max_clock) { 4368 pr_err("%s: Hardware doesn't specify base clock frequency.\n", 4369 mmc_hostname(mmc)); 4370 ret = -ENODEV; 4371 goto undma; 4372 } 4373 host->max_clk = host->ops->get_max_clock(host); 4374 } 4375 4376 /* 4377 * In case of Host Controller v3.00, find out whether clock 4378 * multiplier is supported. 4379 */ 4380 host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1); 4381 4382 /* 4383 * In case the value in Clock Multiplier is 0, then programmable 4384 * clock mode is not supported, otherwise the actual clock 4385 * multiplier is one more than the value of Clock Multiplier 4386 * in the Capabilities Register. 
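 *
 * Worked example (numbers purely illustrative): a controller whose
 * Capabilities report a 48 MHz base clock and a Clock Multiplier field
 * of 3 ends up with host->clk_mul = 4 below, so when no get_min_clock
 * hook is provided the ceiling considered for mmc->f_max becomes
 * 48 MHz * 4 = 192 MHz.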
4387 */ 4388 if (host->clk_mul) 4389 host->clk_mul += 1; 4390 4391 /* 4392 * Set host parameters. 4393 */ 4394 max_clk = host->max_clk; 4395 4396 if (host->ops->get_min_clock) 4397 mmc->f_min = host->ops->get_min_clock(host); 4398 else if (host->version >= SDHCI_SPEC_300) { 4399 if (host->clk_mul) 4400 max_clk = host->max_clk * host->clk_mul; 4401 /* 4402 * Divided Clock Mode minimum clock rate is always less than 4403 * Programmable Clock Mode minimum clock rate. 4404 */ 4405 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 4406 } else 4407 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 4408 4409 if (!mmc->f_max || mmc->f_max > max_clk) 4410 mmc->f_max = max_clk; 4411 4412 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 4413 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps); 4414 4415 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) 4416 host->timeout_clk *= 1000; 4417 4418 if (host->timeout_clk == 0) { 4419 if (!host->ops->get_timeout_clock) { 4420 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", 4421 mmc_hostname(mmc)); 4422 ret = -ENODEV; 4423 goto undma; 4424 } 4425 4426 host->timeout_clk = 4427 DIV_ROUND_UP(host->ops->get_timeout_clock(host), 4428 1000); 4429 } 4430 4431 if (override_timeout_clk) 4432 host->timeout_clk = override_timeout_clk; 4433 4434 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 4435 host->ops->get_max_timeout_count(host) : 1 << 27; 4436 mmc->max_busy_timeout /= host->timeout_clk; 4437 } 4438 4439 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT && 4440 !host->ops->get_max_timeout_count) 4441 mmc->max_busy_timeout = 0; 4442 4443 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23; 4444 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 4445 4446 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 4447 host->flags |= SDHCI_AUTO_CMD12; 4448 4449 /* 4450 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO. 4451 * For v4 mode, SDMA may use Auto-CMD23 as well. 4452 */ 4453 if ((host->version >= SDHCI_SPEC_300) && 4454 ((host->flags & SDHCI_USE_ADMA) || 4455 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) && 4456 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { 4457 host->flags |= SDHCI_AUTO_CMD23; 4458 DBG("Auto-CMD23 available\n"); 4459 } else { 4460 DBG("Auto-CMD23 unavailable\n"); 4461 } 4462 4463 /* 4464 * A controller may support 8-bit width, but the board itself 4465 * might not have the pins brought out. Boards that support 4466 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 4467 * their platform code before calling sdhci_add_host(), and we 4468 * won't assume 8-bit width for hosts without that CAP. 
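 *
 * Illustrative platform glue (the same effect is obtained from DT via
 * mmc_of_parse() and a bus-width = <8> property):
 *
 *	host->mmc->caps |= MMC_CAP_8_BIT_DATA;
 *	ret = sdhci_add_host(host);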
4469 */ 4470 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 4471 mmc->caps |= MMC_CAP_4_BIT_DATA; 4472 4473 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 4474 mmc->caps &= ~MMC_CAP_CMD23; 4475 4476 if (host->caps & SDHCI_CAN_DO_HISPD) 4477 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 4478 4479 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 4480 mmc_card_is_removable(mmc) && 4481 mmc_gpio_get_cd(mmc) < 0) 4482 mmc->caps |= MMC_CAP_NEEDS_POLL; 4483 4484 if (!IS_ERR(mmc->supply.vqmmc)) { 4485 if (enable_vqmmc) { 4486 ret = regulator_enable(mmc->supply.vqmmc); 4487 host->sdhci_core_to_disable_vqmmc = !ret; 4488 } 4489 4490 /* If vqmmc provides no 1.8V signalling, then there's no UHS */ 4491 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 4492 1950000)) 4493 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | 4494 SDHCI_SUPPORT_SDR50 | 4495 SDHCI_SUPPORT_DDR50); 4496 4497 /* In eMMC case vqmmc might be a fixed 1.8V regulator */ 4498 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000, 4499 3600000)) 4500 host->flags &= ~SDHCI_SIGNALING_330; 4501 4502 if (ret) { 4503 pr_warn("%s: Failed to enable vqmmc regulator: %d\n", 4504 mmc_hostname(mmc), ret); 4505 mmc->supply.vqmmc = ERR_PTR(-EINVAL); 4506 } 4507 4508 } 4509 4510 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) { 4511 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 4512 SDHCI_SUPPORT_DDR50); 4513 /* 4514 * The SDHCI controller in a SoC might support HS200/HS400 4515 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property), 4516 * but if the board is modeled such that the IO lines are not 4517 * connected to 1.8v then HS200/HS400 cannot be supported. 4518 * Disable HS200/HS400 if the board does not have 1.8v connected 4519 * to the IO lines. (Applicable for other modes in 1.8v) 4520 */ 4521 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES); 4522 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS); 4523 } 4524 4525 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ 4526 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 4527 SDHCI_SUPPORT_DDR50)) 4528 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; 4529 4530 /* SDR104 supports also implies SDR50 support */ 4531 if (host->caps1 & SDHCI_SUPPORT_SDR104) { 4532 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; 4533 /* SD3.0: SDR104 is supported so (for eMMC) the caps2 4534 * field can be promoted to support HS200. 4535 */ 4536 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) 4537 mmc->caps2 |= MMC_CAP2_HS200; 4538 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) { 4539 mmc->caps |= MMC_CAP_UHS_SDR50; 4540 } 4541 4542 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && 4543 (host->caps1 & SDHCI_SUPPORT_HS400)) 4544 mmc->caps2 |= MMC_CAP2_HS400; 4545 4546 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && 4547 (IS_ERR(mmc->supply.vqmmc) || 4548 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000, 4549 1300000))) 4550 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V; 4551 4552 if ((host->caps1 & SDHCI_SUPPORT_DDR50) && 4553 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) 4554 mmc->caps |= MMC_CAP_UHS_DDR50; 4555 4556 /* Does the host need tuning for SDR50? 
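 * Capabilities_1 carries a "Use Tuning for SDR50" bit for this: when a
 * controller sets it, tuning has to be performed before SDR50 is used.
 * The core records that here as SDHCI_SDR50_NEEDS_TUNING and later
 * honours it when deciding whether SDR50 requires a tuning sequence.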
*/ 4557 if (host->caps1 & SDHCI_USE_SDR50_TUNING) 4558 host->flags |= SDHCI_SDR50_NEEDS_TUNING; 4559 4560 /* Driver Type(s) (A, C, D) supported by the host */ 4561 if (host->caps1 & SDHCI_DRIVER_TYPE_A) 4562 mmc->caps |= MMC_CAP_DRIVER_TYPE_A; 4563 if (host->caps1 & SDHCI_DRIVER_TYPE_C) 4564 mmc->caps |= MMC_CAP_DRIVER_TYPE_C; 4565 if (host->caps1 & SDHCI_DRIVER_TYPE_D) 4566 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 4567 4568 /* Initial value for re-tuning timer count */ 4569 host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK, 4570 host->caps1); 4571 4572 /* 4573 * In case Re-tuning Timer is not disabled, the actual value of 4574 * re-tuning timer will be 2 ^ (n - 1). 4575 */ 4576 if (host->tuning_count) 4577 host->tuning_count = 1 << (host->tuning_count - 1); 4578 4579 /* Re-tuning mode supported by the Host Controller */ 4580 host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1); 4581 4582 ocr_avail = 0; 4583 4584 /* 4585 * According to SD Host Controller spec v3.00, if the Host System 4586 * can afford more than 150mA, Host Driver should set XPC to 1. Also 4587 * the value is meaningful only if Voltage Support in the Capabilities 4588 * register is set. The actual current value is 4 times the register 4589 * value. 4590 */ 4591 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); 4592 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) { 4593 int curr = regulator_get_current_limit(mmc->supply.vmmc); 4594 if (curr > 0) { 4595 4596 /* convert to SDHCI_MAX_CURRENT format */ 4597 curr = curr/1000; /* convert to mA */ 4598 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; 4599 4600 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); 4601 max_current_caps = 4602 FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) | 4603 FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) | 4604 FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr); 4605 } 4606 } 4607 4608 if (host->caps & SDHCI_CAN_VDD_330) { 4609 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 4610 4611 mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK, 4612 max_current_caps) * 4613 SDHCI_MAX_CURRENT_MULTIPLIER; 4614 } 4615 if (host->caps & SDHCI_CAN_VDD_300) { 4616 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 4617 4618 mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK, 4619 max_current_caps) * 4620 SDHCI_MAX_CURRENT_MULTIPLIER; 4621 } 4622 if (host->caps & SDHCI_CAN_VDD_180) { 4623 ocr_avail |= MMC_VDD_165_195; 4624 4625 mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK, 4626 max_current_caps) * 4627 SDHCI_MAX_CURRENT_MULTIPLIER; 4628 } 4629 4630 /* If OCR set by host, use it instead. */ 4631 if (host->ocr_mask) 4632 ocr_avail = host->ocr_mask; 4633 4634 /* If OCR set by external regulators, give it highest prio. 
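 *
 * mmc->ocr_avail is typically non-zero here when mmc_regulator_get_supply()
 * found a vmmc supply earlier and translated its voltage range into the
 * corresponding MMC_VDD_* bits. Illustrative example (assuming a fixed
 * 3.3 V vmmc regulator): the resulting mask covers only the 3.3 V range,
 * and that mask then takes precedence over both the capability-derived
 * bits and host->ocr_mask set above.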
*/ 4635 if (mmc->ocr_avail) 4636 ocr_avail = mmc->ocr_avail; 4637 4638 mmc->ocr_avail = ocr_avail; 4639 mmc->ocr_avail_sdio = ocr_avail; 4640 if (host->ocr_avail_sdio) 4641 mmc->ocr_avail_sdio &= host->ocr_avail_sdio; 4642 mmc->ocr_avail_sd = ocr_avail; 4643 if (host->ocr_avail_sd) 4644 mmc->ocr_avail_sd &= host->ocr_avail_sd; 4645 else /* normal SD controllers don't support 1.8V */ 4646 mmc->ocr_avail_sd &= ~MMC_VDD_165_195; 4647 mmc->ocr_avail_mmc = ocr_avail; 4648 if (host->ocr_avail_mmc) 4649 mmc->ocr_avail_mmc &= host->ocr_avail_mmc; 4650 4651 if (mmc->ocr_avail == 0) { 4652 pr_err("%s: Hardware doesn't report any support voltages.\n", 4653 mmc_hostname(mmc)); 4654 ret = -ENODEV; 4655 goto unreg; 4656 } 4657 4658 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | 4659 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | 4660 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) || 4661 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V))) 4662 host->flags |= SDHCI_SIGNALING_180; 4663 4664 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V) 4665 host->flags |= SDHCI_SIGNALING_120; 4666 4667 spin_lock_init(&host->lock); 4668 4669 /* 4670 * Maximum number of sectors in one transfer. Limited by SDMA boundary 4671 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this 4672 * is less anyway. 4673 */ 4674 mmc->max_req_size = 524288; 4675 4676 /* 4677 * Maximum number of segments. Depends on if the hardware 4678 * can do scatter/gather or not. 4679 */ 4680 if (host->flags & SDHCI_USE_ADMA) { 4681 mmc->max_segs = SDHCI_MAX_SEGS; 4682 } else if (host->flags & SDHCI_USE_SDMA) { 4683 mmc->max_segs = 1; 4684 mmc->max_req_size = min_t(size_t, mmc->max_req_size, 4685 dma_max_mapping_size(mmc_dev(mmc))); 4686 } else { /* PIO */ 4687 mmc->max_segs = SDHCI_MAX_SEGS; 4688 } 4689 4690 /* 4691 * Maximum segment size. Could be one segment with the maximum number 4692 * of bytes. When doing hardware scatter/gather, each entry cannot 4693 * be larger than 64 KiB though. 4694 */ 4695 if (host->flags & SDHCI_USE_ADMA) { 4696 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) { 4697 host->max_adma = 65532; /* 32-bit alignment */ 4698 mmc->max_seg_size = 65535; 4699 } else { 4700 mmc->max_seg_size = 65536; 4701 } 4702 } else { 4703 mmc->max_seg_size = mmc->max_req_size; 4704 } 4705 4706 /* 4707 * Maximum block size. This varies from controller to controller and 4708 * is specified in the capabilities register. 4709 */ 4710 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { 4711 mmc->max_blk_size = 2; 4712 } else { 4713 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >> 4714 SDHCI_MAX_BLOCK_SHIFT; 4715 if (mmc->max_blk_size >= 3) { 4716 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n", 4717 mmc_hostname(mmc)); 4718 mmc->max_blk_size = 0; 4719 } 4720 } 4721 4722 mmc->max_blk_size = 512 << mmc->max_blk_size; 4723 4724 /* 4725 * Maximum block count. 4726 */ 4727 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 
1 : 65535; 4728 4729 if (mmc->max_segs == 1) 4730 /* This may alter mmc->*_blk_* parameters */ 4731 sdhci_allocate_bounce_buffer(host); 4732 4733 return 0; 4734 4735 unreg: 4736 if (host->sdhci_core_to_disable_vqmmc) 4737 regulator_disable(mmc->supply.vqmmc); 4738 undma: 4739 if (host->align_buffer) 4740 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4741 host->adma_table_sz, host->align_buffer, 4742 host->align_addr); 4743 host->adma_table = NULL; 4744 host->align_buffer = NULL; 4745 4746 return ret; 4747 } 4748 EXPORT_SYMBOL_GPL(sdhci_setup_host); 4749 4750 void sdhci_cleanup_host(struct sdhci_host *host) 4751 { 4752 struct mmc_host *mmc = host->mmc; 4753 4754 if (host->sdhci_core_to_disable_vqmmc) 4755 regulator_disable(mmc->supply.vqmmc); 4756 4757 if (host->align_buffer) 4758 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4759 host->adma_table_sz, host->align_buffer, 4760 host->align_addr); 4761 4762 if (host->use_external_dma) 4763 sdhci_external_dma_release(host); 4764 4765 host->adma_table = NULL; 4766 host->align_buffer = NULL; 4767 } 4768 EXPORT_SYMBOL_GPL(sdhci_cleanup_host); 4769 4770 int __sdhci_add_host(struct sdhci_host *host) 4771 { 4772 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI; 4773 struct mmc_host *mmc = host->mmc; 4774 int ret; 4775 4776 if ((mmc->caps2 & MMC_CAP2_CQE) && 4777 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) { 4778 mmc->caps2 &= ~MMC_CAP2_CQE; 4779 mmc->cqe_ops = NULL; 4780 } 4781 4782 host->complete_wq = alloc_workqueue("sdhci", flags, 0); 4783 if (!host->complete_wq) 4784 return -ENOMEM; 4785 4786 INIT_WORK(&host->complete_work, sdhci_complete_work); 4787 4788 timer_setup(&host->timer, sdhci_timeout_timer, 0); 4789 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0); 4790 4791 init_waitqueue_head(&host->buf_ready_int); 4792 4793 sdhci_init(host, 0); 4794 4795 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, 4796 IRQF_SHARED, mmc_hostname(mmc), host); 4797 if (ret) { 4798 pr_err("%s: Failed to request IRQ %d: %d\n", 4799 mmc_hostname(mmc), host->irq, ret); 4800 goto unwq; 4801 } 4802 4803 ret = sdhci_led_register(host); 4804 if (ret) { 4805 pr_err("%s: Failed to register LED device: %d\n", 4806 mmc_hostname(mmc), ret); 4807 goto unirq; 4808 } 4809 4810 ret = mmc_add_host(mmc); 4811 if (ret) 4812 goto unled; 4813 4814 pr_info("%s: SDHCI controller on %s [%s] using %s\n", 4815 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 4816 host->use_external_dma ? "External DMA" : 4817 (host->flags & SDHCI_USE_ADMA) ? 4818 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : 4819 (host->flags & SDHCI_USE_SDMA) ? 
"DMA" : "PIO"); 4820 4821 sdhci_enable_card_detection(host); 4822 4823 return 0; 4824 4825 unled: 4826 sdhci_led_unregister(host); 4827 unirq: 4828 sdhci_reset_for_all(host); 4829 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4830 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4831 free_irq(host->irq, host); 4832 unwq: 4833 destroy_workqueue(host->complete_wq); 4834 4835 return ret; 4836 } 4837 EXPORT_SYMBOL_GPL(__sdhci_add_host); 4838 4839 int sdhci_add_host(struct sdhci_host *host) 4840 { 4841 int ret; 4842 4843 ret = sdhci_setup_host(host); 4844 if (ret) 4845 return ret; 4846 4847 ret = __sdhci_add_host(host); 4848 if (ret) 4849 goto cleanup; 4850 4851 return 0; 4852 4853 cleanup: 4854 sdhci_cleanup_host(host); 4855 4856 return ret; 4857 } 4858 EXPORT_SYMBOL_GPL(sdhci_add_host); 4859 4860 void sdhci_remove_host(struct sdhci_host *host, int dead) 4861 { 4862 struct mmc_host *mmc = host->mmc; 4863 unsigned long flags; 4864 4865 if (dead) { 4866 spin_lock_irqsave(&host->lock, flags); 4867 4868 host->flags |= SDHCI_DEVICE_DEAD; 4869 4870 if (sdhci_has_requests(host)) { 4871 pr_err("%s: Controller removed during " 4872 " transfer!\n", mmc_hostname(mmc)); 4873 sdhci_error_out_mrqs(host, -ENOMEDIUM); 4874 } 4875 4876 spin_unlock_irqrestore(&host->lock, flags); 4877 } 4878 4879 sdhci_disable_card_detection(host); 4880 4881 mmc_remove_host(mmc); 4882 4883 sdhci_led_unregister(host); 4884 4885 if (!dead) 4886 sdhci_reset_for_all(host); 4887 4888 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4889 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4890 free_irq(host->irq, host); 4891 4892 del_timer_sync(&host->timer); 4893 del_timer_sync(&host->data_timer); 4894 4895 destroy_workqueue(host->complete_wq); 4896 4897 if (host->sdhci_core_to_disable_vqmmc) 4898 regulator_disable(mmc->supply.vqmmc); 4899 4900 if (host->align_buffer) 4901 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4902 host->adma_table_sz, host->align_buffer, 4903 host->align_addr); 4904 4905 if (host->use_external_dma) 4906 sdhci_external_dma_release(host); 4907 4908 host->adma_table = NULL; 4909 host->align_buffer = NULL; 4910 } 4911 4912 EXPORT_SYMBOL_GPL(sdhci_remove_host); 4913 4914 void sdhci_free_host(struct sdhci_host *host) 4915 { 4916 mmc_free_host(host->mmc); 4917 } 4918 4919 EXPORT_SYMBOL_GPL(sdhci_free_host); 4920 4921 /*****************************************************************************\ 4922 * * 4923 * Driver init/exit * 4924 * * 4925 \*****************************************************************************/ 4926 4927 static int __init sdhci_drv_init(void) 4928 { 4929 pr_info(DRIVER_NAME 4930 ": Secure Digital Host Controller Interface driver\n"); 4931 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 4932 4933 return 0; 4934 } 4935 4936 static void __exit sdhci_drv_exit(void) 4937 { 4938 } 4939 4940 module_init(sdhci_drv_init); 4941 module_exit(sdhci_drv_exit); 4942 4943 module_param(debug_quirks, uint, 0444); 4944 module_param(debug_quirks2, uint, 0444); 4945 4946 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 4947 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 4948 MODULE_LICENSE("GPL"); 4949 4950 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 4951 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 4952