/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1*/
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1*/

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000

static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static int dw_mci_card_busy(struct mmc_host *mmc);
static int dw_mci_get_cd(struct mmc_host *mmc);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	/* Dump the live register values, not the register offsets */
	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy. Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
			if (time_after(jiffies, timeout)) {
				/* Command will fail; we'll pass error then */
				dev_err(host->dev, "Busy; trying anyway\n");
				break;
			}
			udelay(10);
		}
	}
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	unsigned int desc_len;
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *desc_first, *desc_last, *desc;

		desc_first = desc_last = desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++) {
			unsigned int length = sg_dma_len(&data->sg[i]);

			u64 mem_addr = sg_dma_address(&data->sg[i]);

			for ( ; length ; desc++) {
				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
					   length : DW_MCI_DESC_DATA_LENGTH;

				length -= desc_len;

				/*
				 * Set the OWN bit and disable interrupts
				 * for this descriptor
				 */
				desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
							IDMAC_DES0_CH;

				/* Buffer length */
				IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

				/* Physical address to DMA to/from */
				desc->des4 = mem_addr & 0xffffffff;
				desc->des5 = mem_addr >> 32;

				/* Update physical address for the next desc */
				mem_addr += desc_len;

				/* Save pointer to the last descriptor */
				desc_last = desc;
			}
		}

		/* Set first descriptor */
		desc_first->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc_last->des0 |= IDMAC_DES0_LD;

	} else {
		struct idmac_desc *desc_first, *desc_last, *desc;

		desc_first = desc_last = desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++) {
			unsigned int length = sg_dma_len(&data->sg[i]);

			u32 mem_addr = sg_dma_address(&data->sg[i]);

			for ( ; length ; desc++) {
				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
					   length : DW_MCI_DESC_DATA_LENGTH;

				length -= desc_len;

				/*
				 * Set the OWN bit and disable interrupts
				 * for this descriptor
				 */
				desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
							 IDMAC_DES0_DIC |
							 IDMAC_DES0_CH);

				/* Buffer length */
				IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

				/* Physical address to DMA to/from */
				desc->des2 = cpu_to_le32(mem_addr);

				/* Update physical address for the next desc */
				mem_addr += desc_len;

				/* Save pointer to the last descriptor */
				desc_last = desc;
			}
		}

		/* Set first descriptor */
		desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

		/* Set last descriptor */
		desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
						 IDMAC_DES0_DIC));
		desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
	}

	wmb(); /* drain writebuffer */
}

static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

	return 0;
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
	if (!host->dms->ch) {
		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return -ENXIO;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}
		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* PIO should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}

static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
	    (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * The card write threshold was introduced in 2.80a.
	 * It's used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
		!(host->timing != MMC_TIMING_MMC_HS400))
		return;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If the current block size is the same as the previous size,
	 * there is no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		/* We can't do DMA */
		dev_err(host->dev, "%s: failed to start DMA.\n", __func__);
		return -ENODEV;
	}

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

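		/*
		 * DMA setup failed, so this transfer will be driven by PIO
		 * from the TXDR/RXDR interrupts unmasked above.
		 */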
		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if (clock != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating that the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				  jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return, if we failed to turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage. Note that some instances of dw_mmc may use
	 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly. Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);

		if (ret) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d - %s V\n",
				ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if ((mmc->caps & MMC_CAP_NEEDS_POLL) || !mmc_card_is_removable(mmc))
		present = 1;
	else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}

static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle. According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset		= dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
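	/*
	 * Call the core's completion handler without the host lock held;
	 * mmc_request_done() may queue a new request from this context.
	 */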
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

static void dw_mci_set_drto(struct dw_mci *host)
{
	unsigned int drto_clks;
	unsigned int drto_ms;

	drto_clks = mci_readl(host, TMOUT) >> 8;
	drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);

	/* add a bit spare time */
	drto_ms += 10;

	mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
}

static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/*
				 * During UHS tuning sequence, sending the stop
				 * command after the response CRC error would
				 * throw the system into a confused state
				 * causing all future tuning phases to report
				 * failure.
				 *
				 * In such case controller will move into a data
				 * transfer state after a response error or
				 * response CRC error. Let's let that finish
				 * before trying to send a stop, so we'll go to
				 * STATE_SENDING_DATA.
				 *
				 * Although letting the data transfer take place
				 * will waste a bit of time (we already know
				 * the command was bad), it can't cause any
				 * errors since it's possible it would have
				 * taken place anyway if this tasklet got
				 * delayed. Allowing the transfer to take place
				 * avoids races and keeps things simple.
				 */
				if ((err != -ETIMEDOUT) &&
				    (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
					state = STATE_SENDING_DATA;
					continue;
				}

				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events)) {
				/*
				 * If all data-related interrupts don't come
				 * within the given time in reading data state.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed. This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events)) {
				/*
				 * If the data error interrupt comes but the
				 * data over interrupt doesn't come within the
				 * given time in reading data state.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err is non-zero, the stop/abort command has
			 * already been issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}

/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min_t(int, cnt, host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
2014 cnt -= len; 2015 if (host->part_buf_count == 2) { 2016 mci_fifo_writew(host->fifo_reg, host->part_buf16); 2017 host->part_buf_count = 0; 2018 } 2019 } 2020 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2021 if (unlikely((unsigned long)buf & 0x1)) { 2022 while (cnt >= 2) { 2023 u16 aligned_buf[64]; 2024 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 2025 int items = len >> 1; 2026 int i; 2027 /* memcpy from input buffer into aligned buffer */ 2028 memcpy(aligned_buf, buf, len); 2029 buf += len; 2030 cnt -= len; 2031 /* push data from aligned buffer into fifo */ 2032 for (i = 0; i < items; ++i) 2033 mci_fifo_writew(host->fifo_reg, aligned_buf[i]); 2034 } 2035 } else 2036 #endif 2037 { 2038 u16 *pdata = buf; 2039 2040 for (; cnt >= 2; cnt -= 2) 2041 mci_fifo_writew(host->fifo_reg, *pdata++); 2042 buf = pdata; 2043 } 2044 /* put anything remaining in the part_buf */ 2045 if (cnt) { 2046 dw_mci_set_part_bytes(host, buf, cnt); 2047 /* Push data if we have reached the expected data length */ 2048 if ((data->bytes_xfered + init_cnt) == 2049 (data->blksz * data->blocks)) 2050 mci_fifo_writew(host->fifo_reg, host->part_buf16); 2051 } 2052 } 2053 2054 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 2055 { 2056 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2057 if (unlikely((unsigned long)buf & 0x1)) { 2058 while (cnt >= 2) { 2059 /* pull data from fifo into aligned buffer */ 2060 u16 aligned_buf[64]; 2061 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 2062 int items = len >> 1; 2063 int i; 2064 2065 for (i = 0; i < items; ++i) 2066 aligned_buf[i] = mci_fifo_readw(host->fifo_reg); 2067 /* memcpy from aligned buffer into output buffer */ 2068 memcpy(buf, aligned_buf, len); 2069 buf += len; 2070 cnt -= len; 2071 } 2072 } else 2073 #endif 2074 { 2075 u16 *pdata = buf; 2076 2077 for (; cnt >= 2; cnt -= 2) 2078 *pdata++ = mci_fifo_readw(host->fifo_reg); 2079 buf = pdata; 2080 } 2081 if (cnt) { 2082 host->part_buf16 = mci_fifo_readw(host->fifo_reg); 2083 dw_mci_pull_final_bytes(host, buf, cnt); 2084 } 2085 } 2086 2087 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 2088 { 2089 struct mmc_data *data = host->data; 2090 int init_cnt = cnt; 2091 2092 /* try and push anything in the part_buf */ 2093 if (unlikely(host->part_buf_count)) { 2094 int len = dw_mci_push_part_bytes(host, buf, cnt); 2095 2096 buf += len; 2097 cnt -= len; 2098 if (host->part_buf_count == 4) { 2099 mci_fifo_writel(host->fifo_reg, host->part_buf32); 2100 host->part_buf_count = 0; 2101 } 2102 } 2103 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2104 if (unlikely((unsigned long)buf & 0x3)) { 2105 while (cnt >= 4) { 2106 u32 aligned_buf[32]; 2107 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 2108 int items = len >> 2; 2109 int i; 2110 /* memcpy from input buffer into aligned buffer */ 2111 memcpy(aligned_buf, buf, len); 2112 buf += len; 2113 cnt -= len; 2114 /* push data from aligned buffer into fifo */ 2115 for (i = 0; i < items; ++i) 2116 mci_fifo_writel(host->fifo_reg, aligned_buf[i]); 2117 } 2118 } else 2119 #endif 2120 { 2121 u32 *pdata = buf; 2122 2123 for (; cnt >= 4; cnt -= 4) 2124 mci_fifo_writel(host->fifo_reg, *pdata++); 2125 buf = pdata; 2126 } 2127 /* put anything remaining in the part_buf */ 2128 if (cnt) { 2129 dw_mci_set_part_bytes(host, buf, cnt); 2130 /* Push data if we have reached the expected data length */ 2131 if ((data->bytes_xfered + init_cnt) == 2132 (data->blksz * data->blocks)) 2133 mci_fifo_writel(host->fifo_reg, host->part_buf32); 2134 } 2135 } 
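/*
 * Note on the PIO push/pull helpers in this block: the FIFO is always
 * accessed in full host-data-width words (16, 32 or 64 bit, see
 * host->data_shift).  Bytes that do not fill a complete word are parked
 * in host->part_buf and flushed or handed out on a later call via
 * dw_mci_push_part_bytes() / dw_mci_pull_part_bytes().  On architectures
 * without efficient unaligned access, an on-stack aligned bounce buffer
 * sits between the caller's buffer and the FIFO.
 */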
2136 2137 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 2138 { 2139 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2140 if (unlikely((unsigned long)buf & 0x3)) { 2141 while (cnt >= 4) { 2142 /* pull data from fifo into aligned buffer */ 2143 u32 aligned_buf[32]; 2144 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 2145 int items = len >> 2; 2146 int i; 2147 2148 for (i = 0; i < items; ++i) 2149 aligned_buf[i] = mci_fifo_readl(host->fifo_reg); 2150 /* memcpy from aligned buffer into output buffer */ 2151 memcpy(buf, aligned_buf, len); 2152 buf += len; 2153 cnt -= len; 2154 } 2155 } else 2156 #endif 2157 { 2158 u32 *pdata = buf; 2159 2160 for (; cnt >= 4; cnt -= 4) 2161 *pdata++ = mci_fifo_readl(host->fifo_reg); 2162 buf = pdata; 2163 } 2164 if (cnt) { 2165 host->part_buf32 = mci_fifo_readl(host->fifo_reg); 2166 dw_mci_pull_final_bytes(host, buf, cnt); 2167 } 2168 } 2169 2170 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 2171 { 2172 struct mmc_data *data = host->data; 2173 int init_cnt = cnt; 2174 2175 /* try and push anything in the part_buf */ 2176 if (unlikely(host->part_buf_count)) { 2177 int len = dw_mci_push_part_bytes(host, buf, cnt); 2178 2179 buf += len; 2180 cnt -= len; 2181 2182 if (host->part_buf_count == 8) { 2183 mci_fifo_writeq(host->fifo_reg, host->part_buf); 2184 host->part_buf_count = 0; 2185 } 2186 } 2187 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2188 if (unlikely((unsigned long)buf & 0x7)) { 2189 while (cnt >= 8) { 2190 u64 aligned_buf[16]; 2191 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 2192 int items = len >> 3; 2193 int i; 2194 /* memcpy from input buffer into aligned buffer */ 2195 memcpy(aligned_buf, buf, len); 2196 buf += len; 2197 cnt -= len; 2198 /* push data from aligned buffer into fifo */ 2199 for (i = 0; i < items; ++i) 2200 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]); 2201 } 2202 } else 2203 #endif 2204 { 2205 u64 *pdata = buf; 2206 2207 for (; cnt >= 8; cnt -= 8) 2208 mci_fifo_writeq(host->fifo_reg, *pdata++); 2209 buf = pdata; 2210 } 2211 /* put anything remaining in the part_buf */ 2212 if (cnt) { 2213 dw_mci_set_part_bytes(host, buf, cnt); 2214 /* Push data if we have reached the expected data length */ 2215 if ((data->bytes_xfered + init_cnt) == 2216 (data->blksz * data->blocks)) 2217 mci_fifo_writeq(host->fifo_reg, host->part_buf); 2218 } 2219 } 2220 2221 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 2222 { 2223 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2224 if (unlikely((unsigned long)buf & 0x7)) { 2225 while (cnt >= 8) { 2226 /* pull data from fifo into aligned buffer */ 2227 u64 aligned_buf[16]; 2228 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 2229 int items = len >> 3; 2230 int i; 2231 2232 for (i = 0; i < items; ++i) 2233 aligned_buf[i] = mci_fifo_readq(host->fifo_reg); 2234 2235 /* memcpy from aligned buffer into output buffer */ 2236 memcpy(buf, aligned_buf, len); 2237 buf += len; 2238 cnt -= len; 2239 } 2240 } else 2241 #endif 2242 { 2243 u64 *pdata = buf; 2244 2245 for (; cnt >= 8; cnt -= 8) 2246 *pdata++ = mci_fifo_readq(host->fifo_reg); 2247 buf = pdata; 2248 } 2249 if (cnt) { 2250 host->part_buf = mci_fifo_readq(host->fifo_reg); 2251 dw_mci_pull_final_bytes(host, buf, cnt); 2252 } 2253 } 2254 2255 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) 2256 { 2257 int len; 2258 2259 /* get remaining partial bytes */ 2260 len = dw_mci_pull_part_bytes(host, buf, cnt); 2261 if (unlikely(len == cnt)) 2262 return; 2263 buf 
+= len; 2264 cnt -= len; 2265 2266 /* get the rest of the data */ 2267 host->pull_data(host, buf, cnt); 2268 } 2269 2270 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) 2271 { 2272 struct sg_mapping_iter *sg_miter = &host->sg_miter; 2273 void *buf; 2274 unsigned int offset; 2275 struct mmc_data *data = host->data; 2276 int shift = host->data_shift; 2277 u32 status; 2278 unsigned int len; 2279 unsigned int remain, fcnt; 2280 2281 do { 2282 if (!sg_miter_next(sg_miter)) 2283 goto done; 2284 2285 host->sg = sg_miter->piter.sg; 2286 buf = sg_miter->addr; 2287 remain = sg_miter->length; 2288 offset = 0; 2289 2290 do { 2291 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) 2292 << shift) + host->part_buf_count; 2293 len = min(remain, fcnt); 2294 if (!len) 2295 break; 2296 dw_mci_pull_data(host, (void *)(buf + offset), len); 2297 data->bytes_xfered += len; 2298 offset += len; 2299 remain -= len; 2300 } while (remain); 2301 2302 sg_miter->consumed = offset; 2303 status = mci_readl(host, MINTSTS); 2304 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2305 /* if the RXDR is ready read again */ 2306 } while ((status & SDMMC_INT_RXDR) || 2307 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); 2308 2309 if (!remain) { 2310 if (!sg_miter_next(sg_miter)) 2311 goto done; 2312 sg_miter->consumed = 0; 2313 } 2314 sg_miter_stop(sg_miter); 2315 return; 2316 2317 done: 2318 sg_miter_stop(sg_miter); 2319 host->sg = NULL; 2320 smp_wmb(); /* drain writebuffer */ 2321 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2322 } 2323 2324 static void dw_mci_write_data_pio(struct dw_mci *host) 2325 { 2326 struct sg_mapping_iter *sg_miter = &host->sg_miter; 2327 void *buf; 2328 unsigned int offset; 2329 struct mmc_data *data = host->data; 2330 int shift = host->data_shift; 2331 u32 status; 2332 unsigned int len; 2333 unsigned int fifo_depth = host->fifo_depth; 2334 unsigned int remain, fcnt; 2335 2336 do { 2337 if (!sg_miter_next(sg_miter)) 2338 goto done; 2339 2340 host->sg = sg_miter->piter.sg; 2341 buf = sg_miter->addr; 2342 remain = sg_miter->length; 2343 offset = 0; 2344 2345 do { 2346 fcnt = ((fifo_depth - 2347 SDMMC_GET_FCNT(mci_readl(host, STATUS))) 2348 << shift) - host->part_buf_count; 2349 len = min(remain, fcnt); 2350 if (!len) 2351 break; 2352 host->push_data(host, (void *)(buf + offset), len); 2353 data->bytes_xfered += len; 2354 offset += len; 2355 remain -= len; 2356 } while (remain); 2357 2358 sg_miter->consumed = offset; 2359 status = mci_readl(host, MINTSTS); 2360 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2361 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 2362 2363 if (!remain) { 2364 if (!sg_miter_next(sg_miter)) 2365 goto done; 2366 sg_miter->consumed = 0; 2367 } 2368 sg_miter_stop(sg_miter); 2369 return; 2370 2371 done: 2372 sg_miter_stop(sg_miter); 2373 host->sg = NULL; 2374 smp_wmb(); /* drain writebuffer */ 2375 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2376 } 2377 2378 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 2379 { 2380 if (!host->cmd_status) 2381 host->cmd_status = status; 2382 2383 smp_wmb(); /* drain writebuffer */ 2384 2385 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2386 tasklet_schedule(&host->tasklet); 2387 } 2388 2389 static void dw_mci_handle_cd(struct dw_mci *host) 2390 { 2391 int i; 2392 2393 for (i = 0; i < host->num_slots; i++) { 2394 struct dw_mci_slot *slot = host->slot[i]; 2395 2396 if (!slot) 2397 continue; 2398 2399 if (slot->mmc->ops->card_event) 2400 slot->mmc->ops->card_event(slot->mmc); 2401 
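/* have the core re-scan the slot after the platform's detect debounce delay */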
mmc_detect_change(slot->mmc, 2402 msecs_to_jiffies(host->pdata->detect_delay_ms)); 2403 } 2404 } 2405 2406 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 2407 { 2408 struct dw_mci *host = dev_id; 2409 u32 pending; 2410 int i; 2411 2412 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 2413 2414 if (pending) { 2415 /* Check volt switch first, since it can look like an error */ 2416 if ((host->state == STATE_SENDING_CMD11) && 2417 (pending & SDMMC_INT_VOLT_SWITCH)) { 2418 unsigned long irqflags; 2419 2420 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); 2421 pending &= ~SDMMC_INT_VOLT_SWITCH; 2422 2423 /* 2424 * Hold the lock; we know cmd11_timer can't be kicked 2425 * off after the lock is released, so safe to delete. 2426 */ 2427 spin_lock_irqsave(&host->irq_lock, irqflags); 2428 dw_mci_cmd_interrupt(host, pending); 2429 spin_unlock_irqrestore(&host->irq_lock, irqflags); 2430 2431 del_timer(&host->cmd11_timer); 2432 } 2433 2434 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 2435 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 2436 host->cmd_status = pending; 2437 smp_wmb(); /* drain writebuffer */ 2438 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2439 } 2440 2441 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 2442 /* if there is an error report DATA_ERROR */ 2443 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 2444 host->data_status = pending; 2445 smp_wmb(); /* drain writebuffer */ 2446 set_bit(EVENT_DATA_ERROR, &host->pending_events); 2447 tasklet_schedule(&host->tasklet); 2448 } 2449 2450 if (pending & SDMMC_INT_DATA_OVER) { 2451 del_timer(&host->dto_timer); 2452 2453 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 2454 if (!host->data_status) 2455 host->data_status = pending; 2456 smp_wmb(); /* drain writebuffer */ 2457 if (host->dir_status == DW_MCI_RECV_STATUS) { 2458 if (host->sg != NULL) 2459 dw_mci_read_data_pio(host, true); 2460 } 2461 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2462 tasklet_schedule(&host->tasklet); 2463 } 2464 2465 if (pending & SDMMC_INT_RXDR) { 2466 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2467 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) 2468 dw_mci_read_data_pio(host, false); 2469 } 2470 2471 if (pending & SDMMC_INT_TXDR) { 2472 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2473 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) 2474 dw_mci_write_data_pio(host); 2475 } 2476 2477 if (pending & SDMMC_INT_CMD_DONE) { 2478 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 2479 dw_mci_cmd_interrupt(host, pending); 2480 } 2481 2482 if (pending & SDMMC_INT_CD) { 2483 mci_writel(host, RINTSTS, SDMMC_INT_CD); 2484 dw_mci_handle_cd(host); 2485 } 2486 2487 /* Handle SDIO Interrupts */ 2488 for (i = 0; i < host->num_slots; i++) { 2489 struct dw_mci_slot *slot = host->slot[i]; 2490 2491 if (!slot) 2492 continue; 2493 2494 if (pending & SDMMC_INT_SDIO(slot->sdio_id)) { 2495 mci_writel(host, RINTSTS, 2496 SDMMC_INT_SDIO(slot->sdio_id)); 2497 mmc_signal_sdio_irq(slot->mmc); 2498 } 2499 } 2500 2501 } 2502 2503 if (host->use_dma != TRANS_MODE_IDMAC) 2504 return IRQ_HANDLED; 2505 2506 /* Handle IDMA interrupts */ 2507 if (host->dma_64bit_address == 1) { 2508 pending = mci_readl(host, IDSTS64); 2509 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2510 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | 2511 SDMMC_IDMAC_INT_RI); 2512 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); 2513 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 2514 host->dma_ops->complete((void *)host); 2515 } 2516 } else { 2517 pending = 
mci_readl(host, IDSTS); 2518 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2519 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | 2520 SDMMC_IDMAC_INT_RI); 2521 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 2522 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 2523 host->dma_ops->complete((void *)host); 2524 } 2525 } 2526 2527 return IRQ_HANDLED; 2528 } 2529 2530 #ifdef CONFIG_OF 2531 /* given a slot, find out the device node representing that slot */ 2532 static struct device_node *dw_mci_of_find_slot_node(struct dw_mci_slot *slot) 2533 { 2534 struct device *dev = slot->mmc->parent; 2535 struct device_node *np; 2536 const __be32 *addr; 2537 int len; 2538 2539 if (!dev || !dev->of_node) 2540 return NULL; 2541 2542 for_each_child_of_node(dev->of_node, np) { 2543 addr = of_get_property(np, "reg", &len); 2544 if (!addr || (len < sizeof(int))) 2545 continue; 2546 if (be32_to_cpup(addr) == slot->id) 2547 return np; 2548 } 2549 return NULL; 2550 } 2551 2552 static void dw_mci_slot_of_parse(struct dw_mci_slot *slot) 2553 { 2554 struct device_node *np = dw_mci_of_find_slot_node(slot); 2555 2556 if (!np) 2557 return; 2558 2559 if (of_property_read_bool(np, "disable-wp")) { 2560 slot->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; 2561 dev_warn(slot->mmc->parent, 2562 "Slot quirk 'disable-wp' is deprecated\n"); 2563 } 2564 } 2565 #else /* CONFIG_OF */ 2566 static void dw_mci_slot_of_parse(struct dw_mci_slot *slot) 2567 { 2568 } 2569 #endif /* CONFIG_OF */ 2570 2571 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) 2572 { 2573 struct mmc_host *mmc; 2574 struct dw_mci_slot *slot; 2575 const struct dw_mci_drv_data *drv_data = host->drv_data; 2576 int ctrl_id, ret; 2577 u32 freq[2]; 2578 2579 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); 2580 if (!mmc) 2581 return -ENOMEM; 2582 2583 slot = mmc_priv(mmc); 2584 slot->id = id; 2585 slot->sdio_id = host->sdio_id0 + id; 2586 slot->mmc = mmc; 2587 slot->host = host; 2588 host->slot[id] = slot; 2589 2590 mmc->ops = &dw_mci_ops; 2591 if (of_property_read_u32_array(host->dev->of_node, 2592 "clock-freq-min-max", freq, 2)) { 2593 mmc->f_min = DW_MCI_FREQ_MIN; 2594 mmc->f_max = DW_MCI_FREQ_MAX; 2595 } else { 2596 mmc->f_min = freq[0]; 2597 mmc->f_max = freq[1]; 2598 } 2599 2600 /*if there are external regulators, get them*/ 2601 ret = mmc_regulator_get_supply(mmc); 2602 if (ret == -EPROBE_DEFER) 2603 goto err_host_allocated; 2604 2605 if (!mmc->ocr_avail) 2606 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 2607 2608 if (host->pdata->caps) 2609 mmc->caps = host->pdata->caps; 2610 2611 /* 2612 * Support MMC_CAP_ERASE by default. 2613 * It needs to use trim/discard/erase commands. 2614 */ 2615 mmc->caps |= MMC_CAP_ERASE; 2616 2617 if (host->pdata->pm_caps) 2618 mmc->pm_caps = host->pdata->pm_caps; 2619 2620 if (host->dev->of_node) { 2621 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); 2622 if (ctrl_id < 0) 2623 ctrl_id = 0; 2624 } else { 2625 ctrl_id = to_platform_device(host->dev)->id; 2626 } 2627 if (drv_data && drv_data->caps) 2628 mmc->caps |= drv_data->caps[ctrl_id]; 2629 2630 if (host->pdata->caps2) 2631 mmc->caps2 = host->pdata->caps2; 2632 2633 dw_mci_slot_of_parse(slot); 2634 2635 ret = mmc_of_parse(mmc); 2636 if (ret) 2637 goto err_host_allocated; 2638 2639 /* Useful defaults if platform data is unset. 
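The max_* limits chosen below depend on the transfer mode (IDMAC, external DMA or PIO).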
*/
2640 if (host->use_dma == TRANS_MODE_IDMAC) {
2641 mmc->max_segs = host->ring_size;
2642 mmc->max_blk_size = 65535;
2643 mmc->max_seg_size = 0x1000;
2644 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2645 mmc->max_blk_count = mmc->max_req_size / 512;
2646 } else if (host->use_dma == TRANS_MODE_EDMAC) {
2647 mmc->max_segs = 64;
2648 mmc->max_blk_size = 65535;
2649 mmc->max_blk_count = 65535;
2650 mmc->max_req_size =
2651 mmc->max_blk_size * mmc->max_blk_count;
2652 mmc->max_seg_size = mmc->max_req_size;
2653 } else {
2654 /* TRANS_MODE_PIO */
2655 mmc->max_segs = 64;
2656 mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
2657 mmc->max_blk_count = 512;
2658 mmc->max_req_size = mmc->max_blk_size *
2659 mmc->max_blk_count;
2660 mmc->max_seg_size = mmc->max_req_size;
2661 }
2662
2663 dw_mci_get_cd(mmc);
2664
2665 ret = mmc_add_host(mmc);
2666 if (ret)
2667 goto err_host_allocated;
2668
2669 #if defined(CONFIG_DEBUG_FS)
2670 dw_mci_init_debugfs(slot);
2671 #endif
2672
2673 return 0;
2674
2675 err_host_allocated:
2676 mmc_free_host(mmc);
2677 return ret;
2678 }
2679
2680 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2681 {
2682 /* Debugfs stuff is cleaned up by mmc core */
2683 mmc_remove_host(slot->mmc);
2684 slot->host->slot[id] = NULL;
2685 mmc_free_host(slot->mmc);
2686 }
2687
2688 static void dw_mci_init_dma(struct dw_mci *host)
2689 {
2690 int addr_config;
2691 struct device *dev = host->dev;
2692 struct device_node *np = dev->of_node;
2693
2694 /*
2695 * Check the transfer mode from HCON[17:16].
2696 * The dw_mmc databook's description is ambiguous; the values mean:
2697 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
2698 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
2699 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
2700 * 2b'11: Non DW DMA Interface -> pio only
2701 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
2702 * simpler request/acknowledge handshake mechanism and both of them
2703 * are treated as an external DMA master by dw_mmc.
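 * The raw HCON value is translated below into the driver's internal
 * TRANS_MODE_IDMAC / TRANS_MODE_EDMAC / TRANS_MODE_PIO setting.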
2704 */ 2705 host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON)); 2706 if (host->use_dma == DMA_INTERFACE_IDMA) { 2707 host->use_dma = TRANS_MODE_IDMAC; 2708 } else if (host->use_dma == DMA_INTERFACE_DWDMA || 2709 host->use_dma == DMA_INTERFACE_GDMA) { 2710 host->use_dma = TRANS_MODE_EDMAC; 2711 } else { 2712 goto no_dma; 2713 } 2714 2715 /* Determine which DMA interface to use */ 2716 if (host->use_dma == TRANS_MODE_IDMAC) { 2717 /* 2718 * Check ADDR_CONFIG bit in HCON to find 2719 * IDMAC address bus width 2720 */ 2721 addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON)); 2722 2723 if (addr_config == 1) { 2724 /* host supports IDMAC in 64-bit address mode */ 2725 host->dma_64bit_address = 1; 2726 dev_info(host->dev, 2727 "IDMAC supports 64-bit address mode.\n"); 2728 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64))) 2729 dma_set_coherent_mask(host->dev, 2730 DMA_BIT_MASK(64)); 2731 } else { 2732 /* host supports IDMAC in 32-bit address mode */ 2733 host->dma_64bit_address = 0; 2734 dev_info(host->dev, 2735 "IDMAC supports 32-bit address mode.\n"); 2736 } 2737 2738 /* Alloc memory for sg translation */ 2739 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, 2740 &host->sg_dma, GFP_KERNEL); 2741 if (!host->sg_cpu) { 2742 dev_err(host->dev, 2743 "%s: could not alloc DMA memory\n", 2744 __func__); 2745 goto no_dma; 2746 } 2747 2748 host->dma_ops = &dw_mci_idmac_ops; 2749 dev_info(host->dev, "Using internal DMA controller.\n"); 2750 } else { 2751 /* TRANS_MODE_EDMAC: check dma bindings again */ 2752 if ((of_property_count_strings(np, "dma-names") < 0) || 2753 (!of_find_property(np, "dmas", NULL))) { 2754 goto no_dma; 2755 } 2756 host->dma_ops = &dw_mci_edmac_ops; 2757 dev_info(host->dev, "Using external DMA controller.\n"); 2758 } 2759 2760 if (host->dma_ops->init && host->dma_ops->start && 2761 host->dma_ops->stop && host->dma_ops->cleanup) { 2762 if (host->dma_ops->init(host)) { 2763 dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n", 2764 __func__); 2765 goto no_dma; 2766 } 2767 } else { 2768 dev_err(host->dev, "DMA initialization not found.\n"); 2769 goto no_dma; 2770 } 2771 2772 return; 2773 2774 no_dma: 2775 dev_info(host->dev, "Using PIO mode.\n"); 2776 host->use_dma = TRANS_MODE_PIO; 2777 } 2778 2779 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) 2780 { 2781 unsigned long timeout = jiffies + msecs_to_jiffies(500); 2782 u32 ctrl; 2783 2784 ctrl = mci_readl(host, CTRL); 2785 ctrl |= reset; 2786 mci_writel(host, CTRL, ctrl); 2787 2788 /* wait till resets clear */ 2789 do { 2790 ctrl = mci_readl(host, CTRL); 2791 if (!(ctrl & reset)) 2792 return true; 2793 } while (time_before(jiffies, timeout)); 2794 2795 dev_err(host->dev, 2796 "Timeout resetting block (ctrl reset %#x)\n", 2797 ctrl & reset); 2798 2799 return false; 2800 } 2801 2802 static bool dw_mci_reset(struct dw_mci *host) 2803 { 2804 u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET; 2805 bool ret = false; 2806 2807 /* 2808 * Reseting generates a block interrupt, hence setting 2809 * the scatter-gather pointer to NULL. 2810 */ 2811 if (host->sg) { 2812 sg_miter_stop(&host->sg_miter); 2813 host->sg = NULL; 2814 } 2815 2816 if (host->use_dma) 2817 flags |= SDMMC_CTRL_DMA_RESET; 2818 2819 if (dw_mci_ctrl_reset(host, flags)) { 2820 /* 2821 * In all cases we clear the RAWINTS register to clear any 2822 * interrupts. 
2823 */ 2824 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2825 2826 /* if using dma we wait for dma_req to clear */ 2827 if (host->use_dma) { 2828 unsigned long timeout = jiffies + msecs_to_jiffies(500); 2829 u32 status; 2830 2831 do { 2832 status = mci_readl(host, STATUS); 2833 if (!(status & SDMMC_STATUS_DMA_REQ)) 2834 break; 2835 cpu_relax(); 2836 } while (time_before(jiffies, timeout)); 2837 2838 if (status & SDMMC_STATUS_DMA_REQ) { 2839 dev_err(host->dev, 2840 "%s: Timeout waiting for dma_req to clear during reset\n", 2841 __func__); 2842 goto ciu_out; 2843 } 2844 2845 /* when using DMA next we reset the fifo again */ 2846 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET)) 2847 goto ciu_out; 2848 } 2849 } else { 2850 /* if the controller reset bit did clear, then set clock regs */ 2851 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) { 2852 dev_err(host->dev, 2853 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n", 2854 __func__); 2855 goto ciu_out; 2856 } 2857 } 2858 2859 if (host->use_dma == TRANS_MODE_IDMAC) 2860 /* It is also recommended that we reset and reprogram idmac */ 2861 dw_mci_idmac_reset(host); 2862 2863 ret = true; 2864 2865 ciu_out: 2866 /* After a CTRL reset we need to have CIU set clock registers */ 2867 mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0); 2868 2869 return ret; 2870 } 2871 2872 static void dw_mci_cmd11_timer(unsigned long arg) 2873 { 2874 struct dw_mci *host = (struct dw_mci *)arg; 2875 2876 if (host->state != STATE_SENDING_CMD11) { 2877 dev_warn(host->dev, "Unexpected CMD11 timeout\n"); 2878 return; 2879 } 2880 2881 host->cmd_status = SDMMC_INT_RTO; 2882 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2883 tasklet_schedule(&host->tasklet); 2884 } 2885 2886 static void dw_mci_dto_timer(unsigned long arg) 2887 { 2888 struct dw_mci *host = (struct dw_mci *)arg; 2889 2890 switch (host->state) { 2891 case STATE_SENDING_DATA: 2892 case STATE_DATA_BUSY: 2893 /* 2894 * If DTO interrupt does NOT come in sending data state, 2895 * we should notify the driver to terminate current transfer 2896 * and report a data timeout to the core. 
2897 */ 2898 host->data_status = SDMMC_INT_DRTO; 2899 set_bit(EVENT_DATA_ERROR, &host->pending_events); 2900 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2901 tasklet_schedule(&host->tasklet); 2902 break; 2903 default: 2904 break; 2905 } 2906 } 2907 2908 #ifdef CONFIG_OF 2909 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 2910 { 2911 struct dw_mci_board *pdata; 2912 struct device *dev = host->dev; 2913 struct device_node *np = dev->of_node; 2914 const struct dw_mci_drv_data *drv_data = host->drv_data; 2915 int ret; 2916 u32 clock_frequency; 2917 2918 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 2919 if (!pdata) 2920 return ERR_PTR(-ENOMEM); 2921 2922 /* find out number of slots supported */ 2923 of_property_read_u32(np, "num-slots", &pdata->num_slots); 2924 2925 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth)) 2926 dev_info(dev, 2927 "fifo-depth property not found, using value of FIFOTH register as default\n"); 2928 2929 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms); 2930 2931 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency)) 2932 pdata->bus_hz = clock_frequency; 2933 2934 if (drv_data && drv_data->parse_dt) { 2935 ret = drv_data->parse_dt(host); 2936 if (ret) 2937 return ERR_PTR(ret); 2938 } 2939 2940 if (of_find_property(np, "supports-highspeed", NULL)) { 2941 dev_info(dev, "supports-highspeed property is deprecated.\n"); 2942 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 2943 } 2944 2945 return pdata; 2946 } 2947 2948 #else /* CONFIG_OF */ 2949 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 2950 { 2951 return ERR_PTR(-EINVAL); 2952 } 2953 #endif /* CONFIG_OF */ 2954 2955 static void dw_mci_enable_cd(struct dw_mci *host) 2956 { 2957 unsigned long irqflags; 2958 u32 temp; 2959 int i; 2960 struct dw_mci_slot *slot; 2961 2962 /* 2963 * No need for CD if all slots have a non-error GPIO 2964 * as well as broken card detection is found. 
2965 */ 2966 for (i = 0; i < host->num_slots; i++) { 2967 slot = host->slot[i]; 2968 if (slot->mmc->caps & MMC_CAP_NEEDS_POLL) 2969 return; 2970 2971 if (mmc_gpio_get_cd(slot->mmc) < 0) 2972 break; 2973 } 2974 if (i == host->num_slots) 2975 return; 2976 2977 spin_lock_irqsave(&host->irq_lock, irqflags); 2978 temp = mci_readl(host, INTMASK); 2979 temp |= SDMMC_INT_CD; 2980 mci_writel(host, INTMASK, temp); 2981 spin_unlock_irqrestore(&host->irq_lock, irqflags); 2982 } 2983 2984 int dw_mci_probe(struct dw_mci *host) 2985 { 2986 const struct dw_mci_drv_data *drv_data = host->drv_data; 2987 int width, i, ret = 0; 2988 u32 fifo_size; 2989 int init_slots = 0; 2990 2991 if (!host->pdata) { 2992 host->pdata = dw_mci_parse_dt(host); 2993 if (IS_ERR(host->pdata)) { 2994 dev_err(host->dev, "platform data not available\n"); 2995 return -EINVAL; 2996 } 2997 } 2998 2999 host->biu_clk = devm_clk_get(host->dev, "biu"); 3000 if (IS_ERR(host->biu_clk)) { 3001 dev_dbg(host->dev, "biu clock not available\n"); 3002 } else { 3003 ret = clk_prepare_enable(host->biu_clk); 3004 if (ret) { 3005 dev_err(host->dev, "failed to enable biu clock\n"); 3006 return ret; 3007 } 3008 } 3009 3010 host->ciu_clk = devm_clk_get(host->dev, "ciu"); 3011 if (IS_ERR(host->ciu_clk)) { 3012 dev_dbg(host->dev, "ciu clock not available\n"); 3013 host->bus_hz = host->pdata->bus_hz; 3014 } else { 3015 ret = clk_prepare_enable(host->ciu_clk); 3016 if (ret) { 3017 dev_err(host->dev, "failed to enable ciu clock\n"); 3018 goto err_clk_biu; 3019 } 3020 3021 if (host->pdata->bus_hz) { 3022 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); 3023 if (ret) 3024 dev_warn(host->dev, 3025 "Unable to set bus rate to %uHz\n", 3026 host->pdata->bus_hz); 3027 } 3028 host->bus_hz = clk_get_rate(host->ciu_clk); 3029 } 3030 3031 if (!host->bus_hz) { 3032 dev_err(host->dev, 3033 "Platform data must supply bus speed\n"); 3034 ret = -ENODEV; 3035 goto err_clk_ciu; 3036 } 3037 3038 if (drv_data && drv_data->init) { 3039 ret = drv_data->init(host); 3040 if (ret) { 3041 dev_err(host->dev, 3042 "implementation specific init failed\n"); 3043 goto err_clk_ciu; 3044 } 3045 } 3046 3047 setup_timer(&host->cmd11_timer, 3048 dw_mci_cmd11_timer, (unsigned long)host); 3049 3050 setup_timer(&host->dto_timer, 3051 dw_mci_dto_timer, (unsigned long)host); 3052 3053 spin_lock_init(&host->lock); 3054 spin_lock_init(&host->irq_lock); 3055 INIT_LIST_HEAD(&host->queue); 3056 3057 /* 3058 * Get the host data width - this assumes that HCON has been set with 3059 * the correct values. 
3060 */ 3061 i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON)); 3062 if (!i) { 3063 host->push_data = dw_mci_push_data16; 3064 host->pull_data = dw_mci_pull_data16; 3065 width = 16; 3066 host->data_shift = 1; 3067 } else if (i == 2) { 3068 host->push_data = dw_mci_push_data64; 3069 host->pull_data = dw_mci_pull_data64; 3070 width = 64; 3071 host->data_shift = 3; 3072 } else { 3073 /* Check for a reserved value, and warn if it is */ 3074 WARN((i != 1), 3075 "HCON reports a reserved host data width!\n" 3076 "Defaulting to 32-bit access.\n"); 3077 host->push_data = dw_mci_push_data32; 3078 host->pull_data = dw_mci_pull_data32; 3079 width = 32; 3080 host->data_shift = 2; 3081 } 3082 3083 /* Reset all blocks */ 3084 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3085 ret = -ENODEV; 3086 goto err_clk_ciu; 3087 } 3088 3089 host->dma_ops = host->pdata->dma_ops; 3090 dw_mci_init_dma(host); 3091 3092 /* Clear the interrupts for the host controller */ 3093 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3094 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3095 3096 /* Put in max timeout */ 3097 mci_writel(host, TMOUT, 0xFFFFFFFF); 3098 3099 /* 3100 * FIFO threshold settings RxMark = fifo_size / 2 - 1, 3101 * Tx Mark = fifo_size / 2 DMA Size = 8 3102 */ 3103 if (!host->pdata->fifo_depth) { 3104 /* 3105 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may 3106 * have been overwritten by the bootloader, just like we're 3107 * about to do, so if you know the value for your hardware, you 3108 * should put it in the platform data. 3109 */ 3110 fifo_size = mci_readl(host, FIFOTH); 3111 fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 3112 } else { 3113 fifo_size = host->pdata->fifo_depth; 3114 } 3115 host->fifo_depth = fifo_size; 3116 host->fifoth_val = 3117 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); 3118 mci_writel(host, FIFOTH, host->fifoth_val); 3119 3120 /* disable clock to CIU */ 3121 mci_writel(host, CLKENA, 0); 3122 mci_writel(host, CLKSRC, 0); 3123 3124 /* 3125 * In 2.40a spec, Data offset is changed. 3126 * Need to check the version-id and set data-offset for DATA register. 
3127 */ 3128 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 3129 dev_info(host->dev, "Version ID is %04x\n", host->verid); 3130 3131 if (host->verid < DW_MMC_240A) 3132 host->fifo_reg = host->regs + DATA_OFFSET; 3133 else 3134 host->fifo_reg = host->regs + DATA_240A_OFFSET; 3135 3136 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 3137 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 3138 host->irq_flags, "dw-mci", host); 3139 if (ret) 3140 goto err_dmaunmap; 3141 3142 if (host->pdata->num_slots) 3143 host->num_slots = host->pdata->num_slots; 3144 else 3145 host->num_slots = 1; 3146 3147 if (host->num_slots < 1 || 3148 host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) { 3149 dev_err(host->dev, 3150 "Platform data must supply correct num_slots.\n"); 3151 ret = -ENODEV; 3152 goto err_clk_ciu; 3153 } 3154 3155 /* 3156 * Enable interrupts for command done, data over, data empty, 3157 * receive ready and error such as transmit, receive timeout, crc error 3158 */ 3159 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3160 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3161 DW_MCI_ERROR_FLAGS); 3162 /* Enable mci interrupt */ 3163 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3164 3165 dev_info(host->dev, 3166 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n", 3167 host->irq, width, fifo_size); 3168 3169 /* We need at least one slot to succeed */ 3170 for (i = 0; i < host->num_slots; i++) { 3171 ret = dw_mci_init_slot(host, i); 3172 if (ret) 3173 dev_dbg(host->dev, "slot %d init failed\n", i); 3174 else 3175 init_slots++; 3176 } 3177 3178 if (init_slots) { 3179 dev_info(host->dev, "%d slots initialized\n", init_slots); 3180 } else { 3181 dev_dbg(host->dev, 3182 "attempted to initialize %d slots, but failed on all\n", 3183 host->num_slots); 3184 goto err_dmaunmap; 3185 } 3186 3187 /* Now that slots are all setup, we can enable card detect */ 3188 dw_mci_enable_cd(host); 3189 3190 return 0; 3191 3192 err_dmaunmap: 3193 if (host->use_dma && host->dma_ops->exit) 3194 host->dma_ops->exit(host); 3195 3196 err_clk_ciu: 3197 if (!IS_ERR(host->ciu_clk)) 3198 clk_disable_unprepare(host->ciu_clk); 3199 3200 err_clk_biu: 3201 if (!IS_ERR(host->biu_clk)) 3202 clk_disable_unprepare(host->biu_clk); 3203 3204 return ret; 3205 } 3206 EXPORT_SYMBOL(dw_mci_probe); 3207 3208 void dw_mci_remove(struct dw_mci *host) 3209 { 3210 int i; 3211 3212 for (i = 0; i < host->num_slots; i++) { 3213 dev_dbg(host->dev, "remove slot %d\n", i); 3214 if (host->slot[i]) 3215 dw_mci_cleanup_slot(host->slot[i], i); 3216 } 3217 3218 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3219 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3220 3221 /* disable clock to CIU */ 3222 mci_writel(host, CLKENA, 0); 3223 mci_writel(host, CLKSRC, 0); 3224 3225 if (host->use_dma && host->dma_ops->exit) 3226 host->dma_ops->exit(host); 3227 3228 if (!IS_ERR(host->ciu_clk)) 3229 clk_disable_unprepare(host->ciu_clk); 3230 3231 if (!IS_ERR(host->biu_clk)) 3232 clk_disable_unprepare(host->biu_clk); 3233 } 3234 EXPORT_SYMBOL(dw_mci_remove); 3235 3236 3237 3238 #ifdef CONFIG_PM_SLEEP 3239 /* 3240 * TODO: we should probably disable the clock to the card in the suspend path. 
3241 */ 3242 int dw_mci_suspend(struct dw_mci *host) 3243 { 3244 if (host->use_dma && host->dma_ops->exit) 3245 host->dma_ops->exit(host); 3246 3247 return 0; 3248 } 3249 EXPORT_SYMBOL(dw_mci_suspend); 3250 3251 int dw_mci_resume(struct dw_mci *host) 3252 { 3253 int i, ret; 3254 3255 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3256 ret = -ENODEV; 3257 return ret; 3258 } 3259 3260 if (host->use_dma && host->dma_ops->init) 3261 host->dma_ops->init(host); 3262 3263 /* 3264 * Restore the initial value at FIFOTH register 3265 * And Invalidate the prev_blksz with zero 3266 */ 3267 mci_writel(host, FIFOTH, host->fifoth_val); 3268 host->prev_blksz = 0; 3269 3270 /* Put in max timeout */ 3271 mci_writel(host, TMOUT, 0xFFFFFFFF); 3272 3273 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3274 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3275 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3276 DW_MCI_ERROR_FLAGS); 3277 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3278 3279 for (i = 0; i < host->num_slots; i++) { 3280 struct dw_mci_slot *slot = host->slot[i]; 3281 3282 if (!slot) 3283 continue; 3284 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) { 3285 dw_mci_set_ios(slot->mmc, &slot->mmc->ios); 3286 dw_mci_setup_bus(slot, true); 3287 } 3288 } 3289 3290 /* Now that slots are all setup, we can enable card detect */ 3291 dw_mci_enable_cd(host); 3292 3293 return 0; 3294 } 3295 EXPORT_SYMBOL(dw_mci_resume); 3296 #endif /* CONFIG_PM_SLEEP */ 3297 3298 static int __init dw_mci_init(void) 3299 { 3300 pr_info("Synopsys Designware Multimedia Card Interface Driver\n"); 3301 return 0; 3302 } 3303 3304 static void __exit dw_mci_exit(void) 3305 { 3306 } 3307 3308 module_init(dw_mci_init); 3309 module_exit(dw_mci_exit); 3310 3311 MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 3312 MODULE_AUTHOR("NXP Semiconductor VietNam"); 3313 MODULE_AUTHOR("Imagination Technologies Ltd"); 3314 MODULE_LICENSE("GPL v2"); 3315