/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1 */

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000

static bool dw_mci_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static int dw_mci_card_busy(struct mmc_host *mmc);
static int dw_mci_get_cd(struct mmc_host *mmc);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}

static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner = THIS_MODULE,
	.open = dw_mci_req_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	/* Dump the register contents, not the register offsets */
	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner = THIS_MODULE,
	.open = dw_mci_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
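/*
 * Rough illustration (not taken from the databook): for a multiple-block
 * read (CMD18) the helper below typically ends up building
 *
 *   cmdr = 18 | SDMMC_CMD_PRV_DAT_WAIT | SDMMC_CMD_RESP_EXP |
 *          SDMMC_CMD_RESP_CRC | SDMMC_CMD_DAT_EXP | SDMMC_CMD_USE_HOLD_REG;
 *
 * i.e. wait for the previous data phase, expect a short CRC-checked
 * response and expect a (read) data transfer.
 */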
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data *data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}
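/*
 * For reference, the stop/abort command prepared above is CMD12
 * (STOP_TRANSMISSION, R1b) for plain block reads/writes and tuning blocks,
 * while for CMD53 (SD_IO_RW_EXTENDED) it is a CMD52 write to the CCCR
 * ABORT register of the same function number.
 */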
static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy. Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		while (mci_readl(host, STATUS) & SDMMC_STATUS_BUSY) {
			if (time_after(jiffies, timeout)) {
				/* Command will fail; we'll pass error then */
				dev_err(host->dev, "Busy; trying anyway\n");
				break;
			}
			udelay(10);
		}
	}
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
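/*
 * Illustrative note: each chained IDMAC descriptor carries at most
 * DW_MCI_DESC_DATA_LENGTH (4 KiB), so a single 64 KiB scatterlist entry is
 * split across 16 descriptors by the loops below, and only the final
 * descriptor of the whole chain keeps the LD (last descriptor) bit.
 */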
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	unsigned int desc_len;
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *desc_first, *desc_last, *desc;

		desc_first = desc_last = desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++) {
			unsigned int length = sg_dma_len(&data->sg[i]);

			u64 mem_addr = sg_dma_address(&data->sg[i]);

			for ( ; length ; desc++) {
				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
					   length : DW_MCI_DESC_DATA_LENGTH;

				length -= desc_len;

				/*
				 * Set the OWN bit and disable interrupts
				 * for this descriptor
				 */
				desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
					     IDMAC_DES0_CH;

				/* Buffer length */
				IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

				/* Physical address to DMA to/from */
				desc->des4 = mem_addr & 0xffffffff;
				desc->des5 = mem_addr >> 32;

				/* Update physical address for the next desc */
				mem_addr += desc_len;

				/* Save pointer to the last descriptor */
				desc_last = desc;
			}
		}

		/* Set first descriptor */
		desc_first->des0 |= IDMAC_DES0_FD;

		/* Set last descriptor */
		desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
		desc_last->des0 |= IDMAC_DES0_LD;

	} else {
		struct idmac_desc *desc_first, *desc_last, *desc;

		desc_first = desc_last = desc = host->sg_cpu;

		for (i = 0; i < sg_len; i++) {
			unsigned int length = sg_dma_len(&data->sg[i]);

			u32 mem_addr = sg_dma_address(&data->sg[i]);

			for ( ; length ; desc++) {
				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
					   length : DW_MCI_DESC_DATA_LENGTH;

				length -= desc_len;

				/*
				 * Set the OWN bit and disable interrupts
				 * for this descriptor
				 */
				desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
							 IDMAC_DES0_DIC |
							 IDMAC_DES0_CH);

				/* Buffer length */
				IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

				/* Physical address to DMA to/from */
				desc->des2 = cpu_to_le32(mem_addr);

				/* Update physical address for the next desc */
				mem_addr += desc_len;

				/* Save pointer to the last descriptor */
				desc_last = desc;
			}
		}

		/* Set first descriptor */
		desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

		/* Set last descriptor */
		desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
						 IDMAC_DES0_DIC));
		desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
	}

	wmb(); /* drain writebuffer */
}

static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

	return 0;
}

static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
				   (sizeof(struct idmac_desc_64addr) *
				    (i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
					 (i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}
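/*
 * A rough sizing example (assuming 4 KiB pages): the 32-bit idmac_desc is
 * 16 bytes, so the ring initialized above holds PAGE_SIZE / 16 = 256
 * descriptors, which at 4 KiB per chained descriptor covers up to about
 * 1 MiB per request.
 */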
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
	if (!host->dms->ch) {
		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return -ENXIO;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}
		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
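/*
 * Informal summary of the checks below: DMA is only attempted when the
 * request is worth the setup cost and safe to map, i.e. at least
 * DW_MCI_DMA_THRESHOLD (16) bytes long, with a word-aligned block size and
 * word-aligned scatterlist entries; anything else falls back to PIO.
 */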
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* PIO should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried.
	 * Thus, the initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}
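/*
 * Worked example for the watermark math above (figures are illustrative,
 * not from any particular SoC): with a 128-entry, 32-bit wide FIFO and
 * 512-byte blocks, blksz_depth = 128 and tx_wmark = tx_wmark_invers = 64;
 * the largest burst size dividing both is 64, so the FIFOTH value encodes
 * an MSIZE of 64 transfers with rx_wmark = 63 and tx_wmark = 64.
 */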
static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset
	 * is in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
	    (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * The card write threshold was introduced in 2.80a.
	 * It's used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
	    !(host->timing != MMC_TIMING_MMC_HS400))
		return;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		/* We can't do DMA */
		dev_err(host->dev, "%s: failed to start DMA.\n", __func__);
		return -ENODEV;
	}

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If the next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		dev_info(&slot->mmc->class_dev,
			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
			 slot->id, host->bus_hz, clock,
			 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating that the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				  jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
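/*
 * Note on pre-defined transfers: when the core supplies mrq->sbc
 * (SET_BLOCK_COUNT, CMD23), that command is started first and the actual
 * data command is only issued from the tasklet once the CMD23 response
 * has come back without error.
 */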
static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;
	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
						    ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return, if failed to turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage. Note that some instances of dw_mmc may use
	 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly. Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);

		if (ret) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d - %s V\n",
				ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if ((mmc->caps & MMC_CAP_NEEDS_POLL) || !mmc_card_is_removable(mmc))
		present = 1;
	else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}

static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}
static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle. According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request = dw_mci_request,
	.pre_req = dw_mci_pre_req,
	.post_req = dw_mci_post_req,
	.set_ios = dw_mci_set_ios,
	.get_ro = dw_mci_get_ro,
	.get_cd = dw_mci_get_cd,
	.hw_reset = dw_mci_hw_reset,
	.enable_sdio_irq = dw_mci_enable_sdio_irq,
	.execute_tuning = dw_mci_execute_tuning,
	.card_busy = dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card = dw_mci_init_card,
	.prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
};

static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
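/*
 * Reader's note: for long (136-bit) responses this controller returns the
 * least significant word in RESP0, so the mapping below fills resp[3..0]
 * from RESP0..RESP3 to match the core's convention that resp[0] holds the
 * most significant word.
 */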
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}

static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

static void dw_mci_set_drto(struct dw_mci *host)
{
	unsigned int drto_clks;
	unsigned int drto_ms;

	drto_clks = mci_readl(host, TMOUT) >> 8;
	drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);

	/* add a bit of spare time */
	drto_ms += 10;

	mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
}

static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data *data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				/*
				 * During UHS tuning sequence, sending the stop
				 * command after the response CRC error would
				 * throw the system into a confused state
				 * causing all future tuning phases to report
				 * failure.
				 *
				 * In such case, the controller will move into
				 * a data transfer state after a response error
				 * or response CRC error. Let's let that finish
				 * before trying to send a stop, so we'll go to
				 * STATE_SENDING_DATA.
				 *
				 * Although letting the data transfer take place
				 * will waste a bit of time (we already know
				 * the command was bad), it can't cause any
				 * errors since it's possible it would have
				 * taken place anyway if this tasklet got
				 * delayed. Allowing the transfer to take place
				 * avoids races and keeps things simple.
				 */
				if ((err != -ETIMEDOUT) &&
				    (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
					state = STATE_SENDING_DATA;
					continue;
				}

				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events)) {
				/*
				 * If all data-related interrupts don't come
				 * within the given time while reading data,
				 * set the data read timeout.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed. This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				if (data->stop ||
				    !(host->data_status & (SDMMC_INT_DRTO |
							   SDMMC_INT_EBE)))
					send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events)) {
				/*
				 * If the data error interrupt comes but the
				 * data over interrupt doesn't come within the
				 * given time while reading data, set the data
				 * read timeout.
				 */
				if (host->dir_status == DW_MCI_RECV_STATUS)
					dw_mci_set_drto(host);
				break;
			}

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer */
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err is non-zero, the stop/abort command has
			 * already been issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);

}

/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min_t(int, cnt, host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}
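/*
 * Informal note on the PIO routines below: the data FIFO is always accessed
 * at its full width (2, 4 or 8 bytes depending on host->data_shift), so any
 * leftover bytes of a buffer are staged in host->part_buf by the helpers
 * above and flushed with the next chunk or at the end of the transfer.
 */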
2010 cnt -= len; 2011 if (host->part_buf_count == 2) { 2012 mci_fifo_writew(host->fifo_reg, host->part_buf16); 2013 host->part_buf_count = 0; 2014 } 2015 } 2016 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2017 if (unlikely((unsigned long)buf & 0x1)) { 2018 while (cnt >= 2) { 2019 u16 aligned_buf[64]; 2020 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 2021 int items = len >> 1; 2022 int i; 2023 /* memcpy from input buffer into aligned buffer */ 2024 memcpy(aligned_buf, buf, len); 2025 buf += len; 2026 cnt -= len; 2027 /* push data from aligned buffer into fifo */ 2028 for (i = 0; i < items; ++i) 2029 mci_fifo_writew(host->fifo_reg, aligned_buf[i]); 2030 } 2031 } else 2032 #endif 2033 { 2034 u16 *pdata = buf; 2035 2036 for (; cnt >= 2; cnt -= 2) 2037 mci_fifo_writew(host->fifo_reg, *pdata++); 2038 buf = pdata; 2039 } 2040 /* put anything remaining in the part_buf */ 2041 if (cnt) { 2042 dw_mci_set_part_bytes(host, buf, cnt); 2043 /* Push data if we have reached the expected data length */ 2044 if ((data->bytes_xfered + init_cnt) == 2045 (data->blksz * data->blocks)) 2046 mci_fifo_writew(host->fifo_reg, host->part_buf16); 2047 } 2048 } 2049 2050 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 2051 { 2052 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2053 if (unlikely((unsigned long)buf & 0x1)) { 2054 while (cnt >= 2) { 2055 /* pull data from fifo into aligned buffer */ 2056 u16 aligned_buf[64]; 2057 int len = min(cnt & -2, (int)sizeof(aligned_buf)); 2058 int items = len >> 1; 2059 int i; 2060 2061 for (i = 0; i < items; ++i) 2062 aligned_buf[i] = mci_fifo_readw(host->fifo_reg); 2063 /* memcpy from aligned buffer into output buffer */ 2064 memcpy(buf, aligned_buf, len); 2065 buf += len; 2066 cnt -= len; 2067 } 2068 } else 2069 #endif 2070 { 2071 u16 *pdata = buf; 2072 2073 for (; cnt >= 2; cnt -= 2) 2074 *pdata++ = mci_fifo_readw(host->fifo_reg); 2075 buf = pdata; 2076 } 2077 if (cnt) { 2078 host->part_buf16 = mci_fifo_readw(host->fifo_reg); 2079 dw_mci_pull_final_bytes(host, buf, cnt); 2080 } 2081 } 2082 2083 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 2084 { 2085 struct mmc_data *data = host->data; 2086 int init_cnt = cnt; 2087 2088 /* try and push anything in the part_buf */ 2089 if (unlikely(host->part_buf_count)) { 2090 int len = dw_mci_push_part_bytes(host, buf, cnt); 2091 2092 buf += len; 2093 cnt -= len; 2094 if (host->part_buf_count == 4) { 2095 mci_fifo_writel(host->fifo_reg, host->part_buf32); 2096 host->part_buf_count = 0; 2097 } 2098 } 2099 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2100 if (unlikely((unsigned long)buf & 0x3)) { 2101 while (cnt >= 4) { 2102 u32 aligned_buf[32]; 2103 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 2104 int items = len >> 2; 2105 int i; 2106 /* memcpy from input buffer into aligned buffer */ 2107 memcpy(aligned_buf, buf, len); 2108 buf += len; 2109 cnt -= len; 2110 /* push data from aligned buffer into fifo */ 2111 for (i = 0; i < items; ++i) 2112 mci_fifo_writel(host->fifo_reg, aligned_buf[i]); 2113 } 2114 } else 2115 #endif 2116 { 2117 u32 *pdata = buf; 2118 2119 for (; cnt >= 4; cnt -= 4) 2120 mci_fifo_writel(host->fifo_reg, *pdata++); 2121 buf = pdata; 2122 } 2123 /* put anything remaining in the part_buf */ 2124 if (cnt) { 2125 dw_mci_set_part_bytes(host, buf, cnt); 2126 /* Push data if we have reached the expected data length */ 2127 if ((data->bytes_xfered + init_cnt) == 2128 (data->blksz * data->blocks)) 2129 mci_fifo_writel(host->fifo_reg, host->part_buf32); 2130 } 2131 } 
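/*
 * Note on the PIO push/pull helpers: host->part_buf holds the tail of a
 * scatterlist chunk that does not fill a whole FIFO word.  As a worked
 * example, with a 32-bit FIFO (host->data_shift == 2) pushing 10 bytes
 * writes two full words and leaves 2 bytes in part_buf; those bytes are
 * flushed either when a later chunk completes the word or, at the end of
 * the transfer, when bytes_xfered + init_cnt equals blksz * blocks.
 */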
2132 2133 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 2134 { 2135 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2136 if (unlikely((unsigned long)buf & 0x3)) { 2137 while (cnt >= 4) { 2138 /* pull data from fifo into aligned buffer */ 2139 u32 aligned_buf[32]; 2140 int len = min(cnt & -4, (int)sizeof(aligned_buf)); 2141 int items = len >> 2; 2142 int i; 2143 2144 for (i = 0; i < items; ++i) 2145 aligned_buf[i] = mci_fifo_readl(host->fifo_reg); 2146 /* memcpy from aligned buffer into output buffer */ 2147 memcpy(buf, aligned_buf, len); 2148 buf += len; 2149 cnt -= len; 2150 } 2151 } else 2152 #endif 2153 { 2154 u32 *pdata = buf; 2155 2156 for (; cnt >= 4; cnt -= 4) 2157 *pdata++ = mci_fifo_readl(host->fifo_reg); 2158 buf = pdata; 2159 } 2160 if (cnt) { 2161 host->part_buf32 = mci_fifo_readl(host->fifo_reg); 2162 dw_mci_pull_final_bytes(host, buf, cnt); 2163 } 2164 } 2165 2166 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 2167 { 2168 struct mmc_data *data = host->data; 2169 int init_cnt = cnt; 2170 2171 /* try and push anything in the part_buf */ 2172 if (unlikely(host->part_buf_count)) { 2173 int len = dw_mci_push_part_bytes(host, buf, cnt); 2174 2175 buf += len; 2176 cnt -= len; 2177 2178 if (host->part_buf_count == 8) { 2179 mci_fifo_writeq(host->fifo_reg, host->part_buf); 2180 host->part_buf_count = 0; 2181 } 2182 } 2183 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2184 if (unlikely((unsigned long)buf & 0x7)) { 2185 while (cnt >= 8) { 2186 u64 aligned_buf[16]; 2187 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 2188 int items = len >> 3; 2189 int i; 2190 /* memcpy from input buffer into aligned buffer */ 2191 memcpy(aligned_buf, buf, len); 2192 buf += len; 2193 cnt -= len; 2194 /* push data from aligned buffer into fifo */ 2195 for (i = 0; i < items; ++i) 2196 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]); 2197 } 2198 } else 2199 #endif 2200 { 2201 u64 *pdata = buf; 2202 2203 for (; cnt >= 8; cnt -= 8) 2204 mci_fifo_writeq(host->fifo_reg, *pdata++); 2205 buf = pdata; 2206 } 2207 /* put anything remaining in the part_buf */ 2208 if (cnt) { 2209 dw_mci_set_part_bytes(host, buf, cnt); 2210 /* Push data if we have reached the expected data length */ 2211 if ((data->bytes_xfered + init_cnt) == 2212 (data->blksz * data->blocks)) 2213 mci_fifo_writeq(host->fifo_reg, host->part_buf); 2214 } 2215 } 2216 2217 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 2218 { 2219 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2220 if (unlikely((unsigned long)buf & 0x7)) { 2221 while (cnt >= 8) { 2222 /* pull data from fifo into aligned buffer */ 2223 u64 aligned_buf[16]; 2224 int len = min(cnt & -8, (int)sizeof(aligned_buf)); 2225 int items = len >> 3; 2226 int i; 2227 2228 for (i = 0; i < items; ++i) 2229 aligned_buf[i] = mci_fifo_readq(host->fifo_reg); 2230 2231 /* memcpy from aligned buffer into output buffer */ 2232 memcpy(buf, aligned_buf, len); 2233 buf += len; 2234 cnt -= len; 2235 } 2236 } else 2237 #endif 2238 { 2239 u64 *pdata = buf; 2240 2241 for (; cnt >= 8; cnt -= 8) 2242 *pdata++ = mci_fifo_readq(host->fifo_reg); 2243 buf = pdata; 2244 } 2245 if (cnt) { 2246 host->part_buf = mci_fifo_readq(host->fifo_reg); 2247 dw_mci_pull_final_bytes(host, buf, cnt); 2248 } 2249 } 2250 2251 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) 2252 { 2253 int len; 2254 2255 /* get remaining partial bytes */ 2256 len = dw_mci_pull_part_bytes(host, buf, cnt); 2257 if (unlikely(len == cnt)) 2258 return; 2259 buf 
+= len; 2260 cnt -= len; 2261 2262 /* get the rest of the data */ 2263 host->pull_data(host, buf, cnt); 2264 } 2265 2266 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) 2267 { 2268 struct sg_mapping_iter *sg_miter = &host->sg_miter; 2269 void *buf; 2270 unsigned int offset; 2271 struct mmc_data *data = host->data; 2272 int shift = host->data_shift; 2273 u32 status; 2274 unsigned int len; 2275 unsigned int remain, fcnt; 2276 2277 do { 2278 if (!sg_miter_next(sg_miter)) 2279 goto done; 2280 2281 host->sg = sg_miter->piter.sg; 2282 buf = sg_miter->addr; 2283 remain = sg_miter->length; 2284 offset = 0; 2285 2286 do { 2287 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) 2288 << shift) + host->part_buf_count; 2289 len = min(remain, fcnt); 2290 if (!len) 2291 break; 2292 dw_mci_pull_data(host, (void *)(buf + offset), len); 2293 data->bytes_xfered += len; 2294 offset += len; 2295 remain -= len; 2296 } while (remain); 2297 2298 sg_miter->consumed = offset; 2299 status = mci_readl(host, MINTSTS); 2300 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2301 /* if the RXDR is ready read again */ 2302 } while ((status & SDMMC_INT_RXDR) || 2303 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); 2304 2305 if (!remain) { 2306 if (!sg_miter_next(sg_miter)) 2307 goto done; 2308 sg_miter->consumed = 0; 2309 } 2310 sg_miter_stop(sg_miter); 2311 return; 2312 2313 done: 2314 sg_miter_stop(sg_miter); 2315 host->sg = NULL; 2316 smp_wmb(); /* drain writebuffer */ 2317 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2318 } 2319 2320 static void dw_mci_write_data_pio(struct dw_mci *host) 2321 { 2322 struct sg_mapping_iter *sg_miter = &host->sg_miter; 2323 void *buf; 2324 unsigned int offset; 2325 struct mmc_data *data = host->data; 2326 int shift = host->data_shift; 2327 u32 status; 2328 unsigned int len; 2329 unsigned int fifo_depth = host->fifo_depth; 2330 unsigned int remain, fcnt; 2331 2332 do { 2333 if (!sg_miter_next(sg_miter)) 2334 goto done; 2335 2336 host->sg = sg_miter->piter.sg; 2337 buf = sg_miter->addr; 2338 remain = sg_miter->length; 2339 offset = 0; 2340 2341 do { 2342 fcnt = ((fifo_depth - 2343 SDMMC_GET_FCNT(mci_readl(host, STATUS))) 2344 << shift) - host->part_buf_count; 2345 len = min(remain, fcnt); 2346 if (!len) 2347 break; 2348 host->push_data(host, (void *)(buf + offset), len); 2349 data->bytes_xfered += len; 2350 offset += len; 2351 remain -= len; 2352 } while (remain); 2353 2354 sg_miter->consumed = offset; 2355 status = mci_readl(host, MINTSTS); 2356 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2357 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ 2358 2359 if (!remain) { 2360 if (!sg_miter_next(sg_miter)) 2361 goto done; 2362 sg_miter->consumed = 0; 2363 } 2364 sg_miter_stop(sg_miter); 2365 return; 2366 2367 done: 2368 sg_miter_stop(sg_miter); 2369 host->sg = NULL; 2370 smp_wmb(); /* drain writebuffer */ 2371 set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2372 } 2373 2374 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 2375 { 2376 if (!host->cmd_status) 2377 host->cmd_status = status; 2378 2379 smp_wmb(); /* drain writebuffer */ 2380 2381 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2382 tasklet_schedule(&host->tasklet); 2383 } 2384 2385 static void dw_mci_handle_cd(struct dw_mci *host) 2386 { 2387 int i; 2388 2389 for (i = 0; i < host->num_slots; i++) { 2390 struct dw_mci_slot *slot = host->slot[i]; 2391 2392 if (!slot) 2393 continue; 2394 2395 if (slot->mmc->ops->card_event) 2396 slot->mmc->ops->card_event(slot->mmc); 2397 
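/* Debounce: rescan the slot after the platform's detect_delay_ms. */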
mmc_detect_change(slot->mmc, 2398 msecs_to_jiffies(host->pdata->detect_delay_ms)); 2399 } 2400 } 2401 2402 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 2403 { 2404 struct dw_mci *host = dev_id; 2405 u32 pending; 2406 int i; 2407 2408 pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 2409 2410 if (pending) { 2411 /* Check volt switch first, since it can look like an error */ 2412 if ((host->state == STATE_SENDING_CMD11) && 2413 (pending & SDMMC_INT_VOLT_SWITCH)) { 2414 unsigned long irqflags; 2415 2416 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); 2417 pending &= ~SDMMC_INT_VOLT_SWITCH; 2418 2419 /* 2420 * Hold the lock; we know cmd11_timer can't be kicked 2421 * off after the lock is released, so safe to delete. 2422 */ 2423 spin_lock_irqsave(&host->irq_lock, irqflags); 2424 dw_mci_cmd_interrupt(host, pending); 2425 spin_unlock_irqrestore(&host->irq_lock, irqflags); 2426 2427 del_timer(&host->cmd11_timer); 2428 } 2429 2430 if (pending & DW_MCI_CMD_ERROR_FLAGS) { 2431 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 2432 host->cmd_status = pending; 2433 smp_wmb(); /* drain writebuffer */ 2434 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2435 } 2436 2437 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 2438 /* if there is an error report DATA_ERROR */ 2439 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 2440 host->data_status = pending; 2441 smp_wmb(); /* drain writebuffer */ 2442 set_bit(EVENT_DATA_ERROR, &host->pending_events); 2443 tasklet_schedule(&host->tasklet); 2444 } 2445 2446 if (pending & SDMMC_INT_DATA_OVER) { 2447 del_timer(&host->dto_timer); 2448 2449 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 2450 if (!host->data_status) 2451 host->data_status = pending; 2452 smp_wmb(); /* drain writebuffer */ 2453 if (host->dir_status == DW_MCI_RECV_STATUS) { 2454 if (host->sg != NULL) 2455 dw_mci_read_data_pio(host, true); 2456 } 2457 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2458 tasklet_schedule(&host->tasklet); 2459 } 2460 2461 if (pending & SDMMC_INT_RXDR) { 2462 mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2463 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) 2464 dw_mci_read_data_pio(host, false); 2465 } 2466 2467 if (pending & SDMMC_INT_TXDR) { 2468 mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2469 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) 2470 dw_mci_write_data_pio(host); 2471 } 2472 2473 if (pending & SDMMC_INT_CMD_DONE) { 2474 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 2475 dw_mci_cmd_interrupt(host, pending); 2476 } 2477 2478 if (pending & SDMMC_INT_CD) { 2479 mci_writel(host, RINTSTS, SDMMC_INT_CD); 2480 dw_mci_handle_cd(host); 2481 } 2482 2483 /* Handle SDIO Interrupts */ 2484 for (i = 0; i < host->num_slots; i++) { 2485 struct dw_mci_slot *slot = host->slot[i]; 2486 2487 if (!slot) 2488 continue; 2489 2490 if (pending & SDMMC_INT_SDIO(slot->sdio_id)) { 2491 mci_writel(host, RINTSTS, 2492 SDMMC_INT_SDIO(slot->sdio_id)); 2493 mmc_signal_sdio_irq(slot->mmc); 2494 } 2495 } 2496 2497 } 2498 2499 if (host->use_dma != TRANS_MODE_IDMAC) 2500 return IRQ_HANDLED; 2501 2502 /* Handle IDMA interrupts */ 2503 if (host->dma_64bit_address == 1) { 2504 pending = mci_readl(host, IDSTS64); 2505 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2506 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | 2507 SDMMC_IDMAC_INT_RI); 2508 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); 2509 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 2510 host->dma_ops->complete((void *)host); 2511 } 2512 } else { 2513 pending = 
mci_readl(host, IDSTS); 2514 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 2515 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | 2516 SDMMC_IDMAC_INT_RI); 2517 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 2518 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 2519 host->dma_ops->complete((void *)host); 2520 } 2521 } 2522 2523 return IRQ_HANDLED; 2524 } 2525 2526 #ifdef CONFIG_OF 2527 /* given a slot, find out the device node representing that slot */ 2528 static struct device_node *dw_mci_of_find_slot_node(struct dw_mci_slot *slot) 2529 { 2530 struct device *dev = slot->mmc->parent; 2531 struct device_node *np; 2532 const __be32 *addr; 2533 int len; 2534 2535 if (!dev || !dev->of_node) 2536 return NULL; 2537 2538 for_each_child_of_node(dev->of_node, np) { 2539 addr = of_get_property(np, "reg", &len); 2540 if (!addr || (len < sizeof(int))) 2541 continue; 2542 if (be32_to_cpup(addr) == slot->id) 2543 return np; 2544 } 2545 return NULL; 2546 } 2547 2548 static void dw_mci_slot_of_parse(struct dw_mci_slot *slot) 2549 { 2550 struct device_node *np = dw_mci_of_find_slot_node(slot); 2551 2552 if (!np) 2553 return; 2554 2555 if (of_property_read_bool(np, "disable-wp")) { 2556 slot->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; 2557 dev_warn(slot->mmc->parent, 2558 "Slot quirk 'disable-wp' is deprecated\n"); 2559 } 2560 } 2561 #else /* CONFIG_OF */ 2562 static void dw_mci_slot_of_parse(struct dw_mci_slot *slot) 2563 { 2564 } 2565 #endif /* CONFIG_OF */ 2566 2567 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) 2568 { 2569 struct mmc_host *mmc; 2570 struct dw_mci_slot *slot; 2571 const struct dw_mci_drv_data *drv_data = host->drv_data; 2572 int ctrl_id, ret; 2573 u32 freq[2]; 2574 2575 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); 2576 if (!mmc) 2577 return -ENOMEM; 2578 2579 slot = mmc_priv(mmc); 2580 slot->id = id; 2581 slot->sdio_id = host->sdio_id0 + id; 2582 slot->mmc = mmc; 2583 slot->host = host; 2584 host->slot[id] = slot; 2585 2586 mmc->ops = &dw_mci_ops; 2587 if (of_property_read_u32_array(host->dev->of_node, 2588 "clock-freq-min-max", freq, 2)) { 2589 mmc->f_min = DW_MCI_FREQ_MIN; 2590 mmc->f_max = DW_MCI_FREQ_MAX; 2591 } else { 2592 mmc->f_min = freq[0]; 2593 mmc->f_max = freq[1]; 2594 } 2595 2596 /*if there are external regulators, get them*/ 2597 ret = mmc_regulator_get_supply(mmc); 2598 if (ret == -EPROBE_DEFER) 2599 goto err_host_allocated; 2600 2601 if (!mmc->ocr_avail) 2602 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 2603 2604 if (host->pdata->caps) 2605 mmc->caps = host->pdata->caps; 2606 2607 /* 2608 * Support MMC_CAP_ERASE by default. 2609 * It needs to use trim/discard/erase commands. 2610 */ 2611 mmc->caps |= MMC_CAP_ERASE; 2612 2613 if (host->pdata->pm_caps) 2614 mmc->pm_caps = host->pdata->pm_caps; 2615 2616 if (host->dev->of_node) { 2617 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); 2618 if (ctrl_id < 0) 2619 ctrl_id = 0; 2620 } else { 2621 ctrl_id = to_platform_device(host->dev)->id; 2622 } 2623 if (drv_data && drv_data->caps) 2624 mmc->caps |= drv_data->caps[ctrl_id]; 2625 2626 if (host->pdata->caps2) 2627 mmc->caps2 = host->pdata->caps2; 2628 2629 dw_mci_slot_of_parse(slot); 2630 2631 ret = mmc_of_parse(mmc); 2632 if (ret) 2633 goto err_host_allocated; 2634 2635 /* Useful defaults if platform data is unset. 
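 * With the internal DMAC each segment is capped at one descriptor's worth
 * of data (0x1000 bytes); the eDMA and PIO paths are only bounded by the
 * 16-bit BLKSIZ register and a block count.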
*/ 2636 if (host->use_dma == TRANS_MODE_IDMAC) { 2637 mmc->max_segs = host->ring_size; 2638 mmc->max_blk_size = 65535; 2639 mmc->max_seg_size = 0x1000; 2640 mmc->max_req_size = mmc->max_seg_size * host->ring_size; 2641 mmc->max_blk_count = mmc->max_req_size / 512; 2642 } else if (host->use_dma == TRANS_MODE_EDMAC) { 2643 mmc->max_segs = 64; 2644 mmc->max_blk_size = 65535; 2645 mmc->max_blk_count = 65535; 2646 mmc->max_req_size = 2647 mmc->max_blk_size * mmc->max_blk_count; 2648 mmc->max_seg_size = mmc->max_req_size; 2649 } else { 2650 /* TRANS_MODE_PIO */ 2651 mmc->max_segs = 64; 2652 mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */ 2653 mmc->max_blk_count = 512; 2654 mmc->max_req_size = mmc->max_blk_size * 2655 mmc->max_blk_count; 2656 mmc->max_seg_size = mmc->max_req_size; 2657 } 2658 2659 dw_mci_get_cd(mmc); 2660 2661 ret = mmc_add_host(mmc); 2662 if (ret) 2663 goto err_host_allocated; 2664 2665 #if defined(CONFIG_DEBUG_FS) 2666 dw_mci_init_debugfs(slot); 2667 #endif 2668 2669 return 0; 2670 2671 err_host_allocated: 2672 mmc_free_host(mmc); 2673 return ret; 2674 } 2675 2676 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) 2677 { 2678 /* Debugfs stuff is cleaned up by mmc core */ 2679 mmc_remove_host(slot->mmc); 2680 slot->host->slot[id] = NULL; 2681 mmc_free_host(slot->mmc); 2682 } 2683 2684 static void dw_mci_init_dma(struct dw_mci *host) 2685 { 2686 int addr_config; 2687 struct device *dev = host->dev; 2688 struct device_node *np = dev->of_node; 2689 2690 /* 2691 * Check the transfer mode from HCON[17:16]. 2692 * The dw_mmc databook's description is ambiguous; the encodings mean: 2693 * 2b'00: No DMA Interface -> Actually means using Internal DMA block 2694 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block 2695 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block 2696 * 2b'11: Non DW DMA Interface -> pio only 2697 * Compared to DesignWare DMA Interface, Generic DMA Interface has a 2698 * simpler request/acknowledge handshake mechanism, and both are 2699 * treated as an external DMA master by dw_mmc.
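 * The raw HCON value read below is mapped onto TRANS_MODE_IDMAC,
 * TRANS_MODE_EDMAC or TRANS_MODE_PIO before any further setup.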
2700 */ 2701 host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON)); 2702 if (host->use_dma == DMA_INTERFACE_IDMA) { 2703 host->use_dma = TRANS_MODE_IDMAC; 2704 } else if (host->use_dma == DMA_INTERFACE_DWDMA || 2705 host->use_dma == DMA_INTERFACE_GDMA) { 2706 host->use_dma = TRANS_MODE_EDMAC; 2707 } else { 2708 goto no_dma; 2709 } 2710 2711 /* Determine which DMA interface to use */ 2712 if (host->use_dma == TRANS_MODE_IDMAC) { 2713 /* 2714 * Check ADDR_CONFIG bit in HCON to find 2715 * IDMAC address bus width 2716 */ 2717 addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON)); 2718 2719 if (addr_config == 1) { 2720 /* host supports IDMAC in 64-bit address mode */ 2721 host->dma_64bit_address = 1; 2722 dev_info(host->dev, 2723 "IDMAC supports 64-bit address mode.\n"); 2724 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64))) 2725 dma_set_coherent_mask(host->dev, 2726 DMA_BIT_MASK(64)); 2727 } else { 2728 /* host supports IDMAC in 32-bit address mode */ 2729 host->dma_64bit_address = 0; 2730 dev_info(host->dev, 2731 "IDMAC supports 32-bit address mode.\n"); 2732 } 2733 2734 /* Alloc memory for sg translation */ 2735 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, 2736 &host->sg_dma, GFP_KERNEL); 2737 if (!host->sg_cpu) { 2738 dev_err(host->dev, 2739 "%s: could not alloc DMA memory\n", 2740 __func__); 2741 goto no_dma; 2742 } 2743 2744 host->dma_ops = &dw_mci_idmac_ops; 2745 dev_info(host->dev, "Using internal DMA controller.\n"); 2746 } else { 2747 /* TRANS_MODE_EDMAC: check dma bindings again */ 2748 if ((of_property_count_strings(np, "dma-names") < 0) || 2749 (!of_find_property(np, "dmas", NULL))) { 2750 goto no_dma; 2751 } 2752 host->dma_ops = &dw_mci_edmac_ops; 2753 dev_info(host->dev, "Using external DMA controller.\n"); 2754 } 2755 2756 if (host->dma_ops->init && host->dma_ops->start && 2757 host->dma_ops->stop && host->dma_ops->cleanup) { 2758 if (host->dma_ops->init(host)) { 2759 dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n", 2760 __func__); 2761 goto no_dma; 2762 } 2763 } else { 2764 dev_err(host->dev, "DMA initialization not found.\n"); 2765 goto no_dma; 2766 } 2767 2768 return; 2769 2770 no_dma: 2771 dev_info(host->dev, "Using PIO mode.\n"); 2772 host->use_dma = TRANS_MODE_PIO; 2773 } 2774 2775 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) 2776 { 2777 unsigned long timeout = jiffies + msecs_to_jiffies(500); 2778 u32 ctrl; 2779 2780 ctrl = mci_readl(host, CTRL); 2781 ctrl |= reset; 2782 mci_writel(host, CTRL, ctrl); 2783 2784 /* wait till resets clear */ 2785 do { 2786 ctrl = mci_readl(host, CTRL); 2787 if (!(ctrl & reset)) 2788 return true; 2789 } while (time_before(jiffies, timeout)); 2790 2791 dev_err(host->dev, 2792 "Timeout resetting block (ctrl reset %#x)\n", 2793 ctrl & reset); 2794 2795 return false; 2796 } 2797 2798 static bool dw_mci_reset(struct dw_mci *host) 2799 { 2800 u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET; 2801 bool ret = false; 2802 2803 /* 2804 * Reseting generates a block interrupt, hence setting 2805 * the scatter-gather pointer to NULL. 2806 */ 2807 if (host->sg) { 2808 sg_miter_stop(&host->sg_miter); 2809 host->sg = NULL; 2810 } 2811 2812 if (host->use_dma) 2813 flags |= SDMMC_CTRL_DMA_RESET; 2814 2815 if (dw_mci_ctrl_reset(host, flags)) { 2816 /* 2817 * In all cases we clear the RAWINTS register to clear any 2818 * interrupts. 
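 * Writing all-ones to RINTSTS acknowledges every latched status bit.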
2819 */ 2820 mci_writel(host, RINTSTS, 0xFFFFFFFF); 2821 2822 /* if using dma we wait for dma_req to clear */ 2823 if (host->use_dma) { 2824 unsigned long timeout = jiffies + msecs_to_jiffies(500); 2825 u32 status; 2826 2827 do { 2828 status = mci_readl(host, STATUS); 2829 if (!(status & SDMMC_STATUS_DMA_REQ)) 2830 break; 2831 cpu_relax(); 2832 } while (time_before(jiffies, timeout)); 2833 2834 if (status & SDMMC_STATUS_DMA_REQ) { 2835 dev_err(host->dev, 2836 "%s: Timeout waiting for dma_req to clear during reset\n", 2837 __func__); 2838 goto ciu_out; 2839 } 2840 2841 /* when using DMA next we reset the fifo again */ 2842 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET)) 2843 goto ciu_out; 2844 } 2845 } else { 2846 /* if the controller reset bit did clear, then set clock regs */ 2847 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) { 2848 dev_err(host->dev, 2849 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n", 2850 __func__); 2851 goto ciu_out; 2852 } 2853 } 2854 2855 if (host->use_dma == TRANS_MODE_IDMAC) 2856 /* It is also recommended that we reset and reprogram idmac */ 2857 dw_mci_idmac_reset(host); 2858 2859 ret = true; 2860 2861 ciu_out: 2862 /* After a CTRL reset we need to have CIU set clock registers */ 2863 mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0); 2864 2865 return ret; 2866 } 2867 2868 static void dw_mci_cmd11_timer(unsigned long arg) 2869 { 2870 struct dw_mci *host = (struct dw_mci *)arg; 2871 2872 if (host->state != STATE_SENDING_CMD11) { 2873 dev_warn(host->dev, "Unexpected CMD11 timeout\n"); 2874 return; 2875 } 2876 2877 host->cmd_status = SDMMC_INT_RTO; 2878 set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2879 tasklet_schedule(&host->tasklet); 2880 } 2881 2882 static void dw_mci_dto_timer(unsigned long arg) 2883 { 2884 struct dw_mci *host = (struct dw_mci *)arg; 2885 2886 switch (host->state) { 2887 case STATE_SENDING_DATA: 2888 case STATE_DATA_BUSY: 2889 /* 2890 * If DTO interrupt does NOT come in sending data state, 2891 * we should notify the driver to terminate current transfer 2892 * and report a data timeout to the core. 
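 * We do that by faking an SDMMC_INT_DRTO status and setting both the
 * DATA_ERROR and DATA_COMPLETE events before scheduling the tasklet.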
2893 */ 2894 host->data_status = SDMMC_INT_DRTO; 2895 set_bit(EVENT_DATA_ERROR, &host->pending_events); 2896 set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2897 tasklet_schedule(&host->tasklet); 2898 break; 2899 default: 2900 break; 2901 } 2902 } 2903 2904 #ifdef CONFIG_OF 2905 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 2906 { 2907 struct dw_mci_board *pdata; 2908 struct device *dev = host->dev; 2909 struct device_node *np = dev->of_node; 2910 const struct dw_mci_drv_data *drv_data = host->drv_data; 2911 int ret; 2912 u32 clock_frequency; 2913 2914 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 2915 if (!pdata) 2916 return ERR_PTR(-ENOMEM); 2917 2918 /* find out number of slots supported */ 2919 of_property_read_u32(np, "num-slots", &pdata->num_slots); 2920 2921 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth)) 2922 dev_info(dev, 2923 "fifo-depth property not found, using value of FIFOTH register as default\n"); 2924 2925 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms); 2926 2927 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency)) 2928 pdata->bus_hz = clock_frequency; 2929 2930 if (drv_data && drv_data->parse_dt) { 2931 ret = drv_data->parse_dt(host); 2932 if (ret) 2933 return ERR_PTR(ret); 2934 } 2935 2936 if (of_find_property(np, "supports-highspeed", NULL)) { 2937 dev_info(dev, "supports-highspeed property is deprecated.\n"); 2938 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 2939 } 2940 2941 return pdata; 2942 } 2943 2944 #else /* CONFIG_OF */ 2945 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 2946 { 2947 return ERR_PTR(-EINVAL); 2948 } 2949 #endif /* CONFIG_OF */ 2950 2951 static void dw_mci_enable_cd(struct dw_mci *host) 2952 { 2953 unsigned long irqflags; 2954 u32 temp; 2955 int i; 2956 struct dw_mci_slot *slot; 2957 2958 /* 2959 * No need for CD if all slots have a non-error GPIO 2960 * as well as broken card detection is found. 
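 * In other words, the controller's card-detect interrupt is only unmasked
 * when some slot actually needs it, i.e. it neither polls
 * (MMC_CAP_NEEDS_POLL) nor has a working card-detect GPIO.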
2961 */ 2962 for (i = 0; i < host->num_slots; i++) { 2963 slot = host->slot[i]; 2964 if (slot->mmc->caps & MMC_CAP_NEEDS_POLL) 2965 return; 2966 2967 if (mmc_gpio_get_cd(slot->mmc) < 0) 2968 break; 2969 } 2970 if (i == host->num_slots) 2971 return; 2972 2973 spin_lock_irqsave(&host->irq_lock, irqflags); 2974 temp = mci_readl(host, INTMASK); 2975 temp |= SDMMC_INT_CD; 2976 mci_writel(host, INTMASK, temp); 2977 spin_unlock_irqrestore(&host->irq_lock, irqflags); 2978 } 2979 2980 int dw_mci_probe(struct dw_mci *host) 2981 { 2982 const struct dw_mci_drv_data *drv_data = host->drv_data; 2983 int width, i, ret = 0; 2984 u32 fifo_size; 2985 int init_slots = 0; 2986 2987 if (!host->pdata) { 2988 host->pdata = dw_mci_parse_dt(host); 2989 if (IS_ERR(host->pdata)) { 2990 dev_err(host->dev, "platform data not available\n"); 2991 return -EINVAL; 2992 } 2993 } 2994 2995 host->biu_clk = devm_clk_get(host->dev, "biu"); 2996 if (IS_ERR(host->biu_clk)) { 2997 dev_dbg(host->dev, "biu clock not available\n"); 2998 } else { 2999 ret = clk_prepare_enable(host->biu_clk); 3000 if (ret) { 3001 dev_err(host->dev, "failed to enable biu clock\n"); 3002 return ret; 3003 } 3004 } 3005 3006 host->ciu_clk = devm_clk_get(host->dev, "ciu"); 3007 if (IS_ERR(host->ciu_clk)) { 3008 dev_dbg(host->dev, "ciu clock not available\n"); 3009 host->bus_hz = host->pdata->bus_hz; 3010 } else { 3011 ret = clk_prepare_enable(host->ciu_clk); 3012 if (ret) { 3013 dev_err(host->dev, "failed to enable ciu clock\n"); 3014 goto err_clk_biu; 3015 } 3016 3017 if (host->pdata->bus_hz) { 3018 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); 3019 if (ret) 3020 dev_warn(host->dev, 3021 "Unable to set bus rate to %uHz\n", 3022 host->pdata->bus_hz); 3023 } 3024 host->bus_hz = clk_get_rate(host->ciu_clk); 3025 } 3026 3027 if (!host->bus_hz) { 3028 dev_err(host->dev, 3029 "Platform data must supply bus speed\n"); 3030 ret = -ENODEV; 3031 goto err_clk_ciu; 3032 } 3033 3034 if (drv_data && drv_data->init) { 3035 ret = drv_data->init(host); 3036 if (ret) { 3037 dev_err(host->dev, 3038 "implementation specific init failed\n"); 3039 goto err_clk_ciu; 3040 } 3041 } 3042 3043 setup_timer(&host->cmd11_timer, 3044 dw_mci_cmd11_timer, (unsigned long)host); 3045 3046 setup_timer(&host->dto_timer, 3047 dw_mci_dto_timer, (unsigned long)host); 3048 3049 spin_lock_init(&host->lock); 3050 spin_lock_init(&host->irq_lock); 3051 INIT_LIST_HEAD(&host->queue); 3052 3053 /* 3054 * Get the host data width - this assumes that HCON has been set with 3055 * the correct values. 
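 * HDATA_WIDTH selects between the 16-, 32- and 64-bit FIFO accessors;
 * host->data_shift ends up as log2 of the FIFO word size in bytes.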
3056 */ 3057 i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON)); 3058 if (!i) { 3059 host->push_data = dw_mci_push_data16; 3060 host->pull_data = dw_mci_pull_data16; 3061 width = 16; 3062 host->data_shift = 1; 3063 } else if (i == 2) { 3064 host->push_data = dw_mci_push_data64; 3065 host->pull_data = dw_mci_pull_data64; 3066 width = 64; 3067 host->data_shift = 3; 3068 } else { 3069 /* Check for a reserved value, and warn if it is */ 3070 WARN((i != 1), 3071 "HCON reports a reserved host data width!\n" 3072 "Defaulting to 32-bit access.\n"); 3073 host->push_data = dw_mci_push_data32; 3074 host->pull_data = dw_mci_pull_data32; 3075 width = 32; 3076 host->data_shift = 2; 3077 } 3078 3079 /* Reset all blocks */ 3080 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3081 ret = -ENODEV; 3082 goto err_clk_ciu; 3083 } 3084 3085 host->dma_ops = host->pdata->dma_ops; 3086 dw_mci_init_dma(host); 3087 3088 /* Clear the interrupts for the host controller */ 3089 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3090 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3091 3092 /* Put in max timeout */ 3093 mci_writel(host, TMOUT, 0xFFFFFFFF); 3094 3095 /* 3096 * FIFO threshold settings RxMark = fifo_size / 2 - 1, 3097 * Tx Mark = fifo_size / 2 DMA Size = 8 3098 */ 3099 if (!host->pdata->fifo_depth) { 3100 /* 3101 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may 3102 * have been overwritten by the bootloader, just like we're 3103 * about to do, so if you know the value for your hardware, you 3104 * should put it in the platform data. 3105 */ 3106 fifo_size = mci_readl(host, FIFOTH); 3107 fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 3108 } else { 3109 fifo_size = host->pdata->fifo_depth; 3110 } 3111 host->fifo_depth = fifo_size; 3112 host->fifoth_val = 3113 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); 3114 mci_writel(host, FIFOTH, host->fifoth_val); 3115 3116 /* disable clock to CIU */ 3117 mci_writel(host, CLKENA, 0); 3118 mci_writel(host, CLKSRC, 0); 3119 3120 /* 3121 * In 2.40a spec, Data offset is changed. 3122 * Need to check the version-id and set data-offset for DATA register. 
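 * Cores older than 2.40a expose the FIFO at DATA_OFFSET, newer ones at
 * DATA_240A_OFFSET; host->fifo_reg is pointed at the right one below.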
3123 */ 3124 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 3125 dev_info(host->dev, "Version ID is %04x\n", host->verid); 3126 3127 if (host->verid < DW_MMC_240A) 3128 host->fifo_reg = host->regs + DATA_OFFSET; 3129 else 3130 host->fifo_reg = host->regs + DATA_240A_OFFSET; 3131 3132 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 3133 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 3134 host->irq_flags, "dw-mci", host); 3135 if (ret) 3136 goto err_dmaunmap; 3137 3138 if (host->pdata->num_slots) 3139 host->num_slots = host->pdata->num_slots; 3140 else 3141 host->num_slots = 1; 3142 3143 if (host->num_slots < 1 || 3144 host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) { 3145 dev_err(host->dev, 3146 "Platform data must supply correct num_slots.\n"); 3147 ret = -ENODEV; 3148 goto err_clk_ciu; 3149 } 3150 3151 /* 3152 * Enable interrupts for command done, data over, data empty, 3153 * receive ready and error such as transmit, receive timeout, crc error 3154 */ 3155 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3156 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3157 DW_MCI_ERROR_FLAGS); 3158 /* Enable mci interrupt */ 3159 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3160 3161 dev_info(host->dev, 3162 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n", 3163 host->irq, width, fifo_size); 3164 3165 /* We need at least one slot to succeed */ 3166 for (i = 0; i < host->num_slots; i++) { 3167 ret = dw_mci_init_slot(host, i); 3168 if (ret) 3169 dev_dbg(host->dev, "slot %d init failed\n", i); 3170 else 3171 init_slots++; 3172 } 3173 3174 if (init_slots) { 3175 dev_info(host->dev, "%d slots initialized\n", init_slots); 3176 } else { 3177 dev_dbg(host->dev, 3178 "attempted to initialize %d slots, but failed on all\n", 3179 host->num_slots); 3180 goto err_dmaunmap; 3181 } 3182 3183 /* Now that slots are all setup, we can enable card detect */ 3184 dw_mci_enable_cd(host); 3185 3186 return 0; 3187 3188 err_dmaunmap: 3189 if (host->use_dma && host->dma_ops->exit) 3190 host->dma_ops->exit(host); 3191 3192 err_clk_ciu: 3193 if (!IS_ERR(host->ciu_clk)) 3194 clk_disable_unprepare(host->ciu_clk); 3195 3196 err_clk_biu: 3197 if (!IS_ERR(host->biu_clk)) 3198 clk_disable_unprepare(host->biu_clk); 3199 3200 return ret; 3201 } 3202 EXPORT_SYMBOL(dw_mci_probe); 3203 3204 void dw_mci_remove(struct dw_mci *host) 3205 { 3206 int i; 3207 3208 for (i = 0; i < host->num_slots; i++) { 3209 dev_dbg(host->dev, "remove slot %d\n", i); 3210 if (host->slot[i]) 3211 dw_mci_cleanup_slot(host->slot[i], i); 3212 } 3213 3214 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3215 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3216 3217 /* disable clock to CIU */ 3218 mci_writel(host, CLKENA, 0); 3219 mci_writel(host, CLKSRC, 0); 3220 3221 if (host->use_dma && host->dma_ops->exit) 3222 host->dma_ops->exit(host); 3223 3224 if (!IS_ERR(host->ciu_clk)) 3225 clk_disable_unprepare(host->ciu_clk); 3226 3227 if (!IS_ERR(host->biu_clk)) 3228 clk_disable_unprepare(host->biu_clk); 3229 } 3230 EXPORT_SYMBOL(dw_mci_remove); 3231 3232 3233 3234 #ifdef CONFIG_PM_SLEEP 3235 /* 3236 * TODO: we should probably disable the clock to the card in the suspend path. 
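 * For now suspend only tears down the DMA engine; dw_mci_resume() resets
 * the controller and reprograms FIFOTH, TMOUT and the interrupt mask from
 * scratch.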
3237 */ 3238 int dw_mci_suspend(struct dw_mci *host) 3239 { 3240 if (host->use_dma && host->dma_ops->exit) 3241 host->dma_ops->exit(host); 3242 3243 return 0; 3244 } 3245 EXPORT_SYMBOL(dw_mci_suspend); 3246 3247 int dw_mci_resume(struct dw_mci *host) 3248 { 3249 int i, ret; 3250 3251 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3252 ret = -ENODEV; 3253 return ret; 3254 } 3255 3256 if (host->use_dma && host->dma_ops->init) 3257 host->dma_ops->init(host); 3258 3259 /* 3260 * Restore the initial value at FIFOTH register 3261 * And Invalidate the prev_blksz with zero 3262 */ 3263 mci_writel(host, FIFOTH, host->fifoth_val); 3264 host->prev_blksz = 0; 3265 3266 /* Put in max timeout */ 3267 mci_writel(host, TMOUT, 0xFFFFFFFF); 3268 3269 mci_writel(host, RINTSTS, 0xFFFFFFFF); 3270 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3271 SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3272 DW_MCI_ERROR_FLAGS); 3273 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3274 3275 for (i = 0; i < host->num_slots; i++) { 3276 struct dw_mci_slot *slot = host->slot[i]; 3277 3278 if (!slot) 3279 continue; 3280 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) { 3281 dw_mci_set_ios(slot->mmc, &slot->mmc->ios); 3282 dw_mci_setup_bus(slot, true); 3283 } 3284 } 3285 3286 /* Now that slots are all setup, we can enable card detect */ 3287 dw_mci_enable_cd(host); 3288 3289 return 0; 3290 } 3291 EXPORT_SYMBOL(dw_mci_resume); 3292 #endif /* CONFIG_PM_SLEEP */ 3293 3294 static int __init dw_mci_init(void) 3295 { 3296 pr_info("Synopsys Designware Multimedia Card Interface Driver\n"); 3297 return 0; 3298 } 3299 3300 static void __exit dw_mci_exit(void) 3301 { 3302 } 3303 3304 module_init(dw_mci_init); 3305 module_exit(dw_mci_exit); 3306 3307 MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 3308 MODULE_AUTHOR("NXP Semiconductor VietNam"); 3309 MODULE_AUTHOR("Imagination Technologies Ltd"); 3310 MODULE_LICENSE("GPL v2"); 3311