1f95f3850SWill Newton /* 2f95f3850SWill Newton * Synopsys DesignWare Multimedia Card Interface driver 3f95f3850SWill Newton * (Based on NXP driver for lpc 31xx) 4f95f3850SWill Newton * 5f95f3850SWill Newton * Copyright (C) 2009 NXP Semiconductors 6f95f3850SWill Newton * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 7f95f3850SWill Newton * 8f95f3850SWill Newton * This program is free software; you can redistribute it and/or modify 9f95f3850SWill Newton * it under the terms of the GNU General Public License as published by 10f95f3850SWill Newton * the Free Software Foundation; either version 2 of the License, or 11f95f3850SWill Newton * (at your option) any later version. 12f95f3850SWill Newton */ 13f95f3850SWill Newton 14f95f3850SWill Newton #include <linux/blkdev.h> 15f95f3850SWill Newton #include <linux/clk.h> 16f95f3850SWill Newton #include <linux/debugfs.h> 17f95f3850SWill Newton #include <linux/device.h> 18f95f3850SWill Newton #include <linux/dma-mapping.h> 19f95f3850SWill Newton #include <linux/err.h> 20f95f3850SWill Newton #include <linux/init.h> 21f95f3850SWill Newton #include <linux/interrupt.h> 22b6d2d81cSShawn Lin #include <linux/iopoll.h> 23f95f3850SWill Newton #include <linux/ioport.h> 24f95f3850SWill Newton #include <linux/module.h> 25f95f3850SWill Newton #include <linux/platform_device.h> 26a6db2c86SDouglas Anderson #include <linux/pm_runtime.h> 27f95f3850SWill Newton #include <linux/seq_file.h> 28f95f3850SWill Newton #include <linux/slab.h> 29f95f3850SWill Newton #include <linux/stat.h> 30f95f3850SWill Newton #include <linux/delay.h> 31f95f3850SWill Newton #include <linux/irq.h> 32b24c8b26SDoug Anderson #include <linux/mmc/card.h> 33f95f3850SWill Newton #include <linux/mmc/host.h> 34f95f3850SWill Newton #include <linux/mmc/mmc.h> 3501730558SDoug Anderson #include <linux/mmc/sd.h> 3690c2143aSSeungwon Jeon #include <linux/mmc/sdio.h> 37f95f3850SWill Newton #include <linux/bitops.h> 38c07946a3SJaehoon Chung #include <linux/regulator/consumer.h> 
39c91eab4bSThomas Abraham #include <linux/of.h> 4055a6ceb2SDoug Anderson #include <linux/of_gpio.h> 41bf626e55SZhangfei Gao #include <linux/mmc/slot-gpio.h> 42f95f3850SWill Newton 43f95f3850SWill Newton #include "dw_mmc.h" 44f95f3850SWill Newton 45f95f3850SWill Newton /* Common flag combinations */ 463f7eec62SJaehoon Chung #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \ 47f95f3850SWill Newton SDMMC_INT_HTO | SDMMC_INT_SBE | \ 487a3c5677SDoug Anderson SDMMC_INT_EBE | SDMMC_INT_HLE) 49f95f3850SWill Newton #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ 507a3c5677SDoug Anderson SDMMC_INT_RESP_ERR | SDMMC_INT_HLE) 51f95f3850SWill Newton #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \ 527a3c5677SDoug Anderson DW_MCI_CMD_ERROR_FLAGS) 53f95f3850SWill Newton #define DW_MCI_SEND_STATUS 1 54f95f3850SWill Newton #define DW_MCI_RECV_STATUS 2 55f95f3850SWill Newton #define DW_MCI_DMA_THRESHOLD 16 56f95f3850SWill Newton 571f44a2a5SSeungwon Jeon #define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ 5872e83577SJaehoon Chung #define DW_MCI_FREQ_MIN 100000 /* unit: HZ */ 591f44a2a5SSeungwon Jeon 60fc79a4d6SJoonyoung Shim #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ 61fc79a4d6SJoonyoung Shim SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ 62fc79a4d6SJoonyoung Shim SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ 63fc79a4d6SJoonyoung Shim SDMMC_IDMAC_INT_TI) 64fc79a4d6SJoonyoung Shim 65cc190d4cSShawn Lin #define DESC_RING_BUF_SZ PAGE_SIZE 66cc190d4cSShawn Lin 6769d99fdcSPrabu Thangamuthu struct idmac_desc_64addr { 6869d99fdcSPrabu Thangamuthu u32 des0; /* Control Descriptor */ 69b6d2d81cSShawn Lin #define IDMAC_OWN_CLR64(x) \ 70b6d2d81cSShawn Lin !((x) & cpu_to_le32(IDMAC_DES0_OWN)) 7169d99fdcSPrabu Thangamuthu 7269d99fdcSPrabu Thangamuthu u32 des1; /* Reserved */ 7369d99fdcSPrabu Thangamuthu 7469d99fdcSPrabu Thangamuthu u32 des2; /*Buffer sizes */ 7569d99fdcSPrabu Thangamuthu #define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \ 
766687c42fSBen Dooks ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \ 776687c42fSBen Dooks ((cpu_to_le32(s)) & cpu_to_le32(0x1fff))) 7869d99fdcSPrabu Thangamuthu 7969d99fdcSPrabu Thangamuthu u32 des3; /* Reserved */ 8069d99fdcSPrabu Thangamuthu 8169d99fdcSPrabu Thangamuthu u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/ 8269d99fdcSPrabu Thangamuthu u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/ 8369d99fdcSPrabu Thangamuthu 8469d99fdcSPrabu Thangamuthu u32 des6; /* Lower 32-bits of Next Descriptor Address */ 8569d99fdcSPrabu Thangamuthu u32 des7; /* Upper 32-bits of Next Descriptor Address */ 8669d99fdcSPrabu Thangamuthu }; 8769d99fdcSPrabu Thangamuthu 88f95f3850SWill Newton struct idmac_desc { 896687c42fSBen Dooks __le32 des0; /* Control Descriptor */ 90f95f3850SWill Newton #define IDMAC_DES0_DIC BIT(1) 91f95f3850SWill Newton #define IDMAC_DES0_LD BIT(2) 92f95f3850SWill Newton #define IDMAC_DES0_FD BIT(3) 93f95f3850SWill Newton #define IDMAC_DES0_CH BIT(4) 94f95f3850SWill Newton #define IDMAC_DES0_ER BIT(5) 95f95f3850SWill Newton #define IDMAC_DES0_CES BIT(30) 96f95f3850SWill Newton #define IDMAC_DES0_OWN BIT(31) 97f95f3850SWill Newton 986687c42fSBen Dooks __le32 des1; /* Buffer sizes */ 99f95f3850SWill Newton #define IDMAC_SET_BUFFER1_SIZE(d, s) \ 100e5306c3aSBen Dooks ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff))) 101f95f3850SWill Newton 1026687c42fSBen Dooks __le32 des2; /* buffer 1 physical address */ 103f95f3850SWill Newton 1046687c42fSBen Dooks __le32 des3; /* buffer 2 physical address */ 105f95f3850SWill Newton }; 1065959b32eSAlexey Brodkin 1075959b32eSAlexey Brodkin /* Each descriptor can transfer up to 4KB of data in chained mode */ 1085959b32eSAlexey Brodkin #define DW_MCI_DESC_DATA_LENGTH 0x1000 109f95f3850SWill Newton 110f95f3850SWill Newton #if defined(CONFIG_DEBUG_FS) 111f95f3850SWill Newton static int dw_mci_req_show(struct seq_file *s, void *v) 112f95f3850SWill Newton { 113f95f3850SWill 
Newton struct dw_mci_slot *slot = s->private; 114f95f3850SWill Newton struct mmc_request *mrq; 115f95f3850SWill Newton struct mmc_command *cmd; 116f95f3850SWill Newton struct mmc_command *stop; 117f95f3850SWill Newton struct mmc_data *data; 118f95f3850SWill Newton 119f95f3850SWill Newton /* Make sure we get a consistent snapshot */ 120f95f3850SWill Newton spin_lock_bh(&slot->host->lock); 121f95f3850SWill Newton mrq = slot->mrq; 122f95f3850SWill Newton 123f95f3850SWill Newton if (mrq) { 124f95f3850SWill Newton cmd = mrq->cmd; 125f95f3850SWill Newton data = mrq->data; 126f95f3850SWill Newton stop = mrq->stop; 127f95f3850SWill Newton 128f95f3850SWill Newton if (cmd) 129f95f3850SWill Newton seq_printf(s, 130f95f3850SWill Newton "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 131f95f3850SWill Newton cmd->opcode, cmd->arg, cmd->flags, 132f95f3850SWill Newton cmd->resp[0], cmd->resp[1], cmd->resp[2], 133f95f3850SWill Newton cmd->resp[2], cmd->error); 134f95f3850SWill Newton if (data) 135f95f3850SWill Newton seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", 136f95f3850SWill Newton data->bytes_xfered, data->blocks, 137f95f3850SWill Newton data->blksz, data->flags, data->error); 138f95f3850SWill Newton if (stop) 139f95f3850SWill Newton seq_printf(s, 140f95f3850SWill Newton "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 141f95f3850SWill Newton stop->opcode, stop->arg, stop->flags, 142f95f3850SWill Newton stop->resp[0], stop->resp[1], stop->resp[2], 143f95f3850SWill Newton stop->resp[2], stop->error); 144f95f3850SWill Newton } 145f95f3850SWill Newton 146f95f3850SWill Newton spin_unlock_bh(&slot->host->lock); 147f95f3850SWill Newton 148f95f3850SWill Newton return 0; 149f95f3850SWill Newton } 150f95f3850SWill Newton 151f95f3850SWill Newton static int dw_mci_req_open(struct inode *inode, struct file *file) 152f95f3850SWill Newton { 153f95f3850SWill Newton return single_open(file, dw_mci_req_show, inode->i_private); 154f95f3850SWill Newton } 155f95f3850SWill Newton 
156f95f3850SWill Newton static const struct file_operations dw_mci_req_fops = { 157f95f3850SWill Newton .owner = THIS_MODULE, 158f95f3850SWill Newton .open = dw_mci_req_open, 159f95f3850SWill Newton .read = seq_read, 160f95f3850SWill Newton .llseek = seq_lseek, 161f95f3850SWill Newton .release = single_release, 162f95f3850SWill Newton }; 163f95f3850SWill Newton 164f95f3850SWill Newton static int dw_mci_regs_show(struct seq_file *s, void *v) 165f95f3850SWill Newton { 16621657ebdSJaehoon Chung struct dw_mci *host = s->private; 16721657ebdSJaehoon Chung 16821657ebdSJaehoon Chung seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); 16921657ebdSJaehoon Chung seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); 17021657ebdSJaehoon Chung seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); 17121657ebdSJaehoon Chung seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL)); 17221657ebdSJaehoon Chung seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); 17321657ebdSJaehoon Chung seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); 174f95f3850SWill Newton 175f95f3850SWill Newton return 0; 176f95f3850SWill Newton } 177f95f3850SWill Newton 178f95f3850SWill Newton static int dw_mci_regs_open(struct inode *inode, struct file *file) 179f95f3850SWill Newton { 180f95f3850SWill Newton return single_open(file, dw_mci_regs_show, inode->i_private); 181f95f3850SWill Newton } 182f95f3850SWill Newton 183f95f3850SWill Newton static const struct file_operations dw_mci_regs_fops = { 184f95f3850SWill Newton .owner = THIS_MODULE, 185f95f3850SWill Newton .open = dw_mci_regs_open, 186f95f3850SWill Newton .read = seq_read, 187f95f3850SWill Newton .llseek = seq_lseek, 188f95f3850SWill Newton .release = single_release, 189f95f3850SWill Newton }; 190f95f3850SWill Newton 191f95f3850SWill Newton static void dw_mci_init_debugfs(struct dw_mci_slot *slot) 192f95f3850SWill Newton { 193f95f3850SWill Newton struct mmc_host *mmc = slot->mmc; 194f95f3850SWill Newton struct 
dw_mci *host = slot->host; 195f95f3850SWill Newton struct dentry *root; 196f95f3850SWill Newton struct dentry *node; 197f95f3850SWill Newton 198f95f3850SWill Newton root = mmc->debugfs_root; 199f95f3850SWill Newton if (!root) 200f95f3850SWill Newton return; 201f95f3850SWill Newton 202f95f3850SWill Newton node = debugfs_create_file("regs", S_IRUSR, root, host, 203f95f3850SWill Newton &dw_mci_regs_fops); 204f95f3850SWill Newton if (!node) 205f95f3850SWill Newton goto err; 206f95f3850SWill Newton 207f95f3850SWill Newton node = debugfs_create_file("req", S_IRUSR, root, slot, 208f95f3850SWill Newton &dw_mci_req_fops); 209f95f3850SWill Newton if (!node) 210f95f3850SWill Newton goto err; 211f95f3850SWill Newton 212f95f3850SWill Newton node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); 213f95f3850SWill Newton if (!node) 214f95f3850SWill Newton goto err; 215f95f3850SWill Newton 216f95f3850SWill Newton node = debugfs_create_x32("pending_events", S_IRUSR, root, 217f95f3850SWill Newton (u32 *)&host->pending_events); 218f95f3850SWill Newton if (!node) 219f95f3850SWill Newton goto err; 220f95f3850SWill Newton 221f95f3850SWill Newton node = debugfs_create_x32("completed_events", S_IRUSR, root, 222f95f3850SWill Newton (u32 *)&host->completed_events); 223f95f3850SWill Newton if (!node) 224f95f3850SWill Newton goto err; 225f95f3850SWill Newton 226f95f3850SWill Newton return; 227f95f3850SWill Newton 228f95f3850SWill Newton err: 229f95f3850SWill Newton dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); 230f95f3850SWill Newton } 231f95f3850SWill Newton #endif /* defined(CONFIG_DEBUG_FS) */ 232f95f3850SWill Newton 2338e6db1f6SShawn Lin static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) 2348e6db1f6SShawn Lin { 2358e6db1f6SShawn Lin u32 ctrl; 2368e6db1f6SShawn Lin 2378e6db1f6SShawn Lin ctrl = mci_readl(host, CTRL); 2388e6db1f6SShawn Lin ctrl |= reset; 2398e6db1f6SShawn Lin mci_writel(host, CTRL, ctrl); 2408e6db1f6SShawn Lin 
2418e6db1f6SShawn Lin /* wait till resets clear */ 2428e6db1f6SShawn Lin if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl, 2438e6db1f6SShawn Lin !(ctrl & reset), 2448e6db1f6SShawn Lin 1, 500 * USEC_PER_MSEC)) { 2458e6db1f6SShawn Lin dev_err(host->dev, 2468e6db1f6SShawn Lin "Timeout resetting block (ctrl reset %#x)\n", 2478e6db1f6SShawn Lin ctrl & reset); 2488e6db1f6SShawn Lin return false; 2498e6db1f6SShawn Lin } 2508e6db1f6SShawn Lin 2518e6db1f6SShawn Lin return true; 2528e6db1f6SShawn Lin } 25301730558SDoug Anderson 2544dba18deSShawn Lin static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags) 2554dba18deSShawn Lin { 2564dba18deSShawn Lin u32 status; 2574dba18deSShawn Lin 2584dba18deSShawn Lin /* 2594dba18deSShawn Lin * Databook says that before issuing a new data transfer command 2604dba18deSShawn Lin * we need to check to see if the card is busy. Data transfer commands 2614dba18deSShawn Lin * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that. 2624dba18deSShawn Lin * 2634dba18deSShawn Lin * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is 2644dba18deSShawn Lin * expected. 
2654dba18deSShawn Lin */ 2664dba18deSShawn Lin if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) && 2674dba18deSShawn Lin !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) { 2684dba18deSShawn Lin if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, 2694dba18deSShawn Lin status, 2704dba18deSShawn Lin !(status & SDMMC_STATUS_BUSY), 2714dba18deSShawn Lin 10, 500 * USEC_PER_MSEC)) 2724dba18deSShawn Lin dev_err(host->dev, "Busy; trying anyway\n"); 2734dba18deSShawn Lin } 2744dba18deSShawn Lin } 2754dba18deSShawn Lin 2764dba18deSShawn Lin static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) 2774dba18deSShawn Lin { 2784dba18deSShawn Lin struct dw_mci *host = slot->host; 2794dba18deSShawn Lin unsigned int cmd_status = 0; 2804dba18deSShawn Lin 2814dba18deSShawn Lin mci_writel(host, CMDARG, arg); 2824dba18deSShawn Lin wmb(); /* drain writebuffer */ 2834dba18deSShawn Lin dw_mci_wait_while_busy(host, cmd); 2844dba18deSShawn Lin mci_writel(host, CMD, SDMMC_CMD_START | cmd); 2854dba18deSShawn Lin 2864dba18deSShawn Lin if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status, 2874dba18deSShawn Lin !(cmd_status & SDMMC_CMD_START), 2884dba18deSShawn Lin 1, 500 * USEC_PER_MSEC)) 2894dba18deSShawn Lin dev_err(&slot->mmc->class_dev, 2904dba18deSShawn Lin "Timeout sending command (cmd %#x arg %#x status %#x)\n", 2914dba18deSShawn Lin cmd, arg, cmd_status); 2924dba18deSShawn Lin } 2934dba18deSShawn Lin 294f95f3850SWill Newton static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 295f95f3850SWill Newton { 296800d78bfSThomas Abraham struct dw_mci_slot *slot = mmc_priv(mmc); 29701730558SDoug Anderson struct dw_mci *host = slot->host; 298f95f3850SWill Newton u32 cmdr; 299f95f3850SWill Newton 3000e3a22c0SShawn Lin cmd->error = -EINPROGRESS; 301f95f3850SWill Newton cmdr = cmd->opcode; 302f95f3850SWill Newton 30390c2143aSSeungwon Jeon if (cmd->opcode == MMC_STOP_TRANSMISSION || 30490c2143aSSeungwon Jeon cmd->opcode == MMC_GO_IDLE_STATE || 30590c2143aSSeungwon 
Jeon cmd->opcode == MMC_GO_INACTIVE_STATE || 30690c2143aSSeungwon Jeon (cmd->opcode == SD_IO_RW_DIRECT && 30790c2143aSSeungwon Jeon ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) 308f95f3850SWill Newton cmdr |= SDMMC_CMD_STOP; 3094a1b27adSJaehoon Chung else if (cmd->opcode != MMC_SEND_STATUS && cmd->data) 310f95f3850SWill Newton cmdr |= SDMMC_CMD_PRV_DAT_WAIT; 311f95f3850SWill Newton 31201730558SDoug Anderson if (cmd->opcode == SD_SWITCH_VOLTAGE) { 31301730558SDoug Anderson u32 clk_en_a; 31401730558SDoug Anderson 31501730558SDoug Anderson /* Special bit makes CMD11 not die */ 31601730558SDoug Anderson cmdr |= SDMMC_CMD_VOLT_SWITCH; 31701730558SDoug Anderson 31801730558SDoug Anderson /* Change state to continue to handle CMD11 weirdness */ 31901730558SDoug Anderson WARN_ON(slot->host->state != STATE_SENDING_CMD); 32001730558SDoug Anderson slot->host->state = STATE_SENDING_CMD11; 32101730558SDoug Anderson 32201730558SDoug Anderson /* 32301730558SDoug Anderson * We need to disable low power mode (automatic clock stop) 32401730558SDoug Anderson * while doing voltage switch so we don't confuse the card, 32501730558SDoug Anderson * since stopping the clock is a specific part of the UHS 32601730558SDoug Anderson * voltage change dance. 32701730558SDoug Anderson * 32801730558SDoug Anderson * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be 32901730558SDoug Anderson * unconditionally turned back on in dw_mci_setup_bus() if it's 33001730558SDoug Anderson * ever called with a non-zero clock. That shouldn't happen 33101730558SDoug Anderson * until the voltage change is all done. 
33201730558SDoug Anderson */ 33301730558SDoug Anderson clk_en_a = mci_readl(host, CLKENA); 33401730558SDoug Anderson clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id); 33501730558SDoug Anderson mci_writel(host, CLKENA, clk_en_a); 33601730558SDoug Anderson mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | 33701730558SDoug Anderson SDMMC_CMD_PRV_DAT_WAIT, 0); 33801730558SDoug Anderson } 33901730558SDoug Anderson 340f95f3850SWill Newton if (cmd->flags & MMC_RSP_PRESENT) { 341f95f3850SWill Newton /* We expect a response, so set this bit */ 342f95f3850SWill Newton cmdr |= SDMMC_CMD_RESP_EXP; 343f95f3850SWill Newton if (cmd->flags & MMC_RSP_136) 344f95f3850SWill Newton cmdr |= SDMMC_CMD_RESP_LONG; 345f95f3850SWill Newton } 346f95f3850SWill Newton 347f95f3850SWill Newton if (cmd->flags & MMC_RSP_CRC) 348f95f3850SWill Newton cmdr |= SDMMC_CMD_RESP_CRC; 349f95f3850SWill Newton 3500349c085SJaehoon Chung if (cmd->data) { 351f95f3850SWill Newton cmdr |= SDMMC_CMD_DAT_EXP; 3520349c085SJaehoon Chung if (cmd->data->flags & MMC_DATA_WRITE) 353f95f3850SWill Newton cmdr |= SDMMC_CMD_DAT_WR; 354f95f3850SWill Newton } 355f95f3850SWill Newton 356aaaaeb7aSJaehoon Chung if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags)) 357aaaaeb7aSJaehoon Chung cmdr |= SDMMC_CMD_USE_HOLD_REG; 358800d78bfSThomas Abraham 359f95f3850SWill Newton return cmdr; 360f95f3850SWill Newton } 361f95f3850SWill Newton 36290c2143aSSeungwon Jeon static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) 36390c2143aSSeungwon Jeon { 36490c2143aSSeungwon Jeon struct mmc_command *stop; 36590c2143aSSeungwon Jeon u32 cmdr; 36690c2143aSSeungwon Jeon 36790c2143aSSeungwon Jeon if (!cmd->data) 36890c2143aSSeungwon Jeon return 0; 36990c2143aSSeungwon Jeon 37090c2143aSSeungwon Jeon stop = &host->stop_abort; 37190c2143aSSeungwon Jeon cmdr = cmd->opcode; 37290c2143aSSeungwon Jeon memset(stop, 0, sizeof(struct mmc_command)); 37390c2143aSSeungwon Jeon 37490c2143aSSeungwon Jeon if (cmdr == MMC_READ_SINGLE_BLOCK || 
37590c2143aSSeungwon Jeon cmdr == MMC_READ_MULTIPLE_BLOCK || 37690c2143aSSeungwon Jeon cmdr == MMC_WRITE_BLOCK || 3776c2c6506SUlf Hansson cmdr == MMC_WRITE_MULTIPLE_BLOCK || 3786c2c6506SUlf Hansson cmdr == MMC_SEND_TUNING_BLOCK || 3796c2c6506SUlf Hansson cmdr == MMC_SEND_TUNING_BLOCK_HS200) { 38090c2143aSSeungwon Jeon stop->opcode = MMC_STOP_TRANSMISSION; 38190c2143aSSeungwon Jeon stop->arg = 0; 38290c2143aSSeungwon Jeon stop->flags = MMC_RSP_R1B | MMC_CMD_AC; 38390c2143aSSeungwon Jeon } else if (cmdr == SD_IO_RW_EXTENDED) { 38490c2143aSSeungwon Jeon stop->opcode = SD_IO_RW_DIRECT; 38590c2143aSSeungwon Jeon stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | 38690c2143aSSeungwon Jeon ((cmd->arg >> 28) & 0x7); 38790c2143aSSeungwon Jeon stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC; 38890c2143aSSeungwon Jeon } else { 38990c2143aSSeungwon Jeon return 0; 39090c2143aSSeungwon Jeon } 39190c2143aSSeungwon Jeon 39290c2143aSSeungwon Jeon cmdr = stop->opcode | SDMMC_CMD_STOP | 39390c2143aSSeungwon Jeon SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP; 39490c2143aSSeungwon Jeon 39542f989c0SJaehoon Chung if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags)) 3968c005b40SJaehoon Chung cmdr |= SDMMC_CMD_USE_HOLD_REG; 3978c005b40SJaehoon Chung 39890c2143aSSeungwon Jeon return cmdr; 39990c2143aSSeungwon Jeon } 40090c2143aSSeungwon Jeon 40103de1921SAddy Ke static inline void dw_mci_set_cto(struct dw_mci *host) 40203de1921SAddy Ke { 40303de1921SAddy Ke unsigned int cto_clks; 4044c2357f5SDouglas Anderson unsigned int cto_div; 40503de1921SAddy Ke unsigned int cto_ms; 4068892b705SDouglas Anderson unsigned long irqflags; 40703de1921SAddy Ke 40803de1921SAddy Ke cto_clks = mci_readl(host, TMOUT) & 0xff; 4094c2357f5SDouglas Anderson cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; 4104c2357f5SDouglas Anderson if (cto_div == 0) 4114c2357f5SDouglas Anderson cto_div = 1; 4124c2357f5SDouglas Anderson cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz); 
41303de1921SAddy Ke 41403de1921SAddy Ke /* add a bit spare time */ 41503de1921SAddy Ke cto_ms += 10; 41603de1921SAddy Ke 4178892b705SDouglas Anderson /* 4188892b705SDouglas Anderson * The durations we're working with are fairly short so we have to be 4198892b705SDouglas Anderson * extra careful about synchronization here. Specifically in hardware a 4208892b705SDouglas Anderson * command timeout is _at most_ 5.1 ms, so that means we expect an 4218892b705SDouglas Anderson * interrupt (either command done or timeout) to come rather quickly 4228892b705SDouglas Anderson * after the mci_writel. ...but just in case we have a long interrupt 4238892b705SDouglas Anderson * latency let's add a bit of paranoia. 4248892b705SDouglas Anderson * 4258892b705SDouglas Anderson * In general we'll assume that at least an interrupt will be asserted 4268892b705SDouglas Anderson * in hardware by the time the cto_timer runs. ...and if it hasn't 4278892b705SDouglas Anderson * been asserted in hardware by that time then we'll assume it'll never 4288892b705SDouglas Anderson * come. 
4298892b705SDouglas Anderson */ 4308892b705SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 4318892b705SDouglas Anderson if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 43203de1921SAddy Ke mod_timer(&host->cto_timer, 43303de1921SAddy Ke jiffies + msecs_to_jiffies(cto_ms) + 1); 4348892b705SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 43503de1921SAddy Ke } 43603de1921SAddy Ke 437f95f3850SWill Newton static void dw_mci_start_command(struct dw_mci *host, 438f95f3850SWill Newton struct mmc_command *cmd, u32 cmd_flags) 439f95f3850SWill Newton { 440f95f3850SWill Newton host->cmd = cmd; 4414a90920cSThomas Abraham dev_vdbg(host->dev, 442f95f3850SWill Newton "start command: ARGR=0x%08x CMDR=0x%08x\n", 443f95f3850SWill Newton cmd->arg, cmd_flags); 444f95f3850SWill Newton 445f95f3850SWill Newton mci_writel(host, CMDARG, cmd->arg); 4460e3a22c0SShawn Lin wmb(); /* drain writebuffer */ 4470bdbd0e8SDoug Anderson dw_mci_wait_while_busy(host, cmd_flags); 448f95f3850SWill Newton 4498892b705SDouglas Anderson mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); 4508892b705SDouglas Anderson 45103de1921SAddy Ke /* response expected command only */ 45203de1921SAddy Ke if (cmd_flags & SDMMC_CMD_RESP_EXP) 45303de1921SAddy Ke dw_mci_set_cto(host); 454f95f3850SWill Newton } 455f95f3850SWill Newton 45690c2143aSSeungwon Jeon static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) 457f95f3850SWill Newton { 458e13c3c08SJaehoon Chung struct mmc_command *stop = &host->stop_abort; 4590e3a22c0SShawn Lin 46090c2143aSSeungwon Jeon dw_mci_start_command(host, stop, host->stop_cmdr); 461f95f3850SWill Newton } 462f95f3850SWill Newton 463f95f3850SWill Newton /* DMA interface functions */ 464f95f3850SWill Newton static void dw_mci_stop_dma(struct dw_mci *host) 465f95f3850SWill Newton { 46603e8cb53SJames Hogan if (host->using_dma) { 467f95f3850SWill Newton host->dma_ops->stop(host); 468f95f3850SWill Newton host->dma_ops->cleanup(host); 
469aa50f259SSeungwon Jeon } 470aa50f259SSeungwon Jeon 471f95f3850SWill Newton /* Data transfer was stopped by the interrupt handler */ 472f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 473f95f3850SWill Newton } 474f95f3850SWill Newton 475f95f3850SWill Newton static void dw_mci_dma_cleanup(struct dw_mci *host) 476f95f3850SWill Newton { 477f95f3850SWill Newton struct mmc_data *data = host->data; 478f95f3850SWill Newton 479a4cc7eb4SJaehoon Chung if (data && data->host_cookie == COOKIE_MAPPED) { 4804a90920cSThomas Abraham dma_unmap_sg(host->dev, 4819aa51408SSeungwon Jeon data->sg, 4829aa51408SSeungwon Jeon data->sg_len, 483feeef096SHeiner Kallweit mmc_get_dma_dir(data)); 484a4cc7eb4SJaehoon Chung data->host_cookie = COOKIE_UNMAPPED; 485a4cc7eb4SJaehoon Chung } 486f95f3850SWill Newton } 487f95f3850SWill Newton 4885ce9d961SSeungwon Jeon static void dw_mci_idmac_reset(struct dw_mci *host) 4895ce9d961SSeungwon Jeon { 4905ce9d961SSeungwon Jeon u32 bmod = mci_readl(host, BMOD); 4915ce9d961SSeungwon Jeon /* Software reset of DMA */ 4925ce9d961SSeungwon Jeon bmod |= SDMMC_IDMAC_SWRESET; 4935ce9d961SSeungwon Jeon mci_writel(host, BMOD, bmod); 4945ce9d961SSeungwon Jeon } 4955ce9d961SSeungwon Jeon 496f95f3850SWill Newton static void dw_mci_idmac_stop_dma(struct dw_mci *host) 497f95f3850SWill Newton { 498f95f3850SWill Newton u32 temp; 499f95f3850SWill Newton 500f95f3850SWill Newton /* Disable and reset the IDMAC interface */ 501f95f3850SWill Newton temp = mci_readl(host, CTRL); 502f95f3850SWill Newton temp &= ~SDMMC_CTRL_USE_IDMAC; 503f95f3850SWill Newton temp |= SDMMC_CTRL_DMA_RESET; 504f95f3850SWill Newton mci_writel(host, CTRL, temp); 505f95f3850SWill Newton 506f95f3850SWill Newton /* Stop the IDMAC running */ 507f95f3850SWill Newton temp = mci_readl(host, BMOD); 508a5289a43SJaehoon Chung temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); 5095ce9d961SSeungwon Jeon temp |= SDMMC_IDMAC_SWRESET; 510f95f3850SWill Newton mci_writel(host, BMOD, temp); 
511f95f3850SWill Newton } 512f95f3850SWill Newton 5133fc7eaefSShawn Lin static void dw_mci_dmac_complete_dma(void *arg) 514f95f3850SWill Newton { 5153fc7eaefSShawn Lin struct dw_mci *host = arg; 516f95f3850SWill Newton struct mmc_data *data = host->data; 517f95f3850SWill Newton 5184a90920cSThomas Abraham dev_vdbg(host->dev, "DMA complete\n"); 519f95f3850SWill Newton 5203fc7eaefSShawn Lin if ((host->use_dma == TRANS_MODE_EDMAC) && 5213fc7eaefSShawn Lin data && (data->flags & MMC_DATA_READ)) 5223fc7eaefSShawn Lin /* Invalidate cache after read */ 52342f989c0SJaehoon Chung dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc), 5243fc7eaefSShawn Lin data->sg, 5253fc7eaefSShawn Lin data->sg_len, 5263fc7eaefSShawn Lin DMA_FROM_DEVICE); 5273fc7eaefSShawn Lin 528f95f3850SWill Newton host->dma_ops->cleanup(host); 529f95f3850SWill Newton 530f95f3850SWill Newton /* 531f95f3850SWill Newton * If the card was removed, data will be NULL. No point in trying to 532f95f3850SWill Newton * send the stop command or waiting for NBUSY in this case. 
533f95f3850SWill Newton */ 534f95f3850SWill Newton if (data) { 535f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 536f95f3850SWill Newton tasklet_schedule(&host->tasklet); 537f95f3850SWill Newton } 538f95f3850SWill Newton } 539f95f3850SWill Newton 540f95f3850SWill Newton static int dw_mci_idmac_init(struct dw_mci *host) 541f95f3850SWill Newton { 542897b69e7SSeungwon Jeon int i; 543f95f3850SWill Newton 54469d99fdcSPrabu Thangamuthu if (host->dma_64bit_address == 1) { 54569d99fdcSPrabu Thangamuthu struct idmac_desc_64addr *p; 54669d99fdcSPrabu Thangamuthu /* Number of descriptors in the ring buffer */ 547cc190d4cSShawn Lin host->ring_size = 548cc190d4cSShawn Lin DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr); 54969d99fdcSPrabu Thangamuthu 55069d99fdcSPrabu Thangamuthu /* Forward link the descriptor list */ 55169d99fdcSPrabu Thangamuthu for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; 55269d99fdcSPrabu Thangamuthu i++, p++) { 55369d99fdcSPrabu Thangamuthu p->des6 = (host->sg_dma + 55469d99fdcSPrabu Thangamuthu (sizeof(struct idmac_desc_64addr) * 55569d99fdcSPrabu Thangamuthu (i + 1))) & 0xffffffff; 55669d99fdcSPrabu Thangamuthu 55769d99fdcSPrabu Thangamuthu p->des7 = (u64)(host->sg_dma + 55869d99fdcSPrabu Thangamuthu (sizeof(struct idmac_desc_64addr) * 55969d99fdcSPrabu Thangamuthu (i + 1))) >> 32; 56069d99fdcSPrabu Thangamuthu /* Initialize reserved and buffer size fields to "0" */ 56169d99fdcSPrabu Thangamuthu p->des1 = 0; 56269d99fdcSPrabu Thangamuthu p->des2 = 0; 56369d99fdcSPrabu Thangamuthu p->des3 = 0; 56469d99fdcSPrabu Thangamuthu } 56569d99fdcSPrabu Thangamuthu 56669d99fdcSPrabu Thangamuthu /* Set the last descriptor as the end-of-ring descriptor */ 56769d99fdcSPrabu Thangamuthu p->des6 = host->sg_dma & 0xffffffff; 56869d99fdcSPrabu Thangamuthu p->des7 = (u64)host->sg_dma >> 32; 56969d99fdcSPrabu Thangamuthu p->des0 = IDMAC_DES0_ER; 57069d99fdcSPrabu Thangamuthu 57169d99fdcSPrabu Thangamuthu } else { 57269d99fdcSPrabu 
/* NOTE(review): blame-annotated dump; original line boundaries are lost, so the code below is kept byte-identical. This span holds the tail of dw_mci_idmac_init() — forward-links the descriptor ring (last entry marked IDMAC_DES0_ER, des3 pointing back to sg_dma), resets the IDMAC, then programs interrupt masks and the descriptor base address (64-bit DBADDRL/DBADDRU or 32-bit DBADDR) — followed by dw_mci_prepare_desc64(), which builds the 64-bit-address IDMAC descriptor chain from the mapped scatterlist, splitting entries larger than DW_MCI_DESC_DATA_LENGTH and polling each descriptor's OWN bit (up to 100 ms) before reuse since IDMAC writes and CPU reads are asynchronous. On OWN-bit timeout the whole ring is zeroed and re-initialised (err_own_bit path). */
Thangamuthu struct idmac_desc *p; 573f95f3850SWill Newton /* Number of descriptors in the ring buffer */ 574cc190d4cSShawn Lin host->ring_size = 575cc190d4cSShawn Lin DESC_RING_BUF_SZ / sizeof(struct idmac_desc); 576f95f3850SWill Newton 577f95f3850SWill Newton /* Forward link the descriptor list */ 5780e3a22c0SShawn Lin for (i = 0, p = host->sg_cpu; 5790e3a22c0SShawn Lin i < host->ring_size - 1; 5800e3a22c0SShawn Lin i++, p++) { 5816687c42fSBen Dooks p->des3 = cpu_to_le32(host->sg_dma + 5826687c42fSBen Dooks (sizeof(struct idmac_desc) * (i + 1))); 5834b244724SZhangfei Gao p->des1 = 0; 5844b244724SZhangfei Gao } 585f95f3850SWill Newton 586f95f3850SWill Newton /* Set the last descriptor as the end-of-ring descriptor */ 5876687c42fSBen Dooks p->des3 = cpu_to_le32(host->sg_dma); 5886687c42fSBen Dooks p->des0 = cpu_to_le32(IDMAC_DES0_ER); 58969d99fdcSPrabu Thangamuthu } 590f95f3850SWill Newton 5915ce9d961SSeungwon Jeon dw_mci_idmac_reset(host); 592141a712aSSeungwon Jeon 59369d99fdcSPrabu Thangamuthu if (host->dma_64bit_address == 1) { 59469d99fdcSPrabu Thangamuthu /* Mask out interrupts - get Tx & Rx complete only */ 59569d99fdcSPrabu Thangamuthu mci_writel(host, IDSTS64, IDMAC_INT_CLR); 59669d99fdcSPrabu Thangamuthu mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI | 59769d99fdcSPrabu Thangamuthu SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); 59869d99fdcSPrabu Thangamuthu 59969d99fdcSPrabu Thangamuthu /* Set the descriptor base address */ 60069d99fdcSPrabu Thangamuthu mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff); 60169d99fdcSPrabu Thangamuthu mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32); 60269d99fdcSPrabu Thangamuthu 60369d99fdcSPrabu Thangamuthu } else { 604f95f3850SWill Newton /* Mask out interrupts - get Tx & Rx complete only */ 605fc79a4d6SJoonyoung Shim mci_writel(host, IDSTS, IDMAC_INT_CLR); 60669d99fdcSPrabu Thangamuthu mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | 60769d99fdcSPrabu Thangamuthu SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); 608f95f3850SWill
Newton 609f95f3850SWill Newton /* Set the descriptor base address */ 610f95f3850SWill Newton mci_writel(host, DBADDR, host->sg_dma); 61169d99fdcSPrabu Thangamuthu } 61269d99fdcSPrabu Thangamuthu 613f95f3850SWill Newton return 0; 614f95f3850SWill Newton } 615f95f3850SWill Newton 6163b2a067bSShawn Lin static inline int dw_mci_prepare_desc64(struct dw_mci *host, 6173b2a067bSShawn Lin struct mmc_data *data, 6183b2a067bSShawn Lin unsigned int sg_len) 6193b2a067bSShawn Lin { 6203b2a067bSShawn Lin unsigned int desc_len; 6213b2a067bSShawn Lin struct idmac_desc_64addr *desc_first, *desc_last, *desc; 622b6d2d81cSShawn Lin u32 val; 6233b2a067bSShawn Lin int i; 6243b2a067bSShawn Lin 6253b2a067bSShawn Lin desc_first = desc_last = desc = host->sg_cpu; 6263b2a067bSShawn Lin 6273b2a067bSShawn Lin for (i = 0; i < sg_len; i++) { 6283b2a067bSShawn Lin unsigned int length = sg_dma_len(&data->sg[i]); 6293b2a067bSShawn Lin 6303b2a067bSShawn Lin u64 mem_addr = sg_dma_address(&data->sg[i]); 6313b2a067bSShawn Lin 6323b2a067bSShawn Lin for ( ; length ; desc++) { 6333b2a067bSShawn Lin desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 6343b2a067bSShawn Lin length : DW_MCI_DESC_DATA_LENGTH; 6353b2a067bSShawn Lin 6363b2a067bSShawn Lin length -= desc_len; 6373b2a067bSShawn Lin 6383b2a067bSShawn Lin /* 6393b2a067bSShawn Lin * Wait for the former clear OWN bit operation 6403b2a067bSShawn Lin * of IDMAC to make sure that this descriptor 6413b2a067bSShawn Lin * isn't still owned by IDMAC as IDMAC's write 6423b2a067bSShawn Lin * ops and CPU's read ops are asynchronous.
6433b2a067bSShawn Lin */ 644b6d2d81cSShawn Lin if (readl_poll_timeout_atomic(&desc->des0, val, 645b6d2d81cSShawn Lin !(val & IDMAC_DES0_OWN), 646b6d2d81cSShawn Lin 10, 100 * USEC_PER_MSEC)) 6473b2a067bSShawn Lin goto err_own_bit; 6483b2a067bSShawn Lin 6493b2a067bSShawn Lin /* 6503b2a067bSShawn Lin * Set the OWN bit and disable interrupts 6513b2a067bSShawn Lin * for this descriptor 6523b2a067bSShawn Lin */ 6533b2a067bSShawn Lin desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | 6543b2a067bSShawn Lin IDMAC_DES0_CH; 6553b2a067bSShawn Lin 6563b2a067bSShawn Lin /* Buffer length */ 6573b2a067bSShawn Lin IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len); 6583b2a067bSShawn Lin 6593b2a067bSShawn Lin /* Physical address to DMA to/from */ 6603b2a067bSShawn Lin desc->des4 = mem_addr & 0xffffffff; 6613b2a067bSShawn Lin desc->des5 = mem_addr >> 32; 6623b2a067bSShawn Lin 6633b2a067bSShawn Lin /* Update physical address for the next desc */ 6643b2a067bSShawn Lin mem_addr += desc_len; 6653b2a067bSShawn Lin 6663b2a067bSShawn Lin /* Save pointer to the last descriptor */ 6673b2a067bSShawn Lin desc_last = desc; 6683b2a067bSShawn Lin } 6693b2a067bSShawn Lin } 6703b2a067bSShawn Lin 6713b2a067bSShawn Lin /* Set first descriptor */ 6723b2a067bSShawn Lin desc_first->des0 |= IDMAC_DES0_FD; 6733b2a067bSShawn Lin 6743b2a067bSShawn Lin /* Set last descriptor */ 6753b2a067bSShawn Lin desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 6763b2a067bSShawn Lin desc_last->des0 |= IDMAC_DES0_LD; 6773b2a067bSShawn Lin 6783b2a067bSShawn Lin return 0; 6793b2a067bSShawn Lin err_own_bit: 6803b2a067bSShawn Lin /* restore the descriptor chain as it's polluted */ 68126be9d70SColin Ian King dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n"); 682cc190d4cSShawn Lin memset(host->sg_cpu, 0, DESC_RING_BUF_SZ); 6833b2a067bSShawn Lin dw_mci_idmac_init(host); 6843b2a067bSShawn Lin return -EINVAL; 6853b2a067bSShawn Lin } 6863b2a067bSShawn Lin 6873b2a067bSShawn Lin 6883b2a067bSShawn Lin static inline int
/* NOTE(review): dw_mci_prepare_desc32() — 32-bit-address twin of dw_mci_prepare_desc64(): walks the mapped scatterlist, splits oversized entries at DW_MCI_DESC_DATA_LENGTH, polls each descriptor's OWN bit (via IDMAC_OWN_CLR64, up to 100 ms) before handing it back to the IDMAC, and little-endian-swaps all descriptor fields with cpu_to_le32(). First/last descriptors get FD/LD marks; the last also drops CH|DIC so its completion raises an interrupt. OWN-bit timeout zeroes the ring and re-inits (err_own_bit). The function's "static inline int" prefix sits at the end of the preceding fused line. */
dw_mci_prepare_desc32(struct dw_mci *host, 6893b2a067bSShawn Lin struct mmc_data *data, 6903b2a067bSShawn Lin unsigned int sg_len) 6913b2a067bSShawn Lin { 6923b2a067bSShawn Lin unsigned int desc_len; 6933b2a067bSShawn Lin struct idmac_desc *desc_first, *desc_last, *desc; 694b6d2d81cSShawn Lin u32 val; 6953b2a067bSShawn Lin int i; 6963b2a067bSShawn Lin 6973b2a067bSShawn Lin desc_first = desc_last = desc = host->sg_cpu; 6983b2a067bSShawn Lin 6993b2a067bSShawn Lin for (i = 0; i < sg_len; i++) { 7003b2a067bSShawn Lin unsigned int length = sg_dma_len(&data->sg[i]); 7013b2a067bSShawn Lin 7023b2a067bSShawn Lin u32 mem_addr = sg_dma_address(&data->sg[i]); 7033b2a067bSShawn Lin 7043b2a067bSShawn Lin for ( ; length ; desc++) { 7053b2a067bSShawn Lin desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 7063b2a067bSShawn Lin length : DW_MCI_DESC_DATA_LENGTH; 7073b2a067bSShawn Lin 7083b2a067bSShawn Lin length -= desc_len; 7093b2a067bSShawn Lin 7103b2a067bSShawn Lin /* 7113b2a067bSShawn Lin * Wait for the former clear OWN bit operation 7123b2a067bSShawn Lin * of IDMAC to make sure that this descriptor 7133b2a067bSShawn Lin * isn't still owned by IDMAC as IDMAC's write 7143b2a067bSShawn Lin * ops and CPU's read ops are asynchronous.
7153b2a067bSShawn Lin */ 716b6d2d81cSShawn Lin if (readl_poll_timeout_atomic(&desc->des0, val, 717b6d2d81cSShawn Lin IDMAC_OWN_CLR64(val), 718b6d2d81cSShawn Lin 10, 719b6d2d81cSShawn Lin 100 * USEC_PER_MSEC)) 7203b2a067bSShawn Lin goto err_own_bit; 7213b2a067bSShawn Lin 7223b2a067bSShawn Lin /* 7233b2a067bSShawn Lin * Set the OWN bit and disable interrupts 7243b2a067bSShawn Lin * for this descriptor 7253b2a067bSShawn Lin */ 7263b2a067bSShawn Lin desc->des0 = cpu_to_le32(IDMAC_DES0_OWN | 7273b2a067bSShawn Lin IDMAC_DES0_DIC | 7283b2a067bSShawn Lin IDMAC_DES0_CH); 7293b2a067bSShawn Lin 7303b2a067bSShawn Lin /* Buffer length */ 7313b2a067bSShawn Lin IDMAC_SET_BUFFER1_SIZE(desc, desc_len); 7323b2a067bSShawn Lin 7333b2a067bSShawn Lin /* Physical address to DMA to/from */ 7343b2a067bSShawn Lin desc->des2 = cpu_to_le32(mem_addr); 7353b2a067bSShawn Lin 7363b2a067bSShawn Lin /* Update physical address for the next desc */ 7373b2a067bSShawn Lin mem_addr += desc_len; 7383b2a067bSShawn Lin 7393b2a067bSShawn Lin /* Save pointer to the last descriptor */ 7403b2a067bSShawn Lin desc_last = desc; 7413b2a067bSShawn Lin } 7423b2a067bSShawn Lin } 7433b2a067bSShawn Lin 7443b2a067bSShawn Lin /* Set first descriptor */ 7453b2a067bSShawn Lin desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD); 7463b2a067bSShawn Lin 7473b2a067bSShawn Lin /* Set last descriptor */ 7483b2a067bSShawn Lin desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | 7493b2a067bSShawn Lin IDMAC_DES0_DIC)); 7503b2a067bSShawn Lin desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD); 7513b2a067bSShawn Lin 7523b2a067bSShawn Lin return 0; 7533b2a067bSShawn Lin err_own_bit: 7543b2a067bSShawn Lin /* restore the descriptor chain as it's polluted */ 75526be9d70SColin Ian King dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n"); 756cc190d4cSShawn Lin memset(host->sg_cpu, 0, DESC_RING_BUF_SZ); 7573b2a067bSShawn Lin dw_mci_idmac_init(host); 7583b2a067bSShawn Lin return -EINVAL; 7593b2a067bSShawn Lin } 7603b2a067bSShawn Lin
/* NOTE(review): dw_mci_idmac_start_dma() — prepares the 64- or 32-bit descriptor chain, drains the write buffer (wmb), resets DMA (in case PIO ran before), selects the IDMAC in CTRL, enables it in BMOD (with fixed-burst), and kicks it via PLDMND. Followed by the dw_mci_idmac_ops table, dw_mci_edmac_stop_dma() (async terminate of the external dmaengine channel), and the first half of dw_mci_edmac_start_dma(), which builds a dma_slave_config pointed at the host FIFO (burst size taken from FIFOTH bits 28-30, mapped through mszs[]). */
7613b2a067bSShawn Lin static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) 7623b2a067bSShawn Lin { 7633b2a067bSShawn Lin u32 temp; 7643b2a067bSShawn Lin int ret; 7653b2a067bSShawn Lin 7663b2a067bSShawn Lin if (host->dma_64bit_address == 1) 7673b2a067bSShawn Lin ret = dw_mci_prepare_desc64(host, host->data, sg_len); 7683b2a067bSShawn Lin else 7693b2a067bSShawn Lin ret = dw_mci_prepare_desc32(host, host->data, sg_len); 7703b2a067bSShawn Lin 7713b2a067bSShawn Lin if (ret) 7723b2a067bSShawn Lin goto out; 7733b2a067bSShawn Lin 7743b2a067bSShawn Lin /* drain writebuffer */ 7753b2a067bSShawn Lin wmb(); 7763b2a067bSShawn Lin 7773b2a067bSShawn Lin /* Make sure to reset DMA in case we did PIO before this */ 7783b2a067bSShawn Lin dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET); 7793b2a067bSShawn Lin dw_mci_idmac_reset(host); 7803b2a067bSShawn Lin 7813b2a067bSShawn Lin /* Select IDMAC interface */ 7823b2a067bSShawn Lin temp = mci_readl(host, CTRL); 7833b2a067bSShawn Lin temp |= SDMMC_CTRL_USE_IDMAC; 7843b2a067bSShawn Lin mci_writel(host, CTRL, temp); 7853b2a067bSShawn Lin 7863b2a067bSShawn Lin /* drain writebuffer */ 7873b2a067bSShawn Lin wmb(); 7883b2a067bSShawn Lin 7893b2a067bSShawn Lin /* Enable the IDMAC */ 7903b2a067bSShawn Lin temp = mci_readl(host, BMOD); 7913b2a067bSShawn Lin temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; 7923b2a067bSShawn Lin mci_writel(host, BMOD, temp); 7933b2a067bSShawn Lin 7943b2a067bSShawn Lin /* Start it running */ 7953b2a067bSShawn Lin mci_writel(host, PLDMND, 1); 7963b2a067bSShawn Lin 7973b2a067bSShawn Lin out: 7983b2a067bSShawn Lin return ret; 7993b2a067bSShawn Lin } 8003b2a067bSShawn Lin 8018e2b36eaSArnd Bergmann static const struct dw_mci_dma_ops dw_mci_idmac_ops = { 802885c3e80SSeungwon Jeon .init = dw_mci_idmac_init, 803885c3e80SSeungwon Jeon .start = dw_mci_idmac_start_dma, 804885c3e80SSeungwon Jeon .stop = dw_mci_idmac_stop_dma, 8053fc7eaefSShawn Lin .complete = dw_mci_dmac_complete_dma, 806885c3e80SSeungwon Jeon
.cleanup = dw_mci_dma_cleanup, 807885c3e80SSeungwon Jeon }; 8083fc7eaefSShawn Lin 8093fc7eaefSShawn Lin static void dw_mci_edmac_stop_dma(struct dw_mci *host) 8103fc7eaefSShawn Lin { 811ab925a31SShawn Lin dmaengine_terminate_async(host->dms->ch); 8123fc7eaefSShawn Lin } 8133fc7eaefSShawn Lin 8143fc7eaefSShawn Lin static int dw_mci_edmac_start_dma(struct dw_mci *host, 8153fc7eaefSShawn Lin unsigned int sg_len) 8163fc7eaefSShawn Lin { 8173fc7eaefSShawn Lin struct dma_slave_config cfg; 8183fc7eaefSShawn Lin struct dma_async_tx_descriptor *desc = NULL; 8193fc7eaefSShawn Lin struct scatterlist *sgl = host->data->sg; 82027d70d36SColin Ian King static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; 8213fc7eaefSShawn Lin u32 sg_elems = host->data->sg_len; 8223fc7eaefSShawn Lin u32 fifoth_val; 8233fc7eaefSShawn Lin u32 fifo_offset = host->fifo_reg - host->regs; 8243fc7eaefSShawn Lin int ret = 0; 8253fc7eaefSShawn Lin 8263fc7eaefSShawn Lin /* Set external dma config: burst size, burst width */ 827260b3164SArnd Bergmann cfg.dst_addr = host->phy_regs + fifo_offset; 8283fc7eaefSShawn Lin cfg.src_addr = cfg.dst_addr; 8293fc7eaefSShawn Lin cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 8303fc7eaefSShawn Lin cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 8313fc7eaefSShawn Lin 8323fc7eaefSShawn Lin /* Match burst msize with external dma config */ 8333fc7eaefSShawn Lin fifoth_val = mci_readl(host, FIFOTH); 8343fc7eaefSShawn Lin cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7]; 8353fc7eaefSShawn Lin cfg.src_maxburst = cfg.dst_maxburst; 8363fc7eaefSShawn Lin 8373fc7eaefSShawn Lin if (host->data->flags & MMC_DATA_WRITE) 8383fc7eaefSShawn Lin cfg.direction = DMA_MEM_TO_DEV; 8393fc7eaefSShawn Lin else 8403fc7eaefSShawn Lin cfg.direction = DMA_DEV_TO_MEM; 8413fc7eaefSShawn Lin 8423fc7eaefSShawn Lin ret = dmaengine_slave_config(host->dms->ch, &cfg); 8433fc7eaefSShawn Lin if (ret) { 8443fc7eaefSShawn Lin dev_err(host->dev, "Failed to config edmac.\n"); 8453fc7eaefSShawn Lin return
/* NOTE(review): remainder of dw_mci_edmac_start_dma() — preps the slave-sg descriptor, installs dw_mci_dmac_complete_dma as completion callback, submits, syncs the sg list for device before writes, and issues pending. Also dw_mci_edmac_init() (kzalloc of the dma_slave wrapper + "rx-tx" channel request, cleaned up on failure), dw_mci_edmac_exit() (release channel, free wrapper, NULL both — idempotent), the dw_mci_edmac_ops table, and the head of dw_mci_pre_dma_transfer(), which short-circuits if the request was already pre-mapped (COOKIE_PRE_MAPPED). */
-EBUSY; 8463fc7eaefSShawn Lin } 8473fc7eaefSShawn Lin 8483fc7eaefSShawn Lin desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, 8493fc7eaefSShawn Lin sg_len, cfg.direction, 8503fc7eaefSShawn Lin DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 8513fc7eaefSShawn Lin if (!desc) { 8523fc7eaefSShawn Lin dev_err(host->dev, "Can't prepare slave sg.\n"); 8533fc7eaefSShawn Lin return -EBUSY; 8543fc7eaefSShawn Lin } 8553fc7eaefSShawn Lin 8563fc7eaefSShawn Lin /* Set dw_mci_dmac_complete_dma as callback */ 8573fc7eaefSShawn Lin desc->callback = dw_mci_dmac_complete_dma; 8583fc7eaefSShawn Lin desc->callback_param = (void *)host; 8593fc7eaefSShawn Lin dmaengine_submit(desc); 8603fc7eaefSShawn Lin 8613fc7eaefSShawn Lin /* Flush cache before write */ 8623fc7eaefSShawn Lin if (host->data->flags & MMC_DATA_WRITE) 86342f989c0SJaehoon Chung dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl, 8643fc7eaefSShawn Lin sg_elems, DMA_TO_DEVICE); 8653fc7eaefSShawn Lin 8663fc7eaefSShawn Lin dma_async_issue_pending(host->dms->ch); 8673fc7eaefSShawn Lin 8683fc7eaefSShawn Lin return 0; 8693fc7eaefSShawn Lin } 8703fc7eaefSShawn Lin 8713fc7eaefSShawn Lin static int dw_mci_edmac_init(struct dw_mci *host) 8723fc7eaefSShawn Lin { 8733fc7eaefSShawn Lin /* Request external dma channel */ 8743fc7eaefSShawn Lin host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL); 8753fc7eaefSShawn Lin if (!host->dms) 8763fc7eaefSShawn Lin return -ENOMEM; 8773fc7eaefSShawn Lin 8783fc7eaefSShawn Lin host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx"); 8793fc7eaefSShawn Lin if (!host->dms->ch) { 8804539d36eSDan Carpenter dev_err(host->dev, "Failed to get external DMA channel.\n"); 8813fc7eaefSShawn Lin kfree(host->dms); 8823fc7eaefSShawn Lin host->dms = NULL; 8833fc7eaefSShawn Lin return -ENXIO; 8843fc7eaefSShawn Lin } 8853fc7eaefSShawn Lin 8863fc7eaefSShawn Lin return 0; 8873fc7eaefSShawn Lin } 8883fc7eaefSShawn Lin 8893fc7eaefSShawn Lin static void dw_mci_edmac_exit(struct dw_mci *host) 8903fc7eaefSShawn Lin {
8913fc7eaefSShawn Lin if (host->dms) { 8923fc7eaefSShawn Lin if (host->dms->ch) { 8933fc7eaefSShawn Lin dma_release_channel(host->dms->ch); 8943fc7eaefSShawn Lin host->dms->ch = NULL; 8953fc7eaefSShawn Lin } 8963fc7eaefSShawn Lin kfree(host->dms); 8973fc7eaefSShawn Lin host->dms = NULL; 8983fc7eaefSShawn Lin } 8993fc7eaefSShawn Lin } 9003fc7eaefSShawn Lin 9013fc7eaefSShawn Lin static const struct dw_mci_dma_ops dw_mci_edmac_ops = { 9023fc7eaefSShawn Lin .init = dw_mci_edmac_init, 9033fc7eaefSShawn Lin .exit = dw_mci_edmac_exit, 9043fc7eaefSShawn Lin .start = dw_mci_edmac_start_dma, 9053fc7eaefSShawn Lin .stop = dw_mci_edmac_stop_dma, 9063fc7eaefSShawn Lin .complete = dw_mci_dmac_complete_dma, 9073fc7eaefSShawn Lin .cleanup = dw_mci_dma_cleanup, 9083fc7eaefSShawn Lin }; 909885c3e80SSeungwon Jeon 9109aa51408SSeungwon Jeon static int dw_mci_pre_dma_transfer(struct dw_mci *host, 9119aa51408SSeungwon Jeon struct mmc_data *data, 912a4cc7eb4SJaehoon Chung int cookie) 913f95f3850SWill Newton { 914f95f3850SWill Newton struct scatterlist *sg; 9159aa51408SSeungwon Jeon unsigned int i, sg_len; 916f95f3850SWill Newton 917a4cc7eb4SJaehoon Chung if (data->host_cookie == COOKIE_PRE_MAPPED) 918a4cc7eb4SJaehoon Chung return data->sg_len; 919f95f3850SWill Newton 920f95f3850SWill Newton /* 921f95f3850SWill Newton * We don't do DMA on "complex" transfers, i.e. with 922f95f3850SWill Newton * non-word-aligned buffers or lengths. Also, we don't bother 923f95f3850SWill Newton * with all the DMA setup overhead for short transfers.
/* NOTE(review): remainder of dw_mci_pre_dma_transfer() — rejects short (< DW_MCI_DMA_THRESHOLD) or non-word-aligned transfers with -EINVAL (callers fall back to PIO), dma-maps the sg list and records the cookie. Then dw_mci_pre_req()/dw_mci_post_req() implement the mmc async pre/post mapping hooks (cookie reset to COOKIE_UNMAPPED on failure/unmap), and most of dw_mci_get_cd(): polled or non-removable slots are always "present" (logged once via DW_MMC_CARD_PRESENT), otherwise the CD GPIO or the CDETECT register decides (CDETECT bit is active-low — hence the "== 0 ? 1 : 0" on the next fused line). */
924f95f3850SWill Newton */ 925f95f3850SWill Newton if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) 926f95f3850SWill Newton return -EINVAL; 9279aa51408SSeungwon Jeon 928f95f3850SWill Newton if (data->blksz & 3) 929f95f3850SWill Newton return -EINVAL; 930f95f3850SWill Newton 931f95f3850SWill Newton for_each_sg(data->sg, sg, data->sg_len, i) { 932f95f3850SWill Newton if (sg->offset & 3 || sg->length & 3) 933f95f3850SWill Newton return -EINVAL; 934f95f3850SWill Newton } 935f95f3850SWill Newton 9364a90920cSThomas Abraham sg_len = dma_map_sg(host->dev, 9379aa51408SSeungwon Jeon data->sg, 9389aa51408SSeungwon Jeon data->sg_len, 939feeef096SHeiner Kallweit mmc_get_dma_dir(data)); 9409aa51408SSeungwon Jeon if (sg_len == 0) 9419aa51408SSeungwon Jeon return -EINVAL; 9429aa51408SSeungwon Jeon 943a4cc7eb4SJaehoon Chung data->host_cookie = cookie; 9449aa51408SSeungwon Jeon 9459aa51408SSeungwon Jeon return sg_len; 9469aa51408SSeungwon Jeon } 9479aa51408SSeungwon Jeon 9489aa51408SSeungwon Jeon static void dw_mci_pre_req(struct mmc_host *mmc, 949d3c6aac3SLinus Walleij struct mmc_request *mrq) 9509aa51408SSeungwon Jeon { 9519aa51408SSeungwon Jeon struct dw_mci_slot *slot = mmc_priv(mmc); 9529aa51408SSeungwon Jeon struct mmc_data *data = mrq->data; 9539aa51408SSeungwon Jeon 9549aa51408SSeungwon Jeon if (!slot->host->use_dma || !data) 9559aa51408SSeungwon Jeon return; 9569aa51408SSeungwon Jeon 957a4cc7eb4SJaehoon Chung /* This data might be unmapped at this time */ 958a4cc7eb4SJaehoon Chung data->host_cookie = COOKIE_UNMAPPED; 9599aa51408SSeungwon Jeon 960a4cc7eb4SJaehoon Chung if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 961a4cc7eb4SJaehoon Chung COOKIE_PRE_MAPPED) < 0) 962a4cc7eb4SJaehoon Chung data->host_cookie = COOKIE_UNMAPPED; 9639aa51408SSeungwon Jeon } 9649aa51408SSeungwon Jeon 9659aa51408SSeungwon Jeon static void dw_mci_post_req(struct mmc_host *mmc, 9669aa51408SSeungwon Jeon struct mmc_request *mrq, 9679aa51408SSeungwon Jeon int err) 9689aa51408SSeungwon Jeon {
9699aa51408SSeungwon Jeon struct dw_mci_slot *slot = mmc_priv(mmc); 9709aa51408SSeungwon Jeon struct mmc_data *data = mrq->data; 9719aa51408SSeungwon Jeon 9729aa51408SSeungwon Jeon if (!slot->host->use_dma || !data) 9739aa51408SSeungwon Jeon return; 9749aa51408SSeungwon Jeon 975a4cc7eb4SJaehoon Chung if (data->host_cookie != COOKIE_UNMAPPED) 9764a90920cSThomas Abraham dma_unmap_sg(slot->host->dev, 9779aa51408SSeungwon Jeon data->sg, 9789aa51408SSeungwon Jeon data->sg_len, 979feeef096SHeiner Kallweit mmc_get_dma_dir(data)); 980a4cc7eb4SJaehoon Chung data->host_cookie = COOKIE_UNMAPPED; 9819aa51408SSeungwon Jeon } 9829aa51408SSeungwon Jeon 983671fa142SShawn Lin static int dw_mci_get_cd(struct mmc_host *mmc) 984671fa142SShawn Lin { 985671fa142SShawn Lin int present; 986671fa142SShawn Lin struct dw_mci_slot *slot = mmc_priv(mmc); 987671fa142SShawn Lin struct dw_mci *host = slot->host; 988671fa142SShawn Lin int gpio_cd = mmc_gpio_get_cd(mmc); 989671fa142SShawn Lin 990671fa142SShawn Lin /* Use platform get_cd function, else try onboard card detect */ 991671fa142SShawn Lin if (((mmc->caps & MMC_CAP_NEEDS_POLL) 992671fa142SShawn Lin || !mmc_card_is_removable(mmc))) { 993671fa142SShawn Lin present = 1; 994671fa142SShawn Lin 995671fa142SShawn Lin if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { 996671fa142SShawn Lin if (mmc->caps & MMC_CAP_NEEDS_POLL) { 997671fa142SShawn Lin dev_info(&mmc->class_dev, 998671fa142SShawn Lin "card is polling.\n"); 999671fa142SShawn Lin } else { 1000671fa142SShawn Lin dev_info(&mmc->class_dev, 1001671fa142SShawn Lin "card is non-removable.\n"); 1002671fa142SShawn Lin } 1003671fa142SShawn Lin set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 1004671fa142SShawn Lin } 1005671fa142SShawn Lin 1006671fa142SShawn Lin return present; 1007671fa142SShawn Lin } else if (gpio_cd >= 0) 1008671fa142SShawn Lin present = gpio_cd; 1009671fa142SShawn Lin else 1010671fa142SShawn Lin present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) 1011671fa142SShawn Lin
/* NOTE(review): end of dw_mci_get_cd() — updates DW_MMC_CARD_PRESENT under host->lock with debounce-style logging — then dw_mci_adjust_fifoth(), which derives FIFOTH msize/rx_wmark/tx_wmark from the block size (largest mszs[] entry that divides both blksz_depth and the inverted TX watermark; defaults msize=1/rx_wmark=1 when blksz isn't FIFO-width aligned or no candidate fits), and the head of dw_mci_ctrl_thld(). Comment typos fixed below ("ship"->"skip", "uesed"->"used"); code bytes untouched. */
== 0 ? 1 : 0; 1012671fa142SShawn Lin 1013671fa142SShawn Lin spin_lock_bh(&host->lock); 1014671fa142SShawn Lin if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags)) 1015671fa142SShawn Lin dev_dbg(&mmc->class_dev, "card is present\n"); 1016671fa142SShawn Lin else if (!present && 1017671fa142SShawn Lin !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags)) 1018671fa142SShawn Lin dev_dbg(&mmc->class_dev, "card is not present\n"); 1019671fa142SShawn Lin spin_unlock_bh(&host->lock); 1020671fa142SShawn Lin 1021671fa142SShawn Lin return present; 1022671fa142SShawn Lin } 1023671fa142SShawn Lin 102452426899SSeungwon Jeon static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) 102552426899SSeungwon Jeon { 102652426899SSeungwon Jeon unsigned int blksz = data->blksz; 102727d70d36SColin Ian King static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; 102852426899SSeungwon Jeon u32 fifo_width = 1 << host->data_shift; 102952426899SSeungwon Jeon u32 blksz_depth = blksz / fifo_width, fifoth_val; 103052426899SSeungwon Jeon u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; 10310e3a22c0SShawn Lin int idx = ARRAY_SIZE(mszs) - 1; 103252426899SSeungwon Jeon 10333fc7eaefSShawn Lin /* pio should skip this scenario */ 10343fc7eaefSShawn Lin if (!host->use_dma) 10353fc7eaefSShawn Lin return; 10363fc7eaefSShawn Lin 103752426899SSeungwon Jeon tx_wmark = (host->fifo_depth) / 2; 103852426899SSeungwon Jeon tx_wmark_invers = host->fifo_depth - tx_wmark; 103952426899SSeungwon Jeon 104052426899SSeungwon Jeon /* 104152426899SSeungwon Jeon * MSIZE is '1', 104252426899SSeungwon Jeon * if blksz is not a multiple of the FIFO width 104352426899SSeungwon Jeon */ 104420753569SShawn Lin if (blksz % fifo_width) 104552426899SSeungwon Jeon goto done; 104652426899SSeungwon Jeon 104752426899SSeungwon Jeon do { 104852426899SSeungwon Jeon if (!((blksz_depth % mszs[idx]) || 104952426899SSeungwon Jeon (tx_wmark_invers % mszs[idx]))) { 105052426899SSeungwon Jeon msize =
idx; 105152426899SSeungwon Jeon rx_wmark = mszs[idx] - 1; 105252426899SSeungwon Jeon break; 105352426899SSeungwon Jeon } 105452426899SSeungwon Jeon } while (--idx > 0); 105552426899SSeungwon Jeon /* 105652426899SSeungwon Jeon * If idx is '0', it won't be tried 105752426899SSeungwon Jeon * Thus, initial values are used 105852426899SSeungwon Jeon */ 105952426899SSeungwon Jeon done: 106052426899SSeungwon Jeon fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); 106152426899SSeungwon Jeon mci_writel(host, FIFOTH, fifoth_val); 106252426899SSeungwon Jeon } 106352426899SSeungwon Jeon 10647e4bf1bcSJaehoon Chung static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) 1065f1d2736cSSeungwon Jeon { 1066f1d2736cSSeungwon Jeon unsigned int blksz = data->blksz; 1067f1d2736cSSeungwon Jeon u32 blksz_depth, fifo_depth; 1068f1d2736cSSeungwon Jeon u16 thld_size; 10697e4bf1bcSJaehoon Chung u8 enable; 1070f1d2736cSSeungwon Jeon 107166dfd101SJames Hogan /* 107266dfd101SJames Hogan * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is 107366dfd101SJames Hogan * in the FIFO region, so we really shouldn't access it). 107466dfd101SJames Hogan */ 10757e4bf1bcSJaehoon Chung if (host->verid < DW_MMC_240A || 10767e4bf1bcSJaehoon Chung (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE) 107766dfd101SJames Hogan return; 107866dfd101SJames Hogan 10797e4bf1bcSJaehoon Chung /* 10807e4bf1bcSJaehoon Chung * Card write Threshold is introduced since 2.80a 10817e4bf1bcSJaehoon Chung * It's used when HS400 mode is enabled.
/* FIX(review): card-threshold condition was inverted. The old code read `if (write && !(timing != HS400)) return;` — i.e. it returned early (leaving a stale CDTHRCTL) exactly when the write threshold SHOULD be programmed (HS400 writes), and let non-HS400 writes fall through. Per the comment above, the write threshold exists only for HS400: non-HS400 writes must `goto disable`, and HS400 must be accepted by the enable-timings check below (HS200/SDR104/HS400). Matches the mainline fix "mmc: dw_mmc: fix card threshold control configuration". */
10827e4bf1bcSJaehoon Chung */ 10837e4bf1bcSJaehoon Chung if (data->flags & MMC_DATA_WRITE && 10847e4bf1bcSJaehoon Chung host->timing != MMC_TIMING_MMC_HS400) 10857e4bf1bcSJaehoon Chung goto disable; 10867e4bf1bcSJaehoon Chung 10877e4bf1bcSJaehoon Chung if (data->flags & MMC_DATA_WRITE) 10887e4bf1bcSJaehoon Chung enable = SDMMC_CARD_WR_THR_EN; 10897e4bf1bcSJaehoon Chung else 10907e4bf1bcSJaehoon Chung enable = SDMMC_CARD_RD_THR_EN; 10917e4bf1bcSJaehoon Chung 1092f1d2736cSSeungwon Jeon if (host->timing != MMC_TIMING_MMC_HS200 && 1093f1d2736cSSeungwon Jeon host->timing != MMC_TIMING_UHS_SDR104 && host->timing != MMC_TIMING_MMC_HS400) 1094f1d2736cSSeungwon Jeon goto disable; 1095f1d2736cSSeungwon Jeon 1096f1d2736cSSeungwon Jeon blksz_depth = blksz / (1 << host->data_shift); 1097f1d2736cSSeungwon Jeon fifo_depth = host->fifo_depth; 1098f1d2736cSSeungwon Jeon 1099f1d2736cSSeungwon Jeon if (blksz_depth > fifo_depth) 1100f1d2736cSSeungwon Jeon goto disable; 1101f1d2736cSSeungwon Jeon 1102f1d2736cSSeungwon Jeon /* 1103f1d2736cSSeungwon Jeon * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz' 1104f1d2736cSSeungwon Jeon * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz 1105f1d2736cSSeungwon Jeon * Currently just choose blksz.
/* NOTE(review): tail of dw_mci_ctrl_thld() (programs CDTHRCTL with blksz, or 0 on the disable path), then dw_mci_submit_data_dma(): pre-maps the sg list, marks using_dma, retunes FIFOTH only when blksz changed, enables the DMA interface in CTRL, masks RX/TX-ready interrupts under irq_lock (DMA handles data movement), and starts the selected dma_ops — any failure stops DMA and returns -ENODEV so the caller falls back to PIO. Then most of dw_mci_submit_data(): records direction status, applies the threshold, and on the PIO path starts an sg_miter. Comment typos fixed ("wm_algined"->"wm_aligned", "transfered"->"transferred"); code bytes untouched. */
1106f1d2736cSSeungwon Jeon */ 1107f1d2736cSSeungwon Jeon thld_size = blksz; 11087e4bf1bcSJaehoon Chung mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable)); 1109f1d2736cSSeungwon Jeon return; 1110f1d2736cSSeungwon Jeon 1111f1d2736cSSeungwon Jeon disable: 11127e4bf1bcSJaehoon Chung mci_writel(host, CDTHRCTL, 0); 1113f1d2736cSSeungwon Jeon } 1114f1d2736cSSeungwon Jeon 11159aa51408SSeungwon Jeon static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 11169aa51408SSeungwon Jeon { 1117f8c58c11SDoug Anderson unsigned long irqflags; 11189aa51408SSeungwon Jeon int sg_len; 11199aa51408SSeungwon Jeon u32 temp; 11209aa51408SSeungwon Jeon 11219aa51408SSeungwon Jeon host->using_dma = 0; 11229aa51408SSeungwon Jeon 11239aa51408SSeungwon Jeon /* If we don't have a channel, we can't do DMA */ 11249aa51408SSeungwon Jeon if (!host->use_dma) 11259aa51408SSeungwon Jeon return -ENODEV; 11269aa51408SSeungwon Jeon 1127a4cc7eb4SJaehoon Chung sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1128a99aa9b9SSeungwon Jeon if (sg_len < 0) { 1129a99aa9b9SSeungwon Jeon host->dma_ops->stop(host); 11309aa51408SSeungwon Jeon return sg_len; 1131a99aa9b9SSeungwon Jeon } 11329aa51408SSeungwon Jeon 113303e8cb53SJames Hogan host->using_dma = 1; 113403e8cb53SJames Hogan 11353fc7eaefSShawn Lin if (host->use_dma == TRANS_MODE_IDMAC) 11364a90920cSThomas Abraham dev_vdbg(host->dev, 1137f95f3850SWill Newton "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 11383fc7eaefSShawn Lin (unsigned long)host->sg_cpu, 11393fc7eaefSShawn Lin (unsigned long)host->sg_dma, 1140f95f3850SWill Newton sg_len); 1141f95f3850SWill Newton 114252426899SSeungwon Jeon /* 114352426899SSeungwon Jeon * Decide the MSIZE and RX/TX Watermark. 114452426899SSeungwon Jeon * If current block size is same with previous size, 114552426899SSeungwon Jeon * no need to update fifoth.
114652426899SSeungwon Jeon */ 114752426899SSeungwon Jeon if (host->prev_blksz != data->blksz) 114852426899SSeungwon Jeon dw_mci_adjust_fifoth(host, data); 114952426899SSeungwon Jeon 1150f95f3850SWill Newton /* Enable the DMA interface */ 1151f95f3850SWill Newton temp = mci_readl(host, CTRL); 1152f95f3850SWill Newton temp |= SDMMC_CTRL_DMA_ENABLE; 1153f95f3850SWill Newton mci_writel(host, CTRL, temp); 1154f95f3850SWill Newton 1155f95f3850SWill Newton /* Disable RX/TX IRQs, let DMA handle it */ 1156f8c58c11SDoug Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 1157f95f3850SWill Newton temp = mci_readl(host, INTMASK); 1158f95f3850SWill Newton temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); 1159f95f3850SWill Newton mci_writel(host, INTMASK, temp); 1160f8c58c11SDoug Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 1161f95f3850SWill Newton 11623fc7eaefSShawn Lin if (host->dma_ops->start(host, sg_len)) { 1163647f80a1SJaehoon Chung host->dma_ops->stop(host); 1164d12d0cb1SShawn Lin /* We can't do DMA, try PIO for this one */ 1165d12d0cb1SShawn Lin dev_dbg(host->dev, 1166d12d0cb1SShawn Lin "%s: fall back to PIO mode for current transfer\n", 1167d12d0cb1SShawn Lin __func__); 11683fc7eaefSShawn Lin return -ENODEV; 11693fc7eaefSShawn Lin } 1170f95f3850SWill Newton 1171f95f3850SWill Newton return 0; 1172f95f3850SWill Newton } 1173f95f3850SWill Newton 1174f95f3850SWill Newton static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) 1175f95f3850SWill Newton { 1176f8c58c11SDoug Anderson unsigned long irqflags; 11770e3a22c0SShawn Lin int flags = SG_MITER_ATOMIC; 1178f95f3850SWill Newton u32 temp; 1179f95f3850SWill Newton 1180f95f3850SWill Newton data->error = -EINPROGRESS; 1181f95f3850SWill Newton 1182f95f3850SWill Newton WARN_ON(host->data); 1183f95f3850SWill Newton host->sg = NULL; 1184f95f3850SWill Newton host->data = data; 1185f95f3850SWill Newton 11867e4bf1bcSJaehoon Chung if (data->flags & MMC_DATA_READ) 118755c5efbcSJames Hogan host->dir_status
= DW_MCI_RECV_STATUS; 11887e4bf1bcSJaehoon Chung else 118955c5efbcSJames Hogan host->dir_status = DW_MCI_SEND_STATUS; 11907e4bf1bcSJaehoon Chung 11917e4bf1bcSJaehoon Chung dw_mci_ctrl_thld(host, data); 119255c5efbcSJames Hogan 1193f95f3850SWill Newton if (dw_mci_submit_data_dma(host, data)) { 1194f9c2a0dcSSeungwon Jeon if (host->data->flags & MMC_DATA_READ) 1195f9c2a0dcSSeungwon Jeon flags |= SG_MITER_TO_SG; 1196f9c2a0dcSSeungwon Jeon else 1197f9c2a0dcSSeungwon Jeon flags |= SG_MITER_FROM_SG; 1198f9c2a0dcSSeungwon Jeon 1199f9c2a0dcSSeungwon Jeon sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1200f95f3850SWill Newton host->sg = data->sg; 120134b664a2SJames Hogan host->part_buf_start = 0; 120234b664a2SJames Hogan host->part_buf_count = 0; 1203f95f3850SWill Newton 1204b40af3aaSJames Hogan mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); 1205f8c58c11SDoug Anderson 1206f8c58c11SDoug Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 1207f95f3850SWill Newton temp = mci_readl(host, INTMASK); 1208f95f3850SWill Newton temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; 1209f95f3850SWill Newton mci_writel(host, INTMASK, temp); 1210f8c58c11SDoug Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 1211f95f3850SWill Newton 1212f95f3850SWill Newton temp = mci_readl(host, CTRL); 1213f95f3850SWill Newton temp &= ~SDMMC_CTRL_DMA_ENABLE; 1214f95f3850SWill Newton mci_writel(host, CTRL, temp); 121552426899SSeungwon Jeon 121652426899SSeungwon Jeon /* 1217d6fced83SJun Nie * Use the initial fifoth_val for PIO mode. If wm_aligned 1218d6fced83SJun Nie * is set, we set watermark same as data size. 121952426899SSeungwon Jeon * If next issued data may be transferred by DMA mode, 122052426899SSeungwon Jeon * prev_blksz should be invalidated.
/* NOTE(review): end of dw_mci_submit_data() PIO/DMA bookkeeping (prev_blksz cleared for PIO so the next DMA transfer re-tunes FIFOTH, kept for DMA), then most of dw_mci_setup_bus(): keeps SDMMC_CMD_VOLT_SWITCH asserted while CMD11 voltage switching is in flight, gates the clock off when clock==0, otherwise computes CLKDIV (round up, halved divider encoding — the "+ 1" sits after the divide to avoid over-clocking). Comment typo fixed on the next fused line ("Silent"->"Silence"); code bytes untouched. */
122152426899SSeungwon Jeon */ 1222d6fced83SJun Nie if (host->wm_aligned) 1223d6fced83SJun Nie dw_mci_adjust_fifoth(host, data); 1224d6fced83SJun Nie else 122552426899SSeungwon Jeon mci_writel(host, FIFOTH, host->fifoth_val); 122652426899SSeungwon Jeon host->prev_blksz = 0; 122752426899SSeungwon Jeon } else { 122852426899SSeungwon Jeon /* 122952426899SSeungwon Jeon * Keep the current block size. 123052426899SSeungwon Jeon * It will be used to decide whether to update 123152426899SSeungwon Jeon * fifoth register next time. 123252426899SSeungwon Jeon */ 123352426899SSeungwon Jeon host->prev_blksz = data->blksz; 1234f95f3850SWill Newton } 1235f95f3850SWill Newton } 1236f95f3850SWill Newton 1237ab269128SAbhilash Kesavan static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) 1238f95f3850SWill Newton { 1239f95f3850SWill Newton struct dw_mci *host = slot->host; 1240fdf492a1SDoug Anderson unsigned int clock = slot->clock; 1241f95f3850SWill Newton u32 div; 12429623b5b9SDoug Anderson u32 clk_en_a; 124301730558SDoug Anderson u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT; 124401730558SDoug Anderson 124501730558SDoug Anderson /* We must continue to set bit 28 in CMD until the change is complete */ 124601730558SDoug Anderson if (host->state == STATE_WAITING_CMD11_DONE) 124701730558SDoug Anderson sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH; 1248f95f3850SWill Newton 1249fdf492a1SDoug Anderson if (!clock) { 1250fdf492a1SDoug Anderson mci_writel(host, CLKENA, 0); 125101730558SDoug Anderson mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1252fdf492a1SDoug Anderson } else if (clock != host->current_speed || force_clkinit) { 1253fdf492a1SDoug Anderson div = host->bus_hz / clock; 1254fdf492a1SDoug Anderson if (host->bus_hz % clock && host->bus_hz > clock) 1255f95f3850SWill Newton /* 1256f95f3850SWill Newton * move the + 1 after the divide to prevent 1257f95f3850SWill Newton * over-clocking the card.
1258f95f3850SWill Newton */ 1259e419990bSSeungwon Jeon div += 1; 1260e419990bSSeungwon Jeon 1261fdf492a1SDoug Anderson div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; 1262f95f3850SWill Newton 1263e6cd7a8eSJaehoon Chung if ((clock != slot->__clk_old && 1264e6cd7a8eSJaehoon Chung !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) || 1265e6cd7a8eSJaehoon Chung force_clkinit) { 1266ce69e2feSShawn Lin /* Silence the verbose log if calling from PM context */ 1267ce69e2feSShawn Lin if (!force_clkinit) 1268f95f3850SWill Newton dev_info(&slot->mmc->class_dev, 1269fdf492a1SDoug Anderson "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", 1270fdf492a1SDoug Anderson slot->id, host->bus_hz, clock, 1271fdf492a1SDoug Anderson div ? ((host->bus_hz / div) >> 1) : 1272fdf492a1SDoug Anderson host->bus_hz, div); 1273f95f3850SWill Newton 1274e6cd7a8eSJaehoon Chung /* 1275e6cd7a8eSJaehoon Chung * If card is polling, display the message only 1276e6cd7a8eSJaehoon Chung * one time at boot time.
/* NOTE(review): end of dw_mci_setup_bus() — classic CIU clock-update dance (disable clock, inform CIU, set CLKDIV, inform CIU, re-enable with optional low-power gating, inform CIU) with each step pushed via mci_send_cmd(sdmmc_cmd_bits) — then __dw_mci_start_request(): resets per-request state, programs TMOUT/BYTCNT/BLKSIZ for data commands, submits data, starts the command, and for CMD11 (SD_SWITCH_VOLTAGE) arms cmd11_timer at 500 ms + 1 jiffy under irq_lock only if the command hasn't already completed (the databook's 2 ms limit is too tight in practice per the comment). The head of dw_mci_start_request() at the end is cut off at the chunk boundary and kept verbatim. Comment typo fixed ("the the"->"that the"); code bytes untouched. */
1277e6cd7a8eSJaehoon Chung */ 1278e6cd7a8eSJaehoon Chung if (slot->mmc->caps & MMC_CAP_NEEDS_POLL && 1279e6cd7a8eSJaehoon Chung slot->mmc->f_min == clock) 1280e6cd7a8eSJaehoon Chung set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags); 1281e6cd7a8eSJaehoon Chung } 1282e6cd7a8eSJaehoon Chung 1283f95f3850SWill Newton /* disable clock */ 1284f95f3850SWill Newton mci_writel(host, CLKENA, 0); 1285f95f3850SWill Newton mci_writel(host, CLKSRC, 0); 1286f95f3850SWill Newton 1287f95f3850SWill Newton /* inform CIU */ 128801730558SDoug Anderson mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1289f95f3850SWill Newton 1290f95f3850SWill Newton /* set clock to desired speed */ 1291f95f3850SWill Newton mci_writel(host, CLKDIV, div); 1292f95f3850SWill Newton 1293f95f3850SWill Newton /* inform CIU */ 129401730558SDoug Anderson mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1295f95f3850SWill Newton 12969623b5b9SDoug Anderson /* enable clock; only low power if no SDIO */ 12979623b5b9SDoug Anderson clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; 1298b24c8b26SDoug Anderson if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) 12999623b5b9SDoug Anderson clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; 13009623b5b9SDoug Anderson mci_writel(host, CLKENA, clk_en_a); 1301f95f3850SWill Newton 1302f95f3850SWill Newton /* inform CIU */ 130301730558SDoug Anderson mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1304005d675aSJaehoon Chung 1305005d675aSJaehoon Chung /* keep the last clock value that was requested from core */ 1306005d675aSJaehoon Chung slot->__clk_old = clock; 1307f95f3850SWill Newton } 1308f95f3850SWill Newton 1309fdf492a1SDoug Anderson host->current_speed = clock; 1310fdf492a1SDoug Anderson 1311f95f3850SWill Newton /* Set the current slot bus width */ 13121d56c453SSeungwon Jeon mci_writel(host, CTYPE, (slot->ctype << slot->id)); 1313f95f3850SWill Newton } 1314f95f3850SWill Newton 1315053b3ce6SSeungwon Jeon static void __dw_mci_start_request(struct dw_mci *host, 1316053b3ce6SSeungwon Jeon struct dw_mci_slot *slot,
1317053b3ce6SSeungwon Jeon struct mmc_command *cmd) 1318f95f3850SWill Newton { 1319f95f3850SWill Newton struct mmc_request *mrq; 1320f95f3850SWill Newton struct mmc_data *data; 1321f95f3850SWill Newton u32 cmdflags; 1322f95f3850SWill Newton 1323f95f3850SWill Newton mrq = slot->mrq; 1324f95f3850SWill Newton 1325f95f3850SWill Newton host->mrq = mrq; 1326f95f3850SWill Newton 1327f95f3850SWill Newton host->pending_events = 0; 1328f95f3850SWill Newton host->completed_events = 0; 1329e352c813SSeungwon Jeon host->cmd_status = 0; 1330f95f3850SWill Newton host->data_status = 0; 1331e352c813SSeungwon Jeon host->dir_status = 0; 1332f95f3850SWill Newton 1333053b3ce6SSeungwon Jeon data = cmd->data; 1334f95f3850SWill Newton if (data) { 1335f16afa88SJaehoon Chung mci_writel(host, TMOUT, 0xFFFFFFFF); 1336f95f3850SWill Newton mci_writel(host, BYTCNT, data->blksz*data->blocks); 1337f95f3850SWill Newton mci_writel(host, BLKSIZ, data->blksz); 1338f95f3850SWill Newton } 1339f95f3850SWill Newton 1340f95f3850SWill Newton cmdflags = dw_mci_prepare_command(slot->mmc, cmd); 1341f95f3850SWill Newton 1342f95f3850SWill Newton /* this is the first command, send the initialization clock */ 1343f95f3850SWill Newton if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags)) 1344f95f3850SWill Newton cmdflags |= SDMMC_CMD_INIT; 1345f95f3850SWill Newton 1346f95f3850SWill Newton if (data) { 1347f95f3850SWill Newton dw_mci_submit_data(host, data); 13480e3a22c0SShawn Lin wmb(); /* drain writebuffer */ 1349f95f3850SWill Newton } 1350f95f3850SWill Newton 1351f95f3850SWill Newton dw_mci_start_command(host, cmd, cmdflags); 1352f95f3850SWill Newton 13535c935165SDoug Anderson if (cmd->opcode == SD_SWITCH_VOLTAGE) { 135449ba0302SDoug Anderson unsigned long irqflags; 135549ba0302SDoug Anderson 13565c935165SDoug Anderson /* 13578886a6fdSDoug Anderson * Databook says to fail after 2ms w/ no response, but evidence 13588886a6fdSDoug Anderson * shows that sometimes the cmd11 interrupt takes over 130ms.
13598886a6fdSDoug Anderson * We'll set to 500ms, plus an extra jiffy just in case jiffies 13608886a6fdSDoug Anderson * is just about to roll over. 136149ba0302SDoug Anderson * 136249ba0302SDoug Anderson * We do this whole thing under spinlock and only if the 136349ba0302SDoug Anderson * command hasn't already completed (indicating that the irq 136449ba0302SDoug Anderson * already ran so we don't want the timeout). 13655c935165SDoug Anderson */ 136649ba0302SDoug Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 136749ba0302SDoug Anderson if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 13685c935165SDoug Anderson mod_timer(&host->cmd11_timer, 13698886a6fdSDoug Anderson jiffies + msecs_to_jiffies(500) + 1); 137049ba0302SDoug Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 13715c935165SDoug Anderson } 13725c935165SDoug Anderson 137390c2143aSSeungwon Jeon host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); 1374f95f3850SWill Newton } 1375f95f3850SWill Newton 1376053b3ce6SSeungwon Jeon static void dw_mci_start_request(struct dw_mci *host, 1377053b3ce6SSeungwon Jeon struct dw_mci_slot *slot) 1378053b3ce6SSeungwon Jeon { 1379053b3ce6SSeungwon Jeon struct mmc_request *mrq = slot->mrq; 1380053b3ce6SSeungwon Jeon struct mmc_command *cmd; 1381053b3ce6SSeungwon Jeon 1382053b3ce6SSeungwon Jeon cmd = mrq->sbc ?
		mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	/* Start immediately when idle; otherwise queue behind the active request */
	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

/* mmc_host_ops.request: entry point for requests issued by the MMC core */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

/* mmc_host_ops.set_ios: apply bus width, timing, clock and power settings */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	/* Give the platform-specific driver a chance to adjust settings */
	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/*return, if failed turn on vmmc*/
				return;
			}
		}
		/* First command after power-up must carry SDMMC_CMD_INIT */
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	/* A non-zero clock while waiting for cmd11 means the switch is over */
	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

/* mmc_host_ops.card_busy: report whether the card is holding DAT[3:0] low */
static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

/* mmc_host_ops.start_signal_voltage_switch: program 3.3V vs 1.8V signalling */
static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	/* Platform drivers may implement the whole switch themselves */
	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);

		if (ret) {
			dev_dbg(&mmc->class_dev,
				 "Regulator set error %d - %s V\n",
				 ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

/*
 * mmc_host_ops.get_ro: report write-protect state, preferring a dedicated
 * write-protect GPIO and falling back to the controller's WRTPRT register.
 */
static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

/* mmc_host_ops.hw_reset: pulse the card's RST_n line (eMMC hardware reset) */
static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

/* mmc_host_ops.init_card: per-card setup once the card type is known */
static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle.
 According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		/* Only touch the hardware when the setting actually changes */
		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}

/*
 * Mask or unmask this slot's SDIO interrupt in INTMASK, under irq_lock.
 * @enb: non-zero to enable, zero to disable.
 */
static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
{
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

/* mmc_host_ops.enable_sdio_irq */
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	__dw_mci_enable_sdio_irq(slot, enb);

	/* Avoid runtime suspending the device when SDIO IRQ is enabled */
	if (enb)
		pm_runtime_get_noresume(host->dev);
	else
		pm_runtime_put_noidle(host->dev);
}

/* mmc_host_ops.ack_sdio_irq: re-enable the SDIO interrupt after handling */
static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);

	__dw_mci_enable_sdio_irq(slot, 1);
}

/* mmc_host_ops.execute_tuning: delegate to the platform driver, if any */
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

/* mmc_host_ops.prepare_hs400_tuning: delegate to the platform driver, if any */
static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

/*
 * Full controller reset (CIU/FIFO, plus DMA when in use).
 * Returns true on success, false if the reset bits failed to clear.
 */
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;
	u32 status = 0;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS
		 * register to clear any interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		if (!host->use_dma) {
			ret = true;
			goto ciu_out;
		}

		/* Wait for dma_req to be cleared */
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_DMA_REQ),
					      1, 500 * USEC_PER_MSEC)) {
			dev_err(host->dev,
				"%s: Timeout waiting for dma_req to be cleared\n",
				__func__);
			goto ciu_out;
		}

		/* when using DMA next we reset the fifo again */
		if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
			goto ciu_out;
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also recommended that we reset and reprogram idmac */
		dw_mci_idmac_reset(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}

/* Host operations handed to the MMC core */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset               = dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.ack_sdio_irq		= dw_mci_ack_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};

/*
 * Finish the current request and start the next queued one, if any.
 * Drops and re-takes host->lock around mmc_request_done().
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

/*
 * Read back the command response and translate the raw interrupt status in
 * host->cmd_status into an errno in cmd->error.  Returns cmd->error.
 */
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* 136-bit response: RESP0..RESP3 fill resp[3]..resp[0] */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}

/*
 * Translate the raw data-phase interrupt status in host->data_status into
 * data->error / data->bytes_xfered.  Returns data->error.
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
			    DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
				   DW_MCI_RECV_STATUS) {
				data->error = -EILSEQ;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EILSEQ;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

/*
 * Arm the data-read timeout (dto) software timer.  The timeout is derived
 * from the TMOUT and CLKDIV registers and the controller clock rate.
 */
static void dw_mci_set_drto(struct dw_mci *host)
{
	unsigned int drto_clks;
	unsigned int drto_div;
	unsigned int drto_ms;
	unsigned long irqflags;

	drto_clks = mci_readl(host, TMOUT) >> 8;
	drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (drto_div == 0)
		drto_div = 1;
	drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div,
			       host->bus_hz);

	/* add a bit spare time */
	drto_ms += 10;

	/* Only arm the timer if the irq hasn't already signalled completion */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		mod_timer(&host->dto_timer,
			  jiffies + msecs_to_jiffies(drto_ms));
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

/*
 * Consume a pending command-complete event, stopping the cto timer.
 * Returns true if the event was pending (and is now cleared).
 */
static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		return false;

	/*
	 * Really be certain that the timer has stopped.  This is a bit of
	 * paranoia and could only really happen if we had really bad
	 * interrupt latency and the interrupt routine and timeout were
	 * running concurrently so that the del_timer() in the interrupt
	 * handler couldn't run.
	 */
	WARN_ON(del_timer_sync(&host->cto_timer));
	clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);

	return true;
}

/*
 * Consume a pending data-complete event, stopping the dto timer.
 * Returns true if the event was pending (and is now cleared).
 */
static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		return false;

	/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
	WARN_ON(del_timer_sync(&host->dto_timer));
	clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);

	return true;
}

/* Bottom-half state machine driving the active request through its phases */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
Newton break; 2013f95f3850SWill Newton 201401730558SDoug Anderson case STATE_SENDING_CMD11: 2015f95f3850SWill Newton case STATE_SENDING_CMD: 20168892b705SDouglas Anderson if (!dw_mci_clear_pending_cmd_complete(host)) 2017f95f3850SWill Newton break; 2018f95f3850SWill Newton 2019f95f3850SWill Newton cmd = host->cmd; 2020f95f3850SWill Newton host->cmd = NULL; 2021f95f3850SWill Newton set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 2022e352c813SSeungwon Jeon err = dw_mci_command_complete(host, cmd); 2023e352c813SSeungwon Jeon if (cmd == mrq->sbc && !err) { 2024053b3ce6SSeungwon Jeon prev_state = state = STATE_SENDING_CMD; 202542f989c0SJaehoon Chung __dw_mci_start_request(host, host->slot, 2026e352c813SSeungwon Jeon mrq->cmd); 2027053b3ce6SSeungwon Jeon goto unlock; 2028053b3ce6SSeungwon Jeon } 2029053b3ce6SSeungwon Jeon 2030e352c813SSeungwon Jeon if (cmd->data && err) { 203146d17952SDoug Anderson /* 203246d17952SDoug Anderson * During UHS tuning sequence, sending the stop 203346d17952SDoug Anderson * command after the response CRC error would 203446d17952SDoug Anderson * throw the system into a confused state 203546d17952SDoug Anderson * causing all future tuning phases to report 203646d17952SDoug Anderson * failure. 203746d17952SDoug Anderson * 203846d17952SDoug Anderson * In such case controller will move into a data 203946d17952SDoug Anderson * transfer state after a response error or 204046d17952SDoug Anderson * response CRC error. Let's let that finish 204146d17952SDoug Anderson * before trying to send a stop, so we'll go to 204246d17952SDoug Anderson * STATE_SENDING_DATA. 
204346d17952SDoug Anderson * 204446d17952SDoug Anderson * Although letting the data transfer take place 204546d17952SDoug Anderson * will waste a bit of time (we already know 204646d17952SDoug Anderson * the command was bad), it can't cause any 204746d17952SDoug Anderson * errors since it's possible it would have 204846d17952SDoug Anderson * taken place anyway if this tasklet got 204946d17952SDoug Anderson * delayed. Allowing the transfer to take place 205046d17952SDoug Anderson * avoids races and keeps things simple. 205146d17952SDoug Anderson */ 205246d17952SDoug Anderson if ((err != -ETIMEDOUT) && 205346d17952SDoug Anderson (cmd->opcode == MMC_SEND_TUNING_BLOCK)) { 205446d17952SDoug Anderson state = STATE_SENDING_DATA; 205546d17952SDoug Anderson continue; 205646d17952SDoug Anderson } 205746d17952SDoug Anderson 205871abb133SSeungwon Jeon dw_mci_stop_dma(host); 205990c2143aSSeungwon Jeon send_stop_abort(host, data); 206071abb133SSeungwon Jeon state = STATE_SENDING_STOP; 206171abb133SSeungwon Jeon break; 206271abb133SSeungwon Jeon } 206371abb133SSeungwon Jeon 2064e352c813SSeungwon Jeon if (!cmd->data || err) { 2065e352c813SSeungwon Jeon dw_mci_request_end(host, mrq); 2066f95f3850SWill Newton goto unlock; 2067f95f3850SWill Newton } 2068f95f3850SWill Newton 2069f95f3850SWill Newton prev_state = state = STATE_SENDING_DATA; 2070f95f3850SWill Newton /* fall through */ 2071f95f3850SWill Newton 2072f95f3850SWill Newton case STATE_SENDING_DATA: 20732aa35465SDoug Anderson /* 20742aa35465SDoug Anderson * We could get a data error and never a transfer 20752aa35465SDoug Anderson * complete so we'd better check for it here. 20762aa35465SDoug Anderson * 20772aa35465SDoug Anderson * Note that we don't really care if we also got a 20782aa35465SDoug Anderson * transfer complete; stopping the DMA and sending an 20792aa35465SDoug Anderson * abort won't hurt. 
20802aa35465SDoug Anderson */ 2081f95f3850SWill Newton if (test_and_clear_bit(EVENT_DATA_ERROR, 2082f95f3850SWill Newton &host->pending_events)) { 2083f95f3850SWill Newton dw_mci_stop_dma(host); 2084e13c3c08SJaehoon Chung if (!(host->data_status & (SDMMC_INT_DRTO | 2085bdb9a90bSaddy ke SDMMC_INT_EBE))) 208690c2143aSSeungwon Jeon send_stop_abort(host, data); 2087f95f3850SWill Newton state = STATE_DATA_ERROR; 2088f95f3850SWill Newton break; 2089f95f3850SWill Newton } 2090f95f3850SWill Newton 2091f95f3850SWill Newton if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 209257e10486SAddy Ke &host->pending_events)) { 209357e10486SAddy Ke /* 209457e10486SAddy Ke * If all data-related interrupts don't come 209557e10486SAddy Ke * within the given time in reading data state. 209657e10486SAddy Ke */ 209716a34574SJaehoon Chung if (host->dir_status == DW_MCI_RECV_STATUS) 209857e10486SAddy Ke dw_mci_set_drto(host); 2099f95f3850SWill Newton break; 210057e10486SAddy Ke } 2101f95f3850SWill Newton 2102f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 21032aa35465SDoug Anderson 21042aa35465SDoug Anderson /* 21052aa35465SDoug Anderson * Handle an EVENT_DATA_ERROR that might have shown up 21062aa35465SDoug Anderson * before the transfer completed. This might not have 21072aa35465SDoug Anderson * been caught by the check above because the interrupt 21082aa35465SDoug Anderson * could have gone off between the previous check and 21092aa35465SDoug Anderson * the check for transfer complete. 21102aa35465SDoug Anderson * 21112aa35465SDoug Anderson * Technically this ought not be needed assuming we 21122aa35465SDoug Anderson * get a DATA_COMPLETE eventually (we'll notice the 21132aa35465SDoug Anderson * error and end the request), but it shouldn't hurt. 21142aa35465SDoug Anderson * 21152aa35465SDoug Anderson * This has the advantage of sending the stop command. 
21162aa35465SDoug Anderson */ 21172aa35465SDoug Anderson if (test_and_clear_bit(EVENT_DATA_ERROR, 21182aa35465SDoug Anderson &host->pending_events)) { 21192aa35465SDoug Anderson dw_mci_stop_dma(host); 2120e13c3c08SJaehoon Chung if (!(host->data_status & (SDMMC_INT_DRTO | 2121bdb9a90bSaddy ke SDMMC_INT_EBE))) 21222aa35465SDoug Anderson send_stop_abort(host, data); 21232aa35465SDoug Anderson state = STATE_DATA_ERROR; 21242aa35465SDoug Anderson break; 21252aa35465SDoug Anderson } 2126f95f3850SWill Newton prev_state = state = STATE_DATA_BUSY; 21272aa35465SDoug Anderson 2128f95f3850SWill Newton /* fall through */ 2129f95f3850SWill Newton 2130f95f3850SWill Newton case STATE_DATA_BUSY: 213193c23ae3SDouglas Anderson if (!dw_mci_clear_pending_data_complete(host)) { 213257e10486SAddy Ke /* 213357e10486SAddy Ke * If data error interrupt comes but data over 213457e10486SAddy Ke * interrupt doesn't come within the given time. 213557e10486SAddy Ke * in reading data state. 213657e10486SAddy Ke */ 213716a34574SJaehoon Chung if (host->dir_status == DW_MCI_RECV_STATUS) 213857e10486SAddy Ke dw_mci_set_drto(host); 2139f95f3850SWill Newton break; 214057e10486SAddy Ke } 2141f95f3850SWill Newton 2142f95f3850SWill Newton host->data = NULL; 2143f95f3850SWill Newton set_bit(EVENT_DATA_COMPLETE, &host->completed_events); 2144e352c813SSeungwon Jeon err = dw_mci_data_complete(host, data); 2145f95f3850SWill Newton 2146e352c813SSeungwon Jeon if (!err) { 2147e352c813SSeungwon Jeon if (!data->stop || mrq->sbc) { 214817c8bc85SSachin Kamat if (mrq->sbc && data->stop) 2149053b3ce6SSeungwon Jeon data->stop->error = 0; 2150e352c813SSeungwon Jeon dw_mci_request_end(host, mrq); 2151053b3ce6SSeungwon Jeon goto unlock; 2152053b3ce6SSeungwon Jeon } 2153053b3ce6SSeungwon Jeon 215490c2143aSSeungwon Jeon /* stop command for open-ended transfer*/ 2155e352c813SSeungwon Jeon if (data->stop) 215690c2143aSSeungwon Jeon send_stop_abort(host, data); 21572aa35465SDoug Anderson } else { 21582aa35465SDoug Anderson /* 
21592aa35465SDoug Anderson * If we don't have a command complete now we'll 21602aa35465SDoug Anderson * never get one since we just reset everything; 21612aa35465SDoug Anderson * better end the request. 21622aa35465SDoug Anderson * 21632aa35465SDoug Anderson * If we do have a command complete we'll fall 21642aa35465SDoug Anderson * through to the SENDING_STOP command and 21652aa35465SDoug Anderson * everything will be peachy keen. 21662aa35465SDoug Anderson */ 21672aa35465SDoug Anderson if (!test_bit(EVENT_CMD_COMPLETE, 21682aa35465SDoug Anderson &host->pending_events)) { 21692aa35465SDoug Anderson host->cmd = NULL; 21702aa35465SDoug Anderson dw_mci_request_end(host, mrq); 21712aa35465SDoug Anderson goto unlock; 21722aa35465SDoug Anderson } 217390c2143aSSeungwon Jeon } 2174e352c813SSeungwon Jeon 2175e352c813SSeungwon Jeon /* 2176e352c813SSeungwon Jeon * If err has non-zero, 2177e352c813SSeungwon Jeon * stop-abort command has been already issued. 2178e352c813SSeungwon Jeon */ 2179e352c813SSeungwon Jeon prev_state = state = STATE_SENDING_STOP; 2180e352c813SSeungwon Jeon 2181f95f3850SWill Newton /* fall through */ 2182f95f3850SWill Newton 2183f95f3850SWill Newton case STATE_SENDING_STOP: 21848892b705SDouglas Anderson if (!dw_mci_clear_pending_cmd_complete(host)) 2185f95f3850SWill Newton break; 2186f95f3850SWill Newton 218771abb133SSeungwon Jeon /* CMD error in data command */ 218831bff450SSeungwon Jeon if (mrq->cmd->error && mrq->data) 21893a33a94cSSonny Rao dw_mci_reset(host); 219071abb133SSeungwon Jeon 2191f95f3850SWill Newton host->cmd = NULL; 219271abb133SSeungwon Jeon host->data = NULL; 219390c2143aSSeungwon Jeon 2194e13c3c08SJaehoon Chung if (!mrq->sbc && mrq->stop) 2195e352c813SSeungwon Jeon dw_mci_command_complete(host, mrq->stop); 219690c2143aSSeungwon Jeon else 219790c2143aSSeungwon Jeon host->cmd_status = 0; 219890c2143aSSeungwon Jeon 2199e352c813SSeungwon Jeon dw_mci_request_end(host, mrq); 2200f95f3850SWill Newton goto unlock; 2201f95f3850SWill Newton 
2202f95f3850SWill Newton case STATE_DATA_ERROR: 2203f95f3850SWill Newton if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 2204f95f3850SWill Newton &host->pending_events)) 2205f95f3850SWill Newton break; 2206f95f3850SWill Newton 2207f95f3850SWill Newton state = STATE_DATA_BUSY; 2208f95f3850SWill Newton break; 2209f95f3850SWill Newton } 2210f95f3850SWill Newton } while (state != prev_state); 2211f95f3850SWill Newton 2212f95f3850SWill Newton host->state = state; 2213f95f3850SWill Newton unlock: 2214f95f3850SWill Newton spin_unlock(&host->lock); 2215f95f3850SWill Newton 2216f95f3850SWill Newton } 2217f95f3850SWill Newton 221834b664a2SJames Hogan /* push final bytes to part_buf, only use during push */ 221934b664a2SJames Hogan static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 222034b664a2SJames Hogan { 222134b664a2SJames Hogan memcpy((void *)&host->part_buf, buf, cnt); 222234b664a2SJames Hogan host->part_buf_count = cnt; 222334b664a2SJames Hogan } 222434b664a2SJames Hogan 222534b664a2SJames Hogan /* append bytes to part_buf, only use during push */ 222634b664a2SJames Hogan static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) 222734b664a2SJames Hogan { 222834b664a2SJames Hogan cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); 222934b664a2SJames Hogan memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); 223034b664a2SJames Hogan host->part_buf_count += cnt; 223134b664a2SJames Hogan return cnt; 223234b664a2SJames Hogan } 223334b664a2SJames Hogan 223434b664a2SJames Hogan /* pull first bytes from part_buf, only use during pull */ 223534b664a2SJames Hogan static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 223634b664a2SJames Hogan { 22370e3a22c0SShawn Lin cnt = min_t(int, cnt, host->part_buf_count); 223834b664a2SJames Hogan if (cnt) { 223934b664a2SJames Hogan memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 224034b664a2SJames Hogan cnt); 224134b664a2SJames Hogan 
host->part_buf_count -= cnt; 224234b664a2SJames Hogan host->part_buf_start += cnt; 224334b664a2SJames Hogan } 224434b664a2SJames Hogan return cnt; 224534b664a2SJames Hogan } 224634b664a2SJames Hogan 224734b664a2SJames Hogan /* pull final bytes from the part_buf, assuming it's just been filled */ 224834b664a2SJames Hogan static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) 224934b664a2SJames Hogan { 225034b664a2SJames Hogan memcpy(buf, &host->part_buf, cnt); 225134b664a2SJames Hogan host->part_buf_start = cnt; 225234b664a2SJames Hogan host->part_buf_count = (1 << host->data_shift) - cnt; 225334b664a2SJames Hogan } 225434b664a2SJames Hogan 2255f95f3850SWill Newton static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 2256f95f3850SWill Newton { 2257cfbeb59cSMarkos Chandras struct mmc_data *data = host->data; 2258cfbeb59cSMarkos Chandras int init_cnt = cnt; 2259cfbeb59cSMarkos Chandras 226034b664a2SJames Hogan /* try and push anything in the part_buf */ 226134b664a2SJames Hogan if (unlikely(host->part_buf_count)) { 226234b664a2SJames Hogan int len = dw_mci_push_part_bytes(host, buf, cnt); 22630e3a22c0SShawn Lin 226434b664a2SJames Hogan buf += len; 226534b664a2SJames Hogan cnt -= len; 2266cfbeb59cSMarkos Chandras if (host->part_buf_count == 2) { 226776184ac1SBen Dooks mci_fifo_writew(host->fifo_reg, host->part_buf16); 226834b664a2SJames Hogan host->part_buf_count = 0; 226934b664a2SJames Hogan } 227034b664a2SJames Hogan } 227134b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 227234b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x1)) { 227334b664a2SJames Hogan while (cnt >= 2) { 227434b664a2SJames Hogan u16 aligned_buf[64]; 227534b664a2SJames Hogan int len = min(cnt & -2, (int)sizeof(aligned_buf)); 227634b664a2SJames Hogan int items = len >> 1; 227734b664a2SJames Hogan int i; 227834b664a2SJames Hogan /* memcpy from input buffer into aligned buffer */ 227934b664a2SJames Hogan memcpy(aligned_buf, buf, len); 
228034b664a2SJames Hogan buf += len; 228134b664a2SJames Hogan cnt -= len; 228234b664a2SJames Hogan /* push data from aligned buffer into fifo */ 228334b664a2SJames Hogan for (i = 0; i < items; ++i) 228476184ac1SBen Dooks mci_fifo_writew(host->fifo_reg, aligned_buf[i]); 228534b664a2SJames Hogan } 228634b664a2SJames Hogan } else 228734b664a2SJames Hogan #endif 228834b664a2SJames Hogan { 228934b664a2SJames Hogan u16 *pdata = buf; 22900e3a22c0SShawn Lin 229134b664a2SJames Hogan for (; cnt >= 2; cnt -= 2) 229276184ac1SBen Dooks mci_fifo_writew(host->fifo_reg, *pdata++); 229334b664a2SJames Hogan buf = pdata; 229434b664a2SJames Hogan } 229534b664a2SJames Hogan /* put anything remaining in the part_buf */ 229634b664a2SJames Hogan if (cnt) { 229734b664a2SJames Hogan dw_mci_set_part_bytes(host, buf, cnt); 2298cfbeb59cSMarkos Chandras /* Push data if we have reached the expected data length */ 2299cfbeb59cSMarkos Chandras if ((data->bytes_xfered + init_cnt) == 2300cfbeb59cSMarkos Chandras (data->blksz * data->blocks)) 230176184ac1SBen Dooks mci_fifo_writew(host->fifo_reg, host->part_buf16); 2302f95f3850SWill Newton } 2303f95f3850SWill Newton } 2304f95f3850SWill Newton 2305f95f3850SWill Newton static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 2306f95f3850SWill Newton { 230734b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 230834b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x1)) { 230934b664a2SJames Hogan while (cnt >= 2) { 231034b664a2SJames Hogan /* pull data from fifo into aligned buffer */ 231134b664a2SJames Hogan u16 aligned_buf[64]; 231234b664a2SJames Hogan int len = min(cnt & -2, (int)sizeof(aligned_buf)); 231334b664a2SJames Hogan int items = len >> 1; 231434b664a2SJames Hogan int i; 23150e3a22c0SShawn Lin 231634b664a2SJames Hogan for (i = 0; i < items; ++i) 231776184ac1SBen Dooks aligned_buf[i] = mci_fifo_readw(host->fifo_reg); 231834b664a2SJames Hogan /* memcpy from aligned buffer into output buffer */ 231934b664a2SJames 
Hogan memcpy(buf, aligned_buf, len); 232034b664a2SJames Hogan buf += len; 232134b664a2SJames Hogan cnt -= len; 232234b664a2SJames Hogan } 232334b664a2SJames Hogan } else 232434b664a2SJames Hogan #endif 232534b664a2SJames Hogan { 232634b664a2SJames Hogan u16 *pdata = buf; 23270e3a22c0SShawn Lin 232834b664a2SJames Hogan for (; cnt >= 2; cnt -= 2) 232976184ac1SBen Dooks *pdata++ = mci_fifo_readw(host->fifo_reg); 233034b664a2SJames Hogan buf = pdata; 233134b664a2SJames Hogan } 233234b664a2SJames Hogan if (cnt) { 233376184ac1SBen Dooks host->part_buf16 = mci_fifo_readw(host->fifo_reg); 233434b664a2SJames Hogan dw_mci_pull_final_bytes(host, buf, cnt); 2335f95f3850SWill Newton } 2336f95f3850SWill Newton } 2337f95f3850SWill Newton 2338f95f3850SWill Newton static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 2339f95f3850SWill Newton { 2340cfbeb59cSMarkos Chandras struct mmc_data *data = host->data; 2341cfbeb59cSMarkos Chandras int init_cnt = cnt; 2342cfbeb59cSMarkos Chandras 234334b664a2SJames Hogan /* try and push anything in the part_buf */ 234434b664a2SJames Hogan if (unlikely(host->part_buf_count)) { 234534b664a2SJames Hogan int len = dw_mci_push_part_bytes(host, buf, cnt); 23460e3a22c0SShawn Lin 234734b664a2SJames Hogan buf += len; 234834b664a2SJames Hogan cnt -= len; 2349cfbeb59cSMarkos Chandras if (host->part_buf_count == 4) { 235076184ac1SBen Dooks mci_fifo_writel(host->fifo_reg, host->part_buf32); 235134b664a2SJames Hogan host->part_buf_count = 0; 235234b664a2SJames Hogan } 235334b664a2SJames Hogan } 235434b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 235534b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x3)) { 235634b664a2SJames Hogan while (cnt >= 4) { 235734b664a2SJames Hogan u32 aligned_buf[32]; 235834b664a2SJames Hogan int len = min(cnt & -4, (int)sizeof(aligned_buf)); 235934b664a2SJames Hogan int items = len >> 2; 236034b664a2SJames Hogan int i; 236134b664a2SJames Hogan /* memcpy from input buffer into aligned 
buffer */ 236234b664a2SJames Hogan memcpy(aligned_buf, buf, len); 236334b664a2SJames Hogan buf += len; 236434b664a2SJames Hogan cnt -= len; 236534b664a2SJames Hogan /* push data from aligned buffer into fifo */ 236634b664a2SJames Hogan for (i = 0; i < items; ++i) 236776184ac1SBen Dooks mci_fifo_writel(host->fifo_reg, aligned_buf[i]); 236834b664a2SJames Hogan } 236934b664a2SJames Hogan } else 237034b664a2SJames Hogan #endif 237134b664a2SJames Hogan { 237234b664a2SJames Hogan u32 *pdata = buf; 23730e3a22c0SShawn Lin 237434b664a2SJames Hogan for (; cnt >= 4; cnt -= 4) 237576184ac1SBen Dooks mci_fifo_writel(host->fifo_reg, *pdata++); 237634b664a2SJames Hogan buf = pdata; 237734b664a2SJames Hogan } 237834b664a2SJames Hogan /* put anything remaining in the part_buf */ 237934b664a2SJames Hogan if (cnt) { 238034b664a2SJames Hogan dw_mci_set_part_bytes(host, buf, cnt); 2381cfbeb59cSMarkos Chandras /* Push data if we have reached the expected data length */ 2382cfbeb59cSMarkos Chandras if ((data->bytes_xfered + init_cnt) == 2383cfbeb59cSMarkos Chandras (data->blksz * data->blocks)) 238476184ac1SBen Dooks mci_fifo_writel(host->fifo_reg, host->part_buf32); 2385f95f3850SWill Newton } 2386f95f3850SWill Newton } 2387f95f3850SWill Newton 2388f95f3850SWill Newton static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 2389f95f3850SWill Newton { 239034b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 239134b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x3)) { 239234b664a2SJames Hogan while (cnt >= 4) { 239334b664a2SJames Hogan /* pull data from fifo into aligned buffer */ 239434b664a2SJames Hogan u32 aligned_buf[32]; 239534b664a2SJames Hogan int len = min(cnt & -4, (int)sizeof(aligned_buf)); 239634b664a2SJames Hogan int items = len >> 2; 239734b664a2SJames Hogan int i; 23980e3a22c0SShawn Lin 239934b664a2SJames Hogan for (i = 0; i < items; ++i) 240076184ac1SBen Dooks aligned_buf[i] = mci_fifo_readl(host->fifo_reg); 240134b664a2SJames Hogan /* 
memcpy from aligned buffer into output buffer */ 240234b664a2SJames Hogan memcpy(buf, aligned_buf, len); 240334b664a2SJames Hogan buf += len; 240434b664a2SJames Hogan cnt -= len; 240534b664a2SJames Hogan } 240634b664a2SJames Hogan } else 240734b664a2SJames Hogan #endif 240834b664a2SJames Hogan { 240934b664a2SJames Hogan u32 *pdata = buf; 24100e3a22c0SShawn Lin 241134b664a2SJames Hogan for (; cnt >= 4; cnt -= 4) 241276184ac1SBen Dooks *pdata++ = mci_fifo_readl(host->fifo_reg); 241334b664a2SJames Hogan buf = pdata; 241434b664a2SJames Hogan } 241534b664a2SJames Hogan if (cnt) { 241676184ac1SBen Dooks host->part_buf32 = mci_fifo_readl(host->fifo_reg); 241734b664a2SJames Hogan dw_mci_pull_final_bytes(host, buf, cnt); 2418f95f3850SWill Newton } 2419f95f3850SWill Newton } 2420f95f3850SWill Newton 2421f95f3850SWill Newton static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 2422f95f3850SWill Newton { 2423cfbeb59cSMarkos Chandras struct mmc_data *data = host->data; 2424cfbeb59cSMarkos Chandras int init_cnt = cnt; 2425cfbeb59cSMarkos Chandras 242634b664a2SJames Hogan /* try and push anything in the part_buf */ 242734b664a2SJames Hogan if (unlikely(host->part_buf_count)) { 242834b664a2SJames Hogan int len = dw_mci_push_part_bytes(host, buf, cnt); 24290e3a22c0SShawn Lin 243034b664a2SJames Hogan buf += len; 243134b664a2SJames Hogan cnt -= len; 2432c09fbd74SSeungwon Jeon 2433cfbeb59cSMarkos Chandras if (host->part_buf_count == 8) { 243476184ac1SBen Dooks mci_fifo_writeq(host->fifo_reg, host->part_buf); 243534b664a2SJames Hogan host->part_buf_count = 0; 243634b664a2SJames Hogan } 243734b664a2SJames Hogan } 243834b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 243934b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x7)) { 244034b664a2SJames Hogan while (cnt >= 8) { 244134b664a2SJames Hogan u64 aligned_buf[16]; 244234b664a2SJames Hogan int len = min(cnt & -8, (int)sizeof(aligned_buf)); 244334b664a2SJames Hogan int items = len >> 3; 
244434b664a2SJames Hogan int i; 244534b664a2SJames Hogan /* memcpy from input buffer into aligned buffer */ 244634b664a2SJames Hogan memcpy(aligned_buf, buf, len); 244734b664a2SJames Hogan buf += len; 244834b664a2SJames Hogan cnt -= len; 244934b664a2SJames Hogan /* push data from aligned buffer into fifo */ 245034b664a2SJames Hogan for (i = 0; i < items; ++i) 245176184ac1SBen Dooks mci_fifo_writeq(host->fifo_reg, aligned_buf[i]); 245234b664a2SJames Hogan } 245334b664a2SJames Hogan } else 245434b664a2SJames Hogan #endif 245534b664a2SJames Hogan { 245634b664a2SJames Hogan u64 *pdata = buf; 24570e3a22c0SShawn Lin 245834b664a2SJames Hogan for (; cnt >= 8; cnt -= 8) 245976184ac1SBen Dooks mci_fifo_writeq(host->fifo_reg, *pdata++); 246034b664a2SJames Hogan buf = pdata; 246134b664a2SJames Hogan } 246234b664a2SJames Hogan /* put anything remaining in the part_buf */ 246334b664a2SJames Hogan if (cnt) { 246434b664a2SJames Hogan dw_mci_set_part_bytes(host, buf, cnt); 2465cfbeb59cSMarkos Chandras /* Push data if we have reached the expected data length */ 2466cfbeb59cSMarkos Chandras if ((data->bytes_xfered + init_cnt) == 2467cfbeb59cSMarkos Chandras (data->blksz * data->blocks)) 246876184ac1SBen Dooks mci_fifo_writeq(host->fifo_reg, host->part_buf); 2469f95f3850SWill Newton } 2470f95f3850SWill Newton } 2471f95f3850SWill Newton 2472f95f3850SWill Newton static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 2473f95f3850SWill Newton { 247434b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 247534b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x7)) { 247634b664a2SJames Hogan while (cnt >= 8) { 247734b664a2SJames Hogan /* pull data from fifo into aligned buffer */ 247834b664a2SJames Hogan u64 aligned_buf[16]; 247934b664a2SJames Hogan int len = min(cnt & -8, (int)sizeof(aligned_buf)); 248034b664a2SJames Hogan int items = len >> 3; 248134b664a2SJames Hogan int i; 24820e3a22c0SShawn Lin 248334b664a2SJames Hogan for (i = 0; i < items; ++i) 
248476184ac1SBen Dooks aligned_buf[i] = mci_fifo_readq(host->fifo_reg); 248576184ac1SBen Dooks 248634b664a2SJames Hogan /* memcpy from aligned buffer into output buffer */ 248734b664a2SJames Hogan memcpy(buf, aligned_buf, len); 248834b664a2SJames Hogan buf += len; 248934b664a2SJames Hogan cnt -= len; 2490f95f3850SWill Newton } 249134b664a2SJames Hogan } else 249234b664a2SJames Hogan #endif 249334b664a2SJames Hogan { 249434b664a2SJames Hogan u64 *pdata = buf; 24950e3a22c0SShawn Lin 249634b664a2SJames Hogan for (; cnt >= 8; cnt -= 8) 249776184ac1SBen Dooks *pdata++ = mci_fifo_readq(host->fifo_reg); 249834b664a2SJames Hogan buf = pdata; 249934b664a2SJames Hogan } 250034b664a2SJames Hogan if (cnt) { 250176184ac1SBen Dooks host->part_buf = mci_fifo_readq(host->fifo_reg); 250234b664a2SJames Hogan dw_mci_pull_final_bytes(host, buf, cnt); 250334b664a2SJames Hogan } 250434b664a2SJames Hogan } 250534b664a2SJames Hogan 250634b664a2SJames Hogan static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) 250734b664a2SJames Hogan { 250834b664a2SJames Hogan int len; 250934b664a2SJames Hogan 251034b664a2SJames Hogan /* get remaining partial bytes */ 251134b664a2SJames Hogan len = dw_mci_pull_part_bytes(host, buf, cnt); 251234b664a2SJames Hogan if (unlikely(len == cnt)) 251334b664a2SJames Hogan return; 251434b664a2SJames Hogan buf += len; 251534b664a2SJames Hogan cnt -= len; 251634b664a2SJames Hogan 251734b664a2SJames Hogan /* get the rest of the data */ 251834b664a2SJames Hogan host->pull_data(host, buf, cnt); 2519f95f3850SWill Newton } 2520f95f3850SWill Newton 252187a74d39SKyoungil Kim static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) 2522f95f3850SWill Newton { 2523f9c2a0dcSSeungwon Jeon struct sg_mapping_iter *sg_miter = &host->sg_miter; 2524f9c2a0dcSSeungwon Jeon void *buf; 2525f9c2a0dcSSeungwon Jeon unsigned int offset; 2526f95f3850SWill Newton struct mmc_data *data = host->data; 2527f95f3850SWill Newton int shift = host->data_shift; 2528f95f3850SWill 
Newton u32 status; 25293e4b0d8bSMarkos Chandras unsigned int len; 2530f9c2a0dcSSeungwon Jeon unsigned int remain, fcnt; 2531f95f3850SWill Newton 2532f95f3850SWill Newton do { 2533f9c2a0dcSSeungwon Jeon if (!sg_miter_next(sg_miter)) 2534f9c2a0dcSSeungwon Jeon goto done; 2535f95f3850SWill Newton 25364225fc85SImre Deak host->sg = sg_miter->piter.sg; 2537f9c2a0dcSSeungwon Jeon buf = sg_miter->addr; 2538f9c2a0dcSSeungwon Jeon remain = sg_miter->length; 2539f9c2a0dcSSeungwon Jeon offset = 0; 2540f9c2a0dcSSeungwon Jeon 2541f9c2a0dcSSeungwon Jeon do { 2542f9c2a0dcSSeungwon Jeon fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) 2543f9c2a0dcSSeungwon Jeon << shift) + host->part_buf_count; 2544f9c2a0dcSSeungwon Jeon len = min(remain, fcnt); 2545f9c2a0dcSSeungwon Jeon if (!len) 2546f9c2a0dcSSeungwon Jeon break; 2547f9c2a0dcSSeungwon Jeon dw_mci_pull_data(host, (void *)(buf + offset), len); 25483e4b0d8bSMarkos Chandras data->bytes_xfered += len; 2549f95f3850SWill Newton offset += len; 2550f9c2a0dcSSeungwon Jeon remain -= len; 2551f9c2a0dcSSeungwon Jeon } while (remain); 2552f95f3850SWill Newton 2553e74f3a9cSSeungwon Jeon sg_miter->consumed = offset; 2554f95f3850SWill Newton status = mci_readl(host, MINTSTS); 2555f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 255687a74d39SKyoungil Kim /* if the RXDR is ready read again */ 255787a74d39SKyoungil Kim } while ((status & SDMMC_INT_RXDR) || 255887a74d39SKyoungil Kim (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); 2559f9c2a0dcSSeungwon Jeon 2560f9c2a0dcSSeungwon Jeon if (!remain) { 2561f9c2a0dcSSeungwon Jeon if (!sg_miter_next(sg_miter)) 2562f9c2a0dcSSeungwon Jeon goto done; 2563f9c2a0dcSSeungwon Jeon sg_miter->consumed = 0; 2564f9c2a0dcSSeungwon Jeon } 2565f9c2a0dcSSeungwon Jeon sg_miter_stop(sg_miter); 2566f95f3850SWill Newton return; 2567f95f3850SWill Newton 2568f95f3850SWill Newton done: 2569f9c2a0dcSSeungwon Jeon sg_miter_stop(sg_miter); 2570f9c2a0dcSSeungwon Jeon host->sg = NULL; 25710e3a22c0SShawn Lin 
smp_wmb(); /* drain writebuffer */ 2572f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2573f95f3850SWill Newton } 2574f95f3850SWill Newton 2575f95f3850SWill Newton static void dw_mci_write_data_pio(struct dw_mci *host) 2576f95f3850SWill Newton { 2577f9c2a0dcSSeungwon Jeon struct sg_mapping_iter *sg_miter = &host->sg_miter; 2578f9c2a0dcSSeungwon Jeon void *buf; 2579f9c2a0dcSSeungwon Jeon unsigned int offset; 2580f95f3850SWill Newton struct mmc_data *data = host->data; 2581f95f3850SWill Newton int shift = host->data_shift; 2582f95f3850SWill Newton u32 status; 25833e4b0d8bSMarkos Chandras unsigned int len; 2584f9c2a0dcSSeungwon Jeon unsigned int fifo_depth = host->fifo_depth; 2585f9c2a0dcSSeungwon Jeon unsigned int remain, fcnt; 2586f95f3850SWill Newton 2587f95f3850SWill Newton do { 2588f9c2a0dcSSeungwon Jeon if (!sg_miter_next(sg_miter)) 2589f9c2a0dcSSeungwon Jeon goto done; 2590f95f3850SWill Newton 25914225fc85SImre Deak host->sg = sg_miter->piter.sg; 2592f9c2a0dcSSeungwon Jeon buf = sg_miter->addr; 2593f9c2a0dcSSeungwon Jeon remain = sg_miter->length; 2594f9c2a0dcSSeungwon Jeon offset = 0; 2595f9c2a0dcSSeungwon Jeon 2596f9c2a0dcSSeungwon Jeon do { 2597f9c2a0dcSSeungwon Jeon fcnt = ((fifo_depth - 2598f9c2a0dcSSeungwon Jeon SDMMC_GET_FCNT(mci_readl(host, STATUS))) 2599f9c2a0dcSSeungwon Jeon << shift) - host->part_buf_count; 2600f9c2a0dcSSeungwon Jeon len = min(remain, fcnt); 2601f9c2a0dcSSeungwon Jeon if (!len) 2602f9c2a0dcSSeungwon Jeon break; 2603f9c2a0dcSSeungwon Jeon host->push_data(host, (void *)(buf + offset), len); 26043e4b0d8bSMarkos Chandras data->bytes_xfered += len; 2605f95f3850SWill Newton offset += len; 2606f9c2a0dcSSeungwon Jeon remain -= len; 2607f9c2a0dcSSeungwon Jeon } while (remain); 2608f95f3850SWill Newton 2609e74f3a9cSSeungwon Jeon sg_miter->consumed = offset; 2610f95f3850SWill Newton status = mci_readl(host, MINTSTS); 2611f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2612f95f3850SWill Newton } while 
(status & SDMMC_INT_TXDR); /* if TXDR write again */ 2613f9c2a0dcSSeungwon Jeon 2614f9c2a0dcSSeungwon Jeon if (!remain) { 2615f9c2a0dcSSeungwon Jeon if (!sg_miter_next(sg_miter)) 2616f9c2a0dcSSeungwon Jeon goto done; 2617f9c2a0dcSSeungwon Jeon sg_miter->consumed = 0; 2618f9c2a0dcSSeungwon Jeon } 2619f9c2a0dcSSeungwon Jeon sg_miter_stop(sg_miter); 2620f95f3850SWill Newton return; 2621f95f3850SWill Newton 2622f95f3850SWill Newton done: 2623f9c2a0dcSSeungwon Jeon sg_miter_stop(sg_miter); 2624f9c2a0dcSSeungwon Jeon host->sg = NULL; 26250e3a22c0SShawn Lin smp_wmb(); /* drain writebuffer */ 2626f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2627f95f3850SWill Newton } 2628f95f3850SWill Newton 2629f95f3850SWill Newton static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 2630f95f3850SWill Newton { 26310363b12dSDouglas Anderson del_timer(&host->cto_timer); 26320363b12dSDouglas Anderson 2633f95f3850SWill Newton if (!host->cmd_status) 2634f95f3850SWill Newton host->cmd_status = status; 2635f95f3850SWill Newton 26360e3a22c0SShawn Lin smp_wmb(); /* drain writebuffer */ 2637f95f3850SWill Newton 2638f95f3850SWill Newton set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2639f95f3850SWill Newton tasklet_schedule(&host->tasklet); 2640f95f3850SWill Newton } 2641f95f3850SWill Newton 26426130e7a9SDoug Anderson static void dw_mci_handle_cd(struct dw_mci *host) 26436130e7a9SDoug Anderson { 2644b23475faSJaehoon Chung struct dw_mci_slot *slot = host->slot; 26456130e7a9SDoug Anderson 26466130e7a9SDoug Anderson if (slot->mmc->ops->card_event) 26476130e7a9SDoug Anderson slot->mmc->ops->card_event(slot->mmc); 26486130e7a9SDoug Anderson mmc_detect_change(slot->mmc, 26496130e7a9SDoug Anderson msecs_to_jiffies(host->pdata->detect_delay_ms)); 26506130e7a9SDoug Anderson } 26516130e7a9SDoug Anderson 2652f95f3850SWill Newton static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 2653f95f3850SWill Newton { 2654f95f3850SWill Newton struct dw_mci *host = 
dev_id; 2655182c9081SSeungwon Jeon u32 pending; 2656b23475faSJaehoon Chung struct dw_mci_slot *slot = host->slot; 26578892b705SDouglas Anderson unsigned long irqflags; 2658f95f3850SWill Newton 2659f95f3850SWill Newton pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 2660f95f3850SWill Newton 2661476d79f1SDoug Anderson if (pending) { 266201730558SDoug Anderson /* Check volt switch first, since it can look like an error */ 266301730558SDoug Anderson if ((host->state == STATE_SENDING_CMD11) && 266401730558SDoug Anderson (pending & SDMMC_INT_VOLT_SWITCH)) { 266501730558SDoug Anderson mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); 266601730558SDoug Anderson pending &= ~SDMMC_INT_VOLT_SWITCH; 266749ba0302SDoug Anderson 266849ba0302SDoug Anderson /* 266949ba0302SDoug Anderson * Hold the lock; we know cmd11_timer can't be kicked 267049ba0302SDoug Anderson * off after the lock is released, so safe to delete. 267149ba0302SDoug Anderson */ 267249ba0302SDoug Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 267301730558SDoug Anderson dw_mci_cmd_interrupt(host, pending); 267449ba0302SDoug Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 267549ba0302SDoug Anderson 267649ba0302SDoug Anderson del_timer(&host->cmd11_timer); 267701730558SDoug Anderson } 267801730558SDoug Anderson 2679f95f3850SWill Newton if (pending & DW_MCI_CMD_ERROR_FLAGS) { 26808892b705SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 26818892b705SDouglas Anderson 268203de1921SAddy Ke del_timer(&host->cto_timer); 2683f95f3850SWill Newton mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 2684182c9081SSeungwon Jeon host->cmd_status = pending; 26850e3a22c0SShawn Lin smp_wmb(); /* drain writebuffer */ 2686f95f3850SWill Newton set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 26878892b705SDouglas Anderson 26888892b705SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 2689f95f3850SWill Newton } 2690f95f3850SWill Newton 2691f95f3850SWill Newton if (pending 
& DW_MCI_DATA_ERROR_FLAGS) { 2692f95f3850SWill Newton /* if there is an error report DATA_ERROR */ 2693f95f3850SWill Newton mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 2694182c9081SSeungwon Jeon host->data_status = pending; 26950e3a22c0SShawn Lin smp_wmb(); /* drain writebuffer */ 2696f95f3850SWill Newton set_bit(EVENT_DATA_ERROR, &host->pending_events); 2697f95f3850SWill Newton tasklet_schedule(&host->tasklet); 2698f95f3850SWill Newton } 2699f95f3850SWill Newton 2700f95f3850SWill Newton if (pending & SDMMC_INT_DATA_OVER) { 270193c23ae3SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 270293c23ae3SDouglas Anderson 270357e10486SAddy Ke del_timer(&host->dto_timer); 270457e10486SAddy Ke 2705f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 2706f95f3850SWill Newton if (!host->data_status) 2707182c9081SSeungwon Jeon host->data_status = pending; 27080e3a22c0SShawn Lin smp_wmb(); /* drain writebuffer */ 2709f95f3850SWill Newton if (host->dir_status == DW_MCI_RECV_STATUS) { 2710f95f3850SWill Newton if (host->sg != NULL) 271187a74d39SKyoungil Kim dw_mci_read_data_pio(host, true); 2712f95f3850SWill Newton } 2713f95f3850SWill Newton set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2714f95f3850SWill Newton tasklet_schedule(&host->tasklet); 271593c23ae3SDouglas Anderson 271693c23ae3SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 2717f95f3850SWill Newton } 2718f95f3850SWill Newton 2719f95f3850SWill Newton if (pending & SDMMC_INT_RXDR) { 2720f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2721b40af3aaSJames Hogan if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) 272287a74d39SKyoungil Kim dw_mci_read_data_pio(host, false); 2723f95f3850SWill Newton } 2724f95f3850SWill Newton 2725f95f3850SWill Newton if (pending & SDMMC_INT_TXDR) { 2726f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2727b40af3aaSJames Hogan if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) 2728f95f3850SWill 
Newton dw_mci_write_data_pio(host); 2729f95f3850SWill Newton } 2730f95f3850SWill Newton 2731f95f3850SWill Newton if (pending & SDMMC_INT_CMD_DONE) { 27328892b705SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 27338892b705SDouglas Anderson 2734f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 2735182c9081SSeungwon Jeon dw_mci_cmd_interrupt(host, pending); 27368892b705SDouglas Anderson 27378892b705SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 2738f95f3850SWill Newton } 2739f95f3850SWill Newton 2740f95f3850SWill Newton if (pending & SDMMC_INT_CD) { 2741f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_CD); 27426130e7a9SDoug Anderson dw_mci_handle_cd(host); 2743f95f3850SWill Newton } 2744f95f3850SWill Newton 274576756234SAddy Ke if (pending & SDMMC_INT_SDIO(slot->sdio_id)) { 274676756234SAddy Ke mci_writel(host, RINTSTS, 274776756234SAddy Ke SDMMC_INT_SDIO(slot->sdio_id)); 274832dba737SUlf Hansson __dw_mci_enable_sdio_irq(slot, 0); 274932dba737SUlf Hansson sdio_signal_irq(slot->mmc); 27501a5c8e1fSShashidhar Hiremath } 27511a5c8e1fSShashidhar Hiremath 27521fb5f68aSMarkos Chandras } 2753f95f3850SWill Newton 27543fc7eaefSShawn Lin if (host->use_dma != TRANS_MODE_IDMAC) 27553fc7eaefSShawn Lin return IRQ_HANDLED; 27563fc7eaefSShawn Lin 27573fc7eaefSShawn Lin /* Handle IDMA interrupts */ 275869d99fdcSPrabu Thangamuthu if (host->dma_64bit_address == 1) { 275969d99fdcSPrabu Thangamuthu pending = mci_readl(host, IDSTS64); 276069d99fdcSPrabu Thangamuthu if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 276169d99fdcSPrabu Thangamuthu mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | 276269d99fdcSPrabu Thangamuthu SDMMC_IDMAC_INT_RI); 276369d99fdcSPrabu Thangamuthu mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); 2764faecf411SShawn Lin if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 27653fc7eaefSShawn Lin host->dma_ops->complete((void *)host); 276669d99fdcSPrabu Thangamuthu } 276769d99fdcSPrabu Thangamuthu 
} else { 2768f95f3850SWill Newton pending = mci_readl(host, IDSTS); 2769f95f3850SWill Newton if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 277069d99fdcSPrabu Thangamuthu mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | 277169d99fdcSPrabu Thangamuthu SDMMC_IDMAC_INT_RI); 2772f95f3850SWill Newton mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 2773faecf411SShawn Lin if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 27743fc7eaefSShawn Lin host->dma_ops->complete((void *)host); 2775f95f3850SWill Newton } 277669d99fdcSPrabu Thangamuthu } 2777f95f3850SWill Newton 2778f95f3850SWill Newton return IRQ_HANDLED; 2779f95f3850SWill Newton } 2780f95f3850SWill Newton 2781a4faa492SShawn Lin static int dw_mci_init_slot_caps(struct dw_mci_slot *slot) 2782a4faa492SShawn Lin { 2783a4faa492SShawn Lin struct dw_mci *host = slot->host; 2784a4faa492SShawn Lin const struct dw_mci_drv_data *drv_data = host->drv_data; 2785a4faa492SShawn Lin struct mmc_host *mmc = slot->mmc; 2786a4faa492SShawn Lin int ctrl_id; 2787a4faa492SShawn Lin 2788a4faa492SShawn Lin if (host->pdata->caps) 2789a4faa492SShawn Lin mmc->caps = host->pdata->caps; 2790a4faa492SShawn Lin 2791a4faa492SShawn Lin /* 2792a4faa492SShawn Lin * Support MMC_CAP_ERASE by default. 2793a4faa492SShawn Lin * It needs to use trim/discard/erase commands. 
2794a4faa492SShawn Lin */ 2795a4faa492SShawn Lin mmc->caps |= MMC_CAP_ERASE; 2796a4faa492SShawn Lin 2797a4faa492SShawn Lin if (host->pdata->pm_caps) 2798a4faa492SShawn Lin mmc->pm_caps = host->pdata->pm_caps; 2799a4faa492SShawn Lin 2800a4faa492SShawn Lin if (host->dev->of_node) { 2801a4faa492SShawn Lin ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); 2802a4faa492SShawn Lin if (ctrl_id < 0) 2803a4faa492SShawn Lin ctrl_id = 0; 2804a4faa492SShawn Lin } else { 2805a4faa492SShawn Lin ctrl_id = to_platform_device(host->dev)->id; 2806a4faa492SShawn Lin } 2807*0d84b9e5SShawn Lin 2808*0d84b9e5SShawn Lin if (drv_data && drv_data->caps) { 2809*0d84b9e5SShawn Lin if (ctrl_id >= drv_data->num_caps) { 2810*0d84b9e5SShawn Lin dev_err(host->dev, "invalid controller id %d\n", 2811*0d84b9e5SShawn Lin ctrl_id); 2812*0d84b9e5SShawn Lin return -EINVAL; 2813*0d84b9e5SShawn Lin } 2814a4faa492SShawn Lin mmc->caps |= drv_data->caps[ctrl_id]; 2815*0d84b9e5SShawn Lin } 2816a4faa492SShawn Lin 2817a4faa492SShawn Lin if (host->pdata->caps2) 2818a4faa492SShawn Lin mmc->caps2 = host->pdata->caps2; 2819a4faa492SShawn Lin 2820a4faa492SShawn Lin /* Process SDIO IRQs through the sdio_irq_work. 
*/ 2821a4faa492SShawn Lin if (mmc->caps & MMC_CAP_SDIO_IRQ) 2822a4faa492SShawn Lin mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 2823a4faa492SShawn Lin 2824a4faa492SShawn Lin return 0; 2825a4faa492SShawn Lin } 2826a4faa492SShawn Lin 2827e4a65ef7SJaehoon Chung static int dw_mci_init_slot(struct dw_mci *host) 2828f95f3850SWill Newton { 2829f95f3850SWill Newton struct mmc_host *mmc; 2830f95f3850SWill Newton struct dw_mci_slot *slot; 2831a4faa492SShawn Lin int ret; 28321f44a2a5SSeungwon Jeon u32 freq[2]; 2833f95f3850SWill Newton 28344a90920cSThomas Abraham mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); 2835f95f3850SWill Newton if (!mmc) 2836f95f3850SWill Newton return -ENOMEM; 2837f95f3850SWill Newton 2838f95f3850SWill Newton slot = mmc_priv(mmc); 2839e4a65ef7SJaehoon Chung slot->id = 0; 2840e4a65ef7SJaehoon Chung slot->sdio_id = host->sdio_id0 + slot->id; 2841f95f3850SWill Newton slot->mmc = mmc; 2842f95f3850SWill Newton slot->host = host; 2843b23475faSJaehoon Chung host->slot = slot; 2844f95f3850SWill Newton 2845f95f3850SWill Newton mmc->ops = &dw_mci_ops; 2846852ff5feSDavid Woods if (device_property_read_u32_array(host->dev, "clock-freq-min-max", 2847852ff5feSDavid Woods freq, 2)) { 28481f44a2a5SSeungwon Jeon mmc->f_min = DW_MCI_FREQ_MIN; 28491f44a2a5SSeungwon Jeon mmc->f_max = DW_MCI_FREQ_MAX; 28501f44a2a5SSeungwon Jeon } else { 2851b023030fSJaehoon Chung dev_info(host->dev, 2852b023030fSJaehoon Chung "'clock-freq-min-max' property was deprecated.\n"); 28531f44a2a5SSeungwon Jeon mmc->f_min = freq[0]; 28541f44a2a5SSeungwon Jeon mmc->f_max = freq[1]; 28551f44a2a5SSeungwon Jeon } 2856f95f3850SWill Newton 285751da2240SYuvaraj CD /*if there are external regulators, get them*/ 285851da2240SYuvaraj CD ret = mmc_regulator_get_supply(mmc); 28590f3a47b8SWolfram Sang if (ret) 28603cf890fcSDoug Anderson goto err_host_allocated; 286151da2240SYuvaraj CD 286251da2240SYuvaraj CD if (!mmc->ocr_avail) 2863f95f3850SWill Newton mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 
2864f95f3850SWill Newton 28653cf890fcSDoug Anderson ret = mmc_of_parse(mmc); 28663cf890fcSDoug Anderson if (ret) 28673cf890fcSDoug Anderson goto err_host_allocated; 2868f95f3850SWill Newton 2869a4faa492SShawn Lin ret = dw_mci_init_slot_caps(slot); 2870a4faa492SShawn Lin if (ret) 2871a4faa492SShawn Lin goto err_host_allocated; 287232dba737SUlf Hansson 2873f95f3850SWill Newton /* Useful defaults if platform data is unset. */ 28743fc7eaefSShawn Lin if (host->use_dma == TRANS_MODE_IDMAC) { 2875a39e5746SJaehoon Chung mmc->max_segs = host->ring_size; 2876225faf87SJaehoon Chung mmc->max_blk_size = 65535; 2877575c319dSHeiko Stuebner mmc->max_seg_size = 0x1000; 28781a25b1b4SSeungwon Jeon mmc->max_req_size = mmc->max_seg_size * host->ring_size; 28791a25b1b4SSeungwon Jeon mmc->max_blk_count = mmc->max_req_size / 512; 28803fc7eaefSShawn Lin } else if (host->use_dma == TRANS_MODE_EDMAC) { 28813fc7eaefSShawn Lin mmc->max_segs = 64; 2882225faf87SJaehoon Chung mmc->max_blk_size = 65535; 28833fc7eaefSShawn Lin mmc->max_blk_count = 65535; 28843fc7eaefSShawn Lin mmc->max_req_size = 28853fc7eaefSShawn Lin mmc->max_blk_size * mmc->max_blk_count; 28863fc7eaefSShawn Lin mmc->max_seg_size = mmc->max_req_size; 2887575c319dSHeiko Stuebner } else { 28883fc7eaefSShawn Lin /* TRANS_MODE_PIO */ 2889f95f3850SWill Newton mmc->max_segs = 64; 2890225faf87SJaehoon Chung mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */ 2891f95f3850SWill Newton mmc->max_blk_count = 512; 2892575c319dSHeiko Stuebner mmc->max_req_size = mmc->max_blk_size * 2893575c319dSHeiko Stuebner mmc->max_blk_count; 2894f95f3850SWill Newton mmc->max_seg_size = mmc->max_req_size; 2895575c319dSHeiko Stuebner } 2896f95f3850SWill Newton 2897c0834a58SShawn Lin dw_mci_get_cd(mmc); 2898ae0eb348SJaehoon Chung 28990cea529dSJaehoon Chung ret = mmc_add_host(mmc); 29000cea529dSJaehoon Chung if (ret) 29013cf890fcSDoug Anderson goto err_host_allocated; 2902f95f3850SWill Newton 2903f95f3850SWill Newton #if defined(CONFIG_DEBUG_FS) 
2904f95f3850SWill Newton dw_mci_init_debugfs(slot); 2905f95f3850SWill Newton #endif 2906f95f3850SWill Newton 2907f95f3850SWill Newton return 0; 2908800d78bfSThomas Abraham 29093cf890fcSDoug Anderson err_host_allocated: 2910800d78bfSThomas Abraham mmc_free_host(mmc); 291151da2240SYuvaraj CD return ret; 2912f95f3850SWill Newton } 2913f95f3850SWill Newton 2914e4a65ef7SJaehoon Chung static void dw_mci_cleanup_slot(struct dw_mci_slot *slot) 2915f95f3850SWill Newton { 2916f95f3850SWill Newton /* Debugfs stuff is cleaned up by mmc core */ 2917f95f3850SWill Newton mmc_remove_host(slot->mmc); 2918b23475faSJaehoon Chung slot->host->slot = NULL; 2919f95f3850SWill Newton mmc_free_host(slot->mmc); 2920f95f3850SWill Newton } 2921f95f3850SWill Newton 2922f95f3850SWill Newton static void dw_mci_init_dma(struct dw_mci *host) 2923f95f3850SWill Newton { 292469d99fdcSPrabu Thangamuthu int addr_config; 29253fc7eaefSShawn Lin struct device *dev = host->dev; 29263fc7eaefSShawn Lin 29273fc7eaefSShawn Lin /* 29283fc7eaefSShawn Lin * Check tansfer mode from HCON[17:16] 29293fc7eaefSShawn Lin * Clear the ambiguous description of dw_mmc databook: 29303fc7eaefSShawn Lin * 2b'00: No DMA Interface -> Actually means using Internal DMA block 29313fc7eaefSShawn Lin * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block 29323fc7eaefSShawn Lin * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block 29333fc7eaefSShawn Lin * 2b'11: Non DW DMA Interface -> pio only 29343fc7eaefSShawn Lin * Compared to DesignWare DMA Interface, Generic DMA Interface has a 29353fc7eaefSShawn Lin * simpler request/acknowledge handshake mechanism and both of them 29363fc7eaefSShawn Lin * are regarded as external dma master for dw_mmc. 
29373fc7eaefSShawn Lin */ 29383fc7eaefSShawn Lin host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON)); 29393fc7eaefSShawn Lin if (host->use_dma == DMA_INTERFACE_IDMA) { 29403fc7eaefSShawn Lin host->use_dma = TRANS_MODE_IDMAC; 29413fc7eaefSShawn Lin } else if (host->use_dma == DMA_INTERFACE_DWDMA || 29423fc7eaefSShawn Lin host->use_dma == DMA_INTERFACE_GDMA) { 29433fc7eaefSShawn Lin host->use_dma = TRANS_MODE_EDMAC; 29443fc7eaefSShawn Lin } else { 29453fc7eaefSShawn Lin goto no_dma; 29463fc7eaefSShawn Lin } 29473fc7eaefSShawn Lin 29483fc7eaefSShawn Lin /* Determine which DMA interface to use */ 29493fc7eaefSShawn Lin if (host->use_dma == TRANS_MODE_IDMAC) { 29503fc7eaefSShawn Lin /* 29513fc7eaefSShawn Lin * Check ADDR_CONFIG bit in HCON to find 29523fc7eaefSShawn Lin * IDMAC address bus width 29533fc7eaefSShawn Lin */ 295470692752SShawn Lin addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON)); 295569d99fdcSPrabu Thangamuthu 295669d99fdcSPrabu Thangamuthu if (addr_config == 1) { 295769d99fdcSPrabu Thangamuthu /* host supports IDMAC in 64-bit address mode */ 295869d99fdcSPrabu Thangamuthu host->dma_64bit_address = 1; 29593fc7eaefSShawn Lin dev_info(host->dev, 29603fc7eaefSShawn Lin "IDMAC supports 64-bit address mode.\n"); 296169d99fdcSPrabu Thangamuthu if (!dma_set_mask(host->dev, DMA_BIT_MASK(64))) 29623fc7eaefSShawn Lin dma_set_coherent_mask(host->dev, 29633fc7eaefSShawn Lin DMA_BIT_MASK(64)); 296469d99fdcSPrabu Thangamuthu } else { 296569d99fdcSPrabu Thangamuthu /* host supports IDMAC in 32-bit address mode */ 296669d99fdcSPrabu Thangamuthu host->dma_64bit_address = 0; 29673fc7eaefSShawn Lin dev_info(host->dev, 29683fc7eaefSShawn Lin "IDMAC supports 32-bit address mode.\n"); 296969d99fdcSPrabu Thangamuthu } 297069d99fdcSPrabu Thangamuthu 2971f95f3850SWill Newton /* Alloc memory for sg translation */ 2972cc190d4cSShawn Lin host->sg_cpu = dmam_alloc_coherent(host->dev, 2973cc190d4cSShawn Lin DESC_RING_BUF_SZ, 2974f95f3850SWill Newton &host->sg_dma, 
GFP_KERNEL); 2975f95f3850SWill Newton if (!host->sg_cpu) { 29763fc7eaefSShawn Lin dev_err(host->dev, 29773fc7eaefSShawn Lin "%s: could not alloc DMA memory\n", 2978f95f3850SWill Newton __func__); 2979f95f3850SWill Newton goto no_dma; 2980f95f3850SWill Newton } 2981f95f3850SWill Newton 2982f95f3850SWill Newton host->dma_ops = &dw_mci_idmac_ops; 298300956ea3SSeungwon Jeon dev_info(host->dev, "Using internal DMA controller.\n"); 29843fc7eaefSShawn Lin } else { 29853fc7eaefSShawn Lin /* TRANS_MODE_EDMAC: check dma bindings again */ 2986852ff5feSDavid Woods if ((device_property_read_string_array(dev, "dma-names", 2987852ff5feSDavid Woods NULL, 0) < 0) || 2988852ff5feSDavid Woods !device_property_present(dev, "dmas")) { 2989f95f3850SWill Newton goto no_dma; 29903fc7eaefSShawn Lin } 29913fc7eaefSShawn Lin host->dma_ops = &dw_mci_edmac_ops; 29923fc7eaefSShawn Lin dev_info(host->dev, "Using external DMA controller.\n"); 29933fc7eaefSShawn Lin } 2994f95f3850SWill Newton 2995e1631f98SJaehoon Chung if (host->dma_ops->init && host->dma_ops->start && 2996e1631f98SJaehoon Chung host->dma_ops->stop && host->dma_ops->cleanup) { 2997f95f3850SWill Newton if (host->dma_ops->init(host)) { 29980e3a22c0SShawn Lin dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n", 29990e3a22c0SShawn Lin __func__); 3000f95f3850SWill Newton goto no_dma; 3001f95f3850SWill Newton } 3002f95f3850SWill Newton } else { 30034a90920cSThomas Abraham dev_err(host->dev, "DMA initialization not found.\n"); 3004f95f3850SWill Newton goto no_dma; 3005f95f3850SWill Newton } 3006f95f3850SWill Newton 3007f95f3850SWill Newton return; 3008f95f3850SWill Newton 3009f95f3850SWill Newton no_dma: 30104a90920cSThomas Abraham dev_info(host->dev, "Using PIO mode.\n"); 30113fc7eaefSShawn Lin host->use_dma = TRANS_MODE_PIO; 3012f95f3850SWill Newton } 3013f95f3850SWill Newton 301437977729SKees Cook static void dw_mci_cmd11_timer(struct timer_list *t) 30155c935165SDoug Anderson { 301637977729SKees Cook struct dw_mci *host = 
from_timer(host, t, cmd11_timer); 30175c935165SDoug Anderson 3018fd674198SDoug Anderson if (host->state != STATE_SENDING_CMD11) { 3019fd674198SDoug Anderson dev_warn(host->dev, "Unexpected CMD11 timeout\n"); 3020fd674198SDoug Anderson return; 3021fd674198SDoug Anderson } 30225c935165SDoug Anderson 30235c935165SDoug Anderson host->cmd_status = SDMMC_INT_RTO; 30245c935165SDoug Anderson set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 30255c935165SDoug Anderson tasklet_schedule(&host->tasklet); 30265c935165SDoug Anderson } 30275c935165SDoug Anderson 302837977729SKees Cook static void dw_mci_cto_timer(struct timer_list *t) 302903de1921SAddy Ke { 303037977729SKees Cook struct dw_mci *host = from_timer(host, t, cto_timer); 30318892b705SDouglas Anderson unsigned long irqflags; 30328892b705SDouglas Anderson u32 pending; 303303de1921SAddy Ke 30348892b705SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 30358892b705SDouglas Anderson 30368892b705SDouglas Anderson /* 30378892b705SDouglas Anderson * If somehow we have very bad interrupt latency it's remotely possible 30388892b705SDouglas Anderson * that the timer could fire while the interrupt is still pending or 30398892b705SDouglas Anderson * while the interrupt is midway through running. Let's be paranoid 30408892b705SDouglas Anderson * and detect those two cases. Note that this is paranoia is somewhat 30418892b705SDouglas Anderson * justified because in this function we don't actually cancel the 30428892b705SDouglas Anderson * pending command in the controller--we just assume it will never come. 
30438892b705SDouglas Anderson */ 30448892b705SDouglas Anderson pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 30458892b705SDouglas Anderson if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) { 30468892b705SDouglas Anderson /* The interrupt should fire; no need to act but we can warn */ 30478892b705SDouglas Anderson dev_warn(host->dev, "Unexpected interrupt latency\n"); 30488892b705SDouglas Anderson goto exit; 30498892b705SDouglas Anderson } 30508892b705SDouglas Anderson if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) { 30518892b705SDouglas Anderson /* Presumably interrupt handler couldn't delete the timer */ 30528892b705SDouglas Anderson dev_warn(host->dev, "CTO timeout when already completed\n"); 30538892b705SDouglas Anderson goto exit; 30548892b705SDouglas Anderson } 30558892b705SDouglas Anderson 30568892b705SDouglas Anderson /* 30578892b705SDouglas Anderson * Continued paranoia to make sure we're in the state we expect. 30588892b705SDouglas Anderson * This paranoia isn't really justified but it seems good to be safe. 30598892b705SDouglas Anderson */ 306003de1921SAddy Ke switch (host->state) { 306103de1921SAddy Ke case STATE_SENDING_CMD11: 306203de1921SAddy Ke case STATE_SENDING_CMD: 306303de1921SAddy Ke case STATE_SENDING_STOP: 306403de1921SAddy Ke /* 306503de1921SAddy Ke * If CMD_DONE interrupt does NOT come in sending command 306603de1921SAddy Ke * state, we should notify the driver to terminate current 306703de1921SAddy Ke * transfer and report a command timeout to the core. 
306803de1921SAddy Ke */ 306903de1921SAddy Ke host->cmd_status = SDMMC_INT_RTO; 307003de1921SAddy Ke set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 307103de1921SAddy Ke tasklet_schedule(&host->tasklet); 307203de1921SAddy Ke break; 307303de1921SAddy Ke default: 307403de1921SAddy Ke dev_warn(host->dev, "Unexpected command timeout, state %d\n", 307503de1921SAddy Ke host->state); 307603de1921SAddy Ke break; 307703de1921SAddy Ke } 30788892b705SDouglas Anderson 30798892b705SDouglas Anderson exit: 30808892b705SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 308103de1921SAddy Ke } 308203de1921SAddy Ke 308337977729SKees Cook static void dw_mci_dto_timer(struct timer_list *t) 308457e10486SAddy Ke { 308537977729SKees Cook struct dw_mci *host = from_timer(host, t, dto_timer); 308693c23ae3SDouglas Anderson unsigned long irqflags; 308793c23ae3SDouglas Anderson u32 pending; 308857e10486SAddy Ke 308993c23ae3SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 309093c23ae3SDouglas Anderson 309193c23ae3SDouglas Anderson /* 309293c23ae3SDouglas Anderson * The DTO timer is much longer than the CTO timer, so it's even less 309393c23ae3SDouglas Anderson * likely that we'll these cases, but it pays to be paranoid. 
309493c23ae3SDouglas Anderson */ 309593c23ae3SDouglas Anderson pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 309693c23ae3SDouglas Anderson if (pending & SDMMC_INT_DATA_OVER) { 309793c23ae3SDouglas Anderson /* The interrupt should fire; no need to act but we can warn */ 309893c23ae3SDouglas Anderson dev_warn(host->dev, "Unexpected data interrupt latency\n"); 309993c23ae3SDouglas Anderson goto exit; 310093c23ae3SDouglas Anderson } 310193c23ae3SDouglas Anderson if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) { 310293c23ae3SDouglas Anderson /* Presumably interrupt handler couldn't delete the timer */ 310393c23ae3SDouglas Anderson dev_warn(host->dev, "DTO timeout when already completed\n"); 310493c23ae3SDouglas Anderson goto exit; 310593c23ae3SDouglas Anderson } 310693c23ae3SDouglas Anderson 310793c23ae3SDouglas Anderson /* 310893c23ae3SDouglas Anderson * Continued paranoia to make sure we're in the state we expect. 310993c23ae3SDouglas Anderson * This paranoia isn't really justified but it seems good to be safe. 311093c23ae3SDouglas Anderson */ 311157e10486SAddy Ke switch (host->state) { 311257e10486SAddy Ke case STATE_SENDING_DATA: 311357e10486SAddy Ke case STATE_DATA_BUSY: 311457e10486SAddy Ke /* 311557e10486SAddy Ke * If DTO interrupt does NOT come in sending data state, 311657e10486SAddy Ke * we should notify the driver to terminate current transfer 311757e10486SAddy Ke * and report a data timeout to the core. 
311857e10486SAddy Ke */ 311957e10486SAddy Ke host->data_status = SDMMC_INT_DRTO; 312057e10486SAddy Ke set_bit(EVENT_DATA_ERROR, &host->pending_events); 312157e10486SAddy Ke set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 312257e10486SAddy Ke tasklet_schedule(&host->tasklet); 312357e10486SAddy Ke break; 312457e10486SAddy Ke default: 312593c23ae3SDouglas Anderson dev_warn(host->dev, "Unexpected data timeout, state %d\n", 312693c23ae3SDouglas Anderson host->state); 312757e10486SAddy Ke break; 312857e10486SAddy Ke } 312993c23ae3SDouglas Anderson 313093c23ae3SDouglas Anderson exit: 313193c23ae3SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 313257e10486SAddy Ke } 313357e10486SAddy Ke 3134c91eab4bSThomas Abraham #ifdef CONFIG_OF 3135c91eab4bSThomas Abraham static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 3136c91eab4bSThomas Abraham { 3137c91eab4bSThomas Abraham struct dw_mci_board *pdata; 3138c91eab4bSThomas Abraham struct device *dev = host->dev; 3139e95baf13SArnd Bergmann const struct dw_mci_drv_data *drv_data = host->drv_data; 3140e8cc37b8SShawn Lin int ret; 31413c6d89eaSDoug Anderson u32 clock_frequency; 3142c91eab4bSThomas Abraham 3143c91eab4bSThomas Abraham pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 3144bf3707eaSBeomho Seo if (!pdata) 3145c91eab4bSThomas Abraham return ERR_PTR(-ENOMEM); 3146c91eab4bSThomas Abraham 3147d6786fefSGuodong Xu /* find reset controller when exist */ 3148a93d6f31SPhilipp Zabel pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset"); 3149d6786fefSGuodong Xu if (IS_ERR(pdata->rstc)) { 3150d6786fefSGuodong Xu if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER) 3151d6786fefSGuodong Xu return ERR_PTR(-EPROBE_DEFER); 3152d6786fefSGuodong Xu } 3153d6786fefSGuodong Xu 3154c91eab4bSThomas Abraham /* find out number of slots supported */ 315516f5df8bSShawn Lin if (!device_property_read_u32(dev, "num-slots", &pdata->num_slots)) 3156d30a8f7bSJaehoon Chung dev_info(dev, "'num-slots' was 
deprecated.\n"); 3157c91eab4bSThomas Abraham 3158852ff5feSDavid Woods if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth)) 31590e3a22c0SShawn Lin dev_info(dev, 31600e3a22c0SShawn Lin "fifo-depth property not found, using value of FIFOTH register as default\n"); 3161c91eab4bSThomas Abraham 3162852ff5feSDavid Woods device_property_read_u32(dev, "card-detect-delay", 3163852ff5feSDavid Woods &pdata->detect_delay_ms); 3164c91eab4bSThomas Abraham 3165852ff5feSDavid Woods device_property_read_u32(dev, "data-addr", &host->data_addr_override); 3166a0361c1aSJun Nie 3167852ff5feSDavid Woods if (device_property_present(dev, "fifo-watermark-aligned")) 3168d6fced83SJun Nie host->wm_aligned = true; 3169d6fced83SJun Nie 3170852ff5feSDavid Woods if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency)) 31713c6d89eaSDoug Anderson pdata->bus_hz = clock_frequency; 31723c6d89eaSDoug Anderson 3173cb27a843SJames Hogan if (drv_data && drv_data->parse_dt) { 3174cb27a843SJames Hogan ret = drv_data->parse_dt(host); 3175800d78bfSThomas Abraham if (ret) 3176800d78bfSThomas Abraham return ERR_PTR(ret); 3177800d78bfSThomas Abraham } 3178800d78bfSThomas Abraham 3179c91eab4bSThomas Abraham return pdata; 3180c91eab4bSThomas Abraham } 3181c91eab4bSThomas Abraham 3182c91eab4bSThomas Abraham #else /* CONFIG_OF */ 3183c91eab4bSThomas Abraham static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 3184c91eab4bSThomas Abraham { 3185c91eab4bSThomas Abraham return ERR_PTR(-EINVAL); 3186c91eab4bSThomas Abraham } 3187c91eab4bSThomas Abraham #endif /* CONFIG_OF */ 3188c91eab4bSThomas Abraham 3189fa0c3283SDoug Anderson static void dw_mci_enable_cd(struct dw_mci *host) 3190fa0c3283SDoug Anderson { 3191fa0c3283SDoug Anderson unsigned long irqflags; 3192fa0c3283SDoug Anderson u32 temp; 3193fa0c3283SDoug Anderson 3194e8cc37b8SShawn Lin /* 3195e8cc37b8SShawn Lin * No need for CD if all slots have a non-error GPIO 3196e8cc37b8SShawn Lin * as well as broken card detection is 
found. 3197e8cc37b8SShawn Lin */ 3198e47c0b96SJaehoon Chung if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL) 3199e8cc37b8SShawn Lin return; 3200fa0c3283SDoug Anderson 3201e47c0b96SJaehoon Chung if (mmc_gpio_get_cd(host->slot->mmc) < 0) { 3202fa0c3283SDoug Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 3203fa0c3283SDoug Anderson temp = mci_readl(host, INTMASK); 3204fa0c3283SDoug Anderson temp |= SDMMC_INT_CD; 3205fa0c3283SDoug Anderson mci_writel(host, INTMASK, temp); 3206fa0c3283SDoug Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 3207fa0c3283SDoug Anderson } 320858870241SJaehoon Chung } 3209fa0c3283SDoug Anderson 321062ca8034SShashidhar Hiremath int dw_mci_probe(struct dw_mci *host) 3211f95f3850SWill Newton { 3212e95baf13SArnd Bergmann const struct dw_mci_drv_data *drv_data = host->drv_data; 321362ca8034SShashidhar Hiremath int width, i, ret = 0; 3214f95f3850SWill Newton u32 fifo_size; 3215f95f3850SWill Newton 3216c91eab4bSThomas Abraham if (!host->pdata) { 3217c91eab4bSThomas Abraham host->pdata = dw_mci_parse_dt(host); 3218d6786fefSGuodong Xu if (PTR_ERR(host->pdata) == -EPROBE_DEFER) { 3219d6786fefSGuodong Xu return -EPROBE_DEFER; 3220d6786fefSGuodong Xu } else if (IS_ERR(host->pdata)) { 3221c91eab4bSThomas Abraham dev_err(host->dev, "platform data not available\n"); 3222c91eab4bSThomas Abraham return -EINVAL; 3223c91eab4bSThomas Abraham } 3224f95f3850SWill Newton } 3225f95f3850SWill Newton 3226780f22afSSeungwon Jeon host->biu_clk = devm_clk_get(host->dev, "biu"); 3227f90a0612SThomas Abraham if (IS_ERR(host->biu_clk)) { 3228f90a0612SThomas Abraham dev_dbg(host->dev, "biu clock not available\n"); 3229f90a0612SThomas Abraham } else { 3230f90a0612SThomas Abraham ret = clk_prepare_enable(host->biu_clk); 3231f90a0612SThomas Abraham if (ret) { 3232f90a0612SThomas Abraham dev_err(host->dev, "failed to enable biu clock\n"); 3233f90a0612SThomas Abraham return ret; 3234f90a0612SThomas Abraham } 3235f95f3850SWill Newton } 3236f95f3850SWill Newton 
3237780f22afSSeungwon Jeon host->ciu_clk = devm_clk_get(host->dev, "ciu"); 3238f90a0612SThomas Abraham if (IS_ERR(host->ciu_clk)) { 3239f90a0612SThomas Abraham dev_dbg(host->dev, "ciu clock not available\n"); 32403c6d89eaSDoug Anderson host->bus_hz = host->pdata->bus_hz; 3241f90a0612SThomas Abraham } else { 3242f90a0612SThomas Abraham ret = clk_prepare_enable(host->ciu_clk); 3243f90a0612SThomas Abraham if (ret) { 3244f90a0612SThomas Abraham dev_err(host->dev, "failed to enable ciu clock\n"); 3245f90a0612SThomas Abraham goto err_clk_biu; 3246f90a0612SThomas Abraham } 3247f90a0612SThomas Abraham 32483c6d89eaSDoug Anderson if (host->pdata->bus_hz) { 32493c6d89eaSDoug Anderson ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); 32503c6d89eaSDoug Anderson if (ret) 32513c6d89eaSDoug Anderson dev_warn(host->dev, 3252612de4c1SJaehoon Chung "Unable to set bus rate to %uHz\n", 32533c6d89eaSDoug Anderson host->pdata->bus_hz); 32543c6d89eaSDoug Anderson } 3255f90a0612SThomas Abraham host->bus_hz = clk_get_rate(host->ciu_clk); 32563c6d89eaSDoug Anderson } 3257f90a0612SThomas Abraham 3258612de4c1SJaehoon Chung if (!host->bus_hz) { 3259612de4c1SJaehoon Chung dev_err(host->dev, 3260612de4c1SJaehoon Chung "Platform data must supply bus speed\n"); 3261612de4c1SJaehoon Chung ret = -ENODEV; 3262612de4c1SJaehoon Chung goto err_clk_ciu; 3263612de4c1SJaehoon Chung } 3264612de4c1SJaehoon Chung 3265941e372dSliwei if (!IS_ERR(host->pdata->rstc)) { 3266941e372dSliwei reset_control_assert(host->pdata->rstc); 3267941e372dSliwei usleep_range(10, 50); 3268941e372dSliwei reset_control_deassert(host->pdata->rstc); 3269941e372dSliwei } 3270941e372dSliwei 3271002f0d5cSYuvaraj Kumar C D if (drv_data && drv_data->init) { 3272002f0d5cSYuvaraj Kumar C D ret = drv_data->init(host); 3273002f0d5cSYuvaraj Kumar C D if (ret) { 3274002f0d5cSYuvaraj Kumar C D dev_err(host->dev, 3275002f0d5cSYuvaraj Kumar C D "implementation specific init failed\n"); 3276002f0d5cSYuvaraj Kumar C D goto err_clk_ciu; 
3277002f0d5cSYuvaraj Kumar C D } 3278002f0d5cSYuvaraj Kumar C D } 3279002f0d5cSYuvaraj Kumar C D 328037977729SKees Cook timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0); 328137977729SKees Cook timer_setup(&host->cto_timer, dw_mci_cto_timer, 0); 328237977729SKees Cook timer_setup(&host->dto_timer, dw_mci_dto_timer, 0); 328357e10486SAddy Ke 3284f95f3850SWill Newton spin_lock_init(&host->lock); 3285f8c58c11SDoug Anderson spin_lock_init(&host->irq_lock); 3286f95f3850SWill Newton INIT_LIST_HEAD(&host->queue); 3287f95f3850SWill Newton 3288f95f3850SWill Newton /* 3289f95f3850SWill Newton * Get the host data width - this assumes that HCON has been set with 3290f95f3850SWill Newton * the correct values. 3291f95f3850SWill Newton */ 329270692752SShawn Lin i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON)); 3293f95f3850SWill Newton if (!i) { 3294f95f3850SWill Newton host->push_data = dw_mci_push_data16; 3295f95f3850SWill Newton host->pull_data = dw_mci_pull_data16; 3296f95f3850SWill Newton width = 16; 3297f95f3850SWill Newton host->data_shift = 1; 3298f95f3850SWill Newton } else if (i == 2) { 3299f95f3850SWill Newton host->push_data = dw_mci_push_data64; 3300f95f3850SWill Newton host->pull_data = dw_mci_pull_data64; 3301f95f3850SWill Newton width = 64; 3302f95f3850SWill Newton host->data_shift = 3; 3303f95f3850SWill Newton } else { 3304f95f3850SWill Newton /* Check for a reserved value, and warn if it is */ 3305f95f3850SWill Newton WARN((i != 1), 3306f95f3850SWill Newton "HCON reports a reserved host data width!\n" 3307f95f3850SWill Newton "Defaulting to 32-bit access.\n"); 3308f95f3850SWill Newton host->push_data = dw_mci_push_data32; 3309f95f3850SWill Newton host->pull_data = dw_mci_pull_data32; 3310f95f3850SWill Newton width = 32; 3311f95f3850SWill Newton host->data_shift = 2; 3312f95f3850SWill Newton } 3313f95f3850SWill Newton 3314f95f3850SWill Newton /* Reset all blocks */ 33153744415cSShawn Lin if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 
33163744415cSShawn Lin ret = -ENODEV; 33173744415cSShawn Lin goto err_clk_ciu; 33183744415cSShawn Lin } 3319141a712aSSeungwon Jeon 3320141a712aSSeungwon Jeon host->dma_ops = host->pdata->dma_ops; 3321141a712aSSeungwon Jeon dw_mci_init_dma(host); 3322f95f3850SWill Newton 3323f95f3850SWill Newton /* Clear the interrupts for the host controller */ 3324f95f3850SWill Newton mci_writel(host, RINTSTS, 0xFFFFFFFF); 3325f95f3850SWill Newton mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3326f95f3850SWill Newton 3327f95f3850SWill Newton /* Put in max timeout */ 3328f95f3850SWill Newton mci_writel(host, TMOUT, 0xFFFFFFFF); 3329f95f3850SWill Newton 3330f95f3850SWill Newton /* 3331f95f3850SWill Newton * FIFO threshold settings RxMark = fifo_size / 2 - 1, 3332f95f3850SWill Newton * Tx Mark = fifo_size / 2 DMA Size = 8 3333f95f3850SWill Newton */ 3334b86d8253SJames Hogan if (!host->pdata->fifo_depth) { 3335b86d8253SJames Hogan /* 3336b86d8253SJames Hogan * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may 3337b86d8253SJames Hogan * have been overwritten by the bootloader, just like we're 3338b86d8253SJames Hogan * about to do, so if you know the value for your hardware, you 3339b86d8253SJames Hogan * should put it in the platform data. 
3340b86d8253SJames Hogan */ 3341f95f3850SWill Newton fifo_size = mci_readl(host, FIFOTH); 33428234e869SJaehoon Chung fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 3343b86d8253SJames Hogan } else { 3344b86d8253SJames Hogan fifo_size = host->pdata->fifo_depth; 3345b86d8253SJames Hogan } 3346b86d8253SJames Hogan host->fifo_depth = fifo_size; 334752426899SSeungwon Jeon host->fifoth_val = 334852426899SSeungwon Jeon SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); 3349e61cf118SJaehoon Chung mci_writel(host, FIFOTH, host->fifoth_val); 3350f95f3850SWill Newton 3351f95f3850SWill Newton /* disable clock to CIU */ 3352f95f3850SWill Newton mci_writel(host, CLKENA, 0); 3353f95f3850SWill Newton mci_writel(host, CLKSRC, 0); 3354f95f3850SWill Newton 335563008768SJames Hogan /* 335663008768SJames Hogan * In 2.40a spec, Data offset is changed. 335763008768SJames Hogan * Need to check the version-id and set data-offset for DATA register. 335863008768SJames Hogan */ 335963008768SJames Hogan host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 336063008768SJames Hogan dev_info(host->dev, "Version ID is %04x\n", host->verid); 336163008768SJames Hogan 3362a0361c1aSJun Nie if (host->data_addr_override) 3363a0361c1aSJun Nie host->fifo_reg = host->regs + host->data_addr_override; 3364a0361c1aSJun Nie else if (host->verid < DW_MMC_240A) 336576184ac1SBen Dooks host->fifo_reg = host->regs + DATA_OFFSET; 336663008768SJames Hogan else 336776184ac1SBen Dooks host->fifo_reg = host->regs + DATA_240A_OFFSET; 336863008768SJames Hogan 3369f95f3850SWill Newton tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 3370780f22afSSeungwon Jeon ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 3371780f22afSSeungwon Jeon host->irq_flags, "dw-mci", host); 3372f95f3850SWill Newton if (ret) 33736130e7a9SDoug Anderson goto err_dmaunmap; 3374f95f3850SWill Newton 3375d30a8f7bSJaehoon Chung /* 3376fa0c3283SDoug Anderson * Enable interrupts for command done, data over, data 
empty, 33772da1d7f2SYuvaraj CD * receive ready and error such as transmit, receive timeout, crc error 33782da1d7f2SYuvaraj CD */ 33792da1d7f2SYuvaraj CD mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 33802da1d7f2SYuvaraj CD SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3381fa0c3283SDoug Anderson DW_MCI_ERROR_FLAGS); 33820e3a22c0SShawn Lin /* Enable mci interrupt */ 33830e3a22c0SShawn Lin mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 33842da1d7f2SYuvaraj CD 33850e3a22c0SShawn Lin dev_info(host->dev, 33860e3a22c0SShawn Lin "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n", 33872da1d7f2SYuvaraj CD host->irq, width, fifo_size); 33882da1d7f2SYuvaraj CD 3389f95f3850SWill Newton /* We need at least one slot to succeed */ 3390e4a65ef7SJaehoon Chung ret = dw_mci_init_slot(host); 339158870241SJaehoon Chung if (ret) { 33921c2215b7SThomas Abraham dev_dbg(host->dev, "slot %d init failed\n", i); 33936130e7a9SDoug Anderson goto err_dmaunmap; 3394f95f3850SWill Newton } 3395f95f3850SWill Newton 3396b793f658SDoug Anderson /* Now that slots are all setup, we can enable card detect */ 3397b793f658SDoug Anderson dw_mci_enable_cd(host); 3398b793f658SDoug Anderson 3399f95f3850SWill Newton return 0; 3400f95f3850SWill Newton 3401f95f3850SWill Newton err_dmaunmap: 3402f95f3850SWill Newton if (host->use_dma && host->dma_ops->exit) 3403f95f3850SWill Newton host->dma_ops->exit(host); 3404f90a0612SThomas Abraham 3405d6786fefSGuodong Xu if (!IS_ERR(host->pdata->rstc)) 3406d6786fefSGuodong Xu reset_control_assert(host->pdata->rstc); 3407d6786fefSGuodong Xu 3408f90a0612SThomas Abraham err_clk_ciu: 3409f90a0612SThomas Abraham clk_disable_unprepare(host->ciu_clk); 3410780f22afSSeungwon Jeon 3411f90a0612SThomas Abraham err_clk_biu: 3412f90a0612SThomas Abraham clk_disable_unprepare(host->biu_clk); 3413780f22afSSeungwon Jeon 3414f95f3850SWill Newton return ret; 3415f95f3850SWill Newton } 341662ca8034SShashidhar Hiremath EXPORT_SYMBOL(dw_mci_probe); 3417f95f3850SWill Newton 
/*
 * dw_mci_remove - tear down a controller previously set up by dw_mci_probe().
 * @host: controller state.
 *
 * Unregisters the slot, masks and clears all controller interrupts, gates
 * the card clock, shuts down DMA, asserts the optional reset line and
 * disables both interface clocks (mirror of the probe error path).
 */
void dw_mci_remove(struct dw_mci *host)
{
	dev_dbg(host->dev, "remove slot\n");
	if (host->slot)
		dw_mci_cleanup_slot(host->slot);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

	clk_disable_unprepare(host->ciu_clk);
	clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);



#ifdef CONFIG_PM
/*
 * dw_mci_runtime_suspend - runtime-PM suspend callback.
 * @dev: the device whose drvdata is the struct dw_mci.
 *
 * Shuts down DMA and gates the ciu clock.  The biu clock is only gated
 * when card detect works without the controller (GPIO CD or a
 * non-removable card); otherwise it must keep running so removal can
 * still be noticed.  Always returns 0.
 */
int dw_mci_runtime_suspend(struct device *dev)
{
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	clk_disable_unprepare(host->ciu_clk);

	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return 0;
}
EXPORT_SYMBOL(dw_mci_runtime_suspend);

/*
 * dw_mci_runtime_resume - runtime-PM resume callback.
 * @dev: the device whose drvdata is the struct dw_mci.
 *
 * Re-enables the clocks gated in dw_mci_runtime_suspend() (same
 * conditional for biu), resets the IP, re-initializes DMA and restores
 * the FIFO threshold, timeout and interrupt-mask registers programmed by
 * dw_mci_probe().  Returns 0 or a negative errno; on failure any biu
 * clock enabled here is disabled again.
 */
int dw_mci_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc))) {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(host->ciu_clk);
	if (ret)
		goto err;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		/* ciu was enabled above but the goto err path only handles biu */
		clk_disable_unprepare(host->ciu_clk);
		ret = -ENODEV;
		goto err;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/* Same interrupt set as programmed by dw_mci_probe(). */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);


	/*
	 * NOTE(review): host->slot is dereferenced unconditionally here,
	 * while the clock paths above guard with "host->slot &&".  Probe
	 * fails without a slot, so this is presumably always non-NULL at
	 * resume time — confirm before relying on it.
	 */
	if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
		dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);

	/* Force setup bus to guarantee available clock output */
	dw_mci_setup_bus(host->slot, true);

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err:
	/* Undo the conditional biu enable from the top of this function. */
	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_runtime_resume);
#endif /* CONFIG_PM */

/* Module init: just announce the driver; probing is done per-platform. */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

/* Nothing to undo at module exit; devices are torn down via dw_mci_remove(). */
static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam"); 3543f95f3850SWill Newton MODULE_AUTHOR("Imagination Technologies Ltd"); 3544f95f3850SWill Newton MODULE_LICENSE("GPL v2"); 3545