12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 2f95f3850SWill Newton /* 3f95f3850SWill Newton * Synopsys DesignWare Multimedia Card Interface driver 4f95f3850SWill Newton * (Based on NXP driver for lpc 31xx) 5f95f3850SWill Newton * 6f95f3850SWill Newton * Copyright (C) 2009 NXP Semiconductors 7f95f3850SWill Newton * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 8f95f3850SWill Newton */ 9f95f3850SWill Newton 10f95f3850SWill Newton #include <linux/blkdev.h> 11f95f3850SWill Newton #include <linux/clk.h> 12f95f3850SWill Newton #include <linux/debugfs.h> 13f95f3850SWill Newton #include <linux/device.h> 14f95f3850SWill Newton #include <linux/dma-mapping.h> 15f95f3850SWill Newton #include <linux/err.h> 16f95f3850SWill Newton #include <linux/init.h> 17f95f3850SWill Newton #include <linux/interrupt.h> 18b6d2d81cSShawn Lin #include <linux/iopoll.h> 19f95f3850SWill Newton #include <linux/ioport.h> 202b8ac062SVincent Whitchurch #include <linux/ktime.h> 21f95f3850SWill Newton #include <linux/module.h> 22f95f3850SWill Newton #include <linux/platform_device.h> 23a6db2c86SDouglas Anderson #include <linux/pm_runtime.h> 242b8ac062SVincent Whitchurch #include <linux/prandom.h> 25f95f3850SWill Newton #include <linux/seq_file.h> 26f95f3850SWill Newton #include <linux/slab.h> 27f95f3850SWill Newton #include <linux/stat.h> 28f95f3850SWill Newton #include <linux/delay.h> 29f95f3850SWill Newton #include <linux/irq.h> 30b24c8b26SDoug Anderson #include <linux/mmc/card.h> 31f95f3850SWill Newton #include <linux/mmc/host.h> 32f95f3850SWill Newton #include <linux/mmc/mmc.h> 3301730558SDoug Anderson #include <linux/mmc/sd.h> 3490c2143aSSeungwon Jeon #include <linux/mmc/sdio.h> 35f95f3850SWill Newton #include <linux/bitops.h> 36c07946a3SJaehoon Chung #include <linux/regulator/consumer.h> 37c91eab4bSThomas Abraham #include <linux/of.h> 3855a6ceb2SDoug Anderson #include <linux/of_gpio.h> 39bf626e55SZhangfei Gao #include <linux/mmc/slot-gpio.h> 40f95f3850SWill Newton 
41f95f3850SWill Newton #include "dw_mmc.h" 42f95f3850SWill Newton 43f95f3850SWill Newton /* Common flag combinations */ 443f7eec62SJaehoon Chung #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \ 45f95f3850SWill Newton SDMMC_INT_HTO | SDMMC_INT_SBE | \ 467a3c5677SDoug Anderson SDMMC_INT_EBE | SDMMC_INT_HLE) 47f95f3850SWill Newton #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ 487a3c5677SDoug Anderson SDMMC_INT_RESP_ERR | SDMMC_INT_HLE) 49f95f3850SWill Newton #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \ 507a3c5677SDoug Anderson DW_MCI_CMD_ERROR_FLAGS) 51f95f3850SWill Newton #define DW_MCI_SEND_STATUS 1 52f95f3850SWill Newton #define DW_MCI_RECV_STATUS 2 53f95f3850SWill Newton #define DW_MCI_DMA_THRESHOLD 16 54f95f3850SWill Newton 551f44a2a5SSeungwon Jeon #define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ 5672e83577SJaehoon Chung #define DW_MCI_FREQ_MIN 100000 /* unit: HZ */ 571f44a2a5SSeungwon Jeon 58fc79a4d6SJoonyoung Shim #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ 59fc79a4d6SJoonyoung Shim SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ 60fc79a4d6SJoonyoung Shim SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ 61fc79a4d6SJoonyoung Shim SDMMC_IDMAC_INT_TI) 62fc79a4d6SJoonyoung Shim 63cc190d4cSShawn Lin #define DESC_RING_BUF_SZ PAGE_SIZE 64cc190d4cSShawn Lin 6569d99fdcSPrabu Thangamuthu struct idmac_desc_64addr { 6669d99fdcSPrabu Thangamuthu u32 des0; /* Control Descriptor */ 67b6d2d81cSShawn Lin #define IDMAC_OWN_CLR64(x) \ 68b6d2d81cSShawn Lin !((x) & cpu_to_le32(IDMAC_DES0_OWN)) 6969d99fdcSPrabu Thangamuthu 7069d99fdcSPrabu Thangamuthu u32 des1; /* Reserved */ 7169d99fdcSPrabu Thangamuthu 7269d99fdcSPrabu Thangamuthu u32 des2; /*Buffer sizes */ 7369d99fdcSPrabu Thangamuthu #define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \ 746687c42fSBen Dooks ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \ 756687c42fSBen Dooks ((cpu_to_le32(s)) & cpu_to_le32(0x1fff))) 7669d99fdcSPrabu Thangamuthu 7769d99fdcSPrabu 
Thangamuthu u32 des3; /* Reserved */ 7869d99fdcSPrabu Thangamuthu 7969d99fdcSPrabu Thangamuthu u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/ 8069d99fdcSPrabu Thangamuthu u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/ 8169d99fdcSPrabu Thangamuthu 8269d99fdcSPrabu Thangamuthu u32 des6; /* Lower 32-bits of Next Descriptor Address */ 8369d99fdcSPrabu Thangamuthu u32 des7; /* Upper 32-bits of Next Descriptor Address */ 8469d99fdcSPrabu Thangamuthu }; 8569d99fdcSPrabu Thangamuthu 86f95f3850SWill Newton struct idmac_desc { 876687c42fSBen Dooks __le32 des0; /* Control Descriptor */ 88f95f3850SWill Newton #define IDMAC_DES0_DIC BIT(1) 89f95f3850SWill Newton #define IDMAC_DES0_LD BIT(2) 90f95f3850SWill Newton #define IDMAC_DES0_FD BIT(3) 91f95f3850SWill Newton #define IDMAC_DES0_CH BIT(4) 92f95f3850SWill Newton #define IDMAC_DES0_ER BIT(5) 93f95f3850SWill Newton #define IDMAC_DES0_CES BIT(30) 94f95f3850SWill Newton #define IDMAC_DES0_OWN BIT(31) 95f95f3850SWill Newton 966687c42fSBen Dooks __le32 des1; /* Buffer sizes */ 97f95f3850SWill Newton #define IDMAC_SET_BUFFER1_SIZE(d, s) \ 98e5306c3aSBen Dooks ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff))) 99f95f3850SWill Newton 1006687c42fSBen Dooks __le32 des2; /* buffer 1 physical address */ 101f95f3850SWill Newton 1026687c42fSBen Dooks __le32 des3; /* buffer 2 physical address */ 103f95f3850SWill Newton }; 1045959b32eSAlexey Brodkin 1055959b32eSAlexey Brodkin /* Each descriptor can transfer up to 4KB of data in chained mode */ 1065959b32eSAlexey Brodkin #define DW_MCI_DESC_DATA_LENGTH 0x1000 107f95f3850SWill Newton 108f95f3850SWill Newton #if defined(CONFIG_DEBUG_FS) 109f95f3850SWill Newton static int dw_mci_req_show(struct seq_file *s, void *v) 110f95f3850SWill Newton { 111f95f3850SWill Newton struct dw_mci_slot *slot = s->private; 112f95f3850SWill Newton struct mmc_request *mrq; 113f95f3850SWill Newton struct mmc_command *cmd; 114f95f3850SWill Newton struct mmc_command 
*stop; 115f95f3850SWill Newton struct mmc_data *data; 116f95f3850SWill Newton 117f95f3850SWill Newton /* Make sure we get a consistent snapshot */ 118f95f3850SWill Newton spin_lock_bh(&slot->host->lock); 119f95f3850SWill Newton mrq = slot->mrq; 120f95f3850SWill Newton 121f95f3850SWill Newton if (mrq) { 122f95f3850SWill Newton cmd = mrq->cmd; 123f95f3850SWill Newton data = mrq->data; 124f95f3850SWill Newton stop = mrq->stop; 125f95f3850SWill Newton 126f95f3850SWill Newton if (cmd) 127f95f3850SWill Newton seq_printf(s, 128f95f3850SWill Newton "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 129f95f3850SWill Newton cmd->opcode, cmd->arg, cmd->flags, 130f95f3850SWill Newton cmd->resp[0], cmd->resp[1], cmd->resp[2], 131f95f3850SWill Newton cmd->resp[2], cmd->error); 132f95f3850SWill Newton if (data) 133f95f3850SWill Newton seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", 134f95f3850SWill Newton data->bytes_xfered, data->blocks, 135f95f3850SWill Newton data->blksz, data->flags, data->error); 136f95f3850SWill Newton if (stop) 137f95f3850SWill Newton seq_printf(s, 138f95f3850SWill Newton "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 139f95f3850SWill Newton stop->opcode, stop->arg, stop->flags, 140f95f3850SWill Newton stop->resp[0], stop->resp[1], stop->resp[2], 141f95f3850SWill Newton stop->resp[2], stop->error); 142f95f3850SWill Newton } 143f95f3850SWill Newton 144f95f3850SWill Newton spin_unlock_bh(&slot->host->lock); 145f95f3850SWill Newton 146f95f3850SWill Newton return 0; 147f95f3850SWill Newton } 14864c1412bSShawn Lin DEFINE_SHOW_ATTRIBUTE(dw_mci_req); 149f95f3850SWill Newton 150f95f3850SWill Newton static int dw_mci_regs_show(struct seq_file *s, void *v) 151f95f3850SWill Newton { 15221657ebdSJaehoon Chung struct dw_mci *host = s->private; 15321657ebdSJaehoon Chung 1545b43df8bSShawn Lin pm_runtime_get_sync(host->dev); 1555b43df8bSShawn Lin 15621657ebdSJaehoon Chung seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); 15721657ebdSJaehoon Chung seq_printf(s, 
"RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); 15821657ebdSJaehoon Chung seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); 15921657ebdSJaehoon Chung seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL)); 16021657ebdSJaehoon Chung seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); 16121657ebdSJaehoon Chung seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); 162f95f3850SWill Newton 1635b43df8bSShawn Lin pm_runtime_put_autosuspend(host->dev); 1645b43df8bSShawn Lin 165f95f3850SWill Newton return 0; 166f95f3850SWill Newton } 16764c1412bSShawn Lin DEFINE_SHOW_ATTRIBUTE(dw_mci_regs); 168f95f3850SWill Newton 169f95f3850SWill Newton static void dw_mci_init_debugfs(struct dw_mci_slot *slot) 170f95f3850SWill Newton { 171f95f3850SWill Newton struct mmc_host *mmc = slot->mmc; 172f95f3850SWill Newton struct dw_mci *host = slot->host; 173f95f3850SWill Newton struct dentry *root; 174f95f3850SWill Newton 175f95f3850SWill Newton root = mmc->debugfs_root; 176f95f3850SWill Newton if (!root) 177f95f3850SWill Newton return; 178f95f3850SWill Newton 179fcac1527SGreg Kroah-Hartman debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops); 180fcac1527SGreg Kroah-Hartman debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops); 181118e1118SGeert Uytterhoeven debugfs_create_u32("state", S_IRUSR, root, &host->state); 1820c40c1beSGeert Uytterhoeven debugfs_create_xul("pending_events", S_IRUSR, root, 1830c40c1beSGeert Uytterhoeven &host->pending_events); 1840c40c1beSGeert Uytterhoeven debugfs_create_xul("completed_events", S_IRUSR, root, 1850c40c1beSGeert Uytterhoeven &host->completed_events); 1862b8ac062SVincent Whitchurch #ifdef CONFIG_FAULT_INJECTION 1872b8ac062SVincent Whitchurch fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc); 1882b8ac062SVincent Whitchurch #endif 189f95f3850SWill Newton } 190f95f3850SWill Newton #endif /* defined(CONFIG_DEBUG_FS) */ 191f95f3850SWill Newton 1928e6db1f6SShawn Lin static bool 
dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) 1938e6db1f6SShawn Lin { 1948e6db1f6SShawn Lin u32 ctrl; 1958e6db1f6SShawn Lin 1968e6db1f6SShawn Lin ctrl = mci_readl(host, CTRL); 1978e6db1f6SShawn Lin ctrl |= reset; 1988e6db1f6SShawn Lin mci_writel(host, CTRL, ctrl); 1998e6db1f6SShawn Lin 2008e6db1f6SShawn Lin /* wait till resets clear */ 2018e6db1f6SShawn Lin if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl, 2028e6db1f6SShawn Lin !(ctrl & reset), 2038e6db1f6SShawn Lin 1, 500 * USEC_PER_MSEC)) { 2048e6db1f6SShawn Lin dev_err(host->dev, 2058e6db1f6SShawn Lin "Timeout resetting block (ctrl reset %#x)\n", 2068e6db1f6SShawn Lin ctrl & reset); 2078e6db1f6SShawn Lin return false; 2088e6db1f6SShawn Lin } 2098e6db1f6SShawn Lin 2108e6db1f6SShawn Lin return true; 2118e6db1f6SShawn Lin } 21201730558SDoug Anderson 2134dba18deSShawn Lin static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags) 2144dba18deSShawn Lin { 2154dba18deSShawn Lin u32 status; 2164dba18deSShawn Lin 2174dba18deSShawn Lin /* 2184dba18deSShawn Lin * Databook says that before issuing a new data transfer command 2194dba18deSShawn Lin * we need to check to see if the card is busy. Data transfer commands 2204dba18deSShawn Lin * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that. 2214dba18deSShawn Lin * 2224dba18deSShawn Lin * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is 2234dba18deSShawn Lin * expected. 
2244dba18deSShawn Lin */ 2254dba18deSShawn Lin if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) && 2264dba18deSShawn Lin !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) { 2274dba18deSShawn Lin if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, 2284dba18deSShawn Lin status, 2294dba18deSShawn Lin !(status & SDMMC_STATUS_BUSY), 2304dba18deSShawn Lin 10, 500 * USEC_PER_MSEC)) 2314dba18deSShawn Lin dev_err(host->dev, "Busy; trying anyway\n"); 2324dba18deSShawn Lin } 2334dba18deSShawn Lin } 2344dba18deSShawn Lin 2354dba18deSShawn Lin static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) 2364dba18deSShawn Lin { 2374dba18deSShawn Lin struct dw_mci *host = slot->host; 2384dba18deSShawn Lin unsigned int cmd_status = 0; 2394dba18deSShawn Lin 2404dba18deSShawn Lin mci_writel(host, CMDARG, arg); 2414dba18deSShawn Lin wmb(); /* drain writebuffer */ 2424dba18deSShawn Lin dw_mci_wait_while_busy(host, cmd); 2434dba18deSShawn Lin mci_writel(host, CMD, SDMMC_CMD_START | cmd); 2444dba18deSShawn Lin 2454dba18deSShawn Lin if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status, 2464dba18deSShawn Lin !(cmd_status & SDMMC_CMD_START), 2474dba18deSShawn Lin 1, 500 * USEC_PER_MSEC)) 2484dba18deSShawn Lin dev_err(&slot->mmc->class_dev, 2494dba18deSShawn Lin "Timeout sending command (cmd %#x arg %#x status %#x)\n", 2504dba18deSShawn Lin cmd, arg, cmd_status); 2514dba18deSShawn Lin } 2524dba18deSShawn Lin 253f95f3850SWill Newton static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 254f95f3850SWill Newton { 255800d78bfSThomas Abraham struct dw_mci_slot *slot = mmc_priv(mmc); 25601730558SDoug Anderson struct dw_mci *host = slot->host; 257f95f3850SWill Newton u32 cmdr; 258f95f3850SWill Newton 2590e3a22c0SShawn Lin cmd->error = -EINPROGRESS; 260f95f3850SWill Newton cmdr = cmd->opcode; 261f95f3850SWill Newton 26290c2143aSSeungwon Jeon if (cmd->opcode == MMC_STOP_TRANSMISSION || 26390c2143aSSeungwon Jeon cmd->opcode == MMC_GO_IDLE_STATE || 26490c2143aSSeungwon 
Jeon cmd->opcode == MMC_GO_INACTIVE_STATE || 26590c2143aSSeungwon Jeon (cmd->opcode == SD_IO_RW_DIRECT && 26690c2143aSSeungwon Jeon ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) 267f95f3850SWill Newton cmdr |= SDMMC_CMD_STOP; 2684a1b27adSJaehoon Chung else if (cmd->opcode != MMC_SEND_STATUS && cmd->data) 269f95f3850SWill Newton cmdr |= SDMMC_CMD_PRV_DAT_WAIT; 270f95f3850SWill Newton 27101730558SDoug Anderson if (cmd->opcode == SD_SWITCH_VOLTAGE) { 27201730558SDoug Anderson u32 clk_en_a; 27301730558SDoug Anderson 27401730558SDoug Anderson /* Special bit makes CMD11 not die */ 27501730558SDoug Anderson cmdr |= SDMMC_CMD_VOLT_SWITCH; 27601730558SDoug Anderson 27701730558SDoug Anderson /* Change state to continue to handle CMD11 weirdness */ 27801730558SDoug Anderson WARN_ON(slot->host->state != STATE_SENDING_CMD); 27901730558SDoug Anderson slot->host->state = STATE_SENDING_CMD11; 28001730558SDoug Anderson 28101730558SDoug Anderson /* 28201730558SDoug Anderson * We need to disable low power mode (automatic clock stop) 28301730558SDoug Anderson * while doing voltage switch so we don't confuse the card, 28401730558SDoug Anderson * since stopping the clock is a specific part of the UHS 28501730558SDoug Anderson * voltage change dance. 28601730558SDoug Anderson * 28701730558SDoug Anderson * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be 28801730558SDoug Anderson * unconditionally turned back on in dw_mci_setup_bus() if it's 28901730558SDoug Anderson * ever called with a non-zero clock. That shouldn't happen 29001730558SDoug Anderson * until the voltage change is all done. 
29101730558SDoug Anderson */ 29201730558SDoug Anderson clk_en_a = mci_readl(host, CLKENA); 29301730558SDoug Anderson clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id); 29401730558SDoug Anderson mci_writel(host, CLKENA, clk_en_a); 29501730558SDoug Anderson mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | 29601730558SDoug Anderson SDMMC_CMD_PRV_DAT_WAIT, 0); 29701730558SDoug Anderson } 29801730558SDoug Anderson 299f95f3850SWill Newton if (cmd->flags & MMC_RSP_PRESENT) { 300f95f3850SWill Newton /* We expect a response, so set this bit */ 301f95f3850SWill Newton cmdr |= SDMMC_CMD_RESP_EXP; 302f95f3850SWill Newton if (cmd->flags & MMC_RSP_136) 303f95f3850SWill Newton cmdr |= SDMMC_CMD_RESP_LONG; 304f95f3850SWill Newton } 305f95f3850SWill Newton 306f95f3850SWill Newton if (cmd->flags & MMC_RSP_CRC) 307f95f3850SWill Newton cmdr |= SDMMC_CMD_RESP_CRC; 308f95f3850SWill Newton 3090349c085SJaehoon Chung if (cmd->data) { 310f95f3850SWill Newton cmdr |= SDMMC_CMD_DAT_EXP; 3110349c085SJaehoon Chung if (cmd->data->flags & MMC_DATA_WRITE) 312f95f3850SWill Newton cmdr |= SDMMC_CMD_DAT_WR; 313f95f3850SWill Newton } 314f95f3850SWill Newton 315aaaaeb7aSJaehoon Chung if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags)) 316aaaaeb7aSJaehoon Chung cmdr |= SDMMC_CMD_USE_HOLD_REG; 317800d78bfSThomas Abraham 318f95f3850SWill Newton return cmdr; 319f95f3850SWill Newton } 320f95f3850SWill Newton 32190c2143aSSeungwon Jeon static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) 32290c2143aSSeungwon Jeon { 32390c2143aSSeungwon Jeon struct mmc_command *stop; 32490c2143aSSeungwon Jeon u32 cmdr; 32590c2143aSSeungwon Jeon 32690c2143aSSeungwon Jeon if (!cmd->data) 32790c2143aSSeungwon Jeon return 0; 32890c2143aSSeungwon Jeon 32990c2143aSSeungwon Jeon stop = &host->stop_abort; 33090c2143aSSeungwon Jeon cmdr = cmd->opcode; 33190c2143aSSeungwon Jeon memset(stop, 0, sizeof(struct mmc_command)); 33290c2143aSSeungwon Jeon 33390c2143aSSeungwon Jeon if (cmdr == MMC_READ_SINGLE_BLOCK || 
33490c2143aSSeungwon Jeon cmdr == MMC_READ_MULTIPLE_BLOCK || 33590c2143aSSeungwon Jeon cmdr == MMC_WRITE_BLOCK || 3366c2c6506SUlf Hansson cmdr == MMC_WRITE_MULTIPLE_BLOCK || 3376c2c6506SUlf Hansson cmdr == MMC_SEND_TUNING_BLOCK || 3389f0d3cc2SMårten Lindahl cmdr == MMC_SEND_TUNING_BLOCK_HS200 || 3399f0d3cc2SMårten Lindahl cmdr == MMC_GEN_CMD) { 34090c2143aSSeungwon Jeon stop->opcode = MMC_STOP_TRANSMISSION; 34190c2143aSSeungwon Jeon stop->arg = 0; 34290c2143aSSeungwon Jeon stop->flags = MMC_RSP_R1B | MMC_CMD_AC; 34390c2143aSSeungwon Jeon } else if (cmdr == SD_IO_RW_EXTENDED) { 34490c2143aSSeungwon Jeon stop->opcode = SD_IO_RW_DIRECT; 34590c2143aSSeungwon Jeon stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | 34690c2143aSSeungwon Jeon ((cmd->arg >> 28) & 0x7); 34790c2143aSSeungwon Jeon stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC; 34890c2143aSSeungwon Jeon } else { 34990c2143aSSeungwon Jeon return 0; 35090c2143aSSeungwon Jeon } 35190c2143aSSeungwon Jeon 35290c2143aSSeungwon Jeon cmdr = stop->opcode | SDMMC_CMD_STOP | 35390c2143aSSeungwon Jeon SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP; 35490c2143aSSeungwon Jeon 35542f989c0SJaehoon Chung if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags)) 3568c005b40SJaehoon Chung cmdr |= SDMMC_CMD_USE_HOLD_REG; 3578c005b40SJaehoon Chung 35890c2143aSSeungwon Jeon return cmdr; 35990c2143aSSeungwon Jeon } 36090c2143aSSeungwon Jeon 36103de1921SAddy Ke static inline void dw_mci_set_cto(struct dw_mci *host) 36203de1921SAddy Ke { 36303de1921SAddy Ke unsigned int cto_clks; 3644c2357f5SDouglas Anderson unsigned int cto_div; 36503de1921SAddy Ke unsigned int cto_ms; 3668892b705SDouglas Anderson unsigned long irqflags; 36703de1921SAddy Ke 36803de1921SAddy Ke cto_clks = mci_readl(host, TMOUT) & 0xff; 3694c2357f5SDouglas Anderson cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; 3704c2357f5SDouglas Anderson if (cto_div == 0) 3714c2357f5SDouglas Anderson cto_div = 1; 372c7151602SEvgeniy Didin 373c7151602SEvgeniy Didin 
cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div, 374c7151602SEvgeniy Didin host->bus_hz); 37503de1921SAddy Ke 37603de1921SAddy Ke /* add a bit spare time */ 37703de1921SAddy Ke cto_ms += 10; 37803de1921SAddy Ke 3798892b705SDouglas Anderson /* 3808892b705SDouglas Anderson * The durations we're working with are fairly short so we have to be 3818892b705SDouglas Anderson * extra careful about synchronization here. Specifically in hardware a 3828892b705SDouglas Anderson * command timeout is _at most_ 5.1 ms, so that means we expect an 3838892b705SDouglas Anderson * interrupt (either command done or timeout) to come rather quickly 3848892b705SDouglas Anderson * after the mci_writel. ...but just in case we have a long interrupt 3858892b705SDouglas Anderson * latency let's add a bit of paranoia. 3868892b705SDouglas Anderson * 3878892b705SDouglas Anderson * In general we'll assume that at least an interrupt will be asserted 3888892b705SDouglas Anderson * in hardware by the time the cto_timer runs. ...and if it hasn't 3898892b705SDouglas Anderson * been asserted in hardware by that time then we'll assume it'll never 3908892b705SDouglas Anderson * come. 
3918892b705SDouglas Anderson */ 3928892b705SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 3938892b705SDouglas Anderson if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 39403de1921SAddy Ke mod_timer(&host->cto_timer, 39503de1921SAddy Ke jiffies + msecs_to_jiffies(cto_ms) + 1); 3968892b705SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 39703de1921SAddy Ke } 39803de1921SAddy Ke 399f95f3850SWill Newton static void dw_mci_start_command(struct dw_mci *host, 400f95f3850SWill Newton struct mmc_command *cmd, u32 cmd_flags) 401f95f3850SWill Newton { 402f95f3850SWill Newton host->cmd = cmd; 4034a90920cSThomas Abraham dev_vdbg(host->dev, 404f95f3850SWill Newton "start command: ARGR=0x%08x CMDR=0x%08x\n", 405f95f3850SWill Newton cmd->arg, cmd_flags); 406f95f3850SWill Newton 407f95f3850SWill Newton mci_writel(host, CMDARG, cmd->arg); 4080e3a22c0SShawn Lin wmb(); /* drain writebuffer */ 4090bdbd0e8SDoug Anderson dw_mci_wait_while_busy(host, cmd_flags); 410f95f3850SWill Newton 4118892b705SDouglas Anderson mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); 4128892b705SDouglas Anderson 41303de1921SAddy Ke /* response expected command only */ 41403de1921SAddy Ke if (cmd_flags & SDMMC_CMD_RESP_EXP) 41503de1921SAddy Ke dw_mci_set_cto(host); 416f95f3850SWill Newton } 417f95f3850SWill Newton 41890c2143aSSeungwon Jeon static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) 419f95f3850SWill Newton { 420e13c3c08SJaehoon Chung struct mmc_command *stop = &host->stop_abort; 4210e3a22c0SShawn Lin 42290c2143aSSeungwon Jeon dw_mci_start_command(host, stop, host->stop_cmdr); 423f95f3850SWill Newton } 424f95f3850SWill Newton 425f95f3850SWill Newton /* DMA interface functions */ 426f95f3850SWill Newton static void dw_mci_stop_dma(struct dw_mci *host) 427f95f3850SWill Newton { 42803e8cb53SJames Hogan if (host->using_dma) { 429f95f3850SWill Newton host->dma_ops->stop(host); 430f95f3850SWill Newton host->dma_ops->cleanup(host); 
431aa50f259SSeungwon Jeon } 432aa50f259SSeungwon Jeon 433f95f3850SWill Newton /* Data transfer was stopped by the interrupt handler */ 434f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 435f95f3850SWill Newton } 436f95f3850SWill Newton 437f95f3850SWill Newton static void dw_mci_dma_cleanup(struct dw_mci *host) 438f95f3850SWill Newton { 439f95f3850SWill Newton struct mmc_data *data = host->data; 440f95f3850SWill Newton 441a4cc7eb4SJaehoon Chung if (data && data->host_cookie == COOKIE_MAPPED) { 4424a90920cSThomas Abraham dma_unmap_sg(host->dev, 4439aa51408SSeungwon Jeon data->sg, 4449aa51408SSeungwon Jeon data->sg_len, 445feeef096SHeiner Kallweit mmc_get_dma_dir(data)); 446a4cc7eb4SJaehoon Chung data->host_cookie = COOKIE_UNMAPPED; 447a4cc7eb4SJaehoon Chung } 448f95f3850SWill Newton } 449f95f3850SWill Newton 4505ce9d961SSeungwon Jeon static void dw_mci_idmac_reset(struct dw_mci *host) 4515ce9d961SSeungwon Jeon { 4525ce9d961SSeungwon Jeon u32 bmod = mci_readl(host, BMOD); 4535ce9d961SSeungwon Jeon /* Software reset of DMA */ 4545ce9d961SSeungwon Jeon bmod |= SDMMC_IDMAC_SWRESET; 4555ce9d961SSeungwon Jeon mci_writel(host, BMOD, bmod); 4565ce9d961SSeungwon Jeon } 4575ce9d961SSeungwon Jeon 458f95f3850SWill Newton static void dw_mci_idmac_stop_dma(struct dw_mci *host) 459f95f3850SWill Newton { 460f95f3850SWill Newton u32 temp; 461f95f3850SWill Newton 462f95f3850SWill Newton /* Disable and reset the IDMAC interface */ 463f95f3850SWill Newton temp = mci_readl(host, CTRL); 464f95f3850SWill Newton temp &= ~SDMMC_CTRL_USE_IDMAC; 465f95f3850SWill Newton temp |= SDMMC_CTRL_DMA_RESET; 466f95f3850SWill Newton mci_writel(host, CTRL, temp); 467f95f3850SWill Newton 468f95f3850SWill Newton /* Stop the IDMAC running */ 469f95f3850SWill Newton temp = mci_readl(host, BMOD); 470a5289a43SJaehoon Chung temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); 4715ce9d961SSeungwon Jeon temp |= SDMMC_IDMAC_SWRESET; 472f95f3850SWill Newton mci_writel(host, BMOD, temp); 
473f95f3850SWill Newton } 474f95f3850SWill Newton 4753fc7eaefSShawn Lin static void dw_mci_dmac_complete_dma(void *arg) 476f95f3850SWill Newton { 4773fc7eaefSShawn Lin struct dw_mci *host = arg; 478f95f3850SWill Newton struct mmc_data *data = host->data; 479f95f3850SWill Newton 4804a90920cSThomas Abraham dev_vdbg(host->dev, "DMA complete\n"); 481f95f3850SWill Newton 4823fc7eaefSShawn Lin if ((host->use_dma == TRANS_MODE_EDMAC) && 4833fc7eaefSShawn Lin data && (data->flags & MMC_DATA_READ)) 4843fc7eaefSShawn Lin /* Invalidate cache after read */ 48542f989c0SJaehoon Chung dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc), 4863fc7eaefSShawn Lin data->sg, 4873fc7eaefSShawn Lin data->sg_len, 4883fc7eaefSShawn Lin DMA_FROM_DEVICE); 4893fc7eaefSShawn Lin 490f95f3850SWill Newton host->dma_ops->cleanup(host); 491f95f3850SWill Newton 492f95f3850SWill Newton /* 493f95f3850SWill Newton * If the card was removed, data will be NULL. No point in trying to 494f95f3850SWill Newton * send the stop command or waiting for NBUSY in this case. 
495f95f3850SWill Newton */ 496f95f3850SWill Newton if (data) { 497f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 498f95f3850SWill Newton tasklet_schedule(&host->tasklet); 499f95f3850SWill Newton } 500f95f3850SWill Newton } 501f95f3850SWill Newton 502f95f3850SWill Newton static int dw_mci_idmac_init(struct dw_mci *host) 503f95f3850SWill Newton { 504897b69e7SSeungwon Jeon int i; 505f95f3850SWill Newton 50669d99fdcSPrabu Thangamuthu if (host->dma_64bit_address == 1) { 50769d99fdcSPrabu Thangamuthu struct idmac_desc_64addr *p; 50869d99fdcSPrabu Thangamuthu /* Number of descriptors in the ring buffer */ 509cc190d4cSShawn Lin host->ring_size = 510cc190d4cSShawn Lin DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr); 51169d99fdcSPrabu Thangamuthu 51269d99fdcSPrabu Thangamuthu /* Forward link the descriptor list */ 51369d99fdcSPrabu Thangamuthu for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; 51469d99fdcSPrabu Thangamuthu i++, p++) { 51569d99fdcSPrabu Thangamuthu p->des6 = (host->sg_dma + 51669d99fdcSPrabu Thangamuthu (sizeof(struct idmac_desc_64addr) * 51769d99fdcSPrabu Thangamuthu (i + 1))) & 0xffffffff; 51869d99fdcSPrabu Thangamuthu 51969d99fdcSPrabu Thangamuthu p->des7 = (u64)(host->sg_dma + 52069d99fdcSPrabu Thangamuthu (sizeof(struct idmac_desc_64addr) * 52169d99fdcSPrabu Thangamuthu (i + 1))) >> 32; 52269d99fdcSPrabu Thangamuthu /* Initialize reserved and buffer size fields to "0" */ 52347b7de2fSEvgeniy Didin p->des0 = 0; 52469d99fdcSPrabu Thangamuthu p->des1 = 0; 52569d99fdcSPrabu Thangamuthu p->des2 = 0; 52669d99fdcSPrabu Thangamuthu p->des3 = 0; 52769d99fdcSPrabu Thangamuthu } 52869d99fdcSPrabu Thangamuthu 52969d99fdcSPrabu Thangamuthu /* Set the last descriptor as the end-of-ring descriptor */ 53069d99fdcSPrabu Thangamuthu p->des6 = host->sg_dma & 0xffffffff; 53169d99fdcSPrabu Thangamuthu p->des7 = (u64)host->sg_dma >> 32; 53269d99fdcSPrabu Thangamuthu p->des0 = IDMAC_DES0_ER; 53369d99fdcSPrabu Thangamuthu 53469d99fdcSPrabu 
Thangamuthu } else { 53569d99fdcSPrabu Thangamuthu struct idmac_desc *p; 536f95f3850SWill Newton /* Number of descriptors in the ring buffer */ 537cc190d4cSShawn Lin host->ring_size = 538cc190d4cSShawn Lin DESC_RING_BUF_SZ / sizeof(struct idmac_desc); 539f95f3850SWill Newton 540f95f3850SWill Newton /* Forward link the descriptor list */ 5410e3a22c0SShawn Lin for (i = 0, p = host->sg_cpu; 5420e3a22c0SShawn Lin i < host->ring_size - 1; 5430e3a22c0SShawn Lin i++, p++) { 5446687c42fSBen Dooks p->des3 = cpu_to_le32(host->sg_dma + 5456687c42fSBen Dooks (sizeof(struct idmac_desc) * (i + 1))); 54647b7de2fSEvgeniy Didin p->des0 = 0; 5474b244724SZhangfei Gao p->des1 = 0; 5484b244724SZhangfei Gao } 549f95f3850SWill Newton 550f95f3850SWill Newton /* Set the last descriptor as the end-of-ring descriptor */ 5516687c42fSBen Dooks p->des3 = cpu_to_le32(host->sg_dma); 5526687c42fSBen Dooks p->des0 = cpu_to_le32(IDMAC_DES0_ER); 55369d99fdcSPrabu Thangamuthu } 554f95f3850SWill Newton 5555ce9d961SSeungwon Jeon dw_mci_idmac_reset(host); 556141a712aSSeungwon Jeon 55769d99fdcSPrabu Thangamuthu if (host->dma_64bit_address == 1) { 55869d99fdcSPrabu Thangamuthu /* Mask out interrupts - get Tx & Rx complete only */ 55969d99fdcSPrabu Thangamuthu mci_writel(host, IDSTS64, IDMAC_INT_CLR); 56069d99fdcSPrabu Thangamuthu mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI | 56169d99fdcSPrabu Thangamuthu SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); 56269d99fdcSPrabu Thangamuthu 56369d99fdcSPrabu Thangamuthu /* Set the descriptor base address */ 56469d99fdcSPrabu Thangamuthu mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff); 56569d99fdcSPrabu Thangamuthu mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32); 56669d99fdcSPrabu Thangamuthu 56769d99fdcSPrabu Thangamuthu } else { 568f95f3850SWill Newton /* Mask out interrupts - get Tx & Rx complete only */ 569fc79a4d6SJoonyoung Shim mci_writel(host, IDSTS, IDMAC_INT_CLR); 57069d99fdcSPrabu Thangamuthu mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | 
				   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

/*
 * Build the 64-bit-address IDMAC descriptor chain for one data transfer.
 *
 * Walks the already-DMA-mapped scatterlist and fills one or more hardware
 * descriptors per sg entry, splitting entries larger than
 * DW_MCI_DESC_DATA_LENGTH across several descriptors.  The first descriptor
 * is tagged FD, the last LD, and each descriptor is handed to the IDMAC by
 * setting its OWN bit.
 *
 * Returns 0 on success, or -EINVAL if a descriptor is still owned by the
 * IDMAC after polling for up to 100ms; in that case the whole descriptor
 * ring is zeroed and re-initialized via dw_mci_idmac_init().
 */
static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						!(val & IDMAC_DES0_OWN),
						10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from (split into
			 * low/high 32-bit halves for the 64-bit descriptor) */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

/*
 * Build the 32-bit-address IDMAC descriptor chain for one data transfer.
 *
 * Same structure as dw_mci_prepare_desc64() above, but for the 32-bit
 * descriptor layout, whose fields are little-endian (hence the
 * cpu_to_le32() conversions on every des0/des2 access).
 *
 * NOTE(review): IDMAC_OWN_CLR64() is presumably the le32-aware test for the
 * OWN bit being clear, despite the "64" in its name — confirm against the
 * macro definition in dw_mmc.h.
 *
 * Returns 0 on success, or -EINVAL if a descriptor is still owned by the
 * IDMAC after polling for up to 100ms (ring is then wiped and
 * re-initialized).
 */
static inline int
dw_mci_prepare_desc32(struct dw_mci *host,
		      struct mmc_data *data,
		      unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
				       IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

/*
 * Program and start an internal-DMAC (IDMAC) transfer.
 *
 * Prepares the descriptor ring (64- or 32-bit layout depending on
 * host->dma_64bit_address), resets the DMA block, switches the controller
 * to IDMAC mode and writes the poll-demand register to start the engine.
 *
 * Returns 0 on success or the error from descriptor preparation.
 */
static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}

/* DMA ops for the internal DMA controller (IDMAC) path. */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

/* Abort an in-flight external-DMA (dmaengine) transfer. */
static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

/*
 * Start a transfer through an external dmaengine channel.
 *
 * Configures the slave channel (FIFO address, 4-byte bus width, burst size
 * matched to the MSIZE field of FIFOTH), prepares and submits a slave-sg
 * descriptor with dw_mci_dmac_complete_dma() as completion callback, then
 * issues it.
 *
 * Returns 0 on success, -EBUSY if the channel can't be configured or the
 * descriptor can't be prepared.
 */
static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	memset(&cfg, 0, sizeof(cfg));
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

/*
 * Acquire the external "rx-tx" dmaengine channel and the bookkeeping
 * structure for it.  Returns 0 or a negative errno (allocation failure or
 * the error from dma_request_chan()).
 */
static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_chan(host->dev, "rx-tx");
	if (IS_ERR(host->dms->ch)) {
		int ret = PTR_ERR(host->dms->ch);

		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return ret;
	}

	return 0;
}

/* Release the external dmaengine channel and free the slave structure. */
static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}
		kfree(host->dms);
		host->dms = NULL;
	}
}

/* DMA ops for the external dmaengine (EDMAC) path. */
static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

/*
 * DMA-map the request's scatterlist if the transfer is DMA-eligible.
 *
 * Returns the mapped sg count on success (or the existing count if the
 * request was already pre-mapped via dw_mci_pre_req()), -EINVAL if the
 * transfer is too short or not word-aligned, or if mapping fails.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   int cookie)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    mmc_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	data->host_cookie = cookie;

	return sg_len;
}

/*
 * ->pre_req() host op: map the next request's data ahead of time so the
 * actual request submission doesn't pay the mapping cost.  On failure the
 * cookie stays COOKIE_UNMAPPED and the request falls back to mapping at
 * submit time.
 */
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* This data might be unmapped at this time */
	data->host_cookie = COOKIE_UNMAPPED;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
				    COOKIE_PRE_MAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

/*
 * ->post_req() host op: unmap a request's data after completion (whether
 * it was mapped by pre_req or at submit time) and reset the cookie.
 */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
	data->host_cookie = COOKIE_UNMAPPED;
}

/*
 * ->get_cd() host op: report card presence.
 *
 * Polled or non-removable slots are always reported present.  Otherwise a
 * CD GPIO is used when available, falling back to the controller's CDETECT
 * register (active-low per-slot bit).  The DW_MMC_CARD_PRESENT flag is kept
 * in sync under host->lock.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (((mmc->caps & MMC_CAP_NEEDS_POLL)
				|| !mmc_card_is_removable(mmc))) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					"card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					"card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
			!test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}

/*
 * Tune FIFOTH (burst size MSIZE plus RX/TX watermarks) to the transfer's
 * block size, picking the largest burst size that evenly divides both the
 * block depth and the TX watermark complement.  No-op in PIO mode.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* pio should ship this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}

/*
 * Program the card read/write threshold register (CDTHRCTL) for the
 * current transfer, or disable it when the timing mode / block size /
 * controller version doesn't support it.
 */
static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
		(host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * Card write Threshold is introduced since 2.80a
	 * It's used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
		host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	/* Read threshold only applies to the fast timing modes */
	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104 &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}

/*
 * Try to set up the current data transfer for DMA.
 *
 * Maps the scatterlist, adjusts FIFOTH if the block size changed, enables
 * the DMA interface, masks the RX/TX PIO interrupts and starts the
 * configured DMA engine.  Returns 0 on success or a negative errno, in
 * which case the caller falls back to PIO.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}

/*
 * Submit the data portion of a request: program the threshold register,
 * attempt DMA, and fall back to interrupt-driven PIO (sg_miter plus
 * RXDR/TXDR interrupts) when DMA isn't possible.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback path */
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode. If wm_aligned
		 * is set, we set watermark same as data size.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		if (host->wm_aligned)
			dw_mci_adjust_fifoth(host, data);
		else
			mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

/*
 * Program the card clock for a slot: compute the divider from bus_hz and
 * the requested clock, run the disable/divide/enable register sequence
 * (informing the CIU after each step), and record the resulting
 * actual_clock.  force_clkinit reprograms even when the speed is unchanged.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	slot->mmc->actual_clock = 0;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking
			 the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock != slot->__clk_old &&
			!test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
			force_clkinit) {
			/* Silence the verbose log if calling from PM context */
			if (!force_clkinit)
				dev_info(&slot->mmc->class_dev,
					 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
					 slot->id, host->bus_hz, clock,
					 div ? ((host->bus_hz / div) >> 1) :
					 host->bus_hz, div);

			/*
			 * If card is polling, display the message only
			 * one time at boot time.
			 */
			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
					slot->mmc->f_min == clock)
				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
		}

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
		slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
					host->bus_hz;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

/*
 * Program the TMOUT register for the given data timeout.
 *
 * Platform drivers may override the whole computation via
 * drv_data->set_data_timeout().  Otherwise the timeout in nanoseconds is
 * converted to card-clock cycles (accounting for CLKDIV), the response
 * timeout field is set to its maximum, and the data timeout field is
 * clamped to 24 bits.
 */
static void dw_mci_set_data_timeout(struct dw_mci *host,
				    unsigned int timeout_ns)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 clk_div, tmout;
	u64 tmp;

	if (drv_data && drv_data->set_data_timeout)
		return drv_data->set_data_timeout(host, timeout_ns);

	clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
	if (clk_div == 0)
		clk_div = 1;

	tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
	tmp = DIV_ROUND_UP_ULL(tmp, clk_div);

	/* TMOUT[7:0] (RESPONSE_TIMEOUT) */
	tmout = 0xFF; /* Set maximum */

	/* TMOUT[31:8] (DATA_TIMEOUT) */
	if (!tmp || tmp > 0xFFFFFF)
		tmout |= (0xFFFFFF << 8);
	else
		tmout |= (tmp & 0xFFFFFF) << 8;

	mci_writel(host, TMOUT, tmout);
	dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
		timeout_ns, tmout >> 8);
}

/*
 * Issue one command (with optional data) to the controller.
 *
 * Resets the per-request status fields, programs byte/block counts and the
 * data timeout when data is attached, submits the data phase, then starts
 * the command.  For CMD11 (SD_SWITCH_VOLTAGE) a 500ms fallback timer is
 * armed in case the voltage-switch interrupt never fires.  The matching
 * stop/abort command is precomputed into host->stop_cmdr.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_data_timeout(host, data->timeout_ns);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating that the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

/*
 * Start a queued request; if the request carries SET_BLOCK_COUNT (sbc),
 * that command is issued first.
 */
static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ?
mrq->sbc : mrq->cmd; 1386053b3ce6SSeungwon Jeon __dw_mci_start_request(host, slot, cmd); 1387053b3ce6SSeungwon Jeon } 1388053b3ce6SSeungwon Jeon 13897456caaeSJames Hogan /* must be called with host->lock held */ 1390f95f3850SWill Newton static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, 1391f95f3850SWill Newton struct mmc_request *mrq) 1392f95f3850SWill Newton { 1393f95f3850SWill Newton dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", 1394f95f3850SWill Newton host->state); 1395f95f3850SWill Newton 1396f95f3850SWill Newton slot->mrq = mrq; 1397f95f3850SWill Newton 139801730558SDoug Anderson if (host->state == STATE_WAITING_CMD11_DONE) { 139901730558SDoug Anderson dev_warn(&slot->mmc->class_dev, 140001730558SDoug Anderson "Voltage change didn't complete\n"); 140101730558SDoug Anderson /* 140201730558SDoug Anderson * this case isn't expected to happen, so we can 140301730558SDoug Anderson * either crash here or just try to continue on 140401730558SDoug Anderson * in the closest possible state 140501730558SDoug Anderson */ 140601730558SDoug Anderson host->state = STATE_IDLE; 140701730558SDoug Anderson } 140801730558SDoug Anderson 1409f95f3850SWill Newton if (host->state == STATE_IDLE) { 1410f95f3850SWill Newton host->state = STATE_SENDING_CMD; 1411f95f3850SWill Newton dw_mci_start_request(host, slot); 1412f95f3850SWill Newton } else { 1413f95f3850SWill Newton list_add_tail(&slot->queue_node, &host->queue); 1414f95f3850SWill Newton } 1415f95f3850SWill Newton } 1416f95f3850SWill Newton 1417f95f3850SWill Newton static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1418f95f3850SWill Newton { 1419f95f3850SWill Newton struct dw_mci_slot *slot = mmc_priv(mmc); 1420f95f3850SWill Newton struct dw_mci *host = slot->host; 1421f95f3850SWill Newton 1422f95f3850SWill Newton WARN_ON(slot->mrq); 1423f95f3850SWill Newton 14247456caaeSJames Hogan /* 14257456caaeSJames Hogan * The check for card presence and queueing of the 
	 * request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		/* No medium: fail the request without touching the hardware. */
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

/* mmc_host_ops.set_ios: apply bus width, timing, clock and power changes. */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set: bit (16 + slot id) of UHS_REG selects DDR sampling. */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/*return, if failed turn on vmmc*/
				return;
			}
		}
		/* Card must get the init sequence on its first command. */
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	/* A non-zero clock while waiting for CMD11 means the switch is done. */
	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

/* mmc_host_ops.card_busy: report whether the card holds DAT[3:0] low. */
static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

/*
 * mmc_host_ops.start_signal_voltage_switch: switch the signalling level
 * between 3.3V and 1.8V.  A variant driver may override this entirely.
 */
static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);
		if (ret < 0) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d - %s V\n",
				ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	/* Only touch UHS_REG once the regulator switch succeeded. */
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

/* mmc_host_ops.get_ro: 1 = write protected, 0 = read-write. */
static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ?
		"read-only" : "read-write");

	return read_only;
}

/*
 * mmc_host_ops.card_hw_reset: toggle the RST_n line to hardware-reset an
 * eMMC device, observing the spec-mandated minimum timings.
 */
static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

/* Disable (prepare=true) or re-enable low-power clock gating for SDIO. */
static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare)
{
	struct dw_mci *host = slot->host;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
	u32 clk_en_a_old;
	u32 clk_en_a;

	/*
	 * Low power mode will stop the card clock when idle.  According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */

	clk_en_a_old = mci_readl(host, CLKENA);
	if (prepare) {
		set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
		clk_en_a = clk_en_a_old & ~clken_low_pwr;
	} else {
		clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
		clk_en_a = clk_en_a_old | clken_low_pwr;
	}

	if (clk_en_a != clk_en_a_old) {
		mci_writel(host, CLKENA, clk_en_a);
		/* Latch the new clock setting into the CIU. */
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT,
			     0);
	}
}

/* Mask/unmask this slot's SDIO interrupt in INTMASK (irq-safe). */
static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
{
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

/* mmc_host_ops.enable_sdio_irq */
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	dw_mci_prepare_sdio_irq(slot, enb);
	__dw_mci_enable_sdio_irq(slot, enb);

	/* Avoid runtime suspending the device when SDIO IRQ is enabled */
	if (enb)
		pm_runtime_get_noresume(host->dev);
	else
		pm_runtime_put_noidle(host->dev);
}

/* mmc_host_ops.ack_sdio_irq: re-arm the SDIO irq after it was handled. */
static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);

	__dw_mci_enable_sdio_irq(slot, 1);
}

/* mmc_host_ops.execute_tuning: delegate to the variant driver, if any. */
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

/* mmc_host_ops.prepare_hs400_tuning: delegate to the variant driver. */
static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

/*
 * Fully reset the controller (CIU + FIFO and, when DMA is in use, the DMA
 * block), then resynchronize the CIU clock registers.  Returns true on
 * success.
 */
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;
	u32 status = 0;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS
		 * register to clear any interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		if (!host->use_dma) {
			ret = true;
			goto ciu_out;
		}

		/* Wait for dma_req to be cleared */
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_DMA_REQ),
					      1, 500 * USEC_PER_MSEC)) {
			dev_err(host->dev,
				"%s: Timeout waiting for dma_req to be cleared\n",
				__func__);
			goto ciu_out;
		}

		/* when using DMA next we reset the fifo again */
		if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
			goto ciu_out;
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also required that we reinit idmac */
		dw_mci_idmac_init(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}

/* Host operations exposed to the MMC core. */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.card_hw_reset		= dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.ack_sdio_irq		= dw_mci_ack_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};

#ifdef CONFIG_FAULT_INJECTION
/* Fault-injection timer: fake a data CRC error mid-transfer. */
static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
{
	struct dw_mci *host = container_of(t, struct dw_mci, fault_timer);
	unsigned long flags;

	spin_lock_irqsave(&host->irq_lock, flags);

	/*
	 * Only inject an error if we haven't already got an error or data over
	 * interrupt.
	 */
	if (!host->data_status) {
		host->data_status = SDMMC_INT_DCRC;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}

	spin_unlock_irqrestore(&host->irq_lock, flags);

	return HRTIMER_NORESTART;
}

/* Arm the fault timer for multi-block transfers when injection triggers. */
static void dw_mci_start_fault_timer(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (!data || data->blocks <= 1)
		return;

	if (!should_fail(&host->fail_data_crc, 1))
		return;

	/*
	 * Try to inject the error at random points during the data transfer.
	 */
	/* NOTE(review): prandom_u32() % 25 has slight modulo bias; fine here. */
	hrtimer_start(&host->fault_timer,
		      ms_to_ktime(prandom_u32() % 25),
		      HRTIMER_MODE_REL);
}

static void dw_mci_stop_fault_timer(struct dw_mci *host)
{
	hrtimer_cancel(&host->fault_timer);
}

/* One-time setup of the fault-injection attributes and timer. */
static void dw_mci_init_fault(struct dw_mci *host)
{
	host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER;

	hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	host->fault_timer.function = dw_mci_fault_timer;
}
#else
/* Fault injection disabled: all hooks are no-ops. */
static void dw_mci_init_fault(struct dw_mci *host)
{
}

static void dw_mci_start_fault_timer(struct dw_mci *host)
{
}

static void dw_mci_stop_fault_timer(struct dw_mci *host)
{
}
#endif

/*
 * Finish @mrq and start the next queued request, if any.  Called with
 * host->lock held; the lock is dropped around mmc_request_done() and
 * re-acquired afterwards.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

/* Decode the command-phase result; returns cmd->error (0 on success). */
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	/* Map controller status bits to the MMC core's error codes. */
	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}

/* Decode the data-phase result; returns data->error (0 on success). */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
			    DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
				   DW_MCI_RECV_STATUS) {
				data->error = -EILSEQ;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EILSEQ;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

/*
 * Arm the software data-read-timeout timer (dto_timer) as a backstop for
 * the hardware DRTO interrupt, sized from the TMOUT clock count and the
 * current clock divider.
 */
static void dw_mci_set_drto(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	unsigned int drto_clks;
	unsigned int drto_div;
	unsigned int drto_ms;
	unsigned long irqflags;

	if (drv_data && drv_data->get_drto_clks)
		drto_clks = drv_data->get_drto_clks(host);
	else
		drto_clks = mci_readl(host, TMOUT) >> 8;
	/* CLKDIV holds (divider / 2); a raw value of 0 means divide by 1. */
	drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (drto_div == 0)
		drto_div = 1;

	drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
				   host->bus_hz);

	dev_dbg(host->dev, "drto_ms: %u\n", drto_ms);

	/* add a bit spare time */
	drto_ms += 10;

	/* Only arm the timer if data completion hasn't already been seen. */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		mod_timer(&host->dto_timer,
			  jiffies + msecs_to_jiffies(drto_ms));
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

/* Atomically claim a pending command-complete event; stops cto_timer. */
static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		return false;

	/*
	 * Really be certain that the timer has stopped.  This is a bit of
	 * paranoia and could only really happen if we had really bad
	 * interrupt latency and the interrupt routine and timeout were
	 * running concurrently so that the del_timer() in the interrupt
	 * handler couldn't run.
	 */
	WARN_ON(del_timer_sync(&host->cto_timer));
	clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);

	return true;
}

/* Atomically claim a pending data-complete event; stops dto_timer. */
static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
{
	if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
		return false;

	/* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
	WARN_ON(del_timer_sync(&host->dto_timer));
	clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);

	return true;
}

/* Bottom-half state machine driving request/command/data completion. */
static void dw_mci_tasklet_func(struct tasklet_struct *t)
{
	struct dw_mci *host = from_tasklet(host, t, tasklet);
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	unsigned int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case
STATE_WAITING_CMD11_DONE: 2083f95f3850SWill Newton break; 2084f95f3850SWill Newton 208501730558SDoug Anderson case STATE_SENDING_CMD11: 2086f95f3850SWill Newton case STATE_SENDING_CMD: 20878892b705SDouglas Anderson if (!dw_mci_clear_pending_cmd_complete(host)) 2088f95f3850SWill Newton break; 2089f95f3850SWill Newton 2090f95f3850SWill Newton cmd = host->cmd; 2091f95f3850SWill Newton host->cmd = NULL; 2092f95f3850SWill Newton set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 2093e352c813SSeungwon Jeon err = dw_mci_command_complete(host, cmd); 2094e352c813SSeungwon Jeon if (cmd == mrq->sbc && !err) { 209542f989c0SJaehoon Chung __dw_mci_start_request(host, host->slot, 2096e352c813SSeungwon Jeon mrq->cmd); 2097053b3ce6SSeungwon Jeon goto unlock; 2098053b3ce6SSeungwon Jeon } 2099053b3ce6SSeungwon Jeon 2100e352c813SSeungwon Jeon if (cmd->data && err) { 210146d17952SDoug Anderson /* 210246d17952SDoug Anderson * During UHS tuning sequence, sending the stop 210346d17952SDoug Anderson * command after the response CRC error would 210446d17952SDoug Anderson * throw the system into a confused state 210546d17952SDoug Anderson * causing all future tuning phases to report 210646d17952SDoug Anderson * failure. 210746d17952SDoug Anderson * 210846d17952SDoug Anderson * In such case controller will move into a data 210946d17952SDoug Anderson * transfer state after a response error or 211046d17952SDoug Anderson * response CRC error. Let's let that finish 211146d17952SDoug Anderson * before trying to send a stop, so we'll go to 211246d17952SDoug Anderson * STATE_SENDING_DATA. 
211346d17952SDoug Anderson * 211446d17952SDoug Anderson * Although letting the data transfer take place 211546d17952SDoug Anderson * will waste a bit of time (we already know 211646d17952SDoug Anderson * the command was bad), it can't cause any 211746d17952SDoug Anderson * errors since it's possible it would have 211846d17952SDoug Anderson * taken place anyway if this tasklet got 211946d17952SDoug Anderson * delayed. Allowing the transfer to take place 212046d17952SDoug Anderson * avoids races and keeps things simple. 212146d17952SDoug Anderson */ 212243592c87SChristian Löhle if (err != -ETIMEDOUT && 212343592c87SChristian Löhle host->dir_status == DW_MCI_RECV_STATUS) { 212446d17952SDoug Anderson state = STATE_SENDING_DATA; 212546d17952SDoug Anderson continue; 212646d17952SDoug Anderson } 212746d17952SDoug Anderson 212890c2143aSSeungwon Jeon send_stop_abort(host, data); 212925f8203bSVincent Whitchurch dw_mci_stop_dma(host); 213071abb133SSeungwon Jeon state = STATE_SENDING_STOP; 213171abb133SSeungwon Jeon break; 213271abb133SSeungwon Jeon } 213371abb133SSeungwon Jeon 2134e352c813SSeungwon Jeon if (!cmd->data || err) { 2135e352c813SSeungwon Jeon dw_mci_request_end(host, mrq); 2136f95f3850SWill Newton goto unlock; 2137f95f3850SWill Newton } 2138f95f3850SWill Newton 2139f95f3850SWill Newton prev_state = state = STATE_SENDING_DATA; 2140df561f66SGustavo A. R. Silva fallthrough; 2141f95f3850SWill Newton 2142f95f3850SWill Newton case STATE_SENDING_DATA: 21432aa35465SDoug Anderson /* 21442aa35465SDoug Anderson * We could get a data error and never a transfer 21452aa35465SDoug Anderson * complete so we'd better check for it here. 21462aa35465SDoug Anderson * 21472aa35465SDoug Anderson * Note that we don't really care if we also got a 21482aa35465SDoug Anderson * transfer complete; stopping the DMA and sending an 21492aa35465SDoug Anderson * abort won't hurt. 
21502aa35465SDoug Anderson */ 2151f95f3850SWill Newton if (test_and_clear_bit(EVENT_DATA_ERROR, 2152f95f3850SWill Newton &host->pending_events)) { 2153e13c3c08SJaehoon Chung if (!(host->data_status & (SDMMC_INT_DRTO | 2154bdb9a90bSaddy ke SDMMC_INT_EBE))) 215590c2143aSSeungwon Jeon send_stop_abort(host, data); 215625f8203bSVincent Whitchurch dw_mci_stop_dma(host); 2157f95f3850SWill Newton state = STATE_DATA_ERROR; 2158f95f3850SWill Newton break; 2159f95f3850SWill Newton } 2160f95f3850SWill Newton 2161f95f3850SWill Newton if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 216257e10486SAddy Ke &host->pending_events)) { 216357e10486SAddy Ke /* 216457e10486SAddy Ke * If all data-related interrupts don't come 216557e10486SAddy Ke * within the given time in reading data state. 216657e10486SAddy Ke */ 216716a34574SJaehoon Chung if (host->dir_status == DW_MCI_RECV_STATUS) 216857e10486SAddy Ke dw_mci_set_drto(host); 2169f95f3850SWill Newton break; 217057e10486SAddy Ke } 2171f95f3850SWill Newton 2172f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 21732aa35465SDoug Anderson 21742aa35465SDoug Anderson /* 21752aa35465SDoug Anderson * Handle an EVENT_DATA_ERROR that might have shown up 21762aa35465SDoug Anderson * before the transfer completed. This might not have 21772aa35465SDoug Anderson * been caught by the check above because the interrupt 21782aa35465SDoug Anderson * could have gone off between the previous check and 21792aa35465SDoug Anderson * the check for transfer complete. 21802aa35465SDoug Anderson * 21812aa35465SDoug Anderson * Technically this ought not be needed assuming we 21822aa35465SDoug Anderson * get a DATA_COMPLETE eventually (we'll notice the 21832aa35465SDoug Anderson * error and end the request), but it shouldn't hurt. 21842aa35465SDoug Anderson * 21852aa35465SDoug Anderson * This has the advantage of sending the stop command. 
21862aa35465SDoug Anderson */ 21872aa35465SDoug Anderson if (test_and_clear_bit(EVENT_DATA_ERROR, 21882aa35465SDoug Anderson &host->pending_events)) { 2189e13c3c08SJaehoon Chung if (!(host->data_status & (SDMMC_INT_DRTO | 2190bdb9a90bSaddy ke SDMMC_INT_EBE))) 21912aa35465SDoug Anderson send_stop_abort(host, data); 219225f8203bSVincent Whitchurch dw_mci_stop_dma(host); 21932aa35465SDoug Anderson state = STATE_DATA_ERROR; 21942aa35465SDoug Anderson break; 21952aa35465SDoug Anderson } 2196f95f3850SWill Newton prev_state = state = STATE_DATA_BUSY; 21972aa35465SDoug Anderson 2198df561f66SGustavo A. R. Silva fallthrough; 2199f95f3850SWill Newton 2200f95f3850SWill Newton case STATE_DATA_BUSY: 220193c23ae3SDouglas Anderson if (!dw_mci_clear_pending_data_complete(host)) { 220257e10486SAddy Ke /* 220357e10486SAddy Ke * If data error interrupt comes but data over 220457e10486SAddy Ke * interrupt doesn't come within the given time. 220557e10486SAddy Ke * in reading data state. 220657e10486SAddy Ke */ 220716a34574SJaehoon Chung if (host->dir_status == DW_MCI_RECV_STATUS) 220857e10486SAddy Ke dw_mci_set_drto(host); 2209f95f3850SWill Newton break; 221057e10486SAddy Ke } 2211f95f3850SWill Newton 22122b8ac062SVincent Whitchurch dw_mci_stop_fault_timer(host); 2213f95f3850SWill Newton host->data = NULL; 2214f95f3850SWill Newton set_bit(EVENT_DATA_COMPLETE, &host->completed_events); 2215e352c813SSeungwon Jeon err = dw_mci_data_complete(host, data); 2216f95f3850SWill Newton 2217e352c813SSeungwon Jeon if (!err) { 2218e352c813SSeungwon Jeon if (!data->stop || mrq->sbc) { 221917c8bc85SSachin Kamat if (mrq->sbc && data->stop) 2220053b3ce6SSeungwon Jeon data->stop->error = 0; 2221e352c813SSeungwon Jeon dw_mci_request_end(host, mrq); 2222053b3ce6SSeungwon Jeon goto unlock; 2223053b3ce6SSeungwon Jeon } 2224053b3ce6SSeungwon Jeon 222590c2143aSSeungwon Jeon /* stop command for open-ended transfer*/ 2226e352c813SSeungwon Jeon if (data->stop) 222790c2143aSSeungwon Jeon send_stop_abort(host, 
data); 22282aa35465SDoug Anderson } else { 22292aa35465SDoug Anderson /* 22302aa35465SDoug Anderson * If we don't have a command complete now we'll 22312aa35465SDoug Anderson * never get one since we just reset everything; 22322aa35465SDoug Anderson * better end the request. 22332aa35465SDoug Anderson * 22342aa35465SDoug Anderson * If we do have a command complete we'll fall 22352aa35465SDoug Anderson * through to the SENDING_STOP command and 22362aa35465SDoug Anderson * everything will be peachy keen. 22372aa35465SDoug Anderson */ 22382aa35465SDoug Anderson if (!test_bit(EVENT_CMD_COMPLETE, 22392aa35465SDoug Anderson &host->pending_events)) { 22402aa35465SDoug Anderson host->cmd = NULL; 22412aa35465SDoug Anderson dw_mci_request_end(host, mrq); 22422aa35465SDoug Anderson goto unlock; 22432aa35465SDoug Anderson } 224490c2143aSSeungwon Jeon } 2245e352c813SSeungwon Jeon 2246e352c813SSeungwon Jeon /* 2247e352c813SSeungwon Jeon * If err has non-zero, 2248e352c813SSeungwon Jeon * stop-abort command has been already issued. 2249e352c813SSeungwon Jeon */ 2250e352c813SSeungwon Jeon prev_state = state = STATE_SENDING_STOP; 2251e352c813SSeungwon Jeon 2252df561f66SGustavo A. R. 
Silva fallthrough; 2253f95f3850SWill Newton 2254f95f3850SWill Newton case STATE_SENDING_STOP: 22558892b705SDouglas Anderson if (!dw_mci_clear_pending_cmd_complete(host)) 2256f95f3850SWill Newton break; 2257f95f3850SWill Newton 225871abb133SSeungwon Jeon /* CMD error in data command */ 225931bff450SSeungwon Jeon if (mrq->cmd->error && mrq->data) 22603a33a94cSSonny Rao dw_mci_reset(host); 226171abb133SSeungwon Jeon 22622b8ac062SVincent Whitchurch dw_mci_stop_fault_timer(host); 2263f95f3850SWill Newton host->cmd = NULL; 226471abb133SSeungwon Jeon host->data = NULL; 226590c2143aSSeungwon Jeon 2266e13c3c08SJaehoon Chung if (!mrq->sbc && mrq->stop) 2267e352c813SSeungwon Jeon dw_mci_command_complete(host, mrq->stop); 226890c2143aSSeungwon Jeon else 226990c2143aSSeungwon Jeon host->cmd_status = 0; 227090c2143aSSeungwon Jeon 2271e352c813SSeungwon Jeon dw_mci_request_end(host, mrq); 2272f95f3850SWill Newton goto unlock; 2273f95f3850SWill Newton 2274f95f3850SWill Newton case STATE_DATA_ERROR: 2275f95f3850SWill Newton if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 2276f95f3850SWill Newton &host->pending_events)) 2277f95f3850SWill Newton break; 2278f95f3850SWill Newton 2279f95f3850SWill Newton state = STATE_DATA_BUSY; 2280f95f3850SWill Newton break; 2281f95f3850SWill Newton } 2282f95f3850SWill Newton } while (state != prev_state); 2283f95f3850SWill Newton 2284f95f3850SWill Newton host->state = state; 2285f95f3850SWill Newton unlock: 2286f95f3850SWill Newton spin_unlock(&host->lock); 2287f95f3850SWill Newton 2288f95f3850SWill Newton } 2289f95f3850SWill Newton 229034b664a2SJames Hogan /* push final bytes to part_buf, only use during push */ 229134b664a2SJames Hogan static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 229234b664a2SJames Hogan { 229334b664a2SJames Hogan memcpy((void *)&host->part_buf, buf, cnt); 229434b664a2SJames Hogan host->part_buf_count = cnt; 229534b664a2SJames Hogan } 229634b664a2SJames Hogan 229734b664a2SJames Hogan /* append bytes to 
part_buf, only use during push */ 229834b664a2SJames Hogan static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) 229934b664a2SJames Hogan { 230034b664a2SJames Hogan cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); 230134b664a2SJames Hogan memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); 230234b664a2SJames Hogan host->part_buf_count += cnt; 230334b664a2SJames Hogan return cnt; 230434b664a2SJames Hogan } 230534b664a2SJames Hogan 230634b664a2SJames Hogan /* pull first bytes from part_buf, only use during pull */ 230734b664a2SJames Hogan static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 230834b664a2SJames Hogan { 23090e3a22c0SShawn Lin cnt = min_t(int, cnt, host->part_buf_count); 231034b664a2SJames Hogan if (cnt) { 231134b664a2SJames Hogan memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 231234b664a2SJames Hogan cnt); 231334b664a2SJames Hogan host->part_buf_count -= cnt; 231434b664a2SJames Hogan host->part_buf_start += cnt; 231534b664a2SJames Hogan } 231634b664a2SJames Hogan return cnt; 231734b664a2SJames Hogan } 231834b664a2SJames Hogan 231934b664a2SJames Hogan /* pull final bytes from the part_buf, assuming it's just been filled */ 232034b664a2SJames Hogan static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) 232134b664a2SJames Hogan { 232234b664a2SJames Hogan memcpy(buf, &host->part_buf, cnt); 232334b664a2SJames Hogan host->part_buf_start = cnt; 232434b664a2SJames Hogan host->part_buf_count = (1 << host->data_shift) - cnt; 232534b664a2SJames Hogan } 232634b664a2SJames Hogan 2327f95f3850SWill Newton static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 2328f95f3850SWill Newton { 2329cfbeb59cSMarkos Chandras struct mmc_data *data = host->data; 2330cfbeb59cSMarkos Chandras int init_cnt = cnt; 2331cfbeb59cSMarkos Chandras 233234b664a2SJames Hogan /* try and push anything in the part_buf */ 233334b664a2SJames Hogan if 
(unlikely(host->part_buf_count)) { 233434b664a2SJames Hogan int len = dw_mci_push_part_bytes(host, buf, cnt); 23350e3a22c0SShawn Lin 233634b664a2SJames Hogan buf += len; 233734b664a2SJames Hogan cnt -= len; 2338cfbeb59cSMarkos Chandras if (host->part_buf_count == 2) { 233976184ac1SBen Dooks mci_fifo_writew(host->fifo_reg, host->part_buf16); 234034b664a2SJames Hogan host->part_buf_count = 0; 234134b664a2SJames Hogan } 234234b664a2SJames Hogan } 234334b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 234434b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x1)) { 234534b664a2SJames Hogan while (cnt >= 2) { 234634b664a2SJames Hogan u16 aligned_buf[64]; 234734b664a2SJames Hogan int len = min(cnt & -2, (int)sizeof(aligned_buf)); 234834b664a2SJames Hogan int items = len >> 1; 234934b664a2SJames Hogan int i; 235034b664a2SJames Hogan /* memcpy from input buffer into aligned buffer */ 235134b664a2SJames Hogan memcpy(aligned_buf, buf, len); 235234b664a2SJames Hogan buf += len; 235334b664a2SJames Hogan cnt -= len; 235434b664a2SJames Hogan /* push data from aligned buffer into fifo */ 235534b664a2SJames Hogan for (i = 0; i < items; ++i) 235676184ac1SBen Dooks mci_fifo_writew(host->fifo_reg, aligned_buf[i]); 235734b664a2SJames Hogan } 235834b664a2SJames Hogan } else 235934b664a2SJames Hogan #endif 236034b664a2SJames Hogan { 236134b664a2SJames Hogan u16 *pdata = buf; 23620e3a22c0SShawn Lin 236334b664a2SJames Hogan for (; cnt >= 2; cnt -= 2) 236476184ac1SBen Dooks mci_fifo_writew(host->fifo_reg, *pdata++); 236534b664a2SJames Hogan buf = pdata; 236634b664a2SJames Hogan } 236734b664a2SJames Hogan /* put anything remaining in the part_buf */ 236834b664a2SJames Hogan if (cnt) { 236934b664a2SJames Hogan dw_mci_set_part_bytes(host, buf, cnt); 2370cfbeb59cSMarkos Chandras /* Push data if we have reached the expected data length */ 2371cfbeb59cSMarkos Chandras if ((data->bytes_xfered + init_cnt) == 2372cfbeb59cSMarkos Chandras (data->blksz * data->blocks)) 
237376184ac1SBen Dooks mci_fifo_writew(host->fifo_reg, host->part_buf16); 2374f95f3850SWill Newton } 2375f95f3850SWill Newton } 2376f95f3850SWill Newton 2377f95f3850SWill Newton static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 2378f95f3850SWill Newton { 237934b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 238034b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x1)) { 238134b664a2SJames Hogan while (cnt >= 2) { 238234b664a2SJames Hogan /* pull data from fifo into aligned buffer */ 238334b664a2SJames Hogan u16 aligned_buf[64]; 238434b664a2SJames Hogan int len = min(cnt & -2, (int)sizeof(aligned_buf)); 238534b664a2SJames Hogan int items = len >> 1; 238634b664a2SJames Hogan int i; 23870e3a22c0SShawn Lin 238834b664a2SJames Hogan for (i = 0; i < items; ++i) 238976184ac1SBen Dooks aligned_buf[i] = mci_fifo_readw(host->fifo_reg); 239034b664a2SJames Hogan /* memcpy from aligned buffer into output buffer */ 239134b664a2SJames Hogan memcpy(buf, aligned_buf, len); 239234b664a2SJames Hogan buf += len; 239334b664a2SJames Hogan cnt -= len; 239434b664a2SJames Hogan } 239534b664a2SJames Hogan } else 239634b664a2SJames Hogan #endif 239734b664a2SJames Hogan { 239834b664a2SJames Hogan u16 *pdata = buf; 23990e3a22c0SShawn Lin 240034b664a2SJames Hogan for (; cnt >= 2; cnt -= 2) 240176184ac1SBen Dooks *pdata++ = mci_fifo_readw(host->fifo_reg); 240234b664a2SJames Hogan buf = pdata; 240334b664a2SJames Hogan } 240434b664a2SJames Hogan if (cnt) { 240576184ac1SBen Dooks host->part_buf16 = mci_fifo_readw(host->fifo_reg); 240634b664a2SJames Hogan dw_mci_pull_final_bytes(host, buf, cnt); 2407f95f3850SWill Newton } 2408f95f3850SWill Newton } 2409f95f3850SWill Newton 2410f95f3850SWill Newton static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 2411f95f3850SWill Newton { 2412cfbeb59cSMarkos Chandras struct mmc_data *data = host->data; 2413cfbeb59cSMarkos Chandras int init_cnt = cnt; 2414cfbeb59cSMarkos Chandras 241534b664a2SJames 
Hogan /* try and push anything in the part_buf */ 241634b664a2SJames Hogan if (unlikely(host->part_buf_count)) { 241734b664a2SJames Hogan int len = dw_mci_push_part_bytes(host, buf, cnt); 24180e3a22c0SShawn Lin 241934b664a2SJames Hogan buf += len; 242034b664a2SJames Hogan cnt -= len; 2421cfbeb59cSMarkos Chandras if (host->part_buf_count == 4) { 242276184ac1SBen Dooks mci_fifo_writel(host->fifo_reg, host->part_buf32); 242334b664a2SJames Hogan host->part_buf_count = 0; 242434b664a2SJames Hogan } 242534b664a2SJames Hogan } 242634b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 242734b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x3)) { 242834b664a2SJames Hogan while (cnt >= 4) { 242934b664a2SJames Hogan u32 aligned_buf[32]; 243034b664a2SJames Hogan int len = min(cnt & -4, (int)sizeof(aligned_buf)); 243134b664a2SJames Hogan int items = len >> 2; 243234b664a2SJames Hogan int i; 243334b664a2SJames Hogan /* memcpy from input buffer into aligned buffer */ 243434b664a2SJames Hogan memcpy(aligned_buf, buf, len); 243534b664a2SJames Hogan buf += len; 243634b664a2SJames Hogan cnt -= len; 243734b664a2SJames Hogan /* push data from aligned buffer into fifo */ 243834b664a2SJames Hogan for (i = 0; i < items; ++i) 243976184ac1SBen Dooks mci_fifo_writel(host->fifo_reg, aligned_buf[i]); 244034b664a2SJames Hogan } 244134b664a2SJames Hogan } else 244234b664a2SJames Hogan #endif 244334b664a2SJames Hogan { 244434b664a2SJames Hogan u32 *pdata = buf; 24450e3a22c0SShawn Lin 244634b664a2SJames Hogan for (; cnt >= 4; cnt -= 4) 244776184ac1SBen Dooks mci_fifo_writel(host->fifo_reg, *pdata++); 244834b664a2SJames Hogan buf = pdata; 244934b664a2SJames Hogan } 245034b664a2SJames Hogan /* put anything remaining in the part_buf */ 245134b664a2SJames Hogan if (cnt) { 245234b664a2SJames Hogan dw_mci_set_part_bytes(host, buf, cnt); 2453cfbeb59cSMarkos Chandras /* Push data if we have reached the expected data length */ 2454cfbeb59cSMarkos Chandras if ((data->bytes_xfered + 
init_cnt) == 2455cfbeb59cSMarkos Chandras (data->blksz * data->blocks)) 245676184ac1SBen Dooks mci_fifo_writel(host->fifo_reg, host->part_buf32); 2457f95f3850SWill Newton } 2458f95f3850SWill Newton } 2459f95f3850SWill Newton 2460f95f3850SWill Newton static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 2461f95f3850SWill Newton { 246234b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 246334b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x3)) { 246434b664a2SJames Hogan while (cnt >= 4) { 246534b664a2SJames Hogan /* pull data from fifo into aligned buffer */ 246634b664a2SJames Hogan u32 aligned_buf[32]; 246734b664a2SJames Hogan int len = min(cnt & -4, (int)sizeof(aligned_buf)); 246834b664a2SJames Hogan int items = len >> 2; 246934b664a2SJames Hogan int i; 24700e3a22c0SShawn Lin 247134b664a2SJames Hogan for (i = 0; i < items; ++i) 247276184ac1SBen Dooks aligned_buf[i] = mci_fifo_readl(host->fifo_reg); 247334b664a2SJames Hogan /* memcpy from aligned buffer into output buffer */ 247434b664a2SJames Hogan memcpy(buf, aligned_buf, len); 247534b664a2SJames Hogan buf += len; 247634b664a2SJames Hogan cnt -= len; 247734b664a2SJames Hogan } 247834b664a2SJames Hogan } else 247934b664a2SJames Hogan #endif 248034b664a2SJames Hogan { 248134b664a2SJames Hogan u32 *pdata = buf; 24820e3a22c0SShawn Lin 248334b664a2SJames Hogan for (; cnt >= 4; cnt -= 4) 248476184ac1SBen Dooks *pdata++ = mci_fifo_readl(host->fifo_reg); 248534b664a2SJames Hogan buf = pdata; 248634b664a2SJames Hogan } 248734b664a2SJames Hogan if (cnt) { 248876184ac1SBen Dooks host->part_buf32 = mci_fifo_readl(host->fifo_reg); 248934b664a2SJames Hogan dw_mci_pull_final_bytes(host, buf, cnt); 2490f95f3850SWill Newton } 2491f95f3850SWill Newton } 2492f95f3850SWill Newton 2493f95f3850SWill Newton static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 2494f95f3850SWill Newton { 2495cfbeb59cSMarkos Chandras struct mmc_data *data = host->data; 2496cfbeb59cSMarkos Chandras 
int init_cnt = cnt; 2497cfbeb59cSMarkos Chandras 249834b664a2SJames Hogan /* try and push anything in the part_buf */ 249934b664a2SJames Hogan if (unlikely(host->part_buf_count)) { 250034b664a2SJames Hogan int len = dw_mci_push_part_bytes(host, buf, cnt); 25010e3a22c0SShawn Lin 250234b664a2SJames Hogan buf += len; 250334b664a2SJames Hogan cnt -= len; 2504c09fbd74SSeungwon Jeon 2505cfbeb59cSMarkos Chandras if (host->part_buf_count == 8) { 250676184ac1SBen Dooks mci_fifo_writeq(host->fifo_reg, host->part_buf); 250734b664a2SJames Hogan host->part_buf_count = 0; 250834b664a2SJames Hogan } 250934b664a2SJames Hogan } 251034b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 251134b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x7)) { 251234b664a2SJames Hogan while (cnt >= 8) { 251334b664a2SJames Hogan u64 aligned_buf[16]; 251434b664a2SJames Hogan int len = min(cnt & -8, (int)sizeof(aligned_buf)); 251534b664a2SJames Hogan int items = len >> 3; 251634b664a2SJames Hogan int i; 251734b664a2SJames Hogan /* memcpy from input buffer into aligned buffer */ 251834b664a2SJames Hogan memcpy(aligned_buf, buf, len); 251934b664a2SJames Hogan buf += len; 252034b664a2SJames Hogan cnt -= len; 252134b664a2SJames Hogan /* push data from aligned buffer into fifo */ 252234b664a2SJames Hogan for (i = 0; i < items; ++i) 252376184ac1SBen Dooks mci_fifo_writeq(host->fifo_reg, aligned_buf[i]); 252434b664a2SJames Hogan } 252534b664a2SJames Hogan } else 252634b664a2SJames Hogan #endif 252734b664a2SJames Hogan { 252834b664a2SJames Hogan u64 *pdata = buf; 25290e3a22c0SShawn Lin 253034b664a2SJames Hogan for (; cnt >= 8; cnt -= 8) 253176184ac1SBen Dooks mci_fifo_writeq(host->fifo_reg, *pdata++); 253234b664a2SJames Hogan buf = pdata; 253334b664a2SJames Hogan } 253434b664a2SJames Hogan /* put anything remaining in the part_buf */ 253534b664a2SJames Hogan if (cnt) { 253634b664a2SJames Hogan dw_mci_set_part_bytes(host, buf, cnt); 2537cfbeb59cSMarkos Chandras /* Push data if we have 
reached the expected data length */ 2538cfbeb59cSMarkos Chandras if ((data->bytes_xfered + init_cnt) == 2539cfbeb59cSMarkos Chandras (data->blksz * data->blocks)) 254076184ac1SBen Dooks mci_fifo_writeq(host->fifo_reg, host->part_buf); 2541f95f3850SWill Newton } 2542f95f3850SWill Newton } 2543f95f3850SWill Newton 2544f95f3850SWill Newton static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 2545f95f3850SWill Newton { 254634b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 254734b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x7)) { 254834b664a2SJames Hogan while (cnt >= 8) { 254934b664a2SJames Hogan /* pull data from fifo into aligned buffer */ 255034b664a2SJames Hogan u64 aligned_buf[16]; 255134b664a2SJames Hogan int len = min(cnt & -8, (int)sizeof(aligned_buf)); 255234b664a2SJames Hogan int items = len >> 3; 255334b664a2SJames Hogan int i; 25540e3a22c0SShawn Lin 255534b664a2SJames Hogan for (i = 0; i < items; ++i) 255676184ac1SBen Dooks aligned_buf[i] = mci_fifo_readq(host->fifo_reg); 255776184ac1SBen Dooks 255834b664a2SJames Hogan /* memcpy from aligned buffer into output buffer */ 255934b664a2SJames Hogan memcpy(buf, aligned_buf, len); 256034b664a2SJames Hogan buf += len; 256134b664a2SJames Hogan cnt -= len; 2562f95f3850SWill Newton } 256334b664a2SJames Hogan } else 256434b664a2SJames Hogan #endif 256534b664a2SJames Hogan { 256634b664a2SJames Hogan u64 *pdata = buf; 25670e3a22c0SShawn Lin 256834b664a2SJames Hogan for (; cnt >= 8; cnt -= 8) 256976184ac1SBen Dooks *pdata++ = mci_fifo_readq(host->fifo_reg); 257034b664a2SJames Hogan buf = pdata; 257134b664a2SJames Hogan } 257234b664a2SJames Hogan if (cnt) { 257376184ac1SBen Dooks host->part_buf = mci_fifo_readq(host->fifo_reg); 257434b664a2SJames Hogan dw_mci_pull_final_bytes(host, buf, cnt); 257534b664a2SJames Hogan } 257634b664a2SJames Hogan } 257734b664a2SJames Hogan 257834b664a2SJames Hogan static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) 
257934b664a2SJames Hogan { 258034b664a2SJames Hogan int len; 258134b664a2SJames Hogan 258234b664a2SJames Hogan /* get remaining partial bytes */ 258334b664a2SJames Hogan len = dw_mci_pull_part_bytes(host, buf, cnt); 258434b664a2SJames Hogan if (unlikely(len == cnt)) 258534b664a2SJames Hogan return; 258634b664a2SJames Hogan buf += len; 258734b664a2SJames Hogan cnt -= len; 258834b664a2SJames Hogan 258934b664a2SJames Hogan /* get the rest of the data */ 259034b664a2SJames Hogan host->pull_data(host, buf, cnt); 2591f95f3850SWill Newton } 2592f95f3850SWill Newton 259387a74d39SKyoungil Kim static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) 2594f95f3850SWill Newton { 2595f9c2a0dcSSeungwon Jeon struct sg_mapping_iter *sg_miter = &host->sg_miter; 2596f9c2a0dcSSeungwon Jeon void *buf; 2597f9c2a0dcSSeungwon Jeon unsigned int offset; 2598f95f3850SWill Newton struct mmc_data *data = host->data; 2599f95f3850SWill Newton int shift = host->data_shift; 2600f95f3850SWill Newton u32 status; 26013e4b0d8bSMarkos Chandras unsigned int len; 2602f9c2a0dcSSeungwon Jeon unsigned int remain, fcnt; 2603f95f3850SWill Newton 2604f95f3850SWill Newton do { 2605f9c2a0dcSSeungwon Jeon if (!sg_miter_next(sg_miter)) 2606f9c2a0dcSSeungwon Jeon goto done; 2607f95f3850SWill Newton 26084225fc85SImre Deak host->sg = sg_miter->piter.sg; 2609f9c2a0dcSSeungwon Jeon buf = sg_miter->addr; 2610f9c2a0dcSSeungwon Jeon remain = sg_miter->length; 2611f9c2a0dcSSeungwon Jeon offset = 0; 2612f9c2a0dcSSeungwon Jeon 2613f9c2a0dcSSeungwon Jeon do { 2614f9c2a0dcSSeungwon Jeon fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) 2615f9c2a0dcSSeungwon Jeon << shift) + host->part_buf_count; 2616f9c2a0dcSSeungwon Jeon len = min(remain, fcnt); 2617f9c2a0dcSSeungwon Jeon if (!len) 2618f9c2a0dcSSeungwon Jeon break; 2619f9c2a0dcSSeungwon Jeon dw_mci_pull_data(host, (void *)(buf + offset), len); 26203e4b0d8bSMarkos Chandras data->bytes_xfered += len; 2621f95f3850SWill Newton offset += len; 2622f9c2a0dcSSeungwon Jeon remain 
-= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

/*
 * Drain the current scatterlist into the TX FIFO using PIO, pushing as
 * many bytes as the FIFO has room for (fcnt accounts for any partial
 * word buffered in host->part_buf), and keep going while the controller
 * keeps raising TXDR.  On scatterlist exhaustion, signal XFER_COMPLETE.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free FIFO space in bytes, minus any buffered partial word */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

/*
 * Record a completed (or errored) command and kick the state-machine
 * tasklet.  Both call sites in dw_mci_interrupt() hold host->irq_lock
 * when calling this, which makes the del_timer(cto_timer) safe against
 * the timer handler.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	del_timer(&host->cto_timer);

	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb(); /* drain writebuffer */

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);

	dw_mci_start_fault_timer(host);
}

/* Forward a card-detect event to the MMC core, debounced by platform data. */
static void dw_mci_handle_cd(struct dw_mci *host)
{
	struct dw_mci_slot *slot = host->slot;

	mmc_detect_change(slot->mmc,
		msecs_to_jiffies(host->pdata->detect_delay_ms));
}

/*
 * Top-level interrupt handler: acknowledges and dispatches all MMC
 * controller interrupts (voltage switch, command/data errors, transfer
 * completion, PIO FIFO service, card detect, SDIO), then services the
 * internal DMAC status registers when IDMAC is in use.
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	struct dw_mci_slot *slot = host->slot;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;

			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock(&host->irq_lock);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock(&host->irq_lock);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			spin_lock(&host->irq_lock);

			del_timer(&host->cto_timer);
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);

			spin_unlock(&host->irq_lock);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			spin_lock(&host->irq_lock);

			if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
				del_timer(&host->dto_timer);

			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);

			if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT)
				/* In case of error, we cannot expect a DTO */
				set_bit(EVENT_DATA_COMPLETE,
					&host->pending_events);

			tasklet_schedule(&host->tasklet);

			spin_unlock(&host->irq_lock);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			spin_lock(&host->irq_lock);

			del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);

			spin_unlock(&host->irq_lock);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			spin_lock(&host->irq_lock);

			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);

			spin_unlock(&host->irq_lock);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
			mci_writel(host, RINTSTS,
				   SDMMC_INT_SDIO(slot->sdio_id));
			__dw_mci_enable_sdio_irq(slot, 0);
			sdio_signal_irq(slot->mmc);
		}

	}

	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}

/*
 * Combine capability sources for the slot's mmc_host: platform data,
 * driver-specific common caps, per-controller caps (indexed by the
 * "mshc" DT alias or platform-device id), and frequency limits.
 * Returns 0 on success or -EINVAL for an out-of-range controller id.
 */
static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct mmc_host *mmc = slot->mmc;
	int ctrl_id;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (drv_data)
		mmc->caps |= drv_data->common_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}

	if (drv_data && drv_data->caps) {
		if (ctrl_id >= drv_data->num_caps) {
			dev_err(host->dev, "invalid controller id %d\n",
				ctrl_id);
			return -EINVAL;
		}
		mmc->caps |= drv_data->caps[ctrl_id];
	}

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	/* if host has set a minimum_freq, we should respect it */
	if (host->minimum_speed)
		mmc->f_min = host->minimum_speed;
	else
		mmc->f_min = DW_MCI_FREQ_MIN;

	if (!mmc->f_max)
		mmc->f_max = DW_MCI_FREQ_MAX;

	/* Process SDIO IRQs through the sdio_irq_work. */
	if (mmc->caps & MMC_CAP_SDIO_IRQ)
		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	return 0;
}

/*
 * Allocate and register the single mmc_host ("slot") for this controller,
 * wiring up regulators, DT properties, capabilities and transfer-size
 * limits that depend on the selected DMA mode.  On any failure the
 * allocated host is freed and the error code returned.
 */
static int dw_mci_init_slot(struct dw_mci *host)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = 0;
	slot->sdio_id = host->sdio_id0 + slot->id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot = slot;

	mmc->ops = &dw_mci_ops;

	/*if there are external regulators, get them*/
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	ret = dw_mci_init_slot_caps(slot);
	if (ret)
		goto err_host_allocated;

	/* Useful defaults if platform data is unset. */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65535;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	dw_mci_get_cd(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}

/* Unregister and free the slot's mmc_host; inverse of dw_mci_init_slot(). */
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot = NULL;
	mmc_free_host(slot->mmc);
}

/*
 * Probe HCON to pick the transfer mode (internal IDMAC, external DMA, or
 * PIO), configure the DMA mask and descriptor ring for IDMAC, and bind
 * the matching dma_ops.  Any failure falls back to PIO via the no_dma
 * label rather than failing the probe.
 */
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;

	/*
	 * Check transfer mode from HCON[17:16]
	 * Clear the ambiguous description of dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
	 * simpler request/acknowledge handshake mechanism and both of them
	 * are regarded as external dma master for dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check ADDR_CONFIG bit in HCON to find
		 * IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev,
						   DESC_RING_BUF_SZ,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((device_property_string_array_count(dev, "dma-names") < 0) ||
		    !device_property_present(dev, "dmas")) {
			goto no_dma;
		}
		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}

/*
 * Timeout for the voltage-switch command (CMD11): if the controller never
 * signalled completion, fake a response timeout so the state machine can
 * recover.
 */
static void dw_mci_cmd11_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, cmd11_timer);

	if (host->state != STATE_SENDING_CMD11) {
		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
		return;
	}

	host->cmd_status = SDMMC_INT_RTO;
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

/*
 * Command-timeout (CTO) watchdog: if CMD_DONE never arrived, report a
 * response timeout.  Runs under host->irq_lock to serialize against the
 * interrupt handler.
 */
static void dw_mci_cto_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, cto_timer);
	unsigned long irqflags;
	u32 pending;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/*
	 * If somehow we have very bad interrupt latency it's remotely possible
	 * that the timer could fire while the interrupt is still pending or
	 * while the interrupt is midway through running.  Let's be paranoid
	 * and detect those two cases.  Note that this paranoia is somewhat
	 * justified because in this function we don't actually cancel the
	 * pending command in the controller--we just assume it will never come.
	 */
	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
	if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
		/* The interrupt should fire; no need to act but we can warn */
		dev_warn(host->dev, "Unexpected interrupt latency\n");
		goto exit;
	}
	if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
		/* Presumably interrupt handler couldn't delete the timer */
		dev_warn(host->dev, "CTO timeout when already completed\n");
		goto exit;
	}

	/*
	 * Continued paranoia to make sure we're in the state we expect.
	 * This paranoia isn't really justified but it seems good to be safe.
	 */
	switch (host->state) {
	case STATE_SENDING_CMD11:
	case STATE_SENDING_CMD:
	case STATE_SENDING_STOP:
		/*
		 * If CMD_DONE interrupt does NOT come in sending command
		 * state, we should notify the driver to terminate current
		 * transfer and report a command timeout to the core.
		 */
		host->cmd_status = SDMMC_INT_RTO;
		set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		dev_warn(host->dev, "Unexpected command timeout, state %d\n",
			 host->state);
		break;
	}

exit:
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

/*
 * Data-timeout (DTO) watchdog: if DATA_OVER never arrived while sending
 * or waiting for data, report a data read timeout.  Same locking and
 * paranoia rules as dw_mci_cto_timer().
 */
static void dw_mci_dto_timer(struct timer_list *t)
{
	struct dw_mci *host = from_timer(host, t, dto_timer);
	unsigned long irqflags;
	u32 pending;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/*
	 * The DTO timer is much longer than the CTO timer, so it's even less
	 * likely that we'll hit these cases, but it pays to be paranoid.
	 */
	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
	if (pending & SDMMC_INT_DATA_OVER) {
		/* The interrupt should fire; no need to act but we can warn */
		dev_warn(host->dev, "Unexpected data interrupt latency\n");
		goto exit;
	}
	if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
		/* Presumably interrupt handler couldn't delete the timer */
		dev_warn(host->dev, "DTO timeout when already completed\n");
		goto exit;
	}

	/*
	 * Continued paranoia to make sure we're in the state we expect.
	 * This paranoia isn't really justified but it seems good to be safe.
	 */
	switch (host->state) {
	case STATE_SENDING_DATA:
	case STATE_DATA_BUSY:
		/*
		 * If DTO interrupt does NOT come in sending data state,
		 * we should notify the driver to terminate current transfer
		 * and report a data timeout to the core.
		 */
		host->data_status = SDMMC_INT_DRTO;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		dev_warn(host->dev, "Unexpected data timeout, state %d\n",
			 host->state);
		break;
	}

exit:
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

#ifdef CONFIG_OF
/*
 * Build a dw_mci_board from device-tree / device properties (reset line,
 * fifo-depth, card-detect-delay, clock-frequency, ...), then give the
 * variant driver a chance to parse its own properties.  Returns the new
 * pdata or an ERR_PTR.
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find reset controller when exist */
	pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
	if (IS_ERR(pdata->rstc))
		return ERR_CAST(pdata->rstc);

	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	device_property_read_u32(dev, "card-detect-delay",
				 &pdata->detect_delay_ms);

	device_property_read_u32(dev, "data-addr", &host->data_addr_override);

	if (device_property_present(dev, "fifo-watermark-aligned"))
		host->wm_aligned = true;

	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */

/*
 * Unmask the controller's card-detect interrupt, but only when there is
 * no usable CD GPIO and the host isn't polling for card presence.
 */
static void dw_mci_enable_cd(struct dw_mci *host)
{
	unsigned long irqflags;
	u32 temp;

	/*
	 * No need for CD if all slots have a non-error GPIO
	 * as well as broken card detection is found.
	 */
	if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
		return;

	if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_CD;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}
}

/*
 * Main controller probe: parse platform data, bring up the biu/ciu
 * clocks, run the optional hardware reset and variant init, set up the
 * watchdog timers and locks, detect the host data width from HCON,
 * reset the IP and initialize DMA before programming the FIFO.
 */
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata))
			return dev_err_probe(host->dev, PTR_ERR(host->pdata),
					     "platform data not available\n");
	}

	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	if (host->pdata->rstc) {
		reset_control_assert(host->pdata->rstc);
		usleep_range(10, 50);
		reset_control_deassert(host->pdata->rstc);
	}

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
	timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
	timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);

	spin_lock_init(&host->lock);
	spin_lock_init(&host->irq_lock);
	INIT_LIST_HEAD(&host->queue);

	dw_mci_init_fault(host);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}

	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		ret = -ENODEV;
		goto err_clk_ciu;
	}

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
	 *                          Tx Mark = fifo_size / 2 DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
3411b86d8253SJames Hogan */ 3412f95f3850SWill Newton fifo_size = mci_readl(host, FIFOTH); 34138234e869SJaehoon Chung fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 3414b86d8253SJames Hogan } else { 3415b86d8253SJames Hogan fifo_size = host->pdata->fifo_depth; 3416b86d8253SJames Hogan } 3417b86d8253SJames Hogan host->fifo_depth = fifo_size; 341852426899SSeungwon Jeon host->fifoth_val = 341952426899SSeungwon Jeon SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); 3420e61cf118SJaehoon Chung mci_writel(host, FIFOTH, host->fifoth_val); 3421f95f3850SWill Newton 3422f95f3850SWill Newton /* disable clock to CIU */ 3423f95f3850SWill Newton mci_writel(host, CLKENA, 0); 3424f95f3850SWill Newton mci_writel(host, CLKSRC, 0); 3425f95f3850SWill Newton 342663008768SJames Hogan /* 342763008768SJames Hogan * In 2.40a spec, Data offset is changed. 342863008768SJames Hogan * Need to check the version-id and set data-offset for DATA register. 342963008768SJames Hogan */ 343063008768SJames Hogan host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 343163008768SJames Hogan dev_info(host->dev, "Version ID is %04x\n", host->verid); 343263008768SJames Hogan 3433a0361c1aSJun Nie if (host->data_addr_override) 3434a0361c1aSJun Nie host->fifo_reg = host->regs + host->data_addr_override; 3435a0361c1aSJun Nie else if (host->verid < DW_MMC_240A) 343676184ac1SBen Dooks host->fifo_reg = host->regs + DATA_OFFSET; 343763008768SJames Hogan else 343876184ac1SBen Dooks host->fifo_reg = host->regs + DATA_240A_OFFSET; 343963008768SJames Hogan 34406078df15SEmil Renner Berthing tasklet_setup(&host->tasklet, dw_mci_tasklet_func); 3441780f22afSSeungwon Jeon ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 3442780f22afSSeungwon Jeon host->irq_flags, "dw-mci", host); 3443f95f3850SWill Newton if (ret) 34446130e7a9SDoug Anderson goto err_dmaunmap; 3445f95f3850SWill Newton 3446d30a8f7bSJaehoon Chung /* 3447fa0c3283SDoug Anderson * Enable interrupts for command done, data over, data empty, 
34482da1d7f2SYuvaraj CD * receive ready and error such as transmit, receive timeout, crc error 34492da1d7f2SYuvaraj CD */ 34502da1d7f2SYuvaraj CD mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 34512da1d7f2SYuvaraj CD SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3452fa0c3283SDoug Anderson DW_MCI_ERROR_FLAGS); 34530e3a22c0SShawn Lin /* Enable mci interrupt */ 34540e3a22c0SShawn Lin mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 34552da1d7f2SYuvaraj CD 34560e3a22c0SShawn Lin dev_info(host->dev, 34570e3a22c0SShawn Lin "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n", 34582da1d7f2SYuvaraj CD host->irq, width, fifo_size); 34592da1d7f2SYuvaraj CD 3460f95f3850SWill Newton /* We need at least one slot to succeed */ 3461e4a65ef7SJaehoon Chung ret = dw_mci_init_slot(host); 346258870241SJaehoon Chung if (ret) { 34631c2215b7SThomas Abraham dev_dbg(host->dev, "slot %d init failed\n", i); 34646130e7a9SDoug Anderson goto err_dmaunmap; 3465f95f3850SWill Newton } 3466f95f3850SWill Newton 3467b793f658SDoug Anderson /* Now that slots are all setup, we can enable card detect */ 3468b793f658SDoug Anderson dw_mci_enable_cd(host); 3469b793f658SDoug Anderson 3470f95f3850SWill Newton return 0; 3471f95f3850SWill Newton 3472f95f3850SWill Newton err_dmaunmap: 3473f95f3850SWill Newton if (host->use_dma && host->dma_ops->exit) 3474f95f3850SWill Newton host->dma_ops->exit(host); 3475f90a0612SThomas Abraham 3476d6786fefSGuodong Xu reset_control_assert(host->pdata->rstc); 3477d6786fefSGuodong Xu 3478f90a0612SThomas Abraham err_clk_ciu: 3479f90a0612SThomas Abraham clk_disable_unprepare(host->ciu_clk); 3480780f22afSSeungwon Jeon 3481f90a0612SThomas Abraham err_clk_biu: 3482f90a0612SThomas Abraham clk_disable_unprepare(host->biu_clk); 3483780f22afSSeungwon Jeon 3484f95f3850SWill Newton return ret; 3485f95f3850SWill Newton } 348662ca8034SShashidhar Hiremath EXPORT_SYMBOL(dw_mci_probe); 3487f95f3850SWill Newton 348862ca8034SShashidhar Hiremath void dw_mci_remove(struct 
dw_mci *host) 3489f95f3850SWill Newton { 3490e4a65ef7SJaehoon Chung dev_dbg(host->dev, "remove slot\n"); 3491b23475faSJaehoon Chung if (host->slot) 3492e4a65ef7SJaehoon Chung dw_mci_cleanup_slot(host->slot); 3493f95f3850SWill Newton 3494048fd7e6SPrabu Thangamuthu mci_writel(host, RINTSTS, 0xFFFFFFFF); 3495048fd7e6SPrabu Thangamuthu mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3496048fd7e6SPrabu Thangamuthu 3497f95f3850SWill Newton /* disable clock to CIU */ 3498f95f3850SWill Newton mci_writel(host, CLKENA, 0); 3499f95f3850SWill Newton mci_writel(host, CLKSRC, 0); 3500f95f3850SWill Newton 3501f95f3850SWill Newton if (host->use_dma && host->dma_ops->exit) 3502f95f3850SWill Newton host->dma_ops->exit(host); 3503f95f3850SWill Newton 3504d6786fefSGuodong Xu reset_control_assert(host->pdata->rstc); 3505d6786fefSGuodong Xu 3506f90a0612SThomas Abraham clk_disable_unprepare(host->ciu_clk); 3507f90a0612SThomas Abraham clk_disable_unprepare(host->biu_clk); 3508f95f3850SWill Newton } 350962ca8034SShashidhar Hiremath EXPORT_SYMBOL(dw_mci_remove); 351062ca8034SShashidhar Hiremath 351162ca8034SShashidhar Hiremath 3512f95f3850SWill Newton 3513e9ed8835SShawn Lin #ifdef CONFIG_PM 3514ed24e1ffSShawn Lin int dw_mci_runtime_suspend(struct device *dev) 3515f95f3850SWill Newton { 3516ed24e1ffSShawn Lin struct dw_mci *host = dev_get_drvdata(dev); 3517ed24e1ffSShawn Lin 35183fc7eaefSShawn Lin if (host->use_dma && host->dma_ops->exit) 35193fc7eaefSShawn Lin host->dma_ops->exit(host); 35203fc7eaefSShawn Lin 3521ed24e1ffSShawn Lin clk_disable_unprepare(host->ciu_clk); 3522ed24e1ffSShawn Lin 352342f989c0SJaehoon Chung if (host->slot && 352442f989c0SJaehoon Chung (mmc_can_gpio_cd(host->slot->mmc) || 352542f989c0SJaehoon Chung !mmc_card_is_removable(host->slot->mmc))) 3526ed24e1ffSShawn Lin clk_disable_unprepare(host->biu_clk); 3527ed24e1ffSShawn Lin 3528f95f3850SWill Newton return 0; 3529f95f3850SWill Newton } 3530ed24e1ffSShawn Lin EXPORT_SYMBOL(dw_mci_runtime_suspend); 
3531f95f3850SWill Newton 3532ed24e1ffSShawn Lin int dw_mci_runtime_resume(struct device *dev) 3533f95f3850SWill Newton { 3534b23475faSJaehoon Chung int ret = 0; 3535ed24e1ffSShawn Lin struct dw_mci *host = dev_get_drvdata(dev); 3536f95f3850SWill Newton 353742f989c0SJaehoon Chung if (host->slot && 353842f989c0SJaehoon Chung (mmc_can_gpio_cd(host->slot->mmc) || 353942f989c0SJaehoon Chung !mmc_card_is_removable(host->slot->mmc))) { 3540ed24e1ffSShawn Lin ret = clk_prepare_enable(host->biu_clk); 3541ed24e1ffSShawn Lin if (ret) 3542e61cf118SJaehoon Chung return ret; 3543e61cf118SJaehoon Chung } 3544e61cf118SJaehoon Chung 3545ed24e1ffSShawn Lin ret = clk_prepare_enable(host->ciu_clk); 3546ed24e1ffSShawn Lin if (ret) 3547df9bcc2bSJoonyoung Shim goto err; 3548df9bcc2bSJoonyoung Shim 3549df9bcc2bSJoonyoung Shim if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3550df9bcc2bSJoonyoung Shim clk_disable_unprepare(host->ciu_clk); 3551df9bcc2bSJoonyoung Shim ret = -ENODEV; 3552df9bcc2bSJoonyoung Shim goto err; 3553df9bcc2bSJoonyoung Shim } 3554ed24e1ffSShawn Lin 35553bfe619dSJonathan Kliegman if (host->use_dma && host->dma_ops->init) 3556141a712aSSeungwon Jeon host->dma_ops->init(host); 3557141a712aSSeungwon Jeon 355852426899SSeungwon Jeon /* 355952426899SSeungwon Jeon * Restore the initial value at FIFOTH register 356052426899SSeungwon Jeon * And Invalidate the prev_blksz with zero 356152426899SSeungwon Jeon */ 3562e61cf118SJaehoon Chung mci_writel(host, FIFOTH, host->fifoth_val); 356352426899SSeungwon Jeon host->prev_blksz = 0; 3564e61cf118SJaehoon Chung 35652eb2944fSDoug Anderson /* Put in max timeout */ 35662eb2944fSDoug Anderson mci_writel(host, TMOUT, 0xFFFFFFFF); 35672eb2944fSDoug Anderson 3568e61cf118SJaehoon Chung mci_writel(host, RINTSTS, 0xFFFFFFFF); 3569e61cf118SJaehoon Chung mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3570e61cf118SJaehoon Chung SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3571fa0c3283SDoug Anderson DW_MCI_ERROR_FLAGS); 
3572e61cf118SJaehoon Chung mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3573e61cf118SJaehoon Chung 35740e3a22c0SShawn Lin 35754a835afdSWen Zhiwei if (host->slot && host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER) 3576e47c0b96SJaehoon Chung dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios); 3577e9748e03SZiyuan Xu 3578e9748e03SZiyuan Xu /* Force setup bus to guarantee available clock output */ 3579e47c0b96SJaehoon Chung dw_mci_setup_bus(host->slot, true); 3580fa0c3283SDoug Anderson 35817c526608SUlf Hansson /* Re-enable SDIO interrupts. */ 35827c526608SUlf Hansson if (sdio_irq_claimed(host->slot->mmc)) 35837c526608SUlf Hansson __dw_mci_enable_sdio_irq(host->slot, 1); 35847c526608SUlf Hansson 3585fa0c3283SDoug Anderson /* Now that slots are all setup, we can enable card detect */ 3586fa0c3283SDoug Anderson dw_mci_enable_cd(host); 3587fa0c3283SDoug Anderson 3588df9bcc2bSJoonyoung Shim return 0; 3589df9bcc2bSJoonyoung Shim 3590df9bcc2bSJoonyoung Shim err: 359142f989c0SJaehoon Chung if (host->slot && 359242f989c0SJaehoon Chung (mmc_can_gpio_cd(host->slot->mmc) || 359342f989c0SJaehoon Chung !mmc_card_is_removable(host->slot->mmc))) 3594df9bcc2bSJoonyoung Shim clk_disable_unprepare(host->biu_clk); 3595df9bcc2bSJoonyoung Shim 35961f5c51d7SShawn Lin return ret; 35971f5c51d7SShawn Lin } 3598e9ed8835SShawn Lin EXPORT_SYMBOL(dw_mci_runtime_resume); 3599e9ed8835SShawn Lin #endif /* CONFIG_PM */ 36006fe8890dSJaehoon Chung 3601f95f3850SWill Newton static int __init dw_mci_init(void) 3602f95f3850SWill Newton { 36038e1c4e4dSSachin Kamat pr_info("Synopsys Designware Multimedia Card Interface Driver\n"); 360462ca8034SShashidhar Hiremath return 0; 3605f95f3850SWill Newton } 3606f95f3850SWill Newton 3607f95f3850SWill Newton static void __exit dw_mci_exit(void) 3608f95f3850SWill Newton { 3609f95f3850SWill Newton } 3610f95f3850SWill Newton 3611f95f3850SWill Newton module_init(dw_mci_init); 3612f95f3850SWill Newton module_exit(dw_mci_exit); 3613f95f3850SWill Newton 3614f95f3850SWill 
Newton MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 3615f95f3850SWill Newton MODULE_AUTHOR("NXP Semiconductor VietNam"); 3616f95f3850SWill Newton MODULE_AUTHOR("Imagination Technologies Ltd"); 3617f95f3850SWill Newton MODULE_LICENSE("GPL v2"); 3618