12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 2f95f3850SWill Newton /* 3f95f3850SWill Newton * Synopsys DesignWare Multimedia Card Interface driver 4f95f3850SWill Newton * (Based on NXP driver for lpc 31xx) 5f95f3850SWill Newton * 6f95f3850SWill Newton * Copyright (C) 2009 NXP Semiconductors 7f95f3850SWill Newton * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 8f95f3850SWill Newton */ 9f95f3850SWill Newton 10f95f3850SWill Newton #include <linux/blkdev.h> 11f95f3850SWill Newton #include <linux/clk.h> 12f95f3850SWill Newton #include <linux/debugfs.h> 13f95f3850SWill Newton #include <linux/device.h> 14f95f3850SWill Newton #include <linux/dma-mapping.h> 15f95f3850SWill Newton #include <linux/err.h> 16f95f3850SWill Newton #include <linux/init.h> 17f95f3850SWill Newton #include <linux/interrupt.h> 18b6d2d81cSShawn Lin #include <linux/iopoll.h> 19f95f3850SWill Newton #include <linux/ioport.h> 20f95f3850SWill Newton #include <linux/module.h> 21f95f3850SWill Newton #include <linux/platform_device.h> 22a6db2c86SDouglas Anderson #include <linux/pm_runtime.h> 23f95f3850SWill Newton #include <linux/seq_file.h> 24f95f3850SWill Newton #include <linux/slab.h> 25f95f3850SWill Newton #include <linux/stat.h> 26f95f3850SWill Newton #include <linux/delay.h> 27f95f3850SWill Newton #include <linux/irq.h> 28b24c8b26SDoug Anderson #include <linux/mmc/card.h> 29f95f3850SWill Newton #include <linux/mmc/host.h> 30f95f3850SWill Newton #include <linux/mmc/mmc.h> 3101730558SDoug Anderson #include <linux/mmc/sd.h> 3290c2143aSSeungwon Jeon #include <linux/mmc/sdio.h> 33f95f3850SWill Newton #include <linux/bitops.h> 34c07946a3SJaehoon Chung #include <linux/regulator/consumer.h> 35c91eab4bSThomas Abraham #include <linux/of.h> 3655a6ceb2SDoug Anderson #include <linux/of_gpio.h> 37bf626e55SZhangfei Gao #include <linux/mmc/slot-gpio.h> 38f95f3850SWill Newton 39f95f3850SWill Newton #include "dw_mmc.h" 40f95f3850SWill Newton 41f95f3850SWill Newton /* Common flag 
combinations */ 423f7eec62SJaehoon Chung #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \ 43f95f3850SWill Newton SDMMC_INT_HTO | SDMMC_INT_SBE | \ 447a3c5677SDoug Anderson SDMMC_INT_EBE | SDMMC_INT_HLE) 45f95f3850SWill Newton #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ 467a3c5677SDoug Anderson SDMMC_INT_RESP_ERR | SDMMC_INT_HLE) 47f95f3850SWill Newton #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \ 487a3c5677SDoug Anderson DW_MCI_CMD_ERROR_FLAGS) 49f95f3850SWill Newton #define DW_MCI_SEND_STATUS 1 50f95f3850SWill Newton #define DW_MCI_RECV_STATUS 2 51f95f3850SWill Newton #define DW_MCI_DMA_THRESHOLD 16 52f95f3850SWill Newton 531f44a2a5SSeungwon Jeon #define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ 5472e83577SJaehoon Chung #define DW_MCI_FREQ_MIN 100000 /* unit: HZ */ 551f44a2a5SSeungwon Jeon 56fc79a4d6SJoonyoung Shim #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ 57fc79a4d6SJoonyoung Shim SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ 58fc79a4d6SJoonyoung Shim SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ 59fc79a4d6SJoonyoung Shim SDMMC_IDMAC_INT_TI) 60fc79a4d6SJoonyoung Shim 61cc190d4cSShawn Lin #define DESC_RING_BUF_SZ PAGE_SIZE 62cc190d4cSShawn Lin 6369d99fdcSPrabu Thangamuthu struct idmac_desc_64addr { 6469d99fdcSPrabu Thangamuthu u32 des0; /* Control Descriptor */ 65b6d2d81cSShawn Lin #define IDMAC_OWN_CLR64(x) \ 66b6d2d81cSShawn Lin !((x) & cpu_to_le32(IDMAC_DES0_OWN)) 6769d99fdcSPrabu Thangamuthu 6869d99fdcSPrabu Thangamuthu u32 des1; /* Reserved */ 6969d99fdcSPrabu Thangamuthu 7069d99fdcSPrabu Thangamuthu u32 des2; /*Buffer sizes */ 7169d99fdcSPrabu Thangamuthu #define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \ 726687c42fSBen Dooks ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \ 736687c42fSBen Dooks ((cpu_to_le32(s)) & cpu_to_le32(0x1fff))) 7469d99fdcSPrabu Thangamuthu 7569d99fdcSPrabu Thangamuthu u32 des3; /* Reserved */ 7669d99fdcSPrabu Thangamuthu 7769d99fdcSPrabu Thangamuthu u32 des4; 
/* Lower 32-bits of Buffer Address Pointer 1*/ 7869d99fdcSPrabu Thangamuthu u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/ 7969d99fdcSPrabu Thangamuthu 8069d99fdcSPrabu Thangamuthu u32 des6; /* Lower 32-bits of Next Descriptor Address */ 8169d99fdcSPrabu Thangamuthu u32 des7; /* Upper 32-bits of Next Descriptor Address */ 8269d99fdcSPrabu Thangamuthu }; 8369d99fdcSPrabu Thangamuthu 84f95f3850SWill Newton struct idmac_desc { 856687c42fSBen Dooks __le32 des0; /* Control Descriptor */ 86f95f3850SWill Newton #define IDMAC_DES0_DIC BIT(1) 87f95f3850SWill Newton #define IDMAC_DES0_LD BIT(2) 88f95f3850SWill Newton #define IDMAC_DES0_FD BIT(3) 89f95f3850SWill Newton #define IDMAC_DES0_CH BIT(4) 90f95f3850SWill Newton #define IDMAC_DES0_ER BIT(5) 91f95f3850SWill Newton #define IDMAC_DES0_CES BIT(30) 92f95f3850SWill Newton #define IDMAC_DES0_OWN BIT(31) 93f95f3850SWill Newton 946687c42fSBen Dooks __le32 des1; /* Buffer sizes */ 95f95f3850SWill Newton #define IDMAC_SET_BUFFER1_SIZE(d, s) \ 96e5306c3aSBen Dooks ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff))) 97f95f3850SWill Newton 986687c42fSBen Dooks __le32 des2; /* buffer 1 physical address */ 99f95f3850SWill Newton 1006687c42fSBen Dooks __le32 des3; /* buffer 2 physical address */ 101f95f3850SWill Newton }; 1025959b32eSAlexey Brodkin 1035959b32eSAlexey Brodkin /* Each descriptor can transfer up to 4KB of data in chained mode */ 1045959b32eSAlexey Brodkin #define DW_MCI_DESC_DATA_LENGTH 0x1000 105f95f3850SWill Newton 106f95f3850SWill Newton #if defined(CONFIG_DEBUG_FS) 107f95f3850SWill Newton static int dw_mci_req_show(struct seq_file *s, void *v) 108f95f3850SWill Newton { 109f95f3850SWill Newton struct dw_mci_slot *slot = s->private; 110f95f3850SWill Newton struct mmc_request *mrq; 111f95f3850SWill Newton struct mmc_command *cmd; 112f95f3850SWill Newton struct mmc_command *stop; 113f95f3850SWill Newton struct mmc_data *data; 114f95f3850SWill Newton 115f95f3850SWill Newton /* Make 
sure we get a consistent snapshot */ 116f95f3850SWill Newton spin_lock_bh(&slot->host->lock); 117f95f3850SWill Newton mrq = slot->mrq; 118f95f3850SWill Newton 119f95f3850SWill Newton if (mrq) { 120f95f3850SWill Newton cmd = mrq->cmd; 121f95f3850SWill Newton data = mrq->data; 122f95f3850SWill Newton stop = mrq->stop; 123f95f3850SWill Newton 124f95f3850SWill Newton if (cmd) 125f95f3850SWill Newton seq_printf(s, 126f95f3850SWill Newton "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 127f95f3850SWill Newton cmd->opcode, cmd->arg, cmd->flags, 128f95f3850SWill Newton cmd->resp[0], cmd->resp[1], cmd->resp[2], 129f95f3850SWill Newton cmd->resp[2], cmd->error); 130f95f3850SWill Newton if (data) 131f95f3850SWill Newton seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", 132f95f3850SWill Newton data->bytes_xfered, data->blocks, 133f95f3850SWill Newton data->blksz, data->flags, data->error); 134f95f3850SWill Newton if (stop) 135f95f3850SWill Newton seq_printf(s, 136f95f3850SWill Newton "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 137f95f3850SWill Newton stop->opcode, stop->arg, stop->flags, 138f95f3850SWill Newton stop->resp[0], stop->resp[1], stop->resp[2], 139f95f3850SWill Newton stop->resp[2], stop->error); 140f95f3850SWill Newton } 141f95f3850SWill Newton 142f95f3850SWill Newton spin_unlock_bh(&slot->host->lock); 143f95f3850SWill Newton 144f95f3850SWill Newton return 0; 145f95f3850SWill Newton } 14664c1412bSShawn Lin DEFINE_SHOW_ATTRIBUTE(dw_mci_req); 147f95f3850SWill Newton 148f95f3850SWill Newton static int dw_mci_regs_show(struct seq_file *s, void *v) 149f95f3850SWill Newton { 15021657ebdSJaehoon Chung struct dw_mci *host = s->private; 15121657ebdSJaehoon Chung 1525b43df8bSShawn Lin pm_runtime_get_sync(host->dev); 1535b43df8bSShawn Lin 15421657ebdSJaehoon Chung seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); 15521657ebdSJaehoon Chung seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); 15621657ebdSJaehoon Chung seq_printf(s, "CMD:\t0x%08x\n", 
mci_readl(host, CMD)); 15721657ebdSJaehoon Chung seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL)); 15821657ebdSJaehoon Chung seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); 15921657ebdSJaehoon Chung seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); 160f95f3850SWill Newton 1615b43df8bSShawn Lin pm_runtime_put_autosuspend(host->dev); 1625b43df8bSShawn Lin 163f95f3850SWill Newton return 0; 164f95f3850SWill Newton } 16564c1412bSShawn Lin DEFINE_SHOW_ATTRIBUTE(dw_mci_regs); 166f95f3850SWill Newton 167f95f3850SWill Newton static void dw_mci_init_debugfs(struct dw_mci_slot *slot) 168f95f3850SWill Newton { 169f95f3850SWill Newton struct mmc_host *mmc = slot->mmc; 170f95f3850SWill Newton struct dw_mci *host = slot->host; 171f95f3850SWill Newton struct dentry *root; 172f95f3850SWill Newton 173f95f3850SWill Newton root = mmc->debugfs_root; 174f95f3850SWill Newton if (!root) 175f95f3850SWill Newton return; 176f95f3850SWill Newton 177fcac1527SGreg Kroah-Hartman debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops); 178fcac1527SGreg Kroah-Hartman debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops); 179fcac1527SGreg Kroah-Hartman debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); 180*0c40c1beSGeert Uytterhoeven debugfs_create_xul("pending_events", S_IRUSR, root, 181*0c40c1beSGeert Uytterhoeven &host->pending_events); 182*0c40c1beSGeert Uytterhoeven debugfs_create_xul("completed_events", S_IRUSR, root, 183*0c40c1beSGeert Uytterhoeven &host->completed_events); 184f95f3850SWill Newton } 185f95f3850SWill Newton #endif /* defined(CONFIG_DEBUG_FS) */ 186f95f3850SWill Newton 1878e6db1f6SShawn Lin static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) 1888e6db1f6SShawn Lin { 1898e6db1f6SShawn Lin u32 ctrl; 1908e6db1f6SShawn Lin 1918e6db1f6SShawn Lin ctrl = mci_readl(host, CTRL); 1928e6db1f6SShawn Lin ctrl |= reset; 1938e6db1f6SShawn Lin mci_writel(host, CTRL, ctrl); 1948e6db1f6SShawn Lin 1958e6db1f6SShawn 
Lin /* wait till resets clear */ 1968e6db1f6SShawn Lin if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl, 1978e6db1f6SShawn Lin !(ctrl & reset), 1988e6db1f6SShawn Lin 1, 500 * USEC_PER_MSEC)) { 1998e6db1f6SShawn Lin dev_err(host->dev, 2008e6db1f6SShawn Lin "Timeout resetting block (ctrl reset %#x)\n", 2018e6db1f6SShawn Lin ctrl & reset); 2028e6db1f6SShawn Lin return false; 2038e6db1f6SShawn Lin } 2048e6db1f6SShawn Lin 2058e6db1f6SShawn Lin return true; 2068e6db1f6SShawn Lin } 20701730558SDoug Anderson 2084dba18deSShawn Lin static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags) 2094dba18deSShawn Lin { 2104dba18deSShawn Lin u32 status; 2114dba18deSShawn Lin 2124dba18deSShawn Lin /* 2134dba18deSShawn Lin * Databook says that before issuing a new data transfer command 2144dba18deSShawn Lin * we need to check to see if the card is busy. Data transfer commands 2154dba18deSShawn Lin * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that. 2164dba18deSShawn Lin * 2174dba18deSShawn Lin * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is 2184dba18deSShawn Lin * expected. 
2194dba18deSShawn Lin */ 2204dba18deSShawn Lin if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) && 2214dba18deSShawn Lin !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) { 2224dba18deSShawn Lin if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, 2234dba18deSShawn Lin status, 2244dba18deSShawn Lin !(status & SDMMC_STATUS_BUSY), 2254dba18deSShawn Lin 10, 500 * USEC_PER_MSEC)) 2264dba18deSShawn Lin dev_err(host->dev, "Busy; trying anyway\n"); 2274dba18deSShawn Lin } 2284dba18deSShawn Lin } 2294dba18deSShawn Lin 2304dba18deSShawn Lin static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) 2314dba18deSShawn Lin { 2324dba18deSShawn Lin struct dw_mci *host = slot->host; 2334dba18deSShawn Lin unsigned int cmd_status = 0; 2344dba18deSShawn Lin 2354dba18deSShawn Lin mci_writel(host, CMDARG, arg); 2364dba18deSShawn Lin wmb(); /* drain writebuffer */ 2374dba18deSShawn Lin dw_mci_wait_while_busy(host, cmd); 2384dba18deSShawn Lin mci_writel(host, CMD, SDMMC_CMD_START | cmd); 2394dba18deSShawn Lin 2404dba18deSShawn Lin if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status, 2414dba18deSShawn Lin !(cmd_status & SDMMC_CMD_START), 2424dba18deSShawn Lin 1, 500 * USEC_PER_MSEC)) 2434dba18deSShawn Lin dev_err(&slot->mmc->class_dev, 2444dba18deSShawn Lin "Timeout sending command (cmd %#x arg %#x status %#x)\n", 2454dba18deSShawn Lin cmd, arg, cmd_status); 2464dba18deSShawn Lin } 2474dba18deSShawn Lin 248f95f3850SWill Newton static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) 249f95f3850SWill Newton { 250800d78bfSThomas Abraham struct dw_mci_slot *slot = mmc_priv(mmc); 25101730558SDoug Anderson struct dw_mci *host = slot->host; 252f95f3850SWill Newton u32 cmdr; 253f95f3850SWill Newton 2540e3a22c0SShawn Lin cmd->error = -EINPROGRESS; 255f95f3850SWill Newton cmdr = cmd->opcode; 256f95f3850SWill Newton 25790c2143aSSeungwon Jeon if (cmd->opcode == MMC_STOP_TRANSMISSION || 25890c2143aSSeungwon Jeon cmd->opcode == MMC_GO_IDLE_STATE || 25990c2143aSSeungwon 
Jeon cmd->opcode == MMC_GO_INACTIVE_STATE || 26090c2143aSSeungwon Jeon (cmd->opcode == SD_IO_RW_DIRECT && 26190c2143aSSeungwon Jeon ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) 262f95f3850SWill Newton cmdr |= SDMMC_CMD_STOP; 2634a1b27adSJaehoon Chung else if (cmd->opcode != MMC_SEND_STATUS && cmd->data) 264f95f3850SWill Newton cmdr |= SDMMC_CMD_PRV_DAT_WAIT; 265f95f3850SWill Newton 26601730558SDoug Anderson if (cmd->opcode == SD_SWITCH_VOLTAGE) { 26701730558SDoug Anderson u32 clk_en_a; 26801730558SDoug Anderson 26901730558SDoug Anderson /* Special bit makes CMD11 not die */ 27001730558SDoug Anderson cmdr |= SDMMC_CMD_VOLT_SWITCH; 27101730558SDoug Anderson 27201730558SDoug Anderson /* Change state to continue to handle CMD11 weirdness */ 27301730558SDoug Anderson WARN_ON(slot->host->state != STATE_SENDING_CMD); 27401730558SDoug Anderson slot->host->state = STATE_SENDING_CMD11; 27501730558SDoug Anderson 27601730558SDoug Anderson /* 27701730558SDoug Anderson * We need to disable low power mode (automatic clock stop) 27801730558SDoug Anderson * while doing voltage switch so we don't confuse the card, 27901730558SDoug Anderson * since stopping the clock is a specific part of the UHS 28001730558SDoug Anderson * voltage change dance. 28101730558SDoug Anderson * 28201730558SDoug Anderson * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be 28301730558SDoug Anderson * unconditionally turned back on in dw_mci_setup_bus() if it's 28401730558SDoug Anderson * ever called with a non-zero clock. That shouldn't happen 28501730558SDoug Anderson * until the voltage change is all done. 
28601730558SDoug Anderson */ 28701730558SDoug Anderson clk_en_a = mci_readl(host, CLKENA); 28801730558SDoug Anderson clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id); 28901730558SDoug Anderson mci_writel(host, CLKENA, clk_en_a); 29001730558SDoug Anderson mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | 29101730558SDoug Anderson SDMMC_CMD_PRV_DAT_WAIT, 0); 29201730558SDoug Anderson } 29301730558SDoug Anderson 294f95f3850SWill Newton if (cmd->flags & MMC_RSP_PRESENT) { 295f95f3850SWill Newton /* We expect a response, so set this bit */ 296f95f3850SWill Newton cmdr |= SDMMC_CMD_RESP_EXP; 297f95f3850SWill Newton if (cmd->flags & MMC_RSP_136) 298f95f3850SWill Newton cmdr |= SDMMC_CMD_RESP_LONG; 299f95f3850SWill Newton } 300f95f3850SWill Newton 301f95f3850SWill Newton if (cmd->flags & MMC_RSP_CRC) 302f95f3850SWill Newton cmdr |= SDMMC_CMD_RESP_CRC; 303f95f3850SWill Newton 3040349c085SJaehoon Chung if (cmd->data) { 305f95f3850SWill Newton cmdr |= SDMMC_CMD_DAT_EXP; 3060349c085SJaehoon Chung if (cmd->data->flags & MMC_DATA_WRITE) 307f95f3850SWill Newton cmdr |= SDMMC_CMD_DAT_WR; 308f95f3850SWill Newton } 309f95f3850SWill Newton 310aaaaeb7aSJaehoon Chung if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags)) 311aaaaeb7aSJaehoon Chung cmdr |= SDMMC_CMD_USE_HOLD_REG; 312800d78bfSThomas Abraham 313f95f3850SWill Newton return cmdr; 314f95f3850SWill Newton } 315f95f3850SWill Newton 31690c2143aSSeungwon Jeon static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) 31790c2143aSSeungwon Jeon { 31890c2143aSSeungwon Jeon struct mmc_command *stop; 31990c2143aSSeungwon Jeon u32 cmdr; 32090c2143aSSeungwon Jeon 32190c2143aSSeungwon Jeon if (!cmd->data) 32290c2143aSSeungwon Jeon return 0; 32390c2143aSSeungwon Jeon 32490c2143aSSeungwon Jeon stop = &host->stop_abort; 32590c2143aSSeungwon Jeon cmdr = cmd->opcode; 32690c2143aSSeungwon Jeon memset(stop, 0, sizeof(struct mmc_command)); 32790c2143aSSeungwon Jeon 32890c2143aSSeungwon Jeon if (cmdr == MMC_READ_SINGLE_BLOCK || 
32990c2143aSSeungwon Jeon cmdr == MMC_READ_MULTIPLE_BLOCK || 33090c2143aSSeungwon Jeon cmdr == MMC_WRITE_BLOCK || 3316c2c6506SUlf Hansson cmdr == MMC_WRITE_MULTIPLE_BLOCK || 3326c2c6506SUlf Hansson cmdr == MMC_SEND_TUNING_BLOCK || 3336c2c6506SUlf Hansson cmdr == MMC_SEND_TUNING_BLOCK_HS200) { 33490c2143aSSeungwon Jeon stop->opcode = MMC_STOP_TRANSMISSION; 33590c2143aSSeungwon Jeon stop->arg = 0; 33690c2143aSSeungwon Jeon stop->flags = MMC_RSP_R1B | MMC_CMD_AC; 33790c2143aSSeungwon Jeon } else if (cmdr == SD_IO_RW_EXTENDED) { 33890c2143aSSeungwon Jeon stop->opcode = SD_IO_RW_DIRECT; 33990c2143aSSeungwon Jeon stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | 34090c2143aSSeungwon Jeon ((cmd->arg >> 28) & 0x7); 34190c2143aSSeungwon Jeon stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC; 34290c2143aSSeungwon Jeon } else { 34390c2143aSSeungwon Jeon return 0; 34490c2143aSSeungwon Jeon } 34590c2143aSSeungwon Jeon 34690c2143aSSeungwon Jeon cmdr = stop->opcode | SDMMC_CMD_STOP | 34790c2143aSSeungwon Jeon SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP; 34890c2143aSSeungwon Jeon 34942f989c0SJaehoon Chung if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags)) 3508c005b40SJaehoon Chung cmdr |= SDMMC_CMD_USE_HOLD_REG; 3518c005b40SJaehoon Chung 35290c2143aSSeungwon Jeon return cmdr; 35390c2143aSSeungwon Jeon } 35490c2143aSSeungwon Jeon 35503de1921SAddy Ke static inline void dw_mci_set_cto(struct dw_mci *host) 35603de1921SAddy Ke { 35703de1921SAddy Ke unsigned int cto_clks; 3584c2357f5SDouglas Anderson unsigned int cto_div; 35903de1921SAddy Ke unsigned int cto_ms; 3608892b705SDouglas Anderson unsigned long irqflags; 36103de1921SAddy Ke 36203de1921SAddy Ke cto_clks = mci_readl(host, TMOUT) & 0xff; 3634c2357f5SDouglas Anderson cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; 3644c2357f5SDouglas Anderson if (cto_div == 0) 3654c2357f5SDouglas Anderson cto_div = 1; 366c7151602SEvgeniy Didin 367c7151602SEvgeniy Didin cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * 
cto_div, 368c7151602SEvgeniy Didin host->bus_hz); 36903de1921SAddy Ke 37003de1921SAddy Ke /* add a bit spare time */ 37103de1921SAddy Ke cto_ms += 10; 37203de1921SAddy Ke 3738892b705SDouglas Anderson /* 3748892b705SDouglas Anderson * The durations we're working with are fairly short so we have to be 3758892b705SDouglas Anderson * extra careful about synchronization here. Specifically in hardware a 3768892b705SDouglas Anderson * command timeout is _at most_ 5.1 ms, so that means we expect an 3778892b705SDouglas Anderson * interrupt (either command done or timeout) to come rather quickly 3788892b705SDouglas Anderson * after the mci_writel. ...but just in case we have a long interrupt 3798892b705SDouglas Anderson * latency let's add a bit of paranoia. 3808892b705SDouglas Anderson * 3818892b705SDouglas Anderson * In general we'll assume that at least an interrupt will be asserted 3828892b705SDouglas Anderson * in hardware by the time the cto_timer runs. ...and if it hasn't 3838892b705SDouglas Anderson * been asserted in hardware by that time then we'll assume it'll never 3848892b705SDouglas Anderson * come. 
3858892b705SDouglas Anderson */ 3868892b705SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 3878892b705SDouglas Anderson if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 38803de1921SAddy Ke mod_timer(&host->cto_timer, 38903de1921SAddy Ke jiffies + msecs_to_jiffies(cto_ms) + 1); 3908892b705SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 39103de1921SAddy Ke } 39203de1921SAddy Ke 393f95f3850SWill Newton static void dw_mci_start_command(struct dw_mci *host, 394f95f3850SWill Newton struct mmc_command *cmd, u32 cmd_flags) 395f95f3850SWill Newton { 396f95f3850SWill Newton host->cmd = cmd; 3974a90920cSThomas Abraham dev_vdbg(host->dev, 398f95f3850SWill Newton "start command: ARGR=0x%08x CMDR=0x%08x\n", 399f95f3850SWill Newton cmd->arg, cmd_flags); 400f95f3850SWill Newton 401f95f3850SWill Newton mci_writel(host, CMDARG, cmd->arg); 4020e3a22c0SShawn Lin wmb(); /* drain writebuffer */ 4030bdbd0e8SDoug Anderson dw_mci_wait_while_busy(host, cmd_flags); 404f95f3850SWill Newton 4058892b705SDouglas Anderson mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); 4068892b705SDouglas Anderson 40703de1921SAddy Ke /* response expected command only */ 40803de1921SAddy Ke if (cmd_flags & SDMMC_CMD_RESP_EXP) 40903de1921SAddy Ke dw_mci_set_cto(host); 410f95f3850SWill Newton } 411f95f3850SWill Newton 41290c2143aSSeungwon Jeon static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) 413f95f3850SWill Newton { 414e13c3c08SJaehoon Chung struct mmc_command *stop = &host->stop_abort; 4150e3a22c0SShawn Lin 41690c2143aSSeungwon Jeon dw_mci_start_command(host, stop, host->stop_cmdr); 417f95f3850SWill Newton } 418f95f3850SWill Newton 419f95f3850SWill Newton /* DMA interface functions */ 420f95f3850SWill Newton static void dw_mci_stop_dma(struct dw_mci *host) 421f95f3850SWill Newton { 42203e8cb53SJames Hogan if (host->using_dma) { 423f95f3850SWill Newton host->dma_ops->stop(host); 424f95f3850SWill Newton host->dma_ops->cleanup(host); 
425aa50f259SSeungwon Jeon } 426aa50f259SSeungwon Jeon 427f95f3850SWill Newton /* Data transfer was stopped by the interrupt handler */ 428f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 429f95f3850SWill Newton } 430f95f3850SWill Newton 431f95f3850SWill Newton static void dw_mci_dma_cleanup(struct dw_mci *host) 432f95f3850SWill Newton { 433f95f3850SWill Newton struct mmc_data *data = host->data; 434f95f3850SWill Newton 435a4cc7eb4SJaehoon Chung if (data && data->host_cookie == COOKIE_MAPPED) { 4364a90920cSThomas Abraham dma_unmap_sg(host->dev, 4379aa51408SSeungwon Jeon data->sg, 4389aa51408SSeungwon Jeon data->sg_len, 439feeef096SHeiner Kallweit mmc_get_dma_dir(data)); 440a4cc7eb4SJaehoon Chung data->host_cookie = COOKIE_UNMAPPED; 441a4cc7eb4SJaehoon Chung } 442f95f3850SWill Newton } 443f95f3850SWill Newton 4445ce9d961SSeungwon Jeon static void dw_mci_idmac_reset(struct dw_mci *host) 4455ce9d961SSeungwon Jeon { 4465ce9d961SSeungwon Jeon u32 bmod = mci_readl(host, BMOD); 4475ce9d961SSeungwon Jeon /* Software reset of DMA */ 4485ce9d961SSeungwon Jeon bmod |= SDMMC_IDMAC_SWRESET; 4495ce9d961SSeungwon Jeon mci_writel(host, BMOD, bmod); 4505ce9d961SSeungwon Jeon } 4515ce9d961SSeungwon Jeon 452f95f3850SWill Newton static void dw_mci_idmac_stop_dma(struct dw_mci *host) 453f95f3850SWill Newton { 454f95f3850SWill Newton u32 temp; 455f95f3850SWill Newton 456f95f3850SWill Newton /* Disable and reset the IDMAC interface */ 457f95f3850SWill Newton temp = mci_readl(host, CTRL); 458f95f3850SWill Newton temp &= ~SDMMC_CTRL_USE_IDMAC; 459f95f3850SWill Newton temp |= SDMMC_CTRL_DMA_RESET; 460f95f3850SWill Newton mci_writel(host, CTRL, temp); 461f95f3850SWill Newton 462f95f3850SWill Newton /* Stop the IDMAC running */ 463f95f3850SWill Newton temp = mci_readl(host, BMOD); 464a5289a43SJaehoon Chung temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); 4655ce9d961SSeungwon Jeon temp |= SDMMC_IDMAC_SWRESET; 466f95f3850SWill Newton mci_writel(host, BMOD, temp); 
467f95f3850SWill Newton } 468f95f3850SWill Newton 4693fc7eaefSShawn Lin static void dw_mci_dmac_complete_dma(void *arg) 470f95f3850SWill Newton { 4713fc7eaefSShawn Lin struct dw_mci *host = arg; 472f95f3850SWill Newton struct mmc_data *data = host->data; 473f95f3850SWill Newton 4744a90920cSThomas Abraham dev_vdbg(host->dev, "DMA complete\n"); 475f95f3850SWill Newton 4763fc7eaefSShawn Lin if ((host->use_dma == TRANS_MODE_EDMAC) && 4773fc7eaefSShawn Lin data && (data->flags & MMC_DATA_READ)) 4783fc7eaefSShawn Lin /* Invalidate cache after read */ 47942f989c0SJaehoon Chung dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc), 4803fc7eaefSShawn Lin data->sg, 4813fc7eaefSShawn Lin data->sg_len, 4823fc7eaefSShawn Lin DMA_FROM_DEVICE); 4833fc7eaefSShawn Lin 484f95f3850SWill Newton host->dma_ops->cleanup(host); 485f95f3850SWill Newton 486f95f3850SWill Newton /* 487f95f3850SWill Newton * If the card was removed, data will be NULL. No point in trying to 488f95f3850SWill Newton * send the stop command or waiting for NBUSY in this case. 
489f95f3850SWill Newton */ 490f95f3850SWill Newton if (data) { 491f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 492f95f3850SWill Newton tasklet_schedule(&host->tasklet); 493f95f3850SWill Newton } 494f95f3850SWill Newton } 495f95f3850SWill Newton 496f95f3850SWill Newton static int dw_mci_idmac_init(struct dw_mci *host) 497f95f3850SWill Newton { 498897b69e7SSeungwon Jeon int i; 499f95f3850SWill Newton 50069d99fdcSPrabu Thangamuthu if (host->dma_64bit_address == 1) { 50169d99fdcSPrabu Thangamuthu struct idmac_desc_64addr *p; 50269d99fdcSPrabu Thangamuthu /* Number of descriptors in the ring buffer */ 503cc190d4cSShawn Lin host->ring_size = 504cc190d4cSShawn Lin DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr); 50569d99fdcSPrabu Thangamuthu 50669d99fdcSPrabu Thangamuthu /* Forward link the descriptor list */ 50769d99fdcSPrabu Thangamuthu for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; 50869d99fdcSPrabu Thangamuthu i++, p++) { 50969d99fdcSPrabu Thangamuthu p->des6 = (host->sg_dma + 51069d99fdcSPrabu Thangamuthu (sizeof(struct idmac_desc_64addr) * 51169d99fdcSPrabu Thangamuthu (i + 1))) & 0xffffffff; 51269d99fdcSPrabu Thangamuthu 51369d99fdcSPrabu Thangamuthu p->des7 = (u64)(host->sg_dma + 51469d99fdcSPrabu Thangamuthu (sizeof(struct idmac_desc_64addr) * 51569d99fdcSPrabu Thangamuthu (i + 1))) >> 32; 51669d99fdcSPrabu Thangamuthu /* Initialize reserved and buffer size fields to "0" */ 51747b7de2fSEvgeniy Didin p->des0 = 0; 51869d99fdcSPrabu Thangamuthu p->des1 = 0; 51969d99fdcSPrabu Thangamuthu p->des2 = 0; 52069d99fdcSPrabu Thangamuthu p->des3 = 0; 52169d99fdcSPrabu Thangamuthu } 52269d99fdcSPrabu Thangamuthu 52369d99fdcSPrabu Thangamuthu /* Set the last descriptor as the end-of-ring descriptor */ 52469d99fdcSPrabu Thangamuthu p->des6 = host->sg_dma & 0xffffffff; 52569d99fdcSPrabu Thangamuthu p->des7 = (u64)host->sg_dma >> 32; 52669d99fdcSPrabu Thangamuthu p->des0 = IDMAC_DES0_ER; 52769d99fdcSPrabu Thangamuthu 52869d99fdcSPrabu 
Thangamuthu } else { 52969d99fdcSPrabu Thangamuthu struct idmac_desc *p; 530f95f3850SWill Newton /* Number of descriptors in the ring buffer */ 531cc190d4cSShawn Lin host->ring_size = 532cc190d4cSShawn Lin DESC_RING_BUF_SZ / sizeof(struct idmac_desc); 533f95f3850SWill Newton 534f95f3850SWill Newton /* Forward link the descriptor list */ 5350e3a22c0SShawn Lin for (i = 0, p = host->sg_cpu; 5360e3a22c0SShawn Lin i < host->ring_size - 1; 5370e3a22c0SShawn Lin i++, p++) { 5386687c42fSBen Dooks p->des3 = cpu_to_le32(host->sg_dma + 5396687c42fSBen Dooks (sizeof(struct idmac_desc) * (i + 1))); 54047b7de2fSEvgeniy Didin p->des0 = 0; 5414b244724SZhangfei Gao p->des1 = 0; 5424b244724SZhangfei Gao } 543f95f3850SWill Newton 544f95f3850SWill Newton /* Set the last descriptor as the end-of-ring descriptor */ 5456687c42fSBen Dooks p->des3 = cpu_to_le32(host->sg_dma); 5466687c42fSBen Dooks p->des0 = cpu_to_le32(IDMAC_DES0_ER); 54769d99fdcSPrabu Thangamuthu } 548f95f3850SWill Newton 5495ce9d961SSeungwon Jeon dw_mci_idmac_reset(host); 550141a712aSSeungwon Jeon 55169d99fdcSPrabu Thangamuthu if (host->dma_64bit_address == 1) { 55269d99fdcSPrabu Thangamuthu /* Mask out interrupts - get Tx & Rx complete only */ 55369d99fdcSPrabu Thangamuthu mci_writel(host, IDSTS64, IDMAC_INT_CLR); 55469d99fdcSPrabu Thangamuthu mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI | 55569d99fdcSPrabu Thangamuthu SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); 55669d99fdcSPrabu Thangamuthu 55769d99fdcSPrabu Thangamuthu /* Set the descriptor base address */ 55869d99fdcSPrabu Thangamuthu mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff); 55969d99fdcSPrabu Thangamuthu mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32); 56069d99fdcSPrabu Thangamuthu 56169d99fdcSPrabu Thangamuthu } else { 562f95f3850SWill Newton /* Mask out interrupts - get Tx & Rx complete only */ 563fc79a4d6SJoonyoung Shim mci_writel(host, IDSTS, IDMAC_INT_CLR); 56469d99fdcSPrabu Thangamuthu mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | 
56569d99fdcSPrabu Thangamuthu SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); 566f95f3850SWill Newton 567f95f3850SWill Newton /* Set the descriptor base address */ 568f95f3850SWill Newton mci_writel(host, DBADDR, host->sg_dma); 56969d99fdcSPrabu Thangamuthu } 57069d99fdcSPrabu Thangamuthu 571f95f3850SWill Newton return 0; 572f95f3850SWill Newton } 573f95f3850SWill Newton 5743b2a067bSShawn Lin static inline int dw_mci_prepare_desc64(struct dw_mci *host, 5753b2a067bSShawn Lin struct mmc_data *data, 5763b2a067bSShawn Lin unsigned int sg_len) 5773b2a067bSShawn Lin { 5783b2a067bSShawn Lin unsigned int desc_len; 5793b2a067bSShawn Lin struct idmac_desc_64addr *desc_first, *desc_last, *desc; 580b6d2d81cSShawn Lin u32 val; 5813b2a067bSShawn Lin int i; 5823b2a067bSShawn Lin 5833b2a067bSShawn Lin desc_first = desc_last = desc = host->sg_cpu; 5843b2a067bSShawn Lin 5853b2a067bSShawn Lin for (i = 0; i < sg_len; i++) { 5863b2a067bSShawn Lin unsigned int length = sg_dma_len(&data->sg[i]); 5873b2a067bSShawn Lin 5883b2a067bSShawn Lin u64 mem_addr = sg_dma_address(&data->sg[i]); 5893b2a067bSShawn Lin 5903b2a067bSShawn Lin for ( ; length ; desc++) { 5913b2a067bSShawn Lin desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 5923b2a067bSShawn Lin length : DW_MCI_DESC_DATA_LENGTH; 5933b2a067bSShawn Lin 5943b2a067bSShawn Lin length -= desc_len; 5953b2a067bSShawn Lin 5963b2a067bSShawn Lin /* 5973b2a067bSShawn Lin * Wait for the former clear OWN bit operation 5983b2a067bSShawn Lin * of IDMAC to make sure that this descriptor 5993b2a067bSShawn Lin * isn't still owned by IDMAC as IDMAC's write 6003b2a067bSShawn Lin * ops and CPU's read ops are asynchronous. 
6013b2a067bSShawn Lin */ 602b6d2d81cSShawn Lin if (readl_poll_timeout_atomic(&desc->des0, val, 603b6d2d81cSShawn Lin !(val & IDMAC_DES0_OWN), 604b6d2d81cSShawn Lin 10, 100 * USEC_PER_MSEC)) 6053b2a067bSShawn Lin goto err_own_bit; 6063b2a067bSShawn Lin 6073b2a067bSShawn Lin /* 6083b2a067bSShawn Lin * Set the OWN bit and disable interrupts 6093b2a067bSShawn Lin * for this descriptor 6103b2a067bSShawn Lin */ 6113b2a067bSShawn Lin desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | 6123b2a067bSShawn Lin IDMAC_DES0_CH; 6133b2a067bSShawn Lin 6143b2a067bSShawn Lin /* Buffer length */ 6153b2a067bSShawn Lin IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len); 6163b2a067bSShawn Lin 6173b2a067bSShawn Lin /* Physical address to DMA to/from */ 6183b2a067bSShawn Lin desc->des4 = mem_addr & 0xffffffff; 6193b2a067bSShawn Lin desc->des5 = mem_addr >> 32; 6203b2a067bSShawn Lin 6213b2a067bSShawn Lin /* Update physical address for the next desc */ 6223b2a067bSShawn Lin mem_addr += desc_len; 6233b2a067bSShawn Lin 6243b2a067bSShawn Lin /* Save pointer to the last descriptor */ 6253b2a067bSShawn Lin desc_last = desc; 6263b2a067bSShawn Lin } 6273b2a067bSShawn Lin } 6283b2a067bSShawn Lin 6293b2a067bSShawn Lin /* Set first descriptor */ 6303b2a067bSShawn Lin desc_first->des0 |= IDMAC_DES0_FD; 6313b2a067bSShawn Lin 6323b2a067bSShawn Lin /* Set last descriptor */ 6333b2a067bSShawn Lin desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); 6343b2a067bSShawn Lin desc_last->des0 |= IDMAC_DES0_LD; 6353b2a067bSShawn Lin 6363b2a067bSShawn Lin return 0; 6373b2a067bSShawn Lin err_own_bit: 6383b2a067bSShawn Lin /* restore the descriptor chain as it's polluted */ 63926be9d70SColin Ian King dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n"); 640cc190d4cSShawn Lin memset(host->sg_cpu, 0, DESC_RING_BUF_SZ); 6413b2a067bSShawn Lin dw_mci_idmac_init(host); 6423b2a067bSShawn Lin return -EINVAL; 6433b2a067bSShawn Lin } 6443b2a067bSShawn Lin 6453b2a067bSShawn Lin 6463b2a067bSShawn Lin static inline int 
dw_mci_prepare_desc32(struct dw_mci *host, 6473b2a067bSShawn Lin struct mmc_data *data, 6483b2a067bSShawn Lin unsigned int sg_len) 6493b2a067bSShawn Lin { 6503b2a067bSShawn Lin unsigned int desc_len; 6513b2a067bSShawn Lin struct idmac_desc *desc_first, *desc_last, *desc; 652b6d2d81cSShawn Lin u32 val; 6533b2a067bSShawn Lin int i; 6543b2a067bSShawn Lin 6553b2a067bSShawn Lin desc_first = desc_last = desc = host->sg_cpu; 6563b2a067bSShawn Lin 6573b2a067bSShawn Lin for (i = 0; i < sg_len; i++) { 6583b2a067bSShawn Lin unsigned int length = sg_dma_len(&data->sg[i]); 6593b2a067bSShawn Lin 6603b2a067bSShawn Lin u32 mem_addr = sg_dma_address(&data->sg[i]); 6613b2a067bSShawn Lin 6623b2a067bSShawn Lin for ( ; length ; desc++) { 6633b2a067bSShawn Lin desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ? 6643b2a067bSShawn Lin length : DW_MCI_DESC_DATA_LENGTH; 6653b2a067bSShawn Lin 6663b2a067bSShawn Lin length -= desc_len; 6673b2a067bSShawn Lin 6683b2a067bSShawn Lin /* 6693b2a067bSShawn Lin * Wait for the former clear OWN bit operation 6703b2a067bSShawn Lin * of IDMAC to make sure that this descriptor 6713b2a067bSShawn Lin * isn't still owned by IDMAC as IDMAC's write 6723b2a067bSShawn Lin * ops and CPU's read ops are asynchronous. 
6733b2a067bSShawn Lin */ 674b6d2d81cSShawn Lin if (readl_poll_timeout_atomic(&desc->des0, val, 675b6d2d81cSShawn Lin IDMAC_OWN_CLR64(val), 676b6d2d81cSShawn Lin 10, 677b6d2d81cSShawn Lin 100 * USEC_PER_MSEC)) 6783b2a067bSShawn Lin goto err_own_bit; 6793b2a067bSShawn Lin 6803b2a067bSShawn Lin /* 6813b2a067bSShawn Lin * Set the OWN bit and disable interrupts 6823b2a067bSShawn Lin * for this descriptor 6833b2a067bSShawn Lin */ 6843b2a067bSShawn Lin desc->des0 = cpu_to_le32(IDMAC_DES0_OWN | 6853b2a067bSShawn Lin IDMAC_DES0_DIC | 6863b2a067bSShawn Lin IDMAC_DES0_CH); 6873b2a067bSShawn Lin 6883b2a067bSShawn Lin /* Buffer length */ 6893b2a067bSShawn Lin IDMAC_SET_BUFFER1_SIZE(desc, desc_len); 6903b2a067bSShawn Lin 6913b2a067bSShawn Lin /* Physical address to DMA to/from */ 6923b2a067bSShawn Lin desc->des2 = cpu_to_le32(mem_addr); 6933b2a067bSShawn Lin 6943b2a067bSShawn Lin /* Update physical address for the next desc */ 6953b2a067bSShawn Lin mem_addr += desc_len; 6963b2a067bSShawn Lin 6973b2a067bSShawn Lin /* Save pointer to the last descriptor */ 6983b2a067bSShawn Lin desc_last = desc; 6993b2a067bSShawn Lin } 7003b2a067bSShawn Lin } 7013b2a067bSShawn Lin 7023b2a067bSShawn Lin /* Set first descriptor */ 7033b2a067bSShawn Lin desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD); 7043b2a067bSShawn Lin 7053b2a067bSShawn Lin /* Set last descriptor */ 7063b2a067bSShawn Lin desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | 7073b2a067bSShawn Lin IDMAC_DES0_DIC)); 7083b2a067bSShawn Lin desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD); 7093b2a067bSShawn Lin 7103b2a067bSShawn Lin return 0; 7113b2a067bSShawn Lin err_own_bit: 7123b2a067bSShawn Lin /* restore the descriptor chain as it's polluted */ 71326be9d70SColin Ian King dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n"); 714cc190d4cSShawn Lin memset(host->sg_cpu, 0, DESC_RING_BUF_SZ); 7153b2a067bSShawn Lin dw_mci_idmac_init(host); 7163b2a067bSShawn Lin return -EINVAL; 7173b2a067bSShawn Lin } 7183b2a067bSShawn Lin 
7193b2a067bSShawn Lin static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) 7203b2a067bSShawn Lin { 7213b2a067bSShawn Lin u32 temp; 7223b2a067bSShawn Lin int ret; 7233b2a067bSShawn Lin 7243b2a067bSShawn Lin if (host->dma_64bit_address == 1) 7253b2a067bSShawn Lin ret = dw_mci_prepare_desc64(host, host->data, sg_len); 7263b2a067bSShawn Lin else 7273b2a067bSShawn Lin ret = dw_mci_prepare_desc32(host, host->data, sg_len); 7283b2a067bSShawn Lin 7293b2a067bSShawn Lin if (ret) 7303b2a067bSShawn Lin goto out; 7313b2a067bSShawn Lin 7323b2a067bSShawn Lin /* drain writebuffer */ 7333b2a067bSShawn Lin wmb(); 7343b2a067bSShawn Lin 7353b2a067bSShawn Lin /* Make sure to reset DMA in case we did PIO before this */ 7363b2a067bSShawn Lin dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET); 7373b2a067bSShawn Lin dw_mci_idmac_reset(host); 7383b2a067bSShawn Lin 7393b2a067bSShawn Lin /* Select IDMAC interface */ 7403b2a067bSShawn Lin temp = mci_readl(host, CTRL); 7413b2a067bSShawn Lin temp |= SDMMC_CTRL_USE_IDMAC; 7423b2a067bSShawn Lin mci_writel(host, CTRL, temp); 7433b2a067bSShawn Lin 7443b2a067bSShawn Lin /* drain writebuffer */ 7453b2a067bSShawn Lin wmb(); 7463b2a067bSShawn Lin 7473b2a067bSShawn Lin /* Enable the IDMAC */ 7483b2a067bSShawn Lin temp = mci_readl(host, BMOD); 7493b2a067bSShawn Lin temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; 7503b2a067bSShawn Lin mci_writel(host, BMOD, temp); 7513b2a067bSShawn Lin 7523b2a067bSShawn Lin /* Start it running */ 7533b2a067bSShawn Lin mci_writel(host, PLDMND, 1); 7543b2a067bSShawn Lin 7553b2a067bSShawn Lin out: 7563b2a067bSShawn Lin return ret; 7573b2a067bSShawn Lin } 7583b2a067bSShawn Lin 7598e2b36eaSArnd Bergmann static const struct dw_mci_dma_ops dw_mci_idmac_ops = { 760885c3e80SSeungwon Jeon .init = dw_mci_idmac_init, 761885c3e80SSeungwon Jeon .start = dw_mci_idmac_start_dma, 762885c3e80SSeungwon Jeon .stop = dw_mci_idmac_stop_dma, 7633fc7eaefSShawn Lin .complete = dw_mci_dmac_complete_dma, 764885c3e80SSeungwon Jeon 
.cleanup = dw_mci_dma_cleanup, 765885c3e80SSeungwon Jeon }; 7663fc7eaefSShawn Lin 7673fc7eaefSShawn Lin static void dw_mci_edmac_stop_dma(struct dw_mci *host) 7683fc7eaefSShawn Lin { 769ab925a31SShawn Lin dmaengine_terminate_async(host->dms->ch); 7703fc7eaefSShawn Lin } 7713fc7eaefSShawn Lin 7723fc7eaefSShawn Lin static int dw_mci_edmac_start_dma(struct dw_mci *host, 7733fc7eaefSShawn Lin unsigned int sg_len) 7743fc7eaefSShawn Lin { 7753fc7eaefSShawn Lin struct dma_slave_config cfg; 7763fc7eaefSShawn Lin struct dma_async_tx_descriptor *desc = NULL; 7773fc7eaefSShawn Lin struct scatterlist *sgl = host->data->sg; 77827d70d36SColin Ian King static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; 7793fc7eaefSShawn Lin u32 sg_elems = host->data->sg_len; 7803fc7eaefSShawn Lin u32 fifoth_val; 7813fc7eaefSShawn Lin u32 fifo_offset = host->fifo_reg - host->regs; 7823fc7eaefSShawn Lin int ret = 0; 7833fc7eaefSShawn Lin 7843fc7eaefSShawn Lin /* Set external dma config: burst size, burst width */ 785260b3164SArnd Bergmann cfg.dst_addr = host->phy_regs + fifo_offset; 7863fc7eaefSShawn Lin cfg.src_addr = cfg.dst_addr; 7873fc7eaefSShawn Lin cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 7883fc7eaefSShawn Lin cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 7893fc7eaefSShawn Lin 7903fc7eaefSShawn Lin /* Match burst msize with external dma config */ 7913fc7eaefSShawn Lin fifoth_val = mci_readl(host, FIFOTH); 7923fc7eaefSShawn Lin cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7]; 7933fc7eaefSShawn Lin cfg.src_maxburst = cfg.dst_maxburst; 7943fc7eaefSShawn Lin 7953fc7eaefSShawn Lin if (host->data->flags & MMC_DATA_WRITE) 7963fc7eaefSShawn Lin cfg.direction = DMA_MEM_TO_DEV; 7973fc7eaefSShawn Lin else 7983fc7eaefSShawn Lin cfg.direction = DMA_DEV_TO_MEM; 7993fc7eaefSShawn Lin 8003fc7eaefSShawn Lin ret = dmaengine_slave_config(host->dms->ch, &cfg); 8013fc7eaefSShawn Lin if (ret) { 8023fc7eaefSShawn Lin dev_err(host->dev, "Failed to config edmac.\n"); 8033fc7eaefSShawn Lin return 
-EBUSY; 8043fc7eaefSShawn Lin } 8053fc7eaefSShawn Lin 8063fc7eaefSShawn Lin desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, 8073fc7eaefSShawn Lin sg_len, cfg.direction, 8083fc7eaefSShawn Lin DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 8093fc7eaefSShawn Lin if (!desc) { 8103fc7eaefSShawn Lin dev_err(host->dev, "Can't prepare slave sg.\n"); 8113fc7eaefSShawn Lin return -EBUSY; 8123fc7eaefSShawn Lin } 8133fc7eaefSShawn Lin 8143fc7eaefSShawn Lin /* Set dw_mci_dmac_complete_dma as callback */ 8153fc7eaefSShawn Lin desc->callback = dw_mci_dmac_complete_dma; 8163fc7eaefSShawn Lin desc->callback_param = (void *)host; 8173fc7eaefSShawn Lin dmaengine_submit(desc); 8183fc7eaefSShawn Lin 8193fc7eaefSShawn Lin /* Flush cache before write */ 8203fc7eaefSShawn Lin if (host->data->flags & MMC_DATA_WRITE) 82142f989c0SJaehoon Chung dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl, 8223fc7eaefSShawn Lin sg_elems, DMA_TO_DEVICE); 8233fc7eaefSShawn Lin 8243fc7eaefSShawn Lin dma_async_issue_pending(host->dms->ch); 8253fc7eaefSShawn Lin 8263fc7eaefSShawn Lin return 0; 8273fc7eaefSShawn Lin } 8283fc7eaefSShawn Lin 8293fc7eaefSShawn Lin static int dw_mci_edmac_init(struct dw_mci *host) 8303fc7eaefSShawn Lin { 8313fc7eaefSShawn Lin /* Request external dma channel */ 8323fc7eaefSShawn Lin host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL); 8333fc7eaefSShawn Lin if (!host->dms) 8343fc7eaefSShawn Lin return -ENOMEM; 8353fc7eaefSShawn Lin 8363fc7eaefSShawn Lin host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx"); 8373fc7eaefSShawn Lin if (!host->dms->ch) { 8384539d36eSDan Carpenter dev_err(host->dev, "Failed to get external DMA channel.\n"); 8393fc7eaefSShawn Lin kfree(host->dms); 8403fc7eaefSShawn Lin host->dms = NULL; 8413fc7eaefSShawn Lin return -ENXIO; 8423fc7eaefSShawn Lin } 8433fc7eaefSShawn Lin 8443fc7eaefSShawn Lin return 0; 8453fc7eaefSShawn Lin } 8463fc7eaefSShawn Lin 8473fc7eaefSShawn Lin static void dw_mci_edmac_exit(struct dw_mci *host) 8483fc7eaefSShawn Lin { 
8493fc7eaefSShawn Lin if (host->dms) { 8503fc7eaefSShawn Lin if (host->dms->ch) { 8513fc7eaefSShawn Lin dma_release_channel(host->dms->ch); 8523fc7eaefSShawn Lin host->dms->ch = NULL; 8533fc7eaefSShawn Lin } 8543fc7eaefSShawn Lin kfree(host->dms); 8553fc7eaefSShawn Lin host->dms = NULL; 8563fc7eaefSShawn Lin } 8573fc7eaefSShawn Lin } 8583fc7eaefSShawn Lin 8593fc7eaefSShawn Lin static const struct dw_mci_dma_ops dw_mci_edmac_ops = { 8603fc7eaefSShawn Lin .init = dw_mci_edmac_init, 8613fc7eaefSShawn Lin .exit = dw_mci_edmac_exit, 8623fc7eaefSShawn Lin .start = dw_mci_edmac_start_dma, 8633fc7eaefSShawn Lin .stop = dw_mci_edmac_stop_dma, 8643fc7eaefSShawn Lin .complete = dw_mci_dmac_complete_dma, 8653fc7eaefSShawn Lin .cleanup = dw_mci_dma_cleanup, 8663fc7eaefSShawn Lin }; 867885c3e80SSeungwon Jeon 8689aa51408SSeungwon Jeon static int dw_mci_pre_dma_transfer(struct dw_mci *host, 8699aa51408SSeungwon Jeon struct mmc_data *data, 870a4cc7eb4SJaehoon Chung int cookie) 871f95f3850SWill Newton { 872f95f3850SWill Newton struct scatterlist *sg; 8739aa51408SSeungwon Jeon unsigned int i, sg_len; 874f95f3850SWill Newton 875a4cc7eb4SJaehoon Chung if (data->host_cookie == COOKIE_PRE_MAPPED) 876a4cc7eb4SJaehoon Chung return data->sg_len; 877f95f3850SWill Newton 878f95f3850SWill Newton /* 879f95f3850SWill Newton * We don't do DMA on "complex" transfers, i.e. with 880f95f3850SWill Newton * non-word-aligned buffers or lengths. Also, we don't bother 881f95f3850SWill Newton * with all the DMA setup overhead for short transfers. 
882f95f3850SWill Newton */ 883f95f3850SWill Newton if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) 884f95f3850SWill Newton return -EINVAL; 8859aa51408SSeungwon Jeon 886f95f3850SWill Newton if (data->blksz & 3) 887f95f3850SWill Newton return -EINVAL; 888f95f3850SWill Newton 889f95f3850SWill Newton for_each_sg(data->sg, sg, data->sg_len, i) { 890f95f3850SWill Newton if (sg->offset & 3 || sg->length & 3) 891f95f3850SWill Newton return -EINVAL; 892f95f3850SWill Newton } 893f95f3850SWill Newton 8944a90920cSThomas Abraham sg_len = dma_map_sg(host->dev, 8959aa51408SSeungwon Jeon data->sg, 8969aa51408SSeungwon Jeon data->sg_len, 897feeef096SHeiner Kallweit mmc_get_dma_dir(data)); 8989aa51408SSeungwon Jeon if (sg_len == 0) 8999aa51408SSeungwon Jeon return -EINVAL; 9009aa51408SSeungwon Jeon 901a4cc7eb4SJaehoon Chung data->host_cookie = cookie; 9029aa51408SSeungwon Jeon 9039aa51408SSeungwon Jeon return sg_len; 9049aa51408SSeungwon Jeon } 9059aa51408SSeungwon Jeon 9069aa51408SSeungwon Jeon static void dw_mci_pre_req(struct mmc_host *mmc, 907d3c6aac3SLinus Walleij struct mmc_request *mrq) 9089aa51408SSeungwon Jeon { 9099aa51408SSeungwon Jeon struct dw_mci_slot *slot = mmc_priv(mmc); 9109aa51408SSeungwon Jeon struct mmc_data *data = mrq->data; 9119aa51408SSeungwon Jeon 9129aa51408SSeungwon Jeon if (!slot->host->use_dma || !data) 9139aa51408SSeungwon Jeon return; 9149aa51408SSeungwon Jeon 915a4cc7eb4SJaehoon Chung /* This data might be unmapped at this time */ 916a4cc7eb4SJaehoon Chung data->host_cookie = COOKIE_UNMAPPED; 9179aa51408SSeungwon Jeon 918a4cc7eb4SJaehoon Chung if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 919a4cc7eb4SJaehoon Chung COOKIE_PRE_MAPPED) < 0) 920a4cc7eb4SJaehoon Chung data->host_cookie = COOKIE_UNMAPPED; 9219aa51408SSeungwon Jeon } 9229aa51408SSeungwon Jeon 9239aa51408SSeungwon Jeon static void dw_mci_post_req(struct mmc_host *mmc, 9249aa51408SSeungwon Jeon struct mmc_request *mrq, 9259aa51408SSeungwon Jeon int err) 9269aa51408SSeungwon Jeon { 
9279aa51408SSeungwon Jeon struct dw_mci_slot *slot = mmc_priv(mmc); 9289aa51408SSeungwon Jeon struct mmc_data *data = mrq->data; 9299aa51408SSeungwon Jeon 9309aa51408SSeungwon Jeon if (!slot->host->use_dma || !data) 9319aa51408SSeungwon Jeon return; 9329aa51408SSeungwon Jeon 933a4cc7eb4SJaehoon Chung if (data->host_cookie != COOKIE_UNMAPPED) 9344a90920cSThomas Abraham dma_unmap_sg(slot->host->dev, 9359aa51408SSeungwon Jeon data->sg, 9369aa51408SSeungwon Jeon data->sg_len, 937feeef096SHeiner Kallweit mmc_get_dma_dir(data)); 938a4cc7eb4SJaehoon Chung data->host_cookie = COOKIE_UNMAPPED; 9399aa51408SSeungwon Jeon } 9409aa51408SSeungwon Jeon 941671fa142SShawn Lin static int dw_mci_get_cd(struct mmc_host *mmc) 942671fa142SShawn Lin { 943671fa142SShawn Lin int present; 944671fa142SShawn Lin struct dw_mci_slot *slot = mmc_priv(mmc); 945671fa142SShawn Lin struct dw_mci *host = slot->host; 946671fa142SShawn Lin int gpio_cd = mmc_gpio_get_cd(mmc); 947671fa142SShawn Lin 948671fa142SShawn Lin /* Use platform get_cd function, else try onboard card detect */ 949671fa142SShawn Lin if (((mmc->caps & MMC_CAP_NEEDS_POLL) 950671fa142SShawn Lin || !mmc_card_is_removable(mmc))) { 951671fa142SShawn Lin present = 1; 952671fa142SShawn Lin 953671fa142SShawn Lin if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { 954671fa142SShawn Lin if (mmc->caps & MMC_CAP_NEEDS_POLL) { 955671fa142SShawn Lin dev_info(&mmc->class_dev, 956671fa142SShawn Lin "card is polling.\n"); 957671fa142SShawn Lin } else { 958671fa142SShawn Lin dev_info(&mmc->class_dev, 959671fa142SShawn Lin "card is non-removable.\n"); 960671fa142SShawn Lin } 961671fa142SShawn Lin set_bit(DW_MMC_CARD_PRESENT, &slot->flags); 962671fa142SShawn Lin } 963671fa142SShawn Lin 964671fa142SShawn Lin return present; 965671fa142SShawn Lin } else if (gpio_cd >= 0) 966671fa142SShawn Lin present = gpio_cd; 967671fa142SShawn Lin else 968671fa142SShawn Lin present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) 969671fa142SShawn Lin == 0 ? 
1 : 0; 970671fa142SShawn Lin 971671fa142SShawn Lin spin_lock_bh(&host->lock); 972671fa142SShawn Lin if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags)) 973671fa142SShawn Lin dev_dbg(&mmc->class_dev, "card is present\n"); 974671fa142SShawn Lin else if (!present && 975671fa142SShawn Lin !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags)) 976671fa142SShawn Lin dev_dbg(&mmc->class_dev, "card is not present\n"); 977671fa142SShawn Lin spin_unlock_bh(&host->lock); 978671fa142SShawn Lin 979671fa142SShawn Lin return present; 980671fa142SShawn Lin } 981671fa142SShawn Lin 98252426899SSeungwon Jeon static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) 98352426899SSeungwon Jeon { 98452426899SSeungwon Jeon unsigned int blksz = data->blksz; 98527d70d36SColin Ian King static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; 98652426899SSeungwon Jeon u32 fifo_width = 1 << host->data_shift; 98752426899SSeungwon Jeon u32 blksz_depth = blksz / fifo_width, fifoth_val; 98852426899SSeungwon Jeon u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; 9890e3a22c0SShawn Lin int idx = ARRAY_SIZE(mszs) - 1; 99052426899SSeungwon Jeon 9913fc7eaefSShawn Lin /* PIO should skip this scenario */ 9923fc7eaefSShawn Lin if (!host->use_dma) 9933fc7eaefSShawn Lin return; 9943fc7eaefSShawn Lin 99552426899SSeungwon Jeon tx_wmark = (host->fifo_depth) / 2; 99652426899SSeungwon Jeon tx_wmark_invers = host->fifo_depth - tx_wmark; 99752426899SSeungwon Jeon 99852426899SSeungwon Jeon /* 99952426899SSeungwon Jeon * MSIZE is '1', 100052426899SSeungwon Jeon * if blksz is not a multiple of the FIFO width 100152426899SSeungwon Jeon */ 100220753569SShawn Lin if (blksz % fifo_width) 100352426899SSeungwon Jeon goto done; 100452426899SSeungwon Jeon 100552426899SSeungwon Jeon do { 100652426899SSeungwon Jeon if (!((blksz_depth % mszs[idx]) || 100752426899SSeungwon Jeon (tx_wmark_invers % mszs[idx]))) { 100852426899SSeungwon Jeon msize = idx; 100952426899SSeungwon Jeon rx_wmark
= mszs[idx] - 1; 101052426899SSeungwon Jeon break; 101152426899SSeungwon Jeon } 101252426899SSeungwon Jeon } while (--idx > 0); 101352426899SSeungwon Jeon /* 101452426899SSeungwon Jeon * If idx is '0', it won't be tried 101552426899SSeungwon Jeon * Thus, initial values are used 101652426899SSeungwon Jeon */ 101752426899SSeungwon Jeon done: 101852426899SSeungwon Jeon fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); 101952426899SSeungwon Jeon mci_writel(host, FIFOTH, fifoth_val); 102052426899SSeungwon Jeon } 102152426899SSeungwon Jeon 10227e4bf1bcSJaehoon Chung static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) 1023f1d2736cSSeungwon Jeon { 1024f1d2736cSSeungwon Jeon unsigned int blksz = data->blksz; 1025f1d2736cSSeungwon Jeon u32 blksz_depth, fifo_depth; 1026f1d2736cSSeungwon Jeon u16 thld_size; 10277e4bf1bcSJaehoon Chung u8 enable; 1028f1d2736cSSeungwon Jeon 102966dfd101SJames Hogan /* 103066dfd101SJames Hogan * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is 103166dfd101SJames Hogan * in the FIFO region, so we really shouldn't access it). 103266dfd101SJames Hogan */ 10337e4bf1bcSJaehoon Chung if (host->verid < DW_MMC_240A || 10347e4bf1bcSJaehoon Chung (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE)) 103566dfd101SJames Hogan return; 103666dfd101SJames Hogan 10377e4bf1bcSJaehoon Chung /* 10387e4bf1bcSJaehoon Chung * Card write Threshold is introduced since 2.80a 10397e4bf1bcSJaehoon Chung * It's used when HS400 mode is enabled. 
10407e4bf1bcSJaehoon Chung */ 10417e4bf1bcSJaehoon Chung if (data->flags & MMC_DATA_WRITE && 10427a6b9f4dSx00270170 host->timing != MMC_TIMING_MMC_HS400) 10437a6b9f4dSx00270170 goto disable; 10447e4bf1bcSJaehoon Chung 10457e4bf1bcSJaehoon Chung if (data->flags & MMC_DATA_WRITE) 10467e4bf1bcSJaehoon Chung enable = SDMMC_CARD_WR_THR_EN; 10477e4bf1bcSJaehoon Chung else 10487e4bf1bcSJaehoon Chung enable = SDMMC_CARD_RD_THR_EN; 10497e4bf1bcSJaehoon Chung 1050f1d2736cSSeungwon Jeon if (host->timing != MMC_TIMING_MMC_HS200 && 10517a6b9f4dSx00270170 host->timing != MMC_TIMING_UHS_SDR104 && 10527a6b9f4dSx00270170 host->timing != MMC_TIMING_MMC_HS400) 1053f1d2736cSSeungwon Jeon goto disable; 1054f1d2736cSSeungwon Jeon 1055f1d2736cSSeungwon Jeon blksz_depth = blksz / (1 << host->data_shift); 1056f1d2736cSSeungwon Jeon fifo_depth = host->fifo_depth; 1057f1d2736cSSeungwon Jeon 1058f1d2736cSSeungwon Jeon if (blksz_depth > fifo_depth) 1059f1d2736cSSeungwon Jeon goto disable; 1060f1d2736cSSeungwon Jeon 1061f1d2736cSSeungwon Jeon /* 1062f1d2736cSSeungwon Jeon * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz' 1063f1d2736cSSeungwon Jeon * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz 1064f1d2736cSSeungwon Jeon * Currently just choose blksz. 
1065f1d2736cSSeungwon Jeon */ 1066f1d2736cSSeungwon Jeon thld_size = blksz; 10677e4bf1bcSJaehoon Chung mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable)); 1068f1d2736cSSeungwon Jeon return; 1069f1d2736cSSeungwon Jeon 1070f1d2736cSSeungwon Jeon disable: 10717e4bf1bcSJaehoon Chung mci_writel(host, CDTHRCTL, 0); 1072f1d2736cSSeungwon Jeon } 1073f1d2736cSSeungwon Jeon 10749aa51408SSeungwon Jeon static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 10759aa51408SSeungwon Jeon { 1076f8c58c11SDoug Anderson unsigned long irqflags; 10779aa51408SSeungwon Jeon int sg_len; 10789aa51408SSeungwon Jeon u32 temp; 10799aa51408SSeungwon Jeon 10809aa51408SSeungwon Jeon host->using_dma = 0; 10819aa51408SSeungwon Jeon 10829aa51408SSeungwon Jeon /* If we don't have a channel, we can't do DMA */ 10839aa51408SSeungwon Jeon if (!host->use_dma) 10849aa51408SSeungwon Jeon return -ENODEV; 10859aa51408SSeungwon Jeon 1086a4cc7eb4SJaehoon Chung sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1087a99aa9b9SSeungwon Jeon if (sg_len < 0) { 1088a99aa9b9SSeungwon Jeon host->dma_ops->stop(host); 10899aa51408SSeungwon Jeon return sg_len; 1090a99aa9b9SSeungwon Jeon } 10919aa51408SSeungwon Jeon 109203e8cb53SJames Hogan host->using_dma = 1; 109303e8cb53SJames Hogan 10943fc7eaefSShawn Lin if (host->use_dma == TRANS_MODE_IDMAC) 10954a90920cSThomas Abraham dev_vdbg(host->dev, 1096f95f3850SWill Newton "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 10973fc7eaefSShawn Lin (unsigned long)host->sg_cpu, 10983fc7eaefSShawn Lin (unsigned long)host->sg_dma, 1099f95f3850SWill Newton sg_len); 1100f95f3850SWill Newton 110152426899SSeungwon Jeon /* 110252426899SSeungwon Jeon * Decide the MSIZE and RX/TX Watermark. 110352426899SSeungwon Jeon * If current block size is same with previous size, 110452426899SSeungwon Jeon * no need to update fifoth. 
110552426899SSeungwon Jeon */ 110652426899SSeungwon Jeon if (host->prev_blksz != data->blksz) 110752426899SSeungwon Jeon dw_mci_adjust_fifoth(host, data); 110852426899SSeungwon Jeon 1109f95f3850SWill Newton /* Enable the DMA interface */ 1110f95f3850SWill Newton temp = mci_readl(host, CTRL); 1111f95f3850SWill Newton temp |= SDMMC_CTRL_DMA_ENABLE; 1112f95f3850SWill Newton mci_writel(host, CTRL, temp); 1113f95f3850SWill Newton 1114f95f3850SWill Newton /* Disable RX/TX IRQs, let DMA handle it */ 1115f8c58c11SDoug Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 1116f95f3850SWill Newton temp = mci_readl(host, INTMASK); 1117f95f3850SWill Newton temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); 1118f95f3850SWill Newton mci_writel(host, INTMASK, temp); 1119f8c58c11SDoug Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 1120f95f3850SWill Newton 11213fc7eaefSShawn Lin if (host->dma_ops->start(host, sg_len)) { 1122647f80a1SJaehoon Chung host->dma_ops->stop(host); 1123d12d0cb1SShawn Lin /* We can't do DMA, try PIO for this one */ 1124d12d0cb1SShawn Lin dev_dbg(host->dev, 1125d12d0cb1SShawn Lin "%s: fall back to PIO mode for current transfer\n", 1126d12d0cb1SShawn Lin __func__); 11273fc7eaefSShawn Lin return -ENODEV; 11283fc7eaefSShawn Lin } 1129f95f3850SWill Newton 1130f95f3850SWill Newton return 0; 1131f95f3850SWill Newton } 1132f95f3850SWill Newton 1133f95f3850SWill Newton static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) 1134f95f3850SWill Newton { 1135f8c58c11SDoug Anderson unsigned long irqflags; 11360e3a22c0SShawn Lin int flags = SG_MITER_ATOMIC; 1137f95f3850SWill Newton u32 temp; 1138f95f3850SWill Newton 1139f95f3850SWill Newton data->error = -EINPROGRESS; 1140f95f3850SWill Newton 1141f95f3850SWill Newton WARN_ON(host->data); 1142f95f3850SWill Newton host->sg = NULL; 1143f95f3850SWill Newton host->data = data; 1144f95f3850SWill Newton 11457e4bf1bcSJaehoon Chung if (data->flags & MMC_DATA_READ) 114655c5efbcSJames Hogan host->dir_status 
= DW_MCI_RECV_STATUS; 11477e4bf1bcSJaehoon Chung else 114855c5efbcSJames Hogan host->dir_status = DW_MCI_SEND_STATUS; 11497e4bf1bcSJaehoon Chung 11507e4bf1bcSJaehoon Chung dw_mci_ctrl_thld(host, data); 115155c5efbcSJames Hogan 1152f95f3850SWill Newton if (dw_mci_submit_data_dma(host, data)) { 1153f9c2a0dcSSeungwon Jeon if (host->data->flags & MMC_DATA_READ) 1154f9c2a0dcSSeungwon Jeon flags |= SG_MITER_TO_SG; 1155f9c2a0dcSSeungwon Jeon else 1156f9c2a0dcSSeungwon Jeon flags |= SG_MITER_FROM_SG; 1157f9c2a0dcSSeungwon Jeon 1158f9c2a0dcSSeungwon Jeon sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1159f95f3850SWill Newton host->sg = data->sg; 116034b664a2SJames Hogan host->part_buf_start = 0; 116134b664a2SJames Hogan host->part_buf_count = 0; 1162f95f3850SWill Newton 1163b40af3aaSJames Hogan mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); 1164f8c58c11SDoug Anderson 1165f8c58c11SDoug Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 1166f95f3850SWill Newton temp = mci_readl(host, INTMASK); 1167f95f3850SWill Newton temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; 1168f95f3850SWill Newton mci_writel(host, INTMASK, temp); 1169f8c58c11SDoug Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 1170f95f3850SWill Newton 1171f95f3850SWill Newton temp = mci_readl(host, CTRL); 1172f95f3850SWill Newton temp &= ~SDMMC_CTRL_DMA_ENABLE; 1173f95f3850SWill Newton mci_writel(host, CTRL, temp); 117452426899SSeungwon Jeon 117552426899SSeungwon Jeon /* 1176d6fced83SJun Nie * Use the initial fifoth_val for PIO mode. If wm_aligned 1177d6fced83SJun Nie * is set, we set watermark same as data size. 117852426899SSeungwon Jeon * If next issued data may be transferred by DMA mode, 117952426899SSeungwon Jeon * prev_blksz should be invalidated. 
118052426899SSeungwon Jeon */ 1181d6fced83SJun Nie if (host->wm_aligned) 1182d6fced83SJun Nie dw_mci_adjust_fifoth(host, data); 1183d6fced83SJun Nie else 118452426899SSeungwon Jeon mci_writel(host, FIFOTH, host->fifoth_val); 118552426899SSeungwon Jeon host->prev_blksz = 0; 118652426899SSeungwon Jeon } else { 118752426899SSeungwon Jeon /* 118852426899SSeungwon Jeon * Keep the current block size. 118952426899SSeungwon Jeon * It will be used to decide whether to update 119052426899SSeungwon Jeon * fifoth register next time. 119152426899SSeungwon Jeon */ 119252426899SSeungwon Jeon host->prev_blksz = data->blksz; 1193f95f3850SWill Newton } 1194f95f3850SWill Newton } 1195f95f3850SWill Newton 1196ab269128SAbhilash Kesavan static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) 1197f95f3850SWill Newton { 1198f95f3850SWill Newton struct dw_mci *host = slot->host; 1199fdf492a1SDoug Anderson unsigned int clock = slot->clock; 1200f95f3850SWill Newton u32 div; 12019623b5b9SDoug Anderson u32 clk_en_a; 120201730558SDoug Anderson u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT; 120301730558SDoug Anderson 120401730558SDoug Anderson /* We must continue to set bit 28 in CMD until the change is complete */ 120501730558SDoug Anderson if (host->state == STATE_WAITING_CMD11_DONE) 120601730558SDoug Anderson sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH; 1207f95f3850SWill Newton 1208ff178981SShawn Lin slot->mmc->actual_clock = 0; 1209ff178981SShawn Lin 1210fdf492a1SDoug Anderson if (!clock) { 1211fdf492a1SDoug Anderson mci_writel(host, CLKENA, 0); 121201730558SDoug Anderson mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1213fdf492a1SDoug Anderson } else if (clock != host->current_speed || force_clkinit) { 1214fdf492a1SDoug Anderson div = host->bus_hz / clock; 1215fdf492a1SDoug Anderson if (host->bus_hz % clock && host->bus_hz > clock) 1216f95f3850SWill Newton /* 1217f95f3850SWill Newton * move the + 1 after the divide to prevent 1218f95f3850SWill Newton * over-clocking 
the card. 1219f95f3850SWill Newton */ 1220e419990bSSeungwon Jeon div += 1; 1221e419990bSSeungwon Jeon 1222fdf492a1SDoug Anderson div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; 1223f95f3850SWill Newton 1224e6cd7a8eSJaehoon Chung if ((clock != slot->__clk_old && 1225e6cd7a8eSJaehoon Chung !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) || 1226e6cd7a8eSJaehoon Chung force_clkinit) { 1227ce69e2feSShawn Lin /* Silent the verbose log if calling from PM context */ 1228ce69e2feSShawn Lin if (!force_clkinit) 1229f95f3850SWill Newton dev_info(&slot->mmc->class_dev, 1230fdf492a1SDoug Anderson "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", 1231fdf492a1SDoug Anderson slot->id, host->bus_hz, clock, 1232fdf492a1SDoug Anderson div ? ((host->bus_hz / div) >> 1) : 1233fdf492a1SDoug Anderson host->bus_hz, div); 1234f95f3850SWill Newton 1235e6cd7a8eSJaehoon Chung /* 1236e6cd7a8eSJaehoon Chung * If card is polling, display the message only 1237e6cd7a8eSJaehoon Chung * one time at boot time. 
1238e6cd7a8eSJaehoon Chung */ 1239e6cd7a8eSJaehoon Chung if (slot->mmc->caps & MMC_CAP_NEEDS_POLL && 1240e6cd7a8eSJaehoon Chung slot->mmc->f_min == clock) 1241e6cd7a8eSJaehoon Chung set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags); 1242e6cd7a8eSJaehoon Chung } 1243e6cd7a8eSJaehoon Chung 1244f95f3850SWill Newton /* disable clock */ 1245f95f3850SWill Newton mci_writel(host, CLKENA, 0); 1246f95f3850SWill Newton mci_writel(host, CLKSRC, 0); 1247f95f3850SWill Newton 1248f95f3850SWill Newton /* inform CIU */ 124901730558SDoug Anderson mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1250f95f3850SWill Newton 1251f95f3850SWill Newton /* set clock to desired speed */ 1252f95f3850SWill Newton mci_writel(host, CLKDIV, div); 1253f95f3850SWill Newton 1254f95f3850SWill Newton /* inform CIU */ 125501730558SDoug Anderson mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1256f95f3850SWill Newton 12579623b5b9SDoug Anderson /* enable clock; only low power if no SDIO */ 12589623b5b9SDoug Anderson clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; 1259b24c8b26SDoug Anderson if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) 12609623b5b9SDoug Anderson clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; 12619623b5b9SDoug Anderson mci_writel(host, CLKENA, clk_en_a); 1262f95f3850SWill Newton 1263f95f3850SWill Newton /* inform CIU */ 126401730558SDoug Anderson mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1265005d675aSJaehoon Chung 1266005d675aSJaehoon Chung /* keep the last clock value that was requested from core */ 1267005d675aSJaehoon Chung slot->__clk_old = clock; 1268ff178981SShawn Lin slot->mmc->actual_clock = div ? 
((host->bus_hz / div) >> 1) : 1269ff178981SShawn Lin host->bus_hz; 1270f95f3850SWill Newton } 1271f95f3850SWill Newton 1272fdf492a1SDoug Anderson host->current_speed = clock; 1273fdf492a1SDoug Anderson 1274f95f3850SWill Newton /* Set the current slot bus width */ 12751d56c453SSeungwon Jeon mci_writel(host, CTYPE, (slot->ctype << slot->id)); 1276f95f3850SWill Newton } 1277f95f3850SWill Newton 1278053b3ce6SSeungwon Jeon static void __dw_mci_start_request(struct dw_mci *host, 1279053b3ce6SSeungwon Jeon struct dw_mci_slot *slot, 1280053b3ce6SSeungwon Jeon struct mmc_command *cmd) 1281f95f3850SWill Newton { 1282f95f3850SWill Newton struct mmc_request *mrq; 1283f95f3850SWill Newton struct mmc_data *data; 1284f95f3850SWill Newton u32 cmdflags; 1285f95f3850SWill Newton 1286f95f3850SWill Newton mrq = slot->mrq; 1287f95f3850SWill Newton 1288f95f3850SWill Newton host->mrq = mrq; 1289f95f3850SWill Newton 1290f95f3850SWill Newton host->pending_events = 0; 1291f95f3850SWill Newton host->completed_events = 0; 1292e352c813SSeungwon Jeon host->cmd_status = 0; 1293f95f3850SWill Newton host->data_status = 0; 1294e352c813SSeungwon Jeon host->dir_status = 0; 1295f95f3850SWill Newton 1296053b3ce6SSeungwon Jeon data = cmd->data; 1297f95f3850SWill Newton if (data) { 1298f16afa88SJaehoon Chung mci_writel(host, TMOUT, 0xFFFFFFFF); 1299f95f3850SWill Newton mci_writel(host, BYTCNT, data->blksz*data->blocks); 1300f95f3850SWill Newton mci_writel(host, BLKSIZ, data->blksz); 1301f95f3850SWill Newton } 1302f95f3850SWill Newton 1303f95f3850SWill Newton cmdflags = dw_mci_prepare_command(slot->mmc, cmd); 1304f95f3850SWill Newton 1305f95f3850SWill Newton /* this is the first command, send the initialization clock */ 1306f95f3850SWill Newton if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags)) 1307f95f3850SWill Newton cmdflags |= SDMMC_CMD_INIT; 1308f95f3850SWill Newton 1309f95f3850SWill Newton if (data) { 1310f95f3850SWill Newton dw_mci_submit_data(host, data); 13110e3a22c0SShawn Lin wmb(); /* 
drain writebuffer */ 1312f95f3850SWill Newton } 1313f95f3850SWill Newton 1314f95f3850SWill Newton dw_mci_start_command(host, cmd, cmdflags); 1315f95f3850SWill Newton 13165c935165SDoug Anderson if (cmd->opcode == SD_SWITCH_VOLTAGE) { 131749ba0302SDoug Anderson unsigned long irqflags; 131849ba0302SDoug Anderson 13195c935165SDoug Anderson /* 13208886a6fdSDoug Anderson * Databook says to fail after 2ms w/ no response, but evidence 13218886a6fdSDoug Anderson * shows that sometimes the cmd11 interrupt takes over 130ms. 13228886a6fdSDoug Anderson * We'll set to 500ms, plus an extra jiffy just in case jiffies 13238886a6fdSDoug Anderson * is just about to roll over. 132449ba0302SDoug Anderson * 132549ba0302SDoug Anderson * We do this whole thing under spinlock and only if the 132649ba0302SDoug Anderson * command hasn't already completed (indicating that the irq 132749ba0302SDoug Anderson * already ran so we don't want the timeout). 13285c935165SDoug Anderson */ 132949ba0302SDoug Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 133049ba0302SDoug Anderson if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 13315c935165SDoug Anderson mod_timer(&host->cmd11_timer, 13328886a6fdSDoug Anderson jiffies + msecs_to_jiffies(500) + 1); 133349ba0302SDoug Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 13345c935165SDoug Anderson } 13355c935165SDoug Anderson 133690c2143aSSeungwon Jeon host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); 1337f95f3850SWill Newton } 1338f95f3850SWill Newton 1339053b3ce6SSeungwon Jeon static void dw_mci_start_request(struct dw_mci *host, 1340053b3ce6SSeungwon Jeon struct dw_mci_slot *slot) 1341053b3ce6SSeungwon Jeon { 1342053b3ce6SSeungwon Jeon struct mmc_request *mrq = slot->mrq; 1343053b3ce6SSeungwon Jeon struct mmc_command *cmd; 1344053b3ce6SSeungwon Jeon 1345053b3ce6SSeungwon Jeon cmd = mrq->sbc ? 
mrq->sbc : mrq->cmd; 1346053b3ce6SSeungwon Jeon __dw_mci_start_request(host, slot, cmd); 1347053b3ce6SSeungwon Jeon } 1348053b3ce6SSeungwon Jeon 13497456caaeSJames Hogan /* must be called with host->lock held */ 1350f95f3850SWill Newton static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, 1351f95f3850SWill Newton struct mmc_request *mrq) 1352f95f3850SWill Newton { 1353f95f3850SWill Newton dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", 1354f95f3850SWill Newton host->state); 1355f95f3850SWill Newton 1356f95f3850SWill Newton slot->mrq = mrq; 1357f95f3850SWill Newton 135801730558SDoug Anderson if (host->state == STATE_WAITING_CMD11_DONE) { 135901730558SDoug Anderson dev_warn(&slot->mmc->class_dev, 136001730558SDoug Anderson "Voltage change didn't complete\n"); 136101730558SDoug Anderson /* 136201730558SDoug Anderson * this case isn't expected to happen, so we can 136301730558SDoug Anderson * either crash here or just try to continue on 136401730558SDoug Anderson * in the closest possible state 136501730558SDoug Anderson */ 136601730558SDoug Anderson host->state = STATE_IDLE; 136701730558SDoug Anderson } 136801730558SDoug Anderson 1369f95f3850SWill Newton if (host->state == STATE_IDLE) { 1370f95f3850SWill Newton host->state = STATE_SENDING_CMD; 1371f95f3850SWill Newton dw_mci_start_request(host, slot); 1372f95f3850SWill Newton } else { 1373f95f3850SWill Newton list_add_tail(&slot->queue_node, &host->queue); 1374f95f3850SWill Newton } 1375f95f3850SWill Newton } 1376f95f3850SWill Newton 1377f95f3850SWill Newton static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) 1378f95f3850SWill Newton { 1379f95f3850SWill Newton struct dw_mci_slot *slot = mmc_priv(mmc); 1380f95f3850SWill Newton struct dw_mci *host = slot->host; 1381f95f3850SWill Newton 1382f95f3850SWill Newton WARN_ON(slot->mrq); 1383f95f3850SWill Newton 13847456caaeSJames Hogan /* 13857456caaeSJames Hogan * The check for card presence and queueing of the 
request must be 13867456caaeSJames Hogan * atomic, otherwise the card could be removed in between and the 13877456caaeSJames Hogan * request wouldn't fail until another card was inserted. 13887456caaeSJames Hogan */ 13897456caaeSJames Hogan 139056f6911cSShawn Lin if (!dw_mci_get_cd(mmc)) { 1391f95f3850SWill Newton mrq->cmd->error = -ENOMEDIUM; 1392f95f3850SWill Newton mmc_request_done(mmc, mrq); 1393f95f3850SWill Newton return; 1394f95f3850SWill Newton } 1395f95f3850SWill Newton 139656f6911cSShawn Lin spin_lock_bh(&host->lock); 139756f6911cSShawn Lin 1398f95f3850SWill Newton dw_mci_queue_request(host, slot, mrq); 13997456caaeSJames Hogan 14007456caaeSJames Hogan spin_unlock_bh(&host->lock); 1401f95f3850SWill Newton } 1402f95f3850SWill Newton 1403f95f3850SWill Newton static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1404f95f3850SWill Newton { 1405f95f3850SWill Newton struct dw_mci_slot *slot = mmc_priv(mmc); 1406e95baf13SArnd Bergmann const struct dw_mci_drv_data *drv_data = slot->host->drv_data; 140741babf75SJaehoon Chung u32 regs; 140851da2240SYuvaraj CD int ret; 1409f95f3850SWill Newton 1410f95f3850SWill Newton switch (ios->bus_width) { 1411f95f3850SWill Newton case MMC_BUS_WIDTH_4: 1412f95f3850SWill Newton slot->ctype = SDMMC_CTYPE_4BIT; 1413f95f3850SWill Newton break; 1414c9b2a06fSJaehoon Chung case MMC_BUS_WIDTH_8: 1415c9b2a06fSJaehoon Chung slot->ctype = SDMMC_CTYPE_8BIT; 1416c9b2a06fSJaehoon Chung break; 1417b2f7cb45SJaehoon Chung default: 1418b2f7cb45SJaehoon Chung /* set default 1 bit mode */ 1419b2f7cb45SJaehoon Chung slot->ctype = SDMMC_CTYPE_1BIT; 1420f95f3850SWill Newton } 1421f95f3850SWill Newton 142241babf75SJaehoon Chung regs = mci_readl(slot->host, UHS_REG); 14233f514291SSeungwon Jeon 14243f514291SSeungwon Jeon /* DDR mode set */ 142580113132SSeungwon Jeon if (ios->timing == MMC_TIMING_MMC_DDR52 || 14267cc8d580SJaehoon Chung ios->timing == MMC_TIMING_UHS_DDR50 || 142780113132SSeungwon Jeon ios->timing == MMC_TIMING_MMC_HS400) 
1428c69042a5SHyeonsu Kim regs |= ((0x1 << slot->id) << 16); 14293f514291SSeungwon Jeon else 1430c69042a5SHyeonsu Kim regs &= ~((0x1 << slot->id) << 16); 14313f514291SSeungwon Jeon 143241babf75SJaehoon Chung mci_writel(slot->host, UHS_REG, regs); 1433f1d2736cSSeungwon Jeon slot->host->timing = ios->timing; 143441babf75SJaehoon Chung 1435f95f3850SWill Newton /* 1436f95f3850SWill Newton * Use mirror of ios->clock to prevent race with mmc 1437f95f3850SWill Newton * core ios update when finding the minimum. 1438f95f3850SWill Newton */ 1439f95f3850SWill Newton slot->clock = ios->clock; 1440f95f3850SWill Newton 1441cb27a843SJames Hogan if (drv_data && drv_data->set_ios) 1442cb27a843SJames Hogan drv_data->set_ios(slot->host, ios); 1443800d78bfSThomas Abraham 1444f95f3850SWill Newton switch (ios->power_mode) { 1445f95f3850SWill Newton case MMC_POWER_UP: 144651da2240SYuvaraj CD if (!IS_ERR(mmc->supply.vmmc)) { 144751da2240SYuvaraj CD ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 144851da2240SYuvaraj CD ios->vdd); 144951da2240SYuvaraj CD if (ret) { 145051da2240SYuvaraj CD dev_err(slot->host->dev, 145151da2240SYuvaraj CD "failed to enable vmmc regulator\n"); 145251da2240SYuvaraj CD /*return, if failed turn on vmmc*/ 145351da2240SYuvaraj CD return; 145451da2240SYuvaraj CD } 145551da2240SYuvaraj CD } 145629d0d161SDoug Anderson set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); 145729d0d161SDoug Anderson regs = mci_readl(slot->host, PWREN); 145829d0d161SDoug Anderson regs |= (1 << slot->id); 145929d0d161SDoug Anderson mci_writel(slot->host, PWREN, regs); 146029d0d161SDoug Anderson break; 146129d0d161SDoug Anderson case MMC_POWER_ON: 1462d1f1dd86SDoug Anderson if (!slot->host->vqmmc_enabled) { 1463d1f1dd86SDoug Anderson if (!IS_ERR(mmc->supply.vqmmc)) { 146451da2240SYuvaraj CD ret = regulator_enable(mmc->supply.vqmmc); 146551da2240SYuvaraj CD if (ret < 0) 146651da2240SYuvaraj CD dev_err(slot->host->dev, 1467d1f1dd86SDoug Anderson "failed to enable vqmmc\n"); 146851da2240SYuvaraj 
CD else 146951da2240SYuvaraj CD slot->host->vqmmc_enabled = true; 1470d1f1dd86SDoug Anderson 1471d1f1dd86SDoug Anderson } else { 1472d1f1dd86SDoug Anderson /* Keep track so we don't reset again */ 1473d1f1dd86SDoug Anderson slot->host->vqmmc_enabled = true; 1474d1f1dd86SDoug Anderson } 1475d1f1dd86SDoug Anderson 1476d1f1dd86SDoug Anderson /* Reset our state machine after powering on */ 1477d1f1dd86SDoug Anderson dw_mci_ctrl_reset(slot->host, 1478d1f1dd86SDoug Anderson SDMMC_CTRL_ALL_RESET_FLAGS); 147951da2240SYuvaraj CD } 1480655babbdSDoug Anderson 1481655babbdSDoug Anderson /* Adjust clock / bus width after power is up */ 1482655babbdSDoug Anderson dw_mci_setup_bus(slot, false); 1483655babbdSDoug Anderson 1484e6f34e2fSJames Hogan break; 1485e6f34e2fSJames Hogan case MMC_POWER_OFF: 1486655babbdSDoug Anderson /* Turn clock off before power goes down */ 1487655babbdSDoug Anderson dw_mci_setup_bus(slot, false); 1488655babbdSDoug Anderson 148951da2240SYuvaraj CD if (!IS_ERR(mmc->supply.vmmc)) 149051da2240SYuvaraj CD mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 149151da2240SYuvaraj CD 1492d1f1dd86SDoug Anderson if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) 149351da2240SYuvaraj CD regulator_disable(mmc->supply.vqmmc); 149451da2240SYuvaraj CD slot->host->vqmmc_enabled = false; 149551da2240SYuvaraj CD 14964366dcc5SJaehoon Chung regs = mci_readl(slot->host, PWREN); 14974366dcc5SJaehoon Chung regs &= ~(1 << slot->id); 14984366dcc5SJaehoon Chung mci_writel(slot->host, PWREN, regs); 1499f95f3850SWill Newton break; 1500f95f3850SWill Newton default: 1501f95f3850SWill Newton break; 1502f95f3850SWill Newton } 1503655babbdSDoug Anderson 1504655babbdSDoug Anderson if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0) 1505655babbdSDoug Anderson slot->host->state = STATE_IDLE; 1506f95f3850SWill Newton } 1507f95f3850SWill Newton 150801730558SDoug Anderson static int dw_mci_card_busy(struct mmc_host *mmc) 150901730558SDoug Anderson { 151001730558SDoug 
Anderson struct dw_mci_slot *slot = mmc_priv(mmc); 151101730558SDoug Anderson u32 status; 151201730558SDoug Anderson 151301730558SDoug Anderson /* 151401730558SDoug Anderson * Check the busy bit which is low when DAT[3:0] 151501730558SDoug Anderson * (the data lines) are 0000 151601730558SDoug Anderson */ 151701730558SDoug Anderson status = mci_readl(slot->host, STATUS); 151801730558SDoug Anderson 151901730558SDoug Anderson return !!(status & SDMMC_STATUS_BUSY); 152001730558SDoug Anderson } 152101730558SDoug Anderson 152201730558SDoug Anderson static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) 152301730558SDoug Anderson { 152401730558SDoug Anderson struct dw_mci_slot *slot = mmc_priv(mmc); 152501730558SDoug Anderson struct dw_mci *host = slot->host; 15268f7849c4SZhangfei Gao const struct dw_mci_drv_data *drv_data = host->drv_data; 152701730558SDoug Anderson u32 uhs; 152801730558SDoug Anderson u32 v18 = SDMMC_UHS_18V << slot->id; 152901730558SDoug Anderson int ret; 153001730558SDoug Anderson 15318f7849c4SZhangfei Gao if (drv_data && drv_data->switch_voltage) 15328f7849c4SZhangfei Gao return drv_data->switch_voltage(mmc, ios); 15338f7849c4SZhangfei Gao 153401730558SDoug Anderson /* 153501730558SDoug Anderson * Program the voltage. Note that some instances of dw_mmc may use 153601730558SDoug Anderson * the UHS_REG for this. For other instances (like exynos) the UHS_REG 153701730558SDoug Anderson * does no harm but you need to set the regulator directly. Try both. 
153801730558SDoug Anderson */ 153901730558SDoug Anderson uhs = mci_readl(host, UHS_REG); 1540e0848f5dSDouglas Anderson if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) 154101730558SDoug Anderson uhs &= ~v18; 1542e0848f5dSDouglas Anderson else 154301730558SDoug Anderson uhs |= v18; 1544e0848f5dSDouglas Anderson 154501730558SDoug Anderson if (!IS_ERR(mmc->supply.vqmmc)) { 1546e0848f5dSDouglas Anderson ret = mmc_regulator_set_vqmmc(mmc, ios); 154701730558SDoug Anderson 154801730558SDoug Anderson if (ret) { 1549b19caf37SDoug Anderson dev_dbg(&mmc->class_dev, 1550e0848f5dSDouglas Anderson "Regulator set error %d - %s V\n", 1551e0848f5dSDouglas Anderson ret, uhs & v18 ? "1.8" : "3.3"); 155201730558SDoug Anderson return ret; 155301730558SDoug Anderson } 155401730558SDoug Anderson } 155501730558SDoug Anderson mci_writel(host, UHS_REG, uhs); 155601730558SDoug Anderson 155701730558SDoug Anderson return 0; 155801730558SDoug Anderson } 155901730558SDoug Anderson 1560f95f3850SWill Newton static int dw_mci_get_ro(struct mmc_host *mmc) 1561f95f3850SWill Newton { 1562f95f3850SWill Newton int read_only; 1563f95f3850SWill Newton struct dw_mci_slot *slot = mmc_priv(mmc); 15649795a846SJaehoon Chung int gpio_ro = mmc_gpio_get_ro(mmc); 1565f95f3850SWill Newton 1566f95f3850SWill Newton /* Use platform get_ro function, else try on board write protect */ 1567287980e4SArnd Bergmann if (gpio_ro >= 0) 15689795a846SJaehoon Chung read_only = gpio_ro; 1569f95f3850SWill Newton else 1570f95f3850SWill Newton read_only = 1571f95f3850SWill Newton mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; 1572f95f3850SWill Newton 1573f95f3850SWill Newton dev_dbg(&mmc->class_dev, "card is %s\n", 1574f95f3850SWill Newton read_only ? 
"read-only" : "read-write"); 1575f95f3850SWill Newton 1576f95f3850SWill Newton return read_only; 1577f95f3850SWill Newton } 1578f95f3850SWill Newton 1579935a665eSShawn Lin static void dw_mci_hw_reset(struct mmc_host *mmc) 1580935a665eSShawn Lin { 1581935a665eSShawn Lin struct dw_mci_slot *slot = mmc_priv(mmc); 1582935a665eSShawn Lin struct dw_mci *host = slot->host; 1583935a665eSShawn Lin int reset; 1584935a665eSShawn Lin 1585935a665eSShawn Lin if (host->use_dma == TRANS_MODE_IDMAC) 1586935a665eSShawn Lin dw_mci_idmac_reset(host); 1587935a665eSShawn Lin 1588935a665eSShawn Lin if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET | 1589935a665eSShawn Lin SDMMC_CTRL_FIFO_RESET)) 1590935a665eSShawn Lin return; 1591935a665eSShawn Lin 1592935a665eSShawn Lin /* 1593935a665eSShawn Lin * According to eMMC spec, card reset procedure: 1594935a665eSShawn Lin * tRstW >= 1us: RST_n pulse width 1595935a665eSShawn Lin * tRSCA >= 200us: RST_n to Command time 1596935a665eSShawn Lin * tRSTH >= 1us: RST_n high period 1597935a665eSShawn Lin */ 1598935a665eSShawn Lin reset = mci_readl(host, RST_N); 1599935a665eSShawn Lin reset &= ~(SDMMC_RST_HWACTIVE << slot->id); 1600935a665eSShawn Lin mci_writel(host, RST_N, reset); 1601935a665eSShawn Lin usleep_range(1, 2); 1602935a665eSShawn Lin reset |= SDMMC_RST_HWACTIVE << slot->id; 1603935a665eSShawn Lin mci_writel(host, RST_N, reset); 1604935a665eSShawn Lin usleep_range(200, 300); 1605935a665eSShawn Lin } 1606935a665eSShawn Lin 1607b24c8b26SDoug Anderson static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card) 1608b24c8b26SDoug Anderson { 1609b24c8b26SDoug Anderson struct dw_mci_slot *slot = mmc_priv(mmc); 1610b24c8b26SDoug Anderson struct dw_mci *host = slot->host; 1611b24c8b26SDoug Anderson 16129623b5b9SDoug Anderson /* 16139623b5b9SDoug Anderson * Low power mode will stop the card clock when idle. 
According to the 16149623b5b9SDoug Anderson * description of the CLKENA register we should disable low power mode 16159623b5b9SDoug Anderson * for SDIO cards if we need SDIO interrupts to work. 16169623b5b9SDoug Anderson */ 1617b24c8b26SDoug Anderson if (mmc->caps & MMC_CAP_SDIO_IRQ) { 16189623b5b9SDoug Anderson const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; 1619b24c8b26SDoug Anderson u32 clk_en_a_old; 1620b24c8b26SDoug Anderson u32 clk_en_a; 16219623b5b9SDoug Anderson 1622b24c8b26SDoug Anderson clk_en_a_old = mci_readl(host, CLKENA); 16239623b5b9SDoug Anderson 1624b24c8b26SDoug Anderson if (card->type == MMC_TYPE_SDIO || 1625b24c8b26SDoug Anderson card->type == MMC_TYPE_SD_COMBO) { 1626b24c8b26SDoug Anderson set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1627b24c8b26SDoug Anderson clk_en_a = clk_en_a_old & ~clken_low_pwr; 1628b24c8b26SDoug Anderson } else { 1629b24c8b26SDoug Anderson clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags); 1630b24c8b26SDoug Anderson clk_en_a = clk_en_a_old | clken_low_pwr; 1631b24c8b26SDoug Anderson } 1632b24c8b26SDoug Anderson 1633b24c8b26SDoug Anderson if (clk_en_a != clk_en_a_old) { 1634b24c8b26SDoug Anderson mci_writel(host, CLKENA, clk_en_a); 16359623b5b9SDoug Anderson mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | 16369623b5b9SDoug Anderson SDMMC_CMD_PRV_DAT_WAIT, 0); 16379623b5b9SDoug Anderson } 16389623b5b9SDoug Anderson } 1639b24c8b26SDoug Anderson } 16409623b5b9SDoug Anderson 164132dba737SUlf Hansson static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb) 16421a5c8e1fSShashidhar Hiremath { 16431a5c8e1fSShashidhar Hiremath struct dw_mci *host = slot->host; 1644f8c58c11SDoug Anderson unsigned long irqflags; 16451a5c8e1fSShashidhar Hiremath u32 int_mask; 16461a5c8e1fSShashidhar Hiremath 1647f8c58c11SDoug Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 1648f8c58c11SDoug Anderson 16491a5c8e1fSShashidhar Hiremath /* Enable/disable Slot Specific SDIO interrupt */ 16501a5c8e1fSShashidhar Hiremath int_mask = 
mci_readl(host, INTMASK); 1651b24c8b26SDoug Anderson if (enb) 1652b24c8b26SDoug Anderson int_mask |= SDMMC_INT_SDIO(slot->sdio_id); 1653b24c8b26SDoug Anderson else 1654b24c8b26SDoug Anderson int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id); 1655b24c8b26SDoug Anderson mci_writel(host, INTMASK, int_mask); 1656f8c58c11SDoug Anderson 1657f8c58c11SDoug Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 16581a5c8e1fSShashidhar Hiremath } 16591a5c8e1fSShashidhar Hiremath 166032dba737SUlf Hansson static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) 166132dba737SUlf Hansson { 166232dba737SUlf Hansson struct dw_mci_slot *slot = mmc_priv(mmc); 1663ca8971caSUlf Hansson struct dw_mci *host = slot->host; 166432dba737SUlf Hansson 166532dba737SUlf Hansson __dw_mci_enable_sdio_irq(slot, enb); 1666ca8971caSUlf Hansson 1667ca8971caSUlf Hansson /* Avoid runtime suspending the device when SDIO IRQ is enabled */ 1668ca8971caSUlf Hansson if (enb) 1669ca8971caSUlf Hansson pm_runtime_get_noresume(host->dev); 1670ca8971caSUlf Hansson else 1671ca8971caSUlf Hansson pm_runtime_put_noidle(host->dev); 167232dba737SUlf Hansson } 167332dba737SUlf Hansson 167432dba737SUlf Hansson static void dw_mci_ack_sdio_irq(struct mmc_host *mmc) 167532dba737SUlf Hansson { 167632dba737SUlf Hansson struct dw_mci_slot *slot = mmc_priv(mmc); 167732dba737SUlf Hansson 167832dba737SUlf Hansson __dw_mci_enable_sdio_irq(slot, 1); 167932dba737SUlf Hansson } 168032dba737SUlf Hansson 16810976f16dSSeungwon Jeon static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode) 16820976f16dSSeungwon Jeon { 16830976f16dSSeungwon Jeon struct dw_mci_slot *slot = mmc_priv(mmc); 16840976f16dSSeungwon Jeon struct dw_mci *host = slot->host; 16850976f16dSSeungwon Jeon const struct dw_mci_drv_data *drv_data = host->drv_data; 16860e3a22c0SShawn Lin int err = -EINVAL; 16870976f16dSSeungwon Jeon 16880976f16dSSeungwon Jeon if (drv_data && drv_data->execute_tuning) 16899979dbe5SChaotian Jing err = 
drv_data->execute_tuning(slot, opcode); 16900976f16dSSeungwon Jeon return err; 16910976f16dSSeungwon Jeon } 16920976f16dSSeungwon Jeon 16930e3a22c0SShawn Lin static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, 16940e3a22c0SShawn Lin struct mmc_ios *ios) 169580113132SSeungwon Jeon { 169680113132SSeungwon Jeon struct dw_mci_slot *slot = mmc_priv(mmc); 169780113132SSeungwon Jeon struct dw_mci *host = slot->host; 169880113132SSeungwon Jeon const struct dw_mci_drv_data *drv_data = host->drv_data; 169980113132SSeungwon Jeon 170080113132SSeungwon Jeon if (drv_data && drv_data->prepare_hs400_tuning) 170180113132SSeungwon Jeon return drv_data->prepare_hs400_tuning(host, ios); 170280113132SSeungwon Jeon 170380113132SSeungwon Jeon return 0; 170480113132SSeungwon Jeon } 170580113132SSeungwon Jeon 17064e7392b2SShawn Lin static bool dw_mci_reset(struct dw_mci *host) 17074e7392b2SShawn Lin { 17084e7392b2SShawn Lin u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET; 17094e7392b2SShawn Lin bool ret = false; 1710bc2dcc1aSShawn Lin u32 status = 0; 17114e7392b2SShawn Lin 17124e7392b2SShawn Lin /* 17134e7392b2SShawn Lin * Resetting generates a block interrupt, hence setting 17144e7392b2SShawn Lin * the scatter-gather pointer to NULL. 17154e7392b2SShawn Lin */ 17164e7392b2SShawn Lin if (host->sg) { 17174e7392b2SShawn Lin sg_miter_stop(&host->sg_miter); 17184e7392b2SShawn Lin host->sg = NULL; 17194e7392b2SShawn Lin } 17204e7392b2SShawn Lin 17214e7392b2SShawn Lin if (host->use_dma) 17224e7392b2SShawn Lin flags |= SDMMC_CTRL_DMA_RESET; 17234e7392b2SShawn Lin 17244e7392b2SShawn Lin if (dw_mci_ctrl_reset(host, flags)) { 17254e7392b2SShawn Lin /* 1726bc2dcc1aSShawn Lin * In all cases we clear the RAWINTS 1727bc2dcc1aSShawn Lin * register to clear any interrupts. 
17284e7392b2SShawn Lin */ 17294e7392b2SShawn Lin mci_writel(host, RINTSTS, 0xFFFFFFFF); 17304e7392b2SShawn Lin 1731bc2dcc1aSShawn Lin if (!host->use_dma) { 1732bc2dcc1aSShawn Lin ret = true; 1733bc2dcc1aSShawn Lin goto ciu_out; 1734bc2dcc1aSShawn Lin } 17354e7392b2SShawn Lin 1736bc2dcc1aSShawn Lin /* Wait for dma_req to be cleared */ 17374e7392b2SShawn Lin if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, 17384e7392b2SShawn Lin status, 17394e7392b2SShawn Lin !(status & SDMMC_STATUS_DMA_REQ), 17404e7392b2SShawn Lin 1, 500 * USEC_PER_MSEC)) { 17414e7392b2SShawn Lin dev_err(host->dev, 1742bc2dcc1aSShawn Lin "%s: Timeout waiting for dma_req to be cleared\n", 17434e7392b2SShawn Lin __func__); 17444e7392b2SShawn Lin goto ciu_out; 17454e7392b2SShawn Lin } 17464e7392b2SShawn Lin 17474e7392b2SShawn Lin /* when using DMA next we reset the fifo again */ 17484e7392b2SShawn Lin if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET)) 17494e7392b2SShawn Lin goto ciu_out; 17504e7392b2SShawn Lin } else { 17514e7392b2SShawn Lin /* if the controller reset bit did clear, then set clock regs */ 17524e7392b2SShawn Lin if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) { 17534e7392b2SShawn Lin dev_err(host->dev, 17544e7392b2SShawn Lin "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n", 17554e7392b2SShawn Lin __func__); 17564e7392b2SShawn Lin goto ciu_out; 17574e7392b2SShawn Lin } 17584e7392b2SShawn Lin } 17594e7392b2SShawn Lin 17604e7392b2SShawn Lin if (host->use_dma == TRANS_MODE_IDMAC) 176147b7de2fSEvgeniy Didin /* It is also required that we reinit idmac */ 176247b7de2fSEvgeniy Didin dw_mci_idmac_init(host); 17634e7392b2SShawn Lin 17644e7392b2SShawn Lin ret = true; 17654e7392b2SShawn Lin 17664e7392b2SShawn Lin ciu_out: 17674e7392b2SShawn Lin /* After a CTRL reset we need to have CIU set clock registers */ 176842f989c0SJaehoon Chung mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0); 17694e7392b2SShawn Lin 17704e7392b2SShawn Lin return ret; 17714e7392b2SShawn 
Lin } 17724e7392b2SShawn Lin 1773f95f3850SWill Newton static const struct mmc_host_ops dw_mci_ops = { 1774f95f3850SWill Newton .request = dw_mci_request, 17759aa51408SSeungwon Jeon .pre_req = dw_mci_pre_req, 17769aa51408SSeungwon Jeon .post_req = dw_mci_post_req, 1777f95f3850SWill Newton .set_ios = dw_mci_set_ios, 1778f95f3850SWill Newton .get_ro = dw_mci_get_ro, 1779f95f3850SWill Newton .get_cd = dw_mci_get_cd, 1780935a665eSShawn Lin .hw_reset = dw_mci_hw_reset, 17811a5c8e1fSShashidhar Hiremath .enable_sdio_irq = dw_mci_enable_sdio_irq, 178232dba737SUlf Hansson .ack_sdio_irq = dw_mci_ack_sdio_irq, 17830976f16dSSeungwon Jeon .execute_tuning = dw_mci_execute_tuning, 178401730558SDoug Anderson .card_busy = dw_mci_card_busy, 178501730558SDoug Anderson .start_signal_voltage_switch = dw_mci_switch_voltage, 1786b24c8b26SDoug Anderson .init_card = dw_mci_init_card, 178780113132SSeungwon Jeon .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning, 1788f95f3850SWill Newton }; 1789f95f3850SWill Newton 1790f95f3850SWill Newton static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) 1791f95f3850SWill Newton __releases(&host->lock) 1792f95f3850SWill Newton __acquires(&host->lock) 1793f95f3850SWill Newton { 1794f95f3850SWill Newton struct dw_mci_slot *slot; 179542f989c0SJaehoon Chung struct mmc_host *prev_mmc = host->slot->mmc; 1796f95f3850SWill Newton 1797f95f3850SWill Newton WARN_ON(host->cmd || host->data); 1798f95f3850SWill Newton 179942f989c0SJaehoon Chung host->slot->mrq = NULL; 1800f95f3850SWill Newton host->mrq = NULL; 1801f95f3850SWill Newton if (!list_empty(&host->queue)) { 1802f95f3850SWill Newton slot = list_entry(host->queue.next, 1803f95f3850SWill Newton struct dw_mci_slot, queue_node); 1804f95f3850SWill Newton list_del(&slot->queue_node); 18054a90920cSThomas Abraham dev_vdbg(host->dev, "list not empty: %s is next\n", 1806f95f3850SWill Newton mmc_hostname(slot->mmc)); 1807f95f3850SWill Newton host->state = STATE_SENDING_CMD; 1808f95f3850SWill 
Newton dw_mci_start_request(host, slot); 1809f95f3850SWill Newton } else { 18104a90920cSThomas Abraham dev_vdbg(host->dev, "list empty\n"); 181101730558SDoug Anderson 181201730558SDoug Anderson if (host->state == STATE_SENDING_CMD11) 181301730558SDoug Anderson host->state = STATE_WAITING_CMD11_DONE; 181401730558SDoug Anderson else 1815f95f3850SWill Newton host->state = STATE_IDLE; 1816f95f3850SWill Newton } 1817f95f3850SWill Newton 1818f95f3850SWill Newton spin_unlock(&host->lock); 1819f95f3850SWill Newton mmc_request_done(prev_mmc, mrq); 1820f95f3850SWill Newton spin_lock(&host->lock); 1821f95f3850SWill Newton } 1822f95f3850SWill Newton 1823e352c813SSeungwon Jeon static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) 1824f95f3850SWill Newton { 1825f95f3850SWill Newton u32 status = host->cmd_status; 1826f95f3850SWill Newton 1827f95f3850SWill Newton host->cmd_status = 0; 1828f95f3850SWill Newton 1829f95f3850SWill Newton /* Read the response from the card (up to 16 bytes) */ 1830f95f3850SWill Newton if (cmd->flags & MMC_RSP_PRESENT) { 1831f95f3850SWill Newton if (cmd->flags & MMC_RSP_136) { 1832f95f3850SWill Newton cmd->resp[3] = mci_readl(host, RESP0); 1833f95f3850SWill Newton cmd->resp[2] = mci_readl(host, RESP1); 1834f95f3850SWill Newton cmd->resp[1] = mci_readl(host, RESP2); 1835f95f3850SWill Newton cmd->resp[0] = mci_readl(host, RESP3); 1836f95f3850SWill Newton } else { 1837f95f3850SWill Newton cmd->resp[0] = mci_readl(host, RESP0); 1838f95f3850SWill Newton cmd->resp[1] = 0; 1839f95f3850SWill Newton cmd->resp[2] = 0; 1840f95f3850SWill Newton cmd->resp[3] = 0; 1841f95f3850SWill Newton } 1842f95f3850SWill Newton } 1843f95f3850SWill Newton 1844f95f3850SWill Newton if (status & SDMMC_INT_RTO) 1845f95f3850SWill Newton cmd->error = -ETIMEDOUT; 1846f95f3850SWill Newton else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)) 1847f95f3850SWill Newton cmd->error = -EILSEQ; 1848f95f3850SWill Newton else if (status & SDMMC_INT_RESP_ERR) 
1849f95f3850SWill Newton cmd->error = -EIO; 1850f95f3850SWill Newton else 1851f95f3850SWill Newton cmd->error = 0; 1852f95f3850SWill Newton 1853e352c813SSeungwon Jeon return cmd->error; 1854e352c813SSeungwon Jeon } 1855e352c813SSeungwon Jeon 1856e352c813SSeungwon Jeon static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) 1857e352c813SSeungwon Jeon { 185831bff450SSeungwon Jeon u32 status = host->data_status; 1859e352c813SSeungwon Jeon 1860e352c813SSeungwon Jeon if (status & DW_MCI_DATA_ERROR_FLAGS) { 1861e352c813SSeungwon Jeon if (status & SDMMC_INT_DRTO) { 1862e352c813SSeungwon Jeon data->error = -ETIMEDOUT; 1863e352c813SSeungwon Jeon } else if (status & SDMMC_INT_DCRC) { 1864e352c813SSeungwon Jeon data->error = -EILSEQ; 1865e352c813SSeungwon Jeon } else if (status & SDMMC_INT_EBE) { 1866e352c813SSeungwon Jeon if (host->dir_status == 1867e352c813SSeungwon Jeon DW_MCI_SEND_STATUS) { 1868e352c813SSeungwon Jeon /* 1869e352c813SSeungwon Jeon * No data CRC status was returned. 1870e352c813SSeungwon Jeon * The number of bytes transferred 1871e352c813SSeungwon Jeon * will be exaggerated in PIO mode. 
1872e352c813SSeungwon Jeon */ 1873e352c813SSeungwon Jeon data->bytes_xfered = 0; 1874e352c813SSeungwon Jeon data->error = -ETIMEDOUT; 1875e352c813SSeungwon Jeon } else if (host->dir_status == 1876e352c813SSeungwon Jeon DW_MCI_RECV_STATUS) { 1877e7a1dec1SShawn Lin data->error = -EILSEQ; 1878e352c813SSeungwon Jeon } 1879e352c813SSeungwon Jeon } else { 1880e352c813SSeungwon Jeon /* SDMMC_INT_SBE is included */ 1881e7a1dec1SShawn Lin data->error = -EILSEQ; 1882e352c813SSeungwon Jeon } 1883e352c813SSeungwon Jeon 1884e6cc0123SDoug Anderson dev_dbg(host->dev, "data error, status 0x%08x\n", status); 1885e352c813SSeungwon Jeon 1886e352c813SSeungwon Jeon /* 1887e352c813SSeungwon Jeon * After an error, there may be data lingering 188831bff450SSeungwon Jeon * in the FIFO 1889e352c813SSeungwon Jeon */ 18903a33a94cSSonny Rao dw_mci_reset(host); 1891e352c813SSeungwon Jeon } else { 1892e352c813SSeungwon Jeon data->bytes_xfered = data->blocks * data->blksz; 1893e352c813SSeungwon Jeon data->error = 0; 1894e352c813SSeungwon Jeon } 1895e352c813SSeungwon Jeon 1896e352c813SSeungwon Jeon return data->error; 1897f95f3850SWill Newton } 1898f95f3850SWill Newton 189957e10486SAddy Ke static void dw_mci_set_drto(struct dw_mci *host) 190057e10486SAddy Ke { 190157e10486SAddy Ke unsigned int drto_clks; 19029d9491a7SDouglas Anderson unsigned int drto_div; 190357e10486SAddy Ke unsigned int drto_ms; 190493c23ae3SDouglas Anderson unsigned long irqflags; 190557e10486SAddy Ke 190657e10486SAddy Ke drto_clks = mci_readl(host, TMOUT) >> 8; 19079d9491a7SDouglas Anderson drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; 19089d9491a7SDouglas Anderson if (drto_div == 0) 19099d9491a7SDouglas Anderson drto_div = 1; 1910c7151602SEvgeniy Didin 1911c7151602SEvgeniy Didin drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div, 19129d9491a7SDouglas Anderson host->bus_hz); 191357e10486SAddy Ke 191457e10486SAddy Ke /* add a bit spare time */ 191557e10486SAddy Ke drto_ms += 10; 191657e10486SAddy Ke 
191793c23ae3SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 191893c23ae3SDouglas Anderson if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) 191993c23ae3SDouglas Anderson mod_timer(&host->dto_timer, 192093c23ae3SDouglas Anderson jiffies + msecs_to_jiffies(drto_ms)); 192193c23ae3SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 192257e10486SAddy Ke } 192357e10486SAddy Ke 19248892b705SDouglas Anderson static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host) 19258892b705SDouglas Anderson { 19268892b705SDouglas Anderson if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 19278892b705SDouglas Anderson return false; 19288892b705SDouglas Anderson 19298892b705SDouglas Anderson /* 19308892b705SDouglas Anderson * Really be certain that the timer has stopped. This is a bit of 19318892b705SDouglas Anderson * paranoia and could only really happen if we had really bad 19328892b705SDouglas Anderson * interrupt latency and the interrupt routine and timeout were 19338892b705SDouglas Anderson * running concurrently so that the del_timer() in the interrupt 19348892b705SDouglas Anderson * handler couldn't run. 
19358892b705SDouglas Anderson */ 19368892b705SDouglas Anderson WARN_ON(del_timer_sync(&host->cto_timer)); 19378892b705SDouglas Anderson clear_bit(EVENT_CMD_COMPLETE, &host->pending_events); 19388892b705SDouglas Anderson 19398892b705SDouglas Anderson return true; 19408892b705SDouglas Anderson } 19418892b705SDouglas Anderson 194293c23ae3SDouglas Anderson static bool dw_mci_clear_pending_data_complete(struct dw_mci *host) 194393c23ae3SDouglas Anderson { 194493c23ae3SDouglas Anderson if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) 194593c23ae3SDouglas Anderson return false; 194693c23ae3SDouglas Anderson 194793c23ae3SDouglas Anderson /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */ 194893c23ae3SDouglas Anderson WARN_ON(del_timer_sync(&host->dto_timer)); 194993c23ae3SDouglas Anderson clear_bit(EVENT_DATA_COMPLETE, &host->pending_events); 195093c23ae3SDouglas Anderson 195193c23ae3SDouglas Anderson return true; 195293c23ae3SDouglas Anderson } 195393c23ae3SDouglas Anderson 1954f95f3850SWill Newton static void dw_mci_tasklet_func(unsigned long priv) 1955f95f3850SWill Newton { 1956f95f3850SWill Newton struct dw_mci *host = (struct dw_mci *)priv; 1957f95f3850SWill Newton struct mmc_data *data; 1958f95f3850SWill Newton struct mmc_command *cmd; 1959e352c813SSeungwon Jeon struct mmc_request *mrq; 1960f95f3850SWill Newton enum dw_mci_state state; 1961f95f3850SWill Newton enum dw_mci_state prev_state; 1962e352c813SSeungwon Jeon unsigned int err; 1963f95f3850SWill Newton 1964f95f3850SWill Newton spin_lock(&host->lock); 1965f95f3850SWill Newton 1966f95f3850SWill Newton state = host->state; 1967f95f3850SWill Newton data = host->data; 1968e352c813SSeungwon Jeon mrq = host->mrq; 1969f95f3850SWill Newton 1970f95f3850SWill Newton do { 1971f95f3850SWill Newton prev_state = state; 1972f95f3850SWill Newton 1973f95f3850SWill Newton switch (state) { 1974f95f3850SWill Newton case STATE_IDLE: 197501730558SDoug Anderson case STATE_WAITING_CMD11_DONE: 1976f95f3850SWill 
Newton break; 1977f95f3850SWill Newton 197801730558SDoug Anderson case STATE_SENDING_CMD11: 1979f95f3850SWill Newton case STATE_SENDING_CMD: 19808892b705SDouglas Anderson if (!dw_mci_clear_pending_cmd_complete(host)) 1981f95f3850SWill Newton break; 1982f95f3850SWill Newton 1983f95f3850SWill Newton cmd = host->cmd; 1984f95f3850SWill Newton host->cmd = NULL; 1985f95f3850SWill Newton set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 1986e352c813SSeungwon Jeon err = dw_mci_command_complete(host, cmd); 1987e352c813SSeungwon Jeon if (cmd == mrq->sbc && !err) { 198842f989c0SJaehoon Chung __dw_mci_start_request(host, host->slot, 1989e352c813SSeungwon Jeon mrq->cmd); 1990053b3ce6SSeungwon Jeon goto unlock; 1991053b3ce6SSeungwon Jeon } 1992053b3ce6SSeungwon Jeon 1993e352c813SSeungwon Jeon if (cmd->data && err) { 199446d17952SDoug Anderson /* 199546d17952SDoug Anderson * During UHS tuning sequence, sending the stop 199646d17952SDoug Anderson * command after the response CRC error would 199746d17952SDoug Anderson * throw the system into a confused state 199846d17952SDoug Anderson * causing all future tuning phases to report 199946d17952SDoug Anderson * failure. 200046d17952SDoug Anderson * 200146d17952SDoug Anderson * In such case controller will move into a data 200246d17952SDoug Anderson * transfer state after a response error or 200346d17952SDoug Anderson * response CRC error. Let's let that finish 200446d17952SDoug Anderson * before trying to send a stop, so we'll go to 200546d17952SDoug Anderson * STATE_SENDING_DATA. 200646d17952SDoug Anderson * 200746d17952SDoug Anderson * Although letting the data transfer take place 200846d17952SDoug Anderson * will waste a bit of time (we already know 200946d17952SDoug Anderson * the command was bad), it can't cause any 201046d17952SDoug Anderson * errors since it's possible it would have 201146d17952SDoug Anderson * taken place anyway if this tasklet got 201246d17952SDoug Anderson * delayed. 
Allowing the transfer to take place 201346d17952SDoug Anderson * avoids races and keeps things simple. 201446d17952SDoug Anderson */ 2015ba2d139bSDouglas Anderson if (err != -ETIMEDOUT) { 201646d17952SDoug Anderson state = STATE_SENDING_DATA; 201746d17952SDoug Anderson continue; 201846d17952SDoug Anderson } 201946d17952SDoug Anderson 202071abb133SSeungwon Jeon dw_mci_stop_dma(host); 202190c2143aSSeungwon Jeon send_stop_abort(host, data); 202271abb133SSeungwon Jeon state = STATE_SENDING_STOP; 202371abb133SSeungwon Jeon break; 202471abb133SSeungwon Jeon } 202571abb133SSeungwon Jeon 2026e352c813SSeungwon Jeon if (!cmd->data || err) { 2027e352c813SSeungwon Jeon dw_mci_request_end(host, mrq); 2028f95f3850SWill Newton goto unlock; 2029f95f3850SWill Newton } 2030f95f3850SWill Newton 2031f95f3850SWill Newton prev_state = state = STATE_SENDING_DATA; 2032f95f3850SWill Newton /* fall through */ 2033f95f3850SWill Newton 2034f95f3850SWill Newton case STATE_SENDING_DATA: 20352aa35465SDoug Anderson /* 20362aa35465SDoug Anderson * We could get a data error and never a transfer 20372aa35465SDoug Anderson * complete so we'd better check for it here. 20382aa35465SDoug Anderson * 20392aa35465SDoug Anderson * Note that we don't really care if we also got a 20402aa35465SDoug Anderson * transfer complete; stopping the DMA and sending an 20412aa35465SDoug Anderson * abort won't hurt. 
20422aa35465SDoug Anderson */ 2043f95f3850SWill Newton if (test_and_clear_bit(EVENT_DATA_ERROR, 2044f95f3850SWill Newton &host->pending_events)) { 2045f95f3850SWill Newton dw_mci_stop_dma(host); 2046e13c3c08SJaehoon Chung if (!(host->data_status & (SDMMC_INT_DRTO | 2047bdb9a90bSaddy ke SDMMC_INT_EBE))) 204890c2143aSSeungwon Jeon send_stop_abort(host, data); 2049f95f3850SWill Newton state = STATE_DATA_ERROR; 2050f95f3850SWill Newton break; 2051f95f3850SWill Newton } 2052f95f3850SWill Newton 2053f95f3850SWill Newton if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 205457e10486SAddy Ke &host->pending_events)) { 205557e10486SAddy Ke /* 205657e10486SAddy Ke * If all data-related interrupts don't come 205757e10486SAddy Ke * within the given time in reading data state. 205857e10486SAddy Ke */ 205916a34574SJaehoon Chung if (host->dir_status == DW_MCI_RECV_STATUS) 206057e10486SAddy Ke dw_mci_set_drto(host); 2061f95f3850SWill Newton break; 206257e10486SAddy Ke } 2063f95f3850SWill Newton 2064f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 20652aa35465SDoug Anderson 20662aa35465SDoug Anderson /* 20672aa35465SDoug Anderson * Handle an EVENT_DATA_ERROR that might have shown up 20682aa35465SDoug Anderson * before the transfer completed. This might not have 20692aa35465SDoug Anderson * been caught by the check above because the interrupt 20702aa35465SDoug Anderson * could have gone off between the previous check and 20712aa35465SDoug Anderson * the check for transfer complete. 20722aa35465SDoug Anderson * 20732aa35465SDoug Anderson * Technically this ought not be needed assuming we 20742aa35465SDoug Anderson * get a DATA_COMPLETE eventually (we'll notice the 20752aa35465SDoug Anderson * error and end the request), but it shouldn't hurt. 20762aa35465SDoug Anderson * 20772aa35465SDoug Anderson * This has the advantage of sending the stop command. 
20782aa35465SDoug Anderson */ 20792aa35465SDoug Anderson if (test_and_clear_bit(EVENT_DATA_ERROR, 20802aa35465SDoug Anderson &host->pending_events)) { 20812aa35465SDoug Anderson dw_mci_stop_dma(host); 2082e13c3c08SJaehoon Chung if (!(host->data_status & (SDMMC_INT_DRTO | 2083bdb9a90bSaddy ke SDMMC_INT_EBE))) 20842aa35465SDoug Anderson send_stop_abort(host, data); 20852aa35465SDoug Anderson state = STATE_DATA_ERROR; 20862aa35465SDoug Anderson break; 20872aa35465SDoug Anderson } 2088f95f3850SWill Newton prev_state = state = STATE_DATA_BUSY; 20892aa35465SDoug Anderson 2090f95f3850SWill Newton /* fall through */ 2091f95f3850SWill Newton 2092f95f3850SWill Newton case STATE_DATA_BUSY: 209393c23ae3SDouglas Anderson if (!dw_mci_clear_pending_data_complete(host)) { 209457e10486SAddy Ke /* 209557e10486SAddy Ke * If data error interrupt comes but data over 209657e10486SAddy Ke * interrupt doesn't come within the given time. 209757e10486SAddy Ke * in reading data state. 209857e10486SAddy Ke */ 209916a34574SJaehoon Chung if (host->dir_status == DW_MCI_RECV_STATUS) 210057e10486SAddy Ke dw_mci_set_drto(host); 2101f95f3850SWill Newton break; 210257e10486SAddy Ke } 2103f95f3850SWill Newton 2104f95f3850SWill Newton host->data = NULL; 2105f95f3850SWill Newton set_bit(EVENT_DATA_COMPLETE, &host->completed_events); 2106e352c813SSeungwon Jeon err = dw_mci_data_complete(host, data); 2107f95f3850SWill Newton 2108e352c813SSeungwon Jeon if (!err) { 2109e352c813SSeungwon Jeon if (!data->stop || mrq->sbc) { 211017c8bc85SSachin Kamat if (mrq->sbc && data->stop) 2111053b3ce6SSeungwon Jeon data->stop->error = 0; 2112e352c813SSeungwon Jeon dw_mci_request_end(host, mrq); 2113053b3ce6SSeungwon Jeon goto unlock; 2114053b3ce6SSeungwon Jeon } 2115053b3ce6SSeungwon Jeon 211690c2143aSSeungwon Jeon /* stop command for open-ended transfer*/ 2117e352c813SSeungwon Jeon if (data->stop) 211890c2143aSSeungwon Jeon send_stop_abort(host, data); 21192aa35465SDoug Anderson } else { 21202aa35465SDoug Anderson /* 
21212aa35465SDoug Anderson * If we don't have a command complete now we'll 21222aa35465SDoug Anderson * never get one since we just reset everything; 21232aa35465SDoug Anderson * better end the request. 21242aa35465SDoug Anderson * 21252aa35465SDoug Anderson * If we do have a command complete we'll fall 21262aa35465SDoug Anderson * through to the SENDING_STOP command and 21272aa35465SDoug Anderson * everything will be peachy keen. 21282aa35465SDoug Anderson */ 21292aa35465SDoug Anderson if (!test_bit(EVENT_CMD_COMPLETE, 21302aa35465SDoug Anderson &host->pending_events)) { 21312aa35465SDoug Anderson host->cmd = NULL; 21322aa35465SDoug Anderson dw_mci_request_end(host, mrq); 21332aa35465SDoug Anderson goto unlock; 21342aa35465SDoug Anderson } 213590c2143aSSeungwon Jeon } 2136e352c813SSeungwon Jeon 2137e352c813SSeungwon Jeon /* 2138e352c813SSeungwon Jeon * If err has non-zero, 2139e352c813SSeungwon Jeon * stop-abort command has been already issued. 2140e352c813SSeungwon Jeon */ 2141e352c813SSeungwon Jeon prev_state = state = STATE_SENDING_STOP; 2142e352c813SSeungwon Jeon 2143f95f3850SWill Newton /* fall through */ 2144f95f3850SWill Newton 2145f95f3850SWill Newton case STATE_SENDING_STOP: 21468892b705SDouglas Anderson if (!dw_mci_clear_pending_cmd_complete(host)) 2147f95f3850SWill Newton break; 2148f95f3850SWill Newton 214971abb133SSeungwon Jeon /* CMD error in data command */ 215031bff450SSeungwon Jeon if (mrq->cmd->error && mrq->data) 21513a33a94cSSonny Rao dw_mci_reset(host); 215271abb133SSeungwon Jeon 2153f95f3850SWill Newton host->cmd = NULL; 215471abb133SSeungwon Jeon host->data = NULL; 215590c2143aSSeungwon Jeon 2156e13c3c08SJaehoon Chung if (!mrq->sbc && mrq->stop) 2157e352c813SSeungwon Jeon dw_mci_command_complete(host, mrq->stop); 215890c2143aSSeungwon Jeon else 215990c2143aSSeungwon Jeon host->cmd_status = 0; 216090c2143aSSeungwon Jeon 2161e352c813SSeungwon Jeon dw_mci_request_end(host, mrq); 2162f95f3850SWill Newton goto unlock; 2163f95f3850SWill Newton 
2164f95f3850SWill Newton case STATE_DATA_ERROR: 2165f95f3850SWill Newton if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 2166f95f3850SWill Newton &host->pending_events)) 2167f95f3850SWill Newton break; 2168f95f3850SWill Newton 2169f95f3850SWill Newton state = STATE_DATA_BUSY; 2170f95f3850SWill Newton break; 2171f95f3850SWill Newton } 2172f95f3850SWill Newton } while (state != prev_state); 2173f95f3850SWill Newton 2174f95f3850SWill Newton host->state = state; 2175f95f3850SWill Newton unlock: 2176f95f3850SWill Newton spin_unlock(&host->lock); 2177f95f3850SWill Newton 2178f95f3850SWill Newton } 2179f95f3850SWill Newton 218034b664a2SJames Hogan /* push final bytes to part_buf, only use during push */ 218134b664a2SJames Hogan static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 218234b664a2SJames Hogan { 218334b664a2SJames Hogan memcpy((void *)&host->part_buf, buf, cnt); 218434b664a2SJames Hogan host->part_buf_count = cnt; 218534b664a2SJames Hogan } 218634b664a2SJames Hogan 218734b664a2SJames Hogan /* append bytes to part_buf, only use during push */ 218834b664a2SJames Hogan static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) 218934b664a2SJames Hogan { 219034b664a2SJames Hogan cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); 219134b664a2SJames Hogan memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); 219234b664a2SJames Hogan host->part_buf_count += cnt; 219334b664a2SJames Hogan return cnt; 219434b664a2SJames Hogan } 219534b664a2SJames Hogan 219634b664a2SJames Hogan /* pull first bytes from part_buf, only use during pull */ 219734b664a2SJames Hogan static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) 219834b664a2SJames Hogan { 21990e3a22c0SShawn Lin cnt = min_t(int, cnt, host->part_buf_count); 220034b664a2SJames Hogan if (cnt) { 220134b664a2SJames Hogan memcpy(buf, (void *)&host->part_buf + host->part_buf_start, 220234b664a2SJames Hogan cnt); 220334b664a2SJames Hogan 
host->part_buf_count -= cnt; 220434b664a2SJames Hogan host->part_buf_start += cnt; 220534b664a2SJames Hogan } 220634b664a2SJames Hogan return cnt; 220734b664a2SJames Hogan } 220834b664a2SJames Hogan 220934b664a2SJames Hogan /* pull final bytes from the part_buf, assuming it's just been filled */ 221034b664a2SJames Hogan static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) 221134b664a2SJames Hogan { 221234b664a2SJames Hogan memcpy(buf, &host->part_buf, cnt); 221334b664a2SJames Hogan host->part_buf_start = cnt; 221434b664a2SJames Hogan host->part_buf_count = (1 << host->data_shift) - cnt; 221534b664a2SJames Hogan } 221634b664a2SJames Hogan 2217f95f3850SWill Newton static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) 2218f95f3850SWill Newton { 2219cfbeb59cSMarkos Chandras struct mmc_data *data = host->data; 2220cfbeb59cSMarkos Chandras int init_cnt = cnt; 2221cfbeb59cSMarkos Chandras 222234b664a2SJames Hogan /* try and push anything in the part_buf */ 222334b664a2SJames Hogan if (unlikely(host->part_buf_count)) { 222434b664a2SJames Hogan int len = dw_mci_push_part_bytes(host, buf, cnt); 22250e3a22c0SShawn Lin 222634b664a2SJames Hogan buf += len; 222734b664a2SJames Hogan cnt -= len; 2228cfbeb59cSMarkos Chandras if (host->part_buf_count == 2) { 222976184ac1SBen Dooks mci_fifo_writew(host->fifo_reg, host->part_buf16); 223034b664a2SJames Hogan host->part_buf_count = 0; 223134b664a2SJames Hogan } 223234b664a2SJames Hogan } 223334b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 223434b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x1)) { 223534b664a2SJames Hogan while (cnt >= 2) { 223634b664a2SJames Hogan u16 aligned_buf[64]; 223734b664a2SJames Hogan int len = min(cnt & -2, (int)sizeof(aligned_buf)); 223834b664a2SJames Hogan int items = len >> 1; 223934b664a2SJames Hogan int i; 224034b664a2SJames Hogan /* memcpy from input buffer into aligned buffer */ 224134b664a2SJames Hogan memcpy(aligned_buf, buf, len); 
224234b664a2SJames Hogan buf += len; 224334b664a2SJames Hogan cnt -= len; 224434b664a2SJames Hogan /* push data from aligned buffer into fifo */ 224534b664a2SJames Hogan for (i = 0; i < items; ++i) 224676184ac1SBen Dooks mci_fifo_writew(host->fifo_reg, aligned_buf[i]); 224734b664a2SJames Hogan } 224834b664a2SJames Hogan } else 224934b664a2SJames Hogan #endif 225034b664a2SJames Hogan { 225134b664a2SJames Hogan u16 *pdata = buf; 22520e3a22c0SShawn Lin 225334b664a2SJames Hogan for (; cnt >= 2; cnt -= 2) 225476184ac1SBen Dooks mci_fifo_writew(host->fifo_reg, *pdata++); 225534b664a2SJames Hogan buf = pdata; 225634b664a2SJames Hogan } 225734b664a2SJames Hogan /* put anything remaining in the part_buf */ 225834b664a2SJames Hogan if (cnt) { 225934b664a2SJames Hogan dw_mci_set_part_bytes(host, buf, cnt); 2260cfbeb59cSMarkos Chandras /* Push data if we have reached the expected data length */ 2261cfbeb59cSMarkos Chandras if ((data->bytes_xfered + init_cnt) == 2262cfbeb59cSMarkos Chandras (data->blksz * data->blocks)) 226376184ac1SBen Dooks mci_fifo_writew(host->fifo_reg, host->part_buf16); 2264f95f3850SWill Newton } 2265f95f3850SWill Newton } 2266f95f3850SWill Newton 2267f95f3850SWill Newton static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) 2268f95f3850SWill Newton { 226934b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 227034b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x1)) { 227134b664a2SJames Hogan while (cnt >= 2) { 227234b664a2SJames Hogan /* pull data from fifo into aligned buffer */ 227334b664a2SJames Hogan u16 aligned_buf[64]; 227434b664a2SJames Hogan int len = min(cnt & -2, (int)sizeof(aligned_buf)); 227534b664a2SJames Hogan int items = len >> 1; 227634b664a2SJames Hogan int i; 22770e3a22c0SShawn Lin 227834b664a2SJames Hogan for (i = 0; i < items; ++i) 227976184ac1SBen Dooks aligned_buf[i] = mci_fifo_readw(host->fifo_reg); 228034b664a2SJames Hogan /* memcpy from aligned buffer into output buffer */ 228134b664a2SJames 
Hogan memcpy(buf, aligned_buf, len); 228234b664a2SJames Hogan buf += len; 228334b664a2SJames Hogan cnt -= len; 228434b664a2SJames Hogan } 228534b664a2SJames Hogan } else 228634b664a2SJames Hogan #endif 228734b664a2SJames Hogan { 228834b664a2SJames Hogan u16 *pdata = buf; 22890e3a22c0SShawn Lin 229034b664a2SJames Hogan for (; cnt >= 2; cnt -= 2) 229176184ac1SBen Dooks *pdata++ = mci_fifo_readw(host->fifo_reg); 229234b664a2SJames Hogan buf = pdata; 229334b664a2SJames Hogan } 229434b664a2SJames Hogan if (cnt) { 229576184ac1SBen Dooks host->part_buf16 = mci_fifo_readw(host->fifo_reg); 229634b664a2SJames Hogan dw_mci_pull_final_bytes(host, buf, cnt); 2297f95f3850SWill Newton } 2298f95f3850SWill Newton } 2299f95f3850SWill Newton 2300f95f3850SWill Newton static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) 2301f95f3850SWill Newton { 2302cfbeb59cSMarkos Chandras struct mmc_data *data = host->data; 2303cfbeb59cSMarkos Chandras int init_cnt = cnt; 2304cfbeb59cSMarkos Chandras 230534b664a2SJames Hogan /* try and push anything in the part_buf */ 230634b664a2SJames Hogan if (unlikely(host->part_buf_count)) { 230734b664a2SJames Hogan int len = dw_mci_push_part_bytes(host, buf, cnt); 23080e3a22c0SShawn Lin 230934b664a2SJames Hogan buf += len; 231034b664a2SJames Hogan cnt -= len; 2311cfbeb59cSMarkos Chandras if (host->part_buf_count == 4) { 231276184ac1SBen Dooks mci_fifo_writel(host->fifo_reg, host->part_buf32); 231334b664a2SJames Hogan host->part_buf_count = 0; 231434b664a2SJames Hogan } 231534b664a2SJames Hogan } 231634b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 231734b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x3)) { 231834b664a2SJames Hogan while (cnt >= 4) { 231934b664a2SJames Hogan u32 aligned_buf[32]; 232034b664a2SJames Hogan int len = min(cnt & -4, (int)sizeof(aligned_buf)); 232134b664a2SJames Hogan int items = len >> 2; 232234b664a2SJames Hogan int i; 232334b664a2SJames Hogan /* memcpy from input buffer into aligned 
buffer */ 232434b664a2SJames Hogan memcpy(aligned_buf, buf, len); 232534b664a2SJames Hogan buf += len; 232634b664a2SJames Hogan cnt -= len; 232734b664a2SJames Hogan /* push data from aligned buffer into fifo */ 232834b664a2SJames Hogan for (i = 0; i < items; ++i) 232976184ac1SBen Dooks mci_fifo_writel(host->fifo_reg, aligned_buf[i]); 233034b664a2SJames Hogan } 233134b664a2SJames Hogan } else 233234b664a2SJames Hogan #endif 233334b664a2SJames Hogan { 233434b664a2SJames Hogan u32 *pdata = buf; 23350e3a22c0SShawn Lin 233634b664a2SJames Hogan for (; cnt >= 4; cnt -= 4) 233776184ac1SBen Dooks mci_fifo_writel(host->fifo_reg, *pdata++); 233834b664a2SJames Hogan buf = pdata; 233934b664a2SJames Hogan } 234034b664a2SJames Hogan /* put anything remaining in the part_buf */ 234134b664a2SJames Hogan if (cnt) { 234234b664a2SJames Hogan dw_mci_set_part_bytes(host, buf, cnt); 2343cfbeb59cSMarkos Chandras /* Push data if we have reached the expected data length */ 2344cfbeb59cSMarkos Chandras if ((data->bytes_xfered + init_cnt) == 2345cfbeb59cSMarkos Chandras (data->blksz * data->blocks)) 234676184ac1SBen Dooks mci_fifo_writel(host->fifo_reg, host->part_buf32); 2347f95f3850SWill Newton } 2348f95f3850SWill Newton } 2349f95f3850SWill Newton 2350f95f3850SWill Newton static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) 2351f95f3850SWill Newton { 235234b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 235334b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x3)) { 235434b664a2SJames Hogan while (cnt >= 4) { 235534b664a2SJames Hogan /* pull data from fifo into aligned buffer */ 235634b664a2SJames Hogan u32 aligned_buf[32]; 235734b664a2SJames Hogan int len = min(cnt & -4, (int)sizeof(aligned_buf)); 235834b664a2SJames Hogan int items = len >> 2; 235934b664a2SJames Hogan int i; 23600e3a22c0SShawn Lin 236134b664a2SJames Hogan for (i = 0; i < items; ++i) 236276184ac1SBen Dooks aligned_buf[i] = mci_fifo_readl(host->fifo_reg); 236334b664a2SJames Hogan /* 
memcpy from aligned buffer into output buffer */ 236434b664a2SJames Hogan memcpy(buf, aligned_buf, len); 236534b664a2SJames Hogan buf += len; 236634b664a2SJames Hogan cnt -= len; 236734b664a2SJames Hogan } 236834b664a2SJames Hogan } else 236934b664a2SJames Hogan #endif 237034b664a2SJames Hogan { 237134b664a2SJames Hogan u32 *pdata = buf; 23720e3a22c0SShawn Lin 237334b664a2SJames Hogan for (; cnt >= 4; cnt -= 4) 237476184ac1SBen Dooks *pdata++ = mci_fifo_readl(host->fifo_reg); 237534b664a2SJames Hogan buf = pdata; 237634b664a2SJames Hogan } 237734b664a2SJames Hogan if (cnt) { 237876184ac1SBen Dooks host->part_buf32 = mci_fifo_readl(host->fifo_reg); 237934b664a2SJames Hogan dw_mci_pull_final_bytes(host, buf, cnt); 2380f95f3850SWill Newton } 2381f95f3850SWill Newton } 2382f95f3850SWill Newton 2383f95f3850SWill Newton static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) 2384f95f3850SWill Newton { 2385cfbeb59cSMarkos Chandras struct mmc_data *data = host->data; 2386cfbeb59cSMarkos Chandras int init_cnt = cnt; 2387cfbeb59cSMarkos Chandras 238834b664a2SJames Hogan /* try and push anything in the part_buf */ 238934b664a2SJames Hogan if (unlikely(host->part_buf_count)) { 239034b664a2SJames Hogan int len = dw_mci_push_part_bytes(host, buf, cnt); 23910e3a22c0SShawn Lin 239234b664a2SJames Hogan buf += len; 239334b664a2SJames Hogan cnt -= len; 2394c09fbd74SSeungwon Jeon 2395cfbeb59cSMarkos Chandras if (host->part_buf_count == 8) { 239676184ac1SBen Dooks mci_fifo_writeq(host->fifo_reg, host->part_buf); 239734b664a2SJames Hogan host->part_buf_count = 0; 239834b664a2SJames Hogan } 239934b664a2SJames Hogan } 240034b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 240134b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x7)) { 240234b664a2SJames Hogan while (cnt >= 8) { 240334b664a2SJames Hogan u64 aligned_buf[16]; 240434b664a2SJames Hogan int len = min(cnt & -8, (int)sizeof(aligned_buf)); 240534b664a2SJames Hogan int items = len >> 3; 
240634b664a2SJames Hogan int i; 240734b664a2SJames Hogan /* memcpy from input buffer into aligned buffer */ 240834b664a2SJames Hogan memcpy(aligned_buf, buf, len); 240934b664a2SJames Hogan buf += len; 241034b664a2SJames Hogan cnt -= len; 241134b664a2SJames Hogan /* push data from aligned buffer into fifo */ 241234b664a2SJames Hogan for (i = 0; i < items; ++i) 241376184ac1SBen Dooks mci_fifo_writeq(host->fifo_reg, aligned_buf[i]); 241434b664a2SJames Hogan } 241534b664a2SJames Hogan } else 241634b664a2SJames Hogan #endif 241734b664a2SJames Hogan { 241834b664a2SJames Hogan u64 *pdata = buf; 24190e3a22c0SShawn Lin 242034b664a2SJames Hogan for (; cnt >= 8; cnt -= 8) 242176184ac1SBen Dooks mci_fifo_writeq(host->fifo_reg, *pdata++); 242234b664a2SJames Hogan buf = pdata; 242334b664a2SJames Hogan } 242434b664a2SJames Hogan /* put anything remaining in the part_buf */ 242534b664a2SJames Hogan if (cnt) { 242634b664a2SJames Hogan dw_mci_set_part_bytes(host, buf, cnt); 2427cfbeb59cSMarkos Chandras /* Push data if we have reached the expected data length */ 2428cfbeb59cSMarkos Chandras if ((data->bytes_xfered + init_cnt) == 2429cfbeb59cSMarkos Chandras (data->blksz * data->blocks)) 243076184ac1SBen Dooks mci_fifo_writeq(host->fifo_reg, host->part_buf); 2431f95f3850SWill Newton } 2432f95f3850SWill Newton } 2433f95f3850SWill Newton 2434f95f3850SWill Newton static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) 2435f95f3850SWill Newton { 243634b664a2SJames Hogan #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 243734b664a2SJames Hogan if (unlikely((unsigned long)buf & 0x7)) { 243834b664a2SJames Hogan while (cnt >= 8) { 243934b664a2SJames Hogan /* pull data from fifo into aligned buffer */ 244034b664a2SJames Hogan u64 aligned_buf[16]; 244134b664a2SJames Hogan int len = min(cnt & -8, (int)sizeof(aligned_buf)); 244234b664a2SJames Hogan int items = len >> 3; 244334b664a2SJames Hogan int i; 24440e3a22c0SShawn Lin 244534b664a2SJames Hogan for (i = 0; i < items; ++i) 
244676184ac1SBen Dooks aligned_buf[i] = mci_fifo_readq(host->fifo_reg); 244776184ac1SBen Dooks 244834b664a2SJames Hogan /* memcpy from aligned buffer into output buffer */ 244934b664a2SJames Hogan memcpy(buf, aligned_buf, len); 245034b664a2SJames Hogan buf += len; 245134b664a2SJames Hogan cnt -= len; 2452f95f3850SWill Newton } 245334b664a2SJames Hogan } else 245434b664a2SJames Hogan #endif 245534b664a2SJames Hogan { 245634b664a2SJames Hogan u64 *pdata = buf; 24570e3a22c0SShawn Lin 245834b664a2SJames Hogan for (; cnt >= 8; cnt -= 8) 245976184ac1SBen Dooks *pdata++ = mci_fifo_readq(host->fifo_reg); 246034b664a2SJames Hogan buf = pdata; 246134b664a2SJames Hogan } 246234b664a2SJames Hogan if (cnt) { 246376184ac1SBen Dooks host->part_buf = mci_fifo_readq(host->fifo_reg); 246434b664a2SJames Hogan dw_mci_pull_final_bytes(host, buf, cnt); 246534b664a2SJames Hogan } 246634b664a2SJames Hogan } 246734b664a2SJames Hogan 246834b664a2SJames Hogan static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) 246934b664a2SJames Hogan { 247034b664a2SJames Hogan int len; 247134b664a2SJames Hogan 247234b664a2SJames Hogan /* get remaining partial bytes */ 247334b664a2SJames Hogan len = dw_mci_pull_part_bytes(host, buf, cnt); 247434b664a2SJames Hogan if (unlikely(len == cnt)) 247534b664a2SJames Hogan return; 247634b664a2SJames Hogan buf += len; 247734b664a2SJames Hogan cnt -= len; 247834b664a2SJames Hogan 247934b664a2SJames Hogan /* get the rest of the data */ 248034b664a2SJames Hogan host->pull_data(host, buf, cnt); 2481f95f3850SWill Newton } 2482f95f3850SWill Newton 248387a74d39SKyoungil Kim static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) 2484f95f3850SWill Newton { 2485f9c2a0dcSSeungwon Jeon struct sg_mapping_iter *sg_miter = &host->sg_miter; 2486f9c2a0dcSSeungwon Jeon void *buf; 2487f9c2a0dcSSeungwon Jeon unsigned int offset; 2488f95f3850SWill Newton struct mmc_data *data = host->data; 2489f95f3850SWill Newton int shift = host->data_shift; 2490f95f3850SWill 
Newton u32 status; 24913e4b0d8bSMarkos Chandras unsigned int len; 2492f9c2a0dcSSeungwon Jeon unsigned int remain, fcnt; 2493f95f3850SWill Newton 2494f95f3850SWill Newton do { 2495f9c2a0dcSSeungwon Jeon if (!sg_miter_next(sg_miter)) 2496f9c2a0dcSSeungwon Jeon goto done; 2497f95f3850SWill Newton 24984225fc85SImre Deak host->sg = sg_miter->piter.sg; 2499f9c2a0dcSSeungwon Jeon buf = sg_miter->addr; 2500f9c2a0dcSSeungwon Jeon remain = sg_miter->length; 2501f9c2a0dcSSeungwon Jeon offset = 0; 2502f9c2a0dcSSeungwon Jeon 2503f9c2a0dcSSeungwon Jeon do { 2504f9c2a0dcSSeungwon Jeon fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) 2505f9c2a0dcSSeungwon Jeon << shift) + host->part_buf_count; 2506f9c2a0dcSSeungwon Jeon len = min(remain, fcnt); 2507f9c2a0dcSSeungwon Jeon if (!len) 2508f9c2a0dcSSeungwon Jeon break; 2509f9c2a0dcSSeungwon Jeon dw_mci_pull_data(host, (void *)(buf + offset), len); 25103e4b0d8bSMarkos Chandras data->bytes_xfered += len; 2511f95f3850SWill Newton offset += len; 2512f9c2a0dcSSeungwon Jeon remain -= len; 2513f9c2a0dcSSeungwon Jeon } while (remain); 2514f95f3850SWill Newton 2515e74f3a9cSSeungwon Jeon sg_miter->consumed = offset; 2516f95f3850SWill Newton status = mci_readl(host, MINTSTS); 2517f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 251887a74d39SKyoungil Kim /* if the RXDR is ready read again */ 251987a74d39SKyoungil Kim } while ((status & SDMMC_INT_RXDR) || 252087a74d39SKyoungil Kim (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); 2521f9c2a0dcSSeungwon Jeon 2522f9c2a0dcSSeungwon Jeon if (!remain) { 2523f9c2a0dcSSeungwon Jeon if (!sg_miter_next(sg_miter)) 2524f9c2a0dcSSeungwon Jeon goto done; 2525f9c2a0dcSSeungwon Jeon sg_miter->consumed = 0; 2526f9c2a0dcSSeungwon Jeon } 2527f9c2a0dcSSeungwon Jeon sg_miter_stop(sg_miter); 2528f95f3850SWill Newton return; 2529f95f3850SWill Newton 2530f95f3850SWill Newton done: 2531f9c2a0dcSSeungwon Jeon sg_miter_stop(sg_miter); 2532f9c2a0dcSSeungwon Jeon host->sg = NULL; 25330e3a22c0SShawn Lin 
smp_wmb(); /* drain writebuffer */ 2534f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2535f95f3850SWill Newton } 2536f95f3850SWill Newton 2537f95f3850SWill Newton static void dw_mci_write_data_pio(struct dw_mci *host) 2538f95f3850SWill Newton { 2539f9c2a0dcSSeungwon Jeon struct sg_mapping_iter *sg_miter = &host->sg_miter; 2540f9c2a0dcSSeungwon Jeon void *buf; 2541f9c2a0dcSSeungwon Jeon unsigned int offset; 2542f95f3850SWill Newton struct mmc_data *data = host->data; 2543f95f3850SWill Newton int shift = host->data_shift; 2544f95f3850SWill Newton u32 status; 25453e4b0d8bSMarkos Chandras unsigned int len; 2546f9c2a0dcSSeungwon Jeon unsigned int fifo_depth = host->fifo_depth; 2547f9c2a0dcSSeungwon Jeon unsigned int remain, fcnt; 2548f95f3850SWill Newton 2549f95f3850SWill Newton do { 2550f9c2a0dcSSeungwon Jeon if (!sg_miter_next(sg_miter)) 2551f9c2a0dcSSeungwon Jeon goto done; 2552f95f3850SWill Newton 25534225fc85SImre Deak host->sg = sg_miter->piter.sg; 2554f9c2a0dcSSeungwon Jeon buf = sg_miter->addr; 2555f9c2a0dcSSeungwon Jeon remain = sg_miter->length; 2556f9c2a0dcSSeungwon Jeon offset = 0; 2557f9c2a0dcSSeungwon Jeon 2558f9c2a0dcSSeungwon Jeon do { 2559f9c2a0dcSSeungwon Jeon fcnt = ((fifo_depth - 2560f9c2a0dcSSeungwon Jeon SDMMC_GET_FCNT(mci_readl(host, STATUS))) 2561f9c2a0dcSSeungwon Jeon << shift) - host->part_buf_count; 2562f9c2a0dcSSeungwon Jeon len = min(remain, fcnt); 2563f9c2a0dcSSeungwon Jeon if (!len) 2564f9c2a0dcSSeungwon Jeon break; 2565f9c2a0dcSSeungwon Jeon host->push_data(host, (void *)(buf + offset), len); 25663e4b0d8bSMarkos Chandras data->bytes_xfered += len; 2567f95f3850SWill Newton offset += len; 2568f9c2a0dcSSeungwon Jeon remain -= len; 2569f9c2a0dcSSeungwon Jeon } while (remain); 2570f95f3850SWill Newton 2571e74f3a9cSSeungwon Jeon sg_miter->consumed = offset; 2572f95f3850SWill Newton status = mci_readl(host, MINTSTS); 2573f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2574f95f3850SWill Newton } while 
(status & SDMMC_INT_TXDR); /* if TXDR write again */ 2575f9c2a0dcSSeungwon Jeon 2576f9c2a0dcSSeungwon Jeon if (!remain) { 2577f9c2a0dcSSeungwon Jeon if (!sg_miter_next(sg_miter)) 2578f9c2a0dcSSeungwon Jeon goto done; 2579f9c2a0dcSSeungwon Jeon sg_miter->consumed = 0; 2580f9c2a0dcSSeungwon Jeon } 2581f9c2a0dcSSeungwon Jeon sg_miter_stop(sg_miter); 2582f95f3850SWill Newton return; 2583f95f3850SWill Newton 2584f95f3850SWill Newton done: 2585f9c2a0dcSSeungwon Jeon sg_miter_stop(sg_miter); 2586f9c2a0dcSSeungwon Jeon host->sg = NULL; 25870e3a22c0SShawn Lin smp_wmb(); /* drain writebuffer */ 2588f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 2589f95f3850SWill Newton } 2590f95f3850SWill Newton 2591f95f3850SWill Newton static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) 2592f95f3850SWill Newton { 25930363b12dSDouglas Anderson del_timer(&host->cto_timer); 25940363b12dSDouglas Anderson 2595f95f3850SWill Newton if (!host->cmd_status) 2596f95f3850SWill Newton host->cmd_status = status; 2597f95f3850SWill Newton 25980e3a22c0SShawn Lin smp_wmb(); /* drain writebuffer */ 2599f95f3850SWill Newton 2600f95f3850SWill Newton set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2601f95f3850SWill Newton tasklet_schedule(&host->tasklet); 2602f95f3850SWill Newton } 2603f95f3850SWill Newton 26046130e7a9SDoug Anderson static void dw_mci_handle_cd(struct dw_mci *host) 26056130e7a9SDoug Anderson { 2606b23475faSJaehoon Chung struct dw_mci_slot *slot = host->slot; 26076130e7a9SDoug Anderson 26086130e7a9SDoug Anderson if (slot->mmc->ops->card_event) 26096130e7a9SDoug Anderson slot->mmc->ops->card_event(slot->mmc); 26106130e7a9SDoug Anderson mmc_detect_change(slot->mmc, 26116130e7a9SDoug Anderson msecs_to_jiffies(host->pdata->detect_delay_ms)); 26126130e7a9SDoug Anderson } 26136130e7a9SDoug Anderson 2614f95f3850SWill Newton static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 2615f95f3850SWill Newton { 2616f95f3850SWill Newton struct dw_mci *host = 
dev_id; 2617182c9081SSeungwon Jeon u32 pending; 2618b23475faSJaehoon Chung struct dw_mci_slot *slot = host->slot; 26198892b705SDouglas Anderson unsigned long irqflags; 2620f95f3850SWill Newton 2621f95f3850SWill Newton pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 2622f95f3850SWill Newton 2623476d79f1SDoug Anderson if (pending) { 262401730558SDoug Anderson /* Check volt switch first, since it can look like an error */ 262501730558SDoug Anderson if ((host->state == STATE_SENDING_CMD11) && 262601730558SDoug Anderson (pending & SDMMC_INT_VOLT_SWITCH)) { 262701730558SDoug Anderson mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); 262801730558SDoug Anderson pending &= ~SDMMC_INT_VOLT_SWITCH; 262949ba0302SDoug Anderson 263049ba0302SDoug Anderson /* 263149ba0302SDoug Anderson * Hold the lock; we know cmd11_timer can't be kicked 263249ba0302SDoug Anderson * off after the lock is released, so safe to delete. 263349ba0302SDoug Anderson */ 263449ba0302SDoug Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 263501730558SDoug Anderson dw_mci_cmd_interrupt(host, pending); 263649ba0302SDoug Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 263749ba0302SDoug Anderson 263849ba0302SDoug Anderson del_timer(&host->cmd11_timer); 263901730558SDoug Anderson } 264001730558SDoug Anderson 2641f95f3850SWill Newton if (pending & DW_MCI_CMD_ERROR_FLAGS) { 26428892b705SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 26438892b705SDouglas Anderson 264403de1921SAddy Ke del_timer(&host->cto_timer); 2645f95f3850SWill Newton mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); 2646182c9081SSeungwon Jeon host->cmd_status = pending; 26470e3a22c0SShawn Lin smp_wmb(); /* drain writebuffer */ 2648f95f3850SWill Newton set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 26498892b705SDouglas Anderson 26508892b705SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 2651f95f3850SWill Newton } 2652f95f3850SWill Newton 2653f95f3850SWill Newton if (pending 
& DW_MCI_DATA_ERROR_FLAGS) { 2654f95f3850SWill Newton /* if there is an error report DATA_ERROR */ 2655f95f3850SWill Newton mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 2656182c9081SSeungwon Jeon host->data_status = pending; 26570e3a22c0SShawn Lin smp_wmb(); /* drain writebuffer */ 2658f95f3850SWill Newton set_bit(EVENT_DATA_ERROR, &host->pending_events); 2659f95f3850SWill Newton tasklet_schedule(&host->tasklet); 2660f95f3850SWill Newton } 2661f95f3850SWill Newton 2662f95f3850SWill Newton if (pending & SDMMC_INT_DATA_OVER) { 266393c23ae3SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 266493c23ae3SDouglas Anderson 266557e10486SAddy Ke del_timer(&host->dto_timer); 266657e10486SAddy Ke 2667f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); 2668f95f3850SWill Newton if (!host->data_status) 2669182c9081SSeungwon Jeon host->data_status = pending; 26700e3a22c0SShawn Lin smp_wmb(); /* drain writebuffer */ 2671f95f3850SWill Newton if (host->dir_status == DW_MCI_RECV_STATUS) { 2672f95f3850SWill Newton if (host->sg != NULL) 267387a74d39SKyoungil Kim dw_mci_read_data_pio(host, true); 2674f95f3850SWill Newton } 2675f95f3850SWill Newton set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 2676f95f3850SWill Newton tasklet_schedule(&host->tasklet); 267793c23ae3SDouglas Anderson 267893c23ae3SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 2679f95f3850SWill Newton } 2680f95f3850SWill Newton 2681f95f3850SWill Newton if (pending & SDMMC_INT_RXDR) { 2682f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_RXDR); 2683b40af3aaSJames Hogan if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) 268487a74d39SKyoungil Kim dw_mci_read_data_pio(host, false); 2685f95f3850SWill Newton } 2686f95f3850SWill Newton 2687f95f3850SWill Newton if (pending & SDMMC_INT_TXDR) { 2688f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_TXDR); 2689b40af3aaSJames Hogan if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) 2690f95f3850SWill 
Newton dw_mci_write_data_pio(host); 2691f95f3850SWill Newton } 2692f95f3850SWill Newton 2693f95f3850SWill Newton if (pending & SDMMC_INT_CMD_DONE) { 26948892b705SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 26958892b705SDouglas Anderson 2696f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); 2697182c9081SSeungwon Jeon dw_mci_cmd_interrupt(host, pending); 26988892b705SDouglas Anderson 26998892b705SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 2700f95f3850SWill Newton } 2701f95f3850SWill Newton 2702f95f3850SWill Newton if (pending & SDMMC_INT_CD) { 2703f95f3850SWill Newton mci_writel(host, RINTSTS, SDMMC_INT_CD); 27046130e7a9SDoug Anderson dw_mci_handle_cd(host); 2705f95f3850SWill Newton } 2706f95f3850SWill Newton 270776756234SAddy Ke if (pending & SDMMC_INT_SDIO(slot->sdio_id)) { 270876756234SAddy Ke mci_writel(host, RINTSTS, 270976756234SAddy Ke SDMMC_INT_SDIO(slot->sdio_id)); 271032dba737SUlf Hansson __dw_mci_enable_sdio_irq(slot, 0); 271132dba737SUlf Hansson sdio_signal_irq(slot->mmc); 27121a5c8e1fSShashidhar Hiremath } 27131a5c8e1fSShashidhar Hiremath 27141fb5f68aSMarkos Chandras } 2715f95f3850SWill Newton 27163fc7eaefSShawn Lin if (host->use_dma != TRANS_MODE_IDMAC) 27173fc7eaefSShawn Lin return IRQ_HANDLED; 27183fc7eaefSShawn Lin 27193fc7eaefSShawn Lin /* Handle IDMA interrupts */ 272069d99fdcSPrabu Thangamuthu if (host->dma_64bit_address == 1) { 272169d99fdcSPrabu Thangamuthu pending = mci_readl(host, IDSTS64); 272269d99fdcSPrabu Thangamuthu if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 272369d99fdcSPrabu Thangamuthu mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | 272469d99fdcSPrabu Thangamuthu SDMMC_IDMAC_INT_RI); 272569d99fdcSPrabu Thangamuthu mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); 2726faecf411SShawn Lin if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 27273fc7eaefSShawn Lin host->dma_ops->complete((void *)host); 272869d99fdcSPrabu Thangamuthu } 272969d99fdcSPrabu Thangamuthu 
} else { 2730f95f3850SWill Newton pending = mci_readl(host, IDSTS); 2731f95f3850SWill Newton if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 273269d99fdcSPrabu Thangamuthu mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | 273369d99fdcSPrabu Thangamuthu SDMMC_IDMAC_INT_RI); 2734f95f3850SWill Newton mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 2735faecf411SShawn Lin if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) 27363fc7eaefSShawn Lin host->dma_ops->complete((void *)host); 2737f95f3850SWill Newton } 273869d99fdcSPrabu Thangamuthu } 2739f95f3850SWill Newton 2740f95f3850SWill Newton return IRQ_HANDLED; 2741f95f3850SWill Newton } 2742f95f3850SWill Newton 2743a4faa492SShawn Lin static int dw_mci_init_slot_caps(struct dw_mci_slot *slot) 2744a4faa492SShawn Lin { 2745a4faa492SShawn Lin struct dw_mci *host = slot->host; 2746a4faa492SShawn Lin const struct dw_mci_drv_data *drv_data = host->drv_data; 2747a4faa492SShawn Lin struct mmc_host *mmc = slot->mmc; 2748a4faa492SShawn Lin int ctrl_id; 2749a4faa492SShawn Lin 2750a4faa492SShawn Lin if (host->pdata->caps) 2751a4faa492SShawn Lin mmc->caps = host->pdata->caps; 2752a4faa492SShawn Lin 2753a4faa492SShawn Lin /* 2754a4faa492SShawn Lin * Support MMC_CAP_ERASE by default. 2755a4faa492SShawn Lin * It needs to use trim/discard/erase commands. 
2756a4faa492SShawn Lin */ 2757a4faa492SShawn Lin mmc->caps |= MMC_CAP_ERASE; 2758a4faa492SShawn Lin 2759a4faa492SShawn Lin if (host->pdata->pm_caps) 2760a4faa492SShawn Lin mmc->pm_caps = host->pdata->pm_caps; 2761a4faa492SShawn Lin 2762a4faa492SShawn Lin if (host->dev->of_node) { 2763a4faa492SShawn Lin ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); 2764a4faa492SShawn Lin if (ctrl_id < 0) 2765a4faa492SShawn Lin ctrl_id = 0; 2766a4faa492SShawn Lin } else { 2767a4faa492SShawn Lin ctrl_id = to_platform_device(host->dev)->id; 2768a4faa492SShawn Lin } 27690d84b9e5SShawn Lin 27700d84b9e5SShawn Lin if (drv_data && drv_data->caps) { 27710d84b9e5SShawn Lin if (ctrl_id >= drv_data->num_caps) { 27720d84b9e5SShawn Lin dev_err(host->dev, "invalid controller id %d\n", 27730d84b9e5SShawn Lin ctrl_id); 27740d84b9e5SShawn Lin return -EINVAL; 27750d84b9e5SShawn Lin } 2776a4faa492SShawn Lin mmc->caps |= drv_data->caps[ctrl_id]; 27770d84b9e5SShawn Lin } 2778a4faa492SShawn Lin 2779a4faa492SShawn Lin if (host->pdata->caps2) 2780a4faa492SShawn Lin mmc->caps2 = host->pdata->caps2; 2781a4faa492SShawn Lin 278286b93a48SJaehoon Chung mmc->f_min = DW_MCI_FREQ_MIN; 278386b93a48SJaehoon Chung if (!mmc->f_max) 278486b93a48SJaehoon Chung mmc->f_max = DW_MCI_FREQ_MAX; 278586b93a48SJaehoon Chung 2786a4faa492SShawn Lin /* Process SDIO IRQs through the sdio_irq_work. 
*/ 2787a4faa492SShawn Lin if (mmc->caps & MMC_CAP_SDIO_IRQ) 2788a4faa492SShawn Lin mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 2789a4faa492SShawn Lin 2790a4faa492SShawn Lin return 0; 2791a4faa492SShawn Lin } 2792a4faa492SShawn Lin 2793e4a65ef7SJaehoon Chung static int dw_mci_init_slot(struct dw_mci *host) 2794f95f3850SWill Newton { 2795f95f3850SWill Newton struct mmc_host *mmc; 2796f95f3850SWill Newton struct dw_mci_slot *slot; 2797a4faa492SShawn Lin int ret; 2798f95f3850SWill Newton 27994a90920cSThomas Abraham mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); 2800f95f3850SWill Newton if (!mmc) 2801f95f3850SWill Newton return -ENOMEM; 2802f95f3850SWill Newton 2803f95f3850SWill Newton slot = mmc_priv(mmc); 2804e4a65ef7SJaehoon Chung slot->id = 0; 2805e4a65ef7SJaehoon Chung slot->sdio_id = host->sdio_id0 + slot->id; 2806f95f3850SWill Newton slot->mmc = mmc; 2807f95f3850SWill Newton slot->host = host; 2808b23475faSJaehoon Chung host->slot = slot; 2809f95f3850SWill Newton 2810f95f3850SWill Newton mmc->ops = &dw_mci_ops; 2811f95f3850SWill Newton 281251da2240SYuvaraj CD /*if there are external regulators, get them*/ 281351da2240SYuvaraj CD ret = mmc_regulator_get_supply(mmc); 28140f3a47b8SWolfram Sang if (ret) 28153cf890fcSDoug Anderson goto err_host_allocated; 281651da2240SYuvaraj CD 281751da2240SYuvaraj CD if (!mmc->ocr_avail) 2818f95f3850SWill Newton mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 2819f95f3850SWill Newton 28203cf890fcSDoug Anderson ret = mmc_of_parse(mmc); 28213cf890fcSDoug Anderson if (ret) 28223cf890fcSDoug Anderson goto err_host_allocated; 2823f95f3850SWill Newton 2824a4faa492SShawn Lin ret = dw_mci_init_slot_caps(slot); 2825a4faa492SShawn Lin if (ret) 2826a4faa492SShawn Lin goto err_host_allocated; 282732dba737SUlf Hansson 2828f95f3850SWill Newton /* Useful defaults if platform data is unset. 
*/ 28293fc7eaefSShawn Lin if (host->use_dma == TRANS_MODE_IDMAC) { 2830a39e5746SJaehoon Chung mmc->max_segs = host->ring_size; 2831225faf87SJaehoon Chung mmc->max_blk_size = 65535; 2832575c319dSHeiko Stuebner mmc->max_seg_size = 0x1000; 28331a25b1b4SSeungwon Jeon mmc->max_req_size = mmc->max_seg_size * host->ring_size; 28341a25b1b4SSeungwon Jeon mmc->max_blk_count = mmc->max_req_size / 512; 28353fc7eaefSShawn Lin } else if (host->use_dma == TRANS_MODE_EDMAC) { 28363fc7eaefSShawn Lin mmc->max_segs = 64; 2837225faf87SJaehoon Chung mmc->max_blk_size = 65535; 28383fc7eaefSShawn Lin mmc->max_blk_count = 65535; 28393fc7eaefSShawn Lin mmc->max_req_size = 28403fc7eaefSShawn Lin mmc->max_blk_size * mmc->max_blk_count; 28413fc7eaefSShawn Lin mmc->max_seg_size = mmc->max_req_size; 2842575c319dSHeiko Stuebner } else { 28433fc7eaefSShawn Lin /* TRANS_MODE_PIO */ 2844f95f3850SWill Newton mmc->max_segs = 64; 2845225faf87SJaehoon Chung mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */ 2846f95f3850SWill Newton mmc->max_blk_count = 512; 2847575c319dSHeiko Stuebner mmc->max_req_size = mmc->max_blk_size * 2848575c319dSHeiko Stuebner mmc->max_blk_count; 2849f95f3850SWill Newton mmc->max_seg_size = mmc->max_req_size; 2850575c319dSHeiko Stuebner } 2851f95f3850SWill Newton 2852c0834a58SShawn Lin dw_mci_get_cd(mmc); 2853ae0eb348SJaehoon Chung 28540cea529dSJaehoon Chung ret = mmc_add_host(mmc); 28550cea529dSJaehoon Chung if (ret) 28563cf890fcSDoug Anderson goto err_host_allocated; 2857f95f3850SWill Newton 2858f95f3850SWill Newton #if defined(CONFIG_DEBUG_FS) 2859f95f3850SWill Newton dw_mci_init_debugfs(slot); 2860f95f3850SWill Newton #endif 2861f95f3850SWill Newton 2862f95f3850SWill Newton return 0; 2863800d78bfSThomas Abraham 28643cf890fcSDoug Anderson err_host_allocated: 2865800d78bfSThomas Abraham mmc_free_host(mmc); 286651da2240SYuvaraj CD return ret; 2867f95f3850SWill Newton } 2868f95f3850SWill Newton 2869e4a65ef7SJaehoon Chung static void dw_mci_cleanup_slot(struct dw_mci_slot *slot) 
2870f95f3850SWill Newton { 2871f95f3850SWill Newton /* Debugfs stuff is cleaned up by mmc core */ 2872f95f3850SWill Newton mmc_remove_host(slot->mmc); 2873b23475faSJaehoon Chung slot->host->slot = NULL; 2874f95f3850SWill Newton mmc_free_host(slot->mmc); 2875f95f3850SWill Newton } 2876f95f3850SWill Newton 2877f95f3850SWill Newton static void dw_mci_init_dma(struct dw_mci *host) 2878f95f3850SWill Newton { 287969d99fdcSPrabu Thangamuthu int addr_config; 28803fc7eaefSShawn Lin struct device *dev = host->dev; 28813fc7eaefSShawn Lin 28823fc7eaefSShawn Lin /* 28833fc7eaefSShawn Lin * Check tansfer mode from HCON[17:16] 28843fc7eaefSShawn Lin * Clear the ambiguous description of dw_mmc databook: 28853fc7eaefSShawn Lin * 2b'00: No DMA Interface -> Actually means using Internal DMA block 28863fc7eaefSShawn Lin * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block 28873fc7eaefSShawn Lin * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block 28883fc7eaefSShawn Lin * 2b'11: Non DW DMA Interface -> pio only 28893fc7eaefSShawn Lin * Compared to DesignWare DMA Interface, Generic DMA Interface has a 28903fc7eaefSShawn Lin * simpler request/acknowledge handshake mechanism and both of them 28913fc7eaefSShawn Lin * are regarded as external dma master for dw_mmc. 
28923fc7eaefSShawn Lin */ 28933fc7eaefSShawn Lin host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON)); 28943fc7eaefSShawn Lin if (host->use_dma == DMA_INTERFACE_IDMA) { 28953fc7eaefSShawn Lin host->use_dma = TRANS_MODE_IDMAC; 28963fc7eaefSShawn Lin } else if (host->use_dma == DMA_INTERFACE_DWDMA || 28973fc7eaefSShawn Lin host->use_dma == DMA_INTERFACE_GDMA) { 28983fc7eaefSShawn Lin host->use_dma = TRANS_MODE_EDMAC; 28993fc7eaefSShawn Lin } else { 29003fc7eaefSShawn Lin goto no_dma; 29013fc7eaefSShawn Lin } 29023fc7eaefSShawn Lin 29033fc7eaefSShawn Lin /* Determine which DMA interface to use */ 29043fc7eaefSShawn Lin if (host->use_dma == TRANS_MODE_IDMAC) { 29053fc7eaefSShawn Lin /* 29063fc7eaefSShawn Lin * Check ADDR_CONFIG bit in HCON to find 29073fc7eaefSShawn Lin * IDMAC address bus width 29083fc7eaefSShawn Lin */ 290970692752SShawn Lin addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON)); 291069d99fdcSPrabu Thangamuthu 291169d99fdcSPrabu Thangamuthu if (addr_config == 1) { 291269d99fdcSPrabu Thangamuthu /* host supports IDMAC in 64-bit address mode */ 291369d99fdcSPrabu Thangamuthu host->dma_64bit_address = 1; 29143fc7eaefSShawn Lin dev_info(host->dev, 29153fc7eaefSShawn Lin "IDMAC supports 64-bit address mode.\n"); 291669d99fdcSPrabu Thangamuthu if (!dma_set_mask(host->dev, DMA_BIT_MASK(64))) 29173fc7eaefSShawn Lin dma_set_coherent_mask(host->dev, 29183fc7eaefSShawn Lin DMA_BIT_MASK(64)); 291969d99fdcSPrabu Thangamuthu } else { 292069d99fdcSPrabu Thangamuthu /* host supports IDMAC in 32-bit address mode */ 292169d99fdcSPrabu Thangamuthu host->dma_64bit_address = 0; 29223fc7eaefSShawn Lin dev_info(host->dev, 29233fc7eaefSShawn Lin "IDMAC supports 32-bit address mode.\n"); 292469d99fdcSPrabu Thangamuthu } 292569d99fdcSPrabu Thangamuthu 2926f95f3850SWill Newton /* Alloc memory for sg translation */ 2927cc190d4cSShawn Lin host->sg_cpu = dmam_alloc_coherent(host->dev, 2928cc190d4cSShawn Lin DESC_RING_BUF_SZ, 2929f95f3850SWill Newton &host->sg_dma, 
GFP_KERNEL); 2930f95f3850SWill Newton if (!host->sg_cpu) { 29313fc7eaefSShawn Lin dev_err(host->dev, 29323fc7eaefSShawn Lin "%s: could not alloc DMA memory\n", 2933f95f3850SWill Newton __func__); 2934f95f3850SWill Newton goto no_dma; 2935f95f3850SWill Newton } 2936f95f3850SWill Newton 2937f95f3850SWill Newton host->dma_ops = &dw_mci_idmac_ops; 293800956ea3SSeungwon Jeon dev_info(host->dev, "Using internal DMA controller.\n"); 29393fc7eaefSShawn Lin } else { 29403fc7eaefSShawn Lin /* TRANS_MODE_EDMAC: check dma bindings again */ 2941852ff5feSDavid Woods if ((device_property_read_string_array(dev, "dma-names", 2942852ff5feSDavid Woods NULL, 0) < 0) || 2943852ff5feSDavid Woods !device_property_present(dev, "dmas")) { 2944f95f3850SWill Newton goto no_dma; 29453fc7eaefSShawn Lin } 29463fc7eaefSShawn Lin host->dma_ops = &dw_mci_edmac_ops; 29473fc7eaefSShawn Lin dev_info(host->dev, "Using external DMA controller.\n"); 29483fc7eaefSShawn Lin } 2949f95f3850SWill Newton 2950e1631f98SJaehoon Chung if (host->dma_ops->init && host->dma_ops->start && 2951e1631f98SJaehoon Chung host->dma_ops->stop && host->dma_ops->cleanup) { 2952f95f3850SWill Newton if (host->dma_ops->init(host)) { 29530e3a22c0SShawn Lin dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n", 29540e3a22c0SShawn Lin __func__); 2955f95f3850SWill Newton goto no_dma; 2956f95f3850SWill Newton } 2957f95f3850SWill Newton } else { 29584a90920cSThomas Abraham dev_err(host->dev, "DMA initialization not found.\n"); 2959f95f3850SWill Newton goto no_dma; 2960f95f3850SWill Newton } 2961f95f3850SWill Newton 2962f95f3850SWill Newton return; 2963f95f3850SWill Newton 2964f95f3850SWill Newton no_dma: 29654a90920cSThomas Abraham dev_info(host->dev, "Using PIO mode.\n"); 29663fc7eaefSShawn Lin host->use_dma = TRANS_MODE_PIO; 2967f95f3850SWill Newton } 2968f95f3850SWill Newton 296937977729SKees Cook static void dw_mci_cmd11_timer(struct timer_list *t) 29705c935165SDoug Anderson { 297137977729SKees Cook struct dw_mci *host = 
from_timer(host, t, cmd11_timer); 29725c935165SDoug Anderson 2973fd674198SDoug Anderson if (host->state != STATE_SENDING_CMD11) { 2974fd674198SDoug Anderson dev_warn(host->dev, "Unexpected CMD11 timeout\n"); 2975fd674198SDoug Anderson return; 2976fd674198SDoug Anderson } 29775c935165SDoug Anderson 29785c935165SDoug Anderson host->cmd_status = SDMMC_INT_RTO; 29795c935165SDoug Anderson set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 29805c935165SDoug Anderson tasklet_schedule(&host->tasklet); 29815c935165SDoug Anderson } 29825c935165SDoug Anderson 298337977729SKees Cook static void dw_mci_cto_timer(struct timer_list *t) 298403de1921SAddy Ke { 298537977729SKees Cook struct dw_mci *host = from_timer(host, t, cto_timer); 29868892b705SDouglas Anderson unsigned long irqflags; 29878892b705SDouglas Anderson u32 pending; 298803de1921SAddy Ke 29898892b705SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 29908892b705SDouglas Anderson 29918892b705SDouglas Anderson /* 29928892b705SDouglas Anderson * If somehow we have very bad interrupt latency it's remotely possible 29938892b705SDouglas Anderson * that the timer could fire while the interrupt is still pending or 29948892b705SDouglas Anderson * while the interrupt is midway through running. Let's be paranoid 29958892b705SDouglas Anderson * and detect those two cases. Note that this is paranoia is somewhat 29968892b705SDouglas Anderson * justified because in this function we don't actually cancel the 29978892b705SDouglas Anderson * pending command in the controller--we just assume it will never come. 
29988892b705SDouglas Anderson */ 29998892b705SDouglas Anderson pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 30008892b705SDouglas Anderson if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) { 30018892b705SDouglas Anderson /* The interrupt should fire; no need to act but we can warn */ 30028892b705SDouglas Anderson dev_warn(host->dev, "Unexpected interrupt latency\n"); 30038892b705SDouglas Anderson goto exit; 30048892b705SDouglas Anderson } 30058892b705SDouglas Anderson if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) { 30068892b705SDouglas Anderson /* Presumably interrupt handler couldn't delete the timer */ 30078892b705SDouglas Anderson dev_warn(host->dev, "CTO timeout when already completed\n"); 30088892b705SDouglas Anderson goto exit; 30098892b705SDouglas Anderson } 30108892b705SDouglas Anderson 30118892b705SDouglas Anderson /* 30128892b705SDouglas Anderson * Continued paranoia to make sure we're in the state we expect. 30138892b705SDouglas Anderson * This paranoia isn't really justified but it seems good to be safe. 30148892b705SDouglas Anderson */ 301503de1921SAddy Ke switch (host->state) { 301603de1921SAddy Ke case STATE_SENDING_CMD11: 301703de1921SAddy Ke case STATE_SENDING_CMD: 301803de1921SAddy Ke case STATE_SENDING_STOP: 301903de1921SAddy Ke /* 302003de1921SAddy Ke * If CMD_DONE interrupt does NOT come in sending command 302103de1921SAddy Ke * state, we should notify the driver to terminate current 302203de1921SAddy Ke * transfer and report a command timeout to the core. 
302303de1921SAddy Ke */ 302403de1921SAddy Ke host->cmd_status = SDMMC_INT_RTO; 302503de1921SAddy Ke set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 302603de1921SAddy Ke tasklet_schedule(&host->tasklet); 302703de1921SAddy Ke break; 302803de1921SAddy Ke default: 302903de1921SAddy Ke dev_warn(host->dev, "Unexpected command timeout, state %d\n", 303003de1921SAddy Ke host->state); 303103de1921SAddy Ke break; 303203de1921SAddy Ke } 30338892b705SDouglas Anderson 30348892b705SDouglas Anderson exit: 30358892b705SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 303603de1921SAddy Ke } 303703de1921SAddy Ke 303837977729SKees Cook static void dw_mci_dto_timer(struct timer_list *t) 303957e10486SAddy Ke { 304037977729SKees Cook struct dw_mci *host = from_timer(host, t, dto_timer); 304193c23ae3SDouglas Anderson unsigned long irqflags; 304293c23ae3SDouglas Anderson u32 pending; 304357e10486SAddy Ke 304493c23ae3SDouglas Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 304593c23ae3SDouglas Anderson 304693c23ae3SDouglas Anderson /* 304793c23ae3SDouglas Anderson * The DTO timer is much longer than the CTO timer, so it's even less 304893c23ae3SDouglas Anderson * likely that we'll these cases, but it pays to be paranoid. 
304993c23ae3SDouglas Anderson */ 305093c23ae3SDouglas Anderson pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 305193c23ae3SDouglas Anderson if (pending & SDMMC_INT_DATA_OVER) { 305293c23ae3SDouglas Anderson /* The interrupt should fire; no need to act but we can warn */ 305393c23ae3SDouglas Anderson dev_warn(host->dev, "Unexpected data interrupt latency\n"); 305493c23ae3SDouglas Anderson goto exit; 305593c23ae3SDouglas Anderson } 305693c23ae3SDouglas Anderson if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) { 305793c23ae3SDouglas Anderson /* Presumably interrupt handler couldn't delete the timer */ 305893c23ae3SDouglas Anderson dev_warn(host->dev, "DTO timeout when already completed\n"); 305993c23ae3SDouglas Anderson goto exit; 306093c23ae3SDouglas Anderson } 306193c23ae3SDouglas Anderson 306293c23ae3SDouglas Anderson /* 306393c23ae3SDouglas Anderson * Continued paranoia to make sure we're in the state we expect. 306493c23ae3SDouglas Anderson * This paranoia isn't really justified but it seems good to be safe. 306593c23ae3SDouglas Anderson */ 306657e10486SAddy Ke switch (host->state) { 306757e10486SAddy Ke case STATE_SENDING_DATA: 306857e10486SAddy Ke case STATE_DATA_BUSY: 306957e10486SAddy Ke /* 307057e10486SAddy Ke * If DTO interrupt does NOT come in sending data state, 307157e10486SAddy Ke * we should notify the driver to terminate current transfer 307257e10486SAddy Ke * and report a data timeout to the core. 
307357e10486SAddy Ke */ 307457e10486SAddy Ke host->data_status = SDMMC_INT_DRTO; 307557e10486SAddy Ke set_bit(EVENT_DATA_ERROR, &host->pending_events); 307657e10486SAddy Ke set_bit(EVENT_DATA_COMPLETE, &host->pending_events); 307757e10486SAddy Ke tasklet_schedule(&host->tasklet); 307857e10486SAddy Ke break; 307957e10486SAddy Ke default: 308093c23ae3SDouglas Anderson dev_warn(host->dev, "Unexpected data timeout, state %d\n", 308193c23ae3SDouglas Anderson host->state); 308257e10486SAddy Ke break; 308357e10486SAddy Ke } 308493c23ae3SDouglas Anderson 308593c23ae3SDouglas Anderson exit: 308693c23ae3SDouglas Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 308757e10486SAddy Ke } 308857e10486SAddy Ke 3089c91eab4bSThomas Abraham #ifdef CONFIG_OF 3090c91eab4bSThomas Abraham static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 3091c91eab4bSThomas Abraham { 3092c91eab4bSThomas Abraham struct dw_mci_board *pdata; 3093c91eab4bSThomas Abraham struct device *dev = host->dev; 3094e95baf13SArnd Bergmann const struct dw_mci_drv_data *drv_data = host->drv_data; 3095e8cc37b8SShawn Lin int ret; 30963c6d89eaSDoug Anderson u32 clock_frequency; 3097c91eab4bSThomas Abraham 3098c91eab4bSThomas Abraham pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 3099bf3707eaSBeomho Seo if (!pdata) 3100c91eab4bSThomas Abraham return ERR_PTR(-ENOMEM); 3101c91eab4bSThomas Abraham 3102d6786fefSGuodong Xu /* find reset controller when exist */ 3103a93d6f31SPhilipp Zabel pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset"); 3104d6786fefSGuodong Xu if (IS_ERR(pdata->rstc)) { 3105d6786fefSGuodong Xu if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER) 3106d6786fefSGuodong Xu return ERR_PTR(-EPROBE_DEFER); 3107d6786fefSGuodong Xu } 3108d6786fefSGuodong Xu 3109852ff5feSDavid Woods if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth)) 31100e3a22c0SShawn Lin dev_info(dev, 31110e3a22c0SShawn Lin "fifo-depth property not found, using value of FIFOTH register as 
default\n"); 3112c91eab4bSThomas Abraham 3113852ff5feSDavid Woods device_property_read_u32(dev, "card-detect-delay", 3114852ff5feSDavid Woods &pdata->detect_delay_ms); 3115c91eab4bSThomas Abraham 3116852ff5feSDavid Woods device_property_read_u32(dev, "data-addr", &host->data_addr_override); 3117a0361c1aSJun Nie 3118852ff5feSDavid Woods if (device_property_present(dev, "fifo-watermark-aligned")) 3119d6fced83SJun Nie host->wm_aligned = true; 3120d6fced83SJun Nie 3121852ff5feSDavid Woods if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency)) 31223c6d89eaSDoug Anderson pdata->bus_hz = clock_frequency; 31233c6d89eaSDoug Anderson 3124cb27a843SJames Hogan if (drv_data && drv_data->parse_dt) { 3125cb27a843SJames Hogan ret = drv_data->parse_dt(host); 3126800d78bfSThomas Abraham if (ret) 3127800d78bfSThomas Abraham return ERR_PTR(ret); 3128800d78bfSThomas Abraham } 3129800d78bfSThomas Abraham 3130c91eab4bSThomas Abraham return pdata; 3131c91eab4bSThomas Abraham } 3132c91eab4bSThomas Abraham 3133c91eab4bSThomas Abraham #else /* CONFIG_OF */ 3134c91eab4bSThomas Abraham static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) 3135c91eab4bSThomas Abraham { 3136c91eab4bSThomas Abraham return ERR_PTR(-EINVAL); 3137c91eab4bSThomas Abraham } 3138c91eab4bSThomas Abraham #endif /* CONFIG_OF */ 3139c91eab4bSThomas Abraham 3140fa0c3283SDoug Anderson static void dw_mci_enable_cd(struct dw_mci *host) 3141fa0c3283SDoug Anderson { 3142fa0c3283SDoug Anderson unsigned long irqflags; 3143fa0c3283SDoug Anderson u32 temp; 3144fa0c3283SDoug Anderson 3145e8cc37b8SShawn Lin /* 3146e8cc37b8SShawn Lin * No need for CD if all slots have a non-error GPIO 3147e8cc37b8SShawn Lin * as well as broken card detection is found. 
3148e8cc37b8SShawn Lin */ 3149e47c0b96SJaehoon Chung if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL) 3150e8cc37b8SShawn Lin return; 3151fa0c3283SDoug Anderson 3152e47c0b96SJaehoon Chung if (mmc_gpio_get_cd(host->slot->mmc) < 0) { 3153fa0c3283SDoug Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 3154fa0c3283SDoug Anderson temp = mci_readl(host, INTMASK); 3155fa0c3283SDoug Anderson temp |= SDMMC_INT_CD; 3156fa0c3283SDoug Anderson mci_writel(host, INTMASK, temp); 3157fa0c3283SDoug Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 3158fa0c3283SDoug Anderson } 315958870241SJaehoon Chung } 3160fa0c3283SDoug Anderson 316162ca8034SShashidhar Hiremath int dw_mci_probe(struct dw_mci *host) 3162f95f3850SWill Newton { 3163e95baf13SArnd Bergmann const struct dw_mci_drv_data *drv_data = host->drv_data; 316462ca8034SShashidhar Hiremath int width, i, ret = 0; 3165f95f3850SWill Newton u32 fifo_size; 3166f95f3850SWill Newton 3167c91eab4bSThomas Abraham if (!host->pdata) { 3168c91eab4bSThomas Abraham host->pdata = dw_mci_parse_dt(host); 3169d6786fefSGuodong Xu if (PTR_ERR(host->pdata) == -EPROBE_DEFER) { 3170d6786fefSGuodong Xu return -EPROBE_DEFER; 3171d6786fefSGuodong Xu } else if (IS_ERR(host->pdata)) { 3172c91eab4bSThomas Abraham dev_err(host->dev, "platform data not available\n"); 3173c91eab4bSThomas Abraham return -EINVAL; 3174c91eab4bSThomas Abraham } 3175f95f3850SWill Newton } 3176f95f3850SWill Newton 3177780f22afSSeungwon Jeon host->biu_clk = devm_clk_get(host->dev, "biu"); 3178f90a0612SThomas Abraham if (IS_ERR(host->biu_clk)) { 3179f90a0612SThomas Abraham dev_dbg(host->dev, "biu clock not available\n"); 3180f90a0612SThomas Abraham } else { 3181f90a0612SThomas Abraham ret = clk_prepare_enable(host->biu_clk); 3182f90a0612SThomas Abraham if (ret) { 3183f90a0612SThomas Abraham dev_err(host->dev, "failed to enable biu clock\n"); 3184f90a0612SThomas Abraham return ret; 3185f90a0612SThomas Abraham } 3186f95f3850SWill Newton } 3187f95f3850SWill Newton 
3188780f22afSSeungwon Jeon host->ciu_clk = devm_clk_get(host->dev, "ciu"); 3189f90a0612SThomas Abraham if (IS_ERR(host->ciu_clk)) { 3190f90a0612SThomas Abraham dev_dbg(host->dev, "ciu clock not available\n"); 31913c6d89eaSDoug Anderson host->bus_hz = host->pdata->bus_hz; 3192f90a0612SThomas Abraham } else { 3193f90a0612SThomas Abraham ret = clk_prepare_enable(host->ciu_clk); 3194f90a0612SThomas Abraham if (ret) { 3195f90a0612SThomas Abraham dev_err(host->dev, "failed to enable ciu clock\n"); 3196f90a0612SThomas Abraham goto err_clk_biu; 3197f90a0612SThomas Abraham } 3198f90a0612SThomas Abraham 31993c6d89eaSDoug Anderson if (host->pdata->bus_hz) { 32003c6d89eaSDoug Anderson ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); 32013c6d89eaSDoug Anderson if (ret) 32023c6d89eaSDoug Anderson dev_warn(host->dev, 3203612de4c1SJaehoon Chung "Unable to set bus rate to %uHz\n", 32043c6d89eaSDoug Anderson host->pdata->bus_hz); 32053c6d89eaSDoug Anderson } 3206f90a0612SThomas Abraham host->bus_hz = clk_get_rate(host->ciu_clk); 32073c6d89eaSDoug Anderson } 3208f90a0612SThomas Abraham 3209612de4c1SJaehoon Chung if (!host->bus_hz) { 3210612de4c1SJaehoon Chung dev_err(host->dev, 3211612de4c1SJaehoon Chung "Platform data must supply bus speed\n"); 3212612de4c1SJaehoon Chung ret = -ENODEV; 3213612de4c1SJaehoon Chung goto err_clk_ciu; 3214612de4c1SJaehoon Chung } 3215612de4c1SJaehoon Chung 3216941e372dSliwei if (!IS_ERR(host->pdata->rstc)) { 3217941e372dSliwei reset_control_assert(host->pdata->rstc); 3218941e372dSliwei usleep_range(10, 50); 3219941e372dSliwei reset_control_deassert(host->pdata->rstc); 3220941e372dSliwei } 3221941e372dSliwei 3222002f0d5cSYuvaraj Kumar C D if (drv_data && drv_data->init) { 3223002f0d5cSYuvaraj Kumar C D ret = drv_data->init(host); 3224002f0d5cSYuvaraj Kumar C D if (ret) { 3225002f0d5cSYuvaraj Kumar C D dev_err(host->dev, 3226002f0d5cSYuvaraj Kumar C D "implementation specific init failed\n"); 3227002f0d5cSYuvaraj Kumar C D goto err_clk_ciu; 
3228002f0d5cSYuvaraj Kumar C D } 3229002f0d5cSYuvaraj Kumar C D } 3230002f0d5cSYuvaraj Kumar C D 323137977729SKees Cook timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0); 323237977729SKees Cook timer_setup(&host->cto_timer, dw_mci_cto_timer, 0); 323337977729SKees Cook timer_setup(&host->dto_timer, dw_mci_dto_timer, 0); 323457e10486SAddy Ke 3235f95f3850SWill Newton spin_lock_init(&host->lock); 3236f8c58c11SDoug Anderson spin_lock_init(&host->irq_lock); 3237f95f3850SWill Newton INIT_LIST_HEAD(&host->queue); 3238f95f3850SWill Newton 3239f95f3850SWill Newton /* 3240f95f3850SWill Newton * Get the host data width - this assumes that HCON has been set with 3241f95f3850SWill Newton * the correct values. 3242f95f3850SWill Newton */ 324370692752SShawn Lin i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON)); 3244f95f3850SWill Newton if (!i) { 3245f95f3850SWill Newton host->push_data = dw_mci_push_data16; 3246f95f3850SWill Newton host->pull_data = dw_mci_pull_data16; 3247f95f3850SWill Newton width = 16; 3248f95f3850SWill Newton host->data_shift = 1; 3249f95f3850SWill Newton } else if (i == 2) { 3250f95f3850SWill Newton host->push_data = dw_mci_push_data64; 3251f95f3850SWill Newton host->pull_data = dw_mci_pull_data64; 3252f95f3850SWill Newton width = 64; 3253f95f3850SWill Newton host->data_shift = 3; 3254f95f3850SWill Newton } else { 3255f95f3850SWill Newton /* Check for a reserved value, and warn if it is */ 3256f95f3850SWill Newton WARN((i != 1), 3257f95f3850SWill Newton "HCON reports a reserved host data width!\n" 3258f95f3850SWill Newton "Defaulting to 32-bit access.\n"); 3259f95f3850SWill Newton host->push_data = dw_mci_push_data32; 3260f95f3850SWill Newton host->pull_data = dw_mci_pull_data32; 3261f95f3850SWill Newton width = 32; 3262f95f3850SWill Newton host->data_shift = 2; 3263f95f3850SWill Newton } 3264f95f3850SWill Newton 3265f95f3850SWill Newton /* Reset all blocks */ 32663744415cSShawn Lin if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 
32673744415cSShawn Lin ret = -ENODEV; 32683744415cSShawn Lin goto err_clk_ciu; 32693744415cSShawn Lin } 3270141a712aSSeungwon Jeon 3271141a712aSSeungwon Jeon host->dma_ops = host->pdata->dma_ops; 3272141a712aSSeungwon Jeon dw_mci_init_dma(host); 3273f95f3850SWill Newton 3274f95f3850SWill Newton /* Clear the interrupts for the host controller */ 3275f95f3850SWill Newton mci_writel(host, RINTSTS, 0xFFFFFFFF); 3276f95f3850SWill Newton mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3277f95f3850SWill Newton 3278f95f3850SWill Newton /* Put in max timeout */ 3279f95f3850SWill Newton mci_writel(host, TMOUT, 0xFFFFFFFF); 3280f95f3850SWill Newton 3281f95f3850SWill Newton /* 3282f95f3850SWill Newton * FIFO threshold settings RxMark = fifo_size / 2 - 1, 3283f95f3850SWill Newton * Tx Mark = fifo_size / 2 DMA Size = 8 3284f95f3850SWill Newton */ 3285b86d8253SJames Hogan if (!host->pdata->fifo_depth) { 3286b86d8253SJames Hogan /* 3287b86d8253SJames Hogan * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may 3288b86d8253SJames Hogan * have been overwritten by the bootloader, just like we're 3289b86d8253SJames Hogan * about to do, so if you know the value for your hardware, you 3290b86d8253SJames Hogan * should put it in the platform data. 
3291b86d8253SJames Hogan */ 3292f95f3850SWill Newton fifo_size = mci_readl(host, FIFOTH); 32938234e869SJaehoon Chung fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 3294b86d8253SJames Hogan } else { 3295b86d8253SJames Hogan fifo_size = host->pdata->fifo_depth; 3296b86d8253SJames Hogan } 3297b86d8253SJames Hogan host->fifo_depth = fifo_size; 329852426899SSeungwon Jeon host->fifoth_val = 329952426899SSeungwon Jeon SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); 3300e61cf118SJaehoon Chung mci_writel(host, FIFOTH, host->fifoth_val); 3301f95f3850SWill Newton 3302f95f3850SWill Newton /* disable clock to CIU */ 3303f95f3850SWill Newton mci_writel(host, CLKENA, 0); 3304f95f3850SWill Newton mci_writel(host, CLKSRC, 0); 3305f95f3850SWill Newton 330663008768SJames Hogan /* 330763008768SJames Hogan * In 2.40a spec, Data offset is changed. 330863008768SJames Hogan * Need to check the version-id and set data-offset for DATA register. 330963008768SJames Hogan */ 331063008768SJames Hogan host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 331163008768SJames Hogan dev_info(host->dev, "Version ID is %04x\n", host->verid); 331263008768SJames Hogan 3313a0361c1aSJun Nie if (host->data_addr_override) 3314a0361c1aSJun Nie host->fifo_reg = host->regs + host->data_addr_override; 3315a0361c1aSJun Nie else if (host->verid < DW_MMC_240A) 331676184ac1SBen Dooks host->fifo_reg = host->regs + DATA_OFFSET; 331763008768SJames Hogan else 331876184ac1SBen Dooks host->fifo_reg = host->regs + DATA_240A_OFFSET; 331963008768SJames Hogan 3320f95f3850SWill Newton tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 3321780f22afSSeungwon Jeon ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 3322780f22afSSeungwon Jeon host->irq_flags, "dw-mci", host); 3323f95f3850SWill Newton if (ret) 33246130e7a9SDoug Anderson goto err_dmaunmap; 3325f95f3850SWill Newton 3326d30a8f7bSJaehoon Chung /* 3327fa0c3283SDoug Anderson * Enable interrupts for command done, data over, data 
empty, 33282da1d7f2SYuvaraj CD * receive ready and error such as transmit, receive timeout, crc error 33292da1d7f2SYuvaraj CD */ 33302da1d7f2SYuvaraj CD mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 33312da1d7f2SYuvaraj CD SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3332fa0c3283SDoug Anderson DW_MCI_ERROR_FLAGS); 33330e3a22c0SShawn Lin /* Enable mci interrupt */ 33340e3a22c0SShawn Lin mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 33352da1d7f2SYuvaraj CD 33360e3a22c0SShawn Lin dev_info(host->dev, 33370e3a22c0SShawn Lin "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n", 33382da1d7f2SYuvaraj CD host->irq, width, fifo_size); 33392da1d7f2SYuvaraj CD 3340f95f3850SWill Newton /* We need at least one slot to succeed */ 3341e4a65ef7SJaehoon Chung ret = dw_mci_init_slot(host); 334258870241SJaehoon Chung if (ret) { 33431c2215b7SThomas Abraham dev_dbg(host->dev, "slot %d init failed\n", i); 33446130e7a9SDoug Anderson goto err_dmaunmap; 3345f95f3850SWill Newton } 3346f95f3850SWill Newton 3347b793f658SDoug Anderson /* Now that slots are all setup, we can enable card detect */ 3348b793f658SDoug Anderson dw_mci_enable_cd(host); 3349b793f658SDoug Anderson 3350f95f3850SWill Newton return 0; 3351f95f3850SWill Newton 3352f95f3850SWill Newton err_dmaunmap: 3353f95f3850SWill Newton if (host->use_dma && host->dma_ops->exit) 3354f95f3850SWill Newton host->dma_ops->exit(host); 3355f90a0612SThomas Abraham 3356d6786fefSGuodong Xu if (!IS_ERR(host->pdata->rstc)) 3357d6786fefSGuodong Xu reset_control_assert(host->pdata->rstc); 3358d6786fefSGuodong Xu 3359f90a0612SThomas Abraham err_clk_ciu: 3360f90a0612SThomas Abraham clk_disable_unprepare(host->ciu_clk); 3361780f22afSSeungwon Jeon 3362f90a0612SThomas Abraham err_clk_biu: 3363f90a0612SThomas Abraham clk_disable_unprepare(host->biu_clk); 3364780f22afSSeungwon Jeon 3365f95f3850SWill Newton return ret; 3366f95f3850SWill Newton } 336762ca8034SShashidhar Hiremath EXPORT_SYMBOL(dw_mci_probe); 3368f95f3850SWill Newton 
/**
 * dw_mci_remove - tear down a DW MMC host controller.
 * @host: the controller instance set up by dw_mci_probe().
 *
 * Cleans up the (single) slot, masks and clears all controller
 * interrupts, gates the clock to the card interface unit (CIU),
 * shuts down DMA, asserts the optional reset line, and finally
 * disables both interface clocks.
 */
void dw_mci_remove(struct dw_mci *host)
{
	dev_dbg(host->dev, "remove slot\n");
	if (host->slot)
		dw_mci_cleanup_slot(host->slot);

	/* Quiesce the controller: ack anything pending, then mask all. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	/* rstc is optional; IS_ERR means no reset line was provided. */
	if (!IS_ERR(host->pdata->rstc))
		reset_control_assert(host->pdata->rstc);

	clk_disable_unprepare(host->ciu_clk);
	clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);



#ifdef CONFIG_PM
/**
 * dw_mci_runtime_suspend - runtime-PM suspend callback.
 * @dev: the device backing this host.
 *
 * Shuts down DMA and gates the CIU clock. The BIU clock is only
 * gated when card detect does not depend on it: i.e. when the slot
 * uses a GPIO for card detect or the card is non-removable —
 * presumably polling-based detection needs the BIU clock kept
 * running (NOTE(review): rationale inferred from the condition;
 * confirm against the matching check in dw_mci_runtime_resume()).
 *
 * Return: always 0.
 */
int dw_mci_runtime_suspend(struct device *dev)
{
	struct dw_mci *host = dev_get_drvdata(dev);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	clk_disable_unprepare(host->ciu_clk);

	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return 0;
}
EXPORT_SYMBOL(dw_mci_runtime_suspend);

/**
 * dw_mci_runtime_resume - runtime-PM resume callback.
 * @dev: the device backing this host.
 *
 * Re-enables the interface clocks (BIU only if it was gated in
 * suspend — same condition), resets the controller, re-initializes
 * DMA, and restores the register state programmed at probe time
 * (FIFO thresholds, timeout, interrupt mask). It then restores the
 * bus/clock configuration for the slot, re-enables SDIO interrupts
 * if they were claimed, and re-arms card detect.
 *
 * Return: 0 on success, -ENODEV if the controller reset fails, or
 * a negative errno from clk_prepare_enable().
 */
int dw_mci_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct dw_mci *host = dev_get_drvdata(dev);

	/* Mirror of the suspend-side condition: BIU was gated there. */
	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc))) {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(host->ciu_clk);
	if (ret)
		goto err;

	/* Full controller reset; without it the register state is stale. */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
		clk_disable_unprepare(host->ciu_clk);
		ret = -ENODEV;
		goto err;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/* Same interrupt setup sequence as probe: clear, unmask, enable. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);


	if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
		dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);

	/* Force setup bus to guarantee available clock output */
	dw_mci_setup_bus(host->slot, true);

	/* Re-enable SDIO interrupts. */
	if (sdio_irq_claimed(host->slot->mmc))
		__dw_mci_enable_sdio_irq(host->slot, 1);

	/* Now that slots are all setup, we can enable card detect */
	dw_mci_enable_cd(host);

	return 0;

err:
	/* Undo the conditional BIU enable from the top of this function. */
	if (host->slot &&
	    (mmc_can_gpio_cd(host->slot->mmc) ||
	     !mmc_card_is_removable(host->slot->mmc)))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_runtime_resume);
#endif /* CONFIG_PM */

/* Module init: just announces the core driver; platform glue registers
 * the actual devices elsewhere. */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

/* Nothing to undo: init only printed a banner. */
static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init); 3494f95f3850SWill Newton module_exit(dw_mci_exit); 3495f95f3850SWill Newton 3496f95f3850SWill Newton MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 3497f95f3850SWill Newton MODULE_AUTHOR("NXP Semiconductor VietNam"); 3498f95f3850SWill Newton MODULE_AUTHOR("Imagination Technologies Ltd"); 3499f95f3850SWill Newton MODULE_LICENSE("GPL v2"); 3500