1f95f3850SWill Newton /* 2f95f3850SWill Newton * Synopsys DesignWare Multimedia Card Interface driver 3f95f3850SWill Newton * (Based on NXP driver for lpc 31xx) 4f95f3850SWill Newton * 5f95f3850SWill Newton * Copyright (C) 2009 NXP Semiconductors 6f95f3850SWill Newton * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 7f95f3850SWill Newton * 8f95f3850SWill Newton * This program is free software; you can redistribute it and/or modify 9f95f3850SWill Newton * it under the terms of the GNU General Public License as published by 10f95f3850SWill Newton * the Free Software Foundation; either version 2 of the License, or 11f95f3850SWill Newton * (at your option) any later version. 12f95f3850SWill Newton */ 13f95f3850SWill Newton 14f95f3850SWill Newton #include <linux/blkdev.h> 15f95f3850SWill Newton #include <linux/clk.h> 16f95f3850SWill Newton #include <linux/debugfs.h> 17f95f3850SWill Newton #include <linux/device.h> 18f95f3850SWill Newton #include <linux/dma-mapping.h> 19f95f3850SWill Newton #include <linux/err.h> 20f95f3850SWill Newton #include <linux/init.h> 21f95f3850SWill Newton #include <linux/interrupt.h> 22b6d2d81cSShawn Lin #include <linux/iopoll.h> 23f95f3850SWill Newton #include <linux/ioport.h> 24f95f3850SWill Newton #include <linux/module.h> 25f95f3850SWill Newton #include <linux/platform_device.h> 26a6db2c86SDouglas Anderson #include <linux/pm_runtime.h> 27f95f3850SWill Newton #include <linux/seq_file.h> 28f95f3850SWill Newton #include <linux/slab.h> 29f95f3850SWill Newton #include <linux/stat.h> 30f95f3850SWill Newton #include <linux/delay.h> 31f95f3850SWill Newton #include <linux/irq.h> 32b24c8b26SDoug Anderson #include <linux/mmc/card.h> 33f95f3850SWill Newton #include <linux/mmc/host.h> 34f95f3850SWill Newton #include <linux/mmc/mmc.h> 3501730558SDoug Anderson #include <linux/mmc/sd.h> 3690c2143aSSeungwon Jeon #include <linux/mmc/sdio.h> 37f95f3850SWill Newton #include <linux/bitops.h> 38c07946a3SJaehoon Chung #include <linux/regulator/consumer.h> 
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	100000		/* unit: HZ */

/* All internal-DMAC interrupt sources, used to ack/clear IDSTS(64) */
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

/* Size of the IDMAC descriptor ring buffer (one page) */
#define DESC_RING_BUF_SZ	PAGE_SIZE

/*
 * IDMAC descriptor for controllers configured with 64-bit addressing:
 * eight 32-bit words, with buffer and next-descriptor addresses split
 * into lower/upper halves.
 */
struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */
/* True once the IDMAC has cleared the OWN bit (descriptor back to CPU) */
#define IDMAC_OWN_CLR64(x) \
	!((x) & cpu_to_le32(IDMAC_DES0_OWN))

	u32		des1;	/* Reserved */

	u32		des2;	/*Buffer sizes */
/* Write buffer-1 size (low 13 bits of des2), preserving the other bits */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1*/
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1*/

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

/*
 * IDMAC descriptor for the (default) 32-bit addressing layout: four
 * little-endian words.  Control bits live in des0.
 */
struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
/* Write buffer-1 size (low 13 bits of des1), preserving the other bits */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000

static int dw_mci_card_busy(struct mmc_host *mmc);

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v) 114f95f3850SWill Newton { 115f95f3850SWill Newton struct dw_mci_slot *slot = s->private; 116f95f3850SWill Newton struct mmc_request *mrq; 117f95f3850SWill Newton struct mmc_command *cmd; 118f95f3850SWill Newton struct mmc_command *stop; 119f95f3850SWill Newton struct mmc_data *data; 120f95f3850SWill Newton 121f95f3850SWill Newton /* Make sure we get a consistent snapshot */ 122f95f3850SWill Newton spin_lock_bh(&slot->host->lock); 123f95f3850SWill Newton mrq = slot->mrq; 124f95f3850SWill Newton 125f95f3850SWill Newton if (mrq) { 126f95f3850SWill Newton cmd = mrq->cmd; 127f95f3850SWill Newton data = mrq->data; 128f95f3850SWill Newton stop = mrq->stop; 129f95f3850SWill Newton 130f95f3850SWill Newton if (cmd) 131f95f3850SWill Newton seq_printf(s, 132f95f3850SWill Newton "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 133f95f3850SWill Newton cmd->opcode, cmd->arg, cmd->flags, 134f95f3850SWill Newton cmd->resp[0], cmd->resp[1], cmd->resp[2], 135f95f3850SWill Newton cmd->resp[2], cmd->error); 136f95f3850SWill Newton if (data) 137f95f3850SWill Newton seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", 138f95f3850SWill Newton data->bytes_xfered, data->blocks, 139f95f3850SWill Newton data->blksz, data->flags, data->error); 140f95f3850SWill Newton if (stop) 141f95f3850SWill Newton seq_printf(s, 142f95f3850SWill Newton "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", 143f95f3850SWill Newton stop->opcode, stop->arg, stop->flags, 144f95f3850SWill Newton stop->resp[0], stop->resp[1], stop->resp[2], 145f95f3850SWill Newton stop->resp[2], stop->error); 146f95f3850SWill Newton } 147f95f3850SWill Newton 148f95f3850SWill Newton spin_unlock_bh(&slot->host->lock); 149f95f3850SWill Newton 150f95f3850SWill Newton return 0; 151f95f3850SWill Newton } 152f95f3850SWill Newton 153f95f3850SWill Newton static int dw_mci_req_open(struct inode *inode, struct file *file) 154f95f3850SWill Newton { 155f95f3850SWill Newton return single_open(file, 
dw_mci_req_show, inode->i_private); 156f95f3850SWill Newton } 157f95f3850SWill Newton 158f95f3850SWill Newton static const struct file_operations dw_mci_req_fops = { 159f95f3850SWill Newton .owner = THIS_MODULE, 160f95f3850SWill Newton .open = dw_mci_req_open, 161f95f3850SWill Newton .read = seq_read, 162f95f3850SWill Newton .llseek = seq_lseek, 163f95f3850SWill Newton .release = single_release, 164f95f3850SWill Newton }; 165f95f3850SWill Newton 166f95f3850SWill Newton static int dw_mci_regs_show(struct seq_file *s, void *v) 167f95f3850SWill Newton { 16821657ebdSJaehoon Chung struct dw_mci *host = s->private; 16921657ebdSJaehoon Chung 17021657ebdSJaehoon Chung seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); 17121657ebdSJaehoon Chung seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); 17221657ebdSJaehoon Chung seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); 17321657ebdSJaehoon Chung seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL)); 17421657ebdSJaehoon Chung seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); 17521657ebdSJaehoon Chung seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); 176f95f3850SWill Newton 177f95f3850SWill Newton return 0; 178f95f3850SWill Newton } 179f95f3850SWill Newton 180f95f3850SWill Newton static int dw_mci_regs_open(struct inode *inode, struct file *file) 181f95f3850SWill Newton { 182f95f3850SWill Newton return single_open(file, dw_mci_regs_show, inode->i_private); 183f95f3850SWill Newton } 184f95f3850SWill Newton 185f95f3850SWill Newton static const struct file_operations dw_mci_regs_fops = { 186f95f3850SWill Newton .owner = THIS_MODULE, 187f95f3850SWill Newton .open = dw_mci_regs_open, 188f95f3850SWill Newton .read = seq_read, 189f95f3850SWill Newton .llseek = seq_lseek, 190f95f3850SWill Newton .release = single_release, 191f95f3850SWill Newton }; 192f95f3850SWill Newton 193f95f3850SWill Newton static void dw_mci_init_debugfs(struct dw_mci_slot *slot) 194f95f3850SWill Newton { 
195f95f3850SWill Newton struct mmc_host *mmc = slot->mmc; 196f95f3850SWill Newton struct dw_mci *host = slot->host; 197f95f3850SWill Newton struct dentry *root; 198f95f3850SWill Newton struct dentry *node; 199f95f3850SWill Newton 200f95f3850SWill Newton root = mmc->debugfs_root; 201f95f3850SWill Newton if (!root) 202f95f3850SWill Newton return; 203f95f3850SWill Newton 204f95f3850SWill Newton node = debugfs_create_file("regs", S_IRUSR, root, host, 205f95f3850SWill Newton &dw_mci_regs_fops); 206f95f3850SWill Newton if (!node) 207f95f3850SWill Newton goto err; 208f95f3850SWill Newton 209f95f3850SWill Newton node = debugfs_create_file("req", S_IRUSR, root, slot, 210f95f3850SWill Newton &dw_mci_req_fops); 211f95f3850SWill Newton if (!node) 212f95f3850SWill Newton goto err; 213f95f3850SWill Newton 214f95f3850SWill Newton node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); 215f95f3850SWill Newton if (!node) 216f95f3850SWill Newton goto err; 217f95f3850SWill Newton 218f95f3850SWill Newton node = debugfs_create_x32("pending_events", S_IRUSR, root, 219f95f3850SWill Newton (u32 *)&host->pending_events); 220f95f3850SWill Newton if (!node) 221f95f3850SWill Newton goto err; 222f95f3850SWill Newton 223f95f3850SWill Newton node = debugfs_create_x32("completed_events", S_IRUSR, root, 224f95f3850SWill Newton (u32 *)&host->completed_events); 225f95f3850SWill Newton if (!node) 226f95f3850SWill Newton goto err; 227f95f3850SWill Newton 228f95f3850SWill Newton return; 229f95f3850SWill Newton 230f95f3850SWill Newton err: 231f95f3850SWill Newton dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); 232f95f3850SWill Newton } 233f95f3850SWill Newton #endif /* defined(CONFIG_DEBUG_FS) */ 234f95f3850SWill Newton 23501730558SDoug Anderson static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg); 2368e6db1f6SShawn Lin static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) 2378e6db1f6SShawn Lin { 2388e6db1f6SShawn Lin u32 ctrl; 
/*
 * Set the requested reset bit(s) in CTRL and poll until the hardware
 * clears them again.  Returns true on success, false if the bits are
 * still set after 500 ms.
 */
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
				      !(ctrl & reset),
				      1, 500 * USEC_PER_MSEC)) {
		dev_err(host->dev,
			"Timeout resetting block (ctrl reset %#x)\n",
			ctrl & reset);
		return false;
	}

	return true;
}

/*
 * Translate an mmc_command into the SDMMC_CMD register value used to
 * issue it.  Also performs the CMD11 (voltage switch) preparation side
 * effects: state transition and disabling low-power clock gating.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	/* Abort-class commands (and CCCR abort writes) use the STOP bit */
	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

/*
 * Build the stop/abort command (stored in host->stop_abort) that should
 * follow @cmd if it errors out, and return the CMD register value used
 * to issue it.  Returns 0 when no stop/abort is applicable (no data
 * phase, or an opcode we don't know how to abort).
 */
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK ||
	    cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
		/* Block transfers are aborted with CMD12 */
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		/* SDIO transfers are aborted by writing the CCCR abort reg */
		stop->opcode = SD_IO_RW_DIRECT;
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}
35790c2143aSSeungwon Jeon 3588c005b40SJaehoon Chung if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags)) 3598c005b40SJaehoon Chung cmdr |= SDMMC_CMD_USE_HOLD_REG; 3608c005b40SJaehoon Chung 36190c2143aSSeungwon Jeon return cmdr; 36290c2143aSSeungwon Jeon } 36390c2143aSSeungwon Jeon 3640bdbd0e8SDoug Anderson static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags) 3650bdbd0e8SDoug Anderson { 366b6d2d81cSShawn Lin u32 status; 3670bdbd0e8SDoug Anderson 3680bdbd0e8SDoug Anderson /* 3690bdbd0e8SDoug Anderson * Databook says that before issuing a new data transfer command 3700bdbd0e8SDoug Anderson * we need to check to see if the card is busy. Data transfer commands 3710bdbd0e8SDoug Anderson * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that. 3720bdbd0e8SDoug Anderson * 3730bdbd0e8SDoug Anderson * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is 3740bdbd0e8SDoug Anderson * expected. 3750bdbd0e8SDoug Anderson */ 3760bdbd0e8SDoug Anderson if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) && 3770bdbd0e8SDoug Anderson !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) { 378b6d2d81cSShawn Lin if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, 379b6d2d81cSShawn Lin status, 380b6d2d81cSShawn Lin !(status & SDMMC_STATUS_BUSY), 381b6d2d81cSShawn Lin 10, 500 * USEC_PER_MSEC)) 3820bdbd0e8SDoug Anderson dev_err(host->dev, "Busy; trying anyway\n"); 3830bdbd0e8SDoug Anderson } 3840bdbd0e8SDoug Anderson } 3850bdbd0e8SDoug Anderson 386f95f3850SWill Newton static void dw_mci_start_command(struct dw_mci *host, 387f95f3850SWill Newton struct mmc_command *cmd, u32 cmd_flags) 388f95f3850SWill Newton { 389f95f3850SWill Newton host->cmd = cmd; 3904a90920cSThomas Abraham dev_vdbg(host->dev, 391f95f3850SWill Newton "start command: ARGR=0x%08x CMDR=0x%08x\n", 392f95f3850SWill Newton cmd->arg, cmd_flags); 393f95f3850SWill Newton 394f95f3850SWill Newton mci_writel(host, CMDARG, cmd->arg); 3950e3a22c0SShawn Lin wmb(); /* drain writebuffer */ 3960bdbd0e8SDoug Anderson 
dw_mci_wait_while_busy(host, cmd_flags); 397f95f3850SWill Newton 398f95f3850SWill Newton mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); 399f95f3850SWill Newton } 400f95f3850SWill Newton 40190c2143aSSeungwon Jeon static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) 402f95f3850SWill Newton { 403e13c3c08SJaehoon Chung struct mmc_command *stop = &host->stop_abort; 4040e3a22c0SShawn Lin 40590c2143aSSeungwon Jeon dw_mci_start_command(host, stop, host->stop_cmdr); 406f95f3850SWill Newton } 407f95f3850SWill Newton 408f95f3850SWill Newton /* DMA interface functions */ 409f95f3850SWill Newton static void dw_mci_stop_dma(struct dw_mci *host) 410f95f3850SWill Newton { 41103e8cb53SJames Hogan if (host->using_dma) { 412f95f3850SWill Newton host->dma_ops->stop(host); 413f95f3850SWill Newton host->dma_ops->cleanup(host); 414aa50f259SSeungwon Jeon } 415aa50f259SSeungwon Jeon 416f95f3850SWill Newton /* Data transfer was stopped by the interrupt handler */ 417f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->pending_events); 418f95f3850SWill Newton } 419f95f3850SWill Newton 4209aa51408SSeungwon Jeon static int dw_mci_get_dma_dir(struct mmc_data *data) 4219aa51408SSeungwon Jeon { 4229aa51408SSeungwon Jeon if (data->flags & MMC_DATA_WRITE) 4239aa51408SSeungwon Jeon return DMA_TO_DEVICE; 4249aa51408SSeungwon Jeon else 4259aa51408SSeungwon Jeon return DMA_FROM_DEVICE; 4269aa51408SSeungwon Jeon } 4279aa51408SSeungwon Jeon 428f95f3850SWill Newton static void dw_mci_dma_cleanup(struct dw_mci *host) 429f95f3850SWill Newton { 430f95f3850SWill Newton struct mmc_data *data = host->data; 431f95f3850SWill Newton 432a4cc7eb4SJaehoon Chung if (data && data->host_cookie == COOKIE_MAPPED) { 4334a90920cSThomas Abraham dma_unmap_sg(host->dev, 4349aa51408SSeungwon Jeon data->sg, 4359aa51408SSeungwon Jeon data->sg_len, 4369aa51408SSeungwon Jeon dw_mci_get_dma_dir(data)); 437a4cc7eb4SJaehoon Chung data->host_cookie = COOKIE_UNMAPPED; 438a4cc7eb4SJaehoon Chung } 
/* Soft-reset the internal DMA controller via the BMOD register. */
static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

/* Disable the IDMAC interface and leave the engine in soft reset. */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

/*
 * DMA-completion callback shared by the IDMAC and external-DMA paths.
 * Syncs the buffers for CPU access (EDMAC reads), unmaps them, and
 * schedules the tasklet unless the request has already gone away.
 */
static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}

/*
 * Build the circular descriptor ring in host->sg_cpu (forward-linked,
 * last descriptor marked end-of-ring), reset the IDMAC, unmask the
 * TI/RI/NI interrupts and program the ring's base address.  Handles
 * both the 64-bit and 32-bit descriptor layouts.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
						i++, p++) {
			p->des6 = (host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
							(i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
				SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

/*
 * Fill the 64-bit descriptor ring for @sg_len mapped segments of @data,
 * splitting each segment into DW_MCI_DESC_DATA_LENGTH-sized chunks.
 * On an OWN-bit timeout the ring is wiped and re-initialized.
 */
static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						!(val & IDMAC_DES0_OWN),
						10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}
/*
 * Fill the 32-bit descriptor ring for @sg_len mapped segments of @data,
 * splitting each segment into DW_MCI_DESC_DATA_LENGTH-sized chunks.
 * On an OWN-bit timeout the ring is wiped and re-initialized.
 *
 * Note: IDMAC_OWN_CLR64() is reused here; it only tests the (le32) OWN
 * bit in des0, which has the same layout in both descriptor formats.
 */
static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					 struct mmc_data *data,
					 unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					 IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}
/*
 * Program the internal DMA controller (IDMAC) with a prepared
 * descriptor ring and start the transfer for host->data.
 * Returns 0 on success or the error from descriptor preparation.
 */
static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	/* Pick the descriptor format matching the controller address width */
	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

/* Abort any in-flight transfer on the external dmaengine channel. */
static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

/*
 * Configure and start a transfer through an external dmaengine
 * channel (TRANS_MODE_EDMAC).  Burst size is derived from the
 * current FIFOTH MSIZE field so the slave config matches the host
 * FIFO setup.  Returns 0 on success, -EBUSY on config/prep failure.
 */
static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

/*
 * Acquire the "rx-tx" dmaengine channel used for external DMA.
 * Returns 0 on success, -ENOMEM/-ENXIO on failure (host->dms is
 * left NULL so the caller can fall back to other transfer modes).
 */
static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
	if (!host->dms->ch) {
		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return -ENXIO;
	}

	return 0;
}

/* Release the external DMA channel and its bookkeeping structure. */
static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}
		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

/*
 * Map the request's scatterlist for DMA and stamp data->host_cookie
 * with @cookie.  Returns the number of mapped entries, or -EINVAL
 * for transfers that should be done by PIO instead (too short, or
 * not 32-bit aligned).  If the list was already mapped by
 * dw_mci_pre_req() (COOKIE_PRE_MAPPED) it is reused as-is.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   int cookie)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	/* Record how this mapping was made so post_req can undo it */
	data->host_cookie = cookie;

	return sg_len;
}

/*
 * mmc_host_ops.pre_req: map the next request's data ahead of time
 * so the mapping cost overlaps the current transfer.
 */
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* This data might be unmapped at this time */
	data->host_cookie = COOKIE_UNMAPPED;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
				    COOKIE_PRE_MAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

/*
 * mmc_host_ops.post_req: unmap a request's data once the core is
 * done with it, regardless of whether it was mapped by pre_req or
 * during submission.
 */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = COOKIE_UNMAPPED;
}

/*
 * mmc_host_ops.get_cd: report card presence (1 = present).
 * Polled and non-removable slots are always reported present;
 * otherwise a CD GPIO is preferred over the controller's CDETECT
 * register (which is active-low).  DW_MMC_CARD_PRESENT is kept in
 * sync under host->lock.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (((mmc->caps & MMC_CAP_NEEDS_POLL)
				|| !mmc_card_is_removable(mmc))) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					"card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					"card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
		 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}

/*
 * Choose the DMA burst size (MSIZE) and RX/TX watermarks in FIFOTH
 * for the given block size: pick the largest burst that divides
 * both the block depth and the free TX space.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;

	/* PIO should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
100452426899SSeungwon Jeon rx_wmark = mszs[idx] - 1; 100552426899SSeungwon Jeon break; 100652426899SSeungwon Jeon } 100752426899SSeungwon Jeon } while (--idx > 0); 100852426899SSeungwon Jeon /* 100952426899SSeungwon Jeon * If idx is '0', it won't be tried 101052426899SSeungwon Jeon * Thus, initial values are uesed 101152426899SSeungwon Jeon */ 101252426899SSeungwon Jeon done: 101352426899SSeungwon Jeon fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); 101452426899SSeungwon Jeon mci_writel(host, FIFOTH, fifoth_val); 101552426899SSeungwon Jeon } 101652426899SSeungwon Jeon 10177e4bf1bcSJaehoon Chung static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data) 1018f1d2736cSSeungwon Jeon { 1019f1d2736cSSeungwon Jeon unsigned int blksz = data->blksz; 1020f1d2736cSSeungwon Jeon u32 blksz_depth, fifo_depth; 1021f1d2736cSSeungwon Jeon u16 thld_size; 10227e4bf1bcSJaehoon Chung u8 enable; 1023f1d2736cSSeungwon Jeon 102466dfd101SJames Hogan /* 102566dfd101SJames Hogan * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is 102666dfd101SJames Hogan * in the FIFO region, so we really shouldn't access it). 102766dfd101SJames Hogan */ 10287e4bf1bcSJaehoon Chung if (host->verid < DW_MMC_240A || 10297e4bf1bcSJaehoon Chung (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE)) 103066dfd101SJames Hogan return; 103166dfd101SJames Hogan 10327e4bf1bcSJaehoon Chung /* 10337e4bf1bcSJaehoon Chung * Card write Threshold is introduced since 2.80a 10347e4bf1bcSJaehoon Chung * It's used when HS400 mode is enabled. 
10357e4bf1bcSJaehoon Chung */ 10367e4bf1bcSJaehoon Chung if (data->flags & MMC_DATA_WRITE && 10377e4bf1bcSJaehoon Chung !(host->timing != MMC_TIMING_MMC_HS400)) 10387e4bf1bcSJaehoon Chung return; 10397e4bf1bcSJaehoon Chung 10407e4bf1bcSJaehoon Chung if (data->flags & MMC_DATA_WRITE) 10417e4bf1bcSJaehoon Chung enable = SDMMC_CARD_WR_THR_EN; 10427e4bf1bcSJaehoon Chung else 10437e4bf1bcSJaehoon Chung enable = SDMMC_CARD_RD_THR_EN; 10447e4bf1bcSJaehoon Chung 1045f1d2736cSSeungwon Jeon if (host->timing != MMC_TIMING_MMC_HS200 && 1046f1d2736cSSeungwon Jeon host->timing != MMC_TIMING_UHS_SDR104) 1047f1d2736cSSeungwon Jeon goto disable; 1048f1d2736cSSeungwon Jeon 1049f1d2736cSSeungwon Jeon blksz_depth = blksz / (1 << host->data_shift); 1050f1d2736cSSeungwon Jeon fifo_depth = host->fifo_depth; 1051f1d2736cSSeungwon Jeon 1052f1d2736cSSeungwon Jeon if (blksz_depth > fifo_depth) 1053f1d2736cSSeungwon Jeon goto disable; 1054f1d2736cSSeungwon Jeon 1055f1d2736cSSeungwon Jeon /* 1056f1d2736cSSeungwon Jeon * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz' 1057f1d2736cSSeungwon Jeon * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz 1058f1d2736cSSeungwon Jeon * Currently just choose blksz. 
1059f1d2736cSSeungwon Jeon */ 1060f1d2736cSSeungwon Jeon thld_size = blksz; 10617e4bf1bcSJaehoon Chung mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable)); 1062f1d2736cSSeungwon Jeon return; 1063f1d2736cSSeungwon Jeon 1064f1d2736cSSeungwon Jeon disable: 10657e4bf1bcSJaehoon Chung mci_writel(host, CDTHRCTL, 0); 1066f1d2736cSSeungwon Jeon } 1067f1d2736cSSeungwon Jeon 10689aa51408SSeungwon Jeon static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) 10699aa51408SSeungwon Jeon { 1070f8c58c11SDoug Anderson unsigned long irqflags; 10719aa51408SSeungwon Jeon int sg_len; 10729aa51408SSeungwon Jeon u32 temp; 10739aa51408SSeungwon Jeon 10749aa51408SSeungwon Jeon host->using_dma = 0; 10759aa51408SSeungwon Jeon 10769aa51408SSeungwon Jeon /* If we don't have a channel, we can't do DMA */ 10779aa51408SSeungwon Jeon if (!host->use_dma) 10789aa51408SSeungwon Jeon return -ENODEV; 10799aa51408SSeungwon Jeon 1080a4cc7eb4SJaehoon Chung sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1081a99aa9b9SSeungwon Jeon if (sg_len < 0) { 1082a99aa9b9SSeungwon Jeon host->dma_ops->stop(host); 10839aa51408SSeungwon Jeon return sg_len; 1084a99aa9b9SSeungwon Jeon } 10859aa51408SSeungwon Jeon 108603e8cb53SJames Hogan host->using_dma = 1; 108703e8cb53SJames Hogan 10883fc7eaefSShawn Lin if (host->use_dma == TRANS_MODE_IDMAC) 10894a90920cSThomas Abraham dev_vdbg(host->dev, 1090f95f3850SWill Newton "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", 10913fc7eaefSShawn Lin (unsigned long)host->sg_cpu, 10923fc7eaefSShawn Lin (unsigned long)host->sg_dma, 1093f95f3850SWill Newton sg_len); 1094f95f3850SWill Newton 109552426899SSeungwon Jeon /* 109652426899SSeungwon Jeon * Decide the MSIZE and RX/TX Watermark. 109752426899SSeungwon Jeon * If current block size is same with previous size, 109852426899SSeungwon Jeon * no need to update fifoth. 
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}

/*
 * Prepare the host for a data transfer: set the transfer direction,
 * program the card threshold, and start DMA if possible — otherwise
 * set up PIO (sg_miter + RX/TX data-ready interrupts).
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		/* DMA not possible: fall back to interrupt-driven PIO */
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode. If wm_aligned
		 * is set, we set watermark same as data size.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		if (host->wm_aligned)
			dw_mci_adjust_fifoth(host, data);
		else
			mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

/*
 * Issue a control command (e.g. clock update) directly to the CIU
 * and busy-wait (up to 500 ms) for the controller to consume it.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	/* Hardware clears SDMMC_CMD_START once the command is taken */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
				      !(cmd_status & SDMMC_CMD_START),
				      1, 500 * USEC_PER_MSEC))
		dev_err(&slot->mmc->class_dev,
			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
			cmd, arg, cmd_status);
}

/*
 * Configure the card clock (divider, enable, low-power) and bus
 * width for @slot, informing the CIU after each register change.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
Anderson u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT; 121501730558SDoug Anderson 121601730558SDoug Anderson /* We must continue to set bit 28 in CMD until the change is complete */ 121701730558SDoug Anderson if (host->state == STATE_WAITING_CMD11_DONE) 121801730558SDoug Anderson sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH; 1219f95f3850SWill Newton 1220fdf492a1SDoug Anderson if (!clock) { 1221fdf492a1SDoug Anderson mci_writel(host, CLKENA, 0); 122201730558SDoug Anderson mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1223fdf492a1SDoug Anderson } else if (clock != host->current_speed || force_clkinit) { 1224fdf492a1SDoug Anderson div = host->bus_hz / clock; 1225fdf492a1SDoug Anderson if (host->bus_hz % clock && host->bus_hz > clock) 1226f95f3850SWill Newton /* 1227f95f3850SWill Newton * move the + 1 after the divide to prevent 1228f95f3850SWill Newton * over-clocking the card. 1229f95f3850SWill Newton */ 1230e419990bSSeungwon Jeon div += 1; 1231e419990bSSeungwon Jeon 1232fdf492a1SDoug Anderson div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; 1233f95f3850SWill Newton 1234e6cd7a8eSJaehoon Chung if ((clock != slot->__clk_old && 1235e6cd7a8eSJaehoon Chung !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) || 1236e6cd7a8eSJaehoon Chung force_clkinit) { 1237ce69e2feSShawn Lin /* Silent the verbose log if calling from PM context */ 1238ce69e2feSShawn Lin if (!force_clkinit) 1239f95f3850SWill Newton dev_info(&slot->mmc->class_dev, 1240fdf492a1SDoug Anderson "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", 1241fdf492a1SDoug Anderson slot->id, host->bus_hz, clock, 1242fdf492a1SDoug Anderson div ? ((host->bus_hz / div) >> 1) : 1243fdf492a1SDoug Anderson host->bus_hz, div); 1244f95f3850SWill Newton 1245e6cd7a8eSJaehoon Chung /* 1246e6cd7a8eSJaehoon Chung * If card is polling, display the message only 1247e6cd7a8eSJaehoon Chung * one time at boot time. 
1248e6cd7a8eSJaehoon Chung */ 1249e6cd7a8eSJaehoon Chung if (slot->mmc->caps & MMC_CAP_NEEDS_POLL && 1250e6cd7a8eSJaehoon Chung slot->mmc->f_min == clock) 1251e6cd7a8eSJaehoon Chung set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags); 1252e6cd7a8eSJaehoon Chung } 1253e6cd7a8eSJaehoon Chung 1254f95f3850SWill Newton /* disable clock */ 1255f95f3850SWill Newton mci_writel(host, CLKENA, 0); 1256f95f3850SWill Newton mci_writel(host, CLKSRC, 0); 1257f95f3850SWill Newton 1258f95f3850SWill Newton /* inform CIU */ 125901730558SDoug Anderson mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1260f95f3850SWill Newton 1261f95f3850SWill Newton /* set clock to desired speed */ 1262f95f3850SWill Newton mci_writel(host, CLKDIV, div); 1263f95f3850SWill Newton 1264f95f3850SWill Newton /* inform CIU */ 126501730558SDoug Anderson mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1266f95f3850SWill Newton 12679623b5b9SDoug Anderson /* enable clock; only low power if no SDIO */ 12689623b5b9SDoug Anderson clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; 1269b24c8b26SDoug Anderson if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) 12709623b5b9SDoug Anderson clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; 12719623b5b9SDoug Anderson mci_writel(host, CLKENA, clk_en_a); 1272f95f3850SWill Newton 1273f95f3850SWill Newton /* inform CIU */ 127401730558SDoug Anderson mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1275005d675aSJaehoon Chung 1276005d675aSJaehoon Chung /* keep the last clock value that was requested from core */ 1277005d675aSJaehoon Chung slot->__clk_old = clock; 1278f95f3850SWill Newton } 1279f95f3850SWill Newton 1280fdf492a1SDoug Anderson host->current_speed = clock; 1281fdf492a1SDoug Anderson 1282f95f3850SWill Newton /* Set the current slot bus width */ 12831d56c453SSeungwon Jeon mci_writel(host, CTYPE, (slot->ctype << slot->id)); 1284f95f3850SWill Newton } 1285f95f3850SWill Newton 1286053b3ce6SSeungwon Jeon static void __dw_mci_start_request(struct dw_mci *host, 1287053b3ce6SSeungwon Jeon struct dw_mci_slot *slot, 
1288053b3ce6SSeungwon Jeon struct mmc_command *cmd) 1289f95f3850SWill Newton { 1290f95f3850SWill Newton struct mmc_request *mrq; 1291f95f3850SWill Newton struct mmc_data *data; 1292f95f3850SWill Newton u32 cmdflags; 1293f95f3850SWill Newton 1294f95f3850SWill Newton mrq = slot->mrq; 1295f95f3850SWill Newton 1296f95f3850SWill Newton host->cur_slot = slot; 1297f95f3850SWill Newton host->mrq = mrq; 1298f95f3850SWill Newton 1299f95f3850SWill Newton host->pending_events = 0; 1300f95f3850SWill Newton host->completed_events = 0; 1301e352c813SSeungwon Jeon host->cmd_status = 0; 1302f95f3850SWill Newton host->data_status = 0; 1303e352c813SSeungwon Jeon host->dir_status = 0; 1304f95f3850SWill Newton 1305053b3ce6SSeungwon Jeon data = cmd->data; 1306f95f3850SWill Newton if (data) { 1307f16afa88SJaehoon Chung mci_writel(host, TMOUT, 0xFFFFFFFF); 1308f95f3850SWill Newton mci_writel(host, BYTCNT, data->blksz*data->blocks); 1309f95f3850SWill Newton mci_writel(host, BLKSIZ, data->blksz); 1310f95f3850SWill Newton } 1311f95f3850SWill Newton 1312f95f3850SWill Newton cmdflags = dw_mci_prepare_command(slot->mmc, cmd); 1313f95f3850SWill Newton 1314f95f3850SWill Newton /* this is the first command, send the initialization clock */ 1315f95f3850SWill Newton if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags)) 1316f95f3850SWill Newton cmdflags |= SDMMC_CMD_INIT; 1317f95f3850SWill Newton 1318f95f3850SWill Newton if (data) { 1319f95f3850SWill Newton dw_mci_submit_data(host, data); 13200e3a22c0SShawn Lin wmb(); /* drain writebuffer */ 1321f95f3850SWill Newton } 1322f95f3850SWill Newton 1323f95f3850SWill Newton dw_mci_start_command(host, cmd, cmdflags); 1324f95f3850SWill Newton 13255c935165SDoug Anderson if (cmd->opcode == SD_SWITCH_VOLTAGE) { 132649ba0302SDoug Anderson unsigned long irqflags; 132749ba0302SDoug Anderson 13285c935165SDoug Anderson /* 13298886a6fdSDoug Anderson * Databook says to fail after 2ms w/ no response, but evidence 13308886a6fdSDoug Anderson * shows that sometimes 
the cmd11 interrupt takes over 130ms. 13318886a6fdSDoug Anderson * We'll set to 500ms, plus an extra jiffy just in case jiffies 13328886a6fdSDoug Anderson * is just about to roll over. 133349ba0302SDoug Anderson * 133449ba0302SDoug Anderson * We do this whole thing under spinlock and only if the 133549ba0302SDoug Anderson * command hasn't already completed (indicating the the irq 133649ba0302SDoug Anderson * already ran so we don't want the timeout). 13375c935165SDoug Anderson */ 133849ba0302SDoug Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 133949ba0302SDoug Anderson if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) 13405c935165SDoug Anderson mod_timer(&host->cmd11_timer, 13418886a6fdSDoug Anderson jiffies + msecs_to_jiffies(500) + 1); 134249ba0302SDoug Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 13435c935165SDoug Anderson } 13445c935165SDoug Anderson 134590c2143aSSeungwon Jeon host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); 1346f95f3850SWill Newton } 1347f95f3850SWill Newton 1348053b3ce6SSeungwon Jeon static void dw_mci_start_request(struct dw_mci *host, 1349053b3ce6SSeungwon Jeon struct dw_mci_slot *slot) 1350053b3ce6SSeungwon Jeon { 1351053b3ce6SSeungwon Jeon struct mmc_request *mrq = slot->mrq; 1352053b3ce6SSeungwon Jeon struct mmc_command *cmd; 1353053b3ce6SSeungwon Jeon 1354053b3ce6SSeungwon Jeon cmd = mrq->sbc ? 
	      mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		/* Controller is busy; the request waits its turn in FIFO order. */
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

/*
 * mmc_host_ops.request: entry point from the MMC core.  Fails fast with
 * -ENOMEDIUM when no card is present, otherwise queues the request under
 * host->lock (see dw_mci_queue_request).
 */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

/*
 * mmc_host_ops.set_ios: program bus width (CTYPE), DDR/HS400 timing bit in
 * UHS_REG, mirror the requested clock, and handle slot power sequencing
 * (vmmc/vqmmc regulators and the PWREN register).
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	/* Give the platform variant a chance to tweak things first. */
	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/*return, if failed turn on vmmc*/
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	/* A non-zero clock while waiting for CMD11 means the switch is over. */
	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

/*
 * mmc_host_ops.card_busy: report whether the card holds DAT[3:0] low,
 * as reflected by the controller STATUS register.
 */
static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

/*
 * mmc_host_ops.start_signal_voltage_switch: used during the CMD11 voltage
 * switch sequence.  Variant drivers may override it entirely.
 */
static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage.  Note that some instances of dw_mmc may use
	 * the UHS_REG for this.  For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly.  Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);

		if (ret) {
			dev_dbg(&mmc->class_dev,
					 "Regulator set error %d - %s V\n",
					 ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

/* mmc_host_ops.get_ro: 1 if the card is write-protected, 0 otherwise. */
static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

/*
 * mmc_host_ops.hw_reset: pulse the eMMC RST_n line for this slot after
 * resetting the controller's DMA/FIFO state.
 */
static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}

/*
 * mmc_host_ops.init_card: adjust the low-power clock gating depending on the
 * card type, holding a runtime-PM reference while low power is disabled.
 */
static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * Low power mode will stop the card clock when idle.  According to the
	 * description of the CLKENA register we should disable low power mode
	 * for SDIO cards if we need SDIO interrupts to work.
	 */
	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
		u32 clk_en_a_old;
		u32 clk_en_a;

		clk_en_a_old = mci_readl(host, CLKENA);

		if (card->type == MMC_TYPE_SDIO ||
		    card->type == MMC_TYPE_SD_COMBO) {
			if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
				pm_runtime_get_noresume(mmc->parent);
				set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			}
			clk_en_a = clk_en_a_old & ~clken_low_pwr;
		} else {
			if (test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags)) {
				pm_runtime_put_noidle(mmc->parent);
				clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
			}
			clk_en_a = clk_en_a_old | clken_low_pwr;
		}

		/* Only issue the (slow) clock-update command on a real change. */
		if (clk_en_a != clk_en_a_old) {
			mci_writel(host, CLKENA, clk_en_a);
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
				     SDMMC_CMD_PRV_DAT_WAIT, 0);
		}
	}
}

/*
 * mmc_host_ops.enable_sdio_irq: mask or unmask this slot's SDIO interrupt
 * bit in INTMASK, under irq_lock since the IRQ handler touches INTMASK too.
 */
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	unsigned long irqflags;
	u32 int_mask;

	spin_lock_irqsave(&host->irq_lock, irqflags);

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb)
		int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
	else
		int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
	mci_writel(host, INTMASK, int_mask);

	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

/* mmc_host_ops.execute_tuning: delegate to the variant; -EINVAL if absent. */
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int err = -EINVAL;

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode);
	return err;
}

/* mmc_host_ops.prepare_hs400_tuning: variant hook; no-op (0) if absent. */
static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	if (drv_data && drv_data->prepare_hs400_tuning)
		return drv_data->prepare_hs400_tuning(host, ios);

	return 0;
}

/*
 * Full controller reset (CTRL/FIFO, plus DMA when in use).  Returns true on
 * success.  Regardless of the outcome, the CIU clock-update command is
 * reissued, since a CTRL reset clobbers the clock configuration.
 */
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			u32 status;

			if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
						      status,
						      !(status & SDMMC_STATUS_DMA_REQ),
						      1, 500 * USEC_PER_MSEC)) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to clear during reset\n",
					__func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev,
				"%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

	if (host->use_dma == TRANS_MODE_IDMAC)
		/* It is also recommended that we reset and reprogram idmac */
		dw_mci_idmac_reset(host);

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}

static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.hw_reset               = dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
	.init_card		= dw_mci_init_card,
	.prepare_hs400_tuning	= dw_mci_prepare_hs400_tuning,
};

/*
 * Complete the current request, report it to the core, and start the next
 * queued request (if any).  host->lock is dropped around mmc_request_done()
 * because the core may immediately issue a new request from that callback.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

/*
 * Read the card's response registers and translate the latched cmd_status
 * interrupt bits into cmd->error.  Returns cmd->error for convenience.
 */
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* 136-bit responses arrive most-significant word last. */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	return cmd->error;
}

/*
 * Translate the latched data_status interrupt bits into data->error, set
 * bytes_xfered, and reset the controller after an error to flush the FIFO.
 * Returns data->error for convenience.
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EILSEQ;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EILSEQ;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}

/*
 * Arm the software data-read timeout timer from the hardware TMOUT value
 * (data timeout in card clocks, stored in TMOUT[31:8]).
 */
static void dw_mci_set_drto(struct dw_mci *host)
{
	unsigned int drto_clks;
	unsigned int drto_ms;

	drto_clks = mci_readl(host, TMOUT) >> 8;
	drto_ms = DIV_ROUND_UP(drto_clks, host->bus_hz / 1000);

	/* add a bit spare time */
	drto_ms += 10;

	mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms));
}

/* State machine tasklet: drives requests through the STATE_* transitions. */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
struct mmc_command *cmd; 1910e352c813SSeungwon Jeon struct mmc_request *mrq; 1911f95f3850SWill Newton enum dw_mci_state state; 1912f95f3850SWill Newton enum dw_mci_state prev_state; 1913e352c813SSeungwon Jeon unsigned int err; 1914f95f3850SWill Newton 1915f95f3850SWill Newton spin_lock(&host->lock); 1916f95f3850SWill Newton 1917f95f3850SWill Newton state = host->state; 1918f95f3850SWill Newton data = host->data; 1919e352c813SSeungwon Jeon mrq = host->mrq; 1920f95f3850SWill Newton 1921f95f3850SWill Newton do { 1922f95f3850SWill Newton prev_state = state; 1923f95f3850SWill Newton 1924f95f3850SWill Newton switch (state) { 1925f95f3850SWill Newton case STATE_IDLE: 192601730558SDoug Anderson case STATE_WAITING_CMD11_DONE: 1927f95f3850SWill Newton break; 1928f95f3850SWill Newton 192901730558SDoug Anderson case STATE_SENDING_CMD11: 1930f95f3850SWill Newton case STATE_SENDING_CMD: 1931f95f3850SWill Newton if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 1932f95f3850SWill Newton &host->pending_events)) 1933f95f3850SWill Newton break; 1934f95f3850SWill Newton 1935f95f3850SWill Newton cmd = host->cmd; 1936f95f3850SWill Newton host->cmd = NULL; 1937f95f3850SWill Newton set_bit(EVENT_CMD_COMPLETE, &host->completed_events); 1938e352c813SSeungwon Jeon err = dw_mci_command_complete(host, cmd); 1939e352c813SSeungwon Jeon if (cmd == mrq->sbc && !err) { 1940053b3ce6SSeungwon Jeon prev_state = state = STATE_SENDING_CMD; 1941053b3ce6SSeungwon Jeon __dw_mci_start_request(host, host->cur_slot, 1942e352c813SSeungwon Jeon mrq->cmd); 1943053b3ce6SSeungwon Jeon goto unlock; 1944053b3ce6SSeungwon Jeon } 1945053b3ce6SSeungwon Jeon 1946e352c813SSeungwon Jeon if (cmd->data && err) { 194746d17952SDoug Anderson /* 194846d17952SDoug Anderson * During UHS tuning sequence, sending the stop 194946d17952SDoug Anderson * command after the response CRC error would 195046d17952SDoug Anderson * throw the system into a confused state 195146d17952SDoug Anderson * causing all future tuning phases to report 
195246d17952SDoug Anderson * failure. 195346d17952SDoug Anderson * 195446d17952SDoug Anderson * In such case controller will move into a data 195546d17952SDoug Anderson * transfer state after a response error or 195646d17952SDoug Anderson * response CRC error. Let's let that finish 195746d17952SDoug Anderson * before trying to send a stop, so we'll go to 195846d17952SDoug Anderson * STATE_SENDING_DATA. 195946d17952SDoug Anderson * 196046d17952SDoug Anderson * Although letting the data transfer take place 196146d17952SDoug Anderson * will waste a bit of time (we already know 196246d17952SDoug Anderson * the command was bad), it can't cause any 196346d17952SDoug Anderson * errors since it's possible it would have 196446d17952SDoug Anderson * taken place anyway if this tasklet got 196546d17952SDoug Anderson * delayed. Allowing the transfer to take place 196646d17952SDoug Anderson * avoids races and keeps things simple. 196746d17952SDoug Anderson */ 196846d17952SDoug Anderson if ((err != -ETIMEDOUT) && 196946d17952SDoug Anderson (cmd->opcode == MMC_SEND_TUNING_BLOCK)) { 197046d17952SDoug Anderson state = STATE_SENDING_DATA; 197146d17952SDoug Anderson continue; 197246d17952SDoug Anderson } 197346d17952SDoug Anderson 197471abb133SSeungwon Jeon dw_mci_stop_dma(host); 197590c2143aSSeungwon Jeon send_stop_abort(host, data); 197671abb133SSeungwon Jeon state = STATE_SENDING_STOP; 197771abb133SSeungwon Jeon break; 197871abb133SSeungwon Jeon } 197971abb133SSeungwon Jeon 1980e352c813SSeungwon Jeon if (!cmd->data || err) { 1981e352c813SSeungwon Jeon dw_mci_request_end(host, mrq); 1982f95f3850SWill Newton goto unlock; 1983f95f3850SWill Newton } 1984f95f3850SWill Newton 1985f95f3850SWill Newton prev_state = state = STATE_SENDING_DATA; 1986f95f3850SWill Newton /* fall through */ 1987f95f3850SWill Newton 1988f95f3850SWill Newton case STATE_SENDING_DATA: 19892aa35465SDoug Anderson /* 19902aa35465SDoug Anderson * We could get a data error and never a transfer 19912aa35465SDoug Anderson 
* complete so we'd better check for it here. 19922aa35465SDoug Anderson * 19932aa35465SDoug Anderson * Note that we don't really care if we also got a 19942aa35465SDoug Anderson * transfer complete; stopping the DMA and sending an 19952aa35465SDoug Anderson * abort won't hurt. 19962aa35465SDoug Anderson */ 1997f95f3850SWill Newton if (test_and_clear_bit(EVENT_DATA_ERROR, 1998f95f3850SWill Newton &host->pending_events)) { 1999f95f3850SWill Newton dw_mci_stop_dma(host); 2000e13c3c08SJaehoon Chung if (!(host->data_status & (SDMMC_INT_DRTO | 2001bdb9a90bSaddy ke SDMMC_INT_EBE))) 200290c2143aSSeungwon Jeon send_stop_abort(host, data); 2003f95f3850SWill Newton state = STATE_DATA_ERROR; 2004f95f3850SWill Newton break; 2005f95f3850SWill Newton } 2006f95f3850SWill Newton 2007f95f3850SWill Newton if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 200857e10486SAddy Ke &host->pending_events)) { 200957e10486SAddy Ke /* 201057e10486SAddy Ke * If all data-related interrupts don't come 201157e10486SAddy Ke * within the given time in reading data state. 201257e10486SAddy Ke */ 201316a34574SJaehoon Chung if (host->dir_status == DW_MCI_RECV_STATUS) 201457e10486SAddy Ke dw_mci_set_drto(host); 2015f95f3850SWill Newton break; 201657e10486SAddy Ke } 2017f95f3850SWill Newton 2018f95f3850SWill Newton set_bit(EVENT_XFER_COMPLETE, &host->completed_events); 20192aa35465SDoug Anderson 20202aa35465SDoug Anderson /* 20212aa35465SDoug Anderson * Handle an EVENT_DATA_ERROR that might have shown up 20222aa35465SDoug Anderson * before the transfer completed. This might not have 20232aa35465SDoug Anderson * been caught by the check above because the interrupt 20242aa35465SDoug Anderson * could have gone off between the previous check and 20252aa35465SDoug Anderson * the check for transfer complete. 
20262aa35465SDoug Anderson * 20272aa35465SDoug Anderson * Technically this ought not be needed assuming we 20282aa35465SDoug Anderson * get a DATA_COMPLETE eventually (we'll notice the 20292aa35465SDoug Anderson * error and end the request), but it shouldn't hurt. 20302aa35465SDoug Anderson * 20312aa35465SDoug Anderson * This has the advantage of sending the stop command. 20322aa35465SDoug Anderson */ 20332aa35465SDoug Anderson if (test_and_clear_bit(EVENT_DATA_ERROR, 20342aa35465SDoug Anderson &host->pending_events)) { 20352aa35465SDoug Anderson dw_mci_stop_dma(host); 2036e13c3c08SJaehoon Chung if (!(host->data_status & (SDMMC_INT_DRTO | 2037bdb9a90bSaddy ke SDMMC_INT_EBE))) 20382aa35465SDoug Anderson send_stop_abort(host, data); 20392aa35465SDoug Anderson state = STATE_DATA_ERROR; 20402aa35465SDoug Anderson break; 20412aa35465SDoug Anderson } 2042f95f3850SWill Newton prev_state = state = STATE_DATA_BUSY; 20432aa35465SDoug Anderson 2044f95f3850SWill Newton /* fall through */ 2045f95f3850SWill Newton 2046f95f3850SWill Newton case STATE_DATA_BUSY: 2047f95f3850SWill Newton if (!test_and_clear_bit(EVENT_DATA_COMPLETE, 204857e10486SAddy Ke &host->pending_events)) { 204957e10486SAddy Ke /* 205057e10486SAddy Ke * If data error interrupt comes but data over 205157e10486SAddy Ke * interrupt doesn't come within the given time. 205257e10486SAddy Ke * in reading data state. 
205357e10486SAddy Ke */ 205416a34574SJaehoon Chung if (host->dir_status == DW_MCI_RECV_STATUS) 205557e10486SAddy Ke dw_mci_set_drto(host); 2056f95f3850SWill Newton break; 205757e10486SAddy Ke } 2058f95f3850SWill Newton 2059f95f3850SWill Newton host->data = NULL; 2060f95f3850SWill Newton set_bit(EVENT_DATA_COMPLETE, &host->completed_events); 2061e352c813SSeungwon Jeon err = dw_mci_data_complete(host, data); 2062f95f3850SWill Newton 2063e352c813SSeungwon Jeon if (!err) { 2064e352c813SSeungwon Jeon if (!data->stop || mrq->sbc) { 206517c8bc85SSachin Kamat if (mrq->sbc && data->stop) 2066053b3ce6SSeungwon Jeon data->stop->error = 0; 2067e352c813SSeungwon Jeon dw_mci_request_end(host, mrq); 2068053b3ce6SSeungwon Jeon goto unlock; 2069053b3ce6SSeungwon Jeon } 2070053b3ce6SSeungwon Jeon 207190c2143aSSeungwon Jeon /* stop command for open-ended transfer*/ 2072e352c813SSeungwon Jeon if (data->stop) 207390c2143aSSeungwon Jeon send_stop_abort(host, data); 20742aa35465SDoug Anderson } else { 20752aa35465SDoug Anderson /* 20762aa35465SDoug Anderson * If we don't have a command complete now we'll 20772aa35465SDoug Anderson * never get one since we just reset everything; 20782aa35465SDoug Anderson * better end the request. 20792aa35465SDoug Anderson * 20802aa35465SDoug Anderson * If we do have a command complete we'll fall 20812aa35465SDoug Anderson * through to the SENDING_STOP command and 20822aa35465SDoug Anderson * everything will be peachy keen. 20832aa35465SDoug Anderson */ 20842aa35465SDoug Anderson if (!test_bit(EVENT_CMD_COMPLETE, 20852aa35465SDoug Anderson &host->pending_events)) { 20862aa35465SDoug Anderson host->cmd = NULL; 20872aa35465SDoug Anderson dw_mci_request_end(host, mrq); 20882aa35465SDoug Anderson goto unlock; 20892aa35465SDoug Anderson } 209090c2143aSSeungwon Jeon } 2091e352c813SSeungwon Jeon 2092e352c813SSeungwon Jeon /* 2093e352c813SSeungwon Jeon * If err has non-zero, 2094e352c813SSeungwon Jeon * stop-abort command has been already issued. 
2095e352c813SSeungwon Jeon */ 2096e352c813SSeungwon Jeon prev_state = state = STATE_SENDING_STOP; 2097e352c813SSeungwon Jeon 2098f95f3850SWill Newton /* fall through */ 2099f95f3850SWill Newton 2100f95f3850SWill Newton case STATE_SENDING_STOP: 2101f95f3850SWill Newton if (!test_and_clear_bit(EVENT_CMD_COMPLETE, 2102f95f3850SWill Newton &host->pending_events)) 2103f95f3850SWill Newton break; 2104f95f3850SWill Newton 210571abb133SSeungwon Jeon /* CMD error in data command */ 210631bff450SSeungwon Jeon if (mrq->cmd->error && mrq->data) 21073a33a94cSSonny Rao dw_mci_reset(host); 210871abb133SSeungwon Jeon 2109f95f3850SWill Newton host->cmd = NULL; 211071abb133SSeungwon Jeon host->data = NULL; 211190c2143aSSeungwon Jeon 2112e13c3c08SJaehoon Chung if (!mrq->sbc && mrq->stop) 2113e352c813SSeungwon Jeon dw_mci_command_complete(host, mrq->stop); 211490c2143aSSeungwon Jeon else 211590c2143aSSeungwon Jeon host->cmd_status = 0; 211690c2143aSSeungwon Jeon 2117e352c813SSeungwon Jeon dw_mci_request_end(host, mrq); 2118f95f3850SWill Newton goto unlock; 2119f95f3850SWill Newton 2120f95f3850SWill Newton case STATE_DATA_ERROR: 2121f95f3850SWill Newton if (!test_and_clear_bit(EVENT_XFER_COMPLETE, 2122f95f3850SWill Newton &host->pending_events)) 2123f95f3850SWill Newton break; 2124f95f3850SWill Newton 2125f95f3850SWill Newton state = STATE_DATA_BUSY; 2126f95f3850SWill Newton break; 2127f95f3850SWill Newton } 2128f95f3850SWill Newton } while (state != prev_state); 2129f95f3850SWill Newton 2130f95f3850SWill Newton host->state = state; 2131f95f3850SWill Newton unlock: 2132f95f3850SWill Newton spin_unlock(&host->lock); 2133f95f3850SWill Newton 2134f95f3850SWill Newton } 2135f95f3850SWill Newton 213634b664a2SJames Hogan /* push final bytes to part_buf, only use during push */ 213734b664a2SJames Hogan static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) 213834b664a2SJames Hogan { 213934b664a2SJames Hogan memcpy((void *)&host->part_buf, buf, cnt); 214034b664a2SJames 
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	/* never grow part_buf beyond one FIFO word (1 << data_shift bytes) */
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min_t(int, cnt, host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}

/*
 * PIO push for a 16-bit wide FIFO: flush any leftover partial word
 * first, bounce through an aligned buffer when the source pointer is
 * odd and the arch lacks efficient unaligned access, then stash any
 * trailing odd byte in part_buf (written out early only when this is
 * the final chunk of the transfer).
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			mci_fifo_writew(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writew(host->fifo_reg, host->part_buf16);
	}
}

/*
 * PIO pull for a 16-bit wide FIFO; mirror of dw_mci_push_data16.
 * A trailing partial word read from the FIFO is parked in part_buf
 * for the next call to consume via dw_mci_pull_part_bytes().
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;

		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_fifo_readw(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf16 = mci_fifo_readw(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

/*
 * PIO push for a 32-bit wide FIFO; same structure as the 16-bit
 * variant, with a 4-byte FIFO word and 4-byte alignment handling.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			mci_fifo_writel(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writel(host->fifo_reg, host->part_buf32);
	}
}

/*
 * PIO pull for a 32-bit wide FIFO; mirror of dw_mci_push_data32.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;

		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_fifo_readl(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_fifo_readl(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

/*
 * PIO push for a 64-bit wide FIFO; same structure as the 16-bit
 * variant, with an 8-byte FIFO word and 8-byte alignment handling.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);

		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			mci_fifo_writeq(host->fifo_reg, *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_fifo_writeq(host->fifo_reg, host->part_buf);
	}
}

/*
 * PIO pull for a 64-bit wide FIFO; mirror of dw_mci_push_data64.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;

			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_fifo_readq(host->fifo_reg);

			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;

		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_fifo_readq(host->fifo_reg);
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_fifo_readq(host->fifo_reg);
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}

/*
 * Width-independent pull: satisfy the request from any bytes parked in
 * part_buf first, then delegate to the FIFO-width-specific pull_data
 * callback (dw_mci_pull_data16/32/64) installed in host->pull_data.
 */
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}

/*
 * PIO read path: drain the controller FIFO into the request's
 * scatterlist via host->sg_miter.
 *
 * @dto: true when called from the DATA_OVER interrupt, in which case
 *	 we keep looping until the FIFO count reads as empty.
 *
 * On sg exhaustion (done:) the miter is stopped, host->sg is cleared
 * and EVENT_XFER_COMPLETE is set for the tasklet to consume.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* bytes currently available = FIFO words + part_buf */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

/*
 * PIO write path: fill the controller FIFO from the request's
 * scatterlist; structural mirror of dw_mci_read_data_pio().
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free FIFO space in bytes, minus bytes already
			 * staged in part_buf */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb(); /* drain writebuffer */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

/*
 * Record the command-phase interrupt status (first status wins) and
 * wake the tasklet to process EVENT_CMD_COMPLETE.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
host->cmd_status = status; 2551f95f3850SWill Newton 25520e3a22c0SShawn Lin smp_wmb(); /* drain writebuffer */ 2553f95f3850SWill Newton 2554f95f3850SWill Newton set_bit(EVENT_CMD_COMPLETE, &host->pending_events); 2555f95f3850SWill Newton tasklet_schedule(&host->tasklet); 2556f95f3850SWill Newton } 2557f95f3850SWill Newton 25586130e7a9SDoug Anderson static void dw_mci_handle_cd(struct dw_mci *host) 25596130e7a9SDoug Anderson { 25606130e7a9SDoug Anderson int i; 25616130e7a9SDoug Anderson 25626130e7a9SDoug Anderson for (i = 0; i < host->num_slots; i++) { 25636130e7a9SDoug Anderson struct dw_mci_slot *slot = host->slot[i]; 25646130e7a9SDoug Anderson 25656130e7a9SDoug Anderson if (!slot) 25666130e7a9SDoug Anderson continue; 25676130e7a9SDoug Anderson 25686130e7a9SDoug Anderson if (slot->mmc->ops->card_event) 25696130e7a9SDoug Anderson slot->mmc->ops->card_event(slot->mmc); 25706130e7a9SDoug Anderson mmc_detect_change(slot->mmc, 25716130e7a9SDoug Anderson msecs_to_jiffies(host->pdata->detect_delay_ms)); 25726130e7a9SDoug Anderson } 25736130e7a9SDoug Anderson } 25746130e7a9SDoug Anderson 2575f95f3850SWill Newton static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) 2576f95f3850SWill Newton { 2577f95f3850SWill Newton struct dw_mci *host = dev_id; 2578182c9081SSeungwon Jeon u32 pending; 25791a5c8e1fSShashidhar Hiremath int i; 2580f95f3850SWill Newton 2581f95f3850SWill Newton pending = mci_readl(host, MINTSTS); /* read-only mask reg */ 2582f95f3850SWill Newton 2583476d79f1SDoug Anderson if (pending) { 258401730558SDoug Anderson /* Check volt switch first, since it can look like an error */ 258501730558SDoug Anderson if ((host->state == STATE_SENDING_CMD11) && 258601730558SDoug Anderson (pending & SDMMC_INT_VOLT_SWITCH)) { 258749ba0302SDoug Anderson unsigned long irqflags; 25885c935165SDoug Anderson 258901730558SDoug Anderson mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); 259001730558SDoug Anderson pending &= ~SDMMC_INT_VOLT_SWITCH; 259149ba0302SDoug Anderson 
			/*
			 * Hold the lock; we know cmd11_timer can't be kicked
			 * off after the lock is released, so safe to delete.
			 */
			spin_lock_irqsave(&host->irq_lock, irqflags);
			dw_mci_cmd_interrupt(host, pending);
			spin_unlock_irqrestore(&host->irq_lock, irqflags);

			del_timer(&host->cmd11_timer);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			/* Transfer finished; the DTO watchdog is no longer needed. */
			del_timer(&host->dto_timer);

			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb(); /* drain writebuffer */
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* Drain whatever is still left in the FIFO. */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			dw_mci_handle_cd(host);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];

			if (!slot)
				continue;

			if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
				mci_writel(host, RINTSTS,
					   SDMMC_INT_SDIO(slot->sdio_id));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

	/* Only the internal DMAC raises the IDSTS/IDSTS64 interrupts below. */
	if (host->use_dma != TRANS_MODE_IDMAC)
		return IRQ_HANDLED;

	/* Handle IDMA interrupts */
	if (host->dma_64bit_address == 1) {
		pending = mci_readl(host, IDSTS64);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
			/* On a data error the tasklet handles teardown instead. */
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	} else {
		pending = mci_readl(host, IDSTS);
		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
							SDMMC_IDMAC_INT_RI);
			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
			if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
				host->dma_ops->complete((void *)host);
		}
	}

	return IRQ_HANDLED;
}

/*
 * Allocate and register the mmc_host for slot @id, wiring it to @host.
 * Returns 0 on success or a negative errno; on failure the mmc_host is
 * freed before returning.
 */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];
	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->sdio_id = host->sdio_id0 + id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	mmc->ops = &dw_mci_ops;
	/* Legacy DT property; absence falls back to the driver defaults. */
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		dev_info(host->dev,
			 "'clock-freq-min-max' property was deprecated.\n");
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	/*if there are external regulators, get them*/
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	/*
	 * Support MMC_CAP_ERASE by default.
	 * It needs to use trim/discard/erase commands.
	 */
	mmc->caps |= MMC_CAP_ERASE;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	/* Controller instance id selects the per-instance caps table entry. */
	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	/* Useful defaults if platform data is unset. */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65535;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
		mmc->max_blk_count = mmc->max_req_size / 512;
	} else if (host->use_dma == TRANS_MODE_EDMAC) {
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535;
		mmc->max_blk_count = 65535;
		mmc->max_req_size =
				mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	} else {
		/* TRANS_MODE_PIO */
		mmc->max_segs = 64;
		mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size *
				    mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
	}

	dw_mci_get_cd(mmc);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}

/* Unregister and free the mmc_host belonging to slot @id. */
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot,
				unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}

/*
 * Probe the controller's transfer mode from HCON and set up the matching
 * DMA backend (internal IDMAC or external/generic DMA).  On any failure
 * the driver degrades gracefully to PIO mode via the no_dma path.
 */
static void dw_mci_init_dma(struct dw_mci *host)
{
	int addr_config;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;

	/*
	 * Check transfer mode from HCON[17:16]
	 * Clear the ambiguous description of dw_mmc databook:
	 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
	 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
	 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
	 * 2b'11: Non DW DMA Interface -> pio only
	 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
	 * simpler request/acknowledge handshake mechanism and both of them
	 * are regarded as external dma master for dw_mmc.
	 */
	host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
	if (host->use_dma == DMA_INTERFACE_IDMA) {
		host->use_dma = TRANS_MODE_IDMAC;
	} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
		   host->use_dma == DMA_INTERFACE_GDMA) {
		host->use_dma = TRANS_MODE_EDMAC;
	} else {
		goto no_dma;
	}

	/* Determine which DMA interface to use */
	if (host->use_dma == TRANS_MODE_IDMAC) {
		/*
		 * Check ADDR_CONFIG bit in HCON to find
		 * IDMAC address bus width
		 */
		addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));

		if (addr_config == 1) {
			/* host supports IDMAC in 64-bit address mode */
			host->dma_64bit_address = 1;
			dev_info(host->dev,
				 "IDMAC supports 64-bit address mode.\n");
			if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
				dma_set_coherent_mask(host->dev,
						      DMA_BIT_MASK(64));
		} else {
			/* host supports IDMAC in 32-bit address mode */
			host->dma_64bit_address = 0;
			dev_info(host->dev,
				 "IDMAC supports 32-bit address mode.\n");
		}

		/* Alloc memory for sg translation */
		host->sg_cpu = dmam_alloc_coherent(host->dev,
						   DESC_RING_BUF_SZ,
						   &host->sg_dma, GFP_KERNEL);
		if (!host->sg_cpu) {
			dev_err(host->dev,
				"%s: could not alloc DMA memory\n",
				__func__);
			goto no_dma;
		}

		host->dma_ops = &dw_mci_idmac_ops;
		dev_info(host->dev, "Using internal DMA controller.\n");
	} else {
		/* TRANS_MODE_EDMAC: check dma bindings again */
		if ((of_property_count_strings(np, "dma-names") < 0) ||
		    (!of_find_property(np, "dmas", NULL))) {
			goto no_dma;
		}
		host->dma_ops = &dw_mci_edmac_ops;
		dev_info(host->dev, "Using external DMA controller.\n");
	}

	/* The chosen backend must implement the full ops contract. */
	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
				__func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = TRANS_MODE_PIO;
}

/*
 * CMD11 (voltage switch) watchdog: fires when the controller never signals
 * completion of the voltage-switch command.
 */
static void dw_mci_cmd11_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;
	if (host->state != STATE_SENDING_CMD11) {
		dev_warn(host->dev, "Unexpected CMD11 timeout\n");
		return;
	}

	/* Fake a response timeout so the tasklet fails the request. */
	host->cmd_status = SDMMC_INT_RTO;
	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}

/*
 * Data-transfer-over watchdog: if the DTO interrupt never arrives while a
 * transfer is in flight, report a data timeout to the core via the tasklet.
 */
static void dw_mci_dto_timer(unsigned long arg)
{
	struct dw_mci *host = (struct dw_mci *)arg;

	switch (host->state) {
	case STATE_SENDING_DATA:
	case STATE_DATA_BUSY:
		/*
		 * If DTO interrupt does NOT come in sending data state,
		 * we should notify the driver to terminate current transfer
		 * and report a data timeout to the core.
		 */
		host->data_status = SDMMC_INT_DRTO;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
		break;
	default:
		break;
	}
}

#ifdef CONFIG_OF
/*
 * Build a dw_mci_board from the device-tree node.  Returns the populated
 * pdata, or an ERR_PTR on allocation/parse failure (including
 * -EPROBE_DEFER when the reset controller is not ready yet).
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* find reset controller when exist */
	pdata->rstc = devm_reset_control_get_optional(dev, "reset");
	if (IS_ERR(pdata->rstc)) {
		if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
			return ERR_PTR(-EPROBE_DEFER);
	}

	/* find out number of slots supported */
	of_property_read_u32(np, "num-slots", &pdata->num_slots);

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev,
			 "fifo-depth property not found, using value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	of_property_read_u32(np, "data-addr", &host->data_addr_override);

	if (of_get_property(np, "fifo-watermark-aligned", NULL))
		host->wm_aligned = true;

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	/* Give the variant driver a chance to parse its own properties. */
	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */

/*
 * Unmask the card-detect interrupt, but only when at least one slot really
 * needs controller-level CD (no usable CD GPIO and not polling/broken-cd).
 */
static void dw_mci_enable_cd(struct dw_mci *host)
{
	unsigned long irqflags;
	u32 temp;
	int i;
	struct dw_mci_slot *slot;

	/*
	 * No need for CD if all slots have a non-error GPIO
	 * as well as broken card detection is found.
3008e8cc37b8SShawn Lin */ 3009fa0c3283SDoug Anderson for (i = 0; i < host->num_slots; i++) { 3010e8cc37b8SShawn Lin slot = host->slot[i]; 3011e8cc37b8SShawn Lin if (slot->mmc->caps & MMC_CAP_NEEDS_POLL) 3012e8cc37b8SShawn Lin return; 3013fa0c3283SDoug Anderson 3014287980e4SArnd Bergmann if (mmc_gpio_get_cd(slot->mmc) < 0) 3015fa0c3283SDoug Anderson break; 3016fa0c3283SDoug Anderson } 3017fa0c3283SDoug Anderson if (i == host->num_slots) 3018fa0c3283SDoug Anderson return; 3019fa0c3283SDoug Anderson 3020fa0c3283SDoug Anderson spin_lock_irqsave(&host->irq_lock, irqflags); 3021fa0c3283SDoug Anderson temp = mci_readl(host, INTMASK); 3022fa0c3283SDoug Anderson temp |= SDMMC_INT_CD; 3023fa0c3283SDoug Anderson mci_writel(host, INTMASK, temp); 3024fa0c3283SDoug Anderson spin_unlock_irqrestore(&host->irq_lock, irqflags); 3025fa0c3283SDoug Anderson } 3026fa0c3283SDoug Anderson 302762ca8034SShashidhar Hiremath int dw_mci_probe(struct dw_mci *host) 3028f95f3850SWill Newton { 3029e95baf13SArnd Bergmann const struct dw_mci_drv_data *drv_data = host->drv_data; 303062ca8034SShashidhar Hiremath int width, i, ret = 0; 3031f95f3850SWill Newton u32 fifo_size; 30321c2215b7SThomas Abraham int init_slots = 0; 3033f95f3850SWill Newton 3034c91eab4bSThomas Abraham if (!host->pdata) { 3035c91eab4bSThomas Abraham host->pdata = dw_mci_parse_dt(host); 3036d6786fefSGuodong Xu if (PTR_ERR(host->pdata) == -EPROBE_DEFER) { 3037d6786fefSGuodong Xu return -EPROBE_DEFER; 3038d6786fefSGuodong Xu } else if (IS_ERR(host->pdata)) { 3039c91eab4bSThomas Abraham dev_err(host->dev, "platform data not available\n"); 3040c91eab4bSThomas Abraham return -EINVAL; 3041c91eab4bSThomas Abraham } 3042f95f3850SWill Newton } 3043f95f3850SWill Newton 3044780f22afSSeungwon Jeon host->biu_clk = devm_clk_get(host->dev, "biu"); 3045f90a0612SThomas Abraham if (IS_ERR(host->biu_clk)) { 3046f90a0612SThomas Abraham dev_dbg(host->dev, "biu clock not available\n"); 3047f90a0612SThomas Abraham } else { 3048f90a0612SThomas Abraham ret 
= clk_prepare_enable(host->biu_clk); 3049f90a0612SThomas Abraham if (ret) { 3050f90a0612SThomas Abraham dev_err(host->dev, "failed to enable biu clock\n"); 3051f90a0612SThomas Abraham return ret; 3052f90a0612SThomas Abraham } 3053f95f3850SWill Newton } 3054f95f3850SWill Newton 3055780f22afSSeungwon Jeon host->ciu_clk = devm_clk_get(host->dev, "ciu"); 3056f90a0612SThomas Abraham if (IS_ERR(host->ciu_clk)) { 3057f90a0612SThomas Abraham dev_dbg(host->dev, "ciu clock not available\n"); 30583c6d89eaSDoug Anderson host->bus_hz = host->pdata->bus_hz; 3059f90a0612SThomas Abraham } else { 3060f90a0612SThomas Abraham ret = clk_prepare_enable(host->ciu_clk); 3061f90a0612SThomas Abraham if (ret) { 3062f90a0612SThomas Abraham dev_err(host->dev, "failed to enable ciu clock\n"); 3063f90a0612SThomas Abraham goto err_clk_biu; 3064f90a0612SThomas Abraham } 3065f90a0612SThomas Abraham 30663c6d89eaSDoug Anderson if (host->pdata->bus_hz) { 30673c6d89eaSDoug Anderson ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); 30683c6d89eaSDoug Anderson if (ret) 30693c6d89eaSDoug Anderson dev_warn(host->dev, 3070612de4c1SJaehoon Chung "Unable to set bus rate to %uHz\n", 30713c6d89eaSDoug Anderson host->pdata->bus_hz); 30723c6d89eaSDoug Anderson } 3073f90a0612SThomas Abraham host->bus_hz = clk_get_rate(host->ciu_clk); 30743c6d89eaSDoug Anderson } 3075f90a0612SThomas Abraham 3076612de4c1SJaehoon Chung if (!host->bus_hz) { 3077612de4c1SJaehoon Chung dev_err(host->dev, 3078612de4c1SJaehoon Chung "Platform data must supply bus speed\n"); 3079612de4c1SJaehoon Chung ret = -ENODEV; 3080612de4c1SJaehoon Chung goto err_clk_ciu; 3081612de4c1SJaehoon Chung } 3082612de4c1SJaehoon Chung 3083002f0d5cSYuvaraj Kumar C D if (drv_data && drv_data->init) { 3084002f0d5cSYuvaraj Kumar C D ret = drv_data->init(host); 3085002f0d5cSYuvaraj Kumar C D if (ret) { 3086002f0d5cSYuvaraj Kumar C D dev_err(host->dev, 3087002f0d5cSYuvaraj Kumar C D "implementation specific init failed\n"); 3088002f0d5cSYuvaraj Kumar C D goto 
err_clk_ciu; 3089002f0d5cSYuvaraj Kumar C D } 3090002f0d5cSYuvaraj Kumar C D } 3091002f0d5cSYuvaraj Kumar C D 3092d6786fefSGuodong Xu if (!IS_ERR(host->pdata->rstc)) { 3093d6786fefSGuodong Xu reset_control_assert(host->pdata->rstc); 3094d6786fefSGuodong Xu usleep_range(10, 50); 3095d6786fefSGuodong Xu reset_control_deassert(host->pdata->rstc); 3096d6786fefSGuodong Xu } 3097d6786fefSGuodong Xu 30985c935165SDoug Anderson setup_timer(&host->cmd11_timer, 30995c935165SDoug Anderson dw_mci_cmd11_timer, (unsigned long)host); 31005c935165SDoug Anderson 310157e10486SAddy Ke setup_timer(&host->dto_timer, 310257e10486SAddy Ke dw_mci_dto_timer, (unsigned long)host); 310357e10486SAddy Ke 3104f95f3850SWill Newton spin_lock_init(&host->lock); 3105f8c58c11SDoug Anderson spin_lock_init(&host->irq_lock); 3106f95f3850SWill Newton INIT_LIST_HEAD(&host->queue); 3107f95f3850SWill Newton 3108f95f3850SWill Newton /* 3109f95f3850SWill Newton * Get the host data width - this assumes that HCON has been set with 3110f95f3850SWill Newton * the correct values. 
3111f95f3850SWill Newton */ 311270692752SShawn Lin i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON)); 3113f95f3850SWill Newton if (!i) { 3114f95f3850SWill Newton host->push_data = dw_mci_push_data16; 3115f95f3850SWill Newton host->pull_data = dw_mci_pull_data16; 3116f95f3850SWill Newton width = 16; 3117f95f3850SWill Newton host->data_shift = 1; 3118f95f3850SWill Newton } else if (i == 2) { 3119f95f3850SWill Newton host->push_data = dw_mci_push_data64; 3120f95f3850SWill Newton host->pull_data = dw_mci_pull_data64; 3121f95f3850SWill Newton width = 64; 3122f95f3850SWill Newton host->data_shift = 3; 3123f95f3850SWill Newton } else { 3124f95f3850SWill Newton /* Check for a reserved value, and warn if it is */ 3125f95f3850SWill Newton WARN((i != 1), 3126f95f3850SWill Newton "HCON reports a reserved host data width!\n" 3127f95f3850SWill Newton "Defaulting to 32-bit access.\n"); 3128f95f3850SWill Newton host->push_data = dw_mci_push_data32; 3129f95f3850SWill Newton host->pull_data = dw_mci_pull_data32; 3130f95f3850SWill Newton width = 32; 3131f95f3850SWill Newton host->data_shift = 2; 3132f95f3850SWill Newton } 3133f95f3850SWill Newton 3134f95f3850SWill Newton /* Reset all blocks */ 31353744415cSShawn Lin if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 31363744415cSShawn Lin ret = -ENODEV; 31373744415cSShawn Lin goto err_clk_ciu; 31383744415cSShawn Lin } 3139141a712aSSeungwon Jeon 3140141a712aSSeungwon Jeon host->dma_ops = host->pdata->dma_ops; 3141141a712aSSeungwon Jeon dw_mci_init_dma(host); 3142f95f3850SWill Newton 3143f95f3850SWill Newton /* Clear the interrupts for the host controller */ 3144f95f3850SWill Newton mci_writel(host, RINTSTS, 0xFFFFFFFF); 3145f95f3850SWill Newton mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3146f95f3850SWill Newton 3147f95f3850SWill Newton /* Put in max timeout */ 3148f95f3850SWill Newton mci_writel(host, TMOUT, 0xFFFFFFFF); 3149f95f3850SWill Newton 3150f95f3850SWill Newton /* 3151f95f3850SWill Newton * 
FIFO threshold settings RxMark = fifo_size / 2 - 1, 3152f95f3850SWill Newton * Tx Mark = fifo_size / 2 DMA Size = 8 3153f95f3850SWill Newton */ 3154b86d8253SJames Hogan if (!host->pdata->fifo_depth) { 3155b86d8253SJames Hogan /* 3156b86d8253SJames Hogan * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may 3157b86d8253SJames Hogan * have been overwritten by the bootloader, just like we're 3158b86d8253SJames Hogan * about to do, so if you know the value for your hardware, you 3159b86d8253SJames Hogan * should put it in the platform data. 3160b86d8253SJames Hogan */ 3161f95f3850SWill Newton fifo_size = mci_readl(host, FIFOTH); 31628234e869SJaehoon Chung fifo_size = 1 + ((fifo_size >> 16) & 0xfff); 3163b86d8253SJames Hogan } else { 3164b86d8253SJames Hogan fifo_size = host->pdata->fifo_depth; 3165b86d8253SJames Hogan } 3166b86d8253SJames Hogan host->fifo_depth = fifo_size; 316752426899SSeungwon Jeon host->fifoth_val = 316852426899SSeungwon Jeon SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); 3169e61cf118SJaehoon Chung mci_writel(host, FIFOTH, host->fifoth_val); 3170f95f3850SWill Newton 3171f95f3850SWill Newton /* disable clock to CIU */ 3172f95f3850SWill Newton mci_writel(host, CLKENA, 0); 3173f95f3850SWill Newton mci_writel(host, CLKSRC, 0); 3174f95f3850SWill Newton 317563008768SJames Hogan /* 317663008768SJames Hogan * In 2.40a spec, Data offset is changed. 317763008768SJames Hogan * Need to check the version-id and set data-offset for DATA register. 
/*
 * NOTE(review): this chunk is `git blame`-annotated text of the Synopsys
 * DesignWare MMC host driver (dw_mmc.c).  Each original source line is
 * prefixed with its original line number, commit hash and author, and many
 * original lines are collapsed onto a few physical lines.  The annotated
 * text below is preserved byte-for-byte; only these comment-only lines were
 * added at the pre-existing physical line breaks.
 *
 * Regions covered (original line numbers 3178-3398):
 *  - tail of dw_mci_probe(): selects host->fifo_reg by data_addr_override
 *    or IP version (DW_MMC_240A cutoff), requests the IRQ via
 *    devm_request_irq(), validates num_slots against HCON, programs
 *    INTMASK/CTRL, initializes each slot, and unwinds on error through
 *    the err_dmaunmap / err_clk_ciu / err_clk_biu labels.
 *  - dw_mci_remove(): cleans up slots, masks interrupts, gates the card
 *    clock (CLKENA/CLKSRC), exits DMA, asserts the reset control if
 *    present, and unprepares the CIU/BIU clocks.
 *  - dw_mci_runtime_suspend()/dw_mci_runtime_resume() under CONFIG_PM.
 *  - module init/exit boilerplate and MODULE_* tags.
 */
317863008768SJames Hogan */ 317963008768SJames Hogan host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); 318063008768SJames Hogan dev_info(host->dev, "Version ID is %04x\n", host->verid); 318163008768SJames Hogan 3182a0361c1aSJun Nie if (host->data_addr_override) 3183a0361c1aSJun Nie host->fifo_reg = host->regs + host->data_addr_override; 3184a0361c1aSJun Nie else if (host->verid < DW_MMC_240A) 318576184ac1SBen Dooks host->fifo_reg = host->regs + DATA_OFFSET; 318663008768SJames Hogan else 318776184ac1SBen Dooks host->fifo_reg = host->regs + DATA_240A_OFFSET; 318863008768SJames Hogan 3189f95f3850SWill Newton tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 3190780f22afSSeungwon Jeon ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, 3191780f22afSSeungwon Jeon host->irq_flags, "dw-mci", host); 3192f95f3850SWill Newton if (ret) 31936130e7a9SDoug Anderson goto err_dmaunmap; 3194f95f3850SWill Newton 3195f95f3850SWill Newton if (host->pdata->num_slots) 3196f95f3850SWill Newton host->num_slots = host->pdata->num_slots; 3197f95f3850SWill Newton else 31988a629d26SShawn Lin host->num_slots = 1; 31998a629d26SShawn Lin 32008a629d26SShawn Lin if (host->num_slots < 1 || 32018a629d26SShawn Lin host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) { 32028a629d26SShawn Lin dev_err(host->dev, 32038a629d26SShawn Lin "Platform data must supply correct num_slots.\n"); 32048a629d26SShawn Lin ret = -ENODEV; 32058a629d26SShawn Lin goto err_clk_ciu; 32068a629d26SShawn Lin } 3207f95f3850SWill Newton 32082da1d7f2SYuvaraj CD /* 3209fa0c3283SDoug Anderson * Enable interrupts for command done, data over, data empty, 32102da1d7f2SYuvaraj CD * receive ready and error such as transmit, receive timeout, crc error 32112da1d7f2SYuvaraj CD */ 32122da1d7f2SYuvaraj CD mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 32132da1d7f2SYuvaraj CD SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3214fa0c3283SDoug Anderson DW_MCI_ERROR_FLAGS); 32150e3a22c0SShawn
/*
 * probe (cont.): globally enable controller interrupts, initialize each
 * slot (probe succeeds if at least one slot comes up), enable card
 * detect, then the goto-based error-unwind labels follow.
 */
Lin /* Enable mci interrupt */ 32160e3a22c0SShawn Lin mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 32172da1d7f2SYuvaraj CD 32180e3a22c0SShawn Lin dev_info(host->dev, 32190e3a22c0SShawn Lin "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n", 32202da1d7f2SYuvaraj CD host->irq, width, fifo_size); 32212da1d7f2SYuvaraj CD 3222f95f3850SWill Newton /* We need at least one slot to succeed */ 3223f95f3850SWill Newton for (i = 0; i < host->num_slots; i++) { 3224f95f3850SWill Newton ret = dw_mci_init_slot(host, i); 32251c2215b7SThomas Abraham if (ret) 32261c2215b7SThomas Abraham dev_dbg(host->dev, "slot %d init failed\n", i); 32271c2215b7SThomas Abraham else 32281c2215b7SThomas Abraham init_slots++; 3229f95f3850SWill Newton } 32301c2215b7SThomas Abraham 32311c2215b7SThomas Abraham if (init_slots) { 32321c2215b7SThomas Abraham dev_info(host->dev, "%d slots initialized\n", init_slots); 32331c2215b7SThomas Abraham } else { 32340e3a22c0SShawn Lin dev_dbg(host->dev, 32350e3a22c0SShawn Lin "attempted to initialize %d slots, but failed on all\n", 32360e3a22c0SShawn Lin host->num_slots); 32376130e7a9SDoug Anderson goto err_dmaunmap; 3238f95f3850SWill Newton } 3239f95f3850SWill Newton 3240b793f658SDoug Anderson /* Now that slots are all setup, we can enable card detect */ 3241b793f658SDoug Anderson dw_mci_enable_cd(host); 3242b793f658SDoug Anderson 3243f95f3850SWill Newton return 0; 3244f95f3850SWill Newton 3245f95f3850SWill Newton err_dmaunmap: 3246f95f3850SWill Newton if (host->use_dma && host->dma_ops->exit) 3247f95f3850SWill Newton host->dma_ops->exit(host); 3248f90a0612SThomas Abraham 3249d6786fefSGuodong Xu if (!IS_ERR(host->pdata->rstc)) 3250d6786fefSGuodong Xu reset_control_assert(host->pdata->rstc); 3251d6786fefSGuodong Xu 3252f90a0612SThomas Abraham err_clk_ciu: 3253f90a0612SThomas Abraham clk_disable_unprepare(host->ciu_clk); 3254780f22afSSeungwon Jeon 3255f90a0612SThomas Abraham err_clk_biu: 3256f90a0612SThomas Abraham
/*
 * dw_mci_probe() ends below (last unwind step + return).  Then
 * dw_mci_remove() tears down slots, masks all interrupts, gates the CIU
 * card clock, exits DMA, asserts the optional reset line and unprepares
 * both clocks.  dw_mci_runtime_suspend() (CONFIG_PM) begins at the end
 * of this region: it exits DMA before gating clocks.
 */
clk_disable_unprepare(host->biu_clk); 3257780f22afSSeungwon Jeon 3258f95f3850SWill Newton return ret; 3259f95f3850SWill Newton } 326062ca8034SShashidhar Hiremath EXPORT_SYMBOL(dw_mci_probe); 3261f95f3850SWill Newton 326262ca8034SShashidhar Hiremath void dw_mci_remove(struct dw_mci *host) 3263f95f3850SWill Newton { 3264f95f3850SWill Newton int i; 3265f95f3850SWill Newton 3266f95f3850SWill Newton for (i = 0; i < host->num_slots; i++) { 32674a90920cSThomas Abraham dev_dbg(host->dev, "remove slot %d\n", i); 3268f95f3850SWill Newton if (host->slot[i]) 3269f95f3850SWill Newton dw_mci_cleanup_slot(host->slot[i], i); 3270f95f3850SWill Newton } 3271f95f3850SWill Newton 3272048fd7e6SPrabu Thangamuthu mci_writel(host, RINTSTS, 0xFFFFFFFF); 3273048fd7e6SPrabu Thangamuthu mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ 3274048fd7e6SPrabu Thangamuthu 3275f95f3850SWill Newton /* disable clock to CIU */ 3276f95f3850SWill Newton mci_writel(host, CLKENA, 0); 3277f95f3850SWill Newton mci_writel(host, CLKSRC, 0); 3278f95f3850SWill Newton 3279f95f3850SWill Newton if (host->use_dma && host->dma_ops->exit) 3280f95f3850SWill Newton host->dma_ops->exit(host); 3281f95f3850SWill Newton 3282d6786fefSGuodong Xu if (!IS_ERR(host->pdata->rstc)) 3283d6786fefSGuodong Xu reset_control_assert(host->pdata->rstc); 3284d6786fefSGuodong Xu 3285f90a0612SThomas Abraham clk_disable_unprepare(host->ciu_clk); 3286f90a0612SThomas Abraham clk_disable_unprepare(host->biu_clk); 3287f95f3850SWill Newton } 328862ca8034SShashidhar Hiremath EXPORT_SYMBOL(dw_mci_remove); 328962ca8034SShashidhar Hiremath 329062ca8034SShashidhar Hiremath 3291f95f3850SWill Newton 3292e9ed8835SShawn Lin #ifdef CONFIG_PM 3293ed24e1ffSShawn Lin int dw_mci_runtime_suspend(struct device *dev) 3294f95f3850SWill Newton { 3295ed24e1ffSShawn Lin struct dw_mci *host = dev_get_drvdata(dev); 3296ed24e1ffSShawn Lin 32973fc7eaefSShawn Lin if (host->use_dma && host->dma_ops->exit) 32983fc7eaefSShawn Lin host->dma_ops->exit(host);
/*
 * suspend (cont.): always gate the CIU clock; the BIU clock is gated
 * only when card detect works without the bus clock (GPIO CD or a
 * non-removable card).  dw_mci_runtime_resume() then mirrors this:
 * re-enable clocks, full controller reset (dw_mci_ctrl_reset with
 * SDMMC_CTRL_ALL_RESET_FLAGS), DMA re-init, and FIFOTH restore.
 */
32993fc7eaefSShawn Lin 3300ed24e1ffSShawn Lin clk_disable_unprepare(host->ciu_clk); 3301ed24e1ffSShawn Lin 3302ed24e1ffSShawn Lin if (host->cur_slot && 3303ed24e1ffSShawn Lin (mmc_can_gpio_cd(host->cur_slot->mmc) || 3304ed24e1ffSShawn Lin !mmc_card_is_removable(host->cur_slot->mmc))) 3305ed24e1ffSShawn Lin clk_disable_unprepare(host->biu_clk); 3306ed24e1ffSShawn Lin 3307f95f3850SWill Newton return 0; 3308f95f3850SWill Newton } 3309ed24e1ffSShawn Lin EXPORT_SYMBOL(dw_mci_runtime_suspend); 3310f95f3850SWill Newton 3311ed24e1ffSShawn Lin int dw_mci_runtime_resume(struct device *dev) 3312f95f3850SWill Newton { 3313ed24e1ffSShawn Lin int i, ret = 0; 3314ed24e1ffSShawn Lin struct dw_mci *host = dev_get_drvdata(dev); 3315f95f3850SWill Newton 3316ed24e1ffSShawn Lin if (host->cur_slot && 3317ed24e1ffSShawn Lin (mmc_can_gpio_cd(host->cur_slot->mmc) || 3318ed24e1ffSShawn Lin !mmc_card_is_removable(host->cur_slot->mmc))) { 3319ed24e1ffSShawn Lin ret = clk_prepare_enable(host->biu_clk); 3320ed24e1ffSShawn Lin if (ret) 3321e61cf118SJaehoon Chung return ret; 3322e61cf118SJaehoon Chung } 3323e61cf118SJaehoon Chung 3324ed24e1ffSShawn Lin ret = clk_prepare_enable(host->ciu_clk); 3325ed24e1ffSShawn Lin if (ret) 3326df9bcc2bSJoonyoung Shim goto err; 3327df9bcc2bSJoonyoung Shim 3328df9bcc2bSJoonyoung Shim if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { 3329df9bcc2bSJoonyoung Shim clk_disable_unprepare(host->ciu_clk); 3330df9bcc2bSJoonyoung Shim ret = -ENODEV; 3331df9bcc2bSJoonyoung Shim goto err; 3332df9bcc2bSJoonyoung Shim } 3333ed24e1ffSShawn Lin 33343bfe619dSJonathan Kliegman if (host->use_dma && host->dma_ops->init) 3335141a712aSSeungwon Jeon host->dma_ops->init(host); 3336141a712aSSeungwon Jeon 333752426899SSeungwon Jeon /* 333852426899SSeungwon Jeon * Restore the initial value at FIFOTH register 333952426899SSeungwon Jeon * And Invalidate the prev_blksz with zero 334052426899SSeungwon Jeon */ 3341e61cf118SJaehoon Chung mci_writel(host, FIFOTH, host->fifoth_val);
/*
 * resume (cont.): max out the data timeout, re-program RINTSTS/INTMASK/
 * CTRL exactly as probe did, force bus setup per populated slot so the
 * clock output is available again, re-enable card detect, and on error
 * undo the conditional BIU clock enable from the top of the function.
 */
334252426899SSeungwon Jeon host->prev_blksz = 0; 3343e61cf118SJaehoon Chung 33442eb2944fSDoug Anderson /* Put in max timeout */ 33452eb2944fSDoug Anderson mci_writel(host, TMOUT, 0xFFFFFFFF); 33462eb2944fSDoug Anderson 3347e61cf118SJaehoon Chung mci_writel(host, RINTSTS, 0xFFFFFFFF); 3348e61cf118SJaehoon Chung mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | 3349e61cf118SJaehoon Chung SDMMC_INT_TXDR | SDMMC_INT_RXDR | 3350fa0c3283SDoug Anderson DW_MCI_ERROR_FLAGS); 3351e61cf118SJaehoon Chung mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); 3352e61cf118SJaehoon Chung 3353f95f3850SWill Newton for (i = 0; i < host->num_slots; i++) { 3354f95f3850SWill Newton struct dw_mci_slot *slot = host->slot[i]; 33550e3a22c0SShawn Lin 3356f95f3850SWill Newton if (!slot) 3357f95f3850SWill Newton continue; 3358e9748e03SZiyuan Xu if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) 3359ab269128SAbhilash Kesavan dw_mci_set_ios(slot->mmc, &slot->mmc->ios); 3360e9748e03SZiyuan Xu 3361e9748e03SZiyuan Xu /* Force setup bus to guarantee available clock output */ 3362ab269128SAbhilash Kesavan dw_mci_setup_bus(slot, true); 3363ab269128SAbhilash Kesavan } 3364fa0c3283SDoug Anderson 3365fa0c3283SDoug Anderson /* Now that slots are all setup, we can enable card detect */ 3366fa0c3283SDoug Anderson dw_mci_enable_cd(host); 3367fa0c3283SDoug Anderson 3368df9bcc2bSJoonyoung Shim return 0; 3369df9bcc2bSJoonyoung Shim 3370df9bcc2bSJoonyoung Shim err: 3371df9bcc2bSJoonyoung Shim if (host->cur_slot && 3372df9bcc2bSJoonyoung Shim (mmc_can_gpio_cd(host->cur_slot->mmc) || 3373df9bcc2bSJoonyoung Shim !mmc_card_is_removable(host->cur_slot->mmc))) 3374df9bcc2bSJoonyoung Shim clk_disable_unprepare(host->biu_clk); 3375df9bcc2bSJoonyoung Shim 33761f5c51d7SShawn Lin return ret; 33771f5c51d7SShawn Lin } 3378e9ed8835SShawn Lin EXPORT_SYMBOL(dw_mci_runtime_resume); 3379e9ed8835SShawn Lin #endif /* CONFIG_PM */ 33806fe8890dSJaehoon Chung 3381f95f3850SWill Newton static int __init dw_mci_init(void)
/*
 * Module boilerplate: dw_mci_init() only prints a banner (device binding
 * is done by the bus-specific glue drivers that call dw_mci_probe()),
 * dw_mci_exit() is intentionally empty, followed by module metadata.
 */
3382f95f3850SWill Newton { 33838e1c4e4dSSachin Kamat pr_info("Synopsys Designware Multimedia Card Interface Driver\n"); 338462ca8034SShashidhar Hiremath return 0; 3385f95f3850SWill Newton } 3386f95f3850SWill Newton 3387f95f3850SWill Newton static void __exit dw_mci_exit(void) 3388f95f3850SWill Newton { 3389f95f3850SWill Newton } 3390f95f3850SWill Newton 3391f95f3850SWill Newton module_init(dw_mci_init); 3392f95f3850SWill Newton module_exit(dw_mci_exit); 3393f95f3850SWill Newton 3394f95f3850SWill Newton MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); 3395f95f3850SWill Newton MODULE_AUTHOR("NXP Semiconductor VietNam"); 3396f95f3850SWill Newton MODULE_AUTHOR("Imagination Technologies Ltd"); 3397f95f3850SWill Newton MODULE_LICENSE("GPL v2"); 3398