1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2014-2015 MediaTek Inc. 4 * Author: Chaotian.Jing <chaotian.jing@mediatek.com> 5 */ 6 7 #include <linux/module.h> 8 #include <linux/clk.h> 9 #include <linux/delay.h> 10 #include <linux/dma-mapping.h> 11 #include <linux/iopoll.h> 12 #include <linux/ioport.h> 13 #include <linux/irq.h> 14 #include <linux/of_address.h> 15 #include <linux/of_device.h> 16 #include <linux/of_irq.h> 17 #include <linux/of_gpio.h> 18 #include <linux/pinctrl/consumer.h> 19 #include <linux/platform_device.h> 20 #include <linux/pm.h> 21 #include <linux/pm_runtime.h> 22 #include <linux/regulator/consumer.h> 23 #include <linux/slab.h> 24 #include <linux/spinlock.h> 25 #include <linux/interrupt.h> 26 #include <linux/reset.h> 27 28 #include <linux/mmc/card.h> 29 #include <linux/mmc/core.h> 30 #include <linux/mmc/host.h> 31 #include <linux/mmc/mmc.h> 32 #include <linux/mmc/sd.h> 33 #include <linux/mmc/sdio.h> 34 #include <linux/mmc/slot-gpio.h> 35 36 #include "cqhci.h" 37 38 #define MAX_BD_NUM 1024 39 #define MSDC_NR_CLOCKS 3 40 41 /*--------------------------------------------------------------------------*/ 42 /* Common Definition */ 43 /*--------------------------------------------------------------------------*/ 44 #define MSDC_BUS_1BITS 0x0 45 #define MSDC_BUS_4BITS 0x1 46 #define MSDC_BUS_8BITS 0x2 47 48 #define MSDC_BURST_64B 0x6 49 50 /*--------------------------------------------------------------------------*/ 51 /* Register Offset */ 52 /*--------------------------------------------------------------------------*/ 53 #define MSDC_CFG 0x0 54 #define MSDC_IOCON 0x04 55 #define MSDC_PS 0x08 56 #define MSDC_INT 0x0c 57 #define MSDC_INTEN 0x10 58 #define MSDC_FIFOCS 0x14 59 #define SDC_CFG 0x30 60 #define SDC_CMD 0x34 61 #define SDC_ARG 0x38 62 #define SDC_STS 0x3c 63 #define SDC_RESP0 0x40 64 #define SDC_RESP1 0x44 65 #define SDC_RESP2 0x48 66 #define SDC_RESP3 0x4c 67 #define SDC_BLK_NUM 0x50 68 #define SDC_ADV_CFG0 0x64 69 #define EMMC_IOCON 0x7c 70 #define SDC_ACMD_RESP 0x80 71 #define DMA_SA_H4BIT 0x8c 72 #define MSDC_DMA_SA 0x90 73 #define MSDC_DMA_CTRL 0x98 74 #define MSDC_DMA_CFG 0x9c 75 #define MSDC_PATCH_BIT 0xb0 76 #define MSDC_PATCH_BIT1 0xb4 77 #define MSDC_PATCH_BIT2 0xb8 78 #define MSDC_PAD_TUNE 0xec 79 #define MSDC_PAD_TUNE0 0xf0 80 #define PAD_DS_TUNE 0x188 81 #define PAD_CMD_TUNE 0x18c 82 #define EMMC51_CFG0 0x204 83 #define EMMC50_CFG0 0x208 84 #define EMMC50_CFG1 0x20c 85 #define EMMC50_CFG3 0x220 86 #define SDC_FIFO_CFG 0x228 87 #define CQHCI_SETTING 0x7fc 88 89 /*--------------------------------------------------------------------------*/ 90 /* Top Pad Register Offset */ 91 /*--------------------------------------------------------------------------*/ 92 #define EMMC_TOP_CONTROL 0x00 93 #define EMMC_TOP_CMD 0x04 94 #define EMMC50_PAD_DS_TUNE 0x0c 95 96 /*--------------------------------------------------------------------------*/ 97 /* Register Mask */ 98 /*--------------------------------------------------------------------------*/ 99 100 /* MSDC_CFG mask */ 101 #define MSDC_CFG_MODE (0x1 << 0) /* RW */ 102 #define MSDC_CFG_CKPDN (0x1 << 1) /* RW */ 103 #define MSDC_CFG_RST (0x1 << 2) /* RW */ 104 #define MSDC_CFG_PIO (0x1 << 3) /* RW */ 105 #define MSDC_CFG_CKDRVEN (0x1 << 4) /* RW */ 106 #define MSDC_CFG_BV18SDT (0x1 << 5) /* RW */ 107 #define MSDC_CFG_BV18PSS (0x1 << 6) /* R */ 108 #define MSDC_CFG_CKSTB (0x1 << 7) /* R */ 109 #define MSDC_CFG_CKDIV (0xff << 8) /* RW */ 110 #define MSDC_CFG_CKMOD (0x3 << 16) /* RW */ 111 #define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */ 112 #define MSDC_CFG_HS400_CK_MODE_EXTRA (0x1 << 22) /* RW */ 113 #define MSDC_CFG_CKDIV_EXTRA (0xfff << 8) /* RW */ 114 #define MSDC_CFG_CKMOD_EXTRA (0x3 << 20) /* RW */ 115 116 /* MSDC_IOCON mask */ 117 #define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */ 118 #define MSDC_IOCON_RSPL (0x1 << 1) /* RW */ 119 #define MSDC_IOCON_DSPL (0x1 << 2) /* RW */ 120 #define MSDC_IOCON_DDLSEL (0x1 << 3) /* RW */ 121 #define MSDC_IOCON_DDR50CKD (0x1 << 4) /* RW */ 122 #define MSDC_IOCON_DSPLSEL (0x1 << 5) /* RW */ 123 #define MSDC_IOCON_W_DSPL (0x1 << 8) /* RW */ 124 #define MSDC_IOCON_D0SPL (0x1 << 16) /* RW */ 125 #define MSDC_IOCON_D1SPL (0x1 << 17) /* RW */ 126 #define MSDC_IOCON_D2SPL (0x1 << 18) /* RW */ 127 #define MSDC_IOCON_D3SPL (0x1 << 19) /* RW */ 128 #define MSDC_IOCON_D4SPL (0x1 << 20) /* RW */ 129 #define MSDC_IOCON_D5SPL (0x1 << 21) /* RW */ 130 #define MSDC_IOCON_D6SPL (0x1 << 22) /* RW */ 131 #define MSDC_IOCON_D7SPL (0x1 << 23) /* RW */ 132 #define MSDC_IOCON_RISCSZ (0x3 << 24) /* RW */ 133 134 /* MSDC_PS mask */ 135 #define MSDC_PS_CDEN (0x1 << 0) /* RW */ 136 #define MSDC_PS_CDSTS (0x1 << 1) /* R */ 137 #define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */ 138 #define MSDC_PS_DAT (0xff << 16) /* R */ 139 #define MSDC_PS_DATA1 (0x1 << 17) /* R */ 140 #define MSDC_PS_CMD (0x1 << 24) /* R */ 141 #define MSDC_PS_WP (0x1 << 31) /* R */ 142 143 /* MSDC_INT mask */ 144 #define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */ 145 #define MSDC_INT_CDSC (0x1 << 1) /* W1C */ 146 #define MSDC_INT_ACMDRDY (0x1 << 3) /* W1C */ 147 #define MSDC_INT_ACMDTMO (0x1 << 4) /* W1C */ 148 #define MSDC_INT_ACMDCRCERR (0x1 << 5) /* W1C */ 149 #define MSDC_INT_DMAQ_EMPTY (0x1 << 6) /* W1C */ 150 #define MSDC_INT_SDIOIRQ (0x1 << 7) /* W1C */ 151 #define MSDC_INT_CMDRDY (0x1 << 8) /* W1C */ 152 #define MSDC_INT_CMDTMO (0x1 << 9) /* W1C */ 153 #define MSDC_INT_RSPCRCERR (0x1 << 10) /* W1C */ 154 #define MSDC_INT_CSTA (0x1 << 11) /* R */ 155 #define MSDC_INT_XFER_COMPL (0x1 << 12) /* W1C */ 156 #define MSDC_INT_DXFER_DONE (0x1 << 13) /* W1C */ 157 #define MSDC_INT_DATTMO (0x1 << 14) /* W1C */ 158 #define MSDC_INT_DATCRCERR (0x1 << 15) /* W1C */ 159 #define MSDC_INT_ACMD19_DONE (0x1 << 16) /* W1C */ 160 #define MSDC_INT_DMA_BDCSERR (0x1 << 17) /* W1C */ 161 #define MSDC_INT_DMA_GPDCSERR (0x1 << 18) /* W1C */ 162 #define MSDC_INT_DMA_PROTECT (0x1 << 19) /* W1C */ 163 #define MSDC_INT_CMDQ (0x1 << 28) /* W1C */ 164 165 /* MSDC_INTEN mask */ 166 #define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */ 167 #define MSDC_INTEN_CDSC (0x1 << 1) /* RW */ 168 #define MSDC_INTEN_ACMDRDY (0x1 << 3) /* RW */ 169 #define MSDC_INTEN_ACMDTMO (0x1 << 4) /* RW */ 170 #define MSDC_INTEN_ACMDCRCERR (0x1 << 5) /* RW */ 171 #define MSDC_INTEN_DMAQ_EMPTY (0x1 << 6) /* RW */ 172 #define MSDC_INTEN_SDIOIRQ (0x1 << 7) /* RW */ 173 #define MSDC_INTEN_CMDRDY (0x1 << 8) /* RW */ 174 #define MSDC_INTEN_CMDTMO (0x1 << 9) /* RW */ 175 #define MSDC_INTEN_RSPCRCERR (0x1 << 10) /* RW */ 176 #define MSDC_INTEN_CSTA (0x1 << 11) /* RW */ 177 #define MSDC_INTEN_XFER_COMPL (0x1 << 12) /* RW */ 178 #define MSDC_INTEN_DXFER_DONE (0x1 << 13) /* RW */ 179 #define MSDC_INTEN_DATTMO (0x1 << 14) /* RW */ 180 #define MSDC_INTEN_DATCRCERR (0x1 << 15) /* RW */ 181 #define MSDC_INTEN_ACMD19_DONE (0x1 << 16) /* RW */ 182 #define MSDC_INTEN_DMA_BDCSERR (0x1 << 17) /* RW */ 183 #define MSDC_INTEN_DMA_GPDCSERR (0x1 << 18) /* RW */ 184 #define MSDC_INTEN_DMA_PROTECT (0x1 << 19) /* RW */ 185 186 /* MSDC_FIFOCS mask */ 187 #define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */ 188 #define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */ 189 #define MSDC_FIFOCS_CLR (0x1 << 31) /* RW */ 190 191 /* SDC_CFG mask */ 192 #define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */ 193 #define SDC_CFG_INSWKUP (0x1 << 1) /* RW */ 194 #define SDC_CFG_WRDTOC (0x1fff << 2) /* RW */ 195 #define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */ 196 #define SDC_CFG_SDIO (0x1 << 19) /* RW */ 197 #define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */ 198 #define SDC_CFG_INTATGAP (0x1 << 21) /* RW */ 199 #define SDC_CFG_DTOC (0xff << 24) /* RW */ 200 201 /* SDC_STS mask */ 202 #define SDC_STS_SDCBUSY (0x1 << 0) /* RW */ 203 #define SDC_STS_CMDBUSY (0x1 << 1) /* RW */ 204 #define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */ 205 206 #define SDC_DAT1_IRQ_TRIGGER (0x1 << 19) /* RW */ 207 /* SDC_ADV_CFG0 mask */ 208 #define SDC_RX_ENHANCE_EN (0x1 << 20) /* RW */ 209 210 /* DMA_SA_H4BIT mask */ 211 #define DMA_ADDR_HIGH_4BIT (0xf << 0) /* RW */ 212 213 /* MSDC_DMA_CTRL mask */ 214 #define MSDC_DMA_CTRL_START (0x1 << 0) /* W */ 215 #define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */ 216 #define MSDC_DMA_CTRL_RESUME (0x1 << 2) /* W */ 217 #define MSDC_DMA_CTRL_MODE (0x1 << 8) /* RW */ 218 #define MSDC_DMA_CTRL_LASTBUF (0x1 << 10) /* RW */ 219 #define MSDC_DMA_CTRL_BRUSTSZ (0x7 << 12) /* RW */ 220 221 /* MSDC_DMA_CFG mask */ 222 #define MSDC_DMA_CFG_STS (0x1 << 0) /* R */ 223 #define MSDC_DMA_CFG_DECSEN (0x1 << 1) /* RW */ 224 #define MSDC_DMA_CFG_AHBHPROT2 (0x2 << 8) /* RW */ 225 #define MSDC_DMA_CFG_ACTIVEEN (0x2 << 12) /* RW */ 226 #define MSDC_DMA_CFG_CS12B16B (0x1 << 16) /* RW */ 227 228 /* MSDC_PATCH_BIT mask */ 229 #define MSDC_PATCH_BIT_ODDSUPP (0x1 << 1) /* RW */ 230 #define MSDC_INT_DAT_LATCH_CK_SEL (0x7 << 7) 231 #define MSDC_CKGEN_MSDC_DLY_SEL (0x1f << 10) 232 #define MSDC_PATCH_BIT_IODSSEL (0x1 << 16) /* RW */ 233 #define MSDC_PATCH_BIT_IOINTSEL (0x1 << 17) /* RW */ 234 #define MSDC_PATCH_BIT_BUSYDLY (0xf << 18) /* RW */ 235 #define MSDC_PATCH_BIT_WDOD (0xf << 22) /* RW */ 236 #define MSDC_PATCH_BIT_IDRTSEL (0x1 << 26) /* RW */ 237 #define MSDC_PATCH_BIT_CMDFSEL (0x1 << 27) /* RW */ 238 #define MSDC_PATCH_BIT_INTDLSEL (0x1 << 28) /* RW */ 239 #define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ 240 #define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ 241 242 #define MSDC_PATCH_BIT1_CMDTA (0x7 << 3) /* RW */ 243 #define MSDC_PB1_BUSY_CHECK_SEL (0x1 << 7) /* RW */ 244 #define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */ 245 246 #define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */ 247 #define MSDC_PATCH_BIT2_CFGCRCSTS (0x1 << 28) /* RW */ 248 #define MSDC_PB2_SUPPORT_64G (0x1 << 1) /* RW */ 249 #define MSDC_PB2_RESPWAIT (0x3 << 2) /* RW */ 250 #define MSDC_PB2_RESPSTSENSEL (0x7 << 16) /* RW */ 251 #define MSDC_PB2_CRCSTSENSEL (0x7 << 29) /* RW */ 252 253 #define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */ 254 #define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */ 255 #define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */ 256 #define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */ 257 #define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */ 258 #define MSDC_PAD_TUNE_RXDLYSEL (0x1 << 15) /* RW */ 259 #define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */ 260 #define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */ 261 262 #define PAD_DS_TUNE_DLY_SEL (0x1 << 0) /* RW */ 263 #define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */ 264 #define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */ 265 #define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */ 266 267 #define PAD_CMD_TUNE_RX_DLY3 (0x1f << 1) /* RW */ 268 269 /* EMMC51_CFG0 mask */ 270 #define CMDQ_RDAT_CNT (0x3ff << 12) /* RW */ 271 272 #define EMMC50_CFG_PADCMD_LATCHCK (0x1 << 0) /* RW */ 273 #define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */ 274 #define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */ 275 #define EMMC50_CFG_CMD_RESP_SEL (0x1 << 9) /* RW */ 276 277 /* EMMC50_CFG1 mask */ 278 #define EMMC50_CFG1_DS_CFG (0x1 << 28) /* RW */ 279 280 #define EMMC50_CFG3_OUTS_WR (0x1f << 0) /* RW */ 281 282 #define SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW */ 283 #define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */ 284 285 /* CQHCI_SETTING */ 286 #define CQHCI_RD_CMD_WND_SEL (0x1 << 14) /* RW */ 287 #define CQHCI_WR_CMD_WND_SEL (0x1 << 15) /* RW */ 288 289 /* EMMC_TOP_CONTROL mask */ 290 #define PAD_RXDLY_SEL (0x1 << 0) /* RW */ 291 #define DELAY_EN (0x1 << 1) /* RW */ 292 #define PAD_DAT_RD_RXDLY2 (0x1f << 2) /* RW */ 293 #define PAD_DAT_RD_RXDLY (0x1f << 7) /* RW */ 294 #define PAD_DAT_RD_RXDLY2_SEL (0x1 << 12) /* RW */ 295 #define PAD_DAT_RD_RXDLY_SEL (0x1 << 13) /* RW */ 296 #define DATA_K_VALUE_SEL (0x1 << 14) /* RW */ 297 #define SDC_RX_ENH_EN (0x1 << 15) /* TW */ 298 299 /* EMMC_TOP_CMD mask */ 300 #define PAD_CMD_RXDLY2 (0x1f << 0) /* RW */ 301 #define PAD_CMD_RXDLY (0x1f << 5) /* RW */ 302 #define PAD_CMD_RD_RXDLY2_SEL (0x1 << 10) /* RW */ 303 #define PAD_CMD_RD_RXDLY_SEL (0x1 << 11) /* RW */ 304 #define PAD_CMD_TX_DLY (0x1f << 12) /* RW */ 305 306 /* EMMC50_PAD_DS_TUNE mask */ 307 #define PAD_DS_DLY_SEL (0x1 << 16) /* RW */ 308 #define PAD_DS_DLY1 (0x1f << 10) /* RW */ 309 #define PAD_DS_DLY3 (0x1f << 0) /* RW */ 310 311 #define REQ_CMD_EIO (0x1 << 0) 312 #define REQ_CMD_TMO (0x1 << 1) 313 #define REQ_DAT_ERR (0x1 << 2) 314 #define REQ_STOP_EIO (0x1 << 3) 315 #define REQ_STOP_TMO (0x1 << 4) 316 #define REQ_CMD_BUSY (0x1 << 5) 317 318 #define MSDC_PREPARE_FLAG (0x1 << 0) 319 #define MSDC_ASYNC_FLAG (0x1 << 1) 320 #define MSDC_MMAP_FLAG (0x1 << 2) 321 322 #define MTK_MMC_AUTOSUSPEND_DELAY 50 323 #define CMD_TIMEOUT (HZ/10 * 5) /* 100ms x5 */ 324 #define DAT_TIMEOUT (HZ * 5) /* 1000ms x5 */ 325 326 #define DEFAULT_DEBOUNCE (8) /* 8 cycles CD debounce */ 327 328 #define PAD_DELAY_MAX 32 /* PAD delay cells */ 329 /*--------------------------------------------------------------------------*/ 330 /* Descriptor Structure */ 331 /*--------------------------------------------------------------------------*/ 332 struct mt_gpdma_desc { 333 u32 gpd_info; 334 #define GPDMA_DESC_HWO (0x1 << 0) 335 #define GPDMA_DESC_BDP (0x1 << 1) 336 #define GPDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */ 337 #define GPDMA_DESC_INT (0x1 << 16) 338 #define GPDMA_DESC_NEXT_H4 (0xf << 24) 339 #define GPDMA_DESC_PTR_H4 (0xf << 28) 340 u32 next; 341 u32 ptr; 342 u32 gpd_data_len; 343 #define GPDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */ 344 #define GPDMA_DESC_EXTLEN (0xff << 16) /* bit16 ~ bit23 */ 345 u32 arg; 346 u32 blknum; 347 u32 cmd; 348 }; 349 350 struct mt_bdma_desc { 351 u32 bd_info; 352 #define BDMA_DESC_EOL (0x1 << 0) 353 #define BDMA_DESC_CHECKSUM (0xff << 8) /* bit8 ~ bit15 */ 354 #define BDMA_DESC_BLKPAD (0x1 << 17) 355 #define BDMA_DESC_DWPAD (0x1 << 18) 356 #define BDMA_DESC_NEXT_H4 (0xf << 24) 357 #define BDMA_DESC_PTR_H4 (0xf << 28) 358 u32 next; 359 u32 ptr; 360 u32 bd_data_len; 361 #define BDMA_DESC_BUFLEN (0xffff) /* bit0 ~ bit15 */ 362 #define BDMA_DESC_BUFLEN_EXT (0xffffff) /* bit0 ~ bit23 */ 363 }; 364 365 struct msdc_dma { 366 struct scatterlist *sg; /* I/O scatter list */ 367 struct mt_gpdma_desc *gpd; /* pointer to gpd array */ 368 struct mt_bdma_desc *bd; /* pointer to bd array */ 369 dma_addr_t gpd_addr; /* the physical address of gpd array */ 370 dma_addr_t bd_addr; /* the physical address of bd array */ 371 }; 372 373 struct msdc_save_para { 374 u32 msdc_cfg; 375 u32 iocon; 376 u32 sdc_cfg; 377 u32 pad_tune; 378 u32 patch_bit0; 379 u32 patch_bit1; 380 u32 patch_bit2; 381 u32 pad_ds_tune; 382 u32 pad_cmd_tune; 383 u32 emmc50_cfg0; 384 u32 emmc50_cfg3; 385 u32 sdc_fifo_cfg; 386 u32 emmc_top_control; 387 u32 emmc_top_cmd; 388 u32 emmc50_pad_ds_tune; 389 }; 390 391 struct mtk_mmc_compatible { 392 u8 clk_div_bits; 393 bool recheck_sdio_irq; 394 bool hs400_tune; /* only used for MT8173 */ 395 u32 pad_tune_reg; 396 bool async_fifo; 397 bool data_tune; 398 bool busy_check; 399 bool stop_clk_fix; 400 bool enhance_rx; 401 bool support_64g; 402 bool use_internal_cd; 403 }; 404 405 struct msdc_tune_para { 406 u32 iocon; 407 u32 pad_tune; 408 u32 pad_cmd_tune; 409 u32 emmc_top_control; 410 u32 emmc_top_cmd; 411 }; 412 413 struct msdc_delay_phase { 414 u8 maxlen; 415 u8 start; 416 u8 final_phase; 417 }; 418 419 struct msdc_host { 420 struct device *dev; 421 const struct mtk_mmc_compatible *dev_comp; 422 int cmd_rsp; 423 424 spinlock_t lock; 425 struct mmc_request *mrq; 426 struct mmc_command *cmd; 427 struct mmc_data *data; 428 int error; 429 430 void __iomem *base; /* host base address */ 431 void __iomem *top_base; /* host top register base address */ 432 433 struct msdc_dma dma; /* dma channel */ 434 u64 dma_mask; 435 436 u32 timeout_ns; /* data timeout ns */ 437 u32 timeout_clks; /* data timeout clks */ 438 439 struct pinctrl *pinctrl; 440 struct pinctrl_state *pins_default; 441 struct pinctrl_state *pins_uhs; 442 struct delayed_work req_timeout; 443 int irq; /* host interrupt */ 444 struct reset_control *reset; 445 446 struct clk *src_clk; /* msdc source clock */ 447 struct clk *h_clk; /* msdc h_clk */ 448 struct clk *bus_clk; /* bus clock which used to access register */ 449 struct clk *src_clk_cg; /* msdc source clock control gate */ 450 struct clk *sys_clk_cg; /* msdc subsys clock control gate */ 451 struct clk_bulk_data bulk_clks[MSDC_NR_CLOCKS]; 452 u32 mclk; /* mmc subsystem clock frequency */ 453 u32 src_clk_freq; /* source clock frequency */ 454 unsigned char timing; 455 bool vqmmc_enabled; 456 u32 latch_ck; 457 u32 hs400_ds_delay; 458 u32 hs400_ds_dly3; 459 u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */ 460 u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */ 461 bool hs400_cmd_resp_sel_rising; 462 /* cmd response sample selection for HS400 */ 463 bool hs400_mode; /* current eMMC will run at hs400 mode */ 464 bool hs400_tuning; /* hs400 mode online tuning */ 465 bool internal_cd; /* Use internal card-detect logic */ 466 bool cqhci; /* support eMMC hw cmdq */ 467 struct msdc_save_para save_para; /* used when gate HCLK */ 468 struct msdc_tune_para def_tune_para; /* default tune setting */ 469 struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */ 470 struct cqhci_host *cq_host; 471 }; 472 473 static const struct mtk_mmc_compatible mt8135_compat = { 474 .clk_div_bits = 8, 475 .recheck_sdio_irq = true, 476 .hs400_tune = false, 477 .pad_tune_reg = MSDC_PAD_TUNE, 478 .async_fifo = false, 479 .data_tune = false, 480 .busy_check = false, 481 .stop_clk_fix = false, 482 .enhance_rx = false, 483 .support_64g = false, 484 }; 485 486 static const struct mtk_mmc_compatible mt8173_compat = { 487 .clk_div_bits = 8, 488 .recheck_sdio_irq = true, 489 .hs400_tune = true, 490 .pad_tune_reg = MSDC_PAD_TUNE, 491 .async_fifo = false, 492 .data_tune = false, 493 .busy_check = false, 494 .stop_clk_fix = false, 495 .enhance_rx = false, 496 .support_64g = false, 497 }; 498 499 static const struct mtk_mmc_compatible mt8183_compat = { 500 .clk_div_bits = 12, 501 .recheck_sdio_irq = false, 502 .hs400_tune = false, 503 .pad_tune_reg = MSDC_PAD_TUNE0, 504 .async_fifo = true, 505 .data_tune = true, 506 .busy_check = true, 507 .stop_clk_fix = true, 508 .enhance_rx = true, 509 .support_64g = true, 510 }; 511 512 static const struct mtk_mmc_compatible mt2701_compat = { 513 .clk_div_bits = 12, 514 .recheck_sdio_irq = true, 515 .hs400_tune = false, 516 .pad_tune_reg = MSDC_PAD_TUNE0, 517 .async_fifo = true, 518 .data_tune = true, 519 .busy_check = false, 520 .stop_clk_fix = false, 521 .enhance_rx = false, 522 .support_64g = false, 523 }; 524 525 static const struct mtk_mmc_compatible mt2712_compat = { 526 .clk_div_bits = 12, 527 .recheck_sdio_irq = false, 528 .hs400_tune = false, 529 .pad_tune_reg = MSDC_PAD_TUNE0, 530 .async_fifo = true, 531 .data_tune = true, 532 .busy_check = true, 533 .stop_clk_fix = true, 534 .enhance_rx = true, 535 .support_64g = true, 536 }; 537 538 static const struct mtk_mmc_compatible mt7622_compat = { 539 .clk_div_bits = 12, 540 .recheck_sdio_irq = true, 541 .hs400_tune = false, 542 .pad_tune_reg = MSDC_PAD_TUNE0, 543 .async_fifo = true, 544 .data_tune = true, 545 .busy_check = true, 546 .stop_clk_fix = true, 547 .enhance_rx = true, 548 .support_64g = false, 549 }; 550 551 static const struct mtk_mmc_compatible mt8516_compat = { 552 .clk_div_bits = 12, 553 .recheck_sdio_irq = true, 554 .hs400_tune = false, 555 .pad_tune_reg = MSDC_PAD_TUNE0, 556 .async_fifo = true, 557 .data_tune = true, 558 .busy_check = true, 559 .stop_clk_fix = true, 560 }; 561 562 static const struct mtk_mmc_compatible mt7620_compat = { 563 .clk_div_bits = 8, 564 .recheck_sdio_irq = true, 565 .hs400_tune = false, 566 .pad_tune_reg = MSDC_PAD_TUNE, 567 .async_fifo = false, 568 .data_tune = false, 569 .busy_check = false, 570 .stop_clk_fix = false, 571 .enhance_rx = false, 572 .use_internal_cd = true, 573 }; 574 575 static const struct mtk_mmc_compatible mt6779_compat = { 576 .clk_div_bits = 12, 577 .recheck_sdio_irq = false, 578 .hs400_tune = false, 579 .pad_tune_reg = MSDC_PAD_TUNE0, 580 .async_fifo = true, 581 .data_tune = true, 582 .busy_check = true, 583 .stop_clk_fix = true, 584 .enhance_rx = true, 585 .support_64g = true, 586 }; 587 588 static const struct of_device_id msdc_of_ids[] = { 589 { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat}, 590 { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat}, 591 { .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat}, 592 { .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat}, 593 { .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat}, 594 { .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat}, 595 { .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat}, 596 { .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat}, 597 { .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat}, 598 {} 599 }; 600 MODULE_DEVICE_TABLE(of, msdc_of_ids); 601 602 static void sdr_set_bits(void __iomem *reg, u32 bs) 603 { 604 u32 val = readl(reg); 605 606 val |= bs; 607 writel(val, reg); 608 } 609 610 static void sdr_clr_bits(void __iomem *reg, u32 bs) 611 { 612 u32 val = readl(reg); 613 614 val &= ~bs; 615 writel(val, reg); 616 } 617 618 static void sdr_set_field(void __iomem *reg, u32 field, u32 val) 619 { 620 unsigned int tv = readl(reg); 621 622 tv &= ~field; 623 tv |= ((val) << (ffs((unsigned int)field) - 1)); 624 writel(tv, reg); 625 } 626 627 static void sdr_get_field(void __iomem *reg, u32 field, u32 *val) 628 { 629 unsigned int tv = readl(reg); 630 631 *val = ((tv & field) >> (ffs((unsigned int)field) - 1)); 632 } 633 634 static void msdc_reset_hw(struct msdc_host *host) 635 { 636 u32 val; 637 638 sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST); 639 readl_poll_timeout(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0); 640 641 sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR); 642 readl_poll_timeout(host->base + MSDC_FIFOCS, val, 643 !(val & MSDC_FIFOCS_CLR), 0, 0); 644 645 val = readl(host->base + MSDC_INT); 646 writel(val, host->base + MSDC_INT); 647 } 648 649 static void msdc_cmd_next(struct msdc_host *host, 650 struct mmc_request *mrq, struct mmc_command *cmd); 651 static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb); 652 653 static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR | 654 MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY | 655 MSDC_INTEN_ACMDCRCERR | MSDC_INTEN_ACMDTMO; 656 static const u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | 657 MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR | 658 MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT; 659 660 static u8 msdc_dma_calcs(u8 *buf, u32 len) 661 { 662 u32 i, sum = 0; 663 664 for (i = 0; i < len; i++) 665 sum += buf[i]; 666 return 0xff - (u8) sum; 667 } 668 669 static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma, 670 struct mmc_data *data) 671 { 672 unsigned int j, dma_len; 673 dma_addr_t dma_address; 674 u32 dma_ctrl; 675 struct scatterlist *sg; 676 struct mt_gpdma_desc *gpd; 677 struct mt_bdma_desc *bd; 678 679 sg = data->sg; 680 681 gpd = dma->gpd; 682 bd = dma->bd; 683 684 /* modify gpd */ 685 gpd->gpd_info |= GPDMA_DESC_HWO; 686 gpd->gpd_info |= GPDMA_DESC_BDP; 687 /* need to clear first. use these bits to calc checksum */ 688 gpd->gpd_info &= ~GPDMA_DESC_CHECKSUM; 689 gpd->gpd_info |= msdc_dma_calcs((u8 *) gpd, 16) << 8; 690 691 /* modify bd */ 692 for_each_sg(data->sg, sg, data->sg_count, j) { 693 dma_address = sg_dma_address(sg); 694 dma_len = sg_dma_len(sg); 695 696 /* init bd */ 697 bd[j].bd_info &= ~BDMA_DESC_BLKPAD; 698 bd[j].bd_info &= ~BDMA_DESC_DWPAD; 699 bd[j].ptr = lower_32_bits(dma_address); 700 if (host->dev_comp->support_64g) { 701 bd[j].bd_info &= ~BDMA_DESC_PTR_H4; 702 bd[j].bd_info |= (upper_32_bits(dma_address) & 0xf) 703 << 28; 704 } 705 706 if (host->dev_comp->support_64g) { 707 bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN_EXT; 708 bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN_EXT); 709 } else { 710 bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN; 711 bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN); 712 } 713 714 if (j == data->sg_count - 1) /* the last bd */ 715 bd[j].bd_info |= BDMA_DESC_EOL; 716 else 717 bd[j].bd_info &= ~BDMA_DESC_EOL; 718 719 /* checksume need to clear first */ 720 bd[j].bd_info &= ~BDMA_DESC_CHECKSUM; 721 bd[j].bd_info |= msdc_dma_calcs((u8 *)(&bd[j]), 16) << 8; 722 } 723 724 sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1); 725 dma_ctrl = readl_relaxed(host->base + MSDC_DMA_CTRL); 726 dma_ctrl &= ~(MSDC_DMA_CTRL_BRUSTSZ | MSDC_DMA_CTRL_MODE); 727 dma_ctrl |= (MSDC_BURST_64B << 12 | 1 << 8); 728 writel_relaxed(dma_ctrl, host->base + MSDC_DMA_CTRL); 729 if (host->dev_comp->support_64g) 730 sdr_set_field(host->base + DMA_SA_H4BIT, DMA_ADDR_HIGH_4BIT, 731 upper_32_bits(dma->gpd_addr) & 0xf); 732 writel(lower_32_bits(dma->gpd_addr), host->base + MSDC_DMA_SA); 733 } 734 735 static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data) 736 { 737 if (!(data->host_cookie & MSDC_PREPARE_FLAG)) { 738 data->host_cookie |= MSDC_PREPARE_FLAG; 739 data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, 740 mmc_get_dma_dir(data)); 741 } 742 } 743 744 static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data) 745 { 746 if (data->host_cookie & MSDC_ASYNC_FLAG) 747 return; 748 749 if (data->host_cookie & MSDC_PREPARE_FLAG) { 750 dma_unmap_sg(host->dev, data->sg, data->sg_len, 751 mmc_get_dma_dir(data)); 752 data->host_cookie &= ~MSDC_PREPARE_FLAG; 753 } 754 } 755 756 static u64 msdc_timeout_cal(struct msdc_host *host, u64 ns, u64 clks) 757 { 758 struct mmc_host *mmc = mmc_from_priv(host); 759 u64 timeout, clk_ns; 760 u32 mode = 0; 761 762 if (mmc->actual_clock == 0) { 763 timeout = 0; 764 } else { 765 clk_ns = 1000000000ULL; 766 do_div(clk_ns, mmc->actual_clock); 767 timeout = ns + clk_ns - 1; 768 do_div(timeout, clk_ns); 769 timeout += clks; 770 /* in 1048576 sclk cycle unit */ 771 timeout = DIV_ROUND_UP(timeout, (0x1 << 20)); 772 if (host->dev_comp->clk_div_bits == 8) 773 sdr_get_field(host->base + MSDC_CFG, 774 MSDC_CFG_CKMOD, &mode); 775 else 776 sdr_get_field(host->base + MSDC_CFG, 777 MSDC_CFG_CKMOD_EXTRA, &mode); 778 /*DDR mode will double the clk cycles for data timeout */ 779 timeout = mode >= 2 ? timeout * 2 : timeout; 780 timeout = timeout > 1 ? timeout - 1 : 0; 781 } 782 return timeout; 783 } 784 785 /* clock control primitives */ 786 static void msdc_set_timeout(struct msdc_host *host, u64 ns, u64 clks) 787 { 788 u64 timeout; 789 790 host->timeout_ns = ns; 791 host->timeout_clks = clks; 792 793 timeout = msdc_timeout_cal(host, ns, clks); 794 sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 795 (u32)(timeout > 255 ? 255 : timeout)); 796 } 797 798 static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks) 799 { 800 u64 timeout; 801 802 timeout = msdc_timeout_cal(host, ns, clks); 803 sdr_set_field(host->base + SDC_CFG, SDC_CFG_WRDTOC, 804 (u32)(timeout > 8191 ? 8191 : timeout)); 805 } 806 807 static void msdc_gate_clock(struct msdc_host *host) 808 { 809 clk_bulk_disable_unprepare(MSDC_NR_CLOCKS, host->bulk_clks); 810 clk_disable_unprepare(host->src_clk_cg); 811 clk_disable_unprepare(host->src_clk); 812 clk_disable_unprepare(host->bus_clk); 813 clk_disable_unprepare(host->h_clk); 814 } 815 816 static int msdc_ungate_clock(struct msdc_host *host) 817 { 818 u32 val; 819 int ret; 820 821 clk_prepare_enable(host->h_clk); 822 clk_prepare_enable(host->bus_clk); 823 clk_prepare_enable(host->src_clk); 824 clk_prepare_enable(host->src_clk_cg); 825 ret = clk_bulk_prepare_enable(MSDC_NR_CLOCKS, host->bulk_clks); 826 if (ret) { 827 dev_err(host->dev, "Cannot enable pclk/axi/ahb clock gates\n"); 828 return ret; 829 } 830 831 return readl_poll_timeout(host->base + MSDC_CFG, val, 832 (val & MSDC_CFG_CKSTB), 1, 20000); 833 } 834 835 static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) 836 { 837 struct mmc_host *mmc = mmc_from_priv(host); 838 u32 mode; 839 u32 flags; 840 u32 div; 841 u32 sclk; 842 u32 tune_reg = host->dev_comp->pad_tune_reg; 843 u32 val; 844 845 if (!hz) { 846 dev_dbg(host->dev, "set mclk to 0\n"); 847 host->mclk = 0; 848 mmc->actual_clock = 0; 849 sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); 850 return; 851 } 852 853 flags = readl(host->base + MSDC_INTEN); 854 sdr_clr_bits(host->base + MSDC_INTEN, flags); 855 if (host->dev_comp->clk_div_bits == 8) 856 sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE); 857 else 858 sdr_clr_bits(host->base + MSDC_CFG, 859 MSDC_CFG_HS400_CK_MODE_EXTRA); 860 if (timing == MMC_TIMING_UHS_DDR50 || 861 timing == MMC_TIMING_MMC_DDR52 || 862 timing == MMC_TIMING_MMC_HS400) { 863 if (timing == MMC_TIMING_MMC_HS400) 864 mode = 0x3; 865 else 866 mode = 0x2; /* ddr mode and use divisor */ 867 868 if (hz >= (host->src_clk_freq >> 2)) { 869 div = 0; /* mean div = 1/4 */ 870 sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */ 871 } else { 872 div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2); 873 sclk = (host->src_clk_freq >> 2) / div; 874 div = (div >> 1); 875 } 876 877 if (timing == MMC_TIMING_MMC_HS400 && 878 hz >= (host->src_clk_freq >> 1)) { 879 if (host->dev_comp->clk_div_bits == 8) 880 sdr_set_bits(host->base + MSDC_CFG, 881 MSDC_CFG_HS400_CK_MODE); 882 else 883 sdr_set_bits(host->base + MSDC_CFG, 884 MSDC_CFG_HS400_CK_MODE_EXTRA); 885 sclk = host->src_clk_freq >> 1; 886 div = 0; /* div is ignore when bit18 is set */ 887 } 888 } else if (hz >= host->src_clk_freq) { 889 mode = 0x1; /* no divisor */ 890 div = 0; 891 sclk = host->src_clk_freq; 892 } else { 893 mode = 0x0; /* use divisor */ 894 if (hz >= (host->src_clk_freq >> 1)) { 895 div = 0; /* mean div = 1/2 */ 896 sclk = host->src_clk_freq >> 1; /* sclk = clk / 2 */ 897 } else { 898 div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2); 899 sclk = (host->src_clk_freq >> 2) / div; 900 } 901 } 902 sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); 903 /* 904 * As src_clk/HCLK use the same bit to gate/ungate, 905 * So if want to only gate src_clk, need gate its parent(mux). 906 */ 907 if (host->src_clk_cg) 908 clk_disable_unprepare(host->src_clk_cg); 909 else 910 clk_disable_unprepare(clk_get_parent(host->src_clk)); 911 if (host->dev_comp->clk_div_bits == 8) 912 sdr_set_field(host->base + MSDC_CFG, 913 MSDC_CFG_CKMOD | MSDC_CFG_CKDIV, 914 (mode << 8) | div); 915 else 916 sdr_set_field(host->base + MSDC_CFG, 917 MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA, 918 (mode << 12) | div); 919 if (host->src_clk_cg) 920 clk_prepare_enable(host->src_clk_cg); 921 else 922 clk_prepare_enable(clk_get_parent(host->src_clk)); 923 924 readl_poll_timeout(host->base + MSDC_CFG, val, (val & MSDC_CFG_CKSTB), 0, 0); 925 sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); 926 mmc->actual_clock = sclk; 927 host->mclk = hz; 928 host->timing = timing; 929 /* need because clk changed. */ 930 msdc_set_timeout(host, host->timeout_ns, host->timeout_clks); 931 sdr_set_bits(host->base + MSDC_INTEN, flags); 932 933 /* 934 * mmc_select_hs400() will drop to 50Mhz and High speed mode, 935 * tune result of hs200/200Mhz is not suitable for 50Mhz 936 */ 937 if (mmc->actual_clock <= 52000000) { 938 writel(host->def_tune_para.iocon, host->base + MSDC_IOCON); 939 if (host->top_base) { 940 writel(host->def_tune_para.emmc_top_control, 941 host->top_base + EMMC_TOP_CONTROL); 942 writel(host->def_tune_para.emmc_top_cmd, 943 host->top_base + EMMC_TOP_CMD); 944 } else { 945 writel(host->def_tune_para.pad_tune, 946 host->base + tune_reg); 947 } 948 } else { 949 writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON); 950 writel(host->saved_tune_para.pad_cmd_tune, 951 host->base + PAD_CMD_TUNE); 952 if (host->top_base) { 953 writel(host->saved_tune_para.emmc_top_control, 954 host->top_base + EMMC_TOP_CONTROL); 955 writel(host->saved_tune_para.emmc_top_cmd, 956 host->top_base + EMMC_TOP_CMD); 957 } else { 958 writel(host->saved_tune_para.pad_tune, 959 host->base + tune_reg); 960 } 961 } 962 963 if (timing == MMC_TIMING_MMC_HS400 && 964 host->dev_comp->hs400_tune) 965 sdr_set_field(host->base + tune_reg, 966 MSDC_PAD_TUNE_CMDRRDLY, 967 host->hs400_cmd_int_delay); 968 dev_dbg(host->dev, "sclk: %d, timing: %d\n", mmc->actual_clock, 969 timing); 970 } 971 972 static inline u32 msdc_cmd_find_resp(struct msdc_host *host, 973 struct mmc_command *cmd) 974 { 975 u32 resp; 976 977 switch (mmc_resp_type(cmd)) { 978 /* Actually, R1, R5, R6, R7 are the same */ 979 case MMC_RSP_R1: 980 resp = 0x1; 981 break; 982 case MMC_RSP_R1B: 983 resp = 0x7; 984 break; 985 case MMC_RSP_R2: 986 resp = 0x2; 987 break; 988 case MMC_RSP_R3: 989 resp = 0x3; 990 break; 991 case MMC_RSP_NONE: 992 default: 993 resp = 0x0; 994 break; 995 } 996 997 return resp; 998 } 999 1000 static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host, 1001 struct mmc_request *mrq, struct mmc_command *cmd) 1002 { 1003 struct mmc_host *mmc = mmc_from_priv(host); 1004 /* rawcmd : 1005 * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 | 1006 * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode 1007 */ 1008 u32 opcode = cmd->opcode; 1009 u32 resp = msdc_cmd_find_resp(host, cmd); 1010 u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7); 1011 1012 host->cmd_rsp = resp; 1013 1014 if ((opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int) -1) || 1015 opcode == MMC_STOP_TRANSMISSION) 1016 rawcmd |= (0x1 << 14); 1017 else if (opcode == SD_SWITCH_VOLTAGE) 1018 rawcmd |= (0x1 << 30); 1019 else if (opcode == SD_APP_SEND_SCR || 1020 opcode == SD_APP_SEND_NUM_WR_BLKS || 1021 (opcode == SD_SWITCH && mmc_cmd_type(cmd) == MMC_CMD_ADTC) || 1022 (opcode == SD_APP_SD_STATUS && mmc_cmd_type(cmd) == MMC_CMD_ADTC) || 1023 (opcode == MMC_SEND_EXT_CSD && mmc_cmd_type(cmd) == MMC_CMD_ADTC)) 1024 rawcmd |= (0x1 << 11); 1025 1026 if (cmd->data) { 1027 struct mmc_data *data = cmd->data; 1028 1029 if (mmc_op_multi(opcode)) { 1030 if (mmc_card_mmc(mmc->card) && mrq->sbc && 1031 !(mrq->sbc->arg & 0xFFFF0000)) 1032 rawcmd |= 0x2 << 28; /* AutoCMD23 */ 1033 } 1034 1035 rawcmd |= ((data->blksz & 0xFFF) << 16); 1036 if (data->flags & MMC_DATA_WRITE) 1037 rawcmd |= (0x1 << 13); 1038 if (data->blocks > 1) 1039 rawcmd |= (0x2 << 11); 1040 else 1041 rawcmd |= (0x1 << 11); 1042 /* Always use dma mode */ 1043 sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO); 1044 1045 if (host->timeout_ns != data->timeout_ns || 1046 host->timeout_clks != data->timeout_clks) 1047 msdc_set_timeout(host, data->timeout_ns, 1048 data->timeout_clks); 1049 1050 writel(data->blocks, host->base + SDC_BLK_NUM); 1051 } 1052 return rawcmd; 1053 } 1054 1055 static void msdc_start_data(struct msdc_host *host, struct mmc_command *cmd, 1056 struct mmc_data *data) 1057 { 1058 bool read; 1059 1060 WARN_ON(host->data); 1061 host->data = data; 1062 read = data->flags & MMC_DATA_READ; 1063 1064 mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); 1065 msdc_dma_setup(host, &host->dma, data); 1066 sdr_set_bits(host->base + MSDC_INTEN, data_ints_mask); 1067 sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1); 1068 dev_dbg(host->dev, "DMA start\n"); 1069 dev_dbg(host->dev, "%s: cmd=%d DMA data: %d blocks; read=%d\n", 1070 __func__, cmd->opcode, data->blocks, read); 1071 } 1072 1073 static int msdc_auto_cmd_done(struct msdc_host *host, int events, 1074 struct mmc_command *cmd) 1075 { 1076 u32 *rsp = cmd->resp; 1077 1078 rsp[0] = readl(host->base + SDC_ACMD_RESP); 1079 1080 if (events & MSDC_INT_ACMDRDY) { 1081 cmd->error = 0; 1082 } else { 1083 msdc_reset_hw(host); 1084 if (events & MSDC_INT_ACMDCRCERR) { 1085 cmd->error = -EILSEQ; 1086 host->error |= REQ_STOP_EIO; 1087 } else if (events & MSDC_INT_ACMDTMO) { 1088 cmd->error = -ETIMEDOUT; 1089 host->error |= REQ_STOP_TMO; 1090 } 1091 dev_err(host->dev, 1092 "%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n", 1093 __func__, cmd->opcode, cmd->arg, rsp[0], cmd->error); 1094 } 1095 return cmd->error; 1096 } 1097 1098 /* 1099 * msdc_recheck_sdio_irq - recheck whether the SDIO irq is lost 1100 * 1101 * Host controller may lost interrupt in some special case. 1102 * Add SDIO irq recheck mechanism to make sure all interrupts 1103 * can be processed immediately 1104 */ 1105 static void msdc_recheck_sdio_irq(struct msdc_host *host) 1106 { 1107 struct mmc_host *mmc = mmc_from_priv(host); 1108 u32 reg_int, reg_inten, reg_ps; 1109 1110 if (mmc->caps & MMC_CAP_SDIO_IRQ) { 1111 reg_inten = readl(host->base + MSDC_INTEN); 1112 if (reg_inten & MSDC_INTEN_SDIOIRQ) { 1113 reg_int = readl(host->base + MSDC_INT); 1114 reg_ps = readl(host->base + MSDC_PS); 1115 if (!(reg_int & MSDC_INT_SDIOIRQ || 1116 reg_ps & MSDC_PS_DATA1)) { 1117 __msdc_enable_sdio_irq(host, 0); 1118 sdio_signal_irq(mmc); 1119 } 1120 } 1121 } 1122 } 1123 1124 static void msdc_track_cmd_data(struct msdc_host *host, struct mmc_command *cmd) 1125 { 1126 if (host->error) 1127 dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n", 1128 __func__, cmd->opcode, cmd->arg, host->error); 1129 } 1130 1131 static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq) 1132 { 1133 unsigned long flags; 1134 1135 /* 1136 * No need check the return value of cancel_delayed_work, as only ONE 1137 * path will go here! 1138 */ 1139 cancel_delayed_work(&host->req_timeout); 1140 1141 spin_lock_irqsave(&host->lock, flags); 1142 host->mrq = NULL; 1143 spin_unlock_irqrestore(&host->lock, flags); 1144 1145 msdc_track_cmd_data(host, mrq->cmd); 1146 if (mrq->data) 1147 msdc_unprepare_data(host, mrq->data); 1148 if (host->error) 1149 msdc_reset_hw(host); 1150 mmc_request_done(mmc_from_priv(host), mrq); 1151 if (host->dev_comp->recheck_sdio_irq) 1152 msdc_recheck_sdio_irq(host); 1153 } 1154 1155 /* returns true if command is fully handled; returns false otherwise */ 1156 static bool msdc_cmd_done(struct msdc_host *host, int events, 1157 struct mmc_request *mrq, struct mmc_command *cmd) 1158 { 1159 bool done = false; 1160 bool sbc_error; 1161 unsigned long flags; 1162 u32 *rsp; 1163 1164 if (mrq->sbc && cmd == mrq->cmd && 1165 (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR 1166 | MSDC_INT_ACMDTMO))) 1167 msdc_auto_cmd_done(host, events, mrq->sbc); 1168 1169 sbc_error = mrq->sbc && mrq->sbc->error; 1170 1171 if (!sbc_error && !(events & (MSDC_INT_CMDRDY 1172 | MSDC_INT_RSPCRCERR 1173 | MSDC_INT_CMDTMO))) 1174 return done; 1175 1176 spin_lock_irqsave(&host->lock, flags); 1177 done = !host->cmd; 1178 host->cmd = NULL; 1179 spin_unlock_irqrestore(&host->lock, flags); 1180 1181 if (done) 1182 return true; 1183 rsp = cmd->resp; 1184 1185 sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask); 1186 1187 if (cmd->flags & MMC_RSP_PRESENT) { 1188 if (cmd->flags & MMC_RSP_136) { 1189 rsp[0] = readl(host->base + SDC_RESP3); 1190 rsp[1] = readl(host->base + SDC_RESP2); 1191 rsp[2] = readl(host->base + SDC_RESP1); 1192 rsp[3] = readl(host->base + SDC_RESP0); 1193 } else { 1194 rsp[0] = readl(host->base + SDC_RESP0); 1195 } 1196 } 1197 1198 if (!sbc_error && !(events & MSDC_INT_CMDRDY)) { 1199 if (events & MSDC_INT_CMDTMO || 1200 (cmd->opcode != MMC_SEND_TUNING_BLOCK && 1201 cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 && 1202 !host->hs400_tuning)) 1203 /* 1204 * should not clear fifo/interrupt as the tune data 1205 * may have alreay come when cmd19/cmd21 gets response 1206 * CRC error. 1207 */ 1208 msdc_reset_hw(host); 1209 if (events & MSDC_INT_RSPCRCERR) { 1210 cmd->error = -EILSEQ; 1211 host->error |= REQ_CMD_EIO; 1212 } else if (events & MSDC_INT_CMDTMO) { 1213 cmd->error = -ETIMEDOUT; 1214 host->error |= REQ_CMD_TMO; 1215 } 1216 } 1217 if (cmd->error) 1218 dev_dbg(host->dev, 1219 "%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n", 1220 __func__, cmd->opcode, cmd->arg, rsp[0], 1221 cmd->error); 1222 1223 msdc_cmd_next(host, mrq, cmd); 1224 return true; 1225 } 1226 1227 /* It is the core layer's responsibility to ensure card status 1228 * is correct before issue a request. but host design do below 1229 * checks recommended. 1230 */ 1231 static inline bool msdc_cmd_is_ready(struct msdc_host *host, 1232 struct mmc_request *mrq, struct mmc_command *cmd) 1233 { 1234 u32 val; 1235 int ret; 1236 1237 /* The max busy time we can endure is 20ms */ 1238 ret = readl_poll_timeout_atomic(host->base + SDC_STS, val, 1239 !(val & SDC_STS_CMDBUSY), 1, 20000); 1240 if (ret) { 1241 dev_err(host->dev, "CMD bus busy detected\n"); 1242 host->error |= REQ_CMD_BUSY; 1243 msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd); 1244 return false; 1245 } 1246 1247 if (mmc_resp_type(cmd) == MMC_RSP_R1B || cmd->data) { 1248 /* R1B or with data, should check SDCBUSY */ 1249 ret = readl_poll_timeout_atomic(host->base + SDC_STS, val, 1250 !(val & SDC_STS_SDCBUSY), 1, 20000); 1251 if (ret) { 1252 dev_err(host->dev, "Controller busy detected\n"); 1253 host->error |= REQ_CMD_BUSY; 1254 msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd); 1255 return false; 1256 } 1257 } 1258 return true; 1259 } 1260 1261 static void msdc_start_command(struct msdc_host *host, 1262 struct mmc_request *mrq, struct mmc_command *cmd) 1263 { 1264 u32 rawcmd; 1265 unsigned long flags; 1266 1267 WARN_ON(host->cmd); 1268 host->cmd = cmd; 1269 1270 mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); 1271 if (!msdc_cmd_is_ready(host, mrq, cmd)) 1272 return; 1273 1274 if ((readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16 || 1275 readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) { 1276 dev_err(host->dev, "TX/RX FIFO non-empty before start of IO. Reset\n"); 1277 msdc_reset_hw(host); 1278 } 1279 1280 cmd->error = 0; 1281 rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd); 1282 1283 spin_lock_irqsave(&host->lock, flags); 1284 sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask); 1285 spin_unlock_irqrestore(&host->lock, flags); 1286 1287 writel(cmd->arg, host->base + SDC_ARG); 1288 writel(rawcmd, host->base + SDC_CMD); 1289 } 1290 1291 static void msdc_cmd_next(struct msdc_host *host, 1292 struct mmc_request *mrq, struct mmc_command *cmd) 1293 { 1294 if ((cmd->error && 1295 !(cmd->error == -EILSEQ && 1296 (cmd->opcode == MMC_SEND_TUNING_BLOCK || 1297 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200 || 1298 host->hs400_tuning))) || 1299 (mrq->sbc && mrq->sbc->error)) 1300 msdc_request_done(host, mrq); 1301 else if (cmd == mrq->sbc) 1302 msdc_start_command(host, mrq, mrq->cmd); 1303 else if (!cmd->data) 1304 msdc_request_done(host, mrq); 1305 else 1306 msdc_start_data(host, cmd, cmd->data); 1307 } 1308 1309 static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq) 1310 { 1311 struct msdc_host *host = mmc_priv(mmc); 1312 1313 host->error = 0; 1314 WARN_ON(host->mrq); 1315 host->mrq = mrq; 1316 1317 if (mrq->data) 1318 msdc_prepare_data(host, mrq->data); 1319 1320 /* if SBC is required, we have HW option and SW option. 1321 * if HW option is enabled, and SBC does not have "special" flags, 1322 * use HW option, otherwise use SW option 1323 */ 1324 if (mrq->sbc && (!mmc_card_mmc(mmc->card) || 1325 (mrq->sbc->arg & 0xFFFF0000))) 1326 msdc_start_command(host, mrq, mrq->sbc); 1327 else 1328 msdc_start_command(host, mrq, mrq->cmd); 1329 } 1330 1331 static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 1332 { 1333 struct msdc_host *host = mmc_priv(mmc); 1334 struct mmc_data *data = mrq->data; 1335 1336 if (!data) 1337 return; 1338 1339 msdc_prepare_data(host, data); 1340 data->host_cookie |= MSDC_ASYNC_FLAG; 1341 } 1342 1343 static void msdc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 1344 int err) 1345 { 1346 struct msdc_host *host = mmc_priv(mmc); 1347 struct mmc_data *data = mrq->data; 1348 1349 if (!data) 1350 return; 1351 1352 if (data->host_cookie) { 1353 data->host_cookie &= ~MSDC_ASYNC_FLAG; 1354 msdc_unprepare_data(host, data); 1355 } 1356 } 1357 1358 static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq) 1359 { 1360 if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error && 1361 !mrq->sbc) 1362 msdc_start_command(host, mrq, mrq->stop); 1363 else 1364 msdc_request_done(host, mrq); 1365 } 1366 1367 static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, 1368 struct mmc_request *mrq, struct mmc_data *data) 1369 { 1370 struct mmc_command *stop; 1371 unsigned long flags; 1372 bool done; 1373 unsigned int check_data = events & 1374 (MSDC_INT_XFER_COMPL | MSDC_INT_DATCRCERR | MSDC_INT_DATTMO 1375 | MSDC_INT_DMA_BDCSERR | MSDC_INT_DMA_GPDCSERR 1376 | MSDC_INT_DMA_PROTECT); 1377 u32 val; 1378 int ret; 1379 1380 spin_lock_irqsave(&host->lock, flags); 1381 done = !host->data; 1382 if (check_data) 1383 host->data = NULL; 1384 spin_unlock_irqrestore(&host->lock, flags); 1385 1386 if (done) 1387 return true; 1388 stop = data->stop; 1389 1390 if (check_data || (stop && stop->error)) { 1391 dev_dbg(host->dev, "DMA status: 0x%8X\n", 1392 readl(host->base + MSDC_DMA_CFG)); 1393 sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1394 1); 1395 1396 ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CFG, val, 1397 !(val & MSDC_DMA_CFG_STS), 1, 20000); 1398 if (ret) { 1399 dev_dbg(host->dev, "DMA stop timed out\n"); 1400 return false; 1401 } 1402 1403 sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask); 1404 dev_dbg(host->dev, "DMA stop\n"); 1405 1406 if ((events & MSDC_INT_XFER_COMPL) && (!stop || !stop->error)) { 1407 data->bytes_xfered = data->blocks * data->blksz; 1408 } else { 1409 dev_dbg(host->dev, "interrupt events: %x\n", events); 1410 msdc_reset_hw(host); 1411 host->error |= REQ_DAT_ERR; 1412 data->bytes_xfered = 0; 1413 1414 if (events & MSDC_INT_DATTMO) 1415 data->error = -ETIMEDOUT; 1416 else if (events & MSDC_INT_DATCRCERR) 1417 data->error = -EILSEQ; 1418 1419 dev_dbg(host->dev, "%s: cmd=%d; blocks=%d", 1420 __func__, mrq->cmd->opcode, data->blocks); 1421 dev_dbg(host->dev, "data_error=%d xfer_size=%d\n", 1422 (int)data->error, data->bytes_xfered); 1423 } 1424 1425 msdc_data_xfer_next(host, mrq); 1426 done = true; 1427 } 1428 return done; 1429 } 1430 1431 static void msdc_set_buswidth(struct msdc_host *host, u32 width) 1432 { 1433 u32 val = readl(host->base + SDC_CFG); 1434 1435 val &= ~SDC_CFG_BUSWIDTH; 1436 1437 switch (width) { 1438 default: 1439 case MMC_BUS_WIDTH_1: 1440 val |= (MSDC_BUS_1BITS << 16); 1441 break; 1442 case MMC_BUS_WIDTH_4: 1443 val |= (MSDC_BUS_4BITS << 16); 1444 break; 1445 case MMC_BUS_WIDTH_8: 1446 val |= (MSDC_BUS_8BITS << 16); 1447 break; 1448 } 1449 1450 writel(val, host->base + SDC_CFG); 1451 dev_dbg(host->dev, "Bus Width = %d", width); 1452 } 1453 1454 static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios) 1455 { 1456 struct msdc_host *host = mmc_priv(mmc); 1457 int ret; 1458 1459 if (!IS_ERR(mmc->supply.vqmmc)) { 1460 if (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_330 && 1461 ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180) { 1462 dev_err(host->dev, "Unsupported signal voltage!\n"); 1463 return -EINVAL; 1464 } 1465 1466 ret = mmc_regulator_set_vqmmc(mmc, ios); 1467 if (ret < 0) { 1468 dev_dbg(host->dev, "Regulator set error %d (%d)\n", 1469 ret, ios->signal_voltage); 1470 return ret; 1471 } 1472 1473 /* Apply different pinctrl settings for different signal voltage */ 1474 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) 1475 pinctrl_select_state(host->pinctrl, host->pins_uhs); 1476 else 1477 pinctrl_select_state(host->pinctrl, host->pins_default); 1478 } 1479 return 0; 1480 } 1481 1482 static int msdc_card_busy(struct mmc_host *mmc) 1483 { 1484 struct msdc_host *host = mmc_priv(mmc); 1485 u32 status = readl(host->base + MSDC_PS); 1486 1487 /* only check if data0 is low */ 1488 return !(status & BIT(16)); 1489 } 1490 1491 static void msdc_request_timeout(struct work_struct *work) 1492 { 1493 struct msdc_host *host = container_of(work, struct msdc_host, 1494 req_timeout.work); 1495 1496 /* simulate HW timeout status */ 1497 dev_err(host->dev, "%s: aborting cmd/data/mrq\n", __func__); 1498 if (host->mrq) { 1499 dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__, 1500 host->mrq, host->mrq->cmd->opcode); 1501 if (host->cmd) { 1502 dev_err(host->dev, "%s: aborting cmd=%d\n", 1503 __func__, host->cmd->opcode); 1504 msdc_cmd_done(host, MSDC_INT_CMDTMO, host->mrq, 1505 host->cmd); 1506 } else if (host->data) { 1507 dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n", 1508 __func__, host->mrq->cmd->opcode, 1509 host->data->blocks); 1510 msdc_data_xfer_done(host, MSDC_INT_DATTMO, host->mrq, 1511 host->data); 1512 } 1513 } 1514 } 1515 1516 static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb) 1517 { 1518 if (enb) { 1519 sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ); 1520 sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE); 1521 if (host->dev_comp->recheck_sdio_irq) 1522 msdc_recheck_sdio_irq(host); 1523 } else { 1524 sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ); 1525 sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE); 1526 } 1527 } 1528 1529 static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb) 1530 { 1531 unsigned long flags; 1532 struct msdc_host *host = mmc_priv(mmc); 1533 1534 spin_lock_irqsave(&host->lock, flags); 1535 __msdc_enable_sdio_irq(host, enb); 1536 spin_unlock_irqrestore(&host->lock, flags); 1537 1538 if (enb) 1539 pm_runtime_get_noresume(host->dev); 1540 else 1541 pm_runtime_put_noidle(host->dev); 1542 } 1543 1544 static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts) 1545 { 1546 struct mmc_host *mmc = mmc_from_priv(host); 1547 int cmd_err = 0, dat_err = 0; 1548 1549 if (intsts & MSDC_INT_RSPCRCERR) { 1550 cmd_err = -EILSEQ; 1551 dev_err(host->dev, "%s: CMD CRC ERR", __func__); 1552 } else if (intsts & MSDC_INT_CMDTMO) { 1553 cmd_err = -ETIMEDOUT; 1554 dev_err(host->dev, "%s: CMD TIMEOUT ERR", __func__); 1555 } 1556 1557 if (intsts & MSDC_INT_DATCRCERR) { 1558 dat_err = -EILSEQ; 1559 dev_err(host->dev, "%s: DATA CRC ERR", __func__); 1560 } else if (intsts & MSDC_INT_DATTMO) { 1561 dat_err = -ETIMEDOUT; 1562 dev_err(host->dev, "%s: DATA TIMEOUT ERR", __func__); 1563 } 1564 1565 if (cmd_err || dat_err) { 1566 dev_err(host->dev, "cmd_err = %d, dat_err =%d, intsts = 0x%x", 1567 cmd_err, dat_err, intsts); 1568 } 1569 1570 return cqhci_irq(mmc, 0, cmd_err, dat_err); 1571 } 1572 1573 static irqreturn_t msdc_irq(int irq, void *dev_id) 1574 { 1575 struct msdc_host *host = (struct msdc_host *) dev_id; 1576 struct mmc_host *mmc = mmc_from_priv(host); 1577 1578 while (true) { 1579 struct mmc_request *mrq; 1580 struct mmc_command *cmd; 1581 struct mmc_data *data; 1582 u32 events, event_mask; 1583 1584 spin_lock(&host->lock); 1585 events = readl(host->base + MSDC_INT); 1586 event_mask = readl(host->base + MSDC_INTEN); 1587 if ((events & event_mask) & MSDC_INT_SDIOIRQ) 1588 __msdc_enable_sdio_irq(host, 0); 1589 /* clear interrupts */ 1590 writel(events & event_mask, host->base + MSDC_INT); 1591 1592 mrq = host->mrq; 1593 cmd = host->cmd; 1594 data = host->data; 1595 spin_unlock(&host->lock); 1596 1597 if ((events & event_mask) & MSDC_INT_SDIOIRQ) 1598 sdio_signal_irq(mmc); 1599 1600 if ((events & event_mask) & MSDC_INT_CDSC) { 1601 if (host->internal_cd) 1602 mmc_detect_change(mmc, msecs_to_jiffies(20)); 1603 events &= ~MSDC_INT_CDSC; 1604 } 1605 1606 if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ))) 1607 break; 1608 1609 if ((mmc->caps2 & MMC_CAP2_CQE) && 1610 (events & MSDC_INT_CMDQ)) { 1611 msdc_cmdq_irq(host, events); 1612 /* clear interrupts */ 1613 writel(events, host->base + MSDC_INT); 1614 return IRQ_HANDLED; 1615 } 1616 1617 if (!mrq) { 1618 dev_err(host->dev, 1619 "%s: MRQ=NULL; events=%08X; event_mask=%08X\n", 1620 __func__, events, event_mask); 1621 WARN_ON(1); 1622 break; 1623 } 1624 1625 dev_dbg(host->dev, "%s: events=%08X\n", __func__, events); 1626 1627 if (cmd) 1628 msdc_cmd_done(host, events, mrq, cmd); 1629 else if (data) 1630 msdc_data_xfer_done(host, events, mrq, data); 1631 } 1632 1633 return IRQ_HANDLED; 1634 } 1635 1636 static void msdc_init_hw(struct msdc_host *host) 1637 { 1638 u32 val; 1639 u32 tune_reg = host->dev_comp->pad_tune_reg; 1640 1641 if (host->reset) { 1642 reset_control_assert(host->reset); 1643 usleep_range(10, 50); 1644 reset_control_deassert(host->reset); 1645 } 1646 1647 /* Configure to MMC/SD mode, clock free running */ 1648 sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN); 1649 1650 /* Reset */ 1651 msdc_reset_hw(host); 1652 1653 /* Disable and clear all interrupts */ 1654 writel(0, host->base + MSDC_INTEN); 1655 val = readl(host->base + MSDC_INT); 1656 writel(val, host->base + MSDC_INT); 1657 1658 /* Configure card detection */ 1659 if (host->internal_cd) { 1660 sdr_set_field(host->base + MSDC_PS, MSDC_PS_CDDEBOUNCE, 1661 DEFAULT_DEBOUNCE); 1662 sdr_set_bits(host->base + MSDC_PS, MSDC_PS_CDEN); 1663 sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC); 1664 sdr_set_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP); 1665 } else { 1666 sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP); 1667 sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN); 1668 sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC); 1669 } 1670 1671 if (host->top_base) { 1672 writel(0, host->top_base + EMMC_TOP_CONTROL); 1673 writel(0, host->top_base + EMMC_TOP_CMD); 1674 } else { 1675 writel(0, host->base + tune_reg); 1676 } 1677 writel(0, host->base + MSDC_IOCON); 1678 sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0); 1679 writel(0x403c0046, host->base + MSDC_PATCH_BIT); 1680 sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1); 1681 writel(0xffff4089, host->base + MSDC_PATCH_BIT1); 1682 sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL); 1683 1684 if (host->dev_comp->stop_clk_fix) { 1685 sdr_set_field(host->base + MSDC_PATCH_BIT1, 1686 MSDC_PATCH_BIT1_STOP_DLY, 3); 1687 sdr_clr_bits(host->base + SDC_FIFO_CFG, 1688 SDC_FIFO_CFG_WRVALIDSEL); 1689 sdr_clr_bits(host->base + SDC_FIFO_CFG, 1690 SDC_FIFO_CFG_RDVALIDSEL); 1691 } 1692 1693 if (host->dev_comp->busy_check) 1694 sdr_clr_bits(host->base + MSDC_PATCH_BIT1, (1 << 7)); 1695 1696 if (host->dev_comp->async_fifo) { 1697 sdr_set_field(host->base + MSDC_PATCH_BIT2, 1698 MSDC_PB2_RESPWAIT, 3); 1699 if (host->dev_comp->enhance_rx) { 1700 if (host->top_base) 1701 sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, 1702 SDC_RX_ENH_EN); 1703 else 1704 sdr_set_bits(host->base + SDC_ADV_CFG0, 1705 SDC_RX_ENHANCE_EN); 1706 } else { 1707 sdr_set_field(host->base + MSDC_PATCH_BIT2, 1708 MSDC_PB2_RESPSTSENSEL, 2); 1709 sdr_set_field(host->base + MSDC_PATCH_BIT2, 1710 MSDC_PB2_CRCSTSENSEL, 2); 1711 } 1712 /* use async fifo, then no need tune internal delay */ 1713 sdr_clr_bits(host->base + MSDC_PATCH_BIT2, 1714 MSDC_PATCH_BIT2_CFGRESP); 1715 sdr_set_bits(host->base + MSDC_PATCH_BIT2, 1716 MSDC_PATCH_BIT2_CFGCRCSTS); 1717 } 1718 1719 if (host->dev_comp->support_64g) 1720 sdr_set_bits(host->base + MSDC_PATCH_BIT2, 1721 MSDC_PB2_SUPPORT_64G); 1722 if (host->dev_comp->data_tune) { 1723 if (host->top_base) { 1724 sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, 1725 PAD_DAT_RD_RXDLY_SEL); 1726 sdr_clr_bits(host->top_base + EMMC_TOP_CONTROL, 1727 DATA_K_VALUE_SEL); 1728 sdr_set_bits(host->top_base + EMMC_TOP_CMD, 1729 PAD_CMD_RD_RXDLY_SEL); 1730 } else { 1731 sdr_set_bits(host->base + tune_reg, 1732 MSDC_PAD_TUNE_RD_SEL | 1733 MSDC_PAD_TUNE_CMD_SEL); 1734 } 1735 } else { 1736 /* choose clock tune */ 1737 if (host->top_base) 1738 sdr_set_bits(host->top_base + EMMC_TOP_CONTROL, 1739 PAD_RXDLY_SEL); 1740 else 1741 sdr_set_bits(host->base + tune_reg, 1742 MSDC_PAD_TUNE_RXDLYSEL); 1743 } 1744 1745 /* Configure to enable SDIO mode. 1746 * it's must otherwise sdio cmd5 failed 1747 */ 1748 sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO); 1749 1750 /* Config SDIO device detect interrupt function */ 1751 sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE); 1752 sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER); 1753 1754 /* Configure to default data timeout */ 1755 sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3); 1756 1757 host->def_tune_para.iocon = readl(host->base + MSDC_IOCON); 1758 host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON); 1759 if (host->top_base) { 1760 host->def_tune_para.emmc_top_control = 1761 readl(host->top_base + EMMC_TOP_CONTROL); 1762 host->def_tune_para.emmc_top_cmd = 1763 readl(host->top_base + EMMC_TOP_CMD); 1764 host->saved_tune_para.emmc_top_control = 1765 readl(host->top_base + EMMC_TOP_CONTROL); 1766 host->saved_tune_para.emmc_top_cmd = 1767 readl(host->top_base + EMMC_TOP_CMD); 1768 } else { 1769 host->def_tune_para.pad_tune = readl(host->base + tune_reg); 1770 host->saved_tune_para.pad_tune = readl(host->base + tune_reg); 1771 } 1772 dev_dbg(host->dev, "init hardware done!"); 1773 } 1774 1775 static void msdc_deinit_hw(struct msdc_host *host) 1776 { 1777 u32 val; 1778 1779 if (host->internal_cd) { 1780 /* Disabled card-detect */ 1781 sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN); 1782 sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP); 1783 } 1784 1785 /* Disable and clear all interrupts */ 1786 writel(0, host->base + MSDC_INTEN); 1787 1788 val = readl(host->base + MSDC_INT); 1789 writel(val, host->base + MSDC_INT); 1790 } 1791 1792 /* init gpd and bd list in msdc_drv_probe */ 1793 static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma) 1794 { 1795 struct mt_gpdma_desc *gpd = dma->gpd; 1796 struct mt_bdma_desc *bd = dma->bd; 1797 dma_addr_t dma_addr; 1798 int i; 1799 1800 memset(gpd, 0, sizeof(struct mt_gpdma_desc) * 2); 1801 1802 dma_addr = dma->gpd_addr + sizeof(struct mt_gpdma_desc); 1803 gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */ 1804 /* gpd->next is must set for desc DMA 1805 * That's why must alloc 2 gpd structure. 1806 */ 1807 gpd->next = lower_32_bits(dma_addr); 1808 if (host->dev_comp->support_64g) 1809 gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 24; 1810 1811 dma_addr = dma->bd_addr; 1812 gpd->ptr = lower_32_bits(dma->bd_addr); /* physical address */ 1813 if (host->dev_comp->support_64g) 1814 gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 28; 1815 1816 memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM); 1817 for (i = 0; i < (MAX_BD_NUM - 1); i++) { 1818 dma_addr = dma->bd_addr + sizeof(*bd) * (i + 1); 1819 bd[i].next = lower_32_bits(dma_addr); 1820 if (host->dev_comp->support_64g) 1821 bd[i].bd_info |= (upper_32_bits(dma_addr) & 0xf) << 24; 1822 } 1823 } 1824 1825 static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 1826 { 1827 struct msdc_host *host = mmc_priv(mmc); 1828 int ret; 1829 1830 msdc_set_buswidth(host, ios->bus_width); 1831 1832 /* Suspend/Resume will do power off/on */ 1833 switch (ios->power_mode) { 1834 case MMC_POWER_UP: 1835 if (!IS_ERR(mmc->supply.vmmc)) { 1836 msdc_init_hw(host); 1837 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 1838 ios->vdd); 1839 if (ret) { 1840 dev_err(host->dev, "Failed to set vmmc power!\n"); 1841 return; 1842 } 1843 } 1844 break; 1845 case MMC_POWER_ON: 1846 if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { 1847 ret = regulator_enable(mmc->supply.vqmmc); 1848 if (ret) 1849 dev_err(host->dev, "Failed to set vqmmc power!\n"); 1850 else 1851 host->vqmmc_enabled = true; 1852 } 1853 break; 1854 case MMC_POWER_OFF: 1855 if (!IS_ERR(mmc->supply.vmmc)) 1856 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 1857 1858 if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { 1859 regulator_disable(mmc->supply.vqmmc); 1860 host->vqmmc_enabled = false; 1861 } 1862 break; 1863 default: 1864 break; 1865 } 1866 1867 if (host->mclk != ios->clock || host->timing != ios->timing) 1868 msdc_set_mclk(host, ios->timing, ios->clock); 1869 } 1870 1871 static u32 test_delay_bit(u32 delay, u32 bit) 1872 { 1873 bit %= PAD_DELAY_MAX; 1874 return delay & (1 << bit); 1875 } 1876 1877 static int get_delay_len(u32 delay, u32 start_bit) 1878 { 1879 int i; 1880 1881 for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) { 1882 if (test_delay_bit(delay, start_bit + i) == 0) 1883 return i; 1884 } 1885 return PAD_DELAY_MAX - start_bit; 1886 } 1887 1888 static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay) 1889 { 1890 int start = 0, len = 0; 1891 int start_final = 0, len_final = 0; 1892 u8 final_phase = 0xff; 1893 struct msdc_delay_phase delay_phase = { 0, }; 1894 1895 if (delay == 0) { 1896 dev_err(host->dev, "phase error: [map:%x]\n", delay); 1897 delay_phase.final_phase = final_phase; 1898 return delay_phase; 1899 } 1900 1901 while (start < PAD_DELAY_MAX) { 1902 len = get_delay_len(delay, start); 1903 if (len_final < len) { 1904 start_final = start; 1905 len_final = len; 1906 } 1907 start += len ? len : 1; 1908 if (len >= 12 && start_final < 4) 1909 break; 1910 } 1911 1912 /* The rule is that to find the smallest delay cell */ 1913 if (start_final == 0) 1914 final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX; 1915 else 1916 final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX; 1917 dev_info(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n", 1918 delay, len_final, final_phase); 1919 1920 delay_phase.maxlen = len_final; 1921 delay_phase.start = start_final; 1922 delay_phase.final_phase = final_phase; 1923 return delay_phase; 1924 } 1925 1926 static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value) 1927 { 1928 u32 tune_reg = host->dev_comp->pad_tune_reg; 1929 1930 if (host->top_base) 1931 sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, 1932 value); 1933 else 1934 sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, 1935 value); 1936 } 1937 1938 static inline void msdc_set_data_delay(struct msdc_host *host, u32 value) 1939 { 1940 u32 tune_reg = host->dev_comp->pad_tune_reg; 1941 1942 if (host->top_base) 1943 sdr_set_field(host->top_base + EMMC_TOP_CONTROL, 1944 PAD_DAT_RD_RXDLY, value); 1945 else 1946 sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, 1947 value); 1948 } 1949 1950 static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) 1951 { 1952 struct msdc_host *host = mmc_priv(mmc); 1953 u32 rise_delay = 0, fall_delay = 0; 1954 struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; 1955 struct msdc_delay_phase internal_delay_phase; 1956 u8 final_delay, final_maxlen; 1957 u32 internal_delay = 0; 1958 u32 tune_reg = host->dev_comp->pad_tune_reg; 1959 int cmd_err; 1960 int i, j; 1961 1962 if (mmc->ios.timing == MMC_TIMING_MMC_HS200 || 1963 mmc->ios.timing == MMC_TIMING_UHS_SDR104) 1964 sdr_set_field(host->base + tune_reg, 1965 MSDC_PAD_TUNE_CMDRRDLY, 1966 host->hs200_cmd_int_delay); 1967 1968 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 1969 for (i = 0 ; i < PAD_DELAY_MAX; i++) { 1970 msdc_set_cmd_delay(host, i); 1971 /* 1972 * Using the same parameters, it may sometimes pass the test, 1973 * but sometimes it may fail. To make sure the parameters are 1974 * more stable, we test each set of parameters 3 times. 1975 */ 1976 for (j = 0; j < 3; j++) { 1977 mmc_send_tuning(mmc, opcode, &cmd_err); 1978 if (!cmd_err) { 1979 rise_delay |= (1 << i); 1980 } else { 1981 rise_delay &= ~(1 << i); 1982 break; 1983 } 1984 } 1985 } 1986 final_rise_delay = get_best_delay(host, rise_delay); 1987 /* if rising edge has enough margin, then do not scan falling edge */ 1988 if (final_rise_delay.maxlen >= 12 || 1989 (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) 1990 goto skip_fall; 1991 1992 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 1993 for (i = 0; i < PAD_DELAY_MAX; i++) { 1994 msdc_set_cmd_delay(host, i); 1995 /* 1996 * Using the same parameters, it may sometimes pass the test, 1997 * but sometimes it may fail. To make sure the parameters are 1998 * more stable, we test each set of parameters 3 times. 1999 */ 2000 for (j = 0; j < 3; j++) { 2001 mmc_send_tuning(mmc, opcode, &cmd_err); 2002 if (!cmd_err) { 2003 fall_delay |= (1 << i); 2004 } else { 2005 fall_delay &= ~(1 << i); 2006 break; 2007 } 2008 } 2009 } 2010 final_fall_delay = get_best_delay(host, fall_delay); 2011 2012 skip_fall: 2013 final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen); 2014 if (final_fall_delay.maxlen >= 12 && final_fall_delay.start < 4) 2015 final_maxlen = final_fall_delay.maxlen; 2016 if (final_maxlen == final_rise_delay.maxlen) { 2017 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2018 final_delay = final_rise_delay.final_phase; 2019 } else { 2020 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2021 final_delay = final_fall_delay.final_phase; 2022 } 2023 msdc_set_cmd_delay(host, final_delay); 2024 2025 if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay) 2026 goto skip_internal; 2027 2028 for (i = 0; i < PAD_DELAY_MAX; i++) { 2029 sdr_set_field(host->base + tune_reg, 2030 MSDC_PAD_TUNE_CMDRRDLY, i); 2031 mmc_send_tuning(mmc, opcode, &cmd_err); 2032 if (!cmd_err) 2033 internal_delay |= (1 << i); 2034 } 2035 dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay); 2036 internal_delay_phase = get_best_delay(host, internal_delay); 2037 sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, 2038 internal_delay_phase.final_phase); 2039 skip_internal: 2040 dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay); 2041 return final_delay == 0xff ? -EIO : 0; 2042 } 2043 2044 static int hs400_tune_response(struct mmc_host *mmc, u32 opcode) 2045 { 2046 struct msdc_host *host = mmc_priv(mmc); 2047 u32 cmd_delay = 0; 2048 struct msdc_delay_phase final_cmd_delay = { 0,}; 2049 u8 final_delay; 2050 int cmd_err; 2051 int i, j; 2052 2053 /* select EMMC50 PAD CMD tune */ 2054 sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0)); 2055 sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2); 2056 2057 if (mmc->ios.timing == MMC_TIMING_MMC_HS200 || 2058 mmc->ios.timing == MMC_TIMING_UHS_SDR104) 2059 sdr_set_field(host->base + MSDC_PAD_TUNE, 2060 MSDC_PAD_TUNE_CMDRRDLY, 2061 host->hs200_cmd_int_delay); 2062 2063 if (host->hs400_cmd_resp_sel_rising) 2064 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2065 else 2066 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2067 for (i = 0 ; i < PAD_DELAY_MAX; i++) { 2068 sdr_set_field(host->base + PAD_CMD_TUNE, 2069 PAD_CMD_TUNE_RX_DLY3, i); 2070 /* 2071 * Using the same parameters, it may sometimes pass the test, 2072 * but sometimes it may fail. To make sure the parameters are 2073 * more stable, we test each set of parameters 3 times. 2074 */ 2075 for (j = 0; j < 3; j++) { 2076 mmc_send_tuning(mmc, opcode, &cmd_err); 2077 if (!cmd_err) { 2078 cmd_delay |= (1 << i); 2079 } else { 2080 cmd_delay &= ~(1 << i); 2081 break; 2082 } 2083 } 2084 } 2085 final_cmd_delay = get_best_delay(host, cmd_delay); 2086 sdr_set_field(host->base + PAD_CMD_TUNE, PAD_CMD_TUNE_RX_DLY3, 2087 final_cmd_delay.final_phase); 2088 final_delay = final_cmd_delay.final_phase; 2089 2090 dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay); 2091 return final_delay == 0xff ? -EIO : 0; 2092 } 2093 2094 static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) 2095 { 2096 struct msdc_host *host = mmc_priv(mmc); 2097 u32 rise_delay = 0, fall_delay = 0; 2098 struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; 2099 u8 final_delay, final_maxlen; 2100 int i, ret; 2101 2102 sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL, 2103 host->latch_ck); 2104 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); 2105 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); 2106 for (i = 0 ; i < PAD_DELAY_MAX; i++) { 2107 msdc_set_data_delay(host, i); 2108 ret = mmc_send_tuning(mmc, opcode, NULL); 2109 if (!ret) 2110 rise_delay |= (1 << i); 2111 } 2112 final_rise_delay = get_best_delay(host, rise_delay); 2113 /* if rising edge has enough margin, then do not scan falling edge */ 2114 if (final_rise_delay.maxlen >= 12 || 2115 (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) 2116 goto skip_fall; 2117 2118 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); 2119 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); 2120 for (i = 0; i < PAD_DELAY_MAX; i++) { 2121 msdc_set_data_delay(host, i); 2122 ret = mmc_send_tuning(mmc, opcode, NULL); 2123 if (!ret) 2124 fall_delay |= (1 << i); 2125 } 2126 final_fall_delay = get_best_delay(host, fall_delay); 2127 2128 skip_fall: 2129 final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen); 2130 if (final_maxlen == final_rise_delay.maxlen) { 2131 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); 2132 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); 2133 final_delay = final_rise_delay.final_phase; 2134 } else { 2135 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); 2136 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); 2137 final_delay = final_fall_delay.final_phase; 2138 } 2139 msdc_set_data_delay(host, final_delay); 2140 2141 dev_dbg(host->dev, "Final data pad delay: %x\n", final_delay); 2142 return final_delay == 0xff ? -EIO : 0; 2143 } 2144 2145 /* 2146 * MSDC IP which supports data tune + async fifo can do CMD/DAT tune 2147 * together, which can save the tuning time. 2148 */ 2149 static int msdc_tune_together(struct mmc_host *mmc, u32 opcode) 2150 { 2151 struct msdc_host *host = mmc_priv(mmc); 2152 u32 rise_delay = 0, fall_delay = 0; 2153 struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; 2154 u8 final_delay, final_maxlen; 2155 int i, ret; 2156 2157 sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL, 2158 host->latch_ck); 2159 2160 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2161 sdr_clr_bits(host->base + MSDC_IOCON, 2162 MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); 2163 for (i = 0 ; i < PAD_DELAY_MAX; i++) { 2164 msdc_set_cmd_delay(host, i); 2165 msdc_set_data_delay(host, i); 2166 ret = mmc_send_tuning(mmc, opcode, NULL); 2167 if (!ret) 2168 rise_delay |= (1 << i); 2169 } 2170 final_rise_delay = get_best_delay(host, rise_delay); 2171 /* if rising edge has enough margin, then do not scan falling edge */ 2172 if (final_rise_delay.maxlen >= 12 || 2173 (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) 2174 goto skip_fall; 2175 2176 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2177 sdr_set_bits(host->base + MSDC_IOCON, 2178 MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); 2179 for (i = 0; i < PAD_DELAY_MAX; i++) { 2180 msdc_set_cmd_delay(host, i); 2181 msdc_set_data_delay(host, i); 2182 ret = mmc_send_tuning(mmc, opcode, NULL); 2183 if (!ret) 2184 fall_delay |= (1 << i); 2185 } 2186 final_fall_delay = get_best_delay(host, fall_delay); 2187 2188 skip_fall: 2189 final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen); 2190 if (final_maxlen == final_rise_delay.maxlen) { 2191 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2192 sdr_clr_bits(host->base + MSDC_IOCON, 2193 MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); 2194 final_delay = final_rise_delay.final_phase; 2195 } else { 2196 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2197 sdr_set_bits(host->base + MSDC_IOCON, 2198 MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); 2199 final_delay = final_fall_delay.final_phase; 2200 } 2201 2202 msdc_set_cmd_delay(host, final_delay); 2203 msdc_set_data_delay(host, final_delay); 2204 2205 dev_dbg(host->dev, "Final pad delay: %x\n", final_delay); 2206 return final_delay == 0xff ? -EIO : 0; 2207 } 2208 2209 static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode) 2210 { 2211 struct msdc_host *host = mmc_priv(mmc); 2212 int ret; 2213 u32 tune_reg = host->dev_comp->pad_tune_reg; 2214 2215 if (host->dev_comp->data_tune && host->dev_comp->async_fifo) { 2216 ret = msdc_tune_together(mmc, opcode); 2217 if (host->hs400_mode) { 2218 sdr_clr_bits(host->base + MSDC_IOCON, 2219 MSDC_IOCON_DSPL | MSDC_IOCON_W_DSPL); 2220 msdc_set_data_delay(host, 0); 2221 } 2222 goto tune_done; 2223 } 2224 if (host->hs400_mode && 2225 host->dev_comp->hs400_tune) 2226 ret = hs400_tune_response(mmc, opcode); 2227 else 2228 ret = msdc_tune_response(mmc, opcode); 2229 if (ret == -EIO) { 2230 dev_err(host->dev, "Tune response fail!\n"); 2231 return ret; 2232 } 2233 if (host->hs400_mode == false) { 2234 ret = msdc_tune_data(mmc, opcode); 2235 if (ret == -EIO) 2236 dev_err(host->dev, "Tune data fail!\n"); 2237 } 2238 2239 tune_done: 2240 host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON); 2241 host->saved_tune_para.pad_tune = readl(host->base + tune_reg); 2242 host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE); 2243 if (host->top_base) { 2244 host->saved_tune_para.emmc_top_control = readl(host->top_base + 2245 EMMC_TOP_CONTROL); 2246 host->saved_tune_para.emmc_top_cmd = readl(host->top_base + 2247 EMMC_TOP_CMD); 2248 } 2249 return ret; 2250 } 2251 2252 static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 2253 { 2254 struct msdc_host *host = mmc_priv(mmc); 2255 host->hs400_mode = true; 2256 2257 if (host->top_base) 2258 writel(host->hs400_ds_delay, 2259 host->top_base + EMMC50_PAD_DS_TUNE); 2260 else 2261 writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE); 2262 /* hs400 mode must set it to 0 */ 2263 sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS); 2264 /* to improve read performance, set outstanding to 2 */ 2265 sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2); 2266 2267 return 0; 2268 } 2269 2270 static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card) 2271 { 2272 struct msdc_host *host = mmc_priv(mmc); 2273 struct msdc_delay_phase dly1_delay; 2274 u32 val, result_dly1 = 0; 2275 u8 *ext_csd; 2276 int i, ret; 2277 2278 if (host->top_base) { 2279 sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE, 2280 PAD_DS_DLY_SEL); 2281 if (host->hs400_ds_dly3) 2282 sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE, 2283 PAD_DS_DLY3, host->hs400_ds_dly3); 2284 } else { 2285 sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL); 2286 if (host->hs400_ds_dly3) 2287 sdr_set_field(host->base + PAD_DS_TUNE, 2288 PAD_DS_TUNE_DLY3, host->hs400_ds_dly3); 2289 } 2290 2291 host->hs400_tuning = true; 2292 for (i = 0; i < PAD_DELAY_MAX; i++) { 2293 if (host->top_base) 2294 sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE, 2295 PAD_DS_DLY1, i); 2296 else 2297 sdr_set_field(host->base + PAD_DS_TUNE, 2298 PAD_DS_TUNE_DLY1, i); 2299 ret = mmc_get_ext_csd(card, &ext_csd); 2300 if (!ret) { 2301 result_dly1 |= (1 << i); 2302 kfree(ext_csd); 2303 } 2304 } 2305 host->hs400_tuning = false; 2306 2307 dly1_delay = get_best_delay(host, result_dly1); 2308 if (dly1_delay.maxlen == 0) { 2309 dev_err(host->dev, "Failed to get DLY1 delay!\n"); 2310 goto fail; 2311 } 2312 if (host->top_base) 2313 sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE, 2314 PAD_DS_DLY1, dly1_delay.final_phase); 2315 else 2316 sdr_set_field(host->base + PAD_DS_TUNE, 2317 PAD_DS_TUNE_DLY1, dly1_delay.final_phase); 2318 2319 if (host->top_base) 2320 val = readl(host->top_base + EMMC50_PAD_DS_TUNE); 2321 else 2322 val = readl(host->base + PAD_DS_TUNE); 2323 2324 dev_info(host->dev, "Fianl PAD_DS_TUNE: 0x%x\n", val); 2325 2326 return 0; 2327 2328 fail: 2329 dev_err(host->dev, "Failed to tuning DS pin delay!\n"); 2330 return -EIO; 2331 } 2332 2333 static void msdc_hw_reset(struct mmc_host *mmc) 2334 { 2335 struct msdc_host *host = mmc_priv(mmc); 2336 2337 sdr_set_bits(host->base + EMMC_IOCON, 1); 2338 udelay(10); /* 10us is enough */ 2339 sdr_clr_bits(host->base + EMMC_IOCON, 1); 2340 } 2341 2342 static void msdc_ack_sdio_irq(struct mmc_host *mmc) 2343 { 2344 unsigned long flags; 2345 struct msdc_host *host = mmc_priv(mmc); 2346 2347 spin_lock_irqsave(&host->lock, flags); 2348 __msdc_enable_sdio_irq(host, 1); 2349 spin_unlock_irqrestore(&host->lock, flags); 2350 } 2351 2352 static int msdc_get_cd(struct mmc_host *mmc) 2353 { 2354 struct msdc_host *host = mmc_priv(mmc); 2355 int val; 2356 2357 if (mmc->caps & MMC_CAP_NONREMOVABLE) 2358 return 1; 2359 2360 if (!host->internal_cd) 2361 return mmc_gpio_get_cd(mmc); 2362 2363 val = readl(host->base + MSDC_PS) & MSDC_PS_CDSTS; 2364 if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) 2365 return !!val; 2366 else 2367 return !val; 2368 } 2369 2370 static void msdc_hs400_enhanced_strobe(struct mmc_host *mmc, 2371 struct mmc_ios *ios) 2372 { 2373 struct msdc_host *host = mmc_priv(mmc); 2374 2375 if (ios->enhanced_strobe) { 2376 msdc_prepare_hs400_tuning(mmc, ios); 2377 sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_PADCMD_LATCHCK, 1); 2378 sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_CMD_RESP_SEL, 1); 2379 sdr_set_field(host->base + EMMC50_CFG1, EMMC50_CFG1_DS_CFG, 1); 2380 2381 sdr_clr_bits(host->base + CQHCI_SETTING, CQHCI_RD_CMD_WND_SEL); 2382 sdr_clr_bits(host->base + CQHCI_SETTING, CQHCI_WR_CMD_WND_SEL); 2383 sdr_clr_bits(host->base + EMMC51_CFG0, CMDQ_RDAT_CNT); 2384 } else { 2385 sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_PADCMD_LATCHCK, 0); 2386 sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_CMD_RESP_SEL, 0); 2387 sdr_set_field(host->base + EMMC50_CFG1, EMMC50_CFG1_DS_CFG, 0); 2388 2389 sdr_set_bits(host->base + CQHCI_SETTING, CQHCI_RD_CMD_WND_SEL); 2390 sdr_set_bits(host->base + CQHCI_SETTING, CQHCI_WR_CMD_WND_SEL); 2391 sdr_set_field(host->base + EMMC51_CFG0, CMDQ_RDAT_CNT, 0xb4); 2392 } 2393 } 2394 2395 static void msdc_cqe_enable(struct mmc_host *mmc) 2396 { 2397 struct msdc_host *host = mmc_priv(mmc); 2398 2399 /* enable cmdq irq */ 2400 writel(MSDC_INT_CMDQ, host->base + MSDC_INTEN); 2401 /* enable busy check */ 2402 sdr_set_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL); 2403 /* default write data / busy timeout 20s */ 2404 msdc_set_busy_timeout(host, 20 * 1000000000ULL, 0); 2405 /* default read data timeout 1s */ 2406 msdc_set_timeout(host, 1000000000ULL, 0); 2407 } 2408 2409 static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery) 2410 { 2411 struct msdc_host *host = mmc_priv(mmc); 2412 unsigned int val = 0; 2413 2414 /* disable cmdq irq */ 2415 sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INT_CMDQ); 2416 /* disable busy check */ 2417 sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL); 2418 2419 if (recovery) { 2420 sdr_set_field(host->base + MSDC_DMA_CTRL, 2421 MSDC_DMA_CTRL_STOP, 1); 2422 if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val, 2423 !(val & MSDC_DMA_CFG_STS), 1, 3000))) 2424 return; 2425 msdc_reset_hw(host); 2426 } 2427 } 2428 2429 static void msdc_cqe_pre_enable(struct mmc_host *mmc) 2430 { 2431 struct cqhci_host *cq_host = mmc->cqe_private; 2432 u32 reg; 2433 2434 reg = cqhci_readl(cq_host, CQHCI_CFG); 2435 reg |= CQHCI_ENABLE; 2436 cqhci_writel(cq_host, reg, CQHCI_CFG); 2437 } 2438 2439 static void msdc_cqe_post_disable(struct mmc_host *mmc) 2440 { 2441 struct cqhci_host *cq_host = mmc->cqe_private; 2442 u32 reg; 2443 2444 reg = cqhci_readl(cq_host, CQHCI_CFG); 2445 reg &= ~CQHCI_ENABLE; 2446 cqhci_writel(cq_host, reg, CQHCI_CFG); 2447 } 2448 2449 static const struct mmc_host_ops mt_msdc_ops = { 2450 .post_req = msdc_post_req, 2451 .pre_req = msdc_pre_req, 2452 .request = msdc_ops_request, 2453 .set_ios = msdc_ops_set_ios, 2454 .get_ro = mmc_gpio_get_ro, 2455 .get_cd = msdc_get_cd, 2456 .hs400_enhanced_strobe = msdc_hs400_enhanced_strobe, 2457 .enable_sdio_irq = msdc_enable_sdio_irq, 2458 .ack_sdio_irq = msdc_ack_sdio_irq, 2459 .start_signal_voltage_switch = msdc_ops_switch_volt, 2460 .card_busy = msdc_card_busy, 2461 .execute_tuning = msdc_execute_tuning, 2462 .prepare_hs400_tuning = msdc_prepare_hs400_tuning, 2463 .execute_hs400_tuning = msdc_execute_hs400_tuning, 2464 .hw_reset = msdc_hw_reset, 2465 }; 2466 2467 static const struct cqhci_host_ops msdc_cmdq_ops = { 2468 .enable = msdc_cqe_enable, 2469 .disable = msdc_cqe_disable, 2470 .pre_enable = msdc_cqe_pre_enable, 2471 .post_disable = msdc_cqe_post_disable, 2472 }; 2473 2474 static void msdc_of_property_parse(struct platform_device *pdev, 2475 struct msdc_host *host) 2476 { 2477 of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck", 2478 &host->latch_ck); 2479 2480 of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay", 2481 &host->hs400_ds_delay); 2482 2483 of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-ds-dly3", 2484 &host->hs400_ds_dly3); 2485 2486 of_property_read_u32(pdev->dev.of_node, "mediatek,hs200-cmd-int-delay", 2487 &host->hs200_cmd_int_delay); 2488 2489 of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-cmd-int-delay", 2490 &host->hs400_cmd_int_delay); 2491 2492 if (of_property_read_bool(pdev->dev.of_node, 2493 "mediatek,hs400-cmd-resp-sel-rising")) 2494 host->hs400_cmd_resp_sel_rising = true; 2495 else 2496 host->hs400_cmd_resp_sel_rising = false; 2497 2498 if (of_property_read_bool(pdev->dev.of_node, 2499 "supports-cqe")) 2500 host->cqhci = true; 2501 else 2502 host->cqhci = false; 2503 } 2504 2505 static int msdc_of_clock_parse(struct platform_device *pdev, 2506 struct msdc_host *host) 2507 { 2508 int ret; 2509 2510 host->src_clk = devm_clk_get(&pdev->dev, "source"); 2511 if (IS_ERR(host->src_clk)) 2512 return PTR_ERR(host->src_clk); 2513 2514 host->h_clk = devm_clk_get(&pdev->dev, "hclk"); 2515 if (IS_ERR(host->h_clk)) 2516 return PTR_ERR(host->h_clk); 2517 2518 host->bus_clk = devm_clk_get_optional(&pdev->dev, "bus_clk"); 2519 if (IS_ERR(host->bus_clk)) 2520 host->bus_clk = NULL; 2521 2522 /*source clock control gate is optional clock*/ 2523 host->src_clk_cg = devm_clk_get_optional(&pdev->dev, "source_cg"); 2524 if (IS_ERR(host->src_clk_cg)) 2525 host->src_clk_cg = NULL; 2526 2527 host->sys_clk_cg = devm_clk_get_optional(&pdev->dev, "sys_cg"); 2528 if (IS_ERR(host->sys_clk_cg)) 2529 host->sys_clk_cg = NULL; 2530 2531 /* If present, always enable for this clock gate */ 2532 clk_prepare_enable(host->sys_clk_cg); 2533 2534 host->bulk_clks[0].id = "pclk_cg"; 2535 host->bulk_clks[1].id = "axi_cg"; 2536 host->bulk_clks[2].id = "ahb_cg"; 2537 ret = devm_clk_bulk_get_optional(&pdev->dev, MSDC_NR_CLOCKS, 2538 host->bulk_clks); 2539 if (ret) { 2540 dev_err(&pdev->dev, "Cannot get pclk/axi/ahb clock gates\n"); 2541 return ret; 2542 } 2543 2544 return 0; 2545 } 2546 2547 static int msdc_drv_probe(struct platform_device *pdev) 2548 { 2549 struct mmc_host *mmc; 2550 struct msdc_host *host; 2551 struct resource *res; 2552 int ret; 2553 2554 if (!pdev->dev.of_node) { 2555 dev_err(&pdev->dev, "No DT found\n"); 2556 return -EINVAL; 2557 } 2558 2559 /* Allocate MMC host for this device */ 2560 mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev); 2561 if (!mmc) 2562 return -ENOMEM; 2563 2564 host = mmc_priv(mmc); 2565 ret = mmc_of_parse(mmc); 2566 if (ret) 2567 goto host_free; 2568 2569 host->base = devm_platform_ioremap_resource(pdev, 0); 2570 if (IS_ERR(host->base)) { 2571 ret = PTR_ERR(host->base); 2572 goto host_free; 2573 } 2574 2575 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2576 if (res) { 2577 host->top_base = devm_ioremap_resource(&pdev->dev, res); 2578 if (IS_ERR(host->top_base)) 2579 host->top_base = NULL; 2580 } 2581 2582 ret = mmc_regulator_get_supply(mmc); 2583 if (ret) 2584 goto host_free; 2585 2586 ret = msdc_of_clock_parse(pdev, host); 2587 if (ret) 2588 goto host_free; 2589 2590 host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev, 2591 "hrst"); 2592 if (IS_ERR(host->reset)) { 2593 ret = PTR_ERR(host->reset); 2594 goto host_free; 2595 } 2596 2597 host->irq = platform_get_irq(pdev, 0); 2598 if (host->irq < 0) { 2599 ret = -EINVAL; 2600 goto host_free; 2601 } 2602 2603 host->pinctrl = devm_pinctrl_get(&pdev->dev); 2604 if (IS_ERR(host->pinctrl)) { 2605 ret = PTR_ERR(host->pinctrl); 2606 dev_err(&pdev->dev, "Cannot find pinctrl!\n"); 2607 goto host_free; 2608 } 2609 2610 host->pins_default = pinctrl_lookup_state(host->pinctrl, "default"); 2611 if (IS_ERR(host->pins_default)) { 2612 ret = PTR_ERR(host->pins_default); 2613 dev_err(&pdev->dev, "Cannot find pinctrl default!\n"); 2614 goto host_free; 2615 } 2616 2617 host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs"); 2618 if (IS_ERR(host->pins_uhs)) { 2619 ret = PTR_ERR(host->pins_uhs); 2620 dev_err(&pdev->dev, "Cannot find pinctrl uhs!\n"); 2621 goto host_free; 2622 } 2623 2624 msdc_of_property_parse(pdev, host); 2625 2626 host->dev = &pdev->dev; 2627 host->dev_comp = of_device_get_match_data(&pdev->dev); 2628 host->src_clk_freq = clk_get_rate(host->src_clk); 2629 /* Set host parameters to mmc */ 2630 mmc->ops = &mt_msdc_ops; 2631 if (host->dev_comp->clk_div_bits == 8) 2632 mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255); 2633 else 2634 mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095); 2635 2636 if (!(mmc->caps & MMC_CAP_NONREMOVABLE) && 2637 !mmc_can_gpio_cd(mmc) && 2638 host->dev_comp->use_internal_cd) { 2639 /* 2640 * Is removable but no GPIO declared, so 2641 * use internal functionality. 2642 */ 2643 host->internal_cd = true; 2644 } 2645 2646 if (mmc->caps & MMC_CAP_SDIO_IRQ) 2647 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 2648 2649 mmc->caps |= MMC_CAP_CMD23; 2650 if (host->cqhci) 2651 mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; 2652 /* MMC core transfer sizes tunable parameters */ 2653 mmc->max_segs = MAX_BD_NUM; 2654 if (host->dev_comp->support_64g) 2655 mmc->max_seg_size = BDMA_DESC_BUFLEN_EXT; 2656 else 2657 mmc->max_seg_size = BDMA_DESC_BUFLEN; 2658 mmc->max_blk_size = 2048; 2659 mmc->max_req_size = 512 * 1024; 2660 mmc->max_blk_count = mmc->max_req_size / 512; 2661 if (host->dev_comp->support_64g) 2662 host->dma_mask = DMA_BIT_MASK(36); 2663 else 2664 host->dma_mask = DMA_BIT_MASK(32); 2665 mmc_dev(mmc)->dma_mask = &host->dma_mask; 2666 2667 host->timeout_clks = 3 * 1048576; 2668 host->dma.gpd = dma_alloc_coherent(&pdev->dev, 2669 2 * sizeof(struct mt_gpdma_desc), 2670 &host->dma.gpd_addr, GFP_KERNEL); 2671 host->dma.bd = dma_alloc_coherent(&pdev->dev, 2672 MAX_BD_NUM * sizeof(struct mt_bdma_desc), 2673 &host->dma.bd_addr, GFP_KERNEL); 2674 if (!host->dma.gpd || !host->dma.bd) { 2675 ret = -ENOMEM; 2676 goto release_mem; 2677 } 2678 msdc_init_gpd_bd(host, &host->dma); 2679 INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout); 2680 spin_lock_init(&host->lock); 2681 2682 platform_set_drvdata(pdev, mmc); 2683 ret = msdc_ungate_clock(host); 2684 if (ret) { 2685 dev_err(&pdev->dev, "Cannot ungate clocks!\n"); 2686 goto release_mem; 2687 } 2688 msdc_init_hw(host); 2689 2690 if (mmc->caps2 & MMC_CAP2_CQE) { 2691 host->cq_host = devm_kzalloc(mmc->parent, 2692 sizeof(*host->cq_host), 2693 GFP_KERNEL); 2694 if (!host->cq_host) { 2695 ret = -ENOMEM; 2696 goto host_free; 2697 } 2698 host->cq_host->caps |= CQHCI_TASK_DESC_SZ_128; 2699 host->cq_host->mmio = host->base + 0x800; 2700 host->cq_host->ops = &msdc_cmdq_ops; 2701 ret = cqhci_init(host->cq_host, mmc, true); 2702 if (ret) 2703 goto host_free; 2704 mmc->max_segs = 128; 2705 /* cqhci 16bit length */ 2706 /* 0 size, means 65536 so we don't have to -1 here */ 2707 mmc->max_seg_size = 64 * 1024; 2708 } 2709 2710 ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq, 2711 IRQF_TRIGGER_NONE, pdev->name, host); 2712 if (ret) 2713 goto release; 2714 2715 pm_runtime_set_active(host->dev); 2716 pm_runtime_set_autosuspend_delay(host->dev, MTK_MMC_AUTOSUSPEND_DELAY); 2717 pm_runtime_use_autosuspend(host->dev); 2718 pm_runtime_enable(host->dev); 2719 ret = mmc_add_host(mmc); 2720 2721 if (ret) 2722 goto end; 2723 2724 return 0; 2725 end: 2726 pm_runtime_disable(host->dev); 2727 release: 2728 platform_set_drvdata(pdev, NULL); 2729 msdc_deinit_hw(host); 2730 msdc_gate_clock(host); 2731 release_mem: 2732 if (host->dma.gpd) 2733 dma_free_coherent(&pdev->dev, 2734 2 * sizeof(struct mt_gpdma_desc), 2735 host->dma.gpd, host->dma.gpd_addr); 2736 if (host->dma.bd) 2737 dma_free_coherent(&pdev->dev, 2738 MAX_BD_NUM * sizeof(struct mt_bdma_desc), 2739 host->dma.bd, host->dma.bd_addr); 2740 host_free: 2741 mmc_free_host(mmc); 2742 2743 return ret; 2744 } 2745 2746 static int msdc_drv_remove(struct platform_device *pdev) 2747 { 2748 struct mmc_host *mmc; 2749 struct msdc_host *host; 2750 2751 mmc = platform_get_drvdata(pdev); 2752 host = mmc_priv(mmc); 2753 2754 pm_runtime_get_sync(host->dev); 2755 2756 platform_set_drvdata(pdev, NULL); 2757 mmc_remove_host(mmc); 2758 msdc_deinit_hw(host); 2759 msdc_gate_clock(host); 2760 2761 pm_runtime_disable(host->dev); 2762 pm_runtime_put_noidle(host->dev); 2763 dma_free_coherent(&pdev->dev, 2764 2 * sizeof(struct mt_gpdma_desc), 2765 host->dma.gpd, host->dma.gpd_addr); 2766 dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc), 2767 host->dma.bd, host->dma.bd_addr); 2768 2769 mmc_free_host(mmc); 2770 2771 return 0; 2772 } 2773 2774 static void msdc_save_reg(struct msdc_host *host) 2775 { 2776 u32 tune_reg = host->dev_comp->pad_tune_reg; 2777 2778 host->save_para.msdc_cfg = readl(host->base + MSDC_CFG); 2779 host->save_para.iocon = readl(host->base + MSDC_IOCON); 2780 host->save_para.sdc_cfg = readl(host->base + SDC_CFG); 2781 host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT); 2782 host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1); 2783 host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2); 2784 host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE); 2785 host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE); 2786 host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0); 2787 host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3); 2788 host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG); 2789 if (host->top_base) { 2790 host->save_para.emmc_top_control = 2791 readl(host->top_base + EMMC_TOP_CONTROL); 2792 host->save_para.emmc_top_cmd = 2793 readl(host->top_base + EMMC_TOP_CMD); 2794 host->save_para.emmc50_pad_ds_tune = 2795 readl(host->top_base + EMMC50_PAD_DS_TUNE); 2796 } else { 2797 host->save_para.pad_tune = readl(host->base + tune_reg); 2798 } 2799 } 2800 2801 static void msdc_restore_reg(struct msdc_host *host) 2802 { 2803 struct mmc_host *mmc = mmc_from_priv(host); 2804 u32 tune_reg = host->dev_comp->pad_tune_reg; 2805 2806 writel(host->save_para.msdc_cfg, host->base + MSDC_CFG); 2807 writel(host->save_para.iocon, host->base + MSDC_IOCON); 2808 writel(host->save_para.sdc_cfg, host->base + SDC_CFG); 2809 writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT); 2810 writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1); 2811 writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2); 2812 writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE); 2813 writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE); 2814 writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0); 2815 writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3); 2816 writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG); 2817 if (host->top_base) { 2818 writel(host->save_para.emmc_top_control, 2819 host->top_base + EMMC_TOP_CONTROL); 2820 writel(host->save_para.emmc_top_cmd, 2821 host->top_base + EMMC_TOP_CMD); 2822 writel(host->save_para.emmc50_pad_ds_tune, 2823 host->top_base + EMMC50_PAD_DS_TUNE); 2824 } else { 2825 writel(host->save_para.pad_tune, host->base + tune_reg); 2826 } 2827 2828 if (sdio_irq_claimed(mmc)) 2829 __msdc_enable_sdio_irq(host, 1); 2830 } 2831 2832 static int __maybe_unused msdc_runtime_suspend(struct device *dev) 2833 { 2834 struct mmc_host *mmc = dev_get_drvdata(dev); 2835 struct msdc_host *host = mmc_priv(mmc); 2836 2837 msdc_save_reg(host); 2838 msdc_gate_clock(host); 2839 return 0; 2840 } 2841 2842 static int __maybe_unused msdc_runtime_resume(struct device *dev) 2843 { 2844 struct mmc_host *mmc = dev_get_drvdata(dev); 2845 struct msdc_host *host = mmc_priv(mmc); 2846 int ret; 2847 2848 ret = msdc_ungate_clock(host); 2849 if (ret) 2850 return ret; 2851 2852 msdc_restore_reg(host); 2853 return 0; 2854 } 2855 2856 static int __maybe_unused msdc_suspend(struct device *dev) 2857 { 2858 struct mmc_host *mmc = dev_get_drvdata(dev); 2859 int ret; 2860 2861 if (mmc->caps2 & MMC_CAP2_CQE) { 2862 ret = cqhci_suspend(mmc); 2863 if (ret) 2864 return ret; 2865 } 2866 2867 return pm_runtime_force_suspend(dev); 2868 } 2869 2870 static int __maybe_unused msdc_resume(struct device *dev) 2871 { 2872 return pm_runtime_force_resume(dev); 2873 } 2874 2875 static const struct dev_pm_ops msdc_dev_pm_ops = { 2876 SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume) 2877 SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL) 2878 }; 2879 2880 static struct platform_driver mt_msdc_driver = { 2881 .probe = msdc_drv_probe, 2882 .remove = msdc_drv_remove, 2883 .driver = { 2884 .name = "mtk-msdc", 2885 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 2886 .of_match_table = msdc_of_ids, 2887 .pm = &msdc_dev_pm_ops, 2888 }, 2889 }; 2890 2891 module_platform_driver(mt_msdc_driver); 2892 MODULE_LICENSE("GPL v2"); 2893 MODULE_DESCRIPTION("MediaTek SD/MMC Card Driver"); 2894