1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2014-2015, 2022 MediaTek Inc. 4 * Author: Chaotian.Jing <chaotian.jing@mediatek.com> 5 */ 6 7 #include <linux/module.h> 8 #include <linux/bitops.h> 9 #include <linux/clk.h> 10 #include <linux/delay.h> 11 #include <linux/dma-mapping.h> 12 #include <linux/iopoll.h> 13 #include <linux/ioport.h> 14 #include <linux/irq.h> 15 #include <linux/of.h> 16 #include <linux/pinctrl/consumer.h> 17 #include <linux/platform_device.h> 18 #include <linux/pm.h> 19 #include <linux/pm_runtime.h> 20 #include <linux/pm_wakeirq.h> 21 #include <linux/regulator/consumer.h> 22 #include <linux/slab.h> 23 #include <linux/spinlock.h> 24 #include <linux/interrupt.h> 25 #include <linux/reset.h> 26 27 #include <linux/mmc/card.h> 28 #include <linux/mmc/core.h> 29 #include <linux/mmc/host.h> 30 #include <linux/mmc/mmc.h> 31 #include <linux/mmc/sd.h> 32 #include <linux/mmc/sdio.h> 33 #include <linux/mmc/slot-gpio.h> 34 35 #include "cqhci.h" 36 #include "mmc_hsq.h" 37 38 #define MAX_BD_NUM 1024 39 #define MSDC_NR_CLOCKS 3 40 41 /*--------------------------------------------------------------------------*/ 42 /* Common Definition */ 43 /*--------------------------------------------------------------------------*/ 44 #define MSDC_BUS_1BITS 0x0 45 #define MSDC_BUS_4BITS 0x1 46 #define MSDC_BUS_8BITS 0x2 47 48 #define MSDC_BURST_64B 0x6 49 50 /*--------------------------------------------------------------------------*/ 51 /* Register Offset */ 52 /*--------------------------------------------------------------------------*/ 53 #define MSDC_CFG 0x0 54 #define MSDC_IOCON 0x04 55 #define MSDC_PS 0x08 56 #define MSDC_INT 0x0c 57 #define MSDC_INTEN 0x10 58 #define MSDC_FIFOCS 0x14 59 #define SDC_CFG 0x30 60 #define SDC_CMD 0x34 61 #define SDC_ARG 0x38 62 #define SDC_STS 0x3c 63 #define SDC_RESP0 0x40 64 #define SDC_RESP1 0x44 65 #define SDC_RESP2 0x48 66 #define SDC_RESP3 0x4c 67 #define SDC_BLK_NUM 0x50 68 #define SDC_ADV_CFG0 0x64 69 #define 
MSDC_NEW_RX_CFG 0x68 70 #define EMMC_IOCON 0x7c 71 #define SDC_ACMD_RESP 0x80 72 #define DMA_SA_H4BIT 0x8c 73 #define MSDC_DMA_SA 0x90 74 #define MSDC_DMA_CTRL 0x98 75 #define MSDC_DMA_CFG 0x9c 76 #define MSDC_PATCH_BIT 0xb0 77 #define MSDC_PATCH_BIT1 0xb4 78 #define MSDC_PATCH_BIT2 0xb8 79 #define MSDC_PAD_TUNE 0xec 80 #define MSDC_PAD_TUNE0 0xf0 81 #define PAD_DS_TUNE 0x188 82 #define PAD_CMD_TUNE 0x18c 83 #define EMMC51_CFG0 0x204 84 #define EMMC50_CFG0 0x208 85 #define EMMC50_CFG1 0x20c 86 #define EMMC50_CFG3 0x220 87 #define SDC_FIFO_CFG 0x228 88 #define CQHCI_SETTING 0x7fc 89 90 /*--------------------------------------------------------------------------*/ 91 /* Top Pad Register Offset */ 92 /*--------------------------------------------------------------------------*/ 93 #define EMMC_TOP_CONTROL 0x00 94 #define EMMC_TOP_CMD 0x04 95 #define EMMC50_PAD_DS_TUNE 0x0c 96 #define LOOP_TEST_CONTROL 0x30 97 98 /*--------------------------------------------------------------------------*/ 99 /* Register Mask */ 100 /*--------------------------------------------------------------------------*/ 101 102 /* MSDC_CFG mask */ 103 #define MSDC_CFG_MODE BIT(0) /* RW */ 104 #define MSDC_CFG_CKPDN BIT(1) /* RW */ 105 #define MSDC_CFG_RST BIT(2) /* RW */ 106 #define MSDC_CFG_PIO BIT(3) /* RW */ 107 #define MSDC_CFG_CKDRVEN BIT(4) /* RW */ 108 #define MSDC_CFG_BV18SDT BIT(5) /* RW */ 109 #define MSDC_CFG_BV18PSS BIT(6) /* R */ 110 #define MSDC_CFG_CKSTB BIT(7) /* R */ 111 #define MSDC_CFG_CKDIV GENMASK(15, 8) /* RW */ 112 #define MSDC_CFG_CKMOD GENMASK(17, 16) /* RW */ 113 #define MSDC_CFG_HS400_CK_MODE BIT(18) /* RW */ 114 #define MSDC_CFG_HS400_CK_MODE_EXTRA BIT(22) /* RW */ 115 #define MSDC_CFG_CKDIV_EXTRA GENMASK(19, 8) /* RW */ 116 #define MSDC_CFG_CKMOD_EXTRA GENMASK(21, 20) /* RW */ 117 118 /* MSDC_IOCON mask */ 119 #define MSDC_IOCON_SDR104CKS BIT(0) /* RW */ 120 #define MSDC_IOCON_RSPL BIT(1) /* RW */ 121 #define MSDC_IOCON_DSPL BIT(2) /* RW */ 122 #define 
MSDC_IOCON_DDLSEL BIT(3) /* RW */ 123 #define MSDC_IOCON_DDR50CKD BIT(4) /* RW */ 124 #define MSDC_IOCON_DSPLSEL BIT(5) /* RW */ 125 #define MSDC_IOCON_W_DSPL BIT(8) /* RW */ 126 #define MSDC_IOCON_D0SPL BIT(16) /* RW */ 127 #define MSDC_IOCON_D1SPL BIT(17) /* RW */ 128 #define MSDC_IOCON_D2SPL BIT(18) /* RW */ 129 #define MSDC_IOCON_D3SPL BIT(19) /* RW */ 130 #define MSDC_IOCON_D4SPL BIT(20) /* RW */ 131 #define MSDC_IOCON_D5SPL BIT(21) /* RW */ 132 #define MSDC_IOCON_D6SPL BIT(22) /* RW */ 133 #define MSDC_IOCON_D7SPL BIT(23) /* RW */ 134 #define MSDC_IOCON_RISCSZ GENMASK(25, 24) /* RW */ 135 136 /* MSDC_PS mask */ 137 #define MSDC_PS_CDEN BIT(0) /* RW */ 138 #define MSDC_PS_CDSTS BIT(1) /* R */ 139 #define MSDC_PS_CDDEBOUNCE GENMASK(15, 12) /* RW */ 140 #define MSDC_PS_DAT GENMASK(23, 16) /* R */ 141 #define MSDC_PS_DATA1 BIT(17) /* R */ 142 #define MSDC_PS_CMD BIT(24) /* R */ 143 #define MSDC_PS_WP BIT(31) /* R */ 144 145 /* MSDC_INT mask */ 146 #define MSDC_INT_MMCIRQ BIT(0) /* W1C */ 147 #define MSDC_INT_CDSC BIT(1) /* W1C */ 148 #define MSDC_INT_ACMDRDY BIT(3) /* W1C */ 149 #define MSDC_INT_ACMDTMO BIT(4) /* W1C */ 150 #define MSDC_INT_ACMDCRCERR BIT(5) /* W1C */ 151 #define MSDC_INT_DMAQ_EMPTY BIT(6) /* W1C */ 152 #define MSDC_INT_SDIOIRQ BIT(7) /* W1C */ 153 #define MSDC_INT_CMDRDY BIT(8) /* W1C */ 154 #define MSDC_INT_CMDTMO BIT(9) /* W1C */ 155 #define MSDC_INT_RSPCRCERR BIT(10) /* W1C */ 156 #define MSDC_INT_CSTA BIT(11) /* R */ 157 #define MSDC_INT_XFER_COMPL BIT(12) /* W1C */ 158 #define MSDC_INT_DXFER_DONE BIT(13) /* W1C */ 159 #define MSDC_INT_DATTMO BIT(14) /* W1C */ 160 #define MSDC_INT_DATCRCERR BIT(15) /* W1C */ 161 #define MSDC_INT_ACMD19_DONE BIT(16) /* W1C */ 162 #define MSDC_INT_DMA_BDCSERR BIT(17) /* W1C */ 163 #define MSDC_INT_DMA_GPDCSERR BIT(18) /* W1C */ 164 #define MSDC_INT_DMA_PROTECT BIT(19) /* W1C */ 165 #define MSDC_INT_CMDQ BIT(28) /* W1C */ 166 167 /* MSDC_INTEN mask */ 168 #define MSDC_INTEN_MMCIRQ BIT(0) /* RW */ 169 #define 
MSDC_INTEN_CDSC BIT(1) /* RW */ 170 #define MSDC_INTEN_ACMDRDY BIT(3) /* RW */ 171 #define MSDC_INTEN_ACMDTMO BIT(4) /* RW */ 172 #define MSDC_INTEN_ACMDCRCERR BIT(5) /* RW */ 173 #define MSDC_INTEN_DMAQ_EMPTY BIT(6) /* RW */ 174 #define MSDC_INTEN_SDIOIRQ BIT(7) /* RW */ 175 #define MSDC_INTEN_CMDRDY BIT(8) /* RW */ 176 #define MSDC_INTEN_CMDTMO BIT(9) /* RW */ 177 #define MSDC_INTEN_RSPCRCERR BIT(10) /* RW */ 178 #define MSDC_INTEN_CSTA BIT(11) /* RW */ 179 #define MSDC_INTEN_XFER_COMPL BIT(12) /* RW */ 180 #define MSDC_INTEN_DXFER_DONE BIT(13) /* RW */ 181 #define MSDC_INTEN_DATTMO BIT(14) /* RW */ 182 #define MSDC_INTEN_DATCRCERR BIT(15) /* RW */ 183 #define MSDC_INTEN_ACMD19_DONE BIT(16) /* RW */ 184 #define MSDC_INTEN_DMA_BDCSERR BIT(17) /* RW */ 185 #define MSDC_INTEN_DMA_GPDCSERR BIT(18) /* RW */ 186 #define MSDC_INTEN_DMA_PROTECT BIT(19) /* RW */ 187 188 /* MSDC_FIFOCS mask */ 189 #define MSDC_FIFOCS_RXCNT GENMASK(7, 0) /* R */ 190 #define MSDC_FIFOCS_TXCNT GENMASK(23, 16) /* R */ 191 #define MSDC_FIFOCS_CLR BIT(31) /* RW */ 192 193 /* SDC_CFG mask */ 194 #define SDC_CFG_SDIOINTWKUP BIT(0) /* RW */ 195 #define SDC_CFG_INSWKUP BIT(1) /* RW */ 196 #define SDC_CFG_WRDTOC GENMASK(14, 2) /* RW */ 197 #define SDC_CFG_BUSWIDTH GENMASK(17, 16) /* RW */ 198 #define SDC_CFG_SDIO BIT(19) /* RW */ 199 #define SDC_CFG_SDIOIDE BIT(20) /* RW */ 200 #define SDC_CFG_INTATGAP BIT(21) /* RW */ 201 #define SDC_CFG_DTOC GENMASK(31, 24) /* RW */ 202 203 /* SDC_STS mask */ 204 #define SDC_STS_SDCBUSY BIT(0) /* RW */ 205 #define SDC_STS_CMDBUSY BIT(1) /* RW */ 206 #define SDC_STS_SWR_COMPL BIT(31) /* RW */ 207 208 /* SDC_ADV_CFG0 mask */ 209 #define SDC_DAT1_IRQ_TRIGGER BIT(19) /* RW */ 210 #define SDC_RX_ENHANCE_EN BIT(20) /* RW */ 211 #define SDC_NEW_TX_EN BIT(31) /* RW */ 212 213 /* MSDC_NEW_RX_CFG mask */ 214 #define MSDC_NEW_RX_PATH_SEL BIT(0) /* RW */ 215 216 /* DMA_SA_H4BIT mask */ 217 #define DMA_ADDR_HIGH_4BIT GENMASK(3, 0) /* RW */ 218 219 /* MSDC_DMA_CTRL mask */ 220 
#define MSDC_DMA_CTRL_START BIT(0) /* W */ 221 #define MSDC_DMA_CTRL_STOP BIT(1) /* W */ 222 #define MSDC_DMA_CTRL_RESUME BIT(2) /* W */ 223 #define MSDC_DMA_CTRL_MODE BIT(8) /* RW */ 224 #define MSDC_DMA_CTRL_LASTBUF BIT(10) /* RW */ 225 #define MSDC_DMA_CTRL_BRUSTSZ GENMASK(14, 12) /* RW */ 226 227 /* MSDC_DMA_CFG mask */ 228 #define MSDC_DMA_CFG_STS BIT(0) /* R */ 229 #define MSDC_DMA_CFG_DECSEN BIT(1) /* RW */ 230 #define MSDC_DMA_CFG_AHBHPROT2 BIT(9) /* RW */ 231 #define MSDC_DMA_CFG_ACTIVEEN BIT(13) /* RW */ 232 #define MSDC_DMA_CFG_CS12B16B BIT(16) /* RW */ 233 234 /* MSDC_PATCH_BIT mask */ 235 #define MSDC_PATCH_BIT_ODDSUPP BIT(1) /* RW */ 236 #define MSDC_PATCH_BIT_RD_DAT_SEL BIT(3) /* RW */ 237 #define MSDC_INT_DAT_LATCH_CK_SEL GENMASK(9, 7) 238 #define MSDC_CKGEN_MSDC_DLY_SEL GENMASK(14, 10) 239 #define MSDC_PATCH_BIT_IODSSEL BIT(16) /* RW */ 240 #define MSDC_PATCH_BIT_IOINTSEL BIT(17) /* RW */ 241 #define MSDC_PATCH_BIT_BUSYDLY GENMASK(21, 18) /* RW */ 242 #define MSDC_PATCH_BIT_WDOD GENMASK(25, 22) /* RW */ 243 #define MSDC_PATCH_BIT_IDRTSEL BIT(26) /* RW */ 244 #define MSDC_PATCH_BIT_CMDFSEL BIT(27) /* RW */ 245 #define MSDC_PATCH_BIT_INTDLSEL BIT(28) /* RW */ 246 #define MSDC_PATCH_BIT_SPCPUSH BIT(29) /* RW */ 247 #define MSDC_PATCH_BIT_DECRCTMO BIT(30) /* RW */ 248 249 #define MSDC_PATCH_BIT1_CMDTA GENMASK(5, 3) /* RW */ 250 #define MSDC_PB1_BUSY_CHECK_SEL BIT(7) /* RW */ 251 #define MSDC_PATCH_BIT1_STOP_DLY GENMASK(11, 8) /* RW */ 252 253 #define MSDC_PATCH_BIT2_CFGRESP BIT(15) /* RW */ 254 #define MSDC_PATCH_BIT2_CFGCRCSTS BIT(28) /* RW */ 255 #define MSDC_PB2_SUPPORT_64G BIT(1) /* RW */ 256 #define MSDC_PB2_RESPWAIT GENMASK(3, 2) /* RW */ 257 #define MSDC_PB2_RESPSTSENSEL GENMASK(18, 16) /* RW */ 258 #define MSDC_PB2_POP_EN_CNT GENMASK(23, 20) /* RW */ 259 #define MSDC_PB2_CFGCRCSTSEDGE BIT(25) /* RW */ 260 #define MSDC_PB2_CRCSTSENSEL GENMASK(31, 29) /* RW */ 261 262 #define MSDC_PAD_TUNE_DATWRDLY GENMASK(4, 0) /* RW */ 263 #define 
MSDC_PAD_TUNE_DATRRDLY GENMASK(12, 8) /* RW */ 264 #define MSDC_PAD_TUNE_DATRRDLY2 GENMASK(12, 8) /* RW */ 265 #define MSDC_PAD_TUNE_CMDRDLY GENMASK(20, 16) /* RW */ 266 #define MSDC_PAD_TUNE_CMDRDLY2 GENMASK(20, 16) /* RW */ 267 #define MSDC_PAD_TUNE_CMDRRDLY GENMASK(26, 22) /* RW */ 268 #define MSDC_PAD_TUNE_CLKTDLY GENMASK(31, 27) /* RW */ 269 #define MSDC_PAD_TUNE_RXDLYSEL BIT(15) /* RW */ 270 #define MSDC_PAD_TUNE_RD_SEL BIT(13) /* RW */ 271 #define MSDC_PAD_TUNE_CMD_SEL BIT(21) /* RW */ 272 #define MSDC_PAD_TUNE_RD2_SEL BIT(13) /* RW */ 273 #define MSDC_PAD_TUNE_CMD2_SEL BIT(21) /* RW */ 274 275 #define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */ 276 #define PAD_DS_TUNE_DLY2_SEL BIT(1) /* RW */ 277 #define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */ 278 #define PAD_DS_TUNE_DLY2 GENMASK(11, 7) /* RW */ 279 #define PAD_DS_TUNE_DLY3 GENMASK(16, 12) /* RW */ 280 281 #define PAD_CMD_TUNE_RX_DLY3 GENMASK(5, 1) /* RW */ 282 283 /* EMMC51_CFG0 mask */ 284 #define CMDQ_RDAT_CNT GENMASK(21, 12) /* RW */ 285 286 #define EMMC50_CFG_PADCMD_LATCHCK BIT(0) /* RW */ 287 #define EMMC50_CFG_CRCSTS_EDGE BIT(3) /* RW */ 288 #define EMMC50_CFG_CFCSTS_SEL BIT(4) /* RW */ 289 #define EMMC50_CFG_CMD_RESP_SEL BIT(9) /* RW */ 290 291 /* EMMC50_CFG1 mask */ 292 #define EMMC50_CFG1_DS_CFG BIT(28) /* RW */ 293 294 #define EMMC50_CFG3_OUTS_WR GENMASK(4, 0) /* RW */ 295 296 #define SDC_FIFO_CFG_WRVALIDSEL BIT(24) /* RW */ 297 #define SDC_FIFO_CFG_RDVALIDSEL BIT(25) /* RW */ 298 299 /* CQHCI_SETTING */ 300 #define CQHCI_RD_CMD_WND_SEL BIT(14) /* RW */ 301 #define CQHCI_WR_CMD_WND_SEL BIT(15) /* RW */ 302 303 /* EMMC_TOP_CONTROL mask */ 304 #define PAD_RXDLY_SEL BIT(0) /* RW */ 305 #define DELAY_EN BIT(1) /* RW */ 306 #define PAD_DAT_RD_RXDLY2 GENMASK(6, 2) /* RW */ 307 #define PAD_DAT_RD_RXDLY GENMASK(11, 7) /* RW */ 308 #define PAD_DAT_RD_RXDLY2_SEL BIT(12) /* RW */ 309 #define PAD_DAT_RD_RXDLY_SEL BIT(13) /* RW */ 310 #define DATA_K_VALUE_SEL BIT(14) /* RW */ 311 #define SDC_RX_ENH_EN BIT(15) /* TW 
 */

/* EMMC_TOP_CMD mask */
#define PAD_CMD_RXDLY2		GENMASK(4, 0)	/* RW */
#define PAD_CMD_RXDLY		GENMASK(9, 5)	/* RW */
#define PAD_CMD_RD_RXDLY2_SEL	BIT(10)		/* RW */
#define PAD_CMD_RD_RXDLY_SEL	BIT(11)		/* RW */
#define PAD_CMD_TX_DLY		GENMASK(16, 12)	/* RW */

/* EMMC50_PAD_DS_TUNE mask */
#define PAD_DS_DLY_SEL		BIT(16)		/* RW */
#define PAD_DS_DLY2_SEL		BIT(15)		/* RW */
#define PAD_DS_DLY1		GENMASK(14, 10)	/* RW */
#define PAD_DS_DLY3		GENMASK(4, 0)	/* RW */

/* LOOP_TEST_CONTROL mask */
#define TEST_LOOP_DSCLK_MUX_SEL		BIT(0)	/* RW */
#define TEST_LOOP_LATCH_MUX_SEL		BIT(1)	/* RW */
#define LOOP_EN_SEL_CLK			BIT(20)	/* RW */
#define TEST_HS400_CMD_LOOP_MUX_SEL	BIT(31)	/* RW */

/* Software request-error flags collected while completing a request */
#define REQ_CMD_EIO	BIT(0)
#define REQ_CMD_TMO	BIT(1)
#define REQ_DAT_ERR	BIT(2)
#define REQ_STOP_EIO	BIT(3)
#define REQ_STOP_TMO	BIT(4)
#define REQ_CMD_BUSY	BIT(5)

/* host_cookie bits tracking the DMA-map state of an mmc_data */
#define MSDC_PREPARE_FLAG	BIT(0)
#define MSDC_ASYNC_FLAG		BIT(1)
#define MSDC_MMAP_FLAG		BIT(2)

#define MTK_MMC_AUTOSUSPEND_DELAY	50
#define CMD_TIMEOUT	(HZ/10 * 5)	/* 100ms x5 */
#define DAT_TIMEOUT	(HZ * 5)	/* 1000ms x5 */

#define DEFAULT_DEBOUNCE	(8)	/* 8 cycles CD debounce */

/* second tuning register sits 4 bytes after pad_tune_reg */
#define TUNING_REG2_FIXED_OFFEST	4
#define PAD_DELAY_HALF	32 /* PAD delay cells */
#define PAD_DELAY_FULL	64
/*--------------------------------------------------------------------------*/
/* Descriptor Structure                                                     */
/*--------------------------------------------------------------------------*/
/*
 * General-purpose DMA descriptor (gpd). Hardware layout — field order,
 * widths and the embedded bit masks must not be changed.
 */
struct mt_gpdma_desc {
	u32 gpd_info;
#define GPDMA_DESC_HWO		BIT(0)
#define GPDMA_DESC_BDP		BIT(1)
#define GPDMA_DESC_CHECKSUM	GENMASK(15, 8)
#define GPDMA_DESC_INT		BIT(16)
#define GPDMA_DESC_NEXT_H4	GENMASK(27, 24)
#define GPDMA_DESC_PTR_H4	GENMASK(31, 28)
	u32 next;
	u32 ptr;
	u32 gpd_data_len;
#define GPDMA_DESC_BUFLEN	GENMASK(15, 0)
#define GPDMA_DESC_EXTLEN	GENMASK(23, 16)
	u32 arg;
	u32 blknum;
	u32 cmd;
};

/* Buffer DMA descriptor (bd), one per scatterlist segment. Hardware layout. */
struct mt_bdma_desc {
	u32 bd_info;
#define BDMA_DESC_EOL		BIT(0)
#define BDMA_DESC_CHECKSUM	GENMASK(15, 8)
#define BDMA_DESC_BLKPAD	BIT(17)
#define BDMA_DESC_DWPAD		BIT(18)
#define BDMA_DESC_NEXT_H4	GENMASK(27, 24)
#define BDMA_DESC_PTR_H4	GENMASK(31, 28)
	u32 next;
	u32 ptr;
	u32 bd_data_len;
#define BDMA_DESC_BUFLEN	GENMASK(15, 0)
#define BDMA_DESC_BUFLEN_EXT	GENMASK(23, 0)
};

/* Per-host DMA bookkeeping: descriptor arrays and their bus addresses */
struct msdc_dma {
	struct scatterlist *sg;	/* I/O scatter list */
	struct mt_gpdma_desc *gpd;		/* pointer to gpd array */
	struct mt_bdma_desc *bd;		/* pointer to bd array */
	dma_addr_t gpd_addr;	/* the physical address of gpd array */
	dma_addr_t bd_addr;	/* the physical address of bd array */
};

/* Register snapshot saved/restored across HCLK gating (suspend/resume) */
struct msdc_save_para {
	u32 msdc_cfg;
	u32 iocon;
	u32 sdc_cfg;
	u32 pad_tune;
	u32 patch_bit0;
	u32 patch_bit1;
	u32 patch_bit2;
	u32 pad_ds_tune;
	u32 pad_cmd_tune;
	u32 emmc50_cfg0;
	u32 emmc50_cfg3;
	u32 sdc_fifo_cfg;
	u32 emmc_top_control;
	u32 emmc_top_cmd;
	u32 emmc50_pad_ds_tune;
	u32 loop_test_control;
};

/* Per-SoC capability/quirk description, selected via the OF match table */
struct mtk_mmc_compatible {
	u8 clk_div_bits;
	bool recheck_sdio_irq;
	bool hs400_tune; /* only used for MT8173 */
	bool needs_top_base;
	u32 pad_tune_reg;
	bool async_fifo;
	bool data_tune;
	bool busy_check;
	bool stop_clk_fix;
	u8 stop_dly_sel;
	u8 pop_en_cnt;
	bool enhance_rx;
	bool support_64g;
	bool use_internal_cd;
	bool support_new_tx;
	bool support_new_rx;
};

/* Tuning register values (default or result of a tuning run) */
struct msdc_tune_para {
	u32 iocon;
	u32 pad_tune;
	u32 pad_cmd_tune;
	u32 emmc_top_control;
	u32 emmc_top_cmd;
};

/* One contiguous window of passing delay phases found during tuning */
struct msdc_delay_phase {
	u8 maxlen;
	u8 start;
	u8 final_phase;
};

/* Driver-private host state, embedded in struct mmc_host private data */
struct msdc_host {
	struct device *dev;
	const struct mtk_mmc_compatible *dev_comp;
	int cmd_rsp;

	spinlock_t lock;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;
	int error;

	void __iomem *base;		/* host base address */
	void __iomem *top_base;		/* host top register base address */

	struct msdc_dma dma;	/* dma channel */
	u64 dma_mask;

	u32 timeout_ns;		/* data timeout ns */
	u32 timeout_clks;	/* data timeout clks */

	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_uhs;
	struct pinctrl_state *pins_eint;
	struct delayed_work req_timeout;
	int irq;		/* host interrupt */
	int eint_irq;		/* interrupt from sdio device for waking up system */
	struct reset_control *reset;

	struct clk *src_clk;	/* msdc source clock */
	struct clk *h_clk;	/* msdc h_clk */
	struct clk *bus_clk;	/* bus clock which used to access register */
	struct clk *src_clk_cg;	/* msdc source clock control gate */
	struct clk *sys_clk_cg;	/* msdc subsys clock control gate */
	struct clk *crypto_clk;	/* msdc crypto clock control gate */
	struct clk_bulk_data bulk_clks[MSDC_NR_CLOCKS];
	u32 mclk;		/* mmc subsystem clock frequency */
	u32 src_clk_freq;	/* source clock frequency */
	unsigned char timing;
	bool vqmmc_enabled;
	u32 latch_ck;
	u32 hs400_ds_delay;
	u32 hs400_ds_dly3;
	u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
	u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
	u32 tuning_step;
	bool hs400_cmd_resp_sel_rising;
				 /* cmd response sample selection for HS400 */
	bool hs400_mode;	/* current eMMC will run at hs400 mode */
	bool hs400_tuning;	/* hs400 mode online tuning */
	bool internal_cd;	/* Use internal card-detect logic */
	bool cqhci;		/* support eMMC hw cmdq */
	bool hsq_en;		/* Host Software Queue is enabled */
	struct msdc_save_para save_para; /* used when gate HCLK */
	struct msdc_tune_para def_tune_para; /* default tune setting */
	struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
	struct cqhci_host *cq_host;
	u32 cq_ssc1_time;
};

static const struct mtk_mmc_compatible mt2701_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt2712_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = false,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 3,
	.enhance_rx = true,
	.support_64g = true,
};

static const struct mtk_mmc_compatible mt6779_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = false,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 3,
	.enhance_rx = true,
	.support_64g = true,
};

static const struct mtk_mmc_compatible mt6795_compat = {
	.clk_div_bits = 8,
	.recheck_sdio_irq = false,
	.hs400_tune = true,
	.pad_tune_reg = MSDC_PAD_TUNE,
	.async_fifo = false,
	.data_tune = false,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt7620_compat = {
	.clk_div_bits = 8,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE,
	.async_fifo = false,
	.data_tune = false,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.use_internal_cd = true,
};

static const struct mtk_mmc_compatible mt7622_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 3,
	.enhance_rx = true,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt7986_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.needs_top_base = true,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 3,
	.enhance_rx = true,
	.support_64g = true,
};

static const struct mtk_mmc_compatible mt8135_compat = {
	.clk_div_bits = 8,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE,
	.async_fifo = false,
	.data_tune = false,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt8173_compat = {
	.clk_div_bits = 8,
	.recheck_sdio_irq = true,
	.hs400_tune = true,
	.pad_tune_reg = MSDC_PAD_TUNE,
	.async_fifo = false,
	.data_tune = false,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt8183_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = false,
	.hs400_tune = false,
	.needs_top_base = true,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 3,
	.enhance_rx = true,
	.support_64g = true,
};

static const struct mtk_mmc_compatible mt8516_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 3,
};

static const struct
mtk_mmc_compatible mt8196_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = false,
	.hs400_tune = false,
	.needs_top_base = true,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 1,
	.pop_en_cnt = 2,
	.enhance_rx = true,
	.support_64g = true,
	.support_new_tx = true,
	.support_new_rx = true,
};

static const struct of_device_id msdc_of_ids[] = {
	{ .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
	{ .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
	{ .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat},
	{ .compatible = "mediatek,mt6795-mmc", .data = &mt6795_compat},
	{ .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
	{ .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
	{ .compatible = "mediatek,mt7986-mmc", .data = &mt7986_compat},
	{ .compatible = "mediatek,mt7988-mmc", .data = &mt7986_compat},
	{ .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
	{ .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
	{ .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat},
	{ .compatible = "mediatek,mt8196-mmc", .data = &mt8196_compat},
	{ .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat},

	{}
};
MODULE_DEVICE_TABLE(of, msdc_of_ids);

/* Set the bits in @bs within the 32-bit register at @reg (read-modify-write) */
static void sdr_set_bits(void __iomem *reg, u32 bs)
{
	u32 val = readl(reg);

	val |= bs;
	writel(val, reg);
}

/* Clear the bits in @bs within the 32-bit register at @reg */
static void sdr_clr_bits(void __iomem *reg, u32 bs)
{
	u32 val = readl(reg);

	val &= ~bs;
	writel(val, reg);
}

/*
 * Write @val into the register field described by the contiguous mask
 * @field; @val is shifted to the field's lowest bit position via ffs().
 */
static void sdr_set_field(void __iomem *reg, u32 field, u32 val)
{
	unsigned int tv = readl(reg);

	tv &= ~field;
	tv |= ((val) << (ffs((unsigned int)field) - 1));
	writel(tv, reg);
}

/* Read the register field described by mask @field into *@val */
static void sdr_get_field(void __iomem *reg, u32 field, u32 *val)
{
	unsigned int tv = readl(reg);

	*val = ((tv & field) >> (ffs((unsigned int)field) - 1));
}

/*
 * Reset the controller and clear the FIFO, then ack all pending interrupts.
 * The two polls are atomic (no sleep) because this can run in IRQ context.
 */
static void msdc_reset_hw(struct msdc_host *host)
{
	u32 val;

	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
	readl_poll_timeout_atomic(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);

	sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
	readl_poll_timeout_atomic(host->base + MSDC_FIFOCS, val,
				  !(val & MSDC_FIFOCS_CLR), 0, 0);

	/* MSDC_INT is write-1-to-clear: write back what is pending */
	val = readl(host->base + MSDC_INT);
	writel(val, host->base + MSDC_INT);
}

static void msdc_cmd_next(struct msdc_host *host,
			  struct mmc_request *mrq, struct mmc_command *cmd);
static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb);

/* Interrupt groups enabled for the command and data phases respectively */
static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR |
				 MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY |
				 MSDC_INTEN_ACMDCRCERR | MSDC_INTEN_ACMDTMO;
static const u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO |
				  MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR |
				  MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT;

/*
 * Compute the descriptor checksum the DMA engine expects:
 * 0xff minus the low byte of the sum of the first @len bytes.
 */
static u8 msdc_dma_calcs(u8 *buf, u32 len)
{
	u32 i, sum = 0;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return 0xff - (u8) sum;
}

/*
 * Fill the gpd/bd descriptor chain for @data (already DMA-mapped) and
 * program the DMA engine registers; the transfer is started elsewhere.
 */
static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
				  struct mmc_data *data)
{
	unsigned int j, dma_len;
	dma_addr_t dma_address;
	u32 dma_ctrl;
	struct scatterlist *sg;
	struct mt_gpdma_desc *gpd;
	struct mt_bdma_desc *bd;

	sg = data->sg;

	gpd = dma->gpd;
	bd = dma->bd;

	/* modify gpd */
	gpd->gpd_info |= GPDMA_DESC_HWO;
	gpd->gpd_info |= GPDMA_DESC_BDP;
	/* need to clear first.
use these bits to calc checksum */ 780 gpd->gpd_info &= ~GPDMA_DESC_CHECKSUM; 781 gpd->gpd_info |= msdc_dma_calcs((u8 *) gpd, 16) << 8; 782 783 /* modify bd */ 784 for_each_sg(data->sg, sg, data->sg_count, j) { 785 dma_address = sg_dma_address(sg); 786 dma_len = sg_dma_len(sg); 787 788 /* init bd */ 789 bd[j].bd_info &= ~BDMA_DESC_BLKPAD; 790 bd[j].bd_info &= ~BDMA_DESC_DWPAD; 791 bd[j].ptr = lower_32_bits(dma_address); 792 if (host->dev_comp->support_64g) { 793 bd[j].bd_info &= ~BDMA_DESC_PTR_H4; 794 bd[j].bd_info |= (upper_32_bits(dma_address) & 0xf) 795 << 28; 796 } 797 798 if (host->dev_comp->support_64g) { 799 bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN_EXT; 800 bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN_EXT); 801 } else { 802 bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN; 803 bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN); 804 } 805 806 if (j == data->sg_count - 1) /* the last bd */ 807 bd[j].bd_info |= BDMA_DESC_EOL; 808 else 809 bd[j].bd_info &= ~BDMA_DESC_EOL; 810 811 /* checksum need to clear first */ 812 bd[j].bd_info &= ~BDMA_DESC_CHECKSUM; 813 bd[j].bd_info |= msdc_dma_calcs((u8 *)(&bd[j]), 16) << 8; 814 } 815 816 sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1); 817 dma_ctrl = readl_relaxed(host->base + MSDC_DMA_CTRL); 818 dma_ctrl &= ~(MSDC_DMA_CTRL_BRUSTSZ | MSDC_DMA_CTRL_MODE); 819 dma_ctrl |= (MSDC_BURST_64B << 12 | BIT(8)); 820 writel_relaxed(dma_ctrl, host->base + MSDC_DMA_CTRL); 821 if (host->dev_comp->support_64g) 822 sdr_set_field(host->base + DMA_SA_H4BIT, DMA_ADDR_HIGH_4BIT, 823 upper_32_bits(dma->gpd_addr) & 0xf); 824 writel(lower_32_bits(dma->gpd_addr), host->base + MSDC_DMA_SA); 825 } 826 827 static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data) 828 { 829 if (!(data->host_cookie & MSDC_PREPARE_FLAG)) { 830 data->host_cookie |= MSDC_PREPARE_FLAG; 831 data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, 832 mmc_get_dma_dir(data)); 833 } 834 } 835 836 static void msdc_unprepare_data(struct 
msdc_host *host, struct mmc_data *data) 837 { 838 if (data->host_cookie & MSDC_ASYNC_FLAG) 839 return; 840 841 if (data->host_cookie & MSDC_PREPARE_FLAG) { 842 dma_unmap_sg(host->dev, data->sg, data->sg_len, 843 mmc_get_dma_dir(data)); 844 data->host_cookie &= ~MSDC_PREPARE_FLAG; 845 } 846 } 847 848 static u64 msdc_timeout_cal(struct msdc_host *host, u64 ns, u64 clks) 849 { 850 struct mmc_host *mmc = mmc_from_priv(host); 851 u64 timeout; 852 u32 clk_ns, mode = 0; 853 854 if (mmc->actual_clock == 0) { 855 timeout = 0; 856 } else { 857 clk_ns = 1000000000U / mmc->actual_clock; 858 timeout = ns + clk_ns - 1; 859 do_div(timeout, clk_ns); 860 timeout += clks; 861 /* in 1048576 sclk cycle unit */ 862 timeout = DIV_ROUND_UP(timeout, BIT(20)); 863 if (host->dev_comp->clk_div_bits == 8) 864 sdr_get_field(host->base + MSDC_CFG, 865 MSDC_CFG_CKMOD, &mode); 866 else 867 sdr_get_field(host->base + MSDC_CFG, 868 MSDC_CFG_CKMOD_EXTRA, &mode); 869 /*DDR mode will double the clk cycles for data timeout */ 870 timeout = mode >= 2 ? timeout * 2 : timeout; 871 timeout = timeout > 1 ? 
timeout - 1 : 0; 872 } 873 return timeout; 874 } 875 876 /* clock control primitives */ 877 static void msdc_set_timeout(struct msdc_host *host, u64 ns, u64 clks) 878 { 879 u64 timeout; 880 881 host->timeout_ns = ns; 882 host->timeout_clks = clks; 883 884 timeout = msdc_timeout_cal(host, ns, clks); 885 sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 886 min_t(u32, timeout, 255)); 887 } 888 889 static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks) 890 { 891 u64 timeout; 892 893 timeout = msdc_timeout_cal(host, ns, clks); 894 sdr_set_field(host->base + SDC_CFG, SDC_CFG_WRDTOC, 895 min_t(u32, timeout, 8191)); 896 } 897 898 static void msdc_gate_clock(struct msdc_host *host) 899 { 900 clk_bulk_disable_unprepare(MSDC_NR_CLOCKS, host->bulk_clks); 901 clk_disable_unprepare(host->crypto_clk); 902 clk_disable_unprepare(host->src_clk_cg); 903 clk_disable_unprepare(host->src_clk); 904 clk_disable_unprepare(host->bus_clk); 905 clk_disable_unprepare(host->h_clk); 906 } 907 908 static int msdc_ungate_clock(struct msdc_host *host) 909 { 910 u32 val; 911 int ret; 912 913 clk_prepare_enable(host->h_clk); 914 clk_prepare_enable(host->bus_clk); 915 clk_prepare_enable(host->src_clk); 916 clk_prepare_enable(host->src_clk_cg); 917 clk_prepare_enable(host->crypto_clk); 918 ret = clk_bulk_prepare_enable(MSDC_NR_CLOCKS, host->bulk_clks); 919 if (ret) { 920 dev_err(host->dev, "Cannot enable pclk/axi/ahb clock gates\n"); 921 return ret; 922 } 923 924 return readl_poll_timeout(host->base + MSDC_CFG, val, 925 (val & MSDC_CFG_CKSTB), 1, 20000); 926 } 927 928 static void msdc_new_tx_setting(struct msdc_host *host) 929 { 930 if (!host->top_base) 931 return; 932 933 sdr_set_bits(host->top_base + LOOP_TEST_CONTROL, 934 TEST_LOOP_DSCLK_MUX_SEL); 935 sdr_set_bits(host->top_base + LOOP_TEST_CONTROL, 936 TEST_LOOP_LATCH_MUX_SEL); 937 sdr_clr_bits(host->top_base + LOOP_TEST_CONTROL, 938 TEST_HS400_CMD_LOOP_MUX_SEL); 939 940 switch (host->timing) { 941 case MMC_TIMING_LEGACY: 942 
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_SD_HS:
	case MMC_TIMING_UHS_SDR12:
	case MMC_TIMING_UHS_SDR25:
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		/* lower-speed timings: loop clock select off */
		sdr_clr_bits(host->top_base + LOOP_TEST_CONTROL,
			     LOOP_EN_SEL_CLK);
		break;
	case MMC_TIMING_UHS_SDR50:
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_MMC_HS400:
		/* high-speed timings: loop clock select on */
		sdr_set_bits(host->top_base + LOOP_TEST_CONTROL,
			     LOOP_EN_SEL_CLK);
		break;
	default:
		break;
	}
}

/*
 * Set the card clock to @hz for bus mode @timing.
 *
 * The clock mode written to MSDC_CFG follows the assignments below
 * (0x0 divided, 0x1 no divisor, 0x2 DDR with divisor, 0x3 HS400).
 * All enabled interrupts are masked for the duration of the switch, and the
 * source clock gate (src_clk_cg) is dropped while CKMOD/CKDIV are changed.
 */
static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	u32 mode;
	u32 flags;
	u32 div;
	u32 sclk;
	u32 tune_reg = host->dev_comp->pad_tune_reg;
	u32 val;
	bool timing_changed;

	if (!hz) {
		/* clock off: record the state and stop free-running clock */
		dev_dbg(host->dev, "set mclk to 0\n");
		host->mclk = 0;
		mmc->actual_clock = 0;
		sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
		return;
	}

	if (host->timing != timing)
		timing_changed = true;
	else
		timing_changed = false;

	/* mask the currently enabled interrupts; restored further down */
	flags = readl(host->base + MSDC_INTEN);
	sdr_clr_bits(host->base + MSDC_INTEN, flags);
	if (host->dev_comp->clk_div_bits == 8)
		sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
	else
		sdr_clr_bits(host->base + MSDC_CFG,
			     MSDC_CFG_HS400_CK_MODE_EXTRA);
	if (timing == MMC_TIMING_UHS_DDR50 ||
	    timing == MMC_TIMING_MMC_DDR52 ||
	    timing == MMC_TIMING_MMC_HS400) {
		if (timing == MMC_TIMING_MMC_HS400)
			mode = 0x3;
		else
			mode = 0x2; /* ddr mode and use divisor */

		if (hz >= (host->src_clk_freq >> 2)) {
			div = 0; /* mean div = 1/4 */
			sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */
		} else {
			div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
			sclk = (host->src_clk_freq >> 2) / div;
			div = (div >> 1);
		}

		if (timing == MMC_TIMING_MMC_HS400 &&
		    hz >= (host->src_clk_freq >> 1)) {
			if (host->dev_comp->clk_div_bits == 8)
				sdr_set_bits(host->base + MSDC_CFG,
					     MSDC_CFG_HS400_CK_MODE);
			else
				sdr_set_bits(host->base + MSDC_CFG,
					     MSDC_CFG_HS400_CK_MODE_EXTRA);
			sclk = host->src_clk_freq >> 1;
			div = 0; /* div is ignore when bit18 is set */
		}
	} else if (hz >= host->src_clk_freq) {
		mode = 0x1; /* no divisor */
		div = 0;
		sclk = host->src_clk_freq;
	} else {
		mode = 0x0; /* use divisor */
		if (hz >= (host->src_clk_freq >> 1)) {
			div = 0; /* mean div = 1/2 */
			sclk = host->src_clk_freq >> 1; /* sclk = clk / 2 */
		} else {
			div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
			sclk = (host->src_clk_freq >> 2) / div;
		}
	}
	sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);

	/* gate the source clock while mode/divider are being changed */
	clk_disable_unprepare(host->src_clk_cg);
	if (host->dev_comp->clk_div_bits == 8)
		sdr_set_field(host->base + MSDC_CFG,
			      MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
			      (mode << 8) | div);
	else
		sdr_set_field(host->base + MSDC_CFG,
			      MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA,
			      (mode << 12) | div);

	clk_prepare_enable(host->src_clk_cg);
	/* busy-wait (no timeout bound) until the new clock reports stable */
	readl_poll_timeout(host->base + MSDC_CFG, val, (val & MSDC_CFG_CKSTB), 0, 0);
	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
	mmc->actual_clock = sclk;
	host->mclk = hz;
	host->timing = timing;
	/* need because clk changed. */
	msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
	sdr_set_bits(host->base + MSDC_INTEN, flags);

	/*
	 * mmc_select_hs400() will drop to 50Mhz and High speed mode,
	 * tune result of hs200/200Mhz is not suitable for 50Mhz
	 */
	if (mmc->actual_clock <= 52000000) {
		writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
		if (host->top_base) {
			writel(host->def_tune_para.emmc_top_control,
			       host->top_base + EMMC_TOP_CONTROL);
			writel(host->def_tune_para.emmc_top_cmd,
			       host->top_base + EMMC_TOP_CMD);
		} else {
			writel(host->def_tune_para.pad_tune,
			       host->base + tune_reg);
		}
	} else {
		/* fast clock: restore the previously tuned pad settings */
		writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
		writel(host->saved_tune_para.pad_cmd_tune,
		       host->base + PAD_CMD_TUNE);
		if (host->top_base) {
			writel(host->saved_tune_para.emmc_top_control,
			       host->top_base + EMMC_TOP_CONTROL);
			writel(host->saved_tune_para.emmc_top_cmd,
			       host->top_base + EMMC_TOP_CMD);
		} else {
			writel(host->saved_tune_para.pad_tune,
			       host->base + tune_reg);
		}
	}

	if (timing == MMC_TIMING_MMC_HS400 &&
	    host->dev_comp->hs400_tune)
		sdr_set_field(host->base + tune_reg,
			      MSDC_PAD_TUNE_CMDRRDLY,
			      host->hs400_cmd_int_delay);
	if (host->dev_comp->support_new_tx && timing_changed)
		msdc_new_tx_setting(host);

	dev_dbg(host->dev, "sclk: %d, timing: %d\n", mmc->actual_clock,
		timing);
}

/* Map the MMC core response type to the controller's RSPTYP encoding */
static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
				     struct mmc_command *cmd)
{
	u32 resp;

	switch (mmc_resp_type(cmd)) {
	/* Actually, R1, R5, R6, R7 are the same */
	case MMC_RSP_R1:
		resp = 0x1;
		break;
	case MMC_RSP_R1B:
	case MMC_RSP_R1B_NO_CRC:
		resp = 0x7;
		break;
	case MMC_RSP_R2:
		resp = 0x2;
		break;
	case MMC_RSP_R3:
		resp = 0x3;
		break;
	case MMC_RSP_NONE:
	default:
		resp = 0x0;
		break;
	}

	return resp;
}

/*
 * Build the raw SDC_CMD word for @cmd; the bit layout is documented in the
 * rawcmd comment below. Data-phase fields (block length, direction, block
 * count, auto-CMD23) are only filled in when the command carries data.
 */
static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
					   struct mmc_request *mrq, struct mmc_command *cmd)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	/* rawcmd :
	 * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
	 * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
	 */
	u32 opcode = cmd->opcode;
	u32 resp = msdc_cmd_find_resp(host, cmd);
	u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7);

	host->cmd_rsp = resp;

	if ((opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int) -1) ||
	    opcode == MMC_STOP_TRANSMISSION)
		rawcmd |= BIT(14); /* stop */
	else if (opcode == SD_SWITCH_VOLTAGE)
		rawcmd |= BIT(30); /* vol_swt */
	else if (opcode == SD_APP_SEND_SCR ||
		 opcode == SD_APP_SEND_NUM_WR_BLKS ||
		 (opcode == SD_SWITCH && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
		 (opcode == SD_APP_SD_STATUS && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
		 (opcode == MMC_SEND_EXT_CSD && mmc_cmd_type(cmd) == MMC_CMD_ADTC))
		rawcmd |= BIT(11); /* dtype */

	if (cmd->data) {
		struct mmc_data *data = cmd->data;

		if (mmc_op_multi(opcode)) {
			if (mmc_card_mmc(mmc->card) && mrq->sbc &&
			    !(mrq->sbc->arg & 0xFFFF0000))
				rawcmd |= BIT(29); /* AutoCMD23 */
		}

		rawcmd |= ((data->blksz & 0xFFF) << 16);
		if (data->flags & MMC_DATA_WRITE)
			rawcmd |= BIT(13);
		if (data->blocks > 1)
			rawcmd |= BIT(12);
		else
			rawcmd |= BIT(11);
		/* Always use dma mode */
		sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO);

		if (host->timeout_ns != data->timeout_ns ||
		    host->timeout_clks != data->timeout_clks)
			msdc_set_timeout(host, data->timeout_ns,
					 data->timeout_clks);

		writel(data->blocks, host->base + SDC_BLK_NUM);
	}
	return rawcmd;
}

/* Kick off the DMA data phase that follows a data command. */
static void msdc_start_data(struct msdc_host *host, struct mmc_command *cmd,
			    struct mmc_data *data)
{
	bool read;

	WARN_ON(host->data);
	host->data = data;
	read = data->flags & MMC_DATA_READ;

	/* (re)arm the software request timeout before starting the DMA */
	mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
	msdc_dma_setup(host, &host->dma, data);
	sdr_set_bits(host->base + MSDC_INTEN, data_ints_mask);
	sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
	dev_dbg(host->dev, "DMA start\n");
	dev_dbg(host->dev, "%s: cmd=%d DMA data: %d blocks; read=%d\n",
		__func__, cmd->opcode, data->blocks, read);
}

/*
 * Collect the outcome of a HW auto command (e.g. auto-CMD23). On error the
 * controller is reset and the matching REQ_STOP_* flag is recorded in
 * host->error. Returns the command's error status.
 */
static int msdc_auto_cmd_done(struct msdc_host *host, int events,
			      struct mmc_command *cmd)
{
	u32 *rsp = cmd->resp;

	rsp[0] = readl(host->base + SDC_ACMD_RESP);

	if (events & MSDC_INT_ACMDRDY) {
		cmd->error = 0;
	} else {
		msdc_reset_hw(host);
		if (events & MSDC_INT_ACMDCRCERR) {
			cmd->error = -EILSEQ;
			host->error |= REQ_STOP_EIO;
		} else if (events & MSDC_INT_ACMDTMO) {
			cmd->error = -ETIMEDOUT;
			host->error |= REQ_STOP_TMO;
		}
		dev_err(host->dev,
			"%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n",
			__func__, cmd->opcode, cmd->arg, rsp[0], cmd->error);
	}
	return cmd->error;
}

/*
 * msdc_recheck_sdio_irq - recheck whether the SDIO irq is lost
 *
 * Host controller may lost interrupt in some special case.
 * Add SDIO irq recheck mechanism to make sure all interrupts
 * can be processed immediately
 */
static void msdc_recheck_sdio_irq(struct msdc_host *host)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	u32 reg_int, reg_inten, reg_ps;

	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		reg_inten = readl(host->base + MSDC_INTEN);
		if (reg_inten & MSDC_INTEN_SDIOIRQ) {
			reg_int = readl(host->base + MSDC_INT);
			reg_ps = readl(host->base + MSDC_PS);
			if (!(reg_int & MSDC_INT_SDIOIRQ ||
			      reg_ps & MSDC_PS_DATA1)) {
				/*
				 * DAT1 is low but no irq was latched:
				 * signal the SDIO irq by hand.
				 */
				__msdc_enable_sdio_irq(host, 0);
				sdio_signal_irq(mmc);
			}
		}
	}
}

/*
 * Warn about a failed request, except for tuning commands (where errors are
 * expected) that did not time out.
 */
static void msdc_track_cmd_data(struct msdc_host *host, struct mmc_command *cmd)
{
	if (host->error &&
	    ((!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning) ||
	     cmd->error == -ETIMEDOUT))
		dev_warn(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n",
			 __func__, cmd->opcode, cmd->arg, host->error);
}

/* Finalize a request: clear host state, reset HW on error, notify the core. */
static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	unsigned long flags;
	bool hsq_req_done;

	/*
	 * No need check the return value of cancel_delayed_work, as only ONE
	 * path will go here!
	 */
	cancel_delayed_work(&host->req_timeout);

	/*
	 * If the request was handled from Host Software Queue, there's almost
	 * nothing to do here, and we also don't need to reset mrq as any race
	 * condition would not have any room to happen, since HSQ stores the
	 * "scheduled" mrqs in an internal array of mrq slots anyway.
	 * However, if the controller experienced an error, we still want to
	 * reset it as soon as possible.
	 *
	 * Note that non-HSQ requests will still be happening at times, even
	 * though it is enabled, and that's what is going to reset host->mrq.
	 * Also, msdc_unprepare_data() is going to be called by HSQ when needed
	 * as HSQ request finalization will eventually call the .post_req()
	 * callback of this driver which, in turn, unprepares the data.
	 */
	hsq_req_done = host->hsq_en ? mmc_hsq_finalize_request(mmc, mrq) : false;
	if (hsq_req_done) {
		if (host->error)
			msdc_reset_hw(host);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);
	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	msdc_track_cmd_data(host, mrq->cmd);
	if (mrq->data)
		msdc_unprepare_data(host, mrq->data);
	if (host->error)
		msdc_reset_hw(host);
	mmc_request_done(mmc, mrq);
	if (host->dev_comp->recheck_sdio_irq)
		msdc_recheck_sdio_irq(host);
}

/* returns true if command is fully handled; returns false otherwise */
static bool msdc_cmd_done(struct msdc_host *host, int events,
			  struct mmc_request *mrq, struct mmc_command *cmd)
{
	bool done = false;
	bool sbc_error;
	unsigned long flags;
	u32 *rsp;

	/* handle the auto-command completion bits first, if any */
	if (mrq->sbc && cmd == mrq->cmd &&
	    (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR
		       | MSDC_INT_ACMDTMO)))
		msdc_auto_cmd_done(host, events, mrq->sbc);

	sbc_error = mrq->sbc && mrq->sbc->error;

	if (!sbc_error && !(events & (MSDC_INT_CMDRDY
				      | MSDC_INT_RSPCRCERR
				      | MSDC_INT_CMDTMO)))
		return done;

	/* claim the command; another path may already have completed it */
	spin_lock_irqsave(&host->lock, flags);
	done = !host->cmd;
	host->cmd = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (done)
		return true;
	rsp = cmd->resp;

	sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* 136-bit responses span the four SDC_RESP registers */
			rsp[0] = readl(host->base + SDC_RESP3);
			rsp[1] = readl(host->base + SDC_RESP2);
			rsp[2] = readl(host->base + SDC_RESP1);
			rsp[3] = readl(host->base + SDC_RESP0);
		} else {
			rsp[0]
			       = readl(host->base + SDC_RESP0);
		}
	}

	if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
		if ((events & MSDC_INT_CMDTMO && !host->hs400_tuning) ||
		    (!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning))
			/*
			 * should not clear fifo/interrupt as the tune data
			 * may have already come when cmd19/cmd21 gets response
			 * CRC error.
			 */
			msdc_reset_hw(host);
		if (events & MSDC_INT_RSPCRCERR &&
		    mmc_resp_type(cmd) != MMC_RSP_R1B_NO_CRC) {
			cmd->error = -EILSEQ;
			host->error |= REQ_CMD_EIO;
		} else if (events & MSDC_INT_CMDTMO) {
			cmd->error = -ETIMEDOUT;
			host->error |= REQ_CMD_TMO;
		}
	}
	if (cmd->error)
		dev_dbg(host->dev,
			"%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n",
			__func__, cmd->opcode, cmd->arg, rsp[0],
			cmd->error);

	msdc_cmd_next(host, mrq, cmd);
	return true;
}

/* It is the core layer's responsibility to ensure card status
 * is correct before issue a request. but host design do below
 * checks recommended.
1383 */ 1384 static inline bool msdc_cmd_is_ready(struct msdc_host *host, 1385 struct mmc_request *mrq, struct mmc_command *cmd) 1386 { 1387 u32 val; 1388 int ret; 1389 1390 /* The max busy time we can endure is 20ms */ 1391 ret = readl_poll_timeout_atomic(host->base + SDC_STS, val, 1392 !(val & SDC_STS_CMDBUSY), 1, 20000); 1393 if (ret) { 1394 dev_err(host->dev, "CMD bus busy detected\n"); 1395 host->error |= REQ_CMD_BUSY; 1396 msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd); 1397 return false; 1398 } 1399 1400 if (mmc_resp_type(cmd) == MMC_RSP_R1B || cmd->data) { 1401 /* R1B or with data, should check SDCBUSY */ 1402 ret = readl_poll_timeout_atomic(host->base + SDC_STS, val, 1403 !(val & SDC_STS_SDCBUSY), 1, 20000); 1404 if (ret) { 1405 dev_err(host->dev, "Controller busy detected\n"); 1406 host->error |= REQ_CMD_BUSY; 1407 msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd); 1408 return false; 1409 } 1410 } 1411 return true; 1412 } 1413 1414 static void msdc_start_command(struct msdc_host *host, 1415 struct mmc_request *mrq, struct mmc_command *cmd) 1416 { 1417 u32 rawcmd; 1418 unsigned long flags; 1419 1420 WARN_ON(host->cmd); 1421 host->cmd = cmd; 1422 1423 mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT); 1424 if (!msdc_cmd_is_ready(host, mrq, cmd)) 1425 return; 1426 1427 if ((readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16 || 1428 readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) { 1429 dev_err(host->dev, "TX/RX FIFO non-empty before start of IO. 
Reset\n"); 1430 msdc_reset_hw(host); 1431 } 1432 1433 cmd->error = 0; 1434 rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd); 1435 1436 spin_lock_irqsave(&host->lock, flags); 1437 sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask); 1438 spin_unlock_irqrestore(&host->lock, flags); 1439 1440 writel(cmd->arg, host->base + SDC_ARG); 1441 writel(rawcmd, host->base + SDC_CMD); 1442 } 1443 1444 static void msdc_cmd_next(struct msdc_host *host, 1445 struct mmc_request *mrq, struct mmc_command *cmd) 1446 { 1447 if ((cmd->error && !host->hs400_tuning && 1448 !(cmd->error == -EILSEQ && 1449 mmc_op_tuning(cmd->opcode))) || 1450 (mrq->sbc && mrq->sbc->error)) 1451 msdc_request_done(host, mrq); 1452 else if (cmd == mrq->sbc) 1453 msdc_start_command(host, mrq, mrq->cmd); 1454 else if (!cmd->data) 1455 msdc_request_done(host, mrq); 1456 else 1457 msdc_start_data(host, cmd, cmd->data); 1458 } 1459 1460 static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq) 1461 { 1462 struct msdc_host *host = mmc_priv(mmc); 1463 1464 host->error = 0; 1465 WARN_ON(!host->hsq_en && host->mrq); 1466 host->mrq = mrq; 1467 1468 if (mrq->data) 1469 msdc_prepare_data(host, mrq->data); 1470 1471 /* if SBC is required, we have HW option and SW option. 
	 * if HW option is enabled, and SBC does not have "special" flags,
	 * use HW option, otherwise use SW option
	 */
	if (mrq->sbc && (!mmc_card_mmc(mmc->card) ||
	    (mrq->sbc->arg & 0xFFFF0000)))
		msdc_start_command(host, mrq, mrq->sbc);
	else
		msdc_start_command(host, mrq, mrq->cmd);
}

/* .pre_req host op: map the data buffers ahead of issuing the request */
static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct msdc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	msdc_prepare_data(host, data);
	data->host_cookie |= MSDC_ASYNC_FLAG;
}

/* .post_req host op: unmap buffers that were prepared asynchronously */
static void msdc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			  int err)
{
	struct msdc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie &= ~MSDC_ASYNC_FLAG;
		msdc_unprepare_data(host, data);
	}
}

/*
 * After the data phase: issue the stop command by hand when it was not
 * covered by sbc/auto-stop, otherwise finish the request.
 */
static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq)
{
	if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error &&
	    !mrq->sbc)
		msdc_start_command(host, mrq, mrq->stop);
	else
		msdc_request_done(host, mrq);
}

/* Tear down a finished (or failed) DMA data transfer and route the result. */
static void msdc_data_xfer_done(struct msdc_host *host, u32 events,
				struct mmc_request *mrq, struct mmc_data *data)
{
	struct mmc_command *stop;
	unsigned long flags;
	bool done;
	unsigned int check_data = events &
	    (MSDC_INT_XFER_COMPL | MSDC_INT_DATCRCERR | MSDC_INT_DATTMO
	     | MSDC_INT_DMA_BDCSERR | MSDC_INT_DMA_GPDCSERR
	     | MSDC_INT_DMA_PROTECT);
	u32 val;
	int ret;

	/* claim the data phase; another path may already have completed it */
	spin_lock_irqsave(&host->lock, flags);
	done = !host->data;
	if (check_data)
		host->data = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (done)
		return;
	stop = data->stop;

	if (check_data || (stop && stop->error)) {
		dev_dbg(host->dev, "DMA status: 0x%8X\n",
			readl(host->base + MSDC_DMA_CFG));
		sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP,
			      1);

		/* wait for the DMA engine to acknowledge the stop request */
		ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CTRL, val,
						!(val & MSDC_DMA_CTRL_STOP), 1, 20000);
		if (ret)
			dev_dbg(host->dev, "DMA stop timed out\n");

		/* then wait for it to report inactive */
		ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CFG, val,
						!(val & MSDC_DMA_CFG_STS), 1, 20000);
		if (ret)
			dev_dbg(host->dev, "DMA inactive timed out\n");

		sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask);
		dev_dbg(host->dev, "DMA stop\n");

		if ((events & MSDC_INT_XFER_COMPL) && (!stop || !stop->error)) {
			data->bytes_xfered = data->blocks * data->blksz;
		} else {
			dev_dbg(host->dev, "interrupt events: %x\n", events);
			msdc_reset_hw(host);
			host->error |= REQ_DAT_ERR;
			data->bytes_xfered = 0;

			if (events & MSDC_INT_DATTMO)
				data->error = -ETIMEDOUT;
			else if (events & MSDC_INT_DATCRCERR)
				data->error = -EILSEQ;

			dev_dbg(host->dev, "%s: cmd=%d; blocks=%d",
				__func__, mrq->cmd->opcode, data->blocks);
			dev_dbg(host->dev, "data_error=%d xfer_size=%d\n",
				(int)data->error, data->bytes_xfered);
		}

		msdc_data_xfer_next(host, mrq);
	}
}

/* Program SDC_CFG.BUSWIDTH for a 1/4/8-bit data bus */
static void msdc_set_buswidth(struct msdc_host *host, u32 width)
{
	u32 val = readl(host->base + SDC_CFG);

	val &= ~SDC_CFG_BUSWIDTH;

	switch (width) {
	default:
	case MMC_BUS_WIDTH_1:
		val |= (MSDC_BUS_1BITS << 16);
		break;
	case MMC_BUS_WIDTH_4:
		val |= (MSDC_BUS_4BITS << 16);
		break;
	case MMC_BUS_WIDTH_8:
		val |= (MSDC_BUS_8BITS << 16);
		break;
	}

	writel(val, host->base + SDC_CFG);
	dev_dbg(host->dev, "Bus Width = %d", width);
}

/*
 * .start_signal_voltage_switch host op: switch vqmmc (3.3V/1.8V only) and
 * apply the matching pinctrl state.
 */
static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct msdc_host *host = mmc_priv(mmc);
	int ret;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		if (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_330 &&
		    ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
			dev_err(host->dev, "Unsupported signal voltage!\n");
			return -EINVAL;
		}

		ret = mmc_regulator_set_vqmmc(mmc, ios);
		if (ret < 0) {
			dev_dbg(host->dev, "Regulator set error %d (%d)\n",
				ret, ios->signal_voltage);
			return ret;
		}

		/* Apply different pinctrl settings for different signal voltage */
		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			pinctrl_select_state(host->pinctrl, host->pins_uhs);
		else
			pinctrl_select_state(host->pinctrl, host->pins_default);
	}
	return 0;
}

/* .card_busy host op */
static int msdc_card_busy(struct mmc_host *mmc)
{
	struct msdc_host *host = mmc_priv(mmc);
	u32 status = readl(host->base + MSDC_PS);

	/* only check if data0 is low */
	return !(status & BIT(16));
}

/*
 * Delayed-work handler armed by msdc_start_command()/msdc_start_data():
 * fires when no completion interrupt arrived in time and completes the
 * stuck cmd/data with a simulated hardware timeout event.
 */
static void msdc_request_timeout(struct work_struct *work)
{
	struct msdc_host *host = container_of(work, struct msdc_host,
					      req_timeout.work);

	/* simulate HW timeout status */
	dev_err(host->dev, "%s: aborting cmd/data/mrq\n", __func__);
	if (host->mrq) {
		dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__,
			host->mrq, host->mrq->cmd->opcode);
		if (host->cmd) {
			dev_err(host->dev, "%s: aborting cmd=%d\n",
				__func__, host->cmd->opcode);
			msdc_cmd_done(host, MSDC_INT_CMDTMO, host->mrq,
				      host->cmd);
		} else if (host->data) {
			dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n",
				__func__, host->mrq->cmd->opcode,
				host->data->blocks);
			msdc_data_xfer_done(host, MSDC_INT_DATTMO, host->mrq,
					    host->data);
		}
	}
}

/* Raw SDIO-irq enable/disable helper (does no locking itself) */
static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
{
	if (enb) {
		sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
		sdr_set_bits(host->base + SDC_CFG,
			     SDC_CFG_SDIOIDE);
		if (host->dev_comp->recheck_sdio_irq)
			msdc_recheck_sdio_irq(host);
	} else {
		sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
	}
}

/*
 * .enable_sdio_irq host op: toggle the SDIO interrupt under host->lock and
 * manage the dedicated eint wake irq / runtime-PM reference accordingly.
 */
static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct msdc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);
	__msdc_enable_sdio_irq(host, enb);
	spin_unlock_irqrestore(&host->lock, flags);

	if (mmc_card_enable_async_irq(mmc->card) && host->pins_eint) {
		if (enb) {
			/*
			 * In dev_pm_set_dedicated_wake_irq_reverse(), eint pin will be set to
			 * GPIO mode. We need to restore it to SDIO DAT1 mode after that.
			 * Since the current pinstate is pins_uhs, to ensure pinctrl select take
			 * affect successfully, we change the pinstate to pins_eint firstly.
			 */
			pinctrl_select_state(host->pinctrl, host->pins_eint);
			ret = dev_pm_set_dedicated_wake_irq_reverse(host->dev, host->eint_irq);

			if (ret) {
				dev_err(host->dev, "Failed to register SDIO wakeup irq!\n");
				host->pins_eint = NULL;
				pm_runtime_get_noresume(host->dev);
			} else {
				dev_dbg(host->dev, "SDIO eint irq: %d!\n", host->eint_irq);
			}

			pinctrl_select_state(host->pinctrl, host->pins_uhs);
		} else {
			dev_pm_clear_wake_irq(host->dev);
		}
	} else {
		if (enb) {
			/* Ensure host->pins_eint is NULL */
			host->pins_eint = NULL;
			pm_runtime_get_noresume(host->dev);
		} else {
			pm_runtime_put_noidle(host->dev);
		}
	}
}

/* Translate command-queue interrupt status bits into cqhci error codes */
static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	int cmd_err = 0, dat_err = 0;

	if (intsts & MSDC_INT_RSPCRCERR) {
		cmd_err = -EILSEQ;
		dev_err(host->dev, "%s: CMD CRC ERR", __func__);
	} else if (intsts &
		   MSDC_INT_CMDTMO) {
		cmd_err = -ETIMEDOUT;
		dev_err(host->dev, "%s: CMD TIMEOUT ERR", __func__);
	}

	if (intsts & MSDC_INT_DATCRCERR) {
		dat_err = -EILSEQ;
		dev_err(host->dev, "%s: DATA CRC ERR", __func__);
	} else if (intsts & MSDC_INT_DATTMO) {
		dat_err = -ETIMEDOUT;
		dev_err(host->dev, "%s: DATA TIMEOUT ERR", __func__);
	}

	if (cmd_err || dat_err) {
		dev_err(host->dev, "cmd_err = %d, dat_err = %d, intsts = 0x%x",
			cmd_err, dat_err, intsts);
	}

	return cqhci_irq(mmc, 0, cmd_err, dat_err);
}

/*
 * Top-level interrupt handler: loops draining MSDC_INT, acks each batch of
 * events, and dispatches SDIO irq, card detect, CQE, command and data
 * completions.
 */
static irqreturn_t msdc_irq(int irq, void *dev_id)
{
	struct msdc_host *host = (struct msdc_host *) dev_id;
	struct mmc_host *mmc = mmc_from_priv(host);

	while (true) {
		struct mmc_request *mrq;
		struct mmc_command *cmd;
		struct mmc_data *data;
		u32 events, event_mask;

		spin_lock(&host->lock);
		events = readl(host->base + MSDC_INT);
		event_mask = readl(host->base + MSDC_INTEN);
		if ((events & event_mask) & MSDC_INT_SDIOIRQ)
			__msdc_enable_sdio_irq(host, 0);
		/* clear interrupts */
		writel(events & event_mask, host->base + MSDC_INT);

		mrq = host->mrq;
		cmd = host->cmd;
		data = host->data;
		spin_unlock(&host->lock);

		if ((events & event_mask) & MSDC_INT_SDIOIRQ)
			sdio_signal_irq(mmc);

		if ((events & event_mask) & MSDC_INT_CDSC) {
			if (host->internal_cd)
				mmc_detect_change(mmc, msecs_to_jiffies(20));
			events &= ~MSDC_INT_CDSC;
		}

		/* nothing left to handle besides a possible SDIO irq? */
		if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ)))
			break;

		if ((mmc->caps2 & MMC_CAP2_CQE) &&
		    (events & MSDC_INT_CMDQ)) {
			msdc_cmdq_irq(host, events);
			/* clear interrupts */
			writel(events, host->base + MSDC_INT);
			return IRQ_HANDLED;
		}

		if (!mrq) {
			dev_err(host->dev,
				"%s: MRQ=NULL; events=%08X; event_mask=%08X\n",
				__func__, events, event_mask);
			WARN_ON(1);
			break;
		}

		dev_dbg(host->dev, "%s: events=%08X\n", __func__, events);

		if (cmd)
			msdc_cmd_done(host, events, mrq, cmd);
		else if (data)
			msdc_data_xfer_done(host, events, mrq, data);
	}

	return IRQ_HANDLED;
}

/*
 * Bring the controller into a known-good state: optional external reset,
 * TX/RX path selection, interrupt/card-detect setup and the per-SoC quirk
 * configuration. Run on MMC_POWER_UP (see msdc_ops_set_ios()).
 */
static void msdc_init_hw(struct msdc_host *host)
{
	u32 val;
	u32 tune_reg = host->dev_comp->pad_tune_reg;
	struct mmc_host *mmc = mmc_from_priv(host);

	if (host->reset) {
		reset_control_assert(host->reset);
		usleep_range(10, 50);
		reset_control_deassert(host->reset);
	}

	/* New tx/rx enable bit need to be 0->1 for hardware check */
	if (host->dev_comp->support_new_tx) {
		sdr_clr_bits(host->base + SDC_ADV_CFG0, SDC_NEW_TX_EN);
		sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_NEW_TX_EN);
		msdc_new_tx_setting(host);
	}
	if (host->dev_comp->support_new_rx) {
		sdr_clr_bits(host->base + MSDC_NEW_RX_CFG, MSDC_NEW_RX_PATH_SEL);
		sdr_set_bits(host->base + MSDC_NEW_RX_CFG, MSDC_NEW_RX_PATH_SEL);
	}

	/* Configure to MMC/SD mode, clock free running */
	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);

	/* Reset */
	msdc_reset_hw(host);

	/* Disable and clear all interrupts */
	writel(0, host->base + MSDC_INTEN);
	val = readl(host->base + MSDC_INT);
	writel(val, host->base + MSDC_INT);

	/* Configure card detection */
	if (host->internal_cd) {
		sdr_set_field(host->base + MSDC_PS, MSDC_PS_CDDEBOUNCE,
			      DEFAULT_DEBOUNCE);
		sdr_set_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
		sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
		sdr_set_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
	} else {
		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
		sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
		sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
	}

	/* reset the pad tuning registers to their defaults */
	if (host->top_base) {
		writel(0, host->top_base
		       + EMMC_TOP_CONTROL);
		writel(0, host->top_base + EMMC_TOP_CMD);
	} else {
		writel(0, host->base + tune_reg);
	}
	writel(0, host->base + MSDC_IOCON);
	sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
	/* NOTE(review): magic patch-bit values match the vendor init sequence */
	writel(0x403c0046, host->base + MSDC_PATCH_BIT);
	sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
	writel(0xffff4089, host->base + MSDC_PATCH_BIT1);
	sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);

	if (host->dev_comp->stop_clk_fix) {
		if (host->dev_comp->stop_dly_sel)
			sdr_set_field(host->base + MSDC_PATCH_BIT1,
				      MSDC_PATCH_BIT1_STOP_DLY,
				      host->dev_comp->stop_dly_sel);

		if (host->dev_comp->pop_en_cnt)
			sdr_set_field(host->base + MSDC_PATCH_BIT2,
				      MSDC_PB2_POP_EN_CNT,
				      host->dev_comp->pop_en_cnt);

		sdr_clr_bits(host->base + SDC_FIFO_CFG,
			     SDC_FIFO_CFG_WRVALIDSEL);
		sdr_clr_bits(host->base + SDC_FIFO_CFG,
			     SDC_FIFO_CFG_RDVALIDSEL);
	}

	if (host->dev_comp->busy_check)
		sdr_clr_bits(host->base + MSDC_PATCH_BIT1, BIT(7));

	if (host->dev_comp->async_fifo) {
		sdr_set_field(host->base + MSDC_PATCH_BIT2,
			      MSDC_PB2_RESPWAIT, 3);
		if (host->dev_comp->enhance_rx) {
			if (host->top_base)
				sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
					     SDC_RX_ENH_EN);
			else
				sdr_set_bits(host->base + SDC_ADV_CFG0,
					     SDC_RX_ENHANCE_EN);
		} else {
			sdr_set_field(host->base + MSDC_PATCH_BIT2,
				      MSDC_PB2_RESPSTSENSEL, 2);
			sdr_set_field(host->base + MSDC_PATCH_BIT2,
				      MSDC_PB2_CRCSTSENSEL, 2);
		}
		/* use async fifo, then no need tune internal delay */
		sdr_clr_bits(host->base + MSDC_PATCH_BIT2,
			     MSDC_PATCH_BIT2_CFGRESP);
		sdr_set_bits(host->base + MSDC_PATCH_BIT2,
			     MSDC_PATCH_BIT2_CFGCRCSTS);
	}

	if (host->dev_comp->support_64g)
		sdr_set_bits(host->base + MSDC_PATCH_BIT2,
			     MSDC_PB2_SUPPORT_64G);
	if
	   (host->dev_comp->data_tune) {
		/* select per-line (data/cmd) delay tuning */
		if (host->top_base) {
			sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
				     PAD_DAT_RD_RXDLY_SEL);
			sdr_clr_bits(host->top_base + EMMC_TOP_CONTROL,
				     DATA_K_VALUE_SEL);
			sdr_set_bits(host->top_base + EMMC_TOP_CMD,
				     PAD_CMD_RD_RXDLY_SEL);
			if (host->tuning_step > PAD_DELAY_HALF) {
				/* long scans need the second delay stage too */
				sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
					     PAD_DAT_RD_RXDLY2_SEL);
				sdr_set_bits(host->top_base + EMMC_TOP_CMD,
					     PAD_CMD_RD_RXDLY2_SEL);
			}
		} else {
			sdr_set_bits(host->base + tune_reg,
				     MSDC_PAD_TUNE_RD_SEL |
				     MSDC_PAD_TUNE_CMD_SEL);
			if (host->tuning_step > PAD_DELAY_HALF)
				sdr_set_bits(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
					     MSDC_PAD_TUNE_RD2_SEL |
					     MSDC_PAD_TUNE_CMD2_SEL);
		}
	} else {
		/* choose clock tune */
		if (host->top_base)
			sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
				     PAD_RXDLY_SEL);
		else
			sdr_set_bits(host->base + tune_reg,
				     MSDC_PAD_TUNE_RXDLYSEL);
	}

	if (mmc->caps2 & MMC_CAP2_NO_SDIO) {
		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
		sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
		sdr_clr_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
	} else {
		/* Configure to enable SDIO mode, otherwise SDIO CMD5 fails */
		sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);

		/* Config SDIO device detect interrupt function */
		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
		sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
	}

	/* Configure to default data timeout */
	sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);

	/* snapshot the freshly initialised settings as the tuning baseline */
	host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
	host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
	if (host->top_base) {
		host->def_tune_para.emmc_top_control =
			readl(host->top_base + EMMC_TOP_CONTROL);
		host->def_tune_para.emmc_top_cmd =
			readl(host->top_base + EMMC_TOP_CMD);
		host->saved_tune_para.emmc_top_control =
			readl(host->top_base + EMMC_TOP_CONTROL);
		host->saved_tune_para.emmc_top_cmd =
			readl(host->top_base + EMMC_TOP_CMD);
	} else {
		host->def_tune_para.pad_tune = readl(host->base + tune_reg);
		host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
	}
	dev_dbg(host->dev, "init hardware done!");
}

/* Quiesce the controller: stop card detect and mask/ack all interrupts */
static void msdc_deinit_hw(struct msdc_host *host)
{
	u32 val;

	if (host->internal_cd) {
		/* Disabled card-detect */
		sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
	}

	/* Disable and clear all interrupts */
	writel(0, host->base + MSDC_INTEN);

	val = readl(host->base + MSDC_INT);
	writel(val, host->base + MSDC_INT);
}

/* init gpd and bd list in msdc_drv_probe */
static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
{
	struct mt_gpdma_desc *gpd = dma->gpd;
	struct mt_bdma_desc *bd = dma->bd;
	dma_addr_t dma_addr;
	int i;

	memset(gpd, 0, sizeof(struct mt_gpdma_desc) * 2);

	dma_addr = dma->gpd_addr + sizeof(struct mt_gpdma_desc);
	gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */
	/* gpd->next is must set for desc DMA
	 * That's why must alloc 2 gpd structure.
	 */
	gpd->next = lower_32_bits(dma_addr);
	if (host->dev_comp->support_64g)
		/* extra address bits (upper 4) are carried in gpd_info */
		gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 24;

	dma_addr = dma->bd_addr;
	gpd->ptr = lower_32_bits(dma->bd_addr); /* physical address */
	if (host->dev_comp->support_64g)
		gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 28;

	memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM);
	/* pre-link the BD list; the last BD keeps a zero next pointer */
	for (i = 0; i < (MAX_BD_NUM - 1); i++) {
		dma_addr = dma->bd_addr + sizeof(*bd) * (i + 1);
		bd[i].next = lower_32_bits(dma_addr);
		if (host->dev_comp->support_64g)
			bd[i].bd_info |= (upper_32_bits(dma_addr) & 0xf) << 24;
	}
}

/* .set_ios host op: apply bus width, power state, clock and timing */
static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct msdc_host *host = mmc_priv(mmc);
	int ret;

	msdc_set_buswidth(host, ios->bus_width);

	/* Suspend/Resume will do power off/on */
	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			msdc_init_hw(host);
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
						    ios->vdd);
			if (ret) {
				dev_err(host->dev, "Failed to set vmmc power!\n");
				return;
			}
		}
		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret)
				dev_err(host->dev, "Failed to set vqmmc power!\n");
			else
				host->vqmmc_enabled = true;
		}
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}
		break;
	default:
		break;
	}

	if (host->mclk != ios->clock || host->timing != ios->timing)
		msdc_set_mclk(host, ios->timing, ios->clock);
}

/* Test one bit (modulo the pad-delay width) in a delay-scan bitmap */
static u64 test_delay_bit(u64 delay, u32 bit)
{
	bit %= PAD_DELAY_FULL;
	return delay & BIT_ULL(bit);
}

/*
 * Length of the run of consecutive "pass" bits in @delay starting at
 * @start_bit, capped at the end of the delay window.
 */
static int get_delay_len(u64 delay, u32 start_bit)
{
	int i;

	for (i = 0; i < (PAD_DELAY_FULL - start_bit); i++) {
		if (test_delay_bit(delay, start_bit + i) == 0)
			return i;
	}
	return PAD_DELAY_FULL - start_bit;
}

/*
 * Scan the pass/fail bitmap @delay and pick a phase inside the longest
 * passing window.  final_phase == 0xff signals that no usable window
 * was found.
 */
static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u64 delay)
{
	int start = 0, len = 0;
	int start_final = 0, len_final = 0;
	u8 final_phase = 0xff;
	struct msdc_delay_phase delay_phase = { 0, };

	if (delay == 0) {
		dev_err(host->dev, "phase error: [map:%016llx]\n", delay);
		delay_phase.final_phase = final_phase;
		return delay_phase;
	}

	while (start < PAD_DELAY_FULL) {
		len = get_delay_len(delay, start);
		if (len_final < len) {
			start_final = start;
			len_final = len;
		}
		start += len ? len : 1;
		/* 32-bit map with a long window near the origin: stop scanning early */
		if (!upper_32_bits(delay) && len >= 12 && start_final < 4)
			break;
	}

	/* The rule is that to find the smallest delay cell */
	if (start_final == 0)
		final_phase = (start_final + len_final / 3) % PAD_DELAY_FULL;
	else
		final_phase = (start_final + len_final / 2) % PAD_DELAY_FULL;
	dev_dbg(host->dev, "phase: [map:%016llx] [maxlen:%d] [final:%d]\n",
		delay, len_final, final_phase);

	delay_phase.maxlen = len_final;
	delay_phase.start = start_final;
	delay_phase.final_phase = final_phase;
	return delay_phase;
}

/*
 * Program the CMD-line RX pad delay.  Values below PAD_DELAY_HALF go into
 * the first delay stage; larger values saturate stage 1 and spill the
 * remainder into the second-stage field.
 */
static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value)
{
	u32 tune_reg = host->dev_comp->pad_tune_reg;

	if (host->top_base) {
		if (value < PAD_DELAY_HALF) {
			sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, value);
			sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2, 0);
		} else {
			sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY,
				      PAD_DELAY_HALF - 1);
			sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2,
				      value - PAD_DELAY_HALF);
		}
	} else {
		if (value < PAD_DELAY_HALF) {
			sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, value);
			sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
				      MSDC_PAD_TUNE_CMDRDLY2, 0);
		} else {
			sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
				      PAD_DELAY_HALF - 1);
			sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
				      MSDC_PAD_TUNE_CMDRDLY2, value - PAD_DELAY_HALF);
		}
	}
}

/* Program the DAT-line read RX pad delay; same two-stage split as for CMD */
static inline void msdc_set_data_delay(struct msdc_host *host, u32 value)
{
	u32 tune_reg = host->dev_comp->pad_tune_reg;

	if (host->top_base) {
		if (value < PAD_DELAY_HALF) {
			sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
				      PAD_DAT_RD_RXDLY, value);
			sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
				      PAD_DAT_RD_RXDLY2, 0);
		} else {
			sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
				      PAD_DAT_RD_RXDLY, PAD_DELAY_HALF - 1);
			sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
				      PAD_DAT_RD_RXDLY2, value - PAD_DELAY_HALF);
		}
	} else {
		if (value < PAD_DELAY_HALF) {
			sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, value);
			sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
				      MSDC_PAD_TUNE_DATRRDLY2, 0);
		} else {
			sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY,
				      PAD_DELAY_HALF - 1);
			sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
				      MSDC_PAD_TUNE_DATRRDLY2, value - PAD_DELAY_HALF);
		}
	}
}

/* Select the sampling edge for read data (and CRC status on new-RX IP) */
static inline void msdc_set_data_sample_edge(struct msdc_host *host, bool rising)
{
	u32 value = rising ?
0 : 1;

	if (host->dev_comp->support_new_rx) {
		sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_PATCH_BIT_RD_DAT_SEL, value);
		sdr_set_field(host->base + MSDC_PATCH_BIT2, MSDC_PB2_CFGCRCSTSEDGE, value);
	} else {
		sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL, value);
		sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL, value);
	}
}

/*
 * Tune CMD response sampling: scan every pad-delay step on the rising
 * edge and, if the rising window is too narrow, on the falling edge,
 * then program the centre of the best passing window.  Returns -EIO
 * when no delay setting produced a stable response.
 */
static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
{
	struct msdc_host *host = mmc_priv(mmc);
	u64 rise_delay = 0, fall_delay = 0;
	struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
	struct msdc_delay_phase internal_delay_phase;
	u8 final_delay, final_maxlen;
	u32 internal_delay = 0;
	u32 tune_reg = host->dev_comp->pad_tune_reg;
	int cmd_err;
	int i, j;

	if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
	    mmc->ios.timing == MMC_TIMING_UHS_SDR104)
		sdr_set_field(host->base + tune_reg,
			      MSDC_PAD_TUNE_CMDRRDLY,
			      host->hs200_cmd_int_delay);

	/* First pass: sample responses on the rising edge */
	sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
	for (i = 0; i < host->tuning_step; i++) {
		msdc_set_cmd_delay(host, i);
		/*
		 * Using the same parameters, it may sometimes pass the test,
		 * but sometimes it may fail. To make sure the parameters are
		 * more stable, we test each set of parameters 3 times.
		 */
		for (j = 0; j < 3; j++) {
			mmc_send_tuning(mmc, opcode, &cmd_err);
			if (!cmd_err) {
				rise_delay |= BIT_ULL(i);
			} else {
				rise_delay &= ~BIT_ULL(i);
				break;
			}
		}
	}
	final_rise_delay = get_best_delay(host, rise_delay);
	/* if rising edge has enough margin, then do not scan falling edge */
	if (final_rise_delay.maxlen >= 12 ||
	    (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
		goto skip_fall;

	/* Second pass: falling edge */
	sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
	for (i = 0; i < host->tuning_step; i++) {
		msdc_set_cmd_delay(host, i);
		/*
		 * Using the same parameters, it may sometimes pass the test,
		 * but sometimes it may fail. To make sure the parameters are
		 * more stable, we test each set of parameters 3 times.
		 */
		for (j = 0; j < 3; j++) {
			mmc_send_tuning(mmc, opcode, &cmd_err);
			if (!cmd_err) {
				fall_delay |= BIT_ULL(i);
			} else {
				fall_delay &= ~BIT_ULL(i);
				break;
			}
		}
	}
	final_fall_delay = get_best_delay(host, fall_delay);

skip_fall:
	final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
	/* prefer a falling-edge window that is both long and near the origin */
	if (final_fall_delay.maxlen >= 12 && final_fall_delay.start < 4)
		final_maxlen = final_fall_delay.maxlen;
	if (final_maxlen == final_rise_delay.maxlen) {
		sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
		final_delay = final_rise_delay.final_phase;
	} else {
		sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
		final_delay = final_fall_delay.final_phase;
	}
	msdc_set_cmd_delay(host, final_delay);

	/* async-FIFO IP (or an explicit DT delay) skips the internal scan */
	if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay)
		goto skip_internal;

	/*
	 * NOTE(review): internal_delay is u32 but this loop runs up to
	 * host->tuning_step steps; if tuning_step can exceed 32 on IP
	 * without async_fifo, the upper BIT_ULL() bits are truncated here
	 * - confirm the reachable tuning_step range.
	 */
	for (i = 0; i < host->tuning_step; i++) {
		sdr_set_field(host->base + tune_reg,
			      MSDC_PAD_TUNE_CMDRRDLY, i);
		mmc_send_tuning(mmc, opcode, &cmd_err);
		if (!cmd_err)
			internal_delay |= BIT_ULL(i);
	}
	dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay);
	internal_delay_phase = get_best_delay(host, internal_delay);
	sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY,
		      internal_delay_phase.final_phase);
skip_internal:
	dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
	return final_delay == 0xff ? -EIO : 0;
}

/*
 * HS400-specific CMD response tuning: sweeps the EMMC50 PAD_CMD_TUNE
 * RX_DLY3 delay line instead of the generic pad delay.
 */
static int hs400_tune_response(struct mmc_host *mmc, u32 opcode)
{
	struct msdc_host *host = mmc_priv(mmc);
	u32 cmd_delay = 0;
	struct msdc_delay_phase final_cmd_delay = { 0,};
	u8 final_delay;
	int cmd_err;
	int i, j;

	/* select EMMC50 PAD CMD tune */
	sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0));
	sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
	    mmc->ios.timing == MMC_TIMING_UHS_SDR104)
		sdr_set_field(host->base + MSDC_PAD_TUNE,
			      MSDC_PAD_TUNE_CMDRRDLY,
			      host->hs200_cmd_int_delay);

	if (host->hs400_cmd_resp_sel_rising)
		sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
	else
		sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);

	for (i = 0; i < PAD_DELAY_HALF; i++) {
		sdr_set_field(host->base + PAD_CMD_TUNE,
			      PAD_CMD_TUNE_RX_DLY3, i);
		/*
		 * Using the same parameters, it may sometimes pass the test,
		 * but sometimes it may fail. To make sure the parameters are
		 * more stable, we test each set of parameters 3 times.
		 */
		for (j = 0; j < 3; j++) {
			mmc_send_tuning(mmc, opcode, &cmd_err);
			if (!cmd_err) {
				cmd_delay |= BIT(i);
			} else {
				cmd_delay &= ~BIT(i);
				break;
			}
		}
	}
	final_cmd_delay = get_best_delay(host, cmd_delay);
	sdr_set_field(host->base + PAD_CMD_TUNE, PAD_CMD_TUNE_RX_DLY3,
		      final_cmd_delay.final_phase);
	final_delay = final_cmd_delay.final_phase;

	dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay);
	return final_delay == 0xff ? -EIO : 0;
}

/*
 * Tune DAT-line read sampling: scan all pad delays on the rising sample
 * edge first, fall back to the falling edge when the rising window is
 * too narrow, then lock in the centre of the best window.
 */
static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
{
	struct msdc_host *host = mmc_priv(mmc);
	u64 rise_delay = 0, fall_delay = 0;
	struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
	u8 final_delay, final_maxlen;
	int i, ret;

	sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
		      host->latch_ck);
	msdc_set_data_sample_edge(host, true);
	for (i = 0; i < host->tuning_step; i++) {
		msdc_set_data_delay(host, i);
		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (!ret)
			rise_delay |= BIT_ULL(i);
	}
	final_rise_delay = get_best_delay(host, rise_delay);
	/* if rising edge has enough margin, then do not scan falling edge */
	if (final_rise_delay.maxlen >= 12 ||
	    (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
		goto skip_fall;

	msdc_set_data_sample_edge(host, false);
	for (i = 0; i < host->tuning_step; i++) {
		msdc_set_data_delay(host, i);
		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (!ret)
			fall_delay |= BIT_ULL(i);
	}
	final_fall_delay = get_best_delay(host, fall_delay);

skip_fall:
	/* keep whichever edge yielded the longer passing window */
	final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
	if (final_maxlen == final_rise_delay.maxlen) {
		msdc_set_data_sample_edge(host, true);
		final_delay = final_rise_delay.final_phase;
	} else {
		msdc_set_data_sample_edge(host, false);
		final_delay = final_fall_delay.final_phase;
	}
	msdc_set_data_delay(host, final_delay);

	dev_dbg(host->dev, "Final data pad delay: %x\n", final_delay);
	return final_delay == 0xff ? -EIO : 0;
}

/*
 * MSDC IP which supports data tune + async fifo can do CMD/DAT tune
 * together, which can save the tuning time.
 */
static int msdc_tune_together(struct mmc_host *mmc, u32 opcode)
{
	struct msdc_host *host = mmc_priv(mmc);
	u64 rise_delay = 0, fall_delay = 0;
	struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,};
	u8 final_delay, final_maxlen;
	int i, ret;

	sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL,
		      host->latch_ck);

	/* rising-edge scan with CMD and DAT delays stepped in lockstep */
	sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
	msdc_set_data_sample_edge(host, true);
	for (i = 0; i < host->tuning_step; i++) {
		msdc_set_cmd_delay(host, i);
		msdc_set_data_delay(host, i);
		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (!ret)
			rise_delay |= BIT_ULL(i);
	}
	final_rise_delay = get_best_delay(host, rise_delay);
	/* if rising edge has enough margin, then do not scan falling edge */
	if (final_rise_delay.maxlen >= 12 ||
	    (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4))
		goto skip_fall;

	sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
	msdc_set_data_sample_edge(host, false);
	for (i = 0; i < host->tuning_step; i++) {
		msdc_set_cmd_delay(host, i);
		msdc_set_data_delay(host, i);
		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (!ret)
			fall_delay |= BIT_ULL(i);
	}
	final_fall_delay = get_best_delay(host, fall_delay);

skip_fall:
	final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
	if (final_maxlen == final_rise_delay.maxlen) {
		sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
		msdc_set_data_sample_edge(host, true);
		final_delay = final_rise_delay.final_phase;
	} else {
		sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
		msdc_set_data_sample_edge(host, false);
		final_delay = final_fall_delay.final_phase;
	}

	msdc_set_cmd_delay(host, final_delay);
	msdc_set_data_delay(host, final_delay);

	dev_dbg(host->dev, "Final pad delay: %x\n", final_delay);
	return final_delay == 0xff ? -EIO : 0;
}

/* mmc_host_ops::execute_tuning - dispatch to the tuning flavour this IP supports */
static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct msdc_host *host = mmc_priv(mmc);
	int ret;
	u32 tune_reg = host->dev_comp->pad_tune_reg;

	if (host->dev_comp->data_tune && host->dev_comp->async_fifo) {
		ret = msdc_tune_together(mmc, opcode);
		if (host->hs400_mode) {
			/* in HS400 mode, reset DAT sample edge and delay */
			msdc_set_data_sample_edge(host, true);
			msdc_set_data_delay(host, 0);
		}
		goto tune_done;
	}
	if (host->hs400_mode &&
	    host->dev_comp->hs400_tune)
		ret = hs400_tune_response(mmc, opcode);
	else
		ret = msdc_tune_response(mmc, opcode);
	if (ret == -EIO) {
		dev_err(host->dev, "Tune response fail!\n");
		return ret;
	}
	if (host->hs400_mode == false) {
		ret = msdc_tune_data(mmc, opcode);
		if (ret == -EIO)
			dev_err(host->dev, "Tune data fail!\n");
	}

tune_done:
	/* remember the winning settings so they can be re-applied after suspend */
	host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
	host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
	host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
	if (host->top_base) {
		host->saved_tune_para.emmc_top_control = readl(host->top_base +
				EMMC_TOP_CONTROL);
		host->saved_tune_para.emmc_top_cmd = readl(host->top_base +
				EMMC_TOP_CMD);
	}
	return ret;
}

/* mmc_host_ops::prepare_hs400_tuning - apply DT-provided DS delays for HS400 */
static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct msdc_host *host = mmc_priv(mmc);

	host->hs400_mode = true;

	if (host->top_base) {
		if (host->hs400_ds_dly3)
			sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
				      PAD_DS_DLY3, host->hs400_ds_dly3);
		/*
		 * NOTE(review): this whole-register write also overwrites the
		 * DLY3 field programmed just above when both DT properties are
		 * set - confirm the intended precedence.
		 */
		if (host->hs400_ds_delay)
			writel(host->hs400_ds_delay,
			       host->top_base + EMMC50_PAD_DS_TUNE);
	} else {
		if (host->hs400_ds_dly3)
			sdr_set_field(host->base + PAD_DS_TUNE,
				      PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
		if (host->hs400_ds_delay)
			writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
	}
	/* hs400 mode must set it to 0 */
	sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
	/* to improve read performance, set outstanding to 2 */
	sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2);

	return 0;
}

/*
 * mmc_host_ops::execute_hs400_tuning - sweep the DS (data strobe) DLY1
 * delay, using EXT_CSD reads as the pass/fail probe, then program the
 * centre of the passing window.
 */
static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card)
{
	struct msdc_host *host = mmc_priv(mmc);
	struct msdc_delay_phase dly1_delay;
	u32 val, result_dly1 = 0;
	u8 *ext_csd;
	int i, ret;

	if (host->top_base) {
		sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
			     PAD_DS_DLY_SEL);
		sdr_clr_bits(host->top_base + EMMC50_PAD_DS_TUNE,
			     PAD_DS_DLY2_SEL);
	} else {
		sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
		sdr_clr_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY2_SEL);
	}

	host->hs400_tuning = true;
	for (i = 0; i < PAD_DELAY_HALF; i++) {
		if (host->top_base)
			sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
				      PAD_DS_DLY1, i);
		else
			sdr_set_field(host->base + PAD_DS_TUNE,
				      PAD_DS_TUNE_DLY1, i);
		/* a successful EXT_CSD read marks this delay step as passing */
		ret = mmc_get_ext_csd(card, &ext_csd);
		if (!ret) {
			result_dly1 |= BIT(i);
			kfree(ext_csd);
		}
	}
	host->hs400_tuning = false;

	dly1_delay = get_best_delay(host, result_dly1);
	if (dly1_delay.maxlen == 0) {
		dev_err(host->dev, "Failed to get DLY1 delay!\n");
		goto fail;
	}
	if (host->top_base)
		sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
			      PAD_DS_DLY1, dly1_delay.final_phase);
	else
		sdr_set_field(host->base + PAD_DS_TUNE,
			      PAD_DS_TUNE_DLY1, dly1_delay.final_phase);

	if (host->top_base)
		val = readl(host->top_base + EMMC50_PAD_DS_TUNE);
	else
		val = readl(host->base + PAD_DS_TUNE);

	dev_info(host->dev, "Final PAD_DS_TUNE: 0x%x\n", val);

	return 0;

fail:
	dev_err(host->dev, "Failed to tuning DS pin delay!\n");
	return -EIO;
}

/* mmc_host_ops::card_hw_reset - pulse the reset bit in EMMC_IOCON */
static void msdc_hw_reset(struct mmc_host *mmc)
{
	struct msdc_host *host = mmc_priv(mmc);

	sdr_set_bits(host->base + EMMC_IOCON, 1);
	udelay(10); /* 10us is enough */
	sdr_clr_bits(host->base + EMMC_IOCON, 1);
}

/* mmc_host_ops::ack_sdio_irq - re-arm the SDIO interrupt under the host lock */
static void msdc_ack_sdio_irq(struct mmc_host *mmc)
{
	unsigned long flags;
	struct msdc_host *host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);
	__msdc_enable_sdio_irq(host, 1);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * mmc_host_ops::get_cd - is a card present?  Non-removable hosts always
 * report 1; otherwise use the CD GPIO, or the controller's internal
 * card-detect status bit.
 */
static int msdc_get_cd(struct mmc_host *mmc)
{
	struct msdc_host *host = mmc_priv(mmc);
	int val;

	if (mmc->caps & MMC_CAP_NONREMOVABLE)
		return 1;

	if (!host->internal_cd)
		return mmc_gpio_get_cd(mmc);

	val = readl(host->base + MSDC_PS) & MSDC_PS_CDSTS;
	if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
		return !!val;
	else
		return !val;
}

/* mmc_host_ops::hs400_enhanced_strobe - toggle the EMMC50 enhanced-strobe path */
static void msdc_hs400_enhanced_strobe(struct mmc_host *mmc,
				       struct mmc_ios *ios)
{
	struct msdc_host *host = mmc_priv(mmc);

	if (ios->enhanced_strobe) {
		msdc_prepare_hs400_tuning(mmc, ios);
		sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_PADCMD_LATCHCK, 1);
		sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_CMD_RESP_SEL, 1);
		sdr_set_field(host->base + EMMC50_CFG1, EMMC50_CFG1_DS_CFG, 1);

		sdr_clr_bits(host->base + CQHCI_SETTING, CQHCI_RD_CMD_WND_SEL);
		sdr_clr_bits(host->base + CQHCI_SETTING, CQHCI_WR_CMD_WND_SEL);
		sdr_clr_bits(host->base + EMMC51_CFG0, CMDQ_RDAT_CNT);
	} else {
		sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_PADCMD_LATCHCK, 0);
		sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_CMD_RESP_SEL, 0);
		sdr_set_field(host->base + EMMC50_CFG1, EMMC50_CFG1_DS_CFG, 0);

		sdr_set_bits(host->base + CQHCI_SETTING, CQHCI_RD_CMD_WND_SEL);
		sdr_set_bits(host->base + CQHCI_SETTING, CQHCI_WR_CMD_WND_SEL);
		sdr_set_field(host->base + EMMC51_CFG0, CMDQ_RDAT_CNT, 0xb4);
	}
}

/* Convert @timer_ns into a CQE Send-Status-Command Idle Timer (CIT) value */
static void msdc_cqe_cit_cal(struct msdc_host *host, u64 timer_ns)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 itcfmul;
	u64 hclk_freq, value;

	/*
	 * On MediaTek SoCs the MSDC controller's CQE uses msdc_hclk as ITCFVAL
	 * so we multiply/divide the HCLK frequency by ITCFMUL to calculate the
	 * Send Status Command Idle Timer (CIT) value.
	 */
	hclk_freq = (u64)clk_get_rate(host->h_clk);
	itcfmul = CQHCI_ITCFMUL(cqhci_readl(cq_host, CQHCI_CAP));
	switch (itcfmul) {
	case 0x0:
		do_div(hclk_freq, 1000);
		break;
	case 0x1:
		do_div(hclk_freq, 100);
		break;
	case 0x2:
		do_div(hclk_freq, 10);
		break;
	case 0x3:
		break;
	case 0x4:
		hclk_freq = hclk_freq * 10;
		break;
	default:
		/* unknown multiplier: fall back to a fixed default value */
		host->cq_ssc1_time = 0x40;
		return;
	}

	/* ticks = scaled frequency * timer_ns / 1e9 */
	value = hclk_freq * timer_ns;
	do_div(value, 1000000000);
	host->cq_ssc1_time = value;
}

/* cqhci_host_ops::enable - unmask the CQE irq, set timeouts and the CIT */
static void msdc_cqe_enable(struct mmc_host *mmc)
{
	struct msdc_host *host = mmc_priv(mmc);
	struct cqhci_host *cq_host = mmc->cqe_private;

	/* enable cmdq irq */
	writel(MSDC_INT_CMDQ, host->base + MSDC_INTEN);
	/* enable busy check */
	sdr_set_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
	/* default write data / busy timeout 20s */
	msdc_set_busy_timeout(host, 20
			      * 1000000000ULL, 0);
	/* default read data timeout 1s */
	msdc_set_timeout(host, 1000000000ULL, 0);

	/* Set the send status command idle timer */
	cqhci_writel(cq_host, host->cq_ssc1_time, CQHCI_SSC1);
}

/* cqhci_host_ops::disable - mask the CQE irq; on recovery also stop DMA */
static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct msdc_host *host = mmc_priv(mmc);
	unsigned int val = 0;

	/* disable cmdq irq */
	sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INT_CMDQ);
	/* disable busy check */
	sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);

	/* ack any pending interrupt (MSDC_INT is write-to-clear) */
	val = readl(host->base + MSDC_INT);
	writel(val, host->base + MSDC_INT);

	if (recovery) {
		sdr_set_field(host->base + MSDC_DMA_CTRL,
			      MSDC_DMA_CTRL_STOP, 1);
		/* wait up to 3ms for the DMA engine to acknowledge the stop */
		if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CTRL, val,
					       !(val & MSDC_DMA_CTRL_STOP), 1, 3000)))
			return;
		if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val,
					       !(val & MSDC_DMA_CFG_STS), 1, 3000)))
			return;
		msdc_reset_hw(host);
	}
}

/* cqhci_host_ops::pre_enable - set CQHCI_ENABLE before cqhci starts */
static void msdc_cqe_pre_enable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;

	reg = cqhci_readl(cq_host, CQHCI_CFG);
	reg |= CQHCI_ENABLE;
	cqhci_writel(cq_host, reg, CQHCI_CFG);
}

/* cqhci_host_ops::post_disable - clear CQHCI_ENABLE after cqhci stops */
static void msdc_cqe_post_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;

	reg = cqhci_readl(cq_host, CQHCI_CFG);
	reg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, reg, CQHCI_CFG);
}

/* Host operations exposed to the MMC core */
static const struct mmc_host_ops mt_msdc_ops = {
	.post_req = msdc_post_req,
	.pre_req = msdc_pre_req,
	.request = msdc_ops_request,
	.set_ios = msdc_ops_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = msdc_get_cd,
	.hs400_enhanced_strobe = msdc_hs400_enhanced_strobe,
	.enable_sdio_irq = msdc_enable_sdio_irq,
	.ack_sdio_irq = msdc_ack_sdio_irq,
	.start_signal_voltage_switch = msdc_ops_switch_volt,
	.card_busy = msdc_card_busy,
	.execute_tuning = msdc_execute_tuning,
	.prepare_hs400_tuning = msdc_prepare_hs400_tuning,
	.execute_hs400_tuning = msdc_execute_hs400_tuning,
	.card_hw_reset = msdc_hw_reset,
};

/* Callbacks used by the CQHCI core */
static const struct cqhci_host_ops msdc_cmdq_ops = {
	.enable = msdc_cqe_enable,
	.disable = msdc_cqe_disable,
	.pre_enable = msdc_cqe_pre_enable,
	.post_disable = msdc_cqe_post_disable,
};

/* Read optional MSDC tuning/CQE properties from the device tree */
static void msdc_of_property_parse(struct platform_device *pdev,
				   struct msdc_host *host)
{
	struct mmc_host *mmc = mmc_from_priv(host);

	of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck",
			     &host->latch_ck);

	of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
			     &host->hs400_ds_delay);

	of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-ds-dly3",
			     &host->hs400_ds_dly3);

	of_property_read_u32(pdev->dev.of_node, "mediatek,hs200-cmd-int-delay",
			     &host->hs200_cmd_int_delay);

	of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-cmd-int-delay",
			     &host->hs400_cmd_int_delay);

	if (of_property_read_bool(pdev->dev.of_node,
				  "mediatek,hs400-cmd-resp-sel-rising"))
		host->hs400_cmd_resp_sel_rising = true;
	else
		host->hs400_cmd_resp_sel_rising = false;

	/* default tuning range: full range for non-eMMC hosts, half otherwise */
	if (of_property_read_u32(pdev->dev.of_node, "mediatek,tuning-step",
				 &host->tuning_step)) {
		if (mmc->caps2 & MMC_CAP2_NO_MMC)
			host->tuning_step = PAD_DELAY_FULL;
		else
			host->tuning_step = PAD_DELAY_HALF;
	}

	if (of_property_read_bool(pdev->dev.of_node,
				  "supports-cqe"))
		host->cqhci = true;
	else
		host->cqhci = false;
}

/* Acquire the controller clocks; several of them are optional */
static int msdc_of_clock_parse(struct platform_device *pdev,
			       struct msdc_host *host)
{
	int ret;

	host->src_clk = devm_clk_get(&pdev->dev, "source");
	if (IS_ERR(host->src_clk))
		return PTR_ERR(host->src_clk);

	host->h_clk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(host->h_clk))
		return PTR_ERR(host->h_clk);

	host->bus_clk = devm_clk_get_optional(&pdev->dev, "bus_clk");
	if (IS_ERR(host->bus_clk))
		host->bus_clk = NULL;

	/*source clock control gate is optional clock*/
	host->src_clk_cg = devm_clk_get_optional(&pdev->dev, "source_cg");
	if (IS_ERR(host->src_clk_cg))
		return PTR_ERR(host->src_clk_cg);

	/*
	 * Fallback for legacy device-trees: src_clk and HCLK use the same
	 * bit to control gating but they are parented to a different mux,
	 * hence if our intention is to gate only the source, required
	 * during a clk mode switch to avoid hw hangs, we need to gate
	 * its parent (specified as a different clock only on new DTs).
	 */
	if (!host->src_clk_cg) {
		host->src_clk_cg = clk_get_parent(host->src_clk);
		if (IS_ERR(host->src_clk_cg))
			return PTR_ERR(host->src_clk_cg);
	}

	/* If present, always enable for this clock gate */
	host->sys_clk_cg = devm_clk_get_optional_enabled(&pdev->dev, "sys_cg");
	if (IS_ERR(host->sys_clk_cg))
		host->sys_clk_cg = NULL;

	host->bulk_clks[0].id = "pclk_cg";
	host->bulk_clks[1].id = "axi_cg";
	host->bulk_clks[2].id = "ahb_cg";
	ret = devm_clk_bulk_get_optional(&pdev->dev, MSDC_NR_CLOCKS,
					 host->bulk_clks);
	if (ret) {
		dev_err(&pdev->dev, "Cannot get pclk/axi/ahb clock gates\n");
		return ret;
	}

	return 0;
}

/* Probe: map resources, parse DT, set up DMA descriptors, register the host */
static int msdc_drv_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct msdc_host *host;
	int ret;

	if (!pdev->dev.of_node) {
		dev_err(&pdev->dev, "No DT found\n");
		return -EINVAL;
	}

	/* Allocate MMC host for this device */
	mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct msdc_host));
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	ret = mmc_of_parse(mmc);
	if (ret)
		return ret;

	host->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(host->base))
		return PTR_ERR(host->base);

	host->dev_comp = of_device_get_match_data(&pdev->dev);

	if (host->dev_comp->needs_top_base) {
		host->top_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(host->top_base))
			return PTR_ERR(host->top_base);
	}

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;

	ret = msdc_of_clock_parse(pdev, host);
	if (ret)
		return ret;

	host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev,
								"hrst");
	if (IS_ERR(host->reset))
		return PTR_ERR(host->reset);

	/* only eMMC has crypto property */
	if (!(mmc->caps2 & MMC_CAP2_NO_MMC)) {
		host->crypto_clk = devm_clk_get_optional(&pdev->dev, "crypto");
		if (IS_ERR(host->crypto_clk))
			return PTR_ERR(host->crypto_clk);
		else if (host->crypto_clk)
			mmc->caps2 |= MMC_CAP2_CRYPTO;
	}

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0)
		return host->irq;

	host->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(host->pinctrl))
		return dev_err_probe(&pdev->dev, PTR_ERR(host->pinctrl),
				     "Cannot find pinctrl");

	host->pins_default = pinctrl_lookup_state(host->pinctrl, "default");
	if (IS_ERR(host->pins_default)) {
		dev_err(&pdev->dev, "Cannot find pinctrl default!\n");
		return PTR_ERR(host->pins_default);
	}

	host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
	if (IS_ERR(host->pins_uhs)) {
		dev_err(&pdev->dev, "Cannot find pinctrl uhs!\n");
		return PTR_ERR(host->pins_uhs);
	}

	/* Support for SDIO eint irq ? */
	if ((mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) && (mmc->pm_caps & MMC_PM_KEEP_POWER)) {
		host->eint_irq = platform_get_irq_byname_optional(pdev, "sdio_wakeup");
		if (host->eint_irq > 0) {
			host->pins_eint = pinctrl_lookup_state(host->pinctrl, "state_eint");
			if (IS_ERR(host->pins_eint)) {
				dev_err(&pdev->dev, "Cannot find pinctrl eint!\n");
				/* wakeup pin state missing: continue without it */
				host->pins_eint = NULL;
			} else {
				device_init_wakeup(&pdev->dev, true);
			}
		}
	}

	msdc_of_property_parse(pdev, host);

	host->dev = &pdev->dev;
	host->src_clk_freq = clk_get_rate(host->src_clk);
	/* Set host parameters to mmc */
	mmc->ops = &mt_msdc_ops;
	/* minimum bus clock given the divider width of this IP generation */
	if (host->dev_comp->clk_div_bits == 8)
		mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255);
	else
		mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);

	if (!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
	    !mmc_can_gpio_cd(mmc) &&
	    host->dev_comp->use_internal_cd) {
		/*
		 * Is removable but no GPIO declared, so
		 * use internal functionality.
		 */
		host->internal_cd = true;
	}

	if (mmc->caps & MMC_CAP_SDIO_IRQ)
		mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	mmc->caps |= MMC_CAP_CMD23;
	if (host->cqhci)
		mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
	/* MMC core transfer sizes tunable parameters */
	mmc->max_segs = MAX_BD_NUM;
	if (host->dev_comp->support_64g)
		mmc->max_seg_size = BDMA_DESC_BUFLEN_EXT;
	else
		mmc->max_seg_size = BDMA_DESC_BUFLEN;
	mmc->max_blk_size = 2048;
	mmc->max_req_size = 512 * 1024;
	mmc->max_blk_count = mmc->max_req_size / 512;
	if (host->dev_comp->support_64g)
		host->dma_mask = DMA_BIT_MASK(36);
	else
		host->dma_mask = DMA_BIT_MASK(32);
	mmc_dev(mmc)->dma_mask = &host->dma_mask;

	host->timeout_clks = 3 * 1048576;
	/* GPD/BD rings are coherent: the controller walks them via DMA */
	host->dma.gpd = dma_alloc_coherent(&pdev->dev,
					   2 * sizeof(struct mt_gpdma_desc),
					   &host->dma.gpd_addr, GFP_KERNEL);
	host->dma.bd = dma_alloc_coherent(&pdev->dev,
					  MAX_BD_NUM * sizeof(struct mt_bdma_desc),
					  &host->dma.bd_addr, GFP_KERNEL);
	if (!host->dma.gpd || !host->dma.bd) {
		ret = -ENOMEM;
		goto release_mem;
	}
	msdc_init_gpd_bd(host, &host->dma);
	INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
	spin_lock_init(&host->lock);

	platform_set_drvdata(pdev, mmc);
	ret = msdc_ungate_clock(host);
	if (ret) {
		dev_err(&pdev->dev, "Cannot ungate clocks!\n");
		goto release_clk;
	}
	msdc_init_hw(host);

	if (mmc->caps2 & MMC_CAP2_CQE) {
		host->cq_host = devm_kzalloc(mmc->parent,
					     sizeof(*host->cq_host),
					     GFP_KERNEL);
		if (!host->cq_host) {
			ret = -ENOMEM;
			goto release;
		}
		host->cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
		host->cq_host->mmio = host->base + 0x800;
		host->cq_host->ops = &msdc_cmdq_ops;
		ret = cqhci_init(host->cq_host, mmc, true);
		if (ret)
			goto release;
		mmc->max_segs = 128;
		/* cqhci 16bit length */
		/* 0 size, means 65536 so we don't have to -1 here */
		mmc->max_seg_size = 64 * 1024;
		/* Reduce CIT to 0x40 that corresponds to 2.35us */
		msdc_cqe_cit_cal(host, 2350);
	} else if (mmc->caps2 & MMC_CAP2_NO_SDIO) {
		/* Use HSQ on eMMC/SD (but not on SDIO) if HW CQE not supported */
		struct mmc_hsq *hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
		if (!hsq) {
			ret = -ENOMEM;
			goto release;
		}

		ret = mmc_hsq_init(hsq, mmc);
		if (ret)
			goto release;

		host->hsq_en = true;
	}

	ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
			       IRQF_TRIGGER_NONE, pdev->name, host);
	if (ret)
		goto release;

	pm_runtime_set_active(host->dev);
	pm_runtime_set_autosuspend_delay(host->dev, MTK_MMC_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(host->dev);
	pm_runtime_enable(host->dev);
	ret = mmc_add_host(mmc);

	if (ret)
		goto end;

	return 0;
end:
	pm_runtime_disable(host->dev);
release:
	msdc_deinit_hw(host);
release_clk:
	msdc_gate_clock(host);
	platform_set_drvdata(pdev, NULL);
release_mem:
	device_init_wakeup(&pdev->dev, false);
	if (host->dma.gpd)
		dma_free_coherent(&pdev->dev,
				  2 * sizeof(struct mt_gpdma_desc),
				  host->dma.gpd, host->dma.gpd_addr);
	if (host->dma.bd)
		dma_free_coherent(&pdev->dev,
				  MAX_BD_NUM * sizeof(struct mt_bdma_desc),
				  host->dma.bd, host->dma.bd_addr);
	return ret;
}

/* Remove: tear down in reverse probe order while the device is powered up */
static void msdc_drv_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct msdc_host *host;

	mmc = platform_get_drvdata(pdev);
	host = mmc_priv(mmc);

	pm_runtime_get_sync(host->dev);

	platform_set_drvdata(pdev, NULL);
	mmc_remove_host(mmc);
	msdc_deinit_hw(host);
	msdc_gate_clock(host);

	pm_runtime_disable(host->dev);
	pm_runtime_put_noidle(host->dev);
	dma_free_coherent(&pdev->dev,
			2 * sizeof(struct mt_gpdma_desc),
			host->dma.gpd, host->dma.gpd_addr);
	dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
			host->dma.bd, host->dma.bd_addr);
	device_init_wakeup(&pdev->dev, false);
}

/*
 * Snapshot the controller registers that are lost while the clocks are
 * gated.  Called from msdc_runtime_suspend() before msdc_gate_clock();
 * msdc_restore_reg() writes the same set back on resume.
 */
static void msdc_save_reg(struct msdc_host *host)
{
	u32 tune_reg = host->dev_comp->pad_tune_reg;

	host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
	host->save_para.iocon = readl(host->base + MSDC_IOCON);
	host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
	host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
	host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
	host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2);
	host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
	host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
	host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
	host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3);
	host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG);
	/* Pad tuning lives in the "top" block on SoCs that have one. */
	if (host->top_base) {
		host->save_para.emmc_top_control =
			readl(host->top_base + EMMC_TOP_CONTROL);
		host->save_para.emmc_top_cmd =
			readl(host->top_base + EMMC_TOP_CMD);
		host->save_para.emmc50_pad_ds_tune =
			readl(host->top_base + EMMC50_PAD_DS_TUNE);
		host->save_para.loop_test_control =
			readl(host->top_base + LOOP_TEST_CONTROL);
	} else {
		host->save_para.pad_tune = readl(host->base + tune_reg);
	}
}

/* Write back the register snapshot taken by msdc_save_reg(). */
static void msdc_restore_reg(struct msdc_host *host)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	u32 tune_reg = host->dev_comp->pad_tune_reg;

	/*
	 * Toggle (clear then set) the new TX path enable before restoring;
	 * presumably this resets the path logic - confirm with HW docs.
	 */
	if (host->dev_comp->support_new_tx) {
		sdr_clr_bits(host->base + SDC_ADV_CFG0, SDC_NEW_TX_EN);
		sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_NEW_TX_EN);
	}
	if (host->dev_comp->support_new_rx) {
		/* Same clear-then-set toggle for the new RX path select. */
		sdr_clr_bits(host->base + MSDC_NEW_RX_CFG, MSDC_NEW_RX_PATH_SEL);
		sdr_set_bits(host->base + MSDC_NEW_RX_CFG, MSDC_NEW_RX_PATH_SEL);
	}

	writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
	writel(host->save_para.iocon, host->base + MSDC_IOCON);
	writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
	writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
	writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
	writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2);
	writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
	writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE);
	writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
	writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3);
	writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG);
	/* Pad tuning registers: "top" block when present, legacy reg otherwise. */
	if (host->top_base) {
		writel(host->save_para.emmc_top_control,
		       host->top_base + EMMC_TOP_CONTROL);
		writel(host->save_para.emmc_top_cmd,
		       host->top_base + EMMC_TOP_CMD);
		writel(host->save_para.emmc50_pad_ds_tune,
		       host->top_base + EMMC50_PAD_DS_TUNE);
		writel(host->save_para.loop_test_control,
		       host->top_base + LOOP_TEST_CONTROL);
	} else {
		writel(host->save_para.pad_tune, host->base + tune_reg);
	}

	/* Re-arm the SDIO card interrupt that runtime suspend turned off. */
	if (sdio_irq_claimed(mmc))
		__msdc_enable_sdio_irq(host, 1);
}

/*
 * Runtime suspend: quiesce HSQ, snapshot registers, park the SDIO IRQ
 * (switching pins to the EINT wakeup state when available), then gate
 * the clocks.  msdc_runtime_resume() below undoes this in reverse order.
 */
static int __maybe_unused msdc_runtime_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct msdc_host *host = mmc_priv(mmc);

	if (host->hsq_en)
		mmc_hsq_suspend(mmc);

	msdc_save_reg(host);

	if (sdio_irq_claimed(mmc)) {
		if (host->pins_eint) {
			/* Hand wakeup duty to the EINT pin configuration. */
			disable_irq(host->irq);
			pinctrl_select_state(host->pinctrl, host->pins_eint);
		}

		__msdc_enable_sdio_irq(host, 0);
	}
	msdc_gate_clock(host);
	return 0;
}

static int __maybe_unused
msdc_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct msdc_host *host = mmc_priv(mmc);
	int ret;

	/* Clocks first: register restore below needs a running controller. */
	ret = msdc_ungate_clock(host);
	if (ret)
		return ret;

	msdc_restore_reg(host);

	/* Switch pins back from the EINT wakeup state and re-enable the IRQ. */
	if (sdio_irq_claimed(mmc) && host->pins_eint) {
		pinctrl_select_state(host->pinctrl, host->pins_uhs);
		enable_irq(host->irq);
	}

	if (host->hsq_en)
		mmc_hsq_resume(mmc);

	return 0;
}

/* System suspend: halt CQHCI if active, then defer to runtime PM. */
static int __maybe_unused msdc_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct msdc_host *host = mmc_priv(mmc);
	int ret;
	u32 val;

	if (mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(mmc);
		if (ret)
			return ret;
		/*
		 * Read the pending interrupt status and write it back -
		 * assumed write-1-to-clear - so no stale MSDC interrupts
		 * survive into suspend.
		 */
		val = readl(host->base + MSDC_INT);
		writel(val, host->base + MSDC_INT);
	}

	/*
	 * Take a runtime PM usage reference here, otherwise
	 * dev->power.needs_force_resume is not set to 1 and a later
	 * pm_runtime_force_resume() would return without resuming the device.
	 */
	if (sdio_irq_claimed(mmc) && host->pins_eint)
		pm_runtime_get_noresume(dev);

	return pm_runtime_force_suspend(dev);
}

/* System resume: counterpart of msdc_suspend(). */
static int __maybe_unused msdc_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct msdc_host *host = mmc_priv(mmc);

	/* Drop the usage reference taken in msdc_suspend(). */
	if (sdio_irq_claimed(mmc) && host->pins_eint)
		pm_runtime_put_noidle(dev);

	return pm_runtime_force_resume(dev);
}

/*
 * System sleep is routed through the runtime PM callbacks via
 * pm_runtime_force_suspend()/pm_runtime_force_resume() in
 * msdc_suspend()/msdc_resume() above.
 */
static const struct dev_pm_ops msdc_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume)
	SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
};

static struct platform_driver mt_msdc_driver = {
	.probe = msdc_drv_probe,
	.remove = msdc_drv_remove,
	.driver = {
		.name = "mtk-msdc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = msdc_of_ids,
		.pm = &msdc_dev_pm_ops,
	},
};

module_platform_driver(mt_msdc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek SD/MMC Card Driver");