1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2014-2015, 2022 MediaTek Inc. 4 * Author: Chaotian.Jing <chaotian.jing@mediatek.com> 5 */ 6 7 #include <linux/module.h> 8 #include <linux/bitops.h> 9 #include <linux/clk.h> 10 #include <linux/delay.h> 11 #include <linux/dma-mapping.h> 12 #include <linux/iopoll.h> 13 #include <linux/ioport.h> 14 #include <linux/irq.h> 15 #include <linux/of.h> 16 #include <linux/pinctrl/consumer.h> 17 #include <linux/platform_device.h> 18 #include <linux/pm.h> 19 #include <linux/pm_runtime.h> 20 #include <linux/pm_wakeirq.h> 21 #include <linux/regulator/consumer.h> 22 #include <linux/slab.h> 23 #include <linux/spinlock.h> 24 #include <linux/interrupt.h> 25 #include <linux/reset.h> 26 27 #include <linux/mmc/card.h> 28 #include <linux/mmc/core.h> 29 #include <linux/mmc/host.h> 30 #include <linux/mmc/mmc.h> 31 #include <linux/mmc/sd.h> 32 #include <linux/mmc/sdio.h> 33 #include <linux/mmc/slot-gpio.h> 34 35 #include "cqhci.h" 36 #include "mmc_hsq.h" 37 38 #define MAX_BD_NUM 1024 39 #define MSDC_NR_CLOCKS 3 40 41 /*--------------------------------------------------------------------------*/ 42 /* Common Definition */ 43 /*--------------------------------------------------------------------------*/ 44 #define MSDC_BUS_1BITS 0x0 45 #define MSDC_BUS_4BITS 0x1 46 #define MSDC_BUS_8BITS 0x2 47 48 #define MSDC_BURST_64B 0x6 49 50 /*--------------------------------------------------------------------------*/ 51 /* Register Offset */ 52 /*--------------------------------------------------------------------------*/ 53 #define MSDC_CFG 0x0 54 #define MSDC_IOCON 0x04 55 #define MSDC_PS 0x08 56 #define MSDC_INT 0x0c 57 #define MSDC_INTEN 0x10 58 #define MSDC_FIFOCS 0x14 59 #define SDC_CFG 0x30 60 #define SDC_CMD 0x34 61 #define SDC_ARG 0x38 62 #define SDC_STS 0x3c 63 #define SDC_RESP0 0x40 64 #define SDC_RESP1 0x44 65 #define SDC_RESP2 0x48 66 #define SDC_RESP3 0x4c 67 #define SDC_BLK_NUM 0x50 68 #define SDC_ADV_CFG0 0x64 69 #define 
MSDC_NEW_RX_CFG 0x68 70 #define EMMC_IOCON 0x7c 71 #define SDC_ACMD_RESP 0x80 72 #define DMA_SA_H4BIT 0x8c 73 #define MSDC_DMA_SA 0x90 74 #define MSDC_DMA_CTRL 0x98 75 #define MSDC_DMA_CFG 0x9c 76 #define MSDC_PATCH_BIT 0xb0 77 #define MSDC_PATCH_BIT1 0xb4 78 #define MSDC_PATCH_BIT2 0xb8 79 #define MSDC_PAD_TUNE 0xec 80 #define MSDC_PAD_TUNE0 0xf0 81 #define PAD_DS_TUNE 0x188 82 #define PAD_CMD_TUNE 0x18c 83 #define EMMC51_CFG0 0x204 84 #define EMMC50_CFG0 0x208 85 #define EMMC50_CFG1 0x20c 86 #define EMMC50_CFG3 0x220 87 #define SDC_FIFO_CFG 0x228 88 #define CQHCI_SETTING 0x7fc 89 90 /*--------------------------------------------------------------------------*/ 91 /* Top Pad Register Offset */ 92 /*--------------------------------------------------------------------------*/ 93 #define EMMC_TOP_CONTROL 0x00 94 #define EMMC_TOP_CMD 0x04 95 #define EMMC50_PAD_DS_TUNE 0x0c 96 #define LOOP_TEST_CONTROL 0x30 97 98 /*--------------------------------------------------------------------------*/ 99 /* Register Mask */ 100 /*--------------------------------------------------------------------------*/ 101 102 /* MSDC_CFG mask */ 103 #define MSDC_CFG_MODE BIT(0) /* RW */ 104 #define MSDC_CFG_CKPDN BIT(1) /* RW */ 105 #define MSDC_CFG_RST BIT(2) /* RW */ 106 #define MSDC_CFG_PIO BIT(3) /* RW */ 107 #define MSDC_CFG_CKDRVEN BIT(4) /* RW */ 108 #define MSDC_CFG_BV18SDT BIT(5) /* RW */ 109 #define MSDC_CFG_BV18PSS BIT(6) /* R */ 110 #define MSDC_CFG_CKSTB BIT(7) /* R */ 111 #define MSDC_CFG_CKDIV GENMASK(15, 8) /* RW */ 112 #define MSDC_CFG_CKMOD GENMASK(17, 16) /* RW */ 113 #define MSDC_CFG_HS400_CK_MODE BIT(18) /* RW */ 114 #define MSDC_CFG_HS400_CK_MODE_EXTRA BIT(22) /* RW */ 115 #define MSDC_CFG_CKDIV_EXTRA GENMASK(19, 8) /* RW */ 116 #define MSDC_CFG_CKMOD_EXTRA GENMASK(21, 20) /* RW */ 117 118 /* MSDC_IOCON mask */ 119 #define MSDC_IOCON_SDR104CKS BIT(0) /* RW */ 120 #define MSDC_IOCON_RSPL BIT(1) /* RW */ 121 #define MSDC_IOCON_DSPL BIT(2) /* RW */ 122 #define 
MSDC_IOCON_DDLSEL BIT(3) /* RW */ 123 #define MSDC_IOCON_DDR50CKD BIT(4) /* RW */ 124 #define MSDC_IOCON_DSPLSEL BIT(5) /* RW */ 125 #define MSDC_IOCON_W_DSPL BIT(8) /* RW */ 126 #define MSDC_IOCON_D0SPL BIT(16) /* RW */ 127 #define MSDC_IOCON_D1SPL BIT(17) /* RW */ 128 #define MSDC_IOCON_D2SPL BIT(18) /* RW */ 129 #define MSDC_IOCON_D3SPL BIT(19) /* RW */ 130 #define MSDC_IOCON_D4SPL BIT(20) /* RW */ 131 #define MSDC_IOCON_D5SPL BIT(21) /* RW */ 132 #define MSDC_IOCON_D6SPL BIT(22) /* RW */ 133 #define MSDC_IOCON_D7SPL BIT(23) /* RW */ 134 #define MSDC_IOCON_RISCSZ GENMASK(25, 24) /* RW */ 135 136 /* MSDC_PS mask */ 137 #define MSDC_PS_CDEN BIT(0) /* RW */ 138 #define MSDC_PS_CDSTS BIT(1) /* R */ 139 #define MSDC_PS_CDDEBOUNCE GENMASK(15, 12) /* RW */ 140 #define MSDC_PS_DAT GENMASK(23, 16) /* R */ 141 #define MSDC_PS_DATA1 BIT(17) /* R */ 142 #define MSDC_PS_CMD BIT(24) /* R */ 143 #define MSDC_PS_WP BIT(31) /* R */ 144 145 /* MSDC_INT mask */ 146 #define MSDC_INT_MMCIRQ BIT(0) /* W1C */ 147 #define MSDC_INT_CDSC BIT(1) /* W1C */ 148 #define MSDC_INT_ACMDRDY BIT(3) /* W1C */ 149 #define MSDC_INT_ACMDTMO BIT(4) /* W1C */ 150 #define MSDC_INT_ACMDCRCERR BIT(5) /* W1C */ 151 #define MSDC_INT_DMAQ_EMPTY BIT(6) /* W1C */ 152 #define MSDC_INT_SDIOIRQ BIT(7) /* W1C */ 153 #define MSDC_INT_CMDRDY BIT(8) /* W1C */ 154 #define MSDC_INT_CMDTMO BIT(9) /* W1C */ 155 #define MSDC_INT_RSPCRCERR BIT(10) /* W1C */ 156 #define MSDC_INT_CSTA BIT(11) /* R */ 157 #define MSDC_INT_XFER_COMPL BIT(12) /* W1C */ 158 #define MSDC_INT_DXFER_DONE BIT(13) /* W1C */ 159 #define MSDC_INT_DATTMO BIT(14) /* W1C */ 160 #define MSDC_INT_DATCRCERR BIT(15) /* W1C */ 161 #define MSDC_INT_ACMD19_DONE BIT(16) /* W1C */ 162 #define MSDC_INT_DMA_BDCSERR BIT(17) /* W1C */ 163 #define MSDC_INT_DMA_GPDCSERR BIT(18) /* W1C */ 164 #define MSDC_INT_DMA_PROTECT BIT(19) /* W1C */ 165 #define MSDC_INT_CMDQ BIT(28) /* W1C */ 166 167 /* MSDC_INTEN mask */ 168 #define MSDC_INTEN_MMCIRQ BIT(0) /* RW */ 169 #define 
MSDC_INTEN_CDSC BIT(1) /* RW */ 170 #define MSDC_INTEN_ACMDRDY BIT(3) /* RW */ 171 #define MSDC_INTEN_ACMDTMO BIT(4) /* RW */ 172 #define MSDC_INTEN_ACMDCRCERR BIT(5) /* RW */ 173 #define MSDC_INTEN_DMAQ_EMPTY BIT(6) /* RW */ 174 #define MSDC_INTEN_SDIOIRQ BIT(7) /* RW */ 175 #define MSDC_INTEN_CMDRDY BIT(8) /* RW */ 176 #define MSDC_INTEN_CMDTMO BIT(9) /* RW */ 177 #define MSDC_INTEN_RSPCRCERR BIT(10) /* RW */ 178 #define MSDC_INTEN_CSTA BIT(11) /* RW */ 179 #define MSDC_INTEN_XFER_COMPL BIT(12) /* RW */ 180 #define MSDC_INTEN_DXFER_DONE BIT(13) /* RW */ 181 #define MSDC_INTEN_DATTMO BIT(14) /* RW */ 182 #define MSDC_INTEN_DATCRCERR BIT(15) /* RW */ 183 #define MSDC_INTEN_ACMD19_DONE BIT(16) /* RW */ 184 #define MSDC_INTEN_DMA_BDCSERR BIT(17) /* RW */ 185 #define MSDC_INTEN_DMA_GPDCSERR BIT(18) /* RW */ 186 #define MSDC_INTEN_DMA_PROTECT BIT(19) /* RW */ 187 188 /* MSDC_FIFOCS mask */ 189 #define MSDC_FIFOCS_RXCNT GENMASK(7, 0) /* R */ 190 #define MSDC_FIFOCS_TXCNT GENMASK(23, 16) /* R */ 191 #define MSDC_FIFOCS_CLR BIT(31) /* RW */ 192 193 /* SDC_CFG mask */ 194 #define SDC_CFG_SDIOINTWKUP BIT(0) /* RW */ 195 #define SDC_CFG_INSWKUP BIT(1) /* RW */ 196 #define SDC_CFG_WRDTOC GENMASK(14, 2) /* RW */ 197 #define SDC_CFG_BUSWIDTH GENMASK(17, 16) /* RW */ 198 #define SDC_CFG_SDIO BIT(19) /* RW */ 199 #define SDC_CFG_SDIOIDE BIT(20) /* RW */ 200 #define SDC_CFG_INTATGAP BIT(21) /* RW */ 201 #define SDC_CFG_DTOC GENMASK(31, 24) /* RW */ 202 203 /* SDC_STS mask */ 204 #define SDC_STS_SDCBUSY BIT(0) /* RW */ 205 #define SDC_STS_CMDBUSY BIT(1) /* RW */ 206 #define SDC_STS_SWR_COMPL BIT(31) /* RW */ 207 208 /* SDC_ADV_CFG0 mask */ 209 #define SDC_DAT1_IRQ_TRIGGER BIT(19) /* RW */ 210 #define SDC_RX_ENHANCE_EN BIT(20) /* RW */ 211 #define SDC_NEW_TX_EN BIT(31) /* RW */ 212 213 /* MSDC_NEW_RX_CFG mask */ 214 #define MSDC_NEW_RX_PATH_SEL BIT(0) /* RW */ 215 216 /* DMA_SA_H4BIT mask */ 217 #define DMA_ADDR_HIGH_4BIT GENMASK(3, 0) /* RW */ 218 219 /* MSDC_DMA_CTRL mask */ 220 
#define MSDC_DMA_CTRL_START BIT(0) /* W */ 221 #define MSDC_DMA_CTRL_STOP BIT(1) /* W */ 222 #define MSDC_DMA_CTRL_RESUME BIT(2) /* W */ 223 #define MSDC_DMA_CTRL_MODE BIT(8) /* RW */ 224 #define MSDC_DMA_CTRL_LASTBUF BIT(10) /* RW */ 225 #define MSDC_DMA_CTRL_BRUSTSZ GENMASK(14, 12) /* RW */ 226 227 /* MSDC_DMA_CFG mask */ 228 #define MSDC_DMA_CFG_STS BIT(0) /* R */ 229 #define MSDC_DMA_CFG_DECSEN BIT(1) /* RW */ 230 #define MSDC_DMA_CFG_AHBHPROT2 BIT(9) /* RW */ 231 #define MSDC_DMA_CFG_ACTIVEEN BIT(13) /* RW */ 232 #define MSDC_DMA_CFG_CS12B16B BIT(16) /* RW */ 233 234 /* MSDC_PATCH_BIT mask */ 235 #define MSDC_PATCH_BIT_ODDSUPP BIT(1) /* RW */ 236 #define MSDC_PATCH_BIT_RD_DAT_SEL BIT(3) /* RW */ 237 #define MSDC_INT_DAT_LATCH_CK_SEL GENMASK(9, 7) 238 #define MSDC_CKGEN_MSDC_DLY_SEL GENMASK(14, 10) 239 #define MSDC_PATCH_BIT_IODSSEL BIT(16) /* RW */ 240 #define MSDC_PATCH_BIT_IOINTSEL BIT(17) /* RW */ 241 #define MSDC_PATCH_BIT_BUSYDLY GENMASK(21, 18) /* RW */ 242 #define MSDC_PATCH_BIT_WDOD GENMASK(25, 22) /* RW */ 243 #define MSDC_PATCH_BIT_IDRTSEL BIT(26) /* RW */ 244 #define MSDC_PATCH_BIT_CMDFSEL BIT(27) /* RW */ 245 #define MSDC_PATCH_BIT_INTDLSEL BIT(28) /* RW */ 246 #define MSDC_PATCH_BIT_SPCPUSH BIT(29) /* RW */ 247 #define MSDC_PATCH_BIT_DECRCTMO BIT(30) /* RW */ 248 249 #define MSDC_PATCH_BIT1_CMDTA GENMASK(5, 3) /* RW */ 250 #define MSDC_PB1_BUSY_CHECK_SEL BIT(7) /* RW */ 251 #define MSDC_PATCH_BIT1_STOP_DLY GENMASK(11, 8) /* RW */ 252 253 #define MSDC_PATCH_BIT2_CFGRESP BIT(15) /* RW */ 254 #define MSDC_PATCH_BIT2_CFGCRCSTS BIT(28) /* RW */ 255 #define MSDC_PB2_SUPPORT_64G BIT(1) /* RW */ 256 #define MSDC_PB2_RESPWAIT GENMASK(3, 2) /* RW */ 257 #define MSDC_PB2_RESPSTSENSEL GENMASK(18, 16) /* RW */ 258 #define MSDC_PB2_POP_EN_CNT GENMASK(23, 20) /* RW */ 259 #define MSDC_PB2_CFGCRCSTSEDGE BIT(25) /* RW */ 260 #define MSDC_PB2_CRCSTSENSEL GENMASK(31, 29) /* RW */ 261 262 #define MSDC_PAD_TUNE_DATWRDLY GENMASK(4, 0) /* RW */ 263 #define 
MSDC_PAD_TUNE_DATRRDLY GENMASK(12, 8) /* RW */ 264 #define MSDC_PAD_TUNE_DATRRDLY2 GENMASK(12, 8) /* RW */ 265 #define MSDC_PAD_TUNE_CMDRDLY GENMASK(20, 16) /* RW */ 266 #define MSDC_PAD_TUNE_CMDRDLY2 GENMASK(20, 16) /* RW */ 267 #define MSDC_PAD_TUNE_CMDRRDLY GENMASK(26, 22) /* RW */ 268 #define MSDC_PAD_TUNE_CLKTDLY GENMASK(31, 27) /* RW */ 269 #define MSDC_PAD_TUNE_RXDLYSEL BIT(15) /* RW */ 270 #define MSDC_PAD_TUNE_RD_SEL BIT(13) /* RW */ 271 #define MSDC_PAD_TUNE_CMD_SEL BIT(21) /* RW */ 272 #define MSDC_PAD_TUNE_RD2_SEL BIT(13) /* RW */ 273 #define MSDC_PAD_TUNE_CMD2_SEL BIT(21) /* RW */ 274 275 #define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */ 276 #define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */ 277 #define PAD_DS_TUNE_DLY2 GENMASK(11, 7) /* RW */ 278 #define PAD_DS_TUNE_DLY3 GENMASK(16, 12) /* RW */ 279 280 #define PAD_CMD_TUNE_RX_DLY3 GENMASK(5, 1) /* RW */ 281 282 /* EMMC51_CFG0 mask */ 283 #define CMDQ_RDAT_CNT GENMASK(21, 12) /* RW */ 284 285 #define EMMC50_CFG_PADCMD_LATCHCK BIT(0) /* RW */ 286 #define EMMC50_CFG_CRCSTS_EDGE BIT(3) /* RW */ 287 #define EMMC50_CFG_CFCSTS_SEL BIT(4) /* RW */ 288 #define EMMC50_CFG_CMD_RESP_SEL BIT(9) /* RW */ 289 290 /* EMMC50_CFG1 mask */ 291 #define EMMC50_CFG1_DS_CFG BIT(28) /* RW */ 292 293 #define EMMC50_CFG3_OUTS_WR GENMASK(4, 0) /* RW */ 294 295 #define SDC_FIFO_CFG_WRVALIDSEL BIT(24) /* RW */ 296 #define SDC_FIFO_CFG_RDVALIDSEL BIT(25) /* RW */ 297 298 /* CQHCI_SETTING */ 299 #define CQHCI_RD_CMD_WND_SEL BIT(14) /* RW */ 300 #define CQHCI_WR_CMD_WND_SEL BIT(15) /* RW */ 301 302 /* EMMC_TOP_CONTROL mask */ 303 #define PAD_RXDLY_SEL BIT(0) /* RW */ 304 #define DELAY_EN BIT(1) /* RW */ 305 #define PAD_DAT_RD_RXDLY2 GENMASK(6, 2) /* RW */ 306 #define PAD_DAT_RD_RXDLY GENMASK(11, 7) /* RW */ 307 #define PAD_DAT_RD_RXDLY2_SEL BIT(12) /* RW */ 308 #define PAD_DAT_RD_RXDLY_SEL BIT(13) /* RW */ 309 #define DATA_K_VALUE_SEL BIT(14) /* RW */ 310 #define SDC_RX_ENH_EN BIT(15) /* TW */ 311 312 /* EMMC_TOP_CMD mask */ 313 #define 
PAD_CMD_RXDLY2		GENMASK(4, 0)	/* RW */
#define PAD_CMD_RXDLY		GENMASK(9, 5)	/* RW */
#define PAD_CMD_RD_RXDLY2_SEL	BIT(10)		/* RW */
#define PAD_CMD_RD_RXDLY_SEL	BIT(11)		/* RW */
#define PAD_CMD_TX_DLY		GENMASK(16, 12)	/* RW */

/* EMMC50_PAD_DS_TUNE mask */
#define PAD_DS_DLY_SEL		BIT(16)		/* RW */
#define PAD_DS_DLY1		GENMASK(14, 10)	/* RW */
#define PAD_DS_DLY3		GENMASK(4, 0)	/* RW */

/* LOOP_TEST_CONTROL mask */
#define TEST_LOOP_DSCLK_MUX_SEL		BIT(0)	/* RW */
#define TEST_LOOP_LATCH_MUX_SEL		BIT(1)	/* RW */
#define LOOP_EN_SEL_CLK			BIT(20)	/* RW */
#define TEST_HS400_CMD_LOOP_MUX_SEL	BIT(31)	/* RW */

/* per-request error status bits (accumulated while a request is in flight) */
#define REQ_CMD_EIO	BIT(0)
#define REQ_CMD_TMO	BIT(1)
#define REQ_DAT_ERR	BIT(2)
#define REQ_STOP_EIO	BIT(3)
#define REQ_STOP_TMO	BIT(4)
#define REQ_CMD_BUSY	BIT(5)

/* data->host_cookie flags (see msdc_prepare_data()/msdc_unprepare_data()) */
#define MSDC_PREPARE_FLAG	BIT(0)
#define MSDC_ASYNC_FLAG		BIT(1)
#define MSDC_MMAP_FLAG		BIT(2)

#define MTK_MMC_AUTOSUSPEND_DELAY	50
#define CMD_TIMEOUT	(HZ/10 * 5)	/* 100ms x5 */
#define DAT_TIMEOUT	(HZ * 5)	/* 1000ms x5 */

#define DEFAULT_DEBOUNCE	(8)	/* 8 cycles CD debounce */

#define TUNING_REG2_FIXED_OFFEST	4
#define PAD_DELAY_HALF	32 /* PAD delay cells */
#define PAD_DELAY_FULL	64
/*--------------------------------------------------------------------------*/
/* Descriptor Structure                                                     */
/*--------------------------------------------------------------------------*/
/*
 * General purpose descriptor (GPD) for the MSDC DMA engine.  The GPD heads a
 * chain of buffer descriptors (BDs); its CHECKSUM field is computed over the
 * first 16 bytes by msdc_dma_calcs().
 */
struct mt_gpdma_desc {
	u32 gpd_info;
#define GPDMA_DESC_HWO		BIT(0)
#define GPDMA_DESC_BDP		BIT(1)
#define GPDMA_DESC_CHECKSUM	GENMASK(15, 8)
#define GPDMA_DESC_INT		BIT(16)
#define GPDMA_DESC_NEXT_H4	GENMASK(27, 24)
#define GPDMA_DESC_PTR_H4	GENMASK(31, 28)
	u32 next;
	u32 ptr;
	u32 gpd_data_len;
#define GPDMA_DESC_BUFLEN	GENMASK(15, 0)
#define GPDMA_DESC_EXTLEN	GENMASK(23, 16)
	u32 arg;
	u32 blknum;
	u32 cmd;
};

/*
 * Buffer descriptor (BD): describes one scatterlist segment.  The *_H4 fields
 * carry bits 35:32 of 36-bit addresses on parts with support_64g set.
 */
struct mt_bdma_desc {
	u32 bd_info;
#define BDMA_DESC_EOL		BIT(0)
#define BDMA_DESC_CHECKSUM	GENMASK(15, 8)
#define BDMA_DESC_BLKPAD	BIT(17)
#define BDMA_DESC_DWPAD		BIT(18)
#define BDMA_DESC_NEXT_H4	GENMASK(27, 24)
#define BDMA_DESC_PTR_H4	GENMASK(31, 28)
	u32 next;
	u32 ptr;
	u32 bd_data_len;
#define BDMA_DESC_BUFLEN	GENMASK(15, 0)
#define BDMA_DESC_BUFLEN_EXT	GENMASK(23, 0)
};

struct msdc_dma {
	struct scatterlist *sg;	/* I/O scatter list */
	struct mt_gpdma_desc *gpd;	/* pointer to gpd array */
	struct mt_bdma_desc *bd;	/* pointer to bd array */
	dma_addr_t gpd_addr;	/* the physical address of gpd array */
	dma_addr_t bd_addr;	/* the physical address of bd array */
};

/* Snapshot of controller registers, saved/restored when HCLK is gated. */
struct msdc_save_para {
	u32 msdc_cfg;
	u32 iocon;
	u32 sdc_cfg;
	u32 pad_tune;
	u32 patch_bit0;
	u32 patch_bit1;
	u32 patch_bit2;
	u32 pad_ds_tune;
	u32 pad_cmd_tune;
	u32 emmc50_cfg0;
	u32 emmc50_cfg3;
	u32 sdc_fifo_cfg;
	u32 emmc_top_control;
	u32 emmc_top_cmd;
	u32 emmc50_pad_ds_tune;
	u32 loop_test_control;
};

/* Per-SoC capability/quirk description, selected via msdc_of_ids[]. */
struct mtk_mmc_compatible {
	u8 clk_div_bits;	/* width of the CKDIV field: 8 or 12 bits */
	bool recheck_sdio_irq;
	bool hs400_tune; /* only used for MT8173 */
	bool needs_top_base;	/* SoC has a separate "top" pad register block */
	u32 pad_tune_reg;	/* MSDC_PAD_TUNE or MSDC_PAD_TUNE0 */
	bool async_fifo;
	bool data_tune;
	bool busy_check;
	bool stop_clk_fix;
	u8 stop_dly_sel;
	u8 pop_en_cnt;
	bool enhance_rx;
	bool support_64g;	/* DMA can address beyond 32 bits (H4 fields) */
	bool use_internal_cd;	/* use controller card-detect, not slot GPIO */
	bool support_new_tx;
	bool support_new_rx;
};

/* One set of tuning-related register values (IOCON + pad delay registers). */
struct msdc_tune_para {
	u32 iocon;
	u32 pad_tune;
	u32 pad_cmd_tune;
	u32 emmc_top_control;
	u32 emmc_top_cmd;
};

/* Result of a delay-line scan: longest passing window and its start. */
struct msdc_delay_phase {
	u8 maxlen;
	u8 start;
	u8 final_phase;
};

struct msdc_host {
	struct device *dev;
	const struct mtk_mmc_compatible *dev_comp;
	int cmd_rsp;

	spinlock_t lock;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;
	int error;

	void __iomem *base;		/* host base address */
	void __iomem *top_base;		/* host top register base address */

	struct msdc_dma dma;	/* dma channel */
	u64 dma_mask;

	u32 timeout_ns;		/* data timeout ns */
	u32 timeout_clks;	/* data timeout clks */

	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_default;
	struct pinctrl_state *pins_uhs;
	struct pinctrl_state *pins_eint;
	struct delayed_work req_timeout;
	int irq;		/* host interrupt */
	int eint_irq;		/* interrupt from sdio device for waking up system */
	struct reset_control *reset;

	struct clk *src_clk;	/* msdc source clock */
	struct clk *h_clk;	/* msdc h_clk */
	struct clk *bus_clk;	/* bus clock which used to access register */
	struct clk *src_clk_cg;	/* msdc source clock control gate */
	struct clk *sys_clk_cg;	/* msdc subsys clock control gate */
	struct clk *crypto_clk;	/* msdc crypto clock control gate */
	struct clk_bulk_data bulk_clks[MSDC_NR_CLOCKS];
	u32 mclk;		/* mmc subsystem clock frequency */
	u32 src_clk_freq;	/* source clock frequency */
	unsigned char timing;
	bool vqmmc_enabled;
	u32 latch_ck;
	u32 hs400_ds_delay;
	u32 hs400_ds_dly3;
	u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
	u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
	u32 tuning_step;
	bool hs400_cmd_resp_sel_rising;
				 /* cmd response sample selection for HS400 */
	bool hs400_mode;	/* current eMMC will run at hs400 mode */
	bool hs400_tuning;	/* hs400 mode online tuning */
	bool internal_cd;	/* Use internal card-detect logic */
	bool cqhci;		/* support eMMC hw cmdq */
	bool hsq_en;		/* Host Software Queue is enabled */
	struct msdc_save_para save_para; /* used when gate HCLK */
	struct msdc_tune_para def_tune_para; /* default tune setting */
	struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
	struct cqhci_host *cq_host;
	u32 cq_ssc1_time;
};

static const struct mtk_mmc_compatible mt2701_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt2712_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = false,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 3,
	.enhance_rx = true,
	.support_64g = true,
};

static const struct mtk_mmc_compatible mt6779_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = false,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 3,
	.enhance_rx = true,
	.support_64g = true,
};

static const struct mtk_mmc_compatible mt6795_compat = {
	.clk_div_bits = 8,
	.recheck_sdio_irq = false,
	.hs400_tune = true,
	.pad_tune_reg = MSDC_PAD_TUNE,
	.async_fifo = false,
	.data_tune = false,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt7620_compat = {
	.clk_div_bits = 8,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE,
	.async_fifo = false,
	.data_tune = false,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.use_internal_cd = true,
};

static const struct mtk_mmc_compatible mt7622_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 3,
	.enhance_rx = true,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt7986_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.needs_top_base = true,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 3,
	.enhance_rx = true,
	.support_64g = true,
};

static const struct mtk_mmc_compatible mt8135_compat = {
	.clk_div_bits = 8,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE,
	.async_fifo = false,
	.data_tune = false,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt8173_compat = {
	.clk_div_bits = 8,
	.recheck_sdio_irq = true,
	.hs400_tune = true,
	.pad_tune_reg = MSDC_PAD_TUNE,
	.async_fifo = false,
	.data_tune = false,
	.busy_check = false,
	.stop_clk_fix = false,
	.enhance_rx = false,
	.support_64g = false,
};

static const struct mtk_mmc_compatible mt8183_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = false,
	.hs400_tune = false,
	.needs_top_base = true,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 3,
	.enhance_rx = true,
	.support_64g = true,
};

static const struct mtk_mmc_compatible mt8516_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = true,
	.hs400_tune = false,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 3,
};

static const struct mtk_mmc_compatible mt8196_compat = {
	.clk_div_bits = 12,
	.recheck_sdio_irq = false,
	.hs400_tune = false,
	.needs_top_base = true,
	.pad_tune_reg = MSDC_PAD_TUNE0,
	.async_fifo = true,
	.data_tune = true,
	.busy_check = true,
	.stop_clk_fix = true,
	.stop_dly_sel = 1,
	.pop_en_cnt = 2,
	.enhance_rx = true,
	.support_64g = true,
	.support_new_tx = true,
	.support_new_rx = true,
};

static const struct of_device_id msdc_of_ids[] = {
	{ .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
	{ .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
	{ .compatible = "mediatek,mt6779-mmc", .data = &mt6779_compat},
	{ .compatible = "mediatek,mt6795-mmc", .data = &mt6795_compat},
	{ .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
	{ .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
	{ .compatible = "mediatek,mt7986-mmc", .data = &mt7986_compat},
	{ .compatible = "mediatek,mt7988-mmc", .data = &mt7986_compat},
	{ .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
	{ .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
	{ .compatible = "mediatek,mt8183-mmc", .data = &mt8183_compat},
	{ .compatible = "mediatek,mt8196-mmc", .data = &mt8196_compat},
	{ .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat},

	{}
};
MODULE_DEVICE_TABLE(of, msdc_of_ids);

/* Read-modify-write helpers for 32-bit MMIO registers. */
static void sdr_set_bits(void __iomem *reg, u32 bs)
{
	u32 val = readl(reg);

	val |= bs;
	writel(val, reg);
}

static void sdr_clr_bits(void __iomem *reg, u32 bs)
{
	u32 val = readl(reg);

	val &= ~bs;
	writel(val, reg);
}

/*
 * Write @val into the contiguous bitfield @field: the value is shifted to
 * the field's lowest set bit (ffs) after the field is cleared.
 */
static void sdr_set_field(void __iomem *reg, u32 field, u32 val)
{
	unsigned int tv = readl(reg);

	tv &= ~field;
	tv |= ((val) << (ffs((unsigned int)field) - 1));
	writel(tv, reg);
}

/* Read the contiguous bitfield @field out of @reg into *@val. */
static void sdr_get_field(void __iomem *reg, u32 field, u32 *val)
{
	unsigned int tv = readl(reg);

	*val = ((tv & field) >> (ffs((unsigned int)field) - 1));
}

/*
 * Soft-reset the controller and clear the FIFO, then acknowledge every
 * pending interrupt (MSDC_INT bits are write-1-to-clear).
 */
static void msdc_reset_hw(struct msdc_host *host)
{
	u32 val;

	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
	readl_poll_timeout_atomic(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);

	sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
	readl_poll_timeout_atomic(host->base + MSDC_FIFOCS, val,
				  !(val & MSDC_FIFOCS_CLR), 0, 0);

	val = readl(host->base + MSDC_INT);
	writel(val, host->base + MSDC_INT);
}

static void msdc_cmd_next(struct msdc_host *host,
		struct mmc_request *mrq, struct mmc_command *cmd);
static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb);

/* Interrupt enable groups for the command and data phases of a request. */
static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR |
			MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY |
			MSDC_INTEN_ACMDCRCERR | MSDC_INTEN_ACMDTMO;
static const u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO |
			MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR |
			MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT;

/*
 * 8-bit descriptor checksum: 0xff minus the byte sum of the first @len
 * bytes.  Used for the CHECKSUM field of GPDs and BDs.
 */
static u8 msdc_dma_calcs(u8 *buf, u32 len)
{
	u32 i, sum = 0;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return 0xff - (u8) sum;
}

/*
 * Fill the GPD/BD chain from the request's (already DMA-mapped)
 * scatterlist and program the DMA engine registers.  Caller must have run
 * msdc_prepare_data() so that data->sg_count is valid.
 */
static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
		struct mmc_data *data)
{
	unsigned int j, dma_len;
	dma_addr_t dma_address;
	u32 dma_ctrl;
	struct scatterlist *sg;
	struct mt_gpdma_desc *gpd;
	struct mt_bdma_desc *bd;

	sg = data->sg;

	gpd = dma->gpd;
	bd = dma->bd;

	/* modify gpd */
	gpd->gpd_info |= GPDMA_DESC_HWO;
	gpd->gpd_info |= GPDMA_DESC_BDP;
	/* need to clear first. use these bits to calc checksum */
	gpd->gpd_info &= ~GPDMA_DESC_CHECKSUM;
	gpd->gpd_info |= msdc_dma_calcs((u8 *) gpd, 16) << 8;

	/* modify bd */
	for_each_sg(data->sg, sg, data->sg_count, j) {
		dma_address = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);

		/* init bd */
		bd[j].bd_info &= ~BDMA_DESC_BLKPAD;
		bd[j].bd_info &= ~BDMA_DESC_DWPAD;
		bd[j].ptr = lower_32_bits(dma_address);
		if (host->dev_comp->support_64g) {
			/* bits 35:32 of the segment address go in PTR_H4 */
			bd[j].bd_info &= ~BDMA_DESC_PTR_H4;
			bd[j].bd_info |= (upper_32_bits(dma_address) & 0xf)
					 << 28;
		}

		if (host->dev_comp->support_64g) {
			bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN_EXT;
			bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN_EXT);
		} else {
			bd[j].bd_data_len &= ~BDMA_DESC_BUFLEN;
			bd[j].bd_data_len |= (dma_len & BDMA_DESC_BUFLEN);
		}

		if (j == data->sg_count - 1) /* the last bd */
			bd[j].bd_info |= BDMA_DESC_EOL;
		else
			bd[j].bd_info &= ~BDMA_DESC_EOL;

		/* checksum need to clear first */
		bd[j].bd_info &= ~BDMA_DESC_CHECKSUM;
		bd[j].bd_info |= msdc_dma_calcs((u8 *)(&bd[j]), 16) << 8;
	}

	sdr_set_field(host->base + MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, 1);
	dma_ctrl = readl_relaxed(host->base + MSDC_DMA_CTRL);
	dma_ctrl &= ~(MSDC_DMA_CTRL_BRUSTSZ | MSDC_DMA_CTRL_MODE);
	/* 64-byte bursts, descriptor (BIT(8) == MSDC_DMA_CTRL_MODE) mode */
	dma_ctrl |= (MSDC_BURST_64B << 12 | BIT(8));
	writel_relaxed(dma_ctrl, host->base + MSDC_DMA_CTRL);
	if (host->dev_comp->support_64g)
		sdr_set_field(host->base + DMA_SA_H4BIT, DMA_ADDR_HIGH_4BIT,
			      upper_32_bits(dma->gpd_addr) & 0xf);
	writel(lower_32_bits(dma->gpd_addr), host->base + MSDC_DMA_SA);
}

/* DMA-map the request's scatterlist once, flagging it via host_cookie. */
static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data)
{
	if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
		data->host_cookie |= MSDC_PREPARE_FLAG;
		data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
					    mmc_get_dma_dir(data));
	}
}

/*
 * Undo msdc_prepare_data().  Skipped when MSDC_ASYNC_FLAG is set: the
 * mapping then belongs to the async (post_req) path and is unmapped there.
 */
static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data)
{
	if (data->host_cookie & MSDC_ASYNC_FLAG)
		return;

	if (data->host_cookie & MSDC_PREPARE_FLAG) {
		dma_unmap_sg(host->dev, data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie &= ~MSDC_PREPARE_FLAG;
	}
}

/*
 * Convert a timeout of @ns nanoseconds plus @clks clock cycles into the
 * hardware's unit of 1048576 (2^20) source-clock cycles, doubling for DDR
 * modes (CKMOD >= 2).  Returns 0 when the clock is off.
 */
static u64 msdc_timeout_cal(struct msdc_host *host, u64 ns, u64 clks)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	u64 timeout;
	u32 clk_ns, mode = 0;

	if (mmc->actual_clock == 0) {
		timeout = 0;
	} else {
		clk_ns = 1000000000U / mmc->actual_clock;
		timeout = ns + clk_ns - 1;
		do_div(timeout, clk_ns);
		timeout += clks;
		/* in 1048576 sclk cycle unit */
		timeout = DIV_ROUND_UP(timeout, BIT(20));
		if (host->dev_comp->clk_div_bits == 8)
			sdr_get_field(host->base + MSDC_CFG,
				      MSDC_CFG_CKMOD, &mode);
		else
			sdr_get_field(host->base + MSDC_CFG,
				      MSDC_CFG_CKMOD_EXTRA, &mode);
		/*DDR mode will double the clk cycles for data timeout */
		timeout = mode >= 2 ? timeout * 2 : timeout;
		timeout = timeout > 1 ? timeout - 1 : 0;
	}
	return timeout;
}

/* clock control primitives */

/* Program the data timeout (SDC_CFG.DTOC, 8-bit field, capped at 255). */
static void msdc_set_timeout(struct msdc_host *host, u64 ns, u64 clks)
{
	u64 timeout;

	host->timeout_ns = ns;
	host->timeout_clks = clks;

	timeout = msdc_timeout_cal(host, ns, clks);
	sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC,
		      min_t(u32, timeout, 255));
}

/* Program the write-busy timeout (SDC_CFG.WRDTOC, 13-bit, capped at 8191). */
static void msdc_set_busy_timeout(struct msdc_host *host, u64 ns, u64 clks)
{
	u64 timeout;

	timeout = msdc_timeout_cal(host, ns, clks);
	sdr_set_field(host->base + SDC_CFG, SDC_CFG_WRDTOC,
		      min_t(u32, timeout, 8191));
}

/* Disable all host clocks; exact reverse order of msdc_ungate_clock(). */
static void msdc_gate_clock(struct msdc_host *host)
{
	clk_bulk_disable_unprepare(MSDC_NR_CLOCKS, host->bulk_clks);
	clk_disable_unprepare(host->crypto_clk);
	clk_disable_unprepare(host->src_clk_cg);
	clk_disable_unprepare(host->src_clk);
	clk_disable_unprepare(host->bus_clk);
	clk_disable_unprepare(host->h_clk);
}

/*
 * Enable all host clocks and wait up to 20ms for the card clock to report
 * stable (MSDC_CFG_CKSTB).  Returns 0 or a negative errno.
 */
static int msdc_ungate_clock(struct msdc_host *host)
{
	u32 val;
	int ret;

	clk_prepare_enable(host->h_clk);
	clk_prepare_enable(host->bus_clk);
	clk_prepare_enable(host->src_clk);
	clk_prepare_enable(host->src_clk_cg);
	clk_prepare_enable(host->crypto_clk);
	ret = clk_bulk_prepare_enable(MSDC_NR_CLOCKS, host->bulk_clks);
	if (ret) {
		dev_err(host->dev, "Cannot enable pclk/axi/ahb clock gates\n");
		return ret;
	}

	return readl_poll_timeout(host->base + MSDC_CFG, val,
				  (val & MSDC_CFG_CKSTB), 1, 20000);
}

/*
 * Configure the "new TX" loopback path muxes (support_new_tx parts only),
 * selecting LOOP_EN_SEL_CLK for the faster timing modes.
 */
static void msdc_new_tx_setting(struct msdc_host *host)
{
	if (!host->top_base)
		return;

	sdr_set_bits(host->top_base + LOOP_TEST_CONTROL,
		     TEST_LOOP_DSCLK_MUX_SEL);
	sdr_set_bits(host->top_base + LOOP_TEST_CONTROL,
		     TEST_LOOP_LATCH_MUX_SEL);
	sdr_clr_bits(host->top_base + LOOP_TEST_CONTROL,
		     TEST_HS400_CMD_LOOP_MUX_SEL);

	switch (host->timing) {
	case MMC_TIMING_LEGACY:
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_SD_HS:
	case MMC_TIMING_UHS_SDR12:
	case MMC_TIMING_UHS_SDR25:
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		sdr_clr_bits(host->top_base + LOOP_TEST_CONTROL,
			     LOOP_EN_SEL_CLK);
		break;
	case MMC_TIMING_UHS_SDR50:
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_MMC_HS400:
		sdr_set_bits(host->top_base + LOOP_TEST_CONTROL,
			     LOOP_EN_SEL_CLK);
		break;
	default:
		break;
	}
}

/*
 * Set the card clock to @hz for @timing.  Chooses the clock mode/divider
 * (HS400 half-clock mode, DDR quarter-clock, or plain divisor), reprograms
 * MSDC_CFG with the source clock gated, waits for the clock to stabilize,
 * then restores either the default or the previously tuned pad delay
 * settings depending on the resulting frequency.  @hz == 0 turns the
 * clock off.  Interrupts are masked across the change and restored after.
 */
static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	u32 mode;
	u32 flags;
	u32 div;
	u32 sclk;
	u32 tune_reg = host->dev_comp->pad_tune_reg;
	u32 val;
	bool timing_changed;

	if (!hz) {
		dev_dbg(host->dev, "set mclk to 0\n");
		host->mclk = 0;
		mmc->actual_clock = 0;
		sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
		return;
	}

	if (host->timing != timing)
		timing_changed = true;
	else
		timing_changed = false;

	/* mask all interrupt sources while the clock is being changed */
	flags = readl(host->base + MSDC_INTEN);
	sdr_clr_bits(host->base + MSDC_INTEN, flags);
	if (host->dev_comp->clk_div_bits == 8)
		sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
	else
		sdr_clr_bits(host->base + MSDC_CFG,
			     MSDC_CFG_HS400_CK_MODE_EXTRA);
	if (timing == MMC_TIMING_UHS_DDR50 ||
	    timing == MMC_TIMING_MMC_DDR52 ||
	    timing == MMC_TIMING_MMC_HS400) {
		if (timing == MMC_TIMING_MMC_HS400)
			mode = 0x3;
		else
			mode = 0x2; /* ddr mode and use divisor */

		if (hz >= (host->src_clk_freq >> 2)) {
			div = 0; /* mean div = 1/4 */
			sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */
		} else {
			div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
			sclk = (host->src_clk_freq >> 2) / div;
			div = (div >> 1);
		}

		if (timing == MMC_TIMING_MMC_HS400 &&
		    hz >= (host->src_clk_freq >> 1)) {
			if (host->dev_comp->clk_div_bits == 8)
				sdr_set_bits(host->base + MSDC_CFG,
					     MSDC_CFG_HS400_CK_MODE);
			else
				sdr_set_bits(host->base + MSDC_CFG,
					     MSDC_CFG_HS400_CK_MODE_EXTRA);
			sclk = host->src_clk_freq >> 1;
			div = 0; /* div is ignore when bit18 is set */
		}
	} else if (hz >= host->src_clk_freq) {
		mode = 0x1; /* no divisor */
		div = 0;
		sclk = host->src_clk_freq;
	} else {
		mode = 0x0; /* use divisor */
		if (hz >= (host->src_clk_freq >> 1)) {
			div = 0; /* mean div = 1/2 */
			sclk = host->src_clk_freq >> 1; /* sclk = clk / 2 */
		} else {
			div = (host->src_clk_freq + ((hz << 2) - 1)) / (hz << 2);
			sclk = (host->src_clk_freq >> 2) / div;
		}
	}
	sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);

	/* gate the source clock while mode/divider bits are rewritten */
	clk_disable_unprepare(host->src_clk_cg);
	if (host->dev_comp->clk_div_bits == 8)
		sdr_set_field(host->base + MSDC_CFG,
			      MSDC_CFG_CKMOD | MSDC_CFG_CKDIV,
			      (mode << 8) | div);
	else
		sdr_set_field(host->base + MSDC_CFG,
			      MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA,
			      (mode << 12) | div);

	clk_prepare_enable(host->src_clk_cg);
	readl_poll_timeout(host->base + MSDC_CFG, val, (val & MSDC_CFG_CKSTB), 0, 0);
	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN);
	mmc->actual_clock = sclk;
	host->mclk = hz;
	host->timing = timing;
	/* need because clk changed. */
	msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
	sdr_set_bits(host->base + MSDC_INTEN, flags);

	/*
	 * mmc_select_hs400() will drop to 50Mhz and High speed mode,
	 * tune result of hs200/200Mhz is not suitable for 50Mhz
	 */
	if (mmc->actual_clock <= 52000000) {
		writel(host->def_tune_para.iocon, host->base + MSDC_IOCON);
		if (host->top_base) {
			writel(host->def_tune_para.emmc_top_control,
			       host->top_base + EMMC_TOP_CONTROL);
			writel(host->def_tune_para.emmc_top_cmd,
			       host->top_base + EMMC_TOP_CMD);
		} else {
			writel(host->def_tune_para.pad_tune,
			       host->base + tune_reg);
		}
	} else {
		writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON);
		writel(host->saved_tune_para.pad_cmd_tune,
		       host->base + PAD_CMD_TUNE);
		if (host->top_base) {
			writel(host->saved_tune_para.emmc_top_control,
			       host->top_base + EMMC_TOP_CONTROL);
			writel(host->saved_tune_para.emmc_top_cmd,
			       host->top_base + EMMC_TOP_CMD);
		} else {
			writel(host->saved_tune_para.pad_tune,
			       host->base + tune_reg);
		}
	}

	if (timing == MMC_TIMING_MMC_HS400 &&
	    host->dev_comp->hs400_tune)
		sdr_set_field(host->base + tune_reg,
			      MSDC_PAD_TUNE_CMDRRDLY,
			      host->hs400_cmd_int_delay);
	if (host->dev_comp->support_new_tx && timing_changed)
		msdc_new_tx_setting(host);

	dev_dbg(host->dev, "sclk: %d, timing: %d\n", mmc->actual_clock,
		timing);
}

/* Map an MMC response type onto the controller's SDC_CMD response code. */
static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
		struct mmc_command *cmd)
{
	u32 resp;

	switch (mmc_resp_type(cmd)) {
		/* Actually, R1, R5, R6, R7 are the same */
	case MMC_RSP_R1:
		resp = 0x1;
		break;
	case MMC_RSP_R1B:
	case MMC_RSP_R1B_NO_CRC:
		resp = 0x7;
		break;
	case MMC_RSP_R2:
		resp = 0x2;
		break;
	case MMC_RSP_R3:
		resp = 0x3;
		break;
	case MMC_RSP_NONE:
	default:
resp = 0x0;
		break;
	}

	return resp;
}

/*
 * Build the raw SDC_CMD register value for @cmd.
 * Also programs the block count register and (re)programs the data timeout
 * when the request carries data, and latches the response type in
 * host->cmd_rsp for the completion path.
 */
static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
		struct mmc_request *mrq, struct mmc_command *cmd)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	/* rawcmd :
	 * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
	 * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
	 */
	u32 opcode = cmd->opcode;
	u32 resp = msdc_cmd_find_resp(host, cmd);
	u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7);

	host->cmd_rsp = resp;

	/* Commands below need the "stop" / "vol_swt" / "dtype" bits per the
	 * register layout documented above.
	 */
	if ((opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int) -1) ||
	    opcode == MMC_STOP_TRANSMISSION)
		rawcmd |= BIT(14);
	else if (opcode == SD_SWITCH_VOLTAGE)
		rawcmd |= BIT(30);
	else if (opcode == SD_APP_SEND_SCR ||
		 opcode == SD_APP_SEND_NUM_WR_BLKS ||
		 (opcode == SD_SWITCH && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
		 (opcode == SD_APP_SD_STATUS && mmc_cmd_type(cmd) == MMC_CMD_ADTC) ||
		 (opcode == MMC_SEND_EXT_CSD && mmc_cmd_type(cmd) == MMC_CMD_ADTC))
		rawcmd |= BIT(11);

	if (cmd->data) {
		struct mmc_data *data = cmd->data;

		if (mmc_op_multi(opcode)) {
			/* Hardware AutoCMD23 only for plain block counts
			 * (no reliable-write/tag bits in the CMD23 arg).
			 */
			if (mmc_card_mmc(mmc->card) && mrq->sbc &&
			    !(mrq->sbc->arg & 0xFFFF0000))
				rawcmd |= BIT(29); /* AutoCMD23 */
		}

		rawcmd |= ((data->blksz & 0xFFF) << 16);
		if (data->flags & MMC_DATA_WRITE)
			rawcmd |= BIT(13);
		if (data->blocks > 1)
			rawcmd |= BIT(12);
		else
			rawcmd |= BIT(11);
		/* Always use dma mode */
		sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_PIO);

		if (host->timeout_ns != data->timeout_ns ||
		    host->timeout_clks != data->timeout_clks)
			msdc_set_timeout(host, data->timeout_ns,
					 data->timeout_clks);

		writel(data->blocks, host->base + SDC_BLK_NUM);
	}
	return rawcmd;
}

/*
 * Kick off the DMA data phase for @cmd: arm the request timeout, set up the
 * descriptors, unmask the data interrupts and start the DMA engine.
 */
static void msdc_start_data(struct msdc_host *host, struct mmc_command *cmd,
			    struct mmc_data *data)
{
	bool read;

	WARN_ON(host->data);
	host->data = data;
	read = data->flags & MMC_DATA_READ;

	mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
	msdc_dma_setup(host, &host->dma, data);
	sdr_set_bits(host->base + MSDC_INTEN, data_ints_mask);
	sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
	dev_dbg(host->dev, "DMA start\n");
	dev_dbg(host->dev, "%s: cmd=%d DMA data: %d blocks; read=%d\n",
		__func__, cmd->opcode, data->blocks, read);
}

/*
 * Complete a hardware auto-command (e.g. AutoCMD23): read its response and
 * translate the ACMD event bits into cmd->error / host->error.
 * Returns cmd->error (0 on success).
 */
static int msdc_auto_cmd_done(struct msdc_host *host, int events,
			      struct mmc_command *cmd)
{
	u32 *rsp = cmd->resp;

	rsp[0] = readl(host->base + SDC_ACMD_RESP);

	if (events & MSDC_INT_ACMDRDY) {
		cmd->error = 0;
	} else {
		msdc_reset_hw(host);
		if (events & MSDC_INT_ACMDCRCERR) {
			cmd->error = -EILSEQ;
			host->error |= REQ_STOP_EIO;
		} else if (events & MSDC_INT_ACMDTMO) {
			cmd->error = -ETIMEDOUT;
			host->error |= REQ_STOP_TMO;
		}
		dev_err(host->dev,
			"%s: AUTO_CMD%d arg=%08X; rsp %08X; cmd_error=%d\n",
			__func__, cmd->opcode, cmd->arg, rsp[0], cmd->error);
	}
	return cmd->error;
}

/*
 * msdc_recheck_sdio_irq - recheck whether the SDIO irq is lost
 *
 * Host controller may lost interrupt in some special case.
 * Add SDIO irq recheck mechanism to make sure all interrupts
 * can be processed immediately
 */
static void msdc_recheck_sdio_irq(struct msdc_host *host)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	u32 reg_int, reg_inten, reg_ps;

	if (mmc->caps & MMC_CAP_SDIO_IRQ) {
		reg_inten = readl(host->base + MSDC_INTEN);
		if (reg_inten & MSDC_INTEN_SDIOIRQ) {
			reg_int = readl(host->base + MSDC_INT);
			reg_ps = readl(host->base + MSDC_PS);
			/* DAT1 low with no pending IRQ bit: an interrupt was
			 * likely missed — disable and signal it manually.
			 */
			if (!(reg_int & MSDC_INT_SDIOIRQ ||
			      reg_ps & MSDC_PS_DATA1)) {
				__msdc_enable_sdio_irq(host, 0);
				sdio_signal_irq(mmc);
			}
		}
	}
}

/* Log a warning for failed commands, except expected tuning-phase errors. */
static void msdc_track_cmd_data(struct msdc_host *host, struct mmc_command *cmd)
{
	if (host->error &&
	    ((!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning) ||
	     cmd->error == -ETIMEDOUT))
		dev_warn(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n",
			 __func__, cmd->opcode, cmd->arg, host->error);
}

/*
 * Finalize @mrq: cancel the software timeout, clear host->mrq, unprepare
 * data, reset the controller on error and hand the request back to the core
 * (or to HSQ when it owns the request).
 */
static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	unsigned long flags;
	bool hsq_req_done;

	/*
	 * No need check the return value of cancel_delayed_work, as only ONE
	 * path will go here!
	 */
	cancel_delayed_work(&host->req_timeout);

	/*
	 * If the request was handled from Host Software Queue, there's almost
	 * nothing to do here, and we also don't need to reset mrq as any race
	 * condition would not have any room to happen, since HSQ stores the
	 * "scheduled" mrqs in an internal array of mrq slots anyway.
	 * However, if the controller experienced an error, we still want to
	 * reset it as soon as possible.
	 *
	 * Note that non-HSQ requests will still be happening at times, even
	 * though it is enabled, and that's what is going to reset host->mrq.
* Also, msdc_unprepare_data() is going to be called by HSQ when needed
	 * as HSQ request finalization will eventually call the .post_req()
	 * callback of this driver which, in turn, unprepares the data.
	 */
	hsq_req_done = host->hsq_en ? mmc_hsq_finalize_request(mmc, mrq) : false;
	if (hsq_req_done) {
		if (host->error)
			msdc_reset_hw(host);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);
	host->mrq = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	msdc_track_cmd_data(host, mrq->cmd);
	if (mrq->data)
		msdc_unprepare_data(host, mrq->data);
	if (host->error)
		msdc_reset_hw(host);
	mmc_request_done(mmc, mrq);
	if (host->dev_comp->recheck_sdio_irq)
		msdc_recheck_sdio_irq(host);
}

/* returns true if command is fully handled; returns false otherwise */
static bool msdc_cmd_done(struct msdc_host *host, int events,
			  struct mmc_request *mrq, struct mmc_command *cmd)
{
	bool done = false;
	bool sbc_error;
	unsigned long flags;
	u32 *rsp;

	/* Complete a hardware auto-CMD23 first, if one was issued. */
	if (mrq->sbc && cmd == mrq->cmd &&
	    (events & (MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR
		       | MSDC_INT_ACMDTMO)))
		msdc_auto_cmd_done(host, events, mrq->sbc);

	sbc_error = mrq->sbc && mrq->sbc->error;

	/* Nothing to do unless the SBC failed or a CMD event is pending. */
	if (!sbc_error && !(events & (MSDC_INT_CMDRDY
				      | MSDC_INT_RSPCRCERR
				      | MSDC_INT_CMDTMO)))
		return done;

	/* Claim the command under the lock; a racing path may already have. */
	spin_lock_irqsave(&host->lock, flags);
	done = !host->cmd;
	host->cmd = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (done)
		return true;
	rsp = cmd->resp;

	sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* 136-bit responses arrive in reverse register order. */
			rsp[0] = readl(host->base + SDC_RESP3);
			rsp[1] = readl(host->base + SDC_RESP2);
			rsp[2] = readl(host->base + SDC_RESP1);
			rsp[3] = readl(host->base + SDC_RESP0);
		} else {
			rsp[0] = readl(host->base + SDC_RESP0);
		}
	}

	if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
		if ((events & MSDC_INT_CMDTMO && !host->hs400_tuning) ||
		    (!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning))
			/*
			 * should not clear fifo/interrupt as the tune data
			 * may have already come when cmd19/cmd21 gets response
			 * CRC error.
			 */
			msdc_reset_hw(host);
		if (events & MSDC_INT_RSPCRCERR &&
		    mmc_resp_type(cmd) != MMC_RSP_R1B_NO_CRC) {
			cmd->error = -EILSEQ;
			host->error |= REQ_CMD_EIO;
		} else if (events & MSDC_INT_CMDTMO) {
			cmd->error = -ETIMEDOUT;
			host->error |= REQ_CMD_TMO;
		}
	}
	if (cmd->error)
		dev_dbg(host->dev,
			"%s: cmd=%d arg=%08X; rsp %08X; cmd_error=%d\n",
			__func__, cmd->opcode, cmd->arg, rsp[0],
			cmd->error);

	msdc_cmd_next(host, mrq, cmd);
	return true;
}

/* It is the core layer's responsibility to ensure card status
 * is correct before issue a request. but host design do below
 * checks recommended.
*/
/*
 * Poll SDC_STS until the CMD bus (and, for R1B/data commands, the whole
 * controller) is idle. On a 20ms timeout the command is failed as CMDTMO
 * and false is returned so the caller aborts the submission.
 */
static inline bool msdc_cmd_is_ready(struct msdc_host *host,
				     struct mmc_request *mrq, struct mmc_command *cmd)
{
	u32 val;
	int ret;

	/* The max busy time we can endure is 20ms */
	ret = readl_poll_timeout_atomic(host->base + SDC_STS, val,
					!(val & SDC_STS_CMDBUSY), 1, 20000);
	if (ret) {
		dev_err(host->dev, "CMD bus busy detected\n");
		host->error |= REQ_CMD_BUSY;
		msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
		return false;
	}

	if (mmc_resp_type(cmd) == MMC_RSP_R1B || cmd->data) {
		/* R1B or with data, should check SDCBUSY */
		ret = readl_poll_timeout_atomic(host->base + SDC_STS, val,
						!(val & SDC_STS_SDCBUSY), 1, 20000);
		if (ret) {
			dev_err(host->dev, "Controller busy detected\n");
			host->error |= REQ_CMD_BUSY;
			msdc_cmd_done(host, MSDC_INT_CMDTMO, mrq, cmd);
			return false;
		}
	}
	return true;
}

/*
 * Issue @cmd to the controller: arm the software timeout, verify the bus is
 * idle, sanity-check the FIFOs, unmask command interrupts and write the
 * argument and raw command registers. Completion is interrupt driven.
 */
static void msdc_start_command(struct msdc_host *host,
			       struct mmc_request *mrq, struct mmc_command *cmd)
{
	u32 rawcmd;
	unsigned long flags;

	WARN_ON(host->cmd);
	host->cmd = cmd;

	mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
	if (!msdc_cmd_is_ready(host, mrq, cmd))
		return;

	/* Leftover FIFO contents indicate a previous transfer went wrong. */
	if ((readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16 ||
	    readl(host->base + MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) {
		dev_err(host->dev, "TX/RX FIFO non-empty before start of IO. Reset\n");
		msdc_reset_hw(host);
	}

	cmd->error = 0;
	rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);

	spin_lock_irqsave(&host->lock, flags);
	sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
	spin_unlock_irqrestore(&host->lock, flags);

	/* Writing SDC_CMD starts the command on the bus. */
	writel(cmd->arg, host->base + SDC_ARG);
	writel(rawcmd, host->base + SDC_CMD);
}

/*
 * Advance the request state machine after a command completes:
 * fail the whole request on error (tuning CRC errors excepted), chain
 * SBC -> cmd, finish commands without data, or start the data phase.
 */
static void msdc_cmd_next(struct msdc_host *host,
			  struct mmc_request *mrq, struct mmc_command *cmd)
{
	if ((cmd->error && !host->hs400_tuning &&
	     !(cmd->error == -EILSEQ &&
	       mmc_op_tuning(cmd->opcode))) ||
	    (mrq->sbc && mrq->sbc->error))
		msdc_request_done(host, mrq);
	else if (cmd == mrq->sbc)
		msdc_start_command(host, mrq, mrq->cmd);
	else if (!cmd->data)
		msdc_request_done(host, mrq);
	else
		msdc_start_data(host, cmd, cmd->data);
}

/* mmc_host_ops .request: entry point for a new request from the core. */
static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct msdc_host *host = mmc_priv(mmc);

	host->error = 0;
	WARN_ON(!host->hsq_en && host->mrq);
	host->mrq = mrq;

	if (mrq->data)
		msdc_prepare_data(host, mrq->data);

	/* if SBC is required, we have HW option and SW option.
* if HW option is enabled, and SBC does not have "special" flags,
	 * use HW option, otherwise use SW option
	 */
	if (mrq->sbc && (!mmc_card_mmc(mmc->card) ||
	    (mrq->sbc->arg & 0xFFFF0000)))
		msdc_start_command(host, mrq, mrq->sbc);
	else
		msdc_start_command(host, mrq, mrq->cmd);
}

/* mmc_host_ops .pre_req: pre-map the data buffers for async requests. */
static void msdc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct msdc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	msdc_prepare_data(host, data);
	data->host_cookie |= MSDC_ASYNC_FLAG;
}

/* mmc_host_ops .post_req: unmap buffers that were prepared in .pre_req. */
static void msdc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			  int err)
{
	struct msdc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie &= ~MSDC_ASYNC_FLAG;
		msdc_unprepare_data(host, data);
	}
}

/*
 * After the data phase: issue a software CMD12 when one is needed (open-ended
 * multi-block transfer without SBC), otherwise finish the request.
 */
static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq)
{
	if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error &&
	    !mrq->sbc)
		msdc_start_command(host, mrq, mrq->stop);
	else
		msdc_request_done(host, mrq);
}

/*
 * Complete the data phase for @mrq: stop the DMA engine, wait for it to go
 * inactive, translate the event bits into data->error / bytes_xfered and
 * advance the request state machine.
 */
static void msdc_data_xfer_done(struct msdc_host *host, u32 events,
				struct mmc_request *mrq, struct mmc_data *data)
{
	struct mmc_command *stop;
	unsigned long flags;
	bool done;
	unsigned int check_data = events &
	    (MSDC_INT_XFER_COMPL | MSDC_INT_DATCRCERR | MSDC_INT_DATTMO
	     | MSDC_INT_DMA_BDCSERR | MSDC_INT_DMA_GPDCSERR
	     | MSDC_INT_DMA_PROTECT);
	u32 val;
	int ret;

	/* Claim the data phase under the lock; it may already be completed. */
	spin_lock_irqsave(&host->lock, flags);
	done = !host->data;
	if (check_data)
		host->data = NULL;
	spin_unlock_irqrestore(&host->lock, flags);

	if (done)
		return;
	stop = data->stop;

	if (check_data || (stop && stop->error)) {
		dev_dbg(host->dev, "DMA status: 0x%8X\n",
			readl(host->base + MSDC_DMA_CFG));
		sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP,
			      1);

		/* Wait for the STOP bit to self-clear, then for the engine
		 * to report inactive; 20ms bound on each poll.
		 */
		ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CTRL, val,
						!(val & MSDC_DMA_CTRL_STOP), 1, 20000);
		if (ret)
			dev_dbg(host->dev, "DMA stop timed out\n");

		ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CFG, val,
						!(val & MSDC_DMA_CFG_STS), 1, 20000);
		if (ret)
			dev_dbg(host->dev, "DMA inactive timed out\n");

		sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask);
		dev_dbg(host->dev, "DMA stop\n");

		if ((events & MSDC_INT_XFER_COMPL) && (!stop || !stop->error)) {
			data->bytes_xfered = data->blocks * data->blksz;
		} else {
			dev_dbg(host->dev, "interrupt events: %x\n", events);
			msdc_reset_hw(host);
			host->error |= REQ_DAT_ERR;
			data->bytes_xfered = 0;

			if (events & MSDC_INT_DATTMO)
				data->error = -ETIMEDOUT;
			else if (events & MSDC_INT_DATCRCERR)
				data->error = -EILSEQ;

			dev_dbg(host->dev, "%s: cmd=%d; blocks=%d",
				__func__, mrq->cmd->opcode, data->blocks);
			dev_dbg(host->dev, "data_error=%d xfer_size=%d\n",
				(int)data->error, data->bytes_xfered);
		}

		msdc_data_xfer_next(host, mrq);
	}
}

/* Program the SDC_CFG bus-width field (1/4/8 bits). */
static void msdc_set_buswidth(struct msdc_host *host, u32 width)
{
	u32 val = readl(host->base + SDC_CFG);

	val &= ~SDC_CFG_BUSWIDTH;

	switch (width) {
	default:
	case MMC_BUS_WIDTH_1:
		val |= (MSDC_BUS_1BITS << 16);
		break;
	case MMC_BUS_WIDTH_4:
		val |= (MSDC_BUS_4BITS << 16);
		break;
	case MMC_BUS_WIDTH_8:
		val |= (MSDC_BUS_8BITS << 16);
		break;
	}

	writel(val, host->base + SDC_CFG);
	dev_dbg(host->dev, "Bus Width = %d", width);
}

/*
 * mmc_host_ops .start_signal_voltage_switch: set vqmmc to the requested
 * signal voltage (3.3V or 1.8V only) and select the matching pinctrl state.
 */
static int msdc_ops_switch_volt(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct msdc_host *host = mmc_priv(mmc);
	int ret;

if (!IS_ERR(mmc->supply.vqmmc)) {
		if (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_330 &&
		    ios->signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
			dev_err(host->dev, "Unsupported signal voltage!\n");
			return -EINVAL;
		}

		ret = mmc_regulator_set_vqmmc(mmc, ios);
		if (ret < 0) {
			dev_dbg(host->dev, "Regulator set error %d (%d)\n",
				ret, ios->signal_voltage);
			return ret;
		}

		/* Apply different pinctrl settings for different signal voltage */
		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			pinctrl_select_state(host->pinctrl, host->pins_uhs);
		else
			pinctrl_select_state(host->pinctrl, host->pins_default);
	}
	return 0;
}

/* mmc_host_ops .card_busy: report whether the card is holding DAT0 low. */
static int msdc_card_busy(struct mmc_host *mmc)
{
	struct msdc_host *host = mmc_priv(mmc);
	u32 status = readl(host->base + MSDC_PS);

	/* only check if data0 is low */
	return !(status & BIT(16));
}

/*
 * Delayed-work handler: the request exceeded the software deadline armed in
 * msdc_start_command()/msdc_start_data(). Abort the in-flight cmd or data
 * phase by injecting a simulated hardware timeout event.
 */
static void msdc_request_timeout(struct work_struct *work)
{
	struct msdc_host *host = container_of(work, struct msdc_host,
					      req_timeout.work);

	/* simulate HW timeout status */
	dev_err(host->dev, "%s: aborting cmd/data/mrq\n", __func__);
	if (host->mrq) {
		dev_err(host->dev, "%s: aborting mrq=%p cmd=%d\n", __func__,
			host->mrq, host->mrq->cmd->opcode);
		if (host->cmd) {
			dev_err(host->dev, "%s: aborting cmd=%d\n",
				__func__, host->cmd->opcode);
			msdc_cmd_done(host, MSDC_INT_CMDTMO, host->mrq,
				      host->cmd);
		} else if (host->data) {
			dev_err(host->dev, "%s: abort data: cmd%d; %d blocks\n",
				__func__, host->mrq->cmd->opcode,
				host->data->blocks);
			msdc_data_xfer_done(host, MSDC_INT_DATTMO, host->mrq,
					    host->data);
		}
	}
}

/*
 * Low-level SDIO IRQ gate: toggle the interrupt enable and the DAT1 IRQ
 * detect (SDIOIDE) together. Caller must hold host->lock (called from
 * msdc_enable_sdio_irq() and the IRQ handler).
 */
static void __msdc_enable_sdio_irq(struct msdc_host *host, int enb)
{
	if (enb) {
		sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
		sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
		if (host->dev_comp->recheck_sdio_irq)
			msdc_recheck_sdio_irq(host);
	} else {
		sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
	}
}

/*
 * mmc_host_ops .enable_sdio_irq. Besides gating the controller interrupt,
 * manage the dedicated EINT wake irq (for async-irq capable cards with an
 * eint pin) or a runtime-PM reference so the host stays powered while SDIO
 * interrupts are expected.
 */
static void msdc_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct msdc_host *host = mmc_priv(mmc);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host->lock, flags);
	__msdc_enable_sdio_irq(host, enb);
	spin_unlock_irqrestore(&host->lock, flags);

	if (mmc_card_enable_async_irq(mmc->card) && host->pins_eint) {
		if (enb) {
			/*
			 * In dev_pm_set_dedicated_wake_irq_reverse(), eint pin will be set to
			 * GPIO mode. We need to restore it to SDIO DAT1 mode after that.
			 * Since the current pinstate is pins_uhs, to ensure pinctrl select take
			 * affect successfully, we change the pinstate to pins_eint firstly.
			 */
			pinctrl_select_state(host->pinctrl, host->pins_eint);
			ret = dev_pm_set_dedicated_wake_irq_reverse(host->dev, host->eint_irq);

			if (ret) {
				dev_err(host->dev, "Failed to register SDIO wakeup irq!\n");
				host->pins_eint = NULL;
				pm_runtime_get_noresume(host->dev);
			} else {
				dev_dbg(host->dev, "SDIO eint irq: %d!\n", host->eint_irq);
			}

			pinctrl_select_state(host->pinctrl, host->pins_uhs);
		} else {
			dev_pm_clear_wake_irq(host->dev);
		}
	} else {
		if (enb) {
			/* Ensure host->pins_eint is NULL */
			host->pins_eint = NULL;
			pm_runtime_get_noresume(host->dev);
		} else {
			pm_runtime_put_noidle(host->dev);
		}
	}
}

/*
 * Translate CMDQ-mode interrupt status into cmd/data error codes and forward
 * them to the CQHCI layer.
 */
static irqreturn_t msdc_cmdq_irq(struct msdc_host *host, u32 intsts)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	int cmd_err = 0, dat_err = 0;

	if (intsts & MSDC_INT_RSPCRCERR) {
		cmd_err = -EILSEQ;
		dev_err(host->dev, "%s: CMD CRC ERR", __func__);
	} else if (intsts & MSDC_INT_CMDTMO) {
		cmd_err = -ETIMEDOUT;
		dev_err(host->dev, "%s: CMD TIMEOUT ERR", __func__);
	}

	if (intsts & MSDC_INT_DATCRCERR) {
		dat_err = -EILSEQ;
		dev_err(host->dev, "%s: DATA CRC ERR", __func__);
	} else if (intsts & MSDC_INT_DATTMO) {
		dat_err = -ETIMEDOUT;
		dev_err(host->dev, "%s: DATA TIMEOUT ERR", __func__);
	}

	if (cmd_err || dat_err) {
		dev_err(host->dev, "cmd_err = %d, dat_err = %d, intsts = 0x%x",
			cmd_err, dat_err, intsts);
	}

	return cqhci_irq(mmc, 0, cmd_err, dat_err);
}

/*
 * Top-level interrupt handler. Loops until no enabled events remain,
 * dispatching SDIO, card-detect, CQE, command and data completions.
 */
static irqreturn_t msdc_irq(int irq, void *dev_id)
{
	struct msdc_host *host = (struct msdc_host *) dev_id;
	struct mmc_host *mmc = mmc_from_priv(host);

	while (true) {
		struct mmc_request *mrq;
		struct mmc_command *cmd;
		struct mmc_data *data;
		u32 events, event_mask;

		/* Snapshot events and current request state under the lock. */
		spin_lock(&host->lock);
		events = readl(host->base + MSDC_INT);
		event_mask = readl(host->base + MSDC_INTEN);
		if ((events & event_mask) & MSDC_INT_SDIOIRQ)
			__msdc_enable_sdio_irq(host, 0);
		/* clear interrupts */
		writel(events & event_mask, host->base + MSDC_INT);

		mrq = host->mrq;
		cmd = host->cmd;
		data = host->data;
		spin_unlock(&host->lock);

		if ((events & event_mask) & MSDC_INT_SDIOIRQ)
			sdio_signal_irq(mmc);

		if ((events & event_mask) & MSDC_INT_CDSC) {
			if (host->internal_cd)
				mmc_detect_change(mmc, msecs_to_jiffies(20));
			events &= ~MSDC_INT_CDSC;
		}

		if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ)))
			break;

		if ((mmc->caps2 & MMC_CAP2_CQE) &&
		    (events & MSDC_INT_CMDQ)) {
			msdc_cmdq_irq(host, events);
			/* clear interrupts */
			writel(events, host->base + MSDC_INT);
			return IRQ_HANDLED;
		}

		if (!mrq) {
			dev_err(host->dev,
				"%s: MRQ=NULL; events=%08X; event_mask=%08X\n",
				__func__, events, event_mask);
			WARN_ON(1);
break;
		}

		dev_dbg(host->dev, "%s: events=%08X\n", __func__, events);

		if (cmd)
			msdc_cmd_done(host, events, mrq, cmd);
		else if (data)
			msdc_data_xfer_done(host, events, mrq, data);
	}

	return IRQ_HANDLED;
}

/*
 * One-time controller initialization: optional block reset, new TX/RX path
 * enables, interrupt/card-detect setup, patch-bit and tuning-register
 * defaults, SDIO configuration, and capture of the default/saved tuning
 * parameters used later by msdc_set_mclk().
 */
static void msdc_init_hw(struct msdc_host *host)
{
	u32 val;
	u32 tune_reg = host->dev_comp->pad_tune_reg;
	struct mmc_host *mmc = mmc_from_priv(host);

	if (host->reset) {
		reset_control_assert(host->reset);
		usleep_range(10, 50);
		reset_control_deassert(host->reset);
	}

	/* New tx/rx enable bit need to be 0->1 for hardware check */
	if (host->dev_comp->support_new_tx) {
		sdr_clr_bits(host->base + SDC_ADV_CFG0, SDC_NEW_TX_EN);
		sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_NEW_TX_EN);
		msdc_new_tx_setting(host);
	}
	if (host->dev_comp->support_new_rx) {
		sdr_clr_bits(host->base + MSDC_NEW_RX_CFG, MSDC_NEW_RX_PATH_SEL);
		sdr_set_bits(host->base + MSDC_NEW_RX_CFG, MSDC_NEW_RX_PATH_SEL);
	}

	/* Configure to MMC/SD mode, clock free running */
	sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);

	/* Reset */
	msdc_reset_hw(host);

	/* Disable and clear all interrupts */
	writel(0, host->base + MSDC_INTEN);
	val = readl(host->base + MSDC_INT);
	writel(val, host->base + MSDC_INT);

	/* Configure card detection */
	if (host->internal_cd) {
		sdr_set_field(host->base + MSDC_PS, MSDC_PS_CDDEBOUNCE,
			      DEFAULT_DEBOUNCE);
		sdr_set_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
		sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
		sdr_set_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
	} else {
		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
		sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
		sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
	}

	/* Clear the tuning registers to a known state. */
	if (host->top_base) {
		writel(0, host->top_base + EMMC_TOP_CONTROL);
		writel(0, host->top_base + EMMC_TOP_CMD);
	} else {
		writel(0, host->base + tune_reg);
	}
	writel(0, host->base + MSDC_IOCON);
	sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
	/*
	 * Vendor-recommended patch-bit defaults
	 * (magic values — NOTE(review): meaning per MediaTek datasheet only).
	 */
	writel(0x403c0046, host->base + MSDC_PATCH_BIT);
	sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
	writel(0xffff4089, host->base + MSDC_PATCH_BIT1);
	sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);

	if (host->dev_comp->stop_clk_fix) {
		if (host->dev_comp->stop_dly_sel)
			sdr_set_field(host->base + MSDC_PATCH_BIT1,
				      MSDC_PATCH_BIT1_STOP_DLY,
				      host->dev_comp->stop_dly_sel);

		if (host->dev_comp->pop_en_cnt)
			sdr_set_field(host->base + MSDC_PATCH_BIT2,
				      MSDC_PB2_POP_EN_CNT,
				      host->dev_comp->pop_en_cnt);

		sdr_clr_bits(host->base + SDC_FIFO_CFG,
			     SDC_FIFO_CFG_WRVALIDSEL);
		sdr_clr_bits(host->base + SDC_FIFO_CFG,
			     SDC_FIFO_CFG_RDVALIDSEL);
	}

	if (host->dev_comp->busy_check)
		sdr_clr_bits(host->base + MSDC_PATCH_BIT1, BIT(7));

	if (host->dev_comp->async_fifo) {
		sdr_set_field(host->base + MSDC_PATCH_BIT2,
			      MSDC_PB2_RESPWAIT, 3);
		if (host->dev_comp->enhance_rx) {
			if (host->top_base)
				sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
					     SDC_RX_ENH_EN);
			else
				sdr_set_bits(host->base + SDC_ADV_CFG0,
					     SDC_RX_ENHANCE_EN);
		} else {
			sdr_set_field(host->base + MSDC_PATCH_BIT2,
				      MSDC_PB2_RESPSTSENSEL, 2);
			sdr_set_field(host->base + MSDC_PATCH_BIT2,
				      MSDC_PB2_CRCSTSENSEL, 2);
		}
		/* use async fifo, then no need tune internal delay */
		sdr_clr_bits(host->base + MSDC_PATCH_BIT2,
			     MSDC_PATCH_BIT2_CFGRESP);
		sdr_set_bits(host->base + MSDC_PATCH_BIT2,
			     MSDC_PATCH_BIT2_CFGCRCSTS);
	}

	if (host->dev_comp->support_64g)
		sdr_set_bits(host->base + MSDC_PATCH_BIT2,
			     MSDC_PB2_SUPPORT_64G);
	if (host->dev_comp->data_tune) {
		if (host->top_base) {
			sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
				     PAD_DAT_RD_RXDLY_SEL);
			sdr_clr_bits(host->top_base + EMMC_TOP_CONTROL,
				     DATA_K_VALUE_SEL);
			sdr_set_bits(host->top_base + EMMC_TOP_CMD,
				     PAD_CMD_RD_RXDLY_SEL);
			if (host->tuning_step > PAD_DELAY_HALF) {
				sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
					     PAD_DAT_RD_RXDLY2_SEL);
				sdr_set_bits(host->top_base + EMMC_TOP_CMD,
					     PAD_CMD_RD_RXDLY2_SEL);
			}
		} else {
			sdr_set_bits(host->base + tune_reg,
				     MSDC_PAD_TUNE_RD_SEL |
				     MSDC_PAD_TUNE_CMD_SEL);
			if (host->tuning_step > PAD_DELAY_HALF)
				sdr_set_bits(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
					     MSDC_PAD_TUNE_RD2_SEL |
					     MSDC_PAD_TUNE_CMD2_SEL);
		}
	} else {
		/* choose clock tune */
		if (host->top_base)
			sdr_set_bits(host->top_base + EMMC_TOP_CONTROL,
				     PAD_RXDLY_SEL);
		else
			sdr_set_bits(host->base + tune_reg,
				     MSDC_PAD_TUNE_RXDLYSEL);
	}

	if (mmc->caps2 & MMC_CAP2_NO_SDIO) {
		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIO);
		sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_SDIOIRQ);
		sdr_clr_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
	} else {
		/* Configure to enable SDIO mode, otherwise SDIO CMD5 fails */
		sdr_set_bits(host->base + SDC_CFG, SDC_CFG_SDIO);

		/* Config SDIO device detect interrupt function */
		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_SDIOIDE);
		sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_DAT1_IRQ_TRIGGER);
	}

	/* Configure to default data timeout */
	sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3);

	host->def_tune_para.iocon = readl(host->base + MSDC_IOCON);
	host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON);
	if (host->top_base) {
		host->def_tune_para.emmc_top_control =
			readl(host->top_base + EMMC_TOP_CONTROL);
		host->def_tune_para.emmc_top_cmd =
			readl(host->top_base + EMMC_TOP_CMD);
		host->saved_tune_para.emmc_top_control =
			readl(host->top_base + EMMC_TOP_CONTROL);
		host->saved_tune_para.emmc_top_cmd =
			readl(host->top_base + EMMC_TOP_CMD);
	} else {
		host->def_tune_para.pad_tune = readl(host->base + tune_reg);
		host->saved_tune_para.pad_tune = readl(host->base + tune_reg);
	}
	dev_dbg(host->dev, "init hardware done!");
}

/* Quiesce the controller: disable card detect and mask/clear all interrupts. */
static void msdc_deinit_hw(struct msdc_host *host)
{
	u32 val;

	if (host->internal_cd) {
		/* Disabled card-detect */
		sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
		sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
	}

	/* Disable and clear all interrupts */
	writel(0, host->base + MSDC_INTEN);

	val = readl(host->base + MSDC_INT);
	writel(val, host->base + MSDC_INT);
}

/* init gpd and bd list in msdc_drv_probe */
static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
{
	struct mt_gpdma_desc *gpd = dma->gpd;
	struct mt_bdma_desc *bd = dma->bd;
	dma_addr_t dma_addr;
	int i;

	memset(gpd, 0, sizeof(struct mt_gpdma_desc) * 2);

	dma_addr = dma->gpd_addr + sizeof(struct mt_gpdma_desc);
	gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */
	/* gpd->next is must set for desc DMA
	 * That's why must alloc 2 gpd structure.
*/
	gpd->next = lower_32_bits(dma_addr);
	/* On 64G-capable hosts the high 4 address bits live in gpd_info. */
	if (host->dev_comp->support_64g)
		gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 24;

	dma_addr = dma->bd_addr;
	gpd->ptr = lower_32_bits(dma->bd_addr); /* physical address */
	if (host->dev_comp->support_64g)
		gpd->gpd_info |= (upper_32_bits(dma_addr) & 0xf) << 28;

	/* Pre-link the bd ring; each bd points at its successor. */
	memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM);
	for (i = 0; i < (MAX_BD_NUM - 1); i++) {
		dma_addr = dma->bd_addr + sizeof(*bd) * (i + 1);
		bd[i].next = lower_32_bits(dma_addr);
		if (host->dev_comp->support_64g)
			bd[i].bd_info |= (upper_32_bits(dma_addr) & 0xf) << 24;
	}
}

/*
 * mmc_host_ops .set_ios: apply bus width, power state (vmmc/vqmmc) and
 * clock/timing changes requested by the core.
 */
static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct msdc_host *host = mmc_priv(mmc);
	int ret;

	msdc_set_buswidth(host, ios->bus_width);

	/* Suspend/Resume will do power off/on */
	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			msdc_init_hw(host);
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
						    ios->vdd);
			if (ret) {
				dev_err(host->dev, "Failed to set vmmc power!\n");
				return;
			}
		}
		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret)
				dev_err(host->dev, "Failed to set vqmmc power!\n");
			else
				host->vqmmc_enabled = true;
		}
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}
		break;
	default:
		break;
	}

	if (host->mclk != ios->clock || host->timing != ios->timing)
		msdc_set_mclk(host, ios->timing, ios->clock);
}

/* Test one bit of the tuning pass/fail map; index wraps at PAD_DELAY_FULL. */
static u64 test_delay_bit(u64 delay, u32 bit)
{
	bit %= PAD_DELAY_FULL;
	return delay & BIT_ULL(bit);
}

/* Length of the run of set bits in @delay starting at @start_bit. */
static int get_delay_len(u64 delay, u32 start_bit)
{
	int i;

	for (i = 0; i < (PAD_DELAY_FULL - start_bit); i++) {
		if (test_delay_bit(delay, start_bit + i) == 0)
			return i;
	}
	return PAD_DELAY_FULL - start_bit;
}

/*
 * Scan the tuning pass map @delay for the longest run of passing delay cells
 * and pick a phase inside it. Returns final_phase = 0xff when no cell passed.
 */
static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u64 delay)
{
	int start = 0, len = 0;
	int start_final = 0, len_final = 0;
	u8 final_phase = 0xff;
	struct msdc_delay_phase delay_phase = { 0, };

	if (delay == 0) {
		dev_err(host->dev, "phase error: [map:%016llx]\n", delay);
		delay_phase.final_phase = final_phase;
		return delay_phase;
	}

	while (start < PAD_DELAY_FULL) {
		len = get_delay_len(delay, start);
		if (len_final < len) {
			start_final = start;
			len_final = len;
		}
		start += len ? len : 1;
		/* Early out for 32-bit maps once a good early window is found. */
		if (!upper_32_bits(delay) && len >= 12 && start_final < 4)
			break;
	}

	/* The rule is that to find the smallest delay cell */
	if (start_final == 0)
		final_phase = (start_final + len_final / 3) % PAD_DELAY_FULL;
	else
		final_phase = (start_final + len_final / 2) % PAD_DELAY_FULL;
	dev_dbg(host->dev, "phase: [map:%016llx] [maxlen:%d] [final:%d]\n",
		delay, len_final, final_phase);

	delay_phase.maxlen = len_final;
	delay_phase.start = start_final;
	delay_phase.final_phase = final_phase;
	return delay_phase;
}

/*
 * Program the command-line RX delay. Values beyond the first delay line's
 * range (PAD_DELAY_HALF) spill into the second delay line register.
 */
static inline void msdc_set_cmd_delay(struct msdc_host *host, u32 value)
{
	u32 tune_reg = host->dev_comp->pad_tune_reg;

	if (host->top_base) {
		if (value < PAD_DELAY_HALF) {
			sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY, value);
			sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2, 0);
		} else {
			sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY,
				      PAD_DELAY_HALF - 1);
			sdr_set_field(host->top_base + EMMC_TOP_CMD, PAD_CMD_RXDLY2,
				      value - PAD_DELAY_HALF);
		}
	} else {
		if (value < PAD_DELAY_HALF) {
			sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, value);
			sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
				      MSDC_PAD_TUNE_CMDRDLY2, 0);
		} else {
			sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY,
				      PAD_DELAY_HALF - 1);
			sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
				      MSDC_PAD_TUNE_CMDRDLY2, value - PAD_DELAY_HALF);
		}
	}
}

/* Program the data-line RX delay; same two-register split as the cmd delay. */
static inline void msdc_set_data_delay(struct msdc_host *host, u32 value)
{
	u32 tune_reg = host->dev_comp->pad_tune_reg;

	if (host->top_base) {
		if (value < PAD_DELAY_HALF) {
			sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
				      PAD_DAT_RD_RXDLY, value);
			sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
				      PAD_DAT_RD_RXDLY2, 0);
		} else {
			sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
				      PAD_DAT_RD_RXDLY, PAD_DELAY_HALF - 1);
			sdr_set_field(host->top_base + EMMC_TOP_CONTROL,
				      PAD_DAT_RD_RXDLY2, value - PAD_DELAY_HALF);
		}
	} else {
		if (value < PAD_DELAY_HALF) {
			sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, value);
			sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
				      MSDC_PAD_TUNE_DATRRDLY2, 0);
		} else {
			sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY,
				      PAD_DELAY_HALF - 1);
			sdr_set_field(host->base + tune_reg + TUNING_REG2_FIXED_OFFEST,
				      MSDC_PAD_TUNE_DATRRDLY2, value - PAD_DELAY_HALF);
		}
	}
}

static inline void msdc_set_data_sample_edge(struct msdc_host *host, bool rising)
{
	u32 value = rising ?
0 : 1; 2200 2201 if (host->dev_comp->support_new_rx) { 2202 sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_PATCH_BIT_RD_DAT_SEL, value); 2203 sdr_set_field(host->base + MSDC_PATCH_BIT2, MSDC_PB2_CFGCRCSTSEDGE, value); 2204 } else { 2205 sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DSPL, value); 2206 sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL, value); 2207 } 2208 } 2209 2210 static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) 2211 { 2212 struct msdc_host *host = mmc_priv(mmc); 2213 u64 rise_delay = 0, fall_delay = 0; 2214 struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; 2215 struct msdc_delay_phase internal_delay_phase; 2216 u8 final_delay, final_maxlen; 2217 u32 internal_delay = 0; 2218 u32 tune_reg = host->dev_comp->pad_tune_reg; 2219 int cmd_err; 2220 int i, j; 2221 2222 if (mmc->ios.timing == MMC_TIMING_MMC_HS200 || 2223 mmc->ios.timing == MMC_TIMING_UHS_SDR104) 2224 sdr_set_field(host->base + tune_reg, 2225 MSDC_PAD_TUNE_CMDRRDLY, 2226 host->hs200_cmd_int_delay); 2227 2228 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2229 for (i = 0; i < host->tuning_step; i++) { 2230 msdc_set_cmd_delay(host, i); 2231 /* 2232 * Using the same parameters, it may sometimes pass the test, 2233 * but sometimes it may fail. To make sure the parameters are 2234 * more stable, we test each set of parameters 3 times. 
2235 */ 2236 for (j = 0; j < 3; j++) { 2237 mmc_send_tuning(mmc, opcode, &cmd_err); 2238 if (!cmd_err) { 2239 rise_delay |= BIT_ULL(i); 2240 } else { 2241 rise_delay &= ~BIT_ULL(i); 2242 break; 2243 } 2244 } 2245 } 2246 final_rise_delay = get_best_delay(host, rise_delay); 2247 /* if rising edge has enough margin, then do not scan falling edge */ 2248 if (final_rise_delay.maxlen >= 12 || 2249 (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) 2250 goto skip_fall; 2251 2252 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2253 for (i = 0; i < host->tuning_step; i++) { 2254 msdc_set_cmd_delay(host, i); 2255 /* 2256 * Using the same parameters, it may sometimes pass the test, 2257 * but sometimes it may fail. To make sure the parameters are 2258 * more stable, we test each set of parameters 3 times. 2259 */ 2260 for (j = 0; j < 3; j++) { 2261 mmc_send_tuning(mmc, opcode, &cmd_err); 2262 if (!cmd_err) { 2263 fall_delay |= BIT_ULL(i); 2264 } else { 2265 fall_delay &= ~BIT_ULL(i); 2266 break; 2267 } 2268 } 2269 } 2270 final_fall_delay = get_best_delay(host, fall_delay); 2271 2272 skip_fall: 2273 final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen); 2274 if (final_fall_delay.maxlen >= 12 && final_fall_delay.start < 4) 2275 final_maxlen = final_fall_delay.maxlen; 2276 if (final_maxlen == final_rise_delay.maxlen) { 2277 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2278 final_delay = final_rise_delay.final_phase; 2279 } else { 2280 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2281 final_delay = final_fall_delay.final_phase; 2282 } 2283 msdc_set_cmd_delay(host, final_delay); 2284 2285 if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay) 2286 goto skip_internal; 2287 2288 for (i = 0; i < host->tuning_step; i++) { 2289 sdr_set_field(host->base + tune_reg, 2290 MSDC_PAD_TUNE_CMDRRDLY, i); 2291 mmc_send_tuning(mmc, opcode, &cmd_err); 2292 if (!cmd_err) 2293 internal_delay |= BIT_ULL(i); 2294 } 2295 dev_dbg(host->dev, 
"Final internal delay: 0x%x\n", internal_delay); 2296 internal_delay_phase = get_best_delay(host, internal_delay); 2297 sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, 2298 internal_delay_phase.final_phase); 2299 skip_internal: 2300 dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay); 2301 return final_delay == 0xff ? -EIO : 0; 2302 } 2303 2304 static int hs400_tune_response(struct mmc_host *mmc, u32 opcode) 2305 { 2306 struct msdc_host *host = mmc_priv(mmc); 2307 u32 cmd_delay = 0; 2308 struct msdc_delay_phase final_cmd_delay = { 0,}; 2309 u8 final_delay; 2310 int cmd_err; 2311 int i, j; 2312 2313 /* select EMMC50 PAD CMD tune */ 2314 sdr_set_bits(host->base + PAD_CMD_TUNE, BIT(0)); 2315 sdr_set_field(host->base + MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMDTA, 2); 2316 2317 if (mmc->ios.timing == MMC_TIMING_MMC_HS200 || 2318 mmc->ios.timing == MMC_TIMING_UHS_SDR104) 2319 sdr_set_field(host->base + MSDC_PAD_TUNE, 2320 MSDC_PAD_TUNE_CMDRRDLY, 2321 host->hs200_cmd_int_delay); 2322 2323 if (host->hs400_cmd_resp_sel_rising) 2324 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2325 else 2326 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2327 2328 for (i = 0; i < PAD_DELAY_HALF; i++) { 2329 sdr_set_field(host->base + PAD_CMD_TUNE, 2330 PAD_CMD_TUNE_RX_DLY3, i); 2331 /* 2332 * Using the same parameters, it may sometimes pass the test, 2333 * but sometimes it may fail. To make sure the parameters are 2334 * more stable, we test each set of parameters 3 times. 
2335 */ 2336 for (j = 0; j < 3; j++) { 2337 mmc_send_tuning(mmc, opcode, &cmd_err); 2338 if (!cmd_err) { 2339 cmd_delay |= BIT(i); 2340 } else { 2341 cmd_delay &= ~BIT(i); 2342 break; 2343 } 2344 } 2345 } 2346 final_cmd_delay = get_best_delay(host, cmd_delay); 2347 sdr_set_field(host->base + PAD_CMD_TUNE, PAD_CMD_TUNE_RX_DLY3, 2348 final_cmd_delay.final_phase); 2349 final_delay = final_cmd_delay.final_phase; 2350 2351 dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay); 2352 return final_delay == 0xff ? -EIO : 0; 2353 } 2354 2355 static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) 2356 { 2357 struct msdc_host *host = mmc_priv(mmc); 2358 u64 rise_delay = 0, fall_delay = 0; 2359 struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; 2360 u8 final_delay, final_maxlen; 2361 int i, ret; 2362 2363 sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL, 2364 host->latch_ck); 2365 msdc_set_data_sample_edge(host, true); 2366 for (i = 0; i < host->tuning_step; i++) { 2367 msdc_set_data_delay(host, i); 2368 ret = mmc_send_tuning(mmc, opcode, NULL); 2369 if (!ret) 2370 rise_delay |= BIT_ULL(i); 2371 } 2372 final_rise_delay = get_best_delay(host, rise_delay); 2373 /* if rising edge has enough margin, then do not scan falling edge */ 2374 if (final_rise_delay.maxlen >= 12 || 2375 (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) 2376 goto skip_fall; 2377 2378 msdc_set_data_sample_edge(host, false); 2379 for (i = 0; i < host->tuning_step; i++) { 2380 msdc_set_data_delay(host, i); 2381 ret = mmc_send_tuning(mmc, opcode, NULL); 2382 if (!ret) 2383 fall_delay |= BIT_ULL(i); 2384 } 2385 final_fall_delay = get_best_delay(host, fall_delay); 2386 2387 skip_fall: 2388 final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen); 2389 if (final_maxlen == final_rise_delay.maxlen) { 2390 msdc_set_data_sample_edge(host, true); 2391 final_delay = final_rise_delay.final_phase; 2392 } else { 2393 msdc_set_data_sample_edge(host, 
false); 2394 final_delay = final_fall_delay.final_phase; 2395 } 2396 msdc_set_data_delay(host, final_delay); 2397 2398 dev_dbg(host->dev, "Final data pad delay: %x\n", final_delay); 2399 return final_delay == 0xff ? -EIO : 0; 2400 } 2401 2402 /* 2403 * MSDC IP which supports data tune + async fifo can do CMD/DAT tune 2404 * together, which can save the tuning time. 2405 */ 2406 static int msdc_tune_together(struct mmc_host *mmc, u32 opcode) 2407 { 2408 struct msdc_host *host = mmc_priv(mmc); 2409 u64 rise_delay = 0, fall_delay = 0; 2410 struct msdc_delay_phase final_rise_delay, final_fall_delay = { 0,}; 2411 u8 final_delay, final_maxlen; 2412 int i, ret; 2413 2414 sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL, 2415 host->latch_ck); 2416 2417 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2418 msdc_set_data_sample_edge(host, true); 2419 for (i = 0; i < host->tuning_step; i++) { 2420 msdc_set_cmd_delay(host, i); 2421 msdc_set_data_delay(host, i); 2422 ret = mmc_send_tuning(mmc, opcode, NULL); 2423 if (!ret) 2424 rise_delay |= BIT_ULL(i); 2425 } 2426 final_rise_delay = get_best_delay(host, rise_delay); 2427 /* if rising edge has enough margin, then do not scan falling edge */ 2428 if (final_rise_delay.maxlen >= 12 || 2429 (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) 2430 goto skip_fall; 2431 2432 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2433 msdc_set_data_sample_edge(host, false); 2434 for (i = 0; i < host->tuning_step; i++) { 2435 msdc_set_cmd_delay(host, i); 2436 msdc_set_data_delay(host, i); 2437 ret = mmc_send_tuning(mmc, opcode, NULL); 2438 if (!ret) 2439 fall_delay |= BIT_ULL(i); 2440 } 2441 final_fall_delay = get_best_delay(host, fall_delay); 2442 2443 skip_fall: 2444 final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen); 2445 if (final_maxlen == final_rise_delay.maxlen) { 2446 sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2447 msdc_set_data_sample_edge(host, true); 2448 
final_delay = final_rise_delay.final_phase; 2449 } else { 2450 sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); 2451 msdc_set_data_sample_edge(host, false); 2452 final_delay = final_fall_delay.final_phase; 2453 } 2454 2455 msdc_set_cmd_delay(host, final_delay); 2456 msdc_set_data_delay(host, final_delay); 2457 2458 dev_dbg(host->dev, "Final pad delay: %x\n", final_delay); 2459 return final_delay == 0xff ? -EIO : 0; 2460 } 2461 2462 static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode) 2463 { 2464 struct msdc_host *host = mmc_priv(mmc); 2465 int ret; 2466 u32 tune_reg = host->dev_comp->pad_tune_reg; 2467 2468 if (host->dev_comp->data_tune && host->dev_comp->async_fifo) { 2469 ret = msdc_tune_together(mmc, opcode); 2470 if (host->hs400_mode) { 2471 msdc_set_data_sample_edge(host, true); 2472 msdc_set_data_delay(host, 0); 2473 } 2474 goto tune_done; 2475 } 2476 if (host->hs400_mode && 2477 host->dev_comp->hs400_tune) 2478 ret = hs400_tune_response(mmc, opcode); 2479 else 2480 ret = msdc_tune_response(mmc, opcode); 2481 if (ret == -EIO) { 2482 dev_err(host->dev, "Tune response fail!\n"); 2483 return ret; 2484 } 2485 if (host->hs400_mode == false) { 2486 ret = msdc_tune_data(mmc, opcode); 2487 if (ret == -EIO) 2488 dev_err(host->dev, "Tune data fail!\n"); 2489 } 2490 2491 tune_done: 2492 host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON); 2493 host->saved_tune_para.pad_tune = readl(host->base + tune_reg); 2494 host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE); 2495 if (host->top_base) { 2496 host->saved_tune_para.emmc_top_control = readl(host->top_base + 2497 EMMC_TOP_CONTROL); 2498 host->saved_tune_para.emmc_top_cmd = readl(host->top_base + 2499 EMMC_TOP_CMD); 2500 } 2501 return ret; 2502 } 2503 2504 static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 2505 { 2506 struct msdc_host *host = mmc_priv(mmc); 2507 host->hs400_mode = true; 2508 2509 if (host->top_base) 2510 writel(host->hs400_ds_delay, 
2511 host->top_base + EMMC50_PAD_DS_TUNE); 2512 else 2513 writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE); 2514 /* hs400 mode must set it to 0 */ 2515 sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS); 2516 /* to improve read performance, set outstanding to 2 */ 2517 sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2); 2518 2519 return 0; 2520 } 2521 2522 static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card) 2523 { 2524 struct msdc_host *host = mmc_priv(mmc); 2525 struct msdc_delay_phase dly1_delay; 2526 u32 val, result_dly1 = 0; 2527 u8 *ext_csd; 2528 int i, ret; 2529 2530 if (host->top_base) { 2531 sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE, 2532 PAD_DS_DLY_SEL); 2533 if (host->hs400_ds_dly3) 2534 sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE, 2535 PAD_DS_DLY3, host->hs400_ds_dly3); 2536 } else { 2537 sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL); 2538 if (host->hs400_ds_dly3) 2539 sdr_set_field(host->base + PAD_DS_TUNE, 2540 PAD_DS_TUNE_DLY3, host->hs400_ds_dly3); 2541 } 2542 2543 host->hs400_tuning = true; 2544 for (i = 0; i < PAD_DELAY_HALF; i++) { 2545 if (host->top_base) 2546 sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE, 2547 PAD_DS_DLY1, i); 2548 else 2549 sdr_set_field(host->base + PAD_DS_TUNE, 2550 PAD_DS_TUNE_DLY1, i); 2551 ret = mmc_get_ext_csd(card, &ext_csd); 2552 if (!ret) { 2553 result_dly1 |= BIT(i); 2554 kfree(ext_csd); 2555 } 2556 } 2557 host->hs400_tuning = false; 2558 2559 dly1_delay = get_best_delay(host, result_dly1); 2560 if (dly1_delay.maxlen == 0) { 2561 dev_err(host->dev, "Failed to get DLY1 delay!\n"); 2562 goto fail; 2563 } 2564 if (host->top_base) 2565 sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE, 2566 PAD_DS_DLY1, dly1_delay.final_phase); 2567 else 2568 sdr_set_field(host->base + PAD_DS_TUNE, 2569 PAD_DS_TUNE_DLY1, dly1_delay.final_phase); 2570 2571 if (host->top_base) 2572 val = readl(host->top_base + EMMC50_PAD_DS_TUNE); 2573 else 
2574 val = readl(host->base + PAD_DS_TUNE); 2575 2576 dev_info(host->dev, "Final PAD_DS_TUNE: 0x%x\n", val); 2577 2578 return 0; 2579 2580 fail: 2581 dev_err(host->dev, "Failed to tuning DS pin delay!\n"); 2582 return -EIO; 2583 } 2584 2585 static void msdc_hw_reset(struct mmc_host *mmc) 2586 { 2587 struct msdc_host *host = mmc_priv(mmc); 2588 2589 sdr_set_bits(host->base + EMMC_IOCON, 1); 2590 udelay(10); /* 10us is enough */ 2591 sdr_clr_bits(host->base + EMMC_IOCON, 1); 2592 } 2593 2594 static void msdc_ack_sdio_irq(struct mmc_host *mmc) 2595 { 2596 unsigned long flags; 2597 struct msdc_host *host = mmc_priv(mmc); 2598 2599 spin_lock_irqsave(&host->lock, flags); 2600 __msdc_enable_sdio_irq(host, 1); 2601 spin_unlock_irqrestore(&host->lock, flags); 2602 } 2603 2604 static int msdc_get_cd(struct mmc_host *mmc) 2605 { 2606 struct msdc_host *host = mmc_priv(mmc); 2607 int val; 2608 2609 if (mmc->caps & MMC_CAP_NONREMOVABLE) 2610 return 1; 2611 2612 if (!host->internal_cd) 2613 return mmc_gpio_get_cd(mmc); 2614 2615 val = readl(host->base + MSDC_PS) & MSDC_PS_CDSTS; 2616 if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) 2617 return !!val; 2618 else 2619 return !val; 2620 } 2621 2622 static void msdc_hs400_enhanced_strobe(struct mmc_host *mmc, 2623 struct mmc_ios *ios) 2624 { 2625 struct msdc_host *host = mmc_priv(mmc); 2626 2627 if (ios->enhanced_strobe) { 2628 msdc_prepare_hs400_tuning(mmc, ios); 2629 sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_PADCMD_LATCHCK, 1); 2630 sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_CMD_RESP_SEL, 1); 2631 sdr_set_field(host->base + EMMC50_CFG1, EMMC50_CFG1_DS_CFG, 1); 2632 2633 sdr_clr_bits(host->base + CQHCI_SETTING, CQHCI_RD_CMD_WND_SEL); 2634 sdr_clr_bits(host->base + CQHCI_SETTING, CQHCI_WR_CMD_WND_SEL); 2635 sdr_clr_bits(host->base + EMMC51_CFG0, CMDQ_RDAT_CNT); 2636 } else { 2637 sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_PADCMD_LATCHCK, 0); 2638 sdr_set_field(host->base + EMMC50_CFG0, EMMC50_CFG_CMD_RESP_SEL, 0); 
2639 sdr_set_field(host->base + EMMC50_CFG1, EMMC50_CFG1_DS_CFG, 0); 2640 2641 sdr_set_bits(host->base + CQHCI_SETTING, CQHCI_RD_CMD_WND_SEL); 2642 sdr_set_bits(host->base + CQHCI_SETTING, CQHCI_WR_CMD_WND_SEL); 2643 sdr_set_field(host->base + EMMC51_CFG0, CMDQ_RDAT_CNT, 0xb4); 2644 } 2645 } 2646 2647 static void msdc_cqe_cit_cal(struct msdc_host *host, u64 timer_ns) 2648 { 2649 struct mmc_host *mmc = mmc_from_priv(host); 2650 struct cqhci_host *cq_host = mmc->cqe_private; 2651 u8 itcfmul; 2652 u64 hclk_freq, value; 2653 2654 /* 2655 * On MediaTek SoCs the MSDC controller's CQE uses msdc_hclk as ITCFVAL 2656 * so we multiply/divide the HCLK frequency by ITCFMUL to calculate the 2657 * Send Status Command Idle Timer (CIT) value. 2658 */ 2659 hclk_freq = (u64)clk_get_rate(host->h_clk); 2660 itcfmul = CQHCI_ITCFMUL(cqhci_readl(cq_host, CQHCI_CAP)); 2661 switch (itcfmul) { 2662 case 0x0: 2663 do_div(hclk_freq, 1000); 2664 break; 2665 case 0x1: 2666 do_div(hclk_freq, 100); 2667 break; 2668 case 0x2: 2669 do_div(hclk_freq, 10); 2670 break; 2671 case 0x3: 2672 break; 2673 case 0x4: 2674 hclk_freq = hclk_freq * 10; 2675 break; 2676 default: 2677 host->cq_ssc1_time = 0x40; 2678 return; 2679 } 2680 2681 value = hclk_freq * timer_ns; 2682 do_div(value, 1000000000); 2683 host->cq_ssc1_time = value; 2684 } 2685 2686 static void msdc_cqe_enable(struct mmc_host *mmc) 2687 { 2688 struct msdc_host *host = mmc_priv(mmc); 2689 struct cqhci_host *cq_host = mmc->cqe_private; 2690 2691 /* enable cmdq irq */ 2692 writel(MSDC_INT_CMDQ, host->base + MSDC_INTEN); 2693 /* enable busy check */ 2694 sdr_set_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL); 2695 /* default write data / busy timeout 20s */ 2696 msdc_set_busy_timeout(host, 20 * 1000000000ULL, 0); 2697 /* default read data timeout 1s */ 2698 msdc_set_timeout(host, 1000000000ULL, 0); 2699 2700 /* Set the send status command idle timer */ 2701 cqhci_writel(cq_host, host->cq_ssc1_time, CQHCI_SSC1); 2702 } 2703 2704 static 
void msdc_cqe_disable(struct mmc_host *mmc, bool recovery) 2705 { 2706 struct msdc_host *host = mmc_priv(mmc); 2707 unsigned int val = 0; 2708 2709 /* disable cmdq irq */ 2710 sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INT_CMDQ); 2711 /* disable busy check */ 2712 sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL); 2713 2714 val = readl(host->base + MSDC_INT); 2715 writel(val, host->base + MSDC_INT); 2716 2717 if (recovery) { 2718 sdr_set_field(host->base + MSDC_DMA_CTRL, 2719 MSDC_DMA_CTRL_STOP, 1); 2720 if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CTRL, val, 2721 !(val & MSDC_DMA_CTRL_STOP), 1, 3000))) 2722 return; 2723 if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val, 2724 !(val & MSDC_DMA_CFG_STS), 1, 3000))) 2725 return; 2726 msdc_reset_hw(host); 2727 } 2728 } 2729 2730 static void msdc_cqe_pre_enable(struct mmc_host *mmc) 2731 { 2732 struct cqhci_host *cq_host = mmc->cqe_private; 2733 u32 reg; 2734 2735 reg = cqhci_readl(cq_host, CQHCI_CFG); 2736 reg |= CQHCI_ENABLE; 2737 cqhci_writel(cq_host, reg, CQHCI_CFG); 2738 } 2739 2740 static void msdc_cqe_post_disable(struct mmc_host *mmc) 2741 { 2742 struct cqhci_host *cq_host = mmc->cqe_private; 2743 u32 reg; 2744 2745 reg = cqhci_readl(cq_host, CQHCI_CFG); 2746 reg &= ~CQHCI_ENABLE; 2747 cqhci_writel(cq_host, reg, CQHCI_CFG); 2748 } 2749 2750 static const struct mmc_host_ops mt_msdc_ops = { 2751 .post_req = msdc_post_req, 2752 .pre_req = msdc_pre_req, 2753 .request = msdc_ops_request, 2754 .set_ios = msdc_ops_set_ios, 2755 .get_ro = mmc_gpio_get_ro, 2756 .get_cd = msdc_get_cd, 2757 .hs400_enhanced_strobe = msdc_hs400_enhanced_strobe, 2758 .enable_sdio_irq = msdc_enable_sdio_irq, 2759 .ack_sdio_irq = msdc_ack_sdio_irq, 2760 .start_signal_voltage_switch = msdc_ops_switch_volt, 2761 .card_busy = msdc_card_busy, 2762 .execute_tuning = msdc_execute_tuning, 2763 .prepare_hs400_tuning = msdc_prepare_hs400_tuning, 2764 .execute_hs400_tuning = msdc_execute_hs400_tuning, 2765 
.card_hw_reset = msdc_hw_reset, 2766 }; 2767 2768 static const struct cqhci_host_ops msdc_cmdq_ops = { 2769 .enable = msdc_cqe_enable, 2770 .disable = msdc_cqe_disable, 2771 .pre_enable = msdc_cqe_pre_enable, 2772 .post_disable = msdc_cqe_post_disable, 2773 }; 2774 2775 static void msdc_of_property_parse(struct platform_device *pdev, 2776 struct msdc_host *host) 2777 { 2778 struct mmc_host *mmc = mmc_from_priv(host); 2779 2780 of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck", 2781 &host->latch_ck); 2782 2783 of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay", 2784 &host->hs400_ds_delay); 2785 2786 of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-ds-dly3", 2787 &host->hs400_ds_dly3); 2788 2789 of_property_read_u32(pdev->dev.of_node, "mediatek,hs200-cmd-int-delay", 2790 &host->hs200_cmd_int_delay); 2791 2792 of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-cmd-int-delay", 2793 &host->hs400_cmd_int_delay); 2794 2795 if (of_property_read_bool(pdev->dev.of_node, 2796 "mediatek,hs400-cmd-resp-sel-rising")) 2797 host->hs400_cmd_resp_sel_rising = true; 2798 else 2799 host->hs400_cmd_resp_sel_rising = false; 2800 2801 if (of_property_read_u32(pdev->dev.of_node, "mediatek,tuning-step", 2802 &host->tuning_step)) { 2803 if (mmc->caps2 & MMC_CAP2_NO_MMC) 2804 host->tuning_step = PAD_DELAY_FULL; 2805 else 2806 host->tuning_step = PAD_DELAY_HALF; 2807 } 2808 2809 if (of_property_read_bool(pdev->dev.of_node, 2810 "supports-cqe")) 2811 host->cqhci = true; 2812 else 2813 host->cqhci = false; 2814 } 2815 2816 static int msdc_of_clock_parse(struct platform_device *pdev, 2817 struct msdc_host *host) 2818 { 2819 int ret; 2820 2821 host->src_clk = devm_clk_get(&pdev->dev, "source"); 2822 if (IS_ERR(host->src_clk)) 2823 return PTR_ERR(host->src_clk); 2824 2825 host->h_clk = devm_clk_get(&pdev->dev, "hclk"); 2826 if (IS_ERR(host->h_clk)) 2827 return PTR_ERR(host->h_clk); 2828 2829 host->bus_clk = devm_clk_get_optional(&pdev->dev, "bus_clk"); 2830 if 
(IS_ERR(host->bus_clk)) 2831 host->bus_clk = NULL; 2832 2833 /*source clock control gate is optional clock*/ 2834 host->src_clk_cg = devm_clk_get_optional(&pdev->dev, "source_cg"); 2835 if (IS_ERR(host->src_clk_cg)) 2836 return PTR_ERR(host->src_clk_cg); 2837 2838 /* 2839 * Fallback for legacy device-trees: src_clk and HCLK use the same 2840 * bit to control gating but they are parented to a different mux, 2841 * hence if our intention is to gate only the source, required 2842 * during a clk mode switch to avoid hw hangs, we need to gate 2843 * its parent (specified as a different clock only on new DTs). 2844 */ 2845 if (!host->src_clk_cg) { 2846 host->src_clk_cg = clk_get_parent(host->src_clk); 2847 if (IS_ERR(host->src_clk_cg)) 2848 return PTR_ERR(host->src_clk_cg); 2849 } 2850 2851 /* If present, always enable for this clock gate */ 2852 host->sys_clk_cg = devm_clk_get_optional_enabled(&pdev->dev, "sys_cg"); 2853 if (IS_ERR(host->sys_clk_cg)) 2854 host->sys_clk_cg = NULL; 2855 2856 host->bulk_clks[0].id = "pclk_cg"; 2857 host->bulk_clks[1].id = "axi_cg"; 2858 host->bulk_clks[2].id = "ahb_cg"; 2859 ret = devm_clk_bulk_get_optional(&pdev->dev, MSDC_NR_CLOCKS, 2860 host->bulk_clks); 2861 if (ret) { 2862 dev_err(&pdev->dev, "Cannot get pclk/axi/ahb clock gates\n"); 2863 return ret; 2864 } 2865 2866 return 0; 2867 } 2868 2869 static int msdc_drv_probe(struct platform_device *pdev) 2870 { 2871 struct mmc_host *mmc; 2872 struct msdc_host *host; 2873 int ret; 2874 2875 if (!pdev->dev.of_node) { 2876 dev_err(&pdev->dev, "No DT found\n"); 2877 return -EINVAL; 2878 } 2879 2880 /* Allocate MMC host for this device */ 2881 mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct msdc_host)); 2882 if (!mmc) 2883 return -ENOMEM; 2884 2885 host = mmc_priv(mmc); 2886 ret = mmc_of_parse(mmc); 2887 if (ret) 2888 return ret; 2889 2890 host->base = devm_platform_ioremap_resource(pdev, 0); 2891 if (IS_ERR(host->base)) 2892 return PTR_ERR(host->base); 2893 2894 host->dev_comp = 
of_device_get_match_data(&pdev->dev); 2895 2896 if (host->dev_comp->needs_top_base) { 2897 host->top_base = devm_platform_ioremap_resource(pdev, 1); 2898 if (IS_ERR(host->top_base)) 2899 return PTR_ERR(host->top_base); 2900 } 2901 2902 ret = mmc_regulator_get_supply(mmc); 2903 if (ret) 2904 return ret; 2905 2906 ret = msdc_of_clock_parse(pdev, host); 2907 if (ret) 2908 return ret; 2909 2910 host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev, 2911 "hrst"); 2912 if (IS_ERR(host->reset)) 2913 return PTR_ERR(host->reset); 2914 2915 /* only eMMC has crypto property */ 2916 if (!(mmc->caps2 & MMC_CAP2_NO_MMC)) { 2917 host->crypto_clk = devm_clk_get_optional(&pdev->dev, "crypto"); 2918 if (IS_ERR(host->crypto_clk)) 2919 return PTR_ERR(host->crypto_clk); 2920 else if (host->crypto_clk) 2921 mmc->caps2 |= MMC_CAP2_CRYPTO; 2922 } 2923 2924 host->irq = platform_get_irq(pdev, 0); 2925 if (host->irq < 0) 2926 return host->irq; 2927 2928 host->pinctrl = devm_pinctrl_get(&pdev->dev); 2929 if (IS_ERR(host->pinctrl)) 2930 return dev_err_probe(&pdev->dev, PTR_ERR(host->pinctrl), 2931 "Cannot find pinctrl"); 2932 2933 host->pins_default = pinctrl_lookup_state(host->pinctrl, "default"); 2934 if (IS_ERR(host->pins_default)) { 2935 dev_err(&pdev->dev, "Cannot find pinctrl default!\n"); 2936 return PTR_ERR(host->pins_default); 2937 } 2938 2939 host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs"); 2940 if (IS_ERR(host->pins_uhs)) { 2941 dev_err(&pdev->dev, "Cannot find pinctrl uhs!\n"); 2942 return PTR_ERR(host->pins_uhs); 2943 } 2944 2945 /* Support for SDIO eint irq ? 
*/ 2946 if ((mmc->pm_caps & MMC_PM_WAKE_SDIO_IRQ) && (mmc->pm_caps & MMC_PM_KEEP_POWER)) { 2947 host->eint_irq = platform_get_irq_byname_optional(pdev, "sdio_wakeup"); 2948 if (host->eint_irq > 0) { 2949 host->pins_eint = pinctrl_lookup_state(host->pinctrl, "state_eint"); 2950 if (IS_ERR(host->pins_eint)) { 2951 dev_err(&pdev->dev, "Cannot find pinctrl eint!\n"); 2952 host->pins_eint = NULL; 2953 } else { 2954 device_init_wakeup(&pdev->dev, true); 2955 } 2956 } 2957 } 2958 2959 msdc_of_property_parse(pdev, host); 2960 2961 host->dev = &pdev->dev; 2962 host->src_clk_freq = clk_get_rate(host->src_clk); 2963 /* Set host parameters to mmc */ 2964 mmc->ops = &mt_msdc_ops; 2965 if (host->dev_comp->clk_div_bits == 8) 2966 mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255); 2967 else 2968 mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095); 2969 2970 if (!(mmc->caps & MMC_CAP_NONREMOVABLE) && 2971 !mmc_can_gpio_cd(mmc) && 2972 host->dev_comp->use_internal_cd) { 2973 /* 2974 * Is removable but no GPIO declared, so 2975 * use internal functionality. 
2976 */ 2977 host->internal_cd = true; 2978 } 2979 2980 if (mmc->caps & MMC_CAP_SDIO_IRQ) 2981 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 2982 2983 mmc->caps |= MMC_CAP_CMD23; 2984 if (host->cqhci) 2985 mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; 2986 /* MMC core transfer sizes tunable parameters */ 2987 mmc->max_segs = MAX_BD_NUM; 2988 if (host->dev_comp->support_64g) 2989 mmc->max_seg_size = BDMA_DESC_BUFLEN_EXT; 2990 else 2991 mmc->max_seg_size = BDMA_DESC_BUFLEN; 2992 mmc->max_blk_size = 2048; 2993 mmc->max_req_size = 512 * 1024; 2994 mmc->max_blk_count = mmc->max_req_size / 512; 2995 if (host->dev_comp->support_64g) 2996 host->dma_mask = DMA_BIT_MASK(36); 2997 else 2998 host->dma_mask = DMA_BIT_MASK(32); 2999 mmc_dev(mmc)->dma_mask = &host->dma_mask; 3000 3001 host->timeout_clks = 3 * 1048576; 3002 host->dma.gpd = dma_alloc_coherent(&pdev->dev, 3003 2 * sizeof(struct mt_gpdma_desc), 3004 &host->dma.gpd_addr, GFP_KERNEL); 3005 host->dma.bd = dma_alloc_coherent(&pdev->dev, 3006 MAX_BD_NUM * sizeof(struct mt_bdma_desc), 3007 &host->dma.bd_addr, GFP_KERNEL); 3008 if (!host->dma.gpd || !host->dma.bd) { 3009 ret = -ENOMEM; 3010 goto release_mem; 3011 } 3012 msdc_init_gpd_bd(host, &host->dma); 3013 INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout); 3014 spin_lock_init(&host->lock); 3015 3016 platform_set_drvdata(pdev, mmc); 3017 ret = msdc_ungate_clock(host); 3018 if (ret) { 3019 dev_err(&pdev->dev, "Cannot ungate clocks!\n"); 3020 goto release_clk; 3021 } 3022 msdc_init_hw(host); 3023 3024 if (mmc->caps2 & MMC_CAP2_CQE) { 3025 host->cq_host = devm_kzalloc(mmc->parent, 3026 sizeof(*host->cq_host), 3027 GFP_KERNEL); 3028 if (!host->cq_host) { 3029 ret = -ENOMEM; 3030 goto release; 3031 } 3032 host->cq_host->caps |= CQHCI_TASK_DESC_SZ_128; 3033 host->cq_host->mmio = host->base + 0x800; 3034 host->cq_host->ops = &msdc_cmdq_ops; 3035 ret = cqhci_init(host->cq_host, mmc, true); 3036 if (ret) 3037 goto release; 3038 mmc->max_segs = 128; 3039 /* cqhci 16bit 
length */ 3040 /* 0 size, means 65536 so we don't have to -1 here */ 3041 mmc->max_seg_size = 64 * 1024; 3042 /* Reduce CIT to 0x40 that corresponds to 2.35us */ 3043 msdc_cqe_cit_cal(host, 2350); 3044 } else if (mmc->caps2 & MMC_CAP2_NO_SDIO) { 3045 /* Use HSQ on eMMC/SD (but not on SDIO) if HW CQE not supported */ 3046 struct mmc_hsq *hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL); 3047 if (!hsq) { 3048 ret = -ENOMEM; 3049 goto release; 3050 } 3051 3052 ret = mmc_hsq_init(hsq, mmc); 3053 if (ret) 3054 goto release; 3055 3056 host->hsq_en = true; 3057 } 3058 3059 ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq, 3060 IRQF_TRIGGER_NONE, pdev->name, host); 3061 if (ret) 3062 goto release; 3063 3064 pm_runtime_set_active(host->dev); 3065 pm_runtime_set_autosuspend_delay(host->dev, MTK_MMC_AUTOSUSPEND_DELAY); 3066 pm_runtime_use_autosuspend(host->dev); 3067 pm_runtime_enable(host->dev); 3068 ret = mmc_add_host(mmc); 3069 3070 if (ret) 3071 goto end; 3072 3073 return 0; 3074 end: 3075 pm_runtime_disable(host->dev); 3076 release: 3077 msdc_deinit_hw(host); 3078 release_clk: 3079 msdc_gate_clock(host); 3080 platform_set_drvdata(pdev, NULL); 3081 release_mem: 3082 device_init_wakeup(&pdev->dev, false); 3083 if (host->dma.gpd) 3084 dma_free_coherent(&pdev->dev, 3085 2 * sizeof(struct mt_gpdma_desc), 3086 host->dma.gpd, host->dma.gpd_addr); 3087 if (host->dma.bd) 3088 dma_free_coherent(&pdev->dev, 3089 MAX_BD_NUM * sizeof(struct mt_bdma_desc), 3090 host->dma.bd, host->dma.bd_addr); 3091 return ret; 3092 } 3093 3094 static void msdc_drv_remove(struct platform_device *pdev) 3095 { 3096 struct mmc_host *mmc; 3097 struct msdc_host *host; 3098 3099 mmc = platform_get_drvdata(pdev); 3100 host = mmc_priv(mmc); 3101 3102 pm_runtime_get_sync(host->dev); 3103 3104 platform_set_drvdata(pdev, NULL); 3105 mmc_remove_host(mmc); 3106 msdc_deinit_hw(host); 3107 msdc_gate_clock(host); 3108 3109 pm_runtime_disable(host->dev); 3110 pm_runtime_put_noidle(host->dev); 3111 
	/* Free the GPD/BD descriptor pools allocated in probe */
	dma_free_coherent(&pdev->dev,
			  2 * sizeof(struct mt_gpdma_desc),
			  host->dma.gpd, host->dma.gpd_addr);
	dma_free_coherent(&pdev->dev, MAX_BD_NUM * sizeof(struct mt_bdma_desc),
			  host->dma.bd, host->dma.bd_addr);
	device_init_wakeup(&pdev->dev, false);
}

/*
 * Snapshot the controller's configuration/tuning registers before the
 * clocks are gated in runtime suspend, so msdc_restore_reg() can
 * reprogram them on resume.  On SoCs with a separate "top" pad block,
 * the pad tuning lives there instead of the per-host tune register.
 */
static void msdc_save_reg(struct msdc_host *host)
{
	u32 tune_reg = host->dev_comp->pad_tune_reg;

	host->save_para.msdc_cfg = readl(host->base + MSDC_CFG);
	host->save_para.iocon = readl(host->base + MSDC_IOCON);
	host->save_para.sdc_cfg = readl(host->base + SDC_CFG);
	host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
	host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
	host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2);
	host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
	host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE);
	host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
	host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3);
	host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG);
	if (host->top_base) {
		host->save_para.emmc_top_control =
			readl(host->top_base + EMMC_TOP_CONTROL);
		host->save_para.emmc_top_cmd =
			readl(host->top_base + EMMC_TOP_CMD);
		host->save_para.emmc50_pad_ds_tune =
			readl(host->top_base + EMMC50_PAD_DS_TUNE);
		host->save_para.loop_test_control =
			readl(host->top_base + LOOP_TEST_CONTROL);
	} else {
		host->save_para.pad_tune = readl(host->base + tune_reg);
	}
}

/*
 * Reprogram the registers captured by msdc_save_reg() and re-arm the
 * SDIO interrupt if a function driver still holds a claim on it.
 */
static void msdc_restore_reg(struct msdc_host *host)
{
	struct mmc_host *mmc = mmc_from_priv(host);
	u32 tune_reg = host->dev_comp->pad_tune_reg;

	/*
	 * Toggle (clear, then set) the new TX/RX path enables before the
	 * bulk restore — presumably this pulses the datapath back into a
	 * known state; NOTE(review): confirm against the MSDC datasheet.
	 */
	if (host->dev_comp->support_new_tx) {
		sdr_clr_bits(host->base + SDC_ADV_CFG0, SDC_NEW_TX_EN);
		sdr_set_bits(host->base + SDC_ADV_CFG0, SDC_NEW_TX_EN);
	}
	if (host->dev_comp->support_new_rx) {
		sdr_clr_bits(host->base + MSDC_NEW_RX_CFG, MSDC_NEW_RX_PATH_SEL);
		sdr_set_bits(host->base + MSDC_NEW_RX_CFG, MSDC_NEW_RX_PATH_SEL);
	}

	writel(host->save_para.msdc_cfg, host->base + MSDC_CFG);
	writel(host->save_para.iocon, host->base + MSDC_IOCON);
	writel(host->save_para.sdc_cfg, host->base + SDC_CFG);
	writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
	writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
	writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2);
	writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
	writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE);
	writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
	writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3);
	writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG);
	if (host->top_base) {
		writel(host->save_para.emmc_top_control,
		       host->top_base + EMMC_TOP_CONTROL);
		writel(host->save_para.emmc_top_cmd,
		       host->top_base + EMMC_TOP_CMD);
		writel(host->save_para.emmc50_pad_ds_tune,
		       host->top_base + EMMC50_PAD_DS_TUNE);
		writel(host->save_para.loop_test_control,
		       host->top_base + LOOP_TEST_CONTROL);
	} else {
		writel(host->save_para.pad_tune, host->base + tune_reg);
	}

	if (sdio_irq_claimed(mmc))
		__msdc_enable_sdio_irq(host, 1);
}

/* Runtime suspend: quiesce HSQ, save registers, park SDIO IRQ handling
 * on the eint pin state (if present), then gate the clocks last. */
static int __maybe_unused msdc_runtime_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct msdc_host *host = mmc_priv(mmc);

	if (host->hsq_en)
		mmc_hsq_suspend(mmc);

	/* Must happen before msdc_gate_clock(): registers are read here */
	msdc_save_reg(host);

	if (sdio_irq_claimed(mmc)) {
		if (host->pins_eint) {
			/* Hand wakeup detection over to the eint pin config */
			disable_irq(host->irq);
			pinctrl_select_state(host->pinctrl, host->pins_eint);
		}

		__msdc_enable_sdio_irq(host, 0);
	}
	msdc_gate_clock(host);
	return 0;
}

/* Runtime resume: mirror of msdc_runtime_suspend() (continued below) */
static int __maybe_unused
msdc_runtime_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct msdc_host *host = mmc_priv(mmc);
	int ret;

	/* Clocks must be running before any register access below */
	ret = msdc_ungate_clock(host);
	if (ret)
		return ret;

	msdc_restore_reg(host);

	/* Undo the eint pinmux switch done in msdc_runtime_suspend() */
	if (sdio_irq_claimed(mmc) && host->pins_eint) {
		pinctrl_select_state(host->pinctrl, host->pins_uhs);
		enable_irq(host->irq);
	}

	if (host->hsq_en)
		mmc_hsq_resume(mmc);

	return 0;
}

/* System suspend: park CQE (if enabled), then force runtime suspend. */
static int __maybe_unused msdc_suspend(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct msdc_host *host = mmc_priv(mmc);
	int ret;
	u32 val;

	if (mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(mmc);
		if (ret)
			return ret;
		/*
		 * Read the pending interrupt status and write it straight
		 * back — presumably MSDC_INT is write-1-to-clear, so this
		 * acks anything still pending; NOTE(review): confirm
		 * against the datasheet.
		 */
		val = readl(host->base + MSDC_INT);
		writel(val, host->base + MSDC_INT);
	}

	/*
	 * Bump up the runtime PM usage counter, otherwise
	 * dev->power.needs_force_resume will not be set to 1 and
	 * pm_runtime_force_resume() would return without resuming.
	 */
	if (sdio_irq_claimed(mmc) && host->pins_eint)
		pm_runtime_get_noresume(dev);

	return pm_runtime_force_suspend(dev);
}

/* System resume: balance the usage-counter bump from msdc_suspend(). */
static int __maybe_unused msdc_resume(struct device *dev)
{
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct msdc_host *host = mmc_priv(mmc);

	if (sdio_irq_claimed(mmc) && host->pins_eint)
		pm_runtime_put_noidle(dev);

	return pm_runtime_force_resume(dev);
}

/* System sleep is routed through the runtime PM callbacks via force_*() */
static const struct dev_pm_ops msdc_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msdc_suspend, msdc_resume)
	SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL)
};

static struct platform_driver mt_msdc_driver = {
	.probe = msdc_drv_probe,
	.remove = msdc_drv_remove,
	.driver = {
		.name = "mtk-msdc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = msdc_of_ids,
		.pm = &msdc_dev_pm_ops,
	},
};

module_platform_driver(mt_msdc_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek SD/MMC Card Driver");