1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Driver for Synopsys DesignWare Cores Mobile Storage Host Controller
4 *
5 * Copyright (C) 2018 Synaptics Incorporated
6 *
7 * Author: Jisheng Zhang <jszhang@kernel.org>
8 */
9
10 #include <linux/acpi.h>
11 #include <linux/arm-smccc.h>
12 #include <linux/bitfield.h>
13 #include <linux/clk.h>
14 #include <linux/clk-provider.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/iopoll.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/platform_device.h>
21 #include <linux/pm_domain.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/regmap.h>
24 #include <linux/reset.h>
25 #include <linux/sizes.h>
26 #include <linux/mfd/syscon.h>
27 #include <linux/units.h>
28
29 #include "sdhci-pltfm.h"
30 #include "cqhci.h"
31 #include "sdhci-cqhci.h"
32
33 #define SDHCI_DWCMSHC_ARG2_STUFF GENMASK(31, 16)
34
35 /* DWCMSHC specific Mode Select value */
36 #define DWCMSHC_CTRL_HS400 0x7
37
38 /* DWC IP vendor area 1 pointer */
39 #define DWCMSHC_P_VENDOR_AREA1 0xe8
40 #define DWCMSHC_AREA1_MASK GENMASK(11, 0)
41 /* Offset inside the vendor area 1 */
42 #define DWCMSHC_HOST_CTRL3 0x8
43 #define DWCMSHC_EMMC_CONTROL 0x2c
44 #define DWCMSHC_CARD_IS_EMMC BIT(0)
45 #define DWCMSHC_ENHANCED_STROBE BIT(8)
46 #define DWCMSHC_EMMC_ATCTRL 0x40
47 #define DWCMSHC_AT_STAT 0x44
48 /* Tuning and auto-tuning fields in AT_CTRL_R control register */
49 #define AT_CTRL_AT_EN BIT(0) /* autotuning is enabled */
50 #define AT_CTRL_CI_SEL BIT(1) /* interval to drive center phase select */
51 #define AT_CTRL_SWIN_TH_EN BIT(2) /* sampling window threshold enable */
52 #define AT_CTRL_RPT_TUNE_ERR BIT(3) /* enable reporting framing errors */
53 #define AT_CTRL_SW_TUNE_EN BIT(4) /* enable software managed tuning */
54 #define AT_CTRL_WIN_EDGE_SEL_MASK GENMASK(11, 8) /* bits [11:8] */
55 #define AT_CTRL_WIN_EDGE_SEL 0xf /* sampling window edge select */
56 #define AT_CTRL_TUNE_CLK_STOP_EN BIT(16) /* clocks stopped during phase code change */
57 #define AT_CTRL_PRE_CHANGE_DLY_MASK GENMASK(18, 17) /* bits [18:17] */
58 #define AT_CTRL_PRE_CHANGE_DLY 0x1 /* 2-cycle latency */
59 #define AT_CTRL_POST_CHANGE_DLY_MASK GENMASK(20, 19) /* bits [20:19] */
60 #define AT_CTRL_POST_CHANGE_DLY 0x3 /* 4-cycle latency */
61 #define AT_CTRL_SWIN_TH_VAL_MASK GENMASK(31, 24) /* bits [31:24] */
62 #define AT_CTRL_SWIN_TH_VAL 0x9 /* sampling window threshold */
63
64 /* DWC IP vendor area 2 pointer */
65 #define DWCMSHC_P_VENDOR_AREA2 0xea
66
67 /* Sophgo CV18XX specific Registers */
68 #define CV18XX_SDHCI_MSHC_CTRL 0x00
69 #define CV18XX_EMMC_FUNC_EN BIT(0)
70 #define CV18XX_LATANCY_1T BIT(1)
71 #define CV18XX_SDHCI_PHY_TX_RX_DLY 0x40
72 #define CV18XX_PHY_TX_DLY_MSK GENMASK(6, 0)
73 #define CV18XX_PHY_TX_SRC_MSK GENMASK(9, 8)
74 #define CV18XX_PHY_TX_SRC_INVERT_CLK_TX 0x1
75 #define CV18XX_PHY_RX_DLY_MSK GENMASK(22, 16)
76 #define CV18XX_PHY_RX_SRC_MSK GENMASK(25, 24)
77 #define CV18XX_PHY_RX_SRC_INVERT_RX_CLK 0x1
78 #define CV18XX_SDHCI_PHY_CONFIG 0x4c
79 #define CV18XX_PHY_TX_BPS BIT(0)
80
81 #define CV18XX_TUNE_MAX 128
82 #define CV18XX_TUNE_STEP 1
83 #define CV18XX_RETRY_TUNING_MAX 50
84
85 /* Rockchip specific Registers */
86 #define DWCMSHC_EMMC_DLL_CTRL 0x800
87 #define DWCMSHC_EMMC_DLL_RXCLK 0x804
88 #define DWCMSHC_EMMC_DLL_TXCLK 0x808
89 #define DWCMSHC_EMMC_DLL_STRBIN 0x80c
90 #define DECMSHC_EMMC_DLL_CMDOUT 0x810
91 #define DECMSHC_EMMC_MISC_CON 0x81C
92 #define MISC_INTCLK_EN BIT(1)
93 #define DWCMSHC_EMMC_DLL_STATUS0 0x840
94 #define DWCMSHC_EMMC_DLL_START BIT(0)
95 #define DWCMSHC_EMMC_DLL_LOCKED BIT(8)
96 #define DWCMSHC_EMMC_DLL_TIMEOUT BIT(9)
97 #define DWCMSHC_EMMC_DLL_RXCLK_SRCSEL 29
98 #define DWCMSHC_EMMC_DLL_START_POINT 16
99 #define DWCMSHC_EMMC_DLL_INC 8
100 #define DWCMSHC_EMMC_DLL_BYPASS BIT(24)
101 #define DWCMSHC_EMMC_DLL_DLYENA BIT(27)
102 #define DLL_TXCLK_TAPNUM_DEFAULT 0x10
103 #define DLL_TXCLK_TAPNUM_90_DEGREES 0xA
104 #define DLL_TXCLK_TAPNUM_FROM_SW BIT(24)
105 #define DLL_STRBIN_TAPNUM_DEFAULT 0x4
106 #define DLL_STRBIN_TAPNUM_FROM_SW BIT(24)
107 #define DLL_STRBIN_DELAY_NUM_SEL BIT(26)
108 #define DLL_STRBIN_DELAY_NUM_OFFSET 16
109 #define DLL_STRBIN_DELAY_NUM_DEFAULT 0x16
110 #define DLL_RXCLK_NO_INVERTER 1
111 #define DLL_RXCLK_INVERTER 0
112 #define DLL_CMDOUT_TAPNUM_90_DEGREES 0x8
113 #define DLL_RXCLK_ORI_GATE BIT(31)
114 #define DLL_CMDOUT_TAPNUM_FROM_SW BIT(24)
115 #define DLL_CMDOUT_SRC_CLK_NEG BIT(28)
116 #define DLL_CMDOUT_EN_SRC_CLK_NEG BIT(29)
117
118 #define DLL_LOCK_WO_TMOUT(x) \
119 ((((x) & DWCMSHC_EMMC_DLL_LOCKED) == DWCMSHC_EMMC_DLL_LOCKED) && \
120 (((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0))
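/*
 * Worked example (illustrative only, not from the databook): a STATUS0 value
 * of 0x100 (LOCKED set, TIMEOUT clear) makes DLL_LOCK_WO_TMOUT() true, while
 * 0x300 (LOCKED and TIMEOUT both set) makes it false.
 */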
121
122 /* PHY register area pointer */
123 #define DWC_MSHC_PTR_PHY_R 0x300
124
125 /* PHY general configuration */
126 #define PHY_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x00)
127 #define PHY_CNFG_RSTN_DEASSERT 0x1 /* Deassert PHY reset */
128 #define PHY_CNFG_PHY_PWRGOOD_MASK BIT_MASK(1) /* bit [1] */
129 #define PHY_CNFG_PAD_SP_MASK GENMASK(19, 16) /* bits [19:16] */
130 #define PHY_CNFG_PAD_SP 0x0c /* PMOS TX drive strength */
131 #define PHY_CNFG_PAD_SP_SG2042 0x09 /* PMOS TX drive strength for SG2042 */
132 #define PHY_CNFG_PAD_SN_MASK GENMASK(23, 20) /* bits [23:20] */
133 #define PHY_CNFG_PAD_SN 0x0c /* NMOS TX drive strength */
134 #define PHY_CNFG_PAD_SN_SG2042 0x08 /* NMOS TX drive strength for SG2042 */
135
136 /* PHY command/response pad settings */
137 #define PHY_CMDPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x04)
138
139 /* PHY data pad settings */
140 #define PHY_DATAPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x06)
141
142 /* PHY clock pad settings */
143 #define PHY_CLKPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x08)
144
145 /* PHY strobe pad settings */
146 #define PHY_STBPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0a)
147
148 /* PHY reset pad settings */
149 #define PHY_RSTNPAD_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x0c)
150
151 /* Bitfields are common for all pad settings */
152 #define PHY_PAD_RXSEL_1V8 0x1 /* Receiver type select for 1.8V */
153 #define PHY_PAD_RXSEL_3V3 0x2 /* Receiver type select for 3.3V */
154
155 #define PHY_PAD_WEAKPULL_MASK GENMASK(4, 3) /* bits [4:3] */
156 #define PHY_PAD_WEAKPULL_PULLUP 0x1 /* Weak pull up enabled */
157 #define PHY_PAD_WEAKPULL_PULLDOWN 0x2 /* Weak pull down enabled */
158
159 #define PHY_PAD_TXSLEW_CTRL_P_MASK GENMASK(8, 5) /* bits [8:5] */
160 #define PHY_PAD_TXSLEW_CTRL_P 0x3 /* Slew control for P-Type pad TX */
161 #define PHY_PAD_TXSLEW_CTRL_N_MASK GENMASK(12, 9) /* bits [12:9] */
162 #define PHY_PAD_TXSLEW_CTRL_N 0x3 /* Slew control for N-Type pad TX */
163 #define PHY_PAD_TXSLEW_CTRL_N_SG2042 0x2 /* Slew control for N-Type pad TX for SG2042 */
164
165 /* PHY CLK delay line settings */
166 #define PHY_SDCLKDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x1d)
167 #define PHY_SDCLKDL_CNFG_EXTDLY_EN BIT(0)
168 #define PHY_SDCLKDL_CNFG_UPDATE BIT(4) /* set before writing to SDCLKDL_DC */
169
170 /* PHY CLK delay line delay code */
171 #define PHY_SDCLKDL_DC_R (DWC_MSHC_PTR_PHY_R + 0x1e)
172 #define PHY_SDCLKDL_DC_INITIAL 0x40 /* initial delay code */
173 #define PHY_SDCLKDL_DC_DEFAULT 0x32 /* default delay code */
174 #define PHY_SDCLKDL_DC_HS400 0x18 /* delay code for HS400 mode */
175
176 #define PHY_SMPLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x20)
177 #define PHY_SMPLDL_CNFG_BYPASS_EN BIT(1)
178
179 /* PHY drift_cclk_rx delay line configuration setting */
180 #define PHY_ATDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x21)
181 #define PHY_ATDL_CNFG_INPSEL_MASK GENMASK(3, 2) /* bits [3:2] */
182 #define PHY_ATDL_CNFG_INPSEL 0x3 /* delay line input source */
183 #define PHY_ATDL_CNFG_INPSEL_SG2042 0x2 /* delay line input source for SG2042 */
184
185 /* PHY DLL control settings */
186 #define PHY_DLL_CTRL_R (DWC_MSHC_PTR_PHY_R + 0x24)
187 #define PHY_DLL_CTRL_DISABLE 0x0 /* PHY DLL is disabled */
188 #define PHY_DLL_CTRL_ENABLE 0x1 /* PHY DLL is enabled */
189
190 /* PHY DLL configuration register 1 */
191 #define PHY_DLL_CNFG1_R (DWC_MSHC_PTR_PHY_R + 0x25)
192 #define PHY_DLL_CNFG1_SLVDLY_MASK GENMASK(5, 4) /* bits [5:4] */
193 #define PHY_DLL_CNFG1_SLVDLY 0x2 /* DLL slave update delay input */
194 #define PHY_DLL_CNFG1_WAITCYCLE 0x5 /* DLL wait cycle input */
195
196 /* PHY DLL configuration register 2 */
197 #define PHY_DLL_CNFG2_R (DWC_MSHC_PTR_PHY_R + 0x26)
198 #define PHY_DLL_CNFG2_JUMPSTEP 0xa /* DLL jump step input */
199
200 /* PHY DLL master and slave delay line configuration settings */
201 #define PHY_DLLDL_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x28)
202 #define PHY_DLLDL_CNFG_SLV_INPSEL_MASK GENMASK(6, 5) /* bits [6:5] */
203 #define PHY_DLLDL_CNFG_SLV_INPSEL 0x3 /* clock source select for slave DL */
204
205 /* PHY DLL offset setting register */
206 #define PHY_DLL_OFFST_R (DWC_MSHC_PTR_PHY_R + 0x29)
207 /* DLL LBT setting register */
208 #define PHY_DLLBT_CNFG_R (DWC_MSHC_PTR_PHY_R + 0x2c)
209 /* DLL Status register */
210 #define PHY_DLL_STATUS_R (DWC_MSHC_PTR_PHY_R + 0x2e)
211 #define DLL_LOCK_STS BIT(0) /* DLL is locked and ready */
212 /*
213 * Captures the value of DLL's lock error status information. Value is valid
214 * only when LOCK_STS is set.
215 */
216 #define DLL_ERROR_STS BIT(1)
217
218 #define FLAG_IO_FIXED_1V8 BIT(0)
219
220 #define BOUNDARY_OK(addr, len) \
221 ((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
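/*
 * Illustrative example (hypothetical values): with addr = 0x07fff000 and
 * len = 0x2000, addr | (SZ_128M - 1) is 0x07ffffff while
 * (addr + len - 1) | (SZ_128M - 1) is 0x0fffffff, so BOUNDARY_OK() is false
 * and the transfer crosses a 128 MB boundary.
 */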
222
223 #define DWCMSHC_SDHCI_CQE_TRNS_MODE (SDHCI_TRNS_MULTI | \
224 SDHCI_TRNS_BLK_CNT_EN | \
225 SDHCI_TRNS_DMA)
226
227 /* SMC call for BlueField-3 eMMC RST_N */
228 #define BLUEFIELD_SMC_SET_EMMC_RST_N 0x82000007
229
230 /* Eswin specific Registers */
231 #define EIC7700_CARD_CLK_STABLE BIT(28)
232 #define EIC7700_INT_BCLK_STABLE BIT(16)
233 #define EIC7700_INT_ACLK_STABLE BIT(8)
234 #define EIC7700_INT_TMCLK_STABLE BIT(0)
235 #define EIC7700_INT_CLK_STABLE (EIC7700_CARD_CLK_STABLE | \
236 EIC7700_INT_ACLK_STABLE | \
237 EIC7700_INT_BCLK_STABLE | \
238 EIC7700_INT_TMCLK_STABLE)
239 #define EIC7700_HOST_VAL_STABLE BIT(0)
240
241 /* strength definition */
242 #define PHYCTRL_DR_33OHM 0xee
243 #define PHYCTRL_DR_40OHM 0xcc
244 #define PHYCTRL_DR_50OHM 0x88
245 #define PHYCTRL_DR_66OHM 0x44
246 #define PHYCTRL_DR_100OHM 0x00
247
248 #define MAX_PHASE_CODE 0xff
249 #define TUNING_RANGE_THRESHOLD 40
250 #define PHY_CLK_MAX_DELAY_MASK 0x7f
251 #define PHY_DELAY_CODE_MAX 0x7f
252 #define PHY_DELAY_CODE_EMMC 0x17
253 #define PHY_DELAY_CODE_SD 0x55
254
255 enum dwcmshc_rk_type {
256 DWCMSHC_RK3568,
257 DWCMSHC_RK3588,
258 };
259
260 struct rk35xx_priv {
261 struct reset_control *reset;
262 enum dwcmshc_rk_type devtype;
263 u8 txclk_tapnum;
264 };
265
266 struct eic7700_priv {
267 struct reset_control *reset;
268 unsigned int drive_impedance;
269 };
270
271 #define DWCMSHC_MAX_OTHER_CLKS 3
272
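/*
 * Note (descriptive, added for clarity): vendor_specific_area1/2 hold the
 * offsets of the two DWC vendor register areas as read from the
 * DWCMSHC_P_VENDOR_AREA1/2 pointer registers; the vendor-area register
 * macros above are used relative to these offsets.
 */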
273 struct dwcmshc_priv {
274 struct clk *bus_clk;
275 int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA1 reg */
276 int vendor_specific_area2; /* P_VENDOR_SPECIFIC_AREA2 reg */
277
278 int num_other_clks;
279 struct clk_bulk_data other_clks[DWCMSHC_MAX_OTHER_CLKS];
280
281 void *priv; /* pointer to SoC private stuff */
282 u16 delay_line;
283 u16 flags;
284 };
285
286 struct dwcmshc_pltfm_data {
287 const struct sdhci_pltfm_data pdata;
288 const struct cqhci_host_ops *cqhci_host_ops;
289 int (*init)(struct device *dev, struct sdhci_host *host, struct dwcmshc_priv *dwc_priv);
290 void (*postinit)(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv);
291 };
292
293 static void dwcmshc_enable_card_clk(struct sdhci_host *host)
294 {
295 u16 ctrl;
296
297 ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
298 if ((ctrl & SDHCI_CLOCK_INT_EN) && !(ctrl & SDHCI_CLOCK_CARD_EN)) {
299 ctrl |= SDHCI_CLOCK_CARD_EN;
300 sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
301 }
302 }
303
304 static int dwcmshc_get_enable_other_clks(struct device *dev,
305 struct dwcmshc_priv *priv,
306 int num_clks,
307 const char * const clk_ids[])
308 {
309 int err;
310
311 if (num_clks > DWCMSHC_MAX_OTHER_CLKS)
312 return -EINVAL;
313
314 for (int i = 0; i < num_clks; i++)
315 priv->other_clks[i].id = clk_ids[i];
316
317 err = devm_clk_bulk_get_optional(dev, num_clks, priv->other_clks);
318 if (err) {
319 dev_err(dev, "failed to get clocks %d\n", err);
320 return err;
321 }
322
323 err = clk_bulk_prepare_enable(num_clks, priv->other_clks);
324 if (err)
325 dev_err(dev, "failed to enable clocks %d\n", err);
326
327 priv->num_other_clks = num_clks;
328
329 return err;
330 }
331
332 /*
333 * If DMA addr spans 128MB boundary, we split the DMA transfer into two
334 * so that each DMA transfer doesn't exceed the boundary.
335 */
336 static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc,
337 dma_addr_t addr, int len, unsigned int cmd)
338 {
339 int tmplen, offset;
340
341 if (likely(!len || BOUNDARY_OK(addr, len))) {
342 sdhci_adma_write_desc(host, desc, addr, len, cmd);
343 return;
344 }
345
346 offset = addr & (SZ_128M - 1);
347 tmplen = SZ_128M - offset;
348 sdhci_adma_write_desc(host, desc, addr, tmplen, cmd);
349
350 addr += tmplen;
351 len -= tmplen;
352 sdhci_adma_write_desc(host, desc, addr, len, cmd);
353 }
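/*
 * Example of the split above (hypothetical values): addr = 0x0ff00000,
 * len = 0x200000 crosses a 128 MB boundary at 0x10000000. The first
 * descriptor covers 0x0ff00000..0x0fffffff (tmplen = 0x100000) and the
 * second covers the remaining 0x100000 bytes starting at 0x10000000.
 */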
354
355 static void dwcmshc_reset(struct sdhci_host *host, u8 mask)
356 {
357 sdhci_reset(host, mask);
358
359 /* The dwcmshc does not comply with the SDHCI specification
360 * regarding the "Software Reset for CMD line should clear 'Command
361 * Complete' in the Normal Interrupt Status Register." Clear the bit
362 * here to compensate for this quirk.
363 */
364 if (mask & SDHCI_RESET_CMD)
365 sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
366 }
367
368 static unsigned int dwcmshc_get_max_clock(struct sdhci_host *host)
369 {
370 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
371
372 if (pltfm_host->clk)
373 return sdhci_pltfm_clk_get_max_clock(host);
374 else
375 return pltfm_host->clock;
376 }
377
378 static unsigned int rk35xx_get_max_clock(struct sdhci_host *host)
379 {
380 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
381
382 return clk_round_rate(pltfm_host->clk, ULONG_MAX);
383 }
384
385 static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc,
386 struct mmc_request *mrq)
387 {
388 struct sdhci_host *host = mmc_priv(mmc);
389
390 /*
391 * Whether or not V4 mode is enabled, the ARGUMENT2 register is a
392 * 32-bit block count register which doesn't support the stuff bits
393 * of the CMD23 argument on the dwcmshc host controller.
394 */
395 if (mrq->sbc && (mrq->sbc->arg & SDHCI_DWCMSHC_ARG2_STUFF))
396 host->flags &= ~SDHCI_AUTO_CMD23;
397 else
398 host->flags |= SDHCI_AUTO_CMD23;
399 }
400
401 static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
402 {
403 dwcmshc_check_auto_cmd23(mmc, mrq);
404
405 sdhci_request(mmc, mrq);
406 }
407
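/*
 * Summary of the PHY bring-up sequence below (descriptive only): deassert the
 * PHY reset and program the TX drive strength, program the clock delay line
 * and DLL jump step, configure the command/data/reset/clock/strobe pads, and
 * finally enable the PHY DLL. For fixed 1.8V I/O the 1.8V receiver and the
 * strobe slave delay-line input are selected as well.
 */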
408 static void dwcmshc_phy_init(struct sdhci_host *host)
409 {
410 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
411 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
412 u32 rxsel = PHY_PAD_RXSEL_3V3;
413 u32 val;
414
415 if (priv->flags & FLAG_IO_FIXED_1V8 ||
416 host->mmc->ios.timing & MMC_SIGNAL_VOLTAGE_180)
417 rxsel = PHY_PAD_RXSEL_1V8;
418
419 /* deassert phy reset & set tx drive strength */
420 val = PHY_CNFG_RSTN_DEASSERT;
421 val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP);
422 val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN);
423 sdhci_writel(host, val, PHY_CNFG_R);
424
425 /* disable delay line */
426 sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R);
427
428 /* set delay line */
429 sdhci_writeb(host, priv->delay_line, PHY_SDCLKDL_DC_R);
430 sdhci_writeb(host, PHY_DLL_CNFG2_JUMPSTEP, PHY_DLL_CNFG2_R);
431
432 /* enable delay line */
433 val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
434 val &= ~(PHY_SDCLKDL_CNFG_UPDATE);
435 sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
436
437 /* configure phy pads */
438 val = rxsel;
439 val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
440 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
441 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
442 sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
443 sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
444 sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
445
446 val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
447 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
448 sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
449
450 val = rxsel;
451 val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
452 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
453 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
454 sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
455
456 /* enable data strobe mode */
457 if (rxsel == PHY_PAD_RXSEL_1V8) {
458 u8 sel = FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK, PHY_DLLDL_CNFG_SLV_INPSEL);
459
460 sdhci_writeb(host, sel, PHY_DLLDL_CNFG_R);
461 }
462
463 /* enable phy dll */
464 sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R);
465
466 }
467
468 static void th1520_sdhci_set_phy(struct sdhci_host *host)
469 {
470 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
471 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
472 u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
473 u16 emmc_ctrl;
474
475 dwcmshc_phy_init(host);
476
477 if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
478 emmc_ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
479 emmc_ctrl |= DWCMSHC_CARD_IS_EMMC;
480 sdhci_writew(host, emmc_ctrl, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
481 }
482
483 sdhci_writeb(host, FIELD_PREP(PHY_DLL_CNFG1_SLVDLY_MASK, PHY_DLL_CNFG1_SLVDLY) |
484 PHY_DLL_CNFG1_WAITCYCLE, PHY_DLL_CNFG1_R);
485 }
486
487 static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
488 unsigned int timing)
489 {
490 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
491 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
492 u16 ctrl, ctrl_2;
493
494 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
495 /* Select Bus Speed Mode for host */
496 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
497 if ((timing == MMC_TIMING_MMC_HS200) ||
498 (timing == MMC_TIMING_UHS_SDR104))
499 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
500 else if (timing == MMC_TIMING_UHS_SDR12)
501 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
502 else if ((timing == MMC_TIMING_UHS_SDR25) ||
503 (timing == MMC_TIMING_MMC_HS))
504 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
505 else if (timing == MMC_TIMING_UHS_SDR50)
506 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
507 else if ((timing == MMC_TIMING_UHS_DDR50) ||
508 (timing == MMC_TIMING_MMC_DDR52))
509 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
510 else if (timing == MMC_TIMING_MMC_HS400) {
511 /* set CARD_IS_EMMC bit to enable Data Strobe for HS400 */
512 ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
513 ctrl |= DWCMSHC_CARD_IS_EMMC;
514 sdhci_writew(host, ctrl, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
515
516 ctrl_2 |= DWCMSHC_CTRL_HS400;
517 }
518
519 if (priv->flags & FLAG_IO_FIXED_1V8)
520 ctrl_2 |= SDHCI_CTRL_VDD_180;
521 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
522 }
523
524 static void th1520_set_uhs_signaling(struct sdhci_host *host,
525 unsigned int timing)
526 {
527 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
528 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
529
530 dwcmshc_set_uhs_signaling(host, timing);
531 if (timing == MMC_TIMING_MMC_HS400)
532 priv->delay_line = PHY_SDCLKDL_DC_HS400;
533 else
534 sdhci_writeb(host, 0, PHY_DLLDL_CNFG_R);
535 th1520_sdhci_set_phy(host);
536 }
537
538 static void dwcmshc_hs400_enhanced_strobe(struct mmc_host *mmc,
539 struct mmc_ios *ios)
540 {
541 u32 vendor;
542 struct sdhci_host *host = mmc_priv(mmc);
543 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
544 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
545 int reg = priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL;
546
547 vendor = sdhci_readl(host, reg);
548 if (ios->enhanced_strobe)
549 vendor |= DWCMSHC_ENHANCED_STROBE;
550 else
551 vendor &= ~DWCMSHC_ENHANCED_STROBE;
552
553 sdhci_writel(host, vendor, reg);
554 }
555
556 static int dwcmshc_execute_tuning(struct mmc_host *mmc, u32 opcode)
557 {
558 int err = sdhci_execute_tuning(mmc, opcode);
559 struct sdhci_host *host = mmc_priv(mmc);
560
561 if (err)
562 return err;
563
564 /*
565 * Tuning can leave the IP in an active state (Buffer Read Enable bit
566 * set) which prevents the entry to low power states (i.e. S0i3). Data
567 * reset will clear it.
568 */
569 sdhci_reset(host, SDHCI_RESET_DATA);
570
571 return 0;
572 }
573
574 static u32 dwcmshc_cqe_irq_handler(struct sdhci_host *host, u32 intmask)
575 {
576 int cmd_error = 0;
577 int data_error = 0;
578
579 if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
580 return intmask;
581
582 cqhci_irq(host->mmc, intmask, cmd_error, data_error);
583
584 return 0;
585 }
586
587 static void dwcmshc_sdhci_cqe_enable(struct mmc_host *mmc)
588 {
589 struct sdhci_host *host = mmc_priv(mmc);
590 u8 ctrl;
591
592 sdhci_writew(host, DWCMSHC_SDHCI_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
593
594 sdhci_cqe_enable(mmc);
595
596 /*
597 * The "DesignWare Cores Mobile Storage Host Controller
598 * DWC_mshc / DWC_mshc_lite Databook" says:
599 * when "Host Version 4 Enable" is 1 in the Host Control 2 register,
600 * SDHCI_CTRL_ADMA32 bit means ADMA2 is selected.
601 * Selection of 32-bit/64-bit System Addressing:
602 * either 32-bit or 64-bit system addressing is selected by
603 * 64-bit Addressing bit in Host Control 2 register.
604 *
605 * On the other hand the "DesignWare Cores Mobile Storage Host
606 * Controller DWC_mshc / DWC_mshc_lite User Guide" says, that we have to
607 * set DMA_SEL to ADMA2 _only_ mode in the Host Control 2 register.
608 */
609 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
610 ctrl &= ~SDHCI_CTRL_DMA_MASK;
611 ctrl |= SDHCI_CTRL_ADMA32;
612 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
613 }
614
615 static void dwcmshc_set_tran_desc(struct cqhci_host *cq_host, u8 **desc,
616 dma_addr_t addr, int len, bool end, bool dma64)
617 {
618 int tmplen, offset;
619
620 if (likely(!len || BOUNDARY_OK(addr, len))) {
621 cqhci_set_tran_desc(*desc, addr, len, end, dma64);
622 return;
623 }
624
625 offset = addr & (SZ_128M - 1);
626 tmplen = SZ_128M - offset;
627 cqhci_set_tran_desc(*desc, addr, tmplen, false, dma64);
628
629 addr += tmplen;
630 len -= tmplen;
631 *desc += cq_host->trans_desc_len;
632 cqhci_set_tran_desc(*desc, addr, len, end, dma64);
633 }
634
635 static void dwcmshc_cqhci_dumpregs(struct mmc_host *mmc)
636 {
637 sdhci_dumpregs(mmc_priv(mmc));
638 }
639
640 static void rk35xx_sdhci_cqe_pre_enable(struct mmc_host *mmc)
641 {
642 struct sdhci_host *host = mmc_priv(mmc);
643 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
644 struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
645 u32 reg;
646
647 /* Set the Send Status Command Idle Timer to ~10.67us (256 cycles / 24MHz) */
648 reg = sdhci_readl(host, dwc_priv->vendor_specific_area2 + CQHCI_SSC1);
649 reg = (reg & ~CQHCI_SSC1_CIT_MASK) | 0x0100;
650 sdhci_writel(host, reg, dwc_priv->vendor_specific_area2 + CQHCI_SSC1);
651
652 reg = sdhci_readl(host, dwc_priv->vendor_specific_area2 + CQHCI_CFG);
653 reg |= CQHCI_ENABLE;
654 sdhci_writel(host, reg, dwc_priv->vendor_specific_area2 + CQHCI_CFG);
655 }
656
657 static void rk35xx_sdhci_cqe_enable(struct mmc_host *mmc)
658 {
659 struct sdhci_host *host = mmc_priv(mmc);
660 u32 reg;
661
662 reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
663 while (reg & SDHCI_DATA_AVAILABLE) {
664 sdhci_readl(host, SDHCI_BUFFER);
665 reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
666 }
667
668 sdhci_writew(host, DWCMSHC_SDHCI_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
669
670 sdhci_cqe_enable(mmc);
671 }
672
673 static void rk35xx_sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
674 {
675 struct sdhci_host *host = mmc_priv(mmc);
676 unsigned long flags;
677 u32 ctrl;
678
679 /*
680 * During CQE command transfers, the command complete bit gets latched.
681 * So software should clear the command complete interrupt status when
682 * CQE is either halted or disabled. Otherwise an unexpected SDHCI legacy
683 * interrupt gets triggered when CQE is halted/disabled.
684 */
685 spin_lock_irqsave(&host->lock, flags);
686 ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
687 ctrl |= SDHCI_INT_RESPONSE;
688 sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
689 sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
690 spin_unlock_irqrestore(&host->lock, flags);
691
692 sdhci_cqe_disable(mmc, recovery);
693 }
694
695 static void rk35xx_sdhci_cqe_post_disable(struct mmc_host *mmc)
696 {
697 struct sdhci_host *host = mmc_priv(mmc);
698 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
699 struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
700 u32 ctrl;
701
702 ctrl = sdhci_readl(host, dwc_priv->vendor_specific_area2 + CQHCI_CFG);
703 ctrl &= ~CQHCI_ENABLE;
704 sdhci_writel(host, ctrl, dwc_priv->vendor_specific_area2 + CQHCI_CFG);
705 }
706
707 static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock)
708 {
709 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
710 struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
711 struct rk35xx_priv *priv = dwc_priv->priv;
712 u8 txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT;
713 u32 extra, reg;
714 int err;
715
716 host->mmc->actual_clock = 0;
717
718 if (clock == 0) {
719 /* Disable interface clock at initial state. */
720 sdhci_set_clock(host, clock);
721 return;
722 }
723
724 /* Rockchip platforms only support 375 kHz for card identification mode */
725 if (clock <= 400000)
726 clock = 375000;
727
728 err = clk_set_rate(pltfm_host->clk, clock);
729 if (err)
730 dev_err(mmc_dev(host->mmc), "fail to set clock %d", clock);
731
732 sdhci_set_clock(host, clock);
733
734 /* Disable cmd conflict check and internal clock gate */
735 reg = dwc_priv->vendor_specific_area1 + DWCMSHC_HOST_CTRL3;
736 extra = sdhci_readl(host, reg);
737 extra &= ~BIT(0);
738 extra |= BIT(4);
739 sdhci_writel(host, extra, reg);
740
741 if (clock <= 52000000) {
742 /*
743 * Disable the DLL and reset both the sample and drive clocks.
744 * The bypass and start bits need to be set if the DLL is not locked.
745 */
746 sdhci_writel(host, DWCMSHC_EMMC_DLL_BYPASS | DWCMSHC_EMMC_DLL_START, DWCMSHC_EMMC_DLL_CTRL);
747 sdhci_writel(host, DLL_RXCLK_ORI_GATE, DWCMSHC_EMMC_DLL_RXCLK);
748 sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
749 sdhci_writel(host, 0, DECMSHC_EMMC_DLL_CMDOUT);
750 /*
751 * Before switching to HS400ES mode, the driver enables enhanced
752 * strobe first, so the PHY's enhanced strobe parameters need to be
753 * configured beforehand.
754 */
755 extra = DWCMSHC_EMMC_DLL_DLYENA |
756 DLL_STRBIN_DELAY_NUM_SEL |
757 DLL_STRBIN_DELAY_NUM_DEFAULT << DLL_STRBIN_DELAY_NUM_OFFSET;
758 sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
759 return;
760 }
761
762 /* Reset DLL */
763 sdhci_writel(host, BIT(1), DWCMSHC_EMMC_DLL_CTRL);
764 udelay(1);
765 sdhci_writel(host, 0x0, DWCMSHC_EMMC_DLL_CTRL);
766
767 /*
768 * DLL_RXCLK_NO_INVERTER shouldn't be set for identification mode
769 * but must be set for higher speed modes.
770 */
771 extra = DWCMSHC_EMMC_DLL_DLYENA;
772 if (priv->devtype == DWCMSHC_RK3568)
773 extra |= DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL;
774 sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
775
776 /* Init DLL settings */
777 extra = 0x5 << DWCMSHC_EMMC_DLL_START_POINT |
778 0x2 << DWCMSHC_EMMC_DLL_INC |
779 DWCMSHC_EMMC_DLL_START;
780 sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_CTRL);
781 err = readl_poll_timeout(host->ioaddr + DWCMSHC_EMMC_DLL_STATUS0,
782 extra, DLL_LOCK_WO_TMOUT(extra), 1,
783 500 * USEC_PER_MSEC);
784 if (err) {
785 dev_err(mmc_dev(host->mmc), "DLL lock timeout!\n");
786 return;
787 }
788
789 extra = 0x1 << 16 | /* tune clock stop en */
790 0x3 << 17 | /* pre-change delay */
791 0x3 << 19; /* post-change delay */
792 sdhci_writel(host, extra, dwc_priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
793
794 if (host->mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
795 host->mmc->ios.timing == MMC_TIMING_MMC_HS400)
796 txclk_tapnum = priv->txclk_tapnum;
797
798 if ((priv->devtype == DWCMSHC_RK3588) && host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
799 txclk_tapnum = DLL_TXCLK_TAPNUM_90_DEGREES;
800
801 extra = DLL_CMDOUT_SRC_CLK_NEG |
802 DLL_CMDOUT_EN_SRC_CLK_NEG |
803 DWCMSHC_EMMC_DLL_DLYENA |
804 DLL_CMDOUT_TAPNUM_90_DEGREES |
805 DLL_CMDOUT_TAPNUM_FROM_SW;
806 sdhci_writel(host, extra, DECMSHC_EMMC_DLL_CMDOUT);
807 }
808
809 extra = DWCMSHC_EMMC_DLL_DLYENA |
810 DLL_TXCLK_TAPNUM_FROM_SW |
811 DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL |
812 txclk_tapnum;
813 sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_TXCLK);
814
815 extra = DWCMSHC_EMMC_DLL_DLYENA |
816 DLL_STRBIN_TAPNUM_DEFAULT |
817 DLL_STRBIN_TAPNUM_FROM_SW;
818 sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
819 }
820
821 static void rk35xx_sdhci_reset(struct sdhci_host *host, u8 mask)
822 {
823 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
824 struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
825 struct rk35xx_priv *priv = dwc_priv->priv;
826 u32 extra = sdhci_readl(host, DECMSHC_EMMC_MISC_CON);
827
828 if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
829 cqhci_deactivate(host->mmc);
830
831 if (mask & SDHCI_RESET_ALL && priv->reset) {
832 reset_control_assert(priv->reset);
833 udelay(1);
834 reset_control_deassert(priv->reset);
835 }
836
837 sdhci_reset(host, mask);
838
839 /* Enable INTERNAL CLOCK */
840 sdhci_writel(host, MISC_INTCLK_EN | extra, DECMSHC_EMMC_MISC_CON);
841 }
842
843 static int dwcmshc_rk35xx_init(struct device *dev, struct sdhci_host *host,
844 struct dwcmshc_priv *dwc_priv)
845 {
846 static const char * const clk_ids[] = {"axi", "block", "timer"};
847 struct rk35xx_priv *priv;
848 int err;
849
850 priv = devm_kzalloc(dev, sizeof(struct rk35xx_priv), GFP_KERNEL);
851 if (!priv)
852 return -ENOMEM;
853
854 if (of_device_is_compatible(dev->of_node, "rockchip,rk3588-dwcmshc"))
855 priv->devtype = DWCMSHC_RK3588;
856 else
857 priv->devtype = DWCMSHC_RK3568;
858
859 priv->reset = devm_reset_control_array_get_optional_exclusive(mmc_dev(host->mmc));
860 if (IS_ERR(priv->reset)) {
861 err = PTR_ERR(priv->reset);
862 dev_err(mmc_dev(host->mmc), "failed to get reset control %d\n", err);
863 return err;
864 }
865
866 err = dwcmshc_get_enable_other_clks(mmc_dev(host->mmc), dwc_priv,
867 ARRAY_SIZE(clk_ids), clk_ids);
868 if (err)
869 return err;
870
871 if (of_property_read_u8(mmc_dev(host->mmc)->of_node, "rockchip,txclk-tapnum",
872 &priv->txclk_tapnum))
873 priv->txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT;
874
875 /* Disable cmd conflict check */
876 sdhci_writel(host, 0x0, dwc_priv->vendor_specific_area1 + DWCMSHC_HOST_CTRL3);
877 /* Reset previous settings */
878 sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
879 sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_STRBIN);
880
881 dwc_priv->priv = priv;
882
883 return 0;
884 }
885
886 static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
887 {
888 /*
889 * Don't support high-speed bus modes with a low clock speed, as the
890 * DLL cannot be used under this condition.
891 */
892 if (host->mmc->f_max <= 52000000) {
893 dev_info(mmc_dev(host->mmc), "Disabling HS200/HS400, frequency too low (%d)\n",
894 host->mmc->f_max);
895 host->mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400);
896 host->mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR);
897 }
898 }
899
900 static void dwcmshc_rk3576_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
901 {
902 struct device *dev = mmc_dev(host->mmc);
903 int ret;
904
905 /*
906 * This works around the design of the RK3576's power domains, which
907 * makes the PD_NVM power domain, which the sdhci controller on the
908 * RK3576 is in, never come back up properly once it has been
909 * runtime-suspended. This can happen during early kernel boot if no
910 * driver is using either PD_NVM or its child power domain PD_SDGMAC
911 * for a short moment, leading to it being turned off to save power.
912 * By keeping it on, sdhci suspending won't make PD_NVM a candidate
913 * for getting turned off.
914 */
915 ret = dev_pm_genpd_rpm_always_on(dev, true);
916 if (ret && ret != -EOPNOTSUPP)
917 dev_warn(dev, "failed to set PD rpm always on, SoC may hang later: %pe\n",
918 ERR_PTR(ret));
919
920 dwcmshc_rk35xx_postinit(host, dwc_priv);
921 }
922
923 static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode)
924 {
925 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
926 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
927 u32 val = 0;
928
929 if (host->flags & SDHCI_HS400_TUNING)
930 return 0;
931
932 sdhci_writeb(host, FIELD_PREP(PHY_ATDL_CNFG_INPSEL_MASK, PHY_ATDL_CNFG_INPSEL),
933 PHY_ATDL_CNFG_R);
934 val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
935
936 /*
937 * configure tuning settings:
938 * - center phase select code driven in block gap interval
939 * - disable reporting of framing errors
940 * - disable software managed tuning
941 * - disable user selection of sampling window edges,
942 * instead tuning calculated edges are used
943 */
944 val &= ~(AT_CTRL_CI_SEL | AT_CTRL_RPT_TUNE_ERR | AT_CTRL_SW_TUNE_EN |
945 FIELD_PREP(AT_CTRL_WIN_EDGE_SEL_MASK, AT_CTRL_WIN_EDGE_SEL));
946
947 /*
948 * configure tuning settings:
949 * - enable auto-tuning
950 * - enable sampling window threshold
951 * - stop clocks during phase code change
952 * - set max latency in cycles between tx and rx clocks
953 * - set max latency in cycles to switch output phase
954 * - set max sampling window threshold value
955 */
956 val |= AT_CTRL_AT_EN | AT_CTRL_SWIN_TH_EN | AT_CTRL_TUNE_CLK_STOP_EN;
957 val |= FIELD_PREP(AT_CTRL_PRE_CHANGE_DLY_MASK, AT_CTRL_PRE_CHANGE_DLY);
958 val |= FIELD_PREP(AT_CTRL_POST_CHANGE_DLY_MASK, AT_CTRL_POST_CHANGE_DLY);
959 val |= FIELD_PREP(AT_CTRL_SWIN_TH_VAL_MASK, AT_CTRL_SWIN_TH_VAL);
960
961 sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
962 val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
963
964 /* perform tuning */
965 sdhci_start_tuning(host);
966 host->tuning_loop_count = 128;
967 host->tuning_err = __sdhci_execute_tuning(host, opcode);
968 if (host->tuning_err) {
969 /* disable auto-tuning upon tuning error */
970 val &= ~AT_CTRL_AT_EN;
971 sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
972 dev_err(mmc_dev(host->mmc), "tuning failed: %d\n", host->tuning_err);
973 return -EIO;
974 }
975 sdhci_end_tuning(host);
976
977 return 0;
978 }
979
980 static void th1520_sdhci_reset(struct sdhci_host *host, u8 mask)
981 {
982 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
983 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
984 u16 ctrl_2;
985
986 dwcmshc_reset(host, mask);
987
988 if (priv->flags & FLAG_IO_FIXED_1V8) {
989 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
990 if (!(ctrl_2 & SDHCI_CTRL_VDD_180)) {
991 ctrl_2 |= SDHCI_CTRL_VDD_180;
992 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
993 }
994 }
995 }
996
997 static int th1520_init(struct device *dev,
998 struct sdhci_host *host,
999 struct dwcmshc_priv *dwc_priv)
1000 {
1001 dwc_priv->delay_line = PHY_SDCLKDL_DC_DEFAULT;
1002
1003 if (device_property_read_bool(dev, "mmc-ddr-1_8v") ||
1004 device_property_read_bool(dev, "mmc-hs200-1_8v") ||
1005 device_property_read_bool(dev, "mmc-hs400-1_8v"))
1006 dwc_priv->flags |= FLAG_IO_FIXED_1V8;
1007 else
1008 dwc_priv->flags &= ~FLAG_IO_FIXED_1V8;
1009
1010 /*
1011 * start_signal_voltage_switch() will try 3.3V first
1012 * then 1.8V. Use SDHCI_SIGNALING_180 rather than
1013 * SDHCI_SIGNALING_330 to avoid setting voltage to 3.3V
1014 * in sdhci_start_signal_voltage_switch().
1015 */
1016 if (dwc_priv->flags & FLAG_IO_FIXED_1V8) {
1017 host->flags &= ~SDHCI_SIGNALING_330;
1018 host->flags |= SDHCI_SIGNALING_180;
1019 }
1020
1021 sdhci_enable_v4_mode(host);
1022
1023 return 0;
1024 }
1025
1026 static void cv18xx_sdhci_reset(struct sdhci_host *host, u8 mask)
1027 {
1028 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1029 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
1030 u32 val, emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
1031
1032 dwcmshc_reset(host, mask);
1033
1034 if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
1035 val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
1036 val |= CV18XX_EMMC_FUNC_EN;
1037 sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
1038 }
1039
1040 val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
1041 val |= CV18XX_LATANCY_1T;
1042 sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
1043
1044 val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
1045 val |= CV18XX_PHY_TX_BPS;
1046 sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
1047
1048 val = (FIELD_PREP(CV18XX_PHY_TX_DLY_MSK, 0) |
1049 FIELD_PREP(CV18XX_PHY_TX_SRC_MSK, CV18XX_PHY_TX_SRC_INVERT_CLK_TX) |
1050 FIELD_PREP(CV18XX_PHY_RX_DLY_MSK, 0) |
1051 FIELD_PREP(CV18XX_PHY_RX_SRC_MSK, CV18XX_PHY_RX_SRC_INVERT_RX_CLK));
1052 sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_TX_RX_DLY);
1053 }
1054
1055 static void cv18xx_sdhci_set_tap(struct sdhci_host *host, int tap)
1056 {
1057 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1058 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
1059 u16 clk;
1060 u32 val;
1061
1062 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1063 clk &= ~SDHCI_CLOCK_CARD_EN;
1064 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1065
1066 val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
1067 val &= ~CV18XX_LATANCY_1T;
1068 sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
1069
1070 val = (FIELD_PREP(CV18XX_PHY_TX_DLY_MSK, 0) |
1071 FIELD_PREP(CV18XX_PHY_TX_SRC_MSK, CV18XX_PHY_TX_SRC_INVERT_CLK_TX) |
1072 FIELD_PREP(CV18XX_PHY_RX_DLY_MSK, tap));
1073 sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_TX_RX_DLY);
1074
1075 sdhci_writel(host, 0, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
1076
1077 clk |= SDHCI_CLOCK_CARD_EN;
1078 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1079 usleep_range(1000, 2000);
1080 }
1081
1082 static int cv18xx_retry_tuning(struct mmc_host *mmc, u32 opcode, int *cmd_error)
1083 {
1084 int ret, retry = 0;
1085
1086 while (retry < CV18XX_RETRY_TUNING_MAX) {
1087 ret = mmc_send_tuning(mmc, opcode, NULL);
1088 if (ret)
1089 return ret;
1090 retry++;
1091 }
1092
1093 return 0;
1094 }
1095
1096 static void cv18xx_sdhci_post_tuning(struct sdhci_host *host)
1097 {
1098 u32 val;
1099
1100 val = sdhci_readl(host, SDHCI_INT_STATUS);
1101 val |= SDHCI_INT_DATA_AVAIL;
1102 sdhci_writel(host, val, SDHCI_INT_STATUS);
1103
1104 dwcmshc_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1105 }
1106
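/*
 * The tuning sweep below walks the RX delay tap from 0 to CV18XX_TUNE_MAX,
 * records every passing window and keeps the widest one, then programs the
 * midpoint of that window. Hypothetical example: if taps 10..40 and 60..120
 * both pass, the second window wins and tap (60 + 120) / 2 = 90 is used.
 */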
1107 static int cv18xx_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1108 {
1109 int min, max, avg, ret;
1110 int win_length, target_min, target_max, target_win_length;
1111
1112 min = max = 0;
1113 target_win_length = 0;
1114
1115 sdhci_reset_tuning(host);
1116
1117 while (max < CV18XX_TUNE_MAX) {
1118 /* first find the minimum delay which can pass tuning */
1119 while (min < CV18XX_TUNE_MAX) {
1120 cv18xx_sdhci_set_tap(host, min);
1121 if (!cv18xx_retry_tuning(host->mmc, opcode, NULL))
1122 break;
1123 min += CV18XX_TUNE_STEP;
1124 }
1125
1126 /* then find the maximum delay which can still pass tuning */
1127 max = min + CV18XX_TUNE_STEP;
1128 while (max < CV18XX_TUNE_MAX) {
1129 cv18xx_sdhci_set_tap(host, max);
1130 if (cv18xx_retry_tuning(host->mmc, opcode, NULL)) {
1131 max -= CV18XX_TUNE_STEP;
1132 break;
1133 }
1134 max += CV18XX_TUNE_STEP;
1135 }
1136
1137 win_length = max - min + 1;
1138 /* get the largest pass window */
1139 if (win_length > target_win_length) {
1140 target_win_length = win_length;
1141 target_min = min;
1142 target_max = max;
1143 }
1144
1145 /* continue to find the next pass window */
1146 min = max + CV18XX_TUNE_STEP;
1147 }
1148
1149 cv18xx_sdhci_post_tuning(host);
1150
1151 /* use average delay to get the best timing */
1152 avg = (target_min + target_max) / 2;
1153 cv18xx_sdhci_set_tap(host, avg);
1154 ret = mmc_send_tuning(host->mmc, opcode, NULL);
1155
1156 dev_dbg(mmc_dev(host->mmc), "tuning %s at 0x%x ret %d\n",
1157 ret ? "failed" : "passed", avg, ret);
1158
1159 return ret;
1160 }
1161
1162 static inline void sg2042_sdhci_phy_init(struct sdhci_host *host)
1163 {
1164 u32 val;
1165
1166 /* Assert phy reset & set tx drive strength */
1167 val = sdhci_readl(host, PHY_CNFG_R);
1168 val &= ~PHY_CNFG_RSTN_DEASSERT;
1169 val |= FIELD_PREP(PHY_CNFG_PHY_PWRGOOD_MASK, 1);
1170 val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP_SG2042);
1171 val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN_SG2042);
1172 sdhci_writel(host, val, PHY_CNFG_R);
1173
1174 /* Configure phy pads */
1175 val = PHY_PAD_RXSEL_3V3;
1176 val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
1177 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
1178 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1179 sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
1180 sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
1181 sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
1182
1183 val = PHY_PAD_RXSEL_3V3;
1184 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
1185 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1186 sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
1187
1188 val = PHY_PAD_RXSEL_3V3;
1189 val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
1190 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
1191 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1192 sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
1193
1194 /* Configure delay line */
1195 /* Enable fixed delay */
1196 sdhci_writeb(host, PHY_SDCLKDL_CNFG_EXTDLY_EN, PHY_SDCLKDL_CNFG_R);
1197 /*
1198 * Set delay line.
1199 * It's recommended that bit UPDATE_DC[4] be 1 while SDCLKDL_DC is being written.
1200 * Ensure UPDATE_DC[4] is '0' when not updating code.
1201 */
1202 val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
1203 val |= PHY_SDCLKDL_CNFG_UPDATE;
1204 sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
1205 /* Add 10 * 70ps = 0.7ns for output delay */
1206 sdhci_writeb(host, 10, PHY_SDCLKDL_DC_R);
1207 val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
1208 val &= ~(PHY_SDCLKDL_CNFG_UPDATE);
1209 sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
1210
1211 /* Set SMPLDL_CNFG, Bypass */
1212 sdhci_writeb(host, PHY_SMPLDL_CNFG_BYPASS_EN, PHY_SMPLDL_CNFG_R);
1213
1214 /* Set ATDL_CNFG; the tuning clk is not used for init */
1215 val = FIELD_PREP(PHY_ATDL_CNFG_INPSEL_MASK, PHY_ATDL_CNFG_INPSEL_SG2042);
1216 sdhci_writeb(host, val, PHY_ATDL_CNFG_R);
1217
1218 /* Deassert phy reset */
1219 val = sdhci_readl(host, PHY_CNFG_R);
1220 val |= PHY_CNFG_RSTN_DEASSERT;
1221 sdhci_writel(host, val, PHY_CNFG_R);
1222 }
1223
1224 static void sg2042_sdhci_reset(struct sdhci_host *host, u8 mask)
1225 {
1226 sdhci_reset(host, mask);
1227
1228 if (mask & SDHCI_RESET_ALL)
1229 sg2042_sdhci_phy_init(host);
1230 }
1231
1232 static int sg2042_init(struct device *dev, struct sdhci_host *host,
1233 struct dwcmshc_priv *dwc_priv)
1234 {
1235 static const char * const clk_ids[] = {"timer"};
1236
1237 return dwcmshc_get_enable_other_clks(mmc_dev(host->mmc), dwc_priv,
1238 ARRAY_SIZE(clk_ids), clk_ids);
1239 }
1240
1241 static void sdhci_eic7700_set_clock(struct sdhci_host *host, unsigned int clock)
1242 {
1243 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1244 u16 clk;
1245
1246 host->mmc->actual_clock = clock;
1247
1248 if (clock == 0) {
1249 sdhci_set_clock(host, clock);
1250 return;
1251 }
1252
1253 clk_set_rate(pltfm_host->clk, clock);
1254
1255 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1256 clk |= SDHCI_CLOCK_INT_EN;
1257 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1258
1259 dwcmshc_enable_card_clk(host);
1260 }
1261
1262 static void sdhci_eic7700_config_phy_delay(struct sdhci_host *host, int delay)
1263 {
1264 delay &= PHY_CLK_MAX_DELAY_MASK;
1265
1266 /* phy clk delay line config */
1267 sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R);
1268 sdhci_writeb(host, delay, PHY_SDCLKDL_DC_R);
1269 sdhci_writeb(host, 0x0, PHY_SDCLKDL_CNFG_R);
1270 }
1271
1272 static void sdhci_eic7700_config_phy(struct sdhci_host *host)
1273 {
1274 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1275 struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
1276 u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
1277 struct eic7700_priv *priv = dwc_priv->priv;
1278 unsigned int val, drv;
1279
1280 drv = FIELD_PREP(PHY_CNFG_PAD_SP_MASK, priv->drive_impedance & 0xF);
1281 drv |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, (priv->drive_impedance >> 4) & 0xF);
1282
1283 if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
1284 val = sdhci_readw(host, dwc_priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
1285 val |= DWCMSHC_CARD_IS_EMMC;
1286 sdhci_writew(host, val, dwc_priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
1287 }
1288
1289 /* reset the phy and configure the phy's pads */
1290 sdhci_writel(host, drv | ~PHY_CNFG_RSTN_DEASSERT, PHY_CNFG_R);
1291
1292 /* configure phy pads */
1293 val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1294 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1295 val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
1296 val |= PHY_PAD_RXSEL_1V8;
1297 sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
1298 sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
1299 sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
1300
1301 /* Clock PAD Setting */
1302 val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1303 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1304 sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
1305
1306 /* PHY strobe PAD setting (EMMC only) */
1307 if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
1308 val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1309 val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1310 val |= PHY_PAD_RXSEL_1V8;
1311 sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
1312 }
1313 usleep_range(2000, 3000);
1314 sdhci_writel(host, drv | PHY_CNFG_RSTN_DEASSERT, PHY_CNFG_R);
1315 sdhci_eic7700_config_phy_delay(host, dwc_priv->delay_line);
1316 }
1317
1318 static void sdhci_eic7700_reset(struct sdhci_host *host, u8 mask)
1319 {
1320 sdhci_reset(host, mask);
1321
1322 /* after a full reset, the phy's configuration is cleared */
1323 if (mask == SDHCI_RESET_ALL)
1324 sdhci_eic7700_config_phy(host);
1325 }
1326
1327 static int sdhci_eic7700_reset_init(struct device *dev, struct eic7700_priv *priv)
1328 {
1329 int ret;
1330
1331 priv->reset = devm_reset_control_array_get_optional_exclusive(dev);
1332 if (IS_ERR(priv->reset)) {
1333 ret = PTR_ERR(priv->reset);
1334 dev_err(dev, "failed to get reset control %d\n", ret);
1335 return ret;
1336 }
1337
1338 ret = reset_control_assert(priv->reset);
1339 if (ret) {
1340 dev_err(dev, "Failed to assert reset signals: %d\n", ret);
1341 return ret;
1342 }
1343 usleep_range(2000, 2100);
1344 ret = reset_control_deassert(priv->reset);
1345 if (ret) {
1346 dev_err(dev, "Failed to deassert reset signals: %d\n", ret);
1347 return ret;
1348 }
1349
1350 return ret;
1351 }
1352
1353 static unsigned int eic7700_convert_drive_impedance_ohm(struct device *dev, unsigned int dr_ohm)
1354 {
1355 switch (dr_ohm) {
1356 case 100:
1357 return PHYCTRL_DR_100OHM;
1358 case 66:
1359 return PHYCTRL_DR_66OHM;
1360 case 50:
1361 return PHYCTRL_DR_50OHM;
1362 case 40:
1363 return PHYCTRL_DR_40OHM;
1364 case 33:
1365 return PHYCTRL_DR_33OHM;
1366 }
1367
1368 dev_warn(dev, "Invalid value %u for drive-impedance-ohms.\n", dr_ohm);
1369 return PHYCTRL_DR_50OHM;
1370 }
1371
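/*
 * Delay-code tuning (descriptive comment): sweep the PHY delay code from 0 to
 * PHY_DELAY_CODE_MAX, note the first and last passing codes, and program the
 * midpoint. If no code passes, the default delay_line value is restored.
 */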
1372 static int sdhci_eic7700_delay_tuning(struct sdhci_host *host, u32 opcode)
1373 {
1374 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1375 struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
1376 int delay_min = -1;
1377 int delay_max = -1;
1378 int cmd_error = 0;
1379 int delay = 0;
1380 int i = 0;
1381 int ret;
1382
1383 for (i = 0; i <= PHY_DELAY_CODE_MAX; i++) {
1384 sdhci_eic7700_config_phy_delay(host, i);
1385 ret = mmc_send_tuning(host->mmc, opcode, &cmd_error);
1386 if (ret) {
1387 host->ops->reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1388 usleep_range(200, 210);
1389 if (delay_min != -1 && delay_max != -1)
1390 break;
1391 } else {
1392 if (delay_min == -1) {
1393 delay_min = i;
1394 continue;
1395 } else {
1396 delay_max = i;
1397 continue;
1398 }
1399 }
1400 }
1401 if (delay_min == -1 && delay_max == -1) {
1402 pr_err("%s: delay code tuning failed!\n", mmc_hostname(host->mmc));
1403 sdhci_eic7700_config_phy_delay(host, dwc_priv->delay_line);
1404 return ret;
1405 }
1406
1407 delay = (delay_min + delay_max) / 2;
1408 sdhci_eic7700_config_phy_delay(host, delay);
1409
1410 return 0;
1411 }
1412
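/*
 * Phase-code tuning (descriptive comment): sweep DWCMSHC_AT_STAT from 0 to
 * MAX_PHASE_CODE. For SD cards the widest passing window is tracked (bailing
 * out early once it exceeds TUNING_RANGE_THRESHOLD) and the final code is
 * re-verified; for eMMC the first passing window is used and its midpoint is
 * programmed directly.
 */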
1413 static int sdhci_eic7700_phase_code_tuning(struct sdhci_host *host, u32 opcode)
1414 {
1415 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1416 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
1417 u32 sd_caps = MMC_CAP2_NO_MMC | MMC_CAP2_NO_SDIO;
1418 int phase_code = -1;
1419 int code_range = -1;
1420 bool is_sd = false;
1421 int code_min = -1;
1422 int code_max = -1;
1423 int cmd_error = 0;
1424 int ret = 0;
1425 int i = 0;
1426
1427 if ((host->mmc->caps2 & sd_caps) == sd_caps)
1428 is_sd = true;
1429
1430 for (i = 0; i <= MAX_PHASE_CODE; i++) {
1431 /* Centered Phase code */
1432 sdhci_writew(host, i, priv->vendor_specific_area1 + DWCMSHC_AT_STAT);
1433 ret = mmc_send_tuning(host->mmc, opcode, &cmd_error);
1434 host->ops->reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1435
1436 if (ret) {
1437 /* SD specific range tracking */
1438 if (is_sd && code_min != -1 && code_max != -1) {
1439 if (code_max - code_min > code_range) {
1440 code_range = code_max - code_min;
1441 phase_code = (code_min + code_max) / 2;
1442 if (code_range > TUNING_RANGE_THRESHOLD)
1443 break;
1444 }
1445 code_min = -1;
1446 code_max = -1;
1447 }
1448 /* EMMC breaks after first valid range */
1449 if (!is_sd && code_min != -1 && code_max != -1)
1450 break;
1451 } else {
1452 /* Track valid phase code range */
1453 if (code_min == -1) {
1454 code_min = i;
1455 if (!is_sd)
1456 continue;
1457 }
1458 code_max = i;
1459 if (is_sd && i == MAX_PHASE_CODE) {
1460 if (code_max - code_min > code_range) {
1461 code_range = code_max - code_min;
1462 phase_code = (code_min + code_max) / 2;
1463 }
1464 }
1465 }
1466 }
1467
1468 /* Handle tuning failure case */
1469 if ((is_sd && phase_code == -1) ||
1470 (!is_sd && code_min == -1 && code_max == -1)) {
1471 pr_err("%s: phase code tuning failed!\n", mmc_hostname(host->mmc));
1472 sdhci_writew(host, 0, priv->vendor_specific_area1 + DWCMSHC_AT_STAT);
1473 return -EIO;
1474 }
1475 if (!is_sd)
1476 phase_code = (code_min + code_max) / 2;
1477
1478 sdhci_writew(host, phase_code, priv->vendor_specific_area1 + DWCMSHC_AT_STAT);
1479
1480 /* SD specific final verification */
1481 if (is_sd) {
1482 ret = mmc_send_tuning(host->mmc, opcode, &cmd_error);
1483 host->ops->reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1484 if (ret) {
1485 pr_err("%s: Final phase code 0x%x verification failed!\n",
1486 mmc_hostname(host->mmc), phase_code);
1487 return ret;
1488 }
1489 }
1490
1491 return 0;
1492 }
1493
1494 static int sdhci_eic7700_executing_tuning(struct sdhci_host *host, u32 opcode)
1495 {
1496 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1497 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
1498 u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
1499 int ret = 0;
1500 u16 ctrl;
1501 u32 val;
1502
1503 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1504 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1505 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1506
1507 val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
1508 val |= AT_CTRL_SW_TUNE_EN;
1509 sdhci_writew(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
1510
1511 sdhci_writew(host, 0, priv->vendor_specific_area1 + DWCMSHC_AT_STAT);
1512 sdhci_writew(host, 0x0, SDHCI_CMD_DATA);
1513
1514 if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
1515 ret = sdhci_eic7700_delay_tuning(host, opcode);
1516 if (ret)
1517 return ret;
1518 }
1519
1520 ret = sdhci_eic7700_phase_code_tuning(host, opcode);
1521 if (ret)
1522 return ret;
1523
1524 return 0;
1525 }
1526
1527 static void sdhci_eic7700_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
1528 {
1529 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1530 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
1531 u8 status;
1532 u32 val;
1533 int ret;
1534
1535 dwcmshc_set_uhs_signaling(host, timing);
1536
1537 /* The DLL needs to be locked when running HS400 at 200MHz */
1538 if (timing == MMC_TIMING_MMC_HS400 && host->clock == 200000000) {
1539 val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
1540 val &= ~(FIELD_PREP(AT_CTRL_POST_CHANGE_DLY_MASK, AT_CTRL_POST_CHANGE_DLY));
1541 /* 3-cycle latency */
1542 val |= FIELD_PREP(AT_CTRL_POST_CHANGE_DLY_MASK, 0x2);
1543 sdhci_writew(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
1544
1545 sdhci_writeb(host, FIELD_PREP(PHY_DLL_CNFG1_SLVDLY_MASK, PHY_DLL_CNFG1_SLVDLY) |
1546 0x3, PHY_DLL_CNFG1_R); /* DLL wait cycle input */
1547 /* DLL jump step input */
1548 sdhci_writeb(host, 0x02, PHY_DLL_CNFG2_R);
1549 sdhci_writeb(host, FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK,
1550 PHY_DLLDL_CNFG_SLV_INPSEL), PHY_DLLDL_CNFG_R);
1551 /* Sets the value of DLL's offset input */
1552 sdhci_writeb(host, 0x00, PHY_DLL_OFFST_R);
1553 /*
1554 * Set the DLL's lbt loadval input, which controls the lbt timer's
1555 * timeout value at which the DLL runs a revalidation cycle.
1556 */
1557 sdhci_writew(host, 0xffff, PHY_DLLBT_CNFG_R);
1558 sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R);
1559 usleep_range(100, 110);
1560
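/* Poll for up to 1s for the DLL to report lock */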
1561 ret = read_poll_timeout(sdhci_readb, status, status & DLL_LOCK_STS, 100, 1000000,
1562 false, host, PHY_DLL_STATUS_R);
1563 if (ret) {
1564 pr_err("%s: DLL lock timeout! status: 0x%x\n",
1565 mmc_hostname(host->mmc), status);
1566 return;
1567 }
1568
1569 status = sdhci_readb(host, PHY_DLL_STATUS_R);
1570 if (status & DLL_ERROR_STS) {
1571 pr_err("%s: DLL lock failed!err_status:0x%x\n",
1572 mmc_hostname(host->mmc), status);
1573 }
1574 }
1575 }
1576
1577 static void sdhci_eic7700_set_uhs_wrapper(struct sdhci_host *host, unsigned int timing)
1578 {
1579 u32 sd_caps = MMC_CAP2_NO_MMC | MMC_CAP2_NO_SDIO;
1580
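/* SD-only hosts use the generic UHS handling; eMMC needs the HS400 DLL setup above */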
1581 if ((host->mmc->caps2 & sd_caps) == sd_caps)
1582 sdhci_set_uhs_signaling(host, timing);
1583 else
1584 sdhci_eic7700_set_uhs_signaling(host, timing);
1585 }
1586
1587 static int eic7700_init(struct device *dev, struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
1588 {
1589 u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
1590 unsigned int val, hsp_int_status, hsp_pwr_ctrl;
1591 struct of_phandle_args args;
1592 struct eic7700_priv *priv;
1593 struct regmap *hsp_regmap;
1594 int ret;
1595
1596 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1597 if (!priv)
1598 return -ENOMEM;
1599
1600 dwc_priv->priv = priv;
1601
1602 ret = sdhci_eic7700_reset_init(dev, dwc_priv->priv);
1603 if (ret) {
1604 dev_err(dev, "failed to reset\n");
1605 return ret;
1606 }
1607
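/*
 * Look up the HSP system controller; the two phandle arguments give the
 * register offsets used for the clock-stable and voltage-stable writes below.
 */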
1608 ret = of_parse_phandle_with_fixed_args(dev->of_node, "eswin,hsp-sp-csr", 2, 0, &args);
1609 if (ret) {
1610 dev_err(dev, "Fail to parse 'eswin,hsp-sp-csr' phandle (%d)\n", ret);
1611 return ret;
1612 }
1613
1614 hsp_regmap = syscon_node_to_regmap(args.np);
1615 if (IS_ERR(hsp_regmap)) {
1616 dev_err(dev, "Failed to get regmap for 'eswin,hsp-sp-csr'\n");
1617 of_node_put(args.np);
1618 return PTR_ERR(hsp_regmap);
1619 }
1620 hsp_int_status = args.args[0];
1621 hsp_pwr_ctrl = args.args[1];
1622 of_node_put(args.np);
1623 /*
1624 * Assert clock stability: write EIC7700_INT_CLK_STABLE to hsp_int_status.
1625 * This signals to the eMMC controller that platform clocks (card, ACLK,
1626 * BCLK, TMCLK) are enabled and stable.
1627 */
1628 regmap_write(hsp_regmap, hsp_int_status, EIC7700_INT_CLK_STABLE);
1629 /*
1630 * Assert voltage stability: write EIC7700_HOST_VAL_STABLE to hsp_pwr_ctrl.
1631 * This signals that VDD is stable and permits transition to high-speed
1632 * modes (e.g., UHS-I).
1633 */
1634 regmap_write(hsp_regmap, hsp_pwr_ctrl, EIC7700_HOST_VAL_STABLE);
1635
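/* Select the PHY delay-line code depending on whether the host is eMMC-only or SD */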
1636 if ((host->mmc->caps2 & emmc_caps) == emmc_caps)
1637 dwc_priv->delay_line = PHY_DELAY_CODE_EMMC;
1638 else
1639 dwc_priv->delay_line = PHY_DELAY_CODE_SD;
1640
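/* Optional DT property giving the pad drive impedance in ohms */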
1641 if (!of_property_read_u32(dev->of_node, "eswin,drive-impedance-ohms", &val))
1642 priv->drive_impedance = eic7700_convert_drive_impedance_ohm(dev, val);
1643 return 0;
1644 }
1645
1646 static const struct sdhci_ops sdhci_dwcmshc_ops = {
1647 .set_clock = sdhci_set_clock,
1648 .set_bus_width = sdhci_set_bus_width,
1649 .set_uhs_signaling = dwcmshc_set_uhs_signaling,
1650 .get_max_clock = dwcmshc_get_max_clock,
1651 .reset = dwcmshc_reset,
1652 .adma_write_desc = dwcmshc_adma_write_desc,
1653 .irq = dwcmshc_cqe_irq_handler,
1654 };
1655
1656 #ifdef CONFIG_ACPI
1657 static void dwcmshc_bf3_hw_reset(struct sdhci_host *host)
1658 {
1659 struct arm_smccc_res res = { 0 };
1660
1661 arm_smccc_smc(BLUEFIELD_SMC_SET_EMMC_RST_N, 0, 0, 0, 0, 0, 0, 0, &res);
1662
1663 if (res.a0)
1664 pr_err("%s: RST_N failed.\n", mmc_hostname(host->mmc));
1665 }
1666
1667 static const struct sdhci_ops sdhci_dwcmshc_bf3_ops = {
1668 .set_clock = sdhci_set_clock,
1669 .set_bus_width = sdhci_set_bus_width,
1670 .set_uhs_signaling = dwcmshc_set_uhs_signaling,
1671 .get_max_clock = dwcmshc_get_max_clock,
1672 .reset = sdhci_reset,
1673 .adma_write_desc = dwcmshc_adma_write_desc,
1674 .irq = dwcmshc_cqe_irq_handler,
1675 .hw_reset = dwcmshc_bf3_hw_reset,
1676 };
1677 #endif
1678
1679 static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = {
1680 .set_clock = dwcmshc_rk3568_set_clock,
1681 .set_bus_width = sdhci_set_bus_width,
1682 .set_uhs_signaling = dwcmshc_set_uhs_signaling,
1683 .get_max_clock = rk35xx_get_max_clock,
1684 .reset = rk35xx_sdhci_reset,
1685 .adma_write_desc = dwcmshc_adma_write_desc,
1686 .irq = dwcmshc_cqe_irq_handler,
1687 };
1688
1689 static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = {
1690 .set_clock = sdhci_set_clock,
1691 .set_bus_width = sdhci_set_bus_width,
1692 .set_uhs_signaling = th1520_set_uhs_signaling,
1693 .get_max_clock = dwcmshc_get_max_clock,
1694 .reset = th1520_sdhci_reset,
1695 .adma_write_desc = dwcmshc_adma_write_desc,
1696 .voltage_switch = dwcmshc_phy_init,
1697 .platform_execute_tuning = th1520_execute_tuning,
1698 };
1699
1700 static const struct sdhci_ops sdhci_dwcmshc_cv18xx_ops = {
1701 .set_clock = sdhci_set_clock,
1702 .set_bus_width = sdhci_set_bus_width,
1703 .set_uhs_signaling = dwcmshc_set_uhs_signaling,
1704 .get_max_clock = dwcmshc_get_max_clock,
1705 .reset = cv18xx_sdhci_reset,
1706 .adma_write_desc = dwcmshc_adma_write_desc,
1707 .platform_execute_tuning = cv18xx_sdhci_execute_tuning,
1708 };
1709
1710 static const struct sdhci_ops sdhci_dwcmshc_sg2042_ops = {
1711 .set_clock = sdhci_set_clock,
1712 .set_bus_width = sdhci_set_bus_width,
1713 .set_uhs_signaling = dwcmshc_set_uhs_signaling,
1714 .get_max_clock = dwcmshc_get_max_clock,
1715 .reset = sg2042_sdhci_reset,
1716 .adma_write_desc = dwcmshc_adma_write_desc,
1717 .platform_execute_tuning = th1520_execute_tuning,
1718 };
1719
1720 static const struct sdhci_ops sdhci_dwcmshc_eic7700_ops = {
1721 .set_clock = sdhci_eic7700_set_clock,
1722 .get_max_clock = sdhci_pltfm_clk_get_max_clock,
1723 .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
1724 .set_bus_width = sdhci_set_bus_width,
1725 .reset = sdhci_eic7700_reset,
1726 .set_uhs_signaling = sdhci_eic7700_set_uhs_wrapper,
1727 .set_power = sdhci_set_power_and_bus_voltage,
1728 .irq = dwcmshc_cqe_irq_handler,
1729 .platform_execute_tuning = sdhci_eic7700_executing_tuning,
1730 };
1731
1732 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_pdata = {
1733 .pdata = {
1734 .ops = &sdhci_dwcmshc_ops,
1735 .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1736 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1737 },
1738 };
1739
1740 #ifdef CONFIG_ACPI
1741 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_bf3_pdata = {
1742 .pdata = {
1743 .ops = &sdhci_dwcmshc_bf3_ops,
1744 .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1745 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1746 SDHCI_QUIRK2_ACMD23_BROKEN,
1747 },
1748 };
1749 #endif
1750
1751 static const struct cqhci_host_ops rk35xx_cqhci_ops = {
1752 .pre_enable = rk35xx_sdhci_cqe_pre_enable,
1753 .enable = rk35xx_sdhci_cqe_enable,
1754 .disable = rk35xx_sdhci_cqe_disable,
1755 .post_disable = rk35xx_sdhci_cqe_post_disable,
1756 .dumpregs = dwcmshc_cqhci_dumpregs,
1757 .set_tran_desc = dwcmshc_set_tran_desc,
1758 };
1759
1760 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
1761 .pdata = {
1762 .ops = &sdhci_dwcmshc_rk35xx_ops,
1763 .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
1764 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
1765 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1766 SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
1767 },
1768 .cqhci_host_ops = &rk35xx_cqhci_ops,
1769 .init = dwcmshc_rk35xx_init,
1770 .postinit = dwcmshc_rk35xx_postinit,
1771 };
1772
1773 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk3576_pdata = {
1774 .pdata = {
1775 .ops = &sdhci_dwcmshc_rk35xx_ops,
1776 .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
1777 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
1778 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1779 SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
1780 },
1781 .cqhci_host_ops = &rk35xx_cqhci_ops,
1782 .init = dwcmshc_rk35xx_init,
1783 .postinit = dwcmshc_rk3576_postinit,
1784 };
1785
1786 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_th1520_pdata = {
1787 .pdata = {
1788 .ops = &sdhci_dwcmshc_th1520_ops,
1789 .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1790 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1791 },
1792 .init = th1520_init,
1793 };
1794
1795 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_cv18xx_pdata = {
1796 .pdata = {
1797 .ops = &sdhci_dwcmshc_cv18xx_ops,
1798 .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1799 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1800 },
1801 };
1802
1803 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_sg2042_pdata = {
1804 .pdata = {
1805 .ops = &sdhci_dwcmshc_sg2042_ops,
1806 .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1807 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1808 },
1809 .init = sg2042_init,
1810 };
1811
1812 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_eic7700_pdata = {
1813 .pdata = {
1814 .ops = &sdhci_dwcmshc_eic7700_ops,
1815 .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
1816 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
1817 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1818 SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
1819 },
1820 .init = eic7700_init,
1821 };
1822
1823 static const struct cqhci_host_ops dwcmshc_cqhci_ops = {
1824 .enable = dwcmshc_sdhci_cqe_enable,
1825 .disable = sdhci_cqe_disable,
1826 .dumpregs = dwcmshc_cqhci_dumpregs,
1827 .set_tran_desc = dwcmshc_set_tran_desc,
1828 };
1829
1830 static void dwcmshc_cqhci_init(struct sdhci_host *host, struct platform_device *pdev,
1831 const struct dwcmshc_pltfm_data *pltfm_data)
1832 {
1833 struct cqhci_host *cq_host;
1834 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1835 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
1836 bool dma64 = false;
1837 u16 clk;
1838 int err;
1839
1840 host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1841 cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
1842 if (!cq_host) {
1843 dev_err(mmc_dev(host->mmc), "Unable to setup CQE: not enough memory\n");
1844 goto dsbl_cqe_caps;
1845 }
1846
1847 /*
1848 * For the dwcmshc host controller, the internal clock must be enabled
1849 * before registers in Vendor Specific Area 2 can be accessed.
1850 */
1851 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1852 clk |= SDHCI_CLOCK_INT_EN;
1853 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1854 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1855 if (!(clk & SDHCI_CLOCK_INT_EN)) {
1856 dev_err(mmc_dev(host->mmc), "Unable to setup CQE: internal clock enable error\n");
1857 goto free_cq_host;
1858 }
1859
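/* CQHCI registers live in Vendor Specific Area 2; prefer platform-specific CQE ops when provided */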
1860 cq_host->mmio = host->ioaddr + priv->vendor_specific_area2;
1861 if (pltfm_data->cqhci_host_ops)
1862 cq_host->ops = pltfm_data->cqhci_host_ops;
1863 else
1864 cq_host->ops = &dwcmshc_cqhci_ops;
1865
1866 /* Enable use of 128-bit task descriptors */
1867 dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1868 if (dma64) {
1869 dev_dbg(mmc_dev(host->mmc), "128-bit task descriptors\n");
1870 cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1871 }
1872 err = cqhci_init(cq_host, host->mmc, dma64);
1873 if (err) {
1874 dev_err(mmc_dev(host->mmc), "Unable to setup CQE: error %d\n", err);
1875 goto int_clock_disable;
1876 }
1877
1878 dev_dbg(mmc_dev(host->mmc), "CQE init done\n");
1879
1880 return;
1881
1882 int_clock_disable:
1883 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1884 clk &= ~SDHCI_CLOCK_INT_EN;
1885 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1886
1887 free_cq_host:
1888 devm_kfree(&pdev->dev, cq_host);
1889
1890 dsbl_cqe_caps:
1891 host->mmc->caps2 &= ~(MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD);
1892 }
1893
1894 static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
1895 {
1896 .compatible = "rockchip,rk3588-dwcmshc",
1897 .data = &sdhci_dwcmshc_rk35xx_pdata,
1898 },
1899 {
1900 .compatible = "rockchip,rk3576-dwcmshc",
1901 .data = &sdhci_dwcmshc_rk3576_pdata,
1902 },
1903 {
1904 .compatible = "rockchip,rk3568-dwcmshc",
1905 .data = &sdhci_dwcmshc_rk35xx_pdata,
1906 },
1907 {
1908 .compatible = "snps,dwcmshc-sdhci",
1909 .data = &sdhci_dwcmshc_pdata,
1910 },
1911 {
1912 .compatible = "sophgo,cv1800b-dwcmshc",
1913 .data = &sdhci_dwcmshc_cv18xx_pdata,
1914 },
1915 {
1916 .compatible = "sophgo,sg2002-dwcmshc",
1917 .data = &sdhci_dwcmshc_cv18xx_pdata,
1918 },
1919 {
1920 .compatible = "thead,th1520-dwcmshc",
1921 .data = &sdhci_dwcmshc_th1520_pdata,
1922 },
1923 {
1924 .compatible = "sophgo,sg2042-dwcmshc",
1925 .data = &sdhci_dwcmshc_sg2042_pdata,
1926 },
1927 {
1928 .compatible = "eswin,eic7700-dwcmshc",
1929 .data = &sdhci_dwcmshc_eic7700_pdata,
1930 },
1931 {},
1932 };
1933 MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
1934
1935 #ifdef CONFIG_ACPI
1936 static const struct acpi_device_id sdhci_dwcmshc_acpi_ids[] = {
1937 {
1938 .id = "MLNXBF30",
1939 .driver_data = (kernel_ulong_t)&sdhci_dwcmshc_bf3_pdata,
1940 },
1941 {}
1942 };
1943 MODULE_DEVICE_TABLE(acpi, sdhci_dwcmshc_acpi_ids);
1944 #endif
1945
1946 static int dwcmshc_probe(struct platform_device *pdev)
1947 {
1948 struct device *dev = &pdev->dev;
1949 struct sdhci_pltfm_host *pltfm_host;
1950 struct sdhci_host *host;
1951 struct dwcmshc_priv *priv;
1952 const struct dwcmshc_pltfm_data *pltfm_data;
1953 int err;
1954 u32 extra, caps;
1955
1956 pltfm_data = device_get_match_data(&pdev->dev);
1957 if (!pltfm_data) {
1958 dev_err(&pdev->dev, "Error: No device match data found\n");
1959 return -ENODEV;
1960 }
1961
1962 host = sdhci_pltfm_init(pdev, &pltfm_data->pdata,
1963 sizeof(struct dwcmshc_priv));
1964 if (IS_ERR(host))
1965 return PTR_ERR(host);
1966
1967 /*
1968 * Extra ADMA table entries for handling descriptors that cross a 128M boundary.
1969 */
1970 extra = DIV_ROUND_UP_ULL(dma_get_required_mask(dev), SZ_128M);
1971 if (extra > SDHCI_MAX_SEGS)
1972 extra = SDHCI_MAX_SEGS;
1973 host->adma_table_cnt += extra;
1974
1975 pltfm_host = sdhci_priv(host);
1976 priv = sdhci_pltfm_priv(pltfm_host);
1977
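/* DT platforms provide a mandatory core clock and an optional bus clock */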
1978 if (dev->of_node) {
1979 pltfm_host->clk = devm_clk_get(dev, "core");
1980 if (IS_ERR(pltfm_host->clk))
1981 return dev_err_probe(dev, PTR_ERR(pltfm_host->clk),
1982 "failed to get core clk\n");
1983
1984 err = clk_prepare_enable(pltfm_host->clk);
1985 if (err)
1986 return err;
1987
1988 priv->bus_clk = devm_clk_get(dev, "bus");
1989 if (!IS_ERR(priv->bus_clk))
1990 clk_prepare_enable(priv->bus_clk);
1991 }
1992
1993 err = mmc_of_parse(host->mmc);
1994 if (err)
1995 goto err_clk;
1996
1997 sdhci_get_of_property(pdev);
1998
1999 priv->vendor_specific_area1 =
2000 sdhci_readl(host, DWCMSHC_P_VENDOR_AREA1) & DWCMSHC_AREA1_MASK;
2001
2002 host->mmc_host_ops.request = dwcmshc_request;
2003 host->mmc_host_ops.hs400_enhanced_strobe = dwcmshc_hs400_enhanced_strobe;
2004 host->mmc_host_ops.execute_tuning = dwcmshc_execute_tuning;
2005
2006 if (pltfm_data->init) {
2007 err = pltfm_data->init(&pdev->dev, host, priv);
2008 if (err)
2009 goto err_clk;
2010 }
2011
2012 #ifdef CONFIG_ACPI
2013 if (pltfm_data == &sdhci_dwcmshc_bf3_pdata)
2014 sdhci_enable_v4_mode(host);
2015 #endif
2016
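/* Enable SDHCI v4 mode when the controller advertises 64-bit v4 addressing */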
2017 caps = sdhci_readl(host, SDHCI_CAPABILITIES);
2018 if (caps & SDHCI_CAN_64BIT_V4)
2019 sdhci_enable_v4_mode(host);
2020
2021 host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
2022
2023 pm_runtime_get_noresume(dev);
2024 pm_runtime_set_active(dev);
2025 pm_runtime_enable(dev);
2026
2027 err = sdhci_setup_host(host);
2028 if (err)
2029 goto err_rpm;
2030
2031 /* Setup Command Queue Engine if enabled */
2032 if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
2033 priv->vendor_specific_area2 =
2034 sdhci_readw(host, DWCMSHC_P_VENDOR_AREA2);
2035
2036 dwcmshc_cqhci_init(host, pdev, pltfm_data);
2037 }
2038
2039 if (pltfm_data->postinit)
2040 pltfm_data->postinit(host, priv);
2041
2042 err = __sdhci_add_host(host);
2043 if (err)
2044 goto err_setup_host;
2045
2046 pm_runtime_put(dev);
2047
2048 return 0;
2049
2050 err_setup_host:
2051 sdhci_cleanup_host(host);
2052 err_rpm:
2053 pm_runtime_disable(dev);
2054 pm_runtime_put_noidle(dev);
2055 err_clk:
2056 clk_disable_unprepare(pltfm_host->clk);
2057 clk_disable_unprepare(priv->bus_clk);
2058 clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
2059 return err;
2060 }
2061
2062 static void dwcmshc_disable_card_clk(struct sdhci_host *host)
2063 {
2064 u16 ctrl;
2065
2066 ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2067 if (ctrl & SDHCI_CLOCK_CARD_EN) {
2068 ctrl &= ~SDHCI_CLOCK_CARD_EN;
2069 sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
2070 }
2071 }
2072
2073 static void dwcmshc_remove(struct platform_device *pdev)
2074 {
2075 struct sdhci_host *host = platform_get_drvdata(pdev);
2076 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2077 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
2078
2079 pm_runtime_get_sync(&pdev->dev);
2080 pm_runtime_disable(&pdev->dev);
2081 pm_runtime_put_noidle(&pdev->dev);
2082
2083 sdhci_remove_host(host, 0);
2084
2085 dwcmshc_disable_card_clk(host);
2086
2087 clk_disable_unprepare(pltfm_host->clk);
2088 clk_disable_unprepare(priv->bus_clk);
2089 clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
2090 }
2091
2092 static int dwcmshc_suspend(struct device *dev)
2093 {
2094 struct sdhci_host *host = dev_get_drvdata(dev);
2095 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2096 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
2097 int ret;
2098
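/* Runtime-resume first so the card clock is running before the host is suspended */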
2099 pm_runtime_resume(dev);
2100
2101 if (host->mmc->caps2 & MMC_CAP2_CQE) {
2102 ret = cqhci_suspend(host->mmc);
2103 if (ret)
2104 return ret;
2105 }
2106
2107 ret = sdhci_suspend_host(host);
2108 if (ret)
2109 return ret;
2110
2111 clk_disable_unprepare(pltfm_host->clk);
2112 if (!IS_ERR(priv->bus_clk))
2113 clk_disable_unprepare(priv->bus_clk);
2114
2115 clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
2116
2117 return ret;
2118 }
2119
2120 static int dwcmshc_resume(struct device *dev)
2121 {
2122 struct sdhci_host *host = dev_get_drvdata(dev);
2123 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2124 struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
2125 int ret;
2126
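/* Re-enable the core, bus and any extra clocks before restoring the host */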
2127 ret = clk_prepare_enable(pltfm_host->clk);
2128 if (ret)
2129 return ret;
2130
2131 if (!IS_ERR(priv->bus_clk)) {
2132 ret = clk_prepare_enable(priv->bus_clk);
2133 if (ret)
2134 goto disable_clk;
2135 }
2136
2137 ret = clk_bulk_prepare_enable(priv->num_other_clks, priv->other_clks);
2138 if (ret)
2139 goto disable_bus_clk;
2140
2141 ret = sdhci_resume_host(host);
2142 if (ret)
2143 goto disable_other_clks;
2144
2145 if (host->mmc->caps2 & MMC_CAP2_CQE) {
2146 ret = cqhci_resume(host->mmc);
2147 if (ret)
2148 goto disable_other_clks;
2149 }
2150
2151 return 0;
2152
2153 disable_other_clks:
2154 clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
2155 disable_bus_clk:
2156 if (!IS_ERR(priv->bus_clk))
2157 clk_disable_unprepare(priv->bus_clk);
2158 disable_clk:
2159 clk_disable_unprepare(pltfm_host->clk);
2160 return ret;
2161 }
2162
2163 static int dwcmshc_runtime_suspend(struct device *dev)
2164 {
2165 struct sdhci_host *host = dev_get_drvdata(dev);
2166
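/* Runtime PM only gates the card clock */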
2167 dwcmshc_disable_card_clk(host);
2168
2169 return 0;
2170 }
2171
2172 static int dwcmshc_runtime_resume(struct device *dev)
2173 {
2174 struct sdhci_host *host = dev_get_drvdata(dev);
2175
2176 dwcmshc_enable_card_clk(host);
2177
2178 return 0;
2179 }
2180
2181 static const struct dev_pm_ops dwcmshc_pmops = {
2182 SYSTEM_SLEEP_PM_OPS(dwcmshc_suspend, dwcmshc_resume)
2183 RUNTIME_PM_OPS(dwcmshc_runtime_suspend, dwcmshc_runtime_resume, NULL)
2184 };
2185
2186 static struct platform_driver sdhci_dwcmshc_driver = {
2187 .driver = {
2188 .name = "sdhci-dwcmshc",
2189 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
2190 .of_match_table = sdhci_dwcmshc_dt_ids,
2191 .acpi_match_table = ACPI_PTR(sdhci_dwcmshc_acpi_ids),
2192 .pm = pm_ptr(&dwcmshc_pmops),
2193 },
2194 .probe = dwcmshc_probe,
2195 .remove = dwcmshc_remove,
2196 };
2197 module_platform_driver(sdhci_dwcmshc_driver);
2198
2199 MODULE_DESCRIPTION("SDHCI platform driver for Synopsys DWC MSHC");
2200 MODULE_AUTHOR("Jisheng Zhang <jszhang@kernel.org>");
2201 MODULE_LICENSE("GPL v2");
2202