xref: /linux/drivers/mmc/host/sdhci-of-dwcmshc.c (revision d4be90cce60e60cf99c419b74105d217eec194c1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for Synopsys DesignWare Cores Mobile Storage Host Controller
4  *
5  * Copyright (C) 2018 Synaptics Incorporated
6  *
7  * Author: Jisheng Zhang <jszhang@kernel.org>
8  */
9 
10 #include <linux/acpi.h>
11 #include <linux/arm-smccc.h>
12 #include <linux/bitfield.h>
13 #include <linux/clk.h>
14 #include <linux/clk-provider.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/iopoll.h>
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/of.h>
20 #include <linux/platform_device.h>
21 #include <linux/pm_domain.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/regmap.h>
24 #include <linux/reset.h>
25 #include <linux/sizes.h>
26 #include <linux/mfd/syscon.h>
27 #include <linux/units.h>
28 
29 #include "sdhci-pltfm.h"
30 #include "cqhci.h"
31 #include "sdhci-cqhci.h"
32 
33 #define SDHCI_DWCMSHC_ARG2_STUFF	GENMASK(31, 16)
34 
35 /* DWCMSHC specific Mode Select value */
36 #define DWCMSHC_CTRL_HS400		0x7
37 
38 /* DWC IP vendor area 1 pointer */
39 #define DWCMSHC_P_VENDOR_AREA1		0xe8
40 #define DWCMSHC_AREA1_MASK		GENMASK(11, 0)
41 /* Offset inside the vendor area 1 */
42 #define DWCMSHC_HOST_CTRL3		0x8
43 #define DWCMSHC_EMMC_CONTROL		0x2c
44 #define DWCMSHC_CARD_IS_EMMC		BIT(0)
45 #define DWCMSHC_ENHANCED_STROBE		BIT(8)
46 #define DWCMSHC_EMMC_ATCTRL		0x40
47 #define DWCMSHC_AT_STAT			0x44
48 /* Tuning and auto-tuning fields in AT_CTRL_R control register */
49 #define AT_CTRL_AT_EN			BIT(0) /* autotuning is enabled */
50 #define AT_CTRL_CI_SEL			BIT(1) /* interval to drive center phase select */
51 #define AT_CTRL_SWIN_TH_EN		BIT(2) /* sampling window threshold enable */
52 #define AT_CTRL_RPT_TUNE_ERR		BIT(3) /* enable reporting framing errors */
53 #define AT_CTRL_SW_TUNE_EN		BIT(4) /* enable software managed tuning */
54 #define AT_CTRL_WIN_EDGE_SEL_MASK	GENMASK(11, 8) /* bits [11:8] */
55 #define AT_CTRL_WIN_EDGE_SEL		0xf /* sampling window edge select */
56 #define AT_CTRL_TUNE_CLK_STOP_EN	BIT(16) /* clocks stopped during phase code change */
57 #define AT_CTRL_PRE_CHANGE_DLY_MASK	GENMASK(18, 17) /* bits [18:17] */
58 #define AT_CTRL_PRE_CHANGE_DLY		0x1  /* 2-cycle latency */
59 #define AT_CTRL_POST_CHANGE_DLY_MASK	GENMASK(20, 19) /* bits [20:19] */
60 #define AT_CTRL_POST_CHANGE_DLY		0x3  /* 4-cycle latency */
61 #define AT_CTRL_SWIN_TH_VAL_MASK	GENMASK(31, 24) /* bits [31:24] */
62 #define AT_CTRL_SWIN_TH_VAL		0x9  /* sampling window threshold */
63 
64 /* DWC IP vendor area 2 pointer */
65 #define DWCMSHC_P_VENDOR_AREA2		0xea
66 
67 /* Sophgo CV18XX specific Registers */
68 #define CV18XX_SDHCI_MSHC_CTRL			0x00
69 #define  CV18XX_EMMC_FUNC_EN			BIT(0)
70 #define  CV18XX_LATANCY_1T			BIT(1)
71 #define CV18XX_SDHCI_PHY_TX_RX_DLY		0x40
72 #define  CV18XX_PHY_TX_DLY_MSK			GENMASK(6, 0)
73 #define  CV18XX_PHY_TX_SRC_MSK			GENMASK(9, 8)
74 #define  CV18XX_PHY_TX_SRC_INVERT_CLK_TX	0x1
75 #define  CV18XX_PHY_RX_DLY_MSK			GENMASK(22, 16)
76 #define  CV18XX_PHY_RX_SRC_MSK			GENMASK(25, 24)
77 #define  CV18XX_PHY_RX_SRC_INVERT_RX_CLK	0x1
78 #define CV18XX_SDHCI_PHY_CONFIG			0x4c
79 #define  CV18XX_PHY_TX_BPS			BIT(0)
80 
81 #define CV18XX_TUNE_MAX				128
82 #define CV18XX_TUNE_STEP			1
83 #define CV18XX_RETRY_TUNING_MAX			50
84 
85 /* Rockchip specific Registers */
86 #define DWCMSHC_EMMC_DLL_CTRL		0x800
87 #define DWCMSHC_EMMC_DLL_RXCLK		0x804
88 #define DWCMSHC_EMMC_DLL_TXCLK		0x808
89 #define DWCMSHC_EMMC_DLL_STRBIN		0x80c
90 #define DECMSHC_EMMC_DLL_CMDOUT		0x810
91 #define DECMSHC_EMMC_MISC_CON		0x81C
92 #define MISC_INTCLK_EN			BIT(1)
93 #define DWCMSHC_EMMC_DLL_STATUS0	0x840
94 #define DWCMSHC_EMMC_DLL_START		BIT(0)
95 #define DWCMSHC_EMMC_DLL_LOCKED		BIT(8)
96 #define DWCMSHC_EMMC_DLL_TIMEOUT	BIT(9)
97 #define DWCMSHC_EMMC_DLL_RXCLK_SRCSEL	29
98 #define DWCMSHC_EMMC_DLL_START_POINT	16
99 #define DWCMSHC_EMMC_DLL_INC		8
100 #define DWCMSHC_EMMC_DLL_BYPASS		BIT(24)
101 #define DWCMSHC_EMMC_DLL_DLYENA		BIT(27)
102 #define DLL_TXCLK_TAPNUM_DEFAULT	0x10
103 #define DLL_TXCLK_TAPNUM_90_DEGREES	0xA
104 #define DLL_TXCLK_TAPNUM_FROM_SW	BIT(24)
105 #define DLL_STRBIN_TAPNUM_DEFAULT	0x4
106 #define DLL_STRBIN_TAPNUM_FROM_SW	BIT(24)
107 #define DLL_STRBIN_DELAY_NUM_SEL	BIT(26)
108 #define DLL_STRBIN_DELAY_NUM_OFFSET	16
109 #define DLL_STRBIN_DELAY_NUM_DEFAULT	0x16
110 #define DLL_RXCLK_NO_INVERTER		1
111 #define DLL_RXCLK_INVERTER		0
112 #define DLL_CMDOUT_TAPNUM_90_DEGREES	0x8
113 #define DLL_RXCLK_ORI_GATE		BIT(31)
114 #define DLL_CMDOUT_TAPNUM_FROM_SW	BIT(24)
115 #define DLL_CMDOUT_SRC_CLK_NEG		BIT(28)
116 #define DLL_CMDOUT_EN_SRC_CLK_NEG	BIT(29)
117 
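/* DLL is locked and the lock did not time out (value read from DLL_STATUS0) */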
118 #define DLL_LOCK_WO_TMOUT(x) \
119 	((((x) & DWCMSHC_EMMC_DLL_LOCKED) == DWCMSHC_EMMC_DLL_LOCKED) && \
120 	(((x) & DWCMSHC_EMMC_DLL_TIMEOUT) == 0))
121 
122 /* PHY register area pointer */
123 #define DWC_MSHC_PTR_PHY_R	0x300
124 
125 /* PHY general configuration */
126 #define PHY_CNFG_R			(DWC_MSHC_PTR_PHY_R + 0x00)
127 #define PHY_CNFG_RSTN_DEASSERT		0x1  /* Deassert PHY reset */
128 #define PHY_CNFG_PHY_PWRGOOD_MASK	BIT_MASK(1) /* bit [1] */
129 #define PHY_CNFG_PAD_SP_MASK		GENMASK(19, 16) /* bits [19:16] */
130 #define PHY_CNFG_PAD_SP			0x0c /* PMOS TX drive strength */
131 #define PHY_CNFG_PAD_SP_SG2042		0x09 /* PMOS TX drive strength for SG2042 */
132 #define PHY_CNFG_PAD_SN_MASK		GENMASK(23, 20) /* bits [23:20] */
133 #define PHY_CNFG_PAD_SN			0x0c /* NMOS TX drive strength */
134 #define PHY_CNFG_PAD_SN_SG2042		0x08 /* NMOS TX drive strength for SG2042 */
135 
136 /* PHY command/response pad settings */
137 #define PHY_CMDPAD_CNFG_R	(DWC_MSHC_PTR_PHY_R + 0x04)
138 
139 /* PHY data pad settings */
140 #define PHY_DATAPAD_CNFG_R	(DWC_MSHC_PTR_PHY_R + 0x06)
141 
142 /* PHY clock pad settings */
143 #define PHY_CLKPAD_CNFG_R	(DWC_MSHC_PTR_PHY_R + 0x08)
144 
145 /* PHY strobe pad settings */
146 #define PHY_STBPAD_CNFG_R	(DWC_MSHC_PTR_PHY_R + 0x0a)
147 
148 /* PHY reset pad settings */
149 #define PHY_RSTNPAD_CNFG_R	(DWC_MSHC_PTR_PHY_R + 0x0c)
150 
151 /* Bitfields are common for all pad settings */
152 #define PHY_PAD_RXSEL_1V8		0x1 /* Receiver type select for 1.8V */
153 #define PHY_PAD_RXSEL_3V3		0x2 /* Receiver type select for 3.3V */
154 
155 #define PHY_PAD_WEAKPULL_MASK		GENMASK(4, 3) /* bits [4:3] */
156 #define PHY_PAD_WEAKPULL_PULLUP		0x1 /* Weak pull up enabled */
157 #define PHY_PAD_WEAKPULL_PULLDOWN	0x2 /* Weak pull down enabled */
158 
159 #define PHY_PAD_TXSLEW_CTRL_P_MASK	GENMASK(8, 5) /* bits [8:5] */
160 #define PHY_PAD_TXSLEW_CTRL_P		0x3 /* Slew control for P-Type pad TX */
161 #define PHY_PAD_TXSLEW_CTRL_N_MASK	GENMASK(12, 9) /* bits [12:9] */
162 #define PHY_PAD_TXSLEW_CTRL_N		0x3 /* Slew control for N-Type pad TX */
163 #define PHY_PAD_TXSLEW_CTRL_N_SG2042	0x2 /* Slew control for N-Type pad TX for SG2042 */
164 
165 /* PHY CLK delay line settings */
166 #define PHY_SDCLKDL_CNFG_R		(DWC_MSHC_PTR_PHY_R + 0x1d)
167 #define PHY_SDCLKDL_CNFG_EXTDLY_EN	BIT(0)
168 #define PHY_SDCLKDL_CNFG_UPDATE		BIT(4) /* set before writing to SDCLKDL_DC */
169 
170 /* PHY CLK delay line delay code */
171 #define PHY_SDCLKDL_DC_R		(DWC_MSHC_PTR_PHY_R + 0x1e)
172 #define PHY_SDCLKDL_DC_INITIAL		0x40 /* initial delay code */
173 #define PHY_SDCLKDL_DC_DEFAULT		0x32 /* default delay code */
174 #define PHY_SDCLKDL_DC_HS400		0x18 /* delay code for HS400 mode */
175 
176 #define PHY_SMPLDL_CNFG_R		(DWC_MSHC_PTR_PHY_R + 0x20)
177 #define PHY_SMPLDL_CNFG_BYPASS_EN	BIT(1)
178 
179 /* PHY drift_cclk_rx delay line configuration setting */
180 #define PHY_ATDL_CNFG_R			(DWC_MSHC_PTR_PHY_R + 0x21)
181 #define PHY_ATDL_CNFG_INPSEL_MASK	GENMASK(3, 2) /* bits [3:2] */
182 #define PHY_ATDL_CNFG_INPSEL		0x3 /* delay line input source */
183 #define PHY_ATDL_CNFG_INPSEL_SG2042	0x2 /* delay line input source for SG2042 */
184 
185 /* PHY DLL control settings */
186 #define PHY_DLL_CTRL_R			(DWC_MSHC_PTR_PHY_R + 0x24)
187 #define PHY_DLL_CTRL_DISABLE		0x0 /* PHY DLL is disabled */
188 #define PHY_DLL_CTRL_ENABLE		0x1 /* PHY DLL is enabled */
189 
190 /* PHY DLL  configuration register 1 */
191 #define PHY_DLL_CNFG1_R			(DWC_MSHC_PTR_PHY_R + 0x25)
192 #define PHY_DLL_CNFG1_SLVDLY_MASK	GENMASK(5, 4) /* bits [5:4] */
193 #define PHY_DLL_CNFG1_SLVDLY		0x2 /* DLL slave update delay input */
194 #define PHY_DLL_CNFG1_WAITCYCLE		0x5 /* DLL wait cycle input */
195 
196 /* PHY DLL configuration register 2 */
197 #define PHY_DLL_CNFG2_R			(DWC_MSHC_PTR_PHY_R + 0x26)
198 #define PHY_DLL_CNFG2_JUMPSTEP		0xa /* DLL jump step input */
199 
200 /* PHY DLL master and slave delay line configuration settings */
201 #define PHY_DLLDL_CNFG_R		(DWC_MSHC_PTR_PHY_R + 0x28)
202 #define PHY_DLLDL_CNFG_SLV_INPSEL_MASK	GENMASK(6, 5) /* bits [6:5] */
203 #define PHY_DLLDL_CNFG_SLV_INPSEL	0x3 /* clock source select for slave DL */
204 
205 /* PHY DLL offset setting register */
206 #define PHY_DLL_OFFST_R			(DWC_MSHC_PTR_PHY_R + 0x29)
207 /* DLL LBT setting register */
208 #define PHY_DLLBT_CNFG_R		(DWC_MSHC_PTR_PHY_R + 0x2c)
209 /* DLL Status register */
210 #define PHY_DLL_STATUS_R		(DWC_MSHC_PTR_PHY_R + 0x2e)
211 #define DLL_LOCK_STS			BIT(0)/* DLL is locked and ready */
212 /*
213  * Captures the value of DLL's lock error status information. Value is valid
214  * only when LOCK_STS is set.
215  */
216 #define DLL_ERROR_STS			BIT(1)
217 
218 #define FLAG_IO_FIXED_1V8	BIT(0)
219 
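/* True if the range [addr, addr + len - 1] stays within one 128 MB region */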
220 #define BOUNDARY_OK(addr, len) \
221 	((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
222 
223 #define DWCMSHC_SDHCI_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
224 					 SDHCI_TRNS_BLK_CNT_EN | \
225 					 SDHCI_TRNS_DMA)
226 
227 /* SMC call for BlueField-3 eMMC RST_N */
228 #define BLUEFIELD_SMC_SET_EMMC_RST_N	0x82000007
229 
230 /* Eswin specific Registers */
231 #define EIC7700_CARD_CLK_STABLE		BIT(28)
232 #define EIC7700_INT_BCLK_STABLE		BIT(16)
233 #define EIC7700_INT_ACLK_STABLE		BIT(8)
234 #define EIC7700_INT_TMCLK_STABLE	BIT(0)
235 #define EIC7700_INT_CLK_STABLE		(EIC7700_CARD_CLK_STABLE | \
236 					 EIC7700_INT_ACLK_STABLE | \
237 					 EIC7700_INT_BCLK_STABLE | \
238 					 EIC7700_INT_TMCLK_STABLE)
239 #define EIC7700_HOST_VAL_STABLE		BIT(0)
240 
241 /* strength definition */
242 #define PHYCTRL_DR_33OHM		0xee
243 #define PHYCTRL_DR_40OHM		0xcc
244 #define PHYCTRL_DR_50OHM		0x88
245 #define PHYCTRL_DR_66OHM		0x44
246 #define PHYCTRL_DR_100OHM		0x00
247 
248 #define MAX_PHASE_CODE			0xff
249 #define TUNING_RANGE_THRESHOLD		40
250 #define PHY_CLK_MAX_DELAY_MASK		0x7f
251 #define PHY_DELAY_CODE_MAX		0x7f
252 #define PHY_DELAY_CODE_EMMC		0x17
253 #define PHY_DELAY_CODE_SD		0x55
254 
255 enum dwcmshc_rk_type {
256 	DWCMSHC_RK3568,
257 	DWCMSHC_RK3588,
258 };
259 
260 struct rk35xx_priv {
261 	struct reset_control *reset;
262 	enum dwcmshc_rk_type devtype;
263 	u8 txclk_tapnum;
264 };
265 
266 struct eic7700_priv {
267 	struct reset_control *reset;
268 	unsigned int drive_impedance;
269 };
270 
271 #define DWCMSHC_MAX_OTHER_CLKS 3
272 
273 struct dwcmshc_priv {
274 	struct clk	*bus_clk;
275 	int vendor_specific_area1; /* P_VENDOR_SPECIFIC_AREA1 reg */
276 	int vendor_specific_area2; /* P_VENDOR_SPECIFIC_AREA2 reg */
277 
278 	int num_other_clks;
279 	struct clk_bulk_data other_clks[DWCMSHC_MAX_OTHER_CLKS];
280 
281 	void *priv; /* pointer to SoC private stuff */
282 	u16 delay_line;
283 	u16 flags;
284 };
285 
286 struct dwcmshc_pltfm_data {
287 	const struct sdhci_pltfm_data pdata;
288 	const struct cqhci_host_ops *cqhci_host_ops;
289 	int (*init)(struct device *dev, struct sdhci_host *host, struct dwcmshc_priv *dwc_priv);
290 	void (*postinit)(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv);
291 };
292 
293 static void dwcmshc_enable_card_clk(struct sdhci_host *host)
294 {
295 	u16 ctrl;
296 
297 	ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
298 	if ((ctrl & SDHCI_CLOCK_INT_EN) && !(ctrl & SDHCI_CLOCK_CARD_EN)) {
299 		ctrl |= SDHCI_CLOCK_CARD_EN;
300 		sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
301 	}
302 }
303 
304 static int dwcmshc_get_enable_other_clks(struct device *dev,
305 					 struct dwcmshc_priv *priv,
306 					 int num_clks,
307 					 const char * const clk_ids[])
308 {
309 	int err;
310 
311 	if (num_clks > DWCMSHC_MAX_OTHER_CLKS)
312 		return -EINVAL;
313 
314 	for (int i = 0; i < num_clks; i++)
315 		priv->other_clks[i].id = clk_ids[i];
316 
317 	err = devm_clk_bulk_get_optional(dev, num_clks, priv->other_clks);
318 	if (err) {
319 		dev_err(dev, "failed to get clocks %d\n", err);
320 		return err;
321 	}
322 
323 	err = clk_bulk_prepare_enable(num_clks, priv->other_clks);
324 	if (err)
325 		dev_err(dev, "failed to enable clocks %d\n", err);
326 
327 	priv->num_other_clks = num_clks;
328 
329 	return err;
330 }
331 
332 /*
333  * If DMA addr spans 128MB boundary, we split the DMA transfer into two
334  * so that each DMA transfer doesn't exceed the boundary.
335  */
336 static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc,
337 				    dma_addr_t addr, int len, unsigned int cmd)
338 {
339 	int tmplen, offset;
340 
341 	if (likely(!len || BOUNDARY_OK(addr, len))) {
342 		sdhci_adma_write_desc(host, desc, addr, len, cmd);
343 		return;
344 	}
345 
346 	offset = addr & (SZ_128M - 1);
347 	tmplen = SZ_128M - offset;
348 	sdhci_adma_write_desc(host, desc, addr, tmplen, cmd);
349 
350 	addr += tmplen;
351 	len -= tmplen;
352 	sdhci_adma_write_desc(host, desc, addr, len, cmd);
353 }
354 
355 static void dwcmshc_reset(struct sdhci_host *host, u8 mask)
356 {
357 	sdhci_reset(host, mask);
358 
359 	/* The dwcmshc does not comply with the SDHCI specification
360 	 * requirement that a "Software Reset for CMD line" should clear
361 	 * 'Command Complete' in the Normal Interrupt Status Register.
362 	 * Clear the bit here to compensate for this quirk.
363 	 */
364 	if (mask & SDHCI_RESET_CMD)
365 		sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
366 }
367 
368 static unsigned int dwcmshc_get_max_clock(struct sdhci_host *host)
369 {
370 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
371 
372 	if (pltfm_host->clk)
373 		return sdhci_pltfm_clk_get_max_clock(host);
374 	else
375 		return pltfm_host->clock;
376 }
377 
378 static unsigned int rk35xx_get_max_clock(struct sdhci_host *host)
379 {
380 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
381 
382 	return clk_round_rate(pltfm_host->clk, ULONG_MAX);
383 }
384 
385 static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc,
386 				     struct mmc_request *mrq)
387 {
388 	struct sdhci_host *host = mmc_priv(mmc);
389 
390 	/*
391 	 * Whether V4 mode is enabled or not, the ARGUMENT2 register is a
392 	 * 32-bit block count register which doesn't support the stuff bits
393 	 * of the CMD23 argument on the dwcmshc host controller.
394 	 */
395 	if (mrq->sbc && (mrq->sbc->arg & SDHCI_DWCMSHC_ARG2_STUFF))
396 		host->flags &= ~SDHCI_AUTO_CMD23;
397 	else
398 		host->flags |= SDHCI_AUTO_CMD23;
399 }
400 
401 static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
402 {
403 	dwcmshc_check_auto_cmd23(mmc, mrq);
404 
405 	sdhci_request(mmc, mrq);
406 }
407 
408 static void dwcmshc_phy_init(struct sdhci_host *host)
409 {
410 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
411 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
412 	u32 rxsel = PHY_PAD_RXSEL_3V3;
413 	u32 val;
414 
415 	if (priv->flags & FLAG_IO_FIXED_1V8 ||
416 		host->mmc->ios.timing & MMC_SIGNAL_VOLTAGE_180)
417 		rxsel = PHY_PAD_RXSEL_1V8;
418 
419 	/* deassert phy reset & set tx drive strength */
420 	val = PHY_CNFG_RSTN_DEASSERT;
421 	val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP);
422 	val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN);
423 	sdhci_writel(host, val, PHY_CNFG_R);
424 
425 	/* disable delay line */
426 	sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R);
427 
428 	/* set delay line */
429 	sdhci_writeb(host, priv->delay_line, PHY_SDCLKDL_DC_R);
430 	sdhci_writeb(host, PHY_DLL_CNFG2_JUMPSTEP, PHY_DLL_CNFG2_R);
431 
432 	/* enable delay line */
433 	val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
434 	val &= ~(PHY_SDCLKDL_CNFG_UPDATE);
435 	sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
436 
437 	/* configure phy pads */
438 	val = rxsel;
439 	val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
440 	val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
441 	val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
442 	sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
443 	sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
444 	sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
445 
446 	val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
447 	val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
448 	sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
449 
450 	val = rxsel;
451 	val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
452 	val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
453 	val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N);
454 	sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
455 
456 	/* enable data strobe mode */
457 	if (rxsel == PHY_PAD_RXSEL_1V8) {
458 		u8 sel = FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK, PHY_DLLDL_CNFG_SLV_INPSEL);
459 
460 		sdhci_writeb(host, sel, PHY_DLLDL_CNFG_R);
461 	}
462 
463 	/* enable phy dll */
464 	sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R);
465 
466 }
467 
468 static void th1520_sdhci_set_phy(struct sdhci_host *host)
469 {
470 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
471 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
472 	u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
473 	u16 emmc_ctrl;
474 
475 	dwcmshc_phy_init(host);
476 
477 	if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
478 		emmc_ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
479 		emmc_ctrl |= DWCMSHC_CARD_IS_EMMC;
480 		sdhci_writew(host, emmc_ctrl, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
481 	}
482 
483 	sdhci_writeb(host, FIELD_PREP(PHY_DLL_CNFG1_SLVDLY_MASK, PHY_DLL_CNFG1_SLVDLY) |
484 		     PHY_DLL_CNFG1_WAITCYCLE, PHY_DLL_CNFG1_R);
485 }
486 
487 static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
488 				      unsigned int timing)
489 {
490 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
491 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
492 	u16 ctrl, ctrl_2;
493 
494 	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
495 	/* Select Bus Speed Mode for host */
496 	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
497 	if ((timing == MMC_TIMING_MMC_HS200) ||
498 	    (timing == MMC_TIMING_UHS_SDR104))
499 		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
500 	else if (timing == MMC_TIMING_UHS_SDR12)
501 		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
502 	else if ((timing == MMC_TIMING_UHS_SDR25) ||
503 		 (timing == MMC_TIMING_MMC_HS))
504 		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
505 	else if (timing == MMC_TIMING_UHS_SDR50)
506 		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
507 	else if ((timing == MMC_TIMING_UHS_DDR50) ||
508 		 (timing == MMC_TIMING_MMC_DDR52))
509 		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
510 	else if (timing == MMC_TIMING_MMC_HS400) {
511 		/* set CARD_IS_EMMC bit to enable Data Strobe for HS400 */
512 		ctrl = sdhci_readw(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
513 		ctrl |= DWCMSHC_CARD_IS_EMMC;
514 		sdhci_writew(host, ctrl, priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
515 
516 		ctrl_2 |= DWCMSHC_CTRL_HS400;
517 	}
518 
519 	if (priv->flags & FLAG_IO_FIXED_1V8)
520 		ctrl_2 |= SDHCI_CTRL_VDD_180;
521 	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
522 }
523 
524 static void th1520_set_uhs_signaling(struct sdhci_host *host,
525 				     unsigned int timing)
526 {
527 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
528 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
529 
530 	dwcmshc_set_uhs_signaling(host, timing);
531 	if (timing == MMC_TIMING_MMC_HS400)
532 		priv->delay_line = PHY_SDCLKDL_DC_HS400;
533 	else
534 		sdhci_writeb(host, 0, PHY_DLLDL_CNFG_R);
535 	th1520_sdhci_set_phy(host);
536 }
537 
538 static void dwcmshc_hs400_enhanced_strobe(struct mmc_host *mmc,
539 					  struct mmc_ios *ios)
540 {
541 	u32 vendor;
542 	struct sdhci_host *host = mmc_priv(mmc);
543 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
544 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
545 	int reg = priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL;
546 
547 	vendor = sdhci_readl(host, reg);
548 	if (ios->enhanced_strobe)
549 		vendor |= DWCMSHC_ENHANCED_STROBE;
550 	else
551 		vendor &= ~DWCMSHC_ENHANCED_STROBE;
552 
553 	sdhci_writel(host, vendor, reg);
554 }
555 
556 static int dwcmshc_execute_tuning(struct mmc_host *mmc, u32 opcode)
557 {
558 	int err = sdhci_execute_tuning(mmc, opcode);
559 	struct sdhci_host *host = mmc_priv(mmc);
560 
561 	if (err)
562 		return err;
563 
564 	/*
565 	 * Tuning can leave the IP in an active state (Buffer Read Enable bit
566 	 * set) which prevents the entry to low power states (i.e. S0i3). Data
567 	 * reset will clear it.
568 	 */
569 	sdhci_reset(host, SDHCI_RESET_DATA);
570 
571 	return 0;
572 }
573 
574 static u32 dwcmshc_cqe_irq_handler(struct sdhci_host *host, u32 intmask)
575 {
576 	int cmd_error = 0;
577 	int data_error = 0;
578 
579 	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
580 		return intmask;
581 
582 	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
583 
584 	return 0;
585 }
586 
587 static void dwcmshc_sdhci_cqe_enable(struct mmc_host *mmc)
588 {
589 	struct sdhci_host *host = mmc_priv(mmc);
590 	u8 ctrl;
591 
592 	sdhci_writew(host, DWCMSHC_SDHCI_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
593 
594 	sdhci_cqe_enable(mmc);
595 
596 	/*
597 	 * The "DesignWare Cores Mobile Storage Host Controller
598 	 * DWC_mshc / DWC_mshc_lite Databook" says:
599 	 * when "Host Version 4 Enable" is 1 in Host Control 2 register,
600 	 * SDHCI_CTRL_ADMA32 bit means ADMA2 is selected.
601 	 * Selection of 32-bit/64-bit System Addressing:
602 	 * either 32-bit or 64-bit system addressing is selected by
603 	 * 64-bit Addressing bit in Host Control 2 register.
604 	 *
605 	 * On the other hand the "DesignWare Cores Mobile Storage Host
606 	 * Controller DWC_mshc / DWC_mshc_lite User Guide" says, that we have to
607 	 * set DMA_SEL to ADMA2 _only_ mode in the Host Control 2 register.
608 	 */
609 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
610 	ctrl &= ~SDHCI_CTRL_DMA_MASK;
611 	ctrl |= SDHCI_CTRL_ADMA32;
612 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
613 }
614 
615 static void dwcmshc_set_tran_desc(struct cqhci_host *cq_host, u8 **desc,
616 				  dma_addr_t addr, int len, bool end, bool dma64)
617 {
618 	int tmplen, offset;
619 
620 	if (likely(!len || BOUNDARY_OK(addr, len))) {
621 		cqhci_set_tran_desc(*desc, addr, len, end, dma64);
622 		return;
623 	}
624 
625 	offset = addr & (SZ_128M - 1);
626 	tmplen = SZ_128M - offset;
627 	cqhci_set_tran_desc(*desc, addr, tmplen, false, dma64);
628 
629 	addr += tmplen;
630 	len -= tmplen;
631 	*desc += cq_host->trans_desc_len;
632 	cqhci_set_tran_desc(*desc, addr, len, end, dma64);
633 }
634 
635 static void dwcmshc_cqhci_dumpregs(struct mmc_host *mmc)
636 {
637 	sdhci_dumpregs(mmc_priv(mmc));
638 }
639 
640 static void rk35xx_sdhci_cqe_pre_enable(struct mmc_host *mmc)
641 {
642 	struct sdhci_host *host = mmc_priv(mmc);
643 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
644 	struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
645 	u32 reg;
646 
647 	/* Set Send Status Command Idle Timer to 10.66us (256 * 1 / 24) */
648 	reg = sdhci_readl(host, dwc_priv->vendor_specific_area2 + CQHCI_SSC1);
649 	reg = (reg & ~CQHCI_SSC1_CIT_MASK) | 0x0100;
650 	sdhci_writel(host, reg, dwc_priv->vendor_specific_area2 + CQHCI_SSC1);
651 
652 	reg = sdhci_readl(host, dwc_priv->vendor_specific_area2 + CQHCI_CFG);
653 	reg |= CQHCI_ENABLE;
654 	sdhci_writel(host, reg, dwc_priv->vendor_specific_area2 + CQHCI_CFG);
655 }
656 
657 static void rk35xx_sdhci_cqe_enable(struct mmc_host *mmc)
658 {
659 	struct sdhci_host *host = mmc_priv(mmc);
660 	u32 reg;
661 
662 	reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
663 	while (reg & SDHCI_DATA_AVAILABLE) {
664 		sdhci_readl(host, SDHCI_BUFFER);
665 		reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
666 	}
667 
668 	sdhci_writew(host, DWCMSHC_SDHCI_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
669 
670 	sdhci_cqe_enable(mmc);
671 }
672 
673 static void rk35xx_sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
674 {
675 	struct sdhci_host *host = mmc_priv(mmc);
676 	unsigned long flags;
677 	u32 ctrl;
678 
679 	/*
680 	 * During CQE command transfers, the command complete bit gets latched,
681 	 * so software should clear the command complete interrupt status when
682 	 * CQE is either halted or disabled. Otherwise an unexpected SDHCI
683 	 * legacy interrupt gets triggered when CQE is halted/disabled.
684 	 */
685 	spin_lock_irqsave(&host->lock, flags);
686 	ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
687 	ctrl |= SDHCI_INT_RESPONSE;
688 	sdhci_writel(host,  ctrl, SDHCI_INT_ENABLE);
689 	sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
690 	spin_unlock_irqrestore(&host->lock, flags);
691 
692 	sdhci_cqe_disable(mmc, recovery);
693 }
694 
695 static void rk35xx_sdhci_cqe_post_disable(struct mmc_host *mmc)
696 {
697 	struct sdhci_host *host = mmc_priv(mmc);
698 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
699 	struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
700 	u32 ctrl;
701 
702 	ctrl = sdhci_readl(host, dwc_priv->vendor_specific_area2 + CQHCI_CFG);
703 	ctrl &= ~CQHCI_ENABLE;
704 	sdhci_writel(host, ctrl, dwc_priv->vendor_specific_area2 + CQHCI_CFG);
705 }
706 
707 static void dwcmshc_rk3568_set_clock(struct sdhci_host *host, unsigned int clock)
708 {
709 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
710 	struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
711 	struct rk35xx_priv *priv = dwc_priv->priv;
712 	u8 txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT;
713 	u32 extra, reg;
714 	int err;
715 
716 	host->mmc->actual_clock = 0;
717 
718 	if (clock == 0) {
719 		/* Disable interface clock at initial state. */
720 		sdhci_set_clock(host, clock);
721 		return;
722 	}
723 
724 	/* Rockchip platforms only support 375 kHz for identification mode */
725 	if (clock <= 400000)
726 		clock = 375000;
727 
728 	err = clk_set_rate(pltfm_host->clk, clock);
729 	if (err)
730 		dev_err(mmc_dev(host->mmc), "fail to set clock %d", clock);
731 
732 	sdhci_set_clock(host, clock);
733 
734 	/* Disable cmd conflict check and internal clock gate */
735 	reg = dwc_priv->vendor_specific_area1 + DWCMSHC_HOST_CTRL3;
736 	extra = sdhci_readl(host, reg);
737 	extra &= ~BIT(0);
738 	extra |= BIT(4);
739 	sdhci_writel(host, extra, reg);
740 
741 	if (clock <= 52000000) {
742 		if (host->mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
743 		    host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
744 			dev_err(mmc_dev(host->mmc),
745 				"Can't reduce the clock below 52MHz in HS200/HS400 mode");
746 			return;
747 		}
748 
749 		/*
750 		 * Disable the DLL and reset both the sample and drive clocks.
751 		 * The bypass bit and start bit need to be set if DLL is not locked.
752 		 */
753 		sdhci_writel(host, DWCMSHC_EMMC_DLL_BYPASS | DWCMSHC_EMMC_DLL_START, DWCMSHC_EMMC_DLL_CTRL);
754 		sdhci_writel(host, DLL_RXCLK_ORI_GATE, DWCMSHC_EMMC_DLL_RXCLK);
755 		sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
756 		sdhci_writel(host, 0, DECMSHC_EMMC_DLL_CMDOUT);
757 		/*
758 		 * Before switching to HS400ES mode, the driver enables
759 		 * enhanced strobe first, so the enhanced strobe parameters
760 		 * of the PHY need to be configured here.
761 		 */
762 		extra = DWCMSHC_EMMC_DLL_DLYENA |
763 			DLL_STRBIN_DELAY_NUM_SEL |
764 			DLL_STRBIN_DELAY_NUM_DEFAULT << DLL_STRBIN_DELAY_NUM_OFFSET;
765 		sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
766 		return;
767 	}
768 
769 	/* Reset DLL */
770 	sdhci_writel(host, BIT(1), DWCMSHC_EMMC_DLL_CTRL);
771 	udelay(1);
772 	sdhci_writel(host, 0x0, DWCMSHC_EMMC_DLL_CTRL);
773 
774 	/*
775 	 * We shouldn't set DLL_RXCLK_NO_INVERTER for identification mode,
776 	 * but we must set it in higher speed modes.
777 	 */
778 	extra = DWCMSHC_EMMC_DLL_DLYENA;
779 	if (priv->devtype == DWCMSHC_RK3568)
780 		extra |= DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL;
781 	sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_RXCLK);
782 
783 	/* Init DLL settings */
784 	extra = 0x5 << DWCMSHC_EMMC_DLL_START_POINT |
785 		0x2 << DWCMSHC_EMMC_DLL_INC |
786 		DWCMSHC_EMMC_DLL_START;
787 	sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_CTRL);
788 	err = readl_poll_timeout(host->ioaddr + DWCMSHC_EMMC_DLL_STATUS0,
789 				 extra, DLL_LOCK_WO_TMOUT(extra), 1,
790 				 500 * USEC_PER_MSEC);
791 	if (err) {
792 		dev_err(mmc_dev(host->mmc), "DLL lock timeout!\n");
793 		return;
794 	}
795 
796 	extra = 0x1 << 16 | /* tune clock stop en */
797 		0x3 << 17 | /* pre-change delay */
798 		0x3 << 19;  /* post-change delay */
799 	sdhci_writel(host, extra, dwc_priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
800 
801 	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
802 	    host->mmc->ios.timing == MMC_TIMING_MMC_HS400)
803 		txclk_tapnum = priv->txclk_tapnum;
804 
805 	if ((priv->devtype == DWCMSHC_RK3588) && host->mmc->ios.timing == MMC_TIMING_MMC_HS400) {
806 		txclk_tapnum = DLL_TXCLK_TAPNUM_90_DEGREES;
807 
808 		extra = DLL_CMDOUT_SRC_CLK_NEG |
809 			DLL_CMDOUT_EN_SRC_CLK_NEG |
810 			DWCMSHC_EMMC_DLL_DLYENA |
811 			DLL_CMDOUT_TAPNUM_90_DEGREES |
812 			DLL_CMDOUT_TAPNUM_FROM_SW;
813 		sdhci_writel(host, extra, DECMSHC_EMMC_DLL_CMDOUT);
814 	}
815 
816 	extra = DWCMSHC_EMMC_DLL_DLYENA |
817 		DLL_TXCLK_TAPNUM_FROM_SW |
818 		DLL_RXCLK_NO_INVERTER << DWCMSHC_EMMC_DLL_RXCLK_SRCSEL |
819 		txclk_tapnum;
820 	sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_TXCLK);
821 
822 	extra = DWCMSHC_EMMC_DLL_DLYENA |
823 		DLL_STRBIN_TAPNUM_DEFAULT |
824 		DLL_STRBIN_TAPNUM_FROM_SW;
825 	sdhci_writel(host, extra, DWCMSHC_EMMC_DLL_STRBIN);
826 }
827 
828 static void rk35xx_sdhci_reset(struct sdhci_host *host, u8 mask)
829 {
830 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
831 	struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
832 	struct rk35xx_priv *priv = dwc_priv->priv;
833 	u32 extra = sdhci_readl(host, DECMSHC_EMMC_MISC_CON);
834 
835 	if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
836 		cqhci_deactivate(host->mmc);
837 
838 	if (mask & SDHCI_RESET_ALL && priv->reset) {
839 		reset_control_assert(priv->reset);
840 		udelay(1);
841 		reset_control_deassert(priv->reset);
842 	}
843 
844 	sdhci_reset(host, mask);
845 
846 	/* Enable INTERNAL CLOCK */
847 	sdhci_writel(host, MISC_INTCLK_EN | extra, DECMSHC_EMMC_MISC_CON);
848 }
849 
850 static int dwcmshc_rk35xx_init(struct device *dev, struct sdhci_host *host,
851 			       struct dwcmshc_priv *dwc_priv)
852 {
853 	static const char * const clk_ids[] = {"axi", "block", "timer"};
854 	struct rk35xx_priv *priv;
855 	int err;
856 
857 	priv = devm_kzalloc(dev, sizeof(struct rk35xx_priv), GFP_KERNEL);
858 	if (!priv)
859 		return -ENOMEM;
860 
861 	if (of_device_is_compatible(dev->of_node, "rockchip,rk3588-dwcmshc"))
862 		priv->devtype = DWCMSHC_RK3588;
863 	else
864 		priv->devtype = DWCMSHC_RK3568;
865 
866 	priv->reset = devm_reset_control_array_get_optional_exclusive(mmc_dev(host->mmc));
867 	if (IS_ERR(priv->reset)) {
868 		err = PTR_ERR(priv->reset);
869 		dev_err(mmc_dev(host->mmc), "failed to get reset control %d\n", err);
870 		return err;
871 	}
872 
873 	err = dwcmshc_get_enable_other_clks(mmc_dev(host->mmc), dwc_priv,
874 					    ARRAY_SIZE(clk_ids), clk_ids);
875 	if (err)
876 		return err;
877 
878 	if (of_property_read_u8(mmc_dev(host->mmc)->of_node, "rockchip,txclk-tapnum",
879 				&priv->txclk_tapnum))
880 		priv->txclk_tapnum = DLL_TXCLK_TAPNUM_DEFAULT;
881 
882 	/* Disable cmd conflict check */
883 	sdhci_writel(host, 0x0, dwc_priv->vendor_specific_area1 + DWCMSHC_HOST_CTRL3);
884 	/* Reset previous settings */
885 	sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_TXCLK);
886 	sdhci_writel(host, 0, DWCMSHC_EMMC_DLL_STRBIN);
887 
888 	dwc_priv->priv = priv;
889 
890 	return 0;
891 }
892 
893 static void dwcmshc_rk35xx_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
894 {
895 	/*
896 	 * Don't support high-speed bus modes with a low clock speed, as we
897 	 * cannot use the DLL in this condition.
898 	 */
899 	if (host->mmc->f_max <= 52000000) {
900 		dev_info(mmc_dev(host->mmc), "Disabling HS200/HS400, frequency too low (%d)\n",
901 			 host->mmc->f_max);
902 		host->mmc->caps2 &= ~(MMC_CAP2_HS200 | MMC_CAP2_HS400);
903 		host->mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR);
904 	}
905 }
906 
907 static void dwcmshc_rk3576_postinit(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
908 {
909 	struct device *dev = mmc_dev(host->mmc);
910 	int ret;
911 
912 	/*
913 	 * This works around the design of the RK3576's power domains, which
914 	 * makes the PD_NVM power domain, which the sdhci controller on the
915 	 * RK3576 is in, never come back the same way after it has been
916 	 * run-time suspended. This can happen during early kernel boot if no driver
917 	 * is using either PD_NVM or its child power domain PD_SDGMAC for a
918 	 * short moment, leading to it being turned off to save power. By
919 	 * keeping it on, sdhci suspending won't lead to PD_NVM becoming a
920 	 * candidate for getting turned off.
921 	 */
922 	ret = dev_pm_genpd_rpm_always_on(dev, true);
923 	if (ret && ret != -EOPNOTSUPP)
924 		dev_warn(dev, "failed to set PD rpm always on, SoC may hang later: %pe\n",
925 			 ERR_PTR(ret));
926 
927 	dwcmshc_rk35xx_postinit(host, dwc_priv);
928 }
929 
930 static int th1520_execute_tuning(struct sdhci_host *host, u32 opcode)
931 {
932 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
933 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
934 	u32 val = 0;
935 
936 	if (host->flags & SDHCI_HS400_TUNING)
937 		return 0;
938 
939 	sdhci_writeb(host, FIELD_PREP(PHY_ATDL_CNFG_INPSEL_MASK, PHY_ATDL_CNFG_INPSEL),
940 		     PHY_ATDL_CNFG_R);
941 	val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
942 
943 	/*
944 	 * configure tuning settings:
945 	 *  - center phase select code driven in block gap interval
946 	 *  - disable reporting of framing errors
947 	 *  - disable software managed tuning
948 	 *  - disable user selection of sampling window edges,
949 	 *    instead tuning calculated edges are used
950 	 */
951 	val &= ~(AT_CTRL_CI_SEL | AT_CTRL_RPT_TUNE_ERR | AT_CTRL_SW_TUNE_EN |
952 		 FIELD_PREP(AT_CTRL_WIN_EDGE_SEL_MASK, AT_CTRL_WIN_EDGE_SEL));
953 
954 	/*
955 	 * configure tuning settings:
956 	 *  - enable auto-tuning
957 	 *  - enable sampling window threshold
958 	 *  - stop clocks during phase code change
959 	 *  - set max latency in cycles between tx and rx clocks
960 	 *  - set max latency in cycles to switch output phase
961 	 *  - set max sampling window threshold value
962 	 */
963 	val |= AT_CTRL_AT_EN | AT_CTRL_SWIN_TH_EN | AT_CTRL_TUNE_CLK_STOP_EN;
964 	val |= FIELD_PREP(AT_CTRL_PRE_CHANGE_DLY_MASK, AT_CTRL_PRE_CHANGE_DLY);
965 	val |= FIELD_PREP(AT_CTRL_POST_CHANGE_DLY_MASK, AT_CTRL_POST_CHANGE_DLY);
966 	val |= FIELD_PREP(AT_CTRL_SWIN_TH_VAL_MASK, AT_CTRL_SWIN_TH_VAL);
967 
968 	sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
969 	val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
970 
971 	/* perform tuning */
972 	sdhci_start_tuning(host);
973 	host->tuning_loop_count = 128;
974 	host->tuning_err = __sdhci_execute_tuning(host, opcode);
975 	if (host->tuning_err) {
976 		/* disable auto-tuning upon tuning error */
977 		val &= ~AT_CTRL_AT_EN;
978 		sdhci_writel(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
979 		dev_err(mmc_dev(host->mmc), "tuning failed: %d\n", host->tuning_err);
980 		return -EIO;
981 	}
982 	sdhci_end_tuning(host);
983 
984 	return 0;
985 }
986 
987 static void th1520_sdhci_reset(struct sdhci_host *host, u8 mask)
988 {
989 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
990 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
991 	u16 ctrl_2;
992 
993 	dwcmshc_reset(host, mask);
994 
995 	if (priv->flags & FLAG_IO_FIXED_1V8) {
996 		ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
997 		if (!(ctrl_2 & SDHCI_CTRL_VDD_180)) {
998 			ctrl_2 |= SDHCI_CTRL_VDD_180;
999 			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1000 		}
1001 	}
1002 }
1003 
1004 static int th1520_init(struct device *dev,
1005 		       struct sdhci_host *host,
1006 		       struct dwcmshc_priv *dwc_priv)
1007 {
1008 	dwc_priv->delay_line = PHY_SDCLKDL_DC_DEFAULT;
1009 
1010 	if (device_property_read_bool(dev, "mmc-ddr-1_8v") ||
1011 	    device_property_read_bool(dev, "mmc-hs200-1_8v") ||
1012 	    device_property_read_bool(dev, "mmc-hs400-1_8v"))
1013 		dwc_priv->flags |= FLAG_IO_FIXED_1V8;
1014 	else
1015 		dwc_priv->flags &= ~FLAG_IO_FIXED_1V8;
1016 
1017 	/*
1018 	 * start_signal_voltage_switch() will try 3.3V first
1019 	 * then 1.8V. Use SDHCI_SIGNALING_180 rather than
1020 	 * SDHCI_SIGNALING_330 to avoid setting voltage to 3.3V
1021 	 * in sdhci_start_signal_voltage_switch().
1022 	 */
1023 	if (dwc_priv->flags & FLAG_IO_FIXED_1V8) {
1024 		host->flags &= ~SDHCI_SIGNALING_330;
1025 		host->flags |=  SDHCI_SIGNALING_180;
1026 	}
1027 
1028 	sdhci_enable_v4_mode(host);
1029 
1030 	return 0;
1031 }
1032 
1033 static void cv18xx_sdhci_reset(struct sdhci_host *host, u8 mask)
1034 {
1035 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1036 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
1037 	u32 val, emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
1038 
1039 	dwcmshc_reset(host, mask);
1040 
1041 	if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
1042 		val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
1043 		val |= CV18XX_EMMC_FUNC_EN;
1044 		sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
1045 	}
1046 
1047 	val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
1048 	val |= CV18XX_LATANCY_1T;
1049 	sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
1050 
1051 	val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
1052 	val |= CV18XX_PHY_TX_BPS;
1053 	sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
1054 
1055 	val =  (FIELD_PREP(CV18XX_PHY_TX_DLY_MSK, 0) |
1056 		FIELD_PREP(CV18XX_PHY_TX_SRC_MSK, CV18XX_PHY_TX_SRC_INVERT_CLK_TX) |
1057 		FIELD_PREP(CV18XX_PHY_RX_DLY_MSK, 0) |
1058 		FIELD_PREP(CV18XX_PHY_RX_SRC_MSK, CV18XX_PHY_RX_SRC_INVERT_RX_CLK));
1059 	sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_TX_RX_DLY);
1060 }
1061 
1062 static void cv18xx_sdhci_set_tap(struct sdhci_host *host, int tap)
1063 {
1064 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1065 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
1066 	u16 clk;
1067 	u32 val;
1068 
1069 	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1070 	clk &= ~SDHCI_CLOCK_CARD_EN;
1071 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1072 
1073 	val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
1074 	val &= ~CV18XX_LATANCY_1T;
1075 	sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
1076 
1077 	val =  (FIELD_PREP(CV18XX_PHY_TX_DLY_MSK, 0) |
1078 		FIELD_PREP(CV18XX_PHY_TX_SRC_MSK, CV18XX_PHY_TX_SRC_INVERT_CLK_TX) |
1079 		FIELD_PREP(CV18XX_PHY_RX_DLY_MSK, tap));
1080 	sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_TX_RX_DLY);
1081 
1082 	sdhci_writel(host, 0, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
1083 
1084 	clk |= SDHCI_CLOCK_CARD_EN;
1085 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1086 	usleep_range(1000, 2000);
1087 }
1088 
1089 static int cv18xx_retry_tuning(struct mmc_host *mmc, u32 opcode, int *cmd_error)
1090 {
1091 	int ret, retry = 0;
1092 
1093 	while (retry < CV18XX_RETRY_TUNING_MAX) {
1094 		ret = mmc_send_tuning(mmc, opcode, NULL);
1095 		if (ret)
1096 			return ret;
1097 		retry++;
1098 	}
1099 
1100 	return 0;
1101 }
1102 
1103 static void cv18xx_sdhci_post_tuning(struct sdhci_host *host)
1104 {
1105 	u32 val;
1106 
1107 	val = sdhci_readl(host, SDHCI_INT_STATUS);
1108 	val |= SDHCI_INT_DATA_AVAIL;
1109 	sdhci_writel(host, val, SDHCI_INT_STATUS);
1110 
1111 	dwcmshc_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1112 }
1113 
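/*
 * Sweep the RX delay tap over [0, CV18XX_TUNE_MAX), record the widest
 * window of taps that pass tuning, then settle on that window's midpoint.
 */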
1114 static int cv18xx_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1115 {
1116 	int min, max, avg, ret;
1117 	int win_length, target_min, target_max, target_win_length;
1118 
1119 	min = max = 0;
1120 	target_win_length = 0;
1121 
1122 	sdhci_reset_tuning(host);
1123 
1124 	while (max < CV18XX_TUNE_MAX) {
1125 		/* first find the minimum delay which can pass tuning */
1126 		while (min < CV18XX_TUNE_MAX) {
1127 			cv18xx_sdhci_set_tap(host, min);
1128 			if (!cv18xx_retry_tuning(host->mmc, opcode, NULL))
1129 				break;
1130 			min += CV18XX_TUNE_STEP;
1131 		}
1132 
1133 		/* then find the maximum delay which can still pass tuning */
1134 		max = min + CV18XX_TUNE_STEP;
1135 		while (max < CV18XX_TUNE_MAX) {
1136 			cv18xx_sdhci_set_tap(host, max);
1137 			if (cv18xx_retry_tuning(host->mmc, opcode, NULL)) {
1138 				max -= CV18XX_TUNE_STEP;
1139 				break;
1140 			}
1141 			max += CV18XX_TUNE_STEP;
1142 		}
1143 
1144 		win_length = max - min + 1;
1145 		/* get the largest pass window */
1146 		if (win_length > target_win_length) {
1147 			target_win_length = win_length;
1148 			target_min = min;
1149 			target_max = max;
1150 		}
1151 
1152 		/* continue to find the next pass window */
1153 		min = max + CV18XX_TUNE_STEP;
1154 	}
1155 
1156 	cv18xx_sdhci_post_tuning(host);
1157 
1158 	/* use average delay to get the best timing */
1159 	avg = (target_min + target_max) / 2;
1160 	cv18xx_sdhci_set_tap(host, avg);
1161 	ret = mmc_send_tuning(host->mmc, opcode, NULL);
1162 
1163 	dev_dbg(mmc_dev(host->mmc), "tuning %s at 0x%x ret %d\n",
1164 		ret ? "failed" : "passed", avg, ret);
1165 
1166 	return ret;
1167 }
1168 
1169 static inline void sg2042_sdhci_phy_init(struct sdhci_host *host)
1170 {
1171 	u32 val;
1172 
1173 	/* Assert phy reset & set tx drive strength */
1174 	val = sdhci_readl(host, PHY_CNFG_R);
1175 	val &= ~PHY_CNFG_RSTN_DEASSERT;
1176 	val |= FIELD_PREP(PHY_CNFG_PHY_PWRGOOD_MASK, 1);
1177 	val |= FIELD_PREP(PHY_CNFG_PAD_SP_MASK, PHY_CNFG_PAD_SP_SG2042);
1178 	val |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, PHY_CNFG_PAD_SN_SG2042);
1179 	sdhci_writel(host, val, PHY_CNFG_R);
1180 
1181 	/* Configure phy pads */
1182 	val = PHY_PAD_RXSEL_3V3;
1183 	val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
1184 	val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
1185 	val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1186 	sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
1187 	sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
1188 	sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
1189 
1190 	val = PHY_PAD_RXSEL_3V3;
1191 	val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
1192 	val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1193 	sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
1194 
1195 	val = PHY_PAD_RXSEL_3V3;
1196 	val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLDOWN);
1197 	val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_P);
1198 	val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1199 	sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
1200 
1201 	/* Configure delay line */
1202 	/* Enable fixed delay */
1203 	sdhci_writeb(host, PHY_SDCLKDL_CNFG_EXTDLY_EN, PHY_SDCLKDL_CNFG_R);
1204 	/*
1205 	 * Set delay line.
1206 	 * It's recommended that bit UPDATE_DC[4] is 1 when SDCLKDL_DC is being written.
1207 	 * Ensure UPDATE_DC[4] is '0' when not updating code.
1208 	 */
1209 	val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
1210 	val |= PHY_SDCLKDL_CNFG_UPDATE;
1211 	sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
1212 	/* Add 10 * 70ps = 0.7ns for output delay */
1213 	sdhci_writeb(host, 10, PHY_SDCLKDL_DC_R);
1214 	val = sdhci_readb(host, PHY_SDCLKDL_CNFG_R);
1215 	val &= ~(PHY_SDCLKDL_CNFG_UPDATE);
1216 	sdhci_writeb(host, val, PHY_SDCLKDL_CNFG_R);
1217 
1218 	/* Set SMPLDL_CNFG, Bypass */
1219 	sdhci_writeb(host, PHY_SMPLDL_CNFG_BYPASS_EN, PHY_SMPLDL_CNFG_R);
1220 
1221 	/* Set ATDL_CNFG; the tuning clock is not used for init */
1222 	val = FIELD_PREP(PHY_ATDL_CNFG_INPSEL_MASK, PHY_ATDL_CNFG_INPSEL_SG2042);
1223 	sdhci_writeb(host, val, PHY_ATDL_CNFG_R);
1224 
1225 	/* Deassert phy reset */
1226 	val = sdhci_readl(host, PHY_CNFG_R);
1227 	val |= PHY_CNFG_RSTN_DEASSERT;
1228 	sdhci_writel(host, val, PHY_CNFG_R);
1229 }
1230 
1231 static void sg2042_sdhci_reset(struct sdhci_host *host, u8 mask)
1232 {
1233 	sdhci_reset(host, mask);
1234 
1235 	if (mask & SDHCI_RESET_ALL)
1236 		sg2042_sdhci_phy_init(host);
1237 }
1238 
1239 static int sg2042_init(struct device *dev, struct sdhci_host *host,
1240 		       struct dwcmshc_priv *dwc_priv)
1241 {
1242 	static const char * const clk_ids[] = {"timer"};
1243 
1244 	return dwcmshc_get_enable_other_clks(mmc_dev(host->mmc), dwc_priv,
1245 					     ARRAY_SIZE(clk_ids), clk_ids);
1246 }
1247 
1248 static void sdhci_eic7700_set_clock(struct sdhci_host *host, unsigned int clock)
1249 {
1250 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1251 	u16 clk;
1252 
1253 	host->mmc->actual_clock = clock;
1254 
1255 	if (clock == 0) {
1256 		sdhci_set_clock(host, clock);
1257 		return;
1258 	}
1259 
1260 	clk_set_rate(pltfm_host->clk, clock);
1261 
1262 	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1263 	clk |= SDHCI_CLOCK_INT_EN;
1264 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1265 
1266 	dwcmshc_enable_card_clk(host);
1267 }
1268 
1269 static void sdhci_eic7700_config_phy_delay(struct sdhci_host *host, int delay)
1270 {
1271 	delay &= PHY_CLK_MAX_DELAY_MASK;
1272 
1273 	/* phy clk delay line config */
1274 	sdhci_writeb(host, PHY_SDCLKDL_CNFG_UPDATE, PHY_SDCLKDL_CNFG_R);
1275 	sdhci_writeb(host, delay, PHY_SDCLKDL_DC_R);
1276 	sdhci_writeb(host, 0x0, PHY_SDCLKDL_CNFG_R);
1277 }
1278 
1279 static void sdhci_eic7700_config_phy(struct sdhci_host *host)
1280 {
1281 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1282 	struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
1283 	u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
1284 	struct eic7700_priv *priv = dwc_priv->priv;
1285 	unsigned int val, drv;
1286 
1287 	drv = FIELD_PREP(PHY_CNFG_PAD_SP_MASK, priv->drive_impedance & 0xF);
1288 	drv |= FIELD_PREP(PHY_CNFG_PAD_SN_MASK, (priv->drive_impedance >> 4) & 0xF);
1289 
1290 	if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
1291 		val = sdhci_readw(host, dwc_priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
1292 		val |= DWCMSHC_CARD_IS_EMMC;
1293 		sdhci_writew(host, val, dwc_priv->vendor_specific_area1 + DWCMSHC_EMMC_CONTROL);
1294 	}
1295 
1296 	/* reset phy, config phy's pad */
1297 	sdhci_writel(host, drv | ~PHY_CNFG_RSTN_DEASSERT, PHY_CNFG_R);
1298 
1299 	/* configure phy pads */
1300 	val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1301 	val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1302 	val |= FIELD_PREP(PHY_PAD_WEAKPULL_MASK, PHY_PAD_WEAKPULL_PULLUP);
1303 	val |= PHY_PAD_RXSEL_1V8;
1304 	sdhci_writew(host, val, PHY_CMDPAD_CNFG_R);
1305 	sdhci_writew(host, val, PHY_DATAPAD_CNFG_R);
1306 	sdhci_writew(host, val, PHY_RSTNPAD_CNFG_R);
1307 
1308 	/* Clock PAD Setting */
1309 	val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1310 	val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1311 	sdhci_writew(host, val, PHY_CLKPAD_CNFG_R);
1312 
1313 	/* PHY strobe PAD setting (EMMC only) */
1314 	if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
1315 		val = FIELD_PREP(PHY_PAD_TXSLEW_CTRL_P_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1316 		val |= FIELD_PREP(PHY_PAD_TXSLEW_CTRL_N_MASK, PHY_PAD_TXSLEW_CTRL_N_SG2042);
1317 		val |= PHY_PAD_RXSEL_1V8;
1318 		sdhci_writew(host, val, PHY_STBPAD_CNFG_R);
1319 	}
1320 	usleep_range(2000, 3000);
1321 	sdhci_writel(host, drv | PHY_CNFG_RSTN_DEASSERT, PHY_CNFG_R);
1322 	sdhci_eic7700_config_phy_delay(host, dwc_priv->delay_line);
1323 }
1324 
1325 static void sdhci_eic7700_reset(struct sdhci_host *host, u8 mask)
1326 {
1327 	sdhci_reset(host, mask);
1328 
1329 	/* after a full reset, the phy's config will be cleared */
1330 	if (mask == SDHCI_RESET_ALL)
1331 		sdhci_eic7700_config_phy(host);
1332 }
1333 
1334 static int sdhci_eic7700_reset_init(struct device *dev, struct eic7700_priv *priv)
1335 {
1336 	int ret;
1337 
1338 	priv->reset = devm_reset_control_array_get_optional_exclusive(dev);
1339 	if (IS_ERR(priv->reset)) {
1340 		ret = PTR_ERR(priv->reset);
1341 		dev_err(dev, "failed to get reset control %d\n", ret);
1342 		return ret;
1343 	}
1344 
1345 	ret = reset_control_assert(priv->reset);
1346 	if (ret) {
1347 		dev_err(dev, "Failed to assert reset signals: %d\n", ret);
1348 		return ret;
1349 	}
1350 	usleep_range(2000, 2100);
1351 	ret = reset_control_deassert(priv->reset);
1352 	if (ret) {
1353 		dev_err(dev, "Failed to deassert reset signals: %d\n", ret);
1354 		return ret;
1355 	}
1356 
1357 	return ret;
1358 }
1359 
1360 static unsigned int eic7700_convert_drive_impedance_ohm(struct device *dev, unsigned int dr_ohm)
1361 {
1362 	switch (dr_ohm) {
1363 	case 100:
1364 		return PHYCTRL_DR_100OHM;
1365 	case 66:
1366 		return PHYCTRL_DR_66OHM;
1367 	case 50:
1368 		return PHYCTRL_DR_50OHM;
1369 	case 40:
1370 		return PHYCTRL_DR_40OHM;
1371 	case 33:
1372 		return PHYCTRL_DR_33OHM;
1373 	}
1374 
1375 	dev_warn(dev, "Invalid value %u for drive-impedance-ohms.\n", dr_ohm);
1376 	return PHYCTRL_DR_50OHM;
1377 }
1378 
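/*
 * Sweep the PHY delay code over [0, PHY_DELAY_CODE_MAX], find a passing
 * window and program its midpoint; restore the default delay line value
 * if no delay code passes.
 */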
1379 static int sdhci_eic7700_delay_tuning(struct sdhci_host *host, u32 opcode)
1380 {
1381 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1382 	struct dwcmshc_priv *dwc_priv = sdhci_pltfm_priv(pltfm_host);
1383 	int delay_min = -1;
1384 	int delay_max = -1;
1385 	int cmd_error = 0;
1386 	int delay = 0;
1387 	int i = 0;
1388 	int ret;
1389 
1390 	for (i = 0; i <= PHY_DELAY_CODE_MAX; i++) {
1391 		sdhci_eic7700_config_phy_delay(host, i);
1392 		ret = mmc_send_tuning(host->mmc, opcode, &cmd_error);
1393 		if (ret) {
1394 			host->ops->reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1395 			usleep_range(200, 210);
1396 			if (delay_min != -1 && delay_max != -1)
1397 				break;
1398 		} else {
1399 			if (delay_min == -1) {
1400 				delay_min = i;
1401 				continue;
1402 			} else {
1403 				delay_max = i;
1404 				continue;
1405 			}
1406 		}
1407 	}
1408 	if (delay_min == -1 && delay_max == -1) {
1409 		pr_err("%s: delay code tuning failed!\n", mmc_hostname(host->mmc));
1410 		sdhci_eic7700_config_phy_delay(host, dwc_priv->delay_line);
1411 		return ret;
1412 	}
1413 
1414 	delay = (delay_min + delay_max) / 2;
1415 	sdhci_eic7700_config_phy_delay(host, delay);
1416 
1417 	return 0;
1418 }
1419 
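/*
 * Sweep the centered phase code in AT_STAT: for SD cards pick the midpoint
 * of the widest passing window, for eMMC pick the midpoint of the first
 * passing window, then program the selected code (and re-verify it for SD).
 */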
1420 static int sdhci_eic7700_phase_code_tuning(struct sdhci_host *host, u32 opcode)
1421 {
1422 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1423 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
1424 	u32 sd_caps = MMC_CAP2_NO_MMC | MMC_CAP2_NO_SDIO;
1425 	int phase_code = -1;
1426 	int code_range = -1;
1427 	bool is_sd = false;
1428 	int code_min = -1;
1429 	int code_max = -1;
1430 	int cmd_error = 0;
1431 	int ret = 0;
1432 	int i = 0;
1433 
1434 	if ((host->mmc->caps2 & sd_caps) == sd_caps)
1435 		is_sd = true;
1436 
1437 	for (i = 0; i <= MAX_PHASE_CODE; i++) {
1438 		/* Centered Phase code */
1439 		sdhci_writew(host, i, priv->vendor_specific_area1 + DWCMSHC_AT_STAT);
1440 		ret = mmc_send_tuning(host->mmc, opcode, &cmd_error);
1441 		host->ops->reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1442 
1443 		if (ret) {
1444 			/* SD specific range tracking */
1445 			if (is_sd && code_min != -1 && code_max != -1) {
1446 				if (code_max - code_min > code_range) {
1447 					code_range = code_max - code_min;
1448 					phase_code = (code_min + code_max) / 2;
1449 					if (code_range > TUNING_RANGE_THRESHOLD)
1450 						break;
1451 				}
1452 				code_min = -1;
1453 				code_max = -1;
1454 			}
1455 			/* EMMC breaks after first valid range */
1456 			if (!is_sd && code_min != -1 && code_max != -1)
1457 				break;
1458 		} else {
1459 			/* Track valid phase code range */
1460 			if (code_min == -1) {
1461 				code_min = i;
1462 				if (!is_sd)
1463 					continue;
1464 			}
1465 			code_max = i;
1466 			if (is_sd && i == MAX_PHASE_CODE) {
1467 				if (code_max - code_min > code_range) {
1468 					code_range = code_max - code_min;
1469 					phase_code = (code_min + code_max) / 2;
1470 				}
1471 			}
1472 		}
1473 	}
1474 
1475 	/* Handle tuning failure case */
1476 	if ((is_sd && phase_code == -1) ||
1477 	    (!is_sd && code_min == -1 && code_max == -1)) {
1478 		pr_err("%s: phase code tuning failed!\n", mmc_hostname(host->mmc));
1479 		sdhci_writew(host, 0, priv->vendor_specific_area1 + DWCMSHC_AT_STAT);
1480 		return -EIO;
1481 	}
1482 	if (!is_sd)
1483 		phase_code = (code_min + code_max) / 2;
1484 
1485 	sdhci_writew(host, phase_code, priv->vendor_specific_area1 + DWCMSHC_AT_STAT);
1486 
1487 	/* SD specific final verification */
1488 	if (is_sd) {
1489 		ret = mmc_send_tuning(host->mmc, opcode, &cmd_error);
1490 		host->ops->reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1491 		if (ret) {
1492 			pr_err("%s: Final phase code 0x%x verification failed!\n",
1493 			       mmc_hostname(host->mmc), phase_code);
1494 			return ret;
1495 		}
1496 	}
1497 
1498 	return 0;
1499 }
1500 
1501 static int sdhci_eic7700_executing_tuning(struct sdhci_host *host, u32 opcode)
1502 {
1503 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1504 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
1505 	u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
1506 	int ret = 0;
1507 	u16 ctrl;
1508 	u32 val;
1509 
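	/* Deselect the tuned clock so the fixed clock is used while tuning runs */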
1510 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1511 	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1512 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
1513 
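	/* Enable software-managed tuning in the auto-tuning control register */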
1514 	val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
1515 	val |= AT_CTRL_SW_TUNE_EN;
1516 	sdhci_writew(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
1517 
1518 	sdhci_writew(host, 0, priv->vendor_specific_area1 + DWCMSHC_AT_STAT);
1519 	sdhci_writew(host, 0x0, SDHCI_CMD_DATA);
1520 
1521 	if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
1522 		ret = sdhci_eic7700_delay_tuning(host, opcode);
1523 		if (ret)
1524 			return ret;
1525 	}
1526 
1527 	ret = sdhci_eic7700_phase_code_tuning(host, opcode);
1528 	if (ret)
1529 		return ret;
1530 
1531 	return 0;
1532 }
1533 
1534 static void sdhci_eic7700_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
1535 {
1536 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1537 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
1538 	u8 status;
1539 	u32 val;
1540 	int ret;
1541 
1542 	dwcmshc_set_uhs_signaling(host, timing);
1543 
1544 	/* The DLL must be locked when running HS400 at 200MHz */
1545 	if (timing == MMC_TIMING_MMC_HS400 && host->clock == 200000000) {
1546 		val = sdhci_readl(host, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
1547 		val &= ~(FIELD_PREP(AT_CTRL_POST_CHANGE_DLY_MASK, AT_CTRL_POST_CHANGE_DLY));
1548 		/* 2-cycle latency */
1549 		val |= FIELD_PREP(AT_CTRL_POST_CHANGE_DLY_MASK, 0x2);
1550 		sdhci_writew(host, val, priv->vendor_specific_area1 + DWCMSHC_EMMC_ATCTRL);
1551 
1552 		sdhci_writeb(host, FIELD_PREP(PHY_DLL_CNFG1_SLVDLY_MASK, PHY_DLL_CNFG1_SLVDLY) |
1553 			     0x3, PHY_DLL_CNFG1_R);/* DLL wait cycle input */
1554 		/* DLL jump step input */
1555 		sdhci_writeb(host, 0x02, PHY_DLL_CNFG2_R);
1556 		sdhci_writeb(host, FIELD_PREP(PHY_DLLDL_CNFG_SLV_INPSEL_MASK,
1557 					      PHY_DLLDL_CNFG_SLV_INPSEL), PHY_DLLDL_CNFG_R);
1558 		/* Sets the value of DLL's offset input */
1559 		sdhci_writeb(host, 0x00, PHY_DLL_OFFST_R);
1560 		/*
1561 		 * Sets the value of DLL's olbt loadval input. Controls the Ibt
1562 		 * timer's timeout value at which DLL runs a revalidation cycle.
1563 		 */
1564 		sdhci_writew(host, 0xffff, PHY_DLLBT_CNFG_R);
1565 		sdhci_writeb(host, PHY_DLL_CTRL_ENABLE, PHY_DLL_CTRL_R);
1566 		usleep_range(100, 110);
1567 
1568 		ret = read_poll_timeout(sdhci_readb, status, status & DLL_LOCK_STS, 100, 1000000,
1569 					false, host, PHY_DLL_STATUS_R);
1570 		if (ret) {
1571 			pr_err("%s: DLL lock timeout! status: 0x%x\n",
1572 			       mmc_hostname(host->mmc), status);
1573 			return;
1574 		}
1575 
1576 		status = sdhci_readb(host, PHY_DLL_STATUS_R);
1577 		if (status & DLL_ERROR_STS) {
1578 			pr_err("%s: DLL lock failed! err_status: 0x%x\n",
1579 			       mmc_hostname(host->mmc), status);
1580 		}
1581 	}
1582 }
1583 
1584 static void sdhci_eic7700_set_uhs_wrapper(struct sdhci_host *host, unsigned int timing)
1585 {
1586 	u32 sd_caps = MMC_CAP2_NO_MMC | MMC_CAP2_NO_SDIO;
1587 
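	/* SD-only slots take the generic UHS path; eMMC needs the DLL setup */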
1588 	if ((host->mmc->caps2 & sd_caps) == sd_caps)
1589 		sdhci_set_uhs_signaling(host, timing);
1590 	else
1591 		sdhci_eic7700_set_uhs_signaling(host, timing);
1592 }
1593 
1594 static int eic7700_init(struct device *dev, struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
1595 {
1596 	u32 emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
1597 	unsigned int val, hsp_int_status, hsp_pwr_ctrl;
1598 	static const char * const clk_ids[] = {"axi"};
1599 	struct of_phandle_args args;
1600 	struct eic7700_priv *priv;
1601 	struct regmap *hsp_regmap;
1602 	int ret;
1603 
1604 	priv = devm_kzalloc(dev, sizeof(struct eic7700_priv), GFP_KERNEL);
1605 	if (!priv)
1606 		return -ENOMEM;
1607 
1608 	dwc_priv->priv = priv;
1609 
1610 	ret = sdhci_eic7700_reset_init(dev, dwc_priv->priv);
1611 	if (ret) {
1612 		dev_err(dev, "failed to reset\n");
1613 		return ret;
1614 	}
1615 
1616 	ret = dwcmshc_get_enable_other_clks(mmc_dev(host->mmc), dwc_priv,
1617 					    ARRAY_SIZE(clk_ids), clk_ids);
1618 	if (ret)
1619 		return ret;
1620 
1621 	ret = of_parse_phandle_with_fixed_args(dev->of_node, "eswin,hsp-sp-csr", 2, 0, &args);
1622 	if (ret) {
1623 		dev_err(dev, "Failed to parse 'eswin,hsp-sp-csr' phandle (%d)\n", ret);
1624 		return ret;
1625 	}
1626 
1627 	hsp_regmap = syscon_node_to_regmap(args.np);
1628 	if (IS_ERR(hsp_regmap)) {
1629 		dev_err(dev, "Failed to get regmap for 'eswin,hsp-sp-csr'\n");
1630 		of_node_put(args.np);
1631 		return PTR_ERR(hsp_regmap);
1632 	}
1633 	hsp_int_status = args.args[0];
1634 	hsp_pwr_ctrl = args.args[1];
1635 	of_node_put(args.np);
1636 	/*
1637 	 * Assert clock stability: write EIC7700_INT_CLK_STABLE to hsp_int_status.
1638 	 * This signals to the eMMC controller that platform clocks (card, ACLK,
1639 	 * BCLK, TMCLK) are enabled and stable.
1640 	 */
1641 	regmap_write(hsp_regmap, hsp_int_status, EIC7700_INT_CLK_STABLE);
1642 	/*
1643 	 * Assert voltage stability: write EIC7700_HOST_VAL_STABLE to hsp_pwr_ctrl.
1644 	 * This signals that VDD is stable and permits transition to high-speed
1645 	 * modes (e.g., UHS-I).
1646 	 */
1647 	regmap_write(hsp_regmap, hsp_pwr_ctrl, EIC7700_HOST_VAL_STABLE);
1648 
1649 	if ((host->mmc->caps2 & emmc_caps) == emmc_caps)
1650 		dwc_priv->delay_line = PHY_DELAY_CODE_EMMC;
1651 	else
1652 		dwc_priv->delay_line = PHY_DELAY_CODE_SD;
1653 
1654 	if (!of_property_read_u32(dev->of_node, "eswin,drive-impedance-ohms", &val))
1655 		priv->drive_impedance = eic7700_convert_drive_impedance_ohm(dev, val);
1656 	return 0;
1657 }
1658 
1659 static const struct sdhci_ops sdhci_dwcmshc_ops = {
1660 	.set_clock		= sdhci_set_clock,
1661 	.set_bus_width		= sdhci_set_bus_width,
1662 	.set_uhs_signaling	= dwcmshc_set_uhs_signaling,
1663 	.get_max_clock		= dwcmshc_get_max_clock,
1664 	.reset			= dwcmshc_reset,
1665 	.adma_write_desc	= dwcmshc_adma_write_desc,
1666 	.irq			= dwcmshc_cqe_irq_handler,
1667 };
1668 
1669 #ifdef CONFIG_ACPI
1670 static void dwcmshc_bf3_hw_reset(struct sdhci_host *host)
1671 {
1672 	struct arm_smccc_res res = { 0 };
1673 
1674 	arm_smccc_smc(BLUEFIELD_SMC_SET_EMMC_RST_N, 0, 0, 0, 0, 0, 0, 0, &res);
1675 
1676 	if (res.a0)
1677 		pr_err("%s: RST_N failed.\n", mmc_hostname(host->mmc));
1678 }
1679 
1680 static const struct sdhci_ops sdhci_dwcmshc_bf3_ops = {
1681 	.set_clock		= sdhci_set_clock,
1682 	.set_bus_width		= sdhci_set_bus_width,
1683 	.set_uhs_signaling	= dwcmshc_set_uhs_signaling,
1684 	.get_max_clock		= dwcmshc_get_max_clock,
1685 	.reset			= sdhci_reset,
1686 	.adma_write_desc	= dwcmshc_adma_write_desc,
1687 	.irq			= dwcmshc_cqe_irq_handler,
1688 	.hw_reset		= dwcmshc_bf3_hw_reset,
1689 };
1690 #endif
1691 
1692 static const struct sdhci_ops sdhci_dwcmshc_rk35xx_ops = {
1693 	.set_clock		= dwcmshc_rk3568_set_clock,
1694 	.set_bus_width		= sdhci_set_bus_width,
1695 	.set_uhs_signaling	= dwcmshc_set_uhs_signaling,
1696 	.get_max_clock		= rk35xx_get_max_clock,
1697 	.reset			= rk35xx_sdhci_reset,
1698 	.adma_write_desc	= dwcmshc_adma_write_desc,
1699 	.irq			= dwcmshc_cqe_irq_handler,
1700 };
1701 
1702 static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = {
1703 	.set_clock		= sdhci_set_clock,
1704 	.set_bus_width		= sdhci_set_bus_width,
1705 	.set_uhs_signaling	= th1520_set_uhs_signaling,
1706 	.get_max_clock		= dwcmshc_get_max_clock,
1707 	.reset			= th1520_sdhci_reset,
1708 	.adma_write_desc	= dwcmshc_adma_write_desc,
1709 	.voltage_switch		= dwcmshc_phy_init,
1710 	.platform_execute_tuning = th1520_execute_tuning,
1711 };
1712 
1713 static const struct sdhci_ops sdhci_dwcmshc_cv18xx_ops = {
1714 	.set_clock		= sdhci_set_clock,
1715 	.set_bus_width		= sdhci_set_bus_width,
1716 	.set_uhs_signaling	= dwcmshc_set_uhs_signaling,
1717 	.get_max_clock		= dwcmshc_get_max_clock,
1718 	.reset			= cv18xx_sdhci_reset,
1719 	.adma_write_desc	= dwcmshc_adma_write_desc,
1720 	.platform_execute_tuning = cv18xx_sdhci_execute_tuning,
1721 };
1722 
1723 static const struct sdhci_ops sdhci_dwcmshc_sg2042_ops = {
1724 	.set_clock		= sdhci_set_clock,
1725 	.set_bus_width		= sdhci_set_bus_width,
1726 	.set_uhs_signaling	= dwcmshc_set_uhs_signaling,
1727 	.get_max_clock		= dwcmshc_get_max_clock,
1728 	.reset			= sg2042_sdhci_reset,
1729 	.adma_write_desc	= dwcmshc_adma_write_desc,
1730 	.platform_execute_tuning = th1520_execute_tuning,
1731 };
1732 
1733 static const struct sdhci_ops sdhci_dwcmshc_eic7700_ops = {
1734 	.set_clock = sdhci_eic7700_set_clock,
1735 	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
1736 	.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
1737 	.set_bus_width = sdhci_set_bus_width,
1738 	.reset = sdhci_eic7700_reset,
1739 	.set_uhs_signaling = sdhci_eic7700_set_uhs_wrapper,
1740 	.set_power = sdhci_set_power_and_bus_voltage,
1741 	.irq = dwcmshc_cqe_irq_handler,
1742 	.adma_write_desc = dwcmshc_adma_write_desc,
1743 	.platform_execute_tuning = sdhci_eic7700_executing_tuning,
1744 };
1745 
1746 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_pdata = {
1747 	.pdata = {
1748 		.ops = &sdhci_dwcmshc_ops,
1749 		.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1750 		.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1751 	},
1752 };
1753 
1754 #ifdef CONFIG_ACPI
1755 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_bf3_pdata = {
1756 	.pdata = {
1757 		.ops = &sdhci_dwcmshc_bf3_ops,
1758 		.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1759 		.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1760 			   SDHCI_QUIRK2_ACMD23_BROKEN,
1761 	},
1762 };
1763 #endif
1764 
1765 static const struct cqhci_host_ops rk35xx_cqhci_ops = {
1766 	.pre_enable	= rk35xx_sdhci_cqe_pre_enable,
1767 	.enable		= rk35xx_sdhci_cqe_enable,
1768 	.disable	= rk35xx_sdhci_cqe_disable,
1769 	.post_disable	= rk35xx_sdhci_cqe_post_disable,
1770 	.dumpregs	= dwcmshc_cqhci_dumpregs,
1771 	.set_tran_desc	= dwcmshc_set_tran_desc,
1772 };
1773 
1774 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk35xx_pdata = {
1775 	.pdata = {
1776 		.ops = &sdhci_dwcmshc_rk35xx_ops,
1777 		.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
1778 			  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
1779 		.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1780 			   SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
1781 	},
1782 	.cqhci_host_ops = &rk35xx_cqhci_ops,
1783 	.init = dwcmshc_rk35xx_init,
1784 	.postinit = dwcmshc_rk35xx_postinit,
1785 };
1786 
1787 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_rk3576_pdata = {
1788 	.pdata = {
1789 		.ops = &sdhci_dwcmshc_rk35xx_ops,
1790 		.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
1791 			  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
1792 		.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1793 			   SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
1794 	},
1795 	.cqhci_host_ops = &rk35xx_cqhci_ops,
1796 	.init = dwcmshc_rk35xx_init,
1797 	.postinit = dwcmshc_rk3576_postinit,
1798 };
1799 
1800 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_th1520_pdata = {
1801 	.pdata = {
1802 		.ops = &sdhci_dwcmshc_th1520_ops,
1803 		.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1804 		.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1805 	},
1806 	.init = th1520_init,
1807 };
1808 
1809 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_cv18xx_pdata = {
1810 	.pdata = {
1811 		.ops = &sdhci_dwcmshc_cv18xx_ops,
1812 		.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1813 		.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1814 	},
1815 };
1816 
1817 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_sg2042_pdata = {
1818 	.pdata = {
1819 		.ops = &sdhci_dwcmshc_sg2042_ops,
1820 		.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1821 		.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1822 	},
1823 	.init = sg2042_init,
1824 };
1825 
1826 static const struct dwcmshc_pltfm_data sdhci_dwcmshc_eic7700_pdata = {
1827 	.pdata = {
1828 		.ops = &sdhci_dwcmshc_eic7700_ops,
1829 		.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
1830 			  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
1831 		.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1832 			   SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
1833 	},
1834 	.init = eic7700_init,
1835 };
1836 
1837 static const struct cqhci_host_ops dwcmshc_cqhci_ops = {
1838 	.enable		= dwcmshc_sdhci_cqe_enable,
1839 	.disable	= sdhci_cqe_disable,
1840 	.dumpregs	= dwcmshc_cqhci_dumpregs,
1841 	.set_tran_desc	= dwcmshc_set_tran_desc,
1842 };
1843 
1844 static void dwcmshc_cqhci_init(struct sdhci_host *host, struct platform_device *pdev,
1845 			       const struct dwcmshc_pltfm_data *pltfm_data)
1846 {
1847 	struct cqhci_host *cq_host;
1848 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1849 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
1850 	bool dma64 = false;
1851 	u16 clk;
1852 	int err;
1853 
1854 	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1855 	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
1856 	if (!cq_host) {
1857 		dev_err(mmc_dev(host->mmc), "Unable to setup CQE: not enough memory\n");
1858 		goto dsbl_cqe_caps;
1859 	}
1860 
1861 	/*
1862 	 * For the dwcmshc host controller, the internal clock must be enabled
1863 	 * before accessing some registers in Vendor Specific Area 2.
1864 	 */
1865 	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1866 	clk |= SDHCI_CLOCK_INT_EN;
1867 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1868 	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1869 	if (!(clk & SDHCI_CLOCK_INT_EN)) {
1870 		dev_err(mmc_dev(host->mmc), "Unable to setup CQE: internal clock enable error\n");
1871 		goto free_cq_host;
1872 	}
1873 
1874 	cq_host->mmio = host->ioaddr + priv->vendor_specific_area2;
1875 	if (pltfm_data->cqhci_host_ops)
1876 		cq_host->ops = pltfm_data->cqhci_host_ops;
1877 	else
1878 		cq_host->ops = &dwcmshc_cqhci_ops;
1879 
1880 	/* Enable use of 128-bit task descriptors */
1881 	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1882 	if (dma64) {
1883 		dev_dbg(mmc_dev(host->mmc), "128-bit task descriptors\n");
1884 		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1885 	}
1886 	err = cqhci_init(cq_host, host->mmc, dma64);
1887 	if (err) {
1888 		dev_err(mmc_dev(host->mmc), "Unable to setup CQE: error %d\n", err);
1889 		goto int_clock_disable;
1890 	}
1891 
1892 	dev_dbg(mmc_dev(host->mmc), "CQE init done\n");
1893 
1894 	return;
1895 
1896 int_clock_disable:
1897 	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1898 	clk &= ~SDHCI_CLOCK_INT_EN;
1899 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1900 
1901 free_cq_host:
1902 	devm_kfree(&pdev->dev, cq_host);
1903 
1904 dsbl_cqe_caps:
1905 	host->mmc->caps2 &= ~(MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD);
1906 }
1907 
1908 static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
1909 	{
1910 		.compatible = "rockchip,rk3588-dwcmshc",
1911 		.data = &sdhci_dwcmshc_rk35xx_pdata,
1912 	},
1913 	{
1914 		.compatible = "rockchip,rk3576-dwcmshc",
1915 		.data = &sdhci_dwcmshc_rk3576_pdata,
1916 	},
1917 	{
1918 		.compatible = "rockchip,rk3568-dwcmshc",
1919 		.data = &sdhci_dwcmshc_rk35xx_pdata,
1920 	},
1921 	{
1922 		.compatible = "snps,dwcmshc-sdhci",
1923 		.data = &sdhci_dwcmshc_pdata,
1924 	},
1925 	{
1926 		.compatible = "sophgo,cv1800b-dwcmshc",
1927 		.data = &sdhci_dwcmshc_cv18xx_pdata,
1928 	},
1929 	{
1930 		.compatible = "sophgo,sg2002-dwcmshc",
1931 		.data = &sdhci_dwcmshc_cv18xx_pdata,
1932 	},
1933 	{
1934 		.compatible = "thead,th1520-dwcmshc",
1935 		.data = &sdhci_dwcmshc_th1520_pdata,
1936 	},
1937 	{
1938 		.compatible = "sophgo,sg2042-dwcmshc",
1939 		.data = &sdhci_dwcmshc_sg2042_pdata,
1940 	},
1941 	{
1942 		.compatible = "eswin,eic7700-dwcmshc",
1943 		.data = &sdhci_dwcmshc_eic7700_pdata,
1944 	},
1945 	{},
1946 };
1947 MODULE_DEVICE_TABLE(of, sdhci_dwcmshc_dt_ids);
1948 
1949 #ifdef CONFIG_ACPI
1950 static const struct acpi_device_id sdhci_dwcmshc_acpi_ids[] = {
1951 	{
1952 		.id = "MLNXBF30",
1953 		.driver_data = (kernel_ulong_t)&sdhci_dwcmshc_bf3_pdata,
1954 	},
1955 	{}
1956 };
1957 MODULE_DEVICE_TABLE(acpi, sdhci_dwcmshc_acpi_ids);
1958 #endif
1959 
1960 static int dwcmshc_probe(struct platform_device *pdev)
1961 {
1962 	struct device *dev = &pdev->dev;
1963 	struct sdhci_pltfm_host *pltfm_host;
1964 	struct sdhci_host *host;
1965 	struct dwcmshc_priv *priv;
1966 	const struct dwcmshc_pltfm_data *pltfm_data;
1967 	int err;
1968 	u32 extra, caps;
1969 
1970 	pltfm_data = device_get_match_data(&pdev->dev);
1971 	if (!pltfm_data) {
1972 		dev_err(&pdev->dev, "Error: No device match data found\n");
1973 		return -ENODEV;
1974 	}
1975 
1976 	host = sdhci_pltfm_init(pdev, &pltfm_data->pdata,
1977 				sizeof(struct dwcmshc_priv));
1978 	if (IS_ERR(host))
1979 		return PTR_ERR(host);
1980 
1981 	/*
1982 	 * Extra ADMA table entries for handling buffers that cross a 128M boundary.
1983 	 */
1984 	extra = DIV_ROUND_UP_ULL(dma_get_required_mask(dev), SZ_128M);
1985 	if (extra > SDHCI_MAX_SEGS)
1986 		extra = SDHCI_MAX_SEGS;
1987 	host->adma_table_cnt += extra;
1988 
1989 	pltfm_host = sdhci_priv(host);
1990 	priv = sdhci_pltfm_priv(pltfm_host);
1991 
1992 	if (dev->of_node) {
1993 		pltfm_host->clk = devm_clk_get(dev, "core");
1994 		if (IS_ERR(pltfm_host->clk))
1995 			return dev_err_probe(dev, PTR_ERR(pltfm_host->clk),
1996 					     "failed to get core clk\n");
1997 
1998 		err = clk_prepare_enable(pltfm_host->clk);
1999 		if (err)
2000 			return err;
2001 
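		/* The bus clock is optional; skip it when the platform does not provide one */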
2002 		priv->bus_clk = devm_clk_get(dev, "bus");
2003 		if (!IS_ERR(priv->bus_clk))
2004 			clk_prepare_enable(priv->bus_clk);
2005 	}
2006 
2007 	err = mmc_of_parse(host->mmc);
2008 	if (err)
2009 		goto err_clk;
2010 
2011 	sdhci_get_of_property(pdev);
2012 
2013 	priv->vendor_specific_area1 =
2014 		sdhci_readl(host, DWCMSHC_P_VENDOR_AREA1) & DWCMSHC_AREA1_MASK;
2015 
2016 	host->mmc_host_ops.request = dwcmshc_request;
2017 	host->mmc_host_ops.hs400_enhanced_strobe = dwcmshc_hs400_enhanced_strobe;
2018 	host->mmc_host_ops.execute_tuning = dwcmshc_execute_tuning;
2019 
2020 	if (pltfm_data->init) {
2021 		err = pltfm_data->init(&pdev->dev, host, priv);
2022 		if (err)
2023 			goto err_clk;
2024 	}
2025 
2026 #ifdef CONFIG_ACPI
2027 	if (pltfm_data == &sdhci_dwcmshc_bf3_pdata)
2028 		sdhci_enable_v4_mode(host);
2029 #endif
2030 
2031 	caps = sdhci_readl(host, SDHCI_CAPABILITIES);
2032 	if (caps & SDHCI_CAN_64BIT_V4)
2033 		sdhci_enable_v4_mode(host);
2034 
2035 	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
2036 
2037 	pm_runtime_get_noresume(dev);
2038 	pm_runtime_set_active(dev);
2039 	pm_runtime_enable(dev);
2040 
2041 	err = sdhci_setup_host(host);
2042 	if (err)
2043 		goto err_rpm;
2044 
2045 	/* Setup Command Queue Engine if enabled */
2046 	if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
2047 		priv->vendor_specific_area2 =
2048 			sdhci_readw(host, DWCMSHC_P_VENDOR_AREA2);
2049 
2050 		dwcmshc_cqhci_init(host, pdev, pltfm_data);
2051 	}
2052 
2053 	if (pltfm_data->postinit)
2054 		pltfm_data->postinit(host, priv);
2055 
2056 	err = __sdhci_add_host(host);
2057 	if (err)
2058 		goto err_setup_host;
2059 
2060 	pm_runtime_put(dev);
2061 
2062 	return 0;
2063 
2064 err_setup_host:
2065 	sdhci_cleanup_host(host);
2066 err_rpm:
2067 	pm_runtime_disable(dev);
2068 	pm_runtime_put_noidle(dev);
2069 err_clk:
2070 	clk_disable_unprepare(pltfm_host->clk);
2071 	clk_disable_unprepare(priv->bus_clk);
2072 	clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
2073 	return err;
2074 }
2075 
2076 static void dwcmshc_disable_card_clk(struct sdhci_host *host)
2077 {
2078 	u16 ctrl;
2079 
2080 	ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2081 	if (ctrl & SDHCI_CLOCK_CARD_EN) {
2082 		ctrl &= ~SDHCI_CLOCK_CARD_EN;
2083 		sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
2084 	}
2085 }
2086 
2087 static void dwcmshc_remove(struct platform_device *pdev)
2088 {
2089 	struct sdhci_host *host = platform_get_drvdata(pdev);
2090 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2091 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
2092 
2093 	pm_runtime_get_sync(&pdev->dev);
2094 	pm_runtime_disable(&pdev->dev);
2095 	pm_runtime_put_noidle(&pdev->dev);
2096 
2097 	sdhci_remove_host(host, 0);
2098 
2099 	dwcmshc_disable_card_clk(host);
2100 
2101 	clk_disable_unprepare(pltfm_host->clk);
2102 	clk_disable_unprepare(priv->bus_clk);
2103 	clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
2104 }
2105 
2106 static int dwcmshc_suspend(struct device *dev)
2107 {
2108 	struct sdhci_host *host = dev_get_drvdata(dev);
2109 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2110 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
2111 	int ret;
2112 
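	/* Runtime-resume the controller so its registers are accessible during suspend */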
2113 	pm_runtime_resume(dev);
2114 
2115 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
2116 		ret = cqhci_suspend(host->mmc);
2117 		if (ret)
2118 			return ret;
2119 	}
2120 
2121 	ret = sdhci_suspend_host(host);
2122 	if (ret)
2123 		return ret;
2124 
2125 	clk_disable_unprepare(pltfm_host->clk);
2126 	if (!IS_ERR(priv->bus_clk))
2127 		clk_disable_unprepare(priv->bus_clk);
2128 
2129 	clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
2130 
2131 	return ret;
2132 }
2133 
2134 static int dwcmshc_resume(struct device *dev)
2135 {
2136 	struct sdhci_host *host = dev_get_drvdata(dev);
2137 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2138 	struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
2139 	int ret;
2140 
2141 	ret = clk_prepare_enable(pltfm_host->clk);
2142 	if (ret)
2143 		return ret;
2144 
2145 	if (!IS_ERR(priv->bus_clk)) {
2146 		ret = clk_prepare_enable(priv->bus_clk);
2147 		if (ret)
2148 			goto disable_clk;
2149 	}
2150 
2151 	ret = clk_bulk_prepare_enable(priv->num_other_clks, priv->other_clks);
2152 	if (ret)
2153 		goto disable_bus_clk;
2154 
2155 	ret = sdhci_resume_host(host);
2156 	if (ret)
2157 		goto disable_other_clks;
2158 
2159 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
2160 		ret = cqhci_resume(host->mmc);
2161 		if (ret)
2162 			goto disable_other_clks;
2163 	}
2164 
2165 	return 0;
2166 
2167 disable_other_clks:
2168 	clk_bulk_disable_unprepare(priv->num_other_clks, priv->other_clks);
2169 disable_bus_clk:
2170 	if (!IS_ERR(priv->bus_clk))
2171 		clk_disable_unprepare(priv->bus_clk);
2172 disable_clk:
2173 	clk_disable_unprepare(pltfm_host->clk);
2174 	return ret;
2175 }
2176 
2177 static int dwcmshc_runtime_suspend(struct device *dev)
2178 {
2179 	struct sdhci_host *host = dev_get_drvdata(dev);
2180 
2181 	dwcmshc_disable_card_clk(host);
2182 
2183 	return 0;
2184 }
2185 
2186 static int dwcmshc_runtime_resume(struct device *dev)
2187 {
2188 	struct sdhci_host *host = dev_get_drvdata(dev);
2189 
2190 	dwcmshc_enable_card_clk(host);
2191 
2192 	return 0;
2193 }
2194 
2195 static const struct dev_pm_ops dwcmshc_pmops = {
2196 	SYSTEM_SLEEP_PM_OPS(dwcmshc_suspend, dwcmshc_resume)
2197 	RUNTIME_PM_OPS(dwcmshc_runtime_suspend, dwcmshc_runtime_resume, NULL)
2198 };
2199 
2200 static struct platform_driver sdhci_dwcmshc_driver = {
2201 	.driver	= {
2202 		.name	= "sdhci-dwcmshc",
2203 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
2204 		.of_match_table = sdhci_dwcmshc_dt_ids,
2205 		.acpi_match_table = ACPI_PTR(sdhci_dwcmshc_acpi_ids),
2206 		.pm = pm_ptr(&dwcmshc_pmops),
2207 	},
2208 	.probe	= dwcmshc_probe,
2209 	.remove = dwcmshc_remove,
2210 };
2211 module_platform_driver(sdhci_dwcmshc_driver);
2212 
2213 MODULE_DESCRIPTION("SDHCI platform driver for Synopsys DWC MSHC");
2214 MODULE_AUTHOR("Jisheng Zhang <jszhang@kernel.org>");
2215 MODULE_LICENSE("GPL v2");
2216