// SPDX-License-Identifier: GPL-2.0
/*
 * dwc3-xilinx.c - Xilinx DWC3 controller specific glue driver
 *
 * Authors: Manish Narani <manish.narani@xilinx.com>
 *          Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/io.h>

#include <linux/phy/phy.h>
/* USB PHY reset mask register */
#define XLNX_USB_PHY_RST_EN			0x001C
#define XLNX_PHY_RST_MASK			0x1

/* Xilinx USB 3.0 IP register */
#define XLNX_USB_TRAFFIC_ROUTE_CONFIG		0x005C
#define XLNX_USB_TRAFFIC_ROUTE_FPD		0x1

/* Versal USB reset ID */
#define VERSAL_USB_RESET_ID			0xC104036

#define XLNX_USB_FPD_PIPE_CLK			0x7c
#define PIPE_CLK_DESELECT			1
#define PIPE_CLK_SELECT				0
#define XLNX_USB_FPD_POWER_PRSNT		0x80
#define FPD_POWER_PRSNT_OPTION			BIT(0)

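/**
 * struct dwc3_xlnx - per-instance data for the Xilinx DWC3 glue
 * @num_clocks:	number of clocks returned by devm_clk_bulk_get_all()
 * @clks:	bulk clock handles for the controller
 * @dev:	the glue platform device
 * @regs:	base of the Xilinx-specific USB wrapper registers
 * @pltfm_init:	SoC-specific init routine taken from the OF match data
 */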
struct dwc3_xlnx {
	int				num_clocks;
	struct clk_bulk_data		*clks;
	struct device			*dev;
	void __iomem			*regs;
	int				(*pltfm_init)(struct dwc3_xlnx *data);
};

static void dwc3_xlnx_mask_phy_rst(struct dwc3_xlnx *priv_data, bool mask)
{
	u32 reg;

	/*
	 * Enable or disable the ULPI PHY reset coming from the USB
	 * controller. This does not reset the PHY itself; it only
	 * controls whether the USB controller is allowed to reset the
	 * ULPI PHY.
	 */
	reg = readl(priv_data->regs + XLNX_USB_PHY_RST_EN);

	if (mask)
		reg &= ~XLNX_PHY_RST_MASK;
	else
		reg |= XLNX_PHY_RST_MASK;

	writel(reg, priv_data->regs + XLNX_USB_PHY_RST_EN);
}

static int dwc3_xlnx_init_versal(struct dwc3_xlnx *priv_data)
{
	struct device		*dev = priv_data->dev;
	int			ret;

	dwc3_xlnx_mask_phy_rst(priv_data, false);

	/* Pulse the USB reset: assert, then release */
	ret = zynqmp_pm_reset_assert(VERSAL_USB_RESET_ID,
				     PM_RESET_ACTION_ASSERT);
	if (ret < 0) {
		dev_err_probe(dev, ret, "failed to assert reset\n");
		return ret;
	}

	ret = zynqmp_pm_reset_assert(VERSAL_USB_RESET_ID,
				     PM_RESET_ACTION_RELEASE);
	if (ret < 0) {
		dev_err_probe(dev, ret, "failed to release reset\n");
		return ret;
	}

	dwc3_xlnx_mask_phy_rst(priv_data, true);

	return 0;
}

static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
{
	struct device		*dev = priv_data->dev;
	struct reset_control	*crst, *hibrst, *apbrst;
	struct phy		*usb3_phy;
	int			ret;
	u32			reg;
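
	/* The USB3 PHY is optional; defer only while its driver is still probing */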
	usb3_phy = devm_phy_get(dev, "usb3-phy");
	if (PTR_ERR(usb3_phy) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err;
	} else if (IS_ERR(usb3_phy)) {
		usb3_phy = NULL;
	}

	crst = devm_reset_control_get_exclusive(dev, "usb_crst");
	if (IS_ERR(crst)) {
		ret = PTR_ERR(crst);
		dev_err_probe(dev, ret,
			      "failed to get core reset signal\n");
		goto err;
	}

	hibrst = devm_reset_control_get_exclusive(dev, "usb_hibrst");
	if (IS_ERR(hibrst)) {
		ret = PTR_ERR(hibrst);
		dev_err_probe(dev, ret,
			      "failed to get hibernation reset signal\n");
		goto err;
	}

	apbrst = devm_reset_control_get_exclusive(dev, "usb_apbrst");
	if (IS_ERR(apbrst)) {
		ret = PTR_ERR(apbrst);
		dev_err_probe(dev, ret,
			      "failed to get APB reset signal\n");
		goto err;
	}

	ret = reset_control_assert(crst);
	if (ret < 0) {
		dev_err(dev, "Failed to assert core reset\n");
		goto err;
	}

	ret = reset_control_assert(hibrst);
	if (ret < 0) {
		dev_err(dev, "Failed to assert hibernation reset\n");
		goto err;
	}

	ret = reset_control_assert(apbrst);
	if (ret < 0) {
		dev_err(dev, "Failed to assert APB reset\n");
		goto err;
	}

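	/* Initialize the USB3 PHY while the controller resets are still asserted */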
	ret = phy_init(usb3_phy);
	if (ret < 0) {
		phy_exit(usb3_phy);
		goto err;
	}

	ret = reset_control_deassert(apbrst);
	if (ret < 0) {
		dev_err(dev, "Failed to release APB reset\n");
		goto err;
	}

	/* Set the PIPE Power Present signal in the FPD Power Present register */
	writel(FPD_POWER_PRSNT_OPTION, priv_data->regs + XLNX_USB_FPD_POWER_PRSNT);

	/* Set the PIPE Clock Select bit in the FPD PIPE Clock register */
	writel(PIPE_CLK_SELECT, priv_data->regs + XLNX_USB_FPD_PIPE_CLK);

	ret = reset_control_deassert(crst);
	if (ret < 0) {
		dev_err(dev, "Failed to release core reset\n");
		goto err;
	}

	ret = reset_control_deassert(hibrst);
	if (ret < 0) {
		dev_err(dev, "Failed to release hibernation reset\n");
		goto err;
	}

	ret = phy_power_on(usb3_phy);
	if (ret < 0) {
		phy_exit(usb3_phy);
		goto err;
	}

	/*
	 * Route USB DMA traffic through the FPD path instead of letting
	 * it reach DDR directly. This routing is needed for the SMMU and
	 * CCI to work with USB DMA.
	 */
	if (of_dma_is_coherent(dev->of_node) || device_iommu_mapped(dev)) {
		reg = readl(priv_data->regs + XLNX_USB_TRAFFIC_ROUTE_CONFIG);
		reg |= XLNX_USB_TRAFFIC_ROUTE_FPD;
		writel(reg, priv_data->regs + XLNX_USB_TRAFFIC_ROUTE_CONFIG);
	}

err:
	return ret;
}

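/* The OF match data selects the SoC-specific init routine used at probe time */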
static const struct of_device_id dwc3_xlnx_of_match[] = {
	{
		.compatible = "xlnx,zynqmp-dwc3",
		.data = &dwc3_xlnx_init_zynqmp,
	},
	{
		.compatible = "xlnx,versal-dwc3",
		.data = &dwc3_xlnx_init_versal,
	},
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dwc3_xlnx_of_match);

static int dwc3_xlnx_probe(struct platform_device *pdev)
{
	struct dwc3_xlnx		*priv_data;
	struct device			*dev = &pdev->dev;
	struct device_node		*np = dev->of_node;
	const struct of_device_id	*match;
	void __iomem			*regs;
	int				ret;

	priv_data = devm_kzalloc(dev, sizeof(*priv_data), GFP_KERNEL);
	if (!priv_data)
		return -ENOMEM;

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs)) {
		ret = PTR_ERR(regs);
		dev_err_probe(dev, ret, "failed to map registers\n");
		return ret;
	}

	match = of_match_node(dwc3_xlnx_of_match, pdev->dev.of_node);

	priv_data->pltfm_init = match->data;
	priv_data->regs = regs;
	priv_data->dev = dev;

	platform_set_drvdata(pdev, priv_data);

	ret = devm_clk_bulk_get_all(priv_data->dev, &priv_data->clks);
	if (ret < 0)
		return ret;

	priv_data->num_clocks = ret;

	ret = clk_bulk_prepare_enable(priv_data->num_clocks, priv_data->clks);
	if (ret)
		return ret;

	ret = priv_data->pltfm_init(priv_data);
	if (ret)
		goto err_clk_put;

	ret = of_platform_populate(np, NULL, NULL, dev);
	if (ret)
		goto err_clk_put;

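	/*
	 * Enable runtime PM for the glue device and take a usage
	 * reference so the bulk clocks stay on while the core is active.
	 */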
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_suspend_ignore_children(dev, false);
	pm_runtime_get_sync(dev);

	return 0;

err_clk_put:
	clk_bulk_disable_unprepare(priv_data->num_clocks, priv_data->clks);

	return ret;
}

static int dwc3_xlnx_remove(struct platform_device *pdev)
{
	struct dwc3_xlnx	*priv_data = platform_get_drvdata(pdev);
	struct device		*dev = &pdev->dev;

	of_platform_depopulate(dev);

	clk_bulk_disable_unprepare(priv_data->num_clocks, priv_data->clks);
	priv_data->num_clocks = 0;

	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);
	pm_runtime_set_suspended(dev);

	return 0;
}

static int __maybe_unused dwc3_xlnx_suspend_common(struct device *dev)
{
	struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);

	clk_bulk_disable(priv_data->num_clocks, priv_data->clks);

	return 0;
}

static int __maybe_unused dwc3_xlnx_resume_common(struct device *dev)
{
	struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);

	return clk_bulk_enable(priv_data->num_clocks, priv_data->clks);
}

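/* On runtime idle, arm the autosuspend timer instead of suspending immediately */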
static int __maybe_unused dwc3_xlnx_runtime_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);

	return 0;
}

static UNIVERSAL_DEV_PM_OPS(dwc3_xlnx_dev_pm_ops, dwc3_xlnx_suspend_common,
			    dwc3_xlnx_resume_common, dwc3_xlnx_runtime_idle);

static struct platform_driver dwc3_xlnx_driver = {
	.probe		= dwc3_xlnx_probe,
	.remove		= dwc3_xlnx_remove,
	.driver		= {
		.name		= "dwc3-xilinx",
		.of_match_table	= dwc3_xlnx_of_match,
		.pm		= &dwc3_xlnx_dev_pm_ops,
	},
};

module_platform_driver(dwc3_xlnx_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Xilinx DWC3 controller specific glue driver");
MODULE_AUTHOR("Manish Narani <manish.narani@xilinx.com>");
MODULE_AUTHOR("Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>");