1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * dwc3-xilinx.c - Xilinx DWC3 controller specific glue driver
4 *
5 * Authors: Manish Narani <manish.narani@xilinx.com>
6 * Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
7 */
8
9 #include <linux/module.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/clk.h>
13 #include <linux/of.h>
14 #include <linux/platform_device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/gpio/consumer.h>
17 #include <linux/of_platform.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/reset.h>
20 #include <linux/of_address.h>
21 #include <linux/delay.h>
22 #include <linux/firmware/xlnx-zynqmp.h>
23 #include <linux/io.h>
24
25 #include <linux/phy/phy.h>
26
27 /* USB phy reset mask register */
28 #define XLNX_USB_PHY_RST_EN 0x001C
29 #define XLNX_PHY_RST_MASK 0x1
30
31 /* Xilinx USB 3.0 IP Register */
32 #define XLNX_USB_TRAFFIC_ROUTE_CONFIG 0x005C
33 #define XLNX_USB_TRAFFIC_ROUTE_FPD 0x1
34
35 #define XLNX_USB_FPD_PIPE_CLK 0x7c
36 #define PIPE_CLK_DESELECT 1
37 #define PIPE_CLK_SELECT 0
38 #define XLNX_USB_FPD_POWER_PRSNT 0x80
39 #define FPD_POWER_PRSNT_OPTION BIT(0)
40
/**
 * struct dwc3_xlnx - Xilinx DWC3 glue layer private data
 * @num_clocks: number of entries in @clks
 * @clks: bulk clock handles obtained via devm_clk_bulk_get_all()
 * @dev: the glue-layer platform device
 * @regs: mapped glue-layer register region
 * @pltfm_init: SoC-specific init hook (ZynqMP or Versal), taken from
 *              the OF match data
 * @usb3_phy: optional USB3 PHY; NULL when "usb3-phy" is absent from
 *            the device tree (see dwc3_xlnx_init_zynqmp())
 */
struct dwc3_xlnx {
	int num_clocks;
	struct clk_bulk_data *clks;
	struct device *dev;
	void __iomem *regs;
	int (*pltfm_init)(struct dwc3_xlnx *data);
	struct phy *usb3_phy;
};
49
/*
 * Gate (mask) or ungate the ULPI PHY reset line driven by the USB
 * controller. This does not reset the PHY itself; it only decides
 * whether the controller is allowed to do so.
 */
static void dwc3_xlnx_mask_phy_rst(struct dwc3_xlnx *priv_data, bool mask)
{
	u32 val;

	val = readl(priv_data->regs + XLNX_USB_PHY_RST_EN);

	/* Masked: clear the enable bit; unmasked: set it. */
	val = mask ? (val & ~XLNX_PHY_RST_MASK) : (val | XLNX_PHY_RST_MASK);

	writel(val, priv_data->regs + XLNX_USB_PHY_RST_EN);
}
68
dwc3_xlnx_init_versal(struct dwc3_xlnx * priv_data)69 static int dwc3_xlnx_init_versal(struct dwc3_xlnx *priv_data)
70 {
71 struct device *dev = priv_data->dev;
72 struct reset_control *crst;
73 int ret;
74
75 crst = devm_reset_control_get_exclusive(dev, NULL);
76 if (IS_ERR(crst))
77 return dev_err_probe(dev, PTR_ERR(crst), "failed to get reset signal\n");
78
79 dwc3_xlnx_mask_phy_rst(priv_data, false);
80
81 /* Assert and De-assert reset */
82 ret = reset_control_assert(crst);
83 if (ret < 0) {
84 dev_err_probe(dev, ret, "failed to assert Reset\n");
85 return ret;
86 }
87
88 ret = reset_control_deassert(crst);
89 if (ret < 0) {
90 dev_err_probe(dev, ret, "failed to De-assert Reset\n");
91 return ret;
92 }
93
94 dwc3_xlnx_mask_phy_rst(priv_data, true);
95
96 return 0;
97 }
98
/*
 * ZynqMP-specific init: if a USB3 PHY is described in the device tree,
 * pulse the core/hibernation/APB resets around PHY init and program
 * the PIPE clock and power-present registers; otherwise deselect the
 * PIPE clock. In both cases, pulse the optional ULPI reset GPIO and
 * route USB DMA through the FPD path when needed.
 *
 * Returns 0 on success or a negative errno.
 */
static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
{
	struct device *dev = priv_data->dev;
	struct reset_control *crst, *hibrst, *apbrst;
	struct gpio_desc *reset_gpio;
	int ret = 0;
	u32 reg;

	/* Optional PHY: returns NULL (not an error) when absent from DT. */
	priv_data->usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
	if (IS_ERR(priv_data->usb3_phy)) {
		ret = PTR_ERR(priv_data->usb3_phy);
		dev_err_probe(dev, ret,
			      "failed to get USB3 PHY\n");
		goto err;
	}

	/*
	 * The following core resets are not required unless a USB3 PHY
	 * is used, and the subsequent register settings are not required
	 * unless a core reset is performed (they should be set properly
	 * by the first-stage boot loader, but may be reverted by a core
	 * reset). They may also break the configuration if USB3 is actually
	 * in use but the usb3-phy entry is missing from the device tree.
	 * Therefore, skip these operations in this case.
	 */
	if (!priv_data->usb3_phy) {
		/* Deselect the PIPE Clock Select bit in FPD PIPE Clock register */
		writel(PIPE_CLK_DESELECT, priv_data->regs + XLNX_USB_FPD_PIPE_CLK);
		goto skip_usb3_phy;
	}

	crst = devm_reset_control_get_exclusive(dev, "usb_crst");
	if (IS_ERR(crst)) {
		ret = PTR_ERR(crst);
		dev_err_probe(dev, ret,
			      "failed to get core reset signal\n");
		goto err;
	}

	hibrst = devm_reset_control_get_exclusive(dev, "usb_hibrst");
	if (IS_ERR(hibrst)) {
		ret = PTR_ERR(hibrst);
		dev_err_probe(dev, ret,
			      "failed to get hibernation reset signal\n");
		goto err;
	}

	apbrst = devm_reset_control_get_exclusive(dev, "usb_apbrst");
	if (IS_ERR(apbrst)) {
		ret = PTR_ERR(apbrst);
		dev_err_probe(dev, ret,
			      "failed to get APB reset signal\n");
		goto err;
	}

	/* Hold core, hibernation and APB in reset while the PHY comes up. */
	ret = reset_control_assert(crst);
	if (ret < 0) {
		dev_err(dev, "Failed to assert core reset\n");
		goto err;
	}

	ret = reset_control_assert(hibrst);
	if (ret < 0) {
		dev_err(dev, "Failed to assert hibernation reset\n");
		goto err;
	}

	ret = reset_control_assert(apbrst);
	if (ret < 0) {
		dev_err(dev, "Failed to assert APB reset\n");
		goto err;
	}

	ret = phy_init(priv_data->usb3_phy);
	if (ret < 0) {
		phy_exit(priv_data->usb3_phy);
		goto err;
	}

	/*
	 * APB reset must be released first so the registers below are
	 * writable before the core/hibernation resets are released.
	 */
	ret = reset_control_deassert(apbrst);
	if (ret < 0) {
		dev_err(dev, "Failed to release APB reset\n");
		goto err;
	}

	/* Set PIPE Power Present signal in FPD Power Present Register */
	writel(FPD_POWER_PRSNT_OPTION, priv_data->regs + XLNX_USB_FPD_POWER_PRSNT);

	/* Set the PIPE Clock Select bit in FPD PIPE Clock register */
	writel(PIPE_CLK_SELECT, priv_data->regs + XLNX_USB_FPD_PIPE_CLK);

	ret = reset_control_deassert(crst);
	if (ret < 0) {
		dev_err(dev, "Failed to release core reset\n");
		goto err;
	}

	ret = reset_control_deassert(hibrst);
	if (ret < 0) {
		dev_err(dev, "Failed to release hibernation reset\n");
		goto err;
	}

	ret = phy_power_on(priv_data->usb3_phy);
	if (ret < 0) {
		phy_exit(priv_data->usb3_phy);
		goto err;
	}

skip_usb3_phy:
	/* ulpi reset via gpio-modepin or gpio-framework driver */
	reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset_gpio)) {
		return dev_err_probe(dev, PTR_ERR(reset_gpio),
				     "Failed to request reset GPIO\n");
	}

	/* GPIO was requested asserted (OUT_HIGH); hold, release, settle. */
	if (reset_gpio) {
		usleep_range(5000, 10000);
		gpiod_set_value_cansleep(reset_gpio, 0);
		usleep_range(5000, 10000);
	}

	/*
	 * This routes the USB DMA traffic to go through FPD path instead
	 * of reaching DDR directly. This traffic routing is needed to
	 * make SMMU and CCI work with USB DMA.
	 */
	if (of_dma_is_coherent(dev->of_node) || device_iommu_mapped(dev)) {
		reg = readl(priv_data->regs + XLNX_USB_TRAFFIC_ROUTE_CONFIG);
		reg |= XLNX_USB_TRAFFIC_ROUTE_FPD;
		writel(reg, priv_data->regs + XLNX_USB_TRAFFIC_ROUTE_CONFIG);
	}

err:
	return ret;
}
236
/* OF match table; .data carries the SoC-specific init hook. */
static const struct of_device_id dwc3_xlnx_of_match[] = {
	{
		.compatible = "xlnx,zynqmp-dwc3",
		.data = &dwc3_xlnx_init_zynqmp,
	},
	{
		.compatible = "xlnx,versal-dwc3",
		.data = &dwc3_xlnx_init_versal,
	},
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, dwc3_xlnx_of_match);
249
dwc3_set_swnode(struct device * dev)250 static int dwc3_set_swnode(struct device *dev)
251 {
252 struct device_node *np = dev->of_node, *dwc3_np;
253 struct property_entry props[2];
254 int prop_idx = 0, ret = 0;
255
256 dwc3_np = of_get_compatible_child(np, "snps,dwc3");
257 if (!dwc3_np) {
258 ret = -ENODEV;
259 dev_err(dev, "failed to find dwc3 core child\n");
260 return ret;
261 }
262
263 memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
264 if (of_dma_is_coherent(dwc3_np))
265 props[prop_idx++] = PROPERTY_ENTRY_U16("snps,gsbuscfg0-reqinfo",
266 0xffff);
267 of_node_put(dwc3_np);
268
269 if (prop_idx)
270 ret = device_create_managed_software_node(dev, props, NULL);
271
272 return ret;
273 }
274
/*
 * Probe: map the glue registers, enable clocks, run the SoC-specific
 * init hook, attach software-node properties, populate the dwc3 core
 * child device and bring up runtime PM.
 */
static int dwc3_xlnx_probe(struct platform_device *pdev)
{
	struct dwc3_xlnx *priv_data;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct of_device_id *match;
	void __iomem *regs;
	int ret;

	priv_data = devm_kzalloc(dev, sizeof(*priv_data), GFP_KERNEL);
	if (!priv_data)
		return -ENOMEM;

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return dev_err_probe(dev, PTR_ERR(regs), "failed to map registers\n");

	/* Probe only runs after a table match, so match is non-NULL here. */
	match = of_match_node(dwc3_xlnx_of_match, pdev->dev.of_node);

	priv_data->pltfm_init = match->data;
	priv_data->regs = regs;
	priv_data->dev = dev;

	platform_set_drvdata(pdev, priv_data);

	/* Returns the number of clocks found on success. */
	ret = devm_clk_bulk_get_all(priv_data->dev, &priv_data->clks);
	if (ret < 0)
		return ret;

	priv_data->num_clocks = ret;

	ret = clk_bulk_prepare_enable(priv_data->num_clocks, priv_data->clks);
	if (ret)
		return ret;

	/* SoC-specific reset/PHY/register bring-up (ZynqMP or Versal). */
	ret = priv_data->pltfm_init(priv_data);
	if (ret)
		goto err_clk_put;

	ret = dwc3_set_swnode(dev);
	if (ret)
		goto err_clk_put;

	/* Create the "snps,dwc3" core child device from the DT. */
	ret = of_platform_populate(np, NULL, NULL, dev);
	if (ret)
		goto err_clk_put;

	/* Clocks are already on, so mark active before enabling runtime PM. */
	pm_runtime_set_active(dev);
	ret = devm_pm_runtime_enable(dev);
	if (ret < 0)
		goto err_pm_set_suspended;

	pm_suspend_ignore_children(dev, false);
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		goto err_pm_set_suspended;

	return 0;

err_pm_set_suspended:
	/* Undo the populate and the set_active above, then fall through. */
	of_platform_depopulate(dev);
	pm_runtime_set_suspended(dev);

err_clk_put:
	clk_bulk_disable_unprepare(priv_data->num_clocks, priv_data->clks);

	return ret;
}
343
/*
 * Remove: tear down the core child, shut the clocks off and rewind the
 * runtime-PM state set up in probe.
 */
static void dwc3_xlnx_remove(struct platform_device *pdev)
{
	struct dwc3_xlnx *priv_data = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	/* Remove the dwc3 core child before its clocks go away. */
	of_platform_depopulate(dev);

	clk_bulk_disable_unprepare(priv_data->num_clocks, priv_data->clks);
	priv_data->num_clocks = 0;

	/* Balance probe's pm_runtime_resume_and_get() without a callback. */
	pm_runtime_put_noidle(dev);
	pm_runtime_set_suspended(dev);
}
357
dwc3_xlnx_runtime_suspend(struct device * dev)358 static int __maybe_unused dwc3_xlnx_runtime_suspend(struct device *dev)
359 {
360 struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
361
362 clk_bulk_disable(priv_data->num_clocks, priv_data->clks);
363
364 return 0;
365 }
366
dwc3_xlnx_runtime_resume(struct device * dev)367 static int __maybe_unused dwc3_xlnx_runtime_resume(struct device *dev)
368 {
369 struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
370
371 return clk_bulk_enable(priv_data->num_clocks, priv_data->clks);
372 }
373
/* Runtime idle: defer suspend through the autosuspend machinery. */
static int __maybe_unused dwc3_xlnx_runtime_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);

	return 0;
}
381
dwc3_xlnx_suspend(struct device * dev)382 static int __maybe_unused dwc3_xlnx_suspend(struct device *dev)
383 {
384 struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
385
386 phy_exit(priv_data->usb3_phy);
387
388 /* Disable the clocks */
389 clk_bulk_disable(priv_data->num_clocks, priv_data->clks);
390
391 return 0;
392 }
393
dwc3_xlnx_resume(struct device * dev)394 static int __maybe_unused dwc3_xlnx_resume(struct device *dev)
395 {
396 struct dwc3_xlnx *priv_data = dev_get_drvdata(dev);
397 int ret;
398
399 ret = clk_bulk_enable(priv_data->num_clocks, priv_data->clks);
400 if (ret)
401 return ret;
402
403 ret = phy_init(priv_data->usb3_phy);
404 if (ret < 0)
405 return ret;
406
407 ret = phy_power_on(priv_data->usb3_phy);
408 if (ret < 0) {
409 phy_exit(priv_data->usb3_phy);
410 return ret;
411 }
412
413 return 0;
414 }
415
/* System-sleep and runtime PM callbacks. */
static const struct dev_pm_ops dwc3_xlnx_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dwc3_xlnx_suspend, dwc3_xlnx_resume)
	SET_RUNTIME_PM_OPS(dwc3_xlnx_runtime_suspend,
			   dwc3_xlnx_runtime_resume, dwc3_xlnx_runtime_idle)
};
421
/* Platform driver glue; matched via the OF table above. */
static struct platform_driver dwc3_xlnx_driver = {
	.probe		= dwc3_xlnx_probe,
	.remove		= dwc3_xlnx_remove,
	.driver		= {
		.name		= "dwc3-xilinx",
		.of_match_table	= dwc3_xlnx_of_match,
		.pm		= &dwc3_xlnx_dev_pm_ops,
	},
};

module_platform_driver(dwc3_xlnx_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Xilinx DWC3 controller specific glue driver");
MODULE_AUTHOR("Manish Narani <manish.narani@xilinx.com>");
MODULE_AUTHOR("Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>");
438