1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6 #include <linux/platform_device.h>
7 #include <linux/slab.h>
8 #include <linux/err.h>
9 #include <linux/init.h>
10 #include <linux/list.h>
11 #include <linux/io.h>
12 #include <linux/of.h>
13 #include <linux/of_dma.h>
14 #include <linux/of_platform.h>
15
/* Crossbar flavor identifiers, used as .data in ti_dma_xbar_match below */
#define TI_XBAR_DRA7		0
#define TI_XBAR_AM335X		1
static const u32 ti_xbar_type[] = {
	[TI_XBAR_DRA7] = TI_XBAR_DRA7,
	[TI_XBAR_AM335X] = TI_XBAR_AM335X,
};
22
/* Top-level match table: selects the crossbar flavor from the compatible */
static const struct of_device_id ti_dma_xbar_match[] = {
	{
		.compatible = "ti,dra7-dma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_DRA7],
	},
	{
		.compatible = "ti,am335x-edma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_AM335X],
	},
	{},
};
34
/* Crossbar on AM335x/AM437x family */
#define TI_AM335X_XBAR_LINES	64

/* Per-device state for the AM335x/AM437x event crossbar */
struct ti_am335x_xbar_data {
	void __iomem *iomem;		/* mapped event-mux register space */

	struct dma_router dmarouter;	/* registered with the DMA core */

	u32 xbar_events; /* maximum number of events to select in xbar */
	u32 dma_requests; /* number of DMA requests on eDMA */
};

/* One routed event; returned as route_data and undone in the free callback */
struct ti_am335x_xbar_map {
	u16 dma_line;	/* eDMA request line (crossbar output) */
	u8 mux_val;	/* crossbar event selected for that line */
};
51
/*
 * Program one crossbar output: select event @val for eDMA line @event.
 * The TPCC_EVT_MUX_60_63 register has a reversed byte layout (event 63 at
 * the lowest byte, event 60 at the highest), so those four events are
 * remapped before the write.
 */
static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
	int offset = event;

	if (event >= 60 && event <= 63)
		offset = 63 - (event % 4);

	writeb_relaxed(val, iomem + offset);
}
64
ti_am335x_xbar_free(struct device * dev,void * route_data)65 static void ti_am335x_xbar_free(struct device *dev, void *route_data)
66 {
67 struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
68 struct ti_am335x_xbar_map *map = route_data;
69
70 dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
71 map->mux_val, map->dma_line);
72
73 ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
74 kfree(map);
75 }
76
ti_am335x_xbar_route_allocate(struct of_phandle_args * dma_spec,struct of_dma * ofdma)77 static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
78 struct of_dma *ofdma)
79 {
80 struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
81 struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
82 struct ti_am335x_xbar_map *map = ERR_PTR(-EINVAL);
83
84 if (dma_spec->args_count != 3)
85 goto out_put_pdev;
86
87 if (dma_spec->args[2] >= xbar->xbar_events) {
88 dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
89 dma_spec->args[2]);
90 goto out_put_pdev;
91 }
92
93 if (dma_spec->args[0] >= xbar->dma_requests) {
94 dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
95 dma_spec->args[0]);
96 goto out_put_pdev;
97 }
98
99 /* The of_node_put() will be done in the core for the node */
100 dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
101 if (!dma_spec->np) {
102 dev_err(&pdev->dev, "Can't get DMA master\n");
103 goto out_put_pdev;
104 }
105
106 map = kzalloc(sizeof(*map), GFP_KERNEL);
107 if (!map) {
108 of_node_put(dma_spec->np);
109 map = ERR_PTR(-ENOMEM);
110 goto out_put_pdev;
111 }
112
113 map->dma_line = (u16)dma_spec->args[0];
114 map->mux_val = (u8)dma_spec->args[2];
115
116 dma_spec->args[2] = 0;
117 dma_spec->args_count = 2;
118
119 dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
120 map->mux_val, map->dma_line);
121
122 ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
123
124 out_put_pdev:
125 put_device(&pdev->dev);
126
127 return map;
128 }
129
/* DMA masters the AM335x crossbar can forward to (eDMA TPCC only) */
static const struct of_device_id ti_am335x_master_match[] __maybe_unused = {
	{ .compatible = "ti,edma3-tpcc", },
	{},
};
134
ti_am335x_xbar_probe(struct platform_device * pdev)135 static int ti_am335x_xbar_probe(struct platform_device *pdev)
136 {
137 struct device_node *node = pdev->dev.of_node;
138 const struct of_device_id *match;
139 struct device_node *dma_node;
140 struct ti_am335x_xbar_data *xbar;
141 void __iomem *iomem;
142 int i, ret;
143
144 if (!node)
145 return -ENODEV;
146
147 xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
148 if (!xbar)
149 return -ENOMEM;
150
151 dma_node = of_parse_phandle(node, "dma-masters", 0);
152 if (!dma_node) {
153 dev_err(&pdev->dev, "Can't get DMA master node\n");
154 return -ENODEV;
155 }
156
157 match = of_match_node(ti_am335x_master_match, dma_node);
158 if (!match) {
159 dev_err(&pdev->dev, "DMA master is not supported\n");
160 of_node_put(dma_node);
161 return -EINVAL;
162 }
163
164 if (of_property_read_u32(dma_node, "dma-requests",
165 &xbar->dma_requests)) {
166 dev_info(&pdev->dev,
167 "Missing XBAR output information, using %u.\n",
168 TI_AM335X_XBAR_LINES);
169 xbar->dma_requests = TI_AM335X_XBAR_LINES;
170 }
171 of_node_put(dma_node);
172
173 if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
174 dev_info(&pdev->dev,
175 "Missing XBAR input information, using %u.\n",
176 TI_AM335X_XBAR_LINES);
177 xbar->xbar_events = TI_AM335X_XBAR_LINES;
178 }
179
180 iomem = devm_platform_ioremap_resource(pdev, 0);
181 if (IS_ERR(iomem))
182 return PTR_ERR(iomem);
183
184 xbar->iomem = iomem;
185
186 xbar->dmarouter.dev = &pdev->dev;
187 xbar->dmarouter.route_free = ti_am335x_xbar_free;
188
189 platform_set_drvdata(pdev, xbar);
190
191 /* Reset the crossbar */
192 for (i = 0; i < xbar->dma_requests; i++)
193 ti_am335x_xbar_write(xbar->iomem, i, 0);
194
195 ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
196 &xbar->dmarouter);
197
198 return ret;
199 }
200
/* Crossbar on DRA7xx family */
#define TI_DRA7_XBAR_OUTPUTS	127
#define TI_DRA7_XBAR_INPUTS	256

/* Per-device state for the DRA7 DMA request crossbar */
struct ti_dra7_xbar_data {
	void __iomem *iomem;		/* mapped crossbar mux registers */

	struct dma_router dmarouter;	/* registered with the DMA core */
	struct mutex mutex;		/* protects dma_inuse */
	unsigned long *dma_inuse;	/* bitmap of routed/reserved outputs */

	u16 safe_val; /* Value to reset the crossbar lines to */
	u32 xbar_requests; /* number of DMA requests connected to XBAR */
	u32 dma_requests; /* number of DMA requests forwarded to DMA */
	u32 dma_offset;	/* added to the picked output for the DMA master */
};

/* One routed request; returned as route_data, undone in the free callback */
struct ti_dra7_xbar_map {
	u16 xbar_in;	/* crossbar input (request) number */
	int xbar_out;	/* crossbar output (DMA request line) allocated */
};
222
/* Program crossbar output @xbar; each mux register is 16 bits wide */
static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
{
	writew_relaxed(val, iomem + 2 * xbar);
}
227
ti_dra7_xbar_free(struct device * dev,void * route_data)228 static void ti_dra7_xbar_free(struct device *dev, void *route_data)
229 {
230 struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
231 struct ti_dra7_xbar_map *map = route_data;
232
233 dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
234 map->xbar_in, map->xbar_out);
235
236 ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
237 mutex_lock(&xbar->mutex);
238 clear_bit(map->xbar_out, xbar->dma_inuse);
239 mutex_unlock(&xbar->mutex);
240 kfree(map);
241 }
242
ti_dra7_xbar_route_allocate(struct of_phandle_args * dma_spec,struct of_dma * ofdma)243 static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
244 struct of_dma *ofdma)
245 {
246 struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
247 struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
248 struct ti_dra7_xbar_map *map = ERR_PTR(-EINVAL);
249
250 if (dma_spec->args[0] >= xbar->xbar_requests) {
251 dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
252 dma_spec->args[0]);
253 goto out_put_pdev;
254 }
255
256 /* The of_node_put() will be done in the core for the node */
257 dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
258 if (!dma_spec->np) {
259 dev_err(&pdev->dev, "Can't get DMA master\n");
260 goto out_put_pdev;
261 }
262
263 map = kzalloc(sizeof(*map), GFP_KERNEL);
264 if (!map) {
265 of_node_put(dma_spec->np);
266 map = ERR_PTR(-ENOMEM);
267 goto out_put_pdev;
268 }
269
270 mutex_lock(&xbar->mutex);
271 map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
272 xbar->dma_requests);
273 if (map->xbar_out == xbar->dma_requests) {
274 mutex_unlock(&xbar->mutex);
275 dev_err(&pdev->dev, "Run out of free DMA requests\n");
276 kfree(map);
277 of_node_put(dma_spec->np);
278 map = ERR_PTR(-ENOMEM);
279 goto out_put_pdev;
280 }
281 set_bit(map->xbar_out, xbar->dma_inuse);
282 mutex_unlock(&xbar->mutex);
283
284 map->xbar_in = (u16)dma_spec->args[0];
285
286 dma_spec->args[0] = map->xbar_out + xbar->dma_offset;
287
288 dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
289 map->xbar_in, map->xbar_out);
290
291 ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
292
293 out_put_pdev:
294 put_device(&pdev->dev);
295
296 return map;
297 }
298
/*
 * Per-master request-number offset, added to the picked crossbar output in
 * ti_dra7_xbar_route_allocate(); selected via the master match data below.
 */
#define TI_XBAR_EDMA_OFFSET	0
#define TI_XBAR_SDMA_OFFSET	1
static const u32 ti_dma_offset[] = {
	[TI_XBAR_EDMA_OFFSET] = 0,
	[TI_XBAR_SDMA_OFFSET] = 1,
};

/* DMA masters the DRA7 crossbar can forward to */
static const struct of_device_id ti_dra7_master_match[] __maybe_unused = {
	{
		.compatible = "ti,omap4430-sdma",
		.data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{},
};
321
/* Mark @len crossbar outputs starting at @offset as unavailable in @p */
static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
	int i;

	for (i = 0; i < len; i++)
		set_bit(offset + i, p);
}
327
/*
 * Probe the DRA7 flavor of the crossbar: validate the DMA master, size the
 * input/output space from DT (with defaults), honor reserved request
 * ranges, then park all free outputs on the safe value and register the
 * router with the DMA core.
 */
static int ti_dra7_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_dra7_xbar_data *xbar;
	struct property *prop;
	u32 safe_val;
	int sz;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	/* The match data carries the master-specific request offset */
	match = of_match_node(ti_dra7_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		of_node_put(dma_node);
		return -EINVAL;
	}

	/* Crossbar outputs: the master's dma-requests, with a fallback */
	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_DRA7_XBAR_OUTPUTS);
		xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
	}
	of_node_put(dma_node);

	/* One bit per output: tracks routed and reserved lines */
	xbar->dma_inuse = devm_kcalloc(&pdev->dev,
				       BITS_TO_LONGS(xbar->dma_requests),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!xbar->dma_inuse)
		return -ENOMEM;

	/* Crossbar inputs (requests): from our own node, with a fallback */
	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_DRA7_XBAR_INPUTS);
		xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
	}

	/* Optional parking value for unused outputs; defaults to 0 */
	if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
		xbar->safe_val = (u16)safe_val;

	/* Reserve request ranges claimed by other users (pairs of u32) */
	prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
	if (prop) {
		const char pname[] = "ti,reserved-dma-request-ranges";
		u32 (*rsv_events)[2];
		size_t nelm = sz / sizeof(*rsv_events);
		int i;

		if (!nelm)
			return -EINVAL;

		rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
		if (!rsv_events)
			return -ENOMEM;

		ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
						 nelm * 2);
		if (ret) {
			kfree(rsv_events);
			return ret;
		}

		for (i = 0; i < nelm; i++) {
			ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
					     xbar->dma_inuse);
		}
		kfree(rsv_events);
	}

	iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_dra7_xbar_free;
	xbar->dma_offset = *(u32 *)match->data;

	mutex_init(&xbar->mutex);
	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar: park every non-reserved output on safe_val */
	for (i = 0; i < xbar->dma_requests; i++) {
		if (!test_bit(i, xbar->dma_inuse))
			ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
	}

	ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
				     &xbar->dmarouter);
	if (ret) {
		/* Restore the defaults for the crossbar */
		for (i = 0; i < xbar->dma_requests; i++) {
			if (!test_bit(i, xbar->dma_inuse))
				ti_dra7_xbar_write(xbar->iomem, i, i);
		}
	}

	return ret;
}
445
ti_dma_xbar_probe(struct platform_device * pdev)446 static int ti_dma_xbar_probe(struct platform_device *pdev)
447 {
448 const struct of_device_id *match;
449 int ret;
450
451 match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
452 if (unlikely(!match))
453 return -EINVAL;
454
455 switch (*(u32 *)match->data) {
456 case TI_XBAR_DRA7:
457 ret = ti_dra7_xbar_probe(pdev);
458 break;
459 case TI_XBAR_AM335X:
460 ret = ti_am335x_xbar_probe(pdev);
461 break;
462 default:
463 dev_err(&pdev->dev, "Unsupported crossbar\n");
464 ret = -ENODEV;
465 break;
466 }
467
468 return ret;
469 }
470
/* No .remove: the router cannot be unbound once clients hold routes */
static struct platform_driver ti_dma_xbar_driver = {
	.driver = {
		.name = "ti-dma-crossbar",
		.of_match_table = ti_dma_xbar_match,
	},
	.probe	= ti_dma_xbar_probe,
};
478
/* Registered at arch_initcall time so the router is available early */
static int omap_dmaxbar_init(void)
{
	return platform_driver_register(&ti_dma_xbar_driver);
}
arch_initcall(omap_dmaxbar_init);
484