xref: /linux/drivers/dma/dw/platform.c (revision e7b8514e4d68bec21fc6385fa0a66797ddc34ac9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Platform driver for the Synopsys DesignWare DMA Controller
4  *
5  * Copyright (C) 2007-2008 Atmel Corporation
6  * Copyright (C) 2010-2011 ST Microelectronics
7  * Copyright (C) 2013 Intel Corporation
8  *
9  * Some parts of this driver are derived from the original dw_dmac.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/device.h>
14 #include <linux/clk.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/platform_device.h>
17 #include <linux/dmaengine.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/of.h>
20 #include <linux/of_dma.h>
21 #include <linux/acpi.h>
22 #include <linux/acpi_dma.h>
23 
24 #include "internal.h"
25 
26 #define DRV_NAME	"dw_dmac"
27 
28 static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
29 					struct of_dma *ofdma)
30 {
31 	struct dw_dma *dw = ofdma->of_dma_data;
32 	struct dw_dma_slave slave = {
33 		.dma_dev = dw->dma.dev,
34 	};
35 	dma_cap_mask_t cap;
36 
37 	if (dma_spec->args_count != 3)
38 		return NULL;
39 
40 	slave.src_id = dma_spec->args[0];
41 	slave.dst_id = dma_spec->args[0];
42 	slave.m_master = dma_spec->args[1];
43 	slave.p_master = dma_spec->args[2];
44 
45 	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
46 		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
47 		    slave.m_master >= dw->pdata->nr_masters ||
48 		    slave.p_master >= dw->pdata->nr_masters))
49 		return NULL;
50 
51 	dma_cap_zero(cap);
52 	dma_cap_set(DMA_SLAVE, cap);
53 
54 	/* TODO: there should be a simpler way to do this */
55 	return dma_request_channel(cap, dw_dma_filter, &slave);
56 }
57 
58 #ifdef CONFIG_ACPI
59 static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
60 {
61 	struct acpi_dma_spec *dma_spec = param;
62 	struct dw_dma_slave slave = {
63 		.dma_dev = dma_spec->dev,
64 		.src_id = dma_spec->slave_id,
65 		.dst_id = dma_spec->slave_id,
66 		.m_master = 0,
67 		.p_master = 1,
68 	};
69 
70 	return dw_dma_filter(chan, &slave);
71 }
72 
73 static void dw_dma_acpi_controller_register(struct dw_dma *dw)
74 {
75 	struct device *dev = dw->dma.dev;
76 	struct acpi_dma_filter_info *info;
77 	int ret;
78 
79 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
80 	if (!info)
81 		return;
82 
83 	dma_cap_zero(info->dma_cap);
84 	dma_cap_set(DMA_SLAVE, info->dma_cap);
85 	info->filter_fn = dw_dma_acpi_filter;
86 
87 	ret = acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info);
88 	if (ret)
89 		dev_err(dev, "could not register acpi_dma_controller\n");
90 }
91 
92 static void dw_dma_acpi_controller_free(struct dw_dma *dw)
93 {
94 	struct device *dev = dw->dma.dev;
95 
96 	acpi_dma_controller_free(dev);
97 }
98 #else /* !CONFIG_ACPI */
/* No-op stubs used when ACPI support is not compiled in. */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
static inline void dw_dma_acpi_controller_free(struct dw_dma *dw) {}
101 #endif /* !CONFIG_ACPI */
102 
103 #ifdef CONFIG_OF
/*
 * Build a struct dw_dma_platform_data from the device's DT node.
 *
 * "dma-masters" and "dma-channels" are mandatory and range-checked
 * against the driver maxima; all other properties are optional and,
 * when absent, leave the zero defaults from devm_kzalloc() in place.
 * Returns NULL on missing node, invalid mandatory property or
 * allocation failure.  The returned pdata is devm-managed.
 */
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
	u32 nr_masters;
	u32 nr_channels;

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	if (of_property_read_u32(np, "dma-masters", &nr_masters))
		return NULL;
	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;
	if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->nr_masters = nr_masters;
	pdata->nr_channels = nr_channels;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	/*
	 * Preferred "data-width" values are used verbatim; the legacy
	 * "data_width" property carries an encoded value that is expanded
	 * via BIT() (only the low 3 bits are honoured).
	 */
	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];
	} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

	/* Without a "multi-block" property every channel defaults to 1. */
	if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = mb[tmp];
	} else {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = 1;
	}

	/* Out-of-range protection control invalidates the whole pdata. */
	if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
		if (tmp > CHAN_PROTCTL_MASK)
			return NULL;
		pdata->protctl = tmp;
	}

	return pdata;
}
168 #else
/* Without CONFIG_OF there is no DT pdata source; always report none. */
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
	return NULL;
}
174 #endif
175 
/*
 * Probe one DesignWare DMA controller instance.  Acquires IRQ, MMIO and
 * the "hclk" clock, resolves platform data (board pdata beats DT), runs
 * the chip-specific core probe and finally exposes the controller to
 * DT and ACPI clients.
 */
static int dw_probe(struct platform_device *pdev)
{
	const struct dw_dma_chip_pdata *match;
	struct dw_dma_chip_pdata *data;
	struct dw_dma_chip *chip;
	struct device *dev = &pdev->dev;
	int err;

	match = device_get_match_data(dev);
	if (!match)
		return -ENODEV;

	/*
	 * Duplicate the const match template so the per-device fields
	 * (pdata, chip) below can be filled in without touching it.
	 */
	data = devm_kmemdup(&pdev->dev, match, sizeof(*match), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	chip->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	/* Board platform data takes precedence; fall back to DT parsing. */
	if (!data->pdata)
		data->pdata = dev_get_platdata(dev);
	if (!data->pdata)
		data->pdata = dw_dma_parse_dt(pdev);

	chip->dev = dev;
	chip->id = pdev->id;
	chip->pdata = data->pdata;

	data->chip = chip;

	chip->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);
	err = clk_prepare_enable(chip->clk);
	if (err)
		return err;

	pm_runtime_enable(&pdev->dev);

	/* Chip-specific core probe (dw_dma vs. idma32 variants). */
	err = data->probe(chip);
	if (err)
		goto err_dw_dma_probe;

	platform_set_drvdata(pdev, data);

	/* Xlate registration failures are logged but non-fatal. */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 dw_dma_of_xlate, chip->dw);
		if (err)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_register(chip->dw);

	return 0;

err_dw_dma_probe:
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);
	return err;
}
252 
/*
 * Tear down a controller instance: unregister the ACPI/DT client hooks,
 * run the chip-specific remove routine, then release runtime PM and the
 * clock.  Always returns 0; a failing core remove is only warned about.
 */
static int dw_remove(struct platform_device *pdev)
{
	struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev);
	struct dw_dma_chip *chip = data->chip;
	int ret;

	if (ACPI_HANDLE(&pdev->dev))
		dw_dma_acpi_controller_free(chip->dw);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	/* Continue the teardown even if the core remove reports an error. */
	ret = data->remove(chip);
	if (ret)
		dev_warn(chip->dev, "can't remove device properly: %d\n", ret);

	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(chip->clk);

	return 0;
}
274 
/* Quiesce the DMA hardware and gate its clock on system shutdown. */
static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev);
	struct dw_dma_chip *chip = data->chip;

	/*
	 * We have to call do_dw_dma_disable() to stop any ongoing transfer. On
	 * some platforms we can't do that since DMA device is powered off.
	 * Moreover we have no possibility to check if the platform is affected
	 * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put()
	 * unconditionally. On the other hand we can't use
	 * pm_runtime_suspended() because runtime PM framework is not fully
	 * used by the driver.
	 */
	pm_runtime_get_sync(chip->dev);
	do_dw_dma_disable(chip);
	pm_runtime_put_sync_suspend(chip->dev);

	clk_disable_unprepare(chip->clk);
}
295 
296 #ifdef CONFIG_OF
/* DT match table; .data selects the generic DW DMA chip operations. */
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
302 #endif
303 
304 #ifdef CONFIG_ACPI
/* ACPI IDs; driver_data selects between the DW DMA and iDMA 32-bit variants. */
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
	/* Controllers handled by the generic DW DMA operations */
	{ "INTL9C60", (kernel_ulong_t)&dw_dma_chip_pdata },
	{ "80862286", (kernel_ulong_t)&dw_dma_chip_pdata },
	{ "808622C0", (kernel_ulong_t)&dw_dma_chip_pdata },

	/* Elkhart Lake iDMA 32-bit (PSE DMA) */
	{ "80864BB4", (kernel_ulong_t)&idma32_chip_pdata },
	{ "80864BB5", (kernel_ulong_t)&idma32_chip_pdata },
	{ "80864BB6", (kernel_ulong_t)&idma32_chip_pdata },

	{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
318 #endif
319 
320 #ifdef CONFIG_PM_SLEEP
321 
322 static int dw_suspend_late(struct device *dev)
323 {
324 	struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
325 	struct dw_dma_chip *chip = data->chip;
326 
327 	do_dw_dma_disable(chip);
328 	clk_disable_unprepare(chip->clk);
329 
330 	return 0;
331 }
332 
333 static int dw_resume_early(struct device *dev)
334 {
335 	struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
336 	struct dw_dma_chip *chip = data->chip;
337 	int ret;
338 
339 	ret = clk_prepare_enable(chip->clk);
340 	if (ret)
341 		return ret;
342 
343 	return do_dw_dma_enable(chip);
344 }
345 
346 #endif /* CONFIG_PM_SLEEP */
347 
/*
 * Suspend in the "late" phase and resume in the "early" phase,
 * presumably so DMA client devices are quiesced first — the macro
 * expands to nothing when CONFIG_PM_SLEEP is off.
 */
static const struct dev_pm_ops dw_dev_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_suspend_late, dw_resume_early)
};
351 
/* Platform driver glue; match tables compile away when OF/ACPI are off. */
static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.shutdown       = dw_shutdown,
	.driver = {
		.name	= DRV_NAME,
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
		.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
	},
};
363 
/*
 * Registered via subsys_initcall (not module_init), which runs earlier
 * in boot — presumably so the DMA controller is available before its
 * client drivers probe.
 */
static int __init dw_init(void)
{
	return platform_driver_register(&dw_driver);
}
subsys_initcall(dw_init);
369 
/* Module unload: unregister the platform driver. */
static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);
375 
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
/* Allow autoloading when a "dw_dmac" platform device is instantiated. */
MODULE_ALIAS("platform:" DRV_NAME);
379