xref: /linux/drivers/dma/qcom/hidma_mgmt.c (revision da1d9caf95def6f0320819cf941c9fd1069ba9e1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Qualcomm Technologies HIDMA DMA engine Management interface
4  *
5  * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
6  */
7 
8 #include <linux/dmaengine.h>
9 #include <linux/acpi.h>
10 #include <linux/of.h>
11 #include <linux/property.h>
12 #include <linux/of_address.h>
13 #include <linux/of_irq.h>
14 #include <linux/of_platform.h>
15 #include <linux/module.h>
16 #include <linux/uaccess.h>
17 #include <linux/slab.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/bitops.h>
20 #include <linux/dma-mapping.h>
21 
22 #include "hidma_mgmt.h"
23 
/* Register offsets within the management address space. */
#define HIDMA_QOS_N_OFFSET		0x700
#define HIDMA_CFG_OFFSET		0x400
#define HIDMA_MAX_BUS_REQ_LEN_OFFSET	0x41C
#define HIDMA_MAX_XACTIONS_OFFSET	0x420
#define HIDMA_HW_VERSION_OFFSET	0x424
#define HIDMA_CHRESET_TIMEOUT_OFFSET	0x418

/* Field masks for the registers above. */
#define HIDMA_MAX_WR_XACTIONS_MASK	GENMASK(4, 0)
#define HIDMA_MAX_RD_XACTIONS_MASK	GENMASK(4, 0)
#define HIDMA_WEIGHT_MASK		GENMASK(6, 0)
#define HIDMA_MAX_BUS_REQ_LEN_MASK	GENMASK(15, 0)
#define HIDMA_CHRESET_TIMEOUT_MASK	GENMASK(19, 0)

/* Bit positions of the fields within their registers. */
#define HIDMA_MAX_WR_XACTIONS_BIT_POS	16
#define HIDMA_MAX_BUS_WR_REQ_BIT_POS	16
#define HIDMA_WRR_BIT_POS		8
#define HIDMA_PRIORITY_BIT_POS		15

/* Runtime-PM autosuspend delay in milliseconds. */
#define HIDMA_AUTOSUSPEND_TIMEOUT	2000
/* Largest value accepted for a per-channel arbitration weight. */
#define HIDMA_MAX_CHANNEL_WEIGHT	15

/*
 * Module parameters. A non-zero value overrides the corresponding
 * ACPI/DT property at probe time; otherwise the parameter is filled
 * back with the firmware-provided value.
 */
static unsigned int max_write_request;
module_param(max_write_request, uint, 0644);
MODULE_PARM_DESC(max_write_request,
		"maximum write burst (default: ACPI/DT value)");

static unsigned int max_read_request;
module_param(max_read_request, uint, 0644);
MODULE_PARM_DESC(max_read_request,
		"maximum read burst (default: ACPI/DT value)");

static unsigned int max_wr_xactions;
module_param(max_wr_xactions, uint, 0644);
MODULE_PARM_DESC(max_wr_xactions,
	"maximum number of write transactions (default: ACPI/DT value)");

static unsigned int max_rd_xactions;
module_param(max_rd_xactions, uint, 0644);
MODULE_PARM_DESC(max_rd_xactions,
	"maximum number of read transactions (default: ACPI/DT value)");
/**
 * hidma_mgmt_setup() - validate and program the HIDMA management registers
 * @mgmtdev: management device whose limit/QoS fields are written to HW
 *
 * Validates the burst sizes (power of two, 128..1024 bytes), the
 * outstanding-transaction limits and the per-channel priority/weight
 * arrays, then programs them into the management register space.
 *
 * Return: 0 on success, -EINVAL if any parameter is out of range.
 */
int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
{
	unsigned int i;
	u32 val;

	/* Write burst must be a power of two between 128 and 1024 bytes. */
	if (!is_power_of_2(mgmtdev->max_write_request) ||
	    (mgmtdev->max_write_request < 128) ||
	    (mgmtdev->max_write_request > 1024)) {
		dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n",
			mgmtdev->max_write_request);
		return -EINVAL;
	}

	/* Same constraints for the read burst. */
	if (!is_power_of_2(mgmtdev->max_read_request) ||
	    (mgmtdev->max_read_request < 128) ||
	    (mgmtdev->max_read_request > 1024)) {
		dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n",
			mgmtdev->max_read_request);
		return -EINVAL;
	}

	/* Transaction counts must fit in their 5-bit register fields. */
	if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) {
		dev_err(&mgmtdev->pdev->dev,
			"max_wr_xactions cannot be bigger than %ld\n",
			HIDMA_MAX_WR_XACTIONS_MASK);
		return -EINVAL;
	}

	if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) {
		dev_err(&mgmtdev->pdev->dev,
			"max_rd_xactions cannot be bigger than %ld\n",
			HIDMA_MAX_RD_XACTIONS_MASK);
		return -EINVAL;
	}

	/* Per-channel QoS sanity checks before touching any register. */
	for (i = 0; i < mgmtdev->dma_channels; i++) {
		if (mgmtdev->priority[i] > 1) {
			dev_err(&mgmtdev->pdev->dev,
				"priority can be 0 or 1\n");
			return -EINVAL;
		}

		if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) {
			dev_err(&mgmtdev->pdev->dev,
				"max value of weight can be %d.\n",
				HIDMA_MAX_CHANNEL_WEIGHT);
			return -EINVAL;
		}

		/* weight needs to be at least one */
		if (mgmtdev->weight[i] == 0)
			mgmtdev->weight[i] = 1;
	}

	/* Wake the device; the register space is inaccessible when suspended. */
	pm_runtime_get_sync(&mgmtdev->pdev->dev);

	/* RMW the bus request lengths: write burst in [31:16], read in [15:0]. */
	val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
	val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS);
	val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS;
	val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK;
	val |= mgmtdev->max_read_request;
	writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);

	/* Likewise for the transaction counts: write in [20:16], read in [4:0]. */
	val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
	val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS);
	val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS;
	val &= ~HIDMA_MAX_RD_XACTIONS_MASK;
	val |= mgmtdev->max_rd_xactions;
	writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);

	/* Cache the HW revision: major in bits [31:28], minor in [19:16]. */
	mgmtdev->hw_version =
	    readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET);
	mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF;
	mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF;

	/* Program priority and round-robin weight per channel QoS register. */
	for (i = 0; i < mgmtdev->dma_channels; i++) {
		u32 weight = mgmtdev->weight[i];
		u32 priority = mgmtdev->priority[i];

		val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
		val &= ~(1 << HIDMA_PRIORITY_BIT_POS);
		val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS;
		val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS);
		val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS;
		writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
	}

	/* Channel reset timeout lives in the low 20 bits of its register. */
	val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
	val &= ~HIDMA_CHRESET_TIMEOUT_MASK;
	val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK;
	writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);

	/* Allow the device to autosuspend again. */
	pm_runtime_mark_last_busy(&mgmtdev->pdev->dev);
	pm_runtime_put_autosuspend(&mgmtdev->pdev->dev);
	return 0;
}
EXPORT_SYMBOL_GPL(hidma_mgmt_setup);
161 
162 static int hidma_mgmt_probe(struct platform_device *pdev)
163 {
164 	struct hidma_mgmt_dev *mgmtdev;
165 	struct resource *res;
166 	void __iomem *virtaddr;
167 	int irq;
168 	int rc;
169 	u32 val;
170 
171 	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
172 	pm_runtime_use_autosuspend(&pdev->dev);
173 	pm_runtime_set_active(&pdev->dev);
174 	pm_runtime_enable(&pdev->dev);
175 	pm_runtime_get_sync(&pdev->dev);
176 
177 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
178 	virtaddr = devm_ioremap_resource(&pdev->dev, res);
179 	if (IS_ERR(virtaddr)) {
180 		rc = -ENOMEM;
181 		goto out;
182 	}
183 
184 	irq = platform_get_irq(pdev, 0);
185 	if (irq < 0) {
186 		rc = irq;
187 		goto out;
188 	}
189 
190 	mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL);
191 	if (!mgmtdev) {
192 		rc = -ENOMEM;
193 		goto out;
194 	}
195 
196 	mgmtdev->pdev = pdev;
197 	mgmtdev->addrsize = resource_size(res);
198 	mgmtdev->virtaddr = virtaddr;
199 
200 	rc = device_property_read_u32(&pdev->dev, "dma-channels",
201 				      &mgmtdev->dma_channels);
202 	if (rc) {
203 		dev_err(&pdev->dev, "number of channels missing\n");
204 		goto out;
205 	}
206 
207 	rc = device_property_read_u32(&pdev->dev,
208 				      "channel-reset-timeout-cycles",
209 				      &mgmtdev->chreset_timeout_cycles);
210 	if (rc) {
211 		dev_err(&pdev->dev, "channel reset timeout missing\n");
212 		goto out;
213 	}
214 
215 	rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes",
216 				      &mgmtdev->max_write_request);
217 	if (rc) {
218 		dev_err(&pdev->dev, "max-write-burst-bytes missing\n");
219 		goto out;
220 	}
221 
222 	if (max_write_request &&
223 			(max_write_request != mgmtdev->max_write_request)) {
224 		dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
225 			max_write_request);
226 		mgmtdev->max_write_request = max_write_request;
227 	} else
228 		max_write_request = mgmtdev->max_write_request;
229 
230 	rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
231 				      &mgmtdev->max_read_request);
232 	if (rc) {
233 		dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
234 		goto out;
235 	}
236 	if (max_read_request &&
237 			(max_read_request != mgmtdev->max_read_request)) {
238 		dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
239 			max_read_request);
240 		mgmtdev->max_read_request = max_read_request;
241 	} else
242 		max_read_request = mgmtdev->max_read_request;
243 
244 	rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
245 				      &mgmtdev->max_wr_xactions);
246 	if (rc) {
247 		dev_err(&pdev->dev, "max-write-transactions missing\n");
248 		goto out;
249 	}
250 	if (max_wr_xactions &&
251 			(max_wr_xactions != mgmtdev->max_wr_xactions)) {
252 		dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
253 			max_wr_xactions);
254 		mgmtdev->max_wr_xactions = max_wr_xactions;
255 	} else
256 		max_wr_xactions = mgmtdev->max_wr_xactions;
257 
258 	rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
259 				      &mgmtdev->max_rd_xactions);
260 	if (rc) {
261 		dev_err(&pdev->dev, "max-read-transactions missing\n");
262 		goto out;
263 	}
264 	if (max_rd_xactions &&
265 			(max_rd_xactions != mgmtdev->max_rd_xactions)) {
266 		dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
267 			max_rd_xactions);
268 		mgmtdev->max_rd_xactions = max_rd_xactions;
269 	} else
270 		max_rd_xactions = mgmtdev->max_rd_xactions;
271 
272 	mgmtdev->priority = devm_kcalloc(&pdev->dev,
273 					 mgmtdev->dma_channels,
274 					 sizeof(*mgmtdev->priority),
275 					 GFP_KERNEL);
276 	if (!mgmtdev->priority) {
277 		rc = -ENOMEM;
278 		goto out;
279 	}
280 
281 	mgmtdev->weight = devm_kcalloc(&pdev->dev,
282 				       mgmtdev->dma_channels,
283 				       sizeof(*mgmtdev->weight), GFP_KERNEL);
284 	if (!mgmtdev->weight) {
285 		rc = -ENOMEM;
286 		goto out;
287 	}
288 
289 	rc = hidma_mgmt_setup(mgmtdev);
290 	if (rc) {
291 		dev_err(&pdev->dev, "setup failed\n");
292 		goto out;
293 	}
294 
295 	/* start the HW */
296 	val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
297 	val |= 1;
298 	writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
299 
300 	rc = hidma_mgmt_init_sys(mgmtdev);
301 	if (rc) {
302 		dev_err(&pdev->dev, "sysfs setup failed\n");
303 		goto out;
304 	}
305 
306 	dev_info(&pdev->dev,
307 		 "HW rev: %d.%d @ %pa with %d physical channels\n",
308 		 mgmtdev->hw_version_major, mgmtdev->hw_version_minor,
309 		 &res->start, mgmtdev->dma_channels);
310 
311 	platform_set_drvdata(pdev, mgmtdev);
312 	pm_runtime_mark_last_busy(&pdev->dev);
313 	pm_runtime_put_autosuspend(&pdev->dev);
314 	return 0;
315 out:
316 	pm_runtime_put_sync_suspend(&pdev->dev);
317 	pm_runtime_disable(&pdev->dev);
318 	return rc;
319 }
320 
#if IS_ENABLED(CONFIG_ACPI)
/* ACPI match table; QCOM8060 is the management-interface HID. */
static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
	{"QCOM8060"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif

/* Device-tree match table, also used by hidma_mgmt_init() to find nodes. */
static const struct of_device_id hidma_mgmt_match[] = {
	{.compatible = "qcom,hidma-mgmt-1.0",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_mgmt_match);

/* No .remove callback: this module is not designed to be unloaded. */
static struct platform_driver hidma_mgmt_driver = {
	.probe = hidma_mgmt_probe,
	.driver = {
		   .name = "hidma-mgmt",
		   .of_match_table = hidma_mgmt_match,
		   .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
	},
};
343 
#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
/* Monotonic id handed to each child platform device created below. */
static int object_counter;

/**
 * hidma_mgmt_of_populate_channels() - create platform devices for DT children
 * @np: the management device node whose children describe DMA channels
 *
 * Walks the available children of @np, translates each child's two
 * register regions and interrupt into a resource array, and registers a
 * platform device per child so the channel driver can bind to it.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
{
	struct platform_device *pdev_parent = of_find_device_by_node(np);
	struct platform_device_info pdevinfo;
	struct device_node *child;
	struct resource *res;
	int ret = 0;

	/* allocate a resource array */
	res = kcalloc(3, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	for_each_available_child_of_node(np, child) {
		struct platform_device *new_pdev;

		/*
		 * of_address_to_resource() returns 0 on success, so bail
		 * out on any non-zero error code. (The previous "if (!ret)"
		 * tests aborted on *success*, skipping every valid child.)
		 */
		ret = of_address_to_resource(child, 0, &res[0]);
		if (ret)
			goto out;

		ret = of_address_to_resource(child, 1, &res[1]);
		if (ret)
			goto out;

		/* of_irq_to_resource() returns the irq number (> 0) on success. */
		ret = of_irq_to_resource(child, 0, &res[2]);
		if (ret <= 0)
			goto out;

		memset(&pdevinfo, 0, sizeof(pdevinfo));
		pdevinfo.fwnode = &child->fwnode;
		pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
		pdevinfo.name = child->name;
		pdevinfo.id = object_counter++;
		pdevinfo.res = res;
		pdevinfo.num_res = 3;
		pdevinfo.data = NULL;
		pdevinfo.size_data = 0;
		pdevinfo.dma_mask = DMA_BIT_MASK(64);
		new_pdev = platform_device_register_full(&pdevinfo);
		if (IS_ERR(new_pdev)) {
			ret = PTR_ERR(new_pdev);
			goto out;
		}
		new_pdev->dev.of_node = child;
		of_dma_configure(&new_pdev->dev, child, true);
		/*
		 * It is assumed that calling of_msi_configure is safe on
		 * platforms with or without MSI support.
		 */
		of_msi_configure(&new_pdev->dev, child);
	}

	/*
	 * Normalize to 0: after a successful last iteration ret still
	 * holds the positive irq number from of_irq_to_resource().
	 */
	ret = 0;

	kfree(res);

	return ret;

out:
	/* The early exit from the iterator leaves a reference on @child. */
	of_node_put(child);
	kfree(res);

	return ret;
}
#endif
410 
411 static int __init hidma_mgmt_init(void)
412 {
413 #if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
414 	struct device_node *child;
415 
416 	for_each_matching_node(child, hidma_mgmt_match) {
417 		/* device tree based firmware here */
418 		hidma_mgmt_of_populate_channels(child);
419 	}
420 #endif
421 	/*
422 	 * We do not check for return value here, as it is assumed that
423 	 * platform_driver_register must not fail. The reason for this is that
424 	 * the (potential) hidma_mgmt_of_populate_channels calls above are not
425 	 * cleaned up if it does fail, and to do this work is quite
426 	 * complicated. In particular, various calls of of_address_to_resource,
427 	 * of_irq_to_resource, platform_device_register_full, of_dma_configure,
428 	 * and of_msi_configure which then call other functions and so on, must
429 	 * be cleaned up - this is not a trivial exercise.
430 	 *
431 	 * Currently, this module is not intended to be unloaded, and there is
432 	 * no module_exit function defined which does the needed cleanup. For
433 	 * this reason, we have to assume success here.
434 	 */
435 	platform_driver_register(&hidma_mgmt_driver);
436 
437 	return 0;
438 }
439 module_init(hidma_mgmt_init);
440 MODULE_LICENSE("GPL v2");
441