/*
 * OMAP2+ DMA driver
 *
 * Copyright (C) 2003 - 2008 Nokia Corporation
 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
 * Graphics DMA and LCD DMA graphics transformations
 * by Imre Deak <imre.deak@nokia.com>
 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 * Converted DMA library into platform driver
 *	- G, Manjunath Kondaiah <manjugk@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/omap-dma.h>

#include "soc.h"
#include "omap_hwmod.h"
#include "omap_device.h"

#define OMAP2_DMA_STRIDE	0x60

static u32 errata;
static u8 dma_stride;

static struct omap_dma_dev_attr *d;

static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;

static u16 reg_map[] = {
	[REVISION]		= 0x00,
	[GCR]			= 0x78,
	[IRQSTATUS_L0]		= 0x08,
	[IRQSTATUS_L1]		= 0x0c,
	[IRQSTATUS_L2]		= 0x10,
	[IRQSTATUS_L3]		= 0x14,
	[IRQENABLE_L0]		= 0x18,
	[IRQENABLE_L1]		= 0x1c,
	[IRQENABLE_L2]		= 0x20,
	[IRQENABLE_L3]		= 0x24,
	[SYSSTATUS]		= 0x28,
	[OCP_SYSCONFIG]		= 0x2c,
	[CAPS_0]		= 0x64,
	[CAPS_2]		= 0x6c,
	[CAPS_3]		= 0x70,
	[CAPS_4]		= 0x74,

	/* Common register offsets */
	[CCR]			= 0x80,
	[CLNK_CTRL]		= 0x84,
	[CICR]			= 0x88,
	[CSR]			= 0x8c,
	[CSDP]			= 0x90,
	[CEN]			= 0x94,
	[CFN]			= 0x98,
	[CSEI]			= 0xa4,
	[CSFI]			= 0xa8,
	[CDEI]			= 0xac,
	[CDFI]			= 0xb0,
	[CSAC]			= 0xb4,
	[CDAC]			= 0xb8,

	/* Channel specific register offsets */
	[CSSA]			= 0x9c,
	[CDSA]			= 0xa0,
	[CCEN]			= 0xbc,
	[CCFN]			= 0xc0,
	[COLOR]			= 0xc4,

	/* OMAP4 specific registers */
	[CDP]			= 0xd0,
	[CNDP]			= 0xd4,
	[CCDN]			= 0xd8,
};

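/*
 * Register accessors.  Global registers are addressed via reg_map[] alone;
 * logical-channel registers (dma_common_ch_start, i.e. CSDP, and above in
 * enum omap_reg_offsets) live in per-channel banks, so the channel number
 * multiplied by dma_stride (OMAP2_DMA_STRIDE) is added to the offset.
 */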
static void __iomem *dma_base;

static inline void dma_write(u32 val, int reg, int lch)
{
	u8  stride;
	u32 offset;

	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
	offset = reg_map[reg] + (stride * lch);
	__raw_writel(val, dma_base + offset);
}

static inline u32 dma_read(int reg, int lch)
{
	u8 stride;
	u32 offset, val;

	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
	offset = reg_map[reg] + (stride * lch);
	val = __raw_readl(dma_base + offset);
	return val;
}

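/*
 * Disable the level-0 interrupt for one logical channel.  IRQENABLE_L0 is a
 * global register (below dma_common_ch_start), so the lch argument only
 * selects the bit to clear, not a per-channel register instance.
 */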
static inline void omap2_disable_irq_lch(int lch)
{
	u32 val;

	val = dma_read(IRQENABLE_L0, lch);
	val &= ~(1 << lch);
	dma_write(val, IRQENABLE_L0, lch);
}

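/* Clear every channel register of a logical channel (CSDP through CCFN/CCDN). */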
static void omap2_clear_dma(int lch)
{
	int i;

	for (i = dma_common_ch_start; i <= dma_common_ch_end; i++)
		dma_write(0, i, lch);
}

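/* Report the sDMA module revision (major.minor) from the REVISION register. */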
static void omap2_show_dma_caps(void)
{
	u8 revision = dma_read(REVISION, 0) & 0xff;

	pr_info("OMAP DMA hardware revision %d.%d\n",
		revision >> 4, revision & 0xf);
}

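/*
 * Collect the DMA errata flags that apply to the running SoC.  The mask is
 * returned to the caller and ends up in omap_system_dma_plat_info.errata,
 * where the shared OMAP DMA code checks it with IS_DMA_ERRATA().
 */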
static u32 configure_dma_errata(void)
{
	/*
	 * Errata applicable to OMAP2430 ES1.0 and all OMAP2420 devices.
	 *
	 * I.
	 * Erratum ID: Not Available
	 * Inter-frame DMA buffering issue: DMA will wrongly buffer
	 * elements if packing and bursting are enabled. This might
	 * result in data getting stalled in the FIFO at the end of the block.
	 * Workaround: DMA channels must have the BUFFERING_DISABLED bit set
	 * to guarantee no data will stay in the DMA FIFO in case inter-frame
	 * buffering occurs.
	 *
	 * II.
	 * Erratum ID: Not Available
	 * DMA may hang when several channels are used in parallel.
	 * In the following configuration, DMA channel hanging can occur:
	 * a. Channel i, hardware synchronized, is enabled.
	 * b. Another channel (Channel x), software synchronized, is enabled.
	 * c. Channel i is disabled before the end of the transfer.
	 * d. Channel i is re-enabled.
	 * e. Steps 'a' to 'd' are repeated a certain number of times.
	 * f. A third channel (Channel y), software synchronized, is enabled.
	 * Channel x and Channel y may hang immediately after step 'f'.
	 * Workaround:
	 * For any channel used - make sure NextLCH_ID is set to the value j.
	 */
	if (cpu_is_omap2420() || (cpu_is_omap2430() &&
				(omap_type() == OMAP2430_REV_ES1_0))) {
		SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
		SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
	}

	/*
	 * Erratum ID: i378: OMAP2+: sDMA channel is not disabled
	 * after a transaction error.
	 * Workaround: software should explicitly disable the channel.
	 */
	if (cpu_class_is_omap2())
		SET_DMA_ERRATA(DMA_ERRATA_i378);

	/*
	 * Erratum ID: i541: sDMA FIFO draining does not finish.
	 * If an sDMA channel is disabled on the fly, sDMA enters standby even
	 * though FIFO draining is still in progress.
	 * Workaround: put sDMA in NoStandby mode before a logical channel is
	 * disabled, then put it back to SmartStandby right after the channel
	 * finishes FIFO draining.
	 */
	if (cpu_is_omap34xx())
		SET_DMA_ERRATA(DMA_ERRATA_i541);

	/*
	 * Erratum ID: i88: special programming model needed to disable DMA
	 * before the end of a block.
	 * Workaround: software must ensure that the DMA is configured in No
	 * Standby mode (DMAx_OCP_SYSCONFIG.MIDLEMODE = "01").
	 */
	if (omap_type() == OMAP3430_REV_ES1_0)
		SET_DMA_ERRATA(DMA_ERRATA_i88);

	/*
	 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	SET_DMA_ERRATA(DMA_ERRATA_3_3);

	/*
	 * Erratum ID: Not Available
	 * A bug in ROM code leaves the IRQ status for channels 0 and 1
	 * uncleared after a secure SRAM context save and restore.
	 * Workaround: manually clear those IRQs to avoid spurious interrupts.
	 * This affects only secure (non-GP) devices.
	 */
	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		SET_DMA_ERRATA(DMA_ROMCODE_BUG);

	return errata;
}

/* One-time initialization, run once for each "dma" class hwmod. */
static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
{
	struct platform_device			*pdev;
	struct omap_system_dma_plat_info	*p;
	struct resource				*mem;
	char					*name = "omap_dma_system";

	dma_stride		= OMAP2_DMA_STRIDE;
	dma_common_ch_start	= CSDP;

	p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
	if (!p) {
		pr_err("%s: Unable to allocate pdata for %s:%s\n",
			__func__, name, oh->name);
		return -ENOMEM;
	}

	p->dma_attr		= (struct omap_dma_dev_attr *)oh->dev_attr;
	p->disable_irq_lch	= omap2_disable_irq_lch;
	p->show_dma_caps	= omap2_show_dma_caps;
	p->clear_dma		= omap2_clear_dma;
	p->dma_write		= dma_write;
	p->dma_read		= dma_read;

	p->clear_lch_regs	= NULL;

	p->errata		= configure_dma_errata();

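	/*
	 * omap_device_build() copies the platform data into the new device,
	 * so the local copy can be freed right away.
	 */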
	pdev = omap_device_build(name, 0, oh, p, sizeof(*p));
	kfree(p);
	if (IS_ERR(pdev)) {
		pr_err("%s: Can't build omap_device for %s:%s.\n",
			__func__, name, oh->name);
		return PTR_ERR(pdev);
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "%s: no mem resource\n", __func__);
		return -EINVAL;
	}
	dma_base = ioremap(mem->start, resource_size(mem));
	if (!dma_base) {
		dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
		return -ENOMEM;
	}

	d = oh->dev_attr;
	d->chan = kcalloc(d->lch_count, sizeof(struct omap_dma_lch),
			  GFP_KERNEL);
	if (!d->chan) {
		dev_err(&pdev->dev, "%s: kcalloc fail\n", __func__);
		return -ENOMEM;
	}

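	/*
	 * On high-security (non-GP) OMAP3 devices some DMA channels are
	 * reserved for secure-side use and must not be handed out by the
	 * kernel.
	 */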
	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		d->dev_caps |= HS_CHANNELS_RESERVED;

	/* Check the capabilities register for the descriptor loading feature */
	if (dma_read(CAPS_0, 0) & DMA_HAS_DESCRIPTOR_CAPS)
		dma_common_ch_end = CCDN;
	else
		dma_common_ch_end = CCFN;

	return 0;
}

static const struct platform_device_info omap_dma_dev_info = {
	.name = "omap-dma-engine",
	.id = -1,
	.dma_mask = DMA_BIT_MASK(32),
};

static int __init omap2_system_dma_init(void)
{
	struct platform_device *pdev;
	int res;

	res = omap_hwmod_for_each_by_class("dma",
			omap2_system_dma_init_dev, NULL);
	if (res)
		return res;

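	/*
	 * On DT boots the sDMA node is probed by the omap-dma dmaengine
	 * driver directly, so the legacy "omap-dma-engine" device is only
	 * registered here for non-DT platforms.
	 */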
	if (of_have_populated_dt())
		return res;

	pdev = platform_device_register_full(&omap_dma_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return res;
}
omap_arch_initcall(omap2_system_dma_init);