// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2024-2025 Arm Limited
// Arm DMA-350 driver

#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "dmaengine.h"
#include "virt-dma.h"

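/*
 * A minimal sketch of how a client might drive this engine through the
 * generic dmaengine API (error handling elided; "dst", "src" and "len"
 * are assumed to be caller-owned dma_addr_t/size_t values):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (!IS_ERR(chan)) {
 *		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					       DMA_PREP_INTERRUPT);
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */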
#define DMAINFO			0x0f00

#define DMA_BUILDCFG0		0xb0
#define DMA_CFG_DATA_WIDTH	GENMASK(18, 16)
#define DMA_CFG_ADDR_WIDTH	GENMASK(15, 10)
#define DMA_CFG_NUM_CHANNELS	GENMASK(9, 4)

#define DMA_BUILDCFG1		0xb4
#define DMA_CFG_NUM_TRIGGER_IN	GENMASK(8, 0)

#define IIDR			0xc8
#define IIDR_PRODUCTID		GENMASK(31, 20)
#define IIDR_VARIANT		GENMASK(19, 16)
#define IIDR_REVISION		GENMASK(15, 12)
#define IIDR_IMPLEMENTER	GENMASK(11, 0)

#define PRODUCTID_DMA350	0x3a0
#define IMPLEMENTER_ARM		0x43b

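/* Channel n has its own 256-byte register frame at 0x1000 + n * 0x100 */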
#define DMACH(n)		(0x1000 + 0x0100 * (n))

#define CH_CMD			0x00
#define CH_CMD_RESUME		BIT(5)
#define CH_CMD_PAUSE		BIT(4)
#define CH_CMD_STOP		BIT(3)
#define CH_CMD_DISABLE		BIT(2)
#define CH_CMD_CLEAR		BIT(1)
#define CH_CMD_ENABLE		BIT(0)

#define CH_STATUS		0x04
#define CH_STAT_RESUMEWAIT	BIT(21)
#define CH_STAT_PAUSED		BIT(20)
#define CH_STAT_STOPPED		BIT(19)
#define CH_STAT_DISABLED	BIT(18)
#define CH_STAT_ERR		BIT(17)
#define CH_STAT_DONE		BIT(16)
#define CH_STAT_INTR_ERR	BIT(1)
#define CH_STAT_INTR_DONE	BIT(0)

#define CH_INTREN		0x08
#define CH_INTREN_ERR		BIT(1)
#define CH_INTREN_DONE		BIT(0)

#define CH_CTRL			0x0c
#define CH_CTRL_USEDESTRIGIN	BIT(26)
#define CH_CTRL_USESRCTRIGIN	BIT(25)
#define CH_CTRL_DONETYPE	GENMASK(23, 21)
#define CH_CTRL_REGRELOADTYPE	GENMASK(20, 18)
#define CH_CTRL_XTYPE		GENMASK(11, 9)
#define CH_CTRL_TRANSIZE	GENMASK(2, 0)

#define CH_SRCADDR		0x10
#define CH_SRCADDRHI		0x14
#define CH_DESADDR		0x18
#define CH_DESADDRHI		0x1c
#define CH_XSIZE		0x20
#define CH_XSIZEHI		0x24
#define CH_SRCTRANSCFG		0x28
#define CH_DESTRANSCFG		0x2c
#define CH_CFG_MAXBURSTLEN	GENMASK(19, 16)
#define CH_CFG_PRIVATTR		BIT(11)
#define CH_CFG_SHAREATTR	GENMASK(9, 8)
#define CH_CFG_MEMATTR		GENMASK(7, 0)

#define TRANSCFG_DEVICE					\
	FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) |		\
	FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) |	\
	FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_DEVICE)
#define TRANSCFG_NC					\
	FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) |		\
	FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) |	\
	FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_NC)
#define TRANSCFG_WB					\
	FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) |		\
	FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_ISH) |	\
	FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_WB)

#define CH_XADDRINC		0x30
#define CH_XY_DES		GENMASK(31, 16)
#define CH_XY_SRC		GENMASK(15, 0)

#define CH_FILLVAL		0x38
#define CH_SRCTRIGINCFG		0x4c
#define CH_DESTRIGINCFG		0x50
#define CH_LINKATTR		0x70
#define CH_LINK_SHAREATTR	GENMASK(9, 8)
#define CH_LINK_MEMATTR		GENMASK(7, 0)

#define CH_AUTOCFG		0x74
#define CH_LINKADDR		0x78
#define CH_LINKADDR_EN		BIT(0)

#define CH_LINKADDRHI		0x7c
#define CH_ERRINFO		0x90
#define CH_ERRINFO_AXIRDPOISERR BIT(18)
#define CH_ERRINFO_AXIWRRESPERR BIT(17)
#define CH_ERRINFO_AXIRDRESPERR BIT(16)

#define CH_BUILDCFG0		0xf8
#define CH_CFG_INC_WIDTH	GENMASK(29, 26)
#define CH_CFG_DATA_WIDTH	GENMASK(24, 22)
#define CH_CFG_DATA_BUF_SIZE	GENMASK(7, 0)

#define CH_BUILDCFG1		0xfc
#define CH_CFG_HAS_CMDLINK	BIT(8)
#define CH_CFG_HAS_TRIGSEL	BIT(7)
#define CH_CFG_HAS_TRIGIN	BIT(5)
#define CH_CFG_HAS_WRAP		BIT(1)

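/*
 * Command-link header bits: each bit set in the first word of a command
 * selects the corresponding channel register, whose new value then follows
 * in the buffer, in ascending bit order (see d350_start_next()).
 */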
#define LINK_REGCLEAR		BIT(0)
#define LINK_INTREN		BIT(2)
#define LINK_CTRL		BIT(3)
#define LINK_SRCADDR		BIT(4)
#define LINK_SRCADDRHI		BIT(5)
#define LINK_DESADDR		BIT(6)
#define LINK_DESADDRHI		BIT(7)
#define LINK_XSIZE		BIT(8)
#define LINK_XSIZEHI		BIT(9)
#define LINK_SRCTRANSCFG	BIT(10)
#define LINK_DESTRANSCFG	BIT(11)
#define LINK_XADDRINC		BIT(12)
#define LINK_FILLVAL		BIT(14)
#define LINK_SRCTRIGINCFG	BIT(19)
#define LINK_DESTRIGINCFG	BIT(20)
#define LINK_AUTOCFG		BIT(29)
#define LINK_LINKADDR		BIT(30)
#define LINK_LINKADDRHI		BIT(31)

enum ch_ctrl_donetype {
	CH_CTRL_DONETYPE_NONE = 0,
	CH_CTRL_DONETYPE_CMD = 1,
	CH_CTRL_DONETYPE_CYCLE = 3
};

enum ch_ctrl_xtype {
	CH_CTRL_XTYPE_DISABLE = 0,
	CH_CTRL_XTYPE_CONTINUE = 1,
	CH_CTRL_XTYPE_WRAP = 2,
	CH_CTRL_XTYPE_FILL = 3
};

enum ch_cfg_shareattr {
	SHAREATTR_NSH = 0,
	SHAREATTR_OSH = 2,
	SHAREATTR_ISH = 3
};

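/* MAIR-style encodings: Device, Normal Non-cacheable, Normal Write-back */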
enum ch_cfg_memattr {
	MEMATTR_DEVICE = 0x00,
	MEMATTR_NC = 0x44,
	MEMATTR_WB = 0xff
};

struct d350_desc {
	struct virt_dma_desc vd;
	u32 command[16];
	u16 xsize;
	u16 xsizehi;
	u8 tsz;
};

struct d350_chan {
	struct virt_dma_chan vc;
	struct d350_desc *desc;
	void __iomem *base;
	int irq;
	enum dma_status status;
	dma_cookie_t cookie;
	u32 residue;
	u8 tsz;
	bool has_trig;
	bool has_wrap;
	bool coherent;
};

struct d350 {
	struct dma_device dma;
	int nchan;
	int nreq;
	struct d350_chan channels[] __counted_by(nchan);
};

static inline struct d350_chan *to_d350_chan(struct dma_chan *chan)
{
	return container_of(chan, struct d350_chan, vc.chan);
}

static inline struct d350_desc *to_d350_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct d350_desc, vd);
}

static void d350_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_d350_desc(vd));
}

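/*
 * Build a one-command memcpy: pick the largest transfer size to which both
 * addresses and the length are aligned (capped at the channel's data width),
 * then pack the register values behind a LINK_* header word.
 */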
static struct dma_async_tx_descriptor *d350_prep_memcpy(struct dma_chan *chan,
		dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags)
{
	struct d350_chan *dch = to_d350_chan(chan);
	struct d350_desc *desc;
	u32 *cmd;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->tsz = __ffs(len | dest | src | (1 << dch->tsz));
	desc->xsize = lower_16_bits(len >> desc->tsz);
	desc->xsizehi = upper_16_bits(len >> desc->tsz);

	cmd = desc->command;
	cmd[0] = LINK_CTRL | LINK_SRCADDR | LINK_SRCADDRHI | LINK_DESADDR |
		 LINK_DESADDRHI | LINK_XSIZE | LINK_XSIZEHI | LINK_SRCTRANSCFG |
		 LINK_DESTRANSCFG | LINK_XADDRINC | LINK_LINKADDR;

	cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) |
		 FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_CONTINUE) |
		 FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD);

	cmd[2] = lower_32_bits(src);
	cmd[3] = upper_32_bits(src);
	cmd[4] = lower_32_bits(dest);
	cmd[5] = upper_32_bits(dest);
	cmd[6] = FIELD_PREP(CH_XY_SRC, desc->xsize) | FIELD_PREP(CH_XY_DES, desc->xsize);
	cmd[7] = FIELD_PREP(CH_XY_SRC, desc->xsizehi) | FIELD_PREP(CH_XY_DES, desc->xsizehi);
	cmd[8] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
	cmd[9] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
	cmd[10] = FIELD_PREP(CH_XY_SRC, 1) | FIELD_PREP(CH_XY_DES, 1);
	cmd[11] = 0;

	return vchan_tx_prep(&dch->vc, &desc->vd, flags);
}

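/*
 * Memset is a FILL transfer: no source side, the byte value replicated
 * across CH_FILLVAL is written out repeatedly.
 */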
static struct dma_async_tx_descriptor *d350_prep_memset(struct dma_chan *chan,
		dma_addr_t dest, int value, size_t len, unsigned long flags)
{
	struct d350_chan *dch = to_d350_chan(chan);
	struct d350_desc *desc;
	u32 *cmd;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->tsz = __ffs(len | dest | (1 << dch->tsz));
	desc->xsize = lower_16_bits(len >> desc->tsz);
	desc->xsizehi = upper_16_bits(len >> desc->tsz);

	cmd = desc->command;
	cmd[0] = LINK_CTRL | LINK_DESADDR | LINK_DESADDRHI |
		 LINK_XSIZE | LINK_XSIZEHI | LINK_DESTRANSCFG |
		 LINK_XADDRINC | LINK_FILLVAL | LINK_LINKADDR;

	cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) |
		 FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_FILL) |
		 FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD);

	cmd[2] = lower_32_bits(dest);
	cmd[3] = upper_32_bits(dest);
	cmd[4] = FIELD_PREP(CH_XY_DES, desc->xsize);
	cmd[5] = FIELD_PREP(CH_XY_DES, desc->xsizehi);
	cmd[6] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
	cmd[7] = FIELD_PREP(CH_XY_DES, 1);
	cmd[8] = (u8)value * 0x01010101;
	cmd[9] = 0;

	return vchan_tx_prep(&dch->vc, &desc->vd, flags);
}

static int d350_pause(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dch->vc.lock, flags);
	if (dch->status == DMA_IN_PROGRESS) {
		writel_relaxed(CH_CMD_PAUSE, dch->base + CH_CMD);
		dch->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&dch->vc.lock, flags);

	return 0;
}

static int d350_resume(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dch->vc.lock, flags);
	if (dch->status == DMA_PAUSED) {
		writel_relaxed(CH_CMD_RESUME, dch->base + CH_CMD);
		dch->status = DMA_IN_PROGRESS;
	}
	spin_unlock_irqrestore(&dch->vc.lock, flags);

	return 0;
}

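/*
 * CH_XSIZE/CH_XSIZEHI count down as the transfer progresses; since they are
 * two separate registers, re-read the high half until it is stable so a
 * low-half rollover in between the reads cannot be misinterpreted.
 */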
static u32 d350_get_residue(struct d350_chan *dch)
{
	u32 res, xsize, xsizehi, hi_new;
	int retries = 3; /* 1st time unlucky, 2nd improbable, 3rd just broken */

	hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
	do {
		xsizehi = hi_new;
		xsize = readl_relaxed(dch->base + CH_XSIZE);
		hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
	} while (xsizehi != hi_new && --retries);

	res = FIELD_GET(CH_XY_DES, xsize);
	res |= FIELD_GET(CH_XY_DES, xsizehi) << 16;

	return res << dch->desc->tsz;
}

static int d350_terminate_all(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dch->vc.lock, flags);
	writel_relaxed(CH_CMD_STOP, dch->base + CH_CMD);
	if (dch->desc) {
		if (dch->status != DMA_ERROR)
			vchan_terminate_vdesc(&dch->desc->vd);
		dch->desc = NULL;
		dch->status = DMA_COMPLETE;
	}
	vchan_get_all_descriptors(&dch->vc, &list);
	list_splice_tail(&list, &dch->vc.desc_terminated);
	spin_unlock_irqrestore(&dch->vc.lock, flags);

	return 0;
}

static void d350_synchronize(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);

	vchan_synchronize(&dch->vc);
}

static u32 d350_desc_bytes(struct d350_desc *desc)
{
	return ((u32)desc->xsizehi << 16 | desc->xsize) << desc->tsz;
}

static enum dma_status d350_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				      struct dma_tx_state *state)
{
	struct d350_chan *dch = to_d350_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(chan, cookie, state);

	spin_lock_irqsave(&dch->vc.lock, flags);
	if (cookie == dch->cookie) {
		status = dch->status;
		if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
			dch->residue = d350_get_residue(dch);
		residue = dch->residue;
	} else if ((vd = vchan_find_desc(&dch->vc, cookie))) {
		residue = d350_desc_bytes(to_d350_desc(vd));
	} else if (status == DMA_IN_PROGRESS) {
		/* Somebody else terminated it? */
		status = DMA_ERROR;
	}
	spin_unlock_irqrestore(&dch->vc.lock, flags);

	dma_set_residue(state, residue);
	return status;
}

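/*
 * Pop the next queued descriptor and program it directly: walk the LINK_*
 * header bits in ascending order, writing each selected register from the
 * packed command buffer, then kick the channel.
 */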
static void d350_start_next(struct d350_chan *dch)
{
	u32 hdr, *reg;

	dch->desc = to_d350_desc(vchan_next_desc(&dch->vc));
	if (!dch->desc)
		return;

	list_del(&dch->desc->vd.node);
	dch->status = DMA_IN_PROGRESS;
	dch->cookie = dch->desc->vd.tx.cookie;
	dch->residue = d350_desc_bytes(dch->desc);

	hdr = dch->desc->command[0];
	reg = &dch->desc->command[1];

	if (hdr & LINK_INTREN)
		writel_relaxed(*reg++, dch->base + CH_INTREN);
	if (hdr & LINK_CTRL)
		writel_relaxed(*reg++, dch->base + CH_CTRL);
	if (hdr & LINK_SRCADDR)
		writel_relaxed(*reg++, dch->base + CH_SRCADDR);
	if (hdr & LINK_SRCADDRHI)
		writel_relaxed(*reg++, dch->base + CH_SRCADDRHI);
	if (hdr & LINK_DESADDR)
		writel_relaxed(*reg++, dch->base + CH_DESADDR);
	if (hdr & LINK_DESADDRHI)
		writel_relaxed(*reg++, dch->base + CH_DESADDRHI);
	if (hdr & LINK_XSIZE)
		writel_relaxed(*reg++, dch->base + CH_XSIZE);
	if (hdr & LINK_XSIZEHI)
		writel_relaxed(*reg++, dch->base + CH_XSIZEHI);
	if (hdr & LINK_SRCTRANSCFG)
		writel_relaxed(*reg++, dch->base + CH_SRCTRANSCFG);
	if (hdr & LINK_DESTRANSCFG)
		writel_relaxed(*reg++, dch->base + CH_DESTRANSCFG);
	if (hdr & LINK_XADDRINC)
		writel_relaxed(*reg++, dch->base + CH_XADDRINC);
	if (hdr & LINK_FILLVAL)
		writel_relaxed(*reg++, dch->base + CH_FILLVAL);
	if (hdr & LINK_SRCTRIGINCFG)
		writel_relaxed(*reg++, dch->base + CH_SRCTRIGINCFG);
	if (hdr & LINK_DESTRIGINCFG)
		writel_relaxed(*reg++, dch->base + CH_DESTRIGINCFG);
	if (hdr & LINK_AUTOCFG)
		writel_relaxed(*reg++, dch->base + CH_AUTOCFG);
	if (hdr & LINK_LINKADDR)
		writel_relaxed(*reg++, dch->base + CH_LINKADDR);
	if (hdr & LINK_LINKADDRHI)
		writel_relaxed(*reg++, dch->base + CH_LINKADDRHI);

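	/* Non-relaxed write so the enable is ordered after the config writes */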
	writel(CH_CMD_ENABLE, dch->base + CH_CMD);
}

static void d350_issue_pending(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dch->vc.lock, flags);
	if (vchan_issue_pending(&dch->vc) && !dch->desc)
		d350_start_next(dch);
	spin_unlock_irqrestore(&dch->vc.lock, flags);
}

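/*
 * On error, classify it for the client via tx_result before completing the
 * descriptor; a done interrupt chains straight into the next transfer.
 */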
static irqreturn_t d350_irq(int irq, void *data)
{
	struct d350_chan *dch = data;
	struct device *dev = dch->vc.chan.device->dev;
	struct virt_dma_desc *vd = &dch->desc->vd;
	u32 ch_status;

	ch_status = readl(dch->base + CH_STATUS);
	if (!ch_status)
		return IRQ_NONE;

	if (ch_status & CH_STAT_INTR_ERR) {
		u32 errinfo = readl_relaxed(dch->base + CH_ERRINFO);

		if (errinfo & (CH_ERRINFO_AXIRDPOISERR | CH_ERRINFO_AXIRDRESPERR))
			vd->tx_result.result = DMA_TRANS_READ_FAILED;
		else if (errinfo & CH_ERRINFO_AXIWRRESPERR)
			vd->tx_result.result = DMA_TRANS_WRITE_FAILED;
		else
			vd->tx_result.result = DMA_TRANS_ABORTED;

		vd->tx_result.residue = d350_get_residue(dch);
	} else if (!(ch_status & CH_STAT_INTR_DONE)) {
		dev_warn(dev, "Unexpected IRQ source? 0x%08x\n", ch_status);
	}
	writel_relaxed(ch_status, dch->base + CH_STATUS);

	spin_lock(&dch->vc.lock);
	vchan_cookie_complete(vd);
	if (ch_status & CH_STAT_INTR_DONE) {
		dch->status = DMA_COMPLETE;
		dch->residue = 0;
		d350_start_next(dch);
	} else {
		dch->status = DMA_ERROR;
		dch->residue = vd->tx_result.residue;
	}
	spin_unlock(&dch->vc.lock);

	return IRQ_HANDLED;
}

static int d350_alloc_chan_resources(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	int ret = request_irq(dch->irq, d350_irq, IRQF_SHARED,
			      dev_name(&dch->vc.chan.dev->device), dch);
	if (!ret)
		writel_relaxed(CH_INTREN_DONE | CH_INTREN_ERR, dch->base + CH_INTREN);

	return ret;
}

static void d350_free_chan_resources(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);

	writel_relaxed(0, dch->base + CH_INTREN);
	free_irq(dch->irq, dch);
	vchan_free_chan_resources(&dch->vc);
}

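/*
 * Discover the controller configuration from its ID and build-config
 * registers, then set up each command-link-capable channel.
 */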
static int d350_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct d350 *dmac;
	void __iomem *base;
	u32 reg;
	int ret, nchan, dw, aw, r, p;
	bool coherent, memset;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	reg = readl_relaxed(base + DMAINFO + IIDR);
	r = FIELD_GET(IIDR_VARIANT, reg);
	p = FIELD_GET(IIDR_REVISION, reg);
	if (FIELD_GET(IIDR_IMPLEMENTER, reg) != IMPLEMENTER_ARM ||
	    FIELD_GET(IIDR_PRODUCTID, reg) != PRODUCTID_DMA350)
		return dev_err_probe(dev, -ENODEV, "Not a DMA-350!\n");

	reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG0);
	nchan = FIELD_GET(DMA_CFG_NUM_CHANNELS, reg) + 1;
	dw = 1 << FIELD_GET(DMA_CFG_DATA_WIDTH, reg);
	aw = FIELD_GET(DMA_CFG_ADDR_WIDTH, reg) + 1;

	dma_set_mask_and_coherent(dev, DMA_BIT_MASK(aw));
	coherent = device_get_dma_attr(dev) == DEV_DMA_COHERENT;

	dmac = devm_kzalloc(dev, struct_size(dmac, channels, nchan), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->nchan = nchan;

	reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG1);
	dmac->nreq = FIELD_GET(DMA_CFG_NUM_TRIGGER_IN, reg);

	dev_dbg(dev, "DMA-350 r%dp%d with %d channels, %d requests\n", r, p, dmac->nchan, dmac->nreq);

	dmac->dma.dev = dev;
	for (int i = min(dw, 16); i > 0; i /= 2) {
		dmac->dma.src_addr_widths |= BIT(i);
		dmac->dma.dst_addr_widths |= BIT(i);
	}
	dmac->dma.directions = BIT(DMA_MEM_TO_MEM);
	dmac->dma.descriptor_reuse = true;
	dmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dmac->dma.device_alloc_chan_resources = d350_alloc_chan_resources;
	dmac->dma.device_free_chan_resources = d350_free_chan_resources;
	dma_cap_set(DMA_MEMCPY, dmac->dma.cap_mask);
	dmac->dma.device_prep_dma_memcpy = d350_prep_memcpy;
	dmac->dma.device_pause = d350_pause;
	dmac->dma.device_resume = d350_resume;
	dmac->dma.device_terminate_all = d350_terminate_all;
	dmac->dma.device_synchronize = d350_synchronize;
	dmac->dma.device_tx_status = d350_tx_status;
	dmac->dma.device_issue_pending = d350_issue_pending;
	INIT_LIST_HEAD(&dmac->dma.channels);

	/* Would be nice to have per-channel caps for this... */
	memset = true;
	for (int i = 0; i < nchan; i++) {
		struct d350_chan *dch = &dmac->channels[i];

		dch->base = base + DMACH(i);
		writel_relaxed(CH_CMD_CLEAR, dch->base + CH_CMD);

		reg = readl_relaxed(dch->base + CH_BUILDCFG1);
		if (!(FIELD_GET(CH_CFG_HAS_CMDLINK, reg))) {
			dev_warn(dev, "No command link support on channel %d\n", i);
			continue;
		}
		dch->irq = platform_get_irq(pdev, i);
		if (dch->irq < 0)
			return dev_err_probe(dev, dch->irq,
					     "Failed to get IRQ for channel %d\n", i);

		dch->has_wrap = FIELD_GET(CH_CFG_HAS_WRAP, reg);
		dch->has_trig = FIELD_GET(CH_CFG_HAS_TRIGIN, reg) &
				FIELD_GET(CH_CFG_HAS_TRIGSEL, reg);

		/* Fill is a special case of Wrap */
		memset &= dch->has_wrap;

		reg = readl_relaxed(dch->base + CH_BUILDCFG0);
		dch->tsz = FIELD_GET(CH_CFG_DATA_WIDTH, reg);

		dch->coherent = coherent;
		reg = FIELD_PREP(CH_LINK_SHAREATTR, coherent ? SHAREATTR_ISH : SHAREATTR_OSH);
		reg |= FIELD_PREP(CH_LINK_MEMATTR, coherent ? MEMATTR_WB : MEMATTR_NC);
		writel_relaxed(reg, dch->base + CH_LINKATTR);

		dch->vc.desc_free = d350_desc_free;
		vchan_init(&dch->vc, &dmac->dma);
	}

	if (memset) {
		dma_cap_set(DMA_MEMSET, dmac->dma.cap_mask);
		dmac->dma.device_prep_dma_memset = d350_prep_memset;
	}

	platform_set_drvdata(pdev, dmac);

	ret = dma_async_device_register(&dmac->dma);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to register DMA device\n");

	return 0;
}

static void d350_remove(struct platform_device *pdev)
{
	struct d350 *dmac = platform_get_drvdata(pdev);

	dma_async_device_unregister(&dmac->dma);
}

static const struct of_device_id d350_of_match[] __maybe_unused = {
	{ .compatible = "arm,dma-350" },
	{}
};
MODULE_DEVICE_TABLE(of, d350_of_match);

static struct platform_driver d350_driver = {
	.driver = {
		.name = "arm-dma350",
		.of_match_table = of_match_ptr(d350_of_match),
	},
	.probe = d350_probe,
	.remove = d350_remove,
};
module_platform_driver(d350_driver);

MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
MODULE_DESCRIPTION("Arm DMA-350 driver");
MODULE_LICENSE("GPL v2");