// SPDX-License-Identifier: GPL-2.0+
/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * - The SuperH DMAC does not have a hardware DMA chain mode.
 * - The maximum DMA transfer size is 16 MB.
 *
 */

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rculist.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "shdma.h"

/* DMA registers */
#define SAR	0x00	/* Source Address Register */
#define DAR	0x04	/* Destination Address Register */
#define TCR	0x08	/* Transfer Count Register */
#define CHCR	0x0C	/* Channel Control Register */
#define DMAOR	0x40	/* DMA Operation Register */

#define TEND	0x18 /* USB-DMAC */

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER 256
#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR per several channels - 1 has to be written to the bit,
 *     corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
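	/*
	 * Variant (3) above (chclr_bitwise): write 1 to this channel's bit.
	 * Otherwise variant (2): write 0 to the per-channel CHCLR register.
	 */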
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}

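/*
 * DMAOR is accessed as a 16-bit register unless the platform marks it as
 * 32 bits wide (pdata->dmaor_is_32bit).
 */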
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

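	/* Restore the platform-specified initial DMAOR value and verify it latched */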
	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

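/*
 * Decode the TS (transfer size) bits, which may be split between a low and a
 * high part of CHCR, and map them to a log2 transfer size via pdata->ts_shift.
 */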
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

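/*
 * Inverse of calc_xmit_shift(): encode a log2 transfer size into the split
 * TS bits of CHCR, falling back to entry 0 for unsupported sizes.
 */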
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

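/* SAR and DAR take byte addresses; TCR is programmed in units of 2^xmit_shift bytes */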
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

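	/* Enable the channel and its interrupt, clearing any stale transfer-end flag */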
	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 */
	u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

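	/*
	 * The MID/RID value occupies one byte of the 16-bit DMARS register:
	 * preserve the rest of the register and merge in the new value at the
	 * offset given by dmars_bit.
	 */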
	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}

static int sh_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	int ret = 0;
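	/*
	 * A negative slave ID means memcpy: fall back to the default CHCR
	 * configuration. Otherwise program DMARS and CHCR from the slave
	 * configuration matched in sh_dmae_set_slave().
	 */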
	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		ret = dmae_set_dmars(sh_chan, cfg->mid_rid);
		if (ret < 0)
			goto END;

		ret = dmae_set_chcr(sh_chan, cfg->chcr);
		if (ret < 0)
			goto END;

	} else {
		dmae_init(sh_chan);
	}

END:
	return ret;
}

/*
 * Find a slave channel configuration from the controller list by either a slave
 * ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = i;
				return cfg;
			}
	}

	return NULL;
}

static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, dma_addr_t slave_addr, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try) {
		sh_chan->config = cfg;
		sh_chan->slave_addr = slave_addr ? : cfg->addr;
	}

	return 0;
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}

static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
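	/*
	 * TCR is programmed and read back in units of 2^xmit_shift bytes, so
	 * scale the register value back to bytes before subtracting it from
	 * the originally programmed length.
	 */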
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so we have to reset all of them */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}

static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

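	/*
	 * Treat the descriptor as completed once the relevant address
	 * register has advanced to the end of the programmed transfer:
	 * DAR for device-to-memory, SAR otherwise.
	 */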
	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan)
		return -ENOMEM;

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
}

#ifdef CONFIG_PM
static int sh_dmae_runtime_suspend(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	sh_dmae_ctl_stop(shdev);
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}
#endif

#ifdef CONFIG_PM_SLEEP
static int sh_dmae_suspend(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	sh_dmae_ctl_stop(shdev);
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

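	/*
	 * Reprogram every channel that is in use: slave channels get their
	 * DMARS and CHCR settings restored, memcpy channels get the default
	 * configuration.
	 */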
	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops sh_dmae_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume)
	SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume,
			   NULL)
};

static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation, may only be called after a
	 * successful slave configuration.
	 */
	return sh_chan->slave_addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};

static int sh_dmae_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES  | DMA_SLAVE_BUSWIDTH_8_BYTES |
		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES;
	const struct sh_dmae_pdata *pdata;
	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int chan_irq[SH_DMAE_MAX_CHANNELS];
	unsigned long irqflags = 0;
	int err, errirq, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *dmars, *errirq_res, *chanirq_res;

	if (pdev->dev.of_node)
		pdata = of_device_get_match_data(&pdev->dev);
	else
		pdata = dev_get_platdata(&pdev->dev);

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev)
		return -ENOMEM;

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(shdev->chan_reg))
		return PTR_ERR(shdev->chan_reg);
	if (dmars) {
		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
		if (IS_ERR(shdev->dmars))
			return PTR_ERR(shdev->dmars);
	}

	dma_dev->src_addr_widths = widths;
	dma_dev->dst_addr_widths = widths;
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			      pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	if (IS_ENABLED(CONFIG_CPU_SH4) || IS_ENABLED(CONFIG_ARCH_RENESAS)) {
		chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

		if (!chanirq_res)
			chanirq_res = errirq_res;
		else
			irqres++;

		if (chanirq_res == errirq_res ||
		    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
			irqflags = IRQF_SHARED;

		errirq = errirq_res->start;

		err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err,
				       irqflags, "DMAC Address Error", shdev);
		if (err) {
			dev_err(&pdev->dev,
				"DMA failed requesting irq #%d, error %d\n",
				errirq, err);
			goto eirq_err;
		}
	} else {
		chanirq_res = errirq_res;
	}

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
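		/*
		 * Walk the remaining IRQ resources, each of which may cover a
		 * single IRQ or a range, and assign one IRQ per channel.
		 */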
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = 0;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

eirq_err:
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	synchronize_rcu();

	return err;
}

static void sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	synchronize_rcu();
}

static struct platform_driver sh_dmae_driver = {
	.driver		= {
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
	},
	.remove		= sh_dmae_remove,
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);