xref: /linux/drivers/dma/sh/shdma-base.c (revision 2ba9268dd603d23e17643437b2246acb6844953b)
1 /*
2  * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
3  *
4  * extracted from shdma.c
5  *
6  * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
7  * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
8  * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
9  * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
10  *
11  * This is free software; you can redistribute it and/or modify
12  * it under the terms of version 2 of the GNU General Public License as
13  * published by the Free Software Foundation.
14  */
15 
16 #include <linux/delay.h>
17 #include <linux/shdma-base.h>
18 #include <linux/dmaengine.h>
19 #include <linux/init.h>
20 #include <linux/interrupt.h>
21 #include <linux/module.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/slab.h>
24 #include <linux/spinlock.h>
25 
26 #include "../dmaengine.h"
27 
28 /* DMA descriptor control */
29 enum shdma_desc_status {
30 	DESC_IDLE,
31 	DESC_PREPARED,
32 	DESC_SUBMITTED,
33 	DESC_COMPLETED,	/* completed, have to call callback */
34 	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
35 };
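
/*
 * Typical life cycle of a descriptor, as implemented below:
 *
 *	DESC_IDLE      -> DESC_PREPARED	 in shdma_add_desc()
 *	DESC_PREPARED  -> DESC_SUBMITTED in shdma_tx_submit()
 *	DESC_SUBMITTED -> DESC_COMPLETED in chan_irqt()
 *	DESC_COMPLETED -> DESC_WAITING	 in __ld_cleanup(), callback pending
 *	DESC_WAITING   -> DESC_IDLE	 in __ld_cleanup(), once acked
 */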
36 
37 #define NR_DESCS_PER_CHANNEL 32
38 
39 #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
40 #define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)
41 
42 /*
43  * For slave DMA we assume that there is a finite number of DMA slaves in the
44  * system, and that each such slave can only use a finite number of channels.
45  * We use slave channel IDs to make sure that no such slave channel ID is
46  * allocated more than once.
47  */
48 static unsigned int slave_num = 256;
49 module_param(slave_num, uint, 0444);
50 
51 /* A bitmask with slave_num bits */
52 static unsigned long *shdma_slave_used;
53 
54 /* Called under spin_lock_irq(&schan->chan_lock) */
55 static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
56 {
57 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
58 	const struct shdma_ops *ops = sdev->ops;
59 	struct shdma_desc *sdesc;
60 
61 	/* Do nothing if a transfer is already in progress */
62 	if (ops->channel_busy(schan))
63 		return;
64 
65 	/* Find the first descriptor not yet transferred */
66 	list_for_each_entry(sdesc, &schan->ld_queue, node)
67 		if (sdesc->mark == DESC_SUBMITTED) {
68 			ops->start_xfer(schan, sdesc);
69 			break;
70 		}
71 }
72 
73 static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
74 {
75 	struct shdma_desc *chunk, *c, *desc =
76 		container_of(tx, struct shdma_desc, async_tx);
77 	struct shdma_chan *schan = to_shdma_chan(tx->chan);
78 	dma_async_tx_callback callback = tx->callback;
79 	dma_cookie_t cookie;
80 	bool power_up;
81 
82 	spin_lock_irq(&schan->chan_lock);
83 
84 	power_up = list_empty(&schan->ld_queue);
85 
86 	cookie = dma_cookie_assign(tx);
87 
88 	/* Mark all chunks of this descriptor as submitted, move to the queue */
89 	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
90 		/*
91 		 * All chunks are on the global ld_free, so we have to find
92 		 * the end of the chain ourselves
93 		 */
94 		if (chunk != desc && (chunk->mark == DESC_IDLE ||
95 				      chunk->async_tx.cookie > 0 ||
96 				      chunk->async_tx.cookie == -EBUSY ||
97 				      &chunk->node == &schan->ld_free))
98 			break;
99 		chunk->mark = DESC_SUBMITTED;
100 		if (chunk->chunks == 1) {
101 			chunk->async_tx.callback = callback;
102 			chunk->async_tx.callback_param = tx->callback_param;
103 		} else {
104 			/* Callback goes to the last chunk */
105 			chunk->async_tx.callback = NULL;
106 		}
107 		chunk->cookie = cookie;
108 		list_move_tail(&chunk->node, &schan->ld_queue);
109 
110 		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
111 			tx->cookie, &chunk->async_tx, schan->id);
112 	}
113 
114 	if (power_up) {
115 		int ret;
116 		schan->pm_state = SHDMA_PM_BUSY;
117 
118 		ret = pm_runtime_get(schan->dev);
119 
120 		spin_unlock_irq(&schan->chan_lock);
121 		if (ret < 0)
122 			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
123 
124 		pm_runtime_barrier(schan->dev);
125 
126 		spin_lock_irq(&schan->chan_lock);
127 
128 		/* Have we been reset while waiting? */
129 		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
130 			struct shdma_dev *sdev =
131 				to_shdma_dev(schan->dma_chan.device);
132 			const struct shdma_ops *ops = sdev->ops;
133 			dev_dbg(schan->dev, "Bring up channel %d\n",
134 				schan->id);
135 			/*
136 			 * TODO: .setup_xfer() might fail on some platforms.
137 			 * Make it return an int then, and on error remove the
138 			 * chunks from the queue again.
139 			 */
140 			ops->setup_xfer(schan, schan->slave_id);
141 
142 			if (schan->pm_state == SHDMA_PM_PENDING)
143 				shdma_chan_xfer_ld_queue(schan);
144 			schan->pm_state = SHDMA_PM_ESTABLISHED;
145 		}
146 	} else {
147 		/*
148 		 * Tell .device_issue_pending() not to run the queue, interrupts
149 		 * will do it anyway
150 		 */
151 		schan->pm_state = SHDMA_PM_PENDING;
152 	}
153 
154 	spin_unlock_irq(&schan->chan_lock);
155 
156 	return cookie;
157 }
158 
159 /* Called with schan->chan_lock held */
160 static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
161 {
162 	struct shdma_desc *sdesc;
163 
164 	list_for_each_entry(sdesc, &schan->ld_free, node)
165 		if (sdesc->mark != DESC_PREPARED) {
166 			BUG_ON(sdesc->mark != DESC_IDLE);
167 			list_del(&sdesc->node);
168 			return sdesc;
169 		}
170 
171 	return NULL;
172 }
173 
174 static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
175 			     dma_addr_t slave_addr)
176 {
177 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
178 	const struct shdma_ops *ops = sdev->ops;
179 	int ret, match;
180 
181 	if (schan->dev->of_node) {
182 		match = schan->hw_req;
183 		ret = ops->set_slave(schan, match, slave_addr, true);
184 		if (ret < 0)
185 			return ret;
186 
187 		slave_id = schan->slave_id;
188 	} else {
189 		match = slave_id;
190 	}
191 
192 	if (slave_id < 0 || slave_id >= slave_num)
193 		return -EINVAL;
194 
195 	if (test_and_set_bit(slave_id, shdma_slave_used))
196 		return -EBUSY;
197 
198 	ret = ops->set_slave(schan, match, slave_addr, false);
199 	if (ret < 0) {
200 		clear_bit(slave_id, shdma_slave_used);
201 		return ret;
202 	}
203 
204 	schan->slave_id = slave_id;
205 
206 	return 0;
207 }
208 
209 static int shdma_alloc_chan_resources(struct dma_chan *chan)
210 {
211 	struct shdma_chan *schan = to_shdma_chan(chan);
212 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
213 	const struct shdma_ops *ops = sdev->ops;
214 	struct shdma_desc *desc;
215 	struct shdma_slave *slave = chan->private;
216 	int ret, i;
217 
218 	/*
219 	 * This relies on the guarantee from dmaengine that alloc_chan_resources
220 	 * never runs concurrently with itself or free_chan_resources.
221 	 */
222 	if (slave) {
223 		/* Legacy mode: .private is set in filter */
224 		ret = shdma_setup_slave(schan, slave->slave_id, 0);
225 		if (ret < 0)
226 			goto esetslave;
227 	} else {
228 		schan->slave_id = -EINVAL;
229 	}
230 
231 	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
232 			      sdev->desc_size, GFP_KERNEL);
233 	if (!schan->desc) {
234 		ret = -ENOMEM;
235 		goto edescalloc;
236 	}
237 	schan->desc_num = NR_DESCS_PER_CHANNEL;
238 
239 	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
240 		desc = ops->embedded_desc(schan->desc, i);
241 		dma_async_tx_descriptor_init(&desc->async_tx,
242 					     &schan->dma_chan);
243 		desc->async_tx.tx_submit = shdma_tx_submit;
244 		desc->mark = DESC_IDLE;
245 
246 		list_add(&desc->node, &schan->ld_free);
247 	}
248 
249 	return NR_DESCS_PER_CHANNEL;
250 
251 edescalloc:
252 	if (slave)
253 esetslave:
254 		clear_bit(slave->slave_id, shdma_slave_used);
255 	chan->private = NULL;
256 	return ret;
257 }
258 
259 /*
260  * This is the standard shdma filter function to be used as a replacement for the
261  * "old" method, using the .private pointer. If for some reason you allocate a
262  * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter
263  * parameter. If this filter is used, the slave driver, after calling
264  * dma_request_channel(), will also have to call dmaengine_slave_config() with
265  * .slave_id, .direction, and either .src_addr or .dst_addr set.
266  * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
267  * capability! If this becomes a requirement, hardware glue drivers using these
268  * services would have to provide their own filters, which would first check
269  * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
270  * this, and only then, in case of a match, call this common filter.
271  * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
272  * In that case the MID-RID value is used for slave channel filtering and is
273  * passed to this function in the "arg" parameter.
274  */
275 bool shdma_chan_filter(struct dma_chan *chan, void *arg)
276 {
277 	struct shdma_chan *schan;
278 	struct shdma_dev *sdev;
279 	int match = (long)arg;
280 	int ret;
281 
282 	/* Only support channels handled by this driver. */
283 	if (chan->device->device_alloc_chan_resources !=
284 	    shdma_alloc_chan_resources)
285 		return false;
286 
287 	if (match < 0)
288 		/* No slave requested - arbitrary channel */
289 		return true;
290 
291 	schan = to_shdma_chan(chan);
292 	if (!schan->dev->of_node && match >= slave_num)
293 		return false;
294 
295 	sdev = to_shdma_dev(schan->dma_chan.device);
296 	ret = sdev->ops->set_slave(schan, match, 0, true);
297 	if (ret < 0)
298 		return false;
299 
300 	return true;
301 }
302 EXPORT_SYMBOL(shdma_chan_filter);
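
/*
 * Example (sketch): how a client might use the filter above. The mask setup
 * and the request call are standard dmaengine; the slave ID 17 is a purely
 * hypothetical value, real IDs come from platform or DT data.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter, (void *)(long)17);
 *	if (!chan)
 *		return -ENODEV;
 */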
303 
304 static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
305 {
306 	struct shdma_desc *desc, *_desc;
307 	/* Is the "exposed" head of a chain acked? */
308 	bool head_acked = false;
309 	dma_cookie_t cookie = 0;
310 	dma_async_tx_callback callback = NULL;
311 	void *param = NULL;
312 	unsigned long flags;
313 	LIST_HEAD(cyclic_list);
314 
315 	spin_lock_irqsave(&schan->chan_lock, flags);
316 	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
317 		struct dma_async_tx_descriptor *tx = &desc->async_tx;
318 
319 		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
320 		BUG_ON(desc->mark != DESC_SUBMITTED &&
321 		       desc->mark != DESC_COMPLETED &&
322 		       desc->mark != DESC_WAITING);
323 
324 		/*
325 		 * The queue is ordered, and we use this loop to (1) clean up all
326 		 * completed descriptors, and to (2) update descriptor flags of
327 		 * any chunks in a (partially) completed chain
328 		 */
329 		if (!all && desc->mark == DESC_SUBMITTED &&
330 		    desc->cookie != cookie)
331 			break;
332 
333 		if (tx->cookie > 0)
334 			cookie = tx->cookie;
335 
336 		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
337 			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
338 				dev_dbg(schan->dev,
339 					"Completing cookie %d, expected %d\n",
340 					desc->cookie,
341 					schan->dma_chan.completed_cookie + 1);
342 			schan->dma_chan.completed_cookie = desc->cookie;
343 		}
344 
345 		/* Call callback on the last chunk */
346 		if (desc->mark == DESC_COMPLETED && tx->callback) {
347 			desc->mark = DESC_WAITING;
348 			callback = tx->callback;
349 			param = tx->callback_param;
350 			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
351 				tx->cookie, tx, schan->id);
352 			BUG_ON(desc->chunks != 1);
353 			break;
354 		}
355 
356 		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
357 			if (desc->mark == DESC_COMPLETED) {
358 				BUG_ON(tx->cookie < 0);
359 				desc->mark = DESC_WAITING;
360 			}
361 			head_acked = async_tx_test_ack(tx);
362 		} else {
363 			switch (desc->mark) {
364 			case DESC_COMPLETED:
365 				desc->mark = DESC_WAITING;
366 				/* Fall through */
367 			case DESC_WAITING:
368 				if (head_acked)
369 					async_tx_ack(&desc->async_tx);
370 			}
371 		}
372 
373 		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
374 			tx, tx->cookie);
375 
376 		if (((desc->mark == DESC_COMPLETED ||
377 		      desc->mark == DESC_WAITING) &&
378 		     async_tx_test_ack(&desc->async_tx)) || all) {
379 
380 			if (all || !desc->cyclic) {
381 				/* Remove from ld_queue list */
382 				desc->mark = DESC_IDLE;
383 				list_move(&desc->node, &schan->ld_free);
384 			} else {
385 				/* reuse as cyclic */
386 				desc->mark = DESC_SUBMITTED;
387 				list_move_tail(&desc->node, &cyclic_list);
388 			}
389 
390 			if (list_empty(&schan->ld_queue)) {
391 				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
392 				pm_runtime_put(schan->dev);
393 				schan->pm_state = SHDMA_PM_ESTABLISHED;
394 			} else if (schan->pm_state == SHDMA_PM_PENDING) {
395 				shdma_chan_xfer_ld_queue(schan);
396 			}
397 		}
398 	}
399 
400 	if (all && !callback)
401 		/*
402 		 * Terminating and the loop completed normally: forgive
403 		 * uncompleted cookies
404 		 */
405 		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
406 
407 	list_splice_tail(&cyclic_list, &schan->ld_queue);
408 
409 	spin_unlock_irqrestore(&schan->chan_lock, flags);
410 
411 	if (callback)
412 		callback(param);
413 
414 	return callback;
415 }
416 
417 /*
418  * shdma_chan_ld_cleanup - Clean up link descriptors
419  *
420  * Clean up the ld_queue of DMA channel.
421  */
422 static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
423 {
424 	while (__ld_cleanup(schan, all))
425 		;
426 }
427 
428 /*
429  * shdma_free_chan_resources - Free all resources of the channel.
430  */
431 static void shdma_free_chan_resources(struct dma_chan *chan)
432 {
433 	struct shdma_chan *schan = to_shdma_chan(chan);
434 	struct shdma_dev *sdev = to_shdma_dev(chan->device);
435 	const struct shdma_ops *ops = sdev->ops;
436 	LIST_HEAD(list);
437 
438 	/* Protect against ISR */
439 	spin_lock_irq(&schan->chan_lock);
440 	ops->halt_channel(schan);
441 	spin_unlock_irq(&schan->chan_lock);
442 
443 	/* Now no new interrupts will occur */
444 
445 	/* Prepared and not submitted descriptors can still be on the queue */
446 	if (!list_empty(&schan->ld_queue))
447 		shdma_chan_ld_cleanup(schan, true);
448 
449 	if (schan->slave_id >= 0) {
450 		/* The caller is holding dma_list_mutex */
451 		clear_bit(schan->slave_id, shdma_slave_used);
452 		chan->private = NULL;
453 	}
454 
455 	spin_lock_irq(&schan->chan_lock);
456 
457 	list_splice_init(&schan->ld_free, &list);
458 	schan->desc_num = 0;
459 
460 	spin_unlock_irq(&schan->chan_lock);
461 
462 	kfree(schan->desc);
463 }
464 
465 /**
466  * shdma_add_desc - get, set up and return one transfer descriptor
467  * @schan:	DMA channel
468  * @flags:	DMA transfer flags
469  * @dst:	destination DMA address, incremented when direction equals
470  *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
471  * @src:	source DMA address, incremented when direction equals
472  *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
473  * @len:	DMA transfer length
474  * @first:	if *first == NULL, set to the new descriptor, its cookie set to -EBUSY
475  * @direction:	needed for slave DMA to decide which address to keep constant,
476  *		equals DMA_MEM_TO_MEM for MEMCPY
477  * Returns the new descriptor or NULL on failure
478  * Locks: called with schan->chan_lock held
479  */
480 static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
481 	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
482 	struct shdma_desc **first, enum dma_transfer_direction direction)
483 {
484 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
485 	const struct shdma_ops *ops = sdev->ops;
486 	struct shdma_desc *new;
487 	size_t copy_size = *len;
488 
489 	if (!copy_size)
490 		return NULL;
491 
492 	/* Allocate the link descriptor from the free list */
493 	new = shdma_get_desc(schan);
494 	if (!new) {
495 		dev_err(schan->dev, "No free link descriptor available\n");
496 		return NULL;
497 	}
498 
499 	ops->desc_setup(schan, new, *src, *dst, &copy_size);
500 
501 	if (!*first) {
502 		/* First desc */
503 		new->async_tx.cookie = -EBUSY;
504 		*first = new;
505 	} else {
506 		/* Other desc - invisible to the user */
507 		new->async_tx.cookie = -EINVAL;
508 	}
509 
510 	dev_dbg(schan->dev,
511 		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
512 		copy_size, *len, src, dst, &new->async_tx,
513 		new->async_tx.cookie);
514 
515 	new->mark = DESC_PREPARED;
516 	new->async_tx.flags = flags;
517 	new->direction = direction;
518 	new->partial = 0;
519 
520 	*len -= copy_size;
521 	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
522 		*src += copy_size;
523 	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
524 		*dst += copy_size;
525 
526 	return new;
527 }
528 
529 /*
530  * shdma_prep_sg - prepare transfer descriptors from an SG list
531  *
532  * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
533  * converted to scatter-gather to guarantee consistent locking and correct
534  * list manipulation. For slave DMA, direction carries the usual meaning,
535  * and, logically, the SG list is RAM and the addr variable contains the
536  * slave address, e.g., the FIFO I/O register. For MEMCPY, direction equals
537  * DMA_MEM_TO_MEM and the one-element SG list points at the source buffer.
538  */
539 static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
540 	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
541 	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
542 {
543 	struct scatterlist *sg;
544 	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
545 	LIST_HEAD(tx_list);
546 	int chunks = 0;
547 	unsigned long irq_flags;
548 	int i;
549 
550 	for_each_sg(sgl, sg, sg_len, i)
551 		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);
552 
553 	/* Have to lock the whole loop to protect against concurrent release */
554 	spin_lock_irqsave(&schan->chan_lock, irq_flags);
555 
556 	/*
557 	 * Chaining:
558 	 * the first descriptor is what the user deals with in all API calls;
559 	 *	its cookie is initially set to -EBUSY and changed at tx-submit
560 	 *	to a positive number
561 	 * if more than one chunk is needed, further chunks have cookie = -EINVAL
562 	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
563 	 * all chunks are linked onto the tx_list head with their .node heads
564 	 *	only during this function, then they are immediately spliced
565 	 *	back onto the free list in the form of a chain
566 	 */
567 	for_each_sg(sgl, sg, sg_len, i) {
568 		dma_addr_t sg_addr = sg_dma_address(sg);
569 		size_t len = sg_dma_len(sg);
570 
571 		if (!len)
572 			goto err_get_desc;
573 
574 		do {
575 			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
576 				i, sg, len, &sg_addr);
577 
578 			if (direction == DMA_DEV_TO_MEM)
579 				new = shdma_add_desc(schan, flags,
580 						&sg_addr, addr, &len, &first,
581 						direction);
582 			else
583 				new = shdma_add_desc(schan, flags,
584 						addr, &sg_addr, &len, &first,
585 						direction);
586 			if (!new)
587 				goto err_get_desc;
588 
589 			new->cyclic = cyclic;
590 			if (cyclic)
591 				new->chunks = 1;
592 			else
593 				new->chunks = chunks--;
594 			list_add_tail(&new->node, &tx_list);
595 		} while (len);
596 	}
597 
598 	if (new != first)
599 		new->async_tx.cookie = -ENOSPC;
600 
601 	/* Put them back on the free list, so they don't get lost */
602 	list_splice_tail(&tx_list, &schan->ld_free);
603 
604 	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
605 
606 	return &first->async_tx;
607 
608 err_get_desc:
609 	list_for_each_entry(new, &tx_list, node)
610 		new->mark = DESC_IDLE;
611 	list_splice(&tx_list, &schan->ld_free);
612 
613 	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
614 
615 	return NULL;
616 }
617 
618 static struct dma_async_tx_descriptor *shdma_prep_memcpy(
619 	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
620 	size_t len, unsigned long flags)
621 {
622 	struct shdma_chan *schan = to_shdma_chan(chan);
623 	struct scatterlist sg;
624 
625 	if (!chan || !len)
626 		return NULL;
627 
628 	BUG_ON(!schan->desc_num);
629 
630 	sg_init_table(&sg, 1);
631 	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
632 		    offset_in_page(dma_src));
633 	sg_dma_address(&sg) = dma_src;
634 	sg_dma_len(&sg) = len;
635 
636 	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
637 			     flags, false);
638 }
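
/*
 * Example (sketch): issuing a memcpy via the operation above through the
 * generic dmaengine API. dma_src and dma_dst are assumed to be DMA addresses
 * already obtained with dma_map_single() or a coherent allocation.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dma_dst, dma_src,
 *						  len, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */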
639 
640 static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
641 	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
642 	enum dma_transfer_direction direction, unsigned long flags, void *context)
643 {
644 	struct shdma_chan *schan = to_shdma_chan(chan);
645 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
646 	const struct shdma_ops *ops = sdev->ops;
647 	int slave_id = schan->slave_id;
648 	dma_addr_t slave_addr;
649 
650 	if (!chan)
651 		return NULL;
652 
653 	BUG_ON(!schan->desc_num);
654 
655 	/* Someone calling slave DMA on a generic channel? */
656 	if (slave_id < 0 || !sg_len) {
657 		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
658 			 __func__, sg_len, slave_id);
659 		return NULL;
660 	}
661 
662 	slave_addr = ops->slave_addr(schan);
663 
664 	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
665 			     direction, flags, false);
666 }
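
/*
 * Example (sketch): a slave driver preparing a device-bound transfer with
 * the operation above, using the generic dmaengine wrapper. The scatterlist
 * sgl/sg_len is assumed to have been mapped with dma_map_sg() beforehand;
 * my_done_fn and my_data are hypothetical completion hooks.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = my_done_fn;
 *		tx->callback_param = my_data;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */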
667 
668 #define SHDMA_MAX_SG_LEN 32
669 
670 static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
671 	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
672 	size_t period_len, enum dma_transfer_direction direction,
673 	unsigned long flags)
674 {
675 	struct shdma_chan *schan = to_shdma_chan(chan);
676 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
677 	struct dma_async_tx_descriptor *desc;
678 	const struct shdma_ops *ops = sdev->ops;
679 	unsigned int sg_len = buf_len / period_len;
680 	int slave_id = schan->slave_id;
681 	dma_addr_t slave_addr;
682 	struct scatterlist *sgl;
683 	int i;
684 
685 	if (!chan)
686 		return NULL;
687 
688 	BUG_ON(!schan->desc_num);
689 
690 	if (sg_len > SHDMA_MAX_SG_LEN) {
691 		dev_err(schan->dev, "sg length %d exceeds limit %d\n",
692 				sg_len, SHDMA_MAX_SG_LEN);
693 		return NULL;
694 	}
695 
696 	/* Someone calling slave DMA on a generic channel? */
697 	if (slave_id < 0 || (buf_len < period_len)) {
698 		dev_warn(schan->dev,
699 			"%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
700 			__func__, buf_len, period_len, slave_id);
701 		return NULL;
702 	}
703 
704 	slave_addr = ops->slave_addr(schan);
705 
706 	/*
707 	 * Allocate the sg list dynamically as it would consume too much stack
708 	 * space.
709 	 */
710 	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL);
711 	if (!sgl)
712 		return NULL;
713 
714 	sg_init_table(sgl, sg_len);
715 
716 	for (i = 0; i < sg_len; i++) {
717 		dma_addr_t src = buf_addr + (period_len * i);
718 
719 		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
720 			    offset_in_page(src));
721 		sg_dma_address(&sgl[i]) = src;
722 		sg_dma_len(&sgl[i]) = period_len;
723 	}
724 
725 	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
726 			     direction, flags, true);
727 
728 	kfree(sgl);
729 	return desc;
730 }
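
/*
 * Example (sketch): setting up a cyclic (e.g. audio ring buffer) transfer
 * with the operation above. buf is assumed to be the DMA address of a
 * coherent buffer of buf_len bytes, split here into four periods.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, buf_len / 4,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */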
731 
732 static int shdma_terminate_all(struct dma_chan *chan)
733 {
734 	struct shdma_chan *schan = to_shdma_chan(chan);
735 	struct shdma_dev *sdev = to_shdma_dev(chan->device);
736 	const struct shdma_ops *ops = sdev->ops;
737 	unsigned long flags;
738 
739 	spin_lock_irqsave(&schan->chan_lock, flags);
740 	ops->halt_channel(schan);
741 
742 	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
743 		/* Record partial transfer */
744 		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
745 							   struct shdma_desc, node);
746 		desc->partial = ops->get_partial(schan, desc);
747 	}
748 
749 	spin_unlock_irqrestore(&schan->chan_lock, flags);
750 
751 	shdma_chan_ld_cleanup(schan, true);
752 
753 	return 0;
754 }
755 
756 static int shdma_config(struct dma_chan *chan,
757 			struct dma_slave_config *config)
758 {
759 	struct shdma_chan *schan = to_shdma_chan(chan);
760 
761 	/*
762 	 * So far only .slave_id is used, but the slave drivers are
763 	 * encouraged to also set a transfer direction and an address.
764 	 */
765 	if (!config)
766 		return -EINVAL;
767 	/*
768 	 * We could lock this, but you shouldn't be configuring the
769 	 * channel while using it...
770 	 */
771 	return shdma_setup_slave(schan, config->slave_id,
772 				 config->direction == DMA_DEV_TO_MEM ?
773 				 config->src_addr : config->dst_addr);
774 }
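
/*
 * Example (sketch): the dma_slave_config a client would pass in, matching
 * what shdma_config() consumes above. The slave ID and FIFO address are
 * hypothetical placeholders.
 *
 *	struct dma_slave_config cfg = {
 *		.slave_id  = 17,
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr  = 0xfe610024,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */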
775 
776 static void shdma_issue_pending(struct dma_chan *chan)
777 {
778 	struct shdma_chan *schan = to_shdma_chan(chan);
779 
780 	spin_lock_irq(&schan->chan_lock);
781 	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
782 		shdma_chan_xfer_ld_queue(schan);
783 	else
784 		schan->pm_state = SHDMA_PM_PENDING;
785 	spin_unlock_irq(&schan->chan_lock);
786 }
787 
788 static enum dma_status shdma_tx_status(struct dma_chan *chan,
789 					dma_cookie_t cookie,
790 					struct dma_tx_state *txstate)
791 {
792 	struct shdma_chan *schan = to_shdma_chan(chan);
793 	enum dma_status status;
794 	unsigned long flags;
795 
796 	shdma_chan_ld_cleanup(schan, false);
797 
798 	spin_lock_irqsave(&schan->chan_lock, flags);
799 
800 	status = dma_cookie_status(chan, cookie, txstate);
801 
802 	/*
803 	 * If we don't find the cookie on the queue, it has been aborted and
804 	 * we have to report an error
805 	 */
806 	if (status != DMA_COMPLETE) {
807 		struct shdma_desc *sdesc;
808 		status = DMA_ERROR;
809 		list_for_each_entry(sdesc, &schan->ld_queue, node)
810 			if (sdesc->cookie == cookie) {
811 				status = DMA_IN_PROGRESS;
812 				break;
813 			}
814 	}
815 
816 	spin_unlock_irqrestore(&schan->chan_lock, flags);
817 
818 	return status;
819 }
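
/*
 * Example (sketch): polling a submitted transfer, which ends up in
 * shdma_tx_status() above via the generic helper. cookie is the value
 * previously returned by dmaengine_submit().
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_COMPLETE)
 *		pr_debug("transfer done\n");
 *	else if (status == DMA_ERROR)
 *		pr_debug("transfer aborted\n");
 */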
820 
821 /* Called from error IRQ or NMI */
822 bool shdma_reset(struct shdma_dev *sdev)
823 {
824 	const struct shdma_ops *ops = sdev->ops;
825 	struct shdma_chan *schan;
826 	unsigned int handled = 0;
827 	int i;
828 
829 	/* Reset all channels */
830 	shdma_for_each_chan(schan, sdev, i) {
831 		struct shdma_desc *sdesc;
832 		LIST_HEAD(dl);
833 
834 		if (!schan)
835 			continue;
836 
837 		spin_lock(&schan->chan_lock);
838 
839 		/* Stop the channel */
840 		ops->halt_channel(schan);
841 
842 		list_splice_init(&schan->ld_queue, &dl);
843 
844 		if (!list_empty(&dl)) {
845 			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
846 			pm_runtime_put(schan->dev);
847 		}
848 		schan->pm_state = SHDMA_PM_ESTABLISHED;
849 
850 		spin_unlock(&schan->chan_lock);
851 
852 		/* Complete all descriptors */
853 		list_for_each_entry(sdesc, &dl, node) {
854 			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
855 			sdesc->mark = DESC_IDLE;
856 			if (tx->callback)
857 				tx->callback(tx->callback_param);
858 		}
859 
860 		spin_lock(&schan->chan_lock);
861 		list_splice(&dl, &schan->ld_free);
862 		spin_unlock(&schan->chan_lock);
863 
864 		handled++;
865 	}
866 
867 	return !!handled;
868 }
869 EXPORT_SYMBOL(shdma_reset);
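
/*
 * Example (sketch): a glue driver would typically call shdma_reset() from
 * its error interrupt or NMI handler, after quiescing any controller-wide
 * error state of its own. my_err_irq is a hypothetical handler:
 *
 *	static irqreturn_t my_err_irq(int irq, void *data)
 *	{
 *		struct shdma_dev *sdev = data;
 *
 *		return shdma_reset(sdev) ? IRQ_HANDLED : IRQ_NONE;
 *	}
 */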
870 
871 static irqreturn_t chan_irq(int irq, void *dev)
872 {
873 	struct shdma_chan *schan = dev;
874 	const struct shdma_ops *ops =
875 		to_shdma_dev(schan->dma_chan.device)->ops;
876 	irqreturn_t ret;
877 
878 	spin_lock(&schan->chan_lock);
879 
880 	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
881 
882 	spin_unlock(&schan->chan_lock);
883 
884 	return ret;
885 }
886 
887 static irqreturn_t chan_irqt(int irq, void *dev)
888 {
889 	struct shdma_chan *schan = dev;
890 	const struct shdma_ops *ops =
891 		to_shdma_dev(schan->dma_chan.device)->ops;
892 	struct shdma_desc *sdesc;
893 
894 	spin_lock_irq(&schan->chan_lock);
895 	list_for_each_entry(sdesc, &schan->ld_queue, node) {
896 		if (sdesc->mark == DESC_SUBMITTED &&
897 		    ops->desc_completed(schan, sdesc)) {
898 			dev_dbg(schan->dev, "done #%d@%p\n",
899 				sdesc->async_tx.cookie, &sdesc->async_tx);
900 			sdesc->mark = DESC_COMPLETED;
901 			break;
902 		}
903 	}
904 	/* Next desc */
905 	shdma_chan_xfer_ld_queue(schan);
906 	spin_unlock_irq(&schan->chan_lock);
907 
908 	shdma_chan_ld_cleanup(schan, false);
909 
910 	return IRQ_HANDLED;
911 }
912 
913 int shdma_request_irq(struct shdma_chan *schan, int irq,
914 			   unsigned long flags, const char *name)
915 {
916 	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
917 					    chan_irqt, flags, name, schan);
918 
919 	schan->irq = ret < 0 ? ret : irq;
920 
921 	return ret;
922 }
923 EXPORT_SYMBOL(shdma_request_irq);
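
/*
 * Example (sketch): a glue driver hooking up a per-channel interrupt during
 * channel setup; irq is platform-provided and the name is a placeholder.
 *
 *	ret = shdma_request_irq(schan, irq, IRQF_SHARED, "my-dma-chan");
 *	if (ret < 0)
 *		goto err_no_irq;
 */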
924 
925 void shdma_chan_probe(struct shdma_dev *sdev,
926 			   struct shdma_chan *schan, int id)
927 {
928 	schan->pm_state = SHDMA_PM_ESTABLISHED;
929 
930 	/* reference struct dma_device */
931 	schan->dma_chan.device = &sdev->dma_dev;
932 	dma_cookie_init(&schan->dma_chan);
933 
934 	schan->dev = sdev->dma_dev.dev;
935 	schan->id = id;
936 
937 	if (!schan->max_xfer_len)
938 		schan->max_xfer_len = PAGE_SIZE;
939 
940 	spin_lock_init(&schan->chan_lock);
941 
942 	/* Init descriptor management lists */
943 	INIT_LIST_HEAD(&schan->ld_queue);
944 	INIT_LIST_HEAD(&schan->ld_free);
945 
946 	/* Add the channel to the DMA device channel list */
947 	list_add_tail(&schan->dma_chan.device_node,
948 			&sdev->dma_dev.channels);
949 	sdev->schan[id] = schan;
950 }
951 EXPORT_SYMBOL(shdma_chan_probe);
952 
953 void shdma_chan_remove(struct shdma_chan *schan)
954 {
955 	list_del(&schan->dma_chan.device_node);
956 }
957 EXPORT_SYMBOL(shdma_chan_remove);
958 
959 int shdma_init(struct device *dev, struct shdma_dev *sdev,
960 		    int chan_num)
961 {
962 	struct dma_device *dma_dev = &sdev->dma_dev;
963 
964 	/*
965 	 * Require all call-backs for now; they can trivially be made optional
966 	 * later as required
967 	 */
968 	if (!sdev->ops ||
969 	    !sdev->desc_size ||
970 	    !sdev->ops->embedded_desc ||
971 	    !sdev->ops->start_xfer ||
972 	    !sdev->ops->setup_xfer ||
973 	    !sdev->ops->set_slave ||
974 	    !sdev->ops->desc_setup ||
975 	    !sdev->ops->slave_addr ||
976 	    !sdev->ops->channel_busy ||
977 	    !sdev->ops->halt_channel ||
978 	    !sdev->ops->desc_completed)
979 		return -EINVAL;
980 
981 	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
982 	if (!sdev->schan)
983 		return -ENOMEM;
984 
985 	INIT_LIST_HEAD(&dma_dev->channels);
986 
987 	/* Common and MEMCPY operations */
988 	dma_dev->device_alloc_chan_resources
989 		= shdma_alloc_chan_resources;
990 	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
991 	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
992 	dma_dev->device_tx_status = shdma_tx_status;
993 	dma_dev->device_issue_pending = shdma_issue_pending;
994 
995 	/* Compulsory for DMA_SLAVE fields */
996 	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
997 	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
998 	dma_dev->device_config = shdma_config;
999 	dma_dev->device_terminate_all = shdma_terminate_all;
1000 
1001 	dma_dev->dev = dev;
1002 
1003 	return 0;
1004 }
1005 EXPORT_SYMBOL(shdma_init);
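
/*
 * Example (sketch): the registration sequence a glue driver would follow in
 * its probe routine. my_dmae, my_ops, my_desc, chans and MY_CHAN_NUM are
 * hypothetical driver-private names; error handling is omitted here.
 *
 *	struct my_dmae *md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
 *	int i, ret;
 *
 *	md->shdma_dev.ops = &my_ops;
 *	md->shdma_dev.desc_size = sizeof(struct my_desc);
 *	ret = shdma_init(dev, &md->shdma_dev, MY_CHAN_NUM);
 *
 *	dma_cap_set(DMA_SLAVE, md->shdma_dev.dma_dev.cap_mask);
 *	dma_cap_set(DMA_MEMCPY, md->shdma_dev.dma_dev.cap_mask);
 *
 *	for (i = 0; i < MY_CHAN_NUM; i++)
 *		shdma_chan_probe(&md->shdma_dev, &md->chans[i].shdma_chan, i);
 *
 *	ret = dma_async_device_register(&md->shdma_dev.dma_dev);
 */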
1006 
1007 void shdma_cleanup(struct shdma_dev *sdev)
1008 {
1009 	kfree(sdev->schan);
1010 }
1011 EXPORT_SYMBOL(shdma_cleanup);
1012 
1013 static int __init shdma_enter(void)
1014 {
1015 	shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
1016 				    sizeof(long), GFP_KERNEL);
1017 	if (!shdma_slave_used)
1018 		return -ENOMEM;
1019 	return 0;
1020 }
1021 module_init(shdma_enter);
1022 
1023 static void __exit shdma_exit(void)
1024 {
1025 	kfree(shdma_slave_used);
1026 }
1027 module_exit(shdma_exit);
1028 
1029 MODULE_LICENSE("GPL v2");
1030 MODULE_DESCRIPTION("SH-DMA driver base library");
1031 MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
1032