xref: /linux/drivers/dma/sh/shdma-base.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
4  *
5  * extracted from shdma.c
6  *
7  * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
8  * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
9  * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
10  * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
11  */
12 
13 #include <linux/delay.h>
14 #include <linux/shdma-base.h>
15 #include <linux/dmaengine.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/module.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/slab.h>
21 #include <linux/spinlock.h>
22 
23 #include "../dmaengine.h"
24 
/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,	/* on ld_free, available to shdma_get_desc() */
	DESC_PREPARED,	/* set up by shdma_add_desc(), not yet submitted */
	DESC_SUBMITTED,	/* moved to ld_queue by tx_submit, owned by hardware */
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

/* Number of descriptors allocated per channel in alloc_chan_resources */
#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume, that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure, that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits, one bit per slave ID currently claimed */
static unsigned long *shdma_slave_used;
50 
51 /* Called under spin_lock_irq(&schan->chan_lock") */
shdma_chan_xfer_ld_queue(struct shdma_chan * schan)52 static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
53 {
54 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
55 	const struct shdma_ops *ops = sdev->ops;
56 	struct shdma_desc *sdesc;
57 
58 	/* DMA work check */
59 	if (ops->channel_busy(schan))
60 		return;
61 
62 	/* Find the first not transferred descriptor */
63 	list_for_each_entry(sdesc, &schan->ld_queue, node)
64 		if (sdesc->mark == DESC_SUBMITTED) {
65 			ops->start_xfer(schan, sdesc);
66 			break;
67 		}
68 }
69 
/*
 * .tx_submit handler: assigns a cookie, moves all chunks of the descriptor
 * chain from ld_free onto ld_queue and, if the queue was empty, takes a
 * runtime PM reference and brings the channel up.  Returns the assigned
 * cookie, or a negative error code if bringing the channel up fails.
 */
static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx);
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	/* An empty queue means the channel is idle and needs powering up */
	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		if (chunk->chunks == 1) {
			chunk->async_tx.callback = callback;
			chunk->async_tx.callback_param = tx->callback_param;
		} else {
			/* Callback goes to the last chunk */
			chunk->async_tx.callback = NULL;
		}
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);
	}

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		/* Runtime PM barrier is issued without the channel lock held */
		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset, while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);

			ret = ops->setup_xfer(schan, schan->slave_id);
			if (ret < 0) {
				dev_err(schan->dev, "setup_xfer failed: %d\n", ret);

				/* Remove chunks from the queue and mark them as idle */
				list_for_each_entry_safe(chunk, c, &schan->ld_queue, node) {
					if (chunk->cookie == cookie) {
						chunk->mark = DESC_IDLE;
						list_move(&chunk->node, &schan->ld_free);
					}
				}

				schan->pm_state = SHDMA_PM_ESTABLISHED;
				/* Balance the pm_runtime_get() above */
				pm_runtime_put(schan->dev);

				spin_unlock_irq(&schan->chan_lock);
				return ret;
			}

			/* .device_issue_pending() ran while we were powering up */
			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}
168 
169 /* Called with desc_lock held */
shdma_get_desc(struct shdma_chan * schan)170 static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
171 {
172 	struct shdma_desc *sdesc;
173 
174 	list_for_each_entry(sdesc, &schan->ld_free, node)
175 		if (sdesc->mark != DESC_PREPARED) {
176 			BUG_ON(sdesc->mark != DESC_IDLE);
177 			list_del(&sdesc->node);
178 			return sdesc;
179 		}
180 
181 	return NULL;
182 }
183 
/*
 * shdma_setup_slave - claim and configure the channel's selected slave
 * @schan:	DMA channel
 * @slave_addr:	slave-device DMA address (0 when not yet known)
 *
 * Validates schan->real_slave_id, claims it in the global shdma_slave_used
 * bitmap and lets the hardware driver program the match value.  Returns 0
 * on success or a negative error code; on failure the bitmap bit is left
 * unclaimed.
 */
static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret, match;

	if (schan->dev->of_node) {
		/* DT case: match on the hardware request line; dry-run first */
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		if (ret < 0)
			return ret;
	} else {
		/* Legacy case: match directly on the slave ID */
		match = schan->real_slave_id;
	}

	if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
		return -EINVAL;

	/* Each slave ID may only be in use on one channel at a time */
	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, slave_addr, false);
	if (ret < 0) {
		/* Release the ID again on failure */
		clear_bit(schan->real_slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = schan->real_slave_id;

	return 0;
}
215 
shdma_alloc_chan_resources(struct dma_chan * chan)216 static int shdma_alloc_chan_resources(struct dma_chan *chan)
217 {
218 	struct shdma_chan *schan = to_shdma_chan(chan);
219 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
220 	const struct shdma_ops *ops = sdev->ops;
221 	struct shdma_desc *desc;
222 	struct shdma_slave *slave = chan->private;
223 	int ret, i;
224 
225 	/*
226 	 * This relies on the guarantee from dmaengine that alloc_chan_resources
227 	 * never runs concurrently with itself or free_chan_resources.
228 	 */
229 	if (slave) {
230 		/* Legacy mode: .private is set in filter */
231 		schan->real_slave_id = slave->slave_id;
232 		ret = shdma_setup_slave(schan, 0);
233 		if (ret < 0)
234 			goto esetslave;
235 	} else {
236 		/* Normal mode: real_slave_id was set by filter */
237 		schan->slave_id = -EINVAL;
238 	}
239 
240 	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
241 			      sdev->desc_size, GFP_KERNEL);
242 	if (!schan->desc) {
243 		ret = -ENOMEM;
244 		goto edescalloc;
245 	}
246 	schan->desc_num = NR_DESCS_PER_CHANNEL;
247 
248 	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
249 		desc = ops->embedded_desc(schan->desc, i);
250 		dma_async_tx_descriptor_init(&desc->async_tx,
251 					     &schan->dma_chan);
252 		desc->async_tx.tx_submit = shdma_tx_submit;
253 		desc->mark = DESC_IDLE;
254 
255 		list_add(&desc->node, &schan->ld_free);
256 	}
257 
258 	return NR_DESCS_PER_CHANNEL;
259 
260 edescalloc:
261 	if (slave)
262 esetslave:
263 		clear_bit(slave->slave_id, shdma_slave_used);
264 	chan->private = NULL;
265 	return ret;
266 }
267 
268 /*
269  * This is the standard shdma filter function to be used as a replacement to the
270  * "old" method, using the .private pointer.
271  * You always have to pass a valid slave id as the argument, old drivers that
272  * pass ERR_PTR(-EINVAL) as a filter parameter and set it up in dma_slave_config
273  * need to be updated so we can remove the slave_id field from dma_slave_config.
274  * parameter. If this filter is used, the slave driver, after calling
275  * dma_request_channel(), will also have to call dmaengine_slave_config() with
276  * .direction, and either .src_addr or .dst_addr set.
277  *
278  * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
279  * capability! If this becomes a requirement, hardware glue drivers, using this
280  * services would have to provide their own filters, which first would check
281  * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
282  * this, and only then, in case of a match, call this common filter.
283  * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
284  * In that case the MID-RID value is used for slave channel filtering and is
285  * passed to this function in the "arg" parameter.
286  */
shdma_chan_filter(struct dma_chan * chan,void * arg)287 bool shdma_chan_filter(struct dma_chan *chan, void *arg)
288 {
289 	struct shdma_chan *schan;
290 	struct shdma_dev *sdev;
291 	int slave_id = (long)arg;
292 	int ret;
293 
294 	/* Only support channels handled by this driver. */
295 	if (chan->device->device_alloc_chan_resources !=
296 	    shdma_alloc_chan_resources)
297 		return false;
298 
299 	schan = to_shdma_chan(chan);
300 	sdev = to_shdma_dev(chan->device);
301 
302 	/*
303 	 * For DT, the schan->slave_id field is generated by the
304 	 * set_slave function from the slave ID that is passed in
305 	 * from xlate. For the non-DT case, the slave ID is
306 	 * directly passed into the filter function by the driver
307 	 */
308 	if (schan->dev->of_node) {
309 		ret = sdev->ops->set_slave(schan, slave_id, 0, true);
310 		if (ret < 0)
311 			return false;
312 
313 		schan->real_slave_id = schan->slave_id;
314 		return true;
315 	}
316 
317 	if (slave_id < 0) {
318 		/* No slave requested - arbitrary channel */
319 		dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
320 		return true;
321 	}
322 
323 	if (slave_id >= slave_num)
324 		return false;
325 
326 	ret = sdev->ops->set_slave(schan, slave_id, 0, true);
327 	if (ret < 0)
328 		return false;
329 
330 	schan->real_slave_id = slave_id;
331 
332 	return true;
333 }
334 EXPORT_SYMBOL(shdma_chan_filter);
335 
/*
 * __ld_cleanup - make one pass over ld_queue, processing completed descriptors
 * @schan:	DMA channel
 * @all:	true when terminating - also reclaim still-submitted descriptors
 *
 * Updates completion cookies, collects at most one pending user callback
 * (invoked after the lock is dropped), returns acked descriptors to ld_free
 * and re-queues cyclic ones.  Returns the callback handled on this pass, or
 * NULL when nothing is left to do - the caller loops until NULL.
 */
static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	struct dmaengine_desc_callback cb;
	unsigned long flags;
	LIST_HEAD(cyclic_list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		/* A fully completed single-chunk transfer advances the cookie */
		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			dmaengine_desc_get_callback(tx, &cb);
			callback = tx->callback;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			/* Stop here: the callback is invoked without the lock */
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			/* Head of a chain: remember its ack state for the tail */
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			/* Chain members inherit the ack state of their head */
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				fallthrough;
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {

			if (all || !desc->cyclic) {
				/* Remove from ld_queue list */
				desc->mark = DESC_IDLE;
				list_move(&desc->node, &schan->ld_free);
			} else {
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);
			}

			if (list_empty(&schan->ld_queue)) {
				/* Queue drained: drop the PM reference from submit */
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			} else if (schan->pm_state == SHDMA_PM_PENDING) {
				shdma_chan_xfer_ld_queue(schan);
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	/* Re-queue cyclic descriptors for their next period */
	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);

	return callback;
}
448 
449 /*
450  * shdma_chan_ld_cleanup - Clean up link descriptors
451  *
452  * Clean up the ld_queue of DMA channel.
453  */
shdma_chan_ld_cleanup(struct shdma_chan * schan,bool all)454 static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
455 {
456 	while (__ld_cleanup(schan, all))
457 		;
458 }
459 
460 /*
461  * shdma_free_chan_resources - Free all resources of the channel.
462  */
shdma_free_chan_resources(struct dma_chan * chan)463 static void shdma_free_chan_resources(struct dma_chan *chan)
464 {
465 	struct shdma_chan *schan = to_shdma_chan(chan);
466 	struct shdma_dev *sdev = to_shdma_dev(chan->device);
467 	const struct shdma_ops *ops = sdev->ops;
468 	LIST_HEAD(list);
469 
470 	/* Protect against ISR */
471 	spin_lock_irq(&schan->chan_lock);
472 	ops->halt_channel(schan);
473 	spin_unlock_irq(&schan->chan_lock);
474 
475 	/* Now no new interrupts will occur */
476 
477 	/* Prepared and not submitted descriptors can still be on the queue */
478 	if (!list_empty(&schan->ld_queue))
479 		shdma_chan_ld_cleanup(schan, true);
480 
481 	if (schan->slave_id >= 0) {
482 		/* The caller is holding dma_list_mutex */
483 		clear_bit(schan->slave_id, shdma_slave_used);
484 		chan->private = NULL;
485 	}
486 
487 	schan->real_slave_id = 0;
488 
489 	spin_lock_irq(&schan->chan_lock);
490 
491 	list_splice_init(&schan->ld_free, &list);
492 	schan->desc_num = 0;
493 
494 	spin_unlock_irq(&schan->chan_lock);
495 
496 	kfree(schan->desc);
497 }
498 
499 /**
500  * shdma_add_desc - get, set up and return one transfer descriptor
501  * @schan:	DMA channel
502  * @flags:	DMA transfer flags
503  * @dst:	destination DMA address, incremented when direction equals
504  *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
505  * @src:	source DMA address, incremented when direction equals
506  *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
507  * @len:	DMA transfer length
508  * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
509  * @direction:	needed for slave DMA to decide which address to keep constant,
510  *		equals DMA_MEM_TO_MEM for MEMCPY
511  * Returns 0 or an error
512  * Locks: called with desc_lock held
513  */
shdma_add_desc(struct shdma_chan * schan,unsigned long flags,dma_addr_t * dst,dma_addr_t * src,size_t * len,struct shdma_desc ** first,enum dma_transfer_direction direction)514 static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
515 	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
516 	struct shdma_desc **first, enum dma_transfer_direction direction)
517 {
518 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
519 	const struct shdma_ops *ops = sdev->ops;
520 	struct shdma_desc *new;
521 	size_t copy_size = *len;
522 
523 	if (!copy_size)
524 		return NULL;
525 
526 	/* Allocate the link descriptor from the free list */
527 	new = shdma_get_desc(schan);
528 	if (!new) {
529 		dev_err(schan->dev, "No free link descriptor available\n");
530 		return NULL;
531 	}
532 
533 	ops->desc_setup(schan, new, *src, *dst, &copy_size);
534 
535 	if (!*first) {
536 		/* First desc */
537 		new->async_tx.cookie = -EBUSY;
538 		*first = new;
539 	} else {
540 		/* Other desc - invisible to the user */
541 		new->async_tx.cookie = -EINVAL;
542 	}
543 
544 	dev_dbg(schan->dev,
545 		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
546 		copy_size, *len, src, dst, &new->async_tx,
547 		new->async_tx.cookie);
548 
549 	new->mark = DESC_PREPARED;
550 	new->async_tx.flags = flags;
551 	new->direction = direction;
552 	new->partial = 0;
553 
554 	*len -= copy_size;
555 	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
556 		*src += copy_size;
557 	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
558 		*dst += copy_size;
559 
560 	return new;
561 }
562 
/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains slave address,
 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
 * and the SG list contains only one element and points at the source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	/* Number of hardware-sized chunks needed for the whole SG list */
	int chunks;
	unsigned long irq_flags;
	int i;

	chunks = sg_nents_for_dma(sgl, sg_len, schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		/* Split each SG entry into chunks the hardware can handle */
		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
				i, sg, len, &sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->cyclic = cyclic;
			if (cyclic)
				new->chunks = 1;
			else
				new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	/* Mark the last chunk of a multi-chunk transfer */
	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	/* Roll back: everything prepared so far becomes idle again */
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}
650 
/* .device_prep_dma_memcpy handler: route MEMCPY through the common SG path */
static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Wrap the source buffer in a single-entry scatterlist */
	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_len(&sg) = len;
	sg_dma_address(&sg) = dma_src;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			     flags, false);
}
672 
shdma_prep_slave_sg(struct dma_chan * chan,struct scatterlist * sgl,unsigned int sg_len,enum dma_transfer_direction direction,unsigned long flags,void * context)673 static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
674 	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
675 	enum dma_transfer_direction direction, unsigned long flags, void *context)
676 {
677 	struct shdma_chan *schan = to_shdma_chan(chan);
678 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
679 	const struct shdma_ops *ops = sdev->ops;
680 	int slave_id = schan->slave_id;
681 	dma_addr_t slave_addr;
682 
683 	if (!chan)
684 		return NULL;
685 
686 	BUG_ON(!schan->desc_num);
687 
688 	/* Someone calling slave DMA on a generic channel? */
689 	if (slave_id < 0 || !sg_len) {
690 		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
691 			 __func__, sg_len, slave_id);
692 		return NULL;
693 	}
694 
695 	slave_addr = ops->slave_addr(schan);
696 
697 	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
698 			     direction, flags, false);
699 }
700 
701 #define SHDMA_MAX_SG_LEN 32
702 
shdma_prep_dma_cyclic(struct dma_chan * chan,dma_addr_t buf_addr,size_t buf_len,size_t period_len,enum dma_transfer_direction direction,unsigned long flags)703 static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
704 	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
705 	size_t period_len, enum dma_transfer_direction direction,
706 	unsigned long flags)
707 {
708 	struct shdma_chan *schan = to_shdma_chan(chan);
709 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
710 	struct dma_async_tx_descriptor *desc;
711 	const struct shdma_ops *ops = sdev->ops;
712 	unsigned int sg_len = buf_len / period_len;
713 	int slave_id = schan->slave_id;
714 	dma_addr_t slave_addr;
715 	struct scatterlist *sgl;
716 	int i;
717 
718 	if (!chan)
719 		return NULL;
720 
721 	BUG_ON(!schan->desc_num);
722 
723 	if (sg_len > SHDMA_MAX_SG_LEN) {
724 		dev_err(schan->dev, "sg length %d exceeds limit %d",
725 				sg_len, SHDMA_MAX_SG_LEN);
726 		return NULL;
727 	}
728 
729 	/* Someone calling slave DMA on a generic channel? */
730 	if (slave_id < 0 || (buf_len < period_len)) {
731 		dev_warn(schan->dev,
732 			"%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
733 			__func__, buf_len, period_len, slave_id);
734 		return NULL;
735 	}
736 
737 	slave_addr = ops->slave_addr(schan);
738 
739 	/*
740 	 * Allocate the sg list dynamically as it would consume too much stack
741 	 * space.
742 	 */
743 	sgl = kmalloc_objs(*sgl, sg_len);
744 	if (!sgl)
745 		return NULL;
746 
747 	sg_init_table(sgl, sg_len);
748 
749 	for (i = 0; i < sg_len; i++) {
750 		dma_addr_t src = buf_addr + (period_len * i);
751 
752 		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
753 			    offset_in_page(src));
754 		sg_dma_address(&sgl[i]) = src;
755 		sg_dma_len(&sgl[i]) = period_len;
756 	}
757 
758 	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
759 			     direction, flags, true);
760 
761 	kfree(sgl);
762 	return desc;
763 }
764 
shdma_terminate_all(struct dma_chan * chan)765 static int shdma_terminate_all(struct dma_chan *chan)
766 {
767 	struct shdma_chan *schan = to_shdma_chan(chan);
768 	struct shdma_dev *sdev = to_shdma_dev(chan->device);
769 	const struct shdma_ops *ops = sdev->ops;
770 	unsigned long flags;
771 
772 	spin_lock_irqsave(&schan->chan_lock, flags);
773 	ops->halt_channel(schan);
774 
775 	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
776 		/* Record partial transfer */
777 		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
778 							   struct shdma_desc, node);
779 		desc->partial = ops->get_partial(schan, desc);
780 	}
781 
782 	spin_unlock_irqrestore(&schan->chan_lock, flags);
783 
784 	shdma_chan_ld_cleanup(schan, true);
785 
786 	return 0;
787 }
788 
shdma_config(struct dma_chan * chan,struct dma_slave_config * config)789 static int shdma_config(struct dma_chan *chan,
790 			struct dma_slave_config *config)
791 {
792 	struct shdma_chan *schan = to_shdma_chan(chan);
793 
794 	/*
795 	 * So far only .slave_id is used, but the slave drivers are
796 	 * encouraged to also set a transfer direction and an address.
797 	 */
798 	if (!config)
799 		return -EINVAL;
800 
801 	/*
802 	 * We could lock this, but you shouldn't be configuring the
803 	 * channel, while using it...
804 	 */
805 	return shdma_setup_slave(schan,
806 				 config->direction == DMA_DEV_TO_MEM ?
807 				 config->src_addr : config->dst_addr);
808 }
809 
shdma_issue_pending(struct dma_chan * chan)810 static void shdma_issue_pending(struct dma_chan *chan)
811 {
812 	struct shdma_chan *schan = to_shdma_chan(chan);
813 
814 	spin_lock_irq(&schan->chan_lock);
815 	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
816 		shdma_chan_xfer_ld_queue(schan);
817 	else
818 		schan->pm_state = SHDMA_PM_PENDING;
819 	spin_unlock_irq(&schan->chan_lock);
820 }
821 
/* .device_tx_status handler */
static enum dma_status shdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_desc *sdesc;
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * A cookie that is neither complete nor still on the queue has
	 * been aborted, which must be reported as an error.
	 */
	if (status != DMA_COMPLETE) {
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node) {
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}
854 
/*
 * shdma_reset - halt all channels and abort their queued descriptors
 * @sdev:	shdma device
 *
 * Called from error IRQ or NMI.  Returns true if at least one channel
 * was processed.
 */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		/* Take the whole queue private, so it can be drained unlocked */
		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			/* Queue had entries: drop the PM reference from submit */
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/* Complete all descriptors: callbacks run without the lock */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;

			sdesc->mark = DESC_IDLE;
			dmaengine_desc_get_callback_invoke(tx, NULL);
		}

		/* Return the aborted descriptors to the free list */
		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);
904 
chan_irq(int irq,void * dev)905 static irqreturn_t chan_irq(int irq, void *dev)
906 {
907 	struct shdma_chan *schan = dev;
908 	const struct shdma_ops *ops =
909 		to_shdma_dev(schan->dma_chan.device)->ops;
910 	irqreturn_t ret;
911 
912 	spin_lock(&schan->chan_lock);
913 
914 	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
915 
916 	spin_unlock(&schan->chan_lock);
917 
918 	return ret;
919 }
920 
chan_irqt(int irq,void * dev)921 static irqreturn_t chan_irqt(int irq, void *dev)
922 {
923 	struct shdma_chan *schan = dev;
924 	const struct shdma_ops *ops =
925 		to_shdma_dev(schan->dma_chan.device)->ops;
926 	struct shdma_desc *sdesc;
927 
928 	spin_lock_irq(&schan->chan_lock);
929 	list_for_each_entry(sdesc, &schan->ld_queue, node) {
930 		if (sdesc->mark == DESC_SUBMITTED &&
931 		    ops->desc_completed(schan, sdesc)) {
932 			dev_dbg(schan->dev, "done #%d@%p\n",
933 				sdesc->async_tx.cookie, &sdesc->async_tx);
934 			sdesc->mark = DESC_COMPLETED;
935 			break;
936 		}
937 	}
938 	/* Next desc */
939 	shdma_chan_xfer_ld_queue(schan);
940 	spin_unlock_irq(&schan->chan_lock);
941 
942 	shdma_chan_ld_cleanup(schan, false);
943 
944 	return IRQ_HANDLED;
945 }
946 
shdma_request_irq(struct shdma_chan * schan,int irq,unsigned long flags,const char * name)947 int shdma_request_irq(struct shdma_chan *schan, int irq,
948 			   unsigned long flags, const char *name)
949 {
950 	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
951 					    chan_irqt, flags, name, schan);
952 
953 	schan->irq = ret < 0 ? ret : irq;
954 
955 	return ret;
956 }
957 EXPORT_SYMBOL(shdma_request_irq);
958 
shdma_chan_probe(struct shdma_dev * sdev,struct shdma_chan * schan,int id)959 void shdma_chan_probe(struct shdma_dev *sdev,
960 			   struct shdma_chan *schan, int id)
961 {
962 	schan->pm_state = SHDMA_PM_ESTABLISHED;
963 
964 	/* reference struct dma_device */
965 	schan->dma_chan.device = &sdev->dma_dev;
966 	dma_cookie_init(&schan->dma_chan);
967 
968 	schan->dev = sdev->dma_dev.dev;
969 	schan->id = id;
970 
971 	if (!schan->max_xfer_len)
972 		schan->max_xfer_len = PAGE_SIZE;
973 
974 	spin_lock_init(&schan->chan_lock);
975 
976 	/* Init descriptor manage list */
977 	INIT_LIST_HEAD(&schan->ld_queue);
978 	INIT_LIST_HEAD(&schan->ld_free);
979 
980 	/* Add the channel to DMA device channel list */
981 	list_add_tail(&schan->dma_chan.device_node,
982 			&sdev->dma_dev.channels);
983 	sdev->schan[id] = schan;
984 }
985 EXPORT_SYMBOL(shdma_chan_probe);
986 
/*
 * shdma_chan_remove - detach a channel from its DMA device's channel list;
 * counterpart of shdma_chan_probe()
 */
void shdma_chan_remove(struct shdma_chan *schan)
{
	list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);
992 
shdma_init(struct device * dev,struct shdma_dev * sdev,int chan_num)993 int shdma_init(struct device *dev, struct shdma_dev *sdev,
994 		    int chan_num)
995 {
996 	struct dma_device *dma_dev = &sdev->dma_dev;
997 
998 	/*
999 	 * Require all call-backs for now, they can trivially be made optional
1000 	 * later as required
1001 	 */
1002 	if (!sdev->ops ||
1003 	    !sdev->desc_size ||
1004 	    !sdev->ops->embedded_desc ||
1005 	    !sdev->ops->start_xfer ||
1006 	    !sdev->ops->setup_xfer ||
1007 	    !sdev->ops->set_slave ||
1008 	    !sdev->ops->desc_setup ||
1009 	    !sdev->ops->slave_addr ||
1010 	    !sdev->ops->channel_busy ||
1011 	    !sdev->ops->halt_channel ||
1012 	    !sdev->ops->desc_completed)
1013 		return -EINVAL;
1014 
1015 	sdev->schan = kzalloc_objs(*sdev->schan, chan_num);
1016 	if (!sdev->schan)
1017 		return -ENOMEM;
1018 
1019 	INIT_LIST_HEAD(&dma_dev->channels);
1020 
1021 	/* Common and MEMCPY operations */
1022 	dma_dev->device_alloc_chan_resources
1023 		= shdma_alloc_chan_resources;
1024 	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
1025 	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
1026 	dma_dev->device_tx_status = shdma_tx_status;
1027 	dma_dev->device_issue_pending = shdma_issue_pending;
1028 
1029 	/* Compulsory for DMA_SLAVE fields */
1030 	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
1031 	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
1032 	dma_dev->device_config = shdma_config;
1033 	dma_dev->device_terminate_all = shdma_terminate_all;
1034 
1035 	dma_dev->dev = dev;
1036 
1037 	return 0;
1038 }
1039 EXPORT_SYMBOL(shdma_init);
1040 
/*
 * shdma_cleanup - free the channel pointer array allocated by shdma_init()
 */
void shdma_cleanup(struct shdma_dev *sdev)
{
	kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);
1046 
shdma_enter(void)1047 static int __init shdma_enter(void)
1048 {
1049 	shdma_slave_used = bitmap_zalloc(slave_num, GFP_KERNEL);
1050 	if (!shdma_slave_used)
1051 		return -ENOMEM;
1052 	return 0;
1053 }
1054 module_init(shdma_enter);
1055 
static void __exit shdma_exit(void)
{
	/* Free the slave-ID usage bitmap allocated in shdma_enter() */
	bitmap_free(shdma_slave_used);
}
module_exit(shdma_exit);
1061 
1062 MODULE_DESCRIPTION("SH-DMA driver base library");
1063 MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
1064