xref: /linux/drivers/dma/sh/shdma-base.c (revision e4c0fdd5af4c590ca07880b97e286c6532437658)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
4  *
5  * extracted from shdma.c
6  *
7  * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
8  * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
9  * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
10  * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
11  */
12 
13 #include <linux/delay.h>
14 #include <linux/shdma-base.h>
15 #include <linux/dmaengine.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/module.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/slab.h>
21 #include <linux/spinlock.h>
22 
23 #include "../dmaengine.h"
24 
25 /* DMA descriptor control */
26 enum shdma_desc_status {
27 	DESC_IDLE,
28 	DESC_PREPARED,
29 	DESC_SUBMITTED,
30 	DESC_COMPLETED,	/* completed, have to call callback */
31 	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
32 };
33 
34 #define NR_DESCS_PER_CHANNEL 32
35 
36 #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
37 #define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)
38 
39 /*
40  * For slave DMA we assume that there is a finite number of DMA slaves in the
41  * system, and that each such slave can only use a finite number of channels.
42  * We use slave channel IDs to make sure that no such slave channel ID is
43  * allocated more than once.
44  */
45 static unsigned int slave_num = 256;
46 module_param(slave_num, uint, 0444);
47 
48 /* A bitmask with slave_num bits */
49 static unsigned long *shdma_slave_used;
50 
51 /* Called under spin_lock_irq(&schan->chan_lock) */
52 static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
53 {
54 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
55 	const struct shdma_ops *ops = sdev->ops;
56 	struct shdma_desc *sdesc;
57 
58 	/* DMA work check */
59 	if (ops->channel_busy(schan))
60 		return;
61 
62 	/* Find the first not transferred descriptor */
63 	list_for_each_entry(sdesc, &schan->ld_queue, node)
64 		if (sdesc->mark == DESC_SUBMITTED) {
65 			ops->start_xfer(schan, sdesc);
66 			break;
67 		}
68 }
69 
70 static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
71 {
72 	struct shdma_desc *chunk, *c, *desc =
73 		container_of(tx, struct shdma_desc, async_tx);
74 	struct shdma_chan *schan = to_shdma_chan(tx->chan);
75 	dma_async_tx_callback callback = tx->callback;
76 	dma_cookie_t cookie;
77 	bool power_up;
78 
79 	spin_lock_irq(&schan->chan_lock);
80 
81 	power_up = list_empty(&schan->ld_queue);
82 
83 	cookie = dma_cookie_assign(tx);
84 
85 	/* Mark all chunks of this descriptor as submitted, move to the queue */
86 	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
87 		/*
88 		 * All chunks are on the global ld_free, so, we have to find
89 		 * the end of the chain ourselves
90 		 */
91 		if (chunk != desc && (chunk->mark == DESC_IDLE ||
92 				      chunk->async_tx.cookie > 0 ||
93 				      chunk->async_tx.cookie == -EBUSY ||
94 				      &chunk->node == &schan->ld_free))
95 			break;
96 		chunk->mark = DESC_SUBMITTED;
97 		if (chunk->chunks == 1) {
98 			chunk->async_tx.callback = callback;
99 			chunk->async_tx.callback_param = tx->callback_param;
100 		} else {
101 			/* Callback goes to the last chunk */
102 			chunk->async_tx.callback = NULL;
103 		}
104 		chunk->cookie = cookie;
105 		list_move_tail(&chunk->node, &schan->ld_queue);
106 
107 		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
108 			tx->cookie, &chunk->async_tx, schan->id);
109 	}
110 
111 	if (power_up) {
112 		int ret;
113 		schan->pm_state = SHDMA_PM_BUSY;
114 
115 		ret = pm_runtime_get(schan->dev);
116 
117 		spin_unlock_irq(&schan->chan_lock);
118 		if (ret < 0)
119 			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
120 
121 		pm_runtime_barrier(schan->dev);
122 
123 		spin_lock_irq(&schan->chan_lock);
124 
125 		/* Have we been reset, while waiting? */
126 		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
127 			struct shdma_dev *sdev =
128 				to_shdma_dev(schan->dma_chan.device);
129 			const struct shdma_ops *ops = sdev->ops;
130 			dev_dbg(schan->dev, "Bring up channel %d\n",
131 				schan->id);
132 
133 			ret = ops->setup_xfer(schan, schan->slave_id);
134 			if (ret < 0) {
135 				dev_err(schan->dev, "setup_xfer failed: %d\n", ret);
136 
137 				/* Remove chunks from the queue and mark them as idle */
138 				list_for_each_entry_safe(chunk, c, &schan->ld_queue, node) {
139 					if (chunk->cookie == cookie) {
140 						chunk->mark = DESC_IDLE;
141 						list_move(&chunk->node, &schan->ld_free);
142 					}
143 				}
144 
145 				schan->pm_state = SHDMA_PM_ESTABLISHED;
146 				ret = pm_runtime_put(schan->dev);
147 
148 				spin_unlock_irq(&schan->chan_lock);
149 				return ret;
150 			}
151 
152 			if (schan->pm_state == SHDMA_PM_PENDING)
153 				shdma_chan_xfer_ld_queue(schan);
154 			schan->pm_state = SHDMA_PM_ESTABLISHED;
155 		}
156 	} else {
157 		/*
158 		 * Tell .device_issue_pending() not to run the queue, interrupts
159 		 * will do it anyway
160 		 */
161 		schan->pm_state = SHDMA_PM_PENDING;
162 	}
163 
164 	spin_unlock_irq(&schan->chan_lock);
165 
166 	return cookie;
167 }
168 
169 /* Called with desc_lock held */
170 static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
171 {
172 	struct shdma_desc *sdesc;
173 
174 	list_for_each_entry(sdesc, &schan->ld_free, node)
175 		if (sdesc->mark != DESC_PREPARED) {
176 			BUG_ON(sdesc->mark != DESC_IDLE);
177 			list_del(&sdesc->node);
178 			return sdesc;
179 		}
180 
181 	return NULL;
182 }
183 
184 static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
185 {
186 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
187 	const struct shdma_ops *ops = sdev->ops;
188 	int ret, match;
189 
190 	if (schan->dev->of_node) {
191 		match = schan->hw_req;
192 		ret = ops->set_slave(schan, match, slave_addr, true);
193 		if (ret < 0)
194 			return ret;
195 	} else {
196 		match = schan->real_slave_id;
197 	}
198 
199 	if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
200 		return -EINVAL;
201 
202 	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
203 		return -EBUSY;
204 
205 	ret = ops->set_slave(schan, match, slave_addr, false);
206 	if (ret < 0) {
207 		clear_bit(schan->real_slave_id, shdma_slave_used);
208 		return ret;
209 	}
210 
211 	schan->slave_id = schan->real_slave_id;
212 
213 	return 0;
214 }
215 
216 static int shdma_alloc_chan_resources(struct dma_chan *chan)
217 {
218 	struct shdma_chan *schan = to_shdma_chan(chan);
219 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
220 	const struct shdma_ops *ops = sdev->ops;
221 	struct shdma_desc *desc;
222 	struct shdma_slave *slave = chan->private;
223 	int ret, i;
224 
225 	/*
226 	 * This relies on the guarantee from dmaengine that alloc_chan_resources
227 	 * never runs concurrently with itself or free_chan_resources.
228 	 */
229 	if (slave) {
230 		/* Legacy mode: .private is set in filter */
231 		schan->real_slave_id = slave->slave_id;
232 		ret = shdma_setup_slave(schan, 0);
233 		if (ret < 0)
234 			goto esetslave;
235 	} else {
236 		/* Normal mode: real_slave_id was set by filter */
237 		schan->slave_id = -EINVAL;
238 	}
239 
240 	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
241 			      sdev->desc_size, GFP_KERNEL);
242 	if (!schan->desc) {
243 		ret = -ENOMEM;
244 		goto edescalloc;
245 	}
246 	schan->desc_num = NR_DESCS_PER_CHANNEL;
247 
248 	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
249 		desc = ops->embedded_desc(schan->desc, i);
250 		dma_async_tx_descriptor_init(&desc->async_tx,
251 					     &schan->dma_chan);
252 		desc->async_tx.tx_submit = shdma_tx_submit;
253 		desc->mark = DESC_IDLE;
254 
255 		list_add(&desc->node, &schan->ld_free);
256 	}
257 
258 	return NR_DESCS_PER_CHANNEL;
259 
260 edescalloc:
261 	if (slave)
262 esetslave:
263 		clear_bit(slave->slave_id, shdma_slave_used);
264 	chan->private = NULL;
265 	return ret;
266 }
267 
268 /*
269  * This is the standard shdma filter function to be used as a replacement for the
270  * "old" method, using the .private pointer.
271  * You always have to pass a valid slave id as the argument; old drivers that
272  * pass ERR_PTR(-EINVAL) as a filter parameter and set it up in dma_slave_config
273  * need to be updated so that the slave_id field can be removed from
274  * dma_slave_config. If this filter is used, the slave driver, after calling
275  * dma_request_channel(), will also have to call dmaengine_slave_config() with
276  * .direction, and either .src_addr or .dst_addr set.
277  *
278  * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
279  * capability! If this becomes a requirement, hardware glue drivers using these
280  * services would have to provide their own filters, which would first check
281  * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
282  * this, and only then, in case of a match, call this common filter.
283  * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
284  * In that case the MID-RID value is used for slave channel filtering and is
285  * passed to this function in the "arg" parameter.
286  */
287 bool shdma_chan_filter(struct dma_chan *chan, void *arg)
288 {
289 	struct shdma_chan *schan;
290 	struct shdma_dev *sdev;
291 	int slave_id = (long)arg;
292 	int ret;
293 
294 	/* Only support channels handled by this driver. */
295 	if (chan->device->device_alloc_chan_resources !=
296 	    shdma_alloc_chan_resources)
297 		return false;
298 
299 	schan = to_shdma_chan(chan);
300 	sdev = to_shdma_dev(chan->device);
301 
302 	/*
303 	 * For DT, the schan->slave_id field is generated by the
304 	 * set_slave function from the slave ID that is passed in
305 	 * from xlate. For the non-DT case, the slave ID is
306 	 * directly passed into the filter function by the driver
307 	 */
308 	if (schan->dev->of_node) {
309 		ret = sdev->ops->set_slave(schan, slave_id, 0, true);
310 		if (ret < 0)
311 			return false;
312 
313 		schan->real_slave_id = schan->slave_id;
314 		return true;
315 	}
316 
317 	if (slave_id < 0) {
318 		/* No slave requested - arbitrary channel */
319 		dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
320 		return false;
321 	}
322 
323 	if (slave_id >= slave_num)
324 		return false;
325 
326 	ret = sdev->ops->set_slave(schan, slave_id, 0, true);
327 	if (ret < 0)
328 		return false;
329 
330 	schan->real_slave_id = slave_id;
331 
332 	return true;
333 }
334 EXPORT_SYMBOL(shdma_chan_filter);
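
/*
 * Usage sketch (illustrative, not part of this file): a slave driver
 * requesting a channel through shdma_chan_filter() as described above, then
 * supplying the direction and device address via dmaengine_slave_config().
 * MY_SLAVE_ID and my_fifo_phys are hypothetical placeholders.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= my_fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(long)MY_SLAVE_ID);
 *	if (chan && dmaengine_slave_config(chan, &cfg) < 0)
 *		dma_release_channel(chan);
 */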
335 
336 static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
337 {
338 	struct shdma_desc *desc, *_desc;
339 	/* Is the "exposed" head of a chain acked? */
340 	bool head_acked = false;
341 	dma_cookie_t cookie = 0;
342 	dma_async_tx_callback callback = NULL;
343 	struct dmaengine_desc_callback cb;
344 	unsigned long flags;
345 	LIST_HEAD(cyclic_list);
346 
347 	memset(&cb, 0, sizeof(cb));
348 	spin_lock_irqsave(&schan->chan_lock, flags);
349 	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
350 		struct dma_async_tx_descriptor *tx = &desc->async_tx;
351 
352 		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
353 		BUG_ON(desc->mark != DESC_SUBMITTED &&
354 		       desc->mark != DESC_COMPLETED &&
355 		       desc->mark != DESC_WAITING);
356 
357 		/*
358 		 * queue is ordered, and we use this loop to (1) clean up all
359 		 * completed descriptors, and to (2) update descriptor flags of
360 		 * any chunks in a (partially) completed chain
361 		 */
362 		if (!all && desc->mark == DESC_SUBMITTED &&
363 		    desc->cookie != cookie)
364 			break;
365 
366 		if (tx->cookie > 0)
367 			cookie = tx->cookie;
368 
369 		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
370 			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
371 				dev_dbg(schan->dev,
372 					"Completing cookie %d, expected %d\n",
373 					desc->cookie,
374 					schan->dma_chan.completed_cookie + 1);
375 			schan->dma_chan.completed_cookie = desc->cookie;
376 		}
377 
378 		/* Call callback on the last chunk */
379 		if (desc->mark == DESC_COMPLETED && tx->callback) {
380 			desc->mark = DESC_WAITING;
381 			dmaengine_desc_get_callback(tx, &cb);
382 			callback = tx->callback;
383 			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
384 				tx->cookie, tx, schan->id);
385 			BUG_ON(desc->chunks != 1);
386 			break;
387 		}
388 
389 		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
390 			if (desc->mark == DESC_COMPLETED) {
391 				BUG_ON(tx->cookie < 0);
392 				desc->mark = DESC_WAITING;
393 			}
394 			head_acked = async_tx_test_ack(tx);
395 		} else {
396 			switch (desc->mark) {
397 			case DESC_COMPLETED:
398 				desc->mark = DESC_WAITING;
399 				fallthrough;
400 			case DESC_WAITING:
401 				if (head_acked)
402 					async_tx_ack(&desc->async_tx);
403 			}
404 		}
405 
406 		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
407 			tx, tx->cookie);
408 
409 		if (((desc->mark == DESC_COMPLETED ||
410 		      desc->mark == DESC_WAITING) &&
411 		     async_tx_test_ack(&desc->async_tx)) || all) {
412 
413 			if (all || !desc->cyclic) {
414 				/* Remove from ld_queue list */
415 				desc->mark = DESC_IDLE;
416 				list_move(&desc->node, &schan->ld_free);
417 			} else {
418 				/* reuse as cyclic */
419 				desc->mark = DESC_SUBMITTED;
420 				list_move_tail(&desc->node, &cyclic_list);
421 			}
422 
423 			if (list_empty(&schan->ld_queue)) {
424 				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
425 				pm_runtime_put(schan->dev);
426 				schan->pm_state = SHDMA_PM_ESTABLISHED;
427 			} else if (schan->pm_state == SHDMA_PM_PENDING) {
428 				shdma_chan_xfer_ld_queue(schan);
429 			}
430 		}
431 	}
432 
433 	if (all && !callback)
434 		/*
435 		 * Terminating and the loop completed normally: forgive
436 		 * uncompleted cookies
437 		 */
438 		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
439 
440 	list_splice_tail(&cyclic_list, &schan->ld_queue);
441 
442 	spin_unlock_irqrestore(&schan->chan_lock, flags);
443 
444 	dmaengine_desc_callback_invoke(&cb, NULL);
445 
446 	return callback;
447 }
448 
449 /*
450  * shdma_chan_ld_cleanup - Clean up link descriptors
451  *
452  * Clean up the ld_queue of DMA channel.
453  */
454 static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
455 {
456 	while (__ld_cleanup(schan, all))
457 		;
458 }
459 
460 /*
461  * shdma_free_chan_resources - Free all resources of the channel.
462  */
463 static void shdma_free_chan_resources(struct dma_chan *chan)
464 {
465 	struct shdma_chan *schan = to_shdma_chan(chan);
466 	struct shdma_dev *sdev = to_shdma_dev(chan->device);
467 	const struct shdma_ops *ops = sdev->ops;
468 	LIST_HEAD(list);
469 
470 	/* Protect against ISR */
471 	spin_lock_irq(&schan->chan_lock);
472 	ops->halt_channel(schan);
473 	spin_unlock_irq(&schan->chan_lock);
474 
475 	/* Now no new interrupts will occur */
476 
477 	/* Prepared and not submitted descriptors can still be on the queue */
478 	if (!list_empty(&schan->ld_queue))
479 		shdma_chan_ld_cleanup(schan, true);
480 
481 	if (schan->slave_id >= 0) {
482 		/* The caller is holding dma_list_mutex */
483 		clear_bit(schan->slave_id, shdma_slave_used);
484 		chan->private = NULL;
485 	}
486 
487 	schan->real_slave_id = 0;
488 
489 	spin_lock_irq(&schan->chan_lock);
490 
491 	list_splice_init(&schan->ld_free, &list);
492 	schan->desc_num = 0;
493 
494 	spin_unlock_irq(&schan->chan_lock);
495 
496 	kfree(schan->desc);
497 }
498 
499 /**
500  * shdma_add_desc - get, set up and return one transfer descriptor
501  * @schan:	DMA channel
502  * @flags:	DMA transfer flags
503  * @dst:	destination DMA address, incremented when direction equals
504  *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
505  * @src:	source DMA address, incremented when direction equals
506  *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
507  * @len:	DMA transfer length
508  * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
509  * @direction:	needed for slave DMA to decide which address to keep constant,
510  *		equals DMA_MEM_TO_MEM for MEMCPY
511  * Returns 0 or an error
512  * Locks: called with desc_lock held
513  */
514 static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
515 	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
516 	struct shdma_desc **first, enum dma_transfer_direction direction)
517 {
518 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
519 	const struct shdma_ops *ops = sdev->ops;
520 	struct shdma_desc *new;
521 	size_t copy_size = *len;
522 
523 	if (!copy_size)
524 		return NULL;
525 
526 	/* Allocate the link descriptor from the free list */
527 	new = shdma_get_desc(schan);
528 	if (!new) {
529 		dev_err(schan->dev, "No free link descriptor available\n");
530 		return NULL;
531 	}
532 
533 	ops->desc_setup(schan, new, *src, *dst, &copy_size);
534 
535 	if (!*first) {
536 		/* First desc */
537 		new->async_tx.cookie = -EBUSY;
538 		*first = new;
539 	} else {
540 		/* Other desc - invisible to the user */
541 		new->async_tx.cookie = -EINVAL;
542 	}
543 
544 	dev_dbg(schan->dev,
545 		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
546 		copy_size, *len, src, dst, &new->async_tx,
547 		new->async_tx.cookie);
548 
549 	new->mark = DESC_PREPARED;
550 	new->async_tx.flags = flags;
551 	new->direction = direction;
552 	new->partial = 0;
553 
554 	*len -= copy_size;
555 	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
556 		*src += copy_size;
557 	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
558 		*dst += copy_size;
559 
560 	return new;
561 }
562 
563 /*
564  * shdma_prep_sg - prepare transfer descriptors from an SG list
565  *
566  * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
567  * converted to scatter-gather to guarantee consistent locking and a correct
568  * list manipulation. For slave DMA direction carries the usual meaning, and,
569  * logically, the SG list is RAM and the addr variable contains slave address,
570  * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
571  * and the SG list contains only one element and points at the source buffer.
572  */
573 static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
574 	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
575 	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
576 {
577 	struct scatterlist *sg;
578 	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
579 	LIST_HEAD(tx_list);
580 	int chunks = 0;
581 	unsigned long irq_flags;
582 	int i;
583 
584 	for_each_sg(sgl, sg, sg_len, i)
585 		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);
586 
587 	/* Have to lock the whole loop to protect against concurrent release */
588 	spin_lock_irqsave(&schan->chan_lock, irq_flags);
589 
590 	/*
591 	 * Chaining:
592 	 * first descriptor is what user is dealing with in all API calls, its
593 	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
594 	 *	number
595 	 * if more than one chunk is needed further chunks have cookie = -EINVAL
596 	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
597 	 * all chunks are linked onto the tx_list head with their .node heads
598 	 *	only during this function, then they are immediately spliced
599 	 *	back onto the free list in form of a chain
600 	 */
601 	for_each_sg(sgl, sg, sg_len, i) {
602 		dma_addr_t sg_addr = sg_dma_address(sg);
603 		size_t len = sg_dma_len(sg);
604 
605 		if (!len)
606 			goto err_get_desc;
607 
608 		do {
609 			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
610 				i, sg, len, &sg_addr);
611 
612 			if (direction == DMA_DEV_TO_MEM)
613 				new = shdma_add_desc(schan, flags,
614 						&sg_addr, addr, &len, &first,
615 						direction);
616 			else
617 				new = shdma_add_desc(schan, flags,
618 						addr, &sg_addr, &len, &first,
619 						direction);
620 			if (!new)
621 				goto err_get_desc;
622 
623 			new->cyclic = cyclic;
624 			if (cyclic)
625 				new->chunks = 1;
626 			else
627 				new->chunks = chunks--;
628 			list_add_tail(&new->node, &tx_list);
629 		} while (len);
630 	}
631 
632 	if (new != first)
633 		new->async_tx.cookie = -ENOSPC;
634 
635 	/* Put them back on the free list, so they don't get lost */
636 	list_splice_tail(&tx_list, &schan->ld_free);
637 
638 	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
639 
640 	return &first->async_tx;
641 
642 err_get_desc:
643 	list_for_each_entry(new, &tx_list, node)
644 		new->mark = DESC_IDLE;
645 	list_splice(&tx_list, &schan->ld_free);
646 
647 	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
648 
649 	return NULL;
650 }
651 
652 static struct dma_async_tx_descriptor *shdma_prep_memcpy(
653 	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
654 	size_t len, unsigned long flags)
655 {
656 	struct shdma_chan *schan = to_shdma_chan(chan);
657 	struct scatterlist sg;
658 
659 	if (!chan || !len)
660 		return NULL;
661 
662 	BUG_ON(!schan->desc_num);
663 
664 	sg_init_table(&sg, 1);
665 	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
666 		    offset_in_page(dma_src));
667 	sg_dma_address(&sg) = dma_src;
668 	sg_dma_len(&sg) = len;
669 
670 	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
671 			     flags, false);
672 }
673 
674 static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
675 	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
676 	enum dma_transfer_direction direction, unsigned long flags, void *context)
677 {
678 	struct shdma_chan *schan = to_shdma_chan(chan);
679 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
680 	const struct shdma_ops *ops = sdev->ops;
681 	int slave_id = schan->slave_id;
682 	dma_addr_t slave_addr;
683 
684 	if (!chan)
685 		return NULL;
686 
687 	BUG_ON(!schan->desc_num);
688 
689 	/* Someone calling slave DMA on a generic channel? */
690 	if (slave_id < 0 || !sg_len) {
691 		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
692 			 __func__, sg_len, slave_id);
693 		return NULL;
694 	}
695 
696 	slave_addr = ops->slave_addr(schan);
697 
698 	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
699 			     direction, flags, false);
700 }
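
/*
 * Usage sketch (illustrative, not part of this file): a configured client
 * normally reaches shdma_prep_slave_sg() through the generic dmaengine
 * wrappers. sgl, nents, my_dma_done() and my_ctx are hypothetical.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EIO;
 *	desc->callback = my_dma_done;		// completion callback
 *	desc->callback_param = my_ctx;
 *	cookie = dmaengine_submit(desc);	// ends up in shdma_tx_submit()
 *	dma_async_issue_pending(chan);		// kicks shdma_issue_pending()
 */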
701 
702 #define SHDMA_MAX_SG_LEN 32
703 
704 static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
705 	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
706 	size_t period_len, enum dma_transfer_direction direction,
707 	unsigned long flags)
708 {
709 	struct shdma_chan *schan = to_shdma_chan(chan);
710 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
711 	struct dma_async_tx_descriptor *desc;
712 	const struct shdma_ops *ops = sdev->ops;
713 	unsigned int sg_len = buf_len / period_len;
714 	int slave_id = schan->slave_id;
715 	dma_addr_t slave_addr;
716 	struct scatterlist *sgl;
717 	int i;
718 
719 	if (!chan)
720 		return NULL;
721 
722 	BUG_ON(!schan->desc_num);
723 
724 	if (sg_len > SHDMA_MAX_SG_LEN) {
725 		dev_err(schan->dev, "sg length %d exceeds limit %d",
726 				sg_len, SHDMA_MAX_SG_LEN);
727 		return NULL;
728 	}
729 
730 	/* Someone calling slave DMA on a generic channel? */
731 	if (slave_id < 0 || (buf_len < period_len)) {
732 		dev_warn(schan->dev,
733 			"%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
734 			__func__, buf_len, period_len, slave_id);
735 		return NULL;
736 	}
737 
738 	slave_addr = ops->slave_addr(schan);
739 
740 	/*
741 	 * Allocate the sg list dynamically as it would consume too much stack
742 	 * space.
743 	 */
744 	sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL);
745 	if (!sgl)
746 		return NULL;
747 
748 	sg_init_table(sgl, sg_len);
749 
750 	for (i = 0; i < sg_len; i++) {
751 		dma_addr_t src = buf_addr + (period_len * i);
752 
753 		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
754 			    offset_in_page(src));
755 		sg_dma_address(&sgl[i]) = src;
756 		sg_dma_len(&sgl[i]) = period_len;
757 	}
758 
759 	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
760 			     direction, flags, true);
761 
762 	kfree(sgl);
763 	return desc;
764 }
765 
766 static int shdma_terminate_all(struct dma_chan *chan)
767 {
768 	struct shdma_chan *schan = to_shdma_chan(chan);
769 	struct shdma_dev *sdev = to_shdma_dev(chan->device);
770 	const struct shdma_ops *ops = sdev->ops;
771 	unsigned long flags;
772 
773 	spin_lock_irqsave(&schan->chan_lock, flags);
774 	ops->halt_channel(schan);
775 
776 	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
777 		/* Record partial transfer */
778 		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
779 							   struct shdma_desc, node);
780 		desc->partial = ops->get_partial(schan, desc);
781 	}
782 
783 	spin_unlock_irqrestore(&schan->chan_lock, flags);
784 
785 	shdma_chan_ld_cleanup(schan, true);
786 
787 	return 0;
788 }
789 
790 static int shdma_config(struct dma_chan *chan,
791 			struct dma_slave_config *config)
792 {
793 	struct shdma_chan *schan = to_shdma_chan(chan);
794 
795 	/*
796 	 * So far only .slave_id is used, but the slave drivers are
797 	 * encouraged to also set a transfer direction and an address.
798 	 */
799 	if (!config)
800 		return -EINVAL;
801 
802 	/*
803 	 * We could lock this, but you shouldn't be configuring the
804 	 * channel, while using it...
805 	 */
806 	return shdma_setup_slave(schan,
807 				 config->direction == DMA_DEV_TO_MEM ?
808 				 config->src_addr : config->dst_addr);
809 }
810 
811 static void shdma_issue_pending(struct dma_chan *chan)
812 {
813 	struct shdma_chan *schan = to_shdma_chan(chan);
814 
815 	spin_lock_irq(&schan->chan_lock);
816 	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
817 		shdma_chan_xfer_ld_queue(schan);
818 	else
819 		schan->pm_state = SHDMA_PM_PENDING;
820 	spin_unlock_irq(&schan->chan_lock);
821 }
822 
823 static enum dma_status shdma_tx_status(struct dma_chan *chan,
824 					dma_cookie_t cookie,
825 					struct dma_tx_state *txstate)
826 {
827 	struct shdma_chan *schan = to_shdma_chan(chan);
828 	enum dma_status status;
829 	unsigned long flags;
830 
831 	shdma_chan_ld_cleanup(schan, false);
832 
833 	spin_lock_irqsave(&schan->chan_lock, flags);
834 
835 	status = dma_cookie_status(chan, cookie, txstate);
836 
837 	/*
838 	 * If we don't find the cookie on the queue, it has been aborted and we have
839 	 * to report an error
840 	 */
841 	if (status != DMA_COMPLETE) {
842 		struct shdma_desc *sdesc;
843 		status = DMA_ERROR;
844 		list_for_each_entry(sdesc, &schan->ld_queue, node)
845 			if (sdesc->cookie == cookie) {
846 				status = DMA_IN_PROGRESS;
847 				break;
848 			}
849 	}
850 
851 	spin_unlock_irqrestore(&schan->chan_lock, flags);
852 
853 	return status;
854 }
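
/*
 * Usage sketch (illustrative, not part of this file): polling a cookie from
 * the client side lands in shdma_tx_status() via dmaengine_tx_status().
 * chan, cookie and dev are the client's own.
 *
 *	struct dma_tx_state state;
 *
 *	if (dmaengine_tx_status(chan, cookie, &state) == DMA_ERROR)
 *		dev_err(dev, "transfer %d was aborted\n", cookie);
 */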
855 
856 /* Called from error IRQ or NMI */
857 bool shdma_reset(struct shdma_dev *sdev)
858 {
859 	const struct shdma_ops *ops = sdev->ops;
860 	struct shdma_chan *schan;
861 	unsigned int handled = 0;
862 	int i;
863 
864 	/* Reset all channels */
865 	shdma_for_each_chan(schan, sdev, i) {
866 		struct shdma_desc *sdesc;
867 		LIST_HEAD(dl);
868 
869 		if (!schan)
870 			continue;
871 
872 		spin_lock(&schan->chan_lock);
873 
874 		/* Stop the channel */
875 		ops->halt_channel(schan);
876 
877 		list_splice_init(&schan->ld_queue, &dl);
878 
879 		if (!list_empty(&dl)) {
880 			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
881 			pm_runtime_put(schan->dev);
882 		}
883 		schan->pm_state = SHDMA_PM_ESTABLISHED;
884 
885 		spin_unlock(&schan->chan_lock);
886 
887 		/* Complete all  */
888 		list_for_each_entry(sdesc, &dl, node) {
889 			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
890 
891 			sdesc->mark = DESC_IDLE;
892 			dmaengine_desc_get_callback_invoke(tx, NULL);
893 		}
894 
895 		spin_lock(&schan->chan_lock);
896 		list_splice(&dl, &schan->ld_free);
897 		spin_unlock(&schan->chan_lock);
898 
899 		handled++;
900 	}
901 
902 	return !!handled;
903 }
904 EXPORT_SYMBOL(shdma_reset);
905 
906 static irqreturn_t chan_irq(int irq, void *dev)
907 {
908 	struct shdma_chan *schan = dev;
909 	const struct shdma_ops *ops =
910 		to_shdma_dev(schan->dma_chan.device)->ops;
911 	irqreturn_t ret;
912 
913 	spin_lock(&schan->chan_lock);
914 
915 	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
916 
917 	spin_unlock(&schan->chan_lock);
918 
919 	return ret;
920 }
921 
922 static irqreturn_t chan_irqt(int irq, void *dev)
923 {
924 	struct shdma_chan *schan = dev;
925 	const struct shdma_ops *ops =
926 		to_shdma_dev(schan->dma_chan.device)->ops;
927 	struct shdma_desc *sdesc;
928 
929 	spin_lock_irq(&schan->chan_lock);
930 	list_for_each_entry(sdesc, &schan->ld_queue, node) {
931 		if (sdesc->mark == DESC_SUBMITTED &&
932 		    ops->desc_completed(schan, sdesc)) {
933 			dev_dbg(schan->dev, "done #%d@%p\n",
934 				sdesc->async_tx.cookie, &sdesc->async_tx);
935 			sdesc->mark = DESC_COMPLETED;
936 			break;
937 		}
938 	}
939 	/* Next desc */
940 	shdma_chan_xfer_ld_queue(schan);
941 	spin_unlock_irq(&schan->chan_lock);
942 
943 	shdma_chan_ld_cleanup(schan, false);
944 
945 	return IRQ_HANDLED;
946 }
947 
948 int shdma_request_irq(struct shdma_chan *schan, int irq,
949 			   unsigned long flags, const char *name)
950 {
951 	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
952 					    chan_irqt, flags, name, schan);
953 
954 	schan->irq = ret < 0 ? ret : irq;
955 
956 	return ret;
957 }
958 EXPORT_SYMBOL(shdma_request_irq);
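
/*
 * Usage sketch (illustrative, not part of this file): a hardware glue driver
 * typically fetches the channel interrupt in its probe path and hands it to
 * shdma_request_irq(). pdev and my_chan (with an embedded struct shdma_chan)
 * are hypothetical.
 *
 *	int irq, ret;
 *
 *	irq = platform_get_irq(pdev, 0);
 *	if (irq < 0)
 *		return irq;
 *	ret = shdma_request_irq(&my_chan->shdma_chan, irq, IRQF_SHARED,
 *				dev_name(&pdev->dev));
 *	if (ret < 0)
 *		return ret;
 */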
959 
960 void shdma_chan_probe(struct shdma_dev *sdev,
961 			   struct shdma_chan *schan, int id)
962 {
963 	schan->pm_state = SHDMA_PM_ESTABLISHED;
964 
965 	/* reference struct dma_device */
966 	schan->dma_chan.device = &sdev->dma_dev;
967 	dma_cookie_init(&schan->dma_chan);
968 
969 	schan->dev = sdev->dma_dev.dev;
970 	schan->id = id;
971 
972 	if (!schan->max_xfer_len)
973 		schan->max_xfer_len = PAGE_SIZE;
974 
975 	spin_lock_init(&schan->chan_lock);
976 
977 	/* Init descriptor manage list */
978 	INIT_LIST_HEAD(&schan->ld_queue);
979 	INIT_LIST_HEAD(&schan->ld_free);
980 
981 	/* Add the channel to DMA device channel list */
982 	list_add_tail(&schan->dma_chan.device_node,
983 			&sdev->dma_dev.channels);
984 	sdev->schan[id] = schan;
985 }
986 EXPORT_SYMBOL(shdma_chan_probe);
987 
988 void shdma_chan_remove(struct shdma_chan *schan)
989 {
990 	list_del(&schan->dma_chan.device_node);
991 }
992 EXPORT_SYMBOL(shdma_chan_remove);
993 
994 int shdma_init(struct device *dev, struct shdma_dev *sdev,
995 		    int chan_num)
996 {
997 	struct dma_device *dma_dev = &sdev->dma_dev;
998 
999 	/*
1000 	 * Require all call-backs for now, they can trivially be made optional
1001 	 * later as required
1002 	 */
1003 	if (!sdev->ops ||
1004 	    !sdev->desc_size ||
1005 	    !sdev->ops->embedded_desc ||
1006 	    !sdev->ops->start_xfer ||
1007 	    !sdev->ops->setup_xfer ||
1008 	    !sdev->ops->set_slave ||
1009 	    !sdev->ops->desc_setup ||
1010 	    !sdev->ops->slave_addr ||
1011 	    !sdev->ops->channel_busy ||
1012 	    !sdev->ops->halt_channel ||
1013 	    !sdev->ops->desc_completed)
1014 		return -EINVAL;
1015 
1016 	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
1017 	if (!sdev->schan)
1018 		return -ENOMEM;
1019 
1020 	INIT_LIST_HEAD(&dma_dev->channels);
1021 
1022 	/* Common and MEMCPY operations */
1023 	dma_dev->device_alloc_chan_resources
1024 		= shdma_alloc_chan_resources;
1025 	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
1026 	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
1027 	dma_dev->device_tx_status = shdma_tx_status;
1028 	dma_dev->device_issue_pending = shdma_issue_pending;
1029 
1030 	/* Compulsory for DMA_SLAVE fields */
1031 	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
1032 	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
1033 	dma_dev->device_config = shdma_config;
1034 	dma_dev->device_terminate_all = shdma_terminate_all;
1035 
1036 	dma_dev->dev = dev;
1037 
1038 	return 0;
1039 }
1040 EXPORT_SYMBOL(shdma_init);
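
/*
 * Usage sketch (illustrative, not part of this file): the probe-time sequence
 * expected from a hardware glue driver. The my_*() callbacks, my_dev/my_chan
 * structures and MY_NR_CHANNELS are placeholders; only the shdma_* and
 * dmaengine calls are real.
 *
 *	static const struct shdma_ops my_shdma_ops = {
 *		.embedded_desc	= my_embedded_desc,
 *		.start_xfer	= my_start_xfer,
 *		.setup_xfer	= my_setup_xfer,
 *		.set_slave	= my_set_slave,
 *		.desc_setup	= my_desc_setup,
 *		.slave_addr	= my_slave_addr,
 *		.channel_busy	= my_channel_busy,
 *		.halt_channel	= my_halt_channel,
 *		.desc_completed	= my_desc_completed,
 *		.chan_irq	= my_chan_irq,
 *	};
 *
 *	my_dev->shdma_dev.ops = &my_shdma_ops;
 *	my_dev->shdma_dev.desc_size = sizeof(struct my_desc);
 *	dma_cap_set(DMA_SLAVE, my_dev->shdma_dev.dma_dev.cap_mask);
 *	dma_cap_set(DMA_MEMCPY, my_dev->shdma_dev.dma_dev.cap_mask);
 *
 *	ret = shdma_init(&pdev->dev, &my_dev->shdma_dev, MY_NR_CHANNELS);
 *	if (ret < 0)
 *		return ret;
 *
 *	for (i = 0; i < MY_NR_CHANNELS; i++)
 *		shdma_chan_probe(&my_dev->shdma_dev,
 *				 &my_dev->chan[i].shdma_chan, i);
 *
 *	return dma_async_device_register(&my_dev->shdma_dev.dma_dev);
 */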
1041 
1042 void shdma_cleanup(struct shdma_dev *sdev)
1043 {
1044 	kfree(sdev->schan);
1045 }
1046 EXPORT_SYMBOL(shdma_cleanup);
1047 
1048 static int __init shdma_enter(void)
1049 {
1050 	shdma_slave_used = bitmap_zalloc(slave_num, GFP_KERNEL);
1051 	if (!shdma_slave_used)
1052 		return -ENOMEM;
1053 	return 0;
1054 }
1055 module_init(shdma_enter);
1056 
1057 static void __exit shdma_exit(void)
1058 {
1059 	bitmap_free(shdma_slave_used);
1060 }
1061 module_exit(shdma_exit);
1062 
1063 MODULE_DESCRIPTION("SH-DMA driver base library");
1064 MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
1065