xref: /linux/drivers/dma/dmaengine.c (revision 7d083ae983573de16e3ab0bfd47486996d211417)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
4  */
5 
6 /*
7  * This code implements the DMA subsystem. It provides a HW-neutral interface
8  * for other kernel code to use asynchronous memory copy capabilities,
9  * if present, and allows different HW DMA drivers to register as providing
10  * this capability.
11  *
12  * Because we are accelerating what is already a relatively fast
13  * operation, the code goes to great lengths to avoid additional overhead,
14  * such as locking.
15  *
16  * LOCKING:
17  *
18  * The subsystem keeps a global list of dma_device structs, which is protected
19  * by a mutex, dma_list_mutex.
20  *
21  * A subsystem can get access to a channel by calling dmaengine_get() followed
22  * by dma_find_channel(), or, if it needs an exclusive channel, it can call
23  * dma_request_channel() (see the sketch below).  Once a channel is allocated,
24  * a reference is taken against its corresponding driver to prevent removal.
25  *
26  * Each device has a channels list, which runs unlocked but is never modified
27  * once the device is registered; it is simply set up by the driver.
28  *
29  * See Documentation/driver-api/dmaengine for more details
30  */
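
/*
 * Illustrative sketch (editor's note, not part of the driver): a client of
 * the public channel pool described above might look roughly like this.
 * The memcpy preparation, the dst/src DMA addresses and the error handling
 * are assumptions made for the example, not code taken from this file.
 *
 *	// at client init time
 *	dmaengine_get();
 *
 *	// per operation; dst/src are dma_addr_t mapped by the caller
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
 *		if (tx) {
 *			cookie = dmaengine_submit(tx);
 *			dma_async_issue_pending(chan);
 *		}
 *	}
 *
 *	// at client teardown time
 *	dmaengine_put();
 */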
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #include <linux/platform_device.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/init.h>
37 #include <linux/module.h>
38 #include <linux/mm.h>
39 #include <linux/device.h>
40 #include <linux/dmaengine.h>
41 #include <linux/hardirq.h>
42 #include <linux/spinlock.h>
43 #include <linux/percpu.h>
44 #include <linux/rcupdate.h>
45 #include <linux/mutex.h>
46 #include <linux/jiffies.h>
47 #include <linux/rculist.h>
48 #include <linux/idr.h>
49 #include <linux/slab.h>
50 #include <linux/acpi.h>
51 #include <linux/acpi_dma.h>
52 #include <linux/of_dma.h>
53 #include <linux/mempool.h>
54 #include <linux/numa.h>
55 
56 static DEFINE_MUTEX(dma_list_mutex);
57 static DEFINE_IDA(dma_ida);
58 static LIST_HEAD(dma_device_list);
59 static long dmaengine_ref_count;
60 
61 /* --- sysfs implementation --- */
62 
63 /**
64  * dev_to_dma_chan - convert a device pointer to its sysfs container object
65  * @dev: device node
66  *
67  * Must be called under dma_list_mutex
68  */
69 static struct dma_chan *dev_to_dma_chan(struct device *dev)
70 {
71 	struct dma_chan_dev *chan_dev;
72 
73 	chan_dev = container_of(dev, typeof(*chan_dev), device);
74 	return chan_dev->chan;
75 }
76 
77 static ssize_t memcpy_count_show(struct device *dev,
78 				 struct device_attribute *attr, char *buf)
79 {
80 	struct dma_chan *chan;
81 	unsigned long count = 0;
82 	int i;
83 	int err;
84 
85 	mutex_lock(&dma_list_mutex);
86 	chan = dev_to_dma_chan(dev);
87 	if (chan) {
88 		for_each_possible_cpu(i)
89 			count += per_cpu_ptr(chan->local, i)->memcpy_count;
90 		err = sprintf(buf, "%lu\n", count);
91 	} else
92 		err = -ENODEV;
93 	mutex_unlock(&dma_list_mutex);
94 
95 	return err;
96 }
97 static DEVICE_ATTR_RO(memcpy_count);
98 
99 static ssize_t bytes_transferred_show(struct device *dev,
100 				      struct device_attribute *attr, char *buf)
101 {
102 	struct dma_chan *chan;
103 	unsigned long count = 0;
104 	int i;
105 	int err;
106 
107 	mutex_lock(&dma_list_mutex);
108 	chan = dev_to_dma_chan(dev);
109 	if (chan) {
110 		for_each_possible_cpu(i)
111 			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
112 		err = sprintf(buf, "%lu\n", count);
113 	} else
114 		err = -ENODEV;
115 	mutex_unlock(&dma_list_mutex);
116 
117 	return err;
118 }
119 static DEVICE_ATTR_RO(bytes_transferred);
120 
121 static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
122 			   char *buf)
123 {
124 	struct dma_chan *chan;
125 	int err;
126 
127 	mutex_lock(&dma_list_mutex);
128 	chan = dev_to_dma_chan(dev);
129 	if (chan)
130 		err = sprintf(buf, "%d\n", chan->client_count);
131 	else
132 		err = -ENODEV;
133 	mutex_unlock(&dma_list_mutex);
134 
135 	return err;
136 }
137 static DEVICE_ATTR_RO(in_use);
138 
139 static struct attribute *dma_dev_attrs[] = {
140 	&dev_attr_memcpy_count.attr,
141 	&dev_attr_bytes_transferred.attr,
142 	&dev_attr_in_use.attr,
143 	NULL,
144 };
145 ATTRIBUTE_GROUPS(dma_dev);
146 
147 static void chan_dev_release(struct device *dev)
148 {
149 	struct dma_chan_dev *chan_dev;
150 
151 	chan_dev = container_of(dev, typeof(*chan_dev), device);
152 	if (atomic_dec_and_test(chan_dev->idr_ref)) {
153 		ida_free(&dma_ida, chan_dev->dev_id);
154 		kfree(chan_dev->idr_ref);
155 	}
156 	kfree(chan_dev);
157 }
158 
159 static struct class dma_devclass = {
160 	.name		= "dma",
161 	.dev_groups	= dma_dev_groups,
162 	.dev_release	= chan_dev_release,
163 };
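
/*
 * With the class and attributes above, every registered channel appears as
 * /sys/class/dma/dma<dev_id>chan<chan_id>/ carrying the read-only files
 * memcpy_count, bytes_transferred and in_use; the name itself is assigned
 * later via dev_set_name("dma%dchan%d", ...) in dma_async_device_register().
 */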
164 
165 /* --- client and device registration --- */
166 
167 /**
168  * dma_cap_mask_all - enable iteration over all operation types
169  */
170 static dma_cap_mask_t dma_cap_mask_all;
171 
172 /**
173  * dma_chan_tbl_ent - tracks channel allocations per core/operation
174  * @chan: associated channel for this entry
175  */
176 struct dma_chan_tbl_ent {
177 	struct dma_chan *chan;
178 };
179 
180 /**
181  * channel_table - percpu lookup table for memory-to-memory offload providers
182  */
183 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
184 
185 static int __init dma_channel_table_init(void)
186 {
187 	enum dma_transaction_type cap;
188 	int err = 0;
189 
190 	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
191 
192 	/* 'interrupt', 'private', and 'slave' are channel capabilities,
193 	 * but are not associated with an operation so they do not need
194 	 * an entry in the channel_table
195 	 */
196 	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
197 	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
198 	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
199 
200 	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
201 		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
202 		if (!channel_table[cap]) {
203 			err = -ENOMEM;
204 			break;
205 		}
206 	}
207 
208 	if (err) {
209 		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
210 		for_each_dma_cap_mask(cap, dma_cap_mask_all)
211 			free_percpu(channel_table[cap]);
212 	}
213 
214 	return err;
215 }
216 arch_initcall(dma_channel_table_init);
217 
218 /**
219  * dma_chan_is_local - returns true if the channel is in the same NUMA node
220  *	as the cpu, or if the device has no NUMA affinity
221  */
222 static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
223 {
224 	int node = dev_to_node(chan->device->dev);
225 	return node == NUMA_NO_NODE ||
226 		cpumask_test_cpu(cpu, cpumask_of_node(node));
227 }
228 
229 /**
230  * min_chan - finds the channel with the lowest table_count in the same
231  *	NUMA node as the cpu
232  * @cap: capability to match
233  * @cpu: cpu index which the channel should be close to
234  *
235  * If some channels are close to the given cpu, the one with the lowest
236  * reference count is returned. Otherwise, cpu is ignored and only the
237  * reference count is taken into account.
238  * Must be called under dma_list_mutex.
239  */
240 static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
241 {
242 	struct dma_device *device;
243 	struct dma_chan *chan;
244 	struct dma_chan *min = NULL;
245 	struct dma_chan *localmin = NULL;
246 
247 	list_for_each_entry(device, &dma_device_list, global_node) {
248 		if (!dma_has_cap(cap, device->cap_mask) ||
249 		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
250 			continue;
251 		list_for_each_entry(chan, &device->channels, device_node) {
252 			if (!chan->client_count)
253 				continue;
254 			if (!min || chan->table_count < min->table_count)
255 				min = chan;
256 
257 			if (dma_chan_is_local(chan, cpu))
258 				if (!localmin ||
259 				    chan->table_count < localmin->table_count)
260 					localmin = chan;
261 		}
262 	}
263 
264 	chan = localmin ? localmin : min;
265 
266 	if (chan)
267 		chan->table_count++;
268 
269 	return chan;
270 }
271 
272 /**
273  * dma_channel_rebalance - redistribute the available channels
274  *
275  * Optimize for cpu isolation (each cpu gets a dedicated channel for an
276  * operation type) in the SMP case, and operation isolation (avoid
277  * multi-tasking channels) in the non-SMP case.  Must be called under
278  * dma_list_mutex.
279  */
280 static void dma_channel_rebalance(void)
281 {
282 	struct dma_chan *chan;
283 	struct dma_device *device;
284 	int cpu;
285 	int cap;
286 
287 	/* undo the last distribution */
288 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
289 		for_each_possible_cpu(cpu)
290 			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
291 
292 	list_for_each_entry(device, &dma_device_list, global_node) {
293 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
294 			continue;
295 		list_for_each_entry(chan, &device->channels, device_node)
296 			chan->table_count = 0;
297 	}
298 
299 	/* don't populate the channel_table if no clients are available */
300 	if (!dmaengine_ref_count)
301 		return;
302 
303 	/* redistribute available channels */
304 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
305 		for_each_online_cpu(cpu) {
306 			chan = min_chan(cap, cpu);
307 			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
308 		}
309 }
310 
311 #define dma_device_satisfies_mask(device, mask) \
312 	__dma_device_satisfies_mask((device), &(mask))
313 static int
314 __dma_device_satisfies_mask(struct dma_device *device,
315 			    const dma_cap_mask_t *want)
316 {
317 	dma_cap_mask_t has;
318 
319 	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
320 		DMA_TX_TYPE_END);
321 	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
322 }
323 
324 static struct module *dma_chan_to_owner(struct dma_chan *chan)
325 {
326 	return chan->device->owner;
327 }
328 
329 /**
330  * balance_ref_count - catch up the channel reference count
331  * @chan: channel to balance ->client_count versus dmaengine_ref_count
332  *
333  * balance_ref_count must be called under dma_list_mutex
334  */
335 static void balance_ref_count(struct dma_chan *chan)
336 {
337 	struct module *owner = dma_chan_to_owner(chan);
338 
339 	while (chan->client_count < dmaengine_ref_count) {
340 		__module_get(owner);
341 		chan->client_count++;
342 	}
343 }
344 
345 static void dma_device_release(struct kref *ref)
346 {
347 	struct dma_device *device = container_of(ref, struct dma_device, ref);
348 
349 	list_del_rcu(&device->global_node);
350 	dma_channel_rebalance();
351 
352 	if (device->device_release)
353 		device->device_release(device);
354 }
355 
356 static void dma_device_put(struct dma_device *device)
357 {
358 	lockdep_assert_held(&dma_list_mutex);
359 	kref_put(&device->ref, dma_device_release);
360 }
361 
362 /**
363  * dma_chan_get - try to grab a dma channel's parent driver module
364  * @chan: channel to grab
365  *
366  * Must be called under dma_list_mutex
367  */
368 static int dma_chan_get(struct dma_chan *chan)
369 {
370 	struct module *owner = dma_chan_to_owner(chan);
371 	int ret;
372 
373 	/* The channel is already in use, update client count */
374 	if (chan->client_count) {
375 		__module_get(owner);
376 		goto out;
377 	}
378 
379 	if (!try_module_get(owner))
380 		return -ENODEV;
381 
382 	ret = kref_get_unless_zero(&chan->device->ref);
383 	if (!ret) {
384 		ret = -ENODEV;
385 		goto module_put_out;
386 	}
387 
388 	/* allocate upon first client reference */
389 	if (chan->device->device_alloc_chan_resources) {
390 		ret = chan->device->device_alloc_chan_resources(chan);
391 		if (ret < 0)
392 			goto err_out;
393 	}
394 
395 	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
396 		balance_ref_count(chan);
397 
398 out:
399 	chan->client_count++;
400 	return 0;
401 
402 err_out:
403 	dma_device_put(chan->device);
404 module_put_out:
405 	module_put(owner);
406 	return ret;
407 }
408 
409 /**
410  * dma_chan_put - drop a reference to a dma channel's parent driver module
411  * @chan: channel to release
412  *
413  * Must be called under dma_list_mutex
414  */
415 static void dma_chan_put(struct dma_chan *chan)
416 {
417 	/* This channel is not in use, bail out */
418 	if (!chan->client_count)
419 		return;
420 
421 	chan->client_count--;
422 
423 	/* This channel is not in use anymore, free it */
424 	if (!chan->client_count && chan->device->device_free_chan_resources) {
425 		/* Make sure all operations have completed */
426 		dmaengine_synchronize(chan);
427 		chan->device->device_free_chan_resources(chan);
428 	}
429 
430 	/* If the channel is used via a DMA request router, free the mapping */
431 	if (chan->router && chan->router->route_free) {
432 		chan->router->route_free(chan->router->dev, chan->route_data);
433 		chan->router = NULL;
434 		chan->route_data = NULL;
435 	}
436 
437 	dma_device_put(chan->device);
438 	module_put(dma_chan_to_owner(chan));
439 }
440 
441 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
442 {
443 	enum dma_status status;
444 	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
445 
446 	dma_async_issue_pending(chan);
447 	do {
448 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
449 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
450 			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
451 			return DMA_ERROR;
452 		}
453 		if (status != DMA_IN_PROGRESS)
454 			break;
455 		cpu_relax();
456 	} while (1);
457 
458 	return status;
459 }
460 EXPORT_SYMBOL(dma_sync_wait);
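
/*
 * Example (illustrative sketch only): dma_sync_wait() is typically used
 * right after submitting a descriptor when the caller must busy-wait for
 * completion; the submission step below is an assumption for the example.
 *
 *	cookie = dmaengine_submit(tx);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		return -EIO;
 */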
461 
462 /**
463  * dma_find_channel - find a channel to carry out the operation
464  * @tx_type: transaction type
465  */
466 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
467 {
468 	return this_cpu_read(channel_table[tx_type]->chan);
469 }
470 EXPORT_SYMBOL(dma_find_channel);
471 
472 /**
473  * dma_issue_pending_all - flush all pending operations across all channels
474  */
475 void dma_issue_pending_all(void)
476 {
477 	struct dma_device *device;
478 	struct dma_chan *chan;
479 
480 	rcu_read_lock();
481 	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
482 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
483 			continue;
484 		list_for_each_entry(chan, &device->channels, device_node)
485 			if (chan->client_count)
486 				device->device_issue_pending(chan);
487 	}
488 	rcu_read_unlock();
489 }
490 EXPORT_SYMBOL(dma_issue_pending_all);
491 
492 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
493 {
494 	struct dma_device *device;
495 
496 	if (!chan || !caps)
497 		return -EINVAL;
498 
499 	device = chan->device;
500 
501 	/* check if the channel supports slave transactions */
502 	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
503 	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
504 		return -ENXIO;
505 
506 	/*
507 	 * Check whether it reports it uses the generic slave
508 	 * capabilities, if not, that means it doesn't support any
509 	 * kind of slave capabilities reporting.
510 	 */
511 	if (!device->directions)
512 		return -ENXIO;
513 
514 	caps->src_addr_widths = device->src_addr_widths;
515 	caps->dst_addr_widths = device->dst_addr_widths;
516 	caps->directions = device->directions;
517 	caps->max_burst = device->max_burst;
518 	caps->residue_granularity = device->residue_granularity;
519 	caps->descriptor_reuse = device->descriptor_reuse;
520 	caps->cmd_pause = !!device->device_pause;
521 	caps->cmd_resume = !!device->device_resume;
522 	caps->cmd_terminate = !!device->device_terminate_all;
523 
524 	return 0;
525 }
526 EXPORT_SYMBOL_GPL(dma_get_slave_caps);
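
/*
 * Example (illustrative sketch only): a slave client can use the reported
 * capabilities to validate its configuration up front; the 32-bit write
 * requirement below is made up for the example.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (dma_get_slave_caps(chan, &caps))
 *		return -EINVAL;
 *	if (!(caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
 *		return -EINVAL;	// controller cannot do 32-bit writes
 */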
527 
528 static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
529 					  struct dma_device *dev,
530 					  dma_filter_fn fn, void *fn_param)
531 {
532 	struct dma_chan *chan;
533 
534 	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
535 		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
536 		return NULL;
537 	}
538 	/* devices with multiple channels need special handling as we need to
539 	 * ensure that all channels are either private or public.
540 	 */
541 	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
542 		list_for_each_entry(chan, &dev->channels, device_node) {
543 			/* some channels are already publicly allocated */
544 			if (chan->client_count)
545 				return NULL;
546 		}
547 
548 	list_for_each_entry(chan, &dev->channels, device_node) {
549 		if (chan->client_count) {
550 			dev_dbg(dev->dev, "%s: %s busy\n",
551 				 __func__, dma_chan_name(chan));
552 			continue;
553 		}
554 		if (fn && !fn(chan, fn_param)) {
555 			dev_dbg(dev->dev, "%s: %s filter said false\n",
556 				 __func__, dma_chan_name(chan));
557 			continue;
558 		}
559 		return chan;
560 	}
561 
562 	return NULL;
563 }
564 
565 static struct dma_chan *find_candidate(struct dma_device *device,
566 				       const dma_cap_mask_t *mask,
567 				       dma_filter_fn fn, void *fn_param)
568 {
569 	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
570 	int err;
571 
572 	if (chan) {
573 		/* Found a suitable channel, try to grab, prep, and return it.
574 		 * We first set DMA_PRIVATE to disable balance_ref_count as this
575 		 * channel will not be published in the general-purpose
576 		 * allocator
577 		 */
578 		dma_cap_set(DMA_PRIVATE, device->cap_mask);
579 		device->privatecnt++;
580 		err = dma_chan_get(chan);
581 
582 		if (err) {
583 			if (err == -ENODEV) {
584 				dev_dbg(device->dev, "%s: %s module removed\n",
585 					__func__, dma_chan_name(chan));
586 				list_del_rcu(&device->global_node);
587 			} else
588 				dev_dbg(device->dev,
589 					"%s: failed to get %s: (%d)\n",
590 					 __func__, dma_chan_name(chan), err);
591 
592 			if (--device->privatecnt == 0)
593 				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
594 
595 			chan = ERR_PTR(err);
596 		}
597 	}
598 
599 	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
600 }
601 
602 /**
603  * dma_get_slave_channel - try to get specific channel exclusively
604  * @chan: target channel
605  */
606 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
607 {
608 	int err = -EBUSY;
609 
610 	/* lock against __dma_request_channel */
611 	mutex_lock(&dma_list_mutex);
612 
613 	if (chan->client_count == 0) {
614 		struct dma_device *device = chan->device;
615 
616 		dma_cap_set(DMA_PRIVATE, device->cap_mask);
617 		device->privatecnt++;
618 		err = dma_chan_get(chan);
619 		if (err) {
620 			dev_dbg(chan->device->dev,
621 				"%s: failed to get %s: (%d)\n",
622 				__func__, dma_chan_name(chan), err);
623 			chan = NULL;
624 			if (--device->privatecnt == 0)
625 				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
626 		}
627 	} else
628 		chan = NULL;
629 
630 	mutex_unlock(&dma_list_mutex);
631 
632 
633 	return chan;
634 }
635 EXPORT_SYMBOL_GPL(dma_get_slave_channel);
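
/*
 * Example (illustrative sketch only): dma_get_slave_channel() is meant for
 * providers that pick a specific channel themselves, for instance from an
 * of_dma translation callback registered with of_dma_controller_register().
 * How "candidate" is derived from the DT arguments is provider specific and
 * assumed here.
 *
 *	static struct dma_chan *foo_of_xlate(struct of_phandle_args *spec,
 *					     struct of_dma *ofdma)
 *	{
 *		struct dma_chan *candidate; // looked up from spec->args[]
 *		...
 *		return dma_get_slave_channel(candidate);
 *	}
 */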
636 
637 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
638 {
639 	dma_cap_mask_t mask;
640 	struct dma_chan *chan;
641 
642 	dma_cap_zero(mask);
643 	dma_cap_set(DMA_SLAVE, mask);
644 
645 	/* lock against __dma_request_channel */
646 	mutex_lock(&dma_list_mutex);
647 
648 	chan = find_candidate(device, &mask, NULL, NULL);
649 
650 	mutex_unlock(&dma_list_mutex);
651 
652 	return IS_ERR(chan) ? NULL : chan;
653 }
654 EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
655 
656 /**
657  * __dma_request_channel - try to allocate an exclusive channel
658  * @mask: capabilities that the channel must satisfy
659  * @fn: optional callback used to accept or reject candidate channels
660  * @fn_param: opaque parameter to pass to dma_filter_fn
661  * @np: device node to look for DMA channels
662  *
663  * Returns pointer to appropriate DMA channel on success or NULL.
664  */
665 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
666 				       dma_filter_fn fn, void *fn_param,
667 				       struct device_node *np)
668 {
669 	struct dma_device *device, *_d;
670 	struct dma_chan *chan = NULL;
671 
672 	/* Find a channel */
673 	mutex_lock(&dma_list_mutex);
674 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
675 		/* Finds a DMA controller with matching device node */
676 		if (np && device->dev->of_node && np != device->dev->of_node)
677 			continue;
678 
679 		chan = find_candidate(device, mask, fn, fn_param);
680 		if (!IS_ERR(chan))
681 			break;
682 
683 		chan = NULL;
684 	}
685 	mutex_unlock(&dma_list_mutex);
686 
687 	pr_debug("%s: %s (%s)\n",
688 		 __func__,
689 		 chan ? "success" : "fail",
690 		 chan ? dma_chan_name(chan) : NULL);
691 
692 	return chan;
693 }
694 EXPORT_SYMBOL_GPL(__dma_request_channel);
695 
696 static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
697 						    const char *name,
698 						    struct device *dev)
699 {
700 	int i;
701 
702 	if (!device->filter.mapcnt)
703 		return NULL;
704 
705 	for (i = 0; i < device->filter.mapcnt; i++) {
706 		const struct dma_slave_map *map = &device->filter.map[i];
707 
708 		if (!strcmp(map->devname, dev_name(dev)) &&
709 		    !strcmp(map->slave, name))
710 			return map;
711 	}
712 
713 	return NULL;
714 }
715 
716 /**
717  * dma_request_chan - try to allocate an exclusive slave channel
718  * @dev:	pointer to client device structure
719  * @name:	slave channel name
720  *
721  * Returns pointer to appropriate DMA channel on success or an error pointer.
722  */
723 struct dma_chan *dma_request_chan(struct device *dev, const char *name)
724 {
725 	struct dma_device *d, *_d;
726 	struct dma_chan *chan = NULL;
727 
728 	/* If device-tree is present, get the slave info from it */
729 	if (dev->of_node)
730 		chan = of_dma_request_slave_channel(dev->of_node, name);
731 
732 	/* If device was enumerated by ACPI get slave info from here */
733 	/* If the device was enumerated by ACPI, get the slave info from it */
734 		chan = acpi_dma_request_slave_chan_by_name(dev, name);
735 
736 	if (chan) {
737 		/* Valid channel found or requester needs to be deferred */
738 		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
739 			return chan;
740 	}
741 
742 	/* Try to find the channel via the DMA filter map(s) */
743 	mutex_lock(&dma_list_mutex);
744 	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
745 		dma_cap_mask_t mask;
746 		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
747 
748 		if (!map)
749 			continue;
750 
751 		dma_cap_zero(mask);
752 		dma_cap_set(DMA_SLAVE, mask);
753 
754 		chan = find_candidate(d, &mask, d->filter.fn, map->param);
755 		if (!IS_ERR(chan))
756 			break;
757 	}
758 	mutex_unlock(&dma_list_mutex);
759 
760 	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
761 }
762 EXPORT_SYMBOL_GPL(dma_request_chan);
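
/*
 * Example (illustrative sketch only): the usual slave client pattern is to
 * request a named channel at probe time and release it again at remove
 * time; the "rx" name and the platform device are placeholders.
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
 *	...
 *	dma_release_channel(chan);
 */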
763 
764 /**
765  * dma_request_slave_channel - try to allocate an exclusive slave channel
766  * @dev:	pointer to client device structure
767  * @name:	slave channel name
768  *
769  * Returns pointer to appropriate DMA channel on success or NULL.
770  */
771 struct dma_chan *dma_request_slave_channel(struct device *dev,
772 					   const char *name)
773 {
774 	struct dma_chan *ch = dma_request_chan(dev, name);
775 	if (IS_ERR(ch))
776 		return NULL;
777 
778 	return ch;
779 }
780 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
781 
782 /**
783  * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
784  * @mask: capabilities that the channel must satisfy
785  *
786  * Returns pointer to appropriate DMA channel on success or an error pointer.
787  */
788 struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
789 {
790 	struct dma_chan *chan;
791 
792 	if (!mask)
793 		return ERR_PTR(-ENODEV);
794 
795 	chan = __dma_request_channel(mask, NULL, NULL, NULL);
796 	if (!chan) {
797 		mutex_lock(&dma_list_mutex);
798 		if (list_empty(&dma_device_list))
799 			chan = ERR_PTR(-EPROBE_DEFER);
800 		else
801 			chan = ERR_PTR(-ENODEV);
802 		mutex_unlock(&dma_list_mutex);
803 	}
804 
805 	return chan;
806 }
807 EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
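
/*
 * Example (illustrative sketch only): requesting any channel that can do
 * memory-to-memory copies.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */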
808 
809 void dma_release_channel(struct dma_chan *chan)
810 {
811 	mutex_lock(&dma_list_mutex);
812 	WARN_ONCE(chan->client_count != 1,
813 		  "chan reference count %d != 1\n", chan->client_count);
814 	dma_chan_put(chan);
815 	/* drop PRIVATE cap enabled by __dma_request_channel() */
816 	if (--chan->device->privatecnt == 0)
817 		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
818 	mutex_unlock(&dma_list_mutex);
819 }
820 EXPORT_SYMBOL_GPL(dma_release_channel);
821 
822 /**
823  * dmaengine_get - register interest in dma channels
824  */
825 void dmaengine_get(void)
826 {
827 	struct dma_device *device, *_d;
828 	struct dma_chan *chan;
829 	int err;
830 
831 	mutex_lock(&dma_list_mutex);
832 	dmaengine_ref_count++;
833 
834 	/* try to grab channels */
835 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
836 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
837 			continue;
838 		list_for_each_entry(chan, &device->channels, device_node) {
839 			err = dma_chan_get(chan);
840 			if (err == -ENODEV) {
841 				/* module removed before we could use it */
842 				list_del_rcu(&device->global_node);
843 				break;
844 			} else if (err)
845 				dev_dbg(chan->device->dev,
846 					"%s: failed to get %s: (%d)\n",
847 					__func__, dma_chan_name(chan), err);
848 		}
849 	}
850 
851 	/* if this is the first reference and there were channels
852 	 * waiting we need to rebalance to get those channels
853 	 * incorporated into the channel table
854 	 */
855 	if (dmaengine_ref_count == 1)
856 		dma_channel_rebalance();
857 	mutex_unlock(&dma_list_mutex);
858 }
859 EXPORT_SYMBOL(dmaengine_get);
860 
861 /**
862  * dmaengine_put - let dma drivers be removed when ref_count == 0
863  */
864 void dmaengine_put(void)
865 {
866 	struct dma_device *device, *_d;
867 	struct dma_chan *chan;
868 
869 	mutex_lock(&dma_list_mutex);
870 	dmaengine_ref_count--;
871 	BUG_ON(dmaengine_ref_count < 0);
872 	/* drop channel references */
873 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
874 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
875 			continue;
876 		list_for_each_entry(chan, &device->channels, device_node)
877 			dma_chan_put(chan);
878 	}
879 	mutex_unlock(&dma_list_mutex);
880 }
881 EXPORT_SYMBOL(dmaengine_put);
882 
883 static bool device_has_all_tx_types(struct dma_device *device)
884 {
885 	/* A device that satisfies this test has channels that will never cause
886 	 * an async_tx channel switch event as all possible operation types can
887 	 * be handled.
888 	 */
889 	#ifdef CONFIG_ASYNC_TX_DMA
890 	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
891 		return false;
892 	#endif
893 
894 	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
895 	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
896 		return false;
897 	#endif
898 
899 	#if IS_ENABLED(CONFIG_ASYNC_XOR)
900 	if (!dma_has_cap(DMA_XOR, device->cap_mask))
901 		return false;
902 
903 	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
904 	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
905 		return false;
906 	#endif
907 	#endif
908 
909 	#if IS_ENABLED(CONFIG_ASYNC_PQ)
910 	if (!dma_has_cap(DMA_PQ, device->cap_mask))
911 		return false;
912 
913 	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
914 	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
915 		return false;
916 	#endif
917 	#endif
918 
919 	return true;
920 }
921 
922 static int get_dma_id(struct dma_device *device)
923 {
924 	int rc = ida_alloc(&dma_ida, GFP_KERNEL);
925 
926 	if (rc < 0)
927 		return rc;
928 	device->dev_id = rc;
929 	return 0;
930 }
931 
932 /**
933  * dma_async_device_register - register a DMA device
934  * @device: &dma_device
935  *
936  * After calling this routine the structure should not be freed except from the
937  * device_release() callback, which is invoked after
938  * dma_async_device_unregister() is called and no further references are held.
939  */
940 int dma_async_device_register(struct dma_device *device)
941 {
942 	int chancnt = 0, rc;
943 	struct dma_chan *chan;
944 	atomic_t *idr_ref;
945 
946 	if (!device)
947 		return -ENODEV;
948 
949 	/* validate device routines */
950 	if (!device->dev) {
951 		pr_err("DMA device must have dev\n");
952 		return -EIO;
953 	}
954 
955 	device->owner = device->dev->driver->owner;
956 
957 	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
958 		dev_err(device->dev,
959 			"Device claims capability %s, but op is not defined\n",
960 			"DMA_MEMCPY");
961 		return -EIO;
962 	}
963 
964 	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
965 		dev_err(device->dev,
966 			"Device claims capability %s, but op is not defined\n",
967 			"DMA_XOR");
968 		return -EIO;
969 	}
970 
971 	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
972 		dev_err(device->dev,
973 			"Device claims capability %s, but op is not defined\n",
974 			"DMA_XOR_VAL");
975 		return -EIO;
976 	}
977 
978 	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
979 		dev_err(device->dev,
980 			"Device claims capability %s, but op is not defined\n",
981 			"DMA_PQ");
982 		return -EIO;
983 	}
984 
985 	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
986 		dev_err(device->dev,
987 			"Device claims capability %s, but op is not defined\n",
988 			"DMA_PQ_VAL");
989 		return -EIO;
990 	}
991 
992 	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
993 		dev_err(device->dev,
994 			"Device claims capability %s, but op is not defined\n",
995 			"DMA_MEMSET");
996 		return -EIO;
997 	}
998 
999 	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
1000 		dev_err(device->dev,
1001 			"Device claims capability %s, but op is not defined\n",
1002 			"DMA_INTERRUPT");
1003 		return -EIO;
1004 	}
1005 
1006 	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
1007 		dev_err(device->dev,
1008 			"Device claims capability %s, but op is not defined\n",
1009 			"DMA_CYCLIC");
1010 		return -EIO;
1011 	}
1012 
1013 	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
1014 		dev_err(device->dev,
1015 			"Device claims capability %s, but op is not defined\n",
1016 			"DMA_INTERLEAVE");
1017 		return -EIO;
1018 	}
1019 
1020 
1021 	if (!device->device_tx_status) {
1022 		dev_err(device->dev, "Device tx_status is not defined\n");
1023 		return -EIO;
1024 	}
1025 
1026 
1027 	if (!device->device_issue_pending) {
1028 		dev_err(device->dev, "Device issue_pending is not defined\n");
1029 		return -EIO;
1030 	}
1031 
1032 	if (!device->device_release)
1033 		dev_warn(device->dev,
1034 			 "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
1035 
1036 	kref_init(&device->ref);
1037 
1038 	/* note: this only matters in the
1039 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
1040 	 */
1041 	if (device_has_all_tx_types(device))
1042 		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
1043 
1044 	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
1045 	if (!idr_ref)
1046 		return -ENOMEM;
1047 	rc = get_dma_id(device);
1048 	if (rc != 0) {
1049 		kfree(idr_ref);
1050 		return rc;
1051 	}
1052 
1053 	atomic_set(idr_ref, 0);
1054 
1055 	/* represent channels in sysfs. Probably want devs too */
1056 	list_for_each_entry(chan, &device->channels, device_node) {
1057 		rc = -ENOMEM;
1058 		chan->local = alloc_percpu(typeof(*chan->local));
1059 		if (chan->local == NULL)
1060 			goto err_out;
1061 		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
1062 		if (chan->dev == NULL) {
1063 			free_percpu(chan->local);
1064 			chan->local = NULL;
1065 			goto err_out;
1066 		}
1067 
1068 		chan->chan_id = chancnt++;
1069 		chan->dev->device.class = &dma_devclass;
1070 		chan->dev->device.parent = device->dev;
1071 		chan->dev->chan = chan;
1072 		chan->dev->idr_ref = idr_ref;
1073 		chan->dev->dev_id = device->dev_id;
1074 		atomic_inc(idr_ref);
1075 		dev_set_name(&chan->dev->device, "dma%dchan%d",
1076 			     device->dev_id, chan->chan_id);
1077 
1078 		rc = device_register(&chan->dev->device);
1079 		if (rc) {
1080 			free_percpu(chan->local);
1081 			chan->local = NULL;
1082 			kfree(chan->dev);
1083 			atomic_dec(idr_ref);
1084 			goto err_out;
1085 		}
1086 		chan->client_count = 0;
1087 	}
1088 
1089 	if (!chancnt) {
1090 		dev_err(device->dev, "%s: device has no channels!\n", __func__);
1091 		rc = -ENODEV;
1092 		goto err_out;
1093 	}
1094 
1095 	device->chancnt = chancnt;
1096 
1097 	mutex_lock(&dma_list_mutex);
1098 	/* take references on public channels */
1099 	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1100 		list_for_each_entry(chan, &device->channels, device_node) {
1101 			/* if clients are already waiting for channels we need
1102 			 * to take references on their behalf
1103 			 */
1104 			if (dma_chan_get(chan) == -ENODEV) {
1105 				/* note we can only get here for the first
1106 				 * channel as the remaining channels are
1107 				 * guaranteed to get a reference
1108 				 */
1109 				rc = -ENODEV;
1110 				mutex_unlock(&dma_list_mutex);
1111 				goto err_out;
1112 			}
1113 		}
1114 	list_add_tail_rcu(&device->global_node, &dma_device_list);
1115 	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1116 		device->privatecnt++;	/* Always private */
1117 	dma_channel_rebalance();
1118 	mutex_unlock(&dma_list_mutex);
1119 
1120 	return 0;
1121 
1122 err_out:
1123 	/* if we never registered a channel just release the idr */
1124 	if (atomic_read(idr_ref) == 0) {
1125 		ida_free(&dma_ida, device->dev_id);
1126 		kfree(idr_ref);
1127 		return rc;
1128 	}
1129 
1130 	list_for_each_entry(chan, &device->channels, device_node) {
1131 		if (chan->local == NULL)
1132 			continue;
1133 		mutex_lock(&dma_list_mutex);
1134 		chan->dev->chan = NULL;
1135 		mutex_unlock(&dma_list_mutex);
1136 		device_unregister(&chan->dev->device);
1137 		free_percpu(chan->local);
1138 	}
1139 	return rc;
1140 }
1141 EXPORT_SYMBOL(dma_async_device_register);
1142 
1143 /**
1144  * dma_async_device_unregister - unregister a DMA device
1145  * @device: &dma_device
1146  *
1147  * This routine is called by dma driver exit routines; dmaengine holds module
1148  * references to prevent it from being called while channels are in use.
1149  */
1150 void dma_async_device_unregister(struct dma_device *device)
1151 {
1152 	struct dma_chan *chan;
1153 
1154 	list_for_each_entry(chan, &device->channels, device_node) {
1155 		WARN_ONCE(!device->device_release && chan->client_count,
1156 			  "%s called while %d clients hold a reference\n",
1157 			  __func__, chan->client_count);
1158 		mutex_lock(&dma_list_mutex);
1159 		chan->dev->chan = NULL;
1160 		mutex_unlock(&dma_list_mutex);
1161 		device_unregister(&chan->dev->device);
1162 		free_percpu(chan->local);
1163 	}
1164 
1165 	mutex_lock(&dma_list_mutex);
1166 	/*
1167 	 * setting DMA_PRIVATE ensures the device being torn down will not
1168 	 * be used in the channel_table
1169 	 */
1170 	dma_cap_set(DMA_PRIVATE, device->cap_mask);
1171 	dma_channel_rebalance();
1172 	dma_device_put(device);
1173 	mutex_unlock(&dma_list_mutex);
1174 }
1175 EXPORT_SYMBOL(dma_async_device_unregister);
1176 
1177 static void dmam_device_release(struct device *dev, void *res)
1178 {
1179 	struct dma_device *device;
1180 
1181 	device = *(struct dma_device **)res;
1182 	dma_async_device_unregister(device);
1183 }
1184 
1185 /**
1186  * dmaenginem_async_device_register - register a DMA device (managed)
1187  * @device: &dma_device
1188  *
1189  * The operation is managed and will be undone on driver detach.
1190  */
1191 int dmaenginem_async_device_register(struct dma_device *device)
1192 {
1193 	void *p;
1194 	int ret;
1195 
1196 	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
1197 	if (!p)
1198 		return -ENOMEM;
1199 
1200 	ret = dma_async_device_register(device);
1201 	if (!ret) {
1202 		*(struct dma_device **)p = device;
1203 		devres_add(device->dev, p);
1204 	} else {
1205 		devres_free(p);
1206 	}
1207 
1208 	return ret;
1209 }
1210 EXPORT_SYMBOL(dmaenginem_async_device_register);
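
/*
 * Example (illustrative sketch only): a provider might use the managed
 * registration from its probe routine; the capability, callbacks and the
 * "dd" device are assumptions, every real driver fills in its own set and
 * must have added its channels to dd->channels beforehand.
 *
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->device_prep_dma_memcpy = foo_prep_memcpy;
 *	dd->device_tx_status = foo_tx_status;
 *	dd->device_issue_pending = foo_issue_pending;
 *	dd->dev = &pdev->dev;
 *
 *	return dmaenginem_async_device_register(dd);
 *	// no explicit unregister needed, it is undone on driver detach
 */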
1211 
1212 struct dmaengine_unmap_pool {
1213 	struct kmem_cache *cache;
1214 	const char *name;
1215 	mempool_t *pool;
1216 	size_t size;
1217 };
1218 
1219 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1220 static struct dmaengine_unmap_pool unmap_pool[] = {
1221 	__UNMAP_POOL(2),
1222 	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1223 	__UNMAP_POOL(16),
1224 	__UNMAP_POOL(128),
1225 	__UNMAP_POOL(256),
1226 	#endif
1227 };
1228 
1229 static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1230 {
1231 	int order = get_count_order(nr);
1232 
1233 	switch (order) {
1234 	case 0 ... 1:
1235 		return &unmap_pool[0];
1236 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1237 	case 2 ... 4:
1238 		return &unmap_pool[1];
1239 	case 5 ... 7:
1240 		return &unmap_pool[2];
1241 	case 8:
1242 		return &unmap_pool[3];
1243 #endif
1244 	default:
1245 		BUG();
1246 		return NULL;
1247 	}
1248 }
1249 
1250 static void dmaengine_unmap(struct kref *kref)
1251 {
1252 	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1253 	struct device *dev = unmap->dev;
1254 	int cnt, i;
1255 
1256 	cnt = unmap->to_cnt;
1257 	for (i = 0; i < cnt; i++)
1258 		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1259 			       DMA_TO_DEVICE);
1260 	cnt += unmap->from_cnt;
1261 	for (; i < cnt; i++)
1262 		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1263 			       DMA_FROM_DEVICE);
1264 	cnt += unmap->bidi_cnt;
1265 	for (; i < cnt; i++) {
1266 		if (unmap->addr[i] == 0)
1267 			continue;
1268 		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1269 			       DMA_BIDIRECTIONAL);
1270 	}
1271 	cnt = unmap->map_cnt;
1272 	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1273 }
1274 
1275 void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1276 {
1277 	if (unmap)
1278 		kref_put(&unmap->kref, dmaengine_unmap);
1279 }
1280 EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1281 
1282 static void dmaengine_destroy_unmap_pool(void)
1283 {
1284 	int i;
1285 
1286 	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1287 		struct dmaengine_unmap_pool *p = &unmap_pool[i];
1288 
1289 		mempool_destroy(p->pool);
1290 		p->pool = NULL;
1291 		kmem_cache_destroy(p->cache);
1292 		p->cache = NULL;
1293 	}
1294 }
1295 
1296 static int __init dmaengine_init_unmap_pool(void)
1297 {
1298 	int i;
1299 
1300 	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1301 		struct dmaengine_unmap_pool *p = &unmap_pool[i];
1302 		size_t size;
1303 
1304 		size = sizeof(struct dmaengine_unmap_data) +
1305 		       sizeof(dma_addr_t) * p->size;
1306 
1307 		p->cache = kmem_cache_create(p->name, size, 0,
1308 					     SLAB_HWCACHE_ALIGN, NULL);
1309 		if (!p->cache)
1310 			break;
1311 		p->pool = mempool_create_slab_pool(1, p->cache);
1312 		if (!p->pool)
1313 			break;
1314 	}
1315 
1316 	if (i == ARRAY_SIZE(unmap_pool))
1317 		return 0;
1318 
1319 	dmaengine_destroy_unmap_pool();
1320 	return -ENOMEM;
1321 }
1322 
1323 struct dmaengine_unmap_data *
1324 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1325 {
1326 	struct dmaengine_unmap_data *unmap;
1327 
1328 	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1329 	if (!unmap)
1330 		return NULL;
1331 
1332 	memset(unmap, 0, sizeof(*unmap));
1333 	kref_init(&unmap->kref);
1334 	unmap->dev = dev;
1335 	unmap->map_cnt = nr;
1336 
1337 	return unmap;
1338 }
1339 EXPORT_SYMBOL(dmaengine_get_unmap_data);
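
/*
 * Example (illustrative sketch only): offload code pairs
 * dmaengine_get_unmap_data() with dmaengine_unmap_put(); the pages, offsets
 * and length below are placeholders.
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return NULL;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_page, src_off, len, DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_page, dst_off, len, DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *	...
 *	dmaengine_unmap_put(unmap);	// drops the initial kref and unmaps both
 */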
1340 
1341 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1342 	struct dma_chan *chan)
1343 {
1344 	tx->chan = chan;
1345 	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1346 	spin_lock_init(&tx->lock);
1347 	#endif
1348 }
1349 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1350 
1351 /* dma_wait_for_async_tx - spin wait for a transaction to complete
1352  * @tx: in-flight transaction to wait on
1353  */
1354 enum dma_status
1355 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1356 {
1357 	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1358 
1359 	if (!tx)
1360 		return DMA_COMPLETE;
1361 
1362 	while (tx->cookie == -EBUSY) {
1363 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1364 			dev_err(tx->chan->device->dev,
1365 				"%s timeout waiting for descriptor submission\n",
1366 				__func__);
1367 			return DMA_ERROR;
1368 		}
1369 		cpu_relax();
1370 	}
1371 	return dma_sync_wait(tx->chan, tx->cookie);
1372 }
1373 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1374 
1375 /* dma_run_dependencies - helper routine for dma drivers to process
1376  *	(start) dependent operations on their target channel
1377  * @tx: transaction with dependencies
1378  */
1379 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1380 {
1381 	struct dma_async_tx_descriptor *dep = txd_next(tx);
1382 	struct dma_async_tx_descriptor *dep_next;
1383 	struct dma_chan *chan;
1384 
1385 	if (!dep)
1386 		return;
1387 
1388 	/* we'll submit tx->next now, so clear the link */
1389 	txd_clear_next(tx);
1390 	chan = dep->chan;
1391 
1392 	/* keep submitting up until a channel switch is detected
1393 	 * in that case we will be called again as a result of
1394 	 * processing the interrupt from async_tx_channel_switch
1395 	 */
1396 	for (; dep; dep = dep_next) {
1397 		txd_lock(dep);
1398 		txd_clear_parent(dep);
1399 		dep_next = txd_next(dep);
1400 		if (dep_next && dep_next->chan == chan)
1401 			txd_clear_next(dep); /* ->next will be submitted */
1402 		else
1403 			dep_next = NULL; /* submit current dep and terminate */
1404 		txd_unlock(dep);
1405 
1406 		dep->tx_submit(dep);
1407 	}
1408 
1409 	chan->device->device_issue_pending(chan);
1410 }
1411 EXPORT_SYMBOL_GPL(dma_run_dependencies);
1412 
1413 static int __init dma_bus_init(void)
1414 {
1415 	int err = dmaengine_init_unmap_pool();
1416 
1417 	if (err)
1418 		return err;
1419 	return class_register(&dma_devclass);
1420 }
1421 arch_initcall(dma_bus_init);
1422