1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
4  */
5 
6 /*
7  * This code implements the DMA subsystem. It provides a HW-neutral interface
8  * for other kernel code to use asynchronous memory copy capabilities,
9  * if present, and allows different HW DMA drivers to register as providing
10  * this capability.
11  *
12  * Because we are accelerating what is already a relatively fast
13  * operation, the code goes to great lengths to avoid additional overhead,
14  * such as locking.
15  *
16  * LOCKING:
17  *
18  * The subsystem keeps a global list of dma_device structs; it is protected by a
19  * mutex, dma_list_mutex.
20  *
21  * A subsystem can get access to a channel by calling dmaengine_get() followed
22  * by dma_find_channel(), or if it has need for an exclusive channel it can call
23  * dma_request_channel().  Once a channel is allocated a reference is taken
24  * against its corresponding driver to disable removal.
25  *
26  * Each device has a channels list, which runs unlocked but is never modified
27  * once the device is registered; it is just set up by the driver.
28  *
29  * See Documentation/driver-api/dmaengine for more details
30  */
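/*
 * Editorial example (not part of the original file): a minimal sketch of the
 * client flow described above for the shared, public channels. The local
 * variable name and the use of DMA_MEMCPY are illustrative assumptions.
 *
 *	struct dma_chan *chan;
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		...prepare and submit descriptors on chan...
 *		dma_async_issue_pending(chan);
 *	}
 *	dmaengine_put();
 *
 * A client needing an exclusive channel would instead use dma_request_chan()
 * and dma_release_channel(), as described in Documentation/driver-api/dmaengine.
 */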
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #include <linux/platform_device.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/init.h>
37 #include <linux/module.h>
38 #include <linux/mm.h>
39 #include <linux/device.h>
40 #include <linux/dmaengine.h>
41 #include <linux/hardirq.h>
42 #include <linux/spinlock.h>
43 #include <linux/of.h>
44 #include <linux/property.h>
45 #include <linux/percpu.h>
46 #include <linux/rcupdate.h>
47 #include <linux/mutex.h>
48 #include <linux/jiffies.h>
49 #include <linux/rculist.h>
50 #include <linux/idr.h>
51 #include <linux/slab.h>
52 #include <linux/acpi.h>
53 #include <linux/acpi_dma.h>
54 #include <linux/of_dma.h>
55 #include <linux/mempool.h>
56 #include <linux/numa.h>
57 
58 #include "dmaengine.h"
59 
60 static DEFINE_MUTEX(dma_list_mutex);
61 static DEFINE_IDA(dma_ida);
62 static LIST_HEAD(dma_device_list);
63 static long dmaengine_ref_count;
64 
65 /* --- debugfs implementation --- */
66 #ifdef CONFIG_DEBUG_FS
67 #include <linux/debugfs.h>
68 
69 static struct dentry *rootdir;
70 
71 static void dmaengine_debug_register(struct dma_device *dma_dev)
72 {
73 	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
74 						   rootdir);
75 	if (IS_ERR(dma_dev->dbg_dev_root))
76 		dma_dev->dbg_dev_root = NULL;
77 }
78 
79 static void dmaengine_debug_unregister(struct dma_device *dma_dev)
80 {
81 	debugfs_remove_recursive(dma_dev->dbg_dev_root);
82 	dma_dev->dbg_dev_root = NULL;
83 }
84 
85 static void dmaengine_dbg_summary_show(struct seq_file *s,
86 				       struct dma_device *dma_dev)
87 {
88 	struct dma_chan *chan;
89 
90 	list_for_each_entry(chan, &dma_dev->channels, device_node) {
91 		if (chan->client_count) {
92 			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
93 				   chan->dbg_client_name ?: "in-use");
94 
95 			if (chan->router)
96 				seq_printf(s, " (via router: %s)\n",
97 					dev_name(chan->router->dev));
98 			else
99 				seq_puts(s, "\n");
100 		}
101 	}
102 }
103 
104 static int dmaengine_summary_show(struct seq_file *s, void *data)
105 {
106 	struct dma_device *dma_dev = NULL;
107 
108 	mutex_lock(&dma_list_mutex);
109 	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
110 		seq_printf(s, "dma%d (%s): number of channels: %u\n",
111 			   dma_dev->dev_id, dev_name(dma_dev->dev),
112 			   dma_dev->chancnt);
113 
114 		if (dma_dev->dbg_summary_show)
115 			dma_dev->dbg_summary_show(s, dma_dev);
116 		else
117 			dmaengine_dbg_summary_show(s, dma_dev);
118 
119 		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
120 			seq_puts(s, "\n");
121 	}
122 	mutex_unlock(&dma_list_mutex);
123 
124 	return 0;
125 }
126 DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
127 
128 static void __init dmaengine_debugfs_init(void)
129 {
130 	rootdir = debugfs_create_dir("dmaengine", NULL);
131 
132 	/* /sys/kernel/debug/dmaengine/summary */
133 	debugfs_create_file("summary", 0444, rootdir, NULL,
134 			    &dmaengine_summary_fops);
135 }
136 #else
137 static inline void dmaengine_debugfs_init(void) { }
138 static inline int dmaengine_debug_register(struct dma_device *dma_dev)
139 {
140 	return 0;
141 }
142 
143 static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
144 #endif	/* CONFIG_DEBUG_FS */
145 
146 /* --- sysfs implementation --- */
147 
148 #define DMA_SLAVE_NAME	"slave"
149 
150 /**
151  * dev_to_dma_chan - convert a device pointer to its sysfs container object
152  * @dev:	device node
153  *
154  * Must be called under dma_list_mutex.
155  */
156 static struct dma_chan *dev_to_dma_chan(struct device *dev)
157 {
158 	struct dma_chan_dev *chan_dev;
159 
160 	chan_dev = container_of(dev, typeof(*chan_dev), device);
161 	return chan_dev->chan;
162 }
163 
164 static ssize_t memcpy_count_show(struct device *dev,
165 				 struct device_attribute *attr, char *buf)
166 {
167 	struct dma_chan *chan;
168 	unsigned long count = 0;
169 	int i;
170 	int err;
171 
172 	mutex_lock(&dma_list_mutex);
173 	chan = dev_to_dma_chan(dev);
174 	if (chan) {
175 		for_each_possible_cpu(i)
176 			count += per_cpu_ptr(chan->local, i)->memcpy_count;
177 		err = sysfs_emit(buf, "%lu\n", count);
178 	} else
179 		err = -ENODEV;
180 	mutex_unlock(&dma_list_mutex);
181 
182 	return err;
183 }
184 static DEVICE_ATTR_RO(memcpy_count);
185 
186 static ssize_t bytes_transferred_show(struct device *dev,
187 				      struct device_attribute *attr, char *buf)
188 {
189 	struct dma_chan *chan;
190 	unsigned long count = 0;
191 	int i;
192 	int err;
193 
194 	mutex_lock(&dma_list_mutex);
195 	chan = dev_to_dma_chan(dev);
196 	if (chan) {
197 		for_each_possible_cpu(i)
198 			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
199 		err = sysfs_emit(buf, "%lu\n", count);
200 	} else
201 		err = -ENODEV;
202 	mutex_unlock(&dma_list_mutex);
203 
204 	return err;
205 }
206 static DEVICE_ATTR_RO(bytes_transferred);
207 
208 static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
209 			   char *buf)
210 {
211 	struct dma_chan *chan;
212 	int err;
213 
214 	mutex_lock(&dma_list_mutex);
215 	chan = dev_to_dma_chan(dev);
216 	if (chan)
217 		err = sysfs_emit(buf, "%d\n", chan->client_count);
218 	else
219 		err = -ENODEV;
220 	mutex_unlock(&dma_list_mutex);
221 
222 	return err;
223 }
224 static DEVICE_ATTR_RO(in_use);
225 
226 static struct attribute *dma_dev_attrs[] = {
227 	&dev_attr_memcpy_count.attr,
228 	&dev_attr_bytes_transferred.attr,
229 	&dev_attr_in_use.attr,
230 	NULL,
231 };
232 ATTRIBUTE_GROUPS(dma_dev);
233 
234 static void chan_dev_release(struct device *dev)
235 {
236 	struct dma_chan_dev *chan_dev;
237 
238 	chan_dev = container_of(dev, typeof(*chan_dev), device);
239 	kfree(chan_dev);
240 }
241 
242 static struct class dma_devclass = {
243 	.name		= "dma",
244 	.dev_groups	= dma_dev_groups,
245 	.dev_release	= chan_dev_release,
246 };
247 
248 /* --- client and device registration --- */
249 
250 /* enable iteration over all operation types */
251 static dma_cap_mask_t dma_cap_mask_all;
252 
253 /**
254  * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
255  * @chan:	associated channel for this entry
256  */
257 struct dma_chan_tbl_ent {
258 	struct dma_chan *chan;
259 };
260 
261 /* percpu lookup table for memory-to-memory offload providers */
262 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
263 
264 static int __init dma_channel_table_init(void)
265 {
266 	enum dma_transaction_type cap;
267 	int err = 0;
268 
269 	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
270 
271 	/* 'interrupt', 'private', and 'slave' are channel capabilities,
272 	 * but are not associated with an operation so they do not need
273 	 * an entry in the channel_table
274 	 */
275 	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
276 	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
277 	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
278 
279 	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
280 		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
281 		if (!channel_table[cap]) {
282 			err = -ENOMEM;
283 			break;
284 		}
285 	}
286 
287 	if (err) {
288 		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
289 		for_each_dma_cap_mask(cap, dma_cap_mask_all)
290 			free_percpu(channel_table[cap]);
291 	}
292 
293 	return err;
294 }
295 arch_initcall(dma_channel_table_init);
296 
297 /**
298  * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
299  * @chan:	DMA channel to test
300  * @cpu:	CPU index which the channel should be close to
301  *
302  * Returns true if the channel is in the same NUMA-node as the CPU.
303  */
304 static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
305 {
306 	int node = dev_to_node(chan->device->dev);
307 	return node == NUMA_NO_NODE ||
308 		cpumask_test_cpu(cpu, cpumask_of_node(node));
309 }
310 
311 /**
312  * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
313  * @cap:	capability to match
314  * @cpu:	CPU index which the channel should be close to
315  *
316  * If some channels are close to the given CPU, the one with the lowest
317  * reference count is returned. Otherwise, CPU is ignored and only the
318  * reference count is taken into account.
319  *
320  * Must be called under dma_list_mutex.
321  */
322 static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
323 {
324 	struct dma_device *device;
325 	struct dma_chan *chan;
326 	struct dma_chan *min = NULL;
327 	struct dma_chan *localmin = NULL;
328 
329 	list_for_each_entry(device, &dma_device_list, global_node) {
330 		if (!dma_has_cap(cap, device->cap_mask) ||
331 		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
332 			continue;
333 		list_for_each_entry(chan, &device->channels, device_node) {
334 			if (!chan->client_count)
335 				continue;
336 			if (!min || chan->table_count < min->table_count)
337 				min = chan;
338 
339 			if (dma_chan_is_local(chan, cpu))
340 				if (!localmin ||
341 				    chan->table_count < localmin->table_count)
342 					localmin = chan;
343 		}
344 	}
345 
346 	chan = localmin ? localmin : min;
347 
348 	if (chan)
349 		chan->table_count++;
350 
351 	return chan;
352 }
353 
354 /**
355  * dma_channel_rebalance - redistribute the available channels
356  *
357  * Optimize for CPU isolation (each CPU gets a dedicated channel for an
358  * operation type) in the SMP case, and operation isolation (avoid
359  * multi-tasking channels) in the non-SMP case.
360  *
361  * Must be called under dma_list_mutex.
362  */
363 static void dma_channel_rebalance(void)
364 {
365 	struct dma_chan *chan;
366 	struct dma_device *device;
367 	int cpu;
368 	int cap;
369 
370 	/* undo the last distribution */
371 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
372 		for_each_possible_cpu(cpu)
373 			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
374 
375 	list_for_each_entry(device, &dma_device_list, global_node) {
376 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
377 			continue;
378 		list_for_each_entry(chan, &device->channels, device_node)
379 			chan->table_count = 0;
380 	}
381 
382 	/* don't populate the channel_table if no clients are available */
383 	if (!dmaengine_ref_count)
384 		return;
385 
386 	/* redistribute available channels */
387 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
388 		for_each_online_cpu(cpu) {
389 			chan = min_chan(cap, cpu);
390 			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
391 		}
392 }
393 
394 static int dma_device_satisfies_mask(struct dma_device *device,
395 				     const dma_cap_mask_t *want)
396 {
397 	dma_cap_mask_t has;
398 
399 	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
400 		DMA_TX_TYPE_END);
401 	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
402 }
403 
404 static struct module *dma_chan_to_owner(struct dma_chan *chan)
405 {
406 	return chan->device->owner;
407 }
408 
409 /**
410  * balance_ref_count - catch up the channel reference count
411  * @chan:	channel to balance ->client_count versus dmaengine_ref_count
412  *
413  * Must be called under dma_list_mutex.
414  */
415 static void balance_ref_count(struct dma_chan *chan)
416 {
417 	struct module *owner = dma_chan_to_owner(chan);
418 
419 	while (chan->client_count < dmaengine_ref_count) {
420 		__module_get(owner);
421 		chan->client_count++;
422 	}
423 }
424 
425 static void dma_device_release(struct kref *ref)
426 {
427 	struct dma_device *device = container_of(ref, struct dma_device, ref);
428 
429 	list_del_rcu(&device->global_node);
430 	dma_channel_rebalance();
431 
432 	if (device->device_release)
433 		device->device_release(device);
434 }
435 
436 static void dma_device_put(struct dma_device *device)
437 {
438 	lockdep_assert_held(&dma_list_mutex);
439 	kref_put(&device->ref, dma_device_release);
440 }
441 
442 /**
443  * dma_chan_get - try to grab a DMA channel's parent driver module
444  * @chan:	channel to grab
445  *
446  * Must be called under dma_list_mutex.
447  */
448 static int dma_chan_get(struct dma_chan *chan)
449 {
450 	struct module *owner = dma_chan_to_owner(chan);
451 	int ret;
452 
453 	/* The channel is already in use, update client count */
454 	if (chan->client_count) {
455 		__module_get(owner);
456 		chan->client_count++;
457 		return 0;
458 	}
459 
460 	if (!try_module_get(owner))
461 		return -ENODEV;
462 
463 	ret = kref_get_unless_zero(&chan->device->ref);
464 	if (!ret) {
465 		ret = -ENODEV;
466 		goto module_put_out;
467 	}
468 
469 	/* allocate upon first client reference */
470 	if (chan->device->device_alloc_chan_resources) {
471 		ret = chan->device->device_alloc_chan_resources(chan);
472 		if (ret < 0)
473 			goto err_out;
474 	}
475 
476 	chan->client_count++;
477 
478 	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
479 		balance_ref_count(chan);
480 
481 	return 0;
482 
483 err_out:
484 	dma_device_put(chan->device);
485 module_put_out:
486 	module_put(owner);
487 	return ret;
488 }
489 
490 /**
491  * dma_chan_put - drop a reference to a DMA channel's parent driver module
492  * @chan:	channel to release
493  *
494  * Must be called under dma_list_mutex.
495  */
496 static void dma_chan_put(struct dma_chan *chan)
497 {
498 	/* This channel is not in use, bail out */
499 	if (!chan->client_count)
500 		return;
501 
502 	chan->client_count--;
503 
504 	/* This channel is not in use anymore, free it */
505 	if (!chan->client_count && chan->device->device_free_chan_resources) {
506 		/* Make sure all operations have completed */
507 		dmaengine_synchronize(chan);
508 		chan->device->device_free_chan_resources(chan);
509 	}
510 
511 	/* If the channel is used via a DMA request router, free the mapping */
512 	if (chan->router && chan->router->route_free) {
513 		chan->router->route_free(chan->router->dev, chan->route_data);
514 		chan->router = NULL;
515 		chan->route_data = NULL;
516 	}
517 
518 	dma_device_put(chan->device);
519 	module_put(dma_chan_to_owner(chan));
520 }
521 
522 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
523 {
524 	enum dma_status status;
525 	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
526 
527 	dma_async_issue_pending(chan);
528 	do {
529 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
530 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
531 			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
532 			return DMA_ERROR;
533 		}
534 		if (status != DMA_IN_PROGRESS)
535 			break;
536 		cpu_relax();
537 	} while (1);
538 
539 	return status;
540 }
541 EXPORT_SYMBOL(dma_sync_wait);
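/*
 * Editorial example: a hedged sketch of pairing a memcpy submission with
 * dma_sync_wait(). chan, dst, src and len are assumed caller-owned values,
 * with dst/src already DMA-mapped.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(tx);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		return -EIO;
 */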
542 
543 /**
544  * dma_find_channel - find a channel to carry out the operation
545  * @tx_type:	transaction type
546  */
547 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
548 {
549 	return this_cpu_read(channel_table[tx_type]->chan);
550 }
551 EXPORT_SYMBOL(dma_find_channel);
552 
553 /**
554  * dma_issue_pending_all - flush all pending operations across all channels
555  */
556 void dma_issue_pending_all(void)
557 {
558 	struct dma_device *device;
559 	struct dma_chan *chan;
560 
561 	rcu_read_lock();
562 	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
563 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
564 			continue;
565 		list_for_each_entry(chan, &device->channels, device_node)
566 			if (chan->client_count)
567 				device->device_issue_pending(chan);
568 	}
569 	rcu_read_unlock();
570 }
571 EXPORT_SYMBOL(dma_issue_pending_all);
572 
573 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
574 {
575 	struct dma_device *device;
576 
577 	if (!chan || !caps)
578 		return -EINVAL;
579 
580 	device = chan->device;
581 
582 	/* check if the channel supports slave transactions */
583 	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
584 	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
585 		return -ENXIO;
586 
587 	/*
588 	 * Check whether it reports it uses the generic slave
589 	 * capabilities, if not, that means it doesn't support any
590 	 * kind of slave capabilities reporting.
591 	 */
592 	if (!device->directions)
593 		return -ENXIO;
594 
595 	caps->src_addr_widths = device->src_addr_widths;
596 	caps->dst_addr_widths = device->dst_addr_widths;
597 	caps->directions = device->directions;
598 	caps->min_burst = device->min_burst;
599 	caps->max_burst = device->max_burst;
600 	caps->max_sg_burst = device->max_sg_burst;
601 	caps->residue_granularity = device->residue_granularity;
602 	caps->descriptor_reuse = device->descriptor_reuse;
603 	caps->cmd_pause = !!device->device_pause;
604 	caps->cmd_resume = !!device->device_resume;
605 	caps->cmd_terminate = !!device->device_terminate_all;
606 
607 	/*
608 	 * DMA engine device might be configured with non-uniformly
609 	 * distributed slave capabilities per device channels. In this
610 	 * case the corresponding driver may provide the device_caps
611 	 * callback to override the generic capabilities with
612 	 * channel-specific ones.
613 	 */
614 	if (device->device_caps)
615 		device->device_caps(chan, caps);
616 
617 	return 0;
618 }
619 EXPORT_SYMBOL_GPL(dma_get_slave_caps);
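/*
 * Editorial example: a sketch of a client consulting the reported caps before
 * building its dma_slave_config. The chosen bus width and direction are
 * illustrative only.
 *
 *	struct dma_slave_caps caps;
 *	struct dma_slave_config cfg = { };
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    (caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
 *		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 *	cfg.direction = DMA_MEM_TO_DEV;
 *	dmaengine_slave_config(chan, &cfg);
 */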
620 
621 static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
622 					  struct dma_device *dev,
623 					  dma_filter_fn fn, void *fn_param)
624 {
625 	struct dma_chan *chan;
626 
627 	if (mask && !dma_device_satisfies_mask(dev, mask)) {
628 		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
629 		return NULL;
630 	}
631 	/* devices with multiple channels need special handling as we need to
632 	 * ensure that all channels are either private or public.
633 	 */
634 	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
635 		list_for_each_entry(chan, &dev->channels, device_node) {
636 			/* some channels are already publicly allocated */
637 			if (chan->client_count)
638 				return NULL;
639 		}
640 
641 	list_for_each_entry(chan, &dev->channels, device_node) {
642 		if (chan->client_count) {
643 			dev_dbg(dev->dev, "%s: %s busy\n",
644 				 __func__, dma_chan_name(chan));
645 			continue;
646 		}
647 		if (fn && !fn(chan, fn_param)) {
648 			dev_dbg(dev->dev, "%s: %s filter said false\n",
649 				 __func__, dma_chan_name(chan));
650 			continue;
651 		}
652 		return chan;
653 	}
654 
655 	return NULL;
656 }
657 
658 static struct dma_chan *find_candidate(struct dma_device *device,
659 				       const dma_cap_mask_t *mask,
660 				       dma_filter_fn fn, void *fn_param)
661 {
662 	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
663 	int err;
664 
665 	if (chan) {
666 		/* Found a suitable channel, try to grab, prep, and return it.
667 		 * We first set DMA_PRIVATE to disable balance_ref_count as this
668 		 * channel will not be published in the general-purpose
669 		 * allocator
670 		 */
671 		dma_cap_set(DMA_PRIVATE, device->cap_mask);
672 		device->privatecnt++;
673 		err = dma_chan_get(chan);
674 
675 		if (err) {
676 			if (err == -ENODEV) {
677 				dev_dbg(device->dev, "%s: %s module removed\n",
678 					__func__, dma_chan_name(chan));
679 				list_del_rcu(&device->global_node);
680 			} else
681 				dev_dbg(device->dev,
682 					"%s: failed to get %s: (%d)\n",
683 					 __func__, dma_chan_name(chan), err);
684 
685 			if (--device->privatecnt == 0)
686 				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
687 
688 			chan = ERR_PTR(err);
689 		}
690 	}
691 
692 	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
693 }
694 
695 /**
696  * dma_get_slave_channel - try to get specific channel exclusively
697  * @chan:	target channel
698  */
699 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
700 {
701 	/* lock against __dma_request_channel */
702 	mutex_lock(&dma_list_mutex);
703 
704 	if (chan->client_count == 0) {
705 		struct dma_device *device = chan->device;
706 		int err;
707 
708 		dma_cap_set(DMA_PRIVATE, device->cap_mask);
709 		device->privatecnt++;
710 		err = dma_chan_get(chan);
711 		if (err) {
712 			dev_dbg(chan->device->dev,
713 				"%s: failed to get %s: (%d)\n",
714 				__func__, dma_chan_name(chan), err);
715 			chan = NULL;
716 			if (--device->privatecnt == 0)
717 				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
718 		}
719 	} else
720 		chan = NULL;
721 
722 	mutex_unlock(&dma_list_mutex);
723 
724 
725 	return chan;
726 }
727 EXPORT_SYMBOL_GPL(dma_get_slave_channel);
728 
729 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
730 {
731 	dma_cap_mask_t mask;
732 	struct dma_chan *chan;
733 
734 	dma_cap_zero(mask);
735 	dma_cap_set(DMA_SLAVE, mask);
736 
737 	/* lock against __dma_request_channel */
738 	mutex_lock(&dma_list_mutex);
739 
740 	chan = find_candidate(device, &mask, NULL, NULL);
741 
742 	mutex_unlock(&dma_list_mutex);
743 
744 	return IS_ERR(chan) ? NULL : chan;
745 }
746 EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
747 
748 /**
749  * __dma_request_channel - try to allocate an exclusive channel
750  * @mask:	capabilities that the channel must satisfy
751  * @fn:		optional callback to filter available channels
752  * @fn_param:	opaque parameter to pass to dma_filter_fn()
753  * @np:		device node to look for DMA channels
754  *
755  * Returns pointer to appropriate DMA channel on success or NULL.
756  */
757 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
758 				       dma_filter_fn fn, void *fn_param,
759 				       struct device_node *np)
760 {
761 	struct dma_device *device, *_d;
762 	struct dma_chan *chan = NULL;
763 
764 	/* Find a channel */
765 	mutex_lock(&dma_list_mutex);
766 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
767 		/* Finds a DMA controller with matching device node */
768 		if (np && device->dev->of_node && np != device->dev->of_node)
769 			continue;
770 
771 		chan = find_candidate(device, mask, fn, fn_param);
772 		if (!IS_ERR(chan))
773 			break;
774 
775 		chan = NULL;
776 	}
777 	mutex_unlock(&dma_list_mutex);
778 
779 	pr_debug("%s: %s (%s)\n",
780 		 __func__,
781 		 chan ? "success" : "fail",
782 		 chan ? dma_chan_name(chan) : NULL);
783 
784 	return chan;
785 }
786 EXPORT_SYMBOL_GPL(__dma_request_channel);
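/*
 * Editorial example: a sketch of the filter mechanism via the
 * dma_request_channel() wrapper around __dma_request_channel(). The filter
 * callback and its matching criterion (comparing the parent device against
 * a caller-provided my_dev) are illustrative assumptions.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan)
 *		...use chan, then dma_release_channel(chan)...
 */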
787 
788 static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
789 						    const char *name,
790 						    struct device *dev)
791 {
792 	int i;
793 
794 	if (!device->filter.mapcnt)
795 		return NULL;
796 
797 	for (i = 0; i < device->filter.mapcnt; i++) {
798 		const struct dma_slave_map *map = &device->filter.map[i];
799 
800 		if (!strcmp(map->devname, dev_name(dev)) &&
801 		    !strcmp(map->slave, name))
802 			return map;
803 	}
804 
805 	return NULL;
806 }
807 
808 /**
809  * dma_request_chan - try to allocate an exclusive slave channel
810  * @dev:	pointer to client device structure
811  * @name:	slave channel name
812  *
813  * Returns pointer to appropriate DMA channel on success or an error pointer.
814  */
815 struct dma_chan *dma_request_chan(struct device *dev, const char *name)
816 {
817 	struct fwnode_handle *fwnode = dev_fwnode(dev);
818 	struct dma_device *d, *_d;
819 	struct dma_chan *chan = NULL;
820 
821 	if (is_of_node(fwnode))
822 		chan = of_dma_request_slave_channel(to_of_node(fwnode), name);
823 	else if (is_acpi_device_node(fwnode))
824 		chan = acpi_dma_request_slave_chan_by_name(dev, name);
825 
826 	if (PTR_ERR(chan) == -EPROBE_DEFER)
827 		return chan;
828 
829 	if (!IS_ERR_OR_NULL(chan))
830 		goto found;
831 
832 	/* Try to find the channel via the DMA filter map(s) */
833 	mutex_lock(&dma_list_mutex);
834 	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
835 		dma_cap_mask_t mask;
836 		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
837 
838 		if (!map)
839 			continue;
840 
841 		dma_cap_zero(mask);
842 		dma_cap_set(DMA_SLAVE, mask);
843 
844 		chan = find_candidate(d, &mask, d->filter.fn, map->param);
845 		if (!IS_ERR(chan))
846 			break;
847 	}
848 	mutex_unlock(&dma_list_mutex);
849 
850 	if (IS_ERR(chan))
851 		return chan;
852 	if (!chan)
853 		return ERR_PTR(-EPROBE_DEFER);
854 
855 found:
856 #ifdef CONFIG_DEBUG_FS
857 	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev), name);
858 	/* No functional issue if it fails, users are supposed to test before use */
859 #endif
860 
861 	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
862 	if (!chan->name)
863 		return chan;
864 	chan->slave = dev;
865 
866 	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
867 			      DMA_SLAVE_NAME))
868 		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
869 	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
870 		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);
871 
872 	return chan;
873 }
874 EXPORT_SYMBOL_GPL(dma_request_chan);
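/*
 * Editorial example: the usual probe-time request of a named slave channel.
 * The channel name "rx" is an assumption for illustration.
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	(this may be -EPROBE_DEFER)
 *	...
 *	dma_release_channel(chan);
 */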
875 
876 /**
877  * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
878  * @mask:	capabilities that the channel must satisfy
879  *
880  * Returns pointer to appropriate DMA channel on success or an error pointer.
881  */
882 struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
883 {
884 	struct dma_chan *chan;
885 
886 	if (!mask)
887 		return ERR_PTR(-ENODEV);
888 
889 	chan = __dma_request_channel(mask, NULL, NULL, NULL);
890 	if (!chan) {
891 		mutex_lock(&dma_list_mutex);
892 		if (list_empty(&dma_device_list))
893 			chan = ERR_PTR(-EPROBE_DEFER);
894 		else
895 			chan = ERR_PTR(-ENODEV);
896 		mutex_unlock(&dma_list_mutex);
897 	}
898 
899 	return chan;
900 }
901 EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
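/*
 * Editorial example: requesting any channel that satisfies a capability mask;
 * DMA_MEMCPY is used purely for illustration.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */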
902 
903 void dma_release_channel(struct dma_chan *chan)
904 {
905 	mutex_lock(&dma_list_mutex);
906 	WARN_ONCE(chan->client_count != 1,
907 		  "chan reference count %d != 1\n", chan->client_count);
908 	dma_chan_put(chan);
909 	/* drop PRIVATE cap enabled by __dma_request_channel() */
910 	if (--chan->device->privatecnt == 0)
911 		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
912 
913 	if (chan->slave) {
914 		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
915 		sysfs_remove_link(&chan->slave->kobj, chan->name);
916 		kfree(chan->name);
917 		chan->name = NULL;
918 		chan->slave = NULL;
919 	}
920 
921 #ifdef CONFIG_DEBUG_FS
922 	kfree(chan->dbg_client_name);
923 	chan->dbg_client_name = NULL;
924 #endif
925 	mutex_unlock(&dma_list_mutex);
926 }
927 EXPORT_SYMBOL_GPL(dma_release_channel);
928 
929 static void dmaenginem_release_channel(void *chan)
930 {
931 	dma_release_channel(chan);
932 }
933 
934 /**
935  * devm_dma_request_chan - try to allocate an exclusive slave channel
936  * @dev:	pointer to client device structure
937  * @name:	slave channel name
938  *
939  * Returns pointer to appropriate DMA channel on success or an error pointer.
940  *
941  * The operation is managed and will be undone on driver detach.
942  */
943 
944 struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name)
945 {
946 	struct dma_chan *chan = dma_request_chan(dev, name);
947 	int ret = 0;
948 
949 	if (!IS_ERR(chan))
950 		ret = devm_add_action_or_reset(dev, dmaenginem_release_channel, chan);
951 
952 	if (ret)
953 		return ERR_PTR(ret);
954 
955 	return chan;
956 }
957 EXPORT_SYMBOL_GPL(devm_dma_request_chan);
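/*
 * Editorial example: the managed variant in a probe path. The channel is
 * released automatically on driver detach, so no explicit
 * dma_release_channel() call is needed. The name "tx" is illustrative.
 *
 *	chan = devm_dma_request_chan(&pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */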
958 
959 /**
960  * dmaengine_get - register interest in dma_channels
961  */
962 void dmaengine_get(void)
963 {
964 	struct dma_device *device, *_d;
965 	struct dma_chan *chan;
966 	int err;
967 
968 	mutex_lock(&dma_list_mutex);
969 	dmaengine_ref_count++;
970 
971 	/* try to grab channels */
972 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
973 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
974 			continue;
975 		list_for_each_entry(chan, &device->channels, device_node) {
976 			err = dma_chan_get(chan);
977 			if (err == -ENODEV) {
978 				/* module removed before we could use it */
979 				list_del_rcu(&device->global_node);
980 				break;
981 			} else if (err)
982 				dev_dbg(chan->device->dev,
983 					"%s: failed to get %s: (%d)\n",
984 					__func__, dma_chan_name(chan), err);
985 		}
986 	}
987 
988 	/* if this is the first reference and there were channels
989 	 * waiting we need to rebalance to get those channels
990 	 * incorporated into the channel table
991 	 */
992 	if (dmaengine_ref_count == 1)
993 		dma_channel_rebalance();
994 	mutex_unlock(&dma_list_mutex);
995 }
996 EXPORT_SYMBOL(dmaengine_get);
997 
998 /**
999  * dmaengine_put - let DMA drivers be removed when ref_count == 0
1000  */
1001 void dmaengine_put(void)
1002 {
1003 	struct dma_device *device, *_d;
1004 	struct dma_chan *chan;
1005 
1006 	mutex_lock(&dma_list_mutex);
1007 	dmaengine_ref_count--;
1008 	BUG_ON(dmaengine_ref_count < 0);
1009 	/* drop channel references */
1010 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
1011 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1012 			continue;
1013 		list_for_each_entry(chan, &device->channels, device_node)
1014 			dma_chan_put(chan);
1015 	}
1016 	mutex_unlock(&dma_list_mutex);
1017 }
1018 EXPORT_SYMBOL(dmaengine_put);
1019 
1020 static bool device_has_all_tx_types(struct dma_device *device)
1021 {
1022 	/* A device that satisfies this test has channels that will never cause
1023 	 * an async_tx channel switch event as all possible operation types can
1024 	 * be handled.
1025 	 */
1026 	#ifdef CONFIG_ASYNC_TX_DMA
1027 	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
1028 		return false;
1029 	#endif
1030 
1031 	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
1032 	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
1033 		return false;
1034 	#endif
1035 
1036 	#if IS_ENABLED(CONFIG_ASYNC_XOR)
1037 	if (!dma_has_cap(DMA_XOR, device->cap_mask))
1038 		return false;
1039 
1040 	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
1041 	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
1042 		return false;
1043 	#endif
1044 	#endif
1045 
1046 	#if IS_ENABLED(CONFIG_ASYNC_PQ)
1047 	if (!dma_has_cap(DMA_PQ, device->cap_mask))
1048 		return false;
1049 
1050 	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
1051 	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
1052 		return false;
1053 	#endif
1054 	#endif
1055 
1056 	return true;
1057 }
1058 
1059 static int get_dma_id(struct dma_device *device)
1060 {
1061 	int rc = ida_alloc(&dma_ida, GFP_KERNEL);
1062 
1063 	if (rc < 0)
1064 		return rc;
1065 	device->dev_id = rc;
1066 	return 0;
1067 }
1068 
1069 static int __dma_async_device_channel_register(struct dma_device *device,
1070 					       struct dma_chan *chan,
1071 					       const char *name)
1072 {
1073 	int rc;
1074 
1075 	chan->local = alloc_percpu(typeof(*chan->local));
1076 	if (!chan->local)
1077 		return -ENOMEM;
1078 	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
1079 	if (!chan->dev) {
1080 		rc = -ENOMEM;
1081 		goto err_free_local;
1082 	}
1083 
1084 	/*
1085 	 * When the chan_id is a negative value, we are dynamically adding
1086 	 * the channel. Otherwise we are statically enumerating.
1087 	 */
1088 	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
1089 	if (chan->chan_id < 0) {
1090 		pr_err("%s: unable to alloc ida for chan: %d\n",
1091 		       __func__, chan->chan_id);
1092 		rc = chan->chan_id;
1093 		goto err_free_dev;
1094 	}
1095 
1096 	chan->dev->device.class = &dma_devclass;
1097 	chan->dev->device.parent = device->dev;
1098 	chan->dev->chan = chan;
1099 	chan->dev->dev_id = device->dev_id;
1100 	if (!name)
1101 		dev_set_name(&chan->dev->device, "dma%dchan%d", device->dev_id, chan->chan_id);
1102 	else
1103 		dev_set_name(&chan->dev->device, "%s", name);
1104 	rc = device_register(&chan->dev->device);
1105 	if (rc)
1106 		goto err_out_ida;
1107 	chan->client_count = 0;
1108 	device->chancnt++;
1109 
1110 	return 0;
1111 
1112  err_out_ida:
1113 	ida_free(&device->chan_ida, chan->chan_id);
1114  err_free_dev:
1115 	kfree(chan->dev);
1116  err_free_local:
1117 	free_percpu(chan->local);
1118 	chan->local = NULL;
1119 	return rc;
1120 }
1121 
1122 int dma_async_device_channel_register(struct dma_device *device,
1123 				      struct dma_chan *chan,
1124 				      const char *name)
1125 {
1126 	int rc;
1127 
1128 	rc = __dma_async_device_channel_register(device, chan, name);
1129 	if (rc < 0)
1130 		return rc;
1131 
1132 	dma_channel_rebalance();
1133 	return 0;
1134 }
1135 EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
1136 
1137 static void __dma_async_device_channel_unregister(struct dma_device *device,
1138 						  struct dma_chan *chan)
1139 {
1140 	if (chan->local == NULL)
1141 		return;
1142 
1143 	WARN_ONCE(!device->device_release && chan->client_count,
1144 		  "%s called while %d clients hold a reference\n",
1145 		  __func__, chan->client_count);
1146 	mutex_lock(&dma_list_mutex);
1147 	device->chancnt--;
1148 	chan->dev->chan = NULL;
1149 	mutex_unlock(&dma_list_mutex);
1150 	ida_free(&device->chan_ida, chan->chan_id);
1151 	device_unregister(&chan->dev->device);
1152 	free_percpu(chan->local);
1153 }
1154 
1155 void dma_async_device_channel_unregister(struct dma_device *device,
1156 					 struct dma_chan *chan)
1157 {
1158 	__dma_async_device_channel_unregister(device, chan);
1159 	dma_channel_rebalance();
1160 }
1161 EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
1162 
1163 /**
1164  * dma_async_device_register - registers DMA devices found
1165  * @device:	pointer to &struct dma_device
1166  *
1167  * After calling this routine the structure should not be freed except in the
1168  * device_release() callback which will be called after
1169  * dma_async_device_unregister() is called and no further references are taken.
1170  */
1171 int dma_async_device_register(struct dma_device *device)
1172 {
1173 	int rc;
1174 	struct dma_chan* chan;
1175 
1176 	if (!device)
1177 		return -ENODEV;
1178 
1179 	/* validate device routines */
1180 	if (!device->dev) {
1181 		pr_err("DMAdevice must have dev\n");
1182 		return -EIO;
1183 	}
1184 
1185 	device->owner = device->dev->driver->owner;
1186 
1187 #define CHECK_CAP(_name, _type)								\
1188 {											\
1189 	if (dma_has_cap(_type, device->cap_mask) && !device->device_prep_##_name) {	\
1190 		dev_err(device->dev,							\
1191 			"Device claims capability %s, but op is not defined\n",		\
1192 			__stringify(_type));						\
1193 		return -EIO;								\
1194 	}										\
1195 }
1196 
1197 	CHECK_CAP(dma_memcpy,      DMA_MEMCPY);
1198 	CHECK_CAP(dma_xor,         DMA_XOR);
1199 	CHECK_CAP(dma_xor_val,     DMA_XOR_VAL);
1200 	CHECK_CAP(dma_pq,          DMA_PQ);
1201 	CHECK_CAP(dma_pq_val,      DMA_PQ_VAL);
1202 	CHECK_CAP(dma_memset,      DMA_MEMSET);
1203 	CHECK_CAP(dma_interrupt,   DMA_INTERRUPT);
1204 	CHECK_CAP(dma_cyclic,      DMA_CYCLIC);
1205 	CHECK_CAP(interleaved_dma, DMA_INTERLEAVE);
1206 
1207 #undef CHECK_CAP
1208 
1209 	if (!device->device_tx_status) {
1210 		dev_err(device->dev, "Device tx_status is not defined\n");
1211 		return -EIO;
1212 	}
1213 
1214 
1215 	if (!device->device_issue_pending) {
1216 		dev_err(device->dev, "Device issue_pending is not defined\n");
1217 		return -EIO;
1218 	}
1219 
1220 	if (!device->device_release)
1221 		dev_dbg(device->dev,
1222 			 "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
1223 
1224 	kref_init(&device->ref);
1225 
1226 	/* note: this only matters in the
1227 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
1228 	 */
1229 	if (device_has_all_tx_types(device))
1230 		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
1231 
1232 	rc = get_dma_id(device);
1233 	if (rc != 0)
1234 		return rc;
1235 
1236 	ida_init(&device->chan_ida);
1237 
1238 	/* represent channels in sysfs. Probably want devs too */
1239 	list_for_each_entry(chan, &device->channels, device_node) {
1240 		rc = __dma_async_device_channel_register(device, chan, NULL);
1241 		if (rc < 0)
1242 			goto err_out;
1243 	}
1244 
1245 	mutex_lock(&dma_list_mutex);
1246 	/* take references on public channels */
1247 	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1248 		list_for_each_entry(chan, &device->channels, device_node) {
1249 			/* if clients are already waiting for channels we need
1250 			 * to take references on their behalf
1251 			 */
1252 			if (dma_chan_get(chan) == -ENODEV) {
1253 				/* note we can only get here for the first
1254 				 * channel as the remaining channels are
1255 				 * guaranteed to get a reference
1256 				 */
1257 				rc = -ENODEV;
1258 				mutex_unlock(&dma_list_mutex);
1259 				goto err_out;
1260 			}
1261 		}
1262 	list_add_tail_rcu(&device->global_node, &dma_device_list);
1263 	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1264 		device->privatecnt++;	/* Always private */
1265 	dma_channel_rebalance();
1266 	mutex_unlock(&dma_list_mutex);
1267 
1268 	dmaengine_debug_register(device);
1269 
1270 	return 0;
1271 
1272 err_out:
1273 	/* if we never registered a channel just release the ida */
1274 	if (!device->chancnt) {
1275 		ida_free(&dma_ida, device->dev_id);
1276 		return rc;
1277 	}
1278 
1279 	list_for_each_entry(chan, &device->channels, device_node) {
1280 		if (chan->local == NULL)
1281 			continue;
1282 		mutex_lock(&dma_list_mutex);
1283 		chan->dev->chan = NULL;
1284 		mutex_unlock(&dma_list_mutex);
1285 		device_unregister(&chan->dev->device);
1286 		free_percpu(chan->local);
1287 	}
1288 	return rc;
1289 }
1290 EXPORT_SYMBOL(dma_async_device_register);
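/*
 * Editorial example: a hedged sketch of the provider side. A driver fills in
 * its dma_device, links its channel(s) and registers. The foo_* callbacks and
 * the priv layout are assumptions for illustration, not a real driver.
 *
 *	struct dma_device *dd = &priv->ddev;
 *
 *	dd->dev = &pdev->dev;
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->device_prep_dma_memcpy = foo_prep_memcpy;
 *	dd->device_tx_status = foo_tx_status;
 *	dd->device_issue_pending = foo_issue_pending;
 *	dd->device_release = foo_release;
 *
 *	INIT_LIST_HEAD(&dd->channels);
 *	priv->chan.device = dd;
 *	list_add_tail(&priv->chan.device_node, &dd->channels);
 *
 *	return dma_async_device_register(dd);
 */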
1291 
1292 /**
1293  * dma_async_device_unregister - unregister a DMA device
1294  * @device:	pointer to &struct dma_device
1295  *
1296  * This routine is called by DMA driver exit routines; dmaengine holds module
1297  * references to prevent it being called while channels are in use.
1298  */
1299 void dma_async_device_unregister(struct dma_device *device)
1300 {
1301 	struct dma_chan *chan, *n;
1302 
1303 	dmaengine_debug_unregister(device);
1304 
1305 	list_for_each_entry_safe(chan, n, &device->channels, device_node)
1306 		__dma_async_device_channel_unregister(device, chan);
1307 
1308 	mutex_lock(&dma_list_mutex);
1309 	/*
1310 	 * setting DMA_PRIVATE ensures the device being torn down will not
1311 	 * be used in the channel_table
1312 	 */
1313 	dma_cap_set(DMA_PRIVATE, device->cap_mask);
1314 	dma_channel_rebalance();
1315 	ida_free(&dma_ida, device->dev_id);
1316 	dma_device_put(device);
1317 	mutex_unlock(&dma_list_mutex);
1318 }
1319 EXPORT_SYMBOL(dma_async_device_unregister);
1320 
1321 static void dmaenginem_async_device_unregister(void *device)
1322 {
1323 	dma_async_device_unregister(device);
1324 }
1325 
1326 /**
1327  * dmaenginem_async_device_register - registers DMA devices found
1328  * @device:	pointer to &struct dma_device
1329  *
1330  * The operation is managed and will be undone on driver detach.
1331  */
1332 int dmaenginem_async_device_register(struct dma_device *device)
1333 {
1334 	int ret;
1335 
1336 	ret = dma_async_device_register(device);
1337 	if (ret)
1338 		return ret;
1339 
1340 	return devm_add_action_or_reset(device->dev, dmaenginem_async_device_unregister, device);
1341 }
1342 EXPORT_SYMBOL(dmaenginem_async_device_register);
1343 
1344 struct dmaengine_unmap_pool {
1345 	struct kmem_cache *cache;
1346 	const char *name;
1347 	mempool_t *pool;
1348 	size_t size;
1349 };
1350 
1351 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1352 static struct dmaengine_unmap_pool unmap_pool[] = {
1353 	__UNMAP_POOL(2),
1354 	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1355 	__UNMAP_POOL(16),
1356 	__UNMAP_POOL(128),
1357 	__UNMAP_POOL(256),
1358 	#endif
1359 };
1360 
1361 static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1362 {
1363 	int order = get_count_order(nr);
1364 
1365 	switch (order) {
1366 	case 0 ... 1:
1367 		return &unmap_pool[0];
1368 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1369 	case 2 ... 4:
1370 		return &unmap_pool[1];
1371 	case 5 ... 7:
1372 		return &unmap_pool[2];
1373 	case 8:
1374 		return &unmap_pool[3];
1375 #endif
1376 	default:
1377 		BUG();
1378 		return NULL;
1379 	}
1380 }
1381 
1382 static void dmaengine_unmap(struct kref *kref)
1383 {
1384 	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1385 	struct device *dev = unmap->dev;
1386 	int cnt, i;
1387 
1388 	cnt = unmap->to_cnt;
1389 	for (i = 0; i < cnt; i++)
1390 		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1391 			       DMA_TO_DEVICE);
1392 	cnt += unmap->from_cnt;
1393 	for (; i < cnt; i++)
1394 		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1395 			       DMA_FROM_DEVICE);
1396 	cnt += unmap->bidi_cnt;
1397 	for (; i < cnt; i++) {
1398 		if (unmap->addr[i] == 0)
1399 			continue;
1400 		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1401 			       DMA_BIDIRECTIONAL);
1402 	}
1403 	cnt = unmap->map_cnt;
1404 	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1405 }
1406 
1407 void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1408 {
1409 	if (unmap)
1410 		kref_put(&unmap->kref, dmaengine_unmap);
1411 }
1412 EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1413 
1414 static void dmaengine_destroy_unmap_pool(void)
1415 {
1416 	int i;
1417 
1418 	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1419 		struct dmaengine_unmap_pool *p = &unmap_pool[i];
1420 
1421 		mempool_destroy(p->pool);
1422 		p->pool = NULL;
1423 		kmem_cache_destroy(p->cache);
1424 		p->cache = NULL;
1425 	}
1426 }
1427 
1428 static int __init dmaengine_init_unmap_pool(void)
1429 {
1430 	int i;
1431 
1432 	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1433 		struct dmaengine_unmap_pool *p = &unmap_pool[i];
1434 		size_t size;
1435 
1436 		size = sizeof(struct dmaengine_unmap_data) +
1437 		       sizeof(dma_addr_t) * p->size;
1438 
1439 		p->cache = kmem_cache_create(p->name, size, 0,
1440 					     SLAB_HWCACHE_ALIGN, NULL);
1441 		if (!p->cache)
1442 			break;
1443 		p->pool = mempool_create_slab_pool(1, p->cache);
1444 		if (!p->pool)
1445 			break;
1446 	}
1447 
1448 	if (i == ARRAY_SIZE(unmap_pool))
1449 		return 0;
1450 
1451 	dmaengine_destroy_unmap_pool();
1452 	return -ENOMEM;
1453 }
1454 
1455 struct dmaengine_unmap_data *
1456 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1457 {
1458 	struct dmaengine_unmap_data *unmap;
1459 
1460 	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1461 	if (!unmap)
1462 		return NULL;
1463 
1464 	memset(unmap, 0, sizeof(*unmap));
1465 	kref_init(&unmap->kref);
1466 	unmap->dev = dev;
1467 	unmap->map_cnt = nr;
1468 
1469 	return unmap;
1470 }
1471 EXPORT_SYMBOL(dmaengine_get_unmap_data);
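/*
 * Editorial example: a sketch of how an offload client typically uses unmap
 * data for one source and one destination page. The page/offset/len variables
 * are assumed to exist in the caller.
 *
 *	struct dmaengine_unmap_data *unmap;
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_page, src_off, len, DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_page, dst_off, len, DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *	...submit the descriptor referencing unmap->addr[]...
 *	dmaengine_unmap_put(unmap);
 */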
1472 
1473 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1474 	struct dma_chan *chan)
1475 {
1476 	tx->chan = chan;
1477 	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1478 	spin_lock_init(&tx->lock);
1479 	#endif
1480 }
1481 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1482 
1483 static inline int desc_check_and_set_metadata_mode(
1484 	struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
1485 {
1486 	/* Make sure that the metadata mode is not mixed */
1487 	if (!desc->desc_metadata_mode) {
1488 		if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
1489 			desc->desc_metadata_mode = mode;
1490 		else
1491 			return -ENOTSUPP;
1492 	} else if (desc->desc_metadata_mode != mode) {
1493 		return -EINVAL;
1494 	}
1495 
1496 	return 0;
1497 }
1498 
1499 int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
1500 				   void *data, size_t len)
1501 {
1502 	int ret;
1503 
1504 	if (!desc)
1505 		return -EINVAL;
1506 
1507 	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
1508 	if (ret)
1509 		return ret;
1510 
1511 	if (!desc->metadata_ops || !desc->metadata_ops->attach)
1512 		return -ENOTSUPP;
1513 
1514 	return desc->metadata_ops->attach(desc, data, len);
1515 }
1516 EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
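/*
 * Editorial example: a sketch of the DESC_METADATA_CLIENT flow, where the
 * client attaches its metadata buffer before submitting the descriptor.
 * buf, len, mdata and mdata_len are illustrative caller-owned values.
 *
 *	struct dma_async_tx_descriptor *desc;
 *	dma_cookie_t cookie;
 *	int ret;
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EINVAL;
 *	ret = dmaengine_desc_attach_metadata(desc, mdata, mdata_len);
 *	if (ret)
 *		return ret;
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */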
1517 
1518 void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
1519 				      size_t *payload_len, size_t *max_len)
1520 {
1521 	int ret;
1522 
1523 	if (!desc)
1524 		return ERR_PTR(-EINVAL);
1525 
1526 	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
1527 	if (ret)
1528 		return ERR_PTR(ret);
1529 
1530 	if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
1531 		return ERR_PTR(-ENOTSUPP);
1532 
1533 	return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
1534 }
1535 EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
1536 
1537 int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
1538 				    size_t payload_len)
1539 {
1540 	int ret;
1541 
1542 	if (!desc)
1543 		return -EINVAL;
1544 
1545 	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
1546 	if (ret)
1547 		return ret;
1548 
1549 	if (!desc->metadata_ops || !desc->metadata_ops->set_len)
1550 		return -ENOTSUPP;
1551 
1552 	return desc->metadata_ops->set_len(desc, payload_len);
1553 }
1554 EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
1555 
1556 /**
1557  * dma_wait_for_async_tx - spin wait for a transaction to complete
1558  * @tx:		in-flight transaction to wait on
1559  */
1560 enum dma_status
1561 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1562 {
1563 	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1564 
1565 	if (!tx)
1566 		return DMA_COMPLETE;
1567 
1568 	while (tx->cookie == -EBUSY) {
1569 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1570 			dev_err(tx->chan->device->dev,
1571 				"%s timeout waiting for descriptor submission\n",
1572 				__func__);
1573 			return DMA_ERROR;
1574 		}
1575 		cpu_relax();
1576 	}
1577 	return dma_sync_wait(tx->chan, tx->cookie);
1578 }
1579 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1580 
1581 /**
1582  * dma_run_dependencies - process dependent operations on the target channel
1583  * @tx:		transaction with dependencies
1584  *
1585  * Helper routine for DMA drivers to process (start) dependent operations
1586  * on their target channel.
1587  */
1588 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1589 {
1590 	struct dma_async_tx_descriptor *dep = txd_next(tx);
1591 	struct dma_async_tx_descriptor *dep_next;
1592 	struct dma_chan *chan;
1593 
1594 	if (!dep)
1595 		return;
1596 
1597 	/* we'll submit tx->next now, so clear the link */
1598 	txd_clear_next(tx);
1599 	chan = dep->chan;
1600 
1601 	/* keep submitting up until a channel switch is detected
1602 	 * in that case we will be called again as a result of
1603 	 * processing the interrupt from async_tx_channel_switch
1604 	 */
1605 	for (; dep; dep = dep_next) {
1606 		txd_lock(dep);
1607 		txd_clear_parent(dep);
1608 		dep_next = txd_next(dep);
1609 		if (dep_next && dep_next->chan == chan)
1610 			txd_clear_next(dep); /* ->next will be submitted */
1611 		else
1612 			dep_next = NULL; /* submit current dep and terminate */
1613 		txd_unlock(dep);
1614 
1615 		dep->tx_submit(dep);
1616 	}
1617 
1618 	chan->device->device_issue_pending(chan);
1619 }
1620 EXPORT_SYMBOL_GPL(dma_run_dependencies);
1621 
1622 static int __init dma_bus_init(void)
1623 {
1624 	int err = dmaengine_init_unmap_pool();
1625 
1626 	if (err)
1627 		return err;
1628 
1629 	err = class_register(&dma_devclass);
1630 	if (!err)
1631 		dmaengine_debugfs_init();
1632 
1633 	return err;
1634 }
1635 arch_initcall(dma_bus_init);
1636