xref: /linux/drivers/dma/dmaengine.c (revision 6b2d2cec1081a979e0efd6a1e9559e5a01a3c10e)
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem.  It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver
 * beforehand.
 *
 * Each client is responsible for keeping track of the channels it uses.  See
 * the definition of dma_event_callback in dmaengine.h.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered.  A kref_get is done for each class_device registered.  When the
 * class_device is released, the corresponding kref_put is done in the release
 * method.  Every time one of the device's channels is allocated to a client,
 * a kref_get occurs.  When the channel is freed, the corresponding kref_put
 * happens.  The device's release function does a completion, so
 * unregister_device does a remove event, class_device_unregister, a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t.  A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered.  A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client.  The release function does a kref_put on the device.
 *	-ChrisL, DanW
 */
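/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * consumer of the client interface described above.  The example_* names
 * are hypothetical; the types and entry points are the ones this file
 * operates on.
 *
 *	static enum dma_state_client
 *	example_event(struct dma_client *client, struct dma_chan *chan,
 *		      enum dma_state state)
 *	{
 *		return DMA_ACK;
 *	}
 *
 *	static struct dma_client example_client = {
 *		.event_callback = example_event,
 *	};
 *
 * and from the module's init path:
 *
 *	dma_cap_set(DMA_MEMCPY, example_client.cap_mask);
 *	dma_async_client_register(&example_client);
 *	dma_async_client_chan_request(&example_client);
 *
 * Returning DMA_ACK for DMA_RESOURCE_AVAILABLE takes a reference on the
 * offered channel; returning DMA_ACK again for DMA_RESOURCE_REMOVED
 * releases it (see dma_client_chan_alloc() and dma_clients_notify_removed()
 * below).
 */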

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	int in_use = 0;

	if (unlikely(chan->slow_ref) &&
		atomic_read(&chan->refcount.refcount) > 1)
		in_use = 1;
	else {
		if (local_read(&(per_cpu_ptr(chan->local,
			get_cpu())->refcount)) > 0)
			in_use = 1;
		put_cpu();
	}

	return sprintf(buf, "%d\n", in_use);
}

static struct class_device_attribute dma_class_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_class_dev_release(struct class_device *cd)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name            = "dma",
	.class_dev_attrs = dma_class_attrs,
	.release = dma_class_dev_release,
};

/* --- client and device registration --- */

#define dma_chan_satisfies_mask(chan, mask) \
	__dma_chan_satisfies_mask((chan), &(mask))
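/**
 * __dma_chan_satisfies_mask - check a channel against a capability mask
 * @chan: channel to test
 * @want: capabilities the caller requires
 *
 * Returns 1 when every capability set in @want is also set in the channel's
 * device cap_mask, i.e. @want is a subset of what the device provides.
 */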
static int
__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

/**
 * dma_client_chan_alloc - try to allocate channels to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static void dma_client_chan_alloc(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	int desc;	/* allocated descriptor count */
	enum dma_state_client ack;

	/* Find a channel */
	list_for_each_entry(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
				continue;

			desc = chan->device->device_alloc_chan_resources(chan);
			if (desc >= 0) {
				ack = client->event_callback(client,
						chan,
						DMA_RESOURCE_AVAILABLE);

				/* we are done once this client rejects
				 * an available resource
				 */
				if (ack == DMA_ACK)
					dma_chan_get(chan);
				else if (ack == DMA_NAK)
					return;
			}
		}
}

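/**
 * dma_sync_wait - spin until a cookie completes or five seconds elapse
 * @chan: channel the transaction was submitted on
 * @cookie: cookie returned by the submit path
 *
 * Busy-waits, so this is only suitable for slow-path callers that can
 * afford to burn a CPU; returns DMA_ERROR on timeout.
 */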
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
	chan->device->device_free_chan_resources(chan);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

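/*
 * Teardown half of the channel "bigref": dma_chan_release() adds a
 * 0x7FFFFFFF bias to the kref and sets ->slow_ref so that subsequent
 * gets/puts hit the kref instead of the per-cpu counters.  After an RCU
 * grace period, dma_chan_free_rcu() subtracts the bias minus whatever the
 * per-cpu counters still hold, folding the distributed counts into the
 * kref.  For example, with per-cpu counts summing to 3 the kref stands at
 * 1 + 3 after the fold; the kref_put() at the end of dma_chan_free_rcu()
 * drops the channel's own reference, leaving 3 to be released by the
 * remaining clients' dma_chan_put() calls, the last of which triggers
 * dma_chan_cleanup().
 */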
static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
	int bias = 0x7FFFFFFF;
	int i;

	for_each_possible_cpu(i)
		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
	atomic_sub(bias, &chan->refcount.refcount);
	kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
	chan->slow_ref = 1;
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */
static void dma_clients_notify_available(void)
{
	struct dma_client *client;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node)
		dma_client_chan_alloc(client);

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_clients_notify_removed - tell the clients that a channel is going away
 * @chan: channel on its way out
 */
static void dma_clients_notify_removed(struct dma_chan *chan)
{
	struct dma_client *client;
	enum dma_state_client ack;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node) {
		ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

		/* the client was holding a reference to this channel,
		 * so release it
		 */
		if (ack == DMA_ACK)
			dma_chan_put(chan);
	}

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_async_client_register - register a &dma_client
 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
 */
void dma_async_client_register(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	list_add_tail(&client->global_node, &dma_client_list);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_register);

/**
 * dma_async_client_unregister - unregister a client
 * @client: &dma_client to unregister
 *
 * Forces the release of any DMA channels the client still holds and removes
 * it from the client list; the caller remains responsible for the
 * &dma_client memory itself.
 */
void dma_async_client_unregister(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	enum dma_state_client ack;

	if (!client)
		return;

	mutex_lock(&dma_list_mutex);
	/* free all channels the client is holding */
	list_for_each_entry(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node) {
			ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

			if (ack == DMA_ACK)
				dma_chan_put(chan);
		}

	list_del(&client->global_node);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_unregister);

/**
 * dma_async_client_chan_request - send all available channels that satisfy
 * the capability mask to the client
 * @client: requester
 */
void dma_async_client_chan_request(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	dma_client_chan_alloc(client);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);

/**
 * dma_async_device_register - register a DMA device with the subsystem
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0, rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_dependency_added);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	init_completion(&device->done);
	kref_init(&device->refcount);
	device->dev_id = id++;

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;

		chan->chan_id = chancnt++;
		chan->class_dev.class = &dma_devclass;
		chan->class_dev.dev = NULL;
		snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
		         device->dev_id, chan->chan_id);

		rc = class_device_register(&chan->class_dev);
		if (rc) {
			chancnt--;
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		/* one reference for the channel, one for the class device */
		kref_get(&device->refcount);
		kref_get(&device->refcount);
		kref_init(&chan->refcount);
		chan->slow_ref = 0;
		INIT_RCU_HEAD(&chan->rcu);
	}

	mutex_lock(&dma_list_mutex);
	list_add_tail(&device->global_node, &dma_device_list);
	mutex_unlock(&dma_list_mutex);

	dma_clients_notify_available();

	return 0;

err_out:
	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		kref_put(&device->refcount, dma_async_device_cleanup);
		class_device_unregister(&chan->class_dev);
		chancnt--;
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

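/*
 * Example (illustrative sketch, not part of the original file): the shape
 * of a provider driver's probe path.  All example_* names are hypothetical
 * stand-ins; the fields and the registration call are the ones validated
 * by dma_async_device_register() above.
 *
 *	struct dma_device *dma = &example_dev->common;
 *
 *	INIT_LIST_HEAD(&dma->channels);
 *	list_add_tail(&example_chan->common.device_node, &dma->channels);
 *
 *	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
 *	dma->device_alloc_chan_resources = example_alloc_chan_resources;
 *	dma->device_free_chan_resources = example_free_chan_resources;
 *	dma->device_prep_dma_memcpy = example_prep_dma_memcpy;
 *	dma->device_dependency_added = example_dependency_added;
 *	dma->device_is_tx_complete = example_is_tx_complete;
 *	dma->device_issue_pending = example_issue_pending;
 *	dma->dev = &pdev->dev;
 *
 *	err = dma_async_device_register(dma);
 */
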
/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
	struct dma_device *device;

	device = container_of(kref, struct dma_device, refcount);
	complete(&device->done);
}

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del(&device->global_node);
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		dma_clients_notify_removed(chan);
		class_device_unregister(&chan->class_dev);
		dma_chan_release(chan);
	}

	kref_put(&device->refcount, dma_async_device_cleanup);
	wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);

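/*
 * Example (illustrative sketch, not part of the original file): offload a
 * copy and wait for it synchronously.  chan, dst, src and len are assumed
 * to be supplied by the caller; chan must have been obtained through the
 * client interface above.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (cookie < 0)
 *		return -ENOMEM;
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		return -EIO;
 */
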
/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

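/**
 * dma_async_tx_descriptor_init - common initialization for a tx descriptor
 * @tx: descriptor, typically embedded in a driver-private structure
 * @chan: channel that will own the descriptor
 *
 * Provider drivers call this from their descriptor allocation paths before
 * filling in the operation-specific fields.
 */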
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
	INIT_LIST_HEAD(&tx->depend_node);
	INIT_LIST_HEAD(&tx->depend_list);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);