xref: /linux/drivers/crypto/ccp/ccp-dmaengine.c (revision 0866ba23b7efcc6837d6b4231bf91b79647b81ea)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

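/*
 * Derive an address-width value from a DMA mask: an all-ones 64-bit mask
 * wraps to zero after the increment and maps to 64; any narrower mask maps
 * to fls64(mask + 1), e.g. DMA_BIT_MASK(48) + 1 == 1ULL << 48, for which
 * fls64() returns 49.
 */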
#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})

/* The CCP as a DMA provider can be configured for public or private
 * channels. The default is specified in the vdata for the device (PCI ID).
 * This module parameter overrides that setting for all channels on all
 * devices:
 *   dma_chan_attr = 0x2 to force all channels public
 *                 = 0x1 to force all channels private
 *                 = 0x0 to defer to the vdata setting
 *                 = any other value: warn and revert to 0x0
 */
static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");

static unsigned int dmaengine = 1;
module_param(dmaengine, uint, 0444);
MODULE_PARM_DESC(dmaengine, "Register services with the DMA subsystem (any non-zero value, default: 1)");

static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
	switch (dma_chan_attr) {
	case CCP_DMA_DFLT:
		return ccp->vdata->dma_chan_attr;

	case CCP_DMA_PRIV:
		return DMA_PRIVATE;

	case CCP_DMA_PUB:
		return 0;

	default:
		dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
			      dma_chan_attr);
		return ccp->vdata->dma_chan_attr;
	}
}

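/*
 * List teardown helpers: return every command (and, below, every
 * descriptor together with its command lists) to the per-device caches.
 */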
static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

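/*
 * dmaengine device_free_chan_resources hook: drop everything the channel
 * still holds, regardless of descriptor state.
 */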
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);
}

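/*
 * Free completed descriptors, but only once the client has acknowledged
 * them (async_tx_test_ack()); unacked descriptors stay on the list for a
 * later pass of the cleanup tasklet.
 */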
static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

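/*
 * Move the first pending command of @desc to its active list and hand it
 * to the CCP. -EINPROGRESS and -EBUSY are treated as success: the command
 * was accepted (or backlogged) and its callback will fire later.
 */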
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

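/*
 * Completion walk: free the command that just finished; when a descriptor
 * has no work left (or has failed), complete its cookie, invoke the client
 * callback and advance to the next active descriptor. Returns the first
 * descriptor found that still has commands to issue, or NULL.
 */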
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
			dma_descriptor_unmap(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

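/*
 * Splice the pending list onto the active list (caller holds chan->lock).
 * The first spliced descriptor is returned only when the active list was
 * empty, so the caller kicks off processing exactly once.
 */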
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
		: NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

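/*
 * Per-command completion callback, invoked by the CCP driver core. It
 * drives the channel forward: retire finished descriptors, then keep
 * issuing commands until one is accepted, the channel is paused, or the
 * active list runs dry.
 */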
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

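/*
 * dmaengine tx_submit hook: assign the cookie and move the descriptor
 * onto the pending list; nothing is issued until the client calls
 * dma_async_issue_pending().
 */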
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_move_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	return kmem_cache_zalloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

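/*
 * Build a DMA descriptor from a pair of scatterlists: walk src and dst in
 * lock step and emit one passthrough (no-map) CCP command per contiguous
 * chunk, each covering min(remaining src, remaining dst) bytes.
 */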
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_cmd->ccp = chan->ccp;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->created);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

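/*
 * dmaengine device_prep_dma_memcpy hook: wrap the two DMA addresses in
 * single-entry scatterlists and reuse the scatterlist path above.
 */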
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

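/*
 * dmaengine device_issue_pending hook: move submitted descriptors onto
 * the active list and, if the channel was idle, start processing by
 * invoking the command-complete callback with no error.
 */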
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

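/*
 * dmaengine device_tx_status hook: report the cookie state, preferring a
 * per-descriptor status (e.g. DMA_ERROR) still recorded on the complete
 * list over the generic cookie bookkeeping.
 */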
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/* TODO: Wait for active DMA to complete before returning? */

	return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/* TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/* TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

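/*
 * ccp_dmaengine_register() publishes each command queue as a
 * memcpy-capable DMA channel. A minimal consumer sketch via the generic
 * dmaengine API is shown below; it is illustrative only (error handling
 * trimmed, and the dma_addr_t values are assumed to be already mapped by
 * the caller):
 *
 *	struct dma_chan *chan = dma_request_chan_by_mask(&mask);
 *	struct dma_async_tx_descriptor *tx =
 *		dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	dma_cookie_t cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 *	dma_release_channel(chan);
 *
 * where @mask is a dma_cap_mask_t with DMA_MEMCPY set.
 */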
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	if (!dmaengine)
		return 0;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* The DMA channels for this device can be set to public or private,
	 * and overridden by the module parameter dma_chan_attr.
	 * Default: according to the value in vdata (dma_chan_attr=0)
	 * dma_chan_attr=0x1: all channels private (override vdata)
	 * dma_chan_attr=0x2: all channels public (override vdata)
	 */
	if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->created);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	if (!dmaengine)
		return;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}
758