xref: /linux/drivers/dma/mv_xor.c (revision 005438a8eef063495ac059d128eea71b58de50e5)
1 /*
2  * offload engine driver for the Marvell XOR engine
3  * Copyright (C) 2007, 2008, Marvell International Ltd.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  */
14 
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/delay.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/spinlock.h>
21 #include <linux/interrupt.h>
22 #include <linux/of_device.h>
23 #include <linux/platform_device.h>
24 #include <linux/memory.h>
25 #include <linux/clk.h>
26 #include <linux/of.h>
27 #include <linux/of_irq.h>
28 #include <linux/irqdomain.h>
29 #include <linux/platform_data/dma-mv_xor.h>
30 
31 #include "dmaengine.h"
32 #include "mv_xor.h"
33 
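/*
 * The operation (XOR vs. MEMCPY) is either programmed once in the channel
 * configuration register, or selected per descriptor on controllers that
 * support it (see the "marvell,armada-380-xor" compatible below).
 */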
34 enum mv_xor_mode {
35 	XOR_MODE_IN_REG,
36 	XOR_MODE_IN_DESC,
37 };
38 
39 static void mv_xor_issue_pending(struct dma_chan *chan);
40 
41 #define to_mv_xor_chan(chan)		\
42 	container_of(chan, struct mv_xor_chan, dmachan)
43 
44 #define to_mv_xor_slot(tx)		\
45 	container_of(tx, struct mv_xor_desc_slot, async_tx)
46 
47 #define mv_chan_to_devp(chan)           \
48 	((chan)->dmadev.dev)
49 
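/*
 * Initialize a hardware descriptor: mark it as owned by the DMA engine,
 * clear the next-descriptor link and program the destination address and
 * byte count.  Source addresses are filled in separately through
 * mv_desc_set_src_addr().
 */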
50 static void mv_desc_init(struct mv_xor_desc_slot *desc,
51 			 dma_addr_t addr, u32 byte_count,
52 			 enum dma_ctrl_flags flags)
53 {
54 	struct mv_xor_desc *hw_desc = desc->hw_desc;
55 
56 	hw_desc->status = XOR_DESC_DMA_OWNED;
57 	hw_desc->phy_next_desc = 0;
58 	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
59 	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
60 				XOR_DESC_EOD_INT_EN : 0;
61 	hw_desc->phy_dest_addr = addr;
62 	hw_desc->byte_count = byte_count;
63 }
64 
65 static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
66 {
67 	struct mv_xor_desc *hw_desc = desc->hw_desc;
68 
69 	switch (desc->type) {
70 	case DMA_XOR:
71 	case DMA_INTERRUPT:
72 		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
73 		break;
74 	case DMA_MEMCPY:
75 		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
76 		break;
77 	default:
78 		BUG();
79 		return;
80 	}
81 }
82 
83 static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
84 				  u32 next_desc_addr)
85 {
86 	struct mv_xor_desc *hw_desc = desc->hw_desc;
87 	BUG_ON(hw_desc->phy_next_desc);
88 	hw_desc->phy_next_desc = next_desc_addr;
89 }
90 
91 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
92 				 int index, dma_addr_t addr)
93 {
94 	struct mv_xor_desc *hw_desc = desc->hw_desc;
95 	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
96 	if (desc->type == DMA_XOR)
97 		hw_desc->desc_command |= (1 << index);
98 }
99 
100 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
101 {
102 	return readl_relaxed(XOR_CURR_DESC(chan));
103 }
104 
105 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
106 					u32 next_desc_addr)
107 {
108 	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
109 }
110 
111 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
112 {
113 	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
114 	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
115 	writel_relaxed(val, XOR_INTR_MASK(chan));
116 }
117 
118 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
119 {
120 	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
121 	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
122 	return intr_cause;
123 }
124 
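/*
 * Acknowledge the end-of-descriptor, end-of-chain and stopped interrupts
 * for this channel in the shared cause register (16 bits per channel).
 */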
125 static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
126 {
127 	u32 val;
128 
129 	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
130 	val = ~(val << (chan->idx * 16));
131 	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
132 	writel_relaxed(val, XOR_INTR_CAUSE(chan));
133 }
134 
135 static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
136 {
137 	u32 val = 0xFFFF0000 >> (chan->idx * 16);
138 	writel_relaxed(val, XOR_INTR_CAUSE(chan));
139 }
140 
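/*
 * Program the operation mode (XOR or MEMCPY) in the channel configuration
 * register; used when the controller selects the operation per channel
 * rather than per descriptor (XOR_MODE_IN_REG).
 */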
141 static void mv_chan_set_mode(struct mv_xor_chan *chan,
142 			     enum dma_transaction_type type)
143 {
144 	u32 op_mode;
145 	u32 config = readl_relaxed(XOR_CONFIG(chan));
146 
147 	switch (type) {
148 	case DMA_XOR:
149 		op_mode = XOR_OPERATION_MODE_XOR;
150 		break;
151 	case DMA_MEMCPY:
152 		op_mode = XOR_OPERATION_MODE_MEMCPY;
153 		break;
154 	default:
155 		dev_err(mv_chan_to_devp(chan),
156 			"error: unsupported operation %d\n",
157 			type);
158 		BUG();
159 		return;
160 	}
161 
162 	config &= ~0x7;
163 	config |= op_mode;
164 
165 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
166 		config |= XOR_DESCRIPTOR_SWAP;
167 	else
168 		config &= ~XOR_DESCRIPTOR_SWAP;
169 
170 	writel_relaxed(config, XOR_CONFIG(chan));
171 	chan->current_type = type;
172 }
173 
174 static void mv_chan_set_mode_to_desc(struct mv_xor_chan *chan)
175 {
176 	u32 op_mode;
177 	u32 config = readl_relaxed(XOR_CONFIG(chan));
178 
179 	op_mode = XOR_OPERATION_MODE_IN_DESC;
180 
181 	config &= ~0x7;
182 	config |= op_mode;
183 
184 #if defined(__BIG_ENDIAN)
185 	config |= XOR_DESCRIPTOR_SWAP;
186 #else
187 	config &= ~XOR_DESCRIPTOR_SWAP;
188 #endif
189 
190 	writel_relaxed(config, XOR_CONFIG(chan));
191 }
192 
193 static void mv_chan_activate(struct mv_xor_chan *chan)
194 {
195 	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
196 
197 	/* writel ensures all descriptors are flushed before activation */
198 	writel(BIT(0), XOR_ACTIVATION(chan));
199 }
200 
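/* Return 1 if the channel's activation status field reports it active, 0 otherwise */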
201 static char mv_chan_is_busy(struct mv_xor_chan *chan)
202 {
203 	u32 state = readl_relaxed(XOR_ACTIVATION(chan));
204 
205 	state = (state >> 4) & 0x3;
206 
207 	return (state == 1) ? 1 : 0;
208 }
209 
210 /*
211  * mv_chan_start_new_chain - program the engine to operate on new
212  * chain headed by sw_desc
213  * Caller must hold &mv_chan->lock while calling this function
214  */
215 static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
216 				    struct mv_xor_desc_slot *sw_desc)
217 {
218 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
219 		__func__, __LINE__, sw_desc);
220 
221 	/* set the hardware chain */
222 	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
223 
224 	mv_chan->pending++;
225 	mv_xor_issue_pending(&mv_chan->dmachan);
226 }
227 
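/*
 * Completion actions for a finished descriptor: if it carries a valid
 * cookie, invoke the client callback and unmap its buffers, then run any
 * dependent transactions.  Returns the completed cookie, or the cookie
 * passed in if this descriptor had none assigned.
 */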
228 static dma_cookie_t
229 mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
230 				struct mv_xor_chan *mv_chan,
231 				dma_cookie_t cookie)
232 {
233 	BUG_ON(desc->async_tx.cookie < 0);
234 
235 	if (desc->async_tx.cookie > 0) {
236 		cookie = desc->async_tx.cookie;
237 
238 		/* call the callback (must not sleep or submit new
239 		 * operations to this channel)
240 		 */
241 		if (desc->async_tx.callback)
242 			desc->async_tx.callback(
243 				desc->async_tx.callback_param);
244 
245 		dma_descriptor_unmap(&desc->async_tx);
246 	}
247 
248 	/* run dependent operations */
249 	dma_run_dependencies(&desc->async_tx);
250 
251 	return cookie;
252 }
253 
254 static int
255 mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
256 {
257 	struct mv_xor_desc_slot *iter, *_iter;
258 
259 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
260 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
261 				 node) {
262 
263 		if (async_tx_test_ack(&iter->async_tx))
264 			list_move_tail(&iter->node, &mv_chan->free_slots);
265 	}
266 	return 0;
267 }
268 
269 static int
270 mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
271 		   struct mv_xor_chan *mv_chan)
272 {
273 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
274 		__func__, __LINE__, desc, desc->async_tx.flags);
275 
276 	/* the client is allowed to attach dependent operations
277 	 * until 'ack' is set
278 	 */
279 	if (!async_tx_test_ack(&desc->async_tx))
280 		/* move this slot to the completed_slots */
281 		list_move_tail(&desc->node, &mv_chan->completed_slots);
282 	else
283 		list_move_tail(&desc->node, &mv_chan->free_slots);
284 
285 	return 0;
286 }
287 
288 /* This function must be called with the mv_xor_chan spinlock held */
289 static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
290 {
291 	struct mv_xor_desc_slot *iter, *_iter;
292 	dma_cookie_t cookie = 0;
293 	int busy = mv_chan_is_busy(mv_chan);
294 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
295 	int current_cleaned = 0;
296 	struct mv_xor_desc *hw_desc;
297 
298 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
299 	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
300 	mv_chan_clean_completed_slots(mv_chan);
301 
302 	/* free completed slots from the chain starting with
303 	 * the oldest descriptor
304 	 */
305 
306 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
307 				 node) {
308 
309 		/* clean finished descriptors */
310 		hw_desc = iter->hw_desc;
311 		if (hw_desc->status & XOR_DESC_SUCCESS) {
312 			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
313 								 cookie);
314 
315 			/* done processing desc, clean slot */
316 			mv_desc_clean_slot(iter, mv_chan);
317 
318 			/* break if we cleaned the current descriptor */
319 			if (iter->async_tx.phys == current_desc) {
320 				current_cleaned = 1;
321 				break;
322 			}
323 		} else {
324 			if (iter->async_tx.phys == current_desc) {
325 				current_cleaned = 0;
326 				break;
327 			}
328 		}
329 	}
330 
331 	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
332 		if (current_cleaned) {
333 			/*
334 			 * current descriptor cleaned and removed, run
335 			 * from list head
336 			 */
337 			iter = list_entry(mv_chan->chain.next,
338 					  struct mv_xor_desc_slot,
339 					  node);
340 			mv_chan_start_new_chain(mv_chan, iter);
341 		} else {
342 			if (!list_is_last(&iter->node, &mv_chan->chain)) {
343 				/*
344 				 * descriptors are still waiting after
345 				 * current, trigger them
346 				 */
347 				iter = list_entry(iter->node.next,
348 						  struct mv_xor_desc_slot,
349 						  node);
350 				mv_chan_start_new_chain(mv_chan, iter);
351 			} else {
352 				/*
353 				 * some descriptors are still waiting
354 				 * to be cleaned
355 				 */
356 				tasklet_schedule(&mv_chan->irq_tasklet);
357 			}
358 		}
359 	}
360 
361 	if (cookie > 0)
362 		mv_chan->dmachan.completed_cookie = cookie;
363 }
364 
365 static void mv_xor_tasklet(unsigned long data)
366 {
367 	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
368 
369 	spin_lock_bh(&chan->lock);
370 	mv_chan_slot_cleanup(chan);
371 	spin_unlock_bh(&chan->lock);
372 }
373 
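/*
 * Take a descriptor from the free list, move it to allocated_slots and
 * pre-ack it.  If no slot is available, schedule the cleanup tasklet to
 * reclaim completed descriptors and return NULL.
 */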
374 static struct mv_xor_desc_slot *
375 mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
376 {
377 	struct mv_xor_desc_slot *iter;
378 
379 	spin_lock_bh(&mv_chan->lock);
380 
381 	if (!list_empty(&mv_chan->free_slots)) {
382 		iter = list_first_entry(&mv_chan->free_slots,
383 					struct mv_xor_desc_slot,
384 					node);
385 
386 		list_move_tail(&iter->node, &mv_chan->allocated_slots);
387 
388 		spin_unlock_bh(&mv_chan->lock);
389 
390 		/* pre-ack descriptor */
391 		async_tx_ack(&iter->async_tx);
392 		iter->async_tx.cookie = -EBUSY;
393 
394 		return iter;
395 
396 	}
397 
398 	spin_unlock_bh(&mv_chan->lock);
399 
400 	/* try to free some slots if the allocation fails */
401 	tasklet_schedule(&mv_chan->irq_tasklet);
402 
403 	return NULL;
404 }
405 
406 /************************ DMA engine API functions ****************************/
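/*
 * ->tx_submit() hook: assign a cookie and append the descriptor to the
 * software chain.  If the chain was empty, or the engine is idle at the
 * old chain tail, (re)start the hardware on the new descriptor.
 */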
407 static dma_cookie_t
408 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
409 {
410 	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
411 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
412 	struct mv_xor_desc_slot *old_chain_tail;
413 	dma_cookie_t cookie;
414 	int new_hw_chain = 1;
415 
416 	dev_dbg(mv_chan_to_devp(mv_chan),
417 		"%s sw_desc %p: async_tx %p\n",
418 		__func__, sw_desc, &sw_desc->async_tx);
419 
420 	spin_lock_bh(&mv_chan->lock);
421 	cookie = dma_cookie_assign(tx);
422 
423 	if (list_empty(&mv_chan->chain))
424 		list_move_tail(&sw_desc->node, &mv_chan->chain);
425 	else {
426 		new_hw_chain = 0;
427 
428 		old_chain_tail = list_entry(mv_chan->chain.prev,
429 					    struct mv_xor_desc_slot,
430 					    node);
431 		list_move_tail(&sw_desc->node, &mv_chan->chain);
432 
433 		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
434 			&old_chain_tail->async_tx.phys);
435 
436 		/* fix up the hardware chain */
437 		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);
438 
439 		/* if the channel is not busy */
440 		if (!mv_chan_is_busy(mv_chan)) {
441 			u32 current_desc = mv_chan_get_current_desc(mv_chan);
442 			/*
443 			 * and the current desc is the end of the chain before
444 			 * the append, then we need to start the channel
445 			 */
446 			if (current_desc == old_chain_tail->async_tx.phys)
447 				new_hw_chain = 1;
448 		}
449 	}
450 
451 	if (new_hw_chain)
452 		mv_chan_start_new_chain(mv_chan, sw_desc);
453 
454 	spin_unlock_bh(&mv_chan->lock);
455 
456 	return cookie;
457 }
458 
459 /* returns the number of allocated descriptors */
460 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
461 {
462 	void *virt_desc;
463 	dma_addr_t dma_desc;
464 	int idx;
465 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
466 	struct mv_xor_desc_slot *slot = NULL;
467 	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
468 
469 	/* Allocate descriptor slots */
470 	idx = mv_chan->slots_allocated;
471 	while (idx < num_descs_in_pool) {
472 		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
473 		if (!slot) {
474 			dev_info(mv_chan_to_devp(mv_chan),
475 				 "channel only initialized %d descriptor slots\n",
476 				 idx);
477 			break;
478 		}
479 		virt_desc = mv_chan->dma_desc_pool_virt;
480 		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
481 
482 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
483 		slot->async_tx.tx_submit = mv_xor_tx_submit;
484 		INIT_LIST_HEAD(&slot->node);
485 		dma_desc = mv_chan->dma_desc_pool;
486 		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
487 		slot->idx = idx++;
488 
489 		spin_lock_bh(&mv_chan->lock);
490 		mv_chan->slots_allocated = idx;
491 		list_add_tail(&slot->node, &mv_chan->free_slots);
492 		spin_unlock_bh(&mv_chan->lock);
493 	}
494 
495 	dev_dbg(mv_chan_to_devp(mv_chan),
496 		"allocated %d descriptor slots\n",
497 		mv_chan->slots_allocated);
498 
499 	return mv_chan->slots_allocated ? : -ENOMEM;
500 }
501 
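/*
 * Prepare an XOR descriptor: grab a free slot, program the destination,
 * length and the src_cnt source addresses.  Returns NULL if the length
 * is below the engine minimum or no descriptor slot is available.
 */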
502 static struct dma_async_tx_descriptor *
503 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
504 		    unsigned int src_cnt, size_t len, unsigned long flags)
505 {
506 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
507 	struct mv_xor_desc_slot *sw_desc;
508 
509 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
510 		return NULL;
511 
512 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
513 
514 	dev_dbg(mv_chan_to_devp(mv_chan),
515 		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
516 		__func__, src_cnt, len, &dest, flags);
517 
518 	sw_desc = mv_chan_alloc_slot(mv_chan);
519 	if (sw_desc) {
520 		sw_desc->type = DMA_XOR;
521 		sw_desc->async_tx.flags = flags;
522 		mv_desc_init(sw_desc, dest, len, flags);
523 		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
524 			mv_desc_set_mode(sw_desc);
525 		while (src_cnt--)
526 			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
527 	}
528 
529 	dev_dbg(mv_chan_to_devp(mv_chan),
530 		"%s sw_desc %p async_tx %p\n",
531 		__func__, sw_desc, &sw_desc->async_tx);
532 	return sw_desc ? &sw_desc->async_tx : NULL;
533 }
534 
535 static struct dma_async_tx_descriptor *
536 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
537 		size_t len, unsigned long flags)
538 {
539 	/*
540 	 * A MEMCPY operation is identical to an XOR operation with only
541 	 * a single source address.
542 	 */
543 	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
544 }
545 
546 static struct dma_async_tx_descriptor *
547 mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
548 {
549 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
550 	dma_addr_t src, dest;
551 	size_t len;
552 
553 	src = mv_chan->dummy_src_addr;
554 	dest = mv_chan->dummy_dst_addr;
555 	len = MV_XOR_MIN_BYTE_COUNT;
556 
557 	/*
558 	 * We implement the DMA_INTERRUPT operation as a minimum sized
559 	 * XOR operation with a single dummy source address.
560 	 */
561 	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
562 }
563 
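/*
 * Return every descriptor (chained, completed or merely allocated) to the
 * free list, then free all slots.  Complain if any descriptor was still
 * in use by the hardware.
 */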
564 static void mv_xor_free_chan_resources(struct dma_chan *chan)
565 {
566 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
567 	struct mv_xor_desc_slot *iter, *_iter;
568 	int in_use_descs = 0;
569 
570 	spin_lock_bh(&mv_chan->lock);
571 
572 	mv_chan_slot_cleanup(mv_chan);
573 
574 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
575 					node) {
576 		in_use_descs++;
577 		list_move_tail(&iter->node, &mv_chan->free_slots);
578 	}
579 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
580 				 node) {
581 		in_use_descs++;
582 		list_move_tail(&iter->node, &mv_chan->free_slots);
583 	}
584 	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
585 				 node) {
586 		in_use_descs++;
587 		list_move_tail(&iter->node, &mv_chan->free_slots);
588 	}
589 	list_for_each_entry_safe_reverse(
590 		iter, _iter, &mv_chan->free_slots, node) {
591 		list_del(&iter->node);
592 		kfree(iter);
593 		mv_chan->slots_allocated--;
594 	}
595 
596 	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
597 		__func__, mv_chan->slots_allocated);
598 	spin_unlock_bh(&mv_chan->lock);
599 
600 	if (in_use_descs)
601 		dev_err(mv_chan_to_devp(mv_chan),
602 			"freeing %d in use descriptors!\n", in_use_descs);
603 }
604 
605 /**
606  * mv_xor_status - poll the status of an XOR transaction
607  * @chan: XOR channel handle
608  * @cookie: XOR transaction identifier
609  * @txstate: XOR transaction state holder (or NULL)
610  */
611 static enum dma_status mv_xor_status(struct dma_chan *chan,
612 					  dma_cookie_t cookie,
613 					  struct dma_tx_state *txstate)
614 {
615 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
616 	enum dma_status ret;
617 
618 	ret = dma_cookie_status(chan, cookie, txstate);
619 	if (ret == DMA_COMPLETE)
620 		return ret;
621 
622 	spin_lock_bh(&mv_chan->lock);
623 	mv_chan_slot_cleanup(mv_chan);
624 	spin_unlock_bh(&mv_chan->lock);
625 
626 	return dma_cookie_status(chan, cookie, txstate);
627 }
628 
629 static void mv_chan_dump_regs(struct mv_xor_chan *chan)
630 {
631 	u32 val;
632 
633 	val = readl_relaxed(XOR_CONFIG(chan));
634 	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
635 
636 	val = readl_relaxed(XOR_ACTIVATION(chan));
637 	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
638 
639 	val = readl_relaxed(XOR_INTR_CAUSE(chan));
640 	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
641 
642 	val = readl_relaxed(XOR_INTR_MASK(chan));
643 	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
644 
645 	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
646 	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
647 
648 	val = readl_relaxed(XOR_ERROR_ADDR(chan));
649 	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
650 }
651 
652 static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
653 					  u32 intr_cause)
654 {
655 	if (intr_cause & XOR_INT_ERR_DECODE) {
656 		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
657 		return;
658 	}
659 
660 	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
661 		chan->idx, intr_cause);
662 
663 	mv_chan_dump_regs(chan);
664 	WARN_ON(1);
665 }
666 
667 static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
668 {
669 	struct mv_xor_chan *chan = data;
670 	u32 intr_cause = mv_chan_get_intr_cause(chan);
671 
672 	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
673 
674 	if (intr_cause & XOR_INTR_ERRORS)
675 		mv_chan_err_interrupt_handler(chan, intr_cause);
676 
677 	tasklet_schedule(&chan->irq_tasklet);
678 
679 	mv_chan_clear_eoc_cause(chan);
680 
681 	return IRQ_HANDLED;
682 }
683 
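/* Kick the hardware once at least MV_XOR_THRESHOLD submissions are pending */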
684 static void mv_xor_issue_pending(struct dma_chan *chan)
685 {
686 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
687 
688 	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
689 		mv_chan->pending = 0;
690 		mv_chan_activate(mv_chan);
691 	}
692 }
693 
694 /*
695  * Perform a transaction to verify the HW works.
696  */
697 
698 static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
699 {
700 	int i, ret;
701 	void *src, *dest;
702 	dma_addr_t src_dma, dest_dma;
703 	struct dma_chan *dma_chan;
704 	dma_cookie_t cookie;
705 	struct dma_async_tx_descriptor *tx;
706 	struct dmaengine_unmap_data *unmap;
707 	int err = 0;
708 
709 	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
710 	if (!src)
711 		return -ENOMEM;
712 
713 	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
714 	if (!dest) {
715 		kfree(src);
716 		return -ENOMEM;
717 	}
718 
719 	/* Fill in src buffer */
720 	for (i = 0; i < PAGE_SIZE; i++)
721 		((u8 *) src)[i] = (u8)i;
722 
723 	dma_chan = &mv_chan->dmachan;
724 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
725 		err = -ENODEV;
726 		goto out;
727 	}
728 
729 	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
730 	if (!unmap) {
731 		err = -ENOMEM;
732 		goto free_resources;
733 	}
734 
735 	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
736 				 PAGE_SIZE, DMA_TO_DEVICE);
737 	unmap->addr[0] = src_dma;
738 
739 	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
740 	if (ret) {
741 		err = -ENOMEM;
742 		goto free_resources;
743 	}
744 	unmap->to_cnt = 1;
745 
746 	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
747 				  PAGE_SIZE, DMA_FROM_DEVICE);
748 	unmap->addr[1] = dest_dma;
749 
750 	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
751 	if (ret) {
752 		err = -ENOMEM;
753 		goto free_resources;
754 	}
755 	unmap->from_cnt = 1;
756 	unmap->len = PAGE_SIZE;
757 
758 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
759 				    PAGE_SIZE, 0);
760 	if (!tx) {
761 		dev_err(dma_chan->device->dev,
762 			"Self-test cannot prepare operation, disabling\n");
763 		err = -ENODEV;
764 		goto free_resources;
765 	}
766 
767 	cookie = mv_xor_tx_submit(tx);
768 	if (dma_submit_error(cookie)) {
769 		dev_err(dma_chan->device->dev,
770 			"Self-test submit error, disabling\n");
771 		err = -ENODEV;
772 		goto free_resources;
773 	}
774 
775 	mv_xor_issue_pending(dma_chan);
776 	async_tx_ack(tx);
777 	msleep(1);
778 
779 	if (mv_xor_status(dma_chan, cookie, NULL) !=
780 	    DMA_COMPLETE) {
781 		dev_err(dma_chan->device->dev,
782 			"Self-test copy timed out, disabling\n");
783 		err = -ENODEV;
784 		goto free_resources;
785 	}
786 
787 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
788 				PAGE_SIZE, DMA_FROM_DEVICE);
789 	if (memcmp(src, dest, PAGE_SIZE)) {
790 		dev_err(dma_chan->device->dev,
791 			"Self-test copy failed compare, disabling\n");
792 		err = -ENODEV;
793 		goto free_resources;
794 	}
795 
796 free_resources:
797 	dmaengine_unmap_put(unmap);
798 	mv_xor_free_chan_resources(dma_chan);
799 out:
800 	kfree(src);
801 	kfree(dest);
802 	return err;
803 }
804 
805 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
806 static int
807 mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
808 {
809 	int i, src_idx, ret;
810 	struct page *dest;
811 	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
812 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
813 	dma_addr_t dest_dma;
814 	struct dma_async_tx_descriptor *tx;
815 	struct dmaengine_unmap_data *unmap;
816 	struct dma_chan *dma_chan;
817 	dma_cookie_t cookie;
818 	u8 cmp_byte = 0;
819 	u32 cmp_word;
820 	int err = 0;
821 	int src_count = MV_XOR_NUM_SRC_TEST;
822 
823 	for (src_idx = 0; src_idx < src_count; src_idx++) {
824 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
825 		if (!xor_srcs[src_idx]) {
826 			while (src_idx--)
827 				__free_page(xor_srcs[src_idx]);
828 			return -ENOMEM;
829 		}
830 	}
831 
832 	dest = alloc_page(GFP_KERNEL);
833 	if (!dest) {
834 		while (src_idx--)
835 			__free_page(xor_srcs[src_idx]);
836 		return -ENOMEM;
837 	}
838 
839 	/* Fill in src buffers */
840 	for (src_idx = 0; src_idx < src_count; src_idx++) {
841 		u8 *ptr = page_address(xor_srcs[src_idx]);
842 		for (i = 0; i < PAGE_SIZE; i++)
843 			ptr[i] = (1 << src_idx);
844 	}
845 
846 	for (src_idx = 0; src_idx < src_count; src_idx++)
847 		cmp_byte ^= (u8) (1 << src_idx);
848 
849 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
850 		(cmp_byte << 8) | cmp_byte;
851 
852 	memset(page_address(dest), 0, PAGE_SIZE);
853 
854 	dma_chan = &mv_chan->dmachan;
855 	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
856 		err = -ENODEV;
857 		goto out;
858 	}
859 
860 	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
861 					 GFP_KERNEL);
862 	if (!unmap) {
863 		err = -ENOMEM;
864 		goto free_resources;
865 	}
866 
867 	/* test xor */
868 	for (i = 0; i < src_count; i++) {
869 		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
870 					      0, PAGE_SIZE, DMA_TO_DEVICE);
871 		dma_srcs[i] = unmap->addr[i];
872 		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
873 		if (ret) {
874 			err = -ENOMEM;
875 			goto free_resources;
876 		}
877 		unmap->to_cnt++;
878 	}
879 
880 	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
881 				      DMA_FROM_DEVICE);
882 	dest_dma = unmap->addr[src_count];
883 	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
884 	if (ret) {
885 		err = -ENOMEM;
886 		goto free_resources;
887 	}
888 	unmap->from_cnt = 1;
889 	unmap->len = PAGE_SIZE;
890 
891 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
892 				 src_count, PAGE_SIZE, 0);
893 	if (!tx) {
894 		dev_err(dma_chan->device->dev,
895 			"Self-test cannot prepare operation, disabling\n");
896 		err = -ENODEV;
897 		goto free_resources;
898 	}
899 
900 	cookie = mv_xor_tx_submit(tx);
901 	if (dma_submit_error(cookie)) {
902 		dev_err(dma_chan->device->dev,
903 			"Self-test submit error, disabling\n");
904 		err = -ENODEV;
905 		goto free_resources;
906 	}
907 
908 	mv_xor_issue_pending(dma_chan);
909 	async_tx_ack(tx);
910 	msleep(8);
911 
912 	if (mv_xor_status(dma_chan, cookie, NULL) !=
913 	    DMA_COMPLETE) {
914 		dev_err(dma_chan->device->dev,
915 			"Self-test xor timed out, disabling\n");
916 		err = -ENODEV;
917 		goto free_resources;
918 	}
919 
920 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
921 				PAGE_SIZE, DMA_FROM_DEVICE);
922 	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
923 		u32 *ptr = page_address(dest);
924 		if (ptr[i] != cmp_word) {
925 			dev_err(dma_chan->device->dev,
926 				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
927 				i, ptr[i], cmp_word);
928 			err = -ENODEV;
929 			goto free_resources;
930 		}
931 	}
932 
933 free_resources:
934 	dmaengine_unmap_put(unmap);
935 	mv_xor_free_chan_resources(dma_chan);
936 out:
937 	src_idx = src_count;
938 	while (src_idx--)
939 		__free_page(xor_srcs[src_idx]);
940 	__free_page(dest);
941 	return err;
942 }
943 
944 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
945 {
946 	struct dma_chan *chan, *_chan;
947 	struct device *dev = mv_chan->dmadev.dev;
948 
949 	dma_async_device_unregister(&mv_chan->dmadev);
950 
951 	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
952 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
953 	dma_unmap_single(dev, mv_chan->dummy_src_addr,
954 			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
955 	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
956 			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
957 
958 	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
959 				 device_node) {
960 		list_del(&chan->device_node);
961 	}
962 
963 	free_irq(mv_chan->irq, mv_chan);
964 
965 	return 0;
966 }
967 
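/*
 * Set up one XOR channel: map the dummy buffers used for DMA_INTERRUPT,
 * allocate the descriptor pool, install the dmaengine callbacks, request
 * the interrupt, run the self tests and register the DMA device.
 */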
968 static struct mv_xor_chan *
969 mv_xor_channel_add(struct mv_xor_device *xordev,
970 		   struct platform_device *pdev,
971 		   int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc)
972 {
973 	int ret = 0;
974 	struct mv_xor_chan *mv_chan;
975 	struct dma_device *dma_dev;
976 
977 	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
978 	if (!mv_chan)
979 		return ERR_PTR(-ENOMEM);
980 
981 	mv_chan->idx = idx;
982 	mv_chan->irq = irq;
983 	mv_chan->op_in_desc = op_in_desc;
984 
985 	dma_dev = &mv_chan->dmadev;
986 
987 	/*
988 	 * These source and destination dummy buffers are used to implement
989 	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
990 	 * Hence, we only need to map the buffers at initialization-time.
991 	 */
992 	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
993 		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
994 	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
995 		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
996 
997 	/* allocate coherent memory for hardware descriptors
998 	 * note: writecombine gives slightly better performance, but
999 	 * requires that we explicitly flush the writes
1000 	 */
1001 	mv_chan->dma_desc_pool_virt =
1002 	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
1003 				 &mv_chan->dma_desc_pool, GFP_KERNEL);
1004 	if (!mv_chan->dma_desc_pool_virt)
1005 		return ERR_PTR(-ENOMEM);
1006 
1007 	/* discover transaction capabilities from the platform data */
1008 	dma_dev->cap_mask = cap_mask;
1009 
1010 	INIT_LIST_HEAD(&dma_dev->channels);
1011 
1012 	/* set base routines */
1013 	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1014 	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1015 	dma_dev->device_tx_status = mv_xor_status;
1016 	dma_dev->device_issue_pending = mv_xor_issue_pending;
1017 	dma_dev->dev = &pdev->dev;
1018 
1019 	/* set prep routines based on capability */
1020 	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1021 		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
1022 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1023 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1024 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1025 		dma_dev->max_xor = 8;
1026 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1027 	}
1028 
1029 	mv_chan->mmr_base = xordev->xor_base;
1030 	mv_chan->mmr_high_base = xordev->xor_high_base;
1031 	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1032 		     mv_chan);
1033 
1034 	/* clear errors before enabling interrupts */
1035 	mv_chan_clear_err_status(mv_chan);
1036 
1037 	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1038 			  0, dev_name(&pdev->dev), mv_chan);
1039 	if (ret)
1040 		goto err_free_dma;
1041 
1042 	mv_chan_unmask_interrupts(mv_chan);
1043 
1044 	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
1045 		mv_chan_set_mode_to_desc(mv_chan);
1046 	else
1047 		mv_chan_set_mode(mv_chan, DMA_XOR);
1048 
1049 	spin_lock_init(&mv_chan->lock);
1050 	INIT_LIST_HEAD(&mv_chan->chain);
1051 	INIT_LIST_HEAD(&mv_chan->completed_slots);
1052 	INIT_LIST_HEAD(&mv_chan->free_slots);
1053 	INIT_LIST_HEAD(&mv_chan->allocated_slots);
1054 	mv_chan->dmachan.device = dma_dev;
1055 	dma_cookie_init(&mv_chan->dmachan);
1056 
1057 	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1058 
1059 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1060 		ret = mv_chan_memcpy_self_test(mv_chan);
1061 		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1062 		if (ret)
1063 			goto err_free_irq;
1064 	}
1065 
1066 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1067 		ret = mv_chan_xor_self_test(mv_chan);
1068 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1069 		if (ret)
1070 			goto err_free_irq;
1071 	}
1072 
1073 	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
1074 		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
1075 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1076 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1077 		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1078 
1079 	dma_async_device_register(dma_dev);
1080 	return mv_chan;
1081 
1082 err_free_irq:
1083 	free_irq(mv_chan->irq, mv_chan);
1084  err_free_dma:
1085 	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
1086 			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1087 	return ERR_PTR(ret);
1088 }
1089 
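/*
 * Program the XOR engine's address decoding windows to match the MBus
 * DRAM layout, so the engine can reach every DRAM chip select reported
 * by mv_mbus_dram_info().
 */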
1090 static void
1091 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1092 			 const struct mbus_dram_target_info *dram)
1093 {
1094 	void __iomem *base = xordev->xor_high_base;
1095 	u32 win_enable = 0;
1096 	int i;
1097 
1098 	for (i = 0; i < 8; i++) {
1099 		writel(0, base + WINDOW_BASE(i));
1100 		writel(0, base + WINDOW_SIZE(i));
1101 		if (i < 4)
1102 			writel(0, base + WINDOW_REMAP_HIGH(i));
1103 	}
1104 
1105 	for (i = 0; i < dram->num_cs; i++) {
1106 		const struct mbus_dram_window *cs = dram->cs + i;
1107 
1108 		writel((cs->base & 0xffff0000) |
1109 		       (cs->mbus_attr << 8) |
1110 		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1111 		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1112 
1113 		win_enable |= (1 << i);
1114 		win_enable |= 3 << (16 + (2 * i));
1115 	}
1116 
1117 	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1118 	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1119 	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1120 	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1121 }
1122 
1123 static const struct of_device_id mv_xor_dt_ids[] = {
1124 	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG },
1125 	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
1126 	{},
1127 };
1128 MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
1129 
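/*
 * Probe the shared XOR block: map the two register regions, program the
 * MBus windows, enable the (optional) clock and then add one channel per
 * device tree child node, or per platform data entry on non-DT systems.
 */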
1130 static int mv_xor_probe(struct platform_device *pdev)
1131 {
1132 	const struct mbus_dram_target_info *dram;
1133 	struct mv_xor_device *xordev;
1134 	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1135 	struct resource *res;
1136 	int i, ret;
1137 	int op_in_desc;
1138 
1139 	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1140 
1141 	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1142 	if (!xordev)
1143 		return -ENOMEM;
1144 
1145 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1146 	if (!res)
1147 		return -ENODEV;
1148 
1149 	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1150 					resource_size(res));
1151 	if (!xordev->xor_base)
1152 		return -EBUSY;
1153 
1154 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1155 	if (!res)
1156 		return -ENODEV;
1157 
1158 	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1159 					     resource_size(res));
1160 	if (!xordev->xor_high_base)
1161 		return -EBUSY;
1162 
1163 	platform_set_drvdata(pdev, xordev);
1164 
1165 	/*
1166 	 * (Re-)program MBUS remapping windows if we are asked to.
1167 	 */
1168 	dram = mv_mbus_dram_info();
1169 	if (dram)
1170 		mv_xor_conf_mbus_windows(xordev, dram);
1171 
1172 	/* Not all platforms can gate the clock, so it is not
1173 	 * an error if the clock does not exist.
1174 	 */
1175 	xordev->clk = clk_get(&pdev->dev, NULL);
1176 	if (!IS_ERR(xordev->clk))
1177 		clk_prepare_enable(xordev->clk);
1178 
1179 	if (pdev->dev.of_node) {
1180 		struct device_node *np;
1181 		int i = 0;
1182 		const struct of_device_id *of_id =
1183 			of_match_device(mv_xor_dt_ids,
1184 					&pdev->dev);
1185 
1186 		for_each_child_of_node(pdev->dev.of_node, np) {
1187 			struct mv_xor_chan *chan;
1188 			dma_cap_mask_t cap_mask;
1189 			int irq;
1190 			op_in_desc = (int)of_id->data;
1191 
1192 			dma_cap_zero(cap_mask);
1193 			if (of_property_read_bool(np, "dmacap,memcpy"))
1194 				dma_cap_set(DMA_MEMCPY, cap_mask);
1195 			if (of_property_read_bool(np, "dmacap,xor"))
1196 				dma_cap_set(DMA_XOR, cap_mask);
1197 			if (of_property_read_bool(np, "dmacap,interrupt"))
1198 				dma_cap_set(DMA_INTERRUPT, cap_mask);
1199 
1200 			irq = irq_of_parse_and_map(np, 0);
1201 			if (!irq) {
1202 				ret = -ENODEV;
1203 				goto err_channel_add;
1204 			}
1205 
1206 			chan = mv_xor_channel_add(xordev, pdev, i,
1207 						  cap_mask, irq, op_in_desc);
1208 			if (IS_ERR(chan)) {
1209 				ret = PTR_ERR(chan);
1210 				irq_dispose_mapping(irq);
1211 				goto err_channel_add;
1212 			}
1213 
1214 			xordev->channels[i] = chan;
1215 			i++;
1216 		}
1217 	} else if (pdata && pdata->channels) {
1218 		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1219 			struct mv_xor_channel_data *cd;
1220 			struct mv_xor_chan *chan;
1221 			int irq;
1222 
1223 			cd = &pdata->channels[i];
1224 			if (!cd) {
1225 				ret = -ENODEV;
1226 				goto err_channel_add;
1227 			}
1228 
1229 			irq = platform_get_irq(pdev, i);
1230 			if (irq < 0) {
1231 				ret = irq;
1232 				goto err_channel_add;
1233 			}
1234 
1235 			chan = mv_xor_channel_add(xordev, pdev, i,
1236 						  cd->cap_mask, irq,
1237 						  XOR_MODE_IN_REG);
1238 			if (IS_ERR(chan)) {
1239 				ret = PTR_ERR(chan);
1240 				goto err_channel_add;
1241 			}
1242 
1243 			xordev->channels[i] = chan;
1244 		}
1245 	}
1246 
1247 	return 0;
1248 
1249 err_channel_add:
1250 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1251 		if (xordev->channels[i]) {
1252 			mv_xor_channel_remove(xordev->channels[i]);
1253 			if (pdev->dev.of_node)
1254 				irq_dispose_mapping(xordev->channels[i]->irq);
1255 		}
1256 
1257 	if (!IS_ERR(xordev->clk)) {
1258 		clk_disable_unprepare(xordev->clk);
1259 		clk_put(xordev->clk);
1260 	}
1261 
1262 	return ret;
1263 }
1264 
1265 static int mv_xor_remove(struct platform_device *pdev)
1266 {
1267 	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1268 	int i;
1269 
1270 	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1271 		if (xordev->channels[i])
1272 			mv_xor_channel_remove(xordev->channels[i]);
1273 	}
1274 
1275 	if (!IS_ERR(xordev->clk)) {
1276 		clk_disable_unprepare(xordev->clk);
1277 		clk_put(xordev->clk);
1278 	}
1279 
1280 	return 0;
1281 }
1282 
1283 static struct platform_driver mv_xor_driver = {
1284 	.probe		= mv_xor_probe,
1285 	.remove		= mv_xor_remove,
1286 	.driver		= {
1287 		.name	        = MV_XOR_NAME,
1288 		.of_match_table = of_match_ptr(mv_xor_dt_ids),
1289 	},
1290 };
1291 
1292 
1293 static int __init mv_xor_init(void)
1294 {
1295 	return platform_driver_register(&mv_xor_driver);
1296 }
1297 module_init(mv_xor_init);
1298 
1299 /* it's currently unsafe to unload this module */
1300 #if 0
1301 static void __exit mv_xor_exit(void)
1302 {
1303 	platform_driver_unregister(&mv_xor_driver);
1304 	return;
1305 }
1306 
1307 module_exit(mv_xor_exit);
1308 #endif
1309 
1310 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1311 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1312 MODULE_LICENSE("GPL");
1313