1 /* 2 * offload engine driver for the Marvell XOR engine 3 * Copyright (C) 2007, 2008, Marvell International Ltd. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 */ 14 15 #include <linux/init.h> 16 #include <linux/module.h> 17 #include <linux/slab.h> 18 #include <linux/delay.h> 19 #include <linux/dma-mapping.h> 20 #include <linux/spinlock.h> 21 #include <linux/interrupt.h> 22 #include <linux/of_device.h> 23 #include <linux/platform_device.h> 24 #include <linux/memory.h> 25 #include <linux/clk.h> 26 #include <linux/of.h> 27 #include <linux/of_irq.h> 28 #include <linux/irqdomain.h> 29 #include <linux/platform_data/dma-mv_xor.h> 30 31 #include "dmaengine.h" 32 #include "mv_xor.h" 33 34 enum mv_xor_mode { 35 XOR_MODE_IN_REG, 36 XOR_MODE_IN_DESC, 37 }; 38 39 static void mv_xor_issue_pending(struct dma_chan *chan); 40 41 #define to_mv_xor_chan(chan) \ 42 container_of(chan, struct mv_xor_chan, dmachan) 43 44 #define to_mv_xor_slot(tx) \ 45 container_of(tx, struct mv_xor_desc_slot, async_tx) 46 47 #define mv_chan_to_devp(chan) \ 48 ((chan)->dmadev.dev) 49 50 static void mv_desc_init(struct mv_xor_desc_slot *desc, 51 dma_addr_t addr, u32 byte_count, 52 enum dma_ctrl_flags flags) 53 { 54 struct mv_xor_desc *hw_desc = desc->hw_desc; 55 56 hw_desc->status = XOR_DESC_DMA_OWNED; 57 hw_desc->phy_next_desc = 0; 58 /* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */ 59 hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ? 60 XOR_DESC_EOD_INT_EN : 0; 61 hw_desc->phy_dest_addr = addr; 62 hw_desc->byte_count = byte_count; 63 } 64 65 static void mv_desc_set_mode(struct mv_xor_desc_slot *desc) 66 { 67 struct mv_xor_desc *hw_desc = desc->hw_desc; 68 69 switch (desc->type) { 70 case DMA_XOR: 71 case DMA_INTERRUPT: 72 hw_desc->desc_command |= XOR_DESC_OPERATION_XOR; 73 break; 74 case DMA_MEMCPY: 75 hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY; 76 break; 77 default: 78 BUG(); 79 return; 80 } 81 } 82 83 static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc, 84 u32 next_desc_addr) 85 { 86 struct mv_xor_desc *hw_desc = desc->hw_desc; 87 BUG_ON(hw_desc->phy_next_desc); 88 hw_desc->phy_next_desc = next_desc_addr; 89 } 90 91 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, 92 int index, dma_addr_t addr) 93 { 94 struct mv_xor_desc *hw_desc = desc->hw_desc; 95 hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr; 96 if (desc->type == DMA_XOR) 97 hw_desc->desc_command |= (1 << index); 98 } 99 100 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan) 101 { 102 return readl_relaxed(XOR_CURR_DESC(chan)); 103 } 104 105 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, 106 u32 next_desc_addr) 107 { 108 writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan)); 109 } 110 111 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) 112 { 113 u32 val = readl_relaxed(XOR_INTR_MASK(chan)); 114 val |= XOR_INTR_MASK_VALUE << (chan->idx * 16); 115 writel_relaxed(val, XOR_INTR_MASK(chan)); 116 } 117 118 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) 119 { 120 u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan)); 121 intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF; 122 return intr_cause; 123 } 124 125 static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan) 126 { 127 u32 val; 128 129 val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED; 130 val = ~(val << (chan->idx * 16)); 131 dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val); 132 writel_relaxed(val, XOR_INTR_CAUSE(chan)); 133 } 134 135 static void mv_chan_clear_err_status(struct mv_xor_chan *chan) 136 { 137 u32 val = 0xFFFF0000 >> (chan->idx * 16); 138 writel_relaxed(val, XOR_INTR_CAUSE(chan)); 139 } 140 141 static void mv_chan_set_mode(struct mv_xor_chan *chan, 142 enum dma_transaction_type type) 143 { 144 u32 op_mode; 145 u32 config = readl_relaxed(XOR_CONFIG(chan)); 146 147 switch (type) { 148 case DMA_XOR: 149 op_mode = XOR_OPERATION_MODE_XOR; 150 break; 151 case DMA_MEMCPY: 152 op_mode = XOR_OPERATION_MODE_MEMCPY; 153 break; 154 default: 155 dev_err(mv_chan_to_devp(chan), 156 "error: unsupported operation %d\n", 157 type); 158 BUG(); 159 return; 160 } 161 162 config &= ~0x7; 163 config |= op_mode; 164 165 #if defined(__BIG_ENDIAN) 166 config |= XOR_DESCRIPTOR_SWAP; 167 #else 168 config &= ~XOR_DESCRIPTOR_SWAP; 169 #endif 170 171 writel_relaxed(config, XOR_CONFIG(chan)); 172 chan->current_type = type; 173 } 174 175 static void mv_chan_set_mode_to_desc(struct mv_xor_chan *chan) 176 { 177 u32 op_mode; 178 u32 config = readl_relaxed(XOR_CONFIG(chan)); 179 180 op_mode = XOR_OPERATION_MODE_IN_DESC; 181 182 config &= ~0x7; 183 config |= op_mode; 184 185 #if defined(__BIG_ENDIAN) 186 config |= XOR_DESCRIPTOR_SWAP; 187 #else 188 config &= ~XOR_DESCRIPTOR_SWAP; 189 #endif 190 191 writel_relaxed(config, XOR_CONFIG(chan)); 192 } 193 194 static void mv_chan_activate(struct mv_xor_chan *chan) 195 { 196 dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); 197 198 /* writel ensures all descriptors are flushed before activation */ 199 writel(BIT(0), XOR_ACTIVATION(chan)); 200 } 201 202 static char mv_chan_is_busy(struct mv_xor_chan *chan) 203 { 204 u32 state = readl_relaxed(XOR_ACTIVATION(chan)); 205 206 state = (state >> 4) & 0x3; 207 208 return (state == 1) ? 1 : 0; 209 } 210 211 /* 212 * mv_chan_start_new_chain - program the engine to operate on new 213 * chain headed by sw_desc 214 * Caller must hold &mv_chan->lock while calling this function 215 */ 216 static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan, 217 struct mv_xor_desc_slot *sw_desc) 218 { 219 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n", 220 __func__, __LINE__, sw_desc); 221 222 /* set the hardware chain */ 223 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); 224 225 mv_chan->pending++; 226 mv_xor_issue_pending(&mv_chan->dmachan); 227 } 228 229 static dma_cookie_t 230 mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc, 231 struct mv_xor_chan *mv_chan, 232 dma_cookie_t cookie) 233 { 234 BUG_ON(desc->async_tx.cookie < 0); 235 236 if (desc->async_tx.cookie > 0) { 237 cookie = desc->async_tx.cookie; 238 239 /* call the callback (must not sleep or submit new 240 * operations to this channel) 241 */ 242 if (desc->async_tx.callback) 243 desc->async_tx.callback( 244 desc->async_tx.callback_param); 245 246 dma_descriptor_unmap(&desc->async_tx); 247 } 248 249 /* run dependent operations */ 250 dma_run_dependencies(&desc->async_tx); 251 252 return cookie; 253 } 254 255 static int 256 mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan) 257 { 258 struct mv_xor_desc_slot *iter, *_iter; 259 260 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__); 261 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, 262 node) { 263 264 if (async_tx_test_ack(&iter->async_tx)) 265 list_move_tail(&iter->node, &mv_chan->free_slots); 266 } 267 return 0; 268 } 269 270 static int 271 mv_desc_clean_slot(struct mv_xor_desc_slot *desc, 272 struct mv_xor_chan *mv_chan) 273 { 274 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n", 275 __func__, __LINE__, desc, desc->async_tx.flags); 276 277 /* the client is allowed to attach dependent operations 278 * until 'ack' is set 279 */ 280 if (!async_tx_test_ack(&desc->async_tx)) 281 /* move this slot to the completed_slots */ 282 list_move_tail(&desc->node, &mv_chan->completed_slots); 283 else 284 list_move_tail(&desc->node, &mv_chan->free_slots); 285 286 return 0; 287 } 288 289 /* This function must be called with the mv_xor_chan spinlock held */ 290 static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan) 291 { 292 struct mv_xor_desc_slot *iter, *_iter; 293 dma_cookie_t cookie = 0; 294 int busy = mv_chan_is_busy(mv_chan); 295 u32 current_desc = mv_chan_get_current_desc(mv_chan); 296 int current_cleaned = 0; 297 struct mv_xor_desc *hw_desc; 298 299 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__); 300 dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc); 301 mv_chan_clean_completed_slots(mv_chan); 302 303 /* free completed slots from the chain starting with 304 * the oldest descriptor 305 */ 306 307 list_for_each_entry_safe(iter, _iter, &mv_chan->chain, 308 node) { 309 310 /* clean finished descriptors */ 311 hw_desc = iter->hw_desc; 312 if (hw_desc->status & XOR_DESC_SUCCESS) { 313 cookie = mv_desc_run_tx_complete_actions(iter, mv_chan, 314 cookie); 315 316 /* done processing desc, clean slot */ 317 mv_desc_clean_slot(iter, mv_chan); 318 319 /* break if we did cleaned the current */ 320 if (iter->async_tx.phys == current_desc) { 321 current_cleaned = 1; 322 break; 323 } 324 } else { 325 if (iter->async_tx.phys == current_desc) { 326 current_cleaned = 0; 327 break; 328 } 329 } 330 } 331 332 if ((busy == 0) && !list_empty(&mv_chan->chain)) { 333 if (current_cleaned) { 334 /* 335 * current descriptor cleaned and removed, run 336 * from list head 337 */ 338 iter = list_entry(mv_chan->chain.next, 339 struct mv_xor_desc_slot, 340 node); 341 mv_chan_start_new_chain(mv_chan, iter); 342 } else { 343 if (!list_is_last(&iter->node, &mv_chan->chain)) { 344 /* 345 * descriptors are still waiting after 346 * current, trigger them 347 */ 348 iter = list_entry(iter->node.next, 349 struct mv_xor_desc_slot, 350 node); 351 mv_chan_start_new_chain(mv_chan, iter); 352 } else { 353 /* 354 * some descriptors are still waiting 355 * to be cleaned 356 */ 357 tasklet_schedule(&mv_chan->irq_tasklet); 358 } 359 } 360 } 361 362 if (cookie > 0) 363 mv_chan->dmachan.completed_cookie = cookie; 364 } 365 366 static void mv_xor_tasklet(unsigned long data) 367 { 368 struct mv_xor_chan *chan = (struct mv_xor_chan *) data; 369 370 spin_lock_bh(&chan->lock); 371 mv_chan_slot_cleanup(chan); 372 spin_unlock_bh(&chan->lock); 373 } 374 375 static struct mv_xor_desc_slot * 376 mv_chan_alloc_slot(struct mv_xor_chan *mv_chan) 377 { 378 struct mv_xor_desc_slot *iter; 379 380 spin_lock_bh(&mv_chan->lock); 381 382 if (!list_empty(&mv_chan->free_slots)) { 383 iter = list_first_entry(&mv_chan->free_slots, 384 struct mv_xor_desc_slot, 385 node); 386 387 list_move_tail(&iter->node, &mv_chan->allocated_slots); 388 389 spin_unlock_bh(&mv_chan->lock); 390 391 /* pre-ack descriptor */ 392 async_tx_ack(&iter->async_tx); 393 iter->async_tx.cookie = -EBUSY; 394 395 return iter; 396 397 } 398 399 spin_unlock_bh(&mv_chan->lock); 400 401 /* try to free some slots if the allocation fails */ 402 tasklet_schedule(&mv_chan->irq_tasklet); 403 404 return NULL; 405 } 406 407 /************************ DMA engine API functions ****************************/ 408 static dma_cookie_t 409 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) 410 { 411 struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); 412 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); 413 struct mv_xor_desc_slot *old_chain_tail; 414 dma_cookie_t cookie; 415 int new_hw_chain = 1; 416 417 dev_dbg(mv_chan_to_devp(mv_chan), 418 "%s sw_desc %p: async_tx %p\n", 419 __func__, sw_desc, &sw_desc->async_tx); 420 421 spin_lock_bh(&mv_chan->lock); 422 cookie = dma_cookie_assign(tx); 423 424 if (list_empty(&mv_chan->chain)) 425 list_move_tail(&sw_desc->node, &mv_chan->chain); 426 else { 427 new_hw_chain = 0; 428 429 old_chain_tail = list_entry(mv_chan->chain.prev, 430 struct mv_xor_desc_slot, 431 node); 432 list_move_tail(&sw_desc->node, &mv_chan->chain); 433 434 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n", 435 &old_chain_tail->async_tx.phys); 436 437 /* fix up the hardware chain */ 438 mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys); 439 440 /* if the channel is not busy */ 441 if (!mv_chan_is_busy(mv_chan)) { 442 u32 current_desc = mv_chan_get_current_desc(mv_chan); 443 /* 444 * and the curren desc is the end of the chain before 445 * the append, then we need to start the channel 446 */ 447 if (current_desc == old_chain_tail->async_tx.phys) 448 new_hw_chain = 1; 449 } 450 } 451 452 if (new_hw_chain) 453 mv_chan_start_new_chain(mv_chan, sw_desc); 454 455 spin_unlock_bh(&mv_chan->lock); 456 457 return cookie; 458 } 459 460 /* returns the number of allocated descriptors */ 461 static int mv_xor_alloc_chan_resources(struct dma_chan *chan) 462 { 463 void *virt_desc; 464 dma_addr_t dma_desc; 465 int idx; 466 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 467 struct mv_xor_desc_slot *slot = NULL; 468 int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE; 469 470 /* Allocate descriptor slots */ 471 idx = mv_chan->slots_allocated; 472 while (idx < num_descs_in_pool) { 473 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 474 if (!slot) { 475 dev_info(mv_chan_to_devp(mv_chan), 476 "channel only initialized %d descriptor slots", 477 idx); 478 break; 479 } 480 virt_desc = mv_chan->dma_desc_pool_virt; 481 slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE; 482 483 dma_async_tx_descriptor_init(&slot->async_tx, chan); 484 slot->async_tx.tx_submit = mv_xor_tx_submit; 485 INIT_LIST_HEAD(&slot->node); 486 dma_desc = mv_chan->dma_desc_pool; 487 slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE; 488 slot->idx = idx++; 489 490 spin_lock_bh(&mv_chan->lock); 491 mv_chan->slots_allocated = idx; 492 list_add_tail(&slot->node, &mv_chan->free_slots); 493 spin_unlock_bh(&mv_chan->lock); 494 } 495 496 dev_dbg(mv_chan_to_devp(mv_chan), 497 "allocated %d descriptor slots\n", 498 mv_chan->slots_allocated); 499 500 return mv_chan->slots_allocated ? : -ENOMEM; 501 } 502 503 static struct dma_async_tx_descriptor * 504 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, 505 unsigned int src_cnt, size_t len, unsigned long flags) 506 { 507 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 508 struct mv_xor_desc_slot *sw_desc; 509 510 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) 511 return NULL; 512 513 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); 514 515 dev_dbg(mv_chan_to_devp(mv_chan), 516 "%s src_cnt: %d len: %u dest %pad flags: %ld\n", 517 __func__, src_cnt, len, &dest, flags); 518 519 sw_desc = mv_chan_alloc_slot(mv_chan); 520 if (sw_desc) { 521 sw_desc->type = DMA_XOR; 522 sw_desc->async_tx.flags = flags; 523 mv_desc_init(sw_desc, dest, len, flags); 524 if (mv_chan->op_in_desc == XOR_MODE_IN_DESC) 525 mv_desc_set_mode(sw_desc); 526 while (src_cnt--) 527 mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]); 528 } 529 530 dev_dbg(mv_chan_to_devp(mv_chan), 531 "%s sw_desc %p async_tx %p \n", 532 __func__, sw_desc, &sw_desc->async_tx); 533 return sw_desc ? &sw_desc->async_tx : NULL; 534 } 535 536 static struct dma_async_tx_descriptor * 537 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 538 size_t len, unsigned long flags) 539 { 540 /* 541 * A MEMCPY operation is identical to an XOR operation with only 542 * a single source address. 543 */ 544 return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags); 545 } 546 547 static struct dma_async_tx_descriptor * 548 mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) 549 { 550 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 551 dma_addr_t src, dest; 552 size_t len; 553 554 src = mv_chan->dummy_src_addr; 555 dest = mv_chan->dummy_dst_addr; 556 len = MV_XOR_MIN_BYTE_COUNT; 557 558 /* 559 * We implement the DMA_INTERRUPT operation as a minimum sized 560 * XOR operation with a single dummy source address. 561 */ 562 return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags); 563 } 564 565 static void mv_xor_free_chan_resources(struct dma_chan *chan) 566 { 567 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 568 struct mv_xor_desc_slot *iter, *_iter; 569 int in_use_descs = 0; 570 571 spin_lock_bh(&mv_chan->lock); 572 573 mv_chan_slot_cleanup(mv_chan); 574 575 list_for_each_entry_safe(iter, _iter, &mv_chan->chain, 576 node) { 577 in_use_descs++; 578 list_move_tail(&iter->node, &mv_chan->free_slots); 579 } 580 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, 581 node) { 582 in_use_descs++; 583 list_move_tail(&iter->node, &mv_chan->free_slots); 584 } 585 list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots, 586 node) { 587 in_use_descs++; 588 list_move_tail(&iter->node, &mv_chan->free_slots); 589 } 590 list_for_each_entry_safe_reverse( 591 iter, _iter, &mv_chan->free_slots, node) { 592 list_del(&iter->node); 593 kfree(iter); 594 mv_chan->slots_allocated--; 595 } 596 597 dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n", 598 __func__, mv_chan->slots_allocated); 599 spin_unlock_bh(&mv_chan->lock); 600 601 if (in_use_descs) 602 dev_err(mv_chan_to_devp(mv_chan), 603 "freeing %d in use descriptors!\n", in_use_descs); 604 } 605 606 /** 607 * mv_xor_status - poll the status of an XOR transaction 608 * @chan: XOR channel handle 609 * @cookie: XOR transaction identifier 610 * @txstate: XOR transactions state holder (or NULL) 611 */ 612 static enum dma_status mv_xor_status(struct dma_chan *chan, 613 dma_cookie_t cookie, 614 struct dma_tx_state *txstate) 615 { 616 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 617 enum dma_status ret; 618 619 ret = dma_cookie_status(chan, cookie, txstate); 620 if (ret == DMA_COMPLETE) 621 return ret; 622 623 spin_lock_bh(&mv_chan->lock); 624 mv_chan_slot_cleanup(mv_chan); 625 spin_unlock_bh(&mv_chan->lock); 626 627 return dma_cookie_status(chan, cookie, txstate); 628 } 629 630 static void mv_chan_dump_regs(struct mv_xor_chan *chan) 631 { 632 u32 val; 633 634 val = readl_relaxed(XOR_CONFIG(chan)); 635 dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val); 636 637 val = readl_relaxed(XOR_ACTIVATION(chan)); 638 dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val); 639 640 val = readl_relaxed(XOR_INTR_CAUSE(chan)); 641 dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val); 642 643 val = readl_relaxed(XOR_INTR_MASK(chan)); 644 dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val); 645 646 val = readl_relaxed(XOR_ERROR_CAUSE(chan)); 647 dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val); 648 649 val = readl_relaxed(XOR_ERROR_ADDR(chan)); 650 dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val); 651 } 652 653 static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan, 654 u32 intr_cause) 655 { 656 if (intr_cause & XOR_INT_ERR_DECODE) { 657 dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n"); 658 return; 659 } 660 661 dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n", 662 chan->idx, intr_cause); 663 664 mv_chan_dump_regs(chan); 665 WARN_ON(1); 666 } 667 668 static irqreturn_t mv_xor_interrupt_handler(int irq, void *data) 669 { 670 struct mv_xor_chan *chan = data; 671 u32 intr_cause = mv_chan_get_intr_cause(chan); 672 673 dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause); 674 675 if (intr_cause & XOR_INTR_ERRORS) 676 mv_chan_err_interrupt_handler(chan, intr_cause); 677 678 tasklet_schedule(&chan->irq_tasklet); 679 680 mv_chan_clear_eoc_cause(chan); 681 682 return IRQ_HANDLED; 683 } 684 685 static void mv_xor_issue_pending(struct dma_chan *chan) 686 { 687 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 688 689 if (mv_chan->pending >= MV_XOR_THRESHOLD) { 690 mv_chan->pending = 0; 691 mv_chan_activate(mv_chan); 692 } 693 } 694 695 /* 696 * Perform a transaction to verify the HW works. 697 */ 698 699 static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan) 700 { 701 int i, ret; 702 void *src, *dest; 703 dma_addr_t src_dma, dest_dma; 704 struct dma_chan *dma_chan; 705 dma_cookie_t cookie; 706 struct dma_async_tx_descriptor *tx; 707 struct dmaengine_unmap_data *unmap; 708 int err = 0; 709 710 src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); 711 if (!src) 712 return -ENOMEM; 713 714 dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL); 715 if (!dest) { 716 kfree(src); 717 return -ENOMEM; 718 } 719 720 /* Fill in src buffer */ 721 for (i = 0; i < PAGE_SIZE; i++) 722 ((u8 *) src)[i] = (u8)i; 723 724 dma_chan = &mv_chan->dmachan; 725 if (mv_xor_alloc_chan_resources(dma_chan) < 1) { 726 err = -ENODEV; 727 goto out; 728 } 729 730 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL); 731 if (!unmap) { 732 err = -ENOMEM; 733 goto free_resources; 734 } 735 736 src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0, 737 PAGE_SIZE, DMA_TO_DEVICE); 738 unmap->addr[0] = src_dma; 739 740 ret = dma_mapping_error(dma_chan->device->dev, src_dma); 741 if (ret) { 742 err = -ENOMEM; 743 goto free_resources; 744 } 745 unmap->to_cnt = 1; 746 747 dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0, 748 PAGE_SIZE, DMA_FROM_DEVICE); 749 unmap->addr[1] = dest_dma; 750 751 ret = dma_mapping_error(dma_chan->device->dev, dest_dma); 752 if (ret) { 753 err = -ENOMEM; 754 goto free_resources; 755 } 756 unmap->from_cnt = 1; 757 unmap->len = PAGE_SIZE; 758 759 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, 760 PAGE_SIZE, 0); 761 if (!tx) { 762 dev_err(dma_chan->device->dev, 763 "Self-test cannot prepare operation, disabling\n"); 764 err = -ENODEV; 765 goto free_resources; 766 } 767 768 cookie = mv_xor_tx_submit(tx); 769 if (dma_submit_error(cookie)) { 770 dev_err(dma_chan->device->dev, 771 "Self-test submit error, disabling\n"); 772 err = -ENODEV; 773 goto free_resources; 774 } 775 776 mv_xor_issue_pending(dma_chan); 777 async_tx_ack(tx); 778 msleep(1); 779 780 if (mv_xor_status(dma_chan, cookie, NULL) != 781 DMA_COMPLETE) { 782 dev_err(dma_chan->device->dev, 783 "Self-test copy timed out, disabling\n"); 784 err = -ENODEV; 785 goto free_resources; 786 } 787 788 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, 789 PAGE_SIZE, DMA_FROM_DEVICE); 790 if (memcmp(src, dest, PAGE_SIZE)) { 791 dev_err(dma_chan->device->dev, 792 "Self-test copy failed compare, disabling\n"); 793 err = -ENODEV; 794 goto free_resources; 795 } 796 797 free_resources: 798 dmaengine_unmap_put(unmap); 799 mv_xor_free_chan_resources(dma_chan); 800 out: 801 kfree(src); 802 kfree(dest); 803 return err; 804 } 805 806 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ 807 static int 808 mv_chan_xor_self_test(struct mv_xor_chan *mv_chan) 809 { 810 int i, src_idx, ret; 811 struct page *dest; 812 struct page *xor_srcs[MV_XOR_NUM_SRC_TEST]; 813 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; 814 dma_addr_t dest_dma; 815 struct dma_async_tx_descriptor *tx; 816 struct dmaengine_unmap_data *unmap; 817 struct dma_chan *dma_chan; 818 dma_cookie_t cookie; 819 u8 cmp_byte = 0; 820 u32 cmp_word; 821 int err = 0; 822 int src_count = MV_XOR_NUM_SRC_TEST; 823 824 for (src_idx = 0; src_idx < src_count; src_idx++) { 825 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 826 if (!xor_srcs[src_idx]) { 827 while (src_idx--) 828 __free_page(xor_srcs[src_idx]); 829 return -ENOMEM; 830 } 831 } 832 833 dest = alloc_page(GFP_KERNEL); 834 if (!dest) { 835 while (src_idx--) 836 __free_page(xor_srcs[src_idx]); 837 return -ENOMEM; 838 } 839 840 /* Fill in src buffers */ 841 for (src_idx = 0; src_idx < src_count; src_idx++) { 842 u8 *ptr = page_address(xor_srcs[src_idx]); 843 for (i = 0; i < PAGE_SIZE; i++) 844 ptr[i] = (1 << src_idx); 845 } 846 847 for (src_idx = 0; src_idx < src_count; src_idx++) 848 cmp_byte ^= (u8) (1 << src_idx); 849 850 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | 851 (cmp_byte << 8) | cmp_byte; 852 853 memset(page_address(dest), 0, PAGE_SIZE); 854 855 dma_chan = &mv_chan->dmachan; 856 if (mv_xor_alloc_chan_resources(dma_chan) < 1) { 857 err = -ENODEV; 858 goto out; 859 } 860 861 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1, 862 GFP_KERNEL); 863 if (!unmap) { 864 err = -ENOMEM; 865 goto free_resources; 866 } 867 868 /* test xor */ 869 for (i = 0; i < src_count; i++) { 870 unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 871 0, PAGE_SIZE, DMA_TO_DEVICE); 872 dma_srcs[i] = unmap->addr[i]; 873 ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]); 874 if (ret) { 875 err = -ENOMEM; 876 goto free_resources; 877 } 878 unmap->to_cnt++; 879 } 880 881 unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, 882 DMA_FROM_DEVICE); 883 dest_dma = unmap->addr[src_count]; 884 ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]); 885 if (ret) { 886 err = -ENOMEM; 887 goto free_resources; 888 } 889 unmap->from_cnt = 1; 890 unmap->len = PAGE_SIZE; 891 892 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 893 src_count, PAGE_SIZE, 0); 894 if (!tx) { 895 dev_err(dma_chan->device->dev, 896 "Self-test cannot prepare operation, disabling\n"); 897 err = -ENODEV; 898 goto free_resources; 899 } 900 901 cookie = mv_xor_tx_submit(tx); 902 if (dma_submit_error(cookie)) { 903 dev_err(dma_chan->device->dev, 904 "Self-test submit error, disabling\n"); 905 err = -ENODEV; 906 goto free_resources; 907 } 908 909 mv_xor_issue_pending(dma_chan); 910 async_tx_ack(tx); 911 msleep(8); 912 913 if (mv_xor_status(dma_chan, cookie, NULL) != 914 DMA_COMPLETE) { 915 dev_err(dma_chan->device->dev, 916 "Self-test xor timed out, disabling\n"); 917 err = -ENODEV; 918 goto free_resources; 919 } 920 921 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, 922 PAGE_SIZE, DMA_FROM_DEVICE); 923 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { 924 u32 *ptr = page_address(dest); 925 if (ptr[i] != cmp_word) { 926 dev_err(dma_chan->device->dev, 927 "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n", 928 i, ptr[i], cmp_word); 929 err = -ENODEV; 930 goto free_resources; 931 } 932 } 933 934 free_resources: 935 dmaengine_unmap_put(unmap); 936 mv_xor_free_chan_resources(dma_chan); 937 out: 938 src_idx = src_count; 939 while (src_idx--) 940 __free_page(xor_srcs[src_idx]); 941 __free_page(dest); 942 return err; 943 } 944 945 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) 946 { 947 struct dma_chan *chan, *_chan; 948 struct device *dev = mv_chan->dmadev.dev; 949 950 dma_async_device_unregister(&mv_chan->dmadev); 951 952 dma_free_coherent(dev, MV_XOR_POOL_SIZE, 953 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); 954 dma_unmap_single(dev, mv_chan->dummy_src_addr, 955 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE); 956 dma_unmap_single(dev, mv_chan->dummy_dst_addr, 957 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE); 958 959 list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels, 960 device_node) { 961 list_del(&chan->device_node); 962 } 963 964 free_irq(mv_chan->irq, mv_chan); 965 966 return 0; 967 } 968 969 static struct mv_xor_chan * 970 mv_xor_channel_add(struct mv_xor_device *xordev, 971 struct platform_device *pdev, 972 int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc) 973 { 974 int ret = 0; 975 struct mv_xor_chan *mv_chan; 976 struct dma_device *dma_dev; 977 978 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); 979 if (!mv_chan) 980 return ERR_PTR(-ENOMEM); 981 982 mv_chan->idx = idx; 983 mv_chan->irq = irq; 984 mv_chan->op_in_desc = op_in_desc; 985 986 dma_dev = &mv_chan->dmadev; 987 988 /* 989 * These source and destination dummy buffers are used to implement 990 * a DMA_INTERRUPT operation as a minimum-sized XOR operation. 991 * Hence, we only need to map the buffers at initialization-time. 992 */ 993 mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev, 994 mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE); 995 mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev, 996 mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE); 997 998 /* allocate coherent memory for hardware descriptors 999 * note: writecombine gives slightly better performance, but 1000 * requires that we explicitly flush the writes 1001 */ 1002 mv_chan->dma_desc_pool_virt = 1003 dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE, 1004 &mv_chan->dma_desc_pool, GFP_KERNEL); 1005 if (!mv_chan->dma_desc_pool_virt) 1006 return ERR_PTR(-ENOMEM); 1007 1008 /* discover transaction capabilites from the platform data */ 1009 dma_dev->cap_mask = cap_mask; 1010 1011 INIT_LIST_HEAD(&dma_dev->channels); 1012 1013 /* set base routines */ 1014 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; 1015 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; 1016 dma_dev->device_tx_status = mv_xor_status; 1017 dma_dev->device_issue_pending = mv_xor_issue_pending; 1018 dma_dev->dev = &pdev->dev; 1019 1020 /* set prep routines based on capability */ 1021 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) 1022 dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt; 1023 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) 1024 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; 1025 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 1026 dma_dev->max_xor = 8; 1027 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; 1028 } 1029 1030 mv_chan->mmr_base = xordev->xor_base; 1031 mv_chan->mmr_high_base = xordev->xor_high_base; 1032 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) 1033 mv_chan); 1034 1035 /* clear errors before enabling interrupts */ 1036 mv_chan_clear_err_status(mv_chan); 1037 1038 ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler, 1039 0, dev_name(&pdev->dev), mv_chan); 1040 if (ret) 1041 goto err_free_dma; 1042 1043 mv_chan_unmask_interrupts(mv_chan); 1044 1045 if (mv_chan->op_in_desc == XOR_MODE_IN_DESC) 1046 mv_chan_set_mode_to_desc(mv_chan); 1047 else 1048 mv_chan_set_mode(mv_chan, DMA_XOR); 1049 1050 spin_lock_init(&mv_chan->lock); 1051 INIT_LIST_HEAD(&mv_chan->chain); 1052 INIT_LIST_HEAD(&mv_chan->completed_slots); 1053 INIT_LIST_HEAD(&mv_chan->free_slots); 1054 INIT_LIST_HEAD(&mv_chan->allocated_slots); 1055 mv_chan->dmachan.device = dma_dev; 1056 dma_cookie_init(&mv_chan->dmachan); 1057 1058 list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels); 1059 1060 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { 1061 ret = mv_chan_memcpy_self_test(mv_chan); 1062 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); 1063 if (ret) 1064 goto err_free_irq; 1065 } 1066 1067 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 1068 ret = mv_chan_xor_self_test(mv_chan); 1069 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); 1070 if (ret) 1071 goto err_free_irq; 1072 } 1073 1074 dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n", 1075 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode", 1076 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", 1077 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", 1078 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); 1079 1080 dma_async_device_register(dma_dev); 1081 return mv_chan; 1082 1083 err_free_irq: 1084 free_irq(mv_chan->irq, mv_chan); 1085 err_free_dma: 1086 dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE, 1087 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); 1088 return ERR_PTR(ret); 1089 } 1090 1091 static void 1092 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, 1093 const struct mbus_dram_target_info *dram) 1094 { 1095 void __iomem *base = xordev->xor_high_base; 1096 u32 win_enable = 0; 1097 int i; 1098 1099 for (i = 0; i < 8; i++) { 1100 writel(0, base + WINDOW_BASE(i)); 1101 writel(0, base + WINDOW_SIZE(i)); 1102 if (i < 4) 1103 writel(0, base + WINDOW_REMAP_HIGH(i)); 1104 } 1105 1106 for (i = 0; i < dram->num_cs; i++) { 1107 const struct mbus_dram_window *cs = dram->cs + i; 1108 1109 writel((cs->base & 0xffff0000) | 1110 (cs->mbus_attr << 8) | 1111 dram->mbus_dram_target_id, base + WINDOW_BASE(i)); 1112 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); 1113 1114 win_enable |= (1 << i); 1115 win_enable |= 3 << (16 + (2 * i)); 1116 } 1117 1118 writel(win_enable, base + WINDOW_BAR_ENABLE(0)); 1119 writel(win_enable, base + WINDOW_BAR_ENABLE(1)); 1120 writel(0, base + WINDOW_OVERRIDE_CTRL(0)); 1121 writel(0, base + WINDOW_OVERRIDE_CTRL(1)); 1122 } 1123 1124 static const struct of_device_id mv_xor_dt_ids[] = { 1125 { .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG }, 1126 { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC }, 1127 {}, 1128 }; 1129 MODULE_DEVICE_TABLE(of, mv_xor_dt_ids); 1130 1131 static int mv_xor_probe(struct platform_device *pdev) 1132 { 1133 const struct mbus_dram_target_info *dram; 1134 struct mv_xor_device *xordev; 1135 struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev); 1136 struct resource *res; 1137 int i, ret; 1138 int op_in_desc; 1139 1140 dev_notice(&pdev->dev, "Marvell shared XOR driver\n"); 1141 1142 xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL); 1143 if (!xordev) 1144 return -ENOMEM; 1145 1146 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1147 if (!res) 1148 return -ENODEV; 1149 1150 xordev->xor_base = devm_ioremap(&pdev->dev, res->start, 1151 resource_size(res)); 1152 if (!xordev->xor_base) 1153 return -EBUSY; 1154 1155 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1156 if (!res) 1157 return -ENODEV; 1158 1159 xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start, 1160 resource_size(res)); 1161 if (!xordev->xor_high_base) 1162 return -EBUSY; 1163 1164 platform_set_drvdata(pdev, xordev); 1165 1166 /* 1167 * (Re-)program MBUS remapping windows if we are asked to. 1168 */ 1169 dram = mv_mbus_dram_info(); 1170 if (dram) 1171 mv_xor_conf_mbus_windows(xordev, dram); 1172 1173 /* Not all platforms can gate the clock, so it is not 1174 * an error if the clock does not exists. 1175 */ 1176 xordev->clk = clk_get(&pdev->dev, NULL); 1177 if (!IS_ERR(xordev->clk)) 1178 clk_prepare_enable(xordev->clk); 1179 1180 if (pdev->dev.of_node) { 1181 struct device_node *np; 1182 int i = 0; 1183 const struct of_device_id *of_id = 1184 of_match_device(mv_xor_dt_ids, 1185 &pdev->dev); 1186 1187 for_each_child_of_node(pdev->dev.of_node, np) { 1188 struct mv_xor_chan *chan; 1189 dma_cap_mask_t cap_mask; 1190 int irq; 1191 op_in_desc = (int)of_id->data; 1192 1193 dma_cap_zero(cap_mask); 1194 if (of_property_read_bool(np, "dmacap,memcpy")) 1195 dma_cap_set(DMA_MEMCPY, cap_mask); 1196 if (of_property_read_bool(np, "dmacap,xor")) 1197 dma_cap_set(DMA_XOR, cap_mask); 1198 if (of_property_read_bool(np, "dmacap,interrupt")) 1199 dma_cap_set(DMA_INTERRUPT, cap_mask); 1200 1201 irq = irq_of_parse_and_map(np, 0); 1202 if (!irq) { 1203 ret = -ENODEV; 1204 goto err_channel_add; 1205 } 1206 1207 chan = mv_xor_channel_add(xordev, pdev, i, 1208 cap_mask, irq, op_in_desc); 1209 if (IS_ERR(chan)) { 1210 ret = PTR_ERR(chan); 1211 irq_dispose_mapping(irq); 1212 goto err_channel_add; 1213 } 1214 1215 xordev->channels[i] = chan; 1216 i++; 1217 } 1218 } else if (pdata && pdata->channels) { 1219 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { 1220 struct mv_xor_channel_data *cd; 1221 struct mv_xor_chan *chan; 1222 int irq; 1223 1224 cd = &pdata->channels[i]; 1225 if (!cd) { 1226 ret = -ENODEV; 1227 goto err_channel_add; 1228 } 1229 1230 irq = platform_get_irq(pdev, i); 1231 if (irq < 0) { 1232 ret = irq; 1233 goto err_channel_add; 1234 } 1235 1236 chan = mv_xor_channel_add(xordev, pdev, i, 1237 cd->cap_mask, irq, 1238 XOR_MODE_IN_REG); 1239 if (IS_ERR(chan)) { 1240 ret = PTR_ERR(chan); 1241 goto err_channel_add; 1242 } 1243 1244 xordev->channels[i] = chan; 1245 } 1246 } 1247 1248 return 0; 1249 1250 err_channel_add: 1251 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) 1252 if (xordev->channels[i]) { 1253 mv_xor_channel_remove(xordev->channels[i]); 1254 if (pdev->dev.of_node) 1255 irq_dispose_mapping(xordev->channels[i]->irq); 1256 } 1257 1258 if (!IS_ERR(xordev->clk)) { 1259 clk_disable_unprepare(xordev->clk); 1260 clk_put(xordev->clk); 1261 } 1262 1263 return ret; 1264 } 1265 1266 static int mv_xor_remove(struct platform_device *pdev) 1267 { 1268 struct mv_xor_device *xordev = platform_get_drvdata(pdev); 1269 int i; 1270 1271 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { 1272 if (xordev->channels[i]) 1273 mv_xor_channel_remove(xordev->channels[i]); 1274 } 1275 1276 if (!IS_ERR(xordev->clk)) { 1277 clk_disable_unprepare(xordev->clk); 1278 clk_put(xordev->clk); 1279 } 1280 1281 return 0; 1282 } 1283 1284 static struct platform_driver mv_xor_driver = { 1285 .probe = mv_xor_probe, 1286 .remove = mv_xor_remove, 1287 .driver = { 1288 .name = MV_XOR_NAME, 1289 .of_match_table = of_match_ptr(mv_xor_dt_ids), 1290 }, 1291 }; 1292 1293 1294 static int __init mv_xor_init(void) 1295 { 1296 return platform_driver_register(&mv_xor_driver); 1297 } 1298 module_init(mv_xor_init); 1299 1300 /* it's currently unsafe to unload this module */ 1301 #if 0 1302 static void __exit mv_xor_exit(void) 1303 { 1304 platform_driver_unregister(&mv_xor_driver); 1305 return; 1306 } 1307 1308 module_exit(mv_xor_exit); 1309 #endif 1310 1311 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>"); 1312 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine"); 1313 MODULE_LICENSE("GPL"); 1314