Lines Matching "armada-3700-pcie"

1 // SPDX-License-Identifier: GPL-2.0-only
10 #include <linux/dma-mapping.h>
21 #include <linux/platform_data/dma-mv_xor.h>
46 ((chan)->dmadev.dev)
52 struct mv_xor_desc *hw_desc = desc->hw_desc; in mv_desc_init()
54 hw_desc->status = XOR_DESC_DMA_OWNED; in mv_desc_init()
55 hw_desc->phy_next_desc = 0; in mv_desc_init()
56 /* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */ in mv_desc_init()
57 hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ? in mv_desc_init()
59 hw_desc->phy_dest_addr = addr; in mv_desc_init()
60 hw_desc->byte_count = byte_count; in mv_desc_init()
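
The descriptor setup above only enables the end-of-descriptor interrupt when the submitter passed DMA_PREP_INTERRUPT. A minimal client-side sketch of where that flag comes from, using the generic dmaengine API; the names my_copy_done and my_issue_copy are hypothetical and not part of this driver:

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

static void my_copy_done(void *param)
{
	complete(param);	/* struct completion supplied by the caller */
}

static int my_issue_copy(struct dma_chan *chan, dma_addr_t dst,
			 dma_addr_t src, size_t len, struct completion *done)
{
	struct dma_async_tx_descriptor *tx;

	/* DMA_PREP_INTERRUPT asks the driver for an end-of-descriptor IRQ,
	 * which is what lets the callback below run for this transfer. */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	tx->callback = my_copy_done;
	tx->callback_param = done;

	if (dma_submit_error(dmaengine_submit(tx)))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
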
65 struct mv_xor_desc *hw_desc = desc->hw_desc; in mv_desc_set_mode()
67 switch (desc->type) { in mv_desc_set_mode()
70 hw_desc->desc_command |= XOR_DESC_OPERATION_XOR; in mv_desc_set_mode()
73 hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY; in mv_desc_set_mode()
84 struct mv_xor_desc *hw_desc = desc->hw_desc; in mv_desc_set_next_desc()
85 BUG_ON(hw_desc->phy_next_desc); in mv_desc_set_next_desc()
86 hw_desc->phy_next_desc = next_desc_addr; in mv_desc_set_next_desc()
92 struct mv_xor_desc *hw_desc = desc->hw_desc; in mv_desc_set_src_addr()
93 hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr; in mv_desc_set_src_addr()
94 if (desc->type == DMA_XOR) in mv_desc_set_src_addr()
95 hw_desc->desc_command |= (1 << index); in mv_desc_set_src_addr()
112 val |= XOR_INTR_MASK_VALUE << (chan->idx * 16); in mv_chan_unmask_interrupts()
119 intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF; in mv_chan_get_intr_cause()
128 val = ~(val << (chan->idx * 16)); in mv_chan_clear_eoc_cause()
135 u32 val = 0xFFFF0000 >> (chan->idx * 16); in mv_chan_clear_err_status()
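
The (chan->idx * 16) shifts above reflect how the per-channel interrupt fields are packed: each XOR unit typically exposes two channels, and each channel owns a 16-bit slice of the shared cause/mask registers. A stand-alone worked example of the extraction; the register value 0x00050003 is made up for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t intr_cause = 0x00050003;	/* illustrative raw register value */

	for (int idx = 0; idx < 2; idx++) {
		/* Channel idx owns bits [idx*16 + 15 : idx*16]. */
		uint32_t chan_cause = (intr_cause >> (idx * 16)) & 0xFFFF;

		printf("channel %d cause bits: 0x%04x\n",
		       idx, (unsigned int)chan_cause);
	}
	return 0;			/* prints 0x0003 then 0x0005 */
}
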
174 * mv_chan_start_new_chain - program the engine to operate on new
176 * Caller must hold &mv_chan->lock while calling this function
185 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); in mv_chan_start_new_chain()
187 mv_chan->pending++; in mv_chan_start_new_chain()
188 mv_xor_issue_pending(&mv_chan->dmachan); in mv_chan_start_new_chain()
196 BUG_ON(desc->async_tx.cookie < 0); in mv_desc_run_tx_complete_actions()
198 if (desc->async_tx.cookie > 0) { in mv_desc_run_tx_complete_actions()
199 cookie = desc->async_tx.cookie; in mv_desc_run_tx_complete_actions()
201 dma_descriptor_unmap(&desc->async_tx); in mv_desc_run_tx_complete_actions()
205 dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL); in mv_desc_run_tx_complete_actions()
209 dma_run_dependencies(&desc->async_tx); in mv_desc_run_tx_complete_actions()
220 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, in mv_chan_clean_completed_slots()
223 if (async_tx_test_ack(&iter->async_tx)) { in mv_chan_clean_completed_slots()
224 list_move_tail(&iter->node, &mv_chan->free_slots); in mv_chan_clean_completed_slots()
225 if (!list_empty(&iter->sg_tx_list)) { in mv_chan_clean_completed_slots()
226 list_splice_tail_init(&iter->sg_tx_list, in mv_chan_clean_completed_slots()
227 &mv_chan->free_slots); in mv_chan_clean_completed_slots()
239 __func__, __LINE__, desc, desc->async_tx.flags); in mv_desc_clean_slot()
244 if (!async_tx_test_ack(&desc->async_tx)) { in mv_desc_clean_slot()
246 list_move_tail(&desc->node, &mv_chan->completed_slots); in mv_desc_clean_slot()
247 if (!list_empty(&desc->sg_tx_list)) { in mv_desc_clean_slot()
248 list_splice_tail_init(&desc->sg_tx_list, in mv_desc_clean_slot()
249 &mv_chan->completed_slots); in mv_desc_clean_slot()
252 list_move_tail(&desc->node, &mv_chan->free_slots); in mv_desc_clean_slot()
253 if (!list_empty(&desc->sg_tx_list)) { in mv_desc_clean_slot()
254 list_splice_tail_init(&desc->sg_tx_list, in mv_desc_clean_slot()
255 &mv_chan->free_slots); in mv_desc_clean_slot()
280 list_for_each_entry_safe(iter, _iter, &mv_chan->chain, in mv_chan_slot_cleanup()
284 hw_desc = iter->hw_desc; in mv_chan_slot_cleanup()
285 if (hw_desc->status & XOR_DESC_SUCCESS) { in mv_chan_slot_cleanup()
293 if (iter->async_tx.phys == current_desc) { in mv_chan_slot_cleanup()
298 if (iter->async_tx.phys == current_desc) { in mv_chan_slot_cleanup()
305 if ((busy == 0) && !list_empty(&mv_chan->chain)) { in mv_chan_slot_cleanup()
311 iter = list_entry(mv_chan->chain.next, in mv_chan_slot_cleanup()
316 if (!list_is_last(&iter->node, &mv_chan->chain)) { in mv_chan_slot_cleanup()
321 iter = list_entry(iter->node.next, in mv_chan_slot_cleanup()
330 tasklet_schedule(&mv_chan->irq_tasklet); in mv_chan_slot_cleanup()
336 mv_chan->dmachan.completed_cookie = cookie; in mv_chan_slot_cleanup()
343 spin_lock(&chan->lock); in mv_xor_tasklet()
345 spin_unlock(&chan->lock); in mv_xor_tasklet()
353 spin_lock_bh(&mv_chan->lock); in mv_chan_alloc_slot()
355 if (!list_empty(&mv_chan->free_slots)) { in mv_chan_alloc_slot()
356 iter = list_first_entry(&mv_chan->free_slots, in mv_chan_alloc_slot()
360 list_move_tail(&iter->node, &mv_chan->allocated_slots); in mv_chan_alloc_slot()
362 spin_unlock_bh(&mv_chan->lock); in mv_chan_alloc_slot()
364 /* pre-ack descriptor */ in mv_chan_alloc_slot()
365 async_tx_ack(&iter->async_tx); in mv_chan_alloc_slot()
366 iter->async_tx.cookie = -EBUSY; in mv_chan_alloc_slot()
372 spin_unlock_bh(&mv_chan->lock); in mv_chan_alloc_slot()
375 tasklet_schedule(&mv_chan->irq_tasklet); in mv_chan_alloc_slot()
385 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan); in mv_xor_tx_submit()
392 __func__, sw_desc, &sw_desc->async_tx); in mv_xor_tx_submit()
394 spin_lock_bh(&mv_chan->lock); in mv_xor_tx_submit()
397 if (list_empty(&mv_chan->chain)) in mv_xor_tx_submit()
398 list_move_tail(&sw_desc->node, &mv_chan->chain); in mv_xor_tx_submit()
402 old_chain_tail = list_entry(mv_chan->chain.prev, in mv_xor_tx_submit()
405 list_move_tail(&sw_desc->node, &mv_chan->chain); in mv_xor_tx_submit()
408 &old_chain_tail->async_tx.phys); in mv_xor_tx_submit()
411 mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys); in mv_xor_tx_submit()
420 if (current_desc == old_chain_tail->async_tx.phys) in mv_xor_tx_submit()
428 spin_unlock_bh(&mv_chan->lock); in mv_xor_tx_submit()
444 idx = mv_chan->slots_allocated; in mv_xor_alloc_chan_resources()
453 virt_desc = mv_chan->dma_desc_pool_virt; in mv_xor_alloc_chan_resources()
454 slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE; in mv_xor_alloc_chan_resources()
456 dma_async_tx_descriptor_init(&slot->async_tx, chan); in mv_xor_alloc_chan_resources()
457 slot->async_tx.tx_submit = mv_xor_tx_submit; in mv_xor_alloc_chan_resources()
458 INIT_LIST_HEAD(&slot->node); in mv_xor_alloc_chan_resources()
459 INIT_LIST_HEAD(&slot->sg_tx_list); in mv_xor_alloc_chan_resources()
460 dma_desc = mv_chan->dma_desc_pool; in mv_xor_alloc_chan_resources()
461 slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE; in mv_xor_alloc_chan_resources()
462 slot->idx = idx++; in mv_xor_alloc_chan_resources()
464 spin_lock_bh(&mv_chan->lock); in mv_xor_alloc_chan_resources()
465 mv_chan->slots_allocated = idx; in mv_xor_alloc_chan_resources()
466 list_add_tail(&slot->node, &mv_chan->free_slots); in mv_xor_alloc_chan_resources()
467 spin_unlock_bh(&mv_chan->lock); in mv_xor_alloc_chan_resources()
472 mv_chan->slots_allocated); in mv_xor_alloc_chan_resources()
474 return mv_chan->slots_allocated ? : -ENOMEM; in mv_xor_alloc_chan_resources()
478 * Check if source or destination is a PCIe/IO address (non-SDRAM) and add
485 struct mv_xor_device *xordev = mv_chan->xordev; in mv_xor_add_io_win()
486 void __iomem *base = mv_chan->mmr_high_base; in mv_xor_add_io_win()
493 /* Nothing needs to get done for the Armada 3700 */ in mv_xor_add_io_win()
494 if (xordev->xor_type == XOR_ARMADA_37XX) in mv_xor_add_io_win()
503 if (addr >= xordev->win_start[i] && in mv_xor_add_io_win()
504 addr <= xordev->win_end[i]) { in mv_xor_add_io_win()
524 size -= 1; in mv_xor_add_io_win()
534 i = ffs(~win_enable) - 1; in mv_xor_add_io_win()
536 return -ENOMEM; in mv_xor_add_io_win()
543 xordev->win_start[i] = addr; in mv_xor_add_io_win()
544 xordev->win_end[i] = addr + size; in mv_xor_add_io_win()
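
Grounded in the lines above: win_enable keeps one bit per MBUS window already in use, so ffs(~win_enable) - 1 picks the lowest free slot (both the kernel's ffs() and the libc one are 1-based), and the driver returns -ENOMEM when no usable window is left. A small stand-alone example with a made-up win_enable value:

#include <stdio.h>
#include <strings.h>	/* ffs(); the kernel's ffs() behaves the same way here */

int main(void)
{
	unsigned int win_enable = 0x0b;	/* windows 0, 1 and 3 already in use */
	int i = ffs(~win_enable) - 1;	/* index of the lowest clear bit */

	printf("next free window: %d\n", i);	/* prints 2 */
	return 0;
}
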
578 sw_desc->type = DMA_XOR; in mv_xor_prep_dma_xor()
579 sw_desc->async_tx.flags = flags; in mv_xor_prep_dma_xor()
581 if (mv_chan->op_in_desc == XOR_MODE_IN_DESC) in mv_xor_prep_dma_xor()
583 while (src_cnt--) { in mv_xor_prep_dma_xor()
594 __func__, sw_desc, &sw_desc->async_tx); in mv_xor_prep_dma_xor()
595 return sw_desc ? &sw_desc->async_tx : NULL; in mv_xor_prep_dma_xor()
616 src = mv_chan->dummy_src_addr; in mv_xor_prep_dma_interrupt()
617 dest = mv_chan->dummy_dst_addr; in mv_xor_prep_dma_interrupt()
633 spin_lock_bh(&mv_chan->lock); in mv_xor_free_chan_resources()
637 list_for_each_entry_safe(iter, _iter, &mv_chan->chain, in mv_xor_free_chan_resources()
640 list_move_tail(&iter->node, &mv_chan->free_slots); in mv_xor_free_chan_resources()
642 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, in mv_xor_free_chan_resources()
645 list_move_tail(&iter->node, &mv_chan->free_slots); in mv_xor_free_chan_resources()
647 list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots, in mv_xor_free_chan_resources()
650 list_move_tail(&iter->node, &mv_chan->free_slots); in mv_xor_free_chan_resources()
653 iter, _iter, &mv_chan->free_slots, node) { in mv_xor_free_chan_resources()
654 list_del(&iter->node); in mv_xor_free_chan_resources()
656 mv_chan->slots_allocated--; in mv_xor_free_chan_resources()
660 __func__, mv_chan->slots_allocated); in mv_xor_free_chan_resources()
661 spin_unlock_bh(&mv_chan->lock); in mv_xor_free_chan_resources()
669 * mv_xor_status - poll the status of an XOR transaction
685 spin_lock_bh(&mv_chan->lock); in mv_xor_status()
687 spin_unlock_bh(&mv_chan->lock); in mv_xor_status()
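
mv_xor_status() backs the generic device_tx_status hook, so a consumer normally goes through the dmaengine helpers rather than calling it directly. A hypothetical polling helper; the name my_wait_for_cookie and the 100 ms budget are illustrative:

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int my_wait_for_cookie(struct dma_chan *chan, dma_cookie_t cookie)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	while (time_before(jiffies, timeout)) {
		/* Ends up in the channel's device_tx_status hook,
		 * i.e. mv_xor_status() for this driver. */
		enum dma_status status =
			dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (status == DMA_COMPLETE)
			return 0;
		if (status == DMA_ERROR)
			return -EIO;
		usleep_range(100, 200);
	}

	return -ETIMEDOUT;
}
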
724 chan->idx, intr_cause); in mv_chan_err_interrupt_handler()
740 tasklet_schedule(&chan->irq_tasklet); in mv_xor_interrupt_handler()
751 if (mv_chan->pending >= MV_XOR_THRESHOLD) { in mv_xor_issue_pending()
752 mv_chan->pending = 0; in mv_xor_issue_pending()
774 return -ENOMEM; in mv_chan_memcpy_self_test()
779 return -ENOMEM; in mv_chan_memcpy_self_test()
786 dma_chan = &mv_chan->dmachan; in mv_chan_memcpy_self_test()
788 err = -ENODEV; in mv_chan_memcpy_self_test()
792 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL); in mv_chan_memcpy_self_test()
794 err = -ENOMEM; in mv_chan_memcpy_self_test()
798 src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), in mv_chan_memcpy_self_test()
801 unmap->addr[0] = src_dma; in mv_chan_memcpy_self_test()
803 ret = dma_mapping_error(dma_chan->device->dev, src_dma); in mv_chan_memcpy_self_test()
805 err = -ENOMEM; in mv_chan_memcpy_self_test()
808 unmap->to_cnt = 1; in mv_chan_memcpy_self_test()
810 dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), in mv_chan_memcpy_self_test()
813 unmap->addr[1] = dest_dma; in mv_chan_memcpy_self_test()
815 ret = dma_mapping_error(dma_chan->device->dev, dest_dma); in mv_chan_memcpy_self_test()
817 err = -ENOMEM; in mv_chan_memcpy_self_test()
820 unmap->from_cnt = 1; in mv_chan_memcpy_self_test()
821 unmap->len = PAGE_SIZE; in mv_chan_memcpy_self_test()
826 dev_err(dma_chan->device->dev, in mv_chan_memcpy_self_test()
827 "Self-test cannot prepare operation, disabling\n"); in mv_chan_memcpy_self_test()
828 err = -ENODEV; in mv_chan_memcpy_self_test()
834 dev_err(dma_chan->device->dev, in mv_chan_memcpy_self_test()
835 "Self-test submit error, disabling\n"); in mv_chan_memcpy_self_test()
836 err = -ENODEV; in mv_chan_memcpy_self_test()
846 dev_err(dma_chan->device->dev, in mv_chan_memcpy_self_test()
847 "Self-test copy timed out, disabling\n"); in mv_chan_memcpy_self_test()
848 err = -ENODEV; in mv_chan_memcpy_self_test()
852 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, in mv_chan_memcpy_self_test()
855 dev_err(dma_chan->device->dev, in mv_chan_memcpy_self_test()
856 "Self-test copy failed compare, disabling\n"); in mv_chan_memcpy_self_test()
857 err = -ENODEV; in mv_chan_memcpy_self_test()
891 while (src_idx--) in mv_chan_xor_self_test()
893 return -ENOMEM; in mv_chan_xor_self_test()
899 while (src_idx--) in mv_chan_xor_self_test()
901 return -ENOMEM; in mv_chan_xor_self_test()
919 dma_chan = &mv_chan->dmachan; in mv_chan_xor_self_test()
921 err = -ENODEV; in mv_chan_xor_self_test()
925 unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1, in mv_chan_xor_self_test()
928 err = -ENOMEM; in mv_chan_xor_self_test()
934 unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], in mv_chan_xor_self_test()
936 dma_srcs[i] = unmap->addr[i]; in mv_chan_xor_self_test()
937 ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]); in mv_chan_xor_self_test()
939 err = -ENOMEM; in mv_chan_xor_self_test()
942 unmap->to_cnt++; in mv_chan_xor_self_test()
945 unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, in mv_chan_xor_self_test()
947 dest_dma = unmap->addr[src_count]; in mv_chan_xor_self_test()
948 ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]); in mv_chan_xor_self_test()
950 err = -ENOMEM; in mv_chan_xor_self_test()
953 unmap->from_cnt = 1; in mv_chan_xor_self_test()
954 unmap->len = PAGE_SIZE; in mv_chan_xor_self_test()
959 dev_err(dma_chan->device->dev, in mv_chan_xor_self_test()
960 "Self-test cannot prepare operation, disabling\n"); in mv_chan_xor_self_test()
961 err = -ENODEV; in mv_chan_xor_self_test()
967 dev_err(dma_chan->device->dev, in mv_chan_xor_self_test()
968 "Self-test submit error, disabling\n"); in mv_chan_xor_self_test()
969 err = -ENODEV; in mv_chan_xor_self_test()
979 dev_err(dma_chan->device->dev, in mv_chan_xor_self_test()
980 "Self-test xor timed out, disabling\n"); in mv_chan_xor_self_test()
981 err = -ENODEV; in mv_chan_xor_self_test()
985 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, in mv_chan_xor_self_test()
990 dev_err(dma_chan->device->dev, in mv_chan_xor_self_test()
991 "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n", in mv_chan_xor_self_test()
993 err = -ENODEV; in mv_chan_xor_self_test()
1003 while (src_idx--) in mv_chan_xor_self_test()
1012 struct device *dev = mv_chan->dmadev.dev; in mv_xor_channel_remove()
1014 dma_async_device_unregister(&mv_chan->dmadev); in mv_xor_channel_remove()
1017 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); in mv_xor_channel_remove()
1018 dma_unmap_single(dev, mv_chan->dummy_src_addr, in mv_xor_channel_remove()
1020 dma_unmap_single(dev, mv_chan->dummy_dst_addr, in mv_xor_channel_remove()
1023 list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels, in mv_xor_channel_remove()
1025 list_del(&chan->device_node); in mv_xor_channel_remove()
1028 free_irq(mv_chan->irq, mv_chan); in mv_xor_channel_remove()
1042 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); in mv_xor_channel_add()
1044 return ERR_PTR(-ENOMEM); in mv_xor_channel_add()
1046 mv_chan->idx = idx; in mv_xor_channel_add()
1047 mv_chan->irq = irq; in mv_xor_channel_add()
1048 if (xordev->xor_type == XOR_ORION) in mv_xor_channel_add()
1049 mv_chan->op_in_desc = XOR_MODE_IN_REG; in mv_xor_channel_add()
1051 mv_chan->op_in_desc = XOR_MODE_IN_DESC; in mv_xor_channel_add()
1053 dma_dev = &mv_chan->dmadev; in mv_xor_channel_add()
1054 dma_dev->dev = &pdev->dev; in mv_xor_channel_add()
1055 mv_chan->xordev = xordev; in mv_xor_channel_add()
1059 * a DMA_INTERRUPT operation as a minimum-sized XOR operation. in mv_xor_channel_add()
1060 * Hence, we only need to map the buffers at initialization-time. in mv_xor_channel_add()
1062 mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev, in mv_xor_channel_add()
1063 mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE); in mv_xor_channel_add()
1064 if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_src_addr)) in mv_xor_channel_add()
1065 return ERR_PTR(-ENOMEM); in mv_xor_channel_add()
1067 mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev, in mv_xor_channel_add()
1068 mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE); in mv_xor_channel_add()
1069 if (dma_mapping_error(dma_dev->dev, mv_chan->dummy_dst_addr)) { in mv_xor_channel_add()
1070 ret = -ENOMEM; in mv_xor_channel_add()
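
As the comment above says, this driver implements DMA_INTERRUPT as a minimum-sized XOR between the two dummy buffers mapped here, which is why they only need to be mapped once at channel setup. From the consumer side a pure interrupt descriptor is just a completion fence; a hypothetical sketch (my_queue_fence is not part of this driver):

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int my_queue_fence(struct dma_chan *chan,
			  dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *tx;

	/* No data is moved; the descriptor only exists so that its
	 * callback fires once earlier work on this channel has completed. */
	tx = dmaengine_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = cb;
	tx->callback_param = cb_param;

	if (dma_submit_error(dmaengine_submit(tx)))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
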
1079 mv_chan->dma_desc_pool_virt = in mv_xor_channel_add()
1080 dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool, in mv_xor_channel_add()
1082 if (!mv_chan->dma_desc_pool_virt) { in mv_xor_channel_add()
1083 ret = -ENOMEM; in mv_xor_channel_add()
1088 dma_dev->cap_mask = cap_mask; in mv_xor_channel_add()
1090 INIT_LIST_HEAD(&dma_dev->channels); in mv_xor_channel_add()
1093 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; in mv_xor_channel_add()
1094 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; in mv_xor_channel_add()
1095 dma_dev->device_tx_status = mv_xor_status; in mv_xor_channel_add()
1096 dma_dev->device_issue_pending = mv_xor_issue_pending; in mv_xor_channel_add()
1099 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) in mv_xor_channel_add()
1100 dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt; in mv_xor_channel_add()
1101 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) in mv_xor_channel_add()
1102 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; in mv_xor_channel_add()
1103 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { in mv_xor_channel_add()
1104 dma_dev->max_xor = 8; in mv_xor_channel_add()
1105 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; in mv_xor_channel_add()
1108 mv_chan->mmr_base = xordev->xor_base; in mv_xor_channel_add()
1109 mv_chan->mmr_high_base = xordev->xor_high_base; in mv_xor_channel_add()
1110 tasklet_setup(&mv_chan->irq_tasklet, mv_xor_tasklet); in mv_xor_channel_add()
1115 ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler, in mv_xor_channel_add()
1116 0, dev_name(&pdev->dev), mv_chan); in mv_xor_channel_add()
1122 if (mv_chan->op_in_desc == XOR_MODE_IN_DESC) in mv_xor_channel_add()
1127 spin_lock_init(&mv_chan->lock); in mv_xor_channel_add()
1128 INIT_LIST_HEAD(&mv_chan->chain); in mv_xor_channel_add()
1129 INIT_LIST_HEAD(&mv_chan->completed_slots); in mv_xor_channel_add()
1130 INIT_LIST_HEAD(&mv_chan->free_slots); in mv_xor_channel_add()
1131 INIT_LIST_HEAD(&mv_chan->allocated_slots); in mv_xor_channel_add()
1132 mv_chan->dmachan.device = dma_dev; in mv_xor_channel_add()
1133 dma_cookie_init(&mv_chan->dmachan); in mv_xor_channel_add()
1135 list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels); in mv_xor_channel_add()
1137 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { in mv_xor_channel_add()
1139 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); in mv_xor_channel_add()
1144 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { in mv_xor_channel_add()
1146 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); in mv_xor_channel_add()
1151 dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n", in mv_xor_channel_add()
1152 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode", in mv_xor_channel_add()
1153 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", in mv_xor_channel_add()
1154 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", in mv_xor_channel_add()
1155 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); in mv_xor_channel_add()
1164 free_irq(mv_chan->irq, mv_chan); in mv_xor_channel_add()
1166 dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE, in mv_xor_channel_add()
1167 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); in mv_xor_channel_add()
1169 dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr, in mv_xor_channel_add()
1172 dma_unmap_single(dma_dev->dev, mv_chan->dummy_src_addr, in mv_xor_channel_add()
1182 void __iomem *base = xordev->xor_high_base; in mv_xor_conf_mbus_windows()
1193 for (i = 0; i < dram->num_cs; i++) { in mv_xor_conf_mbus_windows()
1194 const struct mbus_dram_window *cs = dram->cs + i; in mv_xor_conf_mbus_windows()
1196 writel((cs->base & 0xffff0000) | in mv_xor_conf_mbus_windows()
1197 (cs->mbus_attr << 8) | in mv_xor_conf_mbus_windows()
1198 dram->mbus_dram_target_id, base + WINDOW_BASE(i)); in mv_xor_conf_mbus_windows()
1199 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); in mv_xor_conf_mbus_windows()
1202 xordev->win_start[i] = cs->base; in mv_xor_conf_mbus_windows()
1203 xordev->win_end[i] = cs->base + cs->size - 1; in mv_xor_conf_mbus_windows()
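
The loop above packs each DRAM chip-select into a WINDOW_BASE/WINDOW_SIZE register pair: the base register keeps the top 16 address bits plus the MBUS attribute and target id, and the size register holds (size - 1) truncated to its top 16 bits. A stand-alone rendering of that encoding; the chip-select values (0x40000000 base, 512 MiB, attribute 0x0e, target 0) are illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cs_base   = 0x40000000;	/* illustrative CS base */
	uint32_t cs_size   = 0x20000000;	/* illustrative 512 MiB */
	uint32_t mbus_attr = 0x0e;		/* illustrative attribute */
	uint32_t target_id = 0x00;		/* illustrative target id */

	uint32_t base_reg = (cs_base & 0xffff0000) | (mbus_attr << 8) | target_id;
	uint32_t size_reg = (cs_size - 1) & 0xffff0000;

	/* prints WINDOW_BASE=0x40000e00 WINDOW_SIZE=0x1fff0000 */
	printf("WINDOW_BASE=0x%08x WINDOW_SIZE=0x%08x\n",
	       (unsigned int)base_reg, (unsigned int)size_reg);
	return 0;
}
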
1218 void __iomem *base = xordev->xor_high_base; in mv_xor_conf_mbus_windows_a3700()
1244 * need to care about synchronizing ->suspend with DMA activity,
1254 struct mv_xor_chan *mv_chan = xordev->channels[i]; in mv_xor_suspend()
1259 mv_chan->saved_config_reg = in mv_xor_suspend()
1261 mv_chan->saved_int_mask_reg = in mv_xor_suspend()
1275 struct mv_xor_chan *mv_chan = xordev->channels[i]; in mv_xor_resume()
1280 writel_relaxed(mv_chan->saved_config_reg, in mv_xor_resume()
1282 writel_relaxed(mv_chan->saved_int_mask_reg, in mv_xor_resume()
1286 if (xordev->xor_type == XOR_ARMADA_37XX) { in mv_xor_resume()
1299 { .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
1300 { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
1301 { .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
1311 struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev); in mv_xor_probe()
1316 dev_notice(&pdev->dev, "Marvell shared XOR driver\n"); in mv_xor_probe()
1318 xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL); in mv_xor_probe()
1320 return -ENOMEM; in mv_xor_probe()
1324 return -ENODEV; in mv_xor_probe()
1326 xordev->xor_base = devm_ioremap(&pdev->dev, res->start, in mv_xor_probe()
1328 if (!xordev->xor_base) in mv_xor_probe()
1329 return -EBUSY; in mv_xor_probe()
1333 return -ENODEV; in mv_xor_probe()
1335 xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start, in mv_xor_probe()
1337 if (!xordev->xor_high_base) in mv_xor_probe()
1338 return -EBUSY; in mv_xor_probe()
1345 * setting up. In the non-DT case it can only be the legacy one. in mv_xor_probe()
1347 xordev->xor_type = XOR_ORION; in mv_xor_probe()
1348 if (pdev->dev.of_node) in mv_xor_probe()
1349 xordev->xor_type = (uintptr_t)device_get_match_data(&pdev->dev); in mv_xor_probe()
1352 * (Re-)program MBUS remapping windows if we are asked to. in mv_xor_probe()
1354 if (xordev->xor_type == XOR_ARMADA_37XX) { in mv_xor_probe()
1365 xordev->clk = clk_get(&pdev->dev, NULL); in mv_xor_probe()
1366 if (!IS_ERR(xordev->clk)) in mv_xor_probe()
1367 clk_prepare_enable(xordev->clk); in mv_xor_probe()
1374 * separate engines when possible. For dual-CPU Armada 3700 in mv_xor_probe()
1378 if (xordev->xor_type == XOR_ARMADA_37XX) in mv_xor_probe()
1388 if (pdev->dev.of_node) { in mv_xor_probe()
1391 for_each_child_of_node_scoped(pdev->dev.of_node, np) { in mv_xor_probe()
1406 ret = -ENODEV; in mv_xor_probe()
1418 xordev->channels[i] = chan; in mv_xor_probe()
1421 } else if (pdata && pdata->channels) { in mv_xor_probe()
1427 cd = &pdata->channels[i]; in mv_xor_probe()
1435 cd->cap_mask, irq); in mv_xor_probe()
1441 xordev->channels[i] = chan; in mv_xor_probe()
1449 if (xordev->channels[i]) { in mv_xor_probe()
1450 mv_xor_channel_remove(xordev->channels[i]); in mv_xor_probe()
1451 if (pdev->dev.of_node) in mv_xor_probe()
1452 irq_dispose_mapping(xordev->channels[i]->irq); in mv_xor_probe()
1455 if (!IS_ERR(xordev->clk)) { in mv_xor_probe()
1456 clk_disable_unprepare(xordev->clk); in mv_xor_probe()
1457 clk_put(xordev->clk); in mv_xor_probe()