/linux/drivers/dma/

  mmp_pdma.c
      99  struct dma_async_tx_descriptor async_tx;  (member)
     190  container_of(tx, struct mmp_pdma_desc_sw, async_tx)
     491  pdev->ops->write_next_addr(chan->phy, desc->async_tx.phys);  in start_pending_queue()
     509  cookie = dma_cookie_assign(&child->async_tx);  in mmp_pdma_tx_submit()
     533  dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);  in mmp_pdma_alloc_descriptor()
     535  desc->async_tx.tx_submit = mmp_pdma_tx_submit;  in mmp_pdma_alloc_descriptor()
     536  desc->async_tx.phys = pdesc;  in mmp_pdma_alloc_descriptor()
     579  dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);  in mmp_pdma_free_desc_list()
     644  new->async_tx.phys);  in mmp_pdma_prep_memcpy()
     646  new->async_tx.cookie = 0;  in mmp_pdma_prep_memcpy()
     [all …]

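The hits above show the pattern most of these drivers share: a driver-private descriptor embeds a struct dma_async_tx_descriptor as its async_tx member and wires it up when the descriptor is carved out of a DMA pool. A minimal sketch of that allocation path, using hypothetical foo_* names (none of this is taken from a specific driver):

#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical hardware descriptor layout (32-bit engine assumed). */
struct foo_hw_desc {
	u32 src;
	u32 dst;
	u32 len;
	u32 next;
};

/* Hypothetical channel: wraps the generic struct dma_chan. */
struct foo_chan {
	struct dma_chan chan;
	struct dma_pool *desc_pool;
	spinlock_t lock;
	struct list_head pending_list;
};

/* Driver descriptor: the generic descriptor is embedded as "async_tx",
 * mirroring the mmp_pdma/fsldma hits above. */
struct foo_desc_sw {
	struct foo_hw_desc hw;
	struct list_head node;
	struct dma_async_tx_descriptor async_tx;
};

static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx);

static struct foo_desc_sw *foo_alloc_descriptor(struct foo_chan *chan)
{
	struct foo_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc)
		return NULL;

	/* Tie the embedded descriptor to the channel and hook up submit. */
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	desc->async_tx.tx_submit = foo_tx_submit;
	desc->async_tx.phys = pdesc;	/* bus address of the hw descriptor */

	return desc;
}

The phys field matters because these drivers chain hardware descriptors by bus address, as the write_next_addr()/set_desc_next() hits show.
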
  mv_xor.c
      43  container_of(tx, struct mv_xor_desc_slot, async_tx)
     185  mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);  in mv_chan_start_new_chain()
     196  BUG_ON(desc->async_tx.cookie < 0);  in mv_desc_run_tx_complete_actions()
     198  if (desc->async_tx.cookie > 0) {  in mv_desc_run_tx_complete_actions()
     199  cookie = desc->async_tx.cookie;  in mv_desc_run_tx_complete_actions()
     201  dma_descriptor_unmap(&desc->async_tx);  in mv_desc_run_tx_complete_actions()
     205  dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);  in mv_desc_run_tx_complete_actions()
     209  dma_run_dependencies(&desc->async_tx);  in mv_desc_run_tx_complete_actions()
     223  if (async_tx_test_ack(&iter->async_tx)) {  in mv_chan_clean_completed_slots()
     239  __func__, __LINE__, desc, desc->async_tx.flags);  in mv_desc_clean_slot()
     [all …]

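mv_xor (and the ppc4xx adma driver further down) run the same sequence on every finished descriptor: record the cookie, unmap the transfer, invoke the client callback, then kick any dependent async_tx operations. A hedged sketch of that sequence with hypothetical bar_* names; dmaengine_desc_get_callback_invoke() comes from the drivers/dma private dmaengine.h helper header:

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* drivers/dma private helpers */

/* Sketch of per-descriptor completion actions, modelled on the
 * mv_desc_run_tx_complete_actions() hits above. */
struct bar_desc {
	struct dma_async_tx_descriptor async_tx;
	struct list_head node;
};

static dma_cookie_t bar_run_tx_complete_actions(struct bar_desc *desc,
						dma_cookie_t cookie)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;

	BUG_ON(txd->cookie < 0);

	if (txd->cookie > 0) {
		cookie = txd->cookie;	/* newest completed cookie so far */

		dma_descriptor_unmap(txd);
		/* Invoke the client's completion callback, if it set one. */
		dmaengine_desc_get_callback_invoke(txd, NULL);
	}

	/* Start any async_tx operations that depend on this one. */
	dma_run_dependencies(txd);

	return cookie;
}
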
  fsldma.c
     397  set_desc_next(chan, &tail->hw, desc->async_tx.phys);  in append_ld_queue()
     429  cookie = dma_cookie_assign(&child->async_tx);  in fsl_dma_tx_submit()
     450  dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);  in fsl_dma_free_descriptor()
     471  dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);  in fsl_dma_alloc_descriptor()
     472  desc->async_tx.tx_submit = fsl_dma_tx_submit;  in fsl_dma_alloc_descriptor()
     473  desc->async_tx.phys = pdesc;  in fsl_dma_alloc_descriptor()
     494  if (async_tx_test_ack(&desc->async_tx))  in fsldma_clean_completed_descriptor()
     510  struct dma_async_tx_descriptor *txd = &desc->async_tx;  in fsldma_run_tx_complete_actions()
     548  if (!async_tx_test_ack(&desc->async_tx)) {  in fsldma_clean_running_descriptor()
     557  dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);  in fsldma_clean_running_descriptor()
     [all …]

  nbpfaxi.c
     151  struct dma_async_tx_descriptor async_tx;  (member)
     636  running = chan->running ? chan->running->async_tx.cookie : -EINVAL;  in nbpf_tx_status()
     647  if (desc->async_tx.cookie == cookie) {  in nbpf_tx_status()
     654  if (desc->async_tx.cookie == cookie) {  in nbpf_tx_status()
     674  struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx);  in nbpf_tx_submit()
     725  dma_async_tx_descriptor_init(&desc->async_tx, dchan);  in nbpf_desc_page_alloc()
     726  desc->async_tx.tx_submit = nbpf_tx_submit;  in nbpf_desc_page_alloc()
     777  if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) {  in nbpf_scan_acked()
     870  __func__, desc, desc->async_tx.cookie);  in nbpf_chan_idle()
     968  desc->async_tx.flags = flags;  in nbpf_prep_sg()
     [all …]

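nbpf_tx_status() above shows how a driver can answer status queries by matching the requested cookie against the async_tx.cookie of descriptors still on its queues. A hedged sketch with hypothetical baz_* names (baz_desc_residue() is assumed to compute the bytes left for a descriptor); dma_cookie_status() and dma_set_residue() are the drivers/dma private helpers:

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* dma_cookie_status(), dma_set_residue() */

struct baz_desc {
	struct dma_async_tx_descriptor async_tx;
	struct list_head node;
};

struct baz_chan {
	struct dma_chan dma_chan;
	spinlock_t lock;
	struct list_head active_list;
};

/* Hypothetical helper: bytes not yet transferred for this descriptor. */
static u32 baz_desc_residue(struct baz_desc *desc);

/* Sketch of a device_tx_status implementation that walks the channel's
 * queue looking for the descriptor whose cookie is being queried. */
static enum dma_status baz_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct baz_chan *chan = container_of(dchan, struct baz_chan, dma_chan);
	enum dma_status status = dma_cookie_status(dchan, cookie, state);
	struct baz_desc *desc;
	unsigned long flags;

	if (status == DMA_COMPLETE || !state)
		return status;

	spin_lock_irqsave(&chan->lock, flags);
	list_for_each_entry(desc, &chan->active_list, node) {
		if (desc->async_tx.cookie == cookie) {
			dma_set_residue(state, baz_desc_residue(desc));
			break;
		}
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	return status;
}
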
  fsldma.h
     104  struct dma_async_tx_descriptor async_tx;  (member)
     193  #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)

  mv_xor.h
     147  struct dma_async_tx_descriptor async_tx;  (member)

  fsl_raid.h
     294  struct dma_async_tx_descriptor async_tx;  (member)

  Kconfig
     784  bool "Async_tx: Offload support for the async_tx api"
     787  This allows the async_tx api to take advantage of offload engines for

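tx_to_fsl_desc() above is the other half of the embedding pattern: when the core calls the driver's tx_submit hook with the generic descriptor, container_of() recovers the driver-private wrapper. A hedged sketch of such a hook, continuing the hypothetical foo_* names from the earlier allocation sketch:

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* dma_cookie_assign() */

/* Recover the driver wrapper from the embedded async_tx member. */
#define tx_to_foo_desc(tx) container_of(tx, struct foo_desc_sw, async_tx)

/* Sketch of a tx_submit hook: assign a cookie and move the descriptor onto
 * the channel's pending list under the channel lock. */
static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct foo_chan *chan = container_of(tx->chan, struct foo_chan, chan);
	struct foo_desc_sw *desc = tx_to_foo_desc(tx);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	cookie = dma_cookie_assign(tx);		/* also stored in tx->cookie */
	list_add_tail(&desc->node, &chan->pending_list);
	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}
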
/linux/drivers/dma/sh/

  shdma-base.c
      73  container_of(tx, struct shdma_desc, async_tx);  in shdma_tx_submit()
      92  chunk->async_tx.cookie > 0 ||  in shdma_tx_submit()
      93  chunk->async_tx.cookie == -EBUSY ||  in shdma_tx_submit()
      98  chunk->async_tx.callback = callback;  in shdma_tx_submit()
      99  chunk->async_tx.callback_param = tx->callback_param;  in shdma_tx_submit()
     102  chunk->async_tx.callback = NULL;  in shdma_tx_submit()
     108  tx->cookie, &chunk->async_tx, schan->id);  in shdma_tx_submit()
     250  dma_async_tx_descriptor_init(&desc->async_tx,  in shdma_alloc_chan_resources()
     252  desc->async_tx.tx_submit = shdma_tx_submit;  in shdma_alloc_chan_resources()
     350  struct dma_async_tx_descriptor *tx = &desc->async_tx;  in __ld_cleanup()
     [all …]

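shdma_tx_submit() above deals with transfers that were split into several hardware chunks: every chunk receives the submitted cookie, but only the last chunk keeps the client callback, so completion is reported once per logical transfer. A loose sketch of that idea with hypothetical qux_* names (the real shdma code tracks chunks on the channel's list rather than a per-descriptor one):

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* dma_cookie_assign() */

struct qux_desc {
	struct dma_async_tx_descriptor async_tx;
	struct list_head node;		/* position on desc->chunks, then chan->pending */
	struct list_head chunks;	/* chunks making up this transfer */
};

struct qux_chan {
	struct dma_chan dma_chan;
	spinlock_t lock;
	struct list_head pending;
};

/* Sketch: give every chunk of a split transfer the submitted cookie, but
 * attach the completion callback only to the final chunk. */
static dma_cookie_t qux_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct qux_desc *desc = container_of(tx, struct qux_desc, async_tx);
	struct qux_chan *chan = container_of(tx->chan, struct qux_chan, dma_chan);
	struct qux_desc *chunk;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_for_each_entry(chunk, &desc->chunks, node) {
		chunk->async_tx.cookie = cookie;
		if (list_is_last(&chunk->node, &desc->chunks)) {
			/* Only the last chunk reports completion to the client. */
			chunk->async_tx.callback = tx->callback;
			chunk->async_tx.callback_param = tx->callback_param;
		} else {
			chunk->async_tx.callback = NULL;
		}
	}
	/* Hand the whole chunk list to the hardware queue in one go. */
	list_splice_tail_init(&desc->chunks, &chan->pending);
	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}
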
  rcar-dmac.c
      73  struct dma_async_tx_descriptor async_tx;  (member)
      94  #define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx)
     440  else if (desc->async_tx.callback)  in rcar_dmac_chan_start_xfer()
     544  dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);  in rcar_dmac_desc_alloc()
     545  desc->async_tx.tx_submit = rcar_dmac_tx_submit;  in rcar_dmac_desc_alloc()
     599  if (async_tx_test_ack(&desc->async_tx)) {  in rcar_dmac_desc_recycle_acked()
     950  desc->async_tx.flags = dma_flags;  in rcar_dmac_chan_prep_sg()
     951  desc->async_tx.cookie = -EBUSY;  in rcar_dmac_chan_prep_sg()
    1047  return &desc->async_tx;  in rcar_dmac_chan_prep_sg()
    1351  if (cookie != desc->async_tx.cookie) {  in rcar_dmac_chan_get_residue()
     [all …]

  shdma.h
      57  #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)

  shdmac.c
     289  sdesc->async_tx.cookie, sh_chan->shdma_chan.id,  in sh_dmae_start_xfer()

/linux/drivers/dma/xilinx/

  xilinx_dma.c
     373  struct dma_async_tx_descriptor async_tx;  (member)
     530  container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
    1036  dmaengine_desc_get_callback(&desc->async_tx, &cb);  in xilinx_dma_chan_handle_cyclic()
    1079  dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);  in xilinx_dma_chan_desc_cleanup()
    1083  dma_run_dependencies(&desc->async_tx);  in xilinx_dma_chan_desc_cleanup()
    1504  head_desc->async_tx.phys);  in xilinx_cdma_start_transfer()
    1572  head_desc->async_tx.phys);  in xilinx_dma_start_transfer()
    1656  head_desc->async_tx.phys);  in xilinx_mcdma_start_transfer()
    1741  dma_cookie_complete(&desc->async_tx);  in xilinx_dma_complete_descriptor()
    1953  tail_segment->hw.next_desc = (u32)desc->async_tx.phys;  in append_desc_queue()
     [all …]

  zynqmp_dma.c
     146  async_tx)
     186  struct dma_async_tx_descriptor async_tx;  (member)
     497  dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);  in zynqmp_dma_alloc_chan_resources()
     498  desc->async_tx.tx_submit = zynqmp_dma_tx_submit;  in zynqmp_dma_alloc_chan_resources()
     625  dmaengine_desc_get_callback(&desc->async_tx, &cb);  in zynqmp_dma_chan_desc_cleanup()
     652  dma_cookie_complete(&desc->async_tx);  in zynqmp_dma_complete_descriptor()
     876  async_tx_ack(&first->async_tx);  in zynqmp_dma_prep_memcpy()
     877  first->async_tx.flags = (enum dma_ctrl_flags)flags;  in zynqmp_dma_prep_memcpy()
     878  return &first->async_tx;  in zynqmp_dma_prep_memcpy()

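zynqmp_dma_prep_memcpy() above ends the way every prep callback does: store the caller's flags on the embedded descriptor and return a pointer to it, so the client later submits exactly that descriptor. A hedged sketch of a device_prep_dma_memcpy implementation, reusing the hypothetical foo_* types from the earlier sketches:

/* Sketch of a device_prep_dma_memcpy callback for the hypothetical foo
 * driver: program the hardware descriptor, record the flags, and hand back
 * the embedded async_tx for the client to submit. */
static struct dma_async_tx_descriptor *
foo_prep_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct foo_chan *chan = container_of(dchan, struct foo_chan, chan);
	struct foo_desc_sw *desc;

	if (!len)
		return NULL;

	desc = foo_alloc_descriptor(chan);
	if (!desc)
		return NULL;

	/* Fill the (hypothetical, 32-bit) hardware descriptor. */
	desc->hw.src = lower_32_bits(src);
	desc->hw.dst = lower_32_bits(dst);
	desc->hw.len = (u32)len;

	desc->async_tx.flags = (enum dma_ctrl_flags)flags;
	desc->async_tx.cookie = -EBUSY;		/* not submitted yet */

	return &desc->async_tx;
}
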
/linux/Documentation/crypto/

  async-tx-api.rst
      31  The async_tx API provides methods for describing a chain of asynchronous
     106  async_tx call will implicitly set the acknowledged state.
     153  #include <linux/async_tx.h>
     191  See include/linux/async_tx.h for more information on the flags. See the
     202  accommodate assumptions made by applications using the async_tx API:
     263  include/linux/async_tx.h:
     264  core header file for the async_tx api
     265  crypto/async_tx/async_tx.c:
     266  async_tx interface to dmaengine and common code
     267  crypto/async_tx/async_memcpy.c:
     [all …]

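On the client side that async-tx-api.rst documents, usage revolves around init_async_submit() plus the async_* operations, which offload to a DMA engine when one is available and otherwise fall back to a synchronous CPU implementation. A hedged sketch of a single offloaded copy (example_async_copy() and its arguments are made up for illustration; see the document and include/linux/async_tx.h for the authoritative flag semantics):

#include <linux/async_tx.h>

/* Sketch: copy PAGE_SIZE bytes from src_page to dst_page through the
 * async_tx API; the copy is offloaded if a memcpy-capable channel exists,
 * otherwise performed synchronously on the CPU. */
static void example_async_copy(struct page *dst_page, struct page *src_page,
			       dma_async_tx_callback done, void *done_arg)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	/* No dependency, auto-ack the descriptor, call done() on completion. */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, done, done_arg, NULL);

	tx = async_memcpy(dst_page, src_page, 0, 0, PAGE_SIZE, &submit);

	/* Flush submitted operations out to the hardware. */
	async_tx_issue_pending_all();

	/* tx is NULL when the copy was performed synchronously. */
	(void)tx;
}
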
/linux/drivers/dma/ppc4xx/

  adma.c
    1466  BUG_ON(desc->async_tx.cookie < 0);  in ppc440spe_adma_run_tx_complete_actions()
    1467  if (desc->async_tx.cookie > 0) {  in ppc440spe_adma_run_tx_complete_actions()
    1468  cookie = desc->async_tx.cookie;  in ppc440spe_adma_run_tx_complete_actions()
    1469  desc->async_tx.cookie = 0;  in ppc440spe_adma_run_tx_complete_actions()
    1471  dma_descriptor_unmap(&desc->async_tx);  in ppc440spe_adma_run_tx_complete_actions()
    1475  dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);  in ppc440spe_adma_run_tx_complete_actions()
    1479  dma_run_dependencies(&desc->async_tx);  in ppc440spe_adma_run_tx_complete_actions()
    1493  if (!async_tx_test_ack(&desc->async_tx))  in ppc440spe_adma_clean_slot()
    1559  iter->async_tx.cookie, iter->idx, busy, iter->phys,  in __ppc440spe_adma_slot_cleanup()
    1561  async_tx_test_ack(&iter->async_tx));  in __ppc440spe_adma_slot_cleanup()
     [all …]

  adma.h
      20  container_of(tx, struct ppc440spe_adma_desc_slot, async_tx)
     147  struct dma_async_tx_descriptor async_tx;  (member)

/linux/crypto/async_tx/

  Makefile
       2  obj-$(CONFIG_ASYNC_CORE) += async_tx.o

/linux/include/linux/platform_data/

  dma-iop32x.h
      92  struct dma_async_tx_descriptor async_tx;  (member)

/linux/drivers/dma/sf-pdma/

  sf-pdma.h
      83  struct dma_async_tx_descriptor *async_tx;  (member)

/linux/include/linux/

  shdma-base.h
      48  struct dma_async_tx_descriptor async_tx;  (member)

/linux/Documentation/driver-api/dmaengine/

  client.rst
       7  .. note:: For DMA Engine usage in async_tx please see:
     148  Although the async_tx API specifies that completion callback

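client.rst documents the plain dmaengine API that sits underneath async_tx; a hedged sketch of a minimal memcpy client along the lines it describes (example_dma_copy() is illustrative, and error handling is trimmed):

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Sketch of a dmaengine memcpy client: grab any memcpy-capable channel,
 * prepare a descriptor, submit it, and kick the engine. */
static int example_dma_copy(dma_addr_t dst, dma_addr_t src, size_t len,
			    dma_async_tx_callback done, void *arg)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	desc->callback = done;			/* runs from the driver's completion path */
	desc->callback_param = arg;

	cookie = dmaengine_submit(desc);	/* calls the driver's tx_submit hook */
	if (dma_submit_error(cookie)) {
		dma_release_channel(chan);
		return -EIO;
	}

	dma_async_issue_pending(chan);		/* start the channel's pending queue */
	return 0;	/* releasing the channel after completion is left out of the sketch */
}

DMA_CTRL_ACK tells the driver the client will not chain further operations onto this descriptor, which is exactly the acknowledged state the async_tx cleanup paths above test with async_tx_test_ack().
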
/linux/crypto/

  Makefile
     199  obj-$(CONFIG_ASYNC_CORE) += async_tx/

  Kconfig
       9  # async_tx api: hardware offloaded memory transfer/transform support
      11  source "crypto/async_tx/Kconfig"

/linux/

  MAINTAINERS
    3990  F: crypto/async_tx/
    3991  F: include/linux/async_tx.h