| /linux/drivers/gpu/drm/msm/ |
| msm_gem_submit.c |
    25   #define SUBMIT_ERROR(err, submit, fmt, ...) \    [argument]
    26           UERR(err, (submit)->dev, fmt, ##__VA_ARGS__)
    in submit_create():
    38   struct msm_gem_submit *submit;    [local]
    42   sz = struct_size(submit, bos, nr_bos) +
    43           ((u64)nr_cmds * sizeof(submit->cmd[0]));
    48   submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
    49   if (!submit)
    52   submit->hw_fence = msm_fence_alloc();
    53   if (IS_ERR(submit->hw_fence)) {
    54           ret = PTR_ERR(submit->hw_fence);
    [all …]
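The submit_create() lines above size a single allocation that holds the submit object plus its trailing bos[] array and the cmd[] array. Below is a minimal userspace sketch of that trailing-array pattern, using plain sizeof arithmetic in place of the kernel's overflow-checking struct_size() helper; the struct layout, field names, and the assumption that cmd[] sits directly after bos[] are illustrative, not the driver's actual definitions.

    /*
     * Sketch of the one-allocation layout used in submit_create():
     * [struct][bos[0..nr_bos-1]][cmd[0..nr_cmds-1]]
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fake_bo  { void *obj; uint32_t flags; };
    struct fake_cmd { uint32_t type; uint64_t iova; uint32_t size; };

    struct fake_submit {
            unsigned int nr_bos;
            unsigned int nr_cmds;
            struct fake_cmd *cmd;   /* points into the same allocation */
            struct fake_bo bos[];   /* flexible array, sized at alloc time */
    };

    static struct fake_submit *fake_submit_create(unsigned int nr_bos,
                                                  unsigned int nr_cmds)
    {
            /* One allocation for the struct, the bos[] tail and the cmd array. */
            size_t sz = sizeof(struct fake_submit) +
                        (size_t)nr_bos * sizeof(struct fake_bo) +
                        (size_t)nr_cmds * sizeof(struct fake_cmd);
            struct fake_submit *submit = calloc(1, sz);

            if (!submit)
                    return NULL;

            submit->nr_bos = nr_bos;
            submit->nr_cmds = nr_cmds;
            /* Assumed layout: cmd[] lives right after the bos[] tail. */
            submit->cmd = (struct fake_cmd *)&submit->bos[nr_bos];
            return submit;
    }

    int main(void)
    {
            struct fake_submit *s = fake_submit_create(4, 2);

            if (!s)
                    return 1;
            printf("allocated submit with %u bos and %u cmds\n",
                   s->nr_bos, s->nr_cmds);
            free(s);
            return 0;
    }

Packing everything into one allocation keeps the object and its variable-length tails in a single block and gives a single free path.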
|
| msm_gpu.c |
    in crashstate_get_bos():
    257  static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit)    [argument]
    261  if (msm_context_is_vmbind(submit->queue->ctx)) {
    270  drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm));
    273  drm_gpuvm_for_each_va (vma, submit->vm) {
    284  drm_gpuvm_for_each_va (vma, submit->vm)
    290  drm_gpuvm_for_each_va(vma, submit->vm) {
    303  state->bos = kcalloc(submit->nr_bos,
    306  for (int i = 0; state->bos && i < submit->nr_bos; i++) {
    307  struct drm_gem_object *obj = submit->bos[i].obj;
    308  bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);
    [all …]
|
| msm_gem.h |
    in msm_gem_submit_get():
    489  static inline void msm_gem_submit_get(struct msm_gem_submit *submit)    [argument]
    491  kref_get(&submit->ref);
    in msm_gem_submit_put():
    494  static inline void msm_gem_submit_put(struct msm_gem_submit *submit)    [argument]
    496  kref_put(&submit->ref, __msm_gem_submit_destroy);
    499  void msm_submit_retire(struct msm_gem_submit *submit);
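msm_gem_submit_get()/_put() above are thin wrappers around the kernel's kref API, where the final kref_put() invokes the release callback (__msm_gem_submit_destroy here). A rough, self-contained userspace analogue of that contract, with hypothetical names and C11 atomics standing in for struct kref, might look like this:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for struct msm_gem_submit with an embedded refcount. */
    struct fake_submit {
            atomic_int ref;
            /* ... payload ... */
    };

    static struct fake_submit *fake_submit_create(void)
    {
            struct fake_submit *s = calloc(1, sizeof(*s));

            if (s)
                    atomic_store(&s->ref, 1);  /* creator holds the first reference */
            return s;
    }

    static void fake_submit_get(struct fake_submit *s)
    {
            atomic_fetch_add(&s->ref, 1);
    }

    /* Mirrors kref_put(): the release work runs only on the final put. */
    static void fake_submit_put(struct fake_submit *s)
    {
            if (atomic_fetch_sub(&s->ref, 1) == 1) {
                    printf("last reference dropped, destroying submit\n");
                    free(s);
            }
    }

    int main(void)
    {
            struct fake_submit *s = fake_submit_create();

            fake_submit_get(s);  /* e.g. handed to a retire worker */
            fake_submit_put(s);  /* worker done */
            fake_submit_put(s);  /* creator's reference: triggers destroy */
            return 0;
    }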
|
| /linux/drivers/gpu/drm/virtio/ |
| virtgpu_submit.c |
    in virtio_gpu_do_fence_wait():
    48   static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,    [argument]
    51   u64 context = submit->fence_ctx + submit->ring_idx;
    in virtio_gpu_dma_fence_wait():
    59   static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,    [argument]
    67   err = virtio_gpu_do_fence_wait(submit, f);
    in virtio_gpu_parse_deps():
    89   virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)    [argument]
    91   struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
    129  ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
    134  ret = virtio_gpu_dma_fence_wait(submit, fence);
    141  syncobjs[i] = drm_syncobj_find(submit->file,
    155  submit->num_in_syncobjs = num_in_syncobjs;
    [all …]
|
| /linux/crypto/async_tx/ |
| async_raid6_recov.c |
    in async_sum_product():
    20   size_t len, struct async_submit_ctl *submit)    [argument]
    22   struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
    39   if (submit->flags & ASYNC_TX_FENCE)
    58   async_tx_submit(chan, tx, submit);
    70   async_tx_quiesce(&submit->depend_tx);
    in async_mult():
    89   struct async_submit_ctl *submit)    [argument]
    91   struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
    107  if (submit->flags & ASYNC_TX_FENCE)
    128  async_tx_submit(chan, tx, submit);
    141  async_tx_quiesce(&submit->depend_tx);
    [all …]
|
| async_pq.c |
    in do_async_gen_syndrome():
    39   struct async_submit_ctl *submit)    [argument]
    43   enum async_tx_flags flags_orig = submit->flags;
    44   dma_async_tx_callback cb_fn_orig = submit->cb_fn;
    45   dma_async_tx_callback cb_param_orig = submit->cb_param;
    52   submit->flags = flags_orig;
    59   submit->flags &= ~ASYNC_TX_ACK;
    60   submit->flags |= ASYNC_TX_FENCE;
    61   submit->cb_fn = NULL;
    62   submit->cb_param = NULL;
    64   submit->cb_fn = cb_fn_orig;
    [all …]
|
| raid6test.c |
    in raid6_dual_recov():
    60   struct async_submit_ctl submit;    [local]
    71   init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
    73   disks, bytes, &submit);
    91   init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
    93   tx = async_xor(dest, blocks, 0, count, bytes, &submit);
    95   init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
    97   disks, bytes, &submit);
    102  init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
    104  faila, ptrs, offs, &submit);
    107  init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
    [all …]
|
| async_tx.c |
    in __async_tx_find_channel():
    43   __async_tx_find_channel(struct async_submit_ctl *submit,    [argument]
    46   struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
    in async_tx_submit():
    144  struct async_submit_ctl *submit)    [argument]
    146  struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
    148  tx->callback = submit->cb_fn;
    149  tx->callback_param = submit->cb_param;
    204  if (submit->flags & ASYNC_TX_ACK)
    in async_trigger_callback():
    221  async_trigger_callback(struct async_submit_ctl *submit)    [argument]
    226  struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
    245  async_tx_submit(chan, tx, submit);
    [all …]
|
| async_memcpy.c |
    in async_memcpy():
    34   struct async_submit_ctl *submit)    [argument]
    36   struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
    48   if (submit->cb_fn)
    50   if (submit->flags & ASYNC_TX_FENCE)
    70   async_tx_submit(chan, tx, submit);
    76   async_tx_quiesce(&submit->depend_tx);
    86   async_tx_sync_epilog(submit);
|
| /linux/drivers/gpu/drm/etnaviv/ |
| etnaviv_sched.c |
    in etnaviv_sched_run_job():
    23   struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);    [local]
    27   fence = etnaviv_gpu_submit(submit);
    29   dev_dbg(submit->gpu->dev, "skipping bad job\n");
    in etnaviv_sched_timedout_job():
    37   struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);    [local]
    38   struct etnaviv_gpu *gpu = submit->gpu;
    46   if (dma_fence_is_signaled(submit->out_fence))
    56   if (submit->exec_state == ETNA_PIPE_3D) {
    68   (submit->exec_state == ETNA_PIPE_3D &&
    83   etnaviv_core_dump(submit);
    84   etnaviv_gpu_recover_hang(submit);
    [all …]
|
| etnaviv_dump.c |
    in etnaviv_core_dump():
    118  void etnaviv_core_dump(struct etnaviv_gem_submit *submit)    [argument]
    120  struct etnaviv_gpu *gpu = submit->gpu;
    133  mutex_lock(&submit->mmu_context->lock);
    135  mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);
    142  mmu_size + gpu->buffer.size + submit->cmdbuf.size;
    145  for (i = 0; i < submit->nr_bos; i++) {
    146  obj = submit->bos[i].obj;
    164  mutex_unlock(&submit->mmu_context->lock);
    176  etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
    180  &submit->mmu_context->cmdbuf_mapping));
    [all …]
|
| etnaviv_gpu.h |
    88   struct etnaviv_gem_submit *submit;    [member]
    216  void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit);
    223  struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
|
| /linux/drivers/gpu/drm/lima/ |
| lima_drv.c |
    in lima_ioctl_gem_submit():
    111  struct lima_submit submit = {0};    [local]
    125  bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL);
    129  size = args->nr_bos * sizeof(*submit.bos);
    157  submit.pipe = args->pipe;
    158  submit.bos = bos;
    159  submit.lbos = (void *)bos + size;
    160  submit.nr_bos = args->nr_bos;
    161  submit.task = task;
    162  submit.ctx = ctx;
    163  submit.flags = args->flags;
    [all …]
|
| /linux/fs/iomap/ |
| direct-io.c |
    42   } submit;    [member]
    in iomap_dio_done():
    165  struct task_struct *waiter = dio->submit.waiter;
    167  WRITE_ONCE(dio->submit.waiter, NULL);
    in iomap_dio_bio_iter():
    419  orig_count = iov_iter_count(dio->submit.iter);
    420  iov_iter_truncate(dio->submit.iter, length);
    422  if (!iov_iter_count(dio->submit.iter))
    442  nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
    446  iov_iter_revert(dio->submit.iter, copied);
    460  ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
    493  nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
    [all …]
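The direct-io lines above show the waiter handoff: the completion path loads dio->submit.waiter, clears it with WRITE_ONCE(), and wakes the task that is sleeping until its bios finish. A loose userspace analogue of that handoff, using pthreads and a condition variable in place of the kernel's task wakeup (all names invented for illustration), could be:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static bool io_pending = true;  /* stands in for submit.waiter != NULL */

    static void *completion_thread(void *arg)
    {
            (void)arg;
            sleep(1);                    /* pretend the I/O takes a while */
            pthread_mutex_lock(&lock);
            io_pending = false;          /* analogue of WRITE_ONCE(waiter, NULL) */
            pthread_cond_signal(&cond);  /* analogue of waking the waiter task */
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t thr;

            pthread_create(&thr, NULL, completion_thread, NULL);

            /* Submitter side: sleep until completion clears the flag. */
            pthread_mutex_lock(&lock);
            while (io_pending)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);

            printf("all in-flight I/O completed\n");
            pthread_join(thr, NULL);
            return 0;
    }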
|
| /linux/drivers/gpu/drm/msm/adreno/ |
| a2xx_gpu.c |
    in a2xx_submit():
    13   static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)    [argument]
    15   struct msm_ringbuffer *ring = submit->ring;
    18   for (i = 0; i < submit->nr_cmds; i++) {
    19   switch (submit->cmd[i].type) {
    25   if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
    30   OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
    31   OUT_RING(ring, submit->cmd[i].size);
    38   OUT_RING(ring, submit->seqno);
    47   OUT_RING(ring, submit->seqno);
    549  .submit = a2xx_submit,
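The a2xx ->submit() hook above, like the a3xx-a6xx variants further down, walks submit->cmd[], emits each command buffer's IOVA and size into the ring with OUT_RING(), appears to skip context-restore buffers when submit->queue->ctx->seqno still matches the ring's current context, and ends by writing submit->seqno as the fence value. A simplified, driver-agnostic userspace sketch of that loop (ring layout, packet encodings and all names are invented for illustration) follows:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_DWORDS 64

    /* Toy ring buffer: a flat dword array with a wrapping write cursor. */
    struct toy_ring {
            uint32_t buf[RING_DWORDS];
            unsigned int wptr;
    };

    struct toy_cmd {
            uint64_t iova;   /* GPU address of the command buffer */
            uint32_t size;   /* size in dwords */
    };

    struct toy_submit {
            unsigned int nr_cmds;
            struct toy_cmd cmd[4];
            uint32_t seqno;  /* fence value written after the commands */
    };

    static void out_ring(struct toy_ring *ring, uint32_t data)
    {
            ring->buf[ring->wptr++ % RING_DWORDS] = data;
    }

    /* Emit an "indirect buffer" packet per cmd, then a fence write. */
    static void toy_submit_to_ring(struct toy_ring *ring, struct toy_submit *submit)
    {
            for (unsigned int i = 0; i < submit->nr_cmds; i++) {
                    out_ring(ring, 0x3f000000u);                     /* fake IB opcode */
                    out_ring(ring, (uint32_t)(submit->cmd[i].iova)); /* lower 32 bits */
                    out_ring(ring, submit->cmd[i].size);
            }
            out_ring(ring, 0x2d000000u);  /* fake "write fence" opcode */
            out_ring(ring, submit->seqno);
    }

    int main(void)
    {
            struct toy_ring ring = { .wptr = 0 };
            struct toy_submit submit = {
                    .nr_cmds = 2,
                    .cmd = { { .iova = 0x1000, .size = 8 },
                             { .iova = 0x2000, .size = 16 } },
                    .seqno = 42,
            };

            toy_submit_to_ring(&ring, &submit);
            printf("emitted %u dwords, fence seqno %u\n", ring.wptr, submit.seqno);
            return 0;
    }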
|
| a5xx_gpu.c |
    in a5xx_submit_in_rb():
    66   static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)    [argument]
    70   struct msm_ringbuffer *ring = submit->ring;
    75   for (i = 0; i < submit->nr_cmds; i++) {
    76   switch (submit->cmd[i].type) {
    80   if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
    85   obj = submit->bos[submit->cmd[i].idx].obj;
    86   dwords = submit->cmd[i].size;
    114  a5xx_gpu->last_seqno[ring->id] = submit->seqno;
    123  ring->memptrs->fence = submit->seqno;
    in a5xx_submit():
    127  static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)    [argument]
    [all …]
|
| a6xx_gpu.c |
    in a6xx_set_pagetable():
    202  struct msm_ringbuffer *ring, struct msm_gem_submit *submit)    [argument]
    205  struct msm_context *ctx = submit->queue->ctx;
    206  struct drm_gpuvm *vm = msm_context_vm(submit->dev, ctx);
    224  OUT_RING(ring, submit->seqno - 1);
    in a6xx_submit():
    328  static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)    [argument]
    330  unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
    333  struct msm_ringbuffer *ring = submit->ring;
    338  a6xx_set_pagetable(a6xx_gpu, ring, submit);
    359  for (i = 0; i < submit->nr_cmds; i++) {
    360  switch (submit->cmd[i].type) {
    [all …]
|
| a4xx_gpu.c |
    in a4xx_submit():
    25   static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)    [argument]
    27   struct msm_ringbuffer *ring = submit->ring;
    30   for (i = 0; i < submit->nr_cmds; i++) {
    31   switch (submit->cmd[i].type) {
    37   if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
    42   OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
    43   OUT_RING(ring, submit->cmd[i].size);
    50   OUT_RING(ring, submit->seqno);
    67   OUT_RING(ring, submit->seqno);
    715  .submit = a4xx_submit,
|
| a3xx_gpu.c |
    in a3xx_submit():
    31   static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)    [argument]
    33   struct msm_ringbuffer *ring = submit->ring;
    36   for (i = 0; i < submit->nr_cmds; i++) {
    37   switch (submit->cmd[i].type) {
    43   if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
    48   OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
    49   OUT_RING(ring, submit->cmd[i].size);
    56   OUT_RING(ring, submit->seqno);
    73   OUT_RING(ring, submit->seqno);
    603  .submit = a3xx_submit,
|
| /linux/drivers/gpu/drm/tegra/ |
| firewall.c |
    9    struct tegra_drm_submit_data *submit;    [member]
    in fw_check_addr_valid():
    31   for (i = 0; i < fw->submit->num_used_mappings; i++) {
    32   struct tegra_drm_mapping *m = fw->submit->used_mappings[i].mapping;
    in tegra_drm_fw_validate():
    145  u32 words, struct tegra_drm_submit_data *submit,    [argument]
    149  .submit = submit,
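fw_check_addr_valid() above iterates the submit's used_mappings[] and accepts an address only if it falls inside one of them. A small, self-contained sketch of that range check, with made-up types and values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the firewall's mapping table. */
    struct toy_mapping {
            uint64_t iova;
            uint64_t size;
    };

    struct toy_submit_data {
            unsigned int num_used_mappings;
            const struct toy_mapping *used_mappings;
    };

    /* Accept an address only if it lands inside one of the submit's mappings. */
    static bool toy_check_addr_valid(const struct toy_submit_data *submit,
                                     uint64_t addr)
    {
            for (unsigned int i = 0; i < submit->num_used_mappings; i++) {
                    const struct toy_mapping *m = &submit->used_mappings[i];

                    if (addr >= m->iova && addr < m->iova + m->size)
                            return true;
            }
            return false;
    }

    int main(void)
    {
            const struct toy_mapping maps[] = {
                    { .iova = 0x10000, .size = 0x1000 },
                    { .iova = 0x40000, .size = 0x4000 },
            };
            const struct toy_submit_data submit = {
                    .num_used_mappings = 2,
                    .used_mappings = maps,
            };

            printf("0x10800 valid: %d\n", toy_check_addr_valid(&submit, 0x10800)); /* 1 */
            printf("0x20000 valid: %d\n", toy_check_addr_valid(&submit, 0x20000)); /* 0 */
            return 0;
    }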
|
| /linux/Documentation/crypto/ |
| async-tx-api.rst |
    69   async_<operation>(<op specific parameters>, struct async_submit_ctl *submit)
    97   operations complete. When an application needs to submit a chain of
    134  async_<operation>, or when the application needs to submit a chain of
    143  2. Completion callback routines cannot submit new operations. This
    170  struct async_submit_ctl submit;
    174  init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL,
    176  tx = async_xor(xor_dest, xor_srcs, 0, NDISKS, xor_len, &submit);
    178  submit.depend_tx = tx;
    179  tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, &submit);
    182  init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx,
    [all …]
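Lines 170-182 above are the documentation's xor→copy→xor chaining example, cut off by the index. A hedged reconstruction of how such a chain is typically finished, built only from the calls visible in the excerpt; the surrounding function, the callback name, and the trailing async_tx_issue_pending_all() call are assumptions:

    /*
     * Sketch: fenced xor, then a copy that depends on it, then a final xor
     * whose completion callback is acknowledged.  Buffer declarations and
     * the callback name are filled in as assumptions around the calls
     * visible in the excerpt.
     */
    #include <linux/async_tx.h>

    #define NDISKS 4

    static void complete_xor_copy_xor(void *param)
    {
            pr_debug("xor->copy->xor chain finished\n");
    }

    static void run_xor_copy_xor(struct page *xor_dest, struct page **xor_srcs,
                                 size_t xor_len, struct page *copy_dest,
                                 struct page *copy_src, size_t copy_len)
    {
            struct async_submit_ctl submit;
            addr_conv_t addr_conv[NDISKS];
            struct dma_async_tx_descriptor *tx;

            /* First xor: no dependency, no callback yet. */
            init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL,
                              addr_conv);
            tx = async_xor(xor_dest, xor_srcs, 0, NDISKS, xor_len, &submit);

            /* Copy depends on the xor completing. */
            submit.depend_tx = tx;
            tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, &submit);

            /* Final xor: depends on the copy, fires the callback, and is ACKed. */
            init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx,
                              complete_xor_copy_xor, NULL, addr_conv);
            tx = async_xor(xor_dest, xor_srcs, 0, NDISKS, xor_len, &submit);

            async_tx_issue_pending_all();
    }

Setting ASYNC_TX_ACK on the final operation marks the descriptor chain as acknowledged so the driver can reclaim it once the callback has run.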
|
| /linux/Documentation/filesystems/fuse/ |
| fuse-io-uring.rst |
    28   needs to submit SQEs (opcode = IORING_OP_URING_CMD) to the /dev/fuse
    29   connection file descriptor. Initial submit is with the sub command
    79   | | [submit result and fetch next]
|
| /linux/Documentation/gpu/rfc/ |
| i915_scheduler.rst |
    27   * After I915_CONTEXT_ENGINES_EXT_PARALLEL a user can submit N batches to
    92   whether a submission is a single context submit or parallel submit isn't known
    93   until execbuf time activated via the I915_SUBMIT_FENCE. To submit multiple
    148  submit N BBs in a single execbuf2 IOCTL. The BBs are either the last N objects
|
| /linux/drivers/gpu/drm/ci/xfails/ |
| panfrost-rk3288-fails.txt |
    2    panfrost/panfrost_submit@pan-submit-error-bad-requirements,Crash
|
| panfrost-mt8183-fails.txt |
    2    panfrost/panfrost_submit@pan-submit-error-bad-requirements,Fail
|