
Searched refs: submit (Results 1 – 25 of 147) sorted by relevance


/linux/drivers/gpu/drm/msm/
msm_gem_submit.c
25 #define SUBMIT_ERROR(err, submit, fmt, ...) \ argument
26 UERR(err, (submit)->dev, fmt, ##__VA_ARGS__)
38 struct msm_gem_submit *submit; in submit_create() local
42 sz = struct_size(submit, bos, nr_bos) + in submit_create()
43 ((u64)nr_cmds * sizeof(submit->cmd[0])); in submit_create()
48 submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); in submit_create()
49 if (!submit) in submit_create()
52 submit->hw_fence = msm_fence_alloc(); in submit_create()
53 if (IS_ERR(submit->hw_fence)) { in submit_create()
54 ret = PTR_ERR(submit->hw_fence); in submit_create()
[all …]
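
The submit_create() excerpt above packs the submit struct, its flexible bos[] array, and a trailing cmd[] array into a single kzalloc(). A minimal sketch of that sizing idiom, assuming invented demo_* types in place of the driver's real ones:

#include <linux/limits.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_cmd { u64 iova; u32 size; };        /* invented for the sketch */
struct demo_bo { void *obj; u32 flags; };

struct demo_submit {
        u32 nr_bos, nr_cmds;
        struct demo_cmd *cmd;   /* carved out of the same allocation */
        struct demo_bo bos[];   /* flexible array member, must be last */
};

static struct demo_submit *demo_submit_create(u32 nr_bos, u32 nr_cmds)
{
        struct demo_submit *submit;
        /* struct + bos[nr_bos] + cmd[nr_cmds]; struct_size() saturates
         * instead of wrapping on overflow */
        u64 sz = struct_size(submit, bos, nr_bos) +
                 (u64)nr_cmds * sizeof(submit->cmd[0]);

        if (sz > SIZE_MAX)
                return NULL;

        submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
        if (!submit)
                return NULL;

        submit->nr_bos = nr_bos;
        submit->nr_cmds = nr_cmds;
        submit->cmd = (struct demo_cmd *)&submit->bos[nr_bos];
        return submit;
}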
msm_gpu.c
257 static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit) in crashstate_get_bos() argument
261 if (msm_context_is_vmbind(submit->queue->ctx)) { in crashstate_get_bos()
270 drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm)); in crashstate_get_bos()
273 drm_gpuvm_for_each_va (vma, submit->vm) { in crashstate_get_bos()
284 drm_gpuvm_for_each_va (vma, submit->vm) in crashstate_get_bos()
290 drm_gpuvm_for_each_va(vma, submit->vm) { in crashstate_get_bos()
303 state->bos = kcalloc(submit->nr_bos, in crashstate_get_bos()
306 for (int i = 0; state->bos && i < submit->nr_bos; i++) { in crashstate_get_bos()
307 struct drm_gem_object *obj = submit->bos[i].obj; in crashstate_get_bos()
308 bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP); in crashstate_get_bos()
[all …]
msm_ringbuffer.c
16 struct msm_gem_submit *submit = to_msm_submit(job); in msm_job_run() local
17 struct msm_fence_context *fctx = submit->ring->fctx; in msm_job_run()
18 struct msm_gpu *gpu = submit->gpu; in msm_job_run()
20 unsigned nr_cmds = submit->nr_cmds; in msm_job_run()
23 msm_fence_init(submit->hw_fence, fctx); in msm_job_run()
27 for (i = 0; i < submit->nr_bos; i++) { in msm_job_run()
28 struct drm_gem_object *obj = submit->bos[i].obj; in msm_job_run()
33 submit->bos_pinned = false; in msm_job_run()
40 if (submit->queue->ctx->closed) in msm_job_run()
41 submit->nr_cmds = 0; in msm_job_run()
[all …]
msm_rd.c
338 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, in msm_rd_dump_submit() argument
362 task = pid_task(submit->pid, PIDTYPE_PID); in msm_rd_dump_submit()
366 pid_nr(submit->pid), submit->seqno); in msm_rd_dump_submit()
369 pid_nr(submit->pid), submit->seqno); in msm_rd_dump_submit()
375 if (msm_context_is_vmbind(submit->queue->ctx)) { in msm_rd_dump_submit()
378 drm_gpuvm_resv_assert_held(submit->vm); in msm_rd_dump_submit()
380 drm_gpuvm_for_each_va (vma, submit->vm) { in msm_rd_dump_submit()
392 for (i = 0; i < submit->nr_bos; i++) { in msm_rd_dump_submit()
393 struct drm_gem_object *obj = submit->bos[i].obj; in msm_rd_dump_submit()
394 bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP); in msm_rd_dump_submit()
[all …]
/linux/drivers/gpu/drm/virtio/
virtgpu_submit.c
48 static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit, in virtio_gpu_do_fence_wait() argument
51 u64 context = submit->fence_ctx + submit->ring_idx; in virtio_gpu_do_fence_wait()
59 static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit, in virtio_gpu_dma_fence_wait() argument
67 err = virtio_gpu_do_fence_wait(submit, f); in virtio_gpu_dma_fence_wait()
89 virtio_gpu_parse_deps(struct virtio_gpu_submit *submit) in virtio_gpu_parse_deps() argument
91 struct drm_virtgpu_execbuffer *exbuf = submit->exbuf; in virtio_gpu_parse_deps()
129 ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle, in virtio_gpu_parse_deps()
134 ret = virtio_gpu_dma_fence_wait(submit, fence); in virtio_gpu_parse_deps()
141 syncobjs[i] = drm_syncobj_find(submit->file, in virtio_gpu_parse_deps()
155 submit->num_in_syncobjs = num_in_syncobjs; in virtio_gpu_parse_deps()
[all …]
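
virtgpu_submit.c resolves each in-fence syncobj handle to a dma_fence and waits on it before queuing the job. A hedged sketch of that resolve-and-wait step using only the generic DRM syncobj and dma-fence APIs; demo_wait_in_syncobj() is an invented helper name:

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <linux/dma-fence.h>

static int demo_wait_in_syncobj(struct drm_file *file, u32 handle)
{
        struct dma_fence *fence;
        long err;
        int ret;

        /* point 0, no flags: look up the current fence of a binary syncobj */
        ret = drm_syncobj_find_fence(file, handle, 0, 0, &fence);
        if (ret)
                return ret;

        err = dma_fence_wait(fence, true);      /* interruptible wait */
        dma_fence_put(fence);
        return err;     /* 0 on success, -ERESTARTSYS etc. on error */
}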
/linux/drivers/gpu/drm/etnaviv/
etnaviv_gem_submit.c
34 struct etnaviv_gem_submit *submit; in submit_create() local
35 size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit)); in submit_create()
37 submit = kzalloc(sz, GFP_KERNEL); in submit_create()
38 if (!submit) in submit_create()
41 submit->pmrs = kzalloc_objs(struct etnaviv_perfmon_request, nr_pmrs); in submit_create()
42 if (!submit->pmrs) { in submit_create()
43 kfree(submit); in submit_create()
46 submit->nr_pmrs = nr_pmrs; in submit_create()
48 submit->gpu = gpu; in submit_create()
49 kref_init(&submit->refcount); in submit_create()
[all …]
etnaviv_sched.c
23 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); in etnaviv_sched_run_job() local
27 fence = etnaviv_gpu_submit(submit); in etnaviv_sched_run_job()
29 dev_dbg(submit->gpu->dev, "skipping bad job\n"); in etnaviv_sched_run_job()
37 struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); in etnaviv_sched_timedout_job() local
38 struct etnaviv_gpu *gpu = submit->gpu; in etnaviv_sched_timedout_job()
46 if (dma_fence_is_signaled(submit->out_fence)) in etnaviv_sched_timedout_job()
56 if (submit->exec_state == ETNA_PIPE_3D) { in etnaviv_sched_timedout_job()
68 (submit->exec_state == ETNA_PIPE_3D && in etnaviv_sched_timedout_job()
83 etnaviv_core_dump(submit); in etnaviv_sched_timedout_job()
84 etnaviv_gpu_recover_hang(submit); in etnaviv_sched_timedout_job()
[all …]
etnaviv_dump.c
118 void etnaviv_core_dump(struct etnaviv_gem_submit *submit) in etnaviv_core_dump() argument
120 struct etnaviv_gpu *gpu = submit->gpu; in etnaviv_core_dump()
133 mutex_lock(&submit->mmu_context->lock); in etnaviv_core_dump()
135 mmu_size = etnaviv_iommu_dump_size(submit->mmu_context); in etnaviv_core_dump()
142 mmu_size + gpu->buffer.size + submit->cmdbuf.size; in etnaviv_core_dump()
145 for (i = 0; i < submit->nr_bos; i++) { in etnaviv_core_dump()
146 obj = submit->bos[i].obj; in etnaviv_core_dump()
164 mutex_unlock(&submit->mmu_context->lock); in etnaviv_core_dump()
176 etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size); in etnaviv_core_dump()
180 &submit->mmu_context->cmdbuf_mapping)); in etnaviv_core_dump()
[all …]
etnaviv_gpu.c
1351 const struct etnaviv_gem_submit *submit = event->submit; in sync_point_perfmon_sample() local
1354 for (i = 0; i < submit->nr_pmrs; i++) { in sync_point_perfmon_sample()
1355 const struct etnaviv_perfmon_request *pmr = submit->pmrs + i; in sync_point_perfmon_sample()
1358 etnaviv_perfmon_process(gpu, pmr, submit->exec_state); in sync_point_perfmon_sample()
1382 const struct etnaviv_gem_submit *submit = event->submit; in sync_point_perfmon_sample_post() local
1397 for (i = 0; i < submit->nr_pmrs; i++) { in sync_point_perfmon_sample_post()
1398 const struct etnaviv_perfmon_request *pmr = submit->pmrs + i; in sync_point_perfmon_sample_post()
1406 struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) in etnaviv_gpu_submit() argument
1408 struct etnaviv_gpu *gpu = submit->gpu; in etnaviv_gpu_submit()
1420 if (submit->nr_pmrs) in etnaviv_gpu_submit()
[all …]
/linux/crypto/async_tx/
async_raid6_recov.c
20 size_t len, struct async_submit_ctl *submit) in async_sum_product() argument
22 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, in async_sum_product()
39 if (submit->flags & ASYNC_TX_FENCE) in async_sum_product()
58 async_tx_submit(chan, tx, submit); in async_sum_product()
70 async_tx_quiesce(&submit->depend_tx); in async_sum_product()
89 struct async_submit_ctl *submit) in async_mult() argument
91 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, in async_mult()
107 if (submit->flags & ASYNC_TX_FENCE) in async_mult()
128 async_tx_submit(chan, tx, submit); in async_mult()
141 async_tx_quiesce(&submit->depend_tx); in async_mult()
[all …]
async_pq.c
39 struct async_submit_ctl *submit) in do_async_gen_syndrome() argument
43 enum async_tx_flags flags_orig = submit->flags; in do_async_gen_syndrome()
44 dma_async_tx_callback cb_fn_orig = submit->cb_fn; in do_async_gen_syndrome()
45 dma_async_tx_callback cb_param_orig = submit->cb_param; in do_async_gen_syndrome()
52 submit->flags = flags_orig; in do_async_gen_syndrome()
59 submit->flags &= ~ASYNC_TX_ACK; in do_async_gen_syndrome()
60 submit->flags |= ASYNC_TX_FENCE; in do_async_gen_syndrome()
61 submit->cb_fn = NULL; in do_async_gen_syndrome()
62 submit->cb_param = NULL; in do_async_gen_syndrome()
64 submit->cb_fn = cb_fn_orig; in do_async_gen_syndrome()
[all …]
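
do_async_gen_syndrome() saves the caller's flags and callback, suppresses them for intermediate chunks (forcing ASYNC_TX_FENCE ordering and dropping ASYNC_TX_ACK), and restores them only for the final chunk so the callback fires exactly once. A schematic sketch of that pattern, with issue_chunk() standing in for one hardware PQ operation:

#include <linux/async_tx.h>

static void demo_chunked_submit(struct async_submit_ctl *submit,
                                unsigned int nr_chunks,
                                void (*issue_chunk)(struct async_submit_ctl *))
{
        enum async_tx_flags flags_orig = submit->flags;
        dma_async_tx_callback cb_fn_orig = submit->cb_fn;
        void *cb_param_orig = submit->cb_param;
        unsigned int i;

        for (i = 0; i < nr_chunks; i++) {
                submit->flags = flags_orig;
                if (i < nr_chunks - 1) {
                        /* intermediate op: keep ordering, no user callback */
                        submit->flags &= ~ASYNC_TX_ACK;
                        submit->flags |= ASYNC_TX_FENCE;
                        submit->cb_fn = NULL;
                        submit->cb_param = NULL;
                } else {
                        /* final op carries the caller's callback */
                        submit->cb_fn = cb_fn_orig;
                        submit->cb_param = cb_param_orig;
                }
                issue_chunk(submit);
        }
}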
raid6test.c
60 struct async_submit_ctl submit; in raid6_dual_recov() local
71 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); in raid6_dual_recov()
73 disks, bytes, &submit); in raid6_dual_recov()
91 init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, in raid6_dual_recov()
93 tx = async_xor(dest, blocks, 0, count, bytes, &submit); in raid6_dual_recov()
95 init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv); in raid6_dual_recov()
97 disks, bytes, &submit); in raid6_dual_recov()
102 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); in raid6_dual_recov()
104 faila, ptrs, offs, &submit); in raid6_dual_recov()
107 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); in raid6_dual_recov()
[all …]
async_tx.c
43 __async_tx_find_channel(struct async_submit_ctl *submit, in __async_tx_find_channel() argument
46 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; in __async_tx_find_channel()
144 struct async_submit_ctl *submit) in async_tx_submit() argument
146 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; in async_tx_submit()
148 tx->callback = submit->cb_fn; in async_tx_submit()
149 tx->callback_param = submit->cb_param; in async_tx_submit()
204 if (submit->flags & ASYNC_TX_ACK) in async_tx_submit()
221 async_trigger_callback(struct async_submit_ctl *submit) in async_trigger_callback() argument
226 struct dma_async_tx_descriptor *depend_tx = submit->depend_tx; in async_trigger_callback()
245 async_tx_submit(chan, tx, submit); in async_trigger_callback()
[all …]
async_memcpy.c
34 struct async_submit_ctl *submit) in async_memcpy() argument
36 struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY, in async_memcpy()
48 if (submit->cb_fn) in async_memcpy()
50 if (submit->flags & ASYNC_TX_FENCE) in async_memcpy()
70 async_tx_submit(chan, tx, submit); in async_memcpy()
76 async_tx_quiesce(&submit->depend_tx); in async_memcpy()
86 async_tx_sync_epilog(submit); in async_memcpy()
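
async_memcpy() takes the same async_submit_ctl descriptor as every other async_tx operation, and falls back to a synchronous copy plus async_tx_sync_epilog() when no DMA channel is available. A hedged usage sketch (demo_* names invented) that copies one page and signals a completion from the callback:

#include <linux/async_tx.h>
#include <linux/completion.h>
#include <linux/mm.h>

static void demo_copy_done(void *param)         /* dma_async_tx_callback */
{
        complete(param);
}

static void demo_async_copy(struct page *dst, struct page *src,
                            struct completion *done)
{
        struct async_submit_ctl submit;

        /* no dependency, ack the descriptor, notify `done` when finished */
        init_async_submit(&submit, ASYNC_TX_ACK, NULL,
                          demo_copy_done, done, NULL);
        async_memcpy(dst, src, 0, 0, PAGE_SIZE, &submit);
        async_tx_issue_pending_all();           /* kick the DMA engines */
}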
/linux/drivers/gpu/drm/lima/
lima_gem.c
280 static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit) in lima_gem_add_deps() argument
284 for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) { in lima_gem_add_deps()
285 if (!submit->in_sync[i]) in lima_gem_add_deps()
288 err = drm_sched_job_add_syncobj_dependency(&submit->task->base, file, in lima_gem_add_deps()
289 submit->in_sync[i], 0); in lima_gem_add_deps()
297 int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) in lima_gem_submit() argument
305 struct lima_bo **bos = submit->lbos; in lima_gem_submit()
307 if (submit->out_sync) { in lima_gem_submit()
308 out_sync = drm_syncobj_find(file, submit->out_sync); in lima_gem_submit()
313 for (i = 0; i < submit->nr_bos; i++) { in lima_gem_submit()
[all …]
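
lima_gem_add_deps() above turns each nonzero in_sync handle into a scheduler dependency through the generic DRM scheduler helper. A sketch of that loop with invented demo names:

#include <drm/gpu_scheduler.h>

static int demo_add_deps(struct drm_file *file, struct drm_sched_job *job,
                         u32 *in_sync, unsigned int count)
{
        unsigned int i;
        int err;

        for (i = 0; i < count; i++) {
                if (!in_sync[i])
                        continue;
                /* point 0: binary syncobj rather than a timeline point */
                err = drm_sched_job_add_syncobj_dependency(job, file,
                                                           in_sync[i], 0);
                if (err)
                        return err;
        }
        return 0;
}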
lima_drv.c
111 struct lima_submit submit = {0}; in lima_ioctl_gem_submit() local
125 bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL); in lima_ioctl_gem_submit()
129 size = args->nr_bos * sizeof(*submit.bos); in lima_ioctl_gem_submit()
157 submit.pipe = args->pipe; in lima_ioctl_gem_submit()
158 submit.bos = bos; in lima_ioctl_gem_submit()
159 submit.lbos = (void *)bos + size; in lima_ioctl_gem_submit()
160 submit.nr_bos = args->nr_bos; in lima_ioctl_gem_submit()
161 submit.task = task; in lima_ioctl_gem_submit()
162 submit.ctx = ctx; in lima_ioctl_gem_submit()
163 submit.flags = args->flags; in lima_ioctl_gem_submit()
[all …]
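
lima_ioctl_gem_submit() lets one kvcalloc() cover two parallel arrays, submit.bos and submit.lbos, then points the second at the tail of the allocation. A minimal sketch of that carve-out, with invented demo types:

#include <linux/slab.h>
#include <linux/types.h>

struct demo_bo_desc { u32 handle; u32 flags; }; /* uAPI-side entry */
struct demo_bo;                                 /* driver-side object */

static int demo_alloc_bo_arrays(u32 nr_bos, struct demo_bo_desc **descs,
                                struct demo_bo ***lbos)
{
        /* nr_bos descriptors followed by nr_bos driver pointers,
         * in one allocation */
        void *buf = kvcalloc(nr_bos, sizeof(**descs) + sizeof(**lbos),
                             GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        *descs = buf;
        *lbos = buf + nr_bos * sizeof(**descs);
        return 0;
}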
/linux/fs/iomap/
direct-io.c
44 } submit; member
178 struct task_struct *waiter = dio->submit.waiter; in iomap_dio_done()
180 WRITE_ONCE(dio->submit.waiter, NULL); in iomap_dio_done()
327 nr_vecs = bio_iov_bounce_nr_vecs(dio->submit.iter, op); in iomap_dio_bio_iter_one()
329 nr_vecs = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS); in iomap_dio_bio_iter_one()
341 ret = bio_iov_iter_bounce(bio, dio->submit.iter); in iomap_dio_bio_iter_one()
343 ret = bio_iov_iter_get_pages(bio, dio->submit.iter, in iomap_dio_bio_iter_one()
367 if (iov_iter_count(dio->submit.iter)) in iomap_dio_bio_iter_one()
491 orig_count = iov_iter_count(dio->submit.iter); in iomap_dio_bio_iter()
492 iov_iter_truncate(dio->submit.iter, length); in iomap_dio_bio_iter()
[all …]
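
iomap's direct I/O path bounds each mapping by truncating dio->submit.iter to the mapped length and re-expanding it afterwards so later iterations see the untouched remainder. A hedged sketch of that truncate/reexpand pattern on a bare iov_iter; demo_consume() is a stand-in for the bio mapping step:

#include <linux/uio.h>

static size_t demo_bounded_consume(struct iov_iter *iter, u64 length,
                                   size_t (*demo_consume)(struct iov_iter *))
{
        size_t orig_count = iov_iter_count(iter);
        size_t copied;

        iov_iter_truncate(iter, length);        /* hide bytes past length */
        copied = demo_consume(iter);
        /* undo the truncation: expose original count minus what was used */
        iov_iter_reexpand(iter, orig_count - copied);
        return copied;
}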
/linux/drivers/gpu/drm/msm/adreno/
a2xx_gpu.c
13 static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a2xx_submit() argument
15 struct msm_ringbuffer *ring = submit->ring; in a2xx_submit()
18 for (i = 0; i < submit->nr_cmds; i++) { in a2xx_submit()
19 switch (submit->cmd[i].type) { in a2xx_submit()
25 if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) in a2xx_submit()
30 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a2xx_submit()
31 OUT_RING(ring, submit->cmd[i].size); in a2xx_submit()
38 OUT_RING(ring, submit->seqno); in a2xx_submit()
47 OUT_RING(ring, submit->seqno); in a2xx_submit()
552 .submit = a2xx_submit,
a5xx_gpu.c
66 static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a5xx_submit_in_rb() argument
70 struct msm_ringbuffer *ring = submit->ring; in a5xx_submit_in_rb()
75 for (i = 0; i < submit->nr_cmds; i++) { in a5xx_submit_in_rb()
76 switch (submit->cmd[i].type) { in a5xx_submit_in_rb()
80 if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) in a5xx_submit_in_rb()
85 obj = submit->bos[submit->cmd[i].idx].obj; in a5xx_submit_in_rb()
86 dwords = submit->cmd[i].size; in a5xx_submit_in_rb()
114 a5xx_gpu->last_seqno[ring->id] = submit->seqno; in a5xx_submit_in_rb()
123 ring->memptrs->fence = submit->seqno; in a5xx_submit_in_rb()
127 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a5xx_submit() argument
[all …]
a6xx_gpu.c
202 struct msm_ringbuffer *ring, struct msm_gem_submit *submit) in a6xx_set_pagetable() argument
205 struct msm_context *ctx = submit->queue->ctx; in a6xx_set_pagetable()
206 struct drm_gpuvm *vm = msm_context_vm(submit->dev, ctx); in a6xx_set_pagetable()
224 OUT_RING(ring, submit->seqno - 1); in a6xx_set_pagetable()
328 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a6xx_submit() argument
330 unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT; in a6xx_submit()
333 struct msm_ringbuffer *ring = submit->ring; in a6xx_submit()
338 a6xx_set_pagetable(a6xx_gpu, ring, submit); in a6xx_submit()
359 for (i = 0; i < submit->nr_cmds; i++) { in a6xx_submit()
360 switch (submit->cmd[i].type) { in a6xx_submit()
[all …]
a3xx_gpu.c
31 static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a3xx_submit() argument
33 struct msm_ringbuffer *ring = submit->ring; in a3xx_submit()
36 for (i = 0; i < submit->nr_cmds; i++) { in a3xx_submit()
37 switch (submit->cmd[i].type) { in a3xx_submit()
43 if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) in a3xx_submit()
48 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a3xx_submit()
49 OUT_RING(ring, submit->cmd[i].size); in a3xx_submit()
56 OUT_RING(ring, submit->seqno); in a3xx_submit()
73 OUT_RING(ring, submit->seqno); in a3xx_submit()
603 .submit = a3xx_submit,
a4xx_gpu.c
25 static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) in a4xx_submit() argument
27 struct msm_ringbuffer *ring = submit->ring; in a4xx_submit()
30 for (i = 0; i < submit->nr_cmds; i++) { in a4xx_submit()
31 switch (submit->cmd[i].type) { in a4xx_submit()
37 if (ring->cur_ctx_seqno == submit->queue->ctx->seqno) in a4xx_submit()
42 OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); in a4xx_submit()
43 OUT_RING(ring, submit->cmd[i].size); in a4xx_submit()
50 OUT_RING(ring, submit->seqno); in a4xx_submit()
67 OUT_RING(ring, submit->seqno); in a4xx_submit()
715 .submit = a4xx_submit,
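
The a2xx/a3xx/a4xx submit callbacks above share one loop shape: walk submit->cmd[], ignore IB-target buffers, skip context-restore buffers when the ring's current context seqno already matches, and otherwise emit an indirect-buffer packet. A schematic of that control flow; demo_emit_ib() is a hypothetical stand-in for the generation-specific OUT_PKT3/OUT_RING sequence:

#include <drm/msm_drm.h>        /* MSM_SUBMIT_CMD_* uAPI constants */

/* hypothetical: emit one CP indirect-buffer packet */
void demo_emit_ib(struct msm_ringbuffer *ring, u32 iova_lo, u32 dwords);

static void demo_emit_cmds(struct msm_gem_submit *submit)
{
        struct msm_ringbuffer *ring = submit->ring;
        unsigned int i;

        for (i = 0; i < submit->nr_cmds; i++) {
                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        /* target of an IB1, nothing emitted into the ring */
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        /* redundant if the same context is still current */
                        if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
                        demo_emit_ib(ring, lower_32_bits(submit->cmd[i].iova),
                                     submit->cmd[i].size);
                        break;
                }
        }
}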
/linux/drivers/gpu/drm/i915/selftests/
i915_active.c
83 struct i915_sw_fence *submit; in __live_active_setup() local
92 submit = heap_fence_create(GFP_KERNEL); in __live_active_setup()
93 if (!submit) { in __live_active_setup()
111 err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, in __live_active_setup()
112 submit, in __live_active_setup()
137 i915_sw_fence_commit(submit); in __live_active_setup()
138 heap_fence_put(submit); in __live_active_setup()
/linux/drivers/gpu/drm/tegra/
firewall.c
9 struct tegra_drm_submit_data *submit; member
31 for (i = 0; i < fw->submit->num_used_mappings; i++) { in fw_check_addr_valid()
32 struct tegra_drm_mapping *m = fw->submit->used_mappings[i].mapping; in fw_check_addr_valid()
145 u32 words, struct tegra_drm_submit_data *submit, in tegra_drm_fw_validate() argument
149 .submit = submit, in tegra_drm_fw_validate()
/linux/Documentation/crypto/
async-tx-api.rst
69 async_<operation>(<op specific parameters>, struct async_submit_ctl *submit)
97 operations complete. When an application needs to submit a chain of
134 async_<operation>, or when the application needs to submit a chain of
143 2. Completion callback routines cannot submit new operations. This
170 struct async_submit_ctl submit;
174 init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL,
176 tx = async_xor(xor_dest, xor_srcs, 0, NDISKS, xor_len, &submit);
178 submit.depend_tx = tx;
179 tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, &submit);
182 init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx,
[all …]
