/linux/drivers/gpu/drm/msm/
msm_gem_submit.c

     25  #define SUBMIT_ERROR(err, submit, fmt, ...) \
     26          UERR(err, (submit)->dev, fmt, ##__VA_ARGS__)

    in submit_create():
     38          struct msm_gem_submit *submit;
     42          sz = struct_size(submit, bos, nr_bos) +
     43               ((u64)nr_cmds * sizeof(submit->cmd[0]));
     48          submit = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
     49          if (!submit)
     52          submit->hw_fence = msm_fence_alloc();
     53          if (IS_ERR(submit->hw_fence)) {
     54                  ret = PTR_ERR(submit->hw_fence);
    [all …]
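submit_create() sizes the submit struct, its trailing bos[] array, and a following cmd table in a single kzalloc(), using struct_size() for the overflow-checked part. A minimal self-contained sketch of that flexible-array allocation pattern (hypothetical container/item types, not the msm structs):

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct item {
            u64 iova;
            u32 flags;
    };

    struct container {
            unsigned int nr_items;
            struct item items[];            /* flexible array member */
    };

    static struct container *container_create(unsigned int nr_items)
    {
            struct container *c;

            /*
             * struct_size() computes sizeof(*c) + nr_items * sizeof(c->items[0])
             * and saturates on overflow, so a hostile nr_items makes kzalloc()
             * fail cleanly instead of under-allocating.
             */
            c = kzalloc(struct_size(c, items, nr_items), GFP_KERNEL);
            if (!c)
                    return NULL;

            c->nr_items = nr_items;
            return c;
    }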
|
msm_gpu.c

    in crashstate_get_bos():
    258  static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submit *submit)
    262          if (msm_context_is_vmbind(submit->queue->ctx)) {
    271                  drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(submit->vm));
    274                  drm_gpuvm_for_each_va (vma, submit->vm) {
    285                  drm_gpuvm_for_each_va (vma, submit->vm)
    290                  drm_gpuvm_for_each_va (vma, submit->vm) {
    303          state->bos = kcalloc(submit->nr_bos,
    306          for (int i = 0; state->bos && i < submit->nr_bos; i++) {
    307                  struct drm_gem_object *obj = submit->bos[i].obj;
    308                  bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);
    [all …]
|
msm_gem.h

    428  /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
    438          struct list_head node;    /* node in ring submit list */
    440          uint32_t seqno;           /* Sequence number of the submit on the ring */
    456          bool fault_dumped:1;      /* Limit devcoredump dumping to one per submit */
    461          u32 ident;                /* An "identifier" for the submit for logging */
    489  static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
    491          kref_get(&submit->ref);
    494  static inline void msm_gem_submit_put(struct msm_gem_submit *submit)
    496          kref_put(&submit->ref, __msm_gem_submit_destroy);
    499  void msm_submit_retire(struct msm_gem_submit *submit);
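The get/put helpers above are the standard kref idiom: the refcount starts at 1, each user takes a reference, and the final put runs the release callback. A minimal sketch of that lifecycle (hypothetical widget type, not the msm code):

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct widget {
            struct kref ref;
            /* ... payload ... */
    };

    static void widget_release(struct kref *ref)
    {
            /* container_of() recovers the outer object from the embedded kref */
            kfree(container_of(ref, struct widget, ref));
    }

    static struct widget *widget_create(void)
    {
            struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

            if (w)
                    kref_init(&w->ref);     /* refcount starts at 1 */
            return w;
    }

    /* Each additional user takes a reference ... */
    static void widget_get(struct widget *w)
    {
            kref_get(&w->ref);
    }

    /* ... and the last put frees the object via the release callback. */
    static void widget_put(struct widget *w)
    {
            kref_put(&w->ref, widget_release);
    }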
|
/linux/drivers/gpu/drm/virtio/
virtgpu_submit.c

    in virtio_gpu_do_fence_wait():
     48  static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
     51          u64 context = submit->fence_ctx + submit->ring_idx;

    in virtio_gpu_dma_fence_wait():
     59  static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
     67          err = virtio_gpu_do_fence_wait(submit, f);

    in virtio_gpu_parse_deps():
     89  virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
     91          struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
    129          ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
    134          ret = virtio_gpu_dma_fence_wait(submit, fence);
    141          syncobjs[i] = drm_syncobj_find(submit->file,
    155          submit->num_in_syncobjs = num_in_syncobjs;
    [all …]
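virtio_gpu_parse_deps() resolves user-supplied syncobj handles to dma_fences and treats them as in-fences for the submit. A rough sketch of the lookup-and-wait half of that flow (hypothetical helper; the virtio-gpu code stashes the fence as a dependency rather than blocking inline):

    #include <drm/drm_syncobj.h>
    #include <linux/dma-fence.h>

    static int wait_in_syncobj(struct drm_file *file, u32 handle)
    {
            struct dma_fence *fence;
            long err;
            int ret;

            /* point 0, no flags: fetch the binary syncobj's current fence */
            ret = drm_syncobj_find_fence(file, handle, 0, 0, &fence);
            if (ret)
                    return ret;

            err = dma_fence_wait(fence, true);      /* interruptible wait */
            dma_fence_put(fence);
            return err;
    }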
|
/linux/crypto/async_tx/
async_raid6_recov.c

    in async_sum_product():
     20          size_t len, struct async_submit_ctl *submit)
     22          struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
     39          if (submit->flags & ASYNC_TX_FENCE)
     58          async_tx_submit(chan, tx, submit);
     70          async_tx_quiesce(&submit->depend_tx);

    in async_mult():
     89          struct async_submit_ctl *submit)
     91          struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
    107          if (submit->flags & ASYNC_TX_FENCE)
    128          async_tx_submit(chan, tx, submit);
    141          async_tx_quiesce(&submit->depend_tx);
    [all …]
|
async_pq.c

    in do_async_gen_syndrome():
     39          struct async_submit_ctl *submit)
     43          enum async_tx_flags flags_orig = submit->flags;
     44          dma_async_tx_callback cb_fn_orig = submit->cb_fn;
     45          void *cb_param_orig = submit->cb_param;
     52          submit->flags = flags_orig;
     59          submit->flags &= ~ASYNC_TX_ACK;
     60          submit->flags |= ASYNC_TX_FENCE;
     61          submit->cb_fn = NULL;
     62          submit->cb_param = NULL;
     64          submit->cb_fn = cb_fn_orig;
    [all …]
|
async_tx.c

     39   * @submit: transaction dependency and submission modifiers
     43  __async_tx_find_channel(struct async_submit_ctl *submit,
     46          struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
    130   * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly

    in async_tx_submit():
    144          struct async_submit_ctl *submit)
    146          struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
    148          tx->callback = submit->cb_fn;
    149          tx->callback_param = submit->cb_param;
    168          /* we have a parent so we can not submit directly
    178          /* we do not have a parent so we may be able to submit
    [all …]
|
raid6test.c

    in raid6_dual_recov():
     60          struct async_submit_ctl submit;
     71          init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
     73                  disks, bytes, &submit);
     91          init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
     93          tx = async_xor(dest, blocks, 0, count, bytes, &submit);
     95          init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
     97                  disks, bytes, &submit);
    102          init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
    104                  faila, ptrs, offs, &submit);
    107          init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
    [all …]
|
async_memcpy.c

     27   * @submit: submission / completion modifiers

    in async_memcpy():
     34          struct async_submit_ctl *submit)
     36          struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
     48          if (submit->cb_fn)
     50          if (submit->flags & ASYNC_TX_FENCE)
     70          async_tx_submit(chan, tx, submit);
     76          async_tx_quiesce(&submit->depend_tx);
     86          async_tx_sync_epilog(submit);
|
/linux/drivers/gpu/drm/etnaviv/
etnaviv_sched.c

    in etnaviv_sched_run_job():
     23          struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
     27          fence = etnaviv_gpu_submit(submit);
     29          dev_dbg(submit->gpu->dev, "skipping bad job\n");

    in etnaviv_sched_timedout_job():
     37          struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
     38          struct etnaviv_gpu *gpu = submit->gpu;
     46          if (dma_fence_is_signaled(submit->out_fence))
     56          if (submit->exec_state == ETNA_PIPE_3D) {
     68              (submit->exec_state == ETNA_PIPE_3D &&
     83          etnaviv_core_dump(submit);
     84          etnaviv_gpu_recover_hang(submit);
    [all …]
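etnaviv_sched_run_job() and etnaviv_sched_timedout_job() are backend callbacks of the DRM GPU scheduler. A minimal sketch of wiring a driver into drm_sched (hypothetical mydrv_* names; callback signatures as in recent kernels — the timedout_job return type has changed across versions):

    #include <drm/gpu_scheduler.h>
    #include <linux/dma-fence.h>
    #include <linux/slab.h>

    struct mydrv_job {
            struct drm_sched_job base;
            struct dma_fence *hw_fence;     /* signaled by the hardware */
    };

    #define to_mydrv_job(j) container_of(j, struct mydrv_job, base)

    /* Hand the job to the hardware; the scheduler waits on the returned fence. */
    static struct dma_fence *mydrv_run_job(struct drm_sched_job *sched_job)
    {
            struct mydrv_job *job = to_mydrv_job(sched_job);

            /* a real driver writes the ringbuffer / rings a doorbell here */
            return dma_fence_get(job->hw_fence);
    }

    /* Timeout: recover the engine and report the outcome to the scheduler. */
    static enum drm_gpu_sched_stat mydrv_timedout_job(struct drm_sched_job *sched_job)
    {
            /* a real driver dumps state and resets the GPU here, as etnaviv does */
            return DRM_GPU_SCHED_STAT_NOMINAL;
    }

    static void mydrv_free_job(struct drm_sched_job *sched_job)
    {
            struct mydrv_job *job = to_mydrv_job(sched_job);

            drm_sched_job_cleanup(sched_job);
            dma_fence_put(job->hw_fence);
            kfree(job);
    }

    static const struct drm_sched_backend_ops mydrv_sched_ops = {
            .run_job      = mydrv_run_job,
            .timedout_job = mydrv_timedout_job,
            .free_job     = mydrv_free_job,
    };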
|
etnaviv_dump.c

    in etnaviv_core_dump():
    118  void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
    120          struct etnaviv_gpu *gpu = submit->gpu;
    133          mutex_lock(&submit->mmu_context->lock);
    135          mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);
    142                  mmu_size + gpu->buffer.size + submit->cmdbuf.size;
    145          for (i = 0; i < submit->nr_bos; i++) {
    146                  obj = submit->bos[i].obj;
    164          mutex_unlock(&submit->mmu_context->lock);
    176          etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
    180                  &submit->mmu_context->cmdbuf_mapping));
    [all …]
|
etnaviv_gpu.c

    in sync_point_perfmon_sample():
    1338         const struct etnaviv_gem_submit *submit = event->submit;
    1341         for (i = 0; i < submit->nr_pmrs; i++) {
    1342                 const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
    1345                 etnaviv_perfmon_process(gpu, pmr, submit->exec_state);

    in sync_point_perfmon_sample_post():
    1369         const struct etnaviv_gem_submit *submit = event->submit;
    1384         for (i = 0; i < submit->nr_pmrs; i++) {
    1385                 const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;

    in etnaviv_gpu_submit():
    1393 struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
    1395         struct etnaviv_gpu *gpu = submit->gpu;
    1407         if (submit->nr_pmrs)
    [all …]
|
etnaviv_gem.h

     88  /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
    100          struct list_head node;    /* GPU active submit list */
    112  void etnaviv_submit_put(struct etnaviv_gem_submit *submit);
|
/linux/drivers/gpu/drm/lima/
lima_drv.c

    in lima_ioctl_gem_submit():
    111          struct lima_submit submit = {0};
    125          bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL);
    129          size = args->nr_bos * sizeof(*submit.bos);
    157          submit.pipe = args->pipe;
    158          submit.bos = bos;
    159          submit.lbos = (void *)bos + size;
    160          submit.nr_bos = args->nr_bos;
    161          submit.task = task;
    162          submit.ctx = ctx;
    163          submit.flags = args->flags;
    [all …]
|
/linux/Documentation/crypto/
async-tx-api.rst

     69  async_<operation>(<op specific parameters>, struct async_submit_ctl *submit)
     97  operations complete. When an application needs to submit a chain of
    134  async_<operation>, or when the application needs to submit a chain of
    143  2. Completion callback routines cannot submit new operations. This
    170          struct async_submit_ctl submit;
    174          init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL,
    176          tx = async_xor(xor_dest, xor_srcs, 0, NDISKS, xor_len, &submit);
    178          submit.depend_tx = tx;
    179          tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, &submit);
    182          init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx,
    [all …]
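Lines 170–182 are the documentation's xor → copy → xor chaining example with its declarations elided. A filled-in sketch under assumed buffer setup (the function name, NDISKS value, page arguments, and callback are illustrative, not from the excerpt):

    #include <linux/async_tx.h>
    #include <linux/printk.h>

    #define NDISKS 4

    static void chain_done(void *param)
    {
            pr_info("xor -> memcpy -> xor chain complete\n");
    }

    /*
     * Three chained operations: xor NDISKS sources into xor_dest, copy a
     * region, then xor again. Each step depends on the previous one via
     * submit.depend_tx, so the engine preserves ordering.
     */
    static void run_chain(struct page *xor_dest, struct page **xor_srcs,
                          size_t xor_len, struct page *copy_dest,
                          struct page *copy_src, size_t copy_len)
    {
            struct async_submit_ctl submit;
            struct dma_async_tx_descriptor *tx;

            init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL,
                              NULL);
            tx = async_xor(xor_dest, xor_srcs, 0, NDISKS, xor_len, &submit);

            submit.depend_tx = tx;          /* the copy waits for the xor */
            tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, &submit);

            /*
             * Final xor: depends on the copy, carries the caller's callback,
             * and is ACKed so the descriptor can be recycled once it runs.
             */
            init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx,
                              chain_done, NULL, NULL);
            tx = async_xor(xor_dest, xor_srcs, 0, NDISKS, xor_len, &submit);

            async_tx_issue_pending_all();
    }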
|
/linux/drivers/gpu/drm/tegra/
firewall.c

      5  #include "submit.h"
      9          struct tegra_drm_submit_data *submit;

    in fw_check_addr_valid():
     31          for (i = 0; i < fw->submit->num_used_mappings; i++) {
     32                  struct tegra_drm_mapping *m = fw->submit->used_mappings[i].mapping;

    in tegra_drm_fw_validate():
    145          u32 words, struct tegra_drm_submit_data *submit,
    149                  .submit = submit,
|
/linux/fs/iomap/
direct-io.c

     50          } submit;

    in iomap_dio_done():
    178          struct task_struct *waiter = dio->submit.waiter;
    180          WRITE_ONCE(dio->submit.waiter, NULL);

    in iomap_dio_bio_iter():
    395          orig_count = iov_iter_count(dio->submit.iter);
    396          iov_iter_truncate(dio->submit.iter, length);
    398          if (!iov_iter_count(dio->submit.iter))
    418          nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
    422          iov_iter_revert(dio->submit.iter, copied);
    436          ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
    469          nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
    [all …]
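iomap_dio_done() clears dio->submit.waiter to hand completion back to a synchronously waiting submitter. A rough sketch of that waiter handshake with a hypothetical context struct in place of the iomap dio:

    #include <linux/compiler.h>
    #include <linux/sched.h>

    struct my_io_ctx {
            struct task_struct *waiter;  /* non-NULL while the submitter sleeps */
    };

    /*
     * Submitter side: publish ourselves as the waiter, then sleep until the
     * completion path clears the pointer. Re-checking after
     * set_current_state() closes the race with a concurrent wakeup.
     */
    static void submit_and_wait(struct my_io_ctx *ctx)
    {
            WRITE_ONCE(ctx->waiter, current);
            /* ... submit bios here ... */
            for (;;) {
                    set_current_state(TASK_UNINTERRUPTIBLE);
                    if (!READ_ONCE(ctx->waiter))
                            break;
                    io_schedule();
            }
            __set_current_state(TASK_RUNNING);
    }

    /* Completion side (e.g. bio end_io): clear the pointer, then wake. */
    static void io_done(struct my_io_ctx *ctx)
    {
            struct task_struct *waiter = ctx->waiter;

            WRITE_ONCE(ctx->waiter, NULL);
            wake_up_process(waiter);
    }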
|
/linux/drivers/gpu/drm/i915/selftests/
i915_active.c

    in __live_active_setup():
     81          struct i915_sw_fence *submit;
     90          submit = heap_fence_create(GFP_KERNEL);
     91          if (!submit) {
    109          err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
    110                                                 submit,
    135          i915_sw_fence_commit(submit);
    136          heap_fence_put(submit);
|
/linux/Documentation/driver-api/dmaengine/
client.rst

     25  - Submit the transaction
    149  routines cannot submit any new operations, this is not the
    154  slave DMA callbacks are permitted to prepare and submit a new
    220  3. submit the transfer
    227  3. submit the transfer
    241  5. submit the transfer
    246  2. submit the transfer
    260  4. Submit the transaction
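The steps this document enumerates (request a channel, prepare a descriptor, submit it, issue pending) map directly onto the dmaengine client API. A minimal memory-to-device sketch; the channel name "tx" and the caller-supplied DMA address are illustrative assumptions:

    #include <linux/completion.h>
    #include <linux/dmaengine.h>

    static void xfer_done(void *param)
    {
            complete(param);                /* wake the waiting submitter */
    }

    /* One slave-DMA transfer: buf (already DMA-mapped) -> device FIFO. */
    static int send_buffer(struct device *dev, dma_addr_t buf, size_t len)
    {
            struct dma_async_tx_descriptor *desc;
            DECLARE_COMPLETION_ONSTACK(done);
            struct dma_chan *chan;
            dma_cookie_t cookie;
            int ret = 0;

            chan = dma_request_chan(dev, "tx");
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
                                               DMA_PREP_INTERRUPT);
            if (!desc) {
                    ret = -ENOMEM;
                    goto out;
            }

            desc->callback = xfer_done;
            desc->callback_param = &done;

            cookie = dmaengine_submit(desc);    /* queue the descriptor */
            ret = dma_submit_error(cookie);
            if (ret)
                    goto out;

            dma_async_issue_pending(chan);      /* actually start the engine */
            wait_for_completion(&done);
    out:
            dma_release_channel(chan);
            return ret;
    }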
|
/linux/include/uapi/drm/
qaic_accel.h

    266   * struct qaic_execute_entry - Defines a BO to submit to the device.
    276   * struct qaic_partial_execute_entry - Defines a BO to resize and submit.
    291   * @count: In. Number of BOs to submit.
    292   * @dbc_id: In. DBC to submit the BOs on.
    300   * struct qaic_execute - Defines a list of BOs to submit to the device.
    302   * @data: In. Pointer to an array of BOs to submit.
    350   * @num_queue_element: Out. Number of elements added to the queue to submit
    352   * @submit_latency_us: Out. Time taken by the driver to submit this BO.
|
etnaviv_drm.h

    145  /* Each buffer referenced elsewhere in the cmdstream submit (ie. the
    147   * one) entry in the submit->bos[] table.
    155   * If the submit is a softpin submit (ETNA_SUBMIT_SOFTPIN) the 'presumed'
    158   * this location the submit will fail. This means userspace is responsible
    182  /* Each cmdstream submit consists of a table of buffers involved, and
|
/linux/drivers/gpu/host1x/hw/
channel_hw.c

    in channel_program_cdma():
    235          /* Submit work. */

    in channel_submit():
    303          /* get submit lock */
    312          /* begin a CDMA submit */
    327          if (WARN(IS_ERR(job->fence), "Failed to create submit complete fence")) {
    334          /* end CDMA submit & stash pinned hMems into sync queue */
    344          WARN(err, "Failed to set submit complete interrupt");
    362          .submit = channel_submit,
|
/linux/drivers/gpu/drm/msm/adreno/
a5xx_gpu.c

    in a5xx_submit_in_rb():
     66  static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
     70          struct msm_ringbuffer *ring = submit->ring;
     75          for (i = 0; i < submit->nr_cmds; i++) {
     76                  switch (submit->cmd[i].type) {
     80                          if (ring->cur_ctx_seqno == submit->queue->ctx->seqno)
     85                          obj = submit->bos[submit->cmd[i].idx].obj;
     86                          dwords = submit->cmd[i].size;
    114          a5xx_gpu->last_seqno[ring->id] = submit->seqno;
    119           * trigger an event to know that submit has completed, so
    123          ring->memptrs->fence = submit->seqno;
    [all …]
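At line 123 the GPU-visible fence memptr is updated with the submit's sequence number, so retirement reduces to a seqno comparison against shared memory. A sketch of that check with hypothetical types, using signed arithmetic so the comparison stays correct across u32 wraparound:

    #include <linux/compiler.h>
    #include <linux/types.h>

    struct ring_memptrs {
            u32 fence;      /* last seqno the GPU has completed */
    };

    /* True once the GPU has written back a seqno at or past this submit's. */
    static bool submit_retired(const struct ring_memptrs *memptrs, u32 seqno)
    {
            return (s32)(READ_ONCE(memptrs->fence) - seqno) >= 0;
    }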
|
/linux/Documentation/gpu/rfc/
i915_scheduler.rst

     27  * After I915_CONTEXT_ENGINES_EXT_PARALLEL a user can submit N batches to
     92  whether a submission is a single context submit or parallel submit isn't known
     93  until execbuf time activated via the I915_SUBMIT_FENCE. To submit multiple
    148  submit N BBs in a single execbuf2 IOCTL. The BBs are either the last N objects
|
/linux/tools/usb/ffs-aio-example/simple/device_app/
aio_simple.c

    in main():
    348          /* submit table of requests */
    352          printf("submit: in\n");
    354          perror("unable to submit request");
    362          /* submit table of requests */
    366          printf("submit: out\n");
    368          perror("unable to submit request");
|