/linux/drivers/gpu/drm/nouveau/
  nouveau_sched.c
     26  nouveau_job_init(struct nouveau_job *job,
     32          INIT_LIST_HEAD(&job->entry);
     34          job->file_priv = args->file_priv;
     35          job->cli = nouveau_cli(args->file_priv);
     36          job->sched = sched;
     38          job->sync = args->sync;
     39          job->resv_usage = args->resv_usage;
     41          job->ops = args->ops;
     43          job->in_sync.count = args->in_sync.count;
     44          if (job->in_sync.count) {
         [all …]

  nouveau_exec.c
     87  nouveau_exec_job_submit(struct nouveau_job *job,
     90          struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
     91          struct nouveau_cli *cli = job->cli;
    118  nouveau_exec_job_armed_submit(struct nouveau_job *job,
    121          drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
    122                                        job->resv_usage, job->resv_usage);
    127  nouveau_exec_job_run(struct nouveau_job *job)
    129          struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
    136                  NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
    150                  NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
         [all …]

/linux/drivers/gpu/host1x/
  job.c
     30          struct host1x_job *job = NULL;
     51          mem = job = kzalloc(total, GFP_KERNEL);
     52          if (!job)
     55          job->enable_firewall = enable_firewall;
     57          kref_init(&job->ref);
     58          job->channel = ch;
     62          job->relocs = num_relocs ? mem : NULL;
     64          job->unpins = num_unpins ? mem : NULL;
     66          job->cmds = num_cmdbufs ? mem : NULL;
     68          job->addr_phys = num_unpins ? mem : NULL;
         [all …]

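host1x_job_alloc() above sizes a single kzalloc() for the job struct plus all of its trailing arrays, then walks a cursor through the block handing out each sub-array. A minimal userspace sketch of the same carving pattern, with invented names and only two arrays (the kernel version also checks the size sum for overflow before allocating):

    #include <stdlib.h>

    struct item { int handle; };

    struct job {
        struct item *relocs;            /* carved from the same block */
        struct item *unpins;
        size_t num_relocs, num_unpins;
    };

    static struct job *job_alloc(size_t num_relocs, size_t num_unpins)
    {
        /* one zeroed block: the struct followed by both arrays */
        size_t total = sizeof(struct job)
                     + num_relocs * sizeof(struct item)
                     + num_unpins * sizeof(struct item);
        unsigned char *mem = calloc(1, total);
        struct job *job = (struct job *)mem;

        if (!job)
            return NULL;

        mem += sizeof(struct job);      /* cursor past the header */
        job->relocs = num_relocs ? (struct item *)mem : NULL;
        mem += num_relocs * sizeof(struct item);
        job->unpins = num_unpins ? (struct item *)mem : NULL;

        job->num_relocs = num_relocs;
        job->num_unpins = num_unpins;
        return job;
    }

    int main(void)
    {
        struct job *job = job_alloc(4, 2);
        if (!job)
            return 1;
        job->relocs[3].handle = 42;     /* lands inside the one block */
        free(job);                      /* a single free releases it all */
        return 0;
    }
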
  cdma.c
    274                                     struct host1x_job *job)
    281          cdma->timeout.client = job->client;
    282          cdma->timeout.syncpt = job->syncpt;
    283          cdma->timeout.syncpt_val = job->syncpt_end;
    287                            msecs_to_jiffies(job->timeout));
    313          struct host1x_job *job, *n;
    319          list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
    320                  struct host1x_syncpt *sp = job->syncpt;
    323                  if (!host1x_syncpt_is_expired(sp, job->syncpt_end) &&
    324                      !job->cancelled) {
         [all …]

/linux/drivers/md/
  dm-kcopyd.c
    420          struct kcopyd_job *job;
    426          list_for_each_entry(job, jobs, list) {
    427                  if (job->op == REQ_OP_READ ||
    428                      !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
    429                          list_del(&job->list);
    430                          return job;
    433                  if (job->write_offset == job->master_job->write_offset) {
    434                          job->master_job->write_offset += job->source.count;
    435                          list_del(&job->list);
    436                          return job;
         [all …]

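pop_io_job() scans the pending list and unlinks the first job that can be issued now: reads and unordered writes always qualify, while sequential writes must match the expected offset. A simplified sketch of that scan-and-unlink, with a hand-rolled circular list standing in for the kernel's list_for_each_entry() (the real code tracks the offset per master job):

    #include <stdbool.h>
    #include <stddef.h>

    enum op { OP_READ, OP_WRITE };

    struct job {
        enum op op;
        bool write_seq;                 /* must be issued in offset order */
        long long write_offset;
        struct job *prev, *next;        /* intrusive list links */
    };

    /* head is a sentinel; the list is circular, like the kernel's list_head */
    static struct job *pop_io_job(struct job *head, long long next_seq_offset)
    {
        for (struct job *job = head->next; job != head; job = job->next) {
            bool eligible = job->op == OP_READ || !job->write_seq ||
                            job->write_offset == next_seq_offset;
            if (eligible) {
                job->prev->next = job->next;   /* unlink in place */
                job->next->prev = job->prev;
                return job;
            }
        }
        return NULL;                    /* nothing issuable yet */
    }

    int main(void)
    {
        struct job head = { .prev = &head, .next = &head };
        struct job w = { .op = OP_WRITE, .write_seq = true,
                         .write_offset = 8, .prev = &head, .next = &head };

        head.next = head.prev = &w;     /* one queued sequential write */

        /* offset 0 is expected next, so the write at 8 must keep waiting */
        return pop_io_job(&head, 0) == NULL ? 0 : 1;
    }
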
/linux/drivers/gpu/drm/xe/
  xe_sched_job.c
     68  static void job_free(struct xe_sched_job *job)
     70          struct xe_exec_queue *q = job->q;
     73          kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ?
     74                          xe_sched_job_parallel_slab : xe_sched_job_slab, job);
     77  static struct xe_device *job_to_xe(struct xe_sched_job *job)
     79          return gt_to_xe(job->q->gt);
     83  static void xe_sched_job_free_fences(struct xe_sched_job *job)
     87          for (i = 0; i < job->q->width; ++i) {
     88                  struct xe_job_ptrs *ptrs = &job->ptrs[i];
    100          struct xe_sched_job *job;
         [all …]

  xe_sched_job.h
     31  static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
     33          kref_get(&job->refcount);
     34          return job;
     44  static inline void xe_sched_job_put(struct xe_sched_job *job)
     46          kref_put(&job->refcount, xe_sched_job_destroy);
     49  void xe_sched_job_set_error(struct xe_sched_job *job, int error);
     50  static inline bool xe_sched_job_is_error(struct xe_sched_job *job)
     52          return job->fence->error < 0;
     55  bool xe_sched_job_started(struct xe_sched_job *job);
     56  bool xe_sched_job_completed(struct xe_sched_job *job);
         [all …]

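xe_sched_job_get() and xe_sched_job_put() are the usual kref pair: get returns the pointer so calls can be chained, and the final put runs the destructor. A small userspace analogue built on C11 atomics (names are illustrative, not Xe's):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct job {
        atomic_int refcount;
    };

    static struct job *job_create(void)
    {
        struct job *job = malloc(sizeof(*job));
        if (job)
            atomic_init(&job->refcount, 1);  /* caller owns one reference */
        return job;
    }

    static struct job *job_get(struct job *job)
    {
        atomic_fetch_add(&job->refcount, 1);
        return job;                     /* chainable, like kref_get() wrappers */
    }

    static void job_put(struct job *job)
    {
        /* dropping the last reference destroys the object */
        if (atomic_fetch_sub(&job->refcount, 1) == 1)
            free(job);
    }

    int main(void)
    {
        struct job *job = job_create();
        if (!job)
            return 1;
        job_get(job);                   /* a second owner appears */
        job_put(job);
        job_put(job);                   /* last put frees */
        return 0;
    }
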
  xe_ring_ops.c
    174  static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
    176          struct xe_gt *gt = job->q->gt;
    192          else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
    222  static u32 get_ppgtt_flag(struct xe_sched_job *job)
    224          if (job->q->vm && !job->ggtt)
    244  static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc,
    248          u32 ppgtt_flag = get_ppgtt_flag(job);
    249          struct xe_gt *gt = job->q->gt;
    253          if (job->ring_ops_flush_tlb) {
    265          if (job->user_fence.used) {
         [all …]

/linux/drivers/gpu/drm/amd/amdgpu/
  amdgpu_job.c
     37                                      struct amdgpu_job *job)
     48          amdgpu_coredump(adev, true, false, job);
     52                                   struct amdgpu_job *job)
     80                  amdgpu_job_do_core_dump(tmp_adev, job);
     91          struct amdgpu_job *job = to_amdgpu_job(s_job);
    112                  amdgpu_job_core_dump(adev, job);
    115              amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
    122                    job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
    125          ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
    144                  r = amdgpu_ring_reset(ring, job->vmid);
         [all …]

  amdgpu_ib.c
    125                          struct amdgpu_ib *ibs, struct amdgpu_job *job,
    138          int vmid = AMDGPU_JOB_GET_VMID(job);
    149          if (job) {
    150                  vm = job->vm;
    151                  fence_ctx = job->base.s_fence ?
    152                              job->base.s_fence->scheduled.context : 0;
    153                  shadow_va = job->shadow_va;
    154                  csa_va = job->csa_va;
    155                  gds_va = job->gds_va;
    156                  init_shadow = job->init_shadow;
         [all …]

  amdgpu_ids.c
    169                                             struct amdgpu_job *job)
    171          return id->gds_base != job->gds_base ||
    172                 id->gds_size != job->gds_size ||
    173                 id->gws_base != job->gws_base ||
    174                 id->gws_size != job->gws_size ||
    175                 id->oa_base != job->oa_base ||
    176                 id->oa_size != job->oa_size;
    181                                      struct amdgpu_job *job)
    183          return id->pd_gpu_addr == job->vm_pd_addr &&
    184                 !amdgpu_vmid_gds_switch_needed(id, job);
         [all …]

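amdgpu_vmid_compatible() reduces "can this job reuse the VMID" to plain field equality: the page directory must match and no GDS/GWS/OA switch may be pending. A standalone version of the same check, with a struct layout invented for the sketch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* state cached for a VMID vs. the state the next job wants */
    struct mapping_state {
        uint64_t pd_addr;
        uint64_t gds_base, gds_size;
        uint64_t gws_base, gws_size;
        uint64_t oa_base, oa_size;
    };

    static bool gds_switch_needed(const struct mapping_state *id,
                                  const struct mapping_state *job)
    {
        return id->gds_base != job->gds_base ||
               id->gds_size != job->gds_size ||
               id->gws_base != job->gws_base ||
               id->gws_size != job->gws_size ||
               id->oa_base  != job->oa_base  ||
               id->oa_size  != job->oa_size;
    }

    static bool vmid_compatible(const struct mapping_state *id,
                                const struct mapping_state *job)
    {
        /* reuse only if the page directory matches and no switch is due */
        return id->pd_addr == job->pd_addr && !gds_switch_needed(id, job);
    }

    int main(void)
    {
        struct mapping_state id = {0}, job = {0};
        job.gds_size = 4096;
        printf("%d\n", vmid_compatible(&id, &job));   /* 0: GDS differs */
        return 0;
    }
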
  amdgpu_job.h
     42  #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
     86  static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
     88          return to_amdgpu_ring(job->base.entity->rq->sched);
     93                       unsigned int num_ibs, struct amdgpu_job **job);
     97                       struct amdgpu_job **job);
     98  void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
    100  void amdgpu_job_free_resources(struct amdgpu_job *job);
    101  void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
    103  void amdgpu_job_free(struct amdgpu_job *job);
    104  struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job);
         [all …]

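AMDGPU_JOB_GET_VMID() tolerates a NULL job by falling back to VMID 0. The sketch below reproduces that guard and adds the static-inline alternative, worth knowing because the macro form evaluates its argument twice:

    #include <stdio.h>

    struct job { unsigned int vmid; };

    /* macro form: beware, 'job' is evaluated twice */
    #define JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)

    /* inline form evaluates its argument exactly once */
    static inline unsigned int job_get_vmid(const struct job *job)
    {
        return job ? job->vmid : 0;
    }

    int main(void)
    {
        struct job j = { .vmid = 3 };
        printf("%u %u\n", JOB_GET_VMID(&j), job_get_vmid(NULL)); /* "3 0" */
        return 0;
    }
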
/linux/drivers/gpu/host1x/hw/
  channel_hw.c
     50  static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
     53          struct host1x_cdma *cdma = &job->channel->cdma;
     63                  if (job->memory_context)
     64                          stream_id = job->memory_context->stream_id;
     66                          stream_id = job->engine_fallback_streamid;
     79                  host1x_cdma_push_wide(&job->channel->cdma,
     80                          host1x_opcode_setclass(job->class, 0, 0),
     82                          host1x_opcode_setstreamid(job->engine_streamid_offset / 4),
    113  static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
    115          struct host1x_cdma *cdma = &job->channel->cdma;
         [all …]

/linux/drivers/gpu/drm/v3d/
  v3d_sched.c
     71          struct v3d_job *job = to_v3d_job(sched_job);
     73          v3d_job_cleanup(job);
    109          struct v3d_cpu_job *job = to_cpu_job(sched_job);
    111          v3d_timestamp_query_info_free(&job->timestamp_query,
    112                                        job->timestamp_query.count);
    114          v3d_performance_query_info_free(&job->performance_query,
    115                                          job->performance_query.count);
    117          v3d_job_cleanup(&job->base);
    121  v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
    126          perfmon = job->perfmon;
         [all …]

  v3d_submit.c
     22  v3d_lock_bo_reservations(struct v3d_job *job,
     27          ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
     31          for (i = 0; i < job->bo_count; i++) {
     32                  ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
     36                  ret = drm_sched_job_add_implicit_dependencies(&job->base,
     37                                                                job->bo[i], true);
     45          drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
     68                     struct v3d_job *job,
     72          job->bo_count = bo_count;
     74          if (!job->bo_count) {
         [all …]

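v3d_lock_bo_reservations() is the classic acquire-then-unwind shape: lock every BO, do per-BO work, and on any failure release everything taken so far. A generic sketch of that unwinding (lock/reserve functions are stand-ins, not the DRM API):

    #include <stddef.h>

    struct bo { int locked; int fences_reserved; };

    static int  lock_bo(struct bo *bo)   { bo->locked = 1; return 0; }
    static void unlock_bo(struct bo *bo) { bo->locked = 0; }

    /* stand-in for a per-object step that may fail, e.g. with -ENOMEM */
    static int reserve_fence(struct bo *bo) { bo->fences_reserved++; return 0; }

    static int lock_bo_reservations(struct bo **bos, size_t count)
    {
        size_t i;
        int ret = 0;

        for (i = 0; i < count; i++) {   /* take every lock first */
            ret = lock_bo(bos[i]);
            if (ret)
                goto fail_unlock;
        }

        for (i = 0; i < count; i++) {   /* then the fallible per-BO work */
            ret = reserve_fence(bos[i]);
            if (ret) {
                i = count;              /* all locks are held: drop them all */
                goto fail_unlock;
            }
        }
        return 0;

    fail_unlock:
        while (i--)                     /* unwind exactly what was taken */
            unlock_bo(bos[i]);
        return ret;
    }

    int main(void)
    {
        struct bo a = {0}, b = {0}, c = {0};
        struct bo *bos[] = { &a, &b, &c };
        return lock_bo_reservations(bos, 3);
    }
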
/linux/drivers/gpu/drm/imagination/
  pvr_queue.c
    350  static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count)
    356                  pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len);
    365  static unsigned long job_count_remaining_native_deps(struct pvr_job *job)
    371          xa_for_each(&job->base.dependencies, index, fence) {
    398  pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job)
    406          if (!job->cccb_fence)
    412          native_deps_remaining = job_count_remaining_native_deps(job);
    413          if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) {
    414                  pvr_queue_fence_put(job->cccb_fence);
    415                  job->cccb_fence = NULL;
         [all …]

  pvr_job.h
    104  pvr_job_get(struct pvr_job *job)
    106          if (job)
    107                  kref_get(&job->ref_count);
    109          return job;
    112  void pvr_job_put(struct pvr_job *job);
    119  pvr_job_release_pm_ref(struct pvr_job *job)
    121          if (job->has_pm_ref) {
    122                  pvr_power_put(job->pvr_dev);
    123                  job->has_pm_ref = false;
    136  pvr_job_get_pm_ref(struct pvr_job *job)
         [all …]

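pvr_job_release_pm_ref() hides the power put behind a has_pm_ref flag, which makes both take and release idempotent. A tiny self-contained version of that pairing (device and power calls are stand-ins):

    #include <stdbool.h>

    struct device { int pm_refs; };

    static int  power_get(struct device *d) { d->pm_refs++; return 0; }
    static void power_put(struct device *d) { d->pm_refs--; }

    struct job {
        struct device *dev;
        bool has_pm_ref;
    };

    static int job_get_pm_ref(struct job *job)
    {
        if (job->has_pm_ref)
            return 0;                   /* already holding one: no-op */
        if (power_get(job->dev))
            return -1;
        job->has_pm_ref = true;
        return 0;
    }

    static void job_release_pm_ref(struct job *job)
    {
        if (job->has_pm_ref) {          /* safe to call any number of times */
            power_put(job->dev);
            job->has_pm_ref = false;
        }
    }

    int main(void)
    {
        struct device dev = { .pm_refs = 0 };
        struct job job = { .dev = &dev, .has_pm_ref = false };

        job_get_pm_ref(&job);
        job_get_pm_ref(&job);           /* no-op: already held */
        job_release_pm_ref(&job);
        job_release_pm_ref(&job);       /* no-op: already released */
        return dev.pm_refs;             /* 0: balanced */
    }
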
/linux/drivers/gpu/drm/panfrost/
  panfrost_job.c
    106  int panfrost_job_get_slot(struct panfrost_job *job)
    112          if (job->requirements & PANFROST_JD_REQ_FS)
    117          if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
    118                  if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
    119                      (job->pfdev->features.nr_core_groups == 2))
    121                  if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
    146  panfrost_get_job_chain_flag(const struct panfrost_job *job)
    148          struct panfrost_fence *f = to_panfrost_fence(job->done_fence);
    150          if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
    159          struct panfrost_job *job = pfdev->jobs[slot][0];
         [all …]

/linux/drivers/accel/amdxdna/
  aie2_ctx.c
     33          struct amdxdna_sched_job *job;
     35          job = container_of(ref, struct amdxdna_sched_job, refcnt);
     36          amdxdna_sched_job_cleanup(job);
     37          if (job->out_fence)
     38                  dma_fence_put(job->out_fence);
     39          kfree(job);
     42  static void aie2_job_put(struct amdxdna_sched_job *job)
     44          kref_put(&job->refcnt, aie2_job_release);
    172  aie2_sched_notify(struct amdxdna_sched_job *job)
    174          struct dma_fence *fence = job->fence;
         [all …]

  amdxdna_ctx.c
    341  amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
    345          for (i = 0; i < job->bo_cnt; i++) {
    346                  if (!job->bos[i])
    348                  drm_gem_object_put(job->bos[i]);
    354                         struct amdxdna_sched_job *job,
    360          job->bo_cnt = bo_cnt;
    361          for (i = 0; i < job->bo_cnt; i++) {
    374                  job->bos[i] = gobj;
    387                  job->bos[i] = gobj;
    393          amdxdna_arg_bos_put(job);
         [all …]

/linux/drivers/gpu/drm/tegra/
  submit.c
    327  static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,
    344          job->syncpt = host1x_syncpt_get(sp);
    345          job->syncpt_incrs = args->syncpt.increments;
    350  static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,
    385          host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);
    399          struct host1x_job *job;
    412          job = host1x_job_alloc(context->channel, args->num_cmds, 0, true);
    413          if (!job) {
    415                  job = ERR_PTR(-ENOMEM);
    419          err = submit_get_syncpt(context, job, syncpoints, args);
         [all …]

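submit_create_job() reports failure through the returned pointer itself, ERR_PTR() style, instead of a separate status out-parameter. A userspace sketch of that encoding, assuming the top page of the address space never holds a valid allocation (guaranteed in the kernel; merely plausible for a demo):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    /* squeeze a small negative errno into the top of the pointer range */
    static inline void    *err_ptr(intptr_t err)    { return (void *)err; }
    static inline intptr_t ptr_err(const void *ptr) { return (intptr_t)ptr; }
    static inline bool     is_err(const void *ptr)
    {
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    struct job { int id; };

    static struct job *create_job(void)
    {
        struct job *job = malloc(sizeof(*job));
        if (!job)
            return err_ptr(-ENOMEM);    /* the error rides in the pointer */
        job->id = 1;
        return job;
    }

    int main(void)
    {
        struct job *job = create_job();
        if (is_err(job))
            return (int)-ptr_err(job);  /* recover the errno */
        free(job);
        return 0;
    }
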
/linux/drivers/scsi/lpfc/
  lpfc_bsg.c
    305          struct bsg_job *job;
    318          job = dd_data->set_job;
    319          if (job) {
    320                  bsg_reply = job->reply;
    322                  job->dd_data = NULL;
    342          if (job) {
    361                  lpfc_bsg_copy_data(rmp, &job->reply_payload,
    376          if (job) {
    378                  bsg_job_done(job, bsg_reply->result,
    389  lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
         [all …]

/linux/drivers/gpu/drm/scheduler/
  sched_main.c
    392          struct drm_sched_job *job;
    395          job = list_first_entry_or_null(&sched->pending_list,
    397          if (job && dma_fence_is_signaled(&job->s_fence->finished))
    554          struct drm_sched_job *job;
    561          job = list_first_entry_or_null(&sched->pending_list,
    564          if (job) {
    570                  list_del_init(&job->list);
    573                  status = job->sched->ops->timedout_job(job);
    580                  job->sched->ops->free_job(job);
    797  int drm_sched_job_init(struct drm_sched_job *job,
         [all …]

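drm_sched_job_timedout() pulls the oldest entry off the pending list, unlinks it before calling the driver's timedout_job() hook, and frees it when the driver gives up on the device. A stripped-down sketch of that ops-driven flow (all names invented; the real scheduler also handles locking, requeueing, and fence state):

    #include <stdio.h>
    #include <stdlib.h>

    enum sched_stat { SCHED_STAT_NOMINAL, SCHED_STAT_ENODEV };

    struct sched_job { struct sched_job *next; };

    struct sched_ops {
        /* driver decides: recover and resubmit, or declare the device dead */
        enum sched_stat (*timedout_job)(struct sched_job *job);
        void (*free_job)(struct sched_job *job);
    };

    struct scheduler {
        struct sched_job *pending;      /* oldest job first */
        const struct sched_ops *ops;
    };

    static void sched_handle_timeout(struct scheduler *sched)
    {
        struct sched_job *job = sched->pending;

        if (!job)
            return;                 /* fence signaled before the timer fired */

        sched->pending = job->next; /* unlink before calling the driver */
        job->next = NULL;

        if (sched->ops->timedout_job(job) == SCHED_STAT_ENODEV)
            sched->ops->free_job(job);  /* device is gone: drop the job */
    }

    static enum sched_stat dead_device(struct sched_job *job)
    {
        (void)job;
        puts("timeout: giving up");
        return SCHED_STAT_ENODEV;
    }

    static void drop_job(struct sched_job *job) { free(job); }

    int main(void)
    {
        static const struct sched_ops ops = { dead_device, drop_job };
        struct scheduler sched = { malloc(sizeof(struct sched_job)), &ops };

        if (sched.pending)
            sched.pending->next = NULL;
        sched_handle_timeout(&sched);
        return 0;
    }
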
/linux/drivers/ufs/core/
  ufs_bsg.c
     29  static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
     33          struct ufs_bsg_request *bsg_request = job->request;
     47          if (*desc_len > job->request_payload.payload_len) {
     57          sg_copy_to_buffer(job->request_payload.sg_list,
     58                            job->request_payload.sg_cnt, descp,
     67  static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job)
     69          struct ufs_rpmb_request *rpmb_request = job->request;
     70          struct ufs_rpmb_reply *rpmb_reply = job->reply;
    107          payload = &job->request_payload;
    132  static int ufs_bsg_request(struct bsg_job *job)
         [all …]

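ufs_bsg_alloc_desc_buffer() checks the requested descriptor length against the payload the caller actually mapped before allocating and copying anything. The same validate-then-copy shape in plain C (the struct stands in for the bsg payload and sg_list machinery):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct payload {
        const void *buf;
        size_t len;                     /* bytes the caller really provided */
    };

    static int alloc_desc_buffer(const struct payload *payload,
                                 size_t desc_len, void **descp)
    {
        if (desc_len > payload->len)    /* reject oversized requests up front */
            return -EINVAL;

        *descp = malloc(desc_len);
        if (!*descp)
            return -ENOMEM;

        memcpy(*descp, payload->buf, desc_len);  /* copy only what fits */
        return 0;
    }

    int main(void)
    {
        const char data[32] = "descriptor";
        struct payload p = { data, sizeof(data) };
        void *desc = NULL;

        int ret = alloc_desc_buffer(&p, 64, &desc); /* 64 > 32: rejected */
        free(desc);                                 /* free(NULL) is a no-op */
        return ret == -EINVAL ? 0 : 1;
    }
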
/linux/drivers/gpu/drm/ci/
  lava-submit.sh
     16  mkdir -p results/job-rootfs-overlay/
     18  cp artifacts/ci-common/capture-devcoredump.sh results/job-rootfs-overlay/
     19  cp artifacts/ci-common/init-*.sh results/job-rootfs-overlay/
     20  cp artifacts/ci-common/intel-gpu-freq.sh results/job-rootfs-overlay/
     21  cp "$SCRIPTS_DIR"/setup-test-env.sh results/job-rootfs-overlay/
     26  artifacts/ci-common/generate-env.sh | tee results/job-rootfs-overlay/set-job-env-vars.sh
     29  tar zcf job-rootfs-overlay.tar.gz -C results/job-rootfs-overlay/ .
     30  ci-fairy s3cp --token-file "${S3_JWT_FILE}" job-rootfs-overlay.tar.gz "https://${JOB_ROOTFS_OVERLAY…
     42          --job-rootfs-overlay-url "${FDO_HTTP_CACHE_URI:-}https://${JOB_ROOTFS_OVERLAY_PATH}" \
     43          --job-timeout-min ${JOB_TIMEOUT:-80} \
         [all …]