| /linux/drivers/gpu/drm/xe/ |
| H A D | xe_tlb_inval_job.c |
    19  /** struct xe_tlb_inval_job - TLB invalidation job */
    21  /** @dep: base generic dependency Xe job */
    31  /** @refcount: ref count of this job */
    34   * @fence: dma fence to indicate completion. 1 way relationship - job
    35   * can safely reference fence, fence cannot safely reference job.
    50  struct xe_tlb_inval_job *job =
    51      container_of(dep_job, typeof(*job), dep);
    53      container_of(job->fence, typeof(*ifence), base);
    56  if (xe_page_reclaim_list_valid(&job->prl)) {
    57      prl_sa = xe_page_reclaim_create_prl_bo(job->tlb_inval, &job->prl, ifence);
[all …]
|
| H A D | xe_ring_ops.c |
    211  static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
    213      struct xe_exec_queue *q = job->q;
    234      else if (job->q->class == XE_ENGINE_CLASS_COMPUTE)
    261  static u32 get_ppgtt_flag(struct xe_sched_job *job)
    263      if (job->q->vm && !job->ggtt)
    278       * Ensure CTX timestamp >= Job timestamp during VF sampling to avoid
    319  static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc,
    323      u32 ppgtt_flag = get_ppgtt_flag(job);
    324      struct xe_gt *gt = job->q->gt;
    328      if (job->ring_ops_force_reset)
[all …]
|
| /linux/drivers/gpu/drm/nouveau/ |
| H A D | nouveau_sched.c |
    26  nouveau_job_init(struct nouveau_job *job,
    32      INIT_LIST_HEAD(&job->entry);
    34      job->file_priv = args->file_priv;
    35      job->cli = nouveau_cli(args->file_priv);
    36      job->sched = sched;
    38      job->sync = args->sync;
    39      job->resv_usage = args->resv_usage;
    41      job->ops = args->ops;
    43      job->in_sync.count = args->in_sync.count;
    44      if (job->in_sync.count) {
[all …]
|
| H A D | nouveau_exec.c |
    67   * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
    69   * A VM_BIND job can be executed either synchronously or asynchronously. If
    70   * executed asynchronously, userspace may provide a list of syncobjs this job
    72   * VM_BIND job finished execution. If executed synchronously the ioctl will
    73   * block until the bind job is finished. For synchronous jobs the kernel will
    82   * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
    89  nouveau_exec_job_submit(struct nouveau_job *job,
    92      struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
    93      struct nouveau_cli *cli = job->cli;
    120 nouveau_exec_job_armed_submit(struct nouveau_job *job,
[all …]
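The comment block above describes when submission blocks: a synchronous VM_BIND job makes the ioctl wait until the bind has finished, while an asynchronous one returns immediately and lets userspace wait on the out-syncobjs instead. A rough sketch of that branch, assuming a job's done fence is already in hand (the helper shape is illustrative, not nouveau's actual submission path):

    #include <linux/dma-fence.h>

    /* Sketch only: 'sync' mirrors job->sync; nouveau's real path is
     * more involved and also arms the out-syncobjs. */
    static int exec_submit_sketch(struct dma_fence *done, bool sync)
    {
        if (sync) {
            /* Synchronous: the ioctl blocks until the job finished. */
            long ret = dma_fence_wait(done, true);

            return ret < 0 ? ret : 0;
        }

        /* Asynchronous: return immediately; completion is observed
         * through the syncobjs signalled from this fence. */
        return 0;
    }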
|
| /linux/drivers/gpu/host1x/ |
| H A D | job.c |
    3    * Tegra host1x Job
    21  #include "job.h"
    30  struct host1x_job *job = NULL;
    51  mem = job = kzalloc(total, GFP_KERNEL);
    52  if (!job)
    55  job->enable_firewall = enable_firewall;
    57  kref_init(&job->ref);
    58  job->channel = ch;
    62  job->relocs = num_relocs ? mem : NULL;
    64  job->unpins = num_unpins ? mem : NULL;
[all …]
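The host1x_job_alloc() excerpt shows a common kernel idiom: one kzalloc() sized for the struct plus all of its trailing arrays, then carved into sub-array pointers so a single free releases everything. A minimal userspace sketch of the same pattern (struct and field names here are illustrative, not the host1x API; the kernel version also checks the size sums for overflow):

    #include <stdlib.h>

    struct reloc { int target; };
    struct unpin { void *addr; };

    struct job {
        unsigned int num_relocs, num_unpins;
        struct reloc *relocs;   /* points into the same allocation */
        struct unpin *unpins;
    };

    static struct job *job_alloc(unsigned int num_relocs, unsigned int num_unpins)
    {
        /* One allocation holds the struct followed by both arrays. */
        size_t total = sizeof(struct job) +
                       num_relocs * sizeof(struct reloc) +
                       num_unpins * sizeof(struct unpin);
        void *mem;
        struct job *job;

        mem = job = calloc(1, total);
        if (!job)
            return NULL;

        job->num_relocs = num_relocs;
        job->num_unpins = num_unpins;

        /* Carve the trailing memory into the sub-arrays. */
        mem = (char *)mem + sizeof(struct job);
        job->relocs = num_relocs ? mem : NULL;
        mem = (char *)mem + num_relocs * sizeof(struct reloc);
        job->unpins = num_unpins ? mem : NULL;

        return job;   /* a single free(job) releases everything */
    }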
|
| /linux/drivers/gpu/drm/imagination/ |
| H A D | pvr_job.c |
    25  struct pvr_job *job = container_of(kref, struct pvr_job, ref_count);
    27  xa_erase(&job->pvr_dev->job_ids, job->id);
    29  pvr_hwrt_data_put(job->hwrt);
    30  pvr_context_put(job->ctx);
    32  WARN_ON(job->paired_job);
    34  pvr_queue_job_cleanup(job);
    35  pvr_job_release_pm_ref(job);
    37  kfree(job->cmd);
    38  kfree(job);
    42   * pvr_job_put() - Release reference on job
[all …]
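pvr_job_release() is a kref release callback: it runs exactly once, when the last reference is dropped, so it can tear the job down without further locking. A minimal sketch of the same kref pattern, assuming a hypothetical struct my_job (not the pvr API):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct my_job {
        struct kref ref_count;
        void *cmd;
    };

    static void my_job_release(struct kref *kref)
    {
        struct my_job *job = container_of(kref, struct my_job, ref_count);

        /* Runs exactly once, when the last reference is dropped. */
        kfree(job->cmd);
        kfree(job);
    }

    static struct my_job *my_job_create(void)
    {
        struct my_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

        if (job)
            kref_init(&job->ref_count);  /* refcount starts at 1 */
        return job;
    }

    static void my_job_put(struct my_job *job)
    {
        if (job)
            kref_put(&job->ref_count, my_job_release);
    }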
|
| H A D | pvr_queue.c |
    238  * Call this function to allocate job CCCB and done fences. This only
    303  * pvr_queue_job_fence_init() - Initializes a job done fence object.
    308  * a job.
    350 static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count)
    353      * and a command for the job itself.
    356     pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len);
    361  * @job: Job to operate on.
    365 static unsigned long job_count_remaining_native_deps(struct pvr_job *job)
    371     xa_for_each(&job->base.dependencies, index, fence) {
    386  * pvr_queue_get_job_cccb_fence() - Get the CCCB fence attached to a job.
[all …]
|
| H A D | pvr_job.h |
    34  /** @ref_count: Refcount for job. */
    37  /** @type: Type of job. */
    40  /** @id: Job ID number. */
    44   * @paired_job: Job paired to this job.
    50   * fragment job to execute when the Parameter Manager runs out of memory.
    52   * The geometry job should point to the fragment job it's paired with,
    53   * and the fragment job should point to the geometry job it's paired with.
    63  /** @done_fence: Fence to signal when the job is done. */
    87   * @has_pm_ref: True if the job has a power ref, thus forcing the GPU to stay on until
    88   * the job is done.
[all …]
|
| /linux/drivers/md/ |
| H A D | dm-kcopyd.c |
    41  MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
    353  * Error state of the job.
    373  * Set this to ensure you are notified when the job has
    380  * These fields are only used if the job has been split
    414  * Functions to push and pop a job onto the head of a given job
    420 struct kcopyd_job *job;
    426 list_for_each_entry(job, jobs, list) {
    427     if (job->op == REQ_OP_READ ||
    428         !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
    429         list_del(&job->list);
[all …]
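The pop_io_job() excerpt encodes a scheduling rule: reads and unordered writes may be popped from anywhere in the pending list, while jobs flagged DM_KCOPYD_WRITE_SEQ must preserve write order. A minimal sketch of that selection loop with a simplified job type (the real driver additionally pops a sequential write when it is next in write order, which this sketch omits):

    #include <linux/list.h>
    #include <linux/bits.h>
    #include <linux/blk_types.h>
    #include <linux/dm-kcopyd.h>

    struct io_job_sketch {            /* simplified stand-in for kcopyd_job */
        struct list_head list;
        enum req_op op;
        unsigned long flags;
    };

    static struct io_job_sketch *pop_io_job_sketch(struct list_head *jobs)
    {
        struct io_job_sketch *job;

        list_for_each_entry(job, jobs, list) {
            /* Reads and unordered writes can run in any order;
             * sequential writes are skipped to keep write order. */
            if (job->op == REQ_OP_READ ||
                !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
                list_del(&job->list);
                return job;
            }
        }
        return NULL;
    }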
|
| /linux/drivers/gpu/drm/v3d/ |
| H A D | v3d_submit.c |
    15   * we can attach fences and update the reservations after pushing the job
    23  v3d_lock_bo_reservations(struct v3d_job *job,
    28      ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
    32      for (i = 0; i < job->bo_count; i++) {
    33          ret = dma_resv_reserve_fences(job->bo[i]->resv, 1);
    37          ret = drm_sched_job_add_implicit_dependencies(&job->base,
    38                                                        job->bo[i], true);
    46      drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
    51   * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
    52   * referenced by the job.
[all …]
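v3d_lock_bo_reservations() follows the usual DRM shape: lock every BO's reservation object up front, reserve a fence slot and collect implicit dependencies on each, and unwind by unlocking everything on any failure. A minimal sketch of that shape (the label and function name are illustrative; the DRM helpers are real):

    #include <drm/drm_gem.h>
    #include <drm/gpu_scheduler.h>
    #include <linux/dma-resv.h>

    static int lock_bo_reservations_sketch(struct drm_gem_object **bo,
                                           int bo_count,
                                           struct drm_sched_job *sched_job,
                                           struct ww_acquire_ctx *ctx)
    {
        int i, ret;

        /* Take all reservation locks together (ww-mutex backoff). */
        ret = drm_gem_lock_reservations(bo, bo_count, ctx);
        if (ret)
            return ret;

        for (i = 0; i < bo_count; i++) {
            /* Make room for the fence attached after submission. */
            ret = dma_resv_reserve_fences(bo[i]->resv, 1);
            if (ret)
                goto fail;

            /* Wait for prior users of the BO via implicit sync. */
            ret = drm_sched_job_add_implicit_dependencies(sched_job,
                                                          bo[i], true);
            if (ret)
                goto fail;
        }

        return 0;

    fail:
        drm_gem_unlock_reservations(bo, bo_count, ctx);
        return ret;
    }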
|
| /linux/drivers/accel/rocket/ |
| H A D | rocket_job.c |
    61  struct drm_rocket_job *job,
    66      if (job->task_struct_size < sizeof(struct drm_rocket_task))
    69      rjob->task_count = job->task_count;
    74      rjob->tasks = kvmalloc_objs(*rjob->tasks, job->task_count);
    84          u64_to_user_ptr(job->tasks) + i * job->task_struct_size,
    108 static void rocket_job_hw_submit(struct rocket_core *core, struct rocket_job *job)
    113     /* Don't queue the job if a reset is in progress */
    119     task = &job->tasks[job->next_task_idx];
    120     job->next_task_idx++;
    157 struct drm_sched_job *job,
[all …]
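rocket_copy_tasks() uses the standard extensible-uAPI idiom: userspace passes its own per-element struct size (task_struct_size), the kernel rejects anything smaller than the version it knows, and indexes the user array with the user's stride so newer userspace with a larger struct still works. A minimal sketch of that copy loop (struct and function names are illustrative; stricter uAPIs also verify that unknown trailing bytes are zero):

    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    struct my_task { __u64 addr; __u32 len; __u32 flags; }; /* kernel's view */

    static int copy_tasks_sketch(struct my_task *dst, u64 user_ptr,
                                 u32 task_count, u32 task_struct_size)
    {
        u32 i;

        /* Older userspace with a smaller struct is not supported. */
        if (task_struct_size < sizeof(struct my_task))
            return -EINVAL;

        for (i = 0; i < task_count; i++) {
            /* Step by the user's stride, copy only the fields we
             * know; trailing fields of newer userspace are ignored. */
            if (copy_from_user(&dst[i],
                               u64_to_user_ptr(user_ptr) +
                                   (u64)i * task_struct_size,
                               sizeof(struct my_task)))
                return -EFAULT;
        }

        return 0;
    }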
|
| /linux/drivers/gpu/drm/panfrost/ |
| H A D | panfrost_job.c |
    111 int panfrost_job_get_slot(struct panfrost_job *job)
    117     if (job->requirements & PANFROST_JD_REQ_FS)
    122     if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
    123         if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
    124             (job->pfdev->features.nr_core_groups == 2))
    126         if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
    151 panfrost_get_job_chain_flag(const struct panfrost_job *job)
    153     struct panfrost_fence *f = to_panfrost_fence(job->done_fence);
    155     if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
    164 struct panfrost_job *job = pfdev->jobs[slot][0];
[all …]
|
| /linux/drivers/accel/ethosu/ |
| H A D | ethosu_job.c |
    45  static void ethosu_job_hw_submit(struct ethosu_device *dev, struct ethosu_job *job)
    47      struct drm_gem_dma_object *cmd_bo = to_drm_gem_dma_obj(job->cmd_bo);
    48      struct ethosu_validated_cmdstream_info *cmd_info = to_ethosu_bo(job->cmd_bo)->info;
    50      for (int i = 0; i < job->region_cnt; i++) {
    52          int region = job->region_bo_num[i];
    54          bo = to_drm_gem_dma_obj(job->region_bo[i]);
    60      if (job->sram_size) {
    79  static int ethosu_acquire_object_fences(struct ethosu_job *job)
    82      struct drm_gem_object **bos = job->region_bo;
    83      struct ethosu_validated_cmdstream_info *info = to_ethosu_bo(job->cmd_bo)->info;
[all …]
|
| /linux/drivers/gpu/drm/scheduler/ |
| H A D | sched_main.c |
    32   * backend operations to the scheduler like submitting a job to hardware run queue,
    33   * returning the dependencies of a job etc.
    46   * Note that once a job was taken from the entities queue and pushed to the
    61   * Once a job is executed (but not yet finished), the job's credits contribute
    62   * to the scheduler's credit count until the job is finished. If by executing
    63   * one more job the scheduler's credit count would exceed the scheduler's
    64   * credit limit, the job won't be executed. Instead, the scheduler will wait
    112  * Return true if we can push at least one more job from @entity, false
    124  /* If a job exceeds the credit limit, truncate it to the credit limit
    244  * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
[all …]
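The comments above describe the scheduler's credit accounting: each job carries a credit cost, running jobs hold their credits until they finish, and a new job only runs if it fits under the scheduler's credit limit, with oversized jobs truncated to the limit so they can still run alone. A minimal sketch of that admission check with simplified fields (not the actual drm_sched internals):

    #include <stdbool.h>

    struct sched_sketch {
        unsigned int credit_limit;  /* max credits in flight */
        unsigned int credit_count;  /* credits held by running jobs */
    };

    struct job_sketch {
        unsigned int credits;       /* cost of this job */
    };

    /* Return true if one more job fits under the credit limit. */
    static bool sched_can_queue(const struct sched_sketch *sched,
                                const struct job_sketch *job)
    {
        unsigned int credits = job->credits;

        /* An oversized job is truncated to the limit so it can still
         * run, but only with the scheduler otherwise idle. */
        if (credits > sched->credit_limit)
            credits = sched->credit_limit;

        return sched->credit_count + credits <= sched->credit_limit;
    }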
|
| H A D | sched_entity.c |
    44   * @guilty: atomic_t set to 1 when a job on this queue
    156  * drm_sched_entity_error - return error of last scheduled job
    159  * Opportunistically return the error of the last scheduled job. Result can
    181 struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
    186 xa_for_each(&job->dependencies, index, f) {
    204     xa_erase(&job->dependencies, index);
    205     if (f && !dma_fence_add_callback(f, &job->finish_cb,
    212 drm_sched_fence_scheduled(job->s_fence, NULL);
    213 drm_sched_fence_finished(job->s_fence, -ESRCH);
    214 WARN_ON(job->s_fence->parent);
[all …]
|
| /linux/drivers/accel/amdxdna/ |
| H A D | aie2_ctx.c |
    44  struct amdxdna_sched_job *job;
    46  job = container_of(ref, struct amdxdna_sched_job, refcnt);
    47  amdxdna_sched_job_cleanup(job);
    48  atomic64_inc(&job->hwctx->job_free_cnt);
    49  wake_up(&job->hwctx->priv->job_free_wq);
    50  if (job->out_fence)
    51      dma_fence_put(job->out_fence);
    52  kfree(job->aie2_job_health);
    53  kfree(job);
    56  static void aie2_job_put(struct amdxdna_sched_job *job)
[all …]
|
| H A D | amdxdna_ctx.c |
    146 struct amdxdna_sched_job *job, u32 cmd_idx,
    150     struct amdxdna_client *client = job->hwctx->client;
    405 amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
    409     for (i = 0; i < job->bo_cnt; i++) {
    410         if (!job->bos[i])
    412         drm_gem_object_put(job->bos[i]);
    418 struct amdxdna_sched_job *job,
    424     job->bo_cnt = bo_cnt;
    425     for (i = 0; i < job->bo_cnt; i++) {
    438         job->bos[i] = gobj;
[all …]
|
| /linux/drivers/scsi/lpfc/ |
| H A D | lpfc_bsg.c |
    71  /* driver data associated with the job */
    96  struct bsg_job *set_job; /* job waiting for this iocb to finish */
    305 struct bsg_job *job;
    316 /* Determine if job has been aborted */
    318 job = dd_data->set_job;
    319 if (job) {
    320     bsg_reply = job->reply;
    321     /* Prevent timeout handling from trying to abort job */
    322     job->dd_data = NULL;
    342 if (job) {
[all …]
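The lpfc completion handler clears job->dd_data under the driver lock so the timeout path, which inspects the same pointer under the same lock, knows the job has already completed and must not be aborted. A minimal sketch of that handshake with a simplified context (not the lpfc structures; in the real driver the timeout path issues an abort for a still-live command rather than just resetting the timer):

    #include <linux/spinlock.h>
    #include <linux/blk-mq.h>
    #include <linux/bsg-lib.h>

    struct bsg_ctx_sketch {
        spinlock_t lock;
        struct bsg_job *set_job;   /* job waiting on this command */
    };

    static void cmd_complete_sketch(struct bsg_ctx_sketch *ctx)
    {
        struct bsg_job *job;
        unsigned long flags;

        spin_lock_irqsave(&ctx->lock, flags);
        job = ctx->set_job;
        if (job)
            job->dd_data = NULL;   /* timeout handler now backs off */
        spin_unlock_irqrestore(&ctx->lock, flags);

        if (job)
            bsg_job_done(job, 0, 0);  /* complete outside the lock */
    }

    static enum blk_eh_timer_return cmd_timeout_sketch(struct bsg_job *job,
                                                       struct bsg_ctx_sketch *ctx)
    {
        unsigned long flags;
        bool live;

        spin_lock_irqsave(&ctx->lock, flags);
        live = job->dd_data != NULL;  /* still owned by the command? */
        spin_unlock_irqrestore(&ctx->lock, flags);

        /* A live command would be aborted here; a completed one is done. */
        return live ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
    }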
|
| /linux/drivers/gpu/drm/tegra/ |
| H A D | submit.c |
    28  "%s: job submission failed: " fmt "\n", \
    327 static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,
    337     /* Syncpt ref will be dropped on job release */
    344     job->syncpt = host1x_syncpt_get(sp);
    345     job->syncpt_incrs = args->syncpt.increments;
    350 static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,
    370     SUBMIT_ERR(context, "too many total words in job");
    381     SUBMIT_ERR(context, "job was rejected by firewall");
    385     host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);
    399 struct host1x_job *job;
[all …]
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| H A D | amdgpu_ids.c |
    180 struct amdgpu_job *job)
    182     return id->gds_base != job->gds_base ||
    183            id->gds_size != job->gds_size ||
    184            id->gws_base != job->gws_base ||
    185            id->gws_size != job->gws_size ||
    186            id->oa_base != job->oa_base ||
    187            id->oa_size != job->oa_size;
    190 /* Check if the id is compatible with the job */
    192 struct amdgpu_job *job)
    194     return id->pd_gpu_addr == job->vm_pd_addr &&
[all …]
|
| /linux/Documentation/devicetree/bindings/powerpc/fsl/ |
| H A D | raideng.txt |
    30  There must be a sub-node for each job queue present in RAID Engine
    33  - compatible: Should contain "fsl,raideng-v1.0-job-queue" as the value
    34    This identifies the job queue interface
    35  - reg: offset and length of the register set for job queue
    42  compatible = "fsl,raideng-v1.0-job-queue";
    48  There must be a sub-node for each job ring present in RAID Engine
    49  This node must be a sub-node of job queue node
    51  - compatible: Must contain "fsl,raideng-v1.0-job-ring" as the value
    52    This identifies job ring. Should contain either
    55  - reg: offset and length of the register set for job ring
[all …]
|
| /linux/drivers/crypto/caam/ |
| H A D | Kconfig |
    20  This module creates job ring devices, and configures h/w
    36  tristate "Freescale CAAM Job Ring driver backend"
    40  Enables the driver module for Job Rings which are part of
    42  and Assurance Module (CAAM). This module adds a job ring operation
    51  int "Job Ring size"
    55  Select size of Job Rings as a power of 2, within the
    68  bool "Job Ring interrupt coalescing"
    70  Enable the Job Ring's interrupt coalescing feature.
    76  int "Job Ring interrupt coalescing count threshold"
    84  equal or greater than the job ring size will force timeouts.
[all …]
|
| /linux/Documentation/core-api/ |
| H A D | padata.rst |
    14  is currently the sole consumer of padata's serialized job support.
    16  Padata also supports multithreaded jobs, splitting up the job evenly while load
    38  A padata_shell is used to submit a job to padata and allows a series of such
    80  Running A Job
    84  padata_priv structure, which represents one job::
    99  The submission of the job is done with::
    105 points to the preferred CPU to be used for the final callback when the job is
    108 padata_do_parallel() is zero on success, indicating that the job is in
    114 Each job submitted to padata_do_parallel() will, in turn, be passed to
    123 parallel() will take responsibility for the job from this point. The job
[all …]
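The padata.rst excerpt outlines the job lifecycle: embed a padata_priv in the job, fill in its parallel() and serial() callbacks, submit with padata_do_parallel(), and hand the job on with padata_do_serial() once the parallel phase is done; serial() then runs in submission order. A minimal kernel-side sketch of that flow (shell setup not shown; my_work and the helpers around it are illustrative):

    #include <linux/padata.h>

    struct my_work {
        struct padata_priv padata;   /* embedded in the job */
        int result;
    };

    static void my_parallel(struct padata_priv *padata)
    {
        struct my_work *w = container_of(padata, struct my_work, padata);

        w->result = 42;              /* the parallelizable work */
        padata_do_serial(padata);    /* hand the job on for serialization */
    }

    static void my_serial(struct padata_priv *padata)
    {
        /* Runs in submission order on the callback CPU. */
    }

    static int my_submit(struct padata_shell *ps, struct my_work *w)
    {
        int cb_cpu = 0;              /* preferred CPU for serial() */

        w->padata.parallel = my_parallel;
        w->padata.serial = my_serial;

        /* Zero on success: padata owns the job until serial() runs. */
        return padata_do_parallel(ps, &w->padata, &cb_cpu);
    }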
|
| /linux/drivers/ufs/core/ |
| H A D | ufs_bsg.c |
    29  static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
    33      struct ufs_bsg_request *bsg_request = job->request;
    47      if (*desc_len > job->request_payload.payload_len) {
    57      sg_copy_to_buffer(job->request_payload.sg_list,
    58                        job->request_payload.sg_cnt, descp,
    67  static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job)
    69      struct ufs_rpmb_request *rpmb_request = job->request;
    70      struct ufs_rpmb_reply *rpmb_reply = job->reply;
    107     payload = &job->request_payload;
    132 static int ufs_bsg_request(struct bsg_job *job)
[all …]
|
| /linux/drivers/accel/habanalabs/common/ |
| H A D | hw_queue.c |
    229  * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
    272  * ext_queue_schedule_job - submit a JOB to an external queue
    274  * @job: pointer to the job that needs to be submitted to the queue
    279 static void ext_queue_schedule_job(struct hl_cs_job *job)
    281     struct hl_device *hdev = job->cs->ctx->hdev;
    282     struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
    292      * Update the JOB ID inside the BD CTL so the device would know what
    297     cb = job->patched_cb;
    298     len = job->job_cb_size;
    302     if (!cs_needs_completion(job->cs))
[all …]
|