/linux/drivers/gpu/drm/nouveau/

nouveau_sched.c
  26  nouveau_job_init(struct nouveau_job *job,
  32  INIT_LIST_HEAD(&job->entry);
  34  job->file_priv = args->file_priv;
  35  job->cli = nouveau_cli(args->file_priv);
  36  job->sched = sched;
  38  job->sync = args->sync;
  39  job->resv_usage = args->resv_usage;
  41  job->ops = args->ops;
  43  job->in_sync.count = args->in_sync.count;
  44  if (job->in_sync.count) {
  [all …]

nouveau_exec.c
  67   * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
  69   * A VM_BIND job can be executed either synchronously or asynchronously. If
  70   * executed asynchronously, userspace may provide a list of syncobjs this job
  72   * VM_BIND job finished execution. If executed synchronously the ioctl will
  73   * block until the bind job is finished. For synchronous jobs the kernel will
  82   * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
  89  nouveau_exec_job_submit(struct nouveau_job *job,
  92  	struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
  93  	struct nouveau_cli *cli = job->cli;
  120 nouveau_exec_job_armed_submit(struct nouveau_job *job,
  [all …]
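
The excerpt above documents that a job can complete synchronously (the ioctl blocks) or asynchronously (completion is reported through syncobjs). Below is a minimal, hypothetical sketch of that split as a driver might implement it around a job's done fence; my_job_finalize and the out_syncobj parameter are illustrative names, not nouveau's actual functions:

#include <linux/dma-fence.h>
#include <drm/drm_syncobj.h>

/*
 * Hypothetical helper: finish submitting a job whose completion is tracked
 * by @done_fence, either by blocking (sync) or by handing the fence to
 * userspace through a syncobj (async).
 */
static int my_job_finalize(struct dma_fence *done_fence,
			   struct drm_syncobj *out_syncobj,
			   bool sync)
{
	if (out_syncobj) {
		/* Async: userspace waits on the syncobj later. */
		drm_syncobj_replace_fence(out_syncobj, done_fence);
	}

	if (sync) {
		/* Sync: block the ioctl until the job has finished. */
		long ret = dma_fence_wait(done_fence, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}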

/linux/drivers/gpu/host1x/

job.c
  3    * Tegra host1x Job
  21  #include "job.h"
  30  	struct host1x_job *job = NULL;
  51  	mem = job = kzalloc(total, GFP_KERNEL);
  52  	if (!job)
  55  	job->enable_firewall = enable_firewall;
  57  	kref_init(&job->ref);
  58  	job->channel = ch;
  62  	job->relocs = num_relocs ? mem : NULL;
  64  	job->unpins = num_unpins ? mem : NULL;
  [all …]
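
host1x_job_alloc() sizes a single kzalloc() for the job structure plus its trailing arrays and then carves the array pointers out of that one block (which is why mem and job start out equal). A generic sketch of that pattern, using hypothetical struct and field names rather than the host1x ones:

#include <linux/slab.h>
#include <linux/kref.h>

struct my_reloc {
	unsigned long target;
};

struct my_job {
	struct kref ref;
	struct my_reloc *relocs;	/* points into the same allocation */
	unsigned int num_relocs;
};

static struct my_job *my_job_alloc(unsigned int num_relocs)
{
	struct my_job *job;
	size_t total;
	void *mem;

	/*
	 * One allocation covering the struct and the trailing array
	 * (real code should guard against overflow, e.g. via struct_size()).
	 */
	total = sizeof(*job) + (size_t)num_relocs * sizeof(*job->relocs);
	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->num_relocs = num_relocs;

	/* Carve the reloc array out of the same block. */
	mem += sizeof(*job);
	job->relocs = num_relocs ? mem : NULL;

	return job;
}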

/linux/drivers/gpu/drm/imagination/

pvr_queue.c
  238  * Call this function to allocate job CCCB and done fences. This only
  303  * pvr_queue_job_fence_init() - Initializes a job done fence object.
  308  * a job.
  350 static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count)
  353 	 * and a command for the job itself.
  356 	pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len);
  361  * @job: Job to operate on.
  365 static unsigned long job_count_remaining_native_deps(struct pvr_job *job)
  371 	xa_for_each(&job->base.dependencies, index, fence) {
  386  * pvr_queue_get_job_cccb_fence() - Get the CCCB fence attached to a job.
  [all …]

pvr_job.h
  34  	/** @ref_count: Refcount for job. */
  37  	/** @type: Type of job. */
  40  	/** @id: Job ID number. */
  44  	 * @paired_job: Job paired to this job.
  50  	 * fragment job to execute when the Parameter Manager runs out of memory.
  52  	 * The geometry job should point to the fragment job it's paired with,
  53  	 * and the fragment job should point to the geometry job it's paired with.
  63  	/** @done_fence: Fence to signal when the job is done. */
  87  	 * @has_pm_ref: True if the job has a power ref, thus forcing the GPU to stay on until
  88  	 * the job is done.
  [all …]

/linux/drivers/md/

dm-kcopyd.c
  41  MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
  353  * Error state of the job.
  373  * Set this to ensure you are notified when the job has
  380  * These fields are only used if the job has been split
  414  * Functions to push and pop a job onto the head of a given job
  420 	struct kcopyd_job *job;
  426 	list_for_each_entry(job, jobs, list) {
  427 		if (job->op == REQ_OP_READ ||
  428 		    !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
  429 			list_del(&job->list);
  [all …]
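
The lines above are kcopyd's internal job-list handling; clients normally only see the dm_kcopyd_* API. A hedged sketch of typical client usage follows — the callback signature matches include/linux/dm-kcopyd.h as I understand it, while my_copy_done, my_start_copy and the region values are placeholders:

#include <linux/dm-kcopyd.h>
#include <linux/dm-io.h>
#include <linux/printk.h>

/* Called once the copy job (and any sub-jobs it was split into) finishes. */
static void my_copy_done(int read_err, unsigned long write_err, void *context)
{
	if (read_err || write_err)
		pr_err("copy failed\n");
}

static void my_start_copy(struct dm_kcopyd_client *kc,
			  struct block_device *src, struct block_device *dst,
			  sector_t sector, sector_t count)
{
	struct dm_io_region from = {
		.bdev = src,
		.sector = sector,
		.count = count,
	};
	struct dm_io_region to = {
		.bdev = dst,
		.sector = sector,
		.count = count,
	};

	/* Queue the job; my_copy_done() runs when it completes. */
	dm_kcopyd_copy(kc, &from, 1, &to, 0, my_copy_done, NULL);
}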

/linux/block/

bsg-lib.c
  31  	struct bsg_job *job;
  49  	job = blk_mq_rq_to_pdu(rq);
  50  	reply = job->reply;
  51  	memset(job, 0, sizeof(*job));
  52  	job->reply = reply;
  53  	job->reply_len = SCSI_SENSE_BUFFERSIZE;
  54  	job->dd_data = job + 1;
  56  	job->request_len = hdr->request_len;
  57  	job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
  58  	if (IS_ERR(job->request)) {
  [all …]

/linux/drivers/gpu/host1x/hw/

channel_hw.c
  17  #include "../job.h"
  50  static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
  53  	struct host1x_cdma *cdma = &job->channel->cdma;
  63  	if (job->memory_context)
  64  		stream_id = job->memory_context->stream_id;
  66  		stream_id = job->engine_fallback_streamid;
  79  	host1x_cdma_push_wide(&job->channel->cdma,
  80  			      host1x_opcode_setclass(job->class, 0, 0),
  82  			      host1x_opcode_setstreamid(job->engine_streamid_offset / 4),
  113 static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
  [all …]

/linux/drivers/gpu/drm/xe/

xe_sched_job.h
  26   * xe_sched_job_get - get reference to XE schedule job
  27   * @job: XE schedule job object
  29   * Increment XE schedule job's reference count
  31  static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
  33  	kref_get(&job->refcount);
  34  	return job;
  38   * xe_sched_job_put - put reference to XE schedule job
  39   * @job: XE schedule job object
  41   * Decrement XE schedule job's reference count, call xe_sched_job_destroy when
  44  static inline void xe_sched_job_put(struct xe_sched_job *job)
  [all …]
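
These are plain kref wrappers, so the usual rule applies: take a reference before the job escapes to another context and drop it when that context is done with it. A hypothetical usage sketch (struct my_pending, my_start and my_finish are illustrative, not xe code):

#include "xe_sched_job.h"

/* Hypothetical holder that keeps a job alive until some later event. */
struct my_pending {
	struct xe_sched_job *job;
};

static void my_start(struct my_pending *p, struct xe_sched_job *job)
{
	/* The pending slot holds its own reference. */
	p->job = xe_sched_job_get(job);
}

static void my_finish(struct my_pending *p)
{
	/* Drop the slot's reference; the job is destroyed on the last put. */
	xe_sched_job_put(p->job);
	p->job = NULL;
}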

xe_exec.c
  65   * submission time and set the DRM scheduler max job limit SIZE_OF_RING /
  83   *	Create job                                                             |
  85   *	Add rebind fence dependency to job                                     |
  86   *	Add job VM dma-resv bookkeeping slot (non-compute mode)                |
  87   *	Add job to external BOs dma-resv write slots (non-compute mode)        |
  89   *	Install in / out fences for job
  90   *	Submit job
  103 	/* The fence slot added here is intended for the exec sched job. */
  124 	struct xe_sched_job *job;
  280 	job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
  [all …]

/linux/drivers/gpu/drm/panfrost/

panfrost_job.c
  106 int panfrost_job_get_slot(struct panfrost_job *job)
  112 	if (job->requirements & PANFROST_JD_REQ_FS)
  117 	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
  118 		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
  119 		    (job->pfdev->features.nr_core_groups == 2))
  121 		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
  146 panfrost_get_job_chain_flag(const struct panfrost_job *job)
  148 	struct panfrost_fence *f = to_panfrost_fence(job->done_fence);
  150 	if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
  159 	struct panfrost_job *job = pfdev->jobs[slot][0];
  [all …]

/linux/drivers/accel/ivpu/

ivpu_job.c
  174 	ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d, flags 0x%08x\n",
  223 	ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
  276 	ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
  281 	ivpu_dbg(vdev, JOB, "Command queue %d destroyed, ctx %d\n",
  357 	 * and FW loses job queue state. The next time job queue is used it
  395 	ivpu_dbg(vdev, JOB, "Context ID: %u abort\n", file_priv->ctx.id);
  408 static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
  410 	struct ivpu_device *vdev = job->vdev;
  416 	/* Check if there is space left in job queue */
  418 		ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n",
  [all …]

/linux/drivers/scsi/lpfc/

lpfc_bsg.c
  71  	/* driver data associated with the job */
  96  	struct bsg_job *set_job; /* job waiting for this iocb to finish */
  305 	struct bsg_job *job;
  316 	/* Determine if job has been aborted */
  318 	job = dd_data->set_job;
  319 	if (job) {
  320 		bsg_reply = job->reply;
  321 		/* Prevent timeout handling from trying to abort job */
  322 		job->dd_data = NULL;
  342 	if (job) {
  [all …]

/linux/drivers/gpu/drm/msm/

msm_gem_vma.c
  113 	/** @fence: The fence that is signaled when job completes */
  115 	/** @queue: The queue that the job runs on */
  153 static inline struct msm_vm_bind_job *to_msm_vm_bind_job(struct drm_sched_job *job)
  155 	return container_of(job, struct msm_vm_bind_job, base);
  460 	struct msm_vm_bind_job *job;
  470 	list_add_tail(&op->node, &arg->job->vm_ops);
  479 	return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset,
  487 	struct msm_vm_bind_job *job = arg->job;
  521 		.queue_id = job->queue->id,
  534 	struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
  [all …]

/linux/drivers/gpu/drm/tegra/

submit.c
  28  		"%s: job submission failed: " fmt "\n", \
  327 static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,
  337 	/* Syncpt ref will be dropped on job release */
  344 	job->syncpt = host1x_syncpt_get(sp);
  345 	job->syncpt_incrs = args->syncpt.increments;
  350 static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,
  370 		SUBMIT_ERR(context, "too many total words in job");
  381 		SUBMIT_ERR(context, "job was rejected by firewall");
  385 	host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);
  399 	struct host1x_job *job;
  [all …]

/linux/include/drm/

gpu_scheduler.h
  75   * struct drm_sched_entity - A wrapper around a job queue (typically
  173  *	The dependency fence of the job which is on the top of the job queue.
  194  *	Points to the finished fence of the last scheduled job. Only written
  201  * @last_user: last group leader pushing a job into the entity.
  225  *	Marks earliest job waiting in SW queue
  262  * struct drm_sched_fence - fences corresponding to the scheduling of a job.
  267  *	when the job is scheduled.
  273  *	when the job is completed.
  275  *	When setting up an out fence for the job, you should use
  291  *	when scheduling the job on hardware. We signal the
  [all …]
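
The scheduled/finished fences described above are what a driver hands back as a job's out-fence. A rough sketch of a submit path built on the drm_sched API follows; the exact drm_sched_job_init() signature has changed across kernel versions (newer kernels take an extra credits argument), so treat the call below as an assumption, and struct my_job is a placeholder wrapper:

#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>
#include <linux/err.h>

struct my_job {
	struct drm_sched_job base;
	/* driver-specific payload */
};

static struct dma_fence *my_submit(struct my_job *mjob,
				   struct drm_sched_entity *entity,
				   void *owner)
{
	struct dma_fence *out;
	int ret;

	/* Older three-argument form; newer kernels add a credits parameter. */
	ret = drm_sched_job_init(&mjob->base, entity, owner);
	if (ret)
		return ERR_PTR(ret);

	drm_sched_job_arm(&mjob->base);

	/* The finished fence is the natural out-fence for userspace. */
	out = dma_fence_get(&mjob->base.s_fence->finished);

	drm_sched_entity_push_job(&mjob->base);

	return out;
}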

/linux/drivers/gpu/drm/amd/amdgpu/

amdgpu_ib.c
  108  * @job: job to schedule
  125 		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
  139 	int vmid = AMDGPU_JOB_GET_VMID(job);
  148 	/* ring tests don't use a job */
  149 	if (job) {
  150 		vm = job->vm;
  151 		fence_ctx = job->base.s_fence ?
  152 			job->base.s_fence->scheduled.context : 0;
  153 		shadow_va = job->shadow_va;
  154 		csa_va = job->csa_va;
  [all …]

/linux/Documentation/devicetree/bindings/powerpc/fsl/

raideng.txt
  30  There must be a sub-node for each job queue present in RAID Engine
  33  - compatible: Should contain "fsl,raideng-v1.0-job-queue" as the value
  34    This identifies the job queue interface
  35  - reg: offset and length of the register set for job queue
  42  	compatible = "fsl,raideng-v1.0-job-queue";
  48  There must be a sub-node for each job ring present in RAID Engine
  49  This node must be a sub-node of job queue node
  51  - compatible: Must contain "fsl,raideng-v1.0-job-ring" as the value
  52    This identifies job ring. Should contain either
  55  - reg: offset and length of the register set for job ring
  [all …]

/linux/drivers/crypto/caam/

Kconfig
  20  	  This module creates job ring devices, and configures h/w
  36  	tristate "Freescale CAAM Job Ring driver backend"
  40  	  Enables the driver module for Job Rings which are part of
  42  	  and Assurance Module (CAAM). This module adds a job ring operation
  51  	int "Job Ring size"
  55  	  Select size of Job Rings as a power of 2, within the
  68  	bool "Job Ring interrupt coalescing"
  70  	  Enable the Job Ring's interrupt coalescing feature.
  76  	int "Job Ring interrupt coalescing count threshold"
  84  	  equal or greater than the job ring size will force timeouts.
  [all …]

/linux/Documentation/core-api/

padata.rst
  14  is currently the sole consumer of padata's serialized job support.
  16  Padata also supports multithreaded jobs, splitting up the job evenly while load
  38  A padata_shell is used to submit a job to padata and allows a series of such
  80  Running A Job
  84  padata_priv structure, which represents one job::
  99  The submission of the job is done with::
  105 points to the preferred CPU to be used for the final callback when the job is
  108 padata_do_parallel() is zero on success, indicating that the job is in
  114 Each job submitted to padata_do_parallel() will, in turn, be passed to
  123 parallel() will take responsibility for the job from this point. The job
  [all …]
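
Pulling the pieces of that documentation together, a minimal sketch of submitting one serialized job looks roughly like this (my_padata_job, my_ps and the work done in the parallel stage are placeholders; padata_shell allocation and error handling are omitted):

#include <linux/padata.h>
#include <linux/smp.h>

struct my_padata_job {
	struct padata_priv padata;
	void *data;
};

static void my_parallel(struct padata_priv *padata)
{
	struct my_padata_job *job =
		container_of(padata, struct my_padata_job, padata);

	/* ... do the parallelizable work on job->data ... */

	/* Hand the job back so padata can serialize completions. */
	padata_do_serial(padata);
}

static void my_serial(struct padata_priv *padata)
{
	/* Runs in submission order once the parallel stage has finished. */
}

static int my_submit(struct padata_shell *my_ps, struct my_padata_job *job)
{
	int cb_cpu = smp_processor_id();

	job->padata.parallel = my_parallel;
	job->padata.serial = my_serial;

	/* Zero on success means the job is now in progress. */
	return padata_do_parallel(my_ps, &job->padata, &cb_cpu);
}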

/linux/drivers/gpu/drm/

drm_writeback.c
  438 int drm_writeback_prepare_job(struct drm_writeback_job *job)
  440 	struct drm_writeback_connector *connector = job->connector;
  446 		ret = funcs->prepare_writeback_job(connector, job);
  451 	job->prepared = true;
  457  * drm_writeback_queue_job - Queue a writeback job for later signalling
  458  * @wb_connector: The writeback connector to queue a job on
  459  * @conn_state: The connector state containing the job to queue
  461  * This function adds the job contained in @conn_state to the job_queue for a
  462  * writeback connector. It takes ownership of the writeback job and sets the
  463  * @conn_state->writeback_job to NULL, and so no access to the job may be
  [all …]
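
The kernel-doc above describes the ownership handoff at queue time. A hedged sketch of how a driver typically uses it: queue the job during the atomic commit and signal it from the hardware's write-out interrupt. The helper names are hypothetical; the hardware programming step is elided:

#include <drm/drm_writeback.h>
#include <drm/drm_connector.h>

static void my_commit_writeback(struct drm_writeback_connector *wb_conn,
				struct drm_connector_state *conn_state)
{
	if (!conn_state->writeback_job)
		return;

	/* The core takes ownership and clears conn_state->writeback_job. */
	drm_writeback_queue_job(wb_conn, conn_state);

	/* ... program the hardware to write the frame to the target buffer ... */
}

static void my_writeback_done_irq(struct drm_writeback_connector *wb_conn)
{
	/* Completes the oldest queued job and signals its out-fence. */
	drm_writeback_signal_completion(wb_conn, 0);
}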

/linux/drivers/gpu/drm/scheduler/

sched_entity.c
  44   * @guilty: atomic_t set to 1 when a job on this queue
  155  * drm_sched_entity_error - return error of last scheduled job
  158  * Opportunistically return the error of the last scheduled job. Result can
  177 	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
  179 	drm_sched_fence_scheduled(job->s_fence, NULL);
  180 	drm_sched_fence_finished(job->s_fence, -ESRCH);
  181 	WARN_ON(job->s_fence->parent);
  182 	job->sched->ops->free_job(job);
  189 	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
  196 	xa_for_each(&job->dependencies, index, f) {
  [all …]

/linux/drivers/ufs/core/

ufs_bsg.c
  29  static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
  33  	struct ufs_bsg_request *bsg_request = job->request;
  47  	if (*desc_len > job->request_payload.payload_len) {
  57  	sg_copy_to_buffer(job->request_payload.sg_list,
  58  			  job->request_payload.sg_cnt, descp,
  67  static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job)
  69  	struct ufs_rpmb_request *rpmb_request = job->request;
  70  	struct ufs_rpmb_reply *rpmb_reply = job->reply;
  107 	payload = &job->request_payload;
  132 static int ufs_bsg_request(struct bsg_job *job)
  [all …]
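
ufs_bsg_request() above is a bsg_job handler; every such handler ultimately completes the request with bsg_job_done(). A stripped-down, hypothetical handler shape — the request/reply layouts and the status handling are illustrative only, not the UFS ones:

#include <linux/bsg-lib.h>
#include <linux/types.h>

/* Illustrative, driver-defined request/reply layouts. */
struct my_bsg_req { u32 opcode; };
struct my_bsg_rsp { u32 status; };

static int my_bsg_request(struct bsg_job *job)
{
	struct my_bsg_rsp *rsp = job->reply;

	/* job->request holds the driver-defined command (struct my_bsg_req). */
	/* ... issue the command, fill in rsp ... */
	rsp->status = 0;
	job->reply_len = sizeof(*rsp);

	/* Complete the request; the second argument is the overall result. */
	bsg_job_done(job, 0, job->reply_payload_rcv_len);
	return 0;
}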

/linux/drivers/accel/habanalabs/common/

hw_queue.c
  229  * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
  272  * ext_queue_schedule_job - submit a JOB to an external queue
  274  * @job: pointer to the job that needs to be submitted to the queue
  279 static void ext_queue_schedule_job(struct hl_cs_job *job)
  281 	struct hl_device *hdev = job->cs->ctx->hdev;
  282 	struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
  292 	 * Update the JOB ID inside the BD CTL so the device would know what
  297 	cb = job->patched_cb;
  298 	len = job->job_cb_size;
  302 	if (!cs_needs_completion(job->cs))
  [all …]

/linux/arch/powerpc/boot/dts/fsl/

qoriq-sec6.0-0.dtsi
  42  	compatible = "fsl,sec-v6.0-job-ring",
  43  		     "fsl,sec-v5.2-job-ring",
  44  		     "fsl,sec-v5.0-job-ring",
  45  		     "fsl,sec-v4.4-job-ring",
  46  		     "fsl,sec-v4.0-job-ring";
  51  	compatible = "fsl,sec-v6.0-job-ring",
  52  		     "fsl,sec-v5.2-job-ring",
  53  		     "fsl,sec-v5.0-job-ring",
  54  		     "fsl,sec-v4.4-job-ring",
  55  		     "fsl,sec-v4.0-job-ring";