
Searched full:job (Results 1 – 25 of 563) sorted by relevance


/linux/drivers/gpu/drm/nouveau/
nouveau_sched.c
26   nouveau_job_init(struct nouveau_job *job,
32       INIT_LIST_HEAD(&job->entry);
34       job->file_priv = args->file_priv;
35       job->cli = nouveau_cli(args->file_priv);
36       job->sched = sched;
38       job->sync = args->sync;
39       job->resv_usage = args->resv_usage;
41       job->ops = args->ops;
43       job->in_sync.count = args->in_sync.count;
44       if (job->in_sync.count) {
[all …]
nouveau_exec.c
67    * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
69    * A VM_BIND job can be executed either synchronously or asynchronously. If
70    * executed asynchronously, userspace may provide a list of syncobjs this job
72    * VM_BIND job finished execution. If executed synchronously the ioctl will
73    * block until the bind job is finished. For synchronous jobs the kernel will
82    * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
89   nouveau_exec_job_submit(struct nouveau_job *job,
92       struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
93       struct nouveau_cli *cli = job->cli;
120  nouveau_exec_job_armed_submit(struct nouveau_job *job,
[all …]
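The nouveau_exec.c comments above describe the synchronous/asynchronous split for VM_BIND jobs. A hedged userspace-side sketch of the asynchronous path follows; submit_vm_bind_async() is a hypothetical wrapper standing in for the actual DRM_IOCTL_NOUVEAU_VM_BIND setup (elided here), while the syncobj calls are the standard libdrm API:

    #include <stdint.h>
    #include <xf86drm.h>

    /* Hypothetical helper: submits an asynchronous VM_BIND job and asks
     * the kernel to signal 'syncobj' once the bind job finished execution. */
    int submit_vm_bind_async(int fd, uint32_t syncobj);

    int bind_and_wait(int fd)
    {
        uint32_t syncobj;
        int ret;

        ret = drmSyncobjCreate(fd, 0, &syncobj);
        if (ret)
            return ret;

        ret = submit_vm_bind_async(fd, syncobj);   /* hypothetical */
        if (ret)
            return ret;

        /* Equivalent of the synchronous mode: block until the job is done. */
        return drmSyncobjWait(fd, &syncobj, 1, INT64_MAX, 0, NULL);
    }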
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
37        struct amdgpu_job *job)
48       amdgpu_coredump(adev, true, false, job);
52        struct amdgpu_job *job)
80       amdgpu_job_do_core_dump(tmp_adev, job);
91       struct amdgpu_job *job = to_amdgpu_job(s_job);
101      /* Effectively the job is aborted as the device is gone */
106       * Do the coredump immediately after a job timeout to get a very
109       * before job timeout
112      amdgpu_job_core_dump(adev, job);
116          amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
[all …]
amdgpu_ib.c
108   * @job: job to schedule
125           struct amdgpu_ib *ibs, struct amdgpu_job *job,
139      int vmid = AMDGPU_JOB_GET_VMID(job);
148      /* ring tests don't use a job */
149      if (job) {
150          vm = job->vm;
151          fence_ctx = job->base.s_fence ?
152              job->base.s_fence->finished.context : 0;
153          shadow_va = job
[all …]
/linux/drivers/gpu/host1x/
job.c
3     * Tegra host1x Job
21   #include "job.h"
30       struct host1x_job *job = NULL;
51       mem = job = kzalloc(total, GFP_KERNEL);
52       if (!job)
55       job->enable_firewall = enable_firewall;
57       kref_init(&job->ref);
58       job->channel = ch;
62       job->relocs = num_relocs ? mem : NULL;
64       job->unpins = num_unpins ? mem : NULL;
[all …]
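The host1x_job_alloc() lines above show a common kernel idiom: the job struct and its trailing variable-length arrays come from one kzalloc(), with the array pointers carved out of the same block. A minimal sketch of the pattern with illustrative (non-host1x) names; real code should guard the size arithmetic with struct_size() or check_add_overflow():

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_reloc {
        u64 target;
        u64 offset;
    };

    struct demo_job {
        struct kref ref;
        struct demo_reloc *relocs;
        unsigned int num_relocs;
    };

    static struct demo_job *demo_job_alloc(unsigned int num_relocs)
    {
        size_t total = sizeof(struct demo_job) +
                   num_relocs * sizeof(struct demo_reloc);
        struct demo_job *job;
        void *mem;

        mem = job = kzalloc(total, GFP_KERNEL);
        if (!job)
            return NULL;

        kref_init(&job->ref);

        /* The reloc array lives directly behind the struct itself. */
        mem += sizeof(struct demo_job);
        job->relocs = num_relocs ? mem : NULL;
        job->num_relocs = num_relocs;

        return job;
    }

One allocation means one kfree() in the kref release callback, and no partially-allocated states to unwind on the error path.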
/linux/drivers/gpu/drm/imagination/
pvr_queue.c
238   * Call this function to allocate job CCCB and done fences. This only
303   * pvr_queue_job_fence_init() - Initializes a job done fence object.
308   * a job.
350  static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count)
353       * and a command for the job itself.
356          pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len);
361   * @job: Job to operate on.
365  static unsigned long job_count_remaining_native_deps(struct pvr_job *job)
371      xa_for_each(&job->base.dependencies, index, fence) {
386   * pvr_queue_get_job_cccb_fence() - Get the CCCB fence attached to a job.
[all …]
pvr_job.h
34   /** @ref_count: Refcount for job. */
37 /** @type: Type of job. */
40 /** @id: Job ID number. */
44 * @paired_job: Job paired to this job.
50 * fragment job to execute when the Parameter Manager runs out of memory.
52 * The geometry job should point to the fragment job it's paired with,
53 * and the fragment job should point to the geometry job it's paired with.
63 /** @done_fence: Fence to signal when the job is done. */
87 * @has_pm_ref: True if the job has a power ref, thus forcing the GPU to stay on until
88 * the job is done.
[all …]
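The @paired_job documentation above calls for symmetric links between a geometry job and its fragment job. Purely as an illustration of that invariant (pvr_pair_jobs() is a hypothetical helper, not a driver API; it assumes the struct pvr_job definitions documented above):

    static void pvr_pair_jobs(struct pvr_job *geom, struct pvr_job *frag)
    {
        /* The geometry job points at the fragment job it's paired with... */
        geom->paired_job = frag;
        /* ...and the fragment job points back at the geometry job. */
        frag->paired_job = geom;
    }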
/linux/drivers/md/
dm-kcopyd.c
41   MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
353   * Error state of the job.
373   * Set this to ensure you are notified when the job has
380   * These fields are only used if the job has been split
414   * Functions to push and pop a job onto the head of a given job
420      struct kcopyd_job *job;
426      list_for_each_entry(job, jobs, list) {
427          if (job->op == REQ_OP_READ ||
428              !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
429              list_del(&job->list);
[all …]
/linux/block/
bsg-lib.c
31       struct bsg_job *job;
49       job = blk_mq_rq_to_pdu(rq);
50       reply = job->reply;
51       memset(job, 0, sizeof(*job));
52       job->reply = reply;
53       job->reply_len = SCSI_SENSE_BUFFERSIZE;
54       job->dd_data = job + 1;
56       job->request_len = hdr->request_len;
57       job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
58       if (IS_ERR(job->request)) {
[all …]
/linux/drivers/gpu/host1x/hw/
channel_hw.c
17   #include "../job.h"
50   static void submit_wait(struct host1x_job *job, u32 id, u32 threshold)
52       struct host1x_cdma *cdma = &job->channel->cdma;
79   static void submit_setclass(struct host1x_job *job, u32 next_class)
81       struct host1x_cdma *cdma = &job->channel->cdma;
91       if (job->memory_context)
92           stream_id = job->memory_context->stream_id;
94           stream_id = job->engine_fallback_streamid;
99           host1x_opcode_setstreamid(job->engine_streamid_offset / 4),
109  static void submit_gathers(struct host1x_job *job, struc
113  submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
163  synchronize_syncpt_base(struct host1x_job *job)
212  channel_program_cdma(struct host1x_job *job)
281      struct host1x_job *job = container_of(cb, struct host1x_job, fence_cb);  /* in job_complete_callback() */
287  channel_submit(struct host1x_job *job)
[all …]
/linux/drivers/gpu/drm/xe/
xe_sched_job.h
26    * xe_sched_job_get - get reference to Xe schedule job
27    * @job: Xe schedule job object
29    * Increment Xe schedule job's reference count
31   static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
33       kref_get(&job->refcount);
34       return job;
38    * xe_sched_job_put - put reference to Xe schedule job
39    * @job: Xe schedule job object
44   xe_sched_job_put(struct xe_sched_job *job)
50   xe_sched_job_is_error(struct xe_sched_job *job)
71   xe_sched_job_seqno(struct xe_sched_job *job)
76   xe_sched_job_lrc_seqno(struct xe_sched_job *job)
82   xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags)
[all …]
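xe_sched_job_get() above is plain kref usage, so the matching put is presumably the usual kref_put() pattern. A hedged reconstruction; the release-callback name xe_sched_job_destroy() is an assumption, not confirmed by the snippet:

    static inline void xe_sched_job_put(struct xe_sched_job *job)
    {
        /* Frees the job through the release callback on the last put. */
        kref_put(&job->refcount, xe_sched_job_destroy);
    }

Any context that holds the job past the caller's lifetime (a timeout handler, a completion interrupt) takes a reference with xe_sched_job_get() first and drops it with xe_sched_job_put() when done.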
/linux/drivers/gpu/drm/panfrost/
panfrost_job.c
111  int panfrost_job_get_slot(struct panfrost_job *job)
117      if (job->requirements & PANFROST_JD_REQ_FS)
122      if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
123          if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
124              (job->pfdev->features.nr_core_groups == 2))
126          if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
151  panfrost_get_job_chain_flag(const struct panfrost_job *job)
153      struct panfrost_fence *f = to_panfrost_fence(job->done_fence);
155      if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
164      struct panfrost_job *job  /* in panfrost_dequeue_job() */
159      struct panfrost_job *job = pfdev->jobs[slot][0];  /* in panfrost_dequeue_job() */
181  panfrost_enqueue_job(struct panfrost_device *pfdev, int slot, struct panfrost_job *job)
198  panfrost_job_hw_submit(struct panfrost_job *job, int js)
268  panfrost_acquire_object_fences(struct drm_gem_object **bos, int bo_count, struct drm_sched_job *job)
297  panfrost_job_push(struct panfrost_job *job)
337      struct panfrost_job *job = container_of(ref, struct panfrost_job,  /* in panfrost_job_cleanup() */
365  panfrost_job_put(struct panfrost_job *job)
372      struct panfrost_job *job = to_panfrost_job(sched_job);  /* in panfrost_job_free() */
381      struct panfrost_job *job = to_panfrost_job(sched_job);  /* in panfrost_job_run() */
432  panfrost_job_handle_err(struct panfrost_device *pfdev, struct panfrost_job *job, unsigned int js)
486  panfrost_job_handle_done(struct panfrost_device *pfdev, struct panfrost_job *job)
749      struct panfrost_job *job = to_panfrost_job(sched_job);  /* in panfrost_job_timedout() */
953      struct panfrost_job *job = pfdev->jobs[i][j];  /* in panfrost_job_close() */
[all …]
/linux/drivers/accel/ivpu/
ivpu_job.c
74        struct ivpu_cmdq *cmdq, struct ivpu_job *job)
79       if (job->primary_preempt_buf)
90       job->primary_preempt_buf = cmdq->primary_preempt_buf;
91       job->secondary_preempt_buf = cmdq->secondary_preempt_buf;
187      ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d, flags 0x%08x\n",
236      ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
291      ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
296      ivpu_dbg(vdev, JOB, "Command queue %d destroyed, ctx %d\n",
372       * and FW loses job queue state. The next time job queue
408  ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
493  ivpu_job_destroy(struct ivpu_job *job)
514      struct ivpu_job *job;  /* in ivpu_job_create() */
542      struct ivpu_job *job;  /* in ivpu_job_remove_from_submitted_jobs() */
557      struct ivpu_job *job;  /* in ivpu_job_signal_and_destroy() */
608      struct ivpu_job *job;  /* in ivpu_jobs_abort_all() */
621      struct ivpu_job *job;  /* in ivpu_cmdq_abort_all_jobs() */
633  ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
716  ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles, u32 buf_count, u32 commands_offset)
786      struct ivpu_job *job;  /* in ivpu_submit() */
1006     struct ivpu_job *job;  /* in ivpu_context_abort_work_fn() */
[all …]
/linux/drivers/gpu/drm/scheduler/
sched_main.c
32    * backend operations to the scheduler like submitting a job to hardware run queue,
33    * returning the dependencies of a job etc.
46    * Note that once a job was taken from the entities queue and pushed to the
61    * Once a job is executed (but not yet finished), the job's credits contribute
62    * to the scheduler's credit count until the job is finished. If by executing
63    * one more job the scheduler's credit count would exceed the scheduler's
64    * credit limit, the job won't be executed. Instead, the scheduler will wait
112   * Return true if we can push at least one more job from @entity, false
124      /* If a job exceeds the credit limit, truncate it to the credit limit
244   * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
[all …]
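The sched_main.c comments above describe credit-based throttling: executing-but-unfinished jobs hold credits against the scheduler's limit, and a job whose credits would push the count past the limit is not run. A minimal sketch of that accounting with illustrative names (the real drm_sched internals differ):

    #include <linux/atomic.h>
    #include <linux/types.h>

    struct demo_sched {
        atomic_t credit_count;  /* credits held by executing, unfinished jobs */
        u32 credit_limit;       /* configured ceiling */
    };

    static bool demo_can_run(struct demo_sched *s, u32 job_credits)
    {
        /* Mirrors the truncation noted in drm_sched_can_queue() above. */
        if (job_credits > s->credit_limit)
            job_credits = s->credit_limit;

        return atomic_read(&s->credit_count) + job_credits <= s->credit_limit;
    }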
sched_entity.c
44    * @guilty: atomic_t set to 1 when a job on this queue
156   * drm_sched_entity_error - return error of last scheduled job
159   * Opportunistically return the error of the last scheduled job. Result can
181      struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
186      xa_for_each(&job->dependencies, index, f) {
204          xa_erase(&job->dependencies, index);
205          if (f && !dma_fence_add_callback(f, &job->finish_cb,
212      drm_sched_fence_scheduled(job->s_fence, NULL);
213      drm_sched_fence_finished(job->s_fence, -ESRCH);
214      WARN_ON(job->s_fence->parent);
[all …]
/linux/drivers/scsi/lpfc/
lpfc_bsg.c
71   /* driver data associated with the job */
96       struct bsg_job *set_job; /* job waiting for this iocb to finish */
305      struct bsg_job *job;  /* in lpfc_bsg_send_mgmt_cmd_cmp() */
316      /* Determine if job has been aborted */
318      job = dd_data->set_job;
319      if (job) {
320          bsg_reply = job->reply;
321          /* Prevent timeout handling from trying to abort job */
322          job->dd_data = NULL;
342      if (job) {
[all …]
/linux/drivers/gpu/drm/msm/
msm_gem_vma.c
113  /** @fence: The fence that is signaled when job completes */
115  /** @queue: The queue that the job runs on */
153  static inline struct msm_vm_bind_job *to_msm_vm_bind_job(struct drm_sched_job *job)
155      return container_of(job, struct msm_vm_bind_job, base);
460      struct msm_vm_bind_job *job;  /* member */
473      list_add_tail(&op->node, &arg->job->vm_ops);
484      return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset,
492      struct msm_vm_bind_job *job = arg->job;  /* in msm_gem_vm_sm_step_map() */
525          .queue_id = job->queue->id,
542      struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;  /* in msm_gem_vm_sm_step_remap() */
[all …]
/linux/drivers/gpu/drm/tegra/
submit.c
28           "%s: job submission failed: " fmt "\n", \
327  static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,
337      /* Syncpt ref will be dropped on job release */
344      job->syncpt = host1x_syncpt_get(sp);
345      job->syncpt_incrs = args->syncpt.increments;
350  static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,
370          SUBMIT_ERR(context, "too many total words in job");
381          SUBMIT_ERR(context, "job was rejected by firewall");
385          host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);
399      struct host1x_job *job;  /* in submit_create_job() */
[all …]
/linux/include/drm/
gpu_scheduler.h
75    * struct drm_sched_entity - A wrapper around a job queue (typically
173   * The dependency fence of the job which is on the top of the job queue.
194   * Points to the finished fence of the last scheduled job. Only written
201   * @last_user: last group leader pushing a job into the entity.
225   * Marks earliest job waiting in SW queue
262   * struct drm_sched_fence - fences corresponding to the scheduling of a job.
267   * when the job is scheduled.
273   * when the job is completed.
275   * When setting up an out fence for the job, you
[all …]
/linux/Documentation/devicetree/bindings/powerpc/fsl/
raideng.txt
30   There must be a sub-node for each job queue present in RAID Engine
33 - compatible: Should contain "fsl,raideng-v1.0-job-queue" as the value
34 This identifies the job queue interface
35 - reg: offset and length of the register set for job queue
42 compatible = "fsl,raideng-v1.0-job-queue";
48 There must be a sub-node for each job ring present in RAID Engine
49 This node must be a sub-node of job queue node
51 - compatible: Must contain "fsl,raideng-v1.0-job-ring" as the value
52 This identifies job ring. Should contain either
55 - reg: offset and length of the register set for job ring
[all …]
/linux/drivers/crypto/caam/
Kconfig
20     This module creates job ring devices, and configures h/w
36 tristate "Freescale CAAM Job Ring driver backend"
40 Enables the driver module for Job Rings which are part of
42 and Assurance Module (CAAM). This module adds a job ring operation
51 int "Job Ring size"
55 Select size of Job Rings as a power of 2, within the
68 bool "Job Ring interrupt coalescing"
70 Enable the Job Ring's interrupt coalescing feature.
76 int "Job Ring interrupt coalescing count threshold"
84 equal or greater than the job ring size will force timeouts.
[all …]
/linux/Documentation/core-api/
padata.rst
14   is currently the sole consumer of padata's serialized job support.
16 Padata also supports multithreaded jobs, splitting up the job evenly while load
38 A padata_shell is used to submit a job to padata and allows a series of such
80 Running A Job
84 padata_priv structure, which represents one job::
99 The submission of the job is done with::
105 points to the preferred CPU to be used for the final callback when the job is
108 padata_do_parallel() is zero on success, indicating that the job is in
114 Each job submitted to padata_do_parallel() will, in turn, be passed to
123 parallel() will take responsibility for the job from this point. The job
[all …]
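Pulling the padata.rst fragments above together, a hedged sketch of one job's lifecycle: embed a padata_priv in your own structure, set the parallel() and serial() callbacks, submit with padata_do_parallel(), and hand the job on with padata_do_serial() when the parallel part finishes. 'ps' is assumed to be a previously allocated padata_shell; error handling is trimmed:

    #include <linux/kernel.h>
    #include <linux/padata.h>

    struct my_job {
        struct padata_priv padata;  /* must be embedded */
        int data;
    };

    static void my_parallel(struct padata_priv *padata)
    {
        struct my_job *job = container_of(padata, struct my_job, padata);

        job->data *= 2;             /* the parallelizable work */
        padata_do_serial(padata);   /* queue for in-order serialization */
    }

    static void my_serial(struct padata_priv *padata)
    {
        /* Runs in submission order, one job at a time. */
    }

    static int my_submit(struct padata_shell *ps, struct my_job *job)
    {
        int cb_cpu = 0;             /* preferred CPU for serial(); illustrative */

        job->padata.parallel = my_parallel;
        job->padata.serial = my_serial;
        return padata_do_parallel(ps, &job->padata, &cb_cpu);
    }

A zero return means the job is in progress; parallel() may run on another CPU, and serial() later runs in the order jobs were submitted.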
/linux/drivers/gpu/drm/
drm_writeback.c
438  int drm_writeback_prepare_job(struct drm_writeback_job *job)
440      struct drm_writeback_connector *connector = job->connector;
446      ret = funcs->prepare_writeback_job(connector, job);
451      job->prepared = true;
457   * drm_writeback_queue_job - Queue a writeback job for later signalling
458   * @wb_connector: The writeback connector to queue a job on
459   * @conn_state: The connector state containing the job to queue
461   * This function adds the job contained in @conn_state to the job_queue for a
462   * writeback connector. It takes ownership of the writeback job and sets the
463   * @conn_state->writeback_job to NULL, and so no access to the job may be
[all …]
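The drm_writeback_queue_job() kernel-doc above says the call takes ownership of conn_state->writeback_job and NULLs it. A hedged sketch of how a driver's commit and completion paths might pair it with drm_writeback_signal_completion(); the my_* driver functions are hypothetical:

    static void my_commit_writeback(struct drm_writeback_connector *wb_conn,
                                    struct drm_connector_state *conn_state)
    {
        /* Hands the job over to the connector's job_queue. */
        drm_writeback_queue_job(wb_conn, conn_state);

        /* ... program the hardware to write the frame out ... */
    }

    static void my_writeback_done_irq(struct drm_writeback_connector *wb_conn)
    {
        /* Completes the oldest queued job and signals its out-fence. */
        drm_writeback_signal_completion(wb_conn, 0);
    }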
/linux/drivers/ufs/core/
ufs_bsg.c
29   static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
33       struct ufs_bsg_request *bsg_request = job->request;
47       if (*desc_len > job->request_payload.payload_len) {
57       sg_copy_to_buffer(job->request_payload.sg_list,
58           job->request_payload.sg_cnt, descp,
67   static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job)
69       struct ufs_rpmb_request *rpmb_request = job->request;
70       struct ufs_rpmb_reply *rpmb_reply = job->reply;
107      payload = &job->request_payload;
132  static int ufs_bsg_request(struct bsg_job *job)
[all …]
/linux/drivers/accel/habanalabs/common/
hw_queue.c
229   * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
272   * ext_queue_schedule_job - submit a JOB to an external queue
274   * @job: pointer to the job that needs to be submitted to the queue
279  static void ext_queue_schedule_job(struct hl_cs_job *job)
281      struct hl_device *hdev = job->cs->ctx->hdev;
282      struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
292       * Update the JOB ID inside the BD CTL so the device would know what
297      cb = job->patched_cb;
298      len = job->job_cb_size;
302      if (!cs_needs_completion(job->cs))
[all …]
