| /linux/drivers/gpu/drm/nouveau/ |
| nouveau_sched.c |
      26  nouveau_job_init(struct nouveau_job *job,
      32          INIT_LIST_HEAD(&job->entry);
      34          job->file_priv = args->file_priv;
      35          job->cli = nouveau_cli(args->file_priv);
      36          job->sched = sched;
      38          job->sync = args->sync;
      39          job->resv_usage = args->resv_usage;
      41          job->ops = args->ops;
      43          job->in_sync.count = args->in_sync.count;
      44          if (job->in_sync.count) {
      [all …]
|
| nouveau_exec.c |
      89  nouveau_exec_job_submit(struct nouveau_job *job,
      92          struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
      93          struct nouveau_cli *cli = job->cli;
     120  nouveau_exec_job_armed_submit(struct nouveau_job *job,
     123          drm_gpuvm_exec_resv_add_fence(vme, job->done_fence,
     124                                        job->resv_usage, job->resv_usage);
     129  nouveau_exec_job_run(struct nouveau_job *job)
     131          struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job);
     138                  NV_PRINTK(err, job->cli, "nv50cal_space: %d\n", ret);
     154                  NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
      [all …]
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_job.c |
      37                                 struct amdgpu_job *job)
      48          amdgpu_coredump(adev, true, false, job);
      52                               struct amdgpu_job *job)
      80                  amdgpu_job_do_core_dump(tmp_adev, job);
      91          struct amdgpu_job *job = to_amdgpu_job(s_job);
     112                  amdgpu_job_core_dump(adev, job);
     116              amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
     123                    job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
     126          ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
     138          r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence);
      [all …]
|
| /linux/drivers/gpu/host1x/ |
| job.c |
      30          struct host1x_job *job = NULL;
      51          mem = job = kzalloc(total, GFP_KERNEL);
      52          if (!job)
      55          job->enable_firewall = enable_firewall;
      57          kref_init(&job->ref);
      58          job->channel = ch;
      62          job->relocs = num_relocs ? mem : NULL;
      64          job->unpins = num_unpins ? mem : NULL;
      66          job->cmds = num_cmdbufs ? mem : NULL;
      68          job->addr_phys = num_unpins ? mem : NULL;
      [all …]
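
The host1x_job_alloc() hits above show a single-allocation pattern: one kzalloc() sized for the job structure plus all of its trailing arrays, with the mem cursor then advanced so every array pointer lands inside the same block and a single kfree() releases everything. A minimal sketch of that pattern follows; the demo_job/demo_reloc types and field names are hypothetical, only kzalloc()/kfree() and the carve-out idea are taken from the snippet, and a real implementation should also guard the size arithmetic against overflow.

    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_reloc {
            u64 target_offset;
            u32 cmdbuf_offset;
    };

    /* Hypothetical job with two variable-length arrays carved from one block. */
    struct demo_job {
            struct demo_reloc *relocs;
            dma_addr_t *addr_phys;
            unsigned int num_relocs;
            unsigned int num_unpins;
    };

    static struct demo_job *demo_job_alloc(unsigned int num_relocs,
                                           unsigned int num_unpins)
    {
            struct demo_job *job;
            size_t total;
            void *mem;

            /* One allocation covering the struct and both trailing arrays. */
            total = sizeof(*job) +
                    num_relocs * sizeof(struct demo_reloc) +
                    num_unpins * sizeof(dma_addr_t);

            mem = job = kzalloc(total, GFP_KERNEL);
            if (!job)
                    return NULL;

            job->num_relocs = num_relocs;
            job->num_unpins = num_unpins;

            /* Carve the arrays out of the tail of the same allocation. */
            mem += sizeof(*job);
            job->relocs = num_relocs ? mem : NULL;

            mem += num_relocs * sizeof(struct demo_reloc);
            job->addr_phys = num_unpins ? mem : NULL;

            return job;     /* a single kfree(job) frees the arrays too */
    }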
|
| /linux/drivers/md/ |
| dm-kcopyd.c |
     420          struct kcopyd_job *job;
     426          list_for_each_entry(job, jobs, list) {
     427                  if (job->op == REQ_OP_READ ||
     428                      !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
     429                          list_del(&job->list);
     430                          return job;
     433                  if (job->write_offset == job->master_job->write_offset) {
     434                          job->master_job->write_offset += job->source.count;
     435                          list_del(&job->list);
     436                          return job;
      [all …]
|
| /linux/block/ |
| bsg-lib.c |
      31          struct bsg_job *job;
      49          job = blk_mq_rq_to_pdu(rq);
      50          reply = job->reply;
      51          memset(job, 0, sizeof(*job));
      52          job->reply = reply;
      53          job->reply_len = SCSI_SENSE_BUFFERSIZE;
      54          job->dd_data = job + 1;
      56          job->request_len = hdr->request_len;
      57          job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
      58          if (IS_ERR(job->request)) {
      [all …]
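
bsg-lib builds each struct bsg_job out of the block request's PDU and then hands it to the transport's handler; the fields set above (request, request_len, reply, reply_len, dd_data) are what that handler consumes, and the lpfc excerpt further down completes a job with bsg_job_done(). A rough sketch of such a handler is below. The demo_bsg_reply layout and the 0x01 opcode are invented for illustration; bsg_job_done() and the struct bsg_job fields are the real interface.

    #include <linux/bsg-lib.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Hypothetical reply layout this transport keeps in job->reply. */
    struct demo_bsg_reply {
            u32 result;
    };

    /* Hypothetical handler a transport could register as its bsg_job_fn. */
    static int demo_bsg_handler(struct bsg_job *job)
    {
            struct demo_bsg_reply *reply = job->reply;
            const u8 *req = job->request;

            /* job->request/request_len come from the userspace sg_io_v4 header. */
            if (job->request_len < 1 || req[0] != 0x01) {
                    reply->result = -EOPNOTSUPP;
                    bsg_job_done(job, -EOPNOTSUPP, 0);
                    return 0;
            }

            reply->result = 0;
            job->reply_len = sizeof(*reply);

            /* Complete the request; no reply payload data was produced here. */
            bsg_job_done(job, 0, 0);
            return 0;
    }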
|
| /linux/drivers/gpu/drm/imagination/ |
| pvr_queue.c |
     350  static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count)
     356                  pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len);
     365  static unsigned long job_count_remaining_native_deps(struct pvr_job *job)
     371          xa_for_each(&job->base.dependencies, index, fence) {
     398  pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job)
     406          if (!job->cccb_fence)
     412          native_deps_remaining = job_count_remaining_native_deps(job);
     413          if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) {
     414                  pvr_queue_fence_put(job->cccb_fence);
     415                  job->cccb_fence = NULL;
      [all …]
|
| pvr_job.h |
     104  pvr_job_get(struct pvr_job *job)
     106          if (job)
     107                  kref_get(&job->ref_count);
     109          return job;
     112  void pvr_job_put(struct pvr_job *job);
     119  pvr_job_release_pm_ref(struct pvr_job *job)
     121          if (job->has_pm_ref) {
     122                  pvr_power_put(job->pvr_dev);
     123                  job->has_pm_ref = false;
     136  pvr_job_get_pm_ref(struct pvr_job *job)
      [all …]
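
pvr_job_get()/pvr_job_put() are the standard kref pattern: get takes a reference when the pointer is non-NULL and returns it so callers can chain, put drops a reference and frees the object from a release callback on the last drop. A generic sketch of the same pattern, assuming a made-up demo_job type; kref_init()/kref_get()/kref_put() are the real API.

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct demo_job {
            struct kref ref_count;
            /* ... job payload ... */
    };

    static void demo_job_release(struct kref *kref)
    {
            struct demo_job *job = container_of(kref, struct demo_job, ref_count);

            kfree(job);
    }

    /* Take a reference; tolerate NULL so callers can chain the result. */
    static inline struct demo_job *demo_job_get(struct demo_job *job)
    {
            if (job)
                    kref_get(&job->ref_count);

            return job;
    }

    /* Drop a reference; the final put runs demo_job_release(). */
    static inline void demo_job_put(struct demo_job *job)
    {
            if (job)
                    kref_put(&job->ref_count, demo_job_release);
    }

    static struct demo_job *demo_job_create(void)
    {
            struct demo_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

            if (job)
                    kref_init(&job->ref_count);     /* refcount starts at 1 */

            return job;
    }

Passing the release function to kref_put() keeps the free path next to the refcounting, so callers never have to know when the last reference goes away.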
|
| /linux/drivers/gpu/drm/msm/ |
| msm_gem_vma.c |
     153  static inline struct msm_vm_bind_job *to_msm_vm_bind_job(struct drm_sched_job *job)
     155          return container_of(job, struct msm_vm_bind_job, base);
     460          struct msm_vm_bind_job *job;
     473          list_add_tail(&op->node, &arg->job->vm_ops);
     484          return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset,
     492          struct msm_vm_bind_job *job = arg->job;
     525                  .queue_id = job->queue->id,
     542          struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
     543          struct drm_gpuvm *vm = job->vm;
     564                  .queue_id = job->queue->id,
      [all …]
|
| /linux/drivers/gpu/drm/scheduler/ |
| sched_main.c |
     523                                         struct drm_sched_job *job)
     526          list_add(&job->list, &sched->pending_list);
     539          struct drm_sched_job *job;
     546          job = list_first_entry_or_null(&sched->pending_list,
     549          if (job) {
     556                  list_del_init(&job->list);
     559                  status = job->sched->ops->timedout_job(job);
     566                          job->sched->ops->free_job(job);
     571                  drm_sched_job_reinsert_on_false_timeout(sched, job);
     797  int drm_sched_job_init(struct drm_sched_job *job,
      [all …]
|
| sched_entity.c |
     181          struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
     186          xa_for_each(&job->dependencies, index, f) {
     204                  xa_erase(&job->dependencies, index);
     205                  if (f && !dma_fence_add_callback(f, &job->finish_cb,
     212          drm_sched_fence_scheduled(job->s_fence, NULL);
     213          drm_sched_fence_finished(job->s_fence, -ESRCH);
     214          WARN_ON(job->s_fence->parent);
     215          job->sched->ops->free_job(job);
     222          struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
     227          INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
      [all …]
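
The kill-jobs path above hooks itself to each remaining dependency fence with dma_fence_add_callback() and recovers the job from the embedded dma_fence_cb via container_of(), deferring the real work to a workqueue because fence callbacks can fire in IRQ context. A stripped-down sketch of that callback pattern with a hypothetical demo_job; dma_fence_add_callback(), INIT_WORK() and schedule_work() are the real API.

    #include <linux/dma-fence.h>
    #include <linux/workqueue.h>

    /* Hypothetical job that waits for one dependency fence before finishing. */
    struct demo_job {
            struct dma_fence_cb finish_cb;
            struct work_struct work;
    };

    static void demo_job_finish_work(struct work_struct *wrk)
    {
            struct demo_job *job = container_of(wrk, typeof(*job), work);

            /* Process-context completion of the job goes here. */
            (void)job;
    }

    static void demo_job_dep_signaled(struct dma_fence *f, struct dma_fence_cb *cb)
    {
            /* Recover the job from the embedded callback node. */
            struct demo_job *job = container_of(cb, struct demo_job, finish_cb);

            /* Fence callbacks may run in IRQ context, so defer to a workqueue. */
            schedule_work(&job->work);
    }

    static void demo_job_wait_on(struct demo_job *job, struct dma_fence *dep)
    {
            INIT_WORK(&job->work, demo_job_finish_work);

            /*
             * dma_fence_add_callback() returns -ENOENT if the fence has already
             * signaled; the callback is never invoked in that case, so queue
             * the work directly.
             */
            if (dma_fence_add_callback(dep, &job->finish_cb, demo_job_dep_signaled))
                    schedule_work(&job->work);
    }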
|
| /linux/drivers/gpu/drm/tegra/ |
| submit.c |
     327  static int submit_get_syncpt(struct tegra_drm_context *context, struct host1x_job *job,
     344          job->syncpt = host1x_syncpt_get(sp);
     345          job->syncpt_incrs = args->syncpt.increments;
     350  static int submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context,
     385          host1x_job_add_gather(job, &bo->base, cmd->words, *offset * 4);
     399          struct host1x_job *job;
     412          job = host1x_job_alloc(context->channel, args->num_cmds, 0, true);
     413          if (!job) {
     415                  job = ERR_PTR(-ENOMEM);
     419          err = submit_get_syncpt(context, job, syncpoints, args);
      [all …]
|
| /linux/drivers/gpu/drm/ |
| drm_writeback.c |
     438  int drm_writeback_prepare_job(struct drm_writeback_job *job)
     440          struct drm_writeback_connector *connector = job->connector;
     446          ret = funcs->prepare_writeback_job(connector, job);
     451          job->prepared = true;
     478          struct drm_writeback_job *job;
     481          job = conn_state->writeback_job;
     485          list_add_tail(&job->list_entry, &wb_connector->job_queue);
     490  void drm_writeback_cleanup_job(struct drm_writeback_job *job)
     492          struct drm_writeback_connector *connector = job->connector;
     496          if (job->prepared && funcs->cleanup_writeback_job)
      [all …]
|
| /linux/drivers/scsi/lpfc/ |
| lpfc_bsg.c |
     305          struct bsg_job *job;
     318          job = dd_data->set_job;
     319          if (job) {
     320                  bsg_reply = job->reply;
     322                  job->dd_data = NULL;
     342          if (job) {
     361                          lpfc_bsg_copy_data(rmp, &job->reply_payload,
     376          if (job) {
     378                  bsg_job_done(job, bsg_reply->result,
     389  lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
      [all …]
|
| /linux/drivers/ufs/core/ |
| ufs_bsg.c |
      29  static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
      33          struct ufs_bsg_request *bsg_request = job->request;
      47          if (*desc_len > job->request_payload.payload_len) {
      57          sg_copy_to_buffer(job->request_payload.sg_list,
      58                            job->request_payload.sg_cnt, descp,
      67  static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job)
      69          struct ufs_rpmb_request *rpmb_request = job->request;
      70          struct ufs_rpmb_reply *rpmb_reply = job->reply;
     107          payload = &job->request_payload;
     132  static int ufs_bsg_request(struct bsg_job *job)
      [all …]
|
| /linux/drivers/accel/habanalabs/common/ |
| hw_queue.c |
     279  static void ext_queue_schedule_job(struct hl_cs_job *job)
     281          struct hl_device *hdev = job->cs->ctx->hdev;
     282          struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
     297          cb = job->patched_cb;
     298          len = job->job_cb_size;
     302          if (!cs_needs_completion(job->cs))
     322                          job->user_cb_size,
     326                          job->contains_dma_pkt);
     328          q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
     344  static void int_queue_schedule_job(struct hl_cs_job *job)
      [all …]
|
| irq.c |
      81          struct hl_cs_job *job;
      84          job = queue->shadow_queue[hl_pi_2_offset(cs_seq)];
      85          job->timestamp = timestamp;
      86          queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
     104          struct hl_cs_job *job;
     114          list_for_each_entry(job, &cs->job_list, cs_node) {
     115                  queue = &hdev->kernel_queues[job->hw_queue_id];
     205          struct timestamp_reg_work_obj *job =
     207          struct list_head *dynamic_alloc_free_list_head = job->dynamic_alloc_free_obj_head;
     209          struct list_head *free_list_head = job->free_obj_head;
      [all …]
|
| /linux/drivers/scsi/libsas/ |
| sas_host_smp.c |
     225  void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost)
     233          if (job->request_payload.payload_len < 8 ||
     234              job->reply_payload.payload_len < 8)
     238          req_data = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
     241          sg_copy_to_buffer(job->request_payload.sg_list,
     242                            job->request_payload.sg_cnt, req_data,
     243                            job->request_payload.payload_len);
     247          resp_data = kzalloc(max(job->reply_payload.payload_len, 128U),
     282                  if (job->request_payload.payload_len < 16)
     294                  if (job->request_payload.payload_len < 16)
      [all …]
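
The SMP host handler above linearizes the scattered bsg request payload into a temporary kernel buffer before parsing it, and (in the elided part) copies its response back into the reply scatterlist. A compressed sketch of that bounce-buffer flow with an invented 4-byte response; sg_copy_to_buffer()/sg_copy_from_buffer(), kzalloc() and bsg_job_done() are the real interfaces, while the handler itself and its completion policy are hypothetical.

    #include <linux/bsg-lib.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static int demo_smp_handler(struct bsg_job *job)
    {
            unsigned int req_len = job->request_payload.payload_len;
            unsigned int resp_len = job->reply_payload.payload_len;
            u8 resp_data[4] = { 0x41, 0x00, 0x00, 0x00 };   /* made-up response */
            size_t copied;
            u8 *req_data;

            if (req_len < 8 || resp_len < sizeof(resp_data)) {
                    bsg_job_done(job, -EINVAL, 0);
                    return 0;
            }

            /* Bounce the scattered request into one contiguous buffer. */
            req_data = kzalloc(req_len, GFP_KERNEL);
            if (!req_data) {
                    bsg_job_done(job, -ENOMEM, 0);
                    return 0;
            }
            sg_copy_to_buffer(job->request_payload.sg_list,
                              job->request_payload.sg_cnt, req_data, req_len);

            /* ... interpret req_data and build resp_data here ... */

            /* Copy the linear response back into the reply scatterlist. */
            copied = sg_copy_from_buffer(job->reply_payload.sg_list,
                                         job->reply_payload.sg_cnt,
                                         resp_data, sizeof(resp_data));

            kfree(req_data);
            bsg_job_done(job, 0, copied);
            return 0;
    }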
|
| /linux/Documentation/devicetree/bindings/powerpc/fsl/ |
| raideng.txt |
      30  There must be a sub-node for each job queue present in RAID Engine
      33  - compatible: Should contain "fsl,raideng-v1.0-job-queue" as the value
      34    This identifies the job queue interface
      35  - reg: offset and length of the register set for job queue
      42          compatible = "fsl,raideng-v1.0-job-queue";
      48  There must be a sub-node for each job ring present in RAID Engine
      49  This node must be a sub-node of job queue node
      51  - compatible: Must contain "fsl,raideng-v1.0-job-ring" as the value
      52    This identifies job ring. Should contain either
      55  - reg: offset and length of the register set for job ring
      [all …]
|
| /linux/drivers/gpu/drm/panthor/ |
| panthor_sched.c |
    1108          struct panthor_job *job;
    1117          job = list_first_entry_or_null(&queue->fence_ctx.in_flight_jobs,
    1119          group = job ? job->group : NULL;
    1127              (group->blocked_queues & BIT(job->queue_idx)) &&
    1136          if (!timer_was_active || !job)
    1541          struct panthor_job *job;
    1544          list_for_each_entry(job, &queue->fence_ctx.in_flight_jobs, node) {
    1545                  if (cs_extract >= job->ringbuf.end)
    1548                  if (cs_extract < job->ringbuf.start)
    1551                  dma_fence_set_error(job->done_fence, -EINVAL);
      [all …]
|
| /linux/drivers/gpu/drm/xe/ |
| xe_gpu_scheduler.h |
      70  xe_sched_invalidate_job(struct xe_sched_job *job, int threshold)
      72          return drm_sched_invalidate_job(&job->drm, threshold);
      76                           struct xe_sched_job *job)
      79          list_add(&job->drm.list, &sched->base.pending_list);
      92          struct xe_sched_job *job, *r_job = NULL;
      95          list_for_each_entry(job, &sched->base.pending_list, drm.list) {
      96                  struct drm_sched_fence *s_fence = job->drm.s_fence;
     100                          r_job = job;
|
| /linux/arch/powerpc/boot/dts/fsl/ |
| qoriq-sec6.0-0.dtsi |
      42          compatible = "fsl,sec-v6.0-job-ring",
      43                       "fsl,sec-v5.2-job-ring",
      44                       "fsl,sec-v5.0-job-ring",
      45                       "fsl,sec-v4.4-job-ring",
      46                       "fsl,sec-v4.0-job-ring";
      51          compatible = "fsl,sec-v6.0-job-ring",
      52                       "fsl,sec-v5.2-job-ring",
      53                       "fsl,sec-v5.0-job-ring",
      54                       "fsl,sec-v4.4-job-ring",
      55                       "fsl,sec-v4.0-job-ring";
|
| qoriq-sec5.3-0.dtsi |
      45          compatible = "fsl,sec-v5.3-job-ring",
      46                       "fsl,sec-v5.0-job-ring",
      47                       "fsl,sec-v4.0-job-ring";
      53          compatible = "fsl,sec-v5.3-job-ring",
      54                       "fsl,sec-v5.0-job-ring",
      55                       "fsl,sec-v4.0-job-ring";
      61          compatible = "fsl,sec-v5.3-job-ring",
      62                       "fsl,sec-v5.0-job-ring",
      63                       "fsl,sec-v4.0-job-ring";
      69          compatible = "fsl,sec-v5.3-job-ring",
      [all …]
|
| /linux/drivers/scsi/smartpqi/ |
| smartpqi_sas_transport.c |
     458                                       struct bsg_job *job)
     470          req_size = job->request_payload.payload_len;
     471          resp_size = job->reply_payload.payload_len;
     494          sg_copy_to_buffer(job->request_payload.sg_list,
     495                            job->reply_payload.sg_cnt, &parameters->request,
     502          struct bmic_csmi_smp_passthru_buffer *smp_buf, struct bsg_job *job,
     505          sg_copy_from_buffer(job->reply_payload.sg_list,
     506                              job->reply_payload.sg_cnt, &smp_buf->parameters.response,
     509          job->reply_len = le16_to_cpu(error_info->sense_data_length);
     510          memcpy(job->reply, error_info->data,
      [all …]
|
| /linux/Documentation/core-api/ |
| padata.rst |
      14  is currently the sole consumer of padata's serialized job support.
      16  Padata also supports multithreaded jobs, splitting up the job evenly while load
      38  A padata_shell is used to submit a job to padata and allows a series of such
      84  padata_priv structure, which represents one job::
      99  The submission of the job is done with::
     105  points to the preferred CPU to be used for the final callback when the job is
     108  padata_do_parallel() is zero on success, indicating that the job is in
     114  Each job submitted to padata_do_parallel() will, in turn, be passed to
     123  parallel() will take responsibility for the job from this point.  The job
     125  outstanding, it should be prepared to be called again with a new job before
      [all …]
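
The padata fragments describe the parallel/serial split: a caller embeds a padata_priv in its own job structure, fills in the parallel() and serial() callbacks, and submits through a padata_shell with padata_do_parallel(); the parallel side signals completion with padata_do_serial(), after which serial() runs in submission order on the callback CPU. A compressed sketch of that flow, assuming an instance and shell were already set up with padata_alloc()/padata_alloc_shell(); the demo_job wrapper and its fields are invented for illustration.

    #include <linux/errno.h>
    #include <linux/padata.h>
    #include <linux/slab.h>

    /* Caller-owned job: a padata_priv embedded in a private structure. */
    struct demo_job {
            struct padata_priv padata;
            int input;
            int result;
    };

    static void demo_parallel(struct padata_priv *padata)
    {
            struct demo_job *job = container_of(padata, struct demo_job, padata);

            /* The expensive, parallelizable part; runs on a parallel-mask CPU. */
            job->result = job->input * 2;

            /* Hand the job back for in-order serialization. */
            padata_do_serial(padata);
    }

    static void demo_serial(struct padata_priv *padata)
    {
            struct demo_job *job = container_of(padata, struct demo_job, padata);

            /* Runs in submission order on the chosen callback CPU. */
            kfree(job);
    }

    static int demo_submit(struct padata_shell *ps, int value, int *cb_cpu)
    {
            struct demo_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

            if (!job)
                    return -ENOMEM;

            job->input = value;
            job->padata.parallel = demo_parallel;
            job->padata.serial = demo_serial;

            /* Zero on success; -EBUSY or -EINVAL mean the job was not taken. */
            return padata_do_parallel(ps, &job->padata, cb_cpu);
    }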
|