Lines matching full:job

229  * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
272 * ext_queue_schedule_job - submit a JOB to an external queue
274 * @job: pointer to the job that needs to be submitted to the queue
279 static void ext_queue_schedule_job(struct hl_cs_job *job) in ext_queue_schedule_job() argument
281 struct hl_device *hdev = job->cs->ctx->hdev; in ext_queue_schedule_job()
282 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in ext_queue_schedule_job()
292 * Update the JOB ID inside the BD CTL so the device would know what in ext_queue_schedule_job()
297 cb = job->patched_cb; in ext_queue_schedule_job()
298 len = job->job_cb_size; in ext_queue_schedule_job()
302 if (!cs_needs_completion(job->cs)) in ext_queue_schedule_job()
322 job->user_cb_size, in ext_queue_schedule_job()
326 job->contains_dma_pkt); in ext_queue_schedule_job()
328 q->shadow_queue[hl_pi_2_offset(q->pi)] = job; in ext_queue_schedule_job()
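
The ext_queue_schedule_job() fragments above resolve the device and queue from the job, take the patched CB and its size, and record the job in the queue's shadow array at the slot derived from the producer index, so a later completion can be matched back to its job. Below is a minimal standalone sketch of that bookkeeping; the toy_* types, QUEUE_LEN and pi_to_offset() are illustrative stand-ins (hl_pi_2_offset() presumably wraps the producer index into a power-of-two ring), not the driver's real definitions.

#include <stdint.h>
#include <stdio.h>

#define QUEUE_LEN 256                           /* assumed power-of-two ring size */

struct toy_cb {                                 /* stand-in for a command buffer */
        uint64_t bus_address;
};

struct toy_job {                                /* stand-in for struct hl_cs_job */
        struct toy_cb *patched_cb;
        uint32_t job_cb_size;
};

struct toy_queue {                              /* stand-in for struct hl_hw_queue */
        struct toy_job *shadow_queue[QUEUE_LEN];
        uint32_t pi;                            /* free-running producer index */
};

/* Assumed behaviour of hl_pi_2_offset(): wrap the producer index into the ring. */
static uint32_t pi_to_offset(uint32_t pi)
{
        return pi & (QUEUE_LEN - 1);
}

/* Simplified external-queue submission: publish the BD (elided here) and keep
 * the job reachable from its ring slot until its completion arrives. */
static void toy_ext_queue_schedule_job(struct toy_queue *q, struct toy_job *job)
{
        uint32_t slot = pi_to_offset(q->pi);

        printf("CB 0x%llx, len %u -> slot %u\n",
               (unsigned long long)job->patched_cb->bus_address,
               job->job_cb_size, slot);

        q->shadow_queue[slot] = job;
        q->pi++;
}

int main(void)
{
        struct toy_cb cb = { .bus_address = 0x1000 };
        struct toy_job job = { .patched_cb = &cb, .job_cb_size = 64 };
        struct toy_queue q = { .pi = 0 };

        toy_ext_queue_schedule_job(&q, &job);
        return 0;
}
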
337 * int_queue_schedule_job - submit a JOB to an internal queue
339 * @job: pointer to the job that needs to be submitted to the queue
344 static void int_queue_schedule_job(struct hl_cs_job *job) in int_queue_schedule_job() argument
346 struct hl_device *hdev = job->cs->ctx->hdev; in int_queue_schedule_job()
347 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in int_queue_schedule_job()
352 bd.len = cpu_to_le32(job->job_cb_size); in int_queue_schedule_job()
354 if (job->is_kernel_allocated_cb) in int_queue_schedule_job()
358 bd.ptr = cpu_to_le64(job->user_cb->bus_address); in int_queue_schedule_job()
360 bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb); in int_queue_schedule_job()
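
The int_queue_schedule_job() fragments show the internal-queue path filling a bus descriptor directly: the length comes from job_cb_size, and the pointer field is either the kernel-allocated CB's DMA address or, otherwise, the user-supplied user_cb value reinterpreted as an address. A compilable sketch of that selection follows; the toy_* types and the toy_cpu_to_le*() helpers (assumed here to behave like the kernel's byte-order conversions, identity on a little-endian host) are hypothetical stand-ins.

#include <stdint.h>

/* Stand-ins for the kernel's cpu_to_le32()/cpu_to_le64(); on a little-endian
 * host these are identity conversions. */
static uint32_t toy_cpu_to_le32(uint32_t v) { return v; }
static uint64_t toy_cpu_to_le64(uint64_t v) { return v; }

struct toy_int_cb {
        uint64_t bus_address;                   /* DMA address of a kernel-allocated CB */
};

struct toy_int_job {                            /* stand-in for struct hl_cs_job */
        struct toy_int_cb *user_cb;
        uint32_t job_cb_size;
        int is_kernel_allocated_cb;
};

struct toy_bd {                                 /* stand-in for the queue's bus descriptor */
        uint32_t len;                           /* little-endian on the wire */
        uint64_t ptr;                           /* little-endian on the wire */
};

/* Mirror of the selection in the fragments above: a kernel-allocated CB is
 * referenced by its DMA address, otherwise the user-provided value is used
 * as the address itself. */
struct toy_bd toy_int_queue_fill_bd(const struct toy_int_job *job)
{
        struct toy_bd bd;

        bd.len = toy_cpu_to_le32(job->job_cb_size);
        if (job->is_kernel_allocated_cb)
                bd.ptr = toy_cpu_to_le64(job->user_cb->bus_address);
        else
                bd.ptr = toy_cpu_to_le64((uint64_t)(uintptr_t)job->user_cb);

        return bd;
}
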
373 * hw_queue_schedule_job - submit a JOB to a H/W queue
375 * @job: pointer to the job that needs to be submitted to the queue
380 static void hw_queue_schedule_job(struct hl_cs_job *job) in hw_queue_schedule_job() argument
382 struct hl_device *hdev = job->cs->ctx->hdev; in hw_queue_schedule_job()
383 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; in hw_queue_schedule_job()
393 offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1); in hw_queue_schedule_job()
397 len = job->job_cb_size; in hw_queue_schedule_job()
405 if (job->patched_cb) in hw_queue_schedule_job()
406 ptr = job->patched_cb->bus_address; in hw_queue_schedule_job()
407 else if (job->is_kernel_allocated_cb) in hw_queue_schedule_job()
408 ptr = job->user_cb->bus_address; in hw_queue_schedule_job()
410 ptr = (u64) (uintptr_t) job->user_cb; in hw_queue_schedule_job()
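
hw_queue_schedule_job() derives the completion slot from the CS sequence number masked against max_pending_cs (which the masking implies is a power of two) and chooses the CB address with a three-way fallback: patched CB first, then a kernel-allocated CB's DMA address, otherwise the raw user_cb value. A small sketch under those assumptions, with hypothetical toy_* types:

#include <stdint.h>

struct toy_hw_cb {
        uint64_t bus_address;
};

struct toy_hw_job {                             /* stand-in for struct hl_cs_job */
        struct toy_hw_cb *patched_cb;           /* set when the driver patched the CB */
        struct toy_hw_cb *user_cb;
        int is_kernel_allocated_cb;
        uint64_t cs_sequence;                   /* stand-in for job->cs->sequence */
};

/* Completion slot: the CS sequence wrapped into the pending-CS window.
 * max_pending_cs is assumed to be a power of two, as the masking implies. */
uint64_t toy_completion_offset(const struct toy_hw_job *job, uint64_t max_pending_cs)
{
        return job->cs_sequence & (max_pending_cs - 1);
}

/* Three-way CB address selection mirroring the fragments above: prefer the
 * patched CB, then a kernel-allocated CB's DMA address, otherwise treat the
 * user-supplied value as the address itself. */
uint64_t toy_job_cb_address(const struct toy_hw_job *job)
{
        if (job->patched_cb)
                return job->patched_cb->bus_address;
        else if (job->is_kernel_allocated_cb)
                return job->user_cb->bus_address;
        else
                return (uint64_t)(uintptr_t)job->user_cb;
}
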
416 struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl) in init_signal_cs() argument
423 q_idx = job->hw_queue_id; in init_signal_cs()
438 hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb, in init_signal_cs()
444 job->cs->sob_addr_offset = hw_sob->sob_addr; in init_signal_cs()
445 job->cs->initial_sob_count = prop->next_sob_val - 1; in init_signal_cs()
451 struct hl_cs *cs, struct hl_cs_job *job, in hl_hw_queue_encaps_sig_set_sob_info() argument
468 if (job->encaps_sig_wait_offset) in hl_hw_queue_encaps_sig_set_sob_info()
469 offset = job->encaps_sig_wait_offset - 1; in hl_hw_queue_encaps_sig_set_sob_info()
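
The hl_hw_queue_encaps_sig_set_sob_info() fragment suggests that encaps_sig_wait_offset is stored 1-based so that 0 can mean "no offset supplied", and is converted to a 0-based offset only when set. A tiny sketch of that convention, with a hypothetical helper name:

#include <stdint.h>

/* Hypothetical helper mirroring the fragment above: encaps_sig_wait_offset is
 * 1-based so that 0 can mean "not set"; a set value becomes a 0-based offset. */
uint32_t toy_encaps_wait_offset(uint32_t encaps_sig_wait_offset)
{
        uint32_t offset = 0;

        if (encaps_sig_wait_offset)
                offset = encaps_sig_wait_offset - 1;

        return offset;
}
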
475 struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl) in init_wait_cs() argument
482 q_idx = job->hw_queue_id; in init_wait_cs()
494 hl_hw_queue_encaps_sig_set_sob_info(hdev, cs, job, cs_cmpl); in init_wait_cs()
500 job->encaps_sig_wait_offset); in init_wait_cs()
535 wait_prop.data = (void *) job->patched_cb; in init_wait_cs()
562 struct hl_cs_job *job; in init_signal_wait_cs() local
567 /* There is only one job in a signal/wait CS */ in init_signal_wait_cs()
568 job = list_first_entry(&cs->job_list, struct hl_cs_job, in init_signal_wait_cs()
572 rc = init_signal_cs(hdev, job, cs_cmpl); in init_signal_wait_cs()
574 rc = init_wait_cs(hdev, cs, job, cs_cmpl); in init_signal_wait_cs()
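
A signal/wait CS carries exactly one job, so init_signal_wait_cs() just takes the head of cs->job_list and routes it to the signal or the wait setup path. The sketch below mirrors that dispatch with hypothetical toy_* types and stubs in place of init_signal_cs()/init_wait_cs():

#include <stdio.h>

enum toy_cs_type { TOY_CS_SIGNAL, TOY_CS_WAIT };        /* hypothetical */

struct toy_sw_job { int id; };

struct toy_sw_cs {
        enum toy_cs_type type;
        struct toy_sw_job *first_job;           /* a signal/wait CS has exactly one job */
};

/* Stubs standing in for init_signal_cs() / init_wait_cs(). */
static int toy_init_signal(struct toy_sw_job *job) { printf("signal job %d\n", job->id); return 0; }
static int toy_init_wait(struct toy_sw_job *job)   { printf("wait job %d\n", job->id); return 0; }

/* Mirror of the dispatch in the fragments: take the single job and route it
 * to the signal or the wait setup path. */
int toy_init_signal_wait_cs(struct toy_sw_cs *cs)
{
        struct toy_sw_job *job = cs->first_job;

        return cs->type == TOY_CS_SIGNAL ? toy_init_signal(job) : toy_init_wait(job);
}
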
642 struct hl_cs_job *job, *tmp; in hl_hw_queue_schedule_cs() local
772 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in hl_hw_queue_schedule_cs()
773 switch (job->queue_type) { in hl_hw_queue_schedule_cs()
775 ext_queue_schedule_job(job); in hl_hw_queue_schedule_cs()
778 int_queue_schedule_job(job); in hl_hw_queue_schedule_cs()
781 hw_queue_schedule_job(job); in hl_hw_queue_schedule_cs()
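
hl_hw_queue_schedule_cs() walks the CS's job list and hands each job to the scheduler that matches its queue type (external, internal or H/W). The standalone sketch below shows that fan-out with a plain array in place of the kernel's list_for_each_entry_safe() iteration and hypothetical stubs for the three per-type schedulers:

#include <stddef.h>
#include <stdio.h>

enum toy_queue_type { TOY_QUEUE_EXT, TOY_QUEUE_INT, TOY_QUEUE_HW };     /* hypothetical */

struct toy_sched_job {
        enum toy_queue_type queue_type;
        int id;
};

/* Stubs standing in for ext_queue_schedule_job() and friends. */
static void toy_ext_schedule(struct toy_sched_job *j) { printf("ext job %d\n", j->id); }
static void toy_int_schedule(struct toy_sched_job *j) { printf("int job %d\n", j->id); }
static void toy_hw_schedule(struct toy_sched_job *j)  { printf("hw  job %d\n", j->id); }

/* Fan-out mirroring the switch in the fragments above: each job goes to the
 * scheduler for its queue type. */
void toy_schedule_cs_jobs(struct toy_sched_job *jobs, size_t n)
{
        for (size_t i = 0; i < n; i++) {
                switch (jobs[i].queue_type) {
                case TOY_QUEUE_EXT:
                        toy_ext_schedule(&jobs[i]);
                        break;
                case TOY_QUEUE_INT:
                        toy_int_schedule(&jobs[i]);
                        break;
                case TOY_QUEUE_HW:
                        toy_hw_schedule(&jobs[i]);
                        break;
                }
        }
}
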
1061 * user context. It also means that if a job was submitted by in queue_fini()
1062 * the kernel driver (e.g. context creation), the job itself was in queue_fini()